nskb              453 drivers/bluetooth/bfusb.c 	struct sk_buff *nskb;
nskb              478 drivers/bluetooth/bfusb.c 	nskb = bt_skb_alloc(count + 32, GFP_KERNEL);
nskb              479 drivers/bluetooth/bfusb.c 	if (!nskb) {
nskb              484 drivers/bluetooth/bfusb.c 	nskb->dev = (void *) data;
nskb              493 drivers/bluetooth/bfusb.c 		skb_put_data(nskb, buf, 3);
nskb              494 drivers/bluetooth/bfusb.c 		skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);
nskb              501 drivers/bluetooth/bfusb.c 	if ((nskb->len % data->bulk_pkt_size) == 0) {
nskb              504 drivers/bluetooth/bfusb.c 		skb_put_data(nskb, buf, 2);
nskb              509 drivers/bluetooth/bfusb.c 	skb_queue_tail(&data->transmit_q, nskb);
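
The bfusb.c hits above form a repack-and-queue pattern: bfusb_send_frame() builds one oversized skb (count + 32 leaves slack for per-chunk headers), walks the original frame appending a 3-byte header before each bulk-sized chunk, adds a 2-byte trailer when the result lands exactly on a bulk-packet boundary, and queues the whole thing on transmit_q. A condensed sketch under assumptions: the header bytes are illustrative, struct bfusb_data stands for the driver's private state, and the boundary trailer is omitted.

/* Sketch: repack one HCI frame as <hdr><chunk> records in a single skb.
 * The 3-byte header layout is illustrative; bfusb.c's real one carries
 * a command byte plus a little-endian chunk length. */
static int bfusb_repack(struct bfusb_data *data, struct sk_buff *skb,
			unsigned int chunk)
{
	unsigned int sent = 0, count = skb->len;
	struct sk_buff *nskb;

	nskb = bt_skb_alloc(count + 32, GFP_KERNEL);	/* slack for headers */
	if (!nskb)
		return -ENOMEM;

	nskb->dev = (void *) data;

	while (count) {
		unsigned int size = min(count, chunk);
		u8 hdr[3] = { 0x00, size & 0xff, (size >> 8) & 0xff };

		skb_put_data(nskb, hdr, 3);
		skb_copy_from_linear_data_offset(skb, sent,
						 skb_put(nskb, size), size);
		sent  += size;
		count -= size;
	}

	skb_queue_tail(&data->transmit_q, nskb);
	return 0;
}
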
nskb              166 drivers/bluetooth/hci_bcsp.c 	struct sk_buff *nskb;
nskb              218 drivers/bluetooth/hci_bcsp.c 	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
nskb              219 drivers/bluetooth/hci_bcsp.c 	if (!nskb)
nskb              222 drivers/bluetooth/hci_bcsp.c 	hci_skb_pkt_type(nskb) = pkt_type;
nskb              224 drivers/bluetooth/hci_bcsp.c 	bcsp_slip_msgdelim(nskb);
nskb              245 drivers/bluetooth/hci_bcsp.c 		bcsp_slip_one_byte(nskb, hdr[i]);
nskb              253 drivers/bluetooth/hci_bcsp.c 		bcsp_slip_one_byte(nskb, data[i]);
nskb              262 drivers/bluetooth/hci_bcsp.c 		bcsp_slip_one_byte(nskb, (u8)((bcsp_txmsg_crc >> 8) & 0x00ff));
nskb              263 drivers/bluetooth/hci_bcsp.c 		bcsp_slip_one_byte(nskb, (u8)(bcsp_txmsg_crc & 0x00ff));
nskb              266 drivers/bluetooth/hci_bcsp.c 	bcsp_slip_msgdelim(nskb);
nskb              267 drivers/bluetooth/hci_bcsp.c 	return nskb;
nskb              283 drivers/bluetooth/hci_bcsp.c 		struct sk_buff *nskb;
nskb              285 drivers/bluetooth/hci_bcsp.c 		nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
nskb              287 drivers/bluetooth/hci_bcsp.c 		if (nskb) {
nskb              289 drivers/bluetooth/hci_bcsp.c 			return nskb;
nskb              306 drivers/bluetooth/hci_bcsp.c 			struct sk_buff *nskb;
nskb              308 drivers/bluetooth/hci_bcsp.c 			nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
nskb              310 drivers/bluetooth/hci_bcsp.c 			if (nskb) {
nskb              314 drivers/bluetooth/hci_bcsp.c 				return nskb;
nskb              333 drivers/bluetooth/hci_bcsp.c 		struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT);
nskb              334 drivers/bluetooth/hci_bcsp.c 		return nskb;
nskb              407 drivers/bluetooth/hci_bcsp.c 		struct sk_buff *nskb = alloc_skb(4, GFP_ATOMIC);
nskb              410 drivers/bluetooth/hci_bcsp.c 		if (!nskb)
nskb              412 drivers/bluetooth/hci_bcsp.c 		skb_put_data(nskb, conf_rsp_pkt, 4);
nskb              413 drivers/bluetooth/hci_bcsp.c 		hci_skb_pkt_type(nskb) = BCSP_LE_PKT;
nskb              415 drivers/bluetooth/hci_bcsp.c 		skb_queue_head(&bcsp->unrel, nskb);
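
bcsp_prepare_pkt() sizes its allocation as (len + 6) * 2 + 2: the 4-byte BCSP header and optional 2-byte CRC join the payload, every one of those len + 6 bytes can SLIP-escape into two, and the two frame delimiters add a byte each. The escaping itself is byte-at-a-time; a reconstruction of bcsp_slip_one_byte() from the indexed call sites (0xc0 and 0xdb are the delimiter and escape bytes of BCSP's SLIP variant):

/* Reconstructed bcsp_slip_one_byte(): delimiter and escape bytes are
 * each replaced by a two-byte escape sequence. */
static void bcsp_slip_one_byte(struct sk_buff *nskb, u8 c)
{
	const u8 esc_c0[2] = { 0xdb, 0xdc };
	const u8 esc_db[2] = { 0xdb, 0xdd };

	switch (c) {
	case 0xc0:
		skb_put_data(nskb, esc_c0, 2);
		break;
	case 0xdb:
		skb_put_data(nskb, esc_db, 2);
		break;
	default:
		skb_put_data(nskb, &c, 1);
	}
}
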
nskb              113 drivers/bluetooth/hci_h5.c 	struct sk_buff *nskb;
nskb              115 drivers/bluetooth/hci_h5.c 	nskb = alloc_skb(3, GFP_ATOMIC);
nskb              116 drivers/bluetooth/hci_h5.c 	if (!nskb)
nskb              119 drivers/bluetooth/hci_h5.c 	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;
nskb              121 drivers/bluetooth/hci_h5.c 	skb_put_data(nskb, data, len);
nskb              123 drivers/bluetooth/hci_h5.c 	skb_queue_tail(&h5->unrel, nskb);
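
h5_link_control() above is the smallest build-and-queue unit in this index: fixed-size allocation, packet type stamped via hci_skb_pkt_type(), payload copied in, skb parked on the unreliable-transmit queue. Reconstructed from the indexed lines (only the locals and the hu->priv fetch are not visible above):

static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;
	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}
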
nskb              651 drivers/bluetooth/hci_h5.c 	struct sk_buff *nskb;
nskb              666 drivers/bluetooth/hci_h5.c 	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
nskb              667 drivers/bluetooth/hci_h5.c 	if (!nskb)
nskb              670 drivers/bluetooth/hci_h5.c 	hci_skb_pkt_type(nskb) = pkt_type;
nskb              672 drivers/bluetooth/hci_h5.c 	h5_slip_delim(nskb);
nskb              694 drivers/bluetooth/hci_h5.c 		h5_slip_one_byte(nskb, hdr[i]);
nskb              697 drivers/bluetooth/hci_h5.c 		h5_slip_one_byte(nskb, data[i]);
nskb              699 drivers/bluetooth/hci_h5.c 	h5_slip_delim(nskb);
nskb              701 drivers/bluetooth/hci_h5.c 	return nskb;
nskb              708 drivers/bluetooth/hci_h5.c 	struct sk_buff *skb, *nskb;
nskb              725 drivers/bluetooth/hci_h5.c 		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
nskb              727 drivers/bluetooth/hci_h5.c 		if (nskb) {
nskb              729 drivers/bluetooth/hci_h5.c 			return nskb;
nskb              743 drivers/bluetooth/hci_h5.c 		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
nskb              745 drivers/bluetooth/hci_h5.c 		if (nskb) {
nskb              749 drivers/bluetooth/hci_h5.c 			return nskb;
nskb              386 drivers/isdn/capi/capi.c 	struct sk_buff *nskb;
nskb              387 drivers/isdn/capi/capi.c 	nskb = alloc_skb(CAPI_DATA_B3_RESP_LEN, GFP_KERNEL);
nskb              388 drivers/isdn/capi/capi.c 	if (nskb) {
nskb              390 drivers/isdn/capi/capi.c 		unsigned char *s = skb_put(nskb, CAPI_DATA_B3_RESP_LEN);
nskb              399 drivers/isdn/capi/capi.c 	return nskb;
nskb              406 drivers/isdn/capi/capi.c 	struct sk_buff *nskb;
nskb              440 drivers/isdn/capi/capi.c 	nskb = gen_data_b3_resp_for(mp, skb);
nskb              441 drivers/isdn/capi/capi.c 	if (!nskb) {
nskb              448 drivers/isdn/capi/capi.c 	errcode = capi20_put_message(mp->ap, nskb);
nskb              458 drivers/isdn/capi/capi.c 		kfree_skb(nskb);
nskb             1315 drivers/isdn/mISDN/dsp_cmx.c 	struct sk_buff *nskb, *txskb;
nskb             1354 drivers/isdn/mISDN/dsp_cmx.c 	nskb = mI_alloc_skb(len + preload, GFP_ATOMIC);
nskb             1355 drivers/isdn/mISDN/dsp_cmx.c 	if (!nskb) {
nskb             1361 drivers/isdn/mISDN/dsp_cmx.c 	hh = mISDN_HEAD_P(nskb);
nskb             1370 drivers/isdn/mISDN/dsp_cmx.c 	d = skb_put(nskb, preload + len); /* result */
nskb             1584 drivers/isdn/mISDN/dsp_cmx.c 			skb_queue_tail(&dsp->sendq, nskb);
nskb             1598 drivers/isdn/mISDN/dsp_cmx.c 				skb_put_data(txskb, nskb->data + preload, len);
nskb             1608 drivers/isdn/mISDN/dsp_cmx.c 		dsp_change_volume(nskb, dsp->tx_volume);
nskb             1611 drivers/isdn/mISDN/dsp_cmx.c 		dsp_pipeline_process_tx(&dsp->pipeline, nskb->data,
nskb             1612 drivers/isdn/mISDN/dsp_cmx.c 					nskb->len);
nskb             1615 drivers/isdn/mISDN/dsp_cmx.c 		dsp_bf_encrypt(dsp, nskb->data, nskb->len);
nskb             1617 drivers/isdn/mISDN/dsp_cmx.c 	skb_queue_tail(&dsp->sendq, nskb);
nskb             1918 drivers/isdn/mISDN/dsp_cmx.c 	struct sk_buff *nskb = NULL;
nskb             1934 drivers/isdn/mISDN/dsp_cmx.c 			nskb = skb_clone(skb, GFP_ATOMIC);
nskb             1935 drivers/isdn/mISDN/dsp_cmx.c 			if (nskb) {
nskb             1936 drivers/isdn/mISDN/dsp_cmx.c 				hh = mISDN_HEAD_P(nskb);
nskb             1939 drivers/isdn/mISDN/dsp_cmx.c 				skb_queue_tail(&dsp->sendq, nskb);
nskb             1950 drivers/isdn/mISDN/dsp_cmx.c 			nskb = skb_clone(skb, GFP_ATOMIC);
nskb             1951 drivers/isdn/mISDN/dsp_cmx.c 			if (nskb) {
nskb             1952 drivers/isdn/mISDN/dsp_cmx.c 				hh = mISDN_HEAD_P(nskb);
nskb             1955 drivers/isdn/mISDN/dsp_cmx.c 				skb_queue_tail(&member->dsp->sendq, nskb);
nskb              283 drivers/isdn/mISDN/dsp_core.c 	struct sk_buff	*nskb;
nskb              589 drivers/isdn/mISDN/dsp_core.c 		nskb = _alloc_mISDN_skb(PH_CONTROL_IND, MISDN_ID_ANY,
nskb              591 drivers/isdn/mISDN/dsp_core.c 		if (nskb) {
nskb              593 drivers/isdn/mISDN/dsp_core.c 				if (dsp->up->send(dsp->up, nskb))
nskb              594 drivers/isdn/mISDN/dsp_core.c 					dev_kfree_skb(nskb);
nskb              596 drivers/isdn/mISDN/dsp_core.c 				dev_kfree_skb(nskb);
nskb              741 drivers/isdn/mISDN/dsp_core.c 				struct sk_buff *nskb;
nskb              747 drivers/isdn/mISDN/dsp_core.c 				nskb = _alloc_mISDN_skb(PH_CONTROL_IND,
nskb              750 drivers/isdn/mISDN/dsp_core.c 				if (nskb) {
nskb              753 drivers/isdn/mISDN/dsp_core.c 							    dsp->up, nskb))
nskb              754 drivers/isdn/mISDN/dsp_core.c 							dev_kfree_skb(nskb);
nskb              756 drivers/isdn/mISDN/dsp_core.c 						dev_kfree_skb(nskb);
nskb              787 drivers/isdn/mISDN/dsp_core.c 				struct sk_buff *nskb;
nskb              793 drivers/isdn/mISDN/dsp_core.c 				nskb = _alloc_mISDN_skb(PH_CONTROL_IND,
nskb              796 drivers/isdn/mISDN/dsp_core.c 				if (nskb) {
nskb              799 drivers/isdn/mISDN/dsp_core.c 							    dsp->up, nskb))
nskb              800 drivers/isdn/mISDN/dsp_core.c 							dev_kfree_skb(nskb);
nskb              802 drivers/isdn/mISDN/dsp_core.c 						dev_kfree_skb(nskb);
nskb              440 drivers/isdn/mISDN/dsp_tones.c 	struct sk_buff *nskb;
nskb              443 drivers/isdn/mISDN/dsp_tones.c 	nskb = _alloc_mISDN_skb(PH_CONTROL_REQ,
nskb              446 drivers/isdn/mISDN/dsp_tones.c 	if (nskb) {
nskb              448 drivers/isdn/mISDN/dsp_tones.c 			if (dsp->ch.recv(dsp->ch.peer, nskb))
nskb              449 drivers/isdn/mISDN/dsp_tones.c 				dev_kfree_skb(nskb);
nskb              451 drivers/isdn/mISDN/dsp_tones.c 			dev_kfree_skb(nskb);
nskb              353 drivers/isdn/mISDN/l1oip_core.c 	struct sk_buff *nskb;
nskb              384 drivers/isdn/mISDN/l1oip_core.c 	nskb = mI_alloc_skb((remotecodec == 3) ? (len << 1) : len, GFP_ATOMIC);
nskb              385 drivers/isdn/mISDN/l1oip_core.c 	if (!nskb) {
nskb              389 drivers/isdn/mISDN/l1oip_core.c 	p = skb_put(nskb, (remotecodec == 3) ? (len << 1) : len);
nskb              402 drivers/isdn/mISDN/l1oip_core.c 		dch->rx_skb = nskb;
nskb              429 drivers/isdn/mISDN/l1oip_core.c 			swap(hc->chan[channel].disorder_skb, nskb);
nskb              433 drivers/isdn/mISDN/l1oip_core.c 		if (nskb)
nskb              435 drivers/isdn/mISDN/l1oip_core.c 			queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb);
nskb              238 drivers/isdn/mISDN/layer2.c 	struct sk_buff *nskb = skb;
nskb              243 drivers/isdn/mISDN/layer2.c 			nskb = skb_dequeue(&l2->down_queue);
nskb              244 drivers/isdn/mISDN/layer2.c 			if (nskb) {
nskb              245 drivers/isdn/mISDN/layer2.c 				l2->down_id = mISDN_HEAD_ID(nskb);
nskb              246 drivers/isdn/mISDN/layer2.c 				if (l2down_skb(l2, nskb)) {
nskb              247 drivers/isdn/mISDN/layer2.c 					dev_kfree_skb(nskb);
nskb              263 drivers/isdn/mISDN/layer2.c 		nskb = skb_dequeue(&l2->down_queue);
nskb              264 drivers/isdn/mISDN/layer2.c 		if (nskb) {
nskb              265 drivers/isdn/mISDN/layer2.c 			l2->down_id = mISDN_HEAD_ID(nskb);
nskb              266 drivers/isdn/mISDN/layer2.c 			if (l2down_skb(l2, nskb)) {
nskb              267 drivers/isdn/mISDN/layer2.c 				dev_kfree_skb(nskb);
nskb             1469 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb, *nskb;
nskb             1485 drivers/isdn/mISDN/layer2.c 	nskb = skb_realloc_headroom(skb, i);
nskb             1486 drivers/isdn/mISDN/layer2.c 	if (!nskb) {
nskb             1506 drivers/isdn/mISDN/layer2.c 	memcpy(skb_push(nskb, i), header, i);
nskb             1507 drivers/isdn/mISDN/layer2.c 	l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
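
The tail of the layer2.c group shows the guaranteed-headroom push: skb_realloc_headroom() always returns a fresh copy with at least i bytes of headroom, after which the L2 header can be prepended unconditionally. The shape in isolation, as a sketch (header and i stand for the LAPD header the caller assembled):

/* skb_realloc_headroom() always copies, so the push cannot fail. */
static struct sk_buff *l2_push_header(struct sk_buff *skb,
				      const u8 *header, int i)
{
	struct sk_buff *nskb = skb_realloc_headroom(skb, i);

	if (!nskb)
		return NULL;	/* original skb still owned by the caller */

	memcpy(skb_push(nskb, i), header, i);
	return nskb;
}
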
nskb              370 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			struct sk_buff *nskb;
nskb              372 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			nskb = napi_alloc_skb(&priv->napi, len);
nskb              373 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			if (!nskb) {
nskb              381 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			memcpy(nskb->data, skb->data, len);
nskb              384 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			skb = nskb;
nskb              587 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			struct sk_buff *nskb;
nskb              589 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
nskb              590 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			if (!nskb) {
nskb              595 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			skb = nskb;
nskb             1215 drivers/net/ethernet/broadcom/bcmsysport.c 	struct sk_buff *nskb;
nskb             1224 drivers/net/ethernet/broadcom/bcmsysport.c 		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
nskb             1225 drivers/net/ethernet/broadcom/bcmsysport.c 		if (!nskb) {
nskb             1233 drivers/net/ethernet/broadcom/bcmsysport.c 		skb = nskb;
nskb             7877 drivers/net/ethernet/broadcom/tg3.c 	struct sk_buff *segs, *nskb;
nskb             7902 drivers/net/ethernet/broadcom/tg3.c 		nskb = segs;
nskb             7904 drivers/net/ethernet/broadcom/tg3.c 		nskb->next = NULL;
nskb             7905 drivers/net/ethernet/broadcom/tg3.c 		tg3_start_xmit(nskb, tp->dev);
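
The tg3 hits are the classic software-GSO fallback: when a TSO frame would trip a hardware bug, skb_gso_segment() splits it and each segment re-enters the driver's own xmit routine. A condensed sketch; struct tg3 and tg3_start_xmit() are the driver's, and the queue-space checks of the real tg3_tso_bug() are elided:

/* Segment a problematic TSO frame in software and re-enter xmit
 * once per segment. */
static netdev_tx_t tg3_tso_fallback(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs) || !segs)
		goto out;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;		/* detach before handing off */
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);
out:
	dev_kfree_skb_any(skb);	/* covered by the segments (or dropped) */
	return NETDEV_TX_OK;
}
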
nskb             1725 drivers/net/ethernet/cadence/macb_main.c 	struct sk_buff *nskb;
nskb             1752 drivers/net/ethernet/cadence/macb_main.c 		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
nskb             1753 drivers/net/ethernet/cadence/macb_main.c 		if (!nskb)
nskb             1757 drivers/net/ethernet/cadence/macb_main.c 		*skb = nskb;
nskb             3086 drivers/net/ethernet/marvell/skge.c 		struct sk_buff *nskb;
nskb             3088 drivers/net/ethernet/marvell/skge.c 		nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
nskb             3089 drivers/net/ethernet/marvell/skge.c 		if (!nskb)
nskb             3097 drivers/net/ethernet/marvell/skge.c 		if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
nskb             3098 drivers/net/ethernet/marvell/skge.c 			dev_kfree_skb(nskb);
nskb              144 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 					struct sk_buff *nskb, u32 tcp_seq,
nskb              153 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	nskb->dev = skb->dev;
nskb              154 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_reset_mac_header(nskb);
nskb              155 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_set_network_header(nskb, skb_network_offset(skb));
nskb              156 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_set_transport_header(nskb, skb_transport_offset(skb));
nskb              157 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	memcpy(nskb->data, skb->data, headln);
nskb              158 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
nskb              160 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	iph = ip_hdr(nskb);
nskb              161 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
nskb              162 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	th = tcp_hdr(nskb);
nskb              163 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	data_len = nskb->len - headln;
nskb              167 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
nskb              168 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_shinfo(nskb)->gso_size = 0;
nskb              170 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		skb_shinfo(nskb)->gso_size = mss;
nskb              171 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
nskb              173 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
nskb              175 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
nskb              182 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	nskb->ip_summed = CHECKSUM_PARTIAL;
nskb              184 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	nskb->queue_mapping = skb->queue_mapping;
nskb              196 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	struct sk_buff *nskb;
nskb              233 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	nskb = alloc_skb(linear_len, GFP_ATOMIC);
nskb              234 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	if (unlikely(!nskb)) {
nskb              240 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_put(nskb, linear_len);
nskb              242 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		skb_shinfo(nskb)->frags[i] = info.frags[i];
nskb              244 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_shinfo(nskb)->nr_frags = info.nr_frags;
nskb              245 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	nskb->data_len = info.sync_len;
nskb              246 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	nskb->len += info.sync_len;
nskb              247 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	sq->stats->tls_resync_bytes += nskb->len;
nskb              248 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
nskb              250 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
nskb              831 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct sk_buff *nskb;
nskb              853 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nskb = tls_encrypt_skb(skb);
nskb              854 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (!nskb) {
nskb              861 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (nskb == skb)
nskb              864 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (unlikely(skb_is_nonlinear(nskb))) {
nskb              869 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			dev_kfree_skb_any(nskb);
nskb              875 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			tls_offload_tx_resync_request(nskb->sk);
nskb              878 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		return nskb;
nskb             1110 drivers/net/ethernet/sun/sunvnet_common.c 	struct sk_buff *nskb;
nskb             1136 drivers/net/ethernet/sun/sunvnet_common.c 		nskb = alloc_and_align_skb(skb->dev, len);
nskb             1137 drivers/net/ethernet/sun/sunvnet_common.c 		if (!nskb) {
nskb             1141 drivers/net/ethernet/sun/sunvnet_common.c 		skb_reserve(nskb, VNET_PACKET_SKIP);
nskb             1143 drivers/net/ethernet/sun/sunvnet_common.c 		nskb->protocol = skb->protocol;
nskb             1145 drivers/net/ethernet/sun/sunvnet_common.c 		skb_set_mac_header(nskb, offset);
nskb             1147 drivers/net/ethernet/sun/sunvnet_common.c 		skb_set_network_header(nskb, offset);
nskb             1149 drivers/net/ethernet/sun/sunvnet_common.c 		skb_set_transport_header(nskb, offset);
nskb             1152 drivers/net/ethernet/sun/sunvnet_common.c 		nskb->csum_offset = skb->csum_offset;
nskb             1153 drivers/net/ethernet/sun/sunvnet_common.c 		nskb->ip_summed = skb->ip_summed;
nskb             1158 drivers/net/ethernet/sun/sunvnet_common.c 			int offset = start + nskb->csum_offset;
nskb             1161 drivers/net/ethernet/sun/sunvnet_common.c 			if (skb_copy_bits(skb, 0, nskb->data, start)) {
nskb             1162 drivers/net/ethernet/sun/sunvnet_common.c 				dev_kfree_skb(nskb);
nskb             1170 drivers/net/ethernet/sun/sunvnet_common.c 						      nskb->data + start,
nskb             1175 drivers/net/ethernet/sun/sunvnet_common.c 				struct iphdr *iph = ip_hdr(nskb);
nskb             1186 drivers/net/ethernet/sun/sunvnet_common.c 				struct ipv6hdr *ip6h = ipv6_hdr(nskb);
nskb             1199 drivers/net/ethernet/sun/sunvnet_common.c 			*(__sum16 *)(nskb->data + offset) = csum;
nskb             1201 drivers/net/ethernet/sun/sunvnet_common.c 			nskb->ip_summed = CHECKSUM_NONE;
nskb             1202 drivers/net/ethernet/sun/sunvnet_common.c 		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
nskb             1203 drivers/net/ethernet/sun/sunvnet_common.c 			dev_kfree_skb(nskb);
nskb             1207 drivers/net/ethernet/sun/sunvnet_common.c 		(void)skb_put(nskb, skb->len);
nskb             1209 drivers/net/ethernet/sun/sunvnet_common.c 			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
nskb             1210 drivers/net/ethernet/sun/sunvnet_common.c 			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
nskb             1212 drivers/net/ethernet/sun/sunvnet_common.c 		nskb->queue_mapping = skb->queue_mapping;
nskb             1214 drivers/net/ethernet/sun/sunvnet_common.c 		skb = nskb;
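
The sunvnet copy path recomputes the transport checksum by hand once the packet has been copied into the aligned skb: csum_partial() over the copied transport header plus payload, folded together with the pseudo-header. A minimal IPv4 flavor of that idiom (the driver additionally handles IPv6 through the ipv6hdr branch visible above); it assumes the checksum field was zeroed before summing:

/* Recompute a TCP/UDP-over-IPv4 checksum after a copy; 'start' is the
 * transport header offset within nskb->data. */
static void fix_l4_csum_v4(struct sk_buff *nskb, int start)
{
	const struct iphdr *iph = ip_hdr(nskb);
	__wsum csum = csum_partial(nskb->data + start,
				   nskb->len - start, 0);

	*(__sum16 *)(nskb->data + start + nskb->csum_offset) =
		csum_tcpudp_magic(iph->saddr, iph->daddr,
				  nskb->len - start, iph->protocol, csum);
	nskb->ip_summed = CHECKSUM_NONE;	/* nothing left for hardware */
}
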
nskb              230 drivers/net/ipvlan/ipvlan_core.c 	struct sk_buff *skb, *nskb;
nskb              267 drivers/net/ipvlan/ipvlan_core.c 			nskb = skb_clone(skb, GFP_ATOMIC);
nskb              269 drivers/net/ipvlan/ipvlan_core.c 			if (nskb) {
nskb              271 drivers/net/ipvlan/ipvlan_core.c 				nskb->pkt_type = pkt_type;
nskb              272 drivers/net/ipvlan/ipvlan_core.c 				nskb->dev = ipvlan->dev;
nskb              274 drivers/net/ipvlan/ipvlan_core.c 					ret = dev_forward_skb(ipvlan->dev, nskb);
nskb              276 drivers/net/ipvlan/ipvlan_core.c 					ret = netif_rx(nskb);
nskb              711 drivers/net/ipvlan/ipvlan_core.c 			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
nskb              719 drivers/net/ipvlan/ipvlan_core.c 			if (nskb) {
nskb              720 drivers/net/ipvlan/ipvlan_core.c 				ipvlan_skb_crossing_ns(nskb, NULL);
nskb              721 drivers/net/ipvlan/ipvlan_core.c 				ipvlan_multicast_enqueue(port, nskb, false);
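
Both ipvlan sites follow the clone-per-receiver rule for multicast and broadcast: the original skb continues down its own path while each additional receiver gets a clone, delivered through dev_forward_skb() for local crossings or netif_rx() otherwise. The core step, as a sketch:

/* One clone per extra receiver; the original skb is untouched. */
static void deliver_clone(struct net_device *dev, struct sk_buff *skb,
			  bool local)
{
	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb)
		return;		/* best effort: drop this receiver only */

	nskb->dev = dev;
	if (local)
		dev_forward_skb(dev, nskb);
	else
		netif_rx(nskb);
}
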
nskb              677 drivers/net/macsec.c 		struct sk_buff *nskb = skb_copy_expand(skb,
nskb              681 drivers/net/macsec.c 		if (likely(nskb)) {
nskb              683 drivers/net/macsec.c 			skb = nskb;
nskb             1046 drivers/net/macsec.c 		struct sk_buff *nskb;
nskb             1057 drivers/net/macsec.c 		nskb = skb_clone(skb, GFP_ATOMIC);
nskb             1058 drivers/net/macsec.c 		if (!nskb)
nskb             1061 drivers/net/macsec.c 		nskb->dev = macsec->secy.netdev;
nskb             1063 drivers/net/macsec.c 		if (netif_rx(nskb) == NET_RX_SUCCESS) {
nskb             1268 drivers/net/macsec.c 		struct sk_buff *nskb;
nskb             1286 drivers/net/macsec.c 		nskb = skb_clone(skb, GFP_ATOMIC);
nskb             1287 drivers/net/macsec.c 		if (!nskb)
nskb             1290 drivers/net/macsec.c 		macsec_reset_skb(nskb, macsec->secy.netdev);
nskb             1292 drivers/net/macsec.c 		ret = netif_rx(nskb);
nskb              264 drivers/net/macvlan.c 	struct sk_buff *nskb;
nskb              282 drivers/net/macvlan.c 			nskb = skb_clone(skb, GFP_ATOMIC);
nskb              283 drivers/net/macvlan.c 			if (likely(nskb))
nskb              285 drivers/net/macvlan.c 					nskb, vlan, eth,
nskb              287 drivers/net/macvlan.c 				      netif_rx_ni(nskb);
nskb              346 drivers/net/macvlan.c 	struct sk_buff *nskb;
nskb              349 drivers/net/macvlan.c 	nskb = skb_clone(skb, GFP_ATOMIC);
nskb              350 drivers/net/macvlan.c 	if (!nskb)
nskb              353 drivers/net/macvlan.c 	MACVLAN_SKB_CB(nskb)->src = src;
nskb              359 drivers/net/macvlan.c 		__skb_queue_tail(&port->bc_queue, nskb);
nskb              372 drivers/net/macvlan.c 	kfree_skb(nskb);
nskb              400 drivers/net/macvlan.c 	struct sk_buff *nskb;
nskb              409 drivers/net/macvlan.c 	nskb = skb_clone(skb, GFP_ATOMIC);
nskb              410 drivers/net/macvlan.c 	if (!nskb)
nskb              413 drivers/net/macvlan.c 	len = nskb->len + ETH_HLEN;
nskb              414 drivers/net/macvlan.c 	nskb->dev = dev;
nskb              417 drivers/net/macvlan.c 		nskb->pkt_type = PACKET_HOST;
nskb              419 drivers/net/macvlan.c 	ret = netif_rx(nskb);
nskb              356 drivers/net/tap.c 			struct sk_buff *nskb = segs->next;
nskb              361 drivers/net/tap.c 				kfree_skb_list(nskb);
nskb              364 drivers/net/tap.c 			segs = nskb;
nskb             1569 drivers/net/tun.c 		struct sk_buff *nskb;
nskb             1572 drivers/net/tun.c 		while ((nskb = __skb_dequeue(&process_queue))) {
nskb             1573 drivers/net/tun.c 			skb_record_rx_queue(nskb, tfile->queue_index);
nskb             1574 drivers/net/tun.c 			netif_receive_skb(nskb);
nskb             1714 drivers/net/usb/r8152.c 		struct sk_buff *segs, *nskb;
nskb             1724 drivers/net/usb/r8152.c 			nskb = segs;
nskb             1726 drivers/net/usb/r8152.c 			nskb->next = NULL;
nskb             1727 drivers/net/usb/r8152.c 			__skb_queue_tail(&seg_list, nskb);
nskb              618 drivers/net/veth.c 		struct sk_buff *nskb;
nskb              639 drivers/net/veth.c 		nskb = veth_build_skb(head,
nskb              642 drivers/net/veth.c 		if (!nskb) {
nskb              647 drivers/net/veth.c 		skb_copy_header(nskb, skb);
nskb              648 drivers/net/veth.c 		head_off = skb_headroom(nskb) - skb_headroom(skb);
nskb              649 drivers/net/veth.c 		skb_headers_offset_update(nskb, head_off);
nskb              651 drivers/net/veth.c 		skb = nskb;
nskb              955 drivers/net/virtio_net.c 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
nskb              957 drivers/net/virtio_net.c 			if (unlikely(!nskb))
nskb              960 drivers/net/virtio_net.c 				skb_shinfo(curr_skb)->frag_list = nskb;
nskb              962 drivers/net/virtio_net.c 				curr_skb->next = nskb;
nskb              963 drivers/net/virtio_net.c 			curr_skb = nskb;
nskb              964 drivers/net/virtio_net.c 			head_skb->truesize += nskb->truesize;
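
The virtio_net lines show how a merged receive buffer grows past MAX_SKB_FRAGS: a zero-length skb is chained, via frag_list on the head skb or via next on a later one, and subsequent pages land in the new skb's frags[]. The chaining step in isolation (callers still add the frag itself and bump head_skb->len/data_len):

/* Chain a fresh zero-length skb once curr_skb's frags[] is full. */
static struct sk_buff *chain_frag_skb(struct sk_buff *head_skb,
				      struct sk_buff *curr_skb)
{
	struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

	if (unlikely(!nskb))
		return NULL;

	if (curr_skb == head_skb)
		skb_shinfo(curr_skb)->frag_list = nskb;
	else
		curr_skb->next = nskb;

	head_skb->truesize += nskb->truesize;
	return nskb;
}
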
nskb              489 drivers/net/wireless/ath/ath6kl/txrx.c 		struct sk_buff *nskb;
nskb              491 drivers/net/wireless/ath/ath6kl/txrx.c 		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
nskb              492 drivers/net/wireless/ath/ath6kl/txrx.c 		if (nskb == NULL)
nskb              495 drivers/net/wireless/ath/ath6kl/txrx.c 		skb = nskb;
nskb              313 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *nskb = NULL;
nskb              333 drivers/net/wireless/ath/ath9k/hif_usb.c 		nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);
nskb              336 drivers/net/wireless/ath/ath9k/hif_usb.c 		BUG_ON(!nskb);
nskb              343 drivers/net/wireless/ath/ath9k/hif_usb.c 		*hdr++ = cpu_to_le16(nskb->len);
nskb              346 drivers/net/wireless/ath/ath9k/hif_usb.c 		memcpy(buf, nskb->data, nskb->len);
nskb              347 drivers/net/wireless/ath/ath9k/hif_usb.c 		tx_buf->len = nskb->len + 4;
nskb              355 drivers/net/wireless/ath/ath9k/hif_usb.c 		__skb_queue_tail(&tx_buf->skb_queue, nskb);
nskb              532 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
nskb              598 drivers/net/wireless/ath/ath9k/hif_usb.c 			nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
nskb              599 drivers/net/wireless/ath/ath9k/hif_usb.c 			if (!nskb) {
nskb              605 drivers/net/wireless/ath/ath9k/hif_usb.c 			skb_reserve(nskb, 32);
nskb              608 drivers/net/wireless/ath/ath9k/hif_usb.c 			memcpy(nskb->data, &(skb->data[chk_idx+4]),
nskb              612 drivers/net/wireless/ath/ath9k/hif_usb.c 			hif_dev->remain_skb = nskb;
nskb              620 drivers/net/wireless/ath/ath9k/hif_usb.c 			nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
nskb              621 drivers/net/wireless/ath/ath9k/hif_usb.c 			if (!nskb) {
nskb              626 drivers/net/wireless/ath/ath9k/hif_usb.c 			skb_reserve(nskb, 32);
nskb              629 drivers/net/wireless/ath/ath9k/hif_usb.c 			memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
nskb              630 drivers/net/wireless/ath/ath9k/hif_usb.c 			skb_put(nskb, pkt_len);
nskb              631 drivers/net/wireless/ath/ath9k/hif_usb.c 			skb_pool[pool_index++] = nskb;
nskb              696 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *nskb;
nskb              728 drivers/net/wireless/ath/ath9k/hif_usb.c 		nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
nskb              729 drivers/net/wireless/ath/ath9k/hif_usb.c 		if (!nskb) {
nskb              739 drivers/net/wireless/ath/ath9k/hif_usb.c 				 nskb->data, MAX_REG_IN_BUF_SIZE,
nskb              740 drivers/net/wireless/ath/ath9k/hif_usb.c 				 ath9k_hif_usb_reg_in_cb, nskb, 1);
nskb             1285 drivers/net/wireless/mac80211_hwsim.c 		struct sk_buff *nskb;
nskb             1323 drivers/net/wireless/mac80211_hwsim.c 			nskb = dev_alloc_skb(128);
nskb             1324 drivers/net/wireless/mac80211_hwsim.c 			if (!nskb) {
nskb             1330 drivers/net/wireless/mac80211_hwsim.c 			skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len);
nskb             1332 drivers/net/wireless/mac80211_hwsim.c 			nskb = skb_copy(skb, GFP_ATOMIC);
nskb             1333 drivers/net/wireless/mac80211_hwsim.c 			if (!nskb)
nskb             1342 drivers/net/wireless/mac80211_hwsim.c 		memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
nskb             1344 drivers/net/wireless/mac80211_hwsim.c 		mac80211_hwsim_add_vendor_rtap(nskb);
nskb             1347 drivers/net/wireless/mac80211_hwsim.c 		data2->rx_bytes += nskb->len;
nskb             1348 drivers/net/wireless/mac80211_hwsim.c 		ieee80211_rx_irqsafe(data2->hw, nskb);
nskb              365 drivers/net/xen-netback/netback.c 							struct sk_buff *nskb)
nskb              389 drivers/net/xen-netback/netback.c 		shinfo = skb_shinfo(nskb);
nskb              402 drivers/net/xen-netback/netback.c 		skb_shinfo(skb)->frag_list = nskb;
nskb              794 drivers/net/xen-netback/netback.c 	struct sk_buff *skb, *nskb;
nskb              921 drivers/net/xen-netback/netback.c 		nskb = NULL;
nskb              926 drivers/net/xen-netback/netback.c 			nskb = xenvif_alloc_skb(0);
nskb              927 drivers/net/xen-netback/netback.c 			if (unlikely(nskb == NULL)) {
nskb              946 drivers/net/xen-netback/netback.c 				kfree_skb(nskb);
nskb             1014 drivers/net/xen-netback/netback.c 				          frag_overflow, nskb);
nskb             1038 drivers/net/xen-netback/netback.c 	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
nskb             1043 drivers/net/xen-netback/netback.c 	xenvif_fill_frags(queue, nskb);
nskb             1046 drivers/net/xen-netback/netback.c 	skb->len += nskb->len;
nskb             1047 drivers/net/xen-netback/netback.c 	skb->data_len += nskb->len;
nskb             1117 drivers/net/xen-netback/netback.c 				struct sk_buff *nskb =
nskb             1119 drivers/net/xen-netback/netback.c 				skb_shinfo(nskb)->nr_frags = 0;
nskb             1145 drivers/net/xen-netback/netback.c 			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
nskb             1146 drivers/net/xen-netback/netback.c 			xenvif_skb_zerocopy_prepare(queue, nskb);
nskb             1157 drivers/net/xen-netback/netback.c 			kfree_skb(nskb);
nskb              580 drivers/net/xen-netfront.c 	struct sk_buff *nskb;
nskb              614 drivers/net/xen-netfront.c 		nskb = skb_copy(skb, GFP_ATOMIC);
nskb              615 drivers/net/xen-netfront.c 		if (!nskb)
nskb              618 drivers/net/xen-netfront.c 		skb = nskb;
nskb              895 drivers/net/xen-netfront.c 	struct sk_buff *nskb;
nskb              897 drivers/net/xen-netfront.c 	while ((nskb = __skb_dequeue(list))) {
nskb              900 drivers/net/xen-netfront.c 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
nskb              910 drivers/net/xen-netfront.c 			kfree_skb(nskb);
nskb              918 drivers/net/xen-netfront.c 		skb_shinfo(nskb)->nr_frags = 0;
nskb              919 drivers/net/xen-netfront.c 		kfree_skb(nskb);
nskb              472 drivers/s390/net/ctcm_main.c 	struct sk_buff *nskb;
nskb              519 drivers/s390/net/ctcm_main.c 		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
nskb              520 drivers/s390/net/ctcm_main.c 		if (!nskb) {
nskb              526 drivers/s390/net/ctcm_main.c 			skb_put_data(nskb, skb->data, skb->len);
nskb              527 drivers/s390/net/ctcm_main.c 			refcount_inc(&nskb->users);
nskb              530 drivers/s390/net/ctcm_main.c 			skb = nskb;
nskb              670 drivers/s390/net/ctcm_main.c 	struct sk_buff *nskb;
nskb              727 drivers/s390/net/ctcm_main.c 		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
nskb              728 drivers/s390/net/ctcm_main.c 		if (!nskb) {
nskb              731 drivers/s390/net/ctcm_main.c 			skb_put_data(nskb, skb->data, skb->len);
nskb              732 drivers/s390/net/ctcm_main.c 			refcount_inc(&nskb->users);
nskb              735 drivers/s390/net/ctcm_main.c 			skb = nskb;
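
Both ctcm_main.c sites copy the payload into a freshly allocated GFP_DMA skb because the s390 channel program needs 31-bit addressable memory, then take an extra reference so the buffer survives until the asynchronous write completes. As a sketch:

/* Copy into DMA-capable memory and pin it across the channel I/O;
 * the extra reference is dropped by the write-completion handler. */
static struct sk_buff *ctcm_dma_copy(struct sk_buff *skb)
{
	struct sk_buff *nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);

	if (!nskb)
		return NULL;

	skb_put_data(nskb, skb->data, skb->len);
	refcount_inc(&nskb->users);
	return nskb;
}
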
nskb             1172 drivers/s390/net/netiucv.c 		struct sk_buff *nskb = skb;
nskb             1181 drivers/s390/net/netiucv.c 			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
nskb             1183 drivers/s390/net/netiucv.c 			if (!nskb) {
nskb             1188 drivers/s390/net/netiucv.c 				skb_reserve(nskb, NETIUCV_HDRLEN);
nskb             1189 drivers/s390/net/netiucv.c 				skb_put_data(nskb, skb->data, skb->len);
nskb             1196 drivers/s390/net/netiucv.c 		header.next = nskb->len + NETIUCV_HDRLEN;
nskb             1197 drivers/s390/net/netiucv.c 		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
nskb             1199 drivers/s390/net/netiucv.c 		skb_put_data(nskb, &header, NETIUCV_HDRLEN);
nskb             1207 drivers/s390/net/netiucv.c 				       nskb->data, nskb->len);
nskb             1221 drivers/s390/net/netiucv.c 				dev_kfree_skb(nskb);
nskb             1234 drivers/s390/net/netiucv.c 			refcount_inc(&nskb->users);
nskb             1235 drivers/s390/net/netiucv.c 			skb_queue_tail(&conn->commit_queue, nskb);
nskb              348 drivers/scsi/fcoe/fcoe_transport.c 	struct sk_buff *nskb;
nskb              351 drivers/scsi/fcoe/fcoe_transport.c 	nskb = skb_clone(skb, GFP_ATOMIC);
nskb              352 drivers/scsi/fcoe/fcoe_transport.c 	if (!nskb)
nskb              354 drivers/scsi/fcoe/fcoe_transport.c 	rc = dev_queue_xmit(nskb);
nskb               65 include/linux/can/skb.h 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
nskb               67 include/linux/can/skb.h 		if (likely(nskb)) {
nskb               68 include/linux/can/skb.h 			can_skb_set_owner(nskb, skb->sk);
nskb               70 include/linux/can/skb.h 			return nskb;
nskb              152 include/linux/netlink.h 	struct sk_buff *nskb;
nskb              154 include/linux/netlink.h 	nskb = skb_clone(skb, gfp_mask);
nskb              155 include/linux/netlink.h 	if (!nskb)
nskb              160 include/linux/netlink.h 		nskb->destructor = skb->destructor;
nskb              162 include/linux/netlink.h 	return nskb;
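
The netlink.h hits are netlink_skb_clone(). The one detail this index hides (the guard between lines 155 and 160 contains no 'nskb') is an is_vmalloc_addr(skb->head) check: only large, vmalloc-backed netlink skbs need the destructor propagated. Reconstructed with that caveat:

static inline struct sk_buff *netlink_skb_clone(struct sk_buff *skb,
						gfp_t gfp_mask)
{
	struct sk_buff *nskb = skb_clone(skb, gfp_mask);

	if (!nskb)
		return NULL;

	/* Large netlink skbs have vmalloc'ed heads; keep the destructor
	 * so the head is released exactly once. */
	if (is_vmalloc_addr(skb->head))
		nskb->destructor = skb->destructor;

	return nskb;
}
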
nskb             1685 include/linux/skbuff.h 		struct sk_buff *nskb = skb_clone(skb, pri);
nskb             1687 include/linux/skbuff.h 		if (likely(nskb))
nskb             1691 include/linux/skbuff.h 		skb = nskb;
nskb             1721 include/linux/skbuff.h 		struct sk_buff *nskb = skb_copy(skb, pri);
nskb             1724 include/linux/skbuff.h 		if (likely(nskb))
nskb             1728 include/linux/skbuff.h 		skb = nskb;
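
The two skbuff.h inlines defend against different kinds of sharing: skb_share_check() clones when the sk_buff struct itself is shared (users > 1), while skb_unshare() copies when the data is shared with a clone. Both free the original on allocation failure and return NULL, which shapes the calling idiom:

/* Before writing to an skb that other readers may hold a reference to: */
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
	return NET_RX_DROP;	/* clone failed; skb_share_check() freed it */

/* Before writing to payload that a clone may still be reading: */
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb)
	return NET_RX_DROP;
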
nskb              276 include/net/dst.h static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
nskb              278 include/net/dst.h 	nskb->_skb_refdst = refdst;
nskb              279 include/net/dst.h 	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
nskb              280 include/net/dst.h 		dst_clone(skb_dst(nskb));
nskb              283 include/net/dst.h static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
nskb              285 include/net/dst.h 	__skb_dst_copy(nskb, oskb->_skb_refdst);
nskb               15 include/net/netfilter/ipv4/nf_reject.h struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
nskb               18 include/net/netfilter/ipv4/nf_reject.h void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
nskb               16 include/net/netfilter/ipv6/nf_reject.h struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
nskb               19 include/net/netfilter/ipv6/nf_reject.h void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
nskb             1378 net/appletalk/ddp.c 		struct sk_buff *nskb = skb_realloc_headroom(skb, 32);
nskb             1380 net/appletalk/ddp.c 		skb = nskb;
nskb              301 net/bluetooth/bnep/core.c 	struct sk_buff *nskb;
nskb              366 net/bluetooth/bnep/core.c 	nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
nskb              367 net/bluetooth/bnep/core.c 	if (!nskb) {
nskb              372 net/bluetooth/bnep/core.c 	skb_reserve(nskb, 2);
nskb              377 net/bluetooth/bnep/core.c 		__skb_put_data(nskb, &s->eh, ETH_HLEN);
nskb              381 net/bluetooth/bnep/core.c 		__skb_put_data(nskb, s->eh.h_dest, ETH_ALEN);
nskb              382 net/bluetooth/bnep/core.c 		__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
nskb              383 net/bluetooth/bnep/core.c 		put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
nskb              387 net/bluetooth/bnep/core.c 		__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
nskb              388 net/bluetooth/bnep/core.c 		__skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2);
nskb              392 net/bluetooth/bnep/core.c 		__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN * 2);
nskb              393 net/bluetooth/bnep/core.c 		put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
nskb              397 net/bluetooth/bnep/core.c 	skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
nskb              401 net/bluetooth/bnep/core.c 	nskb->ip_summed = CHECKSUM_NONE;
nskb              402 net/bluetooth/bnep/core.c 	nskb->protocol  = eth_type_trans(nskb, dev);
nskb              403 net/bluetooth/bnep/core.c 	netif_rx_ni(nskb);
nskb              109 net/bluetooth/cmtp/core.c 	struct sk_buff *skb = session->reassembly[id], *nskb;
nskb              116 net/bluetooth/cmtp/core.c 	nskb = alloc_skb(size, GFP_ATOMIC);
nskb              117 net/bluetooth/cmtp/core.c 	if (!nskb) {
nskb              123 net/bluetooth/cmtp/core.c 		skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len);
nskb              125 net/bluetooth/cmtp/core.c 	skb_put_data(nskb, buf, count);
nskb              127 net/bluetooth/cmtp/core.c 	session->reassembly[id] = nskb;
nskb              211 net/bluetooth/cmtp/core.c 	struct sk_buff *skb, *nskb;
nskb              217 net/bluetooth/cmtp/core.c 	nskb = alloc_skb(session->mtu, GFP_ATOMIC);
nskb              218 net/bluetooth/cmtp/core.c 	if (!nskb) {
nskb              226 net/bluetooth/cmtp/core.c 		tail = session->mtu - nskb->len;
nskb              228 net/bluetooth/cmtp/core.c 			cmtp_send_frame(session, nskb->data, nskb->len);
nskb              229 net/bluetooth/cmtp/core.c 			skb_trim(nskb, 0);
nskb              244 net/bluetooth/cmtp/core.c 			hdr = skb_put(nskb, 2);
nskb              250 net/bluetooth/cmtp/core.c 			hdr = skb_put(nskb, 3);
nskb              258 net/bluetooth/cmtp/core.c 		skb_copy_from_linear_data(skb, skb_put(nskb, size), size);
nskb              266 net/bluetooth/cmtp/core.c 				cmtp_send_frame(session, nskb->data, nskb->len);
nskb              267 net/bluetooth/cmtp/core.c 				skb_trim(nskb, 0);
nskb              273 net/bluetooth/cmtp/core.c 	cmtp_send_frame(session, nskb->data, nskb->len);
nskb              275 net/bluetooth/cmtp/core.c 	kfree_skb(nskb);
nskb              201 net/bluetooth/hci_sock.c 		struct sk_buff *nskb;
nskb              240 net/bluetooth/hci_sock.c 		nskb = skb_clone(skb_copy, GFP_ATOMIC);
nskb              241 net/bluetooth/hci_sock.c 		if (!nskb)
nskb              244 net/bluetooth/hci_sock.c 		if (sock_queue_rcv_skb(sk, nskb))
nskb              245 net/bluetooth/hci_sock.c 			kfree_skb(nskb);
nskb              262 net/bluetooth/hci_sock.c 		struct sk_buff *nskb;
nskb              278 net/bluetooth/hci_sock.c 		nskb = skb_clone(skb, GFP_ATOMIC);
nskb              279 net/bluetooth/hci_sock.c 		if (!nskb)
nskb              282 net/bluetooth/hci_sock.c 		if (sock_queue_rcv_skb(sk, nskb))
nskb              283 net/bluetooth/hci_sock.c 			kfree_skb(nskb);
nskb             2896 net/bluetooth/l2cap_core.c 	struct sk_buff *nskb;
nskb             2911 net/bluetooth/l2cap_core.c 		nskb = skb_clone(skb, GFP_KERNEL);
nskb             2912 net/bluetooth/l2cap_core.c 		if (!nskb)
nskb             2914 net/bluetooth/l2cap_core.c 		if (chan->ops->recv(chan, nskb))
nskb             2915 net/bluetooth/l2cap_core.c 			kfree_skb(nskb);
nskb               24 net/bridge/netfilter/nft_reject_bridge.c 					struct sk_buff *nskb)
nskb               28 net/bridge/netfilter/nft_reject_bridge.c 	eth = skb_push(nskb, ETH_HLEN);
nskb               29 net/bridge/netfilter/nft_reject_bridge.c 	skb_reset_mac_header(nskb);
nskb               33 net/bridge/netfilter/nft_reject_bridge.c 	skb_pull(nskb, ETH_HLEN);
nskb               38 net/bridge/netfilter/nft_reject_bridge.c 		__vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
nskb               74 net/bridge/netfilter/nft_reject_bridge.c 	struct sk_buff *nskb;
nskb               86 net/bridge/netfilter/nft_reject_bridge.c 	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
nskb               88 net/bridge/netfilter/nft_reject_bridge.c 	if (!nskb)
nskb               91 net/bridge/netfilter/nft_reject_bridge.c 	skb_reserve(nskb, LL_MAX_HEADER);
nskb               92 net/bridge/netfilter/nft_reject_bridge.c 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
nskb               94 net/bridge/netfilter/nft_reject_bridge.c 	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
nskb               95 net/bridge/netfilter/nft_reject_bridge.c 	niph->tot_len = htons(nskb->len);
nskb               98 net/bridge/netfilter/nft_reject_bridge.c 	nft_reject_br_push_etherhdr(oldskb, nskb);
nskb              100 net/bridge/netfilter/nft_reject_bridge.c 	br_forward(br_port_get_rcu(dev), nskb, false, true);
nskb              108 net/bridge/netfilter/nft_reject_bridge.c 	struct sk_buff *nskb;
nskb              138 net/bridge/netfilter/nft_reject_bridge.c 	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
nskb              140 net/bridge/netfilter/nft_reject_bridge.c 	if (!nskb)
nskb              143 net/bridge/netfilter/nft_reject_bridge.c 	skb_reserve(nskb, LL_MAX_HEADER);
nskb              144 net/bridge/netfilter/nft_reject_bridge.c 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
nskb              147 net/bridge/netfilter/nft_reject_bridge.c 	skb_reset_transport_header(nskb);
nskb              148 net/bridge/netfilter/nft_reject_bridge.c 	icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
nskb              152 net/bridge/netfilter/nft_reject_bridge.c 	skb_put_data(nskb, skb_network_header(oldskb), len);
nskb              157 net/bridge/netfilter/nft_reject_bridge.c 	niph->tot_len	= htons(nskb->len);
nskb              160 net/bridge/netfilter/nft_reject_bridge.c 	nft_reject_br_push_etherhdr(oldskb, nskb);
nskb              162 net/bridge/netfilter/nft_reject_bridge.c 	br_forward(br_port_get_rcu(dev), nskb, false, true);
nskb              189 net/bridge/netfilter/nft_reject_bridge.c 	struct sk_buff *nskb;
nskb              202 net/bridge/netfilter/nft_reject_bridge.c 	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
nskb              204 net/bridge/netfilter/nft_reject_bridge.c 	if (!nskb)
nskb              207 net/bridge/netfilter/nft_reject_bridge.c 	skb_reserve(nskb, LL_MAX_HEADER);
nskb              208 net/bridge/netfilter/nft_reject_bridge.c 	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
nskb              210 net/bridge/netfilter/nft_reject_bridge.c 	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
nskb              211 net/bridge/netfilter/nft_reject_bridge.c 	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
nskb              213 net/bridge/netfilter/nft_reject_bridge.c 	nft_reject_br_push_etherhdr(oldskb, nskb);
nskb              215 net/bridge/netfilter/nft_reject_bridge.c 	br_forward(br_port_get_rcu(dev), nskb, false, true);
nskb              248 net/bridge/netfilter/nft_reject_bridge.c 	struct sk_buff *nskb;
nskb              267 net/bridge/netfilter/nft_reject_bridge.c 	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
nskb              269 net/bridge/netfilter/nft_reject_bridge.c 	if (!nskb)
nskb              272 net/bridge/netfilter/nft_reject_bridge.c 	skb_reserve(nskb, LL_MAX_HEADER);
nskb              273 net/bridge/netfilter/nft_reject_bridge.c 	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
nskb              276 net/bridge/netfilter/nft_reject_bridge.c 	skb_reset_transport_header(nskb);
nskb              277 net/bridge/netfilter/nft_reject_bridge.c 	icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
nskb              281 net/bridge/netfilter/nft_reject_bridge.c 	skb_put_data(nskb, skb_network_header(oldskb), len);
nskb              282 net/bridge/netfilter/nft_reject_bridge.c 	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
nskb              286 net/bridge/netfilter/nft_reject_bridge.c 				nskb->len - sizeof(struct ipv6hdr),
nskb              289 net/bridge/netfilter/nft_reject_bridge.c 					     nskb->len - sizeof(struct ipv6hdr),
nskb              292 net/bridge/netfilter/nft_reject_bridge.c 	nft_reject_br_push_etherhdr(oldskb, nskb);
nskb              294 net/bridge/netfilter/nft_reject_bridge.c 	br_forward(br_port_get_rcu(dev), nskb, false, true);
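
All four nft_reject_bridge builders share one skeleton: alloc_skb() sized for the new L3/L4 headers plus LL_MAX_HEADER, skb_reserve(), fill the headers via the nf_reject helpers, fix the length field, re-push an Ethernet header with addresses swapped (nft_reject_br_push_etherhdr), and br_forward() the reply out the ingress port. Condensed to the v4 TCP-reset flavor, with validation of the offending packet omitted (oth is its already-checked TCP header):

static void br_send_v4_reset(struct net *net, struct sk_buff *oldskb,
			     const struct net_device *dev,
			     const struct tcphdr *oth)
{
	struct sk_buff *nskb;
	struct iphdr *niph;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   net->ipv4.sysctl_ip_default_ttl);
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);
	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
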
nskb              400 net/can/gw.c   	struct sk_buff *nskb;
nskb              449 net/can/gw.c   		nskb = skb_copy(skb, GFP_ATOMIC);
nskb              451 net/can/gw.c   		nskb = skb_clone(skb, GFP_ATOMIC);
nskb              453 net/can/gw.c   	if (!nskb) {
nskb              459 net/can/gw.c   	cgw_hops(nskb) = cgw_hops(skb) + 1;
nskb              462 net/can/gw.c   	if (gwj->limit_hops && cgw_hops(nskb) == 1)
nskb              463 net/can/gw.c   		cgw_hops(nskb) = max_hops - gwj->limit_hops + 1;
nskb              465 net/can/gw.c   	nskb->dev = gwj->dst.dev;
nskb              468 net/can/gw.c   	cf = (struct canfd_frame *)nskb->data;
nskb              477 net/can/gw.c   		int max_len = nskb->len - offsetof(struct canfd_frame, data);
nskb              483 net/can/gw.c   			kfree_skb(nskb);
nskb              497 net/can/gw.c   		nskb->tstamp = 0;
nskb              500 net/can/gw.c   	if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
nskb              139 net/core/datagram.c 	struct sk_buff *nskb;
nskb              148 net/core/datagram.c 	nskb = skb_clone(skb, GFP_ATOMIC);
nskb              149 net/core/datagram.c 	if (!nskb)
nskb              152 net/core/datagram.c 	skb->prev->next = nskb;
nskb              153 net/core/datagram.c 	skb->next->prev = nskb;
nskb              154 net/core/datagram.c 	nskb->prev = skb->prev;
nskb              155 net/core/datagram.c 	nskb->next = skb->next;
nskb              158 net/core/datagram.c 	skb = nskb;
nskb              490 net/core/drop_monitor.c 	struct sk_buff *nskb;
nskb              496 net/core/drop_monitor.c 	nskb = skb_clone(skb, GFP_ATOMIC);
nskb              497 net/core/drop_monitor.c 	if (!nskb)
nskb              500 net/core/drop_monitor.c 	NET_DM_SKB_CB(nskb)->pc = location;
nskb              504 net/core/drop_monitor.c 	nskb->tstamp = tstamp;
nskb              510 net/core/drop_monitor.c 		__skb_queue_tail(&data->drop_queue, nskb);
nskb              524 net/core/drop_monitor.c 	consume_skb(nskb);
nskb              906 net/core/drop_monitor.c 	struct sk_buff *nskb;
nskb              912 net/core/drop_monitor.c 	nskb = skb_clone(skb, GFP_ATOMIC);
nskb              913 net/core/drop_monitor.c 	if (!nskb)
nskb              920 net/core/drop_monitor.c 	NET_DM_SKB_CB(nskb)->hw_metadata = n_hw_metadata;
nskb              921 net/core/drop_monitor.c 	nskb->tstamp = tstamp;
nskb              927 net/core/drop_monitor.c 		__skb_queue_tail(&hw_data->drop_queue, nskb);
nskb              943 net/core/drop_monitor.c 	consume_skb(nskb);
nskb             3346 net/core/rtnetlink.c 	struct sk_buff *nskb;
nskb             3386 net/core/rtnetlink.c 	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
nskb             3387 net/core/rtnetlink.c 	if (nskb == NULL)
nskb             3390 net/core/rtnetlink.c 	err = rtnl_fill_ifinfo(nskb, dev, net,
nskb             3397 net/core/rtnetlink.c 		kfree_skb(nskb);
nskb             3399 net/core/rtnetlink.c 		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
nskb             5049 net/core/rtnetlink.c 	struct sk_buff *nskb;
nskb             5071 net/core/rtnetlink.c 	nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
nskb             5072 net/core/rtnetlink.c 	if (!nskb)
nskb             5075 net/core/rtnetlink.c 	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
nskb             5081 net/core/rtnetlink.c 		kfree_skb(nskb);
nskb             5083 net/core/rtnetlink.c 		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
nskb             1307 net/core/skbuff.c static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
nskb             1311 net/core/skbuff.c 		if (skb_zcopy(nskb)) {
nskb             1317 net/core/skbuff.c 			if (skb_uarg(nskb) == skb_uarg(orig))
nskb             1319 net/core/skbuff.c 			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
nskb             1322 net/core/skbuff.c 		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
nskb             3747 net/core/skbuff.c 		struct sk_buff *nskb;
nskb             3788 net/core/skbuff.c 			nskb = skb_clone(list_skb, GFP_ATOMIC);
nskb             3791 net/core/skbuff.c 			if (unlikely(!nskb))
nskb             3794 net/core/skbuff.c 			if (unlikely(pskb_trim(nskb, len))) {
nskb             3795 net/core/skbuff.c 				kfree_skb(nskb);
nskb             3799 net/core/skbuff.c 			hsize = skb_end_offset(nskb);
nskb             3800 net/core/skbuff.c 			if (skb_cow_head(nskb, doffset + headroom)) {
nskb             3801 net/core/skbuff.c 				kfree_skb(nskb);
nskb             3805 net/core/skbuff.c 			nskb->truesize += skb_end_offset(nskb) - hsize;
nskb             3806 net/core/skbuff.c 			skb_release_head_state(nskb);
nskb             3807 net/core/skbuff.c 			__skb_push(nskb, doffset);
nskb             3809 net/core/skbuff.c 			nskb = __alloc_skb(hsize + doffset + headroom,
nskb             3813 net/core/skbuff.c 			if (unlikely(!nskb))
nskb             3816 net/core/skbuff.c 			skb_reserve(nskb, headroom);
nskb             3817 net/core/skbuff.c 			__skb_put(nskb, doffset);
nskb             3821 net/core/skbuff.c 			tail->next = nskb;
nskb             3823 net/core/skbuff.c 			segs = nskb;
nskb             3824 net/core/skbuff.c 		tail = nskb;
nskb             3826 net/core/skbuff.c 		__copy_skb_header(nskb, head_skb);
nskb             3828 net/core/skbuff.c 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
nskb             3829 net/core/skbuff.c 		skb_reset_mac_len(nskb);
nskb             3832 net/core/skbuff.c 						 nskb->data - tnl_hlen,
nskb             3835 net/core/skbuff.c 		if (nskb->len == len + doffset)
nskb             3839 net/core/skbuff.c 			if (!nskb->remcsum_offload)
nskb             3840 net/core/skbuff.c 				nskb->ip_summed = CHECKSUM_NONE;
nskb             3841 net/core/skbuff.c 			SKB_GSO_CB(nskb)->csum =
nskb             3843 net/core/skbuff.c 						       skb_put(nskb, len),
nskb             3845 net/core/skbuff.c 			SKB_GSO_CB(nskb)->csum_start =
nskb             3846 net/core/skbuff.c 				skb_headroom(nskb) + doffset;
nskb             3850 net/core/skbuff.c 		nskb_frag = skb_shinfo(nskb)->frags;
nskb             3853 net/core/skbuff.c 						 skb_put(nskb, hsize), hsize);
nskb             3855 net/core/skbuff.c 		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
nskb             3859 net/core/skbuff.c 		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
nskb             3878 net/core/skbuff.c 				    skb_zerocopy_clone(nskb, frag_skb,
nskb             3885 net/core/skbuff.c 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
nskb             3903 net/core/skbuff.c 			skb_shinfo(nskb)->nr_frags++;
nskb             3918 net/core/skbuff.c 		nskb->data_len = len - hsize;
nskb             3919 net/core/skbuff.c 		nskb->len += nskb->data_len;
nskb             3920 net/core/skbuff.c 		nskb->truesize += nskb->data_len;
nskb             3924 net/core/skbuff.c 			if (skb_has_shared_frag(nskb) &&
nskb             3925 net/core/skbuff.c 			    __skb_linearize(nskb))
nskb             3928 net/core/skbuff.c 			if (!nskb->remcsum_offload)
nskb             3929 net/core/skbuff.c 				nskb->ip_summed = CHECKSUM_NONE;
nskb             3930 net/core/skbuff.c 			SKB_GSO_CB(nskb)->csum =
nskb             3931 net/core/skbuff.c 				skb_checksum(nskb, doffset,
nskb             3932 net/core/skbuff.c 					     nskb->len - doffset, 0);
nskb             3933 net/core/skbuff.c 			SKB_GSO_CB(nskb)->csum_start =
nskb             3934 net/core/skbuff.c 				skb_headroom(nskb) + doffset;
nskb              229 net/core/sock_reuseport.c 	struct sk_buff *nskb = NULL;
nskb              233 net/core/sock_reuseport.c 		nskb = skb_clone(skb, GFP_ATOMIC);
nskb              234 net/core/sock_reuseport.c 		if (!nskb)
nskb              236 net/core/sock_reuseport.c 		skb = nskb;
nskb              241 net/core/sock_reuseport.c 		kfree_skb(nskb);
nskb              247 net/core/sock_reuseport.c 	consume_skb(nskb);
nskb              207 net/dsa/dsa.c  	struct sk_buff *nskb = NULL;
nskb              220 net/dsa/dsa.c  	nskb = cpu_dp->rcv(skb, dev, pt);
nskb              221 net/dsa/dsa.c  	if (!nskb) {
nskb              226 net/dsa/dsa.c  	skb = nskb;
nskb              509 net/dsa/slave.c 	struct sk_buff *nskb;
nskb              528 net/dsa/slave.c 	nskb = p->xmit(skb, dev);
nskb              529 net/dsa/slave.c 	if (!nskb) {
nskb              535 net/dsa/slave.c 	return dsa_enqueue_skb(nskb, dev);
nskb              161 net/dsa/tag_brcm.c 	struct sk_buff *nskb;
nskb              164 net/dsa/tag_brcm.c 	nskb = brcm_tag_rcv_ll(skb, dev, pt, 2);
nskb              165 net/dsa/tag_brcm.c 	if (!nskb)
nskb              166 net/dsa/tag_brcm.c 		return nskb;
nskb              169 net/dsa/tag_brcm.c 	memmove(nskb->data - ETH_HLEN,
nskb              170 net/dsa/tag_brcm.c 		nskb->data - ETH_HLEN - BRCM_TAG_LEN,
nskb              173 net/dsa/tag_brcm.c 	return nskb;
nskb               20 net/dsa/tag_ksz.c 	struct sk_buff *nskb;
nskb               30 net/dsa/tag_ksz.c 		nskb = skb;
nskb               32 net/dsa/tag_ksz.c 		nskb = alloc_skb(NET_IP_ALIGN + skb->len +
nskb               34 net/dsa/tag_ksz.c 		if (!nskb)
nskb               36 net/dsa/tag_ksz.c 		skb_reserve(nskb, NET_IP_ALIGN);
nskb               38 net/dsa/tag_ksz.c 		skb_reset_mac_header(nskb);
nskb               39 net/dsa/tag_ksz.c 		skb_set_network_header(nskb,
nskb               41 net/dsa/tag_ksz.c 		skb_set_transport_header(nskb,
nskb               43 net/dsa/tag_ksz.c 		skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
nskb               48 net/dsa/tag_ksz.c 		if (skb_put_padto(nskb, nskb->len + padlen))
nskb               54 net/dsa/tag_ksz.c 	return nskb;
nskb               95 net/dsa/tag_ksz.c 	struct sk_buff *nskb;
nskb               99 net/dsa/tag_ksz.c 	nskb = ksz_common_xmit(skb, dev, KSZ8795_INGRESS_TAG_LEN);
nskb              100 net/dsa/tag_ksz.c 	if (!nskb)
nskb              104 net/dsa/tag_ksz.c 	tag = skb_put(nskb, KSZ8795_INGRESS_TAG_LEN);
nskb              105 net/dsa/tag_ksz.c 	addr = skb_mac_header(nskb);
nskb              111 net/dsa/tag_ksz.c 	return nskb;
nskb              160 net/dsa/tag_ksz.c 	struct sk_buff *nskb;
nskb              164 net/dsa/tag_ksz.c 	nskb = ksz_common_xmit(skb, dev, KSZ9477_INGRESS_TAG_LEN);
nskb              165 net/dsa/tag_ksz.c 	if (!nskb)
nskb              169 net/dsa/tag_ksz.c 	tag = skb_put(nskb, KSZ9477_INGRESS_TAG_LEN);
nskb              170 net/dsa/tag_ksz.c 	addr = skb_mac_header(nskb);
nskb              179 net/dsa/tag_ksz.c 	return nskb;
nskb              215 net/dsa/tag_ksz.c 	struct sk_buff *nskb;
nskb              219 net/dsa/tag_ksz.c 	nskb = ksz_common_xmit(skb, dev, KSZ_INGRESS_TAG_LEN);
nskb              220 net/dsa/tag_ksz.c 	if (!nskb)
nskb              224 net/dsa/tag_ksz.c 	tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
nskb              225 net/dsa/tag_ksz.c 	addr = skb_mac_header(nskb);
nskb              232 net/dsa/tag_ksz.c 	return nskb;
nskb               16 net/dsa/tag_trailer.c 	struct sk_buff *nskb;
nskb               30 net/dsa/tag_trailer.c 	nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
nskb               31 net/dsa/tag_trailer.c 	if (!nskb)
nskb               33 net/dsa/tag_trailer.c 	skb_reserve(nskb, NET_IP_ALIGN);
nskb               35 net/dsa/tag_trailer.c 	skb_reset_mac_header(nskb);
nskb               36 net/dsa/tag_trailer.c 	skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
nskb               37 net/dsa/tag_trailer.c 	skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
nskb               38 net/dsa/tag_trailer.c 	skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
nskb               42 net/dsa/tag_trailer.c 		skb_put_zero(nskb, padlen);
nskb               45 net/dsa/tag_trailer.c 	trailer = skb_put(nskb, 4);
nskb               51 net/dsa/tag_trailer.c 	return nskb;
nskb              265 net/ieee802154/6lowpan/tx.c 		struct sk_buff *nskb;
nskb              267 net/ieee802154/6lowpan/tx.c 		nskb = skb_copy_expand(skb, ldev->needed_headroom,
nskb              269 net/ieee802154/6lowpan/tx.c 		if (likely(nskb)) {
nskb              271 net/ieee802154/6lowpan/tx.c 			skb = nskb;
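The 6lowpan hit (and the later mac80211/mac802154 ones) grow an skb that is short on headroom with skb_copy_expand rather than building a new buffer by hand. A minimal helper, assuming the caller only needs 'needed' bytes in front:

static struct sk_buff *grow_headroom(struct sk_buff *skb, int needed)
{
	struct sk_buff *nskb;

	if (skb_headroom(skb) >= needed)
		return skb;

	nskb = skb_copy_expand(skb, needed, 0, GFP_ATOMIC);
	if (!nskb) {
		kfree_skb(skb);		/* drop: no memory for the copy */
		return NULL;
	}
	consume_skb(skb);		/* copy took over; retire original */
	return nskb;
}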
nskb              276 net/ipv4/ip_output.c 		struct sk_buff *nskb = segs->next;
nskb              284 net/ipv4/ip_output.c 		segs = nskb;
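The two ip_output.c hits above are the standard GSO walk: each segment's ->next is saved into nskb before the segment is handed down, because the transmit path may free or re-link it. A sketch of the loop, with the error cleanup borrowed from the xfrm_output hits at the end of this listing:

static int xmit_gso_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *nskb = segs->next; /* save before xmit */
		int err;

		skb_mark_not_on_list(segs);
		err = dev_queue_xmit(segs);	/* may free segs */
		if (err) {
			kfree_skb_list(nskb);	/* unsent remainder */
			return err;
		}
		segs = nskb;
	}
	return 0;
}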
nskb             1667 net/ipv4/ip_output.c 	struct sk_buff *nskb;
nskb             1715 net/ipv4/ip_output.c 	nskb = skb_peek(&sk->sk_write_queue);
nskb             1716 net/ipv4/ip_output.c 	if (nskb) {
nskb             1718 net/ipv4/ip_output.c 			*((__sum16 *)skb_transport_header(nskb) +
nskb             1719 net/ipv4/ip_output.c 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
nskb             1721 net/ipv4/ip_output.c 		nskb->ip_summed = CHECKSUM_NONE;
nskb               44 net/ipv4/netfilter/nf_reject_ipv4.c struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
nskb               50 net/ipv4/netfilter/nf_reject_ipv4.c 	skb_reset_network_header(nskb);
nskb               51 net/ipv4/netfilter/nf_reject_ipv4.c 	niph = skb_put(nskb, sizeof(struct iphdr));
nskb               63 net/ipv4/netfilter/nf_reject_ipv4.c 	nskb->protocol = htons(ETH_P_IP);
nskb               69 net/ipv4/netfilter/nf_reject_ipv4.c void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
nskb               72 net/ipv4/netfilter/nf_reject_ipv4.c 	struct iphdr *niph = ip_hdr(nskb);
nskb               75 net/ipv4/netfilter/nf_reject_ipv4.c 	skb_reset_transport_header(nskb);
nskb               76 net/ipv4/netfilter/nf_reject_ipv4.c 	tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
nskb               93 net/ipv4/netfilter/nf_reject_ipv4.c 	nskb->ip_summed = CHECKSUM_PARTIAL;
nskb               94 net/ipv4/netfilter/nf_reject_ipv4.c 	nskb->csum_start = (unsigned char *)tcph - nskb->head;
nskb               95 net/ipv4/netfilter/nf_reject_ipv4.c 	nskb->csum_offset = offsetof(struct tcphdr, check);
nskb              103 net/ipv4/netfilter/nf_reject_ipv4.c 	struct sk_buff *nskb;
nskb              115 net/ipv4/netfilter/nf_reject_ipv4.c 	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
nskb              117 net/ipv4/netfilter/nf_reject_ipv4.c 	if (!nskb)
nskb              121 net/ipv4/netfilter/nf_reject_ipv4.c 	skb_dst_set_noref(nskb, skb_dst(oldskb));
nskb              123 net/ipv4/netfilter/nf_reject_ipv4.c 	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
nskb              125 net/ipv4/netfilter/nf_reject_ipv4.c 	skb_reserve(nskb, LL_MAX_HEADER);
nskb              126 net/ipv4/netfilter/nf_reject_ipv4.c 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
nskb              127 net/ipv4/netfilter/nf_reject_ipv4.c 				   ip4_dst_hoplimit(skb_dst(nskb)));
nskb              128 net/ipv4/netfilter/nf_reject_ipv4.c 	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
nskb              130 net/ipv4/netfilter/nf_reject_ipv4.c 	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
nskb              133 net/ipv4/netfilter/nf_reject_ipv4.c 	niph = ip_hdr(nskb);
nskb              136 net/ipv4/netfilter/nf_reject_ipv4.c 	if (nskb->len > dst_mtu(skb_dst(nskb)))
nskb              139 net/ipv4/netfilter/nf_reject_ipv4.c 	nf_ct_attach(nskb, oldskb);
nskb              152 net/ipv4/netfilter/nf_reject_ipv4.c 		nskb->dev = br_indev;
nskb              153 net/ipv4/netfilter/nf_reject_ipv4.c 		niph->tot_len = htons(nskb->len);
nskb              155 net/ipv4/netfilter/nf_reject_ipv4.c 		if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
nskb              156 net/ipv4/netfilter/nf_reject_ipv4.c 				    oeth->h_source, oeth->h_dest, nskb->len) < 0)
nskb              158 net/ipv4/netfilter/nf_reject_ipv4.c 		dev_queue_xmit(nskb);
nskb              161 net/ipv4/netfilter/nf_reject_ipv4.c 		ip_local_out(net, nskb->sk, nskb);
nskb              166 net/ipv4/netfilter/nf_reject_ipv4.c 	kfree_skb(nskb);
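Read top to bottom, the nf_reject_ipv4.c hits are a complete packet build: size the allocation for both headers plus LL_MAX_HEADER, reserve the link-layer headroom, then skb_put the network and transport headers in order before routing and transmitting. A compressed sketch of just the buffer choreography, with the header fill-in mostly elided:

static struct sk_buff *build_rst_skeleton(void)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct tcphdr *tcph;

	nskb = alloc_skb(sizeof(*niph) + sizeof(*tcph) + LL_MAX_HEADER,
			 GFP_ATOMIC);
	if (!nskb)
		return NULL;
	skb_reserve(nskb, LL_MAX_HEADER);	/* worst-case link header */

	skb_reset_network_header(nskb);
	niph = skb_put(nskb, sizeof(*niph));
	niph->version = 4;			/* real code fills the rest */
	niph->ihl = sizeof(*niph) / 4;

	skb_reset_transport_header(nskb);
	tcph = skb_put_zero(nskb, sizeof(*tcph));
	tcph->rst = 1;				/* this is a reset */

	return nskb;
}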
nskb             4964 net/ipv4/tcp_input.c 		struct sk_buff *nskb;
nskb             4966 net/ipv4/tcp_input.c 		nskb = alloc_skb(copy, GFP_ATOMIC);
nskb             4967 net/ipv4/tcp_input.c 		if (!nskb)
nskb             4970 net/ipv4/tcp_input.c 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
nskb             4972 net/ipv4/tcp_input.c 		nskb->decrypted = skb->decrypted;
nskb             4974 net/ipv4/tcp_input.c 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
nskb             4976 net/ipv4/tcp_input.c 			__skb_queue_before(list, skb, nskb);
nskb             4978 net/ipv4/tcp_input.c 			__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
nskb             4979 net/ipv4/tcp_input.c 		skb_set_owner_r(nskb, sk);
nskb             4989 net/ipv4/tcp_input.c 				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
nskb             4991 net/ipv4/tcp_input.c 				TCP_SKB_CB(nskb)->end_seq += size;
nskb             5002 net/ipv4/tcp_input.c 				if (skb->decrypted != nskb->decrypted)
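The tcp_input.c hits are out-of-order-queue collapsing: a fresh nskb inherits the control block and starting sequence, then payload is pulled across in chunks with skb_copy_bits into skb_put'd tail space, advancing end_seq as it goes. The copy step in isolation:

static int collapse_copy(struct sk_buff *nskb, const struct sk_buff *skb,
			 int offset, int size)
{
	int err = skb_copy_bits(skb, offset, skb_put(nskb, size), size);

	if (err)
		return err;			/* source shorter than claimed */
	TCP_SKB_CB(nskb)->end_seq += size;	/* account the copied bytes */
	return 0;
}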
nskb             2097 net/ipv4/tcp_output.c 	struct sk_buff *skb, *nskb, *next;
nskb             2160 net/ipv4/tcp_output.c 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
nskb             2161 net/ipv4/tcp_output.c 	if (!nskb)
nskb             2163 net/ipv4/tcp_output.c 	sk_wmem_queued_add(sk, nskb->truesize);
nskb             2164 net/ipv4/tcp_output.c 	sk_mem_charge(sk, nskb->truesize);
nskb             2167 net/ipv4/tcp_output.c 	skb_copy_decrypted(nskb, skb);
nskb             2169 net/ipv4/tcp_output.c 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
nskb             2170 net/ipv4/tcp_output.c 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
nskb             2171 net/ipv4/tcp_output.c 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
nskb             2172 net/ipv4/tcp_output.c 	TCP_SKB_CB(nskb)->sacked = 0;
nskb             2173 net/ipv4/tcp_output.c 	nskb->csum = 0;
nskb             2174 net/ipv4/tcp_output.c 	nskb->ip_summed = CHECKSUM_PARTIAL;
nskb             2176 net/ipv4/tcp_output.c 	tcp_insert_write_queue_before(nskb, skb, sk);
nskb             2177 net/ipv4/tcp_output.c 	tcp_highest_sack_replace(sk, skb, nskb);
nskb             2182 net/ipv4/tcp_output.c 		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
nskb             2187 net/ipv4/tcp_output.c 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
nskb             2191 net/ipv4/tcp_output.c 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
nskb             2192 net/ipv4/tcp_output.c 			tcp_skb_collapse_tstamp(nskb, skb);
nskb             2196 net/ipv4/tcp_output.c 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
nskb             2212 net/ipv4/tcp_output.c 	tcp_init_tso_segs(nskb, nskb->len);
nskb             2217 net/ipv4/tcp_output.c 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
nskb             2221 net/ipv4/tcp_output.c 		tcp_event_new_data_sent(sk, nskb);
nskb             2223 net/ipv4/tcp_output.c 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
nskb             2224 net/ipv4/tcp_output.c 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
nskb             2225 net/ipv4/tcp_output.c 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
nskb             2979 net/ipv4/tcp_output.c 		struct sk_buff *nskb;
nskb             2982 net/ipv4/tcp_output.c 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
nskb             2983 net/ipv4/tcp_output.c 			if (nskb) {
nskb             2984 net/ipv4/tcp_output.c 				nskb->dev = NULL;
nskb             2985 net/ipv4/tcp_output.c 				err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
nskb             3234 net/ipv4/tcp_output.c 			struct sk_buff *nskb;
nskb             3237 net/ipv4/tcp_output.c 				nskb = skb_copy(skb, GFP_ATOMIC);
nskb             3239 net/ipv4/tcp_output.c 			if (!nskb)
nskb             3241 net/ipv4/tcp_output.c 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
nskb             3242 net/ipv4/tcp_output.c 			tcp_highest_sack_replace(sk, skb, nskb);
nskb             3244 net/ipv4/tcp_output.c 			__skb_header_release(nskb);
nskb             3245 net/ipv4/tcp_output.c 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
nskb             3246 net/ipv4/tcp_output.c 			sk_wmem_queued_add(sk, nskb->truesize);
nskb             3247 net/ipv4/tcp_output.c 			sk_mem_charge(sk, nskb->truesize);
nskb             3248 net/ipv4/tcp_output.c 			skb = nskb;
nskb             2157 net/ipv4/udp.c 	struct sk_buff *nskb;
nskb             2177 net/ipv4/udp.c 		nskb = skb_clone(skb, GFP_ATOMIC);
nskb             2179 net/ipv4/udp.c 		if (unlikely(!nskb)) {
nskb             2187 net/ipv4/udp.c 		if (udp_queue_rcv_skb(sk, nskb) > 0)
nskb             2188 net/ipv4/udp.c 			consume_skb(nskb);
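The udp.c hits show the multicast fan-out rule: every matching socket receives its own clone, and a clone the receive path did not take ownership of is released with consume_skb rather than kfree_skb, since nothing was actually dropped. A generic sketch using plain sock_queue_rcv_skb (the UDP code uses its protocol-specific variant with slightly different return semantics):

static void fanout_deliver(struct sock **socks, int n, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < n; i++) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb))
			continue;		/* this receiver misses out */
		if (sock_queue_rcv_skb(socks[i], nskb))
			kfree_skb(nskb);	/* queue refused the clone */
	}
	consume_skb(skb);	/* the original served only as template */
}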
nskb               63 net/ipv6/netfilter/nf_reject_ipv6.c struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
nskb               72 net/ipv6/netfilter/nf_reject_ipv6.c 	skb_put(nskb, sizeof(struct ipv6hdr));
nskb               73 net/ipv6/netfilter/nf_reject_ipv6.c 	skb_reset_network_header(nskb);
nskb               74 net/ipv6/netfilter/nf_reject_ipv6.c 	ip6h = ipv6_hdr(nskb);
nskb               81 net/ipv6/netfilter/nf_reject_ipv6.c 	nskb->protocol = htons(ETH_P_IPV6);
nskb               87 net/ipv6/netfilter/nf_reject_ipv6.c void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
nskb               94 net/ipv6/netfilter/nf_reject_ipv6.c 	skb_reset_transport_header(nskb);
nskb               95 net/ipv6/netfilter/nf_reject_ipv6.c 	tcph = skb_put(nskb, sizeof(struct tcphdr));
nskb              121 net/ipv6/netfilter/nf_reject_ipv6.c 	tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
nskb              122 net/ipv6/netfilter/nf_reject_ipv6.c 				      &ipv6_hdr(nskb)->daddr,
nskb              132 net/ipv6/netfilter/nf_reject_ipv6.c 	struct sk_buff *nskb;
nskb              170 net/ipv6/netfilter/nf_reject_ipv6.c 	nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
nskb              174 net/ipv6/netfilter/nf_reject_ipv6.c 	if (!nskb) {
nskb              180 net/ipv6/netfilter/nf_reject_ipv6.c 	skb_dst_set(nskb, dst);
nskb              182 net/ipv6/netfilter/nf_reject_ipv6.c 	nskb->mark = fl6.flowi6_mark;
nskb              184 net/ipv6/netfilter/nf_reject_ipv6.c 	skb_reserve(nskb, hh_len + dst->header_len);
nskb              185 net/ipv6/netfilter/nf_reject_ipv6.c 	ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
nskb              187 net/ipv6/netfilter/nf_reject_ipv6.c 	nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
nskb              189 net/ipv6/netfilter/nf_reject_ipv6.c 	nf_ct_attach(nskb, oldskb);
nskb              202 net/ipv6/netfilter/nf_reject_ipv6.c 		nskb->dev = br_indev;
nskb              203 net/ipv6/netfilter/nf_reject_ipv6.c 		nskb->protocol = htons(ETH_P_IPV6);
nskb              205 net/ipv6/netfilter/nf_reject_ipv6.c 		if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
nskb              206 net/ipv6/netfilter/nf_reject_ipv6.c 				    oeth->h_source, oeth->h_dest, nskb->len) < 0) {
nskb              207 net/ipv6/netfilter/nf_reject_ipv6.c 			kfree_skb(nskb);
nskb              210 net/ipv6/netfilter/nf_reject_ipv6.c 		dev_queue_xmit(nskb);
nskb              213 net/ipv6/netfilter/nf_reject_ipv6.c 		ip6_local_out(net, nskb->sk, nskb);
nskb              756 net/ipv6/udp.c 	struct sk_buff *nskb;
nskb              781 net/ipv6/udp.c 		nskb = skb_clone(skb, GFP_ATOMIC);
nskb              782 net/ipv6/udp.c 		if (unlikely(!nskb)) {
nskb              791 net/ipv6/udp.c 		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
nskb              792 net/ipv6/udp.c 			consume_skb(nskb);
nskb              322 net/iucv/af_iucv.c 	struct sk_buff *nskb;
nskb              373 net/iucv/af_iucv.c 	nskb = skb_clone(skb, GFP_ATOMIC);
nskb              374 net/iucv/af_iucv.c 	if (!nskb) {
nskb              379 net/iucv/af_iucv.c 	skb_queue_tail(&iucv->send_skb_q, nskb);
nskb              382 net/iucv/af_iucv.c 		skb_unlink(nskb, &iucv->send_skb_q);
nskb              383 net/iucv/af_iucv.c 		kfree_skb(nskb);
nskb             2282 net/iucv/af_iucv.c 	struct sk_buff *nskb;
nskb             2298 net/iucv/af_iucv.c 	skb_queue_walk_safe(list, list_skb, nskb) {
nskb              202 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
nskb              204 net/llc/llc_c_ac.c 	if (nskb) {
nskb              207 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
nskb              209 net/llc/llc_c_ac.c 		llc_pdu_init_as_disc_cmd(nskb, 1);
nskb              210 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              213 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              219 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              227 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
nskb              229 net/llc/llc_c_ac.c 	if (nskb) {
nskb              234 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
nskb              236 net/llc/llc_c_ac.c 		llc_pdu_init_as_dm_rsp(nskb, f_bit);
nskb              237 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              240 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              245 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              253 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
nskb              255 net/llc/llc_c_ac.c 	if (nskb) {
nskb              258 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
nskb              260 net/llc/llc_c_ac.c 		llc_pdu_init_as_dm_rsp(nskb, 1);
nskb              261 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              264 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              269 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              277 net/llc/llc_c_ac.c 	struct sk_buff *nskb;
nskb              286 net/llc/llc_c_ac.c 	nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U,
nskb              288 net/llc/llc_c_ac.c 	if (nskb) {
nskb              291 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
nskb              293 net/llc/llc_c_ac.c 		llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS,
nskb              295 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              298 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              303 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              311 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U,
nskb              314 net/llc/llc_c_ac.c 	if (nskb) {
nskb              318 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
nskb              320 net/llc/llc_c_ac.c 		llc_pdu_init_as_frmr_rsp(nskb, pdu, 0, llc->vS,
nskb              322 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              325 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              330 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              338 net/llc/llc_c_ac.c 	struct sk_buff *nskb;
nskb              342 net/llc/llc_c_ac.c 	nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U,
nskb              344 net/llc/llc_c_ac.c 	if (nskb) {
nskb              348 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
nskb              350 net/llc/llc_c_ac.c 		llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS,
nskb              352 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              355 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              360 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              434 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
nskb              436 net/llc/llc_c_ac.c 	if (nskb) {
nskb              439 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
nskb              441 net/llc/llc_c_ac.c 		llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR);
nskb              442 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              444 net/llc/llc_c_ac.c 			llc_conn_send_pdu(sk, nskb);
nskb              469 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              471 net/llc/llc_c_ac.c 	if (nskb) {
nskb              474 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              476 net/llc/llc_c_ac.c 		llc_pdu_init_as_rej_cmd(nskb, 1, llc->vR);
nskb              477 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              480 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              485 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              493 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              495 net/llc/llc_c_ac.c 	if (nskb) {
nskb              498 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              500 net/llc/llc_c_ac.c 		llc_pdu_init_as_rej_rsp(nskb, 1, llc->vR);
nskb              501 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              504 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              509 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              517 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              519 net/llc/llc_c_ac.c 	if (nskb) {
nskb              522 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              524 net/llc/llc_c_ac.c 		llc_pdu_init_as_rej_rsp(nskb, 0, llc->vR);
nskb              525 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              528 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              533 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              541 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              543 net/llc/llc_c_ac.c 	if (nskb) {
nskb              546 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              548 net/llc/llc_c_ac.c 		llc_pdu_init_as_rnr_cmd(nskb, 1, llc->vR);
nskb              549 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              552 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              557 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              565 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              567 net/llc/llc_c_ac.c 	if (nskb) {
nskb              570 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              572 net/llc/llc_c_ac.c 		llc_pdu_init_as_rnr_rsp(nskb, 1, llc->vR);
nskb              573 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              576 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              581 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              589 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              591 net/llc/llc_c_ac.c 	if (nskb) {
nskb              594 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              596 net/llc/llc_c_ac.c 		llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR);
nskb              597 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              600 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              605 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              625 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              627 net/llc/llc_c_ac.c 	if (nskb) {
nskb              630 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              632 net/llc/llc_c_ac.c 		llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR);
nskb              633 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              636 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              641 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              649 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              651 net/llc/llc_c_ac.c 	if (nskb) {
nskb              654 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              656 net/llc/llc_c_ac.c 		llc_pdu_init_as_rr_cmd(nskb, 1, llc->vR);
nskb              657 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              660 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              665 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              673 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              675 net/llc/llc_c_ac.c 	if (nskb) {
nskb              679 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              681 net/llc/llc_c_ac.c 		llc_pdu_init_as_rr_rsp(nskb, f_bit, llc->vR);
nskb              682 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              685 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              690 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              698 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              700 net/llc/llc_c_ac.c 	if (nskb) {
nskb              703 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              705 net/llc/llc_c_ac.c 		llc_pdu_init_as_rr_rsp(nskb, 1, llc->vR);
nskb              706 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              709 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              714 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              722 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              724 net/llc/llc_c_ac.c 	if (nskb) {
nskb              727 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              729 net/llc/llc_c_ac.c 		llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR);
nskb              730 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              733 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              738 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              746 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              748 net/llc/llc_c_ac.c 	if (nskb) {
nskb              751 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              753 net/llc/llc_c_ac.c 		llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR);
nskb              754 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              757 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              762 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              780 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
nskb              782 net/llc/llc_c_ac.c 	if (nskb) {
nskb              788 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
nskb              790 net/llc/llc_c_ac.c 		llc_pdu_init_as_sabme_cmd(nskb, 1);
nskb              791 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, dmac);
nskb              794 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              800 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              809 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
nskb              812 net/llc/llc_c_ac.c 	if (nskb) {
nskb              815 net/llc/llc_c_ac.c 		nskb->dev = llc->dev;
nskb              816 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
nskb              818 net/llc/llc_c_ac.c 		llc_pdu_init_as_ua_rsp(nskb, f_bit);
nskb              819 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              822 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              827 net/llc/llc_c_ac.c 	kfree_skb(nskb);
nskb              970 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
nskb              972 net/llc/llc_c_ac.c 	if (nskb) {
nskb              975 net/llc/llc_c_ac.c 		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
nskb              977 net/llc/llc_c_ac.c 		llc_pdu_init_as_rr_rsp(nskb, llc->ack_pf, llc->vR);
nskb              978 net/llc/llc_c_ac.c 		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
nskb              981 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
nskb              986 net/llc/llc_c_ac.c 	kfree_skb(nskb);
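Every llc_c_ac.c hit above instantiates the same five-step template, varying only the PDU type and the init_as helper: allocate a frame, write the LLC header, specialize it, prepend the MAC header, and hand it to the connection transmit path, freeing on any failure. Condensed into one hedged example built from names in the hits:

static int llc_send_rr_rsp_example(struct sock *sk)
{
	struct llc_sock *llc = llc_sk(sk);
	struct llc_sap *sap = llc->sap;
	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
	int rc = -ENOBUFS;

	if (nskb) {
		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
				    llc->daddr.lsap, LLC_PDU_RSP);
		llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR);
		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
		if (likely(!rc))
			llc_conn_send_pdu(sk, nskb);
		else
			kfree_skb(nskb);
	}
	return rc;
}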
nskb              105 net/llc/llc_s_ac.c 	struct sk_buff *nskb;
nskb              110 net/llc/llc_s_ac.c 	nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U,
nskb              112 net/llc/llc_s_ac.c 	if (!nskb)
nskb              114 net/llc/llc_s_ac.c 	llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap,
nskb              116 net/llc/llc_s_ac.c 	llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0);
nskb              117 net/llc/llc_s_ac.c 	rc = llc_mac_hdr_init(nskb, mac_sa, mac_da);
nskb              119 net/llc/llc_s_ac.c 		rc = dev_queue_xmit(nskb);
nskb              152 net/llc/llc_s_ac.c 	struct sk_buff *nskb;
nskb              162 net/llc/llc_s_ac.c 	nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
nskb              163 net/llc/llc_s_ac.c 	if (!nskb)
nskb              165 net/llc/llc_s_ac.c 	llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap,
nskb              167 net/llc/llc_s_ac.c 	llc_pdu_init_as_test_rsp(nskb, skb);
nskb              168 net/llc/llc_s_ac.c 	rc = llc_mac_hdr_init(nskb, mac_sa, mac_da);
nskb              170 net/llc/llc_s_ac.c 		rc = dev_queue_xmit(nskb);
nskb               52 net/llc/llc_station.c 	struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U,
nskb               55 net/llc/llc_station.c 	if (!nskb)
nskb               60 net/llc/llc_station.c 	llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP);
nskb               61 net/llc/llc_station.c 	llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127);
nskb               62 net/llc/llc_station.c 	rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
nskb               65 net/llc/llc_station.c 	dev_queue_xmit(nskb);
nskb               69 net/llc/llc_station.c 	kfree_skb(nskb);
nskb               78 net/llc/llc_station.c 	struct sk_buff *nskb;
nskb               82 net/llc/llc_station.c 	nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
nskb               84 net/llc/llc_station.c 	if (!nskb)
nskb               89 net/llc/llc_station.c 	llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP);
nskb               90 net/llc/llc_station.c 	llc_pdu_init_as_test_rsp(nskb, skb);
nskb               91 net/llc/llc_station.c 	rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
nskb               94 net/llc/llc_station.c 	dev_queue_xmit(nskb);
nskb               98 net/llc/llc_station.c 	kfree_skb(nskb);
nskb             3389 net/mac80211/rx.c 	struct sk_buff *nskb;
nskb             3418 net/mac80211/rx.c 	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
nskb             3420 net/mac80211/rx.c 	if (nskb) {
nskb             3421 net/mac80211/rx.c 		struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
nskb             3427 net/mac80211/rx.c 		memset(nskb->cb, 0, sizeof(nskb->cb));
nskb             3430 net/mac80211/rx.c 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
nskb             3440 net/mac80211/rx.c 		__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
nskb               58 net/mac802154/tx.c 		struct sk_buff *nskb;
nskb               62 net/mac802154/tx.c 			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
nskb               64 net/mac802154/tx.c 			if (likely(nskb)) {
nskb               66 net/mac802154/tx.c 				skb = nskb;
nskb             1865 net/netfilter/nf_conntrack_core.c static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
nskb             1878 net/netfilter/nf_conntrack_core.c 	nf_ct_set(nskb, ct, ctinfo);
nskb             1879 net/netfilter/nf_conntrack_core.c 	nf_conntrack_get(skb_nfct(nskb));
nskb              437 net/netfilter/nf_synproxy_core.c 		  const struct sk_buff *skb, struct sk_buff *nskb,
nskb              443 net/netfilter/nf_synproxy_core.c 	nskb->ip_summed   = CHECKSUM_PARTIAL;
nskb              444 net/netfilter/nf_synproxy_core.c 	nskb->csum_start  = (unsigned char *)nth - nskb->head;
nskb              445 net/netfilter/nf_synproxy_core.c 	nskb->csum_offset = offsetof(struct tcphdr, check);
nskb              447 net/netfilter/nf_synproxy_core.c 	skb_dst_set_noref(nskb, skb_dst(skb));
nskb              448 net/netfilter/nf_synproxy_core.c 	nskb->protocol = htons(ETH_P_IP);
nskb              449 net/netfilter/nf_synproxy_core.c 	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
nskb              453 net/netfilter/nf_synproxy_core.c 		nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
nskb              457 net/netfilter/nf_synproxy_core.c 	ip_local_out(net, nskb->sk, nskb);
nskb              461 net/netfilter/nf_synproxy_core.c 	kfree_skb(nskb);
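Before transmitting, synproxy_send_tcp marks the hand-built TCP header for deferred checksumming: CHECKSUM_PARTIAL plus csum_start/csum_offset tell the stack (or the NIC) exactly where to fold the final checksum. Extracted from the hits above:

static void mark_tcp_csum_partial(struct sk_buff *nskb, struct tcphdr *nth)
{
	nskb->ip_summed   = CHECKSUM_PARTIAL;
	nskb->csum_start  = (unsigned char *)nth - nskb->head;
	nskb->csum_offset = offsetof(struct tcphdr, check);
}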
nskb              469 net/netfilter/nf_synproxy_core.c 	struct sk_buff *nskb;
nskb              478 net/netfilter/nf_synproxy_core.c 	nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
nskb              480 net/netfilter/nf_synproxy_core.c 	if (!nskb)
nskb              482 net/netfilter/nf_synproxy_core.c 	skb_reserve(nskb, MAX_TCP_HEADER);
nskb              484 net/netfilter/nf_synproxy_core.c 	niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
nskb              486 net/netfilter/nf_synproxy_core.c 	skb_reset_transport_header(nskb);
nskb              487 net/netfilter/nf_synproxy_core.c 	nth = skb_put(nskb, tcp_hdr_size);
nskb              502 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
nskb              513 net/netfilter/nf_synproxy_core.c 	struct sk_buff *nskb;
nskb              521 net/netfilter/nf_synproxy_core.c 	nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
nskb              523 net/netfilter/nf_synproxy_core.c 	if (!nskb)
nskb              525 net/netfilter/nf_synproxy_core.c 	skb_reserve(nskb, MAX_TCP_HEADER);
nskb              527 net/netfilter/nf_synproxy_core.c 	niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
nskb              529 net/netfilter/nf_synproxy_core.c 	skb_reset_transport_header(nskb);
nskb              530 net/netfilter/nf_synproxy_core.c 	nth = skb_put(nskb, tcp_hdr_size);
nskb              548 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
nskb              558 net/netfilter/nf_synproxy_core.c 	struct sk_buff *nskb;
nskb              566 net/netfilter/nf_synproxy_core.c 	nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
nskb              568 net/netfilter/nf_synproxy_core.c 	if (!nskb)
nskb              570 net/netfilter/nf_synproxy_core.c 	skb_reserve(nskb, MAX_TCP_HEADER);
nskb              572 net/netfilter/nf_synproxy_core.c 	niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
nskb              574 net/netfilter/nf_synproxy_core.c 	skb_reset_transport_header(nskb);
nskb              575 net/netfilter/nf_synproxy_core.c 	nth = skb_put(nskb, tcp_hdr_size);
nskb              588 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
nskb              596 net/netfilter/nf_synproxy_core.c 	struct sk_buff *nskb;
nskb              604 net/netfilter/nf_synproxy_core.c 	nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
nskb              606 net/netfilter/nf_synproxy_core.c 	if (!nskb)
nskb              608 net/netfilter/nf_synproxy_core.c 	skb_reserve(nskb, MAX_TCP_HEADER);
nskb              610 net/netfilter/nf_synproxy_core.c 	niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
nskb              612 net/netfilter/nf_synproxy_core.c 	skb_reset_transport_header(nskb);
nskb              613 net/netfilter/nf_synproxy_core.c 	nth = skb_put(nskb, tcp_hdr_size);
nskb              626 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
nskb              832 net/netfilter/nf_synproxy_core.c 		       const struct sk_buff *skb, struct sk_buff *nskb,
nskb              842 net/netfilter/nf_synproxy_core.c 	nskb->ip_summed   = CHECKSUM_PARTIAL;
nskb              843 net/netfilter/nf_synproxy_core.c 	nskb->csum_start  = (unsigned char *)nth - nskb->head;
nskb              844 net/netfilter/nf_synproxy_core.c 	nskb->csum_offset = offsetof(struct tcphdr, check);
nskb              863 net/netfilter/nf_synproxy_core.c 	skb_dst_set(nskb, dst);
nskb              866 net/netfilter/nf_synproxy_core.c 		nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
nskb              870 net/netfilter/nf_synproxy_core.c 	ip6_local_out(net, nskb->sk, nskb);
nskb              874 net/netfilter/nf_synproxy_core.c 	kfree_skb(nskb);
nskb              883 net/netfilter/nf_synproxy_core.c 	struct sk_buff *nskb;
nskb              892 net/netfilter/nf_synproxy_core.c 	nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
nskb              894 net/netfilter/nf_synproxy_core.c 	if (!nskb)
nskb              896 net/netfilter/nf_synproxy_core.c 	skb_reserve(nskb, MAX_TCP_HEADER);
nskb              898 net/netfilter/nf_synproxy_core.c 	niph = synproxy_build_ip_ipv6(net, nskb, &iph->daddr, &iph->saddr);
nskb              900 net/netfilter/nf_synproxy_core.c 	skb_reset_transport_header(nskb);
nskb              901 net/netfilter/nf_synproxy_core.c 	nth = skb_put(nskb, tcp_hdr_size);
nskb              916 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp_ipv6(net, skb, nskb, skb_nfct(skb),
nskb              928 net/netfilter/nf_synproxy_core.c 	struct sk_buff *nskb;
nskb              936 net/netfilter/nf_synproxy_core.c 	nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
nskb              938 net/netfilter/nf_synproxy_core.c 	if (!nskb)
nskb              940 net/netfilter/nf_synproxy_core.c 	skb_reserve(nskb, MAX_TCP_HEADER);
nskb              942 net/netfilter/nf_synproxy_core.c 	niph = synproxy_build_ip_ipv6(net, nskb, &iph->saddr, &iph->daddr);
nskb              944 net/netfilter/nf_synproxy_core.c 	skb_reset_transport_header(nskb);
nskb              945 net/netfilter/nf_synproxy_core.c 	nth = skb_put(nskb, tcp_hdr_size);
nskb              963 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp_ipv6(net, skb, nskb, &snet->tmpl->ct_general,
nskb              973 net/netfilter/nf_synproxy_core.c 	struct sk_buff *nskb;
nskb              981 net/netfilter/nf_synproxy_core.c 	nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
nskb              983 net/netfilter/nf_synproxy_core.c 	if (!nskb)
nskb              985 net/netfilter/nf_synproxy_core.c 	skb_reserve(nskb, MAX_TCP_HEADER);
nskb              987 net/netfilter/nf_synproxy_core.c 	niph = synproxy_build_ip_ipv6(net, nskb, &iph->daddr, &iph->saddr);
nskb              989 net/netfilter/nf_synproxy_core.c 	skb_reset_transport_header(nskb);
nskb              990 net/netfilter/nf_synproxy_core.c 	nth = skb_put(nskb, tcp_hdr_size);
nskb             1003 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp_ipv6(net, skb, nskb, NULL, 0, niph, nth,
nskb             1012 net/netfilter/nf_synproxy_core.c 	struct sk_buff *nskb;
nskb             1020 net/netfilter/nf_synproxy_core.c 	nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
nskb             1022 net/netfilter/nf_synproxy_core.c 	if (!nskb)
nskb             1024 net/netfilter/nf_synproxy_core.c 	skb_reserve(nskb, MAX_TCP_HEADER);
nskb             1026 net/netfilter/nf_synproxy_core.c 	niph = synproxy_build_ip_ipv6(net, nskb, &iph->saddr, &iph->daddr);
nskb             1028 net/netfilter/nf_synproxy_core.c 	skb_reset_transport_header(nskb);
nskb             1029 net/netfilter/nf_synproxy_core.c 	nth = skb_put(nskb, tcp_hdr_size);
nskb             1042 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp_ipv6(net, skb, nskb, skb_nfct(skb),
nskb              654 net/netfilter/nfnetlink_queue.c 	struct sk_buff *nskb;
nskb              659 net/netfilter/nfnetlink_queue.c 	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
nskb              660 net/netfilter/nfnetlink_queue.c 	if (nskb == NULL) {
nskb              684 net/netfilter/nfnetlink_queue.c 	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
nskb              701 net/netfilter/nfnetlink_queue.c 	kfree_skb(nskb);
nskb              819 net/netfilter/nfnetlink_queue.c 		struct sk_buff *nskb = segs->next;
nskb              827 net/netfilter/nfnetlink_queue.c 		segs = nskb;
nskb              844 net/netfilter/nfnetlink_queue.c 	struct sk_buff *nskb;
nskb              853 net/netfilter/nfnetlink_queue.c 			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
nskb              855 net/netfilter/nfnetlink_queue.c 			if (!nskb)
nskb              858 net/netfilter/nfnetlink_queue.c 			e->skb = nskb;
nskb              275 net/netlink/af_netlink.c 	struct sk_buff *nskb;
nskb              285 net/netlink/af_netlink.c 		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
nskb              287 net/netlink/af_netlink.c 		nskb = skb_clone(skb, GFP_ATOMIC);
nskb              288 net/netlink/af_netlink.c 	if (nskb) {
nskb              289 net/netlink/af_netlink.c 		nskb->dev = dev;
nskb              290 net/netlink/af_netlink.c 		nskb->protocol = htons((u16) sk->sk_protocol);
nskb              291 net/netlink/af_netlink.c 		nskb->pkt_type = netlink_is_kernel(sk) ?
nskb              293 net/netlink/af_netlink.c 		skb_reset_network_header(nskb);
nskb              294 net/netlink/af_netlink.c 		ret = dev_queue_xmit(nskb);
nskb             1278 net/netlink/af_netlink.c 		struct sk_buff *nskb = skb_clone(skb, allocation);
nskb             1279 net/netlink/af_netlink.c 		if (!nskb)
nskb             1282 net/netlink/af_netlink.c 		skb = nskb;
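The af_netlink hits (like the vsock tap ones further down) mirror traffic to a monitoring device: clone, restamp the device and metadata, and dev_queue_xmit the clone, which consumes it whether transmission succeeds or not. A minimal sketch:

static int tap_mirror(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb)
		return -ENOMEM;

	nskb->dev = dev;
	skb_reset_network_header(nskb);
	return dev_queue_xmit(nskb);	/* consumes nskb either way */
}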
nskb              677 net/nfc/llcp_core.c 	struct sk_buff *skb_copy = NULL, *nskb;
nskb              701 net/nfc/llcp_core.c 		nskb = skb_clone(skb_copy, GFP_ATOMIC);
nskb              702 net/nfc/llcp_core.c 		if (!nskb)
nskb              705 net/nfc/llcp_core.c 		if (sock_queue_rcv_skb(sk, nskb))
nskb              706 net/nfc/llcp_core.c 			kfree_skb(nskb);
nskb              361 net/nfc/rawsock.c 	struct sk_buff *skb_copy = NULL, *nskb;
nskb              381 net/nfc/rawsock.c 		nskb = skb_clone(skb_copy, GFP_ATOMIC);
nskb              382 net/nfc/rawsock.c 		if (!nskb)
nskb              385 net/nfc/rawsock.c 		if (sock_queue_rcv_skb(sk, nskb))
nskb              386 net/nfc/rawsock.c 			kfree_skb(nskb);
nskb              303 net/openvswitch/datapath.c 	struct sk_buff *segs, *nskb;
nskb              337 net/openvswitch/datapath.c 		nskb = skb->next;
nskb              342 net/openvswitch/datapath.c 	} while ((skb = nskb));
nskb              389 net/openvswitch/datapath.c 	struct sk_buff *nskb = NULL;
nskb              401 net/openvswitch/datapath.c 		nskb = skb_clone(skb, GFP_ATOMIC);
nskb              402 net/openvswitch/datapath.c 		if (!nskb)
nskb              405 net/openvswitch/datapath.c 		nskb = __vlan_hwaccel_push_inside(nskb);
nskb              406 net/openvswitch/datapath.c 		if (!nskb)
nskb              409 net/openvswitch/datapath.c 		skb = nskb;
nskb              529 net/openvswitch/datapath.c 	kfree_skb(nskb);
nskb             2094 net/packet/af_packet.c 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
nskb             2095 net/packet/af_packet.c 		if (nskb == NULL)
nskb             2103 net/packet/af_packet.c 		skb = nskb;
nskb             1270 net/rxrpc/input.c 			struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
nskb             1271 net/rxrpc/input.c 			if (!nskb) {
nskb             1276 net/rxrpc/input.c 			if (nskb != skb) {
nskb             1278 net/rxrpc/input.c 				skb = nskb;
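The rxrpc hit relies on skb_unshare's contract: an unshared skb comes straight back, a shared one is replaced by a private copy with the old reference dropped, and on allocation failure the original is freed and NULL returned; hence the pointer comparison in the hits. Wrapped up:

static struct sk_buff *make_writable(struct sk_buff *skb)
{
	struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);

	/* nskb == skb:  it was private all along.
	 * nskb != skb:  new private copy; old pointer is dead.
	 * nskb == NULL: skb_unshare already freed the original.
	 */
	return nskb;
}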
nskb             1678 net/sched/sch_cake.c 		struct sk_buff *segs, *nskb;
nskb             1687 net/sched/sch_cake.c 			nskb = segs->next;
nskb             1700 net/sched/sch_cake.c 			segs = nskb;
nskb              207 net/sched/sch_choke.c 			       struct sk_buff *nskb,
nskb              216 net/sched/sch_choke.c 	return choke_match_flow(oskb, nskb);
nskb               75 net/sched/sch_etf.c static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
nskb               78 net/sched/sch_etf.c 	ktime_t txtime = nskb->tstamp;
nskb               79 net/sched/sch_etf.c 	struct sock *sk = nskb->sk;
nskb              162 net/sched/sch_etf.c static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
nskb              167 net/sched/sch_etf.c 	ktime_t txtime = nskb->tstamp;
nskb              170 net/sched/sch_etf.c 	if (!is_packet_valid(sch, nskb)) {
nskb              171 net/sched/sch_etf.c 		report_sock_error(nskb, EINVAL,
nskb              173 net/sched/sch_etf.c 		return qdisc_drop(nskb, sch, to_free);
nskb              188 net/sched/sch_etf.c 	rb_link_node(&nskb->rbnode, parent, p);
nskb              189 net/sched/sch_etf.c 	rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);
nskb              191 net/sched/sch_etf.c 	qdisc_qstats_backlog_inc(sch, nskb);
nskb              160 net/sched/sch_generic.c 		struct sk_buff *nskb = q->dequeue(q);
nskb              162 net/sched/sch_generic.c 		if (!nskb)
nskb              165 net/sched/sch_generic.c 		bytelimit -= nskb->len; /* covers GSO len */
nskb              166 net/sched/sch_generic.c 		skb->next = nskb;
nskb              167 net/sched/sch_generic.c 		skb = nskb;
nskb              181 net/sched/sch_generic.c 	struct sk_buff *nskb;
nskb              185 net/sched/sch_generic.c 		nskb = q->dequeue(q);
nskb              186 net/sched/sch_generic.c 		if (!nskb)
nskb              188 net/sched/sch_generic.c 		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
nskb              189 net/sched/sch_generic.c 			qdisc_enqueue_skb_bad_txq(q, nskb);
nskb              192 net/sched/sch_generic.c 		skb->next = nskb;
nskb              193 net/sched/sch_generic.c 		skb = nskb;
nskb              378 net/sched/sch_netem.c static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
nskb              381 net/sched/sch_netem.c 	u64 tnext = netem_skb_cb(nskb)->time_to_send;
nskb              385 net/sched/sch_netem.c 			q->t_tail->next = nskb;
nskb              387 net/sched/sch_netem.c 			q->t_head = nskb;
nskb              388 net/sched/sch_netem.c 		q->t_tail = nskb;
nskb              402 net/sched/sch_netem.c 		rb_link_node(&nskb->rbnode, parent, p);
nskb              403 net/sched/sch_netem.c 		rb_insert_color(&nskb->rbnode, &q->t_root);
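sch_netem (like sch_etf above it) keeps delayed packets time-sorted in an rbtree hung off sk_buff's rbnode, descending on the send time and sending equal keys right so FIFO order is preserved among ties. A generic sketch; the key accessor my_send_time() is an assumption standing in for netem_skb_cb(skb)->time_to_send:

static void tfifo_like_insert(struct sk_buff *nskb, struct rb_root *root,
			      u64 (*my_send_time)(const struct sk_buff *))
{
	u64 tnext = my_send_time(nskb);		/* assumed key accessor */
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_entry(parent, struct sk_buff, rbnode);
		if (tnext >= my_send_time(skb))
			p = &parent->rb_right;	/* ties go right: FIFO */
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, root);
}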
nskb              147 net/sched/sch_tbf.c 	struct sk_buff *segs, *nskb;
nskb              159 net/sched/sch_tbf.c 		nskb = segs->next;
nskb              170 net/sched/sch_tbf.c 		segs = nskb;
nskb              414 net/sctp/output.c 	struct sk_buff *nskb;
nskb              421 net/sctp/output.c 		nskb = head;
nskb              442 net/sctp/output.c 		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
nskb              443 net/sctp/output.c 		if (!nskb)
nskb              445 net/sctp/output.c 		skb_reserve(nskb, packet->overhead + MAX_HEADER);
nskb              468 net/sctp/output.c 							skb_tail_pointer(nskb);
nskb              470 net/sctp/output.c 			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);
nskb              490 net/sctp/output.c 			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
nskb              501 net/sctp/output.c 			sctp_packet_gso_append(head, nskb);
nskb              195 net/tls/tls_device_fallback.c static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
nskb              200 net/tls/tls_device_fallback.c 	skb_copy_header(nskb, skb);
nskb              202 net/tls/tls_device_fallback.c 	skb_put(nskb, skb->len);
nskb              203 net/tls/tls_device_fallback.c 	memcpy(nskb->data, skb->data, headln);
nskb              205 net/tls/tls_device_fallback.c 	nskb->destructor = skb->destructor;
nskb              206 net/tls/tls_device_fallback.c 	nskb->sk = sk;
nskb              210 net/tls/tls_device_fallback.c 	update_chksum(nskb, headln);
nskb              213 net/tls/tls_device_fallback.c 	if (nskb->destructor == sock_efree)
nskb              216 net/tls/tls_device_fallback.c 	delta = nskb->truesize - skb->truesize;
nskb              294 net/tls/tls_device_fallback.c 			struct sk_buff *nskb,
nskb              301 net/tls/tls_device_fallback.c 	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
nskb              318 net/tls/tls_device_fallback.c 	struct sk_buff *nskb = NULL;
nskb              341 net/tls/tls_device_fallback.c 	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
nskb              342 net/tls/tls_device_fallback.c 	if (!nskb)
nskb              345 net/tls/tls_device_fallback.c 	skb_reserve(nskb, skb_headroom(skb));
nskb              347 net/tls/tls_device_fallback.c 	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
nskb              354 net/tls/tls_device_fallback.c 	complete_skb(nskb, skb, tcp_payload_offset);
nskb              359 net/tls/tls_device_fallback.c 	nskb->prev = nskb;
nskb              365 net/tls/tls_device_fallback.c 	return nskb;
nskb              367 net/tls/tls_device_fallback.c 	kfree_skb(nskb);
nskb              368 net/tls/tls_device_fallback.c 	nskb = NULL;
nskb              379 net/tls/tls_device_fallback.c 	struct sk_buff *nskb = NULL;
nskb              404 net/tls/tls_device_fallback.c 			nskb = skb_get(skb);
nskb              408 net/tls/tls_device_fallback.c 	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);
nskb              415 net/tls/tls_device_fallback.c 	if (nskb)
nskb              419 net/tls/tls_device_fallback.c 	return nskb;
nskb               64 net/vmw_vsock/af_vsock_tap.c 	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
nskb               66 net/vmw_vsock/af_vsock_tap.c 	if (nskb) {
nskb               69 net/vmw_vsock/af_vsock_tap.c 		nskb->dev = dev;
nskb               70 net/vmw_vsock/af_vsock_tap.c 		ret = dev_queue_xmit(nskb);
nskb               97 net/x25/x25_dev.c 	struct sk_buff *nskb;
nskb              103 net/x25/x25_dev.c 	nskb = skb_copy(skb, GFP_ATOMIC);
nskb              104 net/x25/x25_dev.c 	if (!nskb)
nskb              107 net/x25/x25_dev.c 	skb = nskb;
nskb              152 net/xfrm/xfrm_device.c 		struct sk_buff *nskb = skb2->next;
nskb              164 net/xfrm/xfrm_device.c 			skb2->next = nskb;
nskb              167 net/xfrm/xfrm_device.c 			skb2->next = nskb;
nskb              172 net/xfrm/xfrm_device.c 				skb = nskb;
nskb              183 net/xfrm/xfrm_device.c 		skb2 = nskb;
nskb              548 net/xfrm/xfrm_output.c 		struct sk_buff *nskb = segs->next;
nskb              555 net/xfrm/xfrm_output.c 			kfree_skb_list(nskb);
nskb              559 net/xfrm/xfrm_output.c 		segs = nskb;