skbn               92 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	struct sk_buff *skb, *skbn;
skbn              229 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 			skbn = fep->rx_skbuff[curidx];
skbn              242 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				skbn = netdev_alloc_skb(dev, pkt_len + 2);
skbn              243 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				if (skbn != NULL) {
skbn              244 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 					skb_reserve(skbn, 2);	/* align IP header */
skbn              246 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 						      skbn->data, pkt_len);
skbn              247 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 					swap(skb, skbn);
skbn              254 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
skbn              256 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				if (skbn) {
skbn              259 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 					skb_align(skbn, ENET_RX_ALIGN);
skbn              267 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 						skbn->data,
skbn              274 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 			if (skbn != NULL) {
skbn              281 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				skbn = skb;
skbn              285 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		fep->rx_skbuff[curidx] = skbn;
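
The fs_enet hits above are the classic RX copy-break pattern: a small completed frame is copied into a freshly allocated small skb and swap() sends the copy up the stack while the original buffer stays on the ring; a large frame is delivered as-is and the ring slot is refilled with a full-size replacement; on allocation failure the old skb is recycled and the frame dropped. A condensed sketch of the two branches, assuming the driver-local skb_align()/ENET_RX_* helpers and an illustrative COPYBREAK_LEN threshold (the real driver keys the branch off pkt_len directly):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define COPYBREAK_LEN 256	/* hypothetical threshold */

/* Returns the skb to put back on the ring slot; *deliver is set to
 * the skb that should go up the stack (NULL when the frame is dropped).
 */
static struct sk_buff *rx_refill(struct net_device *dev,
				 struct sk_buff *skb, int pkt_len,
				 struct sk_buff **deliver)
{
	struct sk_buff *skbn;

	*deliver = NULL;
	if (pkt_len <= COPYBREAK_LEN) {
		skbn = netdev_alloc_skb(dev, pkt_len + 2);
		if (skbn) {
			skb_reserve(skbn, 2);	/* align IP header */
			skb_copy_from_linear_data(skb,
						  skb_put(skbn, pkt_len),
						  pkt_len);
			swap(skb, skbn);	/* copy goes upstream */
			*deliver = skb;
		}
	} else {
		skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
		if (skbn) {
			skb_align(skbn, ENET_RX_ALIGN);	/* driver-local */
			*deliver = skb;		/* original goes upstream */
		}
	}
	if (!skbn)
		skbn = skb;	/* OOM: recycle old buffer, frame dropped */
	return skbn;		/* becomes fep->rx_skbuff[curidx] */
}
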
skbn              104 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	struct sk_buff *skbn;
skbn              116 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
skbn              117 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 			__rmnet_map_ingress_handler(skbn, port);
skbn              315 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	struct sk_buff *skbn;
skbn              334 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
skbn              335 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	if (!skbn)
skbn              338 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
skbn              339 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	skb_put(skbn, packet_len);
skbn              340 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	memcpy(skbn->data, skb->data, packet_len);
skbn              343 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	return skbn;
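
Both rmnet groups belong to one mechanism, MAP deaggregation: the ingress handler loops rmnet_map_deaggregate() until the aggregate skb is exhausted, and each call carves one packet into its own skbn using the RMNET_MAP_DEAGGR_* spacing constants shown above. A sketch of the carve-out, with a deliberately simplified header struct standing in for the driver's rmnet_map_header (pad and command bits omitted):

#include <linux/skbuff.h>

struct map_hdr {			/* simplified stand-in */
	u8     flags;
	u8     mux_id;
	__be16 pkt_len;
};

static struct sk_buff *map_deaggregate_one(struct sk_buff *skb)
{
	struct sk_buff *skbn;
	u32 packet_len;

	if (skb->len < sizeof(struct map_hdr))
		return NULL;

	packet_len = ntohs(((struct map_hdr *)skb->data)->pkt_len) +
		     sizeof(struct map_hdr);
	if (packet_len > skb->len)
		return NULL;		/* truncated aggregate */

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);	/* advance past consumed packet */

	return skbn;			/* one packet, ready for ingress */
}

Each returned skbn is then run through __rmnet_map_ingress_handler(), exactly as the rmnet_handlers.c hits show.
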
skbn              203 drivers/net/usb/qmi_wwan.c 	struct sk_buff *skbn;
skbn              227 drivers/net/usb/qmi_wwan.c 		skbn = netdev_alloc_skb(net, pkt_len);
skbn              228 drivers/net/usb/qmi_wwan.c 		if (!skbn)
skbn              230 drivers/net/usb/qmi_wwan.c 		skbn->dev = net;
skbn              234 drivers/net/usb/qmi_wwan.c 			skbn->protocol = htons(ETH_P_IP);
skbn              237 drivers/net/usb/qmi_wwan.c 			skbn->protocol = htons(ETH_P_IPV6);
skbn              244 drivers/net/usb/qmi_wwan.c 		skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len);
skbn              245 drivers/net/usb/qmi_wwan.c 		if (netif_rx(skbn) != NET_RX_SUCCESS) {
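
qmi_wwan's qmimux path does the same demultiplexing for QMAP-over-USB, with one twist visible above: the link carries no Ethernet framing, so the driver infers skbn->protocol from the first payload nibble. A sketch of the per-packet step, taking qmimux_hdr_sz and the offset bookkeeping as given:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int deliver_muxed_packet(struct net_device *net, struct sk_buff *skb,
				unsigned int offset, unsigned int hdr_sz,
				unsigned int pkt_len)
{
	struct sk_buff *skbn;

	skbn = netdev_alloc_skb(net, pkt_len);
	if (!skbn)
		return 0;
	skbn->dev = net;

	/* No Ethernet header on this link: sniff the IP version nibble. */
	switch (skb->data[offset + hdr_sz] & 0xf0) {
	case 0x40:
		skbn->protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		skbn->protocol = htons(ETH_P_IPV6);
		break;
	default:
		kfree_skb(skbn);	/* not IP: skip this packet */
		return 0;
	}

	skb_put_data(skbn, skb->data + offset + hdr_sz, pkt_len);
	if (netif_rx(skbn) != NET_RX_SUCCESS)
		net->stats.rx_errors++;
	return 1;
}
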
skbn               36 net/ax25/ax25_in.c 	struct sk_buff *skbn, *skbo;
skbn               49 net/ax25/ax25_in.c 					skbn = alloc_skb(AX25_MAX_HEADER_LEN +
skbn               52 net/ax25/ax25_in.c 					if (!skbn) {
skbn               57 net/ax25/ax25_in.c 					skb_reserve(skbn, AX25_MAX_HEADER_LEN);
skbn               59 net/ax25/ax25_in.c 					skbn->dev   = ax25->ax25_dev->dev;
skbn               60 net/ax25/ax25_in.c 					skb_reset_network_header(skbn);
skbn               61 net/ax25/ax25_in.c 					skb_reset_transport_header(skbn);
skbn               66 net/ax25/ax25_in.c 							  skb_put(skbn, skbo->len),
skbn               73 net/ax25/ax25_in.c 					if (ax25_rx_iframe(ax25, skbn) == 0)
skbn               74 net/ax25/ax25_in.c 						kfree_skb(skbn);
skbn              116 net/ax25/ax25_in.c 		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
skbn              117 net/ax25/ax25_in.c 		if (skbn != NULL) {
skbn              119 net/ax25/ax25_in.c 			skb = skbn;
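
The ax25_in.c hits cover two distinct rules. First, reassembly: once the final fragment arrives, one skbn of ax25->fraglen bytes is allocated and the frag_queue is drained into it. Second (the skb_copy hit), a shared or cloned inbound skb is replaced with a private copy before the state machine is allowed to modify it. A sketch of the drain loop; nr_in.c and x25_in.c further down reassemble the same way:

#include <net/ax25.h>

/* Condensed reassembly: assumes frag_queue holds the fragments in
 * order and fraglen is the running total kept by the rx path.
 */
static struct sk_buff *ax25_reasm(ax25_cb *ax25)
{
	struct sk_buff *skbn, *skbo;

	skbn = alloc_skb(AX25_MAX_HEADER_LEN + ax25->fraglen, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, AX25_MAX_HEADER_LEN);
	skbn->dev = ax25->ax25_dev->dev;
	skb_reset_network_header(skbn);
	skb_reset_transport_header(skbn);

	while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
		skb_copy_from_linear_data(skbo,
					  skb_put(skbn, skbo->len),
					  skbo->len);
		kfree_skb(skbo);
	}
	ax25->fraglen = 0;
	return skbn;	/* caller feeds this to ax25_rx_iframe() */
}
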
skbn              118 net/ax25/ax25_out.c 	struct sk_buff *skbn;
skbn              144 net/ax25/ax25_out.c 			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
skbn              151 net/ax25/ax25_out.c 				skb_set_owner_w(skbn, skb->sk);
skbn              158 net/ax25/ax25_out.c 				skb_reserve(skbn, frontlen + 2);
skbn              159 net/ax25/ax25_out.c 				skb_set_network_header(skbn,
skbn              161 net/ax25/ax25_out.c 				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
skbn              162 net/ax25/ax25_out.c 				p = skb_push(skbn, 2);
skbn              172 net/ax25/ax25_out.c 				skb_reserve(skbn, frontlen + 1);
skbn              173 net/ax25/ax25_out.c 				skb_set_network_header(skbn,
skbn              175 net/ax25/ax25_out.c 				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
skbn              176 net/ax25/ax25_out.c 				p = skb_push(skbn, 1);
skbn              181 net/ax25/ax25_out.c 			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
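
On the transmit side, ax25_output segments an oversized frame: each skbn inherits frontlen bytes of headroom from the original, takes up to paclen bytes of payload, and gets a pushed header, two bytes (AX25_P_SEGMENT plus a descending segment counter, AX25_SEG_FIRST flagging the first piece) in the branch shown above, one byte in the other. A sketch of the two-byte branch with simplified loop bookkeeping:

#include <net/ax25.h>

/* KA9Q-style segmentation: each piece carries AX25_P_SEGMENT plus a
 * "segments remaining" counter, AX25_SEG_FIRST marking the first.
 */
static int ax25_fragment(ax25_cb *ax25, struct sk_buff *skb, int paclen)
{
	int frontlen = skb_headroom(skb);	/* preserved for headers */
	int fragno, first = 1;

	fragno = skb->len / paclen;
	if (skb->len % paclen == 0)
		fragno--;

	while (skb->len > 0) {
		int len = min_t(int, paclen, skb->len);
		struct sk_buff *skbn;
		unsigned char *p;

		skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC);
		if (!skbn)
			return -ENOMEM;
		if (skb->sk)
			skb_set_owner_w(skbn, skb->sk);

		skb_reserve(skbn, frontlen + 2);
		skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
		p = skb_push(skbn, 2);
		*p++ = AX25_P_SEGMENT;
		*p = fragno--;			/* segments still to come */
		if (first) {
			*p |= AX25_SEG_FIRST;
			first = 0;
		}
		skb_pull(skb, len);
		skb_queue_tail(&ax25->write_queue, skbn);
	}
	kfree_skb(skb);		/* original fully consumed */
	return 0;
}
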
skbn              243 net/ax25/ax25_out.c 	struct sk_buff *skb, *skbn;
skbn              279 net/ax25/ax25_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skbn              285 net/ax25/ax25_out.c 			skb_set_owner_w(skbn, skb->sk);
skbn              298 net/ax25/ax25_out.c 			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
skbn              303 net/ax25/ax25_out.c 			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
skbn              328 net/ax25/ax25_out.c 	struct sk_buff *skbn;
skbn              340 net/ax25/ax25_out.c 		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
skbn              347 net/ax25/ax25_out.c 			skb_set_owner_w(skbn, skb->sk);
skbn              350 net/ax25/ax25_out.c 		skb = skbn;
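
The remaining ax25_out.c and ax25_route.c hits are the two idioms that recur through the rest of this listing. ax25_kick clones each frame before sending so the original stays queued until acknowledged, and ax25_transmit_buffer/ax25_rt_build_path call skb_realloc_headroom when there is no room to prepend the address/digipeater header; in both cases skb_set_owner_w() must re-attach socket ownership, which neither clone nor realloc carries over. A sketch (ax25_send_iframe is the file-local sender):

#include <net/ax25.h>

/* Transmit without consuming: the clone hits the wire, the original
 * stays on the queue for possible retransmission.
 */
static int send_clone(ax25_cb *ax25, struct sk_buff *skb, int poll)
{
	struct sk_buff *skbn = skb_clone(skb, GFP_ATOMIC);

	if (!skbn)
		return -ENOMEM;
	if (skb->sk)
		skb_set_owner_w(skbn, skb->sk);	/* clone starts unowned */
	ax25_send_iframe(ax25, skbn, poll);
	return 0;
}

/* Grow headroom before prepending the AX.25 address field. */
static struct sk_buff *make_room(struct sk_buff *skb, int headroom)
{
	struct sk_buff *skbn;

	if (skb_headroom(skb) >= headroom)
		return skb;
	skbn = skb_realloc_headroom(skb, headroom);
	if (!skbn)
		return NULL;			/* caller frees skb */
	if (skb->sk)
		skb_set_owner_w(skbn, skb->sk);
	consume_skb(skb);			/* copy replaces original */
	return skbn;
}

The same clone-before-send step appears essentially verbatim in lapb_out.c, nr_out.c, rose_out.c and x25_out.c below.
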
skbn              444 net/ax25/ax25_route.c 	struct sk_buff *skbn;
skbn              451 net/ax25/ax25_route.c 		if ((skbn = skb_realloc_headroom(skb, len)) == NULL) {
skbn              457 net/ax25/ax25_route.c 			skb_set_owner_w(skbn, skb->sk);
skbn              461 net/ax25/ax25_route.c 		skb = skbn;
skbn               68 net/lapb/lapb_out.c 	struct sk_buff *skb, *skbn;
skbn               85 net/lapb/lapb_out.c 			if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skbn               91 net/lapb/lapb_out.c 				skb_set_owner_w(skbn, skb->sk);
skbn               96 net/lapb/lapb_out.c 			lapb_send_iframe(lapb, skbn, LAPB_POLLOFF);
skbn               31 net/netrom/nr_in.c 	struct sk_buff *skbo, *skbn = skb;
skbn               48 net/netrom/nr_in.c 		if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL)
skbn               51 net/netrom/nr_in.c 		skb_reset_transport_header(skbn);
skbn               55 net/netrom/nr_in.c 						  skb_put(skbn, skbo->len),
skbn               63 net/netrom/nr_in.c 	return sock_queue_rcv_skb(sk, skbn);
skbn              150 net/netrom/nr_in.c 	struct sk_buff *skbn;
skbn              233 net/netrom/nr_in.c 			while ((skbn = skb_dequeue(&nrom->reseq_queue)) != NULL) {
skbn              234 net/netrom/nr_in.c 				ns = skbn->data[17];
skbn              236 net/netrom/nr_in.c 					if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) {
skbn              240 net/netrom/nr_in.c 						skb_queue_tail(&temp_queue, skbn);
skbn              243 net/netrom/nr_in.c 					skb_queue_tail(&temp_queue, skbn);
skbn              245 net/netrom/nr_in.c 					kfree_skb(skbn);
skbn              248 net/netrom/nr_in.c 			while ((skbn = skb_dequeue(&temp_queue)) != NULL) {
skbn              249 net/netrom/nr_in.c 				skb_queue_tail(&nrom->reseq_queue, skbn);
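
nr_in.c layers re-sequencing on top of reassembly: out-of-order I-frames wait on reseq_queue, and whenever vr advances the queue is swept, delivering frames whose sequence byte (data[17] of the NET/ROM transport header) now matches, parking still-early frames on a temporary queue, and dropping stale duplicates. A sketch of the sweep; helper names follow the file's own nr_queue_rx_frame()/nr_in_rx_window():

#include <net/netrom.h>

/* One sweep over the reassembly queue after nr->vr has advanced.
 * The sequence number rides in byte 17 of the transport header.
 */
static void nr_resequence(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);
	struct sk_buff_head temp_queue;
	struct sk_buff *skbn;
	unsigned short ns;

	skb_queue_head_init(&temp_queue);

	while ((skbn = skb_dequeue(&nr->reseq_queue)) != NULL) {
		ns = skbn->data[17];
		if (ns == nr->vr) {
			/* 'more' flag elided from this sketch */
			if (nr_queue_rx_frame(sk, skbn, 0) == 0)
				nr->vr = (nr->vr + 1) % NR_MODULUS;
			else
				skb_queue_tail(&temp_queue, skbn);
		} else if (nr_in_rx_window(sk, ns)) {
			skb_queue_tail(&temp_queue, skbn); /* still early */
		} else {
			kfree_skb(skbn);	/* stale duplicate */
		}
	}

	while ((skbn = skb_dequeue(&temp_queue)) != NULL)
		skb_queue_tail(&nr->reseq_queue, skbn);
}
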
skbn               32 net/netrom/nr_loopback.c 	struct sk_buff *skbn;
skbn               34 net/netrom/nr_loopback.c 	if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) {
skbn               35 net/netrom/nr_loopback.c 		skb_copy_from_linear_data(skb, skb_put(skbn, skb->len), skb->len);
skbn               36 net/netrom/nr_loopback.c 		skb_reset_transport_header(skbn);
skbn               38 net/netrom/nr_loopback.c 		skb_queue_tail(&loopback_queue, skbn);
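
nr_loopback.c shows the loopback idiom: a private copy (a fresh alloc plus linear copy rather than a clone, since the transport header gets reset) is parked on a global queue that a timer later drains back into the receive path. Sketch:

#include <linux/skbuff.h>

static struct sk_buff_head loopback_queue;	/* drained by a timer */

static int loopback_enqueue(struct sk_buff *skb)
{
	struct sk_buff *skbn = alloc_skb(skb->len, GFP_ATOMIC);

	if (skbn) {
		skb_copy_from_linear_data(skb, skb_put(skbn, skb->len),
					  skb->len);
		skb_reset_transport_header(skbn);
		skb_queue_tail(&loopback_queue, skbn);
		/* the real code re-arms loopback_timer here */
	}
	kfree_skb(skb);		/* original is consumed either way */
	return 1;
}

rose_loopback.c further down does the equivalent with skb_clone.
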
skbn               34 net/netrom/nr_out.c 	struct sk_buff *skbn;
skbn               46 net/netrom/nr_out.c 			if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
skbn               49 net/netrom/nr_out.c 			skb_reserve(skbn, frontlen);
skbn               54 net/netrom/nr_out.c 			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
skbn               58 net/netrom/nr_out.c 			skb_push(skbn, NR_TRANSPORT_LEN);
skbn               59 net/netrom/nr_out.c 			skb_copy_to_linear_data(skbn, transport,
skbn               62 net/netrom/nr_out.c 				skbn->data[4] |= NR_MORE_FLAG;
skbn               64 net/netrom/nr_out.c 			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
skbn               99 net/netrom/nr_out.c 	struct sk_buff *skb, *skbn;
skbn              105 net/netrom/nr_out.c 	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
skbn              108 net/netrom/nr_out.c 	skbn->data[2] = nr->va;
skbn              109 net/netrom/nr_out.c 	skbn->data[3] = nr->vr;
skbn              112 net/netrom/nr_out.c 		skbn->data[4] |= NR_CHOKE_FLAG;
skbn              114 net/netrom/nr_out.c 	nr_transmit_buffer(sk, skbn);
skbn              125 net/netrom/nr_out.c 	struct sk_buff *skb, *skbn;
skbn              156 net/netrom/nr_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skbn              161 net/netrom/nr_out.c 		skb_set_owner_w(skbn, sk);
skbn              166 net/netrom/nr_out.c 		nr_send_iframe(sk, skbn);
skbn              757 net/netrom/nr_route.c 	struct sk_buff *skbn;
skbn              810 net/netrom/nr_route.c 	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
skbn              817 net/netrom/nr_route.c 	skb=skbn;
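
nr_route.c needs fresh headroom for the AX.25 hard header before the frame can be transmitted, so it takes a copy with skb_copy_expand(skb, dev->hard_header_len, 0, ...), re-owns it, frees the original, and carries on with the copy. Sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Copy with enough headroom for the device's link-layer header,
 * then retire the original in favour of the copy.
 */
static struct sk_buff *grow_for_hard_header(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct sk_buff *skbn;

	skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC);
	if (!skbn)
		return NULL;		/* caller drops and counts it */

	if (skb->sk)
		skb_set_owner_w(skbn, skb->sk);
	kfree_skb(skb);
	return skbn;
}
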
skbn              213 net/netrom/nr_subr.c 	struct sk_buff *skbn;
skbn              219 net/netrom/nr_subr.c 	if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL)
skbn              222 net/netrom/nr_subr.c 	skb_reserve(skbn, 0);
skbn              224 net/netrom/nr_subr.c 	dptr = skb_put(skbn, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
skbn              255 net/netrom/nr_subr.c 	if (!nr_route_frame(skbn, NULL))
skbn              256 net/netrom/nr_subr.c 		kfree_skb(skbn);
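
nr_subr.c builds control frames from nothing: allocate, reserve, put NR_NETWORK_LEN + NR_TRANSPORT_LEN bytes, fill the headers by hand, and hand off to nr_route_frame(), which returns 0 when nothing accepted the skb, in which case the caller still owns it and must free it. A skeleton of that flow; the header fill is elided, and the flags-byte position is illustrative (the real helper copies swapped source/destination addresses from the offending frame):

#include <net/netrom.h>

static void transmit_reply(unsigned char cmdflags)
{
	struct sk_buff *skbn;
	unsigned char *dptr;
	int len = NR_NETWORK_LEN + NR_TRANSPORT_LEN + 1;

	skbn = alloc_skb(len, GFP_ATOMIC);
	if (!skbn)
		return;

	skb_reserve(skbn, 0);

	dptr = skb_put(skbn, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
	memset(dptr, 0, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
	/* ... swapped src/dest addresses, TTL, circuit ids go here ... */
	dptr[NR_NETWORK_LEN + NR_TRANSPORT_LEN - 1] = cmdflags;

	if (!nr_route_frame(skbn, NULL))
		kfree_skb(skbn);	/* nobody took it: still ours to free */
}
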
skbn              702 net/qrtr/qrtr.c 	struct sk_buff *skbn;
skbn              706 net/qrtr/qrtr.c 		skbn = skb_clone(skb, GFP_KERNEL);
skbn              707 net/qrtr/qrtr.c 		if (!skbn)
skbn              709 net/qrtr/qrtr.c 		skb_set_owner_w(skbn, skb->sk);
skbn              710 net/qrtr/qrtr.c 		qrtr_node_enqueue(node, skbn, type, from, to);
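
qrtr broadcasts by fan-out: every endpoint node gets its own clone, re-owned to the sending socket, and the original is dealt with only after the loop. Note GFP_KERNEL rather than GFP_ATOMIC, since this path runs in process context. A sketch; struct qrtr_node and its list linkage are private to net/qrtr/qrtr.c, so the names here merely mirror them:

#include <linux/skbuff.h>
#include <linux/qrtr.h>			/* struct sockaddr_qrtr */

struct qrtr_node {			/* minimal stand-in for the private type */
	struct list_head item;
};

int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type,
		      struct sockaddr_qrtr *from, struct sockaddr_qrtr *to);

static void bcast_enqueue(struct list_head *nodes, struct sk_buff *skb,
			  int type, struct sockaddr_qrtr *from,
			  struct sockaddr_qrtr *to)
{
	struct qrtr_node *node;
	struct sk_buff *skbn;

	list_for_each_entry(node, nodes, item) {
		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);	/* bill the sending socket */
		qrtr_node_enqueue(node, skbn, type, from, to);
	}
	consume_skb(skb);	/* real code hands the original to local delivery */
}
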
skbn             1139 net/rose/af_rose.c 		struct sk_buff *skbn;
skbn             1150 net/rose/af_rose.c 			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
skbn             1155 net/rose/af_rose.c 			skbn->sk   = sk;
skbn             1156 net/rose/af_rose.c 			skbn->free = 1;
skbn             1157 net/rose/af_rose.c 			skbn->arp  = 1;
skbn             1159 net/rose/af_rose.c 			skb_reserve(skbn, frontlen);
skbn             1164 net/rose/af_rose.c 			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
skbn             1168 net/rose/af_rose.c 			skb_push(skbn, ROSE_MIN_LEN);
skbn             1169 net/rose/af_rose.c 			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
skbn             1172 net/rose/af_rose.c 				skbn->data[2] |= M_BIT;
skbn             1174 net/rose/af_rose.c 			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
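
A historical footnote on the af_rose.c hits: skbn->free and skbn->arp are sk_buff fields that disappeared back in the 2.x days, and this fragmentation block appears to survive only because it sits under #ifdef M_BIT, which is never defined, so it is compiled out. Were it revived, the three assignments would simply vanish, since sock_alloc_send_skb() already charges the socket via skb_set_owner_w(). A minimal modernised allocation step under that assumption:

#include <net/sock.h>

/* sock_alloc_send_skb() blocks-or-fails against sk_sndbuf and already
 * sets ownership, so no sk/free/arp bookkeeping is needed.
 */
static struct sk_buff *rose_frag_alloc(struct sock *sk, int frontlen,
				       int paclen, int *err)
{
	struct sk_buff *skbn;

	skbn = sock_alloc_send_skb(sk, frontlen + paclen, 0, err);
	if (skbn)
		skb_reserve(skbn, frontlen);	/* room for pushed header */
	return skbn;
}
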
skbn              141 net/rose/rose_link.c 	struct sk_buff *skbn;
skbn              167 net/rose/rose_link.c 		while ((skbn = skb_dequeue(&neigh->queue)) != NULL)
skbn              168 net/rose/rose_link.c 			if (!rose_send_frame(skbn, neigh))
skbn              169 net/rose/rose_link.c 				kfree_skb(skbn);
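
rose_link.c parks frames on neigh->queue while the underlying AX.25 link is still being established; once the link comes up the queue is drained, and any frame the sender refuses is freed on the spot rather than requeued. A generic sketch of that drain, with a callback standing in for rose_send_frame():

#include <linux/skbuff.h>

/* Flush frames parked while the link was coming up.  A zero return
 * from the send callback means the frame was refused, so it is freed
 * here instead of leaking.
 */
static void drain_pending(struct sk_buff_head *queue,
			  int (*send)(struct sk_buff *skbn))
{
	struct sk_buff *skbn;

	while ((skbn = skb_dequeue(queue)) != NULL)
		if (!send(skbn))
			kfree_skb(skbn);
}

x25_link.c drains nb->queue the same way once its link-level restart completes.
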
skbn               36 net/rose/rose_loopback.c 	struct sk_buff *skbn = NULL;
skbn               39 net/rose/rose_loopback.c 		skbn = skb_clone(skb, GFP_ATOMIC);
skbn               41 net/rose/rose_loopback.c 	if (skbn) {
skbn               43 net/rose/rose_loopback.c 		skb_queue_tail(&loopback_queue, skbn);
skbn               48 net/rose/rose_out.c 	struct sk_buff *skb, *skbn;
skbn               76 net/rose/rose_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skbn               81 net/rose/rose_out.c 		skb_set_owner_w(skbn, sk);
skbn               86 net/rose/rose_out.c 		rose_send_iframe(sk, skbn);
skbn               24 net/x25/x25_forward.c 	struct sk_buff *skbn;
skbn               74 net/x25/x25_forward.c 	if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
skbn               77 net/x25/x25_forward.c 	x25_transmit_link(skbn, neigh_new);
skbn               98 net/x25/x25_forward.c 	struct sk_buff *skbn;
skbn              119 net/x25/x25_forward.c 	if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
skbn              123 net/x25/x25_forward.c 	x25_transmit_link(skbn, nb);
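
x25_forward.c picks its duplication primitive by intent: a forwarded Call Request is relayed untouched, so a cheap skb_clone (shared data) suffices, while forwarded data may need header bytes rewritten, so pskb_copy gives the path its own writable header. A sketch contrasting the two:

#include <net/x25.h>

/* skb_clone shares the data (fine for relay-as-is); pskb_copy gives
 * the forwarder a private header it may rewrite.
 */
static int forward_frame(struct sk_buff *skb, struct x25_neigh *nb,
			 bool will_modify)
{
	struct sk_buff *skbn;

	skbn = will_modify ? pskb_copy(skb, GFP_ATOMIC)
			   : skb_clone(skb, GFP_ATOMIC);
	if (!skbn)
		return -ENOMEM;

	x25_transmit_link(skbn, nb);
	return 0;
}
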
skbn               34 net/x25/x25_in.c 	struct sk_buff *skbo, *skbn = skb;
skbn               47 net/x25/x25_in.c 		if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){
skbn               54 net/x25/x25_in.c 		skb_reset_transport_header(skbn);
skbn               57 net/x25/x25_in.c 		skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
skbn               66 net/x25/x25_in.c 						  skb_put(skbn, skbo->len),
skbn               74 net/x25/x25_in.c 	skb_set_owner_r(skbn, sk);
skbn               75 net/x25/x25_in.c 	skb_queue_tail(&sk->sk_receive_queue, skbn);
skbn               72 net/x25/x25_link.c 	struct sk_buff *skbn;
skbn              105 net/x25/x25_link.c 		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
skbn              106 net/x25/x25_link.c 			x25_send_frame(skbn, nb);
skbn               49 net/x25/x25_out.c 	struct sk_buff *skbn;
skbn               67 net/x25/x25_out.c 			skbn = sock_alloc_send_skb(sk, frontlen + max_len,
skbn               70 net/x25/x25_out.c 			if (!skbn) {
skbn               81 net/x25/x25_out.c 			skb_reserve(skbn, frontlen);
skbn               86 net/x25/x25_out.c 			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
skbn               90 net/x25/x25_out.c 			skb_push(skbn, header_len);
skbn               91 net/x25/x25_out.c 			skb_copy_to_linear_data(skbn, header, header_len);
skbn               95 net/x25/x25_out.c 					skbn->data[3] |= X25_EXT_M_BIT;
skbn               97 net/x25/x25_out.c 					skbn->data[2] |= X25_STD_M_BIT;
skbn              100 net/x25/x25_out.c 			skb_queue_tail(&sk->sk_write_queue, skbn);
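
x25_out.c's fragmenter shows why the saved header is copied back onto every fragment: the More bit lives inside it, and its position depends on the negotiated modulus, byte 3 (X25_EXT_M_BIT) with extended modulo-128 numbering or byte 2 (X25_STD_M_BIT) with standard modulo-8. Sketch of the per-fragment tail:

#include <net/x25.h>

/* After the saved header bytes are copied back onto a fragment, the
 * More bit is set according to the negotiated sequence modulus.
 */
static void x25_mark_more(struct x25_sock *x25, struct sk_buff *skbn,
			  unsigned char *header, int header_len,
			  bool more_follows)
{
	skb_push(skbn, header_len);
	skb_copy_to_linear_data(skbn, header, header_len);

	if (!more_follows)
		return;

	if (x25->neighbour->extended)
		skbn->data[3] |= X25_EXT_M_BIT;	/* modulo 128 */
	else
		skbn->data[2] |= X25_STD_M_BIT;	/* modulo 8 */
}
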
skbn              138 net/x25/x25_out.c 	struct sk_buff *skb, *skbn;
skbn              180 net/x25/x25_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skbn              185 net/x25/x25_out.c 		skb_set_owner_w(skbn, sk);
skbn              190 net/x25/x25_out.c 		x25_send_iframe(sk, skbn);