/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
		fclones->skb2.pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc()
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, the driver allocates only the data buffer where the NIC puts
 * the incoming frame.
 * Driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
 * before giving packet to stack.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}

/* build_skb() is a wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

struct netdev_alloc_cache {
	struct page_frag	frag;
	/* we maintain a pagecount bias, so that we don't dirty cache line
	 * containing page->_count every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);

static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
				       gfp_t gfp_mask)
{
	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

	if (order) {
		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
			    __GFP_NOMEMALLOC;
		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
		nc->frag.size = PAGE_SIZE << (page ? order : 0);
	}

	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

	nc->frag.page = page;

	return page;
}

static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
			       unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
	struct page *page = nc->frag.page;
	unsigned int size;
	int offset;

	if (unlikely(!page)) {
refill:
		page = __page_frag_refill(nc, gfp_mask);
		if (!page)
			return NULL;

		/* if size can vary use frag.size else just use PAGE_SIZE */
		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;

		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		atomic_add(size - 1, &page->_count);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		nc->frag.offset = size;
	}

	offset = nc->frag.offset - fragsz;
	if (unlikely(offset < 0)) {
		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
			goto refill;
		/* if size can vary use frag.size else just use PAGE_SIZE */
		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;

		/* OK, page count is 0, we can safely set it */
		atomic_set(&page->_count, size);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	nc->frag.offset = offset;

	return page_address(page) + offset;
}

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	unsigned long flags;
	void *data;

	local_irq_save(flags);
	data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(napi_alloc_frag);

/**
 *	__alloc_rx_skb - allocate an skbuff for rx
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *	@flags:	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case we have to fallback to __alloc_skb()
 *		If SKB_ALLOC_NAPI is set, page fragment will be allocated
 *		from napi_cache instead of netdev_cache.
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
				      int flags)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data;

		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = (flags & SKB_ALLOC_NAPI) ?
			__napi_alloc_frag(fragsz, gfp_mask) :
			__netdev_alloc_frag(fragsz, gfp_mask);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length, gfp_mask,
				  SKB_ALLOC_RX, NUMA_NO_NODE);
	}
	return skb;
}
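
/* Illustrative sketch (not part of this file): the receive pattern described
 * in the __build_skb() notes above, as a hypothetical driver might use the
 * frag allocators. mydrv_rx() and MYDRV_BUFSZ are made-up names for the
 * example only.
 *
 *	unsigned int fragsz = SKB_DATA_ALIGN(NET_SKB_PAD + MYDRV_BUFSZ) +
 *			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *data = netdev_alloc_frag(fragsz);	// before IO
 *	...					// NIC DMAs the frame into data
 *	struct sk_buff *skb = build_skb(data, fragsz);	// after IO
 *	if (!skb) {
 *		put_page(virt_to_head_page(data));
 *		return;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD);		// reserve the built-in headroom
 *	skb_put(skb, frame_len);		// mark the received bytes as used
 */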

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	length += NET_SKB_PAD;
	skb = __alloc_rx_skb(length, gfp_mask, 0);

	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}

	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	length += NET_SKB_PAD + NET_IP_ALIGN;
	skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);

	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
		skb->dev = napi->dev;
	}

	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	/*
	 * If skb buf is from userspace, we need to notify the caller that
	 * the lower device DMA is done;
	 */
	if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = shinfo->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, true);
	}

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (atomic_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!atomic_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);
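
/* Illustrative sketch (not part of this file): the reference-count semantics
 * used by kfree_skb() above and consume_skb() below.  An extra reference
 * taken with skb_get() keeps the buffer alive across one of the frees.
 *
 *	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
 *	if (!skb)
 *		return;
 *	skb_get(skb);		// users == 2
 *	kfree_skb(skb);		// users == 1, buffer still valid
 *	consume_skb(skb);	// users == 0, buffer is freed
 */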

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp			= secpath_get(old->sp);
#endif
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	CHECK_SKB_FIELD(tc_verd);
#endif
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)page_private(head);
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);
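
/* Illustrative sketch (not part of this file): a clone made with skb_clone()
 * below shares the packet data with the original, so it is suitable when the
 * data will only be read (e.g. keeping a copy around for retransmission);
 * use skb_copy() or pskb_copy() when the data or headers must be written.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *	if (clone) {
 *		// clone->data points at the same payload as skb->data
 *		dev_queue_xmit(clone);
 *	}
 */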

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    atomic_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		atomic_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	skb->tail	      += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */

unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);
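
/* Illustrative sketch (not part of this file): the usual way skb_reserve(),
 * skb_put() and skb_push() (below) cooperate when building a packet from
 * scratch.  hdr_len, payload, payload_len and header are made-up names for
 * the example only.
 *
 *	struct sk_buff *skb = alloc_skb(hdr_len + payload_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);			// leave headroom
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	memcpy(skb_push(skb, hdr_len), header, hdr_len);	// prepend header
 */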

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(f));
			memcpy(to,
			       vaddr + f->page_offset + offset - start,
			       copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
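
/* Illustrative sketch (not part of this file): skb_copy_bits() works whether
 * the requested bytes live in the linear area, in page frags or in the frag
 * list, which makes it a safe way to peek at headers of a non-linear skb.
 *
 *	struct udphdr uh;
 *
 *	if (skb_copy_bits(skb, skb_transport_offset(skb), &uh, sizeof(uh)) < 0)
 *		return -EINVAL;
 */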

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
				   struct sock *sk)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return NULL;

	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);

	memcpy(page_address(pfrag->page) + pfrag->offset,
	       page_address(page) + *offset, *len);
	*offset = pfrag->offset;
	pfrag->offset += *len;

	return pfrag->page;
}

static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
			     struct page *page,
			     unsigned int offset)
{
	return	spd->nr_pages &&
		spd->pages[spd->nr_pages - 1] == page &&
		(spd->partial[spd->nr_pages - 1].offset +
		 spd->partial[spd->nr_pages - 1].len == offset);
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static bool spd_fill_page(struct splice_pipe_desc *spd,
			  struct pipe_inode_info *pipe, struct page *page,
			  unsigned int *len, unsigned int offset,
			  bool linear,
			  struct sock *sk)
{
	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
		return true;

	if (linear) {
		page = linear_to_page(page, len, &offset, sk);
		if (!page)
			return true;
	}
	if (spd_can_coalesce(spd, page, offset)) {
		spd->partial[spd->nr_pages - 1].len += *len;
		return false;
	}
	get_page(page);
	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return false;
}

static bool __splice_segment(struct page *page, unsigned int poff,
			     unsigned int plen, unsigned int *off,
			     unsigned int *len,
			     struct splice_pipe_desc *spd, bool linear,
			     struct sock *sk,
			     struct pipe_inode_info *pipe)
{
	if (!*len)
		return true;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return false;
	}

	/* ignore any bits we already processed */
	poff += *off;
	plen -= *off;
	*off = 0;

	do {
		unsigned int flen = min(*len, plen);

		if (spd_fill_page(spd, pipe, page, &flen, poff,
				  linear, sk))
			return true;
		poff += flen;
		plen -= flen;
		*len -= flen;
	} while (*len && plen);

	return false;
}

/*
 * Map linear and fragment data from the skb to spd. It reports true if the
 * pipe is full or if we already spliced the requested length.
 */
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			      unsigned int *offset, unsigned int *len,
			      struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/* map the linear part :
	 * If skb->head_frag is set, this 'linear' part is backed by a
	 * fragment, and if the head is not shared with any clones then
	 * we can avoid a copy since we own the head portion of this page.
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, spd,
			     skb_head_is_locked(skb),
			     sk, pipe))
		return true;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, spd, false, sk, pipe))
			return true;
	}

	return false;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[MAX_SKB_FRAGS];
	struct page *pages[MAX_SKB_FRAGS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = MAX_SKB_FRAGS,
		.flags = flags,
		.ops = &nosteal_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	return ret;
}
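
/* Illustrative sketch (not part of this file): skb_store_bits() below is the
 * mirror image of skb_copy_bits(), writing into a possibly non-linear skb.
 * Here a rewritten header is pushed back at the transport offset.
 *
 *	if (skb_store_bits(skb, skb_transport_offset(skb), &uh, sizeof(uh)) < 0)
 *		return -EINVAL;
 */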
1995 */ 1996 1997int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1998{ 1999 int start = skb_headlen(skb); 2000 struct sk_buff *frag_iter; 2001 int i, copy; 2002 2003 if (offset > (int)skb->len - len) 2004 goto fault; 2005 2006 if ((copy = start - offset) > 0) { 2007 if (copy > len) 2008 copy = len; 2009 skb_copy_to_linear_data_offset(skb, offset, from, copy); 2010 if ((len -= copy) == 0) 2011 return 0; 2012 offset += copy; 2013 from += copy; 2014 } 2015 2016 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2017 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2018 int end; 2019 2020 WARN_ON(start > offset + len); 2021 2022 end = start + skb_frag_size(frag); 2023 if ((copy = end - offset) > 0) { 2024 u8 *vaddr; 2025 2026 if (copy > len) 2027 copy = len; 2028 2029 vaddr = kmap_atomic(skb_frag_page(frag)); 2030 memcpy(vaddr + frag->page_offset + offset - start, 2031 from, copy); 2032 kunmap_atomic(vaddr); 2033 2034 if ((len -= copy) == 0) 2035 return 0; 2036 offset += copy; 2037 from += copy; 2038 } 2039 start = end; 2040 } 2041 2042 skb_walk_frags(skb, frag_iter) { 2043 int end; 2044 2045 WARN_ON(start > offset + len); 2046 2047 end = start + frag_iter->len; 2048 if ((copy = end - offset) > 0) { 2049 if (copy > len) 2050 copy = len; 2051 if (skb_store_bits(frag_iter, offset - start, 2052 from, copy)) 2053 goto fault; 2054 if ((len -= copy) == 0) 2055 return 0; 2056 offset += copy; 2057 from += copy; 2058 } 2059 start = end; 2060 } 2061 if (!len) 2062 return 0; 2063 2064fault: 2065 return -EFAULT; 2066} 2067EXPORT_SYMBOL(skb_store_bits); 2068 2069/* Checksum skb data. */ 2070__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 2071 __wsum csum, const struct skb_checksum_ops *ops) 2072{ 2073 int start = skb_headlen(skb); 2074 int i, copy = start - offset; 2075 struct sk_buff *frag_iter; 2076 int pos = 0; 2077 2078 /* Checksum header. 
*/ 2079 if (copy > 0) { 2080 if (copy > len) 2081 copy = len; 2082 csum = ops->update(skb->data + offset, copy, csum); 2083 if ((len -= copy) == 0) 2084 return csum; 2085 offset += copy; 2086 pos = copy; 2087 } 2088 2089 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2090 int end; 2091 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2092 2093 WARN_ON(start > offset + len); 2094 2095 end = start + skb_frag_size(frag); 2096 if ((copy = end - offset) > 0) { 2097 __wsum csum2; 2098 u8 *vaddr; 2099 2100 if (copy > len) 2101 copy = len; 2102 vaddr = kmap_atomic(skb_frag_page(frag)); 2103 csum2 = ops->update(vaddr + frag->page_offset + 2104 offset - start, copy, 0); 2105 kunmap_atomic(vaddr); 2106 csum = ops->combine(csum, csum2, pos, copy); 2107 if (!(len -= copy)) 2108 return csum; 2109 offset += copy; 2110 pos += copy; 2111 } 2112 start = end; 2113 } 2114 2115 skb_walk_frags(skb, frag_iter) { 2116 int end; 2117 2118 WARN_ON(start > offset + len); 2119 2120 end = start + frag_iter->len; 2121 if ((copy = end - offset) > 0) { 2122 __wsum csum2; 2123 if (copy > len) 2124 copy = len; 2125 csum2 = __skb_checksum(frag_iter, offset - start, 2126 copy, 0, ops); 2127 csum = ops->combine(csum, csum2, pos, copy); 2128 if ((len -= copy) == 0) 2129 return csum; 2130 offset += copy; 2131 pos += copy; 2132 } 2133 start = end; 2134 } 2135 BUG_ON(len); 2136 2137 return csum; 2138} 2139EXPORT_SYMBOL(__skb_checksum); 2140 2141__wsum skb_checksum(const struct sk_buff *skb, int offset, 2142 int len, __wsum csum) 2143{ 2144 const struct skb_checksum_ops ops = { 2145 .update = csum_partial_ext, 2146 .combine = csum_block_add_ext, 2147 }; 2148 2149 return __skb_checksum(skb, offset, len, csum, &ops); 2150} 2151EXPORT_SYMBOL(skb_checksum); 2152 2153/* Both of above in one bottle. */ 2154 2155__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 2156 u8 *to, int len, __wsum csum) 2157{ 2158 int start = skb_headlen(skb); 2159 int i, copy = start - offset; 2160 struct sk_buff *frag_iter; 2161 int pos = 0; 2162 2163 /* Copy header. 
*/ 2164 if (copy > 0) { 2165 if (copy > len) 2166 copy = len; 2167 csum = csum_partial_copy_nocheck(skb->data + offset, to, 2168 copy, csum); 2169 if ((len -= copy) == 0) 2170 return csum; 2171 offset += copy; 2172 to += copy; 2173 pos = copy; 2174 } 2175 2176 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2177 int end; 2178 2179 WARN_ON(start > offset + len); 2180 2181 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2182 if ((copy = end - offset) > 0) { 2183 __wsum csum2; 2184 u8 *vaddr; 2185 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2186 2187 if (copy > len) 2188 copy = len; 2189 vaddr = kmap_atomic(skb_frag_page(frag)); 2190 csum2 = csum_partial_copy_nocheck(vaddr + 2191 frag->page_offset + 2192 offset - start, to, 2193 copy, 0); 2194 kunmap_atomic(vaddr); 2195 csum = csum_block_add(csum, csum2, pos); 2196 if (!(len -= copy)) 2197 return csum; 2198 offset += copy; 2199 to += copy; 2200 pos += copy; 2201 } 2202 start = end; 2203 } 2204 2205 skb_walk_frags(skb, frag_iter) { 2206 __wsum csum2; 2207 int end; 2208 2209 WARN_ON(start > offset + len); 2210 2211 end = start + frag_iter->len; 2212 if ((copy = end - offset) > 0) { 2213 if (copy > len) 2214 copy = len; 2215 csum2 = skb_copy_and_csum_bits(frag_iter, 2216 offset - start, 2217 to, copy, 0); 2218 csum = csum_block_add(csum, csum2, pos); 2219 if ((len -= copy) == 0) 2220 return csum; 2221 offset += copy; 2222 to += copy; 2223 pos += copy; 2224 } 2225 start = end; 2226 } 2227 BUG_ON(len); 2228 return csum; 2229} 2230EXPORT_SYMBOL(skb_copy_and_csum_bits); 2231 2232 /** 2233 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2234 * @from: source buffer 2235 * 2236 * Calculates the amount of linear headroom needed in the 'to' skb passed 2237 * into skb_zerocopy(). 2238 */ 2239unsigned int 2240skb_zerocopy_headlen(const struct sk_buff *from) 2241{ 2242 unsigned int hlen = 0; 2243 2244 if (!from->head_frag || 2245 skb_headlen(from) < L1_CACHE_BYTES || 2246 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2247 hlen = skb_headlen(from); 2248 2249 if (skb_has_frag_list(from)) 2250 hlen = from->len; 2251 2252 return hlen; 2253} 2254EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2255 2256/** 2257 * skb_zerocopy - Zero copy skb to skb 2258 * @to: destination buffer 2259 * @from: source buffer 2260 * @len: number of bytes to copy from source buffer 2261 * @hlen: size of linear headroom in destination buffer 2262 * 2263 * Copies up to `len` bytes from `from` to `to` by creating references 2264 * to the frags in the source buffer. 2265 * 2266 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2267 * headroom in the `to` buffer. 
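 *
 * A typical pairing, sketched for illustration only (allocation and error
 * handling belong to the caller; 'nskb', 'hlen' and 'err' are local names
 * used just for this sketch):
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	nskb = alloc_skb(hlen, GFP_ATOMIC);
 *	if (nskb)
 *		err = skb_zerocopy(nskb, from, from->len, hlen);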
2268 * 2269 * Return value: 2270 * 0: everything is OK 2271 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 2272 * -EFAULT: skb_copy_bits() found some problem with skb geometry 2273 */ 2274int 2275skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2276{ 2277 int i, j = 0; 2278 int plen = 0; /* length of skb->head fragment */ 2279 int ret; 2280 struct page *page; 2281 unsigned int offset; 2282 2283 BUG_ON(!from->head_frag && !hlen); 2284 2285 /* dont bother with small payloads */ 2286 if (len <= skb_tailroom(to)) 2287 return skb_copy_bits(from, 0, skb_put(to, len), len); 2288 2289 if (hlen) { 2290 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 2291 if (unlikely(ret)) 2292 return ret; 2293 len -= hlen; 2294 } else { 2295 plen = min_t(int, skb_headlen(from), len); 2296 if (plen) { 2297 page = virt_to_head_page(from->head); 2298 offset = from->data - (unsigned char *)page_address(page); 2299 __skb_fill_page_desc(to, 0, page, offset, plen); 2300 get_page(page); 2301 j = 1; 2302 len -= plen; 2303 } 2304 } 2305 2306 to->truesize += len + plen; 2307 to->len += len + plen; 2308 to->data_len += len + plen; 2309 2310 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 2311 skb_tx_error(from); 2312 return -ENOMEM; 2313 } 2314 2315 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2316 if (!len) 2317 break; 2318 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 2319 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); 2320 len -= skb_shinfo(to)->frags[j].size; 2321 skb_frag_ref(to, j); 2322 j++; 2323 } 2324 skb_shinfo(to)->nr_frags = j; 2325 2326 return 0; 2327} 2328EXPORT_SYMBOL_GPL(skb_zerocopy); 2329 2330void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 2331{ 2332 __wsum csum; 2333 long csstart; 2334 2335 if (skb->ip_summed == CHECKSUM_PARTIAL) 2336 csstart = skb_checksum_start_offset(skb); 2337 else 2338 csstart = skb_headlen(skb); 2339 2340 BUG_ON(csstart > skb_headlen(skb)); 2341 2342 skb_copy_from_linear_data(skb, to, csstart); 2343 2344 csum = 0; 2345 if (csstart != skb->len) 2346 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 2347 skb->len - csstart, 0); 2348 2349 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2350 long csstuff = csstart + skb->csum_offset; 2351 2352 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 2353 } 2354} 2355EXPORT_SYMBOL(skb_copy_and_csum_dev); 2356 2357/** 2358 * skb_dequeue - remove from the head of the queue 2359 * @list: list to dequeue from 2360 * 2361 * Remove the head of the list. The list lock is taken so the function 2362 * may be used safely with other locking list functions. The head item is 2363 * returned or %NULL if the list is empty. 2364 */ 2365 2366struct sk_buff *skb_dequeue(struct sk_buff_head *list) 2367{ 2368 unsigned long flags; 2369 struct sk_buff *result; 2370 2371 spin_lock_irqsave(&list->lock, flags); 2372 result = __skb_dequeue(list); 2373 spin_unlock_irqrestore(&list->lock, flags); 2374 return result; 2375} 2376EXPORT_SYMBOL(skb_dequeue); 2377 2378/** 2379 * skb_dequeue_tail - remove from the tail of the queue 2380 * @list: list to dequeue from 2381 * 2382 * Remove the tail of the list. The list lock is taken so the function 2383 * may be used safely with other locking list functions. The tail item is 2384 * returned or %NULL if the list is empty. 
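 *
 * Illustrative sketch (not taken from an in-tree caller; 'limit' is a
 * hypothetical threshold): dropping the newest buffer once a receive
 * queue grows too long:
 *
 *	if (skb_queue_len(&sk->sk_receive_queue) > limit)
 *		kfree_skb(skb_dequeue_tail(&sk->sk_receive_queue));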
2385 */ 2386struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 2387{ 2388 unsigned long flags; 2389 struct sk_buff *result; 2390 2391 spin_lock_irqsave(&list->lock, flags); 2392 result = __skb_dequeue_tail(list); 2393 spin_unlock_irqrestore(&list->lock, flags); 2394 return result; 2395} 2396EXPORT_SYMBOL(skb_dequeue_tail); 2397 2398/** 2399 * skb_queue_purge - empty a list 2400 * @list: list to empty 2401 * 2402 * Delete all buffers on an &sk_buff list. Each buffer is removed from 2403 * the list and one reference dropped. This function takes the list 2404 * lock and is atomic with respect to other list locking functions. 2405 */ 2406void skb_queue_purge(struct sk_buff_head *list) 2407{ 2408 struct sk_buff *skb; 2409 while ((skb = skb_dequeue(list)) != NULL) 2410 kfree_skb(skb); 2411} 2412EXPORT_SYMBOL(skb_queue_purge); 2413 2414/** 2415 * skb_queue_head - queue a buffer at the list head 2416 * @list: list to use 2417 * @newsk: buffer to queue 2418 * 2419 * Queue a buffer at the start of the list. This function takes the 2420 * list lock and can be used safely with other locking &sk_buff 2421 * functions. 2422 * 2423 * A buffer cannot be placed on two lists at the same time. 2424 */ 2425void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 2426{ 2427 unsigned long flags; 2428 2429 spin_lock_irqsave(&list->lock, flags); 2430 __skb_queue_head(list, newsk); 2431 spin_unlock_irqrestore(&list->lock, flags); 2432} 2433EXPORT_SYMBOL(skb_queue_head); 2434 2435/** 2436 * skb_queue_tail - queue a buffer at the list tail 2437 * @list: list to use 2438 * @newsk: buffer to queue 2439 * 2440 * Queue a buffer at the tail of the list. This function takes the 2441 * list lock and can be used safely with other locking &sk_buff 2442 * functions. 2443 * 2444 * A buffer cannot be placed on two lists at the same time. 2445 */ 2446void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 2447{ 2448 unsigned long flags; 2449 2450 spin_lock_irqsave(&list->lock, flags); 2451 __skb_queue_tail(list, newsk); 2452 spin_unlock_irqrestore(&list->lock, flags); 2453} 2454EXPORT_SYMBOL(skb_queue_tail); 2455 2456/** 2457 * skb_unlink - remove a buffer from a list 2458 * @skb: buffer to remove 2459 * @list: list to use 2460 * 2461 * Remove a packet from a list. The list locks are taken and this 2462 * function is atomic with respect to other list locked calls. 2463 * 2464 * You must know what list the SKB is on. 2465 */ 2466void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 2467{ 2468 unsigned long flags; 2469 2470 spin_lock_irqsave(&list->lock, flags); 2471 __skb_unlink(skb, list); 2472 spin_unlock_irqrestore(&list->lock, flags); 2473} 2474EXPORT_SYMBOL(skb_unlink); 2475 2476/** 2477 * skb_append - append a buffer 2478 * @old: buffer to insert after 2479 * @newsk: buffer to insert 2480 * @list: list to use 2481 * 2482 * Place a packet after a given packet in a list. The list locks are taken 2483 * and this function is atomic with respect to other list locked calls. 2484 * A buffer cannot be placed on two lists at the same time.
2485 */ 2486void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2487{ 2488 unsigned long flags; 2489 2490 spin_lock_irqsave(&list->lock, flags); 2491 __skb_queue_after(list, old, newsk); 2492 spin_unlock_irqrestore(&list->lock, flags); 2493} 2494EXPORT_SYMBOL(skb_append); 2495 2496/** 2497 * skb_insert - insert a buffer 2498 * @old: buffer to insert before 2499 * @newsk: buffer to insert 2500 * @list: list to use 2501 * 2502 * Place a packet before a given packet in a list. The list locks are 2503 * taken and this function is atomic with respect to other list locked 2504 * calls. 2505 * 2506 * A buffer cannot be placed on two lists at the same time. 2507 */ 2508void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2509{ 2510 unsigned long flags; 2511 2512 spin_lock_irqsave(&list->lock, flags); 2513 __skb_insert(newsk, old->prev, old, list); 2514 spin_unlock_irqrestore(&list->lock, flags); 2515} 2516EXPORT_SYMBOL(skb_insert); 2517 2518static inline void skb_split_inside_header(struct sk_buff *skb, 2519 struct sk_buff* skb1, 2520 const u32 len, const int pos) 2521{ 2522 int i; 2523 2524 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2525 pos - len); 2526 /* And move data appendix as is. */ 2527 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2528 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 2529 2530 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 2531 skb_shinfo(skb)->nr_frags = 0; 2532 skb1->data_len = skb->data_len; 2533 skb1->len += skb1->data_len; 2534 skb->data_len = 0; 2535 skb->len = len; 2536 skb_set_tail_pointer(skb, len); 2537} 2538 2539static inline void skb_split_no_header(struct sk_buff *skb, 2540 struct sk_buff* skb1, 2541 const u32 len, int pos) 2542{ 2543 int i, k = 0; 2544 const int nfrags = skb_shinfo(skb)->nr_frags; 2545 2546 skb_shinfo(skb)->nr_frags = 0; 2547 skb1->len = skb1->data_len = skb->len - len; 2548 skb->len = len; 2549 skb->data_len = len - pos; 2550 2551 for (i = 0; i < nfrags; i++) { 2552 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2553 2554 if (pos + size > len) { 2555 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2556 2557 if (pos < len) { 2558 /* Split frag. 2559 * We have two variants in this case: 2560 * 1. Move all the frag to the second 2561 * part, if it is possible. F.e. 2562 * this approach is mandatory for TUX, 2563 * where splitting is expensive. 2564 * 2. Split is accurately. We make this. 2565 */ 2566 skb_frag_ref(skb, i); 2567 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2568 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 2569 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 2570 skb_shinfo(skb)->nr_frags++; 2571 } 2572 k++; 2573 } else 2574 skb_shinfo(skb)->nr_frags++; 2575 pos += size; 2576 } 2577 skb_shinfo(skb1)->nr_frags = k; 2578} 2579 2580/** 2581 * skb_split - Split fragmented skb to two parts at length len. 2582 * @skb: the buffer to split 2583 * @skb1: the buffer to receive the second part 2584 * @len: new length for skb 2585 */ 2586void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 2587{ 2588 int pos = skb_headlen(skb); 2589 2590 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2591 if (len < pos) /* Split line is inside header. */ 2592 skb_split_inside_header(skb, skb1, len, pos); 2593 else /* Second chunk has no header, nothing to copy. 
*/ 2594 skb_split_no_header(skb, skb1, len, pos); 2595} 2596EXPORT_SYMBOL(skb_split); 2597 2598/* Shifting from/to a cloned skb is a no-go. 2599 * 2600 * Caller cannot keep skb_shinfo related pointers past calling here! 2601 */ 2602static int skb_prepare_for_shift(struct sk_buff *skb) 2603{ 2604 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2605} 2606 2607/** 2608 * skb_shift - Shifts paged data partially from skb to another 2609 * @tgt: buffer into which tail data gets added 2610 * @skb: buffer from which the paged data comes from 2611 * @shiftlen: shift up to this many bytes 2612 * 2613 * Attempts to shift up to shiftlen worth of bytes, which may be less than 2614 * the length of the skb, from skb to tgt. Returns number bytes shifted. 2615 * It's up to caller to free skb if everything was shifted. 2616 * 2617 * If @tgt runs out of frags, the whole operation is aborted. 2618 * 2619 * Skb cannot include anything else but paged data while tgt is allowed 2620 * to have non-paged data as well. 2621 * 2622 * TODO: full sized shift could be optimized but that would need 2623 * specialized skb free'er to handle frags without up-to-date nr_frags. 2624 */ 2625int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2626{ 2627 int from, to, merge, todo; 2628 struct skb_frag_struct *fragfrom, *fragto; 2629 2630 BUG_ON(shiftlen > skb->len); 2631 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2632 2633 todo = shiftlen; 2634 from = 0; 2635 to = skb_shinfo(tgt)->nr_frags; 2636 fragfrom = &skb_shinfo(skb)->frags[from]; 2637 2638 /* Actual merge is delayed until the point when we know we can 2639 * commit all, so that we don't have to undo partial changes 2640 */ 2641 if (!to || 2642 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2643 fragfrom->page_offset)) { 2644 merge = -1; 2645 } else { 2646 merge = to - 1; 2647 2648 todo -= skb_frag_size(fragfrom); 2649 if (todo < 0) { 2650 if (skb_prepare_for_shift(skb) || 2651 skb_prepare_for_shift(tgt)) 2652 return 0; 2653 2654 /* All previous frag pointers might be stale! 
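			 * skb_prepare_for_shift() may have called pskb_expand_head(),
			 * which reallocates the head and the shared info, so reload
			 * the frag pointers before touching them.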
*/ 2655 fragfrom = &skb_shinfo(skb)->frags[from]; 2656 fragto = &skb_shinfo(tgt)->frags[merge]; 2657 2658 skb_frag_size_add(fragto, shiftlen); 2659 skb_frag_size_sub(fragfrom, shiftlen); 2660 fragfrom->page_offset += shiftlen; 2661 2662 goto onlymerged; 2663 } 2664 2665 from++; 2666 } 2667 2668 /* Skip full, not-fitting skb to avoid expensive operations */ 2669 if ((shiftlen == skb->len) && 2670 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2671 return 0; 2672 2673 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2674 return 0; 2675 2676 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2677 if (to == MAX_SKB_FRAGS) 2678 return 0; 2679 2680 fragfrom = &skb_shinfo(skb)->frags[from]; 2681 fragto = &skb_shinfo(tgt)->frags[to]; 2682 2683 if (todo >= skb_frag_size(fragfrom)) { 2684 *fragto = *fragfrom; 2685 todo -= skb_frag_size(fragfrom); 2686 from++; 2687 to++; 2688 2689 } else { 2690 __skb_frag_ref(fragfrom); 2691 fragto->page = fragfrom->page; 2692 fragto->page_offset = fragfrom->page_offset; 2693 skb_frag_size_set(fragto, todo); 2694 2695 fragfrom->page_offset += todo; 2696 skb_frag_size_sub(fragfrom, todo); 2697 todo = 0; 2698 2699 to++; 2700 break; 2701 } 2702 } 2703 2704 /* Ready to "commit" this state change to tgt */ 2705 skb_shinfo(tgt)->nr_frags = to; 2706 2707 if (merge >= 0) { 2708 fragfrom = &skb_shinfo(skb)->frags[0]; 2709 fragto = &skb_shinfo(tgt)->frags[merge]; 2710 2711 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2712 __skb_frag_unref(fragfrom); 2713 } 2714 2715 /* Reposition in the original skb */ 2716 to = 0; 2717 while (from < skb_shinfo(skb)->nr_frags) 2718 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2719 skb_shinfo(skb)->nr_frags = to; 2720 2721 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2722 2723onlymerged: 2724 /* Most likely the tgt won't ever need its checksum anymore, skb on 2725 * the other hand might need it if it needs to be resent 2726 */ 2727 tgt->ip_summed = CHECKSUM_PARTIAL; 2728 skb->ip_summed = CHECKSUM_PARTIAL; 2729 2730 /* Yak, is it really working this way? Some helper please? */ 2731 skb->len -= shiftlen; 2732 skb->data_len -= shiftlen; 2733 skb->truesize -= shiftlen; 2734 tgt->len += shiftlen; 2735 tgt->data_len += shiftlen; 2736 tgt->truesize += shiftlen; 2737 2738 return shiftlen; 2739} 2740 2741/** 2742 * skb_prepare_seq_read - Prepare a sequential read of skb data 2743 * @skb: the buffer to read 2744 * @from: lower offset of data to be read 2745 * @to: upper offset of data to be read 2746 * @st: state variable 2747 * 2748 * Initializes the specified state variable. Must be called before 2749 * invoking skb_seq_read() for the first time. 2750 */ 2751void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2752 unsigned int to, struct skb_seq_state *st) 2753{ 2754 st->lower_offset = from; 2755 st->upper_offset = to; 2756 st->root_skb = st->cur_skb = skb; 2757 st->frag_idx = st->stepped_offset = 0; 2758 st->frag_data = NULL; 2759} 2760EXPORT_SYMBOL(skb_prepare_seq_read); 2761 2762/** 2763 * skb_seq_read - Sequentially read skb data 2764 * @consumed: number of bytes consumed by the caller so far 2765 * @data: destination pointer for data to be returned 2766 * @st: state variable 2767 * 2768 * Reads a block of skb data at @consumed relative to the 2769 * lower offset specified to skb_prepare_seq_read(). Assigns 2770 * the head of the data block to @data and returns the length 2771 * of the block or 0 if the end of the skb data or the upper 2772 * offset has been reached. 
2773 * 2774 * The caller is not required to consume all of the data 2775 * returned, i.e. @consumed is typically set to the number 2776 * of bytes already consumed and the next call to 2777 * skb_seq_read() will return the remaining part of the block. 2778 * 2779 * Note 1: The size of each block of data returned can be arbitrary, 2780 * this limitation is the cost for zerocopy sequential 2781 * reads of potentially non linear data. 2782 * 2783 * Note 2: Fragment lists within fragments are not implemented 2784 * at the moment, state->root_skb could be replaced with 2785 * a stack for this purpose. 2786 */ 2787unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2788 struct skb_seq_state *st) 2789{ 2790 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2791 skb_frag_t *frag; 2792 2793 if (unlikely(abs_offset >= st->upper_offset)) { 2794 if (st->frag_data) { 2795 kunmap_atomic(st->frag_data); 2796 st->frag_data = NULL; 2797 } 2798 return 0; 2799 } 2800 2801next_skb: 2802 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2803 2804 if (abs_offset < block_limit && !st->frag_data) { 2805 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2806 return block_limit - abs_offset; 2807 } 2808 2809 if (st->frag_idx == 0 && !st->frag_data) 2810 st->stepped_offset += skb_headlen(st->cur_skb); 2811 2812 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2813 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2814 block_limit = skb_frag_size(frag) + st->stepped_offset; 2815 2816 if (abs_offset < block_limit) { 2817 if (!st->frag_data) 2818 st->frag_data = kmap_atomic(skb_frag_page(frag)); 2819 2820 *data = (u8 *) st->frag_data + frag->page_offset + 2821 (abs_offset - st->stepped_offset); 2822 2823 return block_limit - abs_offset; 2824 } 2825 2826 if (st->frag_data) { 2827 kunmap_atomic(st->frag_data); 2828 st->frag_data = NULL; 2829 } 2830 2831 st->frag_idx++; 2832 st->stepped_offset += skb_frag_size(frag); 2833 } 2834 2835 if (st->frag_data) { 2836 kunmap_atomic(st->frag_data); 2837 st->frag_data = NULL; 2838 } 2839 2840 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2841 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2842 st->frag_idx = 0; 2843 goto next_skb; 2844 } else if (st->cur_skb->next) { 2845 st->cur_skb = st->cur_skb->next; 2846 st->frag_idx = 0; 2847 goto next_skb; 2848 } 2849 2850 return 0; 2851} 2852EXPORT_SYMBOL(skb_seq_read); 2853 2854/** 2855 * skb_abort_seq_read - Abort a sequential read of skb data 2856 * @st: state variable 2857 * 2858 * Must be called if skb_seq_read() was not called until it 2859 * returned 0. 
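 *
 * Typical use of the sequential read interface, sketched for illustration
 * only (process() is a hypothetical consumer of the returned block):
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (process(data, len) < 0) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		consumed += len;
 *	}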
2860 */ 2861void skb_abort_seq_read(struct skb_seq_state *st) 2862{ 2863 if (st->frag_data) 2864 kunmap_atomic(st->frag_data); 2865} 2866EXPORT_SYMBOL(skb_abort_seq_read); 2867 2868#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2869 2870static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2871 struct ts_config *conf, 2872 struct ts_state *state) 2873{ 2874 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2875} 2876 2877static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2878{ 2879 skb_abort_seq_read(TS_SKB_CB(state)); 2880} 2881 2882/** 2883 * skb_find_text - Find a text pattern in skb data 2884 * @skb: the buffer to look in 2885 * @from: search offset 2886 * @to: search limit 2887 * @config: textsearch configuration 2888 * 2889 * Finds a pattern in the skb data according to the specified 2890 * textsearch configuration. Use textsearch_next() to retrieve 2891 * subsequent occurrences of the pattern. Returns the offset 2892 * to the first occurrence or UINT_MAX if no match was found. 2893 */ 2894unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 2895 unsigned int to, struct ts_config *config) 2896{ 2897 struct ts_state state; 2898 unsigned int ret; 2899 2900 config->get_next_block = skb_ts_get_next_block; 2901 config->finish = skb_ts_finish; 2902 2903 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 2904 2905 ret = textsearch_find(config, &state); 2906 return (ret <= to - from ? ret : UINT_MAX); 2907} 2908EXPORT_SYMBOL(skb_find_text); 2909 2910/** 2911 * skb_append_datato_frags - append the user data to a skb 2912 * @sk: sock structure 2913 * @skb: skb structure to be appended with user data. 2914 * @getfrag: call back function to be used for getting the user data 2915 * @from: pointer to user message iov 2916 * @length: length of the iov message 2917 * 2918 * Description: This procedure appends the user data in the fragment part 2919 * of the skb. If any page alloc fails, this procedure returns -ENOMEM. 2920 */ 2921int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2922 int (*getfrag)(void *from, char *to, int offset, 2923 int len, int odd, struct sk_buff *skb), 2924 void *from, int length) 2925{ 2926 int frg_cnt = skb_shinfo(skb)->nr_frags; 2927 int copy; 2928 int offset = 0; 2929 int ret; 2930 struct page_frag *pfrag = &current->task_frag; 2931 2932 do { 2933 /* Return error if we don't have space for new frag */ 2934 if (frg_cnt >= MAX_SKB_FRAGS) 2935 return -EMSGSIZE; 2936 2937 if (!sk_page_frag_refill(sk, pfrag)) 2938 return -ENOMEM; 2939 2940 /* copy the user data to page */ 2941 copy = min_t(int, length, pfrag->size - pfrag->offset); 2942 2943 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, 2944 offset, copy, 0, skb); 2945 if (ret < 0) 2946 return -EFAULT; 2947 2948 /* copy was successful so update the size parameters */ 2949 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, 2950 copy); 2951 frg_cnt++; 2952 pfrag->offset += copy; 2953 get_page(pfrag->page); 2954 2955 skb->truesize += copy; 2956 atomic_add(copy, &sk->sk_wmem_alloc); 2957 skb->len += copy; 2958 skb->data_len += copy; 2959 offset += copy; 2960 length -= copy; 2961 2962 } while (length > 0); 2963 2964 return 0; 2965} 2966EXPORT_SYMBOL(skb_append_datato_frags); 2967 2968/** 2969 * skb_pull_rcsum - pull skb and update receive checksum 2970 * @skb: buffer to update 2971 * @len: length of data pulled 2972 * 2973 * This function performs an skb_pull on the packet and updates 2974 * the
CHECKSUM_COMPLETE checksum. It should be used on 2975 * receive path processing instead of skb_pull unless you know 2976 * that the checksum difference is zero (e.g., a valid IP header) 2977 * or you are setting ip_summed to CHECKSUM_NONE. 2978 */ 2979unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2980{ 2981 unsigned char *data = skb->data; 2982 2983 BUG_ON(len > skb->len); 2984 __skb_pull(skb, len); 2985 skb_postpull_rcsum(skb, data, len); 2986 return skb->data; 2987} 2988EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2989 2990/** 2991 * skb_segment - Perform protocol segmentation on skb. 2992 * @head_skb: buffer to segment 2993 * @features: features for the output path (see dev->features) 2994 * 2995 * This function performs segmentation on the given skb. It returns 2996 * a pointer to the first in a list of new skbs for the segments. 2997 * In case of error it returns ERR_PTR(err). 2998 */ 2999struct sk_buff *skb_segment(struct sk_buff *head_skb, 3000 netdev_features_t features) 3001{ 3002 struct sk_buff *segs = NULL; 3003 struct sk_buff *tail = NULL; 3004 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 3005 skb_frag_t *frag = skb_shinfo(head_skb)->frags; 3006 unsigned int mss = skb_shinfo(head_skb)->gso_size; 3007 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 3008 struct sk_buff *frag_skb = head_skb; 3009 unsigned int offset = doffset; 3010 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 3011 unsigned int headroom; 3012 unsigned int len; 3013 __be16 proto; 3014 bool csum; 3015 int sg = !!(features & NETIF_F_SG); 3016 int nfrags = skb_shinfo(head_skb)->nr_frags; 3017 int err = -ENOMEM; 3018 int i = 0; 3019 int pos; 3020 int dummy; 3021 3022 __skb_push(head_skb, doffset); 3023 proto = skb_network_protocol(head_skb, &dummy); 3024 if (unlikely(!proto)) 3025 return ERR_PTR(-EINVAL); 3026 3027 csum = !head_skb->encap_hdr_csum && 3028 !!can_checksum_protocol(features, proto); 3029 3030 headroom = skb_headroom(head_skb); 3031 pos = skb_headlen(head_skb); 3032 3033 do { 3034 struct sk_buff *nskb; 3035 skb_frag_t *nskb_frag; 3036 int hsize; 3037 int size; 3038 3039 len = head_skb->len - offset; 3040 if (len > mss) 3041 len = mss; 3042 3043 hsize = skb_headlen(head_skb) - offset; 3044 if (hsize < 0) 3045 hsize = 0; 3046 if (hsize > len || !sg) 3047 hsize = len; 3048 3049 if (!hsize && i >= nfrags && skb_headlen(list_skb) && 3050 (skb_headlen(list_skb) == len || sg)) { 3051 BUG_ON(skb_headlen(list_skb) > len); 3052 3053 i = 0; 3054 nfrags = skb_shinfo(list_skb)->nr_frags; 3055 frag = skb_shinfo(list_skb)->frags; 3056 frag_skb = list_skb; 3057 pos += skb_headlen(list_skb); 3058 3059 while (pos < offset + len) { 3060 BUG_ON(i >= nfrags); 3061 3062 size = skb_frag_size(frag); 3063 if (pos + size > offset + len) 3064 break; 3065 3066 i++; 3067 pos += size; 3068 frag++; 3069 } 3070 3071 nskb = skb_clone(list_skb, GFP_ATOMIC); 3072 list_skb = list_skb->next; 3073 3074 if (unlikely(!nskb)) 3075 goto err; 3076 3077 if (unlikely(pskb_trim(nskb, len))) { 3078 kfree_skb(nskb); 3079 goto err; 3080 } 3081 3082 hsize = skb_end_offset(nskb); 3083 if (skb_cow_head(nskb, doffset + headroom)) { 3084 kfree_skb(nskb); 3085 goto err; 3086 } 3087 3088 nskb->truesize += skb_end_offset(nskb) - hsize; 3089 skb_release_head_state(nskb); 3090 __skb_push(nskb, doffset); 3091 } else { 3092 nskb = __alloc_skb(hsize + doffset + headroom, 3093 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 3094 NUMA_NO_NODE); 3095 3096 if (unlikely(!nskb)) 3097 goto err; 3098 3099 skb_reserve(nskb, 
headroom); 3100 __skb_put(nskb, doffset); 3101 } 3102 3103 if (segs) 3104 tail->next = nskb; 3105 else 3106 segs = nskb; 3107 tail = nskb; 3108 3109 __copy_skb_header(nskb, head_skb); 3110 3111 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 3112 skb_reset_mac_len(nskb); 3113 3114 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 3115 nskb->data - tnl_hlen, 3116 doffset + tnl_hlen); 3117 3118 if (nskb->len == len + doffset) 3119 goto perform_csum_check; 3120 3121 if (!sg && !nskb->remcsum_offload) { 3122 nskb->ip_summed = CHECKSUM_NONE; 3123 nskb->csum = skb_copy_and_csum_bits(head_skb, offset, 3124 skb_put(nskb, len), 3125 len, 0); 3126 SKB_GSO_CB(nskb)->csum_start = 3127 skb_headroom(nskb) + doffset; 3128 continue; 3129 } 3130 3131 nskb_frag = skb_shinfo(nskb)->frags; 3132 3133 skb_copy_from_linear_data_offset(head_skb, offset, 3134 skb_put(nskb, hsize), hsize); 3135 3136 skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & 3137 SKBTX_SHARED_FRAG; 3138 3139 while (pos < offset + len) { 3140 if (i >= nfrags) { 3141 BUG_ON(skb_headlen(list_skb)); 3142 3143 i = 0; 3144 nfrags = skb_shinfo(list_skb)->nr_frags; 3145 frag = skb_shinfo(list_skb)->frags; 3146 frag_skb = list_skb; 3147 3148 BUG_ON(!nfrags); 3149 3150 list_skb = list_skb->next; 3151 } 3152 3153 if (unlikely(skb_shinfo(nskb)->nr_frags >= 3154 MAX_SKB_FRAGS)) { 3155 net_warn_ratelimited( 3156 "skb_segment: too many frags: %u %u\n", 3157 pos, mss); 3158 goto err; 3159 } 3160 3161 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) 3162 goto err; 3163 3164 *nskb_frag = *frag; 3165 __skb_frag_ref(nskb_frag); 3166 size = skb_frag_size(nskb_frag); 3167 3168 if (pos < offset) { 3169 nskb_frag->page_offset += offset - pos; 3170 skb_frag_size_sub(nskb_frag, offset - pos); 3171 } 3172 3173 skb_shinfo(nskb)->nr_frags++; 3174 3175 if (pos + size <= offset + len) { 3176 i++; 3177 frag++; 3178 pos += size; 3179 } else { 3180 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 3181 goto skip_fraglist; 3182 } 3183 3184 nskb_frag++; 3185 } 3186 3187skip_fraglist: 3188 nskb->data_len = len - hsize; 3189 nskb->len += nskb->data_len; 3190 nskb->truesize += nskb->data_len; 3191 3192perform_csum_check: 3193 if (!csum && !nskb->remcsum_offload) { 3194 nskb->csum = skb_checksum(nskb, doffset, 3195 nskb->len - doffset, 0); 3196 nskb->ip_summed = CHECKSUM_NONE; 3197 SKB_GSO_CB(nskb)->csum_start = 3198 skb_headroom(nskb) + doffset; 3199 } 3200 } while ((offset += len) < head_skb->len); 3201 3202 /* Some callers want to get the end of the list. 3203 * Put it in segs->prev to avoid walking the list. 3204 * (see validate_xmit_skb_list() for example) 3205 */ 3206 segs->prev = tail; 3207 3208 /* Following permits correct backpressure, for protocols 3209 * using skb_set_owner_w(). 3210 * Idea is to tranfert ownership from head_skb to last segment. 
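 * After the swap the last segment carries head_skb's sk, destructor and
 * truesize, so sk_wmem_alloc is only released when that final segment is
 * freed rather than when the caller frees head_skb.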
3211 */ 3212 if (head_skb->destructor == sock_wfree) { 3213 swap(tail->truesize, head_skb->truesize); 3214 swap(tail->destructor, head_skb->destructor); 3215 swap(tail->sk, head_skb->sk); 3216 } 3217 return segs; 3218 3219err: 3220 kfree_skb_list(segs); 3221 return ERR_PTR(err); 3222} 3223EXPORT_SYMBOL_GPL(skb_segment); 3224 3225int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 3226{ 3227 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 3228 unsigned int offset = skb_gro_offset(skb); 3229 unsigned int headlen = skb_headlen(skb); 3230 unsigned int len = skb_gro_len(skb); 3231 struct sk_buff *lp, *p = *head; 3232 unsigned int delta_truesize; 3233 3234 if (unlikely(p->len + len >= 65536)) 3235 return -E2BIG; 3236 3237 lp = NAPI_GRO_CB(p)->last; 3238 pinfo = skb_shinfo(lp); 3239 3240 if (headlen <= offset) { 3241 skb_frag_t *frag; 3242 skb_frag_t *frag2; 3243 int i = skbinfo->nr_frags; 3244 int nr_frags = pinfo->nr_frags + i; 3245 3246 if (nr_frags > MAX_SKB_FRAGS) 3247 goto merge; 3248 3249 offset -= headlen; 3250 pinfo->nr_frags = nr_frags; 3251 skbinfo->nr_frags = 0; 3252 3253 frag = pinfo->frags + nr_frags; 3254 frag2 = skbinfo->frags + i; 3255 do { 3256 *--frag = *--frag2; 3257 } while (--i); 3258 3259 frag->page_offset += offset; 3260 skb_frag_size_sub(frag, offset); 3261 3262 /* all fragments truesize : remove (head size + sk_buff) */ 3263 delta_truesize = skb->truesize - 3264 SKB_TRUESIZE(skb_end_offset(skb)); 3265 3266 skb->truesize -= skb->data_len; 3267 skb->len -= skb->data_len; 3268 skb->data_len = 0; 3269 3270 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 3271 goto done; 3272 } else if (skb->head_frag) { 3273 int nr_frags = pinfo->nr_frags; 3274 skb_frag_t *frag = pinfo->frags + nr_frags; 3275 struct page *page = virt_to_head_page(skb->head); 3276 unsigned int first_size = headlen - offset; 3277 unsigned int first_offset; 3278 3279 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 3280 goto merge; 3281 3282 first_offset = skb->data - 3283 (unsigned char *)page_address(page) + 3284 offset; 3285 3286 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 3287 3288 frag->page.p = page; 3289 frag->page_offset = first_offset; 3290 skb_frag_size_set(frag, first_size); 3291 3292 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3293 /* We dont need to clear skbinfo->nr_frags here */ 3294 3295 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3296 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3297 goto done; 3298 } 3299 3300merge: 3301 delta_truesize = skb->truesize; 3302 if (offset > headlen) { 3303 unsigned int eat = offset - headlen; 3304 3305 skbinfo->frags[0].page_offset += eat; 3306 skb_frag_size_sub(&skbinfo->frags[0], eat); 3307 skb->data_len -= eat; 3308 skb->len -= eat; 3309 offset = headlen; 3310 } 3311 3312 __skb_pull(skb, offset); 3313 3314 if (NAPI_GRO_CB(p)->last == p) 3315 skb_shinfo(p)->frag_list = skb; 3316 else 3317 NAPI_GRO_CB(p)->last->next = skb; 3318 NAPI_GRO_CB(p)->last = skb; 3319 __skb_header_release(skb); 3320 lp = p; 3321 3322done: 3323 NAPI_GRO_CB(p)->count++; 3324 p->data_len += len; 3325 p->truesize += delta_truesize; 3326 p->len += len; 3327 if (lp != p) { 3328 lp->data_len += len; 3329 lp->truesize += delta_truesize; 3330 lp->len += len; 3331 } 3332 NAPI_GRO_CB(skb)->same_flow = 1; 3333 return 0; 3334} 3335 3336void __init skb_init(void) 3337{ 3338 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 3339 sizeof(struct sk_buff), 3340 0, 3341 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3342 NULL); 
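	/* The fclone cache backs skb_clone() fast clones: each object is a
	 * struct sk_buff_fclones, i.e. a parent/child sk_buff pair plus a
	 * reference count.
	 */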
3343 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3344 sizeof(struct sk_buff_fclones), 3345 0, 3346 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3347 NULL); 3348} 3349 3350/** 3351 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3352 * @skb: Socket buffer containing the buffers to be mapped 3353 * @sg: The scatter-gather list to map into 3354 * @offset: The offset into the buffer's contents to start mapping 3355 * @len: Length of buffer space to be mapped 3356 * 3357 * Fill the specified scatter-gather list with mappings/pointers into a 3358 * region of the buffer space attached to a socket buffer. 3359 */ 3360static int 3361__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3362{ 3363 int start = skb_headlen(skb); 3364 int i, copy = start - offset; 3365 struct sk_buff *frag_iter; 3366 int elt = 0; 3367 3368 if (copy > 0) { 3369 if (copy > len) 3370 copy = len; 3371 sg_set_buf(sg, skb->data + offset, copy); 3372 elt++; 3373 if ((len -= copy) == 0) 3374 return elt; 3375 offset += copy; 3376 } 3377 3378 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3379 int end; 3380 3381 WARN_ON(start > offset + len); 3382 3383 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3384 if ((copy = end - offset) > 0) { 3385 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3386 3387 if (copy > len) 3388 copy = len; 3389 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3390 frag->page_offset+offset-start); 3391 elt++; 3392 if (!(len -= copy)) 3393 return elt; 3394 offset += copy; 3395 } 3396 start = end; 3397 } 3398 3399 skb_walk_frags(skb, frag_iter) { 3400 int end; 3401 3402 WARN_ON(start > offset + len); 3403 3404 end = start + frag_iter->len; 3405 if ((copy = end - offset) > 0) { 3406 if (copy > len) 3407 copy = len; 3408 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 3409 copy); 3410 if ((len -= copy) == 0) 3411 return elt; 3412 offset += copy; 3413 } 3414 start = end; 3415 } 3416 BUG_ON(len); 3417 return elt; 3418} 3419 3420/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the 3421 * given sglist without marking the sg that contains the last skb data as the end. 3422 * So the caller can manipulate the sg list at will when appending new data after 3423 * the first call, without calling sg_unmark_end to extend the sg list. 3424 * 3425 * Scenario to use skb_to_sgvec_nomark: 3426 * 1. sg_init_table 3427 * 2. skb_to_sgvec_nomark(payload1) 3428 * 3. skb_to_sgvec_nomark(payload2) 3429 * 3430 * This is equivalent to: 3431 * 1. sg_init_table 3432 * 2. skb_to_sgvec(payload1) 3433 * 3. sg_unmark_end 3434 * 4. skb_to_sgvec(payload2) 3435 * 3436 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark 3437 * is preferable. 3438 */ 3439int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 3440 int offset, int len) 3441{ 3442 return __skb_to_sgvec(skb, sg, offset, len); 3443} 3444EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 3445 3446int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3447{ 3448 int nsg = __skb_to_sgvec(skb, sg, offset, len); 3449 3450 sg_mark_end(&sg[nsg - 1]); 3451 3452 return nsg; 3453} 3454EXPORT_SYMBOL_GPL(skb_to_sgvec); 3455 3456/** 3457 * skb_cow_data - Check that a socket buffer's data buffers are writable 3458 * @skb: The socket buffer to check. 3459 * @tailbits: Amount of trailing space to be added 3460 * @trailer: Returned pointer to the skb where the @tailbits space begins 3461 * 3462 * Make sure that the data buffers attached to a socket buffer are 3463 * writable.
If they are not, private copies are made of the data buffers 3464 * and the socket buffer is set to use these instead. 3465 * 3466 * If @tailbits is given, make sure that there is space to write @tailbits 3467 * bytes of data beyond current end of socket buffer. @trailer will be 3468 * set to point to the skb in which this space begins. 3469 * 3470 * The number of scatterlist elements required to completely map the 3471 * COW'd and extended socket buffer will be returned. 3472 */ 3473int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3474{ 3475 int copyflag; 3476 int elt; 3477 struct sk_buff *skb1, **skb_p; 3478 3479 /* If skb is cloned or its head is paged, reallocate 3480 * head pulling out all the pages (pages are considered not writable 3481 * at the moment even if they are anonymous). 3482 */ 3483 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3484 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3485 return -ENOMEM; 3486 3487 /* Easy case. Most of packets will go this way. */ 3488 if (!skb_has_frag_list(skb)) { 3489 /* A little of trouble, not enough of space for trailer. 3490 * This should not happen, when stack is tuned to generate 3491 * good frames. OK, on miss we reallocate and reserve even more 3492 * space, 128 bytes is fair. */ 3493 3494 if (skb_tailroom(skb) < tailbits && 3495 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3496 return -ENOMEM; 3497 3498 /* Voila! */ 3499 *trailer = skb; 3500 return 1; 3501 } 3502 3503 /* Misery. We are in troubles, going to mincer fragments... */ 3504 3505 elt = 1; 3506 skb_p = &skb_shinfo(skb)->frag_list; 3507 copyflag = 0; 3508 3509 while ((skb1 = *skb_p) != NULL) { 3510 int ntail = 0; 3511 3512 /* The fragment is partially pulled by someone, 3513 * this can happen on input. Copy it and everything 3514 * after it. */ 3515 3516 if (skb_shared(skb1)) 3517 copyflag = 1; 3518 3519 /* If the skb is the last, worry about trailer. */ 3520 3521 if (skb1->next == NULL && tailbits) { 3522 if (skb_shinfo(skb1)->nr_frags || 3523 skb_has_frag_list(skb1) || 3524 skb_tailroom(skb1) < tailbits) 3525 ntail = tailbits + 128; 3526 } 3527 3528 if (copyflag || 3529 skb_cloned(skb1) || 3530 ntail || 3531 skb_shinfo(skb1)->nr_frags || 3532 skb_has_frag_list(skb1)) { 3533 struct sk_buff *skb2; 3534 3535 /* Fuck, we are miserable poor guys... */ 3536 if (ntail == 0) 3537 skb2 = skb_copy(skb1, GFP_ATOMIC); 3538 else 3539 skb2 = skb_copy_expand(skb1, 3540 skb_headroom(skb1), 3541 ntail, 3542 GFP_ATOMIC); 3543 if (unlikely(skb2 == NULL)) 3544 return -ENOMEM; 3545 3546 if (skb1->sk) 3547 skb_set_owner_w(skb2, skb1->sk); 3548 3549 /* Looking around. Are we still alive? 
3550 * OK, link new skb, drop old one */ 3551 3552 skb2->next = skb1->next; 3553 *skb_p = skb2; 3554 kfree_skb(skb1); 3555 skb1 = skb2; 3556 } 3557 elt++; 3558 *trailer = skb1; 3559 skb_p = &skb1->next; 3560 } 3561 3562 return elt; 3563} 3564EXPORT_SYMBOL_GPL(skb_cow_data); 3565 3566static void sock_rmem_free(struct sk_buff *skb) 3567{ 3568 struct sock *sk = skb->sk; 3569 3570 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3571} 3572 3573/* 3574 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3575 */ 3576int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3577{ 3578 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3579 (unsigned int)sk->sk_rcvbuf) 3580 return -ENOMEM; 3581 3582 skb_orphan(skb); 3583 skb->sk = sk; 3584 skb->destructor = sock_rmem_free; 3585 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3586 3587 /* before exiting rcu section, make sure dst is refcounted */ 3588 skb_dst_force(skb); 3589 3590 skb_queue_tail(&sk->sk_error_queue, skb); 3591 if (!sock_flag(sk, SOCK_DEAD)) 3592 sk->sk_data_ready(sk); 3593 return 0; 3594} 3595EXPORT_SYMBOL(sock_queue_err_skb); 3596 3597struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 3598{ 3599 struct sk_buff_head *q = &sk->sk_error_queue; 3600 struct sk_buff *skb, *skb_next; 3601 unsigned long flags; 3602 int err = 0; 3603 3604 spin_lock_irqsave(&q->lock, flags); 3605 skb = __skb_dequeue(q); 3606 if (skb && (skb_next = skb_peek(q))) 3607 err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 3608 spin_unlock_irqrestore(&q->lock, flags); 3609 3610 sk->sk_err = err; 3611 if (err) 3612 sk->sk_error_report(sk); 3613 3614 return skb; 3615} 3616EXPORT_SYMBOL(sock_dequeue_err_skb); 3617 3618/** 3619 * skb_clone_sk - create clone of skb, and take reference to socket 3620 * @skb: the skb to clone 3621 * 3622 * This function creates a clone of a buffer that holds a reference on 3623 * sk_refcnt. Buffers created via this function are meant to be 3624 * returned using sock_queue_err_skb, or free via kfree_skb. 3625 * 3626 * When passing buffers allocated with this function to sock_queue_err_skb 3627 * it is necessary to wrap the call with sock_hold/sock_put in order to 3628 * prevent the socket from being released prior to being enqueued on 3629 * the sk_error_queue. 
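 *
 * Sketch of that pattern, for illustration only (the surrounding driver
 * code and its error handling are hypothetical):
 *
 *	struct sk_buff *clone = skb_clone_sk(skb);
 *	struct sock *sk;
 *
 *	if (!clone)
 *		return;
 *	sk = clone->sk;
 *	sock_hold(sk);
 *	if (sock_queue_err_skb(sk, clone))
 *		kfree_skb(clone);
 *	sock_put(sk);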
3630 */ 3631struct sk_buff *skb_clone_sk(struct sk_buff *skb) 3632{ 3633 struct sock *sk = skb->sk; 3634 struct sk_buff *clone; 3635 3636 if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) 3637 return NULL; 3638 3639 clone = skb_clone(skb, GFP_ATOMIC); 3640 if (!clone) { 3641 sock_put(sk); 3642 return NULL; 3643 } 3644 3645 clone->sk = sk; 3646 clone->destructor = sock_efree; 3647 3648 return clone; 3649} 3650EXPORT_SYMBOL(skb_clone_sk); 3651 3652static void __skb_complete_tx_timestamp(struct sk_buff *skb, 3653 struct sock *sk, 3654 int tstype) 3655{ 3656 struct sock_exterr_skb *serr; 3657 int err; 3658 3659 serr = SKB_EXT_ERR(skb); 3660 memset(serr, 0, sizeof(*serr)); 3661 serr->ee.ee_errno = ENOMSG; 3662 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3663 serr->ee.ee_info = tstype; 3664 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3665 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3666 if (sk->sk_protocol == IPPROTO_TCP && 3667 sk->sk_type == SOCK_STREAM) 3668 serr->ee.ee_data -= sk->sk_tskey; 3669 } 3670 3671 err = sock_queue_err_skb(sk, skb); 3672 3673 if (err) 3674 kfree_skb(skb); 3675} 3676 3677static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 3678{ 3679 bool ret; 3680 3681 if (likely(sysctl_tstamp_allow_data || tsonly)) 3682 return true; 3683 3684 read_lock_bh(&sk->sk_callback_lock); 3685 ret = sk->sk_socket && sk->sk_socket->file && 3686 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 3687 read_unlock_bh(&sk->sk_callback_lock); 3688 return ret; 3689} 3690 3691void skb_complete_tx_timestamp(struct sk_buff *skb, 3692 struct skb_shared_hwtstamps *hwtstamps) 3693{ 3694 struct sock *sk = skb->sk; 3695 3696 if (!skb_may_tx_timestamp(sk, false)) 3697 return; 3698 3699 /* take a reference to prevent skb_orphan() from freeing the socket */ 3700 sock_hold(sk); 3701 3702 *skb_hwtstamps(skb) = *hwtstamps; 3703 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); 3704 3705 sock_put(sk); 3706} 3707EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 3708 3709void __skb_tstamp_tx(struct sk_buff *orig_skb, 3710 struct skb_shared_hwtstamps *hwtstamps, 3711 struct sock *sk, int tstype) 3712{ 3713 struct sk_buff *skb; 3714 bool tsonly; 3715 3716 if (!sk) 3717 return; 3718 3719 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 3720 if (!skb_may_tx_timestamp(sk, tsonly)) 3721 return; 3722 3723 if (tsonly) 3724 skb = alloc_skb(0, GFP_ATOMIC); 3725 else 3726 skb = skb_clone(orig_skb, GFP_ATOMIC); 3727 if (!skb) 3728 return; 3729 3730 if (tsonly) { 3731 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; 3732 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 3733 } 3734 3735 if (hwtstamps) 3736 *skb_hwtstamps(skb) = *hwtstamps; 3737 else 3738 skb->tstamp = ktime_get_real(); 3739 3740 __skb_complete_tx_timestamp(skb, sk, tstype); 3741} 3742EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 3743 3744void skb_tstamp_tx(struct sk_buff *orig_skb, 3745 struct skb_shared_hwtstamps *hwtstamps) 3746{ 3747 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, 3748 SCM_TSTAMP_SND); 3749} 3750EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3751 3752void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 3753{ 3754 struct sock *sk = skb->sk; 3755 struct sock_exterr_skb *serr; 3756 int err; 3757 3758 skb->wifi_acked_valid = 1; 3759 skb->wifi_acked = acked; 3760 3761 serr = SKB_EXT_ERR(skb); 3762 memset(serr, 0, sizeof(*serr)); 3763 serr->ee.ee_errno = ENOMSG; 3764 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3765 3766 /* take a reference to prevent skb_orphan() from freeing the socket */ 3767 
sock_hold(sk); 3768 3769 err = sock_queue_err_skb(sk, skb); 3770 if (err) 3771 kfree_skb(skb); 3772 3773 sock_put(sk); 3774} 3775EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3776 3777/** 3778 * skb_partial_csum_set - set up and verify partial csum values for packet 3779 * @skb: the skb to set 3780 * @start: the number of bytes after skb->data to start checksumming. 3781 * @off: the offset from start to place the checksum. 3782 * 3783 * For untrusted partially-checksummed packets, we need to make sure the values 3784 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3785 * 3786 * This function checks and sets those values and skb->ip_summed: if this 3787 * returns false you should drop the packet. 3788 */ 3789bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3790{ 3791 if (unlikely(start > skb_headlen(skb)) || 3792 unlikely((int)start + off > skb_headlen(skb) - 2)) { 3793 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 3794 start, off, skb_headlen(skb)); 3795 return false; 3796 } 3797 skb->ip_summed = CHECKSUM_PARTIAL; 3798 skb->csum_start = skb_headroom(skb) + start; 3799 skb->csum_offset = off; 3800 skb_set_transport_header(skb, start); 3801 return true; 3802} 3803EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3804 3805static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 3806 unsigned int max) 3807{ 3808 if (skb_headlen(skb) >= len) 3809 return 0; 3810 3811 /* If we need to pullup then pullup to the max, so we 3812 * won't need to do it again. 3813 */ 3814 if (max > skb->len) 3815 max = skb->len; 3816 3817 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 3818 return -ENOMEM; 3819 3820 if (skb_headlen(skb) < len) 3821 return -EPROTO; 3822 3823 return 0; 3824} 3825 3826#define MAX_TCP_HDR_LEN (15 * 4) 3827 3828static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 3829 typeof(IPPROTO_IP) proto, 3830 unsigned int off) 3831{ 3832 switch (proto) { 3833 int err; 3834 3835 case IPPROTO_TCP: 3836 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 3837 off + MAX_TCP_HDR_LEN); 3838 if (!err && !skb_partial_csum_set(skb, off, 3839 offsetof(struct tcphdr, 3840 check))) 3841 err = -EPROTO; 3842 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 3843 3844 case IPPROTO_UDP: 3845 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 3846 off + sizeof(struct udphdr)); 3847 if (!err && !skb_partial_csum_set(skb, off, 3848 offsetof(struct udphdr, 3849 check))) 3850 err = -EPROTO; 3851 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 3852 } 3853 3854 return ERR_PTR(-EPROTO); 3855} 3856 3857/* This value should be large enough to cover a tagged ethernet header plus 3858 * maximally sized IP and TCP or UDP headers. 
3859 */ 3860#define MAX_IP_HDR_LEN 128 3861 3862static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 3863{ 3864 unsigned int off; 3865 bool fragment; 3866 __sum16 *csum; 3867 int err; 3868 3869 fragment = false; 3870 3871 err = skb_maybe_pull_tail(skb, 3872 sizeof(struct iphdr), 3873 MAX_IP_HDR_LEN); 3874 if (err < 0) 3875 goto out; 3876 3877 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) 3878 fragment = true; 3879 3880 off = ip_hdrlen(skb); 3881 3882 err = -EPROTO; 3883 3884 if (fragment) 3885 goto out; 3886 3887 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 3888 if (IS_ERR(csum)) 3889 return PTR_ERR(csum); 3890 3891 if (recalculate) 3892 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 3893 ip_hdr(skb)->daddr, 3894 skb->len - off, 3895 ip_hdr(skb)->protocol, 0); 3896 err = 0; 3897 3898out: 3899 return err; 3900} 3901 3902/* This value should be large enough to cover a tagged ethernet header plus 3903 * an IPv6 header, all options, and a maximal TCP or UDP header. 3904 */ 3905#define MAX_IPV6_HDR_LEN 256 3906 3907#define OPT_HDR(type, skb, off) \ 3908 (type *)(skb_network_header(skb) + (off)) 3909 3910static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 3911{ 3912 int err; 3913 u8 nexthdr; 3914 unsigned int off; 3915 unsigned int len; 3916 bool fragment; 3917 bool done; 3918 __sum16 *csum; 3919 3920 fragment = false; 3921 done = false; 3922 3923 off = sizeof(struct ipv6hdr); 3924 3925 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 3926 if (err < 0) 3927 goto out; 3928 3929 nexthdr = ipv6_hdr(skb)->nexthdr; 3930 3931 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 3932 while (off <= len && !done) { 3933 switch (nexthdr) { 3934 case IPPROTO_DSTOPTS: 3935 case IPPROTO_HOPOPTS: 3936 case IPPROTO_ROUTING: { 3937 struct ipv6_opt_hdr *hp; 3938 3939 err = skb_maybe_pull_tail(skb, 3940 off + 3941 sizeof(struct ipv6_opt_hdr), 3942 MAX_IPV6_HDR_LEN); 3943 if (err < 0) 3944 goto out; 3945 3946 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 3947 nexthdr = hp->nexthdr; 3948 off += ipv6_optlen(hp); 3949 break; 3950 } 3951 case IPPROTO_AH: { 3952 struct ip_auth_hdr *hp; 3953 3954 err = skb_maybe_pull_tail(skb, 3955 off + 3956 sizeof(struct ip_auth_hdr), 3957 MAX_IPV6_HDR_LEN); 3958 if (err < 0) 3959 goto out; 3960 3961 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 3962 nexthdr = hp->nexthdr; 3963 off += ipv6_authlen(hp); 3964 break; 3965 } 3966 case IPPROTO_FRAGMENT: { 3967 struct frag_hdr *hp; 3968 3969 err = skb_maybe_pull_tail(skb, 3970 off + 3971 sizeof(struct frag_hdr), 3972 MAX_IPV6_HDR_LEN); 3973 if (err < 0) 3974 goto out; 3975 3976 hp = OPT_HDR(struct frag_hdr, skb, off); 3977 3978 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 3979 fragment = true; 3980 3981 nexthdr = hp->nexthdr; 3982 off += sizeof(struct frag_hdr); 3983 break; 3984 } 3985 default: 3986 done = true; 3987 break; 3988 } 3989 } 3990 3991 err = -EPROTO; 3992 3993 if (!done || fragment) 3994 goto out; 3995 3996 csum = skb_checksum_setup_ip(skb, nexthdr, off); 3997 if (IS_ERR(csum)) 3998 return PTR_ERR(csum); 3999 4000 if (recalculate) 4001 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4002 &ipv6_hdr(skb)->daddr, 4003 skb->len - off, nexthdr, 0); 4004 err = 0; 4005 4006out: 4007 return err; 4008} 4009 4010/** 4011 * skb_checksum_setup - set up partial checksum offset 4012 * @skb: the skb to set up 4013 * @recalculate: if true the pseudo-header checksum will be recalculated 4014 */ 4015int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 
4016{ 4017 int err; 4018 4019 switch (skb->protocol) { 4020 case htons(ETH_P_IP): 4021 err = skb_checksum_setup_ipv4(skb, recalculate); 4022 break; 4023 4024 case htons(ETH_P_IPV6): 4025 err = skb_checksum_setup_ipv6(skb, recalculate); 4026 break; 4027 4028 default: 4029 err = -EPROTO; 4030 break; 4031 } 4032 4033 return err; 4034} 4035EXPORT_SYMBOL(skb_checksum_setup); 4036 4037void __skb_warn_lro_forwarding(const struct sk_buff *skb) 4038{ 4039 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 4040 skb->dev->name); 4041} 4042EXPORT_SYMBOL(__skb_warn_lro_forwarding); 4043 4044void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 4045{ 4046 if (head_stolen) { 4047 skb_release_head_state(skb); 4048 kmem_cache_free(skbuff_head_cache, skb); 4049 } else { 4050 __kfree_skb(skb); 4051 } 4052} 4053EXPORT_SYMBOL(kfree_skb_partial); 4054 4055/** 4056 * skb_try_coalesce - try to merge skb to prior one 4057 * @to: prior buffer 4058 * @from: buffer to add 4059 * @fragstolen: pointer to boolean 4060 * @delta_truesize: how much more was allocated than was requested 4061 */ 4062bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 4063 bool *fragstolen, int *delta_truesize) 4064{ 4065 int i, delta, len = from->len; 4066 4067 *fragstolen = false; 4068 4069 if (skb_cloned(to)) 4070 return false; 4071 4072 if (len <= skb_tailroom(to)) { 4073 if (len) 4074 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 4075 *delta_truesize = 0; 4076 return true; 4077 } 4078 4079 if (skb_has_frag_list(to) || skb_has_frag_list(from)) 4080 return false; 4081 4082 if (skb_headlen(from) != 0) { 4083 struct page *page; 4084 unsigned int offset; 4085 4086 if (skb_shinfo(to)->nr_frags + 4087 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 4088 return false; 4089 4090 if (skb_head_is_locked(from)) 4091 return false; 4092 4093 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4094 4095 page = virt_to_head_page(from->head); 4096 offset = from->data - (unsigned char *)page_address(page); 4097 4098 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, 4099 page, offset, skb_headlen(from)); 4100 *fragstolen = true; 4101 } else { 4102 if (skb_shinfo(to)->nr_frags + 4103 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) 4104 return false; 4105 4106 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 4107 } 4108 4109 WARN_ON_ONCE(delta < len); 4110 4111 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, 4112 skb_shinfo(from)->frags, 4113 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); 4114 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; 4115 4116 if (!skb_cloned(from)) 4117 skb_shinfo(from)->nr_frags = 0; 4118 4119 /* if the skb is not cloned this does nothing 4120 * since we set nr_frags to 0. 4121 */ 4122 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) 4123 skb_frag_ref(from, i); 4124 4125 to->truesize += delta; 4126 to->len += len; 4127 to->data_len += len; 4128 4129 *delta_truesize = delta; 4130 return true; 4131} 4132EXPORT_SYMBOL(skb_try_coalesce); 4133 4134/** 4135 * skb_scrub_packet - scrub an skb 4136 * 4137 * @skb: buffer to clean 4138 * @xnet: packet is crossing netns 4139 * 4140 * skb_scrub_packet can be used after encapsulating or decapsulting a packet 4141 * into/from a tunnel. Some information have to be cleared during these 4142 * operations. 4143 * skb_scrub_packet can also be used to clean a skb before injecting it in 4144 * another namespace (@xnet == true). 
/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean an skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	skb_sender_cpu_clear(skb);
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	skb_orphan(skb);
	skb->mark = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);

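/* Illustrative sketch (not part of this file): a qdisc or rate estimator
 * that needs the on-wire size of each segment of a GSO packet can add the
 * MAC and network header length to the transport segment length returned
 * above.  The helper name is hypothetical.
 */
static inline unsigned int example_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}
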
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
		2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);

int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);

/* remove VLAN header from packet and update csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	unsigned int offset = skb->data - skb_mac_header(skb);
	int err;

	__skb_push(skb, offset);
	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		goto pull;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);
pull:
	__skb_pull(skb, offset);

	return err;
}

int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
			      skb->protocol != htons(ETH_P_8021AD)) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely((skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);

int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		unsigned int offset = skb->data - skb_mac_header(skb);
		int err;

		/* __vlan_insert_tag expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * move it back to the original position afterwards.
		 */
		__skb_push(skb, offset);
		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;
		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;
		__skb_pull(skb, offset);

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);

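/* Illustrative sketch (not part of this file): replacing the outermost
 * VLAN tag can be built from the two helpers above.  When the tag lives in
 * skb->vlan_tci (hardware-accelerated case), both calls only touch
 * metadata.  The helper name is hypothetical.
 */
static inline int example_vlan_replace(struct sk_buff *skb,
				       __be16 vlan_proto, u16 vlan_tci)
{
	int err;

	err = skb_vlan_pop(skb);
	if (err)
		return err;

	return skb_vlan_push(skb, vlan_proto, vlan_tci);
}
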
/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	gfp_t gfp_head;
	int i;

	*errcode = -EMSGSIZE;
	/* Note this test could be relaxed, if we succeed in allocating
	 * high order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	gfp_head = gfp_mask;
	if (gfp_head & __GFP_WAIT)
		gfp_head |= __GFP_REPEAT;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_head);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_WAIT) |
						   __GFP_COMP |
						   __GFP_NOWARN |
						   __GFP_NORETRY,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);

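/* Illustrative sketch (not part of this file): allocating a large datagram
 * with a small linear header and the payload spread over page fragments,
 * roughly the way sock_alloc_send_pskb() uses this helper.  The function
 * name is hypothetical; a real caller would still have to copy payload
 * into the frags (e.g. with skb_copy_datagram_from_iter()).
 */
static inline struct sk_buff *example_alloc_paged_skb(unsigned long header_len,
						       unsigned long data_len,
						       gfp_t gfp_mask)
{
	struct sk_buff *skb;
	int err;

	skb = alloc_skb_with_frags(header_len, data_len,
				   PAGE_ALLOC_COSTLY_ORDER, &err, gfp_mask);
	if (!skb)
		return NULL;		/* err is -EMSGSIZE or -ENOBUFS */

	/* Linear area carries the header, frags carry the payload. */
	skb_put(skb, header_len);
	skb->data_len = data_len;
	skb->len += data_len;

	return skb;
}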