Searched refs:SKB_DATA_ALIGN (Results 1 – 21 of 21) sorted by relevance
367 #define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
368     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
5367 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD + in bnx2_set_rx_ring_size()
5368     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in bnx2_set_rx_ring_size()
5392 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + in bnx2_set_rx_ring_size()
5393     NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in bnx2_set_rx_ring_size()
6628 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) + in tg3_rx_data_free()
6629     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in tg3_rx_data_free()
6687 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + in tg3_alloc_rx_data()
6688     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in tg3_alloc_rx_data()
228 size = SKB_DATA_ALIGN(size); in __alloc_skb()
229 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in __alloc_skb()
313 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in __build_skb()
488 unsigned int fragsz = SKB_DATA_ALIGN(length) + in __alloc_rx_skb()
489     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in __alloc_rx_skb()
1180 size = SKB_DATA_ALIGN(size); in pskb_expand_head()
1184 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), in pskb_expand_head()
1255 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, in skb_realloc_headroom()
3295 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_gro_receive()
4093 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_try_coalesce()
163 int head_delta = SKB_DATA_ALIGN(min_headroom - in gre_tnl_send()
141 #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) macro
143     ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
151     SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
152     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
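The definition at line 141 rounds a length up to the next multiple of SMP_CACHE_BYTES, so skb data areas (and the skb_shared_info appended behind them) start on cache-line boundaries. Below is a minimal userspace sketch of that arithmetic, assuming a 64-byte cache line and an illustrative skb_shared_info size of 320 bytes; both values vary by architecture and kernel configuration and are not taken from the hits above.

/* Userspace sketch of the SKB_DATA_ALIGN arithmetic; SMP_CACHE_BYTES and
 * SHINFO_SIZE are illustrative assumptions, not real kernel values. */
#include <stdio.h>
#include <stddef.h>

#define SMP_CACHE_BYTES   64                         /* assumed cache-line size */
#define ALIGN(x, a)       (((x) + (a) - 1) & ~(size_t)((a) - 1))
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SHINFO_SIZE       320                        /* assumed sizeof(struct skb_shared_info) */

int main(void)
{
	size_t payload = 1500;                       /* e.g. one Ethernet MTU of data */
	size_t aligned = SKB_DATA_ALIGN(payload);    /* 1500 -> 1536 */
	size_t total   = aligned + SKB_DATA_ALIGN(SHINFO_SIZE);

	printf("payload %zu -> aligned %zu, with shared_info %zu\n",
	       payload, aligned, total);
	return 0;
}

The same arithmetic underlies SKB_WITH_OVERHEAD and SKB_TRUESIZE at lines 143 and 151-152 above: usable payload is the allocation minus the aligned shared_info, and true size adds the aligned sk_buff and skb_shared_info structures on top of the payload.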
2526 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + in mvneta_change_mtu()
2527     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in mvneta_change_mtu()
2700 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + in mvneta_open()
2701     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in mvneta_open()
373 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
442 pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) { in tipc_l2_send_msg()
784 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in hip04_alloc_ring()
809 primary_buf_len = SKB_DATA_ALIGN(buf_len) + in netcp_allocate_rx_buf()
810     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in netcp_allocate_rx_buf()
299 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); in efx_enqueue_skb_pio()
492 head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) + in atl1c_set_rxbufsize()
493     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in atl1c_set_rxbufsize()
1678 size = SKB_DATA_ALIGN(size) + in netlink_alloc_large_skb()
1679     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in netlink_alloc_large_skb()
3048 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : in t3_sge_alloc_qset()
3288 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in t3_sge_prep()
2025 #define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
589 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in alloc_rx_resources()
1499 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
2068 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + in e1000_frag_len()
2069     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in e1000_frag_len()
294 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in tcp_sndbuf_expand()
297 SKB_DATA_ALIGN(sizeof(struct sk_buff)); in tcp_sndbuf_expand()
1348 rx->page_offset += SKB_DATA_ALIGN(bytes); in myri10ge_alloc_rx_pages()
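Most of the driver hits above (bgmac, bnx2, tg3, mvneta, netcp, atl1c, e1000) size their receive buffers with the same idiom: cache-align the headroom plus frame, then add a cache-aligned skb_shared_info so the shared info can later be attached at the tail of the buffer. The sketch below illustrates that sizing pattern with assumed constants rather than any particular driver's values; NET_SKB_PAD, the cache-line size, and the skb_shared_info size are all illustrative here.

/* Sketch of the recurring RX-buffer sizing idiom from the driver hits above.
 * The constants are assumptions for illustration only. */
#include <stdio.h>
#include <stddef.h>

#define SMP_CACHE_BYTES   64                    /* assumed cache-line size */
#define ALIGN(x, a)       (((x) + (a) - 1) & ~(size_t)((a) - 1))
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define NET_SKB_PAD       32                    /* assumed default headroom */
#define ETH_HLEN          14                    /* Ethernet header length */
#define SHINFO_SIZE       320                   /* assumed sizeof(struct skb_shared_info) */

/* Buffer (or page-frag) size for one received frame of the given MTU. */
static size_t rx_buf_size(size_t mtu)
{
	return SKB_DATA_ALIGN(NET_SKB_PAD + ETH_HLEN + mtu) +
	       SKB_DATA_ALIGN(SHINFO_SIZE);
}

int main(void)
{
	printf("MTU 1500 -> RX buffer %zu bytes\n", rx_buf_size(1500));  /* 1920 */
	printf("MTU 9000 -> RX buffer %zu bytes\n", rx_buf_size(9000));  /* 9408 */
	return 0;
}

The core allocation path makes the same two-part computation in __alloc_skb() (lines 228-229 above) before allocating the data area.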