n_frags  45  drivers/net/ethernet/sfc/efx.h  unsigned int n_frags, unsigned int len, u16 flags);
n_frags  48  drivers/net/ethernet/sfc/falcon/efx.h  unsigned int n_frags, unsigned int len, u16 flags);
n_frags  291  drivers/net/ethernet/sfc/falcon/rx.c  unsigned int n_frags)
n_frags  298  drivers/net/ethernet/sfc/falcon/rx.c  } while (--n_frags);
n_frags  303  drivers/net/ethernet/sfc/falcon/rx.c  unsigned int n_frags)
n_frags  307  drivers/net/ethernet/sfc/falcon/rx.c  ef4_recycle_rx_pages(channel, rx_buf, n_frags);
n_frags  309  drivers/net/ethernet/sfc/falcon/rx.c  ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
n_frags  424  drivers/net/ethernet/sfc/falcon/rx.c  unsigned int n_frags, u8 *eh)
n_frags  435  drivers/net/ethernet/sfc/falcon/rx.c  ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
n_frags  451  drivers/net/ethernet/sfc/falcon/rx.c  if (skb_shinfo(skb)->nr_frags == n_frags)
n_frags  458  drivers/net/ethernet/sfc/falcon/rx.c  skb->truesize += n_frags * efx->rx_buffer_truesize;
n_frags  468  drivers/net/ethernet/sfc/falcon/rx.c  unsigned int n_frags,
n_frags  502  drivers/net/ethernet/sfc/falcon/rx.c  if (skb_shinfo(skb)->nr_frags == n_frags)
n_frags  510  drivers/net/ethernet/sfc/falcon/rx.c  n_frags = 0;
n_frags  513  drivers/net/ethernet/sfc/falcon/rx.c  skb->truesize += n_frags * efx->rx_buffer_truesize;
n_frags  524  drivers/net/ethernet/sfc/falcon/rx.c  unsigned int n_frags, unsigned int len, u16 flags)
n_frags  536  drivers/net/ethernet/sfc/falcon/rx.c  if (n_frags == 1) {
n_frags  539  drivers/net/ethernet/sfc/falcon/rx.c  } else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
n_frags  540  drivers/net/ethernet/sfc/falcon/rx.c  unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
n_frags  541  drivers/net/ethernet/sfc/falcon/rx.c  unlikely(len > n_frags * efx->rx_dma_len) ||
n_frags  553  drivers/net/ethernet/sfc/falcon/rx.c  (index + n_frags - 1) & rx_queue->ptr_mask, len,
n_frags  562  drivers/net/ethernet/sfc/falcon/rx.c  ef4_discard_rx_packet(channel, rx_buf, n_frags);
n_frags  566  drivers/net/ethernet/sfc/falcon/rx.c  if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
n_frags  582  drivers/net/ethernet/sfc/falcon/rx.c  if (n_frags > 1) {
n_frags  586  drivers/net/ethernet/sfc/falcon/rx.c  unsigned int tail_frags = n_frags - 1;
n_frags  594  drivers/net/ethernet/sfc/falcon/rx.c  rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
n_frags  600  drivers/net/ethernet/sfc/falcon/rx.c  ef4_recycle_rx_pages(channel, rx_buf, n_frags);
n_frags  606  drivers/net/ethernet/sfc/falcon/rx.c  channel->rx_pkt_n_frags = n_frags;
n_frags  612  drivers/net/ethernet/sfc/falcon/rx.c  unsigned int n_frags)
n_frags  617  drivers/net/ethernet/sfc/falcon/rx.c  skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
n_frags  622  drivers/net/ethernet/sfc/falcon/rx.c  ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
n_frags  291  drivers/net/ethernet/sfc/rx.c  unsigned int n_frags)
n_frags  298  drivers/net/ethernet/sfc/rx.c  } while (--n_frags);
n_frags  303  drivers/net/ethernet/sfc/rx.c  unsigned int n_frags)
n_frags  307  drivers/net/ethernet/sfc/rx.c  efx_recycle_rx_pages(channel, rx_buf, n_frags);
n_frags  309  drivers/net/ethernet/sfc/rx.c  efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
n_frags  412  drivers/net/ethernet/sfc/rx.c  unsigned int n_frags, u8 *eh)
n_frags  423  drivers/net/ethernet/sfc/rx.c  efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
n_frags  440  drivers/net/ethernet/sfc/rx.c  if (skb_shinfo(skb)->nr_frags == n_frags)
n_frags  447  drivers/net/ethernet/sfc/rx.c  skb->truesize += n_frags * efx->rx_buffer_truesize;
n_frags  457  drivers/net/ethernet/sfc/rx.c  unsigned int n_frags,
n_frags  491  drivers/net/ethernet/sfc/rx.c  if (skb_shinfo(skb)->nr_frags == n_frags)
n_frags  499  drivers/net/ethernet/sfc/rx.c  n_frags = 0;
n_frags  502  drivers/net/ethernet/sfc/rx.c  skb->truesize += n_frags * efx->rx_buffer_truesize;
n_frags  513  drivers/net/ethernet/sfc/rx.c  unsigned int n_frags, unsigned int len, u16 flags)
n_frags  525  drivers/net/ethernet/sfc/rx.c  if (n_frags == 1) {
n_frags  528  drivers/net/ethernet/sfc/rx.c  } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
n_frags  529  drivers/net/ethernet/sfc/rx.c  unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
n_frags  530  drivers/net/ethernet/sfc/rx.c  unlikely(len > n_frags * efx->rx_dma_len) ||
n_frags  542  drivers/net/ethernet/sfc/rx.c  (index + n_frags - 1) & rx_queue->ptr_mask, len,
n_frags  551  drivers/net/ethernet/sfc/rx.c  efx_discard_rx_packet(channel, rx_buf, n_frags);
n_frags  555  drivers/net/ethernet/sfc/rx.c  if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
n_frags  571  drivers/net/ethernet/sfc/rx.c  if (n_frags > 1) {
n_frags  575  drivers/net/ethernet/sfc/rx.c  unsigned int tail_frags = n_frags - 1;
n_frags  583  drivers/net/ethernet/sfc/rx.c  rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
n_frags  589  drivers/net/ethernet/sfc/rx.c  efx_recycle_rx_pages(channel, rx_buf, n_frags);
n_frags  595  drivers/net/ethernet/sfc/rx.c  channel->rx_pkt_n_frags = n_frags;
n_frags  601  drivers/net/ethernet/sfc/rx.c  unsigned int n_frags)
n_frags  606  drivers/net/ethernet/sfc/rx.c  skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
n_frags  611  drivers/net/ethernet/sfc/rx.c  efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
n_frags  6532  drivers/net/ethernet/sun/niu.c  u64 n_frags)
n_frags  6537  drivers/net/ethernet/sun/niu.c  (n_frags << TX_DESC_NUM_PTR_SHIFT) |
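
For orientation only: the lines listed at drivers/net/ethernet/sfc/rx.c:525-530 (and their falcon counterparts at 536-541) show efx_rx_packet() rejecting a multi-fragment RX descriptor whose n_frags or total length is inconsistent with the per-buffer DMA size. The standalone C sketch below reproduces just that validation step, assembled from the fragments above; the function name rx_desc_is_sane, the constant values, and the test harness are hypothetical stand-ins, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver values referenced in the listing:
 * EFX_RX_MAX_FRAGS bounds the number of RX buffers per packet, and
 * rx_dma_len is the DMA length of a single RX buffer.
 */
#define EFX_RX_MAX_FRAGS 4
static const unsigned int rx_dma_len = 2048;

/*
 * Sketch of the sanity checks listed at drivers/net/ethernet/sfc/rx.c:525-530:
 * a single-fragment packet passes straight through here, while a
 * multi-fragment packet must fill every fragment except possibly the last.
 */
static bool rx_desc_is_sane(unsigned int n_frags, unsigned int len)
{
	if (n_frags == 1)
		return true;

	if (n_frags > EFX_RX_MAX_FRAGS ||
	    len <= (n_frags - 1) * rx_dma_len ||
	    len > n_frags * rx_dma_len)
		return false;

	return true;
}

int main(void)
{
	/* Three 2048-byte fragments can carry between 4097 and 6144 bytes. */
	printf("%d\n", rx_desc_is_sane(3, 6000));	/* 1: fits in 3 fragments */
	printf("%d\n", rx_desc_is_sane(3, 4000));	/* 0: would fit in 2      */
	printf("%d\n", rx_desc_is_sane(5, 9000));	/* 0: exceeds max frags   */
	return 0;
}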