Lines matching refs: rx_queue
Each entry below shows the source line number, the matched code, and the enclosing function; the trailing "argument" or "local" annotations mark lines where rx_queue is declared as a function parameter or a local variable.

79 efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)  in efx_rx_buf_next()  argument
81 if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask))) in efx_rx_buf_next()
82 return efx_rx_buffer(rx_queue, 0); in efx_rx_buf_next()
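The three matched lines above (79-82) are essentially the whole ring-wrap helper: ptr_mask is the descriptor ring size minus one, so the buffer stored at index ptr_mask is the last slot and its successor is slot 0. A minimal sketch of the complete helper, reconstructed from these lines (the non-wrapping branch, which simply steps to the adjacent array element, sits on an elided line and is an assumption here):

    static inline struct efx_rx_buffer *
    efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
    {
            /* The slot at ptr_mask is the last entry of the power-of-two
             * ring, so advancing past it wraps back to slot 0.
             */
            if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
                    return efx_rx_buffer(rx_queue, 0);
            else
                    return rx_buf + 1;      /* assumed: buffers form a contiguous array */
    }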
109 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) in efx_reuse_page() argument
111 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page()
116 index = rx_queue->page_remove & rx_queue->page_ptr_mask; in efx_reuse_page()
117 page = rx_queue->page_ring[index]; in efx_reuse_page()
121 rx_queue->page_ring[index] = NULL; in efx_reuse_page()
123 if (rx_queue->page_remove != rx_queue->page_add) in efx_reuse_page()
124 ++rx_queue->page_remove; in efx_reuse_page()
128 ++rx_queue->page_recycle_count; in efx_reuse_page()
136 ++rx_queue->page_recycle_failed; in efx_reuse_page()
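Lines 109-136 are the consumer side of the page recycle ring. page_remove is a free-running counter masked down to a ring index; a page is handed back to the refill path only when it can actually be reused, otherwise it is dropped and page_recycle_failed is bumped. A hedged reconstruction, assuming the struct efx_rx_queue fields shown in the listing (the reuse test and the unmap/free on the failure path are on elided lines; page_count(page) == 1 is an assumption about how reuse is gated):

    static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
    {
            struct efx_nic *efx = rx_queue->efx;
            struct page *page;
            unsigned int index;

            /* Consumer side of the page recycle ring: page_remove is a
             * free-running counter, masked down to a ring index.
             */
            index = rx_queue->page_remove & rx_queue->page_ptr_mask;
            page = rx_queue->page_ring[index];
            if (page == NULL)
                    return NULL;

            rx_queue->page_ring[index] = NULL;
            /* The consumer counter must not advance past the producer */
            if (rx_queue->page_remove != rx_queue->page_add)
                    ++rx_queue->page_remove;

            /* Assumption: a page is reusable only when the network stack
             * has dropped its reference and the driver holds the sole one.
             */
            if (page_count(page) == 1) {
                    ++rx_queue->page_recycle_count;
                    return page;
            }

            /* Still referenced elsewhere: unmap and release it
             * (dma_unmap_page()/put_page() elided in the listing).
             */
            ++rx_queue->page_recycle_failed;
            return NULL;
    }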
152 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) in efx_init_rx_buffers() argument
154 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_buffers()
164 page = efx_reuse_page(rx_queue); in efx_init_rx_buffers()
191 index = rx_queue->added_count & rx_queue->ptr_mask; in efx_init_rx_buffers()
192 rx_buf = efx_rx_buffer(rx_queue, index); in efx_init_rx_buffers()
198 ++rx_queue->added_count; in efx_init_rx_buffers()
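Lines 152-198 are the refill helper that turns pages into RX descriptors. The matched lines show the same counter-and-mask addressing as the ring-wrap helper: each new buffer lands at added_count & ptr_mask and the producer counter then advances. A loose sketch of the per-buffer step (page allocation, DMA mapping and the split of one page into several buffers are on elided lines and appear only as comments):

            struct efx_rx_buffer *rx_buf;
            struct page *page;
            unsigned int index;

            /* Reuse a page from the recycle ring if possible; the elided
             * lines fall back to alloc_pages() and dma_map_page() when
             * the ring is empty.
             */
            page = efx_reuse_page(rx_queue);

            /* Place the new buffer in the next free descriptor slot */
            index = rx_queue->added_count & rx_queue->ptr_mask;
            rx_buf = efx_rx_buffer(rx_queue, index);
            /* rx_buf->dma_addr, ->page, ->page_offset and ->len are filled
             * in on elided lines before the producer counter advances.
             */
            ++rx_queue->added_count;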
227 static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, in efx_free_rx_buffers() argument
236 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_free_rx_buffers()
248 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_recycle_rx_page() local
249 struct efx_nic *efx = rx_queue->efx; in efx_recycle_rx_page()
256 index = rx_queue->page_add & rx_queue->page_ptr_mask; in efx_recycle_rx_page()
257 if (rx_queue->page_ring[index] == NULL) { in efx_recycle_rx_page()
258 unsigned read_index = rx_queue->page_remove & in efx_recycle_rx_page()
259 rx_queue->page_ptr_mask; in efx_recycle_rx_page()
266 ++rx_queue->page_remove; in efx_recycle_rx_page()
267 rx_queue->page_ring[index] = page; in efx_recycle_rx_page()
268 ++rx_queue->page_add; in efx_recycle_rx_page()
271 ++rx_queue->page_recycle_full; in efx_recycle_rx_page()
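Lines 248-271 are the producer side of the same recycle ring: the page goes into the slot at page_add & page_ptr_mask only if that slot is currently empty; otherwise the ring is full and page_recycle_full is counted. A sketch assuming the surrounding sfc definitions (the early-return flag check that limits recycling to the last buffer of a page, and the unmap/put_page on the full path, are not in the matched lines and are assumptions):

    static void efx_recycle_rx_page(struct efx_channel *channel,
                                    struct efx_rx_buffer *rx_buf)
    {
            struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
            struct efx_nic *efx = rx_queue->efx;
            struct page *page = rx_buf->page;
            unsigned int index;

            index = rx_queue->page_add & rx_queue->page_ptr_mask;
            if (rx_queue->page_ring[index] == NULL) {
                    unsigned int read_index = rx_queue->page_remove &
                                              rx_queue->page_ptr_mask;

                    /* If the consumer index currently points at this slot,
                     * nudge it forward so producer and consumer stay distinct.
                     */
                    if (read_index == index)
                            ++rx_queue->page_remove;
                    rx_queue->page_ring[index] = page;
                    ++rx_queue->page_add;
                    return;
            }

            /* Recycle ring full: release the page instead (assumed path) */
            ++rx_queue->page_recycle_full;
            efx_unmap_rx_buffer(efx, rx_buf);
            put_page(rx_buf->page);
    }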
276 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, in efx_fini_rx_buffer() argument
285 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); in efx_fini_rx_buffer()
286 efx_free_rx_buffers(rx_queue, rx_buf, 1); in efx_fini_rx_buffer()
296 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_recycle_rx_pages() local
300 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_recycle_rx_pages()
308 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_discard_rx_packet() local
312 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_discard_rx_packet()
327 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) in efx_fast_push_rx_descriptors() argument
329 struct efx_nic *efx = rx_queue->efx; in efx_fast_push_rx_descriptors()
333 if (!rx_queue->refill_enabled) in efx_fast_push_rx_descriptors()
337 fill_level = (rx_queue->added_count - rx_queue->removed_count); in efx_fast_push_rx_descriptors()
338 EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries); in efx_fast_push_rx_descriptors()
339 if (fill_level >= rx_queue->fast_fill_trigger) in efx_fast_push_rx_descriptors()
343 if (unlikely(fill_level < rx_queue->min_fill)) { in efx_fast_push_rx_descriptors()
345 rx_queue->min_fill = fill_level; in efx_fast_push_rx_descriptors()
349 space = rx_queue->max_fill - fill_level; in efx_fast_push_rx_descriptors()
352 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
355 efx_rx_queue_index(rx_queue), fill_level, in efx_fast_push_rx_descriptors()
356 rx_queue->max_fill); in efx_fast_push_rx_descriptors()
360 rc = efx_init_rx_buffers(rx_queue, atomic); in efx_fast_push_rx_descriptors()
363 if (rx_queue->added_count == rx_queue->removed_count) in efx_fast_push_rx_descriptors()
364 efx_schedule_slow_fill(rx_queue); in efx_fast_push_rx_descriptors()
369 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
371 "to level %d\n", efx_rx_queue_index(rx_queue), in efx_fast_push_rx_descriptors()
372 rx_queue->added_count - rx_queue->removed_count); in efx_fast_push_rx_descriptors()
375 if (rx_queue->notified_count != rx_queue->added_count) in efx_fast_push_rx_descriptors()
376 efx_nic_notify_rx_desc(rx_queue); in efx_fast_push_rx_descriptors()
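Lines 327-376 show the fast refill entry point. The fill level is simply the difference of two free-running counters (added_count - removed_count); nothing happens above fast_fill_trigger, refill stops at max_fill, and if allocation fails while the ring is completely empty the slow-fill timer takes over. A condensed sketch of that control flow, reconstructed from the matched lines (the fields used for the batch size, and the omission of the debug/paranoia macros, are assumptions):

    void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
    {
            struct efx_nic *efx = rx_queue->efx;
            unsigned int fill_level, batch_size;
            int space, rc = 0;

            if (!rx_queue->refill_enabled)
                    return;

            /* Current occupancy: producer counter minus consumer counter */
            fill_level = rx_queue->added_count - rx_queue->removed_count;
            if (fill_level >= rx_queue->fast_fill_trigger)
                    goto out;

            /* Record the low-water mark for statistics */
            if (unlikely(fill_level < rx_queue->min_fill) && fill_level)
                    rx_queue->min_fill = fill_level;

            /* One efx_init_rx_buffers() call adds a page-batch of buffers;
             * the field names used for the batch size are assumptions.
             */
            batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
            space = rx_queue->max_fill - fill_level;

            do {
                    rc = efx_init_rx_buffers(rx_queue, atomic);
                    if (unlikely(rc)) {
                            /* Out of memory: if the ring is completely empty,
                             * fall back to the slow-fill timer.
                             */
                            if (rx_queue->added_count == rx_queue->removed_count)
                                    efx_schedule_slow_fill(rx_queue);
                            goto out;
                    }
            } while ((space -= batch_size) >= batch_size);

     out:
            /* Tell the NIC about any descriptors it has not yet been told about */
            if (rx_queue->notified_count != rx_queue->added_count)
                    efx_nic_notify_rx_desc(rx_queue);
    }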
381 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; in efx_rx_slow_fill() local
384 efx_nic_generate_fill_event(rx_queue); in efx_rx_slow_fill()
385 ++rx_queue->slow_fill_count; in efx_rx_slow_fill()
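Lines 381-385 are the timer callback behind efx_schedule_slow_fill(): rather than allocating from timer context, it posts a fill event so the refill is retried from the channel's normal event processing, and counts how often that happened. A sketch (the callback signature matches the cast on line 381; newer kernels pass a struct timer_list pointer instead):

    void efx_rx_slow_fill(unsigned long context)
    {
            struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

            /* Post a fill event; the actual refill runs later in NAPI context */
            efx_nic_generate_fill_event(rx_queue);
            ++rx_queue->slow_fill_count;
    }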
388 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, in efx_rx_packet__check_len() argument
392 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet__check_len()
408 efx_rx_queue_index(rx_queue), len, max_len, in efx_rx_packet__check_len()
416 efx_rx_queue_index(rx_queue), len, max_len); in efx_rx_packet__check_len()
419 efx_rx_queue_channel(rx_queue)->n_rx_overlength++; in efx_rx_packet__check_len()
436 struct efx_rx_queue *rx_queue; in efx_rx_packet_gro() local
438 rx_queue = efx_channel_get_rx_queue(channel); in efx_rx_packet_gro()
439 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_rx_packet_gro()
458 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_rx_packet_gro()
464 skb_record_rx_queue(skb, channel->rx_queue.core_index); in efx_rx_packet_gro()
512 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_rx_mk_skb()
530 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, in efx_rx_packet() argument
533 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet()
534 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); in efx_rx_packet()
537 rx_queue->rx_packets++; in efx_rx_packet()
539 rx_buf = efx_rx_buffer(rx_queue, index); in efx_rx_packet()
545 efx_rx_packet__check_len(rx_queue, rx_buf, len); in efx_rx_packet()
559 efx_rx_queue_index(rx_queue), index, in efx_rx_packet()
560 (index + n_frags - 1) & rx_queue->ptr_mask, len, in efx_rx_packet()
596 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_rx_packet()
606 rx_buf = efx_rx_buffer(rx_queue, index); in efx_rx_packet()
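Lines 530-606 are the event-handling entry point for a received packet. The matched lines show how a multi-fragment packet maps onto the ring: it occupies n_frags consecutive slots starting at index, the last of which is (index + n_frags - 1) & ptr_mask, and the fragments are walked with efx_rx_buf_next(). A loose paraphrase of that walk (length checks are more nuanced in the real code, and checksum/discard handling, DMA syncing and prefetching sit on elided lines):

            rx_queue->rx_packets++;
            rx_buf = efx_rx_buffer(rx_queue, index);

            /* Single-fragment packets get the length sanity check */
            if (n_frags == 1)
                    efx_rx_packet__check_len(rx_queue, rx_buf, len);

            /* A multi-fragment packet occupies n_frags consecutive ring
             * slots, the last being (index + n_frags - 1) & rx_queue->ptr_mask.
             * Each fragment's DMA mapping is synced in order (sync calls elided).
             */
            if (n_frags > 1) {
                    unsigned int i;

                    for (i = 1; i < n_frags; i++)
                            rx_buf = efx_rx_buf_next(rx_queue, rx_buf);

                    /* Point back at the head fragment before handing off */
                    rx_buf = efx_rx_buffer(rx_queue, index);
            }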
626 struct efx_rx_queue *rx_queue; in efx_rx_deliver() local
628 rx_queue = efx_channel_get_rx_queue(channel); in efx_rx_deliver()
629 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_rx_deliver()
632 skb_record_rx_queue(skb, channel->rx_queue.core_index); in efx_rx_deliver()
654 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); in __efx_rx_packet()
668 struct efx_rx_queue *rx_queue; in __efx_rx_packet() local
671 rx_queue = efx_channel_get_rx_queue(channel); in __efx_rx_packet()
672 efx_free_rx_buffers(rx_queue, rx_buf, in __efx_rx_packet()
689 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) in efx_probe_rx_queue() argument
691 struct efx_nic *efx = rx_queue->efx; in efx_probe_rx_queue()
698 rx_queue->ptr_mask = entries - 1; in efx_probe_rx_queue()
702 efx_rx_queue_index(rx_queue), efx->rxq_entries, in efx_probe_rx_queue()
703 rx_queue->ptr_mask); in efx_probe_rx_queue()
706 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), in efx_probe_rx_queue()
708 if (!rx_queue->buffer) in efx_probe_rx_queue()
711 rc = efx_nic_probe_rx(rx_queue); in efx_probe_rx_queue()
713 kfree(rx_queue->buffer); in efx_probe_rx_queue()
714 rx_queue->buffer = NULL; in efx_probe_rx_queue()
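Lines 689-714 allocate the software side of the descriptor ring. The ring size must be a power of two so that ptr_mask = entries - 1 turns the free-running counters into indices; the rounding and min/max clamps are on elided lines. A sketch of the probe path (roundup_pow_of_two() standing in for the elided size computation is an assumption):

    int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
    {
            struct efx_nic *efx = rx_queue->efx;
            unsigned int entries;
            int rc;

            /* Power-of-two ring size; min/max clamping elided */
            entries = roundup_pow_of_two(efx->rxq_entries);
            rx_queue->ptr_mask = entries - 1;

            /* Software state for every descriptor slot */
            rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                       GFP_KERNEL);
            if (!rx_queue->buffer)
                    return -ENOMEM;

            /* Hardware descriptor ring */
            rc = efx_nic_probe_rx(rx_queue);
            if (rc) {
                    kfree(rx_queue->buffer);
                    rx_queue->buffer = NULL;
            }
            return rc;
    }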
721 struct efx_rx_queue *rx_queue) in efx_init_rx_recycle_ring() argument
737 rx_queue->page_ring = kcalloc(page_ring_size, in efx_init_rx_recycle_ring()
738 sizeof(*rx_queue->page_ring), GFP_KERNEL); in efx_init_rx_recycle_ring()
739 rx_queue->page_ptr_mask = page_ring_size - 1; in efx_init_rx_recycle_ring()
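Lines 721-739 size and allocate the page recycle ring. The matched lines show only the allocation and the mask; the sizing (how many buffers are worth recycling, divided by buffers per page and rounded up to a power of two) is on elided lines. A sketch with a hypothetical bufs_in_recycle_ring value and an assumed efx->rx_bufs_per_page field standing in for that computation:

    static void efx_init_rx_recycle_ring(struct efx_nic *efx,
                                         struct efx_rx_queue *rx_queue)
    {
            unsigned int bufs_in_recycle_ring, page_ring_size;

            /* Hypothetical constant; the real value depends on how
             * expensive DMA mapping is on this platform (e.g. IOMMU).
             */
            bufs_in_recycle_ring = 4096;

            /* Power-of-two ring so page_add/page_remove can be masked */
            page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                                efx->rx_bufs_per_page);
            rx_queue->page_ring = kcalloc(page_ring_size,
                                          sizeof(*rx_queue->page_ring),
                                          GFP_KERNEL);
            rx_queue->page_ptr_mask = page_ring_size - 1;
    }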
742 void efx_init_rx_queue(struct efx_rx_queue *rx_queue) in efx_init_rx_queue() argument
744 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_queue()
747 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_init_rx_queue()
748 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_init_rx_queue()
751 rx_queue->added_count = 0; in efx_init_rx_queue()
752 rx_queue->notified_count = 0; in efx_init_rx_queue()
753 rx_queue->removed_count = 0; in efx_init_rx_queue()
754 rx_queue->min_fill = -1U; in efx_init_rx_queue()
755 efx_init_rx_recycle_ring(efx, rx_queue); in efx_init_rx_queue()
757 rx_queue->page_remove = 0; in efx_init_rx_queue()
758 rx_queue->page_add = rx_queue->page_ptr_mask + 1; in efx_init_rx_queue()
759 rx_queue->page_recycle_count = 0; in efx_init_rx_queue()
760 rx_queue->page_recycle_failed = 0; in efx_init_rx_queue()
761 rx_queue->page_recycle_full = 0; in efx_init_rx_queue()
775 rx_queue->max_fill = max_fill; in efx_init_rx_queue()
776 rx_queue->fast_fill_trigger = trigger; in efx_init_rx_queue()
777 rx_queue->refill_enabled = true; in efx_init_rx_queue()
780 efx_nic_init_rx(rx_queue); in efx_init_rx_queue()
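Lines 742-780 reset the queue's software state before the hardware ring is initialised. Note how page_add starts at page_ptr_mask + 1 rather than 0, keeping the producer and consumer counters distinct from the outset. A sketch of the body as implied by the matched lines (max_fill and fast_fill_trigger are declared and computed on elided lines, so they are only marked here):

            /* Descriptor-ring counters all restart from zero */
            rx_queue->added_count = 0;
            rx_queue->notified_count = 0;
            rx_queue->removed_count = 0;
            rx_queue->min_fill = -1U;       /* "no minimum recorded yet" */

            /* Page recycle ring: producer counter starts a full ring ahead
             * of the consumer, and all statistics restart from zero.
             */
            efx_init_rx_recycle_ring(efx, rx_queue);
            rx_queue->page_remove = 0;
            rx_queue->page_add = rx_queue->page_ptr_mask + 1;
            rx_queue->page_recycle_count = 0;
            rx_queue->page_recycle_failed = 0;
            rx_queue->page_recycle_full = 0;

            /* max_fill and fast_fill_trigger are derived from the ring
             * size and the refill batch size on elided lines.
             */
            rx_queue->max_fill = max_fill;
            rx_queue->fast_fill_trigger = trigger;
            rx_queue->refill_enabled = true;

            /* Finally set up the hardware descriptor ring */
            efx_nic_init_rx(rx_queue);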
783 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) in efx_fini_rx_queue() argument
786 struct efx_nic *efx = rx_queue->efx; in efx_fini_rx_queue()
789 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_fini_rx_queue()
790 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_fini_rx_queue()
792 del_timer_sync(&rx_queue->slow_fill); in efx_fini_rx_queue()
795 if (rx_queue->buffer) { in efx_fini_rx_queue()
796 for (i = rx_queue->removed_count; i < rx_queue->added_count; in efx_fini_rx_queue()
798 unsigned index = i & rx_queue->ptr_mask; in efx_fini_rx_queue()
799 rx_buf = efx_rx_buffer(rx_queue, index); in efx_fini_rx_queue()
800 efx_fini_rx_buffer(rx_queue, rx_buf); in efx_fini_rx_queue()
805 for (i = 0; i <= rx_queue->page_ptr_mask; i++) { in efx_fini_rx_queue()
806 struct page *page = rx_queue->page_ring[i]; in efx_fini_rx_queue()
818 kfree(rx_queue->page_ring); in efx_fini_rx_queue()
819 rx_queue->page_ring = NULL; in efx_fini_rx_queue()
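Lines 783-819 tear the queue down in reverse order: stop the slow-fill timer, release every buffer between the consumer and producer counters, then drain the page recycle ring and free it. A condensed sketch (the per-page DMA unmapping on the elided lines is reduced to a comment):

            struct efx_rx_buffer *rx_buf;
            unsigned int i;

            del_timer_sync(&rx_queue->slow_fill);

            /* Release every RX buffer the hardware still owns */
            if (rx_queue->buffer) {
                    for (i = rx_queue->removed_count; i < rx_queue->added_count;
                         i++) {
                            unsigned int index = i & rx_queue->ptr_mask;

                            rx_buf = efx_rx_buffer(rx_queue, index);
                            efx_fini_rx_buffer(rx_queue, rx_buf);
                    }
            }

            /* Drain the page recycle ring, then free the ring itself */
            for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                    struct page *page = rx_queue->page_ring[i];

                    if (page)
                            put_page(page); /* dma_unmap_page() elided */
            }
            kfree(rx_queue->page_ring);
            rx_queue->page_ring = NULL;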
822 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) in efx_remove_rx_queue() argument
824 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_remove_rx_queue()
825 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_remove_rx_queue()
827 efx_nic_remove_rx(rx_queue); in efx_remove_rx_queue()
829 kfree(rx_queue->buffer); in efx_remove_rx_queue()
830 rx_queue->buffer = NULL; in efx_remove_rx_queue()