Lines matching references to "ring" in the mlx4_en TX path (drivers/net/ethernet/mellanox/mlx4/en_tx.c), grouped by function; the number on each line is its line number in that file.

mlx4_en_create_tx_ring() (ring is a local variable):
  53  struct mlx4_en_tx_ring *ring;
  57  ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
  58  if (!ring) {
  59  ring = kzalloc(sizeof(*ring), GFP_KERNEL);
  60  if (!ring) {
  66  ring->size = size;
  67  ring->size_mask = size - 1;
  68  ring->stride = stride;
  69  ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
  72  ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
  73  if (!ring->tx_info) {
  74  ring->tx_info = vmalloc(tmp);
  75  if (!ring->tx_info) {
  82  ring->tx_info, tmp);
  84  ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
  85  if (!ring->bounce_buf) {
  86  ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
  87  if (!ring->bounce_buf) {
  92  ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
  96  err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
 104  err = mlx4_en_map_buffer(&ring->wqres.buf);
 110  ring->buf = ring->wqres.buf.direct.buf;
 113  ring, ring->buf, ring->size, ring->buf_size,
 114  (unsigned long long) ring->wqres.buf.direct.map);
 116  err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
 123  err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
 125  en_err(priv, "Failed allocating qp %d\n", ring->qpn);
 128  ring->qp.event = mlx4_en_sqp_event;
 130  err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
 133  ring->bf.uar = &mdev->priv_uar;
 134  ring->bf.uar->map = mdev->uar_map;
 135  ring->bf_enabled = false;
 136  ring->bf_alloced = false;
 139  ring->bf_alloced = true;
 140  ring->bf_enabled = !!(priv->pflags &
 144  ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
 145  ring->queue_index = queue_index;
 150  &ring->affinity_mask);
 152  *pring = ring;
 156  mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
 158  mlx4_en_unmap_buffer(&ring->wqres.buf);
 160  mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 162  kfree(ring->bounce_buf);
 163  ring->bounce_buf = NULL;
 165  kvfree(ring->tx_info);
 166  ring->tx_info = NULL;
 168  kfree(ring);
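
The allocation pattern above tries node-local memory first and falls back when that fails: kzalloc_node()/kmalloc_node() with __GFP_NOWARN, then plain kzalloc(), kmalloc() or vmalloc(), and kvfree() on the error/teardown path so either result can be freed. A minimal sketch of the same pattern, using a hypothetical array rather than the driver's tx_info:

    /* Sketch only: prefer memory on the given NUMA node, fall back to any node.
     * "entries" and "count" are illustrative, not the driver's fields. */
    entries = kmalloc_node(count * sizeof(*entries), GFP_KERNEL | __GFP_NOWARN, node);
    if (!entries) {
            entries = vmalloc(count * sizeof(*entries));    /* any node, possibly vmapped */
            if (!entries)
                    return -ENOMEM;
    }
    /* ... */
    kvfree(entries);    /* correct for both the kmalloc and the vmalloc result */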

mlx4_en_destroy_tx_ring() (ring is a local variable):
 177  struct mlx4_en_tx_ring *ring = *pring;
 178  en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 180  if (ring->bf_alloced)
 181  mlx4_bf_free(mdev->dev, &ring->bf);
 182  mlx4_qp_remove(mdev->dev, &ring->qp);
 183  mlx4_qp_free(mdev->dev, &ring->qp);
 184  mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
 185  mlx4_en_unmap_buffer(&ring->wqres.buf);
 186  mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 187  kfree(ring->bounce_buf);
 188  ring->bounce_buf = NULL;
 189  kvfree(ring->tx_info);
 190  ring->tx_info = NULL;
 191  kfree(ring);

mlx4_en_activate_tx_ring() (ring is a function argument):
 196  struct mlx4_en_tx_ring *ring,
 202  ring->cqn = cq;
 203  ring->prod = 0;
 204  ring->cons = 0xffffffff;
 205  ring->last_nr_txbb = 1;
 206  memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
 207  memset(ring->buf, 0, ring->buf_size);
 209  ring->qp_state = MLX4_QP_STATE_RST;
 210  ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8);
 211  ring->mr_key = cpu_to_be32(mdev->mr.key);
 213  mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
 214  ring->cqn, user_prio, &ring->context);
 215  if (ring->bf_alloced)
 216  ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
 218  err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 219  &ring->qp, &ring->qp_state);
 220  if (!cpumask_empty(&ring->affinity_mask))
 221  netif_set_xps_queue(priv->dev, &ring->affinity_mask,
 222  ring->queue_index);

mlx4_en_deactivate_tx_ring() (ring is a function argument):
 228  struct mlx4_en_tx_ring *ring)
 232  mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
 233  MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);

mlx4_en_is_tx_ring_full() (ring is a function argument):
 236  static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
 238  return ring->prod - ring->cons > ring->full_size;
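
prod and cons are free-running u32 counters that are never masked before this comparison, so the fullness test relies on unsigned wraparound: ring->prod - ring->cons is the number of in-flight TXBBs even after either counter wraps, as long as the ring is far smaller than 2^32 entries. A small sketch of that invariant with illustrative names:

    /* Sketch: wrap-safe occupancy of a ring driven by free-running u32 counters. */
    static inline u32 ring_used(u32 prod, u32 cons)
    {
            return prod - cons;                         /* unsigned subtraction is wrap-safe */
    }

    static inline bool ring_full(u32 prod, u32 cons, u32 full_size)
    {
            return ring_used(prod, cons) > full_size;   /* same shape as mlx4_en_is_tx_ring_full() */
    }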

mlx4_en_stamp_wqe() (ring is a function argument):
 242  struct mlx4_en_tx_ring *ring, int index,
 246  struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
 247  struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
 248  void *end = ring->buf + ring->buf_size;
 267  ptr = ring->buf;

mlx4_en_free_tx_desc() (ring is a function argument):
 276  struct mlx4_en_tx_ring *ring,
 279  struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
 280  struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
 282  void *end = ring->buf + ring->buf_size;
 323  data = ring->buf + ((void *)data - end);
 340  data = ring->buf;
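
Both mlx4_en_stamp_wqe() and mlx4_en_free_tx_desc() walk descriptors that can themselves wrap past the end of the ring buffer, so a pointer that reaches end is folded back to ring->buf, as in lines 323 and 340 above. A sketch of that fold, with illustrative names:

    /* Sketch: fold a pointer back to the start of a circular buffer once it
     * walks past the end ("buf", "end" and "p" are illustrative). */
    if ((void *)p >= end)
            p = buf + ((void *)p - end);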

mlx4_en_free_tx_buf() (ring is a function argument):
 353  int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 359  ring->cons += ring->last_nr_txbb;
 361  ring->cons, ring->prod);
 363  if ((u32) (ring->prod - ring->cons) > ring->size) {
 369  while (ring->cons != ring->prod) {
 370  ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
 371  ring->cons & ring->size_mask,
 372  !!(ring->cons & ring->size), 0);
 373  ring->cons += ring->last_nr_txbb;
 377  netdev_tx_reset_queue(ring->tx_queue);
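
Because the ring size is a power of two (size_mask = size - 1 in the create path), the free-running counter is turned into a slot index with a mask, and the next bit up (counter & size) flips once per lap around the ring; the transmit path uses the same bit when it builds the descriptor owner bit (the (ring->prod & ring->size) tests in mlx4_en_xmit() below). A sketch of that mapping with illustrative names:

    /* Sketch: power-of-two ring indexing from a free-running counter. */
    u32 slot  = counter & size_mask;     /* TXBB index inside the ring                 */
    int owner = !!(counter & size);      /* flips each time the counter laps the ring  */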

mlx4_en_process_tx_cq() (ring is a local variable):
 390  struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
 398  u32 size_mask = ring->size_mask;
 411  netdev_txq_bql_complete_prefetchw(ring->tx_queue);
 415  last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
 416  ring_cons = ACCESS_ONCE(ring->cons);
 447  if (unlikely(ring->tx_info[ring_index].ts_requested))
 452  priv, ring, ring_index,
 454  ring->size), timestamp);
 456  mlx4_en_stamp_wqe(priv, ring, stamp_index,
 458  ring->size));
 462  bytes += ring->tx_info[ring_index].nr_bytes;
 480  ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
 481  ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
 483  netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 487  if (netif_tx_queue_stopped(ring->tx_queue) &&
 488  !mlx4_en_is_tx_ring_full(ring)) {
 489  netif_tx_wake_queue(ring->tx_queue);
 490  ring->wake_queue++;
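
The completion handler publishes the new consumer state, feeds byte/packet counts to BQL via netdev_tx_completed_queue(), and wakes the software queue only if the transmit path had stopped it and the ring has drained below full_size. A minimal sketch of that stop/wake pairing, reusing the ring_full() sketch above (error paths and memory-ordering details omitted):

    /* Transmit side (see mlx4_en_xmit() below): stop once the ring is full. */
    if (ring_full(prod, cons, full_size)) {
            netif_tx_stop_queue(txq);
            queue_stopped++;
    }

    /* Completion side: report finished work to BQL, then wake if there is room again. */
    netdev_tx_completed_queue(txq, packets, bytes);
    if (netif_tx_queue_stopped(txq) && !ring_full(prod, cons, full_size)) {
            netif_tx_wake_queue(txq);
            wake_queue++;
    }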

mlx4_en_bounce_to_desc() (ring is a function argument):
 525  struct mlx4_en_tx_ring *ring,
 529  u32 copy = (ring->size - index) * TXBB_SIZE;
 536  *((u32 *) (ring->buf + i)) =
 537  *((u32 *) (ring->bounce_buf + copy + i));
 544  *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
 545  *((u32 *) (ring->bounce_buf + i));
 549  return ring->buf + index * TXBB_SIZE;
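
mlx4_en_xmit() builds a descriptor in the linear bounce_buf whenever index + nr_txbb would run past ring->size (lines 773-776 below); mlx4_en_bounce_to_desc() then copies it into the ring in two pieces, the part that wrapped to the start of the buffer and the part that still fits at the tail. A sketch of that split, using memcpy() for brevity where the driver copies u32 words in a deliberate order:

    /* Sketch only: place a descriptor built in a linear bounce buffer into a
     * ring slot that wraps past the end of the ring ("buf", "bounce", "index"
     * and "desc_size" are illustrative; assumes the descriptor really wraps). */
    u32 tail = (size - index) * TXBB_SIZE;            /* bytes before the end of the ring  */

    memcpy(buf, bounce + tail, desc_size - tail);     /* wrapped part goes to the start    */
    memcpy(buf + index * TXBB_SIZE, bounce, tail);    /* leading part stays at the tail    */
    tx_desc = buf + index * TXBB_SIZE;                /* descriptor begins at its own slot */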

mlx4_en_xmit() (ring is a local variable):
 712  struct mlx4_en_tx_ring *ring;
 737  ring = priv->tx_ring[tx_ind];
 740  ring_cons = ACCESS_ONCE(ring->cons);
 761  netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
 765  (u32)(ring->prod - ring_cons - 1));
 768  index = ring->prod & ring->size_mask;
 769  bf_index = ring->prod;
 773  if (likely(index + nr_txbb <= ring->size))
 774  tx_desc = ring->buf + index * TXBB_SIZE;
 776  tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
 781  tx_info = &ring->tx_info[index];
 818  data->lkey = ring->mr_key;
 835  data->lkey = ring->mr_key;
 849  if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON &&
 864  ring->tx_csum++;
 884  ((ring->prod & ring->size) ?
 895  ring->tso_packets++;
 900  ring->packets += i;
 904  ((ring->prod & ring->size) ?
 907  ring->packets++;
 909  ring->bytes += tx_info->nr_bytes;
 910  netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
 925  ring->prod += nr_txbb;
 929  tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
 934  stop_queue = mlx4_en_is_tx_ring_full(ring);
 936  netif_tx_stop_queue(ring->tx_queue);
 937  ring->queue_stopped++;
 939  send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
 943  if (ring->bf_enabled && desc_size <= MAX_BF && !bounce &&
 945  tx_desc->ctrl.bf_qpn = ring->doorbell_qpn |
 957  mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl,
 962  ring->bf.offset ^= ring->bf.buf_size;
 991  ring->doorbell_qpn,
 992  ring->bf.uar->map + MLX4_SEND_DOORBELL);
 994  ring->xmit_more++;
1007  ring_cons = ACCESS_ONCE(ring->cons);
1008  if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
1009  netif_tx_wake_queue(ring->tx_queue);
1010  ring->wake_queue++;
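
At the end of mlx4_en_xmit() the hardware doorbell is only rung when the stack signals the end of a batch (skb->xmit_more clear in this kernel generation; newer kernels use netdev_xmit_more()) or when the queue was just stopped; otherwise the write is skipped and counted in ring->xmit_more so a later packet flushes the batch. A minimal sketch of that decision, assuming an iowrite32()-style doorbell write ("txq" and "db_reg" are illustrative; the exact call is not among the matching lines above):

    /* Sketch: defer the hardware doorbell while the stack is still queueing
     * more packets for this TX queue. */
    bool send_doorbell = !skb->xmit_more || netif_xmit_stopped(txq);

    if (send_doorbell)
            iowrite32(ring->doorbell_qpn, db_reg);   /* e.g. ring->bf.uar->map + MLX4_SEND_DOORBELL */
    else
            ring->xmit_more++;                       /* doorbell left for a later skb in the batch */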