This source file includes the following definitions:
- vnic_sdma_complete
- build_vnic_ulp_payload
- build_vnic_tx_desc
- hfi1_vnic_update_pad
- hfi1_vnic_send_dma
- hfi1_vnic_sdma_sleep
- hfi1_vnic_sdma_wakeup
- hfi1_vnic_sdma_write_avail
- hfi1_vnic_sdma_init
- hfi1_vnic_txreq_init
- hfi1_vnic_txreq_deinit
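
For orientation, the sketch below shows how a transmit handler might drive these entry points. It is a minimal illustration only: the example_xmit name, its parameters, and the assumption that the caller already holds the PBC value and pad length are not part of this file.

static netdev_tx_t example_xmit(struct hfi1_devdata *dd,
				struct hfi1_vnic_vport_info *vinfo,
				struct sk_buff *skb, u8 q_idx,
				u64 pbc, u8 plen)
{
	/* Hypothetical caller; only the hfi1_vnic_* calls below are real. */
	int err;

	/* The queue must be in the ACTIVE state before attempting a send. */
	if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
		return NETDEV_TX_BUSY;

	err = hfi1_vnic_send_dma(dd, q_idx, vinfo, skb, pbc, plen);
	if (err == -EBUSY) {
		/* skb was not freed; the subqueue should be stopped and
		 * will be re-enabled by hfi1_vnic_sdma_wakeup().
		 */
		return NETDEV_TX_BUSY;
	}

	/* On success the skb is freed by vnic_sdma_complete() once the
	 * DMA finishes; on any other error it has already been freed.
	 */
	return NETDEV_TX_OK;
}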

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN 32
#define HFI1_VNIC_SDMA_DESC_WTRMRK 64

/*
 * struct vnic_txreq - VNIC SDMA transmit request
 * @txreq: sdma transmit request
 * @sdma: vnic sdma pointer
 * @skb: skb to send
 * @pad: pad buffer
 * @plen: pad length
 * @pbc_val: pbc value
 */
struct vnic_txreq {
	struct sdma_txreq txreq;
	struct hfi1_vnic_sdma *sdma;

	struct sk_buff *skb;
	unsigned char pad[HFI1_VNIC_MAX_PAD];
	u16 plen;
	__le64 pbc_val;
};

/* SDMA completion callback: release DMA mappings, free the skb and txreq */
static void vnic_sdma_complete(struct sdma_txreq *txreq,
			       int status)
{
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
	struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

	sdma_txclean(vnic_sdma->dd, txreq);
	dev_kfree_skb_any(tx->skb);
	kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}

static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
					   struct vnic_txreq *tx)
{
	int i, ret = 0;

	/* linear (header) portion of the skb */
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		tx->skb->data,
		skb_headlen(tx->skb));
	if (unlikely(ret))
		goto bail_txadd;

	/* add each page fragment of the skb */
	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];

		ret = sdma_txadd_page(sde->dd,
				      &tx->txreq,
				      skb_frag_page(frag),
				      skb_frag_off(frag),
				      skb_frag_size(frag));
		if (unlikely(ret))
			goto bail_txadd;
	}

	/* trailing pad bytes, if any */
	if (tx->plen)
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
					tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
					tx->plen);

bail_txadd:
	return ret;
}

static int build_vnic_tx_desc(struct sdma_engine *sde,
			      struct vnic_txreq *tx,
			      u64 pbc)
{
	int ret = 0;
	u16 hdrbytes = 2 << 2; /* PBC */

	ret = sdma_txinit_ahg(
		&tx->txreq,
		0,
		hdrbytes + tx->skb->len + tx->plen,
		0,
		0,
		NULL,
		0,
		vnic_sdma_complete);
	if (unlikely(ret))
		goto bail_txadd;

	/* add pbc */
	tx->pbc_val = cpu_to_le64(pbc);
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		&tx->pbc_val,
		hdrbytes);
	if (unlikely(ret))
		goto bail_txadd;

	/* add the ulp payload */
	ret = build_vnic_ulp_payload(sde, tx);
bail_txadd:
	return ret;
}

/* setup the last plen bytes of pad */
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
	pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}

int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
		       struct hfi1_vnic_vport_info *vinfo,
		       struct sk_buff *skb, u64 pbc, u8 plen)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
	struct sdma_engine *sde = vnic_sdma->sde;
	struct vnic_txreq *tx;
	int ret = -ECOMM;

	if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
		goto tx_err;

	if (unlikely(!sde || !sdma_running(sde)))
		goto tx_err;

	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
	if (unlikely(!tx)) {
		ret = -ENOMEM;
		goto tx_err;
	}

	tx->sdma = vnic_sdma;
	tx->skb = skb;
	hfi1_vnic_update_pad(tx->pad, plen);
	tx->plen = plen;
	ret = build_vnic_tx_desc(sde, tx, pbc);
	if (unlikely(ret))
		goto free_desc;

	ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
			      &tx->txreq, vnic_sdma->pkts_sent);
	/* When -ECOMM, sdma callback will be called with ABORT status */
	if (unlikely(ret && ret != -ECOMM))
		goto free_desc;

	if (!ret) {
		vnic_sdma->pkts_sent = true;
		iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
	}
	return ret;

free_desc:
	sdma_txclean(dd, &tx->txreq);
	kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
	if (ret != -EBUSY)
		dev_kfree_skb_any(skb);
	else
		vnic_sdma->pkts_sent = false;
	return ret;
}

/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list so it can be woken up when
 * descriptors become available.
 */
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
				struct iowait_work *wait,
				struct sdma_txreq *txreq,
				uint seq,
				bool pkts_sent)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait->iow, struct hfi1_vnic_sdma, wait);

	write_seqlock(&sde->waitlock);
	if (sdma_progress(sde, seq, txreq)) {
		write_sequnlock(&sde->waitlock);
		return -EAGAIN;
	}

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
	if (list_empty(&vnic_sdma->wait.list)) {
		iowait_get_priority(wait->iow);
		iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
	}
	write_sequnlock(&sde->waitlock);
	return -EBUSY;
}

/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's
 * dmawait list. It marks the queue active again and wakes the stopped
 * netdev subqueue.
 */
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
	if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
		netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}

inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
				       u8 q_idx)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

	return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}

void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

		iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
			    hfi1_vnic_sdma_sleep,
			    hfi1_vnic_sdma_wakeup, NULL, NULL);
		vnic_sdma->sde = &vinfo->dd->per_sdma[i];
		vnic_sdma->dd = vinfo->dd;
		vnic_sdma->vinfo = vinfo;
		vnic_sdma->q_idx = i;
		vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

		/* Add a free descriptor watermark for wakeups */
		if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
			struct iowait_work *work;

			INIT_LIST_HEAD(&vnic_sdma->stx.list);
			vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
			work = iowait_get_ib_work(&vnic_sdma->wait);
			list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
		}
	}
}

int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
	char buf[HFI1_VNIC_TXREQ_NAME_LEN];

	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
	dd->vnic.txreq_cache = kmem_cache_create(buf,
						 sizeof(struct vnic_txreq),
						 0, SLAB_HWCACHE_ALIGN,
						 NULL);
	if (!dd->vnic.txreq_cache)
		return -ENOMEM;
	return 0;
}

void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
	kmem_cache_destroy(dd->vnic.txreq_cache);
	dd->vnic.txreq_cache = NULL;
}
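
The txreq cache above is per device, while the iowait state set up by hfi1_vnic_sdma_init() is per vport queue. A minimal sketch of the expected pairing, assuming setup and teardown hooks that live outside this file (the example_* names are illustrative only):

static int example_vnic_setup(struct hfi1_devdata *dd,
			      struct hfi1_vnic_vport_info *vinfo)
{
	int err;

	/* Create the per-device txreq slab before any sends can happen. */
	err = hfi1_vnic_txreq_init(dd);
	if (err)
		return err;

	/* Initialize per-queue iowait/engine state for this vport. */
	hfi1_vnic_sdma_init(vinfo);
	return 0;
}

static void example_vnic_teardown(struct hfi1_devdata *dd)
{
	/* Only safe once all outstanding transmits have completed. */
	hfi1_vnic_txreq_deinit(dd);
}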