This source file includes the following definitions:
- fill_static_params_ctx
- build_static_params
- fill_progress_params_ctx
- build_progress_params
- tx_fill_wi
- mlx5e_ktls_tx_offload_set_pending
- mlx5e_ktls_tx_offload_test_and_clear_pending
- post_static_params
- post_progress_params
- mlx5e_ktls_tx_post_param_wqes
- tx_sync_info_get
- tx_post_resync_params
- tx_post_resync_dump
- mlx5e_ktls_tx_handle_resync_dump_comp
- tx_post_fence_nop
- mlx5e_ktls_tx_handle_ooo
- mlx5e_ktls_handle_tx_skb
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/tls.h>
#include "en.h"
#include "en/txrx.h"
#include "en_accel/ktls.h"

enum {
        MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
};

enum {
        MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
};
#define EXTRACT_INFO_FIELDS do { \
        salt       = info->salt; \
        rec_seq    = info->rec_seq; \
        salt_sz    = sizeof(info->salt); \
        rec_seq_sz = sizeof(info->rec_seq); \
} while (0)

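/* Fill the static-params context with the connection's crypto material:
 * the GCM salt (IV), the initial record number taken from the TLS record
 * sequence, the TLS version, and the DEK index registered for this key.
 */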
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        char *initial_rn, *gcm_iv;
        u16 salt_sz, rec_seq_sz;
        char *salt, *rec_seq;
        u8 tls_version;

        EXTRACT_INFO_FIELDS;

        gcm_iv     = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
        initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);

        memcpy(gcm_iv,     salt,    salt_sz);
        memcpy(initial_rn, rec_seq, rec_seq_sz);

        tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;

        MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
        MLX5_SET(tls_static_params, ctx, const_1, 1);
        MLX5_SET(tls_static_params, ctx, const_2, 2);
        MLX5_SET(tls_static_params, ctx, encryption_standard,
                 MLX5E_ENCRYPTION_STANDARD_TLS);
        MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
}

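/* Build an inline UMR WQE that posts the static params to the connection's
 * TIS. When requested, a small initiator fence orders it after outstanding
 * WQEs on the SQ.
 */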
static void
build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
                    struct mlx5e_ktls_offload_context_tx *priv_tx,
                    bool fence)
{
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;

#define STATIC_PARAMS_DS_CNT \
        DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)

        cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
                                             (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
        cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                   STATIC_PARAMS_DS_CNT);
        cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
        cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);

        ucseg->flags = MLX5_UMR_INLINE;
        ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);

        fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
}

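/* Fill the progress-params context: bind it to the TIS, put the record
 * tracker in the START state, and set the authentication state to
 * NO_OFFLOAD.
 */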
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
        MLX5_SET(tls_progress_params, ctx, record_tracker_state,
                 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
        MLX5_SET(tls_progress_params, ctx, auth_state,
                 MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}

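/* Build a SET_PSV WQE that posts the progress params for the TIS. */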
static void
build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      bool fence)
{
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

#define PROGRESS_PARAMS_DS_CNT \
        DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)

        cseg->opmod_idx_opcode =
                cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
                            (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
        cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                   PROGRESS_PARAMS_DS_CNT);
        cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

        fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
}

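/* Save per-WQE bookkeeping so the completion path knows how many WQEBBs and
 * bytes the WQE covered, and which resync dump page (if any) to release.
 */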
static void tx_fill_wi(struct mlx5e_txqsq *sq,
                       u16 pi, u8 num_wqebbs, u32 num_bytes,
                       struct page *page)
{
        struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

        memset(wi, 0, sizeof(*wi));
        wi->num_wqebbs = num_wqebbs;
        wi->num_bytes  = num_bytes;
        wi->resync_dump_frag_page = page;
}

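/* A newly created connection sets the pending flag; the first xmit on the SQ
 * then tests and clears it and posts the parameter WQEs before any data WQE.
 */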
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        priv_tx->ctx_post_pending = true;
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        bool ret = priv_tx->ctx_post_pending;

        priv_tx->ctx_post_pending = false;

        return ret;
}

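/* Post a static-params or progress-params WQE. Neither carries payload
 * bytes, so only WQEBB occupancy is recorded, and the SQ producer counter
 * advances by the WQE size.
 */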
static void
post_static_params(struct mlx5e_txqsq *sq,
                   struct mlx5e_ktls_offload_context_tx *priv_tx,
                   bool fence)
{
        struct mlx5e_umr_wqe *umr_wqe;
        u16 pi;

        umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
        build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
        tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
                     struct mlx5e_ktls_offload_context_tx *priv_tx,
                     bool fence)
{
        struct mlx5e_tx_wqe *wqe;
        u16 pi;

        wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
        build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
        tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}

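/* Post the parameter WQEs (static, then progress), first making sure both
 * fit contiguously before the SQ frag edge (NOP-filling the edge otherwise).
 * The progress WQE is fenced unless the static WQE was posted with a fence.
 */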
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
                              struct mlx5e_ktls_offload_context_tx *priv_tx,
                              bool skip_static_post, bool fence_first_post)
{
        bool progress_fence = skip_static_post || !fence_first_post;
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 contig_wqebbs_room, pi;

        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs_room <
                     MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

        if (!skip_static_post)
                post_static_params(sq, priv_tx, fence_first_post);

        post_progress_params(sq, priv_tx, progress_fence);
}

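/* Data needed to replay the start of a TLS record on resync: the record
 * sequence number, the number of bytes to replay (sync_len), and the frags
 * holding those bytes.
 */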
struct tx_sync_info {
        u64 rcd_sn;
        u32 sync_len;
        int nr_frags;
        skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
        MLX5E_KTLS_SYNC_DONE,
        MLX5E_KTLS_SYNC_FAIL,
        MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

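/* Find the TLS record containing tcp_seq and collect the record bytes that
 * precede it into info->frags, taking a page reference on each frag.
 * Returns DONE on success, SKIP_NO_DATA when the packet ends before the
 * offload start marker, or FAIL when resync is impossible.
 */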
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
                 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
        enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
        bool ends_before;

        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

        if (unlikely(!record)) {
                ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }

        /* There are the following cases:
         * 1. packet ends before start marker: bypass offload.
         * 2. packet starts before start marker and ends after it: drop,
         *    not supported, breaks contract with kernel.
         * 3. packet ends before tls record info starts: drop,
         *    this packet was already acknowledged and its record info
         *    was released.
         */
        ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

        if (unlikely(tls_record_is_start_marker(record))) {
                ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
                goto out;
        } else if (ends_before) {
                ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }

        info->sync_len = tcp_seq - tls_record_start_seq(record);
        remaining = info->sync_len;
        while (remaining > 0) {
                skb_frag_t *frag = &record->frags[i];

                get_page(skb_frag_page(frag));
                remaining -= skb_frag_size(frag);
                info->frags[i++] = *frag;
        }
        /* reduce the part which will be sent with the original SKB */
        if (remaining < 0)
                skb_frag_size_add(&info->frags[i - 1], remaining);
        info->nr_frags = i;
out:
        spin_unlock_irqrestore(&tx_ctx->lock, flags);
        return ret;
}

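/* Re-post connection params for resync. If the record number kept in the
 * stored crypto info already matches rcd_sn, the static params WQE can be
 * skipped; otherwise the stored record sequence is updated before posting.
 */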
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      u64 rcd_sn)
{
        struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        __be64 rn_be = cpu_to_be64(rcd_sn);
        bool skip_static_post;
        u16 rec_seq_sz;
        char *rec_seq;

        rec_seq = info->rec_seq;
        rec_seq_sz = sizeof(info->rec_seq);

        skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
        if (!skip_static_post)
                memcpy(rec_seq, &rn_be, rec_seq_sz);

        mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

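/* Post a DUMP WQE over one frag of previously sent record data, so the
 * device can bring its crypto state up to date. The frag is DMA-mapped
 * here and unmapped on the DUMP WQE's completion.
 */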
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_dump_wqe *wqe;
        dma_addr_t dma_addr = 0;
        u16 ds_cnt;
        int fsz;
        u16 pi;

        wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

        cseg = &wqe->ctrl;
        dseg = &wqe->data;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
        cseg->tisn = cpu_to_be32(tisn << 8);
        cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

        fsz = skb_frag_size(frag);
        dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                    DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                return -ENOMEM;

        dseg->addr       = cpu_to_be64(dma_addr);
        dseg->lkey       = sq->mkey_be;
        dseg->byte_count = cpu_to_be32(fsz);
        mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

        tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
        sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

        return 0;
}

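/* Completion of a DUMP WQE: unmap its DMA, drop the page reference that was
 * attached via tx_fill_wi(), and account dump statistics.
 */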
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
                                           u32 *dma_fifo_cc)
{
        struct mlx5e_sq_stats *stats;
        struct mlx5e_sq_dma *dma;

        if (!wi->resync_dump_frag_page)
                return;

        dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
        stats = sq->stats;

        mlx5e_tx_dma_unmap(sq->pdev, dma);
        put_page(wi->resync_dump_frag_page);
        stats->tls_dump_packets++;
        stats->tls_dump_bytes += wi->num_bytes;
}

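/* Post a fenced NOP occupying a single WQEBB. Used when resync posted no
 * DUMP WQEs, so the data WQE that follows still waits on the param WQEs.
 */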
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

        tx_fill_wi(sq, pi, 1, 0, NULL);

        mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

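/* The device got out of sync with the TCP stream (OOO or retransmission):
 * re-post the connection params, then replay the already-sent part of the
 * current record with DUMP WQEs, splitting each frag into hw_mtu-sized
 * chunks.
 */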
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                         struct mlx5e_txqsq *sq,
                         int datalen,
                         u32 seq)
{
        struct mlx5e_sq_stats *stats = sq->stats;
        struct mlx5_wq_cyc *wq = &sq->wq;
        enum mlx5e_ktls_sync_retval ret;
        struct tx_sync_info info = {};
        u16 contig_wqebbs_room, pi;
        u8 num_wqebbs;
        int i = 0;

        ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
        if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
                if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
                        stats->tls_skip_no_sync_data++;
                        return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
                }
                /* We might get here if a retransmission reaches the driver
                 * after the relevant record is acked.
                 * It should be safe to drop the packet in this case
                 */
                stats->tls_drop_no_sync_data++;
                goto err_out;
        }

        stats->tls_ooo++;

        tx_post_resync_params(sq, priv_tx, info.rcd_sn);

        /* If no dump WQE was sent, we need to have a fence NOP WQE before
         * the actual data xmit.
         */
        if (!info.nr_frags) {
                tx_post_fence_nop(sq);
                return MLX5E_KTLS_SYNC_DONE;
        }

        num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);

        if (unlikely(contig_wqebbs_room < num_wqebbs))
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

        for (; i < info.nr_frags; i++) {
                unsigned int orig_fsz, frag_offset = 0, n = 0;
                skb_frag_t *f = &info.frags[i];

                orig_fsz = skb_frag_size(f);

                do {
                        bool fence = !(i || frag_offset);
                        unsigned int fsz;

                        n++;
                        fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
                        skb_frag_size_set(f, fsz);
                        if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
                                page_ref_add(skb_frag_page(f), n - 1);
                                goto err_out;
                        }

                        skb_frag_off_add(f, fsz);
                        frag_offset += fsz;
                } while (frag_offset < orig_fsz);

                page_ref_add(skb_frag_page(f), n - 1);
        }

        return MLX5E_KTLS_SYNC_DONE;

err_out:
        for (; i < info.nr_frags; i++)
                /* The put_page() here undoes the page ref obtained in
                 * tx_sync_info_get(). Page refs obtained for the DUMP WQEs
                 * above (by page_ref_add) will be released only upon their
                 * completions (or in mlx5e_free_txqsq_descs, if the channel
                 * closes).
                 */
                put_page(skb_frag_page(&info.frags[i]));

        return MLX5E_KTLS_SYNC_FAIL;
}

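/* TX datapath entry point for kTLS skbs: posts pending param WQEs for a
 * fresh connection, resolves TCP-sequence mismatches via the resync flow,
 * and sets the TIS number in the data WQE's control segment. Returns the
 * skb to transmit, or NULL if it was dropped.
 */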
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                                         struct mlx5e_txqsq *sq,
                                         struct sk_buff *skb,
                                         struct mlx5e_tx_wqe **wqe, u16 *pi)
{
        struct mlx5e_ktls_offload_context_tx *priv_tx;
        struct mlx5e_sq_stats *stats = sq->stats;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct tls_context *tls_ctx;
        int datalen;
        u32 seq;

        if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
                goto out;

        datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
        if (!datalen)
                goto out;

        tls_ctx = tls_get_ctx(skb->sk);
        if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
                goto err_out;

        priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

        if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
                mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
                *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
                stats->tls_ctx++;
        }

        seq = ntohl(tcp_hdr(skb)->seq);
        if (unlikely(priv_tx->expected_seq != seq)) {
                enum mlx5e_ktls_sync_retval ret =
                        mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

                switch (ret) {
                case MLX5E_KTLS_SYNC_DONE:
                        *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
                        break;
                case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
                        if (likely(!skb->decrypted))
                                goto out;
                        WARN_ON_ONCE(1);
                        /* fall-through */
                default: /* MLX5E_KTLS_SYNC_FAIL */
                        goto err_out;
                }
        }

        priv_tx->expected_seq = seq + datalen;

        cseg = &(*wqe)->ctrl;
        cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);

        stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
        stats->tls_encrypted_bytes   += datalen;

out:
        return skb;

err_out:
        dev_kfree_skb_any(skb);
        return NULL;
}