This source file includes following definitions.
- mlx5e_set_ktls_tx_priv_ctx
- mlx5e_get_ktls_tx_priv_ctx
- mlx5e_ktls_dumps_num_wqebbs
- mlx5e_ktls_build_netdev
- mlx5e_ktls_tx_handle_resync_dump_comp
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
4 #ifndef __MLX5E_KTLS_H__
5 #define __MLX5E_KTLS_H__
6
7 #include "en.h"
8
9 #ifdef CONFIG_MLX5_EN_TLS
10 #include <net/tls.h>
11 #include "accel/tls.h"
12
/* Byte size of a UMR WQE that carries the TLS static params context:
 * everything up to the tls_static_params_ctx field plus the params blob.
 */
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
	(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
	 MLX5_ST_SZ_BYTES(tls_static_params))
/* Same size expressed in send WQE basic blocks (WQEBBs), rounded up. */
#define MLX5E_KTLS_STATIC_WQEBBS \
	(DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))

/* Byte size of a TX WQE that carries the TLS progress params context. */
#define MLX5E_KTLS_PROGRESS_WQE_SZ \
	(offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
	 MLX5_ST_SZ_BYTES(tls_progress_params))
/* Progress params WQE size in WQEBBs, rounded up. */
#define MLX5E_KTLS_PROGRESS_WQEBBS \
	(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
24
/* Minimal WQE used to DUMP (replay) already-sent record bytes during TX
 * resync: a control segment plus a single data pointer. Layout is
 * hardware-defined — do not reorder or pad.
 */
struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

/* Number of WQEBBs occupied by one DUMP WQE. */
#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
32
/* auth_state values for the TLS progress params context.
 * NOTE(review): numeric values presumably mirror the device interface
 * definition — confirm against the HW/PRM spec before changing.
 */
enum {
	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD     = 0,
	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD        = 1,
	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION = 2,
};

/* record_tracker_state values for the TLS progress params context.
 * NOTE(review): same caveat — values appear to be HW-defined.
 */
enum {
	MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START     = 0,
	MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING  = 1,
	MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2,
};
44
/* Per-connection driver-private state for a kTLS TX offloaded socket.
 * Linked to/from the core tls_offload_context_tx via the shadow overlay
 * (see mlx5e_set_ktls_tx_priv_ctx / mlx5e_get_ktls_tx_priv_ctx below).
 */
struct mlx5e_ktls_offload_context_tx {
	struct tls_offload_context_tx *tx_ctx;   /* back-pointer to core TLS TX offload ctx */
	struct tls12_crypto_info_aes_gcm_128 crypto_info; /* copy of the session key material */
	u32 expected_seq;      /* next TCP seq we expect; mismatch means resync is needed */
	u32 tisn;              /* TIS number — TODO confirm: HW object backing this connection */
	u32 key_id;            /* index of the DEK/key programmed into the device */
	bool ctx_post_pending; /* true until static/progress params are posted to HW */
};
53
/* Overlay of the core tls_offload_context_tx storage: the driver stashes
 * its priv pointer in the tail space. tx_ctx MUST remain the first member
 * so a tls_offload_context_tx pointer can be cast to this type; the
 * BUILD_BUG_ONs below guarantee it fits in TLS_OFFLOAD_CONTEXT_SIZE_TX.
 */
struct mlx5e_ktls_offload_context_tx_shadow {
	struct tls_offload_context_tx         tx_ctx;
	struct mlx5e_ktls_offload_context_tx *priv_tx;
};
58
59 static inline void
60 mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
61 struct mlx5e_ktls_offload_context_tx *priv_tx)
62 {
63 struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
64 struct mlx5e_ktls_offload_context_tx_shadow *shadow;
65
66 BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
67
68 shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
69
70 shadow->priv_tx = priv_tx;
71 priv_tx->tx_ctx = tx_ctx;
72 }
73
74 static inline struct mlx5e_ktls_offload_context_tx *
75 mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
76 {
77 struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
78 struct mlx5e_ktls_offload_context_tx_shadow *shadow;
79
80 BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
81
82 shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
83
84 return shadow->priv_tx;
85 }
86
/* Advertise kTLS TX capabilities on the net_device (implemented in ktls.c). */
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
/* Mark that the static/progress params WQEs still need to be posted. */
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);

/* TX datapath hook: handle an skb on a kTLS-offloaded socket, posting
 * params/resync WQEs as needed before the data WQE.
 */
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
					 struct mlx5e_txqsq *sq,
					 struct sk_buff *skb,
					 struct mlx5e_tx_wqe **wqe, u16 *pi);
/* Completion handler for resync DUMP WQEs: releases their DMA mappings. */
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc);
static inline u8
mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
			    unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEBBs needed for the TX resync of a record:
	 * one DUMP WQE per fragment, plus one per hw_mtu-sized chunk of
	 * the linear sync region.
	 */
	return MLX5E_KTLS_DUMP_WQEBBS *
		(nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
}
#else

/* !CONFIG_MLX5_EN_TLS: no-op stubs so callers compile without kTLS support. */

static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}

static inline void
mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
				      struct mlx5e_tx_wqe_info *wi,
				      u32 *dma_fifo_cc) {}

#endif
119
120 #endif