This source file includes the following definitions:
- mlx5e_tls_add_metadata
- mlx5e_tls_get_sync_data
- mlx5e_tls_complete_sync_skb
- mlx5e_tls_handle_ooo
- mlx5e_tls_handle_tx_skb
- tls_update_resync_sn
- mlx5e_tls_handle_rx_skb
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
#include "accel/accel.h"

#include <net/inet6_hashtables.h>
#include <linux/ipv6.h>

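/* Syndromes reported by the device in the RX metadata header */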
#define SYNDROM_DECRYPTED 0x30
#define SYNDROM_RESYNC_REQUEST 0x31
#define SYNDROM_AUTH_FAILED 0x32

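/* Syndromes placed in the TX metadata header sent to the device */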
#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33

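/* Record sequence number and the frags holding the record data that must be
 * resent to resynchronize the device before an out-of-order skb.
 */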
struct sync_info {
	u64 rcd_sn;
	s32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct recv_metadata_content {
	u8 syndrome;
	u8 reserved;
	__be32 sync_seq;
} __packed;

struct send_metadata_content {
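	/* Syndrome in the first byte, swid in the remaining three bytes */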
	__be32 syndrome_swid;
	__be16 first_seq;
} __packed;

struct mlx5e_tls_metadata {
	union {
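		/* Metadata layout used on the RX path */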
		struct recv_metadata_content recv;
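		/* Metadata layout used on the TX path */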
		struct send_metadata_content send;
		unsigned char raw[6];
	} __packed content;

	__be16 ethertype;
} __packed;

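/* Insert an 8-byte metadata header right after the Ethernet addresses: the
 * MLX5E_METADATA_ETHER_TYPE, six bytes of syndrome/swid content and then the
 * original EtherType.
 */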
static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
{
	struct mlx5e_tls_metadata *pet;
	struct ethhdr *eth;

	if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
		return -ENOMEM;

	eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
	skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
	pet = (struct mlx5e_tls_metadata *)(eth + 1);

	memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
		2 * ETH_ALEN);

	eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
	pet->content.send.syndrome_swid =
		htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;

	return 0;
}

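/* Look up the TLS record that contains tcp_seq and take references on the
 * frags holding the record data that precedes it, so that data can be
 * retransmitted to resynchronize the device.
 */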
static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
				   u32 tcp_seq, struct sync_info *info)
{
	int remaining, i = 0, ret = -EINVAL;
	struct tls_record_info *record;
	unsigned long flags;
	s32 sync_size;

	spin_lock_irqsave(&context->base.lock, flags);
	record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);

	if (unlikely(!record))
		goto out;

	sync_size = tcp_seq - tls_record_start_seq(record);
	info->sync_len = sync_size;
	if (unlikely(sync_size < 0)) {
		if (tls_record_is_start_marker(record))
			goto done;

		goto out;
	}

	remaining = sync_size;
	while (remaining > 0) {
		info->frags[i] = record->frags[i];
		__skb_frag_ref(&info->frags[i]);
		remaining -= skb_frag_size(&info->frags[i]);

		if (remaining < 0)
			skb_frag_size_add(&info->frags[i], remaining);

		i++;
	}
	info->nr_frags = i;
done:
	ret = 0;
out:
	spin_unlock_irqrestore(&context->base.lock, flags);
	return ret;
}

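/* Finish building the resync skb: copy the headers of the original skb,
 * append the record sequence number, fix up the IP total length and TCP
 * sequence number, set up GSO and fill in the SYNC metadata.
 */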
static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
					struct sk_buff *nskb, u32 tcp_seq,
					int headln, __be64 rcd_sn)
{
	struct mlx5e_tls_metadata *pet;
	u8 syndrome = SYNDROME_SYNC;
	struct iphdr *iph;
	struct tcphdr *th;
	int data_len, mss;

	nskb->dev = skb->dev;
	skb_reset_mac_header(nskb);
	skb_set_network_header(nskb, skb_network_offset(skb));
	skb_set_transport_header(nskb, skb_transport_offset(skb));
	memcpy(nskb->data, skb->data, headln);
	memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));

	iph = ip_hdr(nskb);
	iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
	th = tcp_hdr(nskb);
	data_len = nskb->len - headln;
	tcp_seq -= data_len;
	th->seq = htonl(tcp_seq);

	mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
	skb_shinfo(nskb)->gso_size = 0;
	if (data_len > mss) {
		skb_shinfo(nskb)->gso_size = mss;
		skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
	}
	skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;

	pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
	memcpy(pet, &syndrome, sizeof(syndrome));
	pet->content.send.first_seq = htons(tcp_seq);

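	/* Request checksum offload for the resync skb */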
	nskb->ip_summed = CHECKSUM_PARTIAL;

	nskb->queue_mapping = skb->queue_mapping;
}

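/* Handle a TX skb whose TCP sequence number does not match the expected one
 * (e.g. a retransmission): transmit a dedicated resync skb carrying the start
 * of the affected TLS record, then return the original skb for normal
 * transmission, or NULL if it had to be dropped.
 */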
static struct sk_buff *
mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
		     struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     struct mlx5e_tx_wqe **wqe,
		     u16 *pi,
		     struct mlx5e_tls *tls)
{
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct sync_info info;
	struct sk_buff *nskb;
	int linear_len = 0;
	int headln;
	int i;

	sq->stats->tls_ooo++;

	if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
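		/* No TLS record was found for this sequence number;
		 * drop the skb.
		 */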
		atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
		goto err_out;
	}

	if (unlikely(info.sync_len < 0)) {
		u32 payload;

		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload = skb->len - headln;
		if (likely(payload <= -info.sync_len))
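			/* The payload does not require offload */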
			return skb;

		atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
		goto err_out;
	}

	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
		atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
		goto err_out;
	}

	headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
	linear_len += headln + sizeof(info.rcd_sn);
	nskb = alloc_skb(linear_len, GFP_ATOMIC);
	if (unlikely(!nskb)) {
		atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
		goto err_out;
	}

	context->expected_seq = tcp_seq + skb->len - headln;
	skb_put(nskb, linear_len);
	for (i = 0; i < info.nr_frags; i++)
		skb_shinfo(nskb)->frags[i] = info.frags[i];

	skb_shinfo(nskb)->nr_frags = info.nr_frags;
	nskb->data_len = info.sync_len;
	nskb->len += info.sync_len;
	sq->stats->tls_resync_bytes += nskb->len;
	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
				    cpu_to_be64(info.rcd_sn));
	mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
	*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}

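/* TX offload entry point: hand the skb to the kTLS path when the device
 * supports tls_tx; otherwise, for device-offloaded TLS sockets, add the
 * offload metadata and track the expected TCP sequence number, taking the
 * out-of-order path when the sequence does not match.
 */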
struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_tx_wqe **wqe,
					u16 *pi)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_tls_offload_context_tx *context;
	struct tls_context *tls_ctx;
	u32 expected_seq;
	int datalen;
	u32 skb_seq;

	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
		skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
		goto out;
	}

	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
		goto out;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		goto out;

	tls_ctx = tls_get_ctx(skb->sk);
	if (unlikely(tls_ctx->netdev != netdev))
		goto out;

	skb_seq = ntohl(tcp_hdr(skb)->seq);
	context = mlx5e_get_tls_tx_context(tls_ctx);
	expected_seq = context->expected_seq;

	if (unlikely(expected_seq != skb_seq)) {
		skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
		goto out;
	}

	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
		atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
		dev_kfree_skb_any(skb);
		skb = NULL;
		goto out;
	}

	context->expected_seq = skb_seq + datalen;
out:
	return skb;
}

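/* Handle a resync request from the device: look up the TCP socket of the
 * flow described by the packet headers and pass the requested record
 * sequence number to the TLS layer.
 */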
static int tls_update_resync_sn(struct net_device *netdev,
				struct sk_buff *skb,
				struct mlx5e_tls_metadata *mdata)
{
	struct sock *sk = NULL;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;

	if (mdata->ethertype != htons(ETH_P_IP))
		return -EINVAL;

	iph = (struct iphdr *)(mdata + 1);

	th = ((void *)iph) + iph->ihl * 4;

	if (iph->version == 4) {
		sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}
	if (!sk || sk->sk_state == TCP_TIME_WAIT) {
		struct mlx5e_priv *priv = netdev_priv(netdev);

		atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
		goto out;
	}

	skb->sk = sk;
	skb->destructor = sock_edemux;

	memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
	tls_offload_rx_resync_request(sk, seq);
out:
	return 0;
}

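/* RX offload entry point: act on the metadata header that the device
 * prepended to the packet (decrypted, resync request or authentication
 * failure), then strip the header and adjust the CQE byte count.
 */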
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
			     u32 *cqe_bcnt)
{
	struct mlx5e_tls_metadata *mdata;
	struct mlx5e_priv *priv;

	if (!is_metadata_hdr_valid(skb))
		return;

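	/* A metadata header is present; parse and act on it */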
	mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
	switch (mdata->content.recv.syndrome) {
	case SYNDROM_DECRYPTED:
		skb->decrypted = 1;
		break;
	case SYNDROM_RESYNC_REQUEST:
		tls_update_resync_sn(netdev, skb, mdata);
		priv = netdev_priv(netdev);
		atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
		break;
	case SYNDROM_AUTH_FAILED:
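		/* Count the failure; the skb is not marked as decrypted,
		 * so the record is left to the TLS layer.
		 */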
		priv = netdev_priv(netdev);
		atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
		break;
	default:
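		/* Unexpected syndrome: leave the metadata header in place */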
		return;
	}

	remove_metadata_hdr(skb);
	*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
}