This source file includes the following definitions:
- register_listen_notifier
- unregister_listen_notifier
- listen_notify_handler
- listen_backlog_rcv
- chtls_start_listen
- chtls_stop_listen
- chtls_inline_feature
- chtls_create_hash
- chtls_destroy_hash
- chtls_free_uld
- chtls_dev_release
- chtls_register_dev
- process_deferq
- chtls_get_skb
- chtls_uld_add
- chtls_free_all_uld
- chtls_uld_state_change
- copy_gl_to_skb_pkt
- chtls_recv_packet
- chtls_recv_rsp
- chtls_recv
- chtls_uld_rx_handler
- do_chtls_getsockopt
- chtls_getsockopt
- do_chtls_setsockopt
- chtls_setsockopt
- chtls_install_cpl_ops
- chtls_init_ulp_ops
- chtls_register
- chtls_unregister
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

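/*
 * chtls device management: a mutex-protected list of every chtls
 * device registered with the cxgb4 driver.
 */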
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
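/* Send pages in 16KB (2^14 byte) chunks where PAGE_SIZE allows,
 * otherwise fall back to single pages (order 0).
 */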
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

static void register_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_register(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_unregister(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_listen *clisten;
	int ret = NOTIFY_DONE;

	clisten = (struct chtls_listen *)data;

	switch (event) {
	case CHTLS_LISTEN_START:
		ret = chtls_listen_start(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	case CHTLS_LISTEN_STOP:
		chtls_listen_stop(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	}
	return ret;
}

static struct notifier_block listen_notifier = {
	.notifier_call = listen_notify_handler
};

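/* skbs the driver queues on a listen socket's backlog are built with
 * the transport header equal to the network header; anything else is
 * a regular TCP segment and is handed to tcp_v4_do_rcv().
 */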
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
		return tcp_v4_do_rcv(sk, skb);
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}

static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;
	int err;

	if (sk->sk_protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	if (sk->sk_family == PF_INET &&
	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return -ENOMEM;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	err = raw_notifier_call_chain(&listen_notify_list,
				      CHTLS_LISTEN_START, clisten);
	mutex_unlock(&notify_mutex);
	return err;
}

static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, clisten);
	mutex_unlock(&notify_mutex);
}

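/* Return nonzero if any port on the adapter advertises inline TLS
 * record offload.
 */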
static int chtls_inline_feature(struct tls_device *dev)
{
	struct net_device *netdev;
	struct chtls_dev *cdev;
	int i;

	cdev = to_chtls_dev(dev);

	for (i = 0; i < cdev->lldi->nports; i++) {
		netdev = cdev->ports[i];
		if (netdev->features & NETIF_F_HW_TLS_RECORD)
			return 1;
	}
	return 0;
}

static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(cdev, sk);
	return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(cdev, sk);
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	tls_unregister_device(&cdev->tlsdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}

static inline void chtls_dev_release(struct kref *kref)
{
	struct chtls_dev *cdev;
	struct tls_device *dev;

	dev = container_of(kref, struct tls_device, kref);
	cdev = to_chtls_dev(dev);
	chtls_free_uld(cdev);
}

static void chtls_register_dev(struct chtls_dev *cdev)
{
	struct tls_device *tlsdev = &cdev->tlsdev;

	strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
		TLS_DEVICE_NAME_MAX);
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tlsdev->release = chtls_dev_release;
	kref_init(&tlsdev->kref);
	tls_register_device(tlsdev);
	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

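/* Drain the deferred-work queue.  The queue lock is dropped around
 * each handler invocation so handlers can send to hardware or requeue
 * work without holding the spinlock.
 */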
static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
					      struct chtls_dev, deferq_task);
	struct sk_buff *skb;

	spin_lock_bh(&cdev->deferq.lock);
	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
		spin_unlock_bh(&cdev->deferq.lock);
		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
		spin_lock_bh(&cdev->deferq.lock);
	}
	spin_unlock_bh(&cdev->deferq.lock);
}

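/* Preallocate a minimal skb holding a zeroed TCP header; it is kept on
 * the device for later use by the connection-management paths.
 */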
static int chtls_get_skb(struct chtls_dev *cdev)
{
	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
	if (!cdev->askb)
		return -ENOMEM;

	skb_put(cdev->askb, sizeof(struct tcphdr));
	skb_reset_transport_header(cdev->askb);
	memset(cdev->askb->data, 0, cdev->askb->len);
	return 0;
}

static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
	struct cxgb4_lld_info *lldi;
	struct chtls_dev *cdev;
	int i, j;

	cdev = kzalloc(sizeof(*cdev) + info->nports *
		       (sizeof(struct net_device *)), GFP_KERNEL);
	if (!cdev)
		goto out;

	lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
	if (!lldi)
		goto out_lldi;

	if (chtls_get_skb(cdev))
		goto out_skb;

	*lldi = *info;
	cdev->lldi = lldi;
	cdev->pdev = lldi->pdev;
	cdev->tids = lldi->tids;
	cdev->ports = lldi->ports;
	cdev->mtus = lldi->mtus;
	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;

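	/* Cache one small skb per response-queue hash bucket so most CPL
	 * responses can be handled without allocating in the rx path.
	 */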
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
		unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

		cdev->rspq_skb_cache[i] = __alloc_skb(size,
						      gfp_any(), 0,
						      lldi->nodeid);
		if (unlikely(!cdev->rspq_skb_cache[i]))
			goto out_rspq_skb;
	}

	idr_init(&cdev->hwtid_idr);
	INIT_WORK(&cdev->deferq_task, process_deferq);
	spin_lock_init(&cdev->listen_lock);
	spin_lock_init(&cdev->idr_lock);
	cdev->send_page_order = min_t(uint, get_order(32768),
				      send_page_order);
	cdev->max_host_sndbuf = 48 * 1024;

	if (lldi->vr->key.size)
		if (chtls_init_kmap(cdev, lldi))
			goto out_rspq_skb;

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list, &cdev_list);
	mutex_unlock(&cdev_mutex);

	return cdev;
out_rspq_skb:
	for (j = 0; j < i; j++)
		kfree_skb(cdev->rspq_skb_cache[j]);
	kfree_skb(cdev->askb);
out_skb:
	kfree(lldi);
out_lldi:
	kfree(cdev);
out:
	return NULL;
}

static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
			list_del(&cdev->list);
			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		}
	}
	mutex_unlock(&cdev_mutex);
}

static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct chtls_dev *cdev = handle;

	switch (new_state) {
	case CXGB4_STATE_UP:
		chtls_register_dev(cdev);
		break;
	case CXGB4_STATE_DOWN:
		break;
	case CXGB4_STATE_START_RECOVERY:
		break;
	case CXGB4_STATE_DETACH:
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		break;
	default:
		break;
	}
	return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
					  const __be64 *rsp,
					  u32 pktshift)
{
	struct sk_buff *skb;

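	/* Allocate enough room to prepend a cpl_pass_accept_req that the
	 * driver synthesizes in front of the received payload; the skb
	 * then goes through regular cpl_pass_accept_req processing.  The
	 * math assumes sizeof(cpl_pass_accept_req) >= sizeof(cpl_rx_pkt).
	 */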
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
			- pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;
	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
		  - pktshift);

	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);

	return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
			     const struct pkt_gl *gl, const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;
	int ret;

	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
	if (!skb)
		return -ENOMEM;

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);

	return 0;
}

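/* Handle a CPL response that arrived without a gather list.  A 64-byte
 * response entry carries an 8-byte RSS header up front and a struct
 * rsp_ctrl trailer at the end; the cached per-bucket skb is reused
 * whenever nothing else holds a reference to it.
 */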
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
	unsigned long rspq_bin;
	unsigned int opcode;
	struct sk_buff *skb;
	unsigned int len;
	int ret;

	len = 64 - sizeof(struct rsp_ctrl) - 8;
	opcode = *(u8 *)rsp;

	rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
	skb = cdev->rspq_skb_cache[rspq_bin];
	if (skb && !skb_is_nonlinear(skb) &&
	    !skb_shared(skb) && !skb_cloned(skb)) {
		refcount_inc(&skb->users);
		if (refcount_read(&skb->users) == 2) {
			__skb_trim(skb, 0);
			if (skb_tailroom(skb) >= len)
				goto copy_out;
		}
		refcount_dec(&skb->users);
	}
	skb = alloc_skb(len, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

copy_out:
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, rsp, len);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	ret = chtls_handlers[opcode](cdev, skb);

	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
	return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
		       struct sk_buff **skbs, const __be64 *rsp)
{
	struct sk_buff *skb = *skbs;
	unsigned int opcode;
	int ret;

	opcode = *(u8 *)rsp;

	__skb_push(skb, sizeof(struct rss_header));
	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
}

static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *gl)
{
	struct chtls_dev *cdev = handle;
	unsigned int opcode;
	struct sk_buff *skb;

	opcode = *(u8 *)rsp;

	if (unlikely(opcode == CPL_RX_PKT)) {
		if (chtls_recv_packet(cdev, gl, rsp) < 0)
			goto nomem;
		return 0;
	}

	if (!gl)
		return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb))
		goto nomem;
	chtls_recv(cdev, &skb, rsp);
	return 0;

nomem:
	return -ENOMEM;
}

static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
			       int __user *optlen)
{
	struct tls_crypto_info crypto_info = { 0 };

	crypto_info.version = TLS_1_2_VERSION;
	if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
		return -EFAULT;
	return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_getsockopt(sk, optval, optlen);
}

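/* Copy TLS 1.2 AES-GCM-128 crypto parameters in from userspace and
 * program the key material into the adapter via chtls_setkey().
 */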
static int do_chtls_setsockopt(struct sock *sk, int optname,
			       char __user *optval, unsigned int optlen)
{
	struct tls_crypto_info *crypto_info, tmp_crypto_info;
	struct chtls_sock *csk;
	int keylen;
	int rc = 0;

	csk = rcu_dereference_sk_user_data(sk);

	if (!optval || optlen < sizeof(*crypto_info)) {
		rc = -EINVAL;
		goto out;
	}

	rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* check version */
	if (tmp_crypto_info.version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto out;
	}

	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

	switch (tmp_crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		/* version and cipher type arrived with the first copy */
		crypto_info[0] = tmp_crypto_info;
		/* copy the remaining cipher-specific parameters */
		rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
				    optval + sizeof(*crypto_info),
				    sizeof(struct tls12_crypto_info_aes_gcm_128)
				    - sizeof(*crypto_info));

		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		rc = chtls_setkey(csk, keylen, optname);
		break;
	}
	default:
		rc = -EINVAL;
		goto out;
	}
out:
	return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chtls_uld_add,
	.state_change = chtls_uld_state_change,
	.rx_handler = chtls_uld_rx_handler,
};

void chtls_install_cpl_ops(struct sock *sk)
{
	sk->sk_prot = &chtls_cpl_prot;
}

static void __init chtls_init_ulp_ops(void)
{
	chtls_cpl_prot = tcp_prot;
	chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
			   &tcp_prot, PF_INET);
	chtls_cpl_prot.close = chtls_close;
	chtls_cpl_prot.disconnect = chtls_disconnect;
	chtls_cpl_prot.destroy = chtls_destroy_sock;
	chtls_cpl_prot.shutdown = chtls_shutdown;
	chtls_cpl_prot.sendmsg = chtls_sendmsg;
	chtls_cpl_prot.sendpage = chtls_sendpage;
	chtls_cpl_prot.recvmsg = chtls_recvmsg;
	chtls_cpl_prot.setsockopt = chtls_setsockopt;
	chtls_cpl_prot.getsockopt = chtls_getsockopt;
}

static int __init chtls_register(void)
{
	chtls_init_ulp_ops();
	register_listen_notifier(&listen_notifier);
	cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
	return 0;
}

static void __exit chtls_unregister(void)
{
	unregister_listen_notifier(&listen_notifier);
	chtls_free_all_uld();
	cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);