This source file includes the following definitions:
- tbnet_fill_header
- tbnet_login_response
- tbnet_login_request
- tbnet_logout_response
- tbnet_logout_request
- start_login
- stop_login
- tbnet_frame_size
- tbnet_free_buffers
- tbnet_tear_down
- tbnet_handle_packet
- tbnet_available_buffers
- tbnet_alloc_rx_buffers
- tbnet_get_tx_buffer
- tbnet_tx_callback
- tbnet_alloc_tx_buffers
- tbnet_connected_work
- tbnet_login_work
- tbnet_disconnect_work
- tbnet_check_frame
- tbnet_poll
- tbnet_start_poll
- tbnet_open
- tbnet_stop
- tbnet_xmit_csum_and_map
- tbnet_kmap_frag
- tbnet_start_xmit
- tbnet_get_stats64
- tbnet_generate_mac
- tbnet_probe
- tbnet_remove
- tbnet_shutdown
- tbnet_suspend
- tbnet_resume
- tbnet_init
- tbnet_exit
1 /*
2  * Thunderbolt network driver
3  *
4  * Networking over a Thunderbolt cable using the ThunderboltIP protocol.
5  *
6  * Authors: Amir Levy <amir.jer.levy@intel.com>
7  *          Michael Jamet <michael.jamet@intel.com>
8  *          Mika Westerberg <mika.westerberg@linux.intel.com>
9  */
10
11 #include <linux/atomic.h>
12 #include <linux/highmem.h>
13 #include <linux/if_vlan.h>
14 #include <linux/jhash.h>
15 #include <linux/module.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rtnetlink.h>
18 #include <linux/sizes.h>
19 #include <linux/thunderbolt.h>
20 #include <linux/uuid.h>
21 #include <linux/workqueue.h>
22
23 #include <net/ip6_checksum.h>
24
25
26 #define TBNET_LOGIN_DELAY 4500
27 #define TBNET_LOGIN_TIMEOUT 500
28 #define TBNET_LOGOUT_TIMEOUT 100
29
30 #define TBNET_RING_SIZE 256
31 #define TBNET_LOCAL_PATH 0xf
32 #define TBNET_LOGIN_RETRIES 60
33 #define TBNET_LOGOUT_RETRIES 5
34 #define TBNET_MATCH_FRAGS_ID BIT(1)
35 #define TBNET_MAX_MTU SZ_64K
36 #define TBNET_FRAME_SIZE SZ_4K
37 #define TBNET_MAX_PAYLOAD_SIZE \
38 (TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
39
40 #define TBNET_RX_MAX_SIZE \
41 (TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
42 #define TBNET_RX_PAGE_ORDER get_order(TBNET_RX_MAX_SIZE)
43 #define TBNET_RX_PAGE_SIZE (PAGE_SIZE << TBNET_RX_PAGE_ORDER)
44
45 #define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))
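/*
 * Worked example of the framing constants above (illustrative only):
 * sizeof(struct thunderbolt_ip_frame_header) is 12 bytes, so
 * TBNET_MAX_PAYLOAD_SIZE is 4096 - 12 = 4084 bytes. A maximum-size
 * network packet of TBNET_MAX_MTU = 65536 bytes is therefore split by
 * tbnet_start_xmit() into DIV_ROUND_UP(65536, 4084) = 17 frames: 16
 * full frames carrying 4084 bytes each, followed by one final frame
 * carrying the remaining 192 bytes.
 */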
46
47
48 /**
49  * struct thunderbolt_ip_frame_header - Header put in front of each frame
50  * @frame_size: Size of the data carried by the frame (in bytes)
51  * @frame_index: Running index of the frame within a packet
52  * @frame_id: ID used to match frames belonging to the same packet
53  * @frame_count: Total number of frames making up the packet
54  *
55  * All fields are little-endian on the wire.
56  */
57
58 struct thunderbolt_ip_frame_header {
59 u32 frame_size;
60 u16 frame_index;
61 u16 frame_id;
62 u32 frame_count;
63 };
64
65 enum thunderbolt_ip_frame_pdf {
66 TBIP_PDF_FRAME_START = 1,
67 TBIP_PDF_FRAME_END,
68 };
69
70 enum thunderbolt_ip_type {
71 TBIP_LOGIN,
72 TBIP_LOGIN_RESPONSE,
73 TBIP_LOGOUT,
74 TBIP_STATUS,
75 };
76
77 struct thunderbolt_ip_header {
78 u32 route_hi;
79 u32 route_lo;
80 u32 length_sn;
81 uuid_t uuid;
82 uuid_t initiator_uuid;
83 uuid_t target_uuid;
84 u32 type;
85 u32 command_id;
86 };
87
88 #define TBIP_HDR_LENGTH_MASK GENMASK(5, 0)
89 #define TBIP_HDR_SN_MASK GENMASK(28, 27)
90 #define TBIP_HDR_SN_SHIFT 27
91
92 struct thunderbolt_ip_login {
93 struct thunderbolt_ip_header hdr;
94 u32 proto_version;
95 u32 transmit_path;
96 u32 reserved[4];
97 };
98
99 #define TBIP_LOGIN_PROTO_VERSION 1
100
101 struct thunderbolt_ip_login_response {
102 struct thunderbolt_ip_header hdr;
103 u32 status;
104 u32 receiver_mac[2];
105 u32 receiver_mac_len;
106 u32 reserved[4];
107 };
108
109 struct thunderbolt_ip_logout {
110 struct thunderbolt_ip_header hdr;
111 };
112
113 struct thunderbolt_ip_status {
114 struct thunderbolt_ip_header hdr;
115 u32 status;
116 };
117
118 struct tbnet_stats {
119 u64 tx_packets;
120 u64 rx_packets;
121 u64 tx_bytes;
122 u64 rx_bytes;
123 u64 rx_errors;
124 u64 tx_errors;
125 u64 rx_length_errors;
126 u64 rx_over_errors;
127 u64 rx_crc_errors;
128 u64 rx_missed_errors;
129 };
130
131 struct tbnet_frame {
132 struct net_device *dev;
133 struct page *page;
134 struct ring_frame frame;
135 };
136
137 struct tbnet_ring {
138 struct tbnet_frame frames[TBNET_RING_SIZE];
139 unsigned int cons;
140 unsigned int prod;
141 struct tb_ring *ring;
142 };
143
144
145 /**
146  * struct tbnet - ThunderboltIP network driver private data
147  * @svc: XDomain service the driver is bound to
148  * @xd: XDomain the service belongs to
149  * @handler: ThunderboltIP configuration protocol handler
150  * @dev: Networking device
151  * @napi: NAPI structure for Rx polling
152  * @stats: Network statistics
153  * @skb: Network packet that is currently being assembled from Rx frames
154  * @command_id: ID used for the next ThunderboltIP control packet
155  * @login_sent: Our ThunderboltIP login request has been sent successfully
156  * @login_received: ThunderboltIP login request received from the remote host
157  * @transmit_path: HopID announced by the remote end in its login request,
158  *	used when the DMA paths are enabled
159  * @connection_lock: Serializes access to @login_sent, @login_received and
160  *	@transmit_path
161  * @login_retries: Number of login retries done so far
162  * @login_work: Worker that sends ThunderboltIP login requests
163  * @connected_work: Worker that finalizes the connection setup and enables
164  *	the DMA paths once both ends have logged in
165  * @disconnect_work: Worker tearing the connection down when the remote end
166  *	logs out
167  * @rx_hdr: Copy of the frame header of the Rx frame being reassembled
168  * @rx_ring: Software ring holding Rx frames
169  * @frame_id: Frame ID used for the next Tx packet (when
170  *	%TBNET_MATCH_FRAGS_ID is supported by both ends)
171  * @tx_ring: Software ring holding Tx frames
172  */
173
174
175
176 struct tbnet {
177 const struct tb_service *svc;
178 struct tb_xdomain *xd;
179 struct tb_protocol_handler handler;
180 struct net_device *dev;
181 struct napi_struct napi;
182 struct tbnet_stats stats;
183 struct sk_buff *skb;
184 atomic_t command_id;
185 bool login_sent;
186 bool login_received;
187 u32 transmit_path;
188 struct mutex connection_lock;
189 int login_retries;
190 struct delayed_work login_work;
191 struct work_struct connected_work;
192 struct work_struct disconnect_work;
193 struct thunderbolt_ip_frame_header rx_hdr;
194 struct tbnet_ring rx_ring;
195 atomic_t frame_id;
196 struct tbnet_ring tx_ring;
197 };
198
199 /* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
200 static const uuid_t tbnet_dir_uuid =
201 UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
202 0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
203
204 /* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
205 static const uuid_t tbnet_svc_uuid =
206 UUID_INIT(0x798f589e, 0x3616, 0x8a47,
207 0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);
208
209 static struct tb_property_dir *tbnet_dir;
210
211 static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
212 u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
213 enum thunderbolt_ip_type type, size_t size, u32 command_id)
214 {
215 u32 length_sn;
216
217 /* The length does not include the route and length_sn words themselves */
218 length_sn = (size - 3 * 4) / 4;
219 length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;
220
221 hdr->route_hi = upper_32_bits(route);
222 hdr->route_lo = lower_32_bits(route);
223 hdr->length_sn = length_sn;
224 uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
225 uuid_copy(&hdr->initiator_uuid, initiator_uuid);
226 uuid_copy(&hdr->target_uuid, target_uuid);
227 hdr->type = type;
228 hdr->command_id = command_id;
229 }
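/*
 * Example (numbers derived from the structures above): for a
 * struct thunderbolt_ip_login request, sizeof(request) is
 * 68 (header) + 4 + 4 + 16 = 92 bytes, so tbnet_fill_header()
 * computes length_sn = (92 - 12) / 4 = 20, i.e. the length excludes
 * the route and length_sn words and is expressed in 32-bit units.
 * The 2-bit sequence number is then or'ed into bits 28:27.
 */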
230
231 static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
232 u32 command_id)
233 {
234 struct thunderbolt_ip_login_response reply;
235 struct tb_xdomain *xd = net->xd;
236
237 memset(&reply, 0, sizeof(reply));
238 tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
239 xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
240 command_id);
241 memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
242 reply.receiver_mac_len = ETH_ALEN;
243
244 return tb_xdomain_response(xd, &reply, sizeof(reply),
245 TB_CFG_PKG_XDOMAIN_RESP);
246 }
247
248 static int tbnet_login_request(struct tbnet *net, u8 sequence)
249 {
250 struct thunderbolt_ip_login_response reply;
251 struct thunderbolt_ip_login request;
252 struct tb_xdomain *xd = net->xd;
253
254 memset(&request, 0, sizeof(request));
255 tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
256 xd->remote_uuid, TBIP_LOGIN, sizeof(request),
257 atomic_inc_return(&net->command_id));
258
259 request.proto_version = TBIP_LOGIN_PROTO_VERSION;
260 request.transmit_path = TBNET_LOCAL_PATH;
261
262 return tb_xdomain_request(xd, &request, sizeof(request),
263 TB_CFG_PKG_XDOMAIN_RESP, &reply,
264 sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
265 TBNET_LOGIN_TIMEOUT);
266 }
267
268 static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
269 u32 command_id)
270 {
271 struct thunderbolt_ip_status reply;
272 struct tb_xdomain *xd = net->xd;
273
274 memset(&reply, 0, sizeof(reply));
275 tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
276 xd->remote_uuid, TBIP_STATUS, sizeof(reply),
277 atomic_inc_return(&net->command_id));
278 return tb_xdomain_response(xd, &reply, sizeof(reply),
279 TB_CFG_PKG_XDOMAIN_RESP);
280 }
281
282 static int tbnet_logout_request(struct tbnet *net)
283 {
284 struct thunderbolt_ip_logout request;
285 struct thunderbolt_ip_status reply;
286 struct tb_xdomain *xd = net->xd;
287
288 memset(&request, 0, sizeof(request));
289 tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
290 xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
291 atomic_inc_return(&net->command_id));
292
293 return tb_xdomain_request(xd, &request, sizeof(request),
294 TB_CFG_PKG_XDOMAIN_RESP, &reply,
295 sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
296 TBNET_LOGOUT_TIMEOUT);
297 }
298
299 static void start_login(struct tbnet *net)
300 {
301 mutex_lock(&net->connection_lock);
302 net->login_sent = false;
303 net->login_received = false;
304 mutex_unlock(&net->connection_lock);
305
306 queue_delayed_work(system_long_wq, &net->login_work,
307 msecs_to_jiffies(1000));
308 }
309
310 static void stop_login(struct tbnet *net)
311 {
312 cancel_delayed_work_sync(&net->login_work);
313 cancel_work_sync(&net->connected_work);
314 }
315
316 static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
317 {
318 return tf->frame.size ? : TBNET_FRAME_SIZE;
319 }
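/*
 * Note on the "?:" above (GCC's conditional with omitted middle
 * operand): the frame size is used as-is when non-zero and defaults
 * to TBNET_FRAME_SIZE otherwise. On the Tx side only the last frame
 * of a packet gets an explicit size in tbnet_start_xmit(); the full
 * frames before it keep size 0 and are treated as TBNET_FRAME_SIZE.
 */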
320
321 static void tbnet_free_buffers(struct tbnet_ring *ring)
322 {
323 unsigned int i;
324
325 for (i = 0; i < TBNET_RING_SIZE; i++) {
326 struct device *dma_dev = tb_ring_dma_device(ring->ring);
327 struct tbnet_frame *tf = &ring->frames[i];
328 enum dma_data_direction dir;
329 unsigned int order;
330 size_t size;
331
332 if (!tf->page)
333 continue;
334
335 if (ring->ring->is_tx) {
336 dir = DMA_TO_DEVICE;
337 order = 0;
338 size = TBNET_FRAME_SIZE;
339 } else {
340 dir = DMA_FROM_DEVICE;
341 order = TBNET_RX_PAGE_ORDER;
342 size = TBNET_RX_PAGE_SIZE;
343 }
344
345 if (tf->frame.buffer_phy)
346 dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
347 dir);
348
349 __free_pages(tf->page, order);
350 tf->page = NULL;
351 }
352
353 ring->cons = 0;
354 ring->prod = 0;
355 }
356
357 static void tbnet_tear_down(struct tbnet *net, bool send_logout)
358 {
359 netif_carrier_off(net->dev);
360 netif_stop_queue(net->dev);
361
362 stop_login(net);
363
364 mutex_lock(&net->connection_lock);
365
366 if (net->login_sent && net->login_received) {
367 int retries = TBNET_LOGOUT_RETRIES;
368
369 while (send_logout && retries-- > 0) {
370 int ret = tbnet_logout_request(net);
371 if (ret != -ETIMEDOUT)
372 break;
373 }
374
375 tb_ring_stop(net->rx_ring.ring);
376 tb_ring_stop(net->tx_ring.ring);
377 tbnet_free_buffers(&net->rx_ring);
378 tbnet_free_buffers(&net->tx_ring);
379
380 if (tb_xdomain_disable_paths(net->xd))
381 netdev_warn(net->dev, "failed to disable DMA paths\n");
382 }
383
384 net->login_retries = 0;
385 net->login_sent = false;
386 net->login_received = false;
387
388 mutex_unlock(&net->connection_lock);
389 }
390
391 static int tbnet_handle_packet(const void *buf, size_t size, void *data)
392 {
393 const struct thunderbolt_ip_login *pkg = buf;
394 struct tbnet *net = data;
395 u32 command_id;
396 int ret = 0;
397 u32 sequence;
398 u64 route;
399
400 /* Make sure the packet is well formed and addressed to us */
401 if (size < sizeof(struct thunderbolt_ip_header))
402 return 0;
403 if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
404 return 0;
405 if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
406 return 0;
407
408 route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
409 route &= ~BIT_ULL(63);
410 if (route != net->xd->route)
411 return 0;
412
413 sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
414 sequence >>= TBIP_HDR_SN_SHIFT;
415 command_id = pkg->hdr.command_id;
416
417 switch (pkg->hdr.type) {
418 case TBIP_LOGIN:
419 if (!netif_running(net->dev))
420 break;
421
422 ret = tbnet_login_response(net, route, sequence,
423 pkg->hdr.command_id);
424 if (!ret) {
425 mutex_lock(&net->connection_lock);
426 net->login_received = true;
427 net->transmit_path = pkg->transmit_path;
428
429 /* If we have not managed to send our own login yet, or we
430  * already ran out of retries, restart the login right away.
431  */
432
433 if (net->login_retries >= TBNET_LOGIN_RETRIES ||
434 !net->login_sent) {
435 net->login_retries = 0;
436 queue_delayed_work(system_long_wq,
437 &net->login_work, 0);
438 }
439 mutex_unlock(&net->connection_lock);
440
441 queue_work(system_long_wq, &net->connected_work);
442 }
443 break;
444
445 case TBIP_LOGOUT:
446 ret = tbnet_logout_response(net, route, sequence, command_id);
447 if (!ret)
448 queue_work(system_long_wq, &net->disconnect_work);
449 break;
450
451 default:
452 return 0;
453 }
454
455 if (ret)
456 netdev_warn(net->dev, "failed to send ThunderboltIP response\n");
457
458 return 1;
459 }
460
461 static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
462 {
463 return ring->prod - ring->cons;
464 }
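/*
 * The software rings use free-running producer/consumer counters, so
 * the difference above needs no explicit wrap-around handling; the
 * actual slot is always selected with "index & (TBNET_RING_SIZE - 1)",
 * which requires TBNET_RING_SIZE to be a power of two. For the Tx ring
 * "available" means free buffers (prod is advanced in the completion
 * callback), for the Rx ring it means buffers currently handed to the
 * hardware.
 */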
465
466 static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
467 {
468 struct tbnet_ring *ring = &net->rx_ring;
469 int ret;
470
471 while (nbuffers--) {
472 struct device *dma_dev = tb_ring_dma_device(ring->ring);
473 unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
474 struct tbnet_frame *tf = &ring->frames[index];
475 dma_addr_t dma_addr;
476
477 if (tf->page)
478 break;
479
480 /* Allocate a page large enough to hold a full frame plus the
481  * skb_shared_info that build_skb() places at the end of the
482  * buffer.
483  */
484 tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
485 if (!tf->page) {
486 ret = -ENOMEM;
487 goto err_free;
488 }
489
490 dma_addr = dma_map_page(dma_dev, tf->page, 0,
491 TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
492 if (dma_mapping_error(dma_dev, dma_addr)) {
493 ret = -ENOMEM;
494 goto err_free;
495 }
496
497 tf->frame.buffer_phy = dma_addr;
498 tf->dev = net->dev;
499
500 tb_ring_rx(ring->ring, &tf->frame);
501
502 ring->prod++;
503 }
504
505 return 0;
506
507 err_free:
508 tbnet_free_buffers(ring);
509 return ret;
510 }
511
512 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
513 {
514 struct tbnet_ring *ring = &net->tx_ring;
515 struct device *dma_dev = tb_ring_dma_device(ring->ring);
516 struct tbnet_frame *tf;
517 unsigned int index;
518
519 if (!tbnet_available_buffers(ring))
520 return NULL;
521
522 index = ring->cons++ & (TBNET_RING_SIZE - 1);
523
524 tf = &ring->frames[index];
525 tf->frame.size = 0;
526
527 dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
528 tbnet_frame_size(tf), DMA_TO_DEVICE);
529
530 return tf;
531 }
532
533 static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
534 bool canceled)
535 {
536 struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
537 struct tbnet *net = netdev_priv(tf->dev);
538
539 /* The frame was transmitted, return the buffer to the ring */
540 net->tx_ring.prod++;
541
542 if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
543 netif_wake_queue(net->dev);
544 }
545
546 static int tbnet_alloc_tx_buffers(struct tbnet *net)
547 {
548 struct tbnet_ring *ring = &net->tx_ring;
549 struct device *dma_dev = tb_ring_dma_device(ring->ring);
550 unsigned int i;
551
552 for (i = 0; i < TBNET_RING_SIZE; i++) {
553 struct tbnet_frame *tf = &ring->frames[i];
554 dma_addr_t dma_addr;
555
556 tf->page = alloc_page(GFP_KERNEL);
557 if (!tf->page) {
558 tbnet_free_buffers(ring);
559 return -ENOMEM;
560 }
561
562 dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
563 DMA_TO_DEVICE);
564 if (dma_mapping_error(dma_dev, dma_addr)) {
565 __free_page(tf->page);
566 tf->page = NULL;
567 tbnet_free_buffers(ring);
568 return -ENOMEM;
569 }
570
571 tf->dev = net->dev;
572 tf->frame.buffer_phy = dma_addr;
573 tf->frame.callback = tbnet_tx_callback;
574 tf->frame.sof = TBIP_PDF_FRAME_START;
575 tf->frame.eof = TBIP_PDF_FRAME_END;
576 }
577
578 ring->cons = 0;
579 ring->prod = TBNET_RING_SIZE - 1;
580
581 return 0;
582 }
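/*
 * Note that all TBNET_RING_SIZE Tx buffers are allocated and mapped,
 * but prod is initialized so that tbnet_available_buffers() reports
 * TBNET_RING_SIZE - 1 free buffers, keeping one slot in reserve
 * (presumably so that a full ring can be told apart from an empty
 * one).
 */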
583
584 static void tbnet_connected_work(struct work_struct *work)
585 {
586 struct tbnet *net = container_of(work, typeof(*net), connected_work);
587 bool connected;
588 int ret;
589
590 if (netif_carrier_ok(net->dev))
591 return;
592
593 mutex_lock(&net->connection_lock);
594 connected = net->login_sent && net->login_received;
595 mutex_unlock(&net->connection_lock);
596
597 if (!connected)
598 return;
599
600 /* Both logins are done now, so enable the high-speed DMA
601  * paths and start the netdev queue.
602  */
603 ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
604 net->rx_ring.ring->hop,
605 net->transmit_path,
606 net->tx_ring.ring->hop);
607 if (ret) {
608 netdev_err(net->dev, "failed to enable DMA paths\n");
609 return;
610 }
611
612 tb_ring_start(net->tx_ring.ring);
613 tb_ring_start(net->rx_ring.ring);
614
615 ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
616 if (ret)
617 goto err_stop_rings;
618
619 ret = tbnet_alloc_tx_buffers(net);
620 if (ret)
621 goto err_free_rx_buffers;
622
623 netif_carrier_on(net->dev);
624 netif_start_queue(net->dev);
625 return;
626
627 err_free_rx_buffers:
628 tbnet_free_buffers(&net->rx_ring);
629 err_stop_rings:
630 tb_ring_stop(net->rx_ring.ring);
631 tb_ring_stop(net->tx_ring.ring);
632 }
633
634 static void tbnet_login_work(struct work_struct *work)
635 {
636 struct tbnet *net = container_of(work, typeof(*net), login_work.work);
637 unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
638 int ret;
639
640 if (netif_carrier_ok(net->dev))
641 return;
642
643 ret = tbnet_login_request(net, net->login_retries % 4);
644 if (ret) {
645 if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
646 queue_delayed_work(system_long_wq, &net->login_work,
647 delay);
648 } else {
649 netdev_info(net->dev, "ThunderboltIP login timed out\n");
650 }
651 } else {
652 net->login_retries = 0;
653
654 mutex_lock(&net->connection_lock);
655 net->login_sent = true;
656 mutex_unlock(&net->connection_lock);
657
658 queue_work(system_long_wq, &net->connected_work);
659 }
660 }
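/*
 * The sequence number passed to tbnet_login_request() cycles through
 * login_retries % 4 because the header only has room for a 2-bit
 * sequence number (TBIP_HDR_SN_MASK covers bits 28:27), so the valid
 * values are 0-3.
 */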
661
662 static void tbnet_disconnect_work(struct work_struct *work)
663 {
664 struct tbnet *net = container_of(work, typeof(*net), disconnect_work);
665
666 tbnet_tear_down(net, false);
667 }
668
669 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
670 const struct thunderbolt_ip_frame_header *hdr)
671 {
672 u32 frame_id, frame_count, frame_size, frame_index;
673 unsigned int size;
674
675 if (tf->frame.flags & RING_DESC_CRC_ERROR) {
676 net->stats.rx_crc_errors++;
677 return false;
678 } else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
679 net->stats.rx_over_errors++;
680 return false;
681 }
682
683 /* A frame must carry data in addition to the header */
684 size = tbnet_frame_size(tf);
685 if (size <= sizeof(*hdr)) {
686 net->stats.rx_length_errors++;
687 return false;
688 }
689
690 frame_count = le32_to_cpu(hdr->frame_count);
691 frame_size = le32_to_cpu(hdr->frame_size);
692 frame_index = le16_to_cpu(hdr->frame_index);
693 frame_id = le16_to_cpu(hdr->frame_id);
694
695 if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
696 net->stats.rx_length_errors++;
697 return false;
698 }
699
700 /* If we are in the middle of a packet, validate this frame
701  * against the header of the packet's first frame.
702  */
703 if (net->skb && net->rx_hdr.frame_count) {
704 /* The frame count must stay the same across the whole packet */
705 if (frame_count != net->rx_hdr.frame_count) {
706 net->stats.rx_length_errors++;
707 return false;
708 }
709
710
711 /* The frame index must increase by one and the frame ID must
712  * stay the same within a packet. */
713 if (frame_index != net->rx_hdr.frame_index + 1 ||
714 frame_id != net->rx_hdr.frame_id) {
715 net->stats.rx_missed_errors++;
716 return false;
717 }
718
719 if (net->skb->len + frame_size > TBNET_MAX_MTU) {
720 net->stats.rx_length_errors++;
721 return false;
722 }
723
724 return true;
725 }
726
727 /* This is the first frame of a new packet */
728 if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
729 net->stats.rx_length_errors++;
730 return false;
731 }
732 if (frame_index != 0) {
733 net->stats.rx_missed_errors++;
734 return false;
735 }
736
737 return true;
738 }
739
740 static int tbnet_poll(struct napi_struct *napi, int budget)
741 {
742 struct tbnet *net = container_of(napi, struct tbnet, napi);
743 unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
744 struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
745 unsigned int rx_packets = 0;
746
747 while (rx_packets < budget) {
748 const struct thunderbolt_ip_frame_header *hdr;
749 unsigned int hdr_size = sizeof(*hdr);
750 struct sk_buff *skb = NULL;
751 struct ring_frame *frame;
752 struct tbnet_frame *tf;
753 struct page *page;
754 bool last = true;
755 u32 frame_size;
756
757 /* Once enough buffers have been consumed, allocate new ones
758  * and give them back to the ring so that it does not run
759  * empty while multi-frame packets are being assembled.
760  */
761 if (cleaned_count >= MAX_SKB_FRAGS) {
762 tbnet_alloc_rx_buffers(net, cleaned_count);
763 cleaned_count = 0;
764 }
765
766 frame = tb_ring_poll(net->rx_ring.ring);
767 if (!frame)
768 break;
769
770 dma_unmap_page(dma_dev, frame->buffer_phy,
771 TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
772
773 tf = container_of(frame, typeof(*tf), frame);
774
775 page = tf->page;
776 tf->page = NULL;
777 net->rx_ring.cons++;
778 cleaned_count++;
779
780 hdr = page_address(page);
781 if (!tbnet_check_frame(net, tf, hdr)) {
782 __free_pages(page, TBNET_RX_PAGE_ORDER);
783 dev_kfree_skb_any(net->skb);
784 net->skb = NULL;
785 continue;
786 }
787
788 frame_size = le32_to_cpu(hdr->frame_size);
789
790 skb = net->skb;
791 if (!skb) {
792 skb = build_skb(page_address(page),
793 TBNET_RX_PAGE_SIZE);
794 if (!skb) {
795 __free_pages(page, TBNET_RX_PAGE_ORDER);
796 net->stats.rx_errors++;
797 break;
798 }
799
800 skb_reserve(skb, hdr_size);
801 skb_put(skb, frame_size);
802
803 net->skb = skb;
804 } else {
805 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
806 page, hdr_size, frame_size,
807 TBNET_RX_PAGE_SIZE - hdr_size);
808 }
809
810 net->rx_hdr.frame_size = frame_size;
811 net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
812 net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
813 net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
814 last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;
815
816 rx_packets++;
817 net->stats.rx_bytes += frame_size;
818
819 if (last) {
820 skb->protocol = eth_type_trans(skb, net->dev);
821 napi_gro_receive(&net->napi, skb);
822 net->skb = NULL;
823 }
824 }
825
826 net->stats.rx_packets += rx_packets;
827
828 if (cleaned_count)
829 tbnet_alloc_rx_buffers(net, cleaned_count);
830
831 if (rx_packets >= budget)
832 return budget;
833
834 napi_complete_done(napi, rx_packets);
835
836 tb_ring_poll_complete(net->rx_ring.ring);
837
838 return rx_packets;
839 }
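/*
 * Reassembly in the poll loop above, in short: the first frame of a
 * packet is turned into an skb with build_skb() (reusing the Rx page
 * as the skb head, with the 12-byte frame header reserved), and every
 * following frame of the same packet is attached as a page fragment
 * with skb_add_rx_frag(). Once frame_index reaches frame_count - 1,
 * the completed skb is handed to the stack through napi_gro_receive().
 */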
840
841 static void tbnet_start_poll(void *data)
842 {
843 struct tbnet *net = data;
844
845 napi_schedule(&net->napi);
846 }
847
848 static int tbnet_open(struct net_device *dev)
849 {
850 struct tbnet *net = netdev_priv(dev);
851 struct tb_xdomain *xd = net->xd;
852 u16 sof_mask, eof_mask;
853 struct tb_ring *ring;
854
855 netif_carrier_off(dev);
856
857 ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
858 RING_FLAG_FRAME);
859 if (!ring) {
860 netdev_err(dev, "failed to allocate Tx ring\n");
861 return -ENOMEM;
862 }
863 net->tx_ring.ring = ring;
864
865 sof_mask = BIT(TBIP_PDF_FRAME_START);
866 eof_mask = BIT(TBIP_PDF_FRAME_END);
867
868 ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
869 RING_FLAG_FRAME | RING_FLAG_E2E, sof_mask,
870 eof_mask, tbnet_start_poll, net);
871 if (!ring) {
872 netdev_err(dev, "failed to allocate Rx ring\n");
873 tb_ring_free(net->tx_ring.ring);
874 net->tx_ring.ring = NULL;
875 return -ENOMEM;
876 }
877 net->rx_ring.ring = ring;
878
879 napi_enable(&net->napi);
880 start_login(net);
881
882 return 0;
883 }
884
885 static int tbnet_stop(struct net_device *dev)
886 {
887 struct tbnet *net = netdev_priv(dev);
888
889 napi_disable(&net->napi);
890
891 cancel_work_sync(&net->disconnect_work);
892 tbnet_tear_down(net, true);
893
894 tb_ring_free(net->rx_ring.ring);
895 net->rx_ring.ring = NULL;
896 tb_ring_free(net->tx_ring.ring);
897 net->tx_ring.ring = NULL;
898
899 return 0;
900 }
901
902 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
903 struct tbnet_frame **frames, u32 frame_count)
904 {
905 struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
906 struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
907 __wsum wsum = htonl(skb->len - skb_transport_offset(skb));
908 unsigned int i, len, offset = skb_transport_offset(skb);
909 __be16 protocol = skb->protocol;
910 void *data = skb->data;
911 void *dest = hdr + 1;
912 __sum16 *tucso;
913
914 if (skb->ip_summed != CHECKSUM_PARTIAL) {
915 /* No checksum offloading requested: just fill in the frame
916  * count and sync the frames for DMA.
917  */
918 for (i = 0; i < frame_count; i++) {
919 hdr = page_address(frames[i]->page);
920 hdr->frame_count = cpu_to_le32(frame_count);
921 dma_sync_single_for_device(dma_dev,
922 frames[i]->frame.buffer_phy,
923 tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
924 }
925
926 return true;
927 }
928
929 if (protocol == htons(ETH_P_8021Q)) {
930 struct vlan_hdr *vhdr, vh;
931
932 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
933 if (!vhdr)
934 return false;
935
936 protocol = vhdr->h_vlan_encapsulated_proto;
937 }
938
939 /* The packet data has already been copied into the frames, so
940  * the checksum is computed in place: seed the L4 checksum field
941  * with the pseudo-header checksum here and fold the payload
942  * into it below.
943  */
944 if (protocol == htons(ETH_P_IP)) {
945 __sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);
946
947 *ipcso = 0;
948 *ipcso = ip_fast_csum(dest + skb_network_offset(skb),
949 ip_hdr(skb)->ihl);
950
951 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
952 tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
953 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
954 tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
955 else
956 return false;
957
958 *tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
959 ip_hdr(skb)->daddr, 0,
960 ip_hdr(skb)->protocol, 0);
961 } else if (skb_is_gso_v6(skb)) {
962 tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
963 *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
964 &ipv6_hdr(skb)->daddr, 0,
965 IPPROTO_TCP, 0);
966 return false;
967 } else if (protocol == htons(ETH_P_IPV6)) {
968 tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
969 *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
970 &ipv6_hdr(skb)->daddr, 0,
971 ipv6_hdr(skb)->nexthdr, 0);
972 } else {
973 return false;
974 }
975
976 /* Walk the frames, folding the payload of each one into the
977  * checksum and filling in the frame count.
978  */
979 for (i = 0; i < frame_count; i++) {
980 hdr = page_address(frames[i]->page);
981 dest = (void *)(hdr + 1) + offset;
982 len = le32_to_cpu(hdr->frame_size) - offset;
983 wsum = csum_partial(dest, len, wsum);
984 hdr->frame_count = cpu_to_le32(frame_count);
985
986 offset = 0;
987 }
988
989 *tucso = csum_fold(wsum);
990
991
992 /* Checksums are in place now, sync the frames for DMA */
993
994 for (i = 0; i < frame_count; i++) {
995 dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
996 tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
997 }
998
999 return true;
1000 }
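/*
 * Checksum handling above, in short: the frames already contain a
 * copy of the packet, so the checksum is computed in place. The L4
 * checksum field is first seeded with (the complement of) the
 * pseudo-header checksum, csum_partial() is then accumulated over the
 * transport part of every frame, and csum_fold() of the result is
 * written back before all frames are synced for DMA.
 */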
1001
1002 static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
1003 unsigned int *len)
1004 {
1005 const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
1006
1007 *len = skb_frag_size(frag);
1008 return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
1009 }
1010
1011 static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
1012 struct net_device *dev)
1013 {
1014 struct tbnet *net = netdev_priv(dev);
1015 struct tbnet_frame *frames[MAX_SKB_FRAGS];
1016 u16 frame_id = atomic_read(&net->frame_id);
1017 struct thunderbolt_ip_frame_header *hdr;
1018 unsigned int len = skb_headlen(skb);
1019 unsigned int data_len = skb->len;
1020 unsigned int nframes, i;
1021 unsigned int frag = 0;
1022 void *src = skb->data;
1023 u32 frame_index = 0;
1024 bool unmap = false;
1025 void *dest;
1026
1027 nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
1028 if (tbnet_available_buffers(&net->tx_ring) < nframes) {
1029 netif_stop_queue(net->dev);
1030 return NETDEV_TX_BUSY;
1031 }
1032
1033 frames[frame_index] = tbnet_get_tx_buffer(net);
1034 if (!frames[frame_index])
1035 goto err_drop;
1036
1037 hdr = page_address(frames[frame_index]->page);
1038 dest = hdr + 1;
1039
1040 /* The packet needs more than one frame: fill full-size frames first */
1041 while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
1042 unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;
1043
1044 hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
1045 hdr->frame_index = cpu_to_le16(frame_index);
1046 hdr->frame_id = cpu_to_le16(frame_id);
1047
1048 do {
1049 if (len > size_left) {
1050 /* The current source buffer does not fit into this
1051  * frame, so copy what fits and continue with the same
1052  * buffer in the next frame.
1053  */
1054 memcpy(dest, src, size_left);
1055 len -= size_left;
1056 dest += size_left;
1057 src += size_left;
1058 break;
1059 }
1060
1061 memcpy(dest, src, len);
1062 size_left -= len;
1063 dest += len;
1064
1065 if (unmap) {
1066 kunmap_atomic(src);
1067 unmap = false;
1068 }
1069
1070 /* Move on to the next skb fragment, if any */
1071 if (frag < skb_shinfo(skb)->nr_frags) {
1072
1073 src = tbnet_kmap_frag(skb, frag++, &len);
1074 unmap = true;
1075 } else if (unlikely(size_left > 0)) {
1076 goto err_drop;
1077 }
1078 } while (size_left > 0);
1079
1080 data_len -= TBNET_MAX_PAYLOAD_SIZE;
1081 frame_index++;
1082
1083 frames[frame_index] = tbnet_get_tx_buffer(net);
1084 if (!frames[frame_index])
1085 goto err_drop;
1086
1087 hdr = page_address(frames[frame_index]->page);
1088 dest = hdr + 1;
1089 }
1090
1091 hdr->frame_size = cpu_to_le32(data_len);
1092 hdr->frame_index = cpu_to_le16(frame_index);
1093 hdr->frame_id = cpu_to_le16(frame_id);
1094
1095 frames[frame_index]->frame.size = data_len + sizeof(*hdr);
1096
1097 /* Copy the remaining data into the last frame */
1098 while (len < data_len) {
1099 memcpy(dest, src, len);
1100 data_len -= len;
1101 dest += len;
1102
1103 if (unmap) {
1104 kunmap_atomic(src);
1105 unmap = false;
1106 }
1107
1108 if (frag < skb_shinfo(skb)->nr_frags) {
1109 src = tbnet_kmap_frag(skb, frag++, &len);
1110 unmap = true;
1111 } else if (unlikely(data_len > 0)) {
1112 goto err_drop;
1113 }
1114 }
1115
1116 memcpy(dest, src, data_len);
1117
1118 if (unmap)
1119 kunmap_atomic(src);
1120
1121 if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
1122 goto err_drop;
1123
1124 for (i = 0; i < frame_index + 1; i++)
1125 tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);
1126
1127 if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
1128 atomic_inc(&net->frame_id);
1129
1130 net->stats.tx_packets++;
1131 net->stats.tx_bytes += skb->len;
1132
1133 dev_consume_skb_any(skb);
1134
1135 return NETDEV_TX_OK;
1136
1137 err_drop:
1138 /* Give the consumed Tx buffers back to the ring */
1139 net->tx_ring.cons -= frame_index;
1140
1141 dev_kfree_skb_any(skb);
1142 net->stats.tx_errors++;
1143
1144 return NETDEV_TX_OK;
1145 }
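/*
 * Transmit path above, in short: the skb (head plus page fragments,
 * mapped one at a time with tbnet_kmap_frag()) is copied into
 * pre-allocated 4 kB Tx buffers, each prefixed with a
 * thunderbolt_ip_frame_header. Only the last frame gets an explicit
 * frame.size; if not enough Tx buffers are free, the queue is stopped
 * and NETDEV_TX_BUSY is returned so the packet is retried later.
 */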
1146
1147 static void tbnet_get_stats64(struct net_device *dev,
1148 struct rtnl_link_stats64 *stats)
1149 {
1150 struct tbnet *net = netdev_priv(dev);
1151
1152 stats->tx_packets = net->stats.tx_packets;
1153 stats->rx_packets = net->stats.rx_packets;
1154 stats->tx_bytes = net->stats.tx_bytes;
1155 stats->rx_bytes = net->stats.rx_bytes;
1156 stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
1157 net->stats.rx_over_errors + net->stats.rx_crc_errors +
1158 net->stats.rx_missed_errors;
1159 stats->tx_errors = net->stats.tx_errors;
1160 stats->rx_length_errors = net->stats.rx_length_errors;
1161 stats->rx_over_errors = net->stats.rx_over_errors;
1162 stats->rx_crc_errors = net->stats.rx_crc_errors;
1163 stats->rx_missed_errors = net->stats.rx_missed_errors;
1164 }
1165
1166 static const struct net_device_ops tbnet_netdev_ops = {
1167 .ndo_open = tbnet_open,
1168 .ndo_stop = tbnet_stop,
1169 .ndo_start_xmit = tbnet_start_xmit,
1170 .ndo_get_stats64 = tbnet_get_stats64,
1171 };
1172
1173 static void tbnet_generate_mac(struct net_device *dev)
1174 {
1175 const struct tbnet *net = netdev_priv(dev);
1176 const struct tb_xdomain *xd = net->xd;
1177 u8 phy_port;
1178 u32 hash;
1179
1180 phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));
1181
1182 /* Unicast and locally administered MAC */
1183 dev->dev_addr[0] = phy_port << 4 | 0x02;
1184 hash = jhash2((u32 *)xd->local_uuid, 4, 0);
1185 memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
1186 hash = jhash2((u32 *)xd->local_uuid, 4, hash);
1187 dev->dev_addr[5] = hash & 0xff;
1188 }
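/*
 * The generated address is a locally administered unicast MAC: bit 1
 * of the first octet is set (0x02) and the multicast bit is clear,
 * the physical Thunderbolt port ends up in the high nibble, and the
 * remaining five octets are derived from a Jenkins hash of the local
 * XDomain UUID. For example, phy_port 1 gives a first octet of 0x12.
 */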
1189
1190 static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
1191 {
1192 struct tb_xdomain *xd = tb_service_parent(svc);
1193 struct net_device *dev;
1194 struct tbnet *net;
1195 int ret;
1196
1197 dev = alloc_etherdev(sizeof(*net));
1198 if (!dev)
1199 return -ENOMEM;
1200
1201 SET_NETDEV_DEV(dev, &svc->dev);
1202
1203 net = netdev_priv(dev);
1204 INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
1205 INIT_WORK(&net->connected_work, tbnet_connected_work);
1206 INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
1207 mutex_init(&net->connection_lock);
1208 atomic_set(&net->command_id, 0);
1209 atomic_set(&net->frame_id, 0);
1210 net->svc = svc;
1211 net->dev = dev;
1212 net->xd = xd;
1213
1214 tbnet_generate_mac(dev);
1215
1216 strcpy(dev->name, "thunderbolt%d");
1217 dev->netdev_ops = &tbnet_netdev_ops;
1218
1219
1220 /* ThunderboltIP advertises TSO and checksum offload. Instead of
1221  * segmenting TSO packets, tbnet_start_xmit() splits each packet
1222  * into Thunderbolt frames (at most TBNET_MAX_PAYLOAD_SIZE bytes
1223  * of payload per frame) and tbnet_xmit_csum_and_map() computes
1224  * the checksum over the whole packet before the frames are
1225  * handed to the Tx ring.
1226  *
1227  * On receive the frames are reassembled back into one network
1228  * packet in tbnet_poll() and passed up through GRO.
1229  */
1230
1231
1232 dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
1233 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1234 dev->features = dev->hw_features | NETIF_F_HIGHDMA;
1235 dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);
1236
1237 netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);
1238
1239 /* MTU range: 68 - 65522 */
1240 dev->min_mtu = ETH_MIN_MTU;
1241 dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;
1242
1243 net->handler.uuid = &tbnet_svc_uuid;
1244 net->handler.callback = tbnet_handle_packet;
1245 net->handler.data = net;
1246 tb_register_protocol_handler(&net->handler);
1247
1248 tb_service_set_drvdata(svc, net);
1249
1250 ret = register_netdev(dev);
1251 if (ret) {
1252 tb_unregister_protocol_handler(&net->handler);
1253 free_netdev(dev);
1254 return ret;
1255 }
1256
1257 return 0;
1258 }
1259
1260 static void tbnet_remove(struct tb_service *svc)
1261 {
1262 struct tbnet *net = tb_service_get_drvdata(svc);
1263
1264 unregister_netdev(net->dev);
1265 tb_unregister_protocol_handler(&net->handler);
1266 free_netdev(net->dev);
1267 }
1268
1269 static void tbnet_shutdown(struct tb_service *svc)
1270 {
1271 tbnet_tear_down(tb_service_get_drvdata(svc), true);
1272 }
1273
1274 static int __maybe_unused tbnet_suspend(struct device *dev)
1275 {
1276 struct tb_service *svc = tb_to_service(dev);
1277 struct tbnet *net = tb_service_get_drvdata(svc);
1278
1279 stop_login(net);
1280 if (netif_running(net->dev)) {
1281 netif_device_detach(net->dev);
1282 tbnet_tear_down(net, true);
1283 }
1284
1285 tb_unregister_protocol_handler(&net->handler);
1286 return 0;
1287 }
1288
1289 static int __maybe_unused tbnet_resume(struct device *dev)
1290 {
1291 struct tb_service *svc = tb_to_service(dev);
1292 struct tbnet *net = tb_service_get_drvdata(svc);
1293
1294 tb_register_protocol_handler(&net->handler);
1295
1296 netif_carrier_off(net->dev);
1297 if (netif_running(net->dev)) {
1298 netif_device_attach(net->dev);
1299 start_login(net);
1300 }
1301
1302 return 0;
1303 }
1304
1305 static const struct dev_pm_ops tbnet_pm_ops = {
1306 SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
1307 };
1308
1309 static const struct tb_service_id tbnet_ids[] = {
1310 { TB_SERVICE("network", 1) },
1311 { },
1312 };
1313 MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);
1314
1315 static struct tb_service_driver tbnet_driver = {
1316 .driver = {
1317 .owner = THIS_MODULE,
1318 .name = "thunderbolt-net",
1319 .pm = &tbnet_pm_ops,
1320 },
1321 .probe = tbnet_probe,
1322 .remove = tbnet_remove,
1323 .shutdown = tbnet_shutdown,
1324 .id_table = tbnet_ids,
1325 };
1326
1327 static int __init tbnet_init(void)
1328 {
1329 int ret;
1330
1331 tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
1332 if (!tbnet_dir)
1333 return -ENOMEM;
1334
1335 tb_property_add_immediate(tbnet_dir, "prtcid", 1);
1336 tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
1337 tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
1338 tb_property_add_immediate(tbnet_dir, "prtcstns",
1339 TBNET_MATCH_FRAGS_ID);
1340
1341 ret = tb_register_property_dir("network", tbnet_dir);
1342 if (ret) {
1343 tb_property_free_dir(tbnet_dir);
1344 return ret;
1345 }
1346
1347 return tb_register_service_driver(&tbnet_driver);
1348 }
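/*
 * The property directory registered above is what the remote host
 * sees in the XDomain "network" directory: prtcid, prtcvers and
 * prtcrevs are all 1, and prtcstns advertises TBNET_MATCH_FRAGS_ID.
 * The Tx path only bumps frame_id per packet when the remote service
 * advertises that same flag (see tbnet_start_xmit()).
 */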
1349 module_init(tbnet_init);
1350
1351 static void __exit tbnet_exit(void)
1352 {
1353 tb_unregister_service_driver(&tbnet_driver);
1354 tb_unregister_property_dir("network", tbnet_dir);
1355 tb_property_free_dir(tbnet_dir);
1356 }
1357 module_exit(tbnet_exit);
1358
1359 MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
1360 MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
1361 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
1362 MODULE_DESCRIPTION("Thunderbolt network driver");
1363 MODULE_LICENSE("GPL v2");