This source file includes the following definitions:
- __nvm_get_auth_status
- nvm_get_auth_status
- nvm_set_auth_status
- nvm_clear_auth_status
- nvm_validate_and_write
- nvm_authenticate_host
- nvm_authenticate_device
- tb_switch_nvm_read
- tb_switch_nvm_no_read
- tb_switch_nvm_write
- register_nvmem
- tb_switch_nvm_add
- tb_switch_nvm_remove
- tb_port_type
- tb_dump_port
- tb_port_state
- tb_wait_for_port
- tb_port_add_nfc_credits
- tb_port_set_initial_credits
- tb_port_clear_counter
- tb_init_port
- tb_port_alloc_hopid
- tb_port_alloc_in_hopid
- tb_port_alloc_out_hopid
- tb_port_release_in_hopid
- tb_port_release_out_hopid
- tb_next_port_on_path
- tb_port_is_enabled
- tb_pci_port_is_enabled
- tb_pci_port_enable
- tb_dp_port_hpd_is_active
- tb_dp_port_hpd_clear
- tb_dp_port_set_hops
- tb_dp_port_is_enabled
- tb_dp_port_enable
- tb_dump_switch
- tb_switch_reset
- tb_plug_events_active
- authorized_show
- tb_switch_set_authorized
- authorized_store
- boot_show
- device_show
- device_name_show
- key_show
- key_store
- nvm_authenticate_start
- nvm_authenticate_complete
- nvm_authenticate_show
- nvm_authenticate_store
- nvm_version_show
- vendor_show
- vendor_name_show
- unique_id_show
- switch_attr_is_visible
- tb_switch_release
- tb_switch_runtime_suspend
- tb_switch_runtime_resume
- tb_switch_get_generation
- tb_switch_alloc
- tb_switch_alloc_safe_mode
- tb_switch_configure
- tb_switch_set_uuid
- tb_switch_add_dma_port
- tb_switch_add
- tb_switch_remove
- tb_sw_set_unplugged
- tb_switch_resume
- tb_switch_suspend
- tb_switch_match
- tb_switch_find_by_link_depth
- tb_switch_find_by_uuid
- tb_switch_find_by_route
- tb_switch_exit
/* Thunderbolt driver - switch/port utility functions */
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/nvmem-provider.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/sched/signal.h>
14 #include <linux/sizes.h>
15 #include <linux/slab.h>
16 #include <linux/vmalloc.h>
17
18 #include "tb.h"
19
/* Switch NVM support */
22 #define NVM_DEVID 0x05
23 #define NVM_VERSION 0x08
24 #define NVM_CSS 0x10
25 #define NVM_FLASH_SIZE 0x45
26
27 #define NVM_MIN_SIZE SZ_32K
28 #define NVM_MAX_SIZE SZ_512K
29
30 static DEFINE_IDA(nvm_ida);
31
32 struct nvm_auth_status {
33 struct list_head list;
34 uuid_t uuid;
35 u32 status;
36 };
37
/*
 * NVM authentication status is kept in this global list, keyed by switch
 * UUID, so that it is still available after the switch is power cycled as
 * part of the authentication flow.
 */
43 static LIST_HEAD(nvm_auth_status_cache);
44 static DEFINE_MUTEX(nvm_auth_status_lock);
45
46 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
47 {
48 struct nvm_auth_status *st;
49
50 list_for_each_entry(st, &nvm_auth_status_cache, list) {
51 if (uuid_equal(&st->uuid, sw->uuid))
52 return st;
53 }
54
55 return NULL;
56 }
57
58 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
59 {
60 struct nvm_auth_status *st;
61
62 mutex_lock(&nvm_auth_status_lock);
63 st = __nvm_get_auth_status(sw);
64 mutex_unlock(&nvm_auth_status_lock);
65
66 *status = st ? st->status : 0;
67 }
68
69 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
70 {
71 struct nvm_auth_status *st;
72
73 if (WARN_ON(!sw->uuid))
74 return;
75
76 mutex_lock(&nvm_auth_status_lock);
77 st = __nvm_get_auth_status(sw);
78
79 if (!st) {
80 st = kzalloc(sizeof(*st), GFP_KERNEL);
81 if (!st)
82 goto unlock;
83
84 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
85 INIT_LIST_HEAD(&st->list);
86 list_add_tail(&st->list, &nvm_auth_status_cache);
87 }
88
89 st->status = status;
90 unlock:
91 mutex_unlock(&nvm_auth_status_lock);
92 }
93
94 static void nvm_clear_auth_status(const struct tb_switch *sw)
95 {
96 struct nvm_auth_status *st;
97
98 mutex_lock(&nvm_auth_status_lock);
99 st = __nvm_get_auth_status(sw);
100 if (st) {
101 list_del(&st->list);
102 kfree(st);
103 }
104 mutex_unlock(&nvm_auth_status_lock);
105 }
106
107 static int nvm_validate_and_write(struct tb_switch *sw)
108 {
109 unsigned int image_size, hdr_size;
110 const u8 *buf = sw->nvm->buf;
111 u16 ds_size;
112 int ret;
113
114 if (!buf)
115 return -EINVAL;
116
117 image_size = sw->nvm->buf_data_size;
118 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
119 return -EINVAL;
120
/*
 * Image header (FARB) pointer must point inside the image and must at
 * least leave room for the digital section fields read below.
 */
125 hdr_size = (*(u32 *)buf) & 0xffffff;
126 if (hdr_size + NVM_DEVID + 2 >= image_size)
127 return -EINVAL;
128
/* Digital section start should be aligned to 4k page */
130 if (!IS_ALIGNED(hdr_size, SZ_4K))
131 return -EINVAL;
132
/*
 * Read digital section size and check that it also fits inside
 * the image.
 */
137 ds_size = *(u16 *)(buf + hdr_size);
138 if (ds_size >= image_size)
139 return -EINVAL;
140
141 if (!sw->safe_mode) {
142 u16 device_id;
143
/*
 * Make sure the device ID in the image matches the one
 * we read from the switch config space.
 */
148 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
149 if (device_id != sw->config.device_id)
150 return -EINVAL;
151
152 if (sw->generation < 3) {
/* Write CSS headers first */
154 ret = dma_port_flash_write(sw->dma_port,
155 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
156 DMA_PORT_CSS_MAX_SIZE);
157 if (ret)
158 return ret;
159 }
160
/* Skip headers in the image */
162 buf += hdr_size;
163 image_size -= hdr_size;
164 }
165
166 return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
167 }
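/*
 * Illustrative summary of the image layout that nvm_validate_and_write()
 * above expects (derived from the checks in the function itself, not from
 * a separate specification):
 *
 *	offset 0                    : u32 whose low 24 bits give the header
 *	                              size; must be 4k aligned and leave room
 *	                              for the fields below
 *	offset hdr_size             : u16 digital section size, must fit the image
 *	offset hdr_size + NVM_DEVID : u16 device ID, must match
 *	                              sw->config.device_id unless in safe mode
 *	offset NVM_CSS              : CSS headers, written separately for
 *	                              generation < 3 host routers
 *
 * Outside safe mode the headers are skipped and only the remainder of the
 * image is written to the non-active NVM.
 */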
168
169 static int nvm_authenticate_host(struct tb_switch *sw)
170 {
171 int ret = 0;
172
/*
 * Root switch NVM upgrade requires that we disconnect the
 * existing paths first (in case it is not in safe mode
 * already).
 */
178 if (!sw->safe_mode) {
179 u32 status;
180
181 ret = tb_domain_disconnect_all_paths(sw->tb);
182 if (ret)
183 return ret;
184
/*
 * The host controller goes away pretty soon after this if
 * everything goes well so getting timeout is expected.
 */
188 ret = dma_port_flash_update_auth(sw->dma_port);
189 if (!ret || ret == -ETIMEDOUT)
190 return 0;
/*
 * Any error from the update auth operation requires power
 * cycling of the host router.
 */
196 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
197 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
198 nvm_set_auth_status(sw, status);
199 }
200
/*
 * From safe mode we can get out by just power cycling the
 * switch.
 */
205 dma_port_power_cycle(sw->dma_port);
206 return ret;
207 }
208
209 static int nvm_authenticate_device(struct tb_switch *sw)
210 {
211 int ret, retries = 10;
212
213 ret = dma_port_flash_update_auth(sw->dma_port);
214 switch (ret) {
215 case 0:
216 case -ETIMEDOUT:
217 case -EACCES:
218 case -EINVAL:
/* Power cycle is required */
220 break;
221 default:
222 return ret;
223 }
224
/*
 * Poll here for the authentication status. It takes some time
 * for the device to respond (we get timeouts for a while). Once
 * we get a response the device needs to be power cycled in
 * order to take the new NVM into use.
 */
231 do {
232 u32 status;
233
234 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
235 if (ret < 0 && ret != -ETIMEDOUT)
236 return ret;
237 if (ret > 0) {
238 if (status) {
239 tb_sw_warn(sw, "failed to authenticate NVM\n");
240 nvm_set_auth_status(sw, status);
241 }
242
243 tb_sw_info(sw, "power cycling the switch now\n");
244 dma_port_power_cycle(sw->dma_port);
245 return 0;
246 }
247
248 msleep(500);
249 } while (--retries);
250
251 return -ETIMEDOUT;
252 }
253
254 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
255 size_t bytes)
256 {
257 struct tb_switch *sw = priv;
258 int ret;
259
260 pm_runtime_get_sync(&sw->dev);
261
262 if (!mutex_trylock(&sw->tb->lock)) {
263 ret = restart_syscall();
264 goto out;
265 }
266
267 ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
268 mutex_unlock(&sw->tb->lock);
269
270 out:
271 pm_runtime_mark_last_busy(&sw->dev);
272 pm_runtime_put_autosuspend(&sw->dev);
273
274 return ret;
275 }
276
277 static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
278 size_t bytes)
279 {
280 return -EPERM;
281 }
282
283 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
284 size_t bytes)
285 {
286 struct tb_switch *sw = priv;
287 int ret = 0;
288
289 if (!mutex_trylock(&sw->tb->lock))
290 return restart_syscall();
291
/*
 * Since writing the NVM image might require some special steps,
 * for example when the CSS headers are written, we cache the
 * image locally here and handle the special cases when the user
 * asks us to authenticate the image.
 */
298 if (!sw->nvm->buf) {
299 sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
300 if (!sw->nvm->buf) {
301 ret = -ENOMEM;
302 goto unlock;
303 }
304 }
305
306 sw->nvm->buf_data_size = offset + bytes;
307 memcpy(sw->nvm->buf + offset, val, bytes);
308
309 unlock:
310 mutex_unlock(&sw->tb->lock);
311
312 return ret;
313 }
314
315 static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
316 size_t size, bool active)
317 {
318 struct nvmem_config config;
319
320 memset(&config, 0, sizeof(config));
321
322 if (active) {
323 config.name = "nvm_active";
324 config.reg_read = tb_switch_nvm_read;
325 config.read_only = true;
326 } else {
327 config.name = "nvm_non_active";
328 config.reg_read = tb_switch_nvm_no_read;
329 config.reg_write = tb_switch_nvm_write;
330 config.root_only = true;
331 }
332
333 config.id = id;
334 config.stride = 4;
335 config.word_size = 4;
336 config.size = size;
337 config.dev = &sw->dev;
338 config.owner = THIS_MODULE;
339 config.priv = sw;
340
341 return nvmem_register(&config);
342 }
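/*
 * The helper above is used twice from tb_switch_nvm_add() below: once for
 * the currently authenticated content ("nvm_active", exposed read-only)
 * and once for the staging area ("nvm_non_active", writable by root only).
 * A minimal sketch mirroring those calls:
 *
 *	nvm->active = register_nvmem(sw, nvm->id, nvm_size, true);
 *	nvm->non_active = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
 */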
343
344 static int tb_switch_nvm_add(struct tb_switch *sw)
345 {
346 struct nvmem_device *nvm_dev;
347 struct tb_switch_nvm *nvm;
348 u32 val;
349 int ret;
350
351 if (!sw->dma_port)
352 return 0;
353
354 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
355 if (!nvm)
356 return -ENOMEM;
357
358 nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
359
/*
 * If the switch is in safe mode the only accessible portion of
 * the NVM is the non-active one where the new image is written
 * but not authenticated.
 */
365 if (!sw->safe_mode) {
366 u32 nvm_size, hdr_size;
367
368 ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
369 sizeof(val));
370 if (ret)
371 goto err_ida;
372
373 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
374 nvm_size = (SZ_1M << (val & 7)) / 8;
375 nvm_size = (nvm_size - hdr_size) / 2;
376
377 ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
378 sizeof(val));
379 if (ret)
380 goto err_ida;
381
382 nvm->major = val >> 16;
383 nvm->minor = val >> 8;
384
385 nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
386 if (IS_ERR(nvm_dev)) {
387 ret = PTR_ERR(nvm_dev);
388 goto err_ida;
389 }
390 nvm->active = nvm_dev;
391 }
392
393 if (!sw->no_nvm_upgrade) {
394 nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
395 if (IS_ERR(nvm_dev)) {
396 ret = PTR_ERR(nvm_dev);
397 goto err_nvm_active;
398 }
399 nvm->non_active = nvm_dev;
400 }
401
402 sw->nvm = nvm;
403 return 0;
404
405 err_nvm_active:
406 if (nvm->active)
407 nvmem_unregister(nvm->active);
408 err_ida:
409 ida_simple_remove(&nvm_ida, nvm->id);
410 kfree(nvm);
411
412 return ret;
413 }
414
415 static void tb_switch_nvm_remove(struct tb_switch *sw)
416 {
417 struct tb_switch_nvm *nvm;
418
419 nvm = sw->nvm;
420 sw->nvm = NULL;
421
422 if (!nvm)
423 return;
424
/* Remove authentication status in case the switch is unplugged */
426 if (!nvm->authenticating)
427 nvm_clear_auth_status(sw);
428
429 if (nvm->non_active)
430 nvmem_unregister(nvm->non_active);
431 if (nvm->active)
432 nvmem_unregister(nvm->active);
433 ida_simple_remove(&nvm_ida, nvm->id);
434 vfree(nvm->buf);
435 kfree(nvm);
436 }
437
/* port utility functions */
440 static const char *tb_port_type(struct tb_regs_port_header *port)
441 {
442 switch (port->type >> 16) {
443 case 0:
444 switch ((u8) port->type) {
445 case 0:
446 return "Inactive";
447 case 1:
448 return "Port";
449 case 2:
450 return "NHI";
451 default:
452 return "unknown";
453 }
454 case 0x2:
455 return "Ethernet";
456 case 0x8:
457 return "SATA";
458 case 0xe:
459 return "DP/HDMI";
460 case 0x10:
461 return "PCIe";
462 case 0x20:
463 return "USB";
464 default:
465 return "unknown";
466 }
467 }
468
469 static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
470 {
471 tb_dbg(tb,
472 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
473 port->port_number, port->vendor_id, port->device_id,
474 port->revision, port->thunderbolt_version, tb_port_type(port),
475 port->type);
476 tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
477 port->max_in_hop_id, port->max_out_hop_id);
478 tb_dbg(tb, " Max counters: %d\n", port->max_counters);
479 tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
480 }
481
/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
489 static int tb_port_state(struct tb_port *port)
490 {
491 struct tb_cap_phy phy;
492 int res;
493 if (port->cap_phy == 0) {
494 tb_port_WARN(port, "does not have a PHY\n");
495 return -EINVAL;
496 }
497 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
498 if (res)
499 return res;
500 return phy.state;
501 }
502
/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
516 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
517 {
518 int retries = 10;
519 int state;
520 if (!port->cap_phy) {
521 tb_port_WARN(port, "does not have PHY\n");
522 return -EINVAL;
523 }
524 if (tb_is_upstream_port(port)) {
525 tb_port_WARN(port, "is the upstream port\n");
526 return -EINVAL;
527 }
528
529 while (retries--) {
530 state = tb_port_state(port);
531 if (state < 0)
532 return state;
533 if (state == TB_PORT_DISABLED) {
534 tb_port_dbg(port, "is disabled (state: 0)\n");
535 return 0;
536 }
537 if (state == TB_PORT_UNPLUGGED) {
538 if (wait_if_unplugged) {
539
540 tb_port_dbg(port,
541 "is unplugged (state: 7), retrying...\n");
542 msleep(100);
543 continue;
544 }
545 tb_port_dbg(port, "is unplugged (state: 7)\n");
546 return 0;
547 }
548 if (state == TB_PORT_UP) {
549 tb_port_dbg(port, "is connected, link is up (state: 2)\n");
550 return 1;
551 }
552
553
554
555
556
557 tb_port_dbg(port,
558 "is connected, link is not up (state: %d), retrying...\n",
559 state);
560 msleep(100);
561 }
562 tb_port_warn(port,
563 "failed to reach state TB_PORT_UP. Ignoring port...\n");
564 return 0;
565 }
566
/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
575 int tb_port_add_nfc_credits(struct tb_port *port, int credits)
576 {
577 u32 nfc_credits;
578
579 if (credits == 0 || port->sw->is_unplugged)
580 return 0;
581
582 nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
583 nfc_credits += credits;
584
585 tb_port_dbg(port, "adding %d NFC credits to %lu",
586 credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
587
588 port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
589 port->config.nfc_credits |= nfc_credits;
590
591 return tb_port_write(port, &port->config.nfc_credits,
592 TB_CFG_PORT, 4, 1);
593 }
594
/**
 * tb_port_set_initial_credits() - set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 */
602 int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
603 {
604 u32 data;
605 int ret;
606
607 ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
608 if (ret)
609 return ret;
610
611 data &= ~TB_PORT_LCA_MASK;
612 data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
613
614 return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
615 }
616
/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTERS
 *
 * Return: Returns 0 on success or an error code on failure.
 */
622 int tb_port_clear_counter(struct tb_port *port, int counter)
623 {
624 u32 zero[3] = { 0, 0, 0 };
625 tb_port_dbg(port, "clearing counter %d\n", counter);
626 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
627 }
628
/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
637 static int tb_init_port(struct tb_port *port)
638 {
639 int res;
640 int cap;
641
642 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
643 if (res) {
644 if (res == -ENODEV) {
645 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
646 port->port);
647 return 0;
648 }
649 return res;
650 }
651
/* Port 0 is the switch itself and has no PHY */
653 if (port->config.type == TB_TYPE_PORT && port->port != 0) {
654 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
655
656 if (cap > 0)
657 port->cap_phy = cap;
658 else
659 tb_port_WARN(port, "non switch port without a PHY\n");
660 } else if (port->port != 0) {
661 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
662 if (cap > 0)
663 port->cap_adap = cap;
664 }
665
666 tb_dump_port(port->sw->tb, &port->config);
667
/* Control port does not need HopID allocation */
669 if (port->port) {
670 ida_init(&port->in_hopids);
671 ida_init(&port->out_hopids);
672 }
673
674 return 0;
675
676 }
677
678 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
679 int max_hopid)
680 {
681 int port_max_hopid;
682 struct ida *ida;
683
684 if (in) {
685 port_max_hopid = port->config.max_in_hop_id;
686 ida = &port->in_hopids;
687 } else {
688 port_max_hopid = port->config.max_out_hop_id;
689 ida = &port->out_hopids;
690 }
691
/* HopIDs 0-7 are reserved */
693 if (min_hopid < TB_PATH_MIN_HOPID)
694 min_hopid = TB_PATH_MIN_HOPID;
695
696 if (max_hopid < 0 || max_hopid > port_max_hopid)
697 max_hopid = port_max_hopid;
698
699 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
700 }
701
/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between min_hopid and max_hopid or negative errno in
 * case of error.
 */
711 int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
712 {
713 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
714 }
715
/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between min_hopid and max_hopid or negative errno in
 * case of error.
 */
725 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
726 {
727 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
728 }
729
/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
735 void tb_port_release_in_hopid(struct tb_port *port, int hopid)
736 {
737 ida_simple_remove(&port->in_hopids, hopid);
738 }
739
/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
745 void tb_port_release_out_hopid(struct tb_port *port, int hopid)
746 {
747 ida_simple_remove(&port->out_hopids, hopid);
748 }
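/*
 * Hypothetical usage sketch (not taken from this file): a path setup
 * routine would typically allocate a HopID on the port and release it
 * again on teardown, e.g.
 *
 *	in_hopid = tb_port_alloc_in_hopid(dst_port, TB_PATH_MIN_HOPID, -1);
 *	if (in_hopid < 0)
 *		return in_hopid;
 *	...
 *	tb_port_release_in_hopid(dst_port, in_hopid);
 */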
749
/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
765 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
766 struct tb_port *prev)
767 {
768 struct tb_port *next;
769
770 if (!prev)
771 return start;
772
773 if (prev->sw == end->sw) {
774 if (prev == end)
775 return NULL;
776 return end;
777 }
778
779 if (start->sw->config.depth < end->sw->config.depth) {
780 if (prev->remote &&
781 prev->remote->sw->config.depth > prev->sw->config.depth)
782 next = prev->remote;
783 else
784 next = tb_port_at(tb_route(end->sw), prev->sw);
785 } else {
786 if (tb_is_upstream_port(prev)) {
787 next = prev->remote;
788 } else {
789 next = tb_upstream_port(prev->sw);
/*
 * Keep the same link if prev and next are both
 * dual link ports.
 */
794 if (next->dual_link_port &&
795 next->link_nr != prev->link_nr) {
796 next = next->dual_link_port;
797 }
798 }
799 }
800
801 return next;
802 }
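/*
 * Illustrative walk using the helper above (assumed caller, not part of
 * this file): starting with prev == NULL and feeding each result back in
 * visits every port from src to dst until NULL is returned.
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)) != NULL)
 *		tb_port_dbg(p, "on path\n");
 */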
803
/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
808 bool tb_port_is_enabled(struct tb_port *port)
809 {
810 switch (port->config.type) {
811 case TB_TYPE_PCIE_UP:
812 case TB_TYPE_PCIE_DOWN:
813 return tb_pci_port_is_enabled(port);
814
815 case TB_TYPE_DP_HDMI_IN:
816 case TB_TYPE_DP_HDMI_OUT:
817 return tb_dp_port_is_enabled(port);
818
819 default:
820 return false;
821 }
822 }
823
/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: Port to check
 */
828 bool tb_pci_port_is_enabled(struct tb_port *port)
829 {
830 u32 data;
831
832 if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
833 return false;
834
835 return !!(data & TB_PCI_EN);
836 }
837
/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
843 int tb_pci_port_enable(struct tb_port *port, bool enable)
844 {
845 u32 word = enable ? TB_PCI_EN : 0x0;
846 if (!port->cap_adap)
847 return -ENXIO;
848 return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
849 }
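/*
 * Sketch of how a PCIe tunnel would be activated with the helper above
 * (assumed caller, error handling trimmed): the PCIe adapter ports on
 * both ends of the tunnel need to be enabled.
 *
 *	ret = tb_pci_port_enable(down_port, true);
 *	if (!ret)
 *		ret = tb_pci_port_enable(up_port, true);
 */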
850
/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP OUT port to check
 *
 * Checks if the DP OUT adapter port has the HPD bit already set.
 */
857 int tb_dp_port_hpd_is_active(struct tb_port *port)
858 {
859 u32 data;
860 int ret;
861
862 ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
863 if (ret)
864 return ret;
865
866 return !!(data & TB_DP_HDP);
867 }
868
/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HPD set, this function can be used to clear it.
 */
875 int tb_dp_port_hpd_clear(struct tb_port *port)
876 {
877 u32 data;
878 int ret;
879
880 ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
881 if (ret)
882 return ret;
883
884 data |= TB_DP_HPDC;
885 return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
886 }
887
/**
 * tb_dp_port_set_hops() - Set the transmitted HopIDs for DP
 * @port: DP IN/OUT port to set hops
 * @video: Video HopID
 * @aux_tx: AUX TX HopID
 * @aux_rx: AUX RX HopID
 *
 * Programs the specified HopIDs for DP IN/OUT port.
 */
897 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
898 unsigned int aux_tx, unsigned int aux_rx)
899 {
900 u32 data[2];
901 int ret;
902
903 ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
904 ARRAY_SIZE(data));
905 if (ret)
906 return ret;
907
908 data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
909 data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
910
911 data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
912 data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
913 data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
914
915 return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
916 ARRAY_SIZE(data));
917 }
918
/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
923 bool tb_dp_port_is_enabled(struct tb_port *port)
924 {
925 u32 data[2];
926
927 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
928 ARRAY_SIZE(data)))
929 return false;
930
931 return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
932 }
933
/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once HopIDs are programmed, DP paths can be enabled or disabled by
 * calling this function.
 */
942 int tb_dp_port_enable(struct tb_port *port, bool enable)
943 {
944 u32 data[2];
945 int ret;
946
947 ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
948 ARRAY_SIZE(data));
949 if (ret)
950 return ret;
951
952 if (enable)
953 data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
954 else
955 data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
956
957 return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
958 ARRAY_SIZE(data));
959 }
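/*
 * Sketch of DP adapter programming with the helpers above (assumed
 * caller, illustrative only): HopIDs are programmed first and the video
 * and AUX paths are enabled afterwards.
 *
 *	ret = tb_dp_port_set_hops(port, video_hopid, aux_tx_hopid, aux_rx_hopid);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);
 */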
960
/* switch utility functions */
963 static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
964 {
965 tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
966 sw->vendor_id, sw->device_id, sw->revision,
967 sw->thunderbolt_version);
968 tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number);
969 tb_dbg(tb, " Config:\n");
970 tb_dbg(tb,
971 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
972 sw->upstream_port_number, sw->depth,
973 (((u64) sw->route_hi) << 32) | sw->route_lo,
974 sw->enabled, sw->plug_events_delay);
975 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
976 sw->__unknown1, sw->__unknown4);
977 }
978
/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
984 int tb_switch_reset(struct tb *tb, u64 route)
985 {
986 struct tb_cfg_result res;
987 struct tb_regs_switch_header header = {
988 header.route_hi = route >> 32,
989 header.route_lo = route,
990 header.enabled = true,
991 };
992 tb_dbg(tb, "resetting switch at %llx\n", route);
993 res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
994 0, 2, 2, 2);
995 if (res.err)
996 return res.err;
997 res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
998 if (res.err > 0)
999 return -EIO;
1000 return res.err;
1001 }
1002
/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
1010 static int tb_plug_events_active(struct tb_switch *sw, bool active)
1011 {
1012 u32 data;
1013 int res;
1014
1015 if (!sw->config.enabled)
1016 return 0;
1017
1018 sw->config.plug_events_delay = 0xff;
1019 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1020 if (res)
1021 return res;
1022
1023 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1024 if (res)
1025 return res;
1026
1027 if (active) {
1028 data = data & 0xFFFFFF83;
1029 switch (sw->config.device_id) {
1030 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1031 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1032 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1033 break;
1034 default:
1035 data |= 4;
1036 }
1037 } else {
1038 data = data | 0x7c;
1039 }
1040 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1041 sw->cap_plug_events + 1, 1);
1042 }
1043
1044 static ssize_t authorized_show(struct device *dev,
1045 struct device_attribute *attr,
1046 char *buf)
1047 {
1048 struct tb_switch *sw = tb_to_switch(dev);
1049
1050 return sprintf(buf, "%u\n", sw->authorized);
1051 }
1052
1053 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1054 {
1055 int ret = -EINVAL;
1056
1057 if (!mutex_trylock(&sw->tb->lock))
1058 return restart_syscall();
1059
1060 if (sw->authorized)
1061 goto unlock;
1062
1063 switch (val) {
1064
1065 case 1:
1066 if (sw->key)
1067 ret = tb_domain_approve_switch_key(sw->tb, sw);
1068 else
1069 ret = tb_domain_approve_switch(sw->tb, sw);
1070 break;
1071
1072
1073 case 2:
1074 if (sw->key)
1075 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1076 break;
1077
1078 default:
1079 break;
1080 }
1081
1082 if (!ret) {
1083 sw->authorized = val;
1084
1085 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1086 }
1087
1088 unlock:
1089 mutex_unlock(&sw->tb->lock);
1090 return ret;
1091 }
1092
1093 static ssize_t authorized_store(struct device *dev,
1094 struct device_attribute *attr,
1095 const char *buf, size_t count)
1096 {
1097 struct tb_switch *sw = tb_to_switch(dev);
1098 unsigned int val;
1099 ssize_t ret;
1100
1101 ret = kstrtouint(buf, 0, &val);
1102 if (ret)
1103 return ret;
1104 if (val > 2)
1105 return -EINVAL;
1106
1107 pm_runtime_get_sync(&sw->dev);
1108 ret = tb_switch_set_authorized(sw, val);
1109 pm_runtime_mark_last_busy(&sw->dev);
1110 pm_runtime_put_autosuspend(&sw->dev);
1111
1112 return ret ? ret : count;
1113 }
1114 static DEVICE_ATTR_RW(authorized);
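/*
 * Writing "1" to the authorized attribute approves the switch (using the
 * stored key, if one has been set, otherwise a plain approval), while
 * writing "2" requests a key challenge; both paths end up in
 * tb_switch_set_authorized() above and emit a KOBJ_CHANGE uevent on
 * success.
 */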
1115
1116 static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1117 char *buf)
1118 {
1119 struct tb_switch *sw = tb_to_switch(dev);
1120
1121 return sprintf(buf, "%u\n", sw->boot);
1122 }
1123 static DEVICE_ATTR_RO(boot);
1124
1125 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1126 char *buf)
1127 {
1128 struct tb_switch *sw = tb_to_switch(dev);
1129
1130 return sprintf(buf, "%#x\n", sw->device);
1131 }
1132 static DEVICE_ATTR_RO(device);
1133
1134 static ssize_t
1135 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1136 {
1137 struct tb_switch *sw = tb_to_switch(dev);
1138
1139 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1140 }
1141 static DEVICE_ATTR_RO(device_name);
1142
1143 static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1144 char *buf)
1145 {
1146 struct tb_switch *sw = tb_to_switch(dev);
1147 ssize_t ret;
1148
1149 if (!mutex_trylock(&sw->tb->lock))
1150 return restart_syscall();
1151
1152 if (sw->key)
1153 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1154 else
1155 ret = sprintf(buf, "\n");
1156
1157 mutex_unlock(&sw->tb->lock);
1158 return ret;
1159 }
1160
1161 static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1162 const char *buf, size_t count)
1163 {
1164 struct tb_switch *sw = tb_to_switch(dev);
1165 u8 key[TB_SWITCH_KEY_SIZE];
1166 ssize_t ret = count;
1167 bool clear = false;
1168
1169 if (!strcmp(buf, "\n"))
1170 clear = true;
1171 else if (hex2bin(key, buf, sizeof(key)))
1172 return -EINVAL;
1173
1174 if (!mutex_trylock(&sw->tb->lock))
1175 return restart_syscall();
1176
1177 if (sw->authorized) {
1178 ret = -EBUSY;
1179 } else {
1180 kfree(sw->key);
1181 if (clear) {
1182 sw->key = NULL;
1183 } else {
1184 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1185 if (!sw->key)
1186 ret = -ENOMEM;
1187 }
1188 }
1189
1190 mutex_unlock(&sw->tb->lock);
1191 return ret;
1192 }
1193 static DEVICE_ATTR(key, 0600, key_show, key_store);
1194
1195 static void nvm_authenticate_start(struct tb_switch *sw)
1196 {
1197 struct pci_dev *root_port;
1198
/*
 * During host router NVM upgrade we should not allow the root port to
 * go into D3cold because some root ports cannot trigger PME
 * themselves. To be on the safe side keep the root port in D0 during
 * the whole upgrade process.
 */
1205 root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
1206 if (root_port)
1207 pm_runtime_get_noresume(&root_port->dev);
1208 }
1209
1210 static void nvm_authenticate_complete(struct tb_switch *sw)
1211 {
1212 struct pci_dev *root_port;
1213
1214 root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
1215 if (root_port)
1216 pm_runtime_put(&root_port->dev);
1217 }
1218
1219 static ssize_t nvm_authenticate_show(struct device *dev,
1220 struct device_attribute *attr, char *buf)
1221 {
1222 struct tb_switch *sw = tb_to_switch(dev);
1223 u32 status;
1224
1225 nvm_get_auth_status(sw, &status);
1226 return sprintf(buf, "%#x\n", status);
1227 }
1228
1229 static ssize_t nvm_authenticate_store(struct device *dev,
1230 struct device_attribute *attr, const char *buf, size_t count)
1231 {
1232 struct tb_switch *sw = tb_to_switch(dev);
1233 bool val;
1234 int ret;
1235
1236 pm_runtime_get_sync(&sw->dev);
1237
1238 if (!mutex_trylock(&sw->tb->lock)) {
1239 ret = restart_syscall();
1240 goto exit_rpm;
1241 }
1242
/* If NVMem devices are not yet added */
1244 if (!sw->nvm) {
1245 ret = -EAGAIN;
1246 goto exit_unlock;
1247 }
1248
1249 ret = kstrtobool(buf, &val);
1250 if (ret)
1251 goto exit_unlock;
1252
1253
1254 nvm_clear_auth_status(sw);
1255
1256 if (val) {
1257 if (!sw->nvm->buf) {
1258 ret = -EINVAL;
1259 goto exit_unlock;
1260 }
1261
1262 ret = nvm_validate_and_write(sw);
1263 if (ret)
1264 goto exit_unlock;
1265
1266 sw->nvm->authenticating = true;
1267
1268 if (!tb_route(sw)) {
/*
 * Keep the root port from suspending as long as the
 * NVM upgrade process is running.
 */
1273 nvm_authenticate_start(sw);
1274 ret = nvm_authenticate_host(sw);
1275 } else {
1276 ret = nvm_authenticate_device(sw);
1277 }
1278 }
1279
1280 exit_unlock:
1281 mutex_unlock(&sw->tb->lock);
1282 exit_rpm:
1283 pm_runtime_mark_last_busy(&sw->dev);
1284 pm_runtime_put_autosuspend(&sw->dev);
1285
1286 if (ret)
1287 return ret;
1288 return count;
1289 }
1290 static DEVICE_ATTR_RW(nvm_authenticate);
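/*
 * Rough NVM upgrade flow as implemented by the attributes above: the new
 * image is first written through the non-active nvmem device (cached by
 * tb_switch_nvm_write()); writing "1" to nvm_authenticate then validates
 * the buffer with nvm_validate_and_write() and triggers
 * nvm_authenticate_host() or nvm_authenticate_device() depending on
 * whether the switch is the host router.
 */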
1291
1292 static ssize_t nvm_version_show(struct device *dev,
1293 struct device_attribute *attr, char *buf)
1294 {
1295 struct tb_switch *sw = tb_to_switch(dev);
1296 int ret;
1297
1298 if (!mutex_trylock(&sw->tb->lock))
1299 return restart_syscall();
1300
1301 if (sw->safe_mode)
1302 ret = -ENODATA;
1303 else if (!sw->nvm)
1304 ret = -EAGAIN;
1305 else
1306 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1307
1308 mutex_unlock(&sw->tb->lock);
1309
1310 return ret;
1311 }
1312 static DEVICE_ATTR_RO(nvm_version);
1313
1314 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1315 char *buf)
1316 {
1317 struct tb_switch *sw = tb_to_switch(dev);
1318
1319 return sprintf(buf, "%#x\n", sw->vendor);
1320 }
1321 static DEVICE_ATTR_RO(vendor);
1322
1323 static ssize_t
1324 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1325 {
1326 struct tb_switch *sw = tb_to_switch(dev);
1327
1328 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1329 }
1330 static DEVICE_ATTR_RO(vendor_name);
1331
1332 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1333 char *buf)
1334 {
1335 struct tb_switch *sw = tb_to_switch(dev);
1336
1337 return sprintf(buf, "%pUb\n", sw->uuid);
1338 }
1339 static DEVICE_ATTR_RO(unique_id);
1340
1341 static struct attribute *switch_attrs[] = {
1342 &dev_attr_authorized.attr,
1343 &dev_attr_boot.attr,
1344 &dev_attr_device.attr,
1345 &dev_attr_device_name.attr,
1346 &dev_attr_key.attr,
1347 &dev_attr_nvm_authenticate.attr,
1348 &dev_attr_nvm_version.attr,
1349 &dev_attr_vendor.attr,
1350 &dev_attr_vendor_name.attr,
1351 &dev_attr_unique_id.attr,
1352 NULL,
1353 };
1354
1355 static umode_t switch_attr_is_visible(struct kobject *kobj,
1356 struct attribute *attr, int n)
1357 {
1358 struct device *dev = container_of(kobj, struct device, kobj);
1359 struct tb_switch *sw = tb_to_switch(dev);
1360
1361 if (attr == &dev_attr_device.attr) {
1362 if (!sw->device)
1363 return 0;
1364 } else if (attr == &dev_attr_device_name.attr) {
1365 if (!sw->device_name)
1366 return 0;
1367 } else if (attr == &dev_attr_vendor.attr) {
1368 if (!sw->vendor)
1369 return 0;
1370 } else if (attr == &dev_attr_vendor_name.attr) {
1371 if (!sw->vendor_name)
1372 return 0;
1373 } else if (attr == &dev_attr_key.attr) {
1374 if (tb_route(sw) &&
1375 sw->tb->security_level == TB_SECURITY_SECURE &&
1376 sw->security_level == TB_SECURITY_SECURE)
1377 return attr->mode;
1378 return 0;
1379 } else if (attr == &dev_attr_nvm_authenticate.attr) {
1380 if (sw->dma_port && !sw->no_nvm_upgrade)
1381 return attr->mode;
1382 return 0;
1383 } else if (attr == &dev_attr_nvm_version.attr) {
1384 if (sw->dma_port)
1385 return attr->mode;
1386 return 0;
1387 } else if (attr == &dev_attr_boot.attr) {
1388 if (tb_route(sw))
1389 return attr->mode;
1390 return 0;
1391 }
1392
1393 return sw->safe_mode ? 0 : attr->mode;
1394 }
1395
1396 static struct attribute_group switch_group = {
1397 .is_visible = switch_attr_is_visible,
1398 .attrs = switch_attrs,
1399 };
1400
1401 static const struct attribute_group *switch_groups[] = {
1402 &switch_group,
1403 NULL,
1404 };
1405
1406 static void tb_switch_release(struct device *dev)
1407 {
1408 struct tb_switch *sw = tb_to_switch(dev);
1409 int i;
1410
1411 dma_port_free(sw->dma_port);
1412
1413 for (i = 1; i <= sw->config.max_port_number; i++) {
1414 if (!sw->ports[i].disabled) {
1415 ida_destroy(&sw->ports[i].in_hopids);
1416 ida_destroy(&sw->ports[i].out_hopids);
1417 }
1418 }
1419
1420 kfree(sw->uuid);
1421 kfree(sw->device_name);
1422 kfree(sw->vendor_name);
1423 kfree(sw->ports);
1424 kfree(sw->drom);
1425 kfree(sw->key);
1426 kfree(sw);
1427 }
1428
/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
1433 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
1434 {
1435 struct tb_switch *sw = tb_to_switch(dev);
1436 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1437
1438 if (cm_ops->runtime_suspend_switch)
1439 return cm_ops->runtime_suspend_switch(sw);
1440
1441 return 0;
1442 }
1443
1444 static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
1445 {
1446 struct tb_switch *sw = tb_to_switch(dev);
1447 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1448
1449 if (cm_ops->runtime_resume_switch)
1450 return cm_ops->runtime_resume_switch(sw);
1451 return 0;
1452 }
1453
1454 static const struct dev_pm_ops tb_switch_pm_ops = {
1455 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
1456 NULL)
1457 };
1458
1459 struct device_type tb_switch_type = {
1460 .name = "thunderbolt_device",
1461 .release = tb_switch_release,
1462 .pm = &tb_switch_pm_ops,
1463 };
1464
1465 static int tb_switch_get_generation(struct tb_switch *sw)
1466 {
1467 switch (sw->config.device_id) {
1468 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1469 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1470 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
1471 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
1472 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1473 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1474 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
1475 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
1476 return 1;
1477
1478 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
1479 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
1480 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
1481 return 2;
1482
1483 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1484 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1485 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1486 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1487 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1488 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1489 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1490 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
1491 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
1492 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
1493 return 3;
1494
1495 default:
/*
 * For unknown switches assume generation to be 1 to be
 * on the safe side.
 */
1500 tb_sw_warn(sw, "unsupported switch device id %#x\n",
1501 sw->config.device_id);
1502 return 1;
1503 }
1504 }
1505
/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
1520 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1521 u64 route)
1522 {
1523 struct tb_switch *sw;
1524 int upstream_port;
1525 int i, ret, depth;
1526
/* Make sure we do not exceed maximum topology limit */
1528 depth = tb_route_length(route);
1529 if (depth > TB_SWITCH_MAX_DEPTH)
1530 return ERR_PTR(-EADDRNOTAVAIL);
1531
1532 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
1533 if (upstream_port < 0)
1534 return ERR_PTR(upstream_port);
1535
1536 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1537 if (!sw)
1538 return ERR_PTR(-ENOMEM);
1539
1540 sw->tb = tb;
1541 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
1542 if (ret)
1543 goto err_free_sw_ports;
1544
1545 tb_dbg(tb, "current switch config:\n");
1546 tb_dump_switch(tb, &sw->config);
1547
1548
1549 sw->config.upstream_port_number = upstream_port;
1550 sw->config.depth = depth;
1551 sw->config.route_hi = upper_32_bits(route);
1552 sw->config.route_lo = lower_32_bits(route);
1553 sw->config.enabled = 0;
1554
1555
1556 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
1557 GFP_KERNEL);
1558 if (!sw->ports) {
1559 ret = -ENOMEM;
1560 goto err_free_sw_ports;
1561 }
1562
1563 for (i = 0; i <= sw->config.max_port_number; i++) {
/* minimum setup for tb_find_cap and tb_drom_read to work */
1565 sw->ports[i].sw = sw;
1566 sw->ports[i].port = i;
1567 }
1568
1569 sw->generation = tb_switch_get_generation(sw);
1570
1571 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
1572 if (ret < 0) {
1573 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
1574 goto err_free_sw_ports;
1575 }
1576 sw->cap_plug_events = ret;
1577
1578 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
1579 if (ret > 0)
1580 sw->cap_lc = ret;
1581
/* Root switches are authorized by default */
1583 if (!route)
1584 sw->authorized = true;
1585
1586 device_initialize(&sw->dev);
1587 sw->dev.parent = parent;
1588 sw->dev.bus = &tb_bus_type;
1589 sw->dev.type = &tb_switch_type;
1590 sw->dev.groups = switch_groups;
1591 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1592
1593 return sw;
1594
1595 err_free_sw_ports:
1596 kfree(sw->ports);
1597 kfree(sw);
1598
1599 return ERR_PTR(ret);
1600 }
1601
/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * only provides the NVM upgrade capability. Useful for NVM rescue. It
 * will be exposed to the userspace too, but otherwise this kind of
 * switch cannot be used for anything else.
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
1616 struct tb_switch *
1617 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
1618 {
1619 struct tb_switch *sw;
1620
1621 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1622 if (!sw)
1623 return ERR_PTR(-ENOMEM);
1624
1625 sw->tb = tb;
1626 sw->config.depth = tb_route_length(route);
1627 sw->config.route_hi = upper_32_bits(route);
1628 sw->config.route_lo = lower_32_bits(route);
1629 sw->safe_mode = true;
1630
1631 device_initialize(&sw->dev);
1632 sw->dev.parent = parent;
1633 sw->dev.bus = &tb_bus_type;
1634 sw->dev.type = &tb_switch_type;
1635 sw->dev.groups = switch_groups;
1636 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1637
1638 return sw;
1639 }
1640
/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
1651 int tb_switch_configure(struct tb_switch *sw)
1652 {
1653 struct tb *tb = sw->tb;
1654 u64 route;
1655 int ret;
1656
1657 route = tb_route(sw);
1658 tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
1659 route, tb_route_length(route), sw->config.upstream_port_number);
1660
1661 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
1662 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
1663 sw->config.vendor_id);
1664
1665 sw->config.enabled = 1;
1666
1667
1668 ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
1669 if (ret)
1670 return ret;
1671
1672 ret = tb_lc_configure_link(sw);
1673 if (ret)
1674 return ret;
1675
1676 return tb_plug_events_active(sw, true);
1677 }
1678
1679 static int tb_switch_set_uuid(struct tb_switch *sw)
1680 {
1681 u32 uuid[4];
1682 int ret;
1683
1684 if (sw->uuid)
1685 return 0;
1686
/*
 * The newer controllers include fused UUID as part of the link
 * controller specific registers
 */
1691 ret = tb_lc_read_uuid(sw, uuid);
1692 if (ret) {
/*
 * ICM generates UUID based on UID and fills the upper
 * two words with ones. This is not strictly following
 * UUID format but we want to be compatible with it so
 * we do the same here.
 */
1699 uuid[0] = sw->uid & 0xffffffff;
1700 uuid[1] = (sw->uid >> 32) & 0xffffffff;
1701 uuid[2] = 0xffffffff;
1702 uuid[3] = 0xffffffff;
1703 }
1704
1705 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
1706 if (!sw->uuid)
1707 return -ENOMEM;
1708 return 0;
1709 }
1710
1711 static int tb_switch_add_dma_port(struct tb_switch *sw)
1712 {
1713 u32 status;
1714 int ret;
1715
1716 switch (sw->generation) {
1717 case 2:
/* Only root switch can be upgraded */
1719 if (tb_route(sw))
1720 return 0;
1721
/* fallthrough */
1723 case 3:
1724 ret = tb_switch_set_uuid(sw);
1725 if (ret)
1726 return ret;
1727 break;
1728
1729 default:
/*
 * DMA port is the only thing available when the switch
 * is in safe mode.
 */
1734 if (!sw->safe_mode)
1735 return 0;
1736 break;
1737 }
1738
/* Root switch DMA port requires running firmware */
1740 if (!tb_route(sw) && sw->config.enabled)
1741 return 0;
1742
1743 sw->dma_port = dma_port_alloc(sw);
1744 if (!sw->dma_port)
1745 return 0;
1746
1747 if (sw->no_nvm_upgrade)
1748 return 0;
1749
/*
 * If there is status already set then authentication failed
 * when dma_port_flash_update_auth() returned. Power cycling
 * is not needed (it was done already) so the only thing we do
 * here is to unblock runtime PM of the root port.
 */
1756 nvm_get_auth_status(sw, &status);
1757 if (status) {
1758 if (!tb_route(sw))
1759 nvm_authenticate_complete(sw);
1760 return 0;
1761 }
1762
/*
 * Check status of the previous flash authentication. If there
 * is one we need to power cycle the switch in any case to make
 * it functional again.
 */
1768 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
1769 if (ret <= 0)
1770 return ret;
1771
/* Now we can allow the root port to suspend again */
1773 if (!tb_route(sw))
1774 nvm_authenticate_complete(sw);
1775
1776 if (status) {
1777 tb_sw_info(sw, "switch flash authentication failed\n");
1778 nvm_set_auth_status(sw, status);
1779 }
1780
1781 tb_sw_info(sw, "power cycling the switch now\n");
1782 dma_port_power_cycle(sw->dma_port);
1783
/*
 * We return an error here which causes the switch adding to fail.
 * The switch should appear back after the power cycle is complete.
 */
1788 return -ESHUTDOWN;
1789 }
1790
/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
1803 int tb_switch_add(struct tb_switch *sw)
1804 {
1805 int i, ret;
1806
/*
 * Initialize DMA control port now before we read DROM. Recent
 * host controllers have more complete DROM on NVM that includes
 * vendor and model identification strings which we then expose
 * to the userspace. NVM can be accessed through the DMA
 * configuration based mailbox.
 */
1814 ret = tb_switch_add_dma_port(sw);
1815 if (ret)
1816 return ret;
1817
1818 if (!sw->safe_mode) {
1819
1820 ret = tb_drom_read(sw);
1821 if (ret) {
1822 tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
1823 return ret;
1824 }
1825 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
1826
1827 ret = tb_switch_set_uuid(sw);
1828 if (ret)
1829 return ret;
1830
1831 for (i = 0; i <= sw->config.max_port_number; i++) {
1832 if (sw->ports[i].disabled) {
1833 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
1834 continue;
1835 }
1836 ret = tb_init_port(&sw->ports[i]);
1837 if (ret)
1838 return ret;
1839 }
1840 }
1841
1842 ret = device_add(&sw->dev);
1843 if (ret)
1844 return ret;
1845
1846 if (tb_route(sw)) {
1847 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
1848 sw->vendor, sw->device);
1849 if (sw->vendor_name && sw->device_name)
1850 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
1851 sw->device_name);
1852 }
1853
1854 ret = tb_switch_nvm_add(sw);
1855 if (ret) {
1856 device_del(&sw->dev);
1857 return ret;
1858 }
1859
1860 pm_runtime_set_active(&sw->dev);
1861 if (sw->rpm) {
1862 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
1863 pm_runtime_use_autosuspend(&sw->dev);
1864 pm_runtime_mark_last_busy(&sw->dev);
1865 pm_runtime_enable(&sw->dev);
1866 pm_request_autosuspend(&sw->dev);
1867 }
1868
1869 return 0;
1870 }
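/*
 * Hypothetical hotplug sketch (the real callers live in the connection
 * managers, not in this file): a newly discovered switch is typically
 * brought up with
 *
 *	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *	if (tb_switch_configure(sw) || tb_switch_add(sw))
 *		tb_switch_put(sw);
 *
 * (illustrative only; real callers handle the two error paths separately)
 */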
/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
1880 void tb_switch_remove(struct tb_switch *sw)
1881 {
1882 int i;
1883
1884 if (sw->rpm) {
1885 pm_runtime_get_sync(&sw->dev);
1886 pm_runtime_disable(&sw->dev);
1887 }
1888
/* port 0 is the switch itself and never has a remote */
1890 for (i = 1; i <= sw->config.max_port_number; i++) {
1891 if (tb_port_has_remote(&sw->ports[i])) {
1892 tb_switch_remove(sw->ports[i].remote->sw);
1893 sw->ports[i].remote = NULL;
1894 } else if (sw->ports[i].xdomain) {
1895 tb_xdomain_remove(sw->ports[i].xdomain);
1896 sw->ports[i].xdomain = NULL;
1897 }
1898 }
1899
1900 if (!sw->is_unplugged)
1901 tb_plug_events_active(sw, false);
1902 tb_lc_unconfigure_link(sw);
1903
1904 tb_switch_nvm_remove(sw);
1905
1906 if (tb_route(sw))
1907 dev_info(&sw->dev, "device disconnected\n");
1908 device_unregister(&sw->dev);
1909 }
1910
/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 */
1914 void tb_sw_set_unplugged(struct tb_switch *sw)
1915 {
1916 int i;
1917 if (sw == sw->tb->root_switch) {
1918 tb_sw_WARN(sw, "cannot unplug root switch\n");
1919 return;
1920 }
1921 if (sw->is_unplugged) {
1922 tb_sw_WARN(sw, "is_unplugged already set\n");
1923 return;
1924 }
1925 sw->is_unplugged = true;
1926 for (i = 0; i <= sw->config.max_port_number; i++) {
1927 if (tb_port_has_remote(&sw->ports[i]))
1928 tb_sw_set_unplugged(sw->ports[i].remote->sw);
1929 else if (sw->ports[i].xdomain)
1930 sw->ports[i].xdomain->is_unplugged = true;
1931 }
1932 }
1933
1934 int tb_switch_resume(struct tb_switch *sw)
1935 {
1936 int i, err;
1937 tb_sw_dbg(sw, "resuming switch\n");
1938
/*
 * Check the UID of the connected switches except for the root
 * switch which we assume cannot be removed.
 */
1943 if (tb_route(sw)) {
1944 u64 uid;
1945
/*
 * Check first that we can still read the switch config
 * space. It may be that there is now another domain
 * connected.
 */
1951 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
1952 if (err < 0) {
1953 tb_sw_info(sw, "switch not present anymore\n");
1954 return err;
1955 }
1956
1957 err = tb_drom_read_uid_only(sw, &uid);
1958 if (err) {
1959 tb_sw_warn(sw, "uid read failed\n");
1960 return err;
1961 }
1962 if (sw->uid != uid) {
1963 tb_sw_info(sw,
1964 "changed while suspended (uid %#llx -> %#llx)\n",
1965 sw->uid, uid);
1966 return -ENODEV;
1967 }
1968 }
1969
1970
1971 err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
1972 if (err)
1973 return err;
1974
1975 err = tb_lc_configure_link(sw);
1976 if (err)
1977 return err;
1978
1979 err = tb_plug_events_active(sw, true);
1980 if (err)
1981 return err;
1982
1983
1984 for (i = 1; i <= sw->config.max_port_number; i++) {
1985 struct tb_port *port = &sw->ports[i];
1986
1987 if (!tb_port_has_remote(port) && !port->xdomain)
1988 continue;
1989
1990 if (tb_wait_for_port(port, true) <= 0) {
1991 tb_port_warn(port,
1992 "lost during suspend, disconnecting\n");
1993 if (tb_port_has_remote(port))
1994 tb_sw_set_unplugged(port->remote->sw);
1995 else if (port->xdomain)
1996 port->xdomain->is_unplugged = true;
1997 } else if (tb_port_has_remote(port)) {
1998 if (tb_switch_resume(port->remote->sw)) {
1999 tb_port_warn(port,
2000 "lost during suspend, disconnecting\n");
2001 tb_sw_set_unplugged(port->remote->sw);
2002 }
2003 }
2004 }
2005 return 0;
2006 }
2007
2008 void tb_switch_suspend(struct tb_switch *sw)
2009 {
2010 int i, err;
2011 err = tb_plug_events_active(sw, false);
2012 if (err)
2013 return;
2014
2015 for (i = 1; i <= sw->config.max_port_number; i++) {
2016 if (tb_port_has_remote(&sw->ports[i]))
2017 tb_switch_suspend(sw->ports[i].remote->sw);
2018 }
2019
2020 tb_lc_set_sleep(sw);
2021 }
2022
2023 struct tb_sw_lookup {
2024 struct tb *tb;
2025 u8 link;
2026 u8 depth;
2027 const uuid_t *uuid;
2028 u64 route;
2029 };
2030
2031 static int tb_switch_match(struct device *dev, const void *data)
2032 {
2033 struct tb_switch *sw = tb_to_switch(dev);
2034 const struct tb_sw_lookup *lookup = data;
2035
2036 if (!sw)
2037 return 0;
2038 if (sw->tb != lookup->tb)
2039 return 0;
2040
2041 if (lookup->uuid)
2042 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
2043
2044 if (lookup->route) {
2045 return sw->config.route_lo == lower_32_bits(lookup->route) &&
2046 sw->config.route_hi == upper_32_bits(lookup->route);
2047 }
2048
2049
2050 if (!lookup->depth)
2051 return !sw->depth;
2052
2053 return sw->link == lookup->link && sw->depth == lookup->depth;
2054 }
2055
/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * Returned switch has its reference count increased so the caller needs
 * to call tb_switch_put() when done with the switch.
 */
2065 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
2066 {
2067 struct tb_sw_lookup lookup;
2068 struct device *dev;
2069
2070 memset(&lookup, 0, sizeof(lookup));
2071 lookup.tb = tb;
2072 lookup.link = link;
2073 lookup.depth = depth;
2074
2075 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2076 if (dev)
2077 return tb_to_switch(dev);
2078
2079 return NULL;
2080 }
2081
/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has its reference count increased so the caller needs
 * to call tb_switch_put() when done with the switch.
 */
2090 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2091 {
2092 struct tb_sw_lookup lookup;
2093 struct device *dev;
2094
2095 memset(&lookup, 0, sizeof(lookup));
2096 lookup.tb = tb;
2097 lookup.uuid = uuid;
2098
2099 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2100 if (dev)
2101 return tb_to_switch(dev);
2102
2103 return NULL;
2104 }
2105
/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has its reference count increased so the caller needs
 * to call tb_switch_put() when done with the switch.
 */
2114 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
2115 {
2116 struct tb_sw_lookup lookup;
2117 struct device *dev;
2118
2119 if (!route)
2120 return tb_switch_get(tb->root_switch);
2121
2122 memset(&lookup, 0, sizeof(lookup));
2123 lookup.tb = tb;
2124 lookup.route = route;
2125
2126 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2127 if (dev)
2128 return tb_to_switch(dev);
2129
2130 return NULL;
2131 }
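/*
 * Because the lookup helpers above take a reference on the returned
 * switch, a caller is expected to drop it again, e.g. (assumed usage):
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		...
 *		tb_switch_put(sw);
 *	}
 */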
2132
2133 void tb_switch_exit(void)
2134 {
2135 ida_destroy(&nvm_ida);
2136 }