root/drivers/thunderbolt/tb.c

DEFINITIONS

This source file includes the following definitions:
  1. tb_queue_hotplug
  2. tb_discover_tunnels
  3. tb_scan_xdomain
  4. tb_scan_switch
  5. tb_scan_port
  6. tb_free_tunnel
  7. tb_free_invalid_tunnels
  8. tb_free_unplugged_children
  9. tb_find_port
  10. tb_find_unused_port
  11. tb_find_pcie_down
  12. tb_tunnel_dp
  13. tb_teardown_dp
  14. tb_tunnel_pci
  15. tb_approve_xdomain_paths
  16. __tb_disconnect_xdomain_paths
  17. tb_disconnect_xdomain_paths
  18. tb_handle_hotplug
  19. tb_handle_event
  20. tb_stop
  21. tb_scan_finalize_switch
  22. tb_start
  23. tb_suspend_noirq
  24. tb_resume_noirq
  25. tb_free_unplugged_xdomains
  26. tb_complete
  27. tb_probe

// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *                  events and exit if this is not set (it needs to
 *                  acquire the lock one more time). Used to drain wq
 *                  after cfg has been paused.
 */
struct tb_cm {
        struct list_head tunnel_list;
        bool hotplug_active;
};

struct tb_hotplug_event {
        struct work_struct work;
        struct tb *tb;
        u64 route;
        u8 port;
        bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

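/*
 * tb_queue_hotplug() - defer a plug event to the domain workqueue
 *
 * Allocates a struct tb_hotplug_event and queues it on tb->wq where
 * tb_handle_hotplug() processes it. If the allocation fails the event
 * is silently dropped.
 */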
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
        struct tb_hotplug_event *ev;

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;

        ev->tb = tb;
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        INIT_WORK(&ev->work, tb_handle_hotplug);
        queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

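/*
 * tb_discover_tunnels() - pick up tunnels created by the boot firmware
 *
 * Recursively walks the switch tree, asks the tunnel code to discover
 * existing DP and PCIe tunnels starting from each adapter port and adds
 * the results to the connection manager tunnel list. Every switch along
 * a discovered PCIe tunnel is marked as boot-time configured.
 */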
static void tb_discover_tunnels(struct tb_switch *sw)
{
        struct tb *tb = sw->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port;
        int i;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_tunnel *tunnel = NULL;

                port = &sw->ports[i];
                switch (port->config.type) {
                case TB_TYPE_DP_HDMI_IN:
                        tunnel = tb_tunnel_discover_dp(tb, port);
                        break;

                case TB_TYPE_PCIE_DOWN:
                        tunnel = tb_tunnel_discover_pci(tb, port);
                        break;

                default:
                        break;
                }

                if (!tunnel)
                        continue;

                if (tb_tunnel_is_pci(tunnel)) {
                        struct tb_switch *parent = tunnel->dst_port->sw;

                        while (parent != tunnel->src_port->sw) {
                                parent->boot = true;
                                parent = tb_switch_parent(parent);
                        }
                }

                list_add_tail(&tunnel->list, &tcm->tunnel_list);
        }

        for (i = 1; i <= sw->config.max_port_number; i++) {
                if (tb_port_has_remote(&sw->ports[i]))
                        tb_discover_tunnels(sw->ports[i].remote->sw);
        }
}

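/*
 * tb_scan_xdomain() - set up an XDomain connection behind @port
 *
 * If no XDomain is known yet for the downstream route, allocate and
 * register one so that host-to-host connectivity can be established.
 */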
static void tb_scan_xdomain(struct tb_port *port)
{
        struct tb_switch *sw = port->sw;
        struct tb *tb = sw->tb;
        struct tb_xdomain *xd;
        u64 route;

        route = tb_downstream_route(port);
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                tb_xdomain_put(xd);
                return;
        }

        xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
                              NULL);
        if (xd) {
                tb_port_at(route, sw)->xdomain = xd;
                tb_xdomain_add(xd);
        }
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
        int i;

        for (i = 1; i <= sw->config.max_port_number; i++)
                tb_scan_port(&sw->ports[i]);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(port->sw->tb);
        struct tb_port *upstream_port;
        struct tb_switch *sw;

        if (tb_is_upstream_port(port))
                return;

        if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
            !tb_dp_port_is_enabled(port)) {
                tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
                tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
                                 false);
                return;
        }

        if (port->config.type != TB_TYPE_PORT)
                return;
        if (port->dual_link_port && port->link_nr)
                return; /*
                         * Downstream switch is reachable through two ports.
                         * Only scan on the primary port (link_nr == 0).
                         */
        if (tb_wait_for_port(port, false) <= 0)
                return;
        if (port->remote) {
                tb_port_dbg(port, "port already has a remote\n");
                return;
        }
        sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
                             tb_downstream_route(port));
        if (IS_ERR(sw)) {
                /*
                 * If there is an error accessing the connected switch
                 * it may be connected to another domain. Also we allow
                 * the other domain to be connected to a max depth switch.
                 */
                if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                        tb_scan_xdomain(port);
                return;
        }

        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
                return;
        }

        /*
         * If there was previously another domain connected remove it
         * first.
         */
        if (port->xdomain) {
                tb_xdomain_remove(port->xdomain);
                port->xdomain = NULL;
        }

        /*
         * Do not send uevents until we have discovered all existing
         * tunnels and know which switches were authorized already by
         * the boot firmware.
         */
        if (!tcm->hotplug_active)
                dev_set_uevent_suppress(&sw->dev, true);

        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
                return;
        }

        /* Link the switches using both links if available */
        upstream_port = tb_upstream_port(sw);
        port->remote = upstream_port;
        upstream_port->remote = port;
        if (port->dual_link_port && upstream_port->dual_link_port) {
                port->dual_link_port->remote = upstream_port->dual_link_port;
                upstream_port->dual_link_port->remote = port->dual_link_port;
        }

        tb_scan_switch(sw);
}

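/*
 * tb_free_tunnel() - deactivate and free a tunnel matching the filter
 *
 * Finds the first tunnel of @type whose source or destination adapter
 * matches the non-NULL port passed in, deactivates it, removes it from
 * the tunnel list and frees it. Returns %0 on success, %-ENODEV if no
 * tunnel matched.
 */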
static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
                          struct tb_port *src_port, struct tb_port *dst_port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;

        list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                if (tunnel->type == type &&
                    ((src_port && src_port == tunnel->src_port) ||
                     (dst_port && dst_port == tunnel->dst_port))) {
                        tb_tunnel_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                        return 0;
                }
        }

        return -ENODEV;
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (tb_tunnel_is_invalid(tunnel)) {
                        tb_tunnel_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                }
        }
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
        int i;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];

                if (!tb_port_has_remote(port))
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else {
                        tb_free_unplugged_children(port->remote->sw);
                }
        }
}

/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
                                    enum tb_port_type type)
{
        int i;

        for (i = 1; i <= sw->config.max_port_number; i++)
                if (sw->ports[i].config.type == type)
                        return &sw->ports[i];
        return NULL;
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
                                           enum tb_port_type type)
{
        int i;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                if (tb_is_upstream_port(&sw->ports[i]))
                        continue;
                if (sw->ports[i].config.type != type)
                        continue;
                if (!sw->ports[i].cap_adap)
                        continue;
                if (tb_port_is_enabled(&sw->ports[i]))
                        continue;
                return &sw->ports[i];
        }
        return NULL;
}

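/*
 * tb_find_pcie_down() - pick the PCIe down adapter to pair with @port
 *
 * On the root switch the Thunderbolt port to PCIe down adapter mapping
 * is hard-coded per controller so that devices always end up in the
 * same PCIe hierarchy. Everywhere else (and as a fallback) the first
 * unused PCIe down adapter is used.
 */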
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
                                         const struct tb_port *port)
{
        /*
         * To keep plugging devices consistently in the same PCIe
         * hierarchy, do mapping here for root switch downstream PCIe
         * ports.
         */
        if (!tb_route(sw)) {
                int phy_port = tb_phy_port_from_link(port->port);
                int index;

                /*
                 * Hard-coded Thunderbolt port to PCIe down port mapping
                 * per controller.
                 */
                if (tb_switch_is_cr(sw))
                        index = !phy_port ? 6 : 7;
                else if (tb_switch_is_fr(sw))
                        index = !phy_port ? 6 : 8;
                else
                        goto out;

                /* Validate the hard-coding */
                if (WARN_ON(index > sw->config.max_port_number))
                        goto out;
                if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
                        goto out;
                if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
                        goto out;

                return &sw->ports[index];
        }

out:
        return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

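/*
 * tb_tunnel_dp() - establish a DP tunnel ending at DP OUT adapter @out
 *
 * Walks up the chain of parent switches looking for an unused DP IN
 * adapter, then allocates and activates a DP tunnel between the two
 * adapters. Does nothing if @out is already enabled.
 */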
static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw = out->sw;
        struct tb_tunnel *tunnel;
        struct tb_port *in;

        if (tb_port_is_enabled(out))
                return 0;

        do {
                sw = tb_to_switch(sw->dev.parent);
                if (!sw)
                        return 0;
                in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
        } while (!in);

        tunnel = tb_tunnel_alloc_dp(tb, in, out);
        if (!tunnel) {
                tb_port_dbg(out, "DP tunnel allocation failed\n");
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(out, "DP tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

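/*
 * tb_teardown_dp() - tear down the DP tunnel ending at adapter @out
 */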
static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
{
        tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
}

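/*
 * tb_tunnel_pci() - establish a PCIe tunnel to switch @sw
 *
 * Used as the ->approve_switch callback: connects the PCIe up adapter
 * of @sw to a PCIe down adapter of its parent switch. Returns %0
 * without doing anything if the switch has no PCIe up adapter or no
 * down adapter is available.
 */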
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
        struct tb_port *up, *down, *port;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *parent_sw;
        struct tb_tunnel *tunnel;

        up = tb_find_port(sw, TB_TYPE_PCIE_UP);
        if (!up)
                return 0;

        /*
         * Look up available down port. Since we are chaining it should
         * be found right above this switch.
         */
        parent_sw = tb_to_switch(sw->dev.parent);
        port = tb_port_at(tb_route(sw), parent_sw);
        down = tb_find_pcie_down(parent_sw, port);
        if (!down)
                return 0;

        tunnel = tb_tunnel_alloc_pci(tb, up, down);
        if (!tunnel)
                return -ENOMEM;

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(up,
                             "PCIe tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

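/*
 * tb_approve_xdomain_paths() - enable DMA paths to another domain
 *
 * Sets up a DMA tunnel between the NHI adapter of the root switch and
 * the peer domain at @xd, using the transmit/receive rings and paths
 * negotiated over the XDomain protocol.
 */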
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *nhi_port, *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);
        nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

        mutex_lock(&tb->lock);
        tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
                                     xd->transmit_path, xd->receive_ring,
                                     xd->receive_path);
        if (!tunnel) {
                mutex_unlock(&tb->lock);
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(nhi_port,
                             "DMA tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                mutex_unlock(&tb->lock);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        mutex_unlock(&tb->lock);
        return 0;
}

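/* Caller must hold tb->lock */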
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_port *dst_port;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);

        /*
         * It is possible that the tunnel was already torn down (in
         * case of cable disconnect) so it is fine if we cannot find it
         * here anymore.
         */
        tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
}

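/*
 * tb_disconnect_xdomain_paths() - tear down the DMA tunnel to @xd
 *
 * No-op if the XDomain was already unplugged, since in that case the
 * hotplug handler has torn the tunnel down.
 */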
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!xd->is_unplugged) {
                mutex_lock(&tb->lock);
                __tb_disconnect_xdomain_paths(tb, xd);
                mutex_unlock(&tb->lock);
        }
        return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw;
        struct tb_port *port;

        mutex_lock(&tb->lock);
        if (!tcm->hotplug_active)
                goto out; /* during init, suspend or shutdown */

        sw = tb_switch_find_by_route(tb, ev->route);
        if (!sw) {
                tb_warn(tb,
                        "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->port > sw->config.max_port_number) {
                tb_warn(tb,
                        "hotplug event from non existent port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        port = &sw->ports[ev->port];
        if (tb_is_upstream_port(port)) {
                tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
                       ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        if (ev->unplug) {
                if (tb_port_has_remote(port)) {
                        tb_port_dbg(port, "switch unplugged\n");
                        tb_sw_set_unplugged(port->remote->sw);
                        tb_free_invalid_tunnels(tb);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else if (port->xdomain) {
                        struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

                        tb_port_dbg(port, "xdomain unplugged\n");
                        /*
                         * Service drivers are unbound during
                         * tb_xdomain_remove() so setting XDomain as
                         * unplugged here prevents deadlock if they call
                         * tb_xdomain_disable_paths(). We will tear down
                         * the path below.
                         */
                        xd->is_unplugged = true;
                        tb_xdomain_remove(xd);
                        port->xdomain = NULL;
                        __tb_disconnect_xdomain_paths(tb, xd);
                        tb_xdomain_put(xd);
                } else if (tb_port_is_dpout(port)) {
                        tb_teardown_dp(tb, port);
                } else {
                        tb_port_dbg(port,
                                   "got unplug event for disconnected port, ignoring\n");
                }
        } else if (port->remote) {
                tb_port_dbg(port, "got plug event for connected port, ignoring\n");
        } else {
                if (tb_port_is_null(port)) {
                        tb_port_dbg(port, "hotplug: scanning\n");
                        tb_scan_port(port);
                        if (!port->remote)
                                tb_port_dbg(port, "hotplug: no switch found\n");
                } else if (tb_port_is_dpout(port)) {
                        tb_tunnel_dp(tb, port);
                }
        }

put_sw:
        tb_switch_put(sw);
out:
        mutex_unlock(&tb->lock);
        kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Acknowledges the plug event and delegates to tb_handle_hotplug() via
 * tb_queue_hotplug().
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_error(tb->ctl, route, pkg->port,
                         TB_CFG_ERROR_ACK_PLUG_EVENT)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

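/*
 * tb_stop() - deactivate DMA tunnels and remove the switch tree
 *
 * DMA tunnels need a functional driver so they are deactivated here;
 * PCIe and DP tunnels are left running in hardware and only their
 * bookkeeping is freed.
 */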
static void tb_stop(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        /* tunnels are only present after everything has been initialized */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                /*
                 * DMA tunnels require the driver to be functional so we
                 * tear them down. Other protocol tunnels can be left
                 * intact.
                 */
                if (tb_tunnel_is_dma(tunnel))
                        tb_tunnel_deactivate(tunnel);
                tb_tunnel_free(tunnel);
        }
        tb_switch_remove(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

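/*
 * tb_scan_finalize_switch() - announce a discovered switch to userspace
 *
 * device_for_each_child() callback: marks boot-configured switches as
 * authorized, lifts the uevent suppression set in tb_scan_port() and
 * recurses into the children of each switch.
 */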
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
        if (tb_is_switch(dev)) {
                struct tb_switch *sw = tb_to_switch(dev);

                /*
                 * If we found that the switch was already set up by the
                 * boot firmware, mark it as authorized now before we
                 * send uevent to userspace.
                 */
                if (sw->boot)
                        sw->authorized = 1;

                dev_set_uevent_suppress(dev, false);
                kobject_uevent(&dev->kobj, KOBJ_ADD);
                device_for_each_child(dev, NULL, tb_scan_finalize_switch);
        }

        return 0;
}

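/*
 * tb_start() - allocate and configure the root switch, then scan
 *
 * Discovers devices and tunnels set up by the boot firmware before
 * enabling hotplug event processing.
 */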
static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (IS_ERR(tb->root_switch))
                return PTR_ERR(tb->root_switch);

        /*
         * ICM firmware upgrade needs running firmware, which is not
         * available in native mode, so disable firmware upgrade of the
         * root switch.
         */
        tb->root_switch->no_nvm_upgrade = true;

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Announce the switch to the world */
        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Full scan to discover devices added before the driver was loaded. */
        tb_scan_switch(tb->root_switch);
        /* Find out tunnels created by the boot firmware */
        tb_discover_tunnels(tb->root_switch);
        /* Make the discovered switches available to the userspace */
        device_for_each_child(&tb->root_switch->dev, NULL,
                              tb_scan_finalize_switch);

        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        return 0;
}

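/*
 * tb_suspend_noirq() - quiesce the domain for system suspend
 */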
static int tb_suspend_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tb_dbg(tb, "suspending...\n");
        tb_switch_suspend(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
        tb_dbg(tb, "suspend finished\n");

        return 0;
}

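/*
 * tb_resume_noirq() - bring the domain back up after system resume
 *
 * Resets the root switch to undo any firmware PCIe setup, resumes the
 * switch tree, drops tunnels and switches that disappeared during
 * suspend and restarts the remaining tunnels.
 */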
static int tb_resume_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;

        tb_dbg(tb, "resuming...\n");

        /* remove any pci devices the firmware might have set up */
        tb_switch_reset(tb, 0);

        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
                tb_tunnel_restart(tunnel);
        if (!list_empty(&tcm->tunnel_list)) {
                /*
                 * The PCIe links need some time to come up again;
                 * 100ms has proven sufficient.
                 */
                tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
                msleep(100);
        }
        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        tb_dbg(tb, "resume finished\n");

        return 0;
}

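/*
 * tb_free_unplugged_xdomains() - release XDomains that went away
 *
 * Recursively removes unplugged XDomain connections below @sw and
 * returns the number of connections released.
 */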
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
        int i, ret = 0;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];

                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_xdomain_remove(port->xdomain);
                        port->xdomain = NULL;
                        ret++;
                } else if (port->remote) {
                        ret += tb_free_unplugged_xdomains(port->remote->sw);
                }
        }

        return ret;
}

static void tb_complete(struct tb *tb)
{
        /*
         * Release any unplugged XDomains; if another domain was swapped
         * in place of an unplugged XDomain we need to run another
         * rescan to pick it up.
         */
        mutex_lock(&tb->lock);
        if (tb_free_unplugged_xdomains(tb->root_switch))
                tb_scan_switch(tb->root_switch);
        mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
        .start = tb_start,
        .stop = tb_stop,
        .suspend_noirq = tb_suspend_noirq,
        .resume_noirq = tb_resume_noirq,
        .complete = tb_complete,
        .handle_event = tb_handle_event,
        .approve_switch = tb_tunnel_pci,
        .approve_xdomain_paths = tb_approve_xdomain_paths,
        .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

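/*
 * tb_probe() - instantiate the software connection manager
 *
 * The software connection manager is only used on Apple hardware;
 * on other systems this returns NULL and a firmware (ICM) based
 * connection manager is expected to drive the domain instead.
 */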
struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        if (!x86_apple_machine)
                return NULL;

        tb = tb_domain_alloc(nhi, sizeof(*tcm));
        if (!tb)
                return NULL;

        tb->security_level = TB_SECURITY_USER;
        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);

        return tb;
}
