root/drivers/usb/early/xhci-dbc.c


DEFINITIONS

This source file includes the following definitions.
  1. xdbc_trace
  2. xdbc_map_pci_mmio
  3. xdbc_get_page
  4. xdbc_find_dbgp
  5. handshake
  6. xdbc_bios_handoff
  7. xdbc_alloc_ring
  8. xdbc_free_ring
  9. xdbc_reset_ring
  10. xdbc_put_utf16
  11. xdbc_mem_init
  12. xdbc_do_reset_debug_port
  13. xdbc_reset_debug_port
  14. xdbc_queue_trb
  15. xdbc_ring_doorbell
  16. xdbc_start
  17. xdbc_bulk_transfer
  18. xdbc_handle_external_reset
  19. xdbc_early_setup
  20. early_xdbc_parse_parameter
  21. early_xdbc_setup_hardware
  22. xdbc_handle_port_status
  23. xdbc_handle_tx_event
  24. xdbc_handle_events
  25. xdbc_bulk_write
  26. early_xdbc_write
  27. early_xdbc_register_console
  28. xdbc_unregister_console
  29. xdbc_scrub_function
  30. xdbc_init

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * xhci-dbc.c - xHCI debug capability early driver
   4  *
   5  * Copyright (C) 2016 Intel Corporation
   6  *
   7  * Author: Lu Baolu <baolu.lu@linux.intel.com>
   8  */
   9 
  10 #define pr_fmt(fmt)     KBUILD_MODNAME ":%s: " fmt, __func__
  11 
  12 #include <linux/console.h>
  13 #include <linux/pci_regs.h>
  14 #include <linux/pci_ids.h>
  15 #include <linux/memblock.h>
  16 #include <linux/io.h>
  17 #include <asm/pci-direct.h>
  18 #include <asm/fixmap.h>
  19 #include <linux/bcd.h>
  20 #include <linux/export.h>
  21 #include <linux/version.h>
  22 #include <linux/module.h>
  23 #include <linux/delay.h>
  24 #include <linux/kthread.h>
  25 
  26 #include "../host/xhci.h"
  27 #include "xhci-dbc.h"
  28 
  29 static struct xdbc_state xdbc;
  30 static bool early_console_keep;
  31 
  32 #ifdef XDBC_TRACE
  33 #define xdbc_trace      trace_printk
  34 #else
  35 static inline void xdbc_trace(const char *fmt, ...) { }
  36 #endif /* XDBC_TRACE */
  37 
  38 static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
  39 {
  40         u64 val64, sz64, mask64;
  41         void __iomem *base;
  42         u32 val, sz;
  43         u8 byte;
  44 
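              /*
               * Size BAR0 the standard PCI way: save the BAR, write all
               * ones, read back the size mask, then restore the original
               * value.
               */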
  45         val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
  46         write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
  47         sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
  48         write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);
  49 
  50         if (val == 0xffffffff || sz == 0xffffffff) {
  51                 pr_notice("invalid mmio bar\n");
  52                 return NULL;
  53         }
  54 
  55         val64   = val & PCI_BASE_ADDRESS_MEM_MASK;
  56         sz64    = sz & PCI_BASE_ADDRESS_MEM_MASK;
  57         mask64  = PCI_BASE_ADDRESS_MEM_MASK;
  58 
  59         if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
  60                 val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
  61                 write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
  62                 sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
  63                 write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);
  64 
  65                 val64   |= (u64)val << 32;
  66                 sz64    |= (u64)sz << 32;
  67                 mask64  |= ~0ULL << 32;
  68         }
  69 
  70         sz64 &= mask64;
  71 
  72         if (!sz64) {
  73                 pr_notice("invalid mmio address\n");
  74                 return NULL;
  75         }
  76 
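              /* The lowest set bit of the size mask gives the BAR's decoded size. */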
  77         sz64 = 1ULL << __ffs64(sz64);
  78 
  79         /* Check if the mem space is enabled: */
  80         byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
  81         if (!(byte & PCI_COMMAND_MEMORY)) {
  82                 byte |= PCI_COMMAND_MEMORY;
  83                 write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
  84         }
  85 
  86         xdbc.xhci_start = val64;
  87         xdbc.xhci_length = sz64;
  88         base = early_ioremap(val64, sz64);
  89 
  90         return base;
  91 }
  92 
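      /*
       * Hand out whole direct-mapped pages.  The DbC engine does DMA with
       * bus addresses, and this early driver assumes a 1:1 mapping (no
       * IOMMU translation), so __pa() of the page is what hardware gets.
       */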
  93 static void * __init xdbc_get_page(dma_addr_t *dma_addr)
  94 {
  95         void *virt;
  96 
  97         virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
  98         if (!virt)
  99                 return NULL;
 100 
 101         if (dma_addr)
 102                 *dma_addr = (dma_addr_t)__pa(virt);
 103 
 104         return virt;
 105 }
 106 
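      /*
       * Brute-force scan of every bus/device/function for PCI class
       * 0x0c0330 (USB xHCI), returning the xdbc_num'th controller found.
       */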
 107 static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
 108 {
 109         u32 bus, dev, func, class;
 110 
 111         for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
 112                 for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
 113                         for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {
 114 
 115                                 class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
 116                                 if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
 117                                         continue;
 118 
 119                                 if (xdbc_num-- != 0)
 120                                         continue;
 121 
 122                                 *b = bus;
 123                                 *d = dev;
 124                                 *f = func;
 125 
 126                                 return 0;
 127                         }
 128                 }
 129         }
 130 
 131         return -1;
 132 }
 133 
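      /*
       * Poll a register until (value & mask) == done, rechecking every
       * @delay microseconds and giving up after roughly @wait microseconds.
       */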
 134 static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
 135 {
 136         u32 result;
 137 
 138         do {
 139                 result = readl(ptr);
 140                 result &= mask;
 141                 if (result == done)
 142                         return 0;
 143                 udelay(delay);
 144                 wait -= delay;
 145         } while (wait > 0);
 146 
 147         return -ETIMEDOUT;
 148 }
 149 
 150 static void __init xdbc_bios_handoff(void)
 151 {
 152         int offset, timeout;
 153         u32 val;
 154 
 155         offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
 156         val = readl(xdbc.xhci_base + offset);
 157 
 158         if (val & XHCI_HC_BIOS_OWNED) {
 159                 writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
 160                 timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);
 161 
 162                 if (timeout) {
 163                         pr_notice("failed to hand over xHCI control from BIOS\n");
 164                         writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
 165                 }
 166         }
 167 
 168         /* Disable BIOS SMIs and clear all SMI events: */
 169         val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
 170         val &= XHCI_LEGACY_DISABLE_SMI;
 171         val |= XHCI_LEGACY_SMI_EVENTS;
 172         writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
 173 }
 174 
 175 static int __init
 176 xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
 177 {
 178         seg->trbs = xdbc_get_page(&seg->dma);
 179         if (!seg->trbs)
 180                 return -ENOMEM;
 181 
 182         ring->segment = seg;
 183 
 184         return 0;
 185 }
 186 
 187 static void __init xdbc_free_ring(struct xdbc_ring *ring)
 188 {
 189         struct xdbc_segment *seg = ring->segment;
 190 
 191         if (!seg)
 192                 return;
 193 
 194         memblock_free(seg->dma, PAGE_SIZE);
 195         ring->segment = NULL;
 196 }
 197 
 198 static void xdbc_reset_ring(struct xdbc_ring *ring)
 199 {
 200         struct xdbc_segment *seg = ring->segment;
 201         struct xdbc_trb *link_trb;
 202 
 203         memset(seg->trbs, 0, PAGE_SIZE);
 204 
 205         ring->enqueue = seg->trbs;
 206         ring->dequeue = seg->trbs;
 207         ring->cycle_state = 1;
 208 
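              /*
               * Transfer rings are a single segment linked back to its own
               * start, so the last TRB is a link TRB with the toggle-cycle
               * flag.  The event ring needs no link TRB: the controller
               * wraps it based on the ERST segment size.
               */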
 209         if (ring != &xdbc.evt_ring) {
 210                 link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
 211                 link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
 212                 link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
 213                 link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
 214         }
 215 }
 216 
 217 static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
 218 {
 219         int i;
 220 
 221         for (i = 0; i < size; i++)
 222                 s[i] = cpu_to_le16(c[i]);
 223 }
 224 
 225 static void xdbc_mem_init(void)
 226 {
 227         struct xdbc_ep_context *ep_in, *ep_out;
 228         struct usb_string_descriptor *s_desc;
 229         struct xdbc_erst_entry *entry;
 230         struct xdbc_strings *strings;
 231         struct xdbc_context *ctx;
 232         unsigned int max_burst;
 233         u32 string_length;
 234         int index = 0;
 235         u32 dev_info;
 236 
 237         xdbc_reset_ring(&xdbc.evt_ring);
 238         xdbc_reset_ring(&xdbc.in_ring);
 239         xdbc_reset_ring(&xdbc.out_ring);
 240         memset(xdbc.table_base, 0, PAGE_SIZE);
 241         memset(xdbc.out_buf, 0, PAGE_SIZE);
 242 
 243         /* Initialize event ring segment table: */
 244         xdbc.erst_size  = 16;
 245         xdbc.erst_base  = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
 246         xdbc.erst_dma   = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
 247 
 248         index += XDBC_ERST_ENTRY_NUM;
 249         entry = (struct xdbc_erst_entry *)xdbc.erst_base;
 250 
 251         entry->seg_addr         = cpu_to_le64(xdbc.evt_seg.dma);
 252         entry->seg_size         = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
 253         entry->__reserved_0     = 0;
 254 
 255         /* Initialize ERST registers: */
 256         writel(1, &xdbc.xdbc_reg->ersts);
 257         xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
 258         xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);
 259 
 260         /* Debug capability contexts: */
 261         xdbc.dbcc_size  = 64 * 3;
 262         xdbc.dbcc_base  = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
 263         xdbc.dbcc_dma   = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
 264 
 265         index += XDBC_DBCC_ENTRY_NUM;
 266 
  267         /* Populate the strings: */
 268         xdbc.string_size = sizeof(struct xdbc_strings);
 269         xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
 270         xdbc.string_dma  = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
 271         strings          = (struct xdbc_strings *)xdbc.string_base;
 272 
 273         index += XDBC_STRING_ENTRY_NUM;
 274 
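              /*
               * The DbC Info Context packs the four descriptor lengths into
               * one dword: string0 in bits 7:0, manufacturer in 15:8,
               * product in 23:16 and serial in 31:24.  Accumulate that
               * value in string_length as the strings are built.
               */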
 275         /* Serial string: */
 276         s_desc                  = (struct usb_string_descriptor *)strings->serial;
 277         s_desc->bLength         = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
 278         s_desc->bDescriptorType = USB_DT_STRING;
 279 
 280         xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
 281         string_length = s_desc->bLength;
 282         string_length <<= 8;
 283 
 284         /* Product string: */
 285         s_desc                  = (struct usb_string_descriptor *)strings->product;
 286         s_desc->bLength         = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
 287         s_desc->bDescriptorType = USB_DT_STRING;
 288 
 289         xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
 290         string_length += s_desc->bLength;
 291         string_length <<= 8;
 292 
  293         /* Manufacturer string: */
 294         s_desc                  = (struct usb_string_descriptor *)strings->manufacturer;
 295         s_desc->bLength         = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
 296         s_desc->bDescriptorType = USB_DT_STRING;
 297 
 298         xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
 299         string_length += s_desc->bLength;
 300         string_length <<= 8;
 301 
 302         /* String0: */
 303         strings->string0[0]     = 4;
 304         strings->string0[1]     = USB_DT_STRING;
 305         strings->string0[2]     = 0x09;
 306         strings->string0[3]     = 0x04;
 307 
 308         string_length += 4;
 309 
  310         /* Populate the info context: */
 311         ctx = (struct xdbc_context *)xdbc.dbcc_base;
 312 
 313         ctx->info.string0       = cpu_to_le64(xdbc.string_dma);
 314         ctx->info.manufacturer  = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
 315         ctx->info.product       = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
 316         ctx->info.serial        = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
 317         ctx->info.length        = cpu_to_le32(string_length);
 318 
 319         /* Populate bulk out endpoint context: */
 320         max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
 321         ep_out = (struct xdbc_ep_context *)&ctx->out;
 322 
 323         ep_out->ep_info1        = 0;
 324         ep_out->ep_info2        = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
 325         ep_out->deq             = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);
 326 
 327         /* Populate bulk in endpoint context: */
 328         ep_in = (struct xdbc_ep_context *)&ctx->in;
 329 
 330         ep_in->ep_info1         = 0;
 331         ep_in->ep_info2         = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
 332         ep_in->deq              = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);
 333 
 334         /* Set DbC context and info registers: */
 335         xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);
 336 
              /* writel() performs the CPU-to-LE conversion itself, so store these in CPU order: */
  337         dev_info = (XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL;
  338         writel(dev_info, &xdbc.xdbc_reg->devinfo1);
  339 
  340         dev_info = (XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID;
  341         writel(dev_info, &xdbc.xdbc_reg->devinfo2);
 342 
 343         xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
 344         xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
 345 }
 346 
 347 static void xdbc_do_reset_debug_port(u32 id, u32 count)
 348 {
 349         void __iomem *ops_reg;
 350         void __iomem *portsc;
 351         u32 val, cap_length;
 352         int i;
 353 
 354         cap_length = readl(xdbc.xhci_base) & 0xff;
 355         ops_reg = xdbc.xhci_base + cap_length;
 356 
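              /*
               * Port numbers from the protocol capability are 1-based; the
               * PORTSC registers start at operational base + 0x400, one
               * every 0x10 bytes.
               */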
 357         id--;
 358         for (i = id; i < (id + count); i++) {
 359                 portsc = ops_reg + 0x400 + i * 0x10;
 360                 val = readl(portsc);
 361                 if (!(val & PORT_CONNECT))
 362                         writel(val | PORT_RESET, portsc);
 363         }
 364 }
 365 
 366 static void xdbc_reset_debug_port(void)
 367 {
 368         u32 val, port_offset, port_count;
 369         int offset = 0;
 370 
 371         do {
 372                 offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
 373                 if (!offset)
 374                         break;
 375 
 376                 val = readl(xdbc.xhci_base + offset);
 377                 if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
 378                         continue;
 379 
 380                 val = readl(xdbc.xhci_base + offset + 8);
 381                 port_offset = XHCI_EXT_PORT_OFF(val);
 382                 port_count = XHCI_EXT_PORT_COUNT(val);
 383 
 384                 xdbc_do_reset_debug_port(port_offset, port_count);
 385         } while (1);
 386 }
 387 
 388 static void
 389 xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
 390 {
 391         struct xdbc_trb *trb, *link_trb;
 392 
 393         trb = ring->enqueue;
 394         trb->field[0] = cpu_to_le32(field1);
 395         trb->field[1] = cpu_to_le32(field2);
 396         trb->field[2] = cpu_to_le32(field3);
 397         trb->field[3] = cpu_to_le32(field4);
 398 
 399         ++(ring->enqueue);
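              /*
               * On reaching the link TRB, hand it to the controller by
               * giving it the current cycle bit, then wrap to the start of
               * the segment and flip the producer cycle state.
               */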
 400         if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
 401                 link_trb = ring->enqueue;
 402                 if (ring->cycle_state)
 403                         link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
 404                 else
 405                         link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
 406 
 407                 ring->enqueue = ring->segment->trbs;
 408                 ring->cycle_state ^= 1;
 409         }
 410 }
 411 
 412 static void xdbc_ring_doorbell(int target)
 413 {
 414         writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
 415 }
 416 
 417 static int xdbc_start(void)
 418 {
 419         u32 ctrl, status;
 420         int ret;
 421 
 422         ctrl = readl(&xdbc.xdbc_reg->control);
 423         writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
 424         ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
 425         if (ret) {
 426                 xdbc_trace("failed to initialize hardware\n");
 427                 return ret;
 428         }
 429 
 430         /* Reset port to avoid bus hang: */
 431         if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
 432                 xdbc_reset_debug_port();
 433 
 434         /* Wait for port connection: */
 435         ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
 436         if (ret) {
 437                 xdbc_trace("waiting for connection timed out\n");
 438                 return ret;
 439         }
 440 
 441         /* Wait for debug device to be configured: */
 442         ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
 443         if (ret) {
 444                 xdbc_trace("waiting for device configuration timed out\n");
 445                 return ret;
 446         }
 447 
 448         /* Check port number: */
 449         status = readl(&xdbc.xdbc_reg->status);
 450         if (!DCST_DEBUG_PORT(status)) {
 451                 xdbc_trace("invalid root hub port number\n");
 452                 return -ENODEV;
 453         }
 454 
 455         xdbc.port_number = DCST_DEBUG_PORT(status);
 456 
 457         xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
 458                    readl(&xdbc.xdbc_reg->control), xdbc.port_number);
 459 
 460         return 0;
 461 }
 462 
 463 static int xdbc_bulk_transfer(void *data, int size, bool read)
 464 {
 465         struct xdbc_ring *ring;
 466         struct xdbc_trb *trb;
 467         u32 length, control;
 468         u32 cycle;
 469         u64 addr;
 470 
 471         if (size > XDBC_MAX_PACKET) {
 472                 xdbc_trace("bad parameter, size %d\n", size);
 473                 return -EINVAL;
 474         }
 475 
 476         if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
 477             !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
 478             (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
 479             (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {
 480 
 481                 xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
 482                 return -EIO;
 483         }
 484 
 485         ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
 486         trb = ring->enqueue;
 487         cycle = ring->cycle_state;
 488         length = TRB_LEN(size);
 489         control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
 490 
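              /*
               * Build the TRB with an inverted cycle bit first, so the
               * controller ignores it until every other field is in place;
               * the real cycle bit is set after the wmb() below.
               */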
 491         if (cycle)
 492                 control &= cpu_to_le32(~TRB_CYCLE);
 493         else
 494                 control |= cpu_to_le32(TRB_CYCLE);
 495 
 496         if (read) {
 497                 memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
 498                 addr = xdbc.in_dma;
 499                 xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
 500         } else {
 501                 memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
 502                 memcpy(xdbc.out_buf, data, size);
 503                 addr = xdbc.out_dma;
 504                 xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
 505         }
 506 
 507         xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);
 508 
 509         /*
 510          * Add a barrier between writes of trb fields and flipping
 511          * the cycle bit:
 512          */
 513         wmb();
 514         if (cycle)
 515                 trb->field[3] |= cpu_to_le32(cycle);
 516         else
 517                 trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
 518 
 519         xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);
 520 
 521         return size;
 522 }
 523 
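      /*
       * A reset of the host controller (e.g. by the regular xhci driver
       * taking over) disables the DbC as a side effect; re-initialize the
       * rings and contexts and bring the DbC back up from scratch.
       */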
 524 static int xdbc_handle_external_reset(void)
 525 {
 526         int ret = 0;
 527 
 528         xdbc.flags = 0;
 529         writel(0, &xdbc.xdbc_reg->control);
 530         ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
 531         if (ret)
 532                 goto reset_out;
 533 
 534         xdbc_mem_init();
 535 
 536         ret = xdbc_start();
 537         if (ret < 0)
 538                 goto reset_out;
 539 
 540         xdbc_trace("dbc recovered\n");
 541 
 542         xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;
 543 
 544         xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
 545 
 546         return 0;
 547 
 548 reset_out:
 549         xdbc_trace("failed to recover from external reset\n");
 550         return ret;
 551 }
 552 
 553 static int __init xdbc_early_setup(void)
 554 {
 555         int ret;
 556 
 557         writel(0, &xdbc.xdbc_reg->control);
 558         ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
 559         if (ret)
 560                 return ret;
 561 
 562         /* Allocate the table page: */
 563         xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
 564         if (!xdbc.table_base)
 565                 return -ENOMEM;
 566 
 567         /* Get and store the transfer buffer: */
 568         xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
 569         if (!xdbc.out_buf)
 570                 return -ENOMEM;
 571 
 572         /* Allocate the event ring: */
 573         ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
 574         if (ret < 0)
 575                 return ret;
 576 
 577         /* Allocate IN/OUT endpoint transfer rings: */
 578         ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
 579         if (ret < 0)
 580                 return ret;
 581 
 582         ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
 583         if (ret < 0)
 584                 return ret;
 585 
 586         xdbc_mem_init();
 587 
 588         ret = xdbc_start();
 589         if (ret < 0) {
 590                 writel(0, &xdbc.xdbc_reg->control);
 591                 return ret;
 592         }
 593 
 594         xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;
 595 
 596         xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
 597 
 598         return 0;
 599 }
 600 
 601 int __init early_xdbc_parse_parameter(char *s)
 602 {
 603         unsigned long dbgp_num = 0;
 604         u32 bus, dev, func, offset;
 605         int ret;
 606 
 607         if (!early_pci_allowed())
 608                 return -EPERM;
 609 
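              /*
               * @s is what follows "xdbc" in the earlyprintk= option:
               * typically an optional controller index plus ",keep" to hold
               * on to the console after boot.
               */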
 610         if (strstr(s, "keep"))
 611                 early_console_keep = true;
 612 
 613         if (xdbc.xdbc_reg)
 614                 return 0;
 615 
 616         if (*s && kstrtoul(s, 0, &dbgp_num))
 617                 dbgp_num = 0;
 618 
 619         pr_notice("dbgp_num: %lu\n", dbgp_num);
 620 
 621         /* Locate the host controller: */
 622         ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
 623         if (ret) {
 624                 pr_notice("failed to locate xhci host\n");
 625                 return -ENODEV;
 626         }
 627 
 628         xdbc.vendor     = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
 629         xdbc.device     = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
 630         xdbc.bus        = bus;
 631         xdbc.dev        = dev;
 632         xdbc.func       = func;
 633 
 634         /* Map the IO memory: */
 635         xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
 636         if (!xdbc.xhci_base)
 637                 return -EINVAL;
 638 
 639         /* Locate DbC registers: */
 640         offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
 641         if (!offset) {
 642                 pr_notice("xhci host doesn't support debug capability\n");
 643                 early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
 644                 xdbc.xhci_base = NULL;
 645                 xdbc.xhci_length = 0;
 646 
 647                 return -ENODEV;
 648         }
 649         xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
 650 
 651         return 0;
 652 }
 653 
 654 int __init early_xdbc_setup_hardware(void)
 655 {
 656         int ret;
 657 
 658         if (!xdbc.xdbc_reg)
 659                 return -ENODEV;
 660 
 661         xdbc_bios_handoff();
 662 
 663         raw_spin_lock_init(&xdbc.lock);
 664 
 665         ret = xdbc_early_setup();
 666         if (ret) {
  667                 pr_notice("failed to set up the connection to the host\n");
 668 
 669                 xdbc_free_ring(&xdbc.evt_ring);
 670                 xdbc_free_ring(&xdbc.out_ring);
 671                 xdbc_free_ring(&xdbc.in_ring);
 672 
 673                 if (xdbc.table_dma)
 674                         memblock_free(xdbc.table_dma, PAGE_SIZE);
 675 
 676                 if (xdbc.out_dma)
 677                         memblock_free(xdbc.out_dma, PAGE_SIZE);
 678 
 679                 xdbc.table_base = NULL;
 680                 xdbc.out_buf = NULL;
 681         }
 682 
 683         return ret;
 684 }
 685 
 686 static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
 687 {
 688         u32 port_reg;
 689 
 690         port_reg = readl(&xdbc.xdbc_reg->portsc);
 691         if (port_reg & PORTSC_CONN_CHANGE) {
 692                 xdbc_trace("connect status change event\n");
 693 
  694                 /* Check whether the cable has been unplugged: */
 695                 if (!(port_reg & PORTSC_CONN_STATUS)) {
 696                         xdbc.flags = 0;
 697                         xdbc_trace("cable unplugged\n");
 698                 }
 699         }
 700 
 701         if (port_reg & PORTSC_RESET_CHANGE)
 702                 xdbc_trace("port reset change event\n");
 703 
 704         if (port_reg & PORTSC_LINK_CHANGE)
 705                 xdbc_trace("port link status change event\n");
 706 
 707         if (port_reg & PORTSC_CONFIG_CHANGE)
 708                 xdbc_trace("config error change\n");
 709 
 710         /* Write back the value to clear RW1C bits: */
 711         writel(port_reg, &xdbc.xdbc_reg->portsc);
 712 }
 713 
 714 static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
 715 {
 716         u32 comp_code;
 717         int ep_id;
 718 
 719         comp_code       = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
 720         ep_id           = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));
 721 
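              /*
               * The xHCI spec puts the DbC OUT/IN endpoints at context
               * indices 0/1, but Intel controllers apparently report them
               * as 2/3 (the Device Context Index formula), so both
               * encodings are accepted below.
               */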
 722         switch (comp_code) {
 723         case COMP_SUCCESS:
 724         case COMP_SHORT_PACKET:
 725                 break;
 726         case COMP_TRB_ERROR:
 727         case COMP_BABBLE_DETECTED_ERROR:
 728         case COMP_USB_TRANSACTION_ERROR:
 729         case COMP_STALL_ERROR:
 730         default:
 731                 if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
 732                         xdbc.flags |= XDBC_FLAGS_OUT_STALL;
 733                 if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
 734                         xdbc.flags |= XDBC_FLAGS_IN_STALL;
 735 
 736                 xdbc_trace("endpoint %d stalled\n", ep_id);
 737                 break;
 738         }
 739 
 740         if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
 741                 xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
 742                 xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
 743         } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
 744                 xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
 745         } else {
 746                 xdbc_trace("invalid endpoint id %d\n", ep_id);
 747         }
 748 }
 749 
 750 static void xdbc_handle_events(void)
 751 {
 752         struct xdbc_trb *evt_trb;
 753         bool update_erdp = false;
 754         u32 reg;
 755         u8 cmd;
 756 
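              /*
               * Bus mastering may have been disabled behind our back, for
               * instance while the PCI core probes the host controller;
               * DbC DMA stalls without it, so switch it back on.
               */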
 757         cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
 758         if (!(cmd & PCI_COMMAND_MASTER)) {
 759                 cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
 760                 write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
 761         }
 762 
 763         if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
 764                 return;
 765 
 766         /* Handle external reset events: */
 767         reg = readl(&xdbc.xdbc_reg->control);
 768         if (!(reg & CTRL_DBC_ENABLE)) {
 769                 if (xdbc_handle_external_reset()) {
 770                         xdbc_trace("failed to recover connection\n");
 771                         return;
 772                 }
 773         }
 774 
 775         /* Handle configure-exit event: */
 776         reg = readl(&xdbc.xdbc_reg->control);
 777         if (reg & CTRL_DBC_RUN_CHANGE) {
 778                 writel(reg, &xdbc.xdbc_reg->control);
 779                 if (reg & CTRL_DBC_RUN)
 780                         xdbc.flags |= XDBC_FLAGS_CONFIGURED;
 781                 else
 782                         xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
 783         }
 784 
 785         /* Handle endpoint stall event: */
 786         reg = readl(&xdbc.xdbc_reg->control);
 787         if (reg & CTRL_HALT_IN_TR) {
 788                 xdbc.flags |= XDBC_FLAGS_IN_STALL;
 789         } else {
 790                 xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
 791                 if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
 792                         xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
 793         }
 794 
 795         if (reg & CTRL_HALT_OUT_TR)
 796                 xdbc.flags |= XDBC_FLAGS_OUT_STALL;
 797         else
 798                 xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;
 799 
 800         /* Handle the events in the event ring: */
 801         evt_trb = xdbc.evt_ring.dequeue;
 802         while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
 803                 /*
 804                  * Add a barrier between reading the cycle flag and any
 805                  * reads of the event's flags/data below:
 806                  */
 807                 rmb();
 808 
 809                 switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
 810                 case TRB_TYPE(TRB_PORT_STATUS):
 811                         xdbc_handle_port_status(evt_trb);
 812                         break;
 813                 case TRB_TYPE(TRB_TRANSFER):
 814                         xdbc_handle_tx_event(evt_trb);
 815                         break;
 816                 default:
 817                         break;
 818                 }
 819 
 820                 ++(xdbc.evt_ring.dequeue);
 821                 if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
 822                         xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
 823                         xdbc.evt_ring.cycle_state ^= 1;
 824                 }
 825 
 826                 evt_trb = xdbc.evt_ring.dequeue;
 827                 update_erdp = true;
 828         }
 829 
 830         /* Update event ring dequeue pointer: */
 831         if (update_erdp)
 832                 xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
 833 }
 834 
 835 static int xdbc_bulk_write(const char *bytes, int size)
 836 {
 837         int ret, timeout = 0;
 838         unsigned long flags;
 839 
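             /*
              * In NMI context the lock may already be held by the
              * interrupted CPU, so spinning would deadlock; try once and
              * bail out instead.
              */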
 840 retry:
 841         if (in_nmi()) {
 842                 if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
 843                         return -EAGAIN;
 844         } else {
 845                 raw_spin_lock_irqsave(&xdbc.lock, flags);
 846         }
 847 
 848         xdbc_handle_events();
 849 
 850         /* Check completion of the previous request: */
 851         if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
 852                 raw_spin_unlock_irqrestore(&xdbc.lock, flags);
 853                 udelay(100);
 854                 timeout += 100;
 855                 goto retry;
 856         }
 857 
 858         if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
 859                 raw_spin_unlock_irqrestore(&xdbc.lock, flags);
 860                 xdbc_trace("previous transfer not completed yet\n");
 861 
 862                 return -ETIMEDOUT;
 863         }
 864 
 865         ret = xdbc_bulk_transfer((void *)bytes, size, false);
 866         raw_spin_unlock_irqrestore(&xdbc.lock, flags);
 867 
 868         return ret;
 869 }
 870 
 871 static void early_xdbc_write(struct console *con, const char *str, u32 n)
 872 {
 873         static char buf[XDBC_MAX_PACKET];
 874         int chunk, ret;
 875         int use_cr = 0;
 876 
 877         if (!xdbc.xdbc_reg)
 878                 return;
 879         memset(buf, 0, XDBC_MAX_PACKET);
 880         while (n > 0) {
 881                 for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {
 882 
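                              /*
                               * Expand "\n" into "\r\n": emit the carriage
                               * return now and step back so the newline is
                               * picked up on the next iteration.
                               */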
 883                         if (!use_cr && *str == '\n') {
 884                                 use_cr = 1;
 885                                 buf[chunk] = '\r';
 886                                 str--;
 887                                 n++;
 888                                 continue;
 889                         }
 890 
 891                         if (use_cr)
 892                                 use_cr = 0;
 893                         buf[chunk] = *str;
 894                 }
 895 
 896                 if (chunk > 0) {
 897                         ret = xdbc_bulk_write(buf, chunk);
 898                         if (ret < 0)
 899                                 xdbc_trace("missed message {%s}\n", buf);
 900                 }
 901         }
 902 }
 903 
 904 static struct console early_xdbc_console = {
 905         .name           = "earlyxdbc",
 906         .write          = early_xdbc_write,
 907         .flags          = CON_PRINTBUFFER,
 908         .index          = -1,
 909 };
 910 
 911 void __init early_xdbc_register_console(void)
 912 {
 913         if (early_console)
 914                 return;
 915 
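              /*
               * A CON_BOOT console is unregistered automatically once a
               * real console comes up; "keep" on the command line clears
               * the flag so the DbC console survives past that point.
               */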
 916         early_console = &early_xdbc_console;
 917         if (early_console_keep)
 918                 early_console->flags &= ~CON_BOOT;
 919         else
 920                 early_console->flags |= CON_BOOT;
 921         register_console(early_console);
 922 }
 923 
 924 static void xdbc_unregister_console(void)
 925 {
 926         if (early_xdbc_console.flags & CON_ENABLED)
 927                 unregister_console(&early_xdbc_console);
 928 }
 929 
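      /*
       * The early DbC has no interrupt, so once the kernel is up a kthread
       * polls the event ring every jiffy until the console goes away, then
       * tears the hardware down.
       */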
 930 static int xdbc_scrub_function(void *ptr)
 931 {
 932         unsigned long flags;
 933 
 934         while (true) {
 935                 raw_spin_lock_irqsave(&xdbc.lock, flags);
 936                 xdbc_handle_events();
 937 
 938                 if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
 939                         raw_spin_unlock_irqrestore(&xdbc.lock, flags);
 940                         break;
 941                 }
 942 
 943                 raw_spin_unlock_irqrestore(&xdbc.lock, flags);
 944                 schedule_timeout_interruptible(1);
 945         }
 946 
 947         xdbc_unregister_console();
 948         writel(0, &xdbc.xdbc_reg->control);
 949         xdbc_trace("dbc scrub function exits\n");
 950 
 951         return 0;
 952 }
 953 
 954 static int __init xdbc_init(void)
 955 {
 956         unsigned long flags;
 957         void __iomem *base;
 958         int ret = 0;
 959         u32 offset;
 960 
 961         if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
 962                 return 0;
 963 
 964         /*
 965          * It's time to shut down the DbC, so that the debug
 966          * port can be reused by the host controller:
 967          */
 968         if (early_xdbc_console.index == -1 ||
 969             (early_xdbc_console.flags & CON_BOOT)) {
 970                 xdbc_trace("hardware not used anymore\n");
 971                 goto free_and_quit;
 972         }
 973 
 974         base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length);
 975         if (!base) {
 976                 xdbc_trace("failed to remap the io address\n");
 977                 ret = -ENOMEM;
 978                 goto free_and_quit;
 979         }
 980 
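              /*
               * Swap the early fixmap mapping for the permanent ioremap one
               * and rebase the register pointers under the lock, so a
               * concurrent console write never sees a stale mapping.
               */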
 981         raw_spin_lock_irqsave(&xdbc.lock, flags);
 982         early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
 983         xdbc.xhci_base = base;
 984         offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
 985         xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
 986         raw_spin_unlock_irqrestore(&xdbc.lock, flags);
 987 
 988         kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");
 989 
 990         return 0;
 991 
 992 free_and_quit:
 993         xdbc_free_ring(&xdbc.evt_ring);
 994         xdbc_free_ring(&xdbc.out_ring);
 995         xdbc_free_ring(&xdbc.in_ring);
 996         memblock_free(xdbc.table_dma, PAGE_SIZE);
 997         memblock_free(xdbc.out_dma, PAGE_SIZE);
 998         writel(0, &xdbc.xdbc_reg->control);
 999         early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
1000 
1001         return ret;
1002 }
1003 subsys_initcall(xdbc_init);
