1/* 2 * Copyright Altera Corporation (C) 2013-2015. All rights reserved 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program. If not, see <http://www.gnu.org/licenses/>. 15 */ 16 17#include <linux/delay.h> 18#include <linux/interrupt.h> 19#include <linux/irqchip/chained_irq.h> 20#include <linux/module.h> 21#include <linux/of_address.h> 22#include <linux/of_irq.h> 23#include <linux/of_pci.h> 24#include <linux/pci.h> 25#include <linux/platform_device.h> 26#include <linux/slab.h> 27 28#define RP_TX_REG0 0x2000 29#define RP_TX_REG1 0x2004 30#define RP_TX_CNTRL 0x2008 31#define RP_TX_EOP 0x2 32#define RP_TX_SOP 0x1 33#define RP_RXCPL_STATUS 0x2010 34#define RP_RXCPL_EOP 0x2 35#define RP_RXCPL_SOP 0x1 36#define RP_RXCPL_REG0 0x2014 37#define RP_RXCPL_REG1 0x2018 38#define P2A_INT_STATUS 0x3060 39#define P2A_INT_STS_ALL 0xf 40#define P2A_INT_ENABLE 0x3070 41#define P2A_INT_ENA_ALL 0xf 42#define RP_LTSSM 0x3c64 43#define LTSSM_L0 0xf 44 45/* TLP configuration type 0 and 1 */ 46#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ 47#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ 48#define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */ 49#define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */ 50#define TLP_PAYLOAD_SIZE 0x01 51#define TLP_READ_TAG 0x1d 52#define TLP_WRITE_TAG 0x10 53#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE) 54#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be)) 
#define TLP_CFG_DW2(bus, devfn, offset)	\
	(((bus) << 24) | ((devfn) << 16) | (offset))
/* Requester ID: bus number in bits [15:8], devfn in bits [7:0] */
#define TLP_REQ_ID(bus, devfn)		(((bus) << 8) | (devfn))
/* Completion Status field of a completion TLP header dword (0 = success) */
#define TLP_COMP_STATUS(s)		(((s) >> 12) & 7)
#define TLP_HDR_SIZE			3
/* Polling iterations when waiting for a completion TLP */
#define TLP_LOOP			500
#define RP_DEVFN			0

#define INTX_NUM			4

/* Mask to align a config-space offset down to a 32-bit dword boundary */
#define DWORD_MASK			3

/* Per-controller state, allocated in probe and passed around as sysdata. */
struct altera_pcie {
	struct platform_device	*pdev;
	void __iomem		*cra_base;	/* mapped "Cra" register window */
	int			irq;		/* chained parent interrupt */
	u8			root_bus_nr;	/* tracked via PCI_PRIMARY_BUS writes */
	struct irq_domain	*irq_domain;	/* legacy INTx domain */
	struct resource		bus_range;
	struct list_head	resources;	/* host bridge windows from DT */
};

/* One TX mailbox transfer: control flags plus the two 32-bit data regs. */
struct tlp_rp_regpair_t {
	u32 ctrl;
	u32 reg0;
	u32 reg1;
};

/*
 * Early fixup: if the root port advertises a link speed above 2.5 GT/s but
 * the link trained at 2.5 GT/s, request a link retrain so the faster speed
 * can be negotiated.
 */
static void altera_pcie_retrain(struct pci_dev *dev)
{
	u16 linkcap, linkstat;

	/*
	 * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
	 * current speed is 2.5 GB/s.
	 */
	pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap);

	if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
		return;

	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
	if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB)
		pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_RL);
}
/*
 * 0x1172 is the Altera PCI vendor ID.
 * NOTE(review): this matches every Altera-ID device, not only this root
 * port — presumably intentional, but any Altera endpoint behind another
 * host would also be retrained; confirm.
 */
DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);

/*
 * Altera PCIe port uses BAR0 of RC's configuration space as the translation
 * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
 * using these registers, so it can be reached by DMA from EP devices.
 * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
 * from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge
 * should be hidden during enumeration to avoid the sizing and resource
 * allocation by PCIe core.
111 */ 112static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, 113 int offset) 114{ 115 if (pci_is_root_bus(bus) && (devfn == 0) && 116 (offset == PCI_BASE_ADDRESS_0)) 117 return true; 118 119 return false; 120} 121 122static inline void cra_writel(struct altera_pcie *pcie, const u32 value, 123 const u32 reg) 124{ 125 writel_relaxed(value, pcie->cra_base + reg); 126} 127 128static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) 129{ 130 return readl_relaxed(pcie->cra_base + reg); 131} 132 133static void tlp_write_tx(struct altera_pcie *pcie, 134 struct tlp_rp_regpair_t *tlp_rp_regdata) 135{ 136 cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0); 137 cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1); 138 cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); 139} 140 141static bool altera_pcie_link_is_up(struct altera_pcie *pcie) 142{ 143 return !!(cra_readl(pcie, RP_LTSSM) & LTSSM_L0); 144} 145 146static bool altera_pcie_valid_config(struct altera_pcie *pcie, 147 struct pci_bus *bus, int dev) 148{ 149 /* If there is no link, then there is no device */ 150 if (bus->number != pcie->root_bus_nr) { 151 if (!altera_pcie_link_is_up(pcie)) 152 return false; 153 } 154 155 /* access only one slot on each root port */ 156 if (bus->number == pcie->root_bus_nr && dev > 0) 157 return false; 158 159 /* 160 * Do not read more than one device on the bus directly attached 161 * to root port, root port can only attach to one downstream port. 162 */ 163 if (bus->primary == pcie->root_bus_nr && dev > 0) 164 return false; 165 166 return true; 167} 168 169static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) 170{ 171 int i; 172 bool sop = 0; 173 u32 ctrl; 174 u32 reg0, reg1; 175 u32 comp_status = 1; 176 177 /* 178 * Minimum 2 loops to read TLP headers and 1 loop to read data 179 * payload. 
180 */ 181 for (i = 0; i < TLP_LOOP; i++) { 182 ctrl = cra_readl(pcie, RP_RXCPL_STATUS); 183 if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) { 184 reg0 = cra_readl(pcie, RP_RXCPL_REG0); 185 reg1 = cra_readl(pcie, RP_RXCPL_REG1); 186 187 if (ctrl & RP_RXCPL_SOP) { 188 sop = true; 189 comp_status = TLP_COMP_STATUS(reg1); 190 } 191 192 if (ctrl & RP_RXCPL_EOP) { 193 if (comp_status) 194 return PCIBIOS_DEVICE_NOT_FOUND; 195 196 if (value) 197 *value = reg0; 198 199 return PCIBIOS_SUCCESSFUL; 200 } 201 } 202 udelay(5); 203 } 204 205 return PCIBIOS_DEVICE_NOT_FOUND; 206} 207 208static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, 209 u32 data, bool align) 210{ 211 struct tlp_rp_regpair_t tlp_rp_regdata; 212 213 tlp_rp_regdata.reg0 = headers[0]; 214 tlp_rp_regdata.reg1 = headers[1]; 215 tlp_rp_regdata.ctrl = RP_TX_SOP; 216 tlp_write_tx(pcie, &tlp_rp_regdata); 217 218 if (align) { 219 tlp_rp_regdata.reg0 = headers[2]; 220 tlp_rp_regdata.reg1 = 0; 221 tlp_rp_regdata.ctrl = 0; 222 tlp_write_tx(pcie, &tlp_rp_regdata); 223 224 tlp_rp_regdata.reg0 = data; 225 tlp_rp_regdata.reg1 = 0; 226 } else { 227 tlp_rp_regdata.reg0 = headers[2]; 228 tlp_rp_regdata.reg1 = data; 229 } 230 231 tlp_rp_regdata.ctrl = RP_TX_EOP; 232 tlp_write_tx(pcie, &tlp_rp_regdata); 233} 234 235static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, 236 int where, u8 byte_en, u32 *value) 237{ 238 u32 headers[TLP_HDR_SIZE]; 239 240 if (bus == pcie->root_bus_nr) 241 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0); 242 else 243 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1); 244 245 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN), 246 TLP_READ_TAG, byte_en); 247 headers[2] = TLP_CFG_DW2(bus, devfn, where); 248 249 tlp_write_packet(pcie, headers, 0, false); 250 251 return tlp_read_packet(pcie, value); 252} 253 254static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, 255 int where, u8 byte_en, u32 value) 256{ 257 u32 
headers[TLP_HDR_SIZE];
	int ret;

	if (bus == pcie->root_bus_nr)
		headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0);
	else
		headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);

	headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
				 TLP_WRITE_TAG, byte_en);
	headers[2] = TLP_CFG_DW2(bus, devfn, where);

	/* check alignment to Qword */
	if ((where & 0x7) == 0)
		tlp_write_packet(pcie, headers, value, true);
	else
		tlp_write_packet(pcie, headers, value, false);

	/* Config writes also return a completion; wait for it. */
	ret = tlp_read_packet(pcie, NULL);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	/*
	 * Monitor changes to PCI_PRIMARY_BUS register on root port
	 * and update local copy of root bus number accordingly.
	 */
	if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS))
		pcie->root_bus_nr = (u8)(value);

	return PCIBIOS_SUCCESSFUL;
}

/*
 * pci_ops .read: perform a dword TLP config read and extract the
 * requested byte/word/dword. Sub-dword accesses are expressed through the
 * TLP byte-enable field and a shift of the returned dword.
 */
static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *value)
{
	struct altera_pcie *pcie = bus->sysdata;
	int ret;
	u32 data;
	u8 byte_en;

	if (altera_pcie_hide_rc_bar(bus, devfn, where))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
		/* Absent devices read as all-ones, per PCI convention */
		*value = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* Byte enables select which lanes of the dword are valid */
	switch (size) {
	case 1:
		byte_en = 1 << (where & 3);
		break;
	case 2:
		byte_en = 3 << (where & 3);
		break;
	default:
		byte_en = 0xf;
		break;
	}

	ret = tlp_cfg_dword_read(pcie, bus->number, devfn,
				 (where & ~DWORD_MASK), byte_en, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	/* Shift the addressed byte/word down from the aligned dword */
	switch (size) {
	case 1:
		*value = (data >> (8 * (where & 0x3))) & 0xff;
		break;
	case 2:
		*value = (data >> (8 * (where & 0x2))) & 0xffff;
		break;
	default:
		*value = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

/*
 * pci_ops .write: position the byte/word within its aligned dword and
 * issue a TLP config write with the matching byte enables.
 */
static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 value)
{
	struct altera_pcie *pcie = bus->sysdata;
	u32 data32;
	u32 shift = 8 * (where & 3);
	u8 byte_en;

	if (altera_pcie_hide_rc_bar(bus, devfn, where))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		data32 = (value & 0xff) << shift;
		byte_en = 1 << (where & 3);
		break;
	case 2:
		data32 = (value & 0xffff) << shift;
		byte_en = 3 << (where & 3);
		break;
	default:
		data32 = value;
		byte_en = 0xf;
		break;
	}

	return tlp_cfg_dword_write(pcie, bus->number, devfn,
				   (where & ~DWORD_MASK), byte_en, data32);
}

static struct pci_ops altera_pcie_ops = {
	.read = altera_pcie_cfg_read,
	.write = altera_pcie_cfg_write,
};

/* irq_domain .map: wire each INTx hwirq to a simple dummy-chip handler */
static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = altera_pcie_intx_map,
};

/*
 * Chained handler for the parent interrupt: drain P2A_INT_STATUS,
 * acking each pending INTx bit and dispatching its mapped virq.
 * hwirq numbering is bit + 1 (hwirq 0 is unused in the domain).
 */
static void altera_pcie_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct altera_pcie *pcie;
	unsigned long status;
	u32 bit;
	u32 virq;

	chained_irq_enter(chip, desc);
	pcie = irq_desc_get_handler_data(desc);

	while ((status = cra_readl(pcie, P2A_INT_STATUS)
		& P2A_INT_STS_ALL) != 0) {
		for_each_set_bit(bit, &status, INTX_NUM) {
			/* clear interrupts */
			cra_writel(pcie, 1 << bit, P2A_INT_STATUS);

			virq = irq_find_mapping(pcie->irq_domain, bit + 1);
			if (virq)
				generic_handle_irq(virq);
			else
				dev_err(&pcie->pdev->dev,
					"unexpected IRQ, INT%d\n", bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void
altera_pcie_release_of_pci_ranges(struct altera_pcie *pcie) 418{ 419 pci_free_resource_list(&pcie->resources); 420} 421 422static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie) 423{ 424 int err, res_valid = 0; 425 struct device *dev = &pcie->pdev->dev; 426 struct device_node *np = dev->of_node; 427 struct resource_entry *win; 428 429 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources, 430 NULL); 431 if (err) 432 return err; 433 434 resource_list_for_each_entry(win, &pcie->resources) { 435 struct resource *parent, *res = win->res; 436 437 switch (resource_type(res)) { 438 case IORESOURCE_MEM: 439 parent = &iomem_resource; 440 res_valid |= !(res->flags & IORESOURCE_PREFETCH); 441 break; 442 default: 443 continue; 444 } 445 446 err = devm_request_resource(dev, parent, res); 447 if (err) 448 goto out_release_res; 449 } 450 451 if (!res_valid) { 452 dev_err(dev, "non-prefetchable memory resource required\n"); 453 err = -EINVAL; 454 goto out_release_res; 455 } 456 457 return 0; 458 459out_release_res: 460 altera_pcie_release_of_pci_ranges(pcie); 461 return err; 462} 463 464static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) 465{ 466 struct device *dev = &pcie->pdev->dev; 467 struct device_node *node = dev->of_node; 468 469 /* Setup INTx */ 470 pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1, 471 &intx_domain_ops, pcie); 472 if (!pcie->irq_domain) { 473 dev_err(dev, "Failed to get a INTx IRQ domain\n"); 474 return -ENOMEM; 475 } 476 477 return 0; 478} 479 480static int altera_pcie_parse_dt(struct altera_pcie *pcie) 481{ 482 struct resource *cra; 483 struct platform_device *pdev = pcie->pdev; 484 485 cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); 486 if (!cra) { 487 dev_err(&pdev->dev, "no Cra memory resource defined\n"); 488 return -ENODEV; 489 } 490 491 pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra); 492 if (IS_ERR(pcie->cra_base)) { 493 dev_err(&pdev->dev, "failed to map cra 
memory\n"); 494 return PTR_ERR(pcie->cra_base); 495 } 496 497 /* setup IRQ */ 498 pcie->irq = platform_get_irq(pdev, 0); 499 if (pcie->irq <= 0) { 500 dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq); 501 return -EINVAL; 502 } 503 504 irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); 505 506 return 0; 507} 508 509static int altera_pcie_probe(struct platform_device *pdev) 510{ 511 struct altera_pcie *pcie; 512 struct pci_bus *bus; 513 struct pci_bus *child; 514 int ret; 515 516 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); 517 if (!pcie) 518 return -ENOMEM; 519 520 pcie->pdev = pdev; 521 522 ret = altera_pcie_parse_dt(pcie); 523 if (ret) { 524 dev_err(&pdev->dev, "Parsing DT failed\n"); 525 return ret; 526 } 527 528 INIT_LIST_HEAD(&pcie->resources); 529 530 ret = altera_pcie_parse_request_of_pci_ranges(pcie); 531 if (ret) { 532 dev_err(&pdev->dev, "Failed add resources\n"); 533 return ret; 534 } 535 536 ret = altera_pcie_init_irq_domain(pcie); 537 if (ret) { 538 dev_err(&pdev->dev, "Failed creating IRQ Domain\n"); 539 return ret; 540 } 541 542 /* clear all interrupts */ 543 cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); 544 /* enable all interrupts */ 545 cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); 546 547 bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops, 548 pcie, &pcie->resources); 549 if (!bus) 550 return -ENOMEM; 551 552 pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); 553 pci_assign_unassigned_bus_resources(bus); 554 555 /* Configure PCI Express setting. 
*/ 556 list_for_each_entry(child, &bus->children, node) 557 pcie_bus_configure_settings(child); 558 559 pci_bus_add_devices(bus); 560 561 platform_set_drvdata(pdev, pcie); 562 return ret; 563} 564 565static const struct of_device_id altera_pcie_of_match[] = { 566 { .compatible = "altr,pcie-root-port-1.0", }, 567 {}, 568}; 569MODULE_DEVICE_TABLE(of, altera_pcie_of_match); 570 571static struct platform_driver altera_pcie_driver = { 572 .probe = altera_pcie_probe, 573 .driver = { 574 .name = "altera-pcie", 575 .of_match_table = altera_pcie_of_match, 576 .suppress_bind_attrs = true, 577 }, 578}; 579 580static int altera_pcie_init(void) 581{ 582 return platform_driver_register(&altera_pcie_driver); 583} 584module_init(altera_pcie_init); 585 586MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); 587MODULE_DESCRIPTION("Altera PCIe host controller driver"); 588MODULE_LICENSE("GPL v2"); 589