/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

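/* the AFI dispatches MSIs via 8 vector registers of 32 bits each, see the
 * AFI_MSI_VEC0..7 definitions and tegra_pcie_msi_irq() below */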
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0		0x6c
#define AFI_MSI_VEC1		0x70
#define AFI_MSI_VEC2		0x74
#define AFI_MSI_VEC3		0x78
#define AFI_MSI_VEC4		0x7c
#define AFI_MSI_VEC5		0x80
#define AFI_MSI_VEC6		0x84
#define AFI_MSI_VEC7		0x88

#define AFI_MSI_EN_VEC0		0x8c
#define AFI_MSI_EN_VEC1		0x90
#define AFI_MSI_EN_VEC2		0x94
#define AFI_MSI_EN_VEC3		0x98
#define AFI_MSI_EN_VEC4		0x9c
#define AFI_MSI_EN_VEC5		0xa0
#define AFI_MSI_EN_VEC6		0xa4
#define AFI_MSI_EN_VEC7		0xa8

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX2_CTRL			0x128
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_VEND_XP	0x00000F00
#define  RP_VEND_XP_DL_UP	(1 << 30)

#define RP_PRIV_MISC	0x00000FE0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define PADS_CTL_SEL		0x0000009C

#define PADS_CTL		0x000000A0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000B8
#define PADS_PLL_CTL_TEGRA30			0x000000B4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000C8
#define PADS_REFCLK_CFG1			0x000000CC
#define PADS_REFCLK_BIAS			0x000000D0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */

/* Default value provided by HW engineering is 0xfa5c */
#define PADS_REFCLK_CFG_VALUE \
	( \
		(0x17 << PADS_REFCLK_CFG_TERM_SHIFT)   | \
		(0    << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
		(0xa  << PADS_REFCLK_CFG_PREDI_SHIFT)  | \
		(0xf  << PADS_REFCLK_CFG_DRVI_SHIFT)     \
	)
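
/*
 * Sanity check of the default above: (0x17 << 2) | (0 << 7) | (0xa << 8) |
 * (0xf << 12) = 0x005c | 0x0a00 | 0xf000 = 0xfa5c, matching the value
 * quoted by HW engineering.
 */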

struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	unsigned long pages;
	struct mutex lock;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc_data {
	unsigned int num_ports;
	unsigned int msi_base_shift;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	int irq;

	struct list_head buses;
	struct resource *cs;

	struct resource all;
	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource prefetch;
	struct resource busn;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	unsigned int num_ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc_data *soc_data;
	struct dentry *debugfs;
};

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;
};

struct tegra_pcie_bus {
	struct vm_struct *area;
	struct list_head list;
	unsigned int nr;
};

static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 * To work around this, 1 MiB of virtual address space is allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * bus number bits are hidden so that the extended register number bits
 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 *
 *    [19:16] extended register number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 * address space via the MMU.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xfc);
}
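
/*
 * Worked example: devfn = PCI_DEVFN(1, 0) and where = 0x104 yield
 * ((0x104 & 0xf00) << 8) | (1 << 11) | (0 << 8) | (0x104 & 0xfc) =
 * 0x10000 | 0x00800 | 0x00004 = 0x10804, i.e. the extended register
 * number ends up in bits [19:16] as described in the comment above.
 */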

static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
				err);
			goto unmap;
		}
	}

	return bus;

unmap:
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}

/*
 * Look up a virtual address mapping for the specified bus number. If no such
 * mapping exists, try to create one.
 */
static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
					unsigned int busnr)
{
	struct tegra_pcie_bus *bus;

	list_for_each_entry(bus, &pcie->buses, list)
		if (bus->nr == busnr)
			return (void __iomem *)bus->area->addr;

	bus = tegra_pcie_bus_alloc(pcie, busnr);
	if (IS_ERR(bus))
		return NULL;

	list_add_tail(&bus->list, &pcie->buses);

	return (void __iomem *)bus->area->addr;
}

static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
					     unsigned int devfn,
					     int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		addr = tegra_pcie_bus_map(pcie, bus->number);
		if (!addr) {
			dev_err(pcie->dev,
				"failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_conf_address,
	.read = pci_generic_config_read32,
	.write = pci_generic_config_write32,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = AFI_PEX2_CTRL;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);

static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	int err;

	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
	if (err < 0)
		return err;

	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
	if (err)
		return err;

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	pci_ioremap_io(pcie->pio.start, pcie->io.start);

	return 1;
}

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	struct pci_bus *bus;

	bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
				  &sys->resources);
	if (!bus)
		return NULL;

	pci_scan_child_bus(bus);

	return bus;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT)
		dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
			signature);
	else
		dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
			signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(pcie->dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(pcie->dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

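	/*
	 * Note: the AFI_AXI_BAR*_SZ registers below appear to be programmed
	 * in units of 4 KiB (hence the size >> 12), consistent with the
	 * "4K increments" comment on AFI_MSI_BAR_SZ at the end of this
	 * function; this granularity is an inference from the code, not
	 * from the TRM.
	 */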
	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock and set the
	 * TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	if (!pcie->phy)
		err = tegra_pcie_phy_enable(pcie);
	else
		err = phy_power_on(pcie->phy);

	if (err < 0) {
		dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
		return err;
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	int err;

	/* TODO: disable and unprepare clocks? */

	err = phy_power_off(pcie->phy);
	if (err < 0)
		dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(pcie->dev, "failed to enable regulators: %d\n", err);

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(pcie->dev, "failed to enable CML clock: %d\n",
				err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;

	pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get resets: %d\n", err);
		return err;
	}

	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to power up: %d\n", err);
		return err;
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	int err;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);

	err = phy_exit(pcie->phy);
	if (err < 0)
		dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);

	return 0;
}

static int tegra_msi_alloc(struct tegra_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
{
	struct device *dev = chip->chip.dev;

	mutex_lock(&chip->lock);

	if (!test_bit(irq, chip->used))
		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
	else
		clear_bit(irq, chip->used);

	mutex_unlock(&chip->lock);
}

static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * That's weird: who triggered this?
				 * Just clear it.
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}

static int tegra_msi_setup_irq(struct msi_controller *chip,
			       struct pci_dev *pdev, struct msi_desc *desc)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = tegra_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		tegra_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = virt_to_phys((void *)msi->pages);
	/* 32 bit address only */
	msg.address_hi = 0;
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static void tegra_msi_teardown_irq(struct msi_controller *chip,
				   unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	irq_dispose_mapping(irq);
	tegra_msi_free(msi, hwirq);
}

static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};

static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned long base;
	int err;
	u32 reg;

	mutex_init(&msi->lock);

	msi->chip.dev = pcie->dev;
	msi->chip.setup_irq = tegra_msi_setup_irq;
	msi->chip.teardown_irq = tegra_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(&pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto err;
	}

	msi->irq = err;

	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
			  tegra_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup AFI/FPCI range */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	base = virt_to_phys((void *)msi->pages);

	afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
	/* this register is in 4K increments */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* enable all MSI vectors */
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

	/* and unmask the MSI interrupt */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}

static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}

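/*
 * The "lanes" argument below packs the nvidia,num-lanes value of each root
 * port into one byte per port index, as assembled in tegra_pcie_parse_dt()
 * via lanes |= value << (index << 3). For example, 0x00000204 describes a
 * 4-lane port 0 and a 2-lane port 1.
 */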
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device_node *np = pcie->dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
		switch (lanes) {
		case 0x0000104:
			dev_info(pcie->dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
			return 0;

		case 0x0000102:
			dev_info(pcie->dev, "2x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			dev_info(pcie->dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			dev_info(pcie->dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			dev_info(pcie->dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			dev_info(pcie->dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Check whether a given set of supplies is available in a device tree node.
 * This is used to check whether the new or the legacy device tree bindings
 * should be used.
 */
static bool of_regulator_bulk_available(struct device_node *np,
					struct regulator_bulk_data *supplies,
					unsigned int num_supplies)
{
	char property[32];
	unsigned int i;

	for (i = 0; i < num_supplies; i++) {
		snprintf(property, 32, "%s-supply", supplies[i].supply);

		if (of_find_property(np, property, NULL) == NULL)
			return false;
	}

	return true;
}

/*
 * Old versions of the device tree binding for this device used a set of power
 * supplies that didn't match the hardware inputs. This happened to work for a
 * number of cases but is not future proof. However, to preserve backwards-
 * compatibility with old device trees, this function will try to use the old
 * set of supplies.
 */
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
	struct device_node *np = pcie->dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
		pcie->num_supplies = 3;
	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
		pcie->num_supplies = 2;

	if (pcie->num_supplies == 0) {
		dev_err(pcie->dev, "device %s not supported in legacy mode\n",
			np->full_name);
		return -ENODEV;
	}

	pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
				      sizeof(*pcie->supplies),
				      GFP_KERNEL);
	if (!pcie->supplies)
		return -ENOMEM;

	pcie->supplies[0].supply = "pex-clk";
	pcie->supplies[1].supply = "vdd";

	if (pcie->num_supplies > 2)
		pcie->supplies[2].supply = "avdd";

	return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
				       pcie->supplies);
}

/*
 * Obtains the list of regulators required for a particular generation of the
 * IP block.
 *
 * This would've been nice to do simply by providing static tables for use
 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
 * and either pair seems to be optional depending on which ports are being
 * used.
 */
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
	struct device_node *np = pcie->dev->of_node;
	unsigned int i = 0;

	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
		pcie->num_supplies = 7;

		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-pll-erefe";
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		bool need_pexa = false, need_pexb = false;

		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
		if (lane_mask & 0x0f)
			need_pexa = true;

		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
		if (lane_mask & 0x30)
			need_pexb = true;

		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
					 (need_pexb ? 2 : 0);

		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-plle";

		if (need_pexa) {
			pcie->supplies[i++].supply = "avdd-pexa";
			pcie->supplies[i++].supply = "vdd-pexa";
		}

		if (need_pexb) {
			pcie->supplies[i++].supply = "avdd-pexb";
			pcie->supplies[i++].supply = "vdd-pexb";
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		pcie->num_supplies = 5;

		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[0].supply = "avdd-pex";
		pcie->supplies[1].supply = "vdd-pex";
		pcie->supplies[2].supply = "avdd-pex-pll";
		pcie->supplies[3].supply = "avdd-plle";
		pcie->supplies[4].supply = "vddio-pex-clk";
	}

	if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
					pcie->num_supplies))
		return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
					       pcie->supplies);

	/*
	 * If not all regulators are available for this new scheme, assume
	 * that the device tree complies with an older version of the device
	 * tree binding.
	 */
	dev_info(pcie->dev, "using legacy DT binding for power supplies\n");

	devm_kfree(pcie->dev, pcie->supplies);
	pcie->num_supplies = 0;

	return tegra_pcie_get_legacy_regulators(pcie);
}

static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct device_node *np = pcie->dev->of_node, *port;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 lanes = 0, mask = 0;
	unsigned int lane = 0;
	struct resource res;
	int err;

	memset(&pcie->all, 0, sizeof(pcie->all));
	pcie->all.flags = IORESOURCE_MEM;
	pcie->all.name = np->full_name;
	pcie->all.start = ~0;
	pcie->all.end = 0;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pcie->dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, np, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = np->full_name;

			/*
			 * The Tegra PCIe host bridge uses this to program the
			 * mapping of the I/O space to the physical address,
			 * so we override the .start and .end fields here that
			 * of_pci_range_to_resource() converted to I/O space.
			 * We also set the IORESOURCE_MEM type to clarify that
			 * the resource is in the physical memory space.
			 */
			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "prefetchable";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "non-prefetchable";
			}
			break;
		}

		if (res.start <= pcie->all.start)
			pcie->all.start = res.start;

		if (res.end >= pcie->all.end)
			pcie->all.end = res.end;
	}

	err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
	if (err < 0)
		return err;

	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(pcie->dev, "failed to parse bus-range property: %d\n",
			err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(pcie->dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		lanes |= value << (index << 3);

		if (!of_device_is_available(port)) {
			lane += value;
			continue;
		}

		mask |= ((1 << value) - 1) << lane;
		lane += value;

		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;

		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(pcie->dev, "invalid lane configuration\n");
		return err;
	}

	err = tegra_pcie_get_regulators(pcie, mask);
	if (err < 0)
		return err;

	return 0;
}

/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
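/*
 * Each poll loop below sleeps 1-2 ms per iteration for up to 200 iterations,
 * i.e. roughly 0.2-0.4 seconds per loop; with up to 3 retries this is
 * presumably where the "up to 1.2 seconds" figure above comes from.
 */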
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	unsigned int retries = 3;
	unsigned long value;

	/* override presence detection */
	value = readl(port->base + RP_PRIV_MISC);
	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
	writel(value, port->base + RP_PRIV_MISC);

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(port->pcie->dev, "link %u down, retrying\n",
				port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}

static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port, *tmp;
	struct hw_pci hw;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}

	memset(&hw, 0, sizeof(hw));

#ifdef CONFIG_PCI_MSI
	hw.msi_ctrl = &pcie->msi.chip;
#endif

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = tegra_pcie_setup;
	hw.map_irq = tegra_pcie_map_irq;
	hw.scan = tegra_pcie_scan_bus;
	hw.ops = &tegra_pcie_ops;

	pci_common_init_dev(pcie->dev, &hw);

	return 0;
}

static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
};

static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
};

static const struct tegra_pcie_soc_data tegra124_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
};

static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);

static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	if (list_empty(&pcie->ports))
		return NULL;

	seq_printf(s, "Index  Status\n");

	return seq_list_start(&pcie->ports, *pos);
}

static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}

static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}

static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
{
	bool up = false, active = false;
	struct tegra_pcie_port *port;
	unsigned int value;

	port = list_entry(v, struct tegra_pcie_port, list);

	value = readl(port->base + RP_VEND_XP);

	if (value & RP_VEND_XP_DL_UP)
		up = true;

	value = readl(port->base + RP_LINK_CONTROL_STATUS);

	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
		active = true;

	seq_printf(s, "%2u     ", port->index);

	if (up)
		seq_printf(s, "up");

	if (active) {
		if (up)
			seq_printf(s, ", ");

		seq_printf(s, "active");
	}

	seq_printf(s, "\n");
	return 0;
}

static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};

static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
{
	struct tegra_pcie *pcie = inode->i_private;
	struct seq_file *s;
	int err;

	err = seq_open(file, &tegra_pcie_ports_seq_ops);
	if (err)
		return err;

	s = file->private_data;
	s->private = pcie;

	return 0;
}

static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
{
	struct dentry *file;

	pcie->debugfs = debugfs_create_dir("pcie", NULL);
	if (!pcie->debugfs)
		return -ENOMEM;

	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
				   pcie, &tegra_pcie_ports_ops);
	if (!file)
		goto remove;

	return 0;

remove:
	debugfs_remove_recursive(pcie->debugfs);
	pcie->debugfs = NULL;
	return -ENOMEM;
}

static int tegra_pcie_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct tegra_pcie *pcie;
	int err;

	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->soc_data = match->data;
	pcie->dev = &pdev->dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	pcibios_min_mem = 0;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
				err);
	}

	platform_set_drvdata(pdev, pcie);
	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}

static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);

MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
MODULE_LICENSE("GPL v2");