This source file includes the following definitions:
- bcma_pcie_read
- bcma_pcie_write
- bcma_pcie_mdio_set_phy
- bcma_pcie_mdio_read
- bcma_pcie_mdio_write
- bcma_pcie_mdio_writeread
- bcma_core_pci_fixcfg
- bcma_core_pci_early_init
- bcma_pcicore_polarity_workaround
- bcma_pcicore_serdes_workaround
- bcma_core_pci_config_fixup
- bcma_core_pci_clientmode_init
- bcma_core_pci_init
- bcma_core_pci_power_save
- bcma_core_pci_extend_L1timer
- bcma_core_pci_up
- bcma_core_pci_down
#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>
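
/*
 * Register access helpers. PCIe core registers are reached indirectly
 * through the BCMA_CORE_PCI_PCIEIND_ADDR/BCMA_CORE_PCI_PCIEIND_DATA pair;
 * the SerDes PHY behind the core is programmed over an MDIO interface
 * driven through BCMA_CORE_PCI_MDIO_CONTROL/BCMA_CORE_PCI_MDIO_DATA.
 */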
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
        pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
        pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR); /* read back so the address write completes */
        return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}

static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
        pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
        pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR); /* read back so the address write completes */
        pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}

static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
{
        u32 v;
        int i;

        /* Select the PHY to address in the following MDIO transactions */
        v = BCMA_CORE_PCI_MDIODATA_START;
        v |= BCMA_CORE_PCI_MDIODATA_WRITE;
        v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
              BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
        v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
              BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
        v |= BCMA_CORE_PCI_MDIODATA_TA;
        v |= (phy << 4);
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

        /* Poll until the transaction has completed */
        udelay(10);
        for (i = 0; i < 200; i++) {
                v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
                if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
                        break;
                usleep_range(1000, 2000);
        }
}

static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
{
        int max_retries = 10;
        u16 ret = 0;
        u32 v;
        int i;

        /* Enable the MDIO preamble and set the clock divisor */
        v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
        v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

        if (pc->core->id.rev >= 10) {
                /* Newer cores select the PHY separately and use new field offsets */
                max_retries = 200;
                bcma_pcie_mdio_set_phy(pc, device);
                v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
                     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
                v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
        } else {
                v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
                v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
        }

        v |= BCMA_CORE_PCI_MDIODATA_START;
        v |= BCMA_CORE_PCI_MDIODATA_READ;
        v |= BCMA_CORE_PCI_MDIODATA_TA;

        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

        /* Poll for completion, then fetch the read data */
        udelay(10);
        for (i = 0; i < max_retries; i++) {
                v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
                if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
                        udelay(10);
                        ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
                        break;
                }
                usleep_range(1000, 2000);
        }
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
        return ret;
}

static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
                                 u8 address, u16 data)
{
        int max_retries = 10;
        u32 v;
        int i;

        v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
        v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

        if (pc->core->id.rev >= 10) {
                max_retries = 200;
                bcma_pcie_mdio_set_phy(pc, device);
                v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
                     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
                v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
        } else {
                v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
                v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
        }

        v |= BCMA_CORE_PCI_MDIODATA_START;
        v |= BCMA_CORE_PCI_MDIODATA_WRITE;
        v |= BCMA_CORE_PCI_MDIODATA_TA;
        v |= data;
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

        udelay(10);
        for (i = 0; i < max_retries; i++) {
                v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
                if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
                        break;
                usleep_range(1000, 2000);
        }
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}

static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
                                    u8 address, u16 data)
{
        bcma_pcie_mdio_write(pc, device, address, data);
        return bcma_pcie_mdio_read(pc, device, address);
}
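
/*
 * Illustrative sketch only, not part of this file: the helpers above give
 * single-register MDIO access to the SerDes PHY, and callers typically
 * combine them into a read-modify-write (bcma_pcicore_serdes_workaround()
 * below follows the same pattern). The helper name and the set-bits
 * operation here are assumptions chosen for illustration.
 */
#if 0
static void example_serdes_set_bits(struct bcma_drv_pci *pc, u16 device,
                                    u8 address, u16 bits)
{
        u16 tmp = bcma_pcie_mdio_read(pc, device, address);

        bcma_pcie_mdio_write(pc, device, address, tmp | bits);
}
#endif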
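
/* Make the PI (instance) field in the SPROM shadow area match this core's
 * index, fixing it up if it points elsewhere.
 */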
static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
{
        struct bcma_device *core = pc->core;
        u16 val16, core_index;
        uint regoff;

        regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
        core_index = (u16)core->core_index;

        val16 = pcicore_read16(pc, regoff);
        if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
             != core_index) {
                val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
                        (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
                pcicore_write16(pc, regoff, val16);
        }
}
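
/* One-time early setup: detect whether the core operates in hostmode and,
 * for clientmode, apply the SPROM instance fixup above.
 */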
void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
{
        if (pc->early_setup_done)
                return;

        pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
        if (pc->hostmode)
                goto out;

        bcma_core_pci_fixcfg(pc);

out:
        pc->early_setup_done = true;
}
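
/* SerDes workarounds applied in clientmode: force the RX polarity
 * (inverting it if the PLP status register reports an inversion) and
 * clear the frequency-detect enable bit in the SerDes PLL control
 * register if it is set.
 */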
static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
{
        u32 tmp;

        tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
        if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
                return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
                       BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
        else
                return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
}

static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
{
        u16 tmp;

        bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
                             BCMA_CORE_PCI_SERDES_RX_CTRL,
                             bcma_pcicore_polarity_workaround(pc));
        tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
                                  BCMA_CORE_PCI_SERDES_PLL_CTRL);
        if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
                bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
                                     BCMA_CORE_PCI_SERDES_PLL_CTRL,
                                     tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}
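
/* Set BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST in the MISC_CONFIG word of
 * the SPROM shadow if it is not already set, allowing the device to leave
 * the L2/L3-Ready state without PERST#.
 */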
static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
{
        u16 val16;
        uint regoff;

        regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);

        val16 = pcicore_read16(pc, regoff);

        if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
                val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
                pcicore_write16(pc, regoff, val16);
        }
}
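
/* Clientmode initialization: SerDes workarounds plus the MISC_CONFIG fixup. */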
static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
        bcma_pcicore_serdes_workaround(pc);
        bcma_core_pci_config_fixup(pc);
}

void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
        if (pc->setup_done)
                return;

        bcma_core_pci_early_init(pc);

        if (pc->hostmode)
                bcma_core_pci_hostmode_init(pc);
        else
                bcma_core_pci_clientmode_init(pc);
}

void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
{
        struct bcma_drv_pci *pc;
        u16 data;

        if (bus->hosttype != BCMA_HOSTTYPE_PCI)
                return;

        pc = &bus->drv_pci[0];

        /* Program MDIO block 1 management registers; the values depend on
         * the PCIe core revision, and other revisions are left untouched.
         */
        if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
                data = up ? 0x74 : 0x7C;
                bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
                                         BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
                bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
                                         BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
        } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
                data = up ? 0x75 : 0x7D;
                bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
                                         BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
                bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
                                         BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
        }
}
EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);

static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
        u32 w;

        w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
        if (extend)
                w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
        else
                w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
        bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
        bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG); /* read back to flush the write */
}

void bcma_core_pci_up(struct bcma_drv_pci *pc)
{
        bcma_core_pci_extend_L1timer(pc, true);
}

void bcma_core_pci_down(struct bcma_drv_pci *pc)
{
        bcma_core_pci_extend_L1timer(pc, false);
}
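
/*
 * Illustrative usage sketch, not part of this file: a hypothetical caller
 * inside the bcma module (the PCI host glue is assumed here) could wrap
 * these helpers around a device power transition roughly as below. The
 * function name and call site are assumptions; only
 * bcma_core_pci_power_save() is exported from this file.
 */
#if 0
static void example_pci_power_transition(struct bcma_bus *bus, bool up)
{
        struct bcma_drv_pci *pc = &bus->drv_pci[0];

        if (bus->hosttype != BCMA_HOSTTYPE_PCI)
                return;

        if (up) {
                bcma_core_pci_up(pc);                   /* extend the ASPM L1 entry timer */
                bcma_core_pci_power_save(bus, true);    /* only acts on core revs 15-22 */
        } else {
                bcma_core_pci_power_save(bus, false);
                bcma_core_pci_down(pc);
        }
}
#endif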