This source file includes the following definitions.
- esdhc_readl_fixup
- esdhc_readw_fixup
- esdhc_readb_fixup
- esdhc_writel_fixup
- esdhc_writew_fixup
- esdhc_writeb_fixup
- esdhc_be_readl
- esdhc_le_readl
- esdhc_be_readw
- esdhc_le_readw
- esdhc_be_readb
- esdhc_le_readb
- esdhc_be_writel
- esdhc_le_writel
- esdhc_be_writew
- esdhc_le_writew
- esdhc_be_writeb
- esdhc_le_writeb
- esdhc_of_adma_workaround
- esdhc_of_enable_dma
- esdhc_of_get_max_clock
- esdhc_of_get_min_clock
- esdhc_clock_enable
- esdhc_of_set_clock
- esdhc_pltfm_set_bus_width
- esdhc_reset
- esdhc_signal_voltage_switch
- esdhc_tuning_block_enable
- esdhc_prepare_sw_tuning
- esdhc_execute_sw_tuning
- esdhc_execute_tuning
- esdhc_set_uhs_signaling
- esdhc_irq
- esdhc_of_suspend
- esdhc_of_resume
- esdhc_init
- esdhc_hs400_prepare_ddr
- sdhci_esdhc_probe
1 /*
2  * Freescale eSDHC controller driver.
3  *
4  * SDHCI platform driver for the enhanced SDHC (eSDHC) block found on
5  * Freescale/NXP MPC, P-, T-series and Layerscape SoCs.
6  *
7  * Authors: Xiaobo Xie <X.Xie@freescale.com>
8  *          Anton Vorontsov <avorontsov@ru.mvista.com>
9  *
10  * Released under the terms of the GNU GPL v2.
11  */
12 #include <linux/err.h>
13 #include <linux/io.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/delay.h>
17 #include <linux/module.h>
18 #include <linux/sys_soc.h>
19 #include <linux/clk.h>
20 #include <linux/ktime.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/mmc/host.h>
23 #include <linux/mmc/mmc.h>
24 #include "sdhci-pltfm.h"
25 #include "sdhci-esdhc.h"
26
27 #define VENDOR_V_22 0x12
28 #define VENDOR_V_23 0x13
29
30 #define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
31
32 struct esdhc_clk_fixup {
33 const unsigned int sd_dflt_max_clk;
34 const unsigned int max_clk[MMC_TIMING_NUM];
35 };
36
37 static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
38 .sd_dflt_max_clk = 25000000,
39 .max_clk[MMC_TIMING_MMC_HS] = 46500000,
40 .max_clk[MMC_TIMING_SD_HS] = 46500000,
41 };
42
43 static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
44 .sd_dflt_max_clk = 25000000,
45 .max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
46 .max_clk[MMC_TIMING_MMC_HS200] = 167000000,
47 };
48
49 static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
50 .sd_dflt_max_clk = 25000000,
51 .max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
52 .max_clk[MMC_TIMING_MMC_HS200] = 125000000,
53 };
54
55 static const struct esdhc_clk_fixup p1010_esdhc_clk = {
56 .sd_dflt_max_clk = 20000000,
57 .max_clk[MMC_TIMING_LEGACY] = 20000000,
58 .max_clk[MMC_TIMING_MMC_HS] = 42000000,
59 .max_clk[MMC_TIMING_SD_HS] = 40000000,
60 };
61
62 static const struct of_device_id sdhci_esdhc_of_match[] = {
63 { .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
64 { .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
65 { .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
66 { .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
67 { .compatible = "fsl,mpc8379-esdhc" },
68 { .compatible = "fsl,mpc8536-esdhc" },
69 { .compatible = "fsl,esdhc" },
70 { }
71 };
72 MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
73
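/*
 * Per-controller private data kept behind the sdhci_pltfm host: detected
 * vendor/spec IP versions, SoC erratum quirk flags, the source clock used
 * for the eSDHC, and the clock divider ratio used during tuning.
 */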
74 struct sdhci_esdhc {
75 u8 vendor_ver;
76 u8 spec_ver;
77 bool quirk_incorrect_hostver;
78 bool quirk_limited_clk_division;
79 bool quirk_unreliable_pulse_detection;
80 bool quirk_tuning_erratum_type1;
81 bool quirk_tuning_erratum_type2;
82 bool quirk_ignore_data_inhibit;
83 bool quirk_delay_before_data_reset;
84 bool in_sw_tuning;
85 unsigned int peripheral_clock;
86 const struct esdhc_clk_fixup *clk_fixup;
87 u32 div_ratio;
88 };
89
90 /**
91  * esdhc_readl_fixup - Fix up the value read from an incompatible eSDHC
92  *                     register so that it matches the SD spec layout.
93  *
94  * @host: pointer to sdhci_host
95  * @spec_reg: SD spec register address
96  * @value: 32-bit eSDHC register value read at spec_reg
97  *
98  * The SD spec defines 32/16/8/1-bit registers, while all eSDHC registers
99  * are 32 bits wide. Register size, address, function and bit positions
100  * therefore differ between the eSDHC programming model and the SD spec,
101  * and the raw value has to be rearranged before the SDHCI core sees it.
102  *
103  * Return: the fixed-up register value.
104  */
105 static u32 esdhc_readl_fixup(struct sdhci_host *host,
106 int spec_reg, u32 value)
107 {
108 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
109 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
110 u32 ret;
111
112 /*
113  * The ADMA capability bit in eSDHC is not compatible with the standard
114  * SDHC register, so fake SDHCI_CAN_DO_ADMA2 when ADMA is actually
115  * supported.
116  * Many FSL eSDHC controllers reset SDHCI_CAN_DO_ADMA1 to 1 even though
117  * they cannot do ADMA; only controllers with a vendor version greater
118  * than 2.2 (0x12) really support ADMA.
119  */
120 if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
121 if (esdhc->vendor_ver > VENDOR_V_22) {
122 ret = value | SDHCI_CAN_DO_ADMA2;
123 return ret;
124 }
125 }
126
127 /*
128  * The DAT and CMD line signal levels are not at the standard SDHC bit
129  * positions: eSDHC reports the DAT line levels at bits 27:24 and the
130  * CMD line level at bit 23, so shift them into the positions the
131  * SDHCI core expects. All other bits are passed through unchanged.
132  */
133 if (spec_reg == SDHCI_PRESENT_STATE) {
134 ret = value & 0x000fffff;
135 ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
136 ret |= (value << 1) & SDHCI_CMD_LVL;
137 return ret;
138 }
139
140 /*
141  * The mmc host DT properties are used to enable each speed mode
142  * according to SoC and board capability, so clear the SDR50/SDR104/
143  * DDR50 support bits here and let the device tree decide.
144  */
145 if (spec_reg == SDHCI_CAPABILITIES_1) {
146 ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
147 SDHCI_SUPPORT_DDR50);
148 return ret;
149 }
150
151 /*
152  * On controllers with quirk_ignore_data_inhibit set (e.g. the P2020
153  * eSDHC, see the probe function), the DATA inhibit bit in the present
154  * state register is unreliable and can stay asserted, which would make
155  * the SDHCI core wait forever. Mask SDHCI_DATA_INHIBIT out of the
156  * reported state on those controllers.
157  */
158 if ((spec_reg == SDHCI_PRESENT_STATE) &&
159 (esdhc->quirk_ignore_data_inhibit == true)) {
160 ret = value & ~SDHCI_DATA_INHIBIT;
161 return ret;
162 }
163
164 ret = value;
165 return ret;
166 }
167
168 static u16 esdhc_readw_fixup(struct sdhci_host *host,
169 int spec_reg, u32 value)
170 {
171 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
172 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
173 u16 ret;
174 int shift = (spec_reg & 0x2) * 8;
175
176 if (spec_reg == SDHCI_HOST_VERSION)
177 ret = value & 0xffff;
178 else
179 ret = (value >> shift) & 0xffff;
180
181 /* Some SoCs (e.g. T4240 rev 1.0/2.0) report an incorrect host
182  * controller version; return a known-good value instead. */
183 if ((spec_reg == SDHCI_HOST_VERSION) &&
184 (esdhc->quirk_incorrect_hostver))
185 ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
186 return ret;
187 }
188
189 static u8 esdhc_readb_fixup(struct sdhci_host *host,
190 int spec_reg, u32 value)
191 {
192 u8 ret;
193 u8 dma_bits;
194 int shift = (spec_reg & 0x3) * 8;
195
196 ret = (value >> shift) & 0xff;
197
198 /*
199  * "DMA select" is at offset 0x28 in the SD specification, but eSDHC
200  * keeps it higher up in the Protocol Control register.
201  */
202 if (spec_reg == SDHCI_HOST_CONTROL) {
203 /* DMA select is bits 23:22 of the Protocol Control register */
204 dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
205
206 ret &= ~SDHCI_CTRL_DMA_MASK;
207 ret |= dma_bits;
208 }
209 return ret;
210 }
211
212 /**
213  * esdhc_writel_fixup - Fix up an SD spec register value so that it can
214  *                      be written to the corresponding eSDHC register.
215  *
216  * @host: pointer to sdhci_host
217  * @spec_reg: SD spec register address
218  * @value: 8/16/32-bit SD spec register value to be written
219  * @old_value: current 32-bit eSDHC register value at spec_reg
220  *
221  * The SD spec defines 32/16/8/1-bit registers, while all eSDHC registers
222  * are 32 bits wide. Register size, address, function and bit positions
223  * therefore differ between the eSDHC programming model and the SD spec,
224  * and the value has to be rearranged before it is written to hardware.
225  *
226  * Return: the fixed-up register value to write.
227  */
228 static u32 esdhc_writel_fixup(struct sdhci_host *host,
229 int spec_reg, u32 value, u32 old_value)
230 {
231 u32 ret;
232
233 /*
234  * Enabling IRQSTATEN[BGESEN] only makes IRQSTAT[BGE] get set when
235  * SYSCTL[RSTD] is used for some special operations; it has no effect
236  * on normal operation, so always enable the block-gap interrupt.
237  */
238 if (spec_reg == SDHCI_INT_ENABLE)
239 ret = value | SDHCI_INT_BLK_GAP;
240 else
241 ret = value;
242
243 return ret;
244 }
245
246 static u32 esdhc_writew_fixup(struct sdhci_host *host,
247 int spec_reg, u16 value, u32 old_value)
248 {
249 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
250 int shift = (spec_reg & 0x2) * 8;
251 u32 ret;
252
253 switch (spec_reg) {
254 case SDHCI_TRANSFER_MODE:
255 /*
256  * Postpone this write: it must be done together with the command
257  * write below, so just shadow the value and return the old register.
258  */
259 pltfm_host->xfer_mode_shadow = value;
260 return old_value;
261 case SDHCI_COMMAND:
262 ret = (value << 16) | pltfm_host->xfer_mode_shadow;
263 return ret;
264 }
265
266 ret = old_value & (~(0xffff << shift));
267 ret |= (value << shift);
268
269 if (spec_reg == SDHCI_BLOCK_SIZE) {
270 /*
271  * The two last DMA boundary bits are reserved, and the first one is
272  * used for a non-standard 4096-byte block size that is not supported,
273  * so clear the DMA boundary bits before writing the block size.
274  */
275 ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
276 }
277 return ret;
278 }
279
280 static u32 esdhc_writeb_fixup(struct sdhci_host *host,
281 int spec_reg, u8 value, u32 old_value)
282 {
283 u32 ret;
284 u32 dma_bits;
285 u8 tmp;
286 int shift = (spec_reg & 0x3) * 8;
287
288 /*
289  * The eSDHC has no standard power control register, so do nothing
290  * here to avoid writing to an incompatible register.
291  */
292 if (spec_reg == SDHCI_POWER_CONTROL)
293 return old_value;
294
295 /*
296  * The host control byte is laid out differently on eSDHC.
297  */
298 if (spec_reg == SDHCI_HOST_CONTROL) {
299 /*
300  * If the host control register is not standard on this platform,
301  * leave it untouched and return the old value.
302  */
303 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
304 return old_value;
305
306 /* DMA select is bits 23:22 of the Protocol Control register */
307 dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
308 ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
309 tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
310 (old_value & SDHCI_CTRL_DMA_MASK);
311 ret = (ret & (~0xff)) | tmp;
312
313 /* Clear bits that are reserved in the eSDHC protocol control register */
314 ret &= ~ESDHC_HOST_CONTROL_RES;
315 return ret;
316 }
317
318 ret = (old_value & (~(0xff << shift))) | (value << shift);
319 return ret;
320 }
321
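/*
 * Raw register accessors. The eSDHC only supports 32-bit aligned accesses,
 * so byte and word operations read or read-modify-write the containing
 * 32-bit word and go through the fixup helpers above. The big-endian or
 * little-endian variants are selected at probe time from the
 * "little-endian" device-tree property.
 */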
322 static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
323 {
324 u32 ret;
325 u32 value;
326
327 if (reg == SDHCI_CAPABILITIES_1)
328 value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
329 else
330 value = ioread32be(host->ioaddr + reg);
331
332 ret = esdhc_readl_fixup(host, reg, value);
333
334 return ret;
335 }
336
337 static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
338 {
339 u32 ret;
340 u32 value;
341
342 if (reg == SDHCI_CAPABILITIES_1)
343 value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
344 else
345 value = ioread32(host->ioaddr + reg);
346
347 ret = esdhc_readl_fixup(host, reg, value);
348
349 return ret;
350 }
351
352 static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
353 {
354 u16 ret;
355 u32 value;
356 int base = reg & ~0x3;
357
358 value = ioread32be(host->ioaddr + base);
359 ret = esdhc_readw_fixup(host, reg, value);
360 return ret;
361 }
362
363 static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
364 {
365 u16 ret;
366 u32 value;
367 int base = reg & ~0x3;
368
369 value = ioread32(host->ioaddr + base);
370 ret = esdhc_readw_fixup(host, reg, value);
371 return ret;
372 }
373
374 static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
375 {
376 u8 ret;
377 u32 value;
378 int base = reg & ~0x3;
379
380 value = ioread32be(host->ioaddr + base);
381 ret = esdhc_readb_fixup(host, reg, value);
382 return ret;
383 }
384
385 static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
386 {
387 u8 ret;
388 u32 value;
389 int base = reg & ~0x3;
390
391 value = ioread32(host->ioaddr + base);
392 ret = esdhc_readb_fixup(host, reg, value);
393 return ret;
394 }
395
396 static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
397 {
398 u32 value;
399
400 value = esdhc_writel_fixup(host, reg, val, 0);
401 iowrite32be(value, host->ioaddr + reg);
402 }
403
404 static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
405 {
406 u32 value;
407
408 value = esdhc_writel_fixup(host, reg, val, 0);
409 iowrite32(value, host->ioaddr + reg);
410 }
411
412 static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
413 {
414 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
415 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
416 int base = reg & ~0x3;
417 u32 value;
418 u32 ret;
419
420 value = ioread32be(host->ioaddr + base);
421 ret = esdhc_writew_fixup(host, reg, val, value);
422 if (reg != SDHCI_TRANSFER_MODE)
423 iowrite32be(ret, host->ioaddr + base);
424
425 /* For software tuning, ESDHC_SMPCLKSEL has to be set about 1us
426  * after ESDHC_EXTN is set, so apply that sequencing here.
427  */
428 if (base == ESDHC_SYSTEM_CONTROL_2) {
429 if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
430 esdhc->in_sw_tuning) {
431 udelay(1);
432 ret |= ESDHC_SMPCLKSEL;
433 iowrite32be(ret, host->ioaddr + base);
434 }
435 }
436 }
437
438 static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
439 {
440 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
441 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
442 int base = reg & ~0x3;
443 u32 value;
444 u32 ret;
445
446 value = ioread32(host->ioaddr + base);
447 ret = esdhc_writew_fixup(host, reg, val, value);
448 if (reg != SDHCI_TRANSFER_MODE)
449 iowrite32(ret, host->ioaddr + base);
450
451 /* For software tuning, ESDHC_SMPCLKSEL has to be set about 1us
452  * after ESDHC_EXTN is set, so apply that sequencing here.
453  */
454 if (base == ESDHC_SYSTEM_CONTROL_2) {
455 if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
456 esdhc->in_sw_tuning) {
457 udelay(1);
458 ret |= ESDHC_SMPCLKSEL;
459 iowrite32(ret, host->ioaddr + base);
460 }
461 }
462 }
463
464 static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
465 {
466 int base = reg & ~0x3;
467 u32 value;
468 u32 ret;
469
470 value = ioread32be(host->ioaddr + base);
471 ret = esdhc_writeb_fixup(host, reg, val, value);
472 iowrite32be(ret, host->ioaddr + base);
473 }
474
475 static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
476 {
477 int base = reg & ~0x3;
478 u32 value;
479 u32 ret;
480
481 value = ioread32(host->ioaddr + base);
482 ret = esdhc_writeb_fixup(host, reg, val, value);
483 iowrite32(ret, host->ioaddr + base);
484 }
485
486 /*
487  * Workaround for an eSDHC 2.3 ADMA erratum: for an Abort or Suspend
488  * after a Stop at Block Gap, an ADMA error may be flagged even though
489  * both Transfer Complete and Block Gap Event are set.
490  * In that case clear the data error and restart the transfer from the
491  * next DMA block boundary instead of failing the request.
492  */
493 static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
494 {
495 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
496 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
497 bool applicable;
498 dma_addr_t dmastart;
499 dma_addr_t dmanow;
500
501 applicable = (intmask & SDHCI_INT_DATA_END) &&
502 (intmask & SDHCI_INT_BLK_GAP) &&
503 (esdhc->vendor_ver == VENDOR_V_23);
504 if (!applicable)
505 return;
506
507 host->data->error = 0;
508 dmastart = sg_dma_address(host->data->sg);
509 dmanow = dmastart + host->data->bytes_xfered;
510
511
512 /* Force the transfer to resume from the next DMA block boundary */
513 dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
514 SDHCI_DEFAULT_BOUNDARY_SIZE;
515 host->data->bytes_xfered = dmanow - dmastart;
516 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
517 }
518
519 static int esdhc_of_enable_dma(struct sdhci_host *host)
520 {
521 u32 value;
522 struct device *dev = mmc_dev(host->mmc);
523
524 if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
525 of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
526 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
527
528 value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
529
530 if (of_dma_is_coherent(dev->of_node))
531 value |= ESDHC_DMA_SNOOP;
532 else
533 value &= ~ESDHC_DMA_SNOOP;
534
535 sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
536 return 0;
537 }
538
539 static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
540 {
541 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
542 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
543
544 if (esdhc->peripheral_clock)
545 return esdhc->peripheral_clock;
546 else
547 return pltfm_host->clock;
548 }
549
550 static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
551 {
552 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
553 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
554 unsigned int clock;
555
556 if (esdhc->peripheral_clock)
557 clock = esdhc->peripheral_clock;
558 else
559 clock = pltfm_host->clock;
560 return clock / 256 / 16;
561 }
562
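/* Gate or ungate SDCLK and wait up to 20 ms for the clock to report stable. */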
563 static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
564 {
565 u32 val;
566 ktime_t timeout;
567
568 val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
569
570 if (enable)
571 val |= ESDHC_CLOCK_SDCLKEN;
572 else
573 val &= ~ESDHC_CLOCK_SDCLKEN;
574
575 sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
576
577 /* Wait max 20 ms for the clock to become stable */
578 timeout = ktime_add_ms(ktime_get(), 20);
579 val = ESDHC_CLOCK_STABLE;
580 while (1) {
581 bool timedout = ktime_after(ktime_get(), timeout);
582
583 if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
584 break;
585 if (timedout) {
586 pr_err("%s: Internal clock never stabilised.\n",
587 mmc_hostname(host->mmc));
588 break;
589 }
590 udelay(10);
591 }
592 }
593
594 static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
595 {
596 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
597 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
598 int pre_div = 1;
599 int div = 1;
600 int division;
601 ktime_t timeout;
602 long fixup = 0;
603 u32 temp;
604
605 host->mmc->actual_clock = 0;
606
607 if (clock == 0) {
608 esdhc_clock_enable(host, false);
609 return;
610 }
611
612 /* Controllers with vendor version below 2.3 start pre_div at 2 */
613 if (esdhc->vendor_ver < VENDOR_V_23)
614 pre_div = 2;
615
616 if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
617 esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
618 fixup = esdhc->clk_fixup->sd_dflt_max_clk;
619 else if (esdhc->clk_fixup)
620 fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
621
622 if (fixup && clock > fixup)
623 clock = fixup;
624
625 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
626 temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
627 ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
628 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
629
630 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
631 pre_div *= 2;
632
633 while (host->max_clk / pre_div / div > clock && div < 16)
634 div++;
635
636 if (esdhc->quirk_limited_clk_division &&
637 clock == MMC_HS200_MAX_DTR &&
638 (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
639 host->flags & SDHCI_HS400_TUNING)) {
640 division = pre_div * div;
641 if (division <= 4) {
642 pre_div = 4;
643 div = 1;
644 } else if (division <= 8) {
645 pre_div = 4;
646 div = 2;
647 } else if (division <= 12) {
648 pre_div = 4;
649 div = 3;
650 } else {
651 pr_warn("%s: using unsupported clock division.\n",
652 mmc_hostname(host->mmc));
653 }
654 }
655
656 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
657 clock, host->max_clk / pre_div / div);
658 host->mmc->actual_clock = host->max_clk / pre_div / div;
659 esdhc->div_ratio = pre_div * div;
660 pre_div >>= 1;
661 div--;
662
663 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
664 temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
665 | (div << ESDHC_DIVIDER_SHIFT)
666 | (pre_div << ESDHC_PREDIV_SHIFT));
667 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
668
669 if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
670 clock == MMC_HS200_MAX_DTR) {
671 temp = sdhci_readl(host, ESDHC_TBCTL);
672 sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
673 temp = sdhci_readl(host, ESDHC_SDCLKCTL);
674 sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
675 esdhc_clock_enable(host, true);
676
677 temp = sdhci_readl(host, ESDHC_DLLCFG0);
678 temp |= ESDHC_DLL_ENABLE;
679 if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
680 temp |= ESDHC_DLL_FREQ_SEL;
681 sdhci_writel(host, temp, ESDHC_DLLCFG0);
682 temp = sdhci_readl(host, ESDHC_TBCTL);
683 sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
684
685 esdhc_clock_enable(host, false);
686 temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
687 temp |= ESDHC_FLUSH_ASYNC_FIFO;
688 sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
689 }
690
691 /* Wait max 20 ms for the new clock configuration to stabilise */
692 timeout = ktime_add_ms(ktime_get(), 20);
693 while (1) {
694 bool timedout = ktime_after(ktime_get(), timeout);
695
696 if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
697 break;
698 if (timedout) {
699 pr_err("%s: Internal clock never stabilised.\n",
700 mmc_hostname(host->mmc));
701 return;
702 }
703 udelay(10);
704 }
705
706 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
707 temp |= ESDHC_CLOCK_SDCLKEN;
708 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
709 }
710
711 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
712 {
713 u32 ctrl;
714
715 ctrl = sdhci_readl(host, ESDHC_PROCTL);
716 ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
717 switch (width) {
718 case MMC_BUS_WIDTH_8:
719 ctrl |= ESDHC_CTRL_8BITBUS;
720 break;
721
722 case MMC_BUS_WIDTH_4:
723 ctrl |= ESDHC_CTRL_4BITBUS;
724 break;
725
726 default:
727 break;
728 }
729
730 sdhci_writel(host, ctrl, ESDHC_PROCTL);
731 }
732
733 static void esdhc_reset(struct sdhci_host *host, u8 mask)
734 {
735 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
736 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
737 u32 val, bus_width = 0;
738
739 /*
740  * On controllers with the delay-before-data-reset quirk (P2020), wait
741  * for any in-flight DMA to settle before issuing a data line reset.
742  */
743 if (esdhc->quirk_delay_before_data_reset &&
744 (mask & SDHCI_RESET_DATA) &&
745 (host->flags & SDHCI_REQ_USE_DMA))
746 mdelay(5);
747
748 /*
749  * Save the bus-width setting before a data reset on controllers with
750  * vendor version 2.2 or lower, since the reset clears it.
751  */
752 if ((mask & SDHCI_RESET_DATA) &&
753 (esdhc->vendor_ver <= VENDOR_V_22)) {
754 val = sdhci_readl(host, ESDHC_PROCTL);
755 bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
756 }
757
758 sdhci_reset(host, mask);
759
760 /*
761  * Restore the bus-width setting and re-enable the interrupt registers
762  * after a data reset on controllers with vendor version 2.2 or lower.
763  */
764 if ((mask & SDHCI_RESET_DATA) &&
765 (esdhc->vendor_ver <= VENDOR_V_22)) {
766 val = sdhci_readl(host, ESDHC_PROCTL);
767 val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
768 val |= bus_width;
769 sdhci_writel(host, val, ESDHC_PROCTL);
770
771 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
772 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
773 }
774
775 /*
776  * Some bits have to be cleared manually after a full reset on
777  * controllers implementing SDHCI spec 3.0 or later.
778  */
779 if ((mask & SDHCI_RESET_ALL) &&
780 (esdhc->spec_ver >= SDHCI_SPEC_300)) {
781 val = sdhci_readl(host, ESDHC_TBCTL);
782 val &= ~ESDHC_TB_EN;
783 sdhci_writel(host, val, ESDHC_TBCTL);
784
785 /*
786  * On parts with unreliable pulse detection, also clear the DLL
787  * pulse-stretch selection in ESDHC_DLLCFG1 after a full reset.
788  */
789 if (esdhc->quirk_unreliable_pulse_detection) {
790 val = sdhci_readl(host, ESDHC_DLLCFG1);
791 val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
792 sdhci_writel(host, val, ESDHC_DLLCFG1);
793 }
794 }
795 }
796
797 /*
798  * The SCFG (Supplemental Configuration Unit) provides SoC-specific
799  * configuration and status registers. On some platforms it contains an
800  * SDHC IO VSEL control register used to switch the SDHC IO voltage.
801  */
802 static const struct of_device_id scfg_device_ids[] = {
803 { .compatible = "fsl,t1040-scfg", },
804 { .compatible = "fsl,ls1012a-scfg", },
805 { .compatible = "fsl,ls1046a-scfg", },
806 {}
807 };
808
809 /* SDHC IO VSEL control register in the SCFG block */
810 #define SCFG_SDHCIOVSELCR 0x408
811 #define SDHCIOVSELCR_TGLEN 0x80000000
812 #define SDHCIOVSELCR_VSELVAL 0x60000000
813 #define SDHCIOVSELCR_SDHC_VS 0x00000001
814
815 static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
816 struct mmc_ios *ios)
817 {
818 struct sdhci_host *host = mmc_priv(mmc);
819 struct device_node *scfg_node;
820 void __iomem *scfg_base = NULL;
821 u32 sdhciovselcr;
822 u32 val;
823
824 /*
825  * Signal voltage switching is only applicable to host controllers
826  * implementing SDHCI spec v3.00 or later.
827  */
828 if (host->version < SDHCI_SPEC_300)
829 return 0;
830
831 val = sdhci_readl(host, ESDHC_PROCTL);
832
833 switch (ios->signal_voltage) {
834 case MMC_SIGNAL_VOLTAGE_330:
835 val &= ~ESDHC_VOLT_SEL;
836 sdhci_writel(host, val, ESDHC_PROCTL);
837 return 0;
838 case MMC_SIGNAL_VOLTAGE_180:
839 scfg_node = of_find_matching_node(NULL, scfg_device_ids);
840 if (scfg_node)
841 scfg_base = of_iomap(scfg_node, 0);
842 if (scfg_base) {
843 sdhciovselcr = SDHCIOVSELCR_TGLEN |
844 SDHCIOVSELCR_VSELVAL;
845 iowrite32be(sdhciovselcr,
846 scfg_base + SCFG_SDHCIOVSELCR);
847
848 val |= ESDHC_VOLT_SEL;
849 sdhci_writel(host, val, ESDHC_PROCTL);
850 mdelay(5);
851
852 sdhciovselcr = SDHCIOVSELCR_TGLEN |
853 SDHCIOVSELCR_SDHC_VS;
854 iowrite32be(sdhciovselcr,
855 scfg_base + SCFG_SDHCIOVSELCR);
856 iounmap(scfg_base);
857 } else {
858 val |= ESDHC_VOLT_SEL;
859 sdhci_writel(host, val, ESDHC_PROCTL);
860 }
861 return 0;
862 default:
863 return 0;
864 }
865 }
866
867 static struct soc_device_attribute soc_tuning_erratum_type1[] = {
868 { .family = "QorIQ T1023", .revision = "1.0", },
869 { .family = "QorIQ T1040", .revision = "1.0", },
870 { .family = "QorIQ T2080", .revision = "1.0", },
871 { .family = "QorIQ LS1021A", .revision = "1.0", },
872 { },
873 };
874
875 static struct soc_device_attribute soc_tuning_erratum_type2[] = {
876 { .family = "QorIQ LS1012A", .revision = "1.0", },
877 { .family = "QorIQ LS1043A", .revision = "1.*", },
878 { .family = "QorIQ LS1046A", .revision = "1.0", },
879 { .family = "QorIQ LS1080A", .revision = "1.0", },
880 { .family = "QorIQ LS2080A", .revision = "1.0", },
881 { .family = "QorIQ LA1575A", .revision = "1.0", },
882 { },
883 };
884
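/*
 * Turn the tuning block on or off. The SD clock is gated and the
 * asynchronous FIFO is flushed around the TBCTL[TB_EN] update.
 */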
885 static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
886 {
887 u32 val;
888
889 esdhc_clock_enable(host, false);
890
891 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
892 val |= ESDHC_FLUSH_ASYNC_FIFO;
893 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
894
895 val = sdhci_readl(host, ESDHC_TBCTL);
896 if (enable)
897 val |= ESDHC_TB_EN;
898 else
899 val &= ~ESDHC_TB_EN;
900 sdhci_writel(host, val, ESDHC_TBCTL);
901
902 esdhc_clock_enable(host, true);
903 }
904
905 static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
906 u8 *window_end)
907 {
908 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
909 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
910 u8 tbstat_15_8, tbstat_7_0;
911 u32 val;
912
913 if (esdhc->quirk_tuning_erratum_type1) {
914 *window_start = 5 * esdhc->div_ratio;
915 *window_end = 3 * esdhc->div_ratio;
916 return;
917 }
918
919 /* Write TBCTL[11:8] = 4'h8 */
920 val = sdhci_readl(host, ESDHC_TBCTL);
921 val &= ~(0xf << 8);
922 val |= 8 << 8;
923 sdhci_writel(host, val, ESDHC_TBCTL);
924
925 mdelay(1);
926
927 /* Read the TBCTL register and write the same value back */
928 val = sdhci_readl(host, ESDHC_TBCTL);
929 sdhci_writel(host, val, ESDHC_TBCTL);
930
931 mdelay(1);
932
933 /* Read the TBSTAT register twice */
934 val = sdhci_readl(host, ESDHC_TBSTAT);
935 val = sdhci_readl(host, ESDHC_TBSTAT);
936
937 /* Reset the data lines */
938 sdhci_reset(host, SDHCI_RESET_DATA);
939 /* Clear all pending interrupt status bits */
940 sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
941 /*
942  * If the difference between the two TBSTAT byte fields is larger than
943  * 4 * div_ratio, use a wider tuning window:
944  * TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio and
945  * TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio; otherwise use 5 and 3.
946  */
947 tbstat_7_0 = val & 0xff;
948 tbstat_15_8 = (val >> 8) & 0xff;
949
950 if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
951 *window_start = 8 * esdhc->div_ratio;
952 *window_end = 4 * esdhc->div_ratio;
953 } else {
954 *window_start = 5 * esdhc->div_ratio;
955 *window_end = 3 * esdhc->div_ratio;
956 }
957 }
958
959 static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
960 u8 window_start, u8 window_end)
961 {
962 struct sdhci_host *host = mmc_priv(mmc);
963 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
964 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
965 u32 val;
966 int ret;
967
968 /* Program the tuning window start and end pointers in TBPTR */
969 val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
970 ESDHC_WNDW_STRT_PTR_MASK;
971 val |= window_end & ESDHC_WNDW_END_PTR_MASK;
972 sdhci_writel(host, val, ESDHC_TBPTR);
973
974 /* Select the software tuning mode in TBCTL[TB_MODE] */
975 val = sdhci_readl(host, ESDHC_TBCTL);
976 val &= ~ESDHC_TB_MODE_MASK;
977 val |= ESDHC_TB_MODE_SW;
978 sdhci_writel(host, val, ESDHC_TBCTL);
979
980 esdhc->in_sw_tuning = true;
981 ret = sdhci_execute_tuning(mmc, opcode);
982 esdhc->in_sw_tuning = false;
983 return ret;
984 }
985
986 static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
987 {
988 struct sdhci_host *host = mmc_priv(mmc);
989 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
990 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
991 u8 window_start, window_end;
992 int ret, retries = 1;
993 bool hs400_tuning;
994 unsigned int clk;
995 u32 val;
996
997 /* For tuning mode, the SD clock divisor value must be larger than 3
998  * according to the reference manual, so cap the clock here.
999  */
1000 clk = esdhc->peripheral_clock / 3;
1001 if (host->clock > clk)
1002 esdhc_of_set_clock(host, clk);
1003
1004 esdhc_tuning_block_enable(host, true);
1005
1006 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1007
1008 do {
1009 if (esdhc->quirk_limited_clk_division &&
1010 hs400_tuning)
1011 esdhc_of_set_clock(host, host->clock);
1012
1013 /* Do HW tuning */
1014 val = sdhci_readl(host, ESDHC_TBCTL);
1015 val &= ~ESDHC_TB_MODE_MASK;
1016 val |= ESDHC_TB_MODE_3;
1017 sdhci_writel(host, val, ESDHC_TBCTL);
1018
1019 ret = sdhci_execute_tuning(mmc, opcode);
1020 if (ret)
1021 break;
1022
1023 /* If HW tuning fails and triggers one of the tuning errata,
1024  * fall back to the software tuning workaround below.
1025  */
1026 ret = host->tuning_err;
1027 if (ret == -EAGAIN &&
1028 (esdhc->quirk_tuning_erratum_type1 ||
1029 esdhc->quirk_tuning_erratum_type2)) {
1030 /* Recover the HS400 tuning flag before retrying */
1031 if (hs400_tuning)
1032 host->flags |= SDHCI_HS400_TUNING;
1033 pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
1034 mmc_hostname(mmc));
1035
1036 esdhc_prepare_sw_tuning(host, &window_start,
1037 &window_end);
1038 ret = esdhc_execute_sw_tuning(mmc, opcode,
1039 window_start,
1040 window_end);
1041 if (ret)
1042 break;
1043
1044 /* If SW tuning also failed, retry once with a reduced clock */
1045 ret = host->tuning_err;
1046 if (ret == -EAGAIN && retries) {
1047
1048 if (hs400_tuning)
1049 host->flags |= SDHCI_HS400_TUNING;
1050
1051 clk = host->max_clk / (esdhc->div_ratio + 1);
1052 esdhc_of_set_clock(host, clk);
1053 pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
1054 mmc_hostname(mmc));
1055 } else {
1056 break;
1057 }
1058 } else {
1059 break;
1060 }
1061 } while (retries--);
1062
1063 if (ret) {
1064 esdhc_tuning_block_enable(host, false);
1065 } else if (hs400_tuning) {
1066 val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1067 val |= ESDHC_FLW_CTL_BG;
1068 sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
1069 }
1070
1071 return ret;
1072 }
1073
1074 static void esdhc_set_uhs_signaling(struct sdhci_host *host,
1075 unsigned int timing)
1076 {
1077 if (timing == MMC_TIMING_MMC_HS400)
1078 esdhc_tuning_block_enable(host, true);
1079 else
1080 sdhci_set_uhs_signaling(host, timing);
1081 }
1082
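/*
 * On "fsl,p2020-esdhc" controllers a transfer-complete interrupt can be
 * raised during a multiple-block write while blocks are still outstanding;
 * clear and ignore SDHCI_INT_DATA_END in that case.
 */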
1083 static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1084 {
1085 u32 command;
1086
1087 if (of_find_compatible_node(NULL, NULL,
1088 "fsl,p2020-esdhc")) {
1089 command = SDHCI_GET_CMD(sdhci_readw(host,
1090 SDHCI_COMMAND));
1091 if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1092 sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1093 intmask & SDHCI_INT_DATA_END) {
1094 intmask &= ~SDHCI_INT_DATA_END;
1095 sdhci_writel(host, SDHCI_INT_DATA_END,
1096 SDHCI_INT_STATUS);
1097 }
1098 }
1099 return intmask;
1100 }
1101
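/*
 * System suspend/resume: the protocol control register is saved across
 * suspend and restored (together with the DMA snoop setting) on resume.
 */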
1102 #ifdef CONFIG_PM_SLEEP
1103 static u32 esdhc_proctl;
1104 static int esdhc_of_suspend(struct device *dev)
1105 {
1106 struct sdhci_host *host = dev_get_drvdata(dev);
1107
1108 esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
1109
1110 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1111 mmc_retune_needed(host->mmc);
1112
1113 return sdhci_suspend_host(host);
1114 }
1115
1116 static int esdhc_of_resume(struct device *dev)
1117 {
1118 struct sdhci_host *host = dev_get_drvdata(dev);
1119 int ret = sdhci_resume_host(host);
1120
1121 if (ret == 0) {
1122 /* Re-apply eSDHC-specific DMA snoop and protocol control settings */
1123 esdhc_of_enable_dma(host);
1124 sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
1125 }
1126 return ret;
1127 }
1128 #endif
1129
1130 static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
1131 esdhc_of_suspend,
1132 esdhc_of_resume);
1133
1134 static const struct sdhci_ops sdhci_esdhc_be_ops = {
1135 .read_l = esdhc_be_readl,
1136 .read_w = esdhc_be_readw,
1137 .read_b = esdhc_be_readb,
1138 .write_l = esdhc_be_writel,
1139 .write_w = esdhc_be_writew,
1140 .write_b = esdhc_be_writeb,
1141 .set_clock = esdhc_of_set_clock,
1142 .enable_dma = esdhc_of_enable_dma,
1143 .get_max_clock = esdhc_of_get_max_clock,
1144 .get_min_clock = esdhc_of_get_min_clock,
1145 .adma_workaround = esdhc_of_adma_workaround,
1146 .set_bus_width = esdhc_pltfm_set_bus_width,
1147 .reset = esdhc_reset,
1148 .set_uhs_signaling = esdhc_set_uhs_signaling,
1149 .irq = esdhc_irq,
1150 };
1151
1152 static const struct sdhci_ops sdhci_esdhc_le_ops = {
1153 .read_l = esdhc_le_readl,
1154 .read_w = esdhc_le_readw,
1155 .read_b = esdhc_le_readb,
1156 .write_l = esdhc_le_writel,
1157 .write_w = esdhc_le_writew,
1158 .write_b = esdhc_le_writeb,
1159 .set_clock = esdhc_of_set_clock,
1160 .enable_dma = esdhc_of_enable_dma,
1161 .get_max_clock = esdhc_of_get_max_clock,
1162 .get_min_clock = esdhc_of_get_min_clock,
1163 .adma_workaround = esdhc_of_adma_workaround,
1164 .set_bus_width = esdhc_pltfm_set_bus_width,
1165 .reset = esdhc_reset,
1166 .set_uhs_signaling = esdhc_set_uhs_signaling,
1167 .irq = esdhc_irq,
1168 };
1169
1170 static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
1171 .quirks = ESDHC_DEFAULT_QUIRKS |
1172 #ifdef CONFIG_PPC
1173 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1174 #endif
1175 SDHCI_QUIRK_NO_CARD_NO_RESET |
1176 SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1177 .ops = &sdhci_esdhc_be_ops,
1178 };
1179
1180 static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
1181 .quirks = ESDHC_DEFAULT_QUIRKS |
1182 SDHCI_QUIRK_NO_CARD_NO_RESET |
1183 SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1184 .ops = &sdhci_esdhc_le_ops,
1185 };
1186
1187 static struct soc_device_attribute soc_incorrect_hostver[] = {
1188 { .family = "QorIQ T4240", .revision = "1.0", },
1189 { .family = "QorIQ T4240", .revision = "2.0", },
1190 { },
1191 };
1192
1193 static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
1194 { .family = "QorIQ LX2160A", .revision = "1.0", },
1195 { .family = "QorIQ LX2160A", .revision = "2.0", },
1196 { .family = "QorIQ LS1028A", .revision = "1.0", },
1197 { },
1198 };
1199
1200 static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
1201 { .family = "QorIQ LX2160A", .revision = "1.0", },
1202 { },
1203 };
1204
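/*
 * One-time controller setup at probe: read the vendor/spec versions,
 * apply SoC-specific quirks, look up the clock fixup table, and switch
 * to the peripheral clock when one is provided in the device tree.
 */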
1205 static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
1206 {
1207 const struct of_device_id *match;
1208 struct sdhci_pltfm_host *pltfm_host;
1209 struct sdhci_esdhc *esdhc;
1210 struct device_node *np;
1211 struct clk *clk;
1212 u32 val;
1213 u16 host_ver;
1214
1215 pltfm_host = sdhci_priv(host);
1216 esdhc = sdhci_pltfm_priv(pltfm_host);
1217
1218 host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
1219 esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
1220 SDHCI_VENDOR_VER_SHIFT;
1221 esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
1222 if (soc_device_match(soc_incorrect_hostver))
1223 esdhc->quirk_incorrect_hostver = true;
1224 else
1225 esdhc->quirk_incorrect_hostver = false;
1226
1227 if (soc_device_match(soc_fixup_sdhc_clkdivs))
1228 esdhc->quirk_limited_clk_division = true;
1229 else
1230 esdhc->quirk_limited_clk_division = false;
1231
1232 if (soc_device_match(soc_unreliable_pulse_detection))
1233 esdhc->quirk_unreliable_pulse_detection = true;
1234 else
1235 esdhc->quirk_unreliable_pulse_detection = false;
1236
1237 match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
1238 if (match)
1239 esdhc->clk_fixup = match->data;
1240 np = pdev->dev.of_node;
1241
1242 if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
1243 esdhc->quirk_delay_before_data_reset = true;
1244
1245 clk = of_clk_get(np, 0);
1246 if (!IS_ERR(clk)) {
1247 /*
1248  * esdhc->peripheral_clock holds the eSDHC base clock when the
1249  * peripheral clock is used.
1250  * On some platforms (e.g. LS1046A, LS1028A) the rate reported by the
1251  * common clock API is the peripheral clock, while the eSDHC base
1252  * clock is actually half of it, so divide the rate by two there.
1253  */
1254 if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
1255 of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
1256 esdhc->peripheral_clock = clk_get_rate(clk) / 2;
1257 else
1258 esdhc->peripheral_clock = clk_get_rate(clk);
1259
1260 clk_put(clk);
1261 }
1262
1263 if (esdhc->peripheral_clock) {
1264 esdhc_clock_enable(host, false);
1265 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
1266 val |= ESDHC_PERIPHERAL_CLK_SEL;
1267 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
1268 esdhc_clock_enable(host, true);
1269 }
1270 }
1271
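/* Disable the tuning block while preparing the DDR transition for HS400. */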
1272 static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1273 {
1274 esdhc_tuning_block_enable(mmc_priv(mmc), false);
1275 return 0;
1276 }
1277
1278 static int sdhci_esdhc_probe(struct platform_device *pdev)
1279 {
1280 struct sdhci_host *host;
1281 struct device_node *np;
1282 struct sdhci_pltfm_host *pltfm_host;
1283 struct sdhci_esdhc *esdhc;
1284 int ret;
1285
1286 np = pdev->dev.of_node;
1287
1288 if (of_property_read_bool(np, "little-endian"))
1289 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
1290 sizeof(struct sdhci_esdhc));
1291 else
1292 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
1293 sizeof(struct sdhci_esdhc));
1294
1295 if (IS_ERR(host))
1296 return PTR_ERR(host);
1297
1298 host->mmc_host_ops.start_signal_voltage_switch =
1299 esdhc_signal_voltage_switch;
1300 host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
1301 host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
1302 host->tuning_delay = 1;
1303
1304 esdhc_init(pdev, host);
1305
1306 sdhci_get_of_property(pdev);
1307
1308 pltfm_host = sdhci_priv(host);
1309 esdhc = sdhci_pltfm_priv(pltfm_host);
1310 if (soc_device_match(soc_tuning_erratum_type1))
1311 esdhc->quirk_tuning_erratum_type1 = true;
1312 else
1313 esdhc->quirk_tuning_erratum_type1 = false;
1314
1315 if (soc_device_match(soc_tuning_erratum_type2))
1316 esdhc->quirk_tuning_erratum_type2 = true;
1317 else
1318 esdhc->quirk_tuning_erratum_type2 = false;
1319
1320 if (esdhc->vendor_ver == VENDOR_V_22)
1321 host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
1322
1323 if (esdhc->vendor_ver > VENDOR_V_22)
1324 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1325
1326 if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
1327 host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
1328 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1329 }
1330
1331 if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
1332 of_device_is_compatible(np, "fsl,p5020-esdhc") ||
1333 of_device_is_compatible(np, "fsl,p4080-esdhc") ||
1334 of_device_is_compatible(np, "fsl,p1020-esdhc") ||
1335 of_device_is_compatible(np, "fsl,t1040-esdhc"))
1336 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1337
1338 if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
1339 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1340
1341 esdhc->quirk_ignore_data_inhibit = false;
1342 if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1343 /*
1344  * P2020 has a non-standard host control register, so mark it broken
1345  * and also ignore the unreliable data inhibit bit on that platform.
1346  */
1347 host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
1348 esdhc->quirk_ignore_data_inhibit = true;
1349 }
1350
1351 /* Parse common mmc host properties from the device tree */
1352 ret = mmc_of_parse(host->mmc);
1353 if (ret)
1354 goto err;
1355
1356 mmc_of_parse_voltage(np, &host->ocr_mask);
1357
1358 ret = sdhci_add_host(host);
1359 if (ret)
1360 goto err;
1361
1362 return 0;
1363 err:
1364 sdhci_pltfm_free(pdev);
1365 return ret;
1366 }
1367
1368 static struct platform_driver sdhci_esdhc_driver = {
1369 .driver = {
1370 .name = "sdhci-esdhc",
1371 .of_match_table = sdhci_esdhc_of_match,
1372 .pm = &esdhc_of_dev_pm_ops,
1373 },
1374 .probe = sdhci_esdhc_probe,
1375 .remove = sdhci_pltfm_unregister,
1376 };
1377
1378 module_platform_driver(sdhci_esdhc_driver);
1379
1380 MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
1381 MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
1382 "Anton Vorontsov <avorontsov@ru.mvista.com>");
1383 MODULE_LICENSE("GPL v2");