This source file includes following definitions.
- sdhci_dumpregs
- sdhci_do_enable_v4_mode
- sdhci_enable_v4_mode
- sdhci_data_line_cmd
- sdhci_set_card_detection
- sdhci_enable_card_detection
- sdhci_disable_card_detection
- sdhci_runtime_pm_bus_on
- sdhci_runtime_pm_bus_off
- sdhci_reset
- sdhci_do_reset
- sdhci_set_default_irqs
- sdhci_config_dma
- sdhci_init
- sdhci_reinit
- __sdhci_led_activate
- __sdhci_led_deactivate
- sdhci_led_control
- sdhci_led_register
- sdhci_led_unregister
- sdhci_led_activate
- sdhci_led_deactivate
- sdhci_led_register
- sdhci_led_unregister
- sdhci_led_activate
- sdhci_led_deactivate
- sdhci_mod_timer
- sdhci_del_timer
- sdhci_has_requests
- sdhci_read_block_pio
- sdhci_write_block_pio
- sdhci_transfer_pio
- sdhci_pre_dma_transfer
- sdhci_kmap_atomic
- sdhci_kunmap_atomic
- sdhci_adma_write_desc
- __sdhci_adma_write_desc
- sdhci_adma_mark_end
- sdhci_adma_table_pre
- sdhci_adma_table_post
- sdhci_set_adma_addr
- sdhci_sdma_address
- sdhci_set_sdma_addr
- sdhci_target_timeout
- sdhci_calc_sw_timeout
- sdhci_calc_timeout
- sdhci_set_transfer_irqs
- sdhci_set_data_timeout_irq
- __sdhci_set_timeout
- sdhci_set_timeout
- sdhci_prepare_data
- sdhci_auto_cmd12
- sdhci_auto_cmd_select
- sdhci_set_transfer_mode
- sdhci_needs_reset
- __sdhci_finish_mrq
- sdhci_finish_mrq
- sdhci_finish_data
- sdhci_send_command
- sdhci_read_rsp_136
- sdhci_finish_command
- sdhci_get_preset_value
- sdhci_calc_clk
- sdhci_enable_clk
- sdhci_set_clock
- sdhci_set_power_reg
- sdhci_set_power_noreg
- sdhci_set_power
- sdhci_request
- sdhci_set_bus_width
- sdhci_set_uhs_signaling
- sdhci_set_ios
- sdhci_get_cd
- sdhci_check_ro
- sdhci_get_ro
- sdhci_hw_reset
- sdhci_enable_sdio_irq_nolock
- sdhci_enable_sdio_irq
- sdhci_ack_sdio_irq
- sdhci_start_signal_voltage_switch
- sdhci_card_busy
- sdhci_prepare_hs400_tuning
- sdhci_start_tuning
- sdhci_end_tuning
- sdhci_reset_tuning
- sdhci_abort_tuning
- sdhci_send_tuning
- __sdhci_execute_tuning
- sdhci_execute_tuning
- sdhci_enable_preset_value
- sdhci_post_req
- sdhci_pre_req
- sdhci_error_out_mrqs
- sdhci_card_event
- sdhci_request_done
- sdhci_complete_work
- sdhci_timeout_timer
- sdhci_timeout_data_timer
- sdhci_cmd_irq
- sdhci_adma_show_error
- sdhci_data_irq
- sdhci_defer_done
- sdhci_irq
- sdhci_thread_irq
- sdhci_cd_irq_can_wakeup
- sdhci_enable_irq_wakeups
- sdhci_disable_irq_wakeups
- sdhci_suspend_host
- sdhci_resume_host
- sdhci_runtime_suspend_host
- sdhci_runtime_resume_host
- sdhci_cqe_enable
- sdhci_cqe_disable
- sdhci_cqe_irq
- sdhci_alloc_host
- sdhci_set_dma_mask
- __sdhci_read_caps
- sdhci_allocate_bounce_buffer
- sdhci_can_64bit_dma
- sdhci_setup_host
- sdhci_cleanup_host
- __sdhci_add_host
- sdhci_add_host
- sdhci_remove_host
- sdhci_free_host
- sdhci_drv_init
- sdhci_drv_exit
1
2
3
4
5
6
7
8
9
10
11
12 #include <linux/delay.h>
13 #include <linux/ktime.h>
14 #include <linux/highmem.h>
15 #include <linux/io.h>
16 #include <linux/module.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 #include <linux/scatterlist.h>
20 #include <linux/sizes.h>
21 #include <linux/swiotlb.h>
22 #include <linux/regulator/consumer.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/of.h>
25
26 #include <linux/leds.h>
27
28 #include <linux/mmc/mmc.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/sdio.h>
32 #include <linux/mmc/slot-gpio.h>
33
34 #include "sdhci.h"
35
36 #define DRIVER_NAME "sdhci"
37
38 #define DBG(f, x...) \
39 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
40
41 #define SDHCI_DUMP(f, x...) \
42 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
43
44 #define MAX_TUNING_LOOP 40
45
46 static unsigned int debug_quirks = 0;
47 static unsigned int debug_quirks2;
48
49 static void sdhci_finish_data(struct sdhci_host *);
50
51 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
52
53 void sdhci_dumpregs(struct sdhci_host *host)
54 {
55 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
56
57 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
58 sdhci_readl(host, SDHCI_DMA_ADDRESS),
59 sdhci_readw(host, SDHCI_HOST_VERSION));
60 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
61 sdhci_readw(host, SDHCI_BLOCK_SIZE),
62 sdhci_readw(host, SDHCI_BLOCK_COUNT));
63 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
64 sdhci_readl(host, SDHCI_ARGUMENT),
65 sdhci_readw(host, SDHCI_TRANSFER_MODE));
66 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
67 sdhci_readl(host, SDHCI_PRESENT_STATE),
68 sdhci_readb(host, SDHCI_HOST_CONTROL));
69 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
70 sdhci_readb(host, SDHCI_POWER_CONTROL),
71 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
72 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
73 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
74 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
75 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
76 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
77 sdhci_readl(host, SDHCI_INT_STATUS));
78 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
79 sdhci_readl(host, SDHCI_INT_ENABLE),
80 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
81 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
82 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
83 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
84 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
85 sdhci_readl(host, SDHCI_CAPABILITIES),
86 sdhci_readl(host, SDHCI_CAPABILITIES_1));
87 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
88 sdhci_readw(host, SDHCI_COMMAND),
89 sdhci_readl(host, SDHCI_MAX_CURRENT));
90 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
91 sdhci_readl(host, SDHCI_RESPONSE),
92 sdhci_readl(host, SDHCI_RESPONSE + 4));
93 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
94 sdhci_readl(host, SDHCI_RESPONSE + 8),
95 sdhci_readl(host, SDHCI_RESPONSE + 12));
96 SDHCI_DUMP("Host ctl2: 0x%08x\n",
97 sdhci_readw(host, SDHCI_HOST_CONTROL2));
98
99 if (host->flags & SDHCI_USE_ADMA) {
100 if (host->flags & SDHCI_USE_64_BIT_DMA) {
101 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
102 sdhci_readl(host, SDHCI_ADMA_ERROR),
103 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
104 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
105 } else {
106 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
107 sdhci_readl(host, SDHCI_ADMA_ERROR),
108 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
109 }
110 }
111
112 SDHCI_DUMP("============================================\n");
113 }
114 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
115
116
117
118
119
120
121
122 static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
123 {
124 u16 ctrl2;
125
126 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
127 if (ctrl2 & SDHCI_CTRL_V4_MODE)
128 return;
129
130 ctrl2 |= SDHCI_CTRL_V4_MODE;
131 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
132 }
133
134
135
136
137
138 void sdhci_enable_v4_mode(struct sdhci_host *host)
139 {
140 host->v4_mode = true;
141 sdhci_do_enable_v4_mode(host);
142 }
143 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
144
145 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
146 {
147 return cmd->data || cmd->flags & MMC_RSP_BUSY;
148 }
149
150 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
151 {
152 u32 present;
153
154 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
155 !mmc_card_is_removable(host->mmc))
156 return;
157
158 if (enable) {
159 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
160 SDHCI_CARD_PRESENT;
161
162 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
163 SDHCI_INT_CARD_INSERT;
164 } else {
165 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
166 }
167
168 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
169 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
170 }
171
172 static void sdhci_enable_card_detection(struct sdhci_host *host)
173 {
174 sdhci_set_card_detection(host, true);
175 }
176
177 static void sdhci_disable_card_detection(struct sdhci_host *host)
178 {
179 sdhci_set_card_detection(host, false);
180 }
181
182 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
183 {
184 if (host->bus_on)
185 return;
186 host->bus_on = true;
187 pm_runtime_get_noresume(host->mmc->parent);
188 }
189
190 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
191 {
192 if (!host->bus_on)
193 return;
194 host->bus_on = false;
195 pm_runtime_put_noidle(host->mmc->parent);
196 }
197
198 void sdhci_reset(struct sdhci_host *host, u8 mask)
199 {
200 ktime_t timeout;
201
202 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
203
204 if (mask & SDHCI_RESET_ALL) {
205 host->clock = 0;
206
207 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
208 sdhci_runtime_pm_bus_off(host);
209 }
210
211
212 timeout = ktime_add_ms(ktime_get(), 100);
213
214
215 while (1) {
216 bool timedout = ktime_after(ktime_get(), timeout);
217
218 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
219 break;
220 if (timedout) {
221 pr_err("%s: Reset 0x%x never completed.\n",
222 mmc_hostname(host->mmc), (int)mask);
223 sdhci_dumpregs(host);
224 return;
225 }
226 udelay(10);
227 }
228 }
229 EXPORT_SYMBOL_GPL(sdhci_reset);
230
231 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
232 {
233 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
234 struct mmc_host *mmc = host->mmc;
235
236 if (!mmc->ops->get_cd(mmc))
237 return;
238 }
239
240 host->ops->reset(host, mask);
241
242 if (mask & SDHCI_RESET_ALL) {
243 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
244 if (host->ops->enable_dma)
245 host->ops->enable_dma(host);
246 }
247
248
249 host->preset_enabled = false;
250 }
251 }
252
253 static void sdhci_set_default_irqs(struct sdhci_host *host)
254 {
255 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
256 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
257 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
258 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
259 SDHCI_INT_RESPONSE;
260
261 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
262 host->tuning_mode == SDHCI_TUNING_MODE_3)
263 host->ier |= SDHCI_INT_RETUNE;
264
265 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
266 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
267 }
268
269 static void sdhci_config_dma(struct sdhci_host *host)
270 {
271 u8 ctrl;
272 u16 ctrl2;
273
274 if (host->version < SDHCI_SPEC_200)
275 return;
276
277 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
278
279
280
281
282
283
284 ctrl &= ~SDHCI_CTRL_DMA_MASK;
285 if (!(host->flags & SDHCI_REQ_USE_DMA))
286 goto out;
287
288
289 if (host->flags & SDHCI_USE_ADMA)
290 ctrl |= SDHCI_CTRL_ADMA32;
291
292 if (host->flags & SDHCI_USE_64_BIT_DMA) {
293
294
295
296
297
298 if (host->v4_mode) {
299 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
300 ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
301 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
302 } else if (host->flags & SDHCI_USE_ADMA) {
303
304
305
306
307 ctrl |= SDHCI_CTRL_ADMA64;
308 }
309 }
310
311 out:
312 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
313 }
314
315 static void sdhci_init(struct sdhci_host *host, int soft)
316 {
317 struct mmc_host *mmc = host->mmc;
318
319 if (soft)
320 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
321 else
322 sdhci_do_reset(host, SDHCI_RESET_ALL);
323
324 if (host->v4_mode)
325 sdhci_do_enable_v4_mode(host);
326
327 sdhci_set_default_irqs(host);
328
329 host->cqe_on = false;
330
331 if (soft) {
332
333 host->clock = 0;
334 mmc->ops->set_ios(mmc, &mmc->ios);
335 }
336 }
337
338 static void sdhci_reinit(struct sdhci_host *host)
339 {
340 sdhci_init(host, 0);
341 sdhci_enable_card_detection(host);
342 }
343
344 static void __sdhci_led_activate(struct sdhci_host *host)
345 {
346 u8 ctrl;
347
348 if (host->quirks & SDHCI_QUIRK_NO_LED)
349 return;
350
351 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
352 ctrl |= SDHCI_CTRL_LED;
353 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
354 }
355
356 static void __sdhci_led_deactivate(struct sdhci_host *host)
357 {
358 u8 ctrl;
359
360 if (host->quirks & SDHCI_QUIRK_NO_LED)
361 return;
362
363 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
364 ctrl &= ~SDHCI_CTRL_LED;
365 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
366 }
367
368 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
369 static void sdhci_led_control(struct led_classdev *led,
370 enum led_brightness brightness)
371 {
372 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
373 unsigned long flags;
374
375 spin_lock_irqsave(&host->lock, flags);
376
377 if (host->runtime_suspended)
378 goto out;
379
380 if (brightness == LED_OFF)
381 __sdhci_led_deactivate(host);
382 else
383 __sdhci_led_activate(host);
384 out:
385 spin_unlock_irqrestore(&host->lock, flags);
386 }
387
388 static int sdhci_led_register(struct sdhci_host *host)
389 {
390 struct mmc_host *mmc = host->mmc;
391
392 if (host->quirks & SDHCI_QUIRK_NO_LED)
393 return 0;
394
395 snprintf(host->led_name, sizeof(host->led_name),
396 "%s::", mmc_hostname(mmc));
397
398 host->led.name = host->led_name;
399 host->led.brightness = LED_OFF;
400 host->led.default_trigger = mmc_hostname(mmc);
401 host->led.brightness_set = sdhci_led_control;
402
403 return led_classdev_register(mmc_dev(mmc), &host->led);
404 }
405
406 static void sdhci_led_unregister(struct sdhci_host *host)
407 {
408 if (host->quirks & SDHCI_QUIRK_NO_LED)
409 return;
410
411 led_classdev_unregister(&host->led);
412 }
413
414 static inline void sdhci_led_activate(struct sdhci_host *host)
415 {
416 }
417
418 static inline void sdhci_led_deactivate(struct sdhci_host *host)
419 {
420 }
421
422 #else
423
424 static inline int sdhci_led_register(struct sdhci_host *host)
425 {
426 return 0;
427 }
428
429 static inline void sdhci_led_unregister(struct sdhci_host *host)
430 {
431 }
432
433 static inline void sdhci_led_activate(struct sdhci_host *host)
434 {
435 __sdhci_led_activate(host);
436 }
437
438 static inline void sdhci_led_deactivate(struct sdhci_host *host)
439 {
440 __sdhci_led_deactivate(host);
441 }
442
443 #endif
444
445 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
446 unsigned long timeout)
447 {
448 if (sdhci_data_line_cmd(mrq->cmd))
449 mod_timer(&host->data_timer, timeout);
450 else
451 mod_timer(&host->timer, timeout);
452 }
453
454 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
455 {
456 if (sdhci_data_line_cmd(mrq->cmd))
457 del_timer(&host->data_timer);
458 else
459 del_timer(&host->timer);
460 }
461
462 static inline bool sdhci_has_requests(struct sdhci_host *host)
463 {
464 return host->cmd || host->data_cmd;
465 }
466
467
468
469
470
471
472
473 static void sdhci_read_block_pio(struct sdhci_host *host)
474 {
475 unsigned long flags;
476 size_t blksize, len, chunk;
477 u32 uninitialized_var(scratch);
478 u8 *buf;
479
480 DBG("PIO reading\n");
481
482 blksize = host->data->blksz;
483 chunk = 0;
484
485 local_irq_save(flags);
486
487 while (blksize) {
488 BUG_ON(!sg_miter_next(&host->sg_miter));
489
490 len = min(host->sg_miter.length, blksize);
491
492 blksize -= len;
493 host->sg_miter.consumed = len;
494
495 buf = host->sg_miter.addr;
496
497 while (len) {
498 if (chunk == 0) {
499 scratch = sdhci_readl(host, SDHCI_BUFFER);
500 chunk = 4;
501 }
502
503 *buf = scratch & 0xFF;
504
505 buf++;
506 scratch >>= 8;
507 chunk--;
508 len--;
509 }
510 }
511
512 sg_miter_stop(&host->sg_miter);
513
514 local_irq_restore(flags);
515 }
516
517 static void sdhci_write_block_pio(struct sdhci_host *host)
518 {
519 unsigned long flags;
520 size_t blksize, len, chunk;
521 u32 scratch;
522 u8 *buf;
523
524 DBG("PIO writing\n");
525
526 blksize = host->data->blksz;
527 chunk = 0;
528 scratch = 0;
529
530 local_irq_save(flags);
531
532 while (blksize) {
533 BUG_ON(!sg_miter_next(&host->sg_miter));
534
535 len = min(host->sg_miter.length, blksize);
536
537 blksize -= len;
538 host->sg_miter.consumed = len;
539
540 buf = host->sg_miter.addr;
541
542 while (len) {
543 scratch |= (u32)*buf << (chunk * 8);
544
545 buf++;
546 chunk++;
547 len--;
548
549 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
550 sdhci_writel(host, scratch, SDHCI_BUFFER);
551 chunk = 0;
552 scratch = 0;
553 }
554 }
555 }
556
557 sg_miter_stop(&host->sg_miter);
558
559 local_irq_restore(flags);
560 }
561
562 static void sdhci_transfer_pio(struct sdhci_host *host)
563 {
564 u32 mask;
565
566 if (host->blocks == 0)
567 return;
568
569 if (host->data->flags & MMC_DATA_READ)
570 mask = SDHCI_DATA_AVAILABLE;
571 else
572 mask = SDHCI_SPACE_AVAILABLE;
573
574
575
576
577
578
579 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
580 (host->data->blocks == 1))
581 mask = ~0;
582
583 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
584 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
585 udelay(100);
586
587 if (host->data->flags & MMC_DATA_READ)
588 sdhci_read_block_pio(host);
589 else
590 sdhci_write_block_pio(host);
591
592 host->blocks--;
593 if (host->blocks == 0)
594 break;
595 }
596
597 DBG("PIO transfer complete.\n");
598 }
599
600 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
601 struct mmc_data *data, int cookie)
602 {
603 int sg_count;
604
605
606
607
608
609 if (data->host_cookie == COOKIE_PRE_MAPPED)
610 return data->sg_count;
611
612
613 if (host->bounce_buffer) {
614 unsigned int length = data->blksz * data->blocks;
615
616 if (length > host->bounce_buffer_size) {
617 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
618 mmc_hostname(host->mmc), length,
619 host->bounce_buffer_size);
620 return -EIO;
621 }
622 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
623
624 sg_copy_to_buffer(data->sg, data->sg_len,
625 host->bounce_buffer,
626 length);
627 }
628
629 dma_sync_single_for_device(host->mmc->parent,
630 host->bounce_addr,
631 host->bounce_buffer_size,
632 mmc_get_dma_dir(data));
633
634 sg_count = 1;
635 } else {
636
637 sg_count = dma_map_sg(mmc_dev(host->mmc),
638 data->sg, data->sg_len,
639 mmc_get_dma_dir(data));
640 }
641
642 if (sg_count == 0)
643 return -ENOSPC;
644
645 data->sg_count = sg_count;
646 data->host_cookie = cookie;
647
648 return sg_count;
649 }
650
651 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
652 {
653 local_irq_save(*flags);
654 return kmap_atomic(sg_page(sg)) + sg->offset;
655 }
656
657 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
658 {
659 kunmap_atomic(buffer);
660 local_irq_restore(*flags);
661 }
662
663 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
664 dma_addr_t addr, int len, unsigned int cmd)
665 {
666 struct sdhci_adma2_64_desc *dma_desc = *desc;
667
668
669 dma_desc->cmd = cpu_to_le16(cmd);
670 dma_desc->len = cpu_to_le16(len);
671 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
672
673 if (host->flags & SDHCI_USE_64_BIT_DMA)
674 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
675
676 *desc += host->desc_sz;
677 }
678 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
679
680 static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
681 void **desc, dma_addr_t addr,
682 int len, unsigned int cmd)
683 {
684 if (host->ops->adma_write_desc)
685 host->ops->adma_write_desc(host, desc, addr, len, cmd);
686 else
687 sdhci_adma_write_desc(host, desc, addr, len, cmd);
688 }
689
690 static void sdhci_adma_mark_end(void *desc)
691 {
692 struct sdhci_adma2_64_desc *dma_desc = desc;
693
694
695 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
696 }
697
698 static void sdhci_adma_table_pre(struct sdhci_host *host,
699 struct mmc_data *data, int sg_count)
700 {
701 struct scatterlist *sg;
702 unsigned long flags;
703 dma_addr_t addr, align_addr;
704 void *desc, *align;
705 char *buffer;
706 int len, offset, i;
707
708
709
710
711
712
713 host->sg_count = sg_count;
714
715 desc = host->adma_table;
716 align = host->align_buffer;
717
718 align_addr = host->align_addr;
719
720 for_each_sg(data->sg, sg, host->sg_count, i) {
721 addr = sg_dma_address(sg);
722 len = sg_dma_len(sg);
723
724
725
726
727
728
729
730 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
731 SDHCI_ADMA2_MASK;
732 if (offset) {
733 if (data->flags & MMC_DATA_WRITE) {
734 buffer = sdhci_kmap_atomic(sg, &flags);
735 memcpy(align, buffer, offset);
736 sdhci_kunmap_atomic(buffer, &flags);
737 }
738
739
740 __sdhci_adma_write_desc(host, &desc, align_addr,
741 offset, ADMA2_TRAN_VALID);
742
743 BUG_ON(offset > 65536);
744
745 align += SDHCI_ADMA2_ALIGN;
746 align_addr += SDHCI_ADMA2_ALIGN;
747
748 addr += offset;
749 len -= offset;
750 }
751
752 BUG_ON(len > 65536);
753
754
755 if (len)
756 __sdhci_adma_write_desc(host, &desc, addr, len,
757 ADMA2_TRAN_VALID);
758
759
760
761
762
763 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
764 }
765
766 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
767
768 if (desc != host->adma_table) {
769 desc -= host->desc_sz;
770 sdhci_adma_mark_end(desc);
771 }
772 } else {
773
774 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
775 }
776 }
777
778 static void sdhci_adma_table_post(struct sdhci_host *host,
779 struct mmc_data *data)
780 {
781 struct scatterlist *sg;
782 int i, size;
783 void *align;
784 char *buffer;
785 unsigned long flags;
786
787 if (data->flags & MMC_DATA_READ) {
788 bool has_unaligned = false;
789
790
791 for_each_sg(data->sg, sg, host->sg_count, i)
792 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
793 has_unaligned = true;
794 break;
795 }
796
797 if (has_unaligned) {
798 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
799 data->sg_len, DMA_FROM_DEVICE);
800
801 align = host->align_buffer;
802
803 for_each_sg(data->sg, sg, host->sg_count, i) {
804 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
805 size = SDHCI_ADMA2_ALIGN -
806 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
807
808 buffer = sdhci_kmap_atomic(sg, &flags);
809 memcpy(buffer, align, size);
810 sdhci_kunmap_atomic(buffer, &flags);
811
812 align += SDHCI_ADMA2_ALIGN;
813 }
814 }
815 }
816 }
817 }
818
819 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
820 {
821 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
822 if (host->flags & SDHCI_USE_64_BIT_DMA)
823 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
824 }
825
826 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
827 {
828 if (host->bounce_buffer)
829 return host->bounce_addr;
830 else
831 return sg_dma_address(host->data->sg);
832 }
833
834 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
835 {
836 if (host->v4_mode)
837 sdhci_set_adma_addr(host, addr);
838 else
839 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
840 }
841
842 static unsigned int sdhci_target_timeout(struct sdhci_host *host,
843 struct mmc_command *cmd,
844 struct mmc_data *data)
845 {
846 unsigned int target_timeout;
847
848
849 if (!data) {
850 target_timeout = cmd->busy_timeout * 1000;
851 } else {
852 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
853 if (host->clock && data->timeout_clks) {
854 unsigned long long val;
855
856
857
858
859
860
861 val = 1000000ULL * data->timeout_clks;
862 if (do_div(val, host->clock))
863 target_timeout++;
864 target_timeout += val;
865 }
866 }
867
868 return target_timeout;
869 }
870
871 static void sdhci_calc_sw_timeout(struct sdhci_host *host,
872 struct mmc_command *cmd)
873 {
874 struct mmc_data *data = cmd->data;
875 struct mmc_host *mmc = host->mmc;
876 struct mmc_ios *ios = &mmc->ios;
877 unsigned char bus_width = 1 << ios->bus_width;
878 unsigned int blksz;
879 unsigned int freq;
880 u64 target_timeout;
881 u64 transfer_time;
882
883 target_timeout = sdhci_target_timeout(host, cmd, data);
884 target_timeout *= NSEC_PER_USEC;
885
886 if (data) {
887 blksz = data->blksz;
888 freq = host->mmc->actual_clock ? : host->clock;
889 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
890 do_div(transfer_time, freq);
891
892 transfer_time = transfer_time * 2;
893
894 host->data_timeout = data->blocks * target_timeout +
895 transfer_time;
896 } else {
897 host->data_timeout = target_timeout;
898 }
899
900 if (host->data_timeout)
901 host->data_timeout += MMC_CMD_TRANSFER_TIME;
902 }
903
904 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
905 bool *too_big)
906 {
907 u8 count;
908 struct mmc_data *data;
909 unsigned target_timeout, current_timeout;
910
911 *too_big = true;
912
913
914
915
916
917
918
919 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
920 return 0xE;
921
922
923 if (cmd == NULL)
924 return 0xE;
925
926 data = cmd->data;
927
928 if (!data && !cmd->busy_timeout)
929 return 0xE;
930
931
932 target_timeout = sdhci_target_timeout(host, cmd, data);
933
934
935
936
937
938
939
940
941
942
943
944 count = 0;
945 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
946 while (current_timeout < target_timeout) {
947 count++;
948 current_timeout <<= 1;
949 if (count >= 0xF)
950 break;
951 }
952
953 if (count >= 0xF) {
954 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
955 DBG("Too large timeout 0x%x requested for CMD%d!\n",
956 count, cmd->opcode);
957 count = 0xE;
958 } else {
959 *too_big = false;
960 }
961
962 return count;
963 }
964
965 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
966 {
967 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
968 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
969
970 if (host->flags & SDHCI_REQ_USE_DMA)
971 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
972 else
973 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
974
975 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
976 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
977 else
978 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
979
980 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
981 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
982 }
983
984 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
985 {
986 if (enable)
987 host->ier |= SDHCI_INT_DATA_TIMEOUT;
988 else
989 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
990 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
991 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
992 }
993 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
994
995 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
996 {
997 bool too_big = false;
998 u8 count = sdhci_calc_timeout(host, cmd, &too_big);
999
1000 if (too_big &&
1001 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1002 sdhci_calc_sw_timeout(host, cmd);
1003 sdhci_set_data_timeout_irq(host, false);
1004 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1005 sdhci_set_data_timeout_irq(host, true);
1006 }
1007
1008 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1009 }
1010 EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
1011
1012 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1013 {
1014 if (host->ops->set_timeout)
1015 host->ops->set_timeout(host, cmd);
1016 else
1017 __sdhci_set_timeout(host, cmd);
1018 }
1019
1020 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1021 {
1022 struct mmc_data *data = cmd->data;
1023
1024 host->data_timeout = 0;
1025
1026 if (sdhci_data_line_cmd(cmd))
1027 sdhci_set_timeout(host, cmd);
1028
1029 if (!data)
1030 return;
1031
1032 WARN_ON(host->data);
1033
1034
1035 BUG_ON(data->blksz * data->blocks > 524288);
1036 BUG_ON(data->blksz > host->mmc->max_blk_size);
1037 BUG_ON(data->blocks > 65535);
1038
1039 host->data = data;
1040 host->data_early = 0;
1041 host->data->bytes_xfered = 0;
1042
1043 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1044 struct scatterlist *sg;
1045 unsigned int length_mask, offset_mask;
1046 int i;
1047
1048 host->flags |= SDHCI_REQ_USE_DMA;
1049
1050
1051
1052
1053
1054
1055
1056
1057 length_mask = 0;
1058 offset_mask = 0;
1059 if (host->flags & SDHCI_USE_ADMA) {
1060 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1061 length_mask = 3;
1062
1063
1064
1065
1066
1067 offset_mask = 3;
1068 }
1069 } else {
1070 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1071 length_mask = 3;
1072 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1073 offset_mask = 3;
1074 }
1075
1076 if (unlikely(length_mask | offset_mask)) {
1077 for_each_sg(data->sg, sg, data->sg_len, i) {
1078 if (sg->length & length_mask) {
1079 DBG("Reverting to PIO because of transfer size (%d)\n",
1080 sg->length);
1081 host->flags &= ~SDHCI_REQ_USE_DMA;
1082 break;
1083 }
1084 if (sg->offset & offset_mask) {
1085 DBG("Reverting to PIO because of bad alignment\n");
1086 host->flags &= ~SDHCI_REQ_USE_DMA;
1087 break;
1088 }
1089 }
1090 }
1091 }
1092
1093 if (host->flags & SDHCI_REQ_USE_DMA) {
1094 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1095
1096 if (sg_cnt <= 0) {
1097
1098
1099
1100
1101 WARN_ON(1);
1102 host->flags &= ~SDHCI_REQ_USE_DMA;
1103 } else if (host->flags & SDHCI_USE_ADMA) {
1104 sdhci_adma_table_pre(host, data, sg_cnt);
1105 sdhci_set_adma_addr(host, host->adma_addr);
1106 } else {
1107 WARN_ON(sg_cnt != 1);
1108 sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1109 }
1110 }
1111
1112 sdhci_config_dma(host);
1113
1114 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1115 int flags;
1116
1117 flags = SG_MITER_ATOMIC;
1118 if (host->data->flags & MMC_DATA_READ)
1119 flags |= SG_MITER_TO_SG;
1120 else
1121 flags |= SG_MITER_FROM_SG;
1122 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1123 host->blocks = data->blocks;
1124 }
1125
1126 sdhci_set_transfer_irqs(host);
1127
1128
1129 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1130 SDHCI_BLOCK_SIZE);
1131
1132
1133
1134
1135
1136 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1137 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1138 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1139 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1140 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1141 } else {
1142 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1143 }
1144 }
1145
1146 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1147 struct mmc_request *mrq)
1148 {
1149 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1150 !mrq->cap_cmd_during_tfr;
1151 }
1152
1153 static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1154 struct mmc_command *cmd,
1155 u16 *mode)
1156 {
1157 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1158 (cmd->opcode != SD_IO_RW_EXTENDED);
1159 bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1160 u16 ctrl2;
1161
1162
1163
1164
1165
1166
1167 if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
1168 *mode |= SDHCI_TRNS_AUTO_SEL;
1169
1170 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1171 if (use_cmd23)
1172 ctrl2 |= SDHCI_CMD23_ENABLE;
1173 else
1174 ctrl2 &= ~SDHCI_CMD23_ENABLE;
1175 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1176
1177 return;
1178 }
1179
1180
1181
1182
1183
1184 if (use_cmd12)
1185 *mode |= SDHCI_TRNS_AUTO_CMD12;
1186 else if (use_cmd23)
1187 *mode |= SDHCI_TRNS_AUTO_CMD23;
1188 }
1189
1190 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1191 struct mmc_command *cmd)
1192 {
1193 u16 mode = 0;
1194 struct mmc_data *data = cmd->data;
1195
1196 if (data == NULL) {
1197 if (host->quirks2 &
1198 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1199
1200 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1201 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1202 } else {
1203
1204 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1205 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1206 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1207 }
1208 return;
1209 }
1210
1211 WARN_ON(!host->data);
1212
1213 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1214 mode = SDHCI_TRNS_BLK_CNT_EN;
1215
1216 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1217 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1218 sdhci_auto_cmd_select(host, cmd, &mode);
1219 if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
1220 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1221 }
1222
1223 if (data->flags & MMC_DATA_READ)
1224 mode |= SDHCI_TRNS_READ;
1225 if (host->flags & SDHCI_REQ_USE_DMA)
1226 mode |= SDHCI_TRNS_DMA;
1227
1228 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1229 }
1230
1231 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1232 {
1233 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1234 ((mrq->cmd && mrq->cmd->error) ||
1235 (mrq->sbc && mrq->sbc->error) ||
1236 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1237 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1238 }
1239
1240 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1241 {
1242 int i;
1243
1244 if (host->cmd && host->cmd->mrq == mrq)
1245 host->cmd = NULL;
1246
1247 if (host->data_cmd && host->data_cmd->mrq == mrq)
1248 host->data_cmd = NULL;
1249
1250 if (host->data && host->data->mrq == mrq)
1251 host->data = NULL;
1252
1253 if (sdhci_needs_reset(host, mrq))
1254 host->pending_reset = true;
1255
1256 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1257 if (host->mrqs_done[i] == mrq) {
1258 WARN_ON(1);
1259 return;
1260 }
1261 }
1262
1263 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1264 if (!host->mrqs_done[i]) {
1265 host->mrqs_done[i] = mrq;
1266 break;
1267 }
1268 }
1269
1270 WARN_ON(i >= SDHCI_MAX_MRQS);
1271
1272 sdhci_del_timer(host, mrq);
1273
1274 if (!sdhci_has_requests(host))
1275 sdhci_led_deactivate(host);
1276 }
1277
1278 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1279 {
1280 __sdhci_finish_mrq(host, mrq);
1281
1282 queue_work(host->complete_wq, &host->complete_work);
1283 }
1284
1285 static void sdhci_finish_data(struct sdhci_host *host)
1286 {
1287 struct mmc_command *data_cmd = host->data_cmd;
1288 struct mmc_data *data = host->data;
1289
1290 host->data = NULL;
1291 host->data_cmd = NULL;
1292
1293
1294
1295
1296
1297 if (data->error) {
1298 if (!host->cmd || host->cmd == data_cmd)
1299 sdhci_do_reset(host, SDHCI_RESET_CMD);
1300 sdhci_do_reset(host, SDHCI_RESET_DATA);
1301 }
1302
1303 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1304 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1305 sdhci_adma_table_post(host, data);
1306
1307
1308
1309
1310
1311
1312
1313
1314 if (data->error)
1315 data->bytes_xfered = 0;
1316 else
1317 data->bytes_xfered = data->blksz * data->blocks;
1318
1319
1320
1321
1322
1323
1324 if (data->stop &&
1325 (data->error ||
1326 !data->mrq->sbc)) {
1327
1328
1329
1330
1331
1332 if (data->mrq->cap_cmd_during_tfr) {
1333 __sdhci_finish_mrq(host, data->mrq);
1334 } else {
1335
1336 host->cmd = NULL;
1337 sdhci_send_command(host, data->stop);
1338 }
1339 } else {
1340 __sdhci_finish_mrq(host, data->mrq);
1341 }
1342 }
1343
1344 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1345 {
1346 int flags;
1347 u32 mask;
1348 unsigned long timeout;
1349
1350 WARN_ON(host->cmd);
1351
1352
1353 cmd->error = 0;
1354
1355 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1356 cmd->opcode == MMC_STOP_TRANSMISSION)
1357 cmd->flags |= MMC_RSP_BUSY;
1358
1359
1360 timeout = 10;
1361
1362 mask = SDHCI_CMD_INHIBIT;
1363 if (sdhci_data_line_cmd(cmd))
1364 mask |= SDHCI_DATA_INHIBIT;
1365
1366
1367
1368 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1369 mask &= ~SDHCI_DATA_INHIBIT;
1370
1371 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1372 if (timeout == 0) {
1373 pr_err("%s: Controller never released inhibit bit(s).\n",
1374 mmc_hostname(host->mmc));
1375 sdhci_dumpregs(host);
1376 cmd->error = -EIO;
1377 sdhci_finish_mrq(host, cmd->mrq);
1378 return;
1379 }
1380 timeout--;
1381 mdelay(1);
1382 }
1383
1384 host->cmd = cmd;
1385 if (sdhci_data_line_cmd(cmd)) {
1386 WARN_ON(host->data_cmd);
1387 host->data_cmd = cmd;
1388 }
1389
1390 sdhci_prepare_data(host, cmd);
1391
1392 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1393
1394 sdhci_set_transfer_mode(host, cmd);
1395
1396 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1397 pr_err("%s: Unsupported response type!\n",
1398 mmc_hostname(host->mmc));
1399 cmd->error = -EINVAL;
1400 sdhci_finish_mrq(host, cmd->mrq);
1401 return;
1402 }
1403
1404 if (!(cmd->flags & MMC_RSP_PRESENT))
1405 flags = SDHCI_CMD_RESP_NONE;
1406 else if (cmd->flags & MMC_RSP_136)
1407 flags = SDHCI_CMD_RESP_LONG;
1408 else if (cmd->flags & MMC_RSP_BUSY)
1409 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1410 else
1411 flags = SDHCI_CMD_RESP_SHORT;
1412
1413 if (cmd->flags & MMC_RSP_CRC)
1414 flags |= SDHCI_CMD_CRC;
1415 if (cmd->flags & MMC_RSP_OPCODE)
1416 flags |= SDHCI_CMD_INDEX;
1417
1418
1419 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1420 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1421 flags |= SDHCI_CMD_DATA;
1422
1423 timeout = jiffies;
1424 if (host->data_timeout)
1425 timeout += nsecs_to_jiffies(host->data_timeout);
1426 else if (!cmd->data && cmd->busy_timeout > 9000)
1427 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1428 else
1429 timeout += 10 * HZ;
1430 sdhci_mod_timer(host, cmd->mrq, timeout);
1431
1432 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1433 }
1434 EXPORT_SYMBOL_GPL(sdhci_send_command);
1435
1436 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1437 {
1438 int i, reg;
1439
1440 for (i = 0; i < 4; i++) {
1441 reg = SDHCI_RESPONSE + (3 - i) * 4;
1442 cmd->resp[i] = sdhci_readl(host, reg);
1443 }
1444
1445 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1446 return;
1447
1448
1449 for (i = 0; i < 4; i++) {
1450 cmd->resp[i] <<= 8;
1451 if (i != 3)
1452 cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1453 }
1454 }
1455
1456 static void sdhci_finish_command(struct sdhci_host *host)
1457 {
1458 struct mmc_command *cmd = host->cmd;
1459
1460 host->cmd = NULL;
1461
1462 if (cmd->flags & MMC_RSP_PRESENT) {
1463 if (cmd->flags & MMC_RSP_136) {
1464 sdhci_read_rsp_136(host, cmd);
1465 } else {
1466 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1467 }
1468 }
1469
1470 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1471 mmc_command_done(host->mmc, cmd->mrq);
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483 if (cmd->flags & MMC_RSP_BUSY) {
1484 if (cmd->data) {
1485 DBG("Cannot wait for busy signal when also doing a data transfer");
1486 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1487 cmd == host->data_cmd) {
1488
1489 return;
1490 }
1491 }
1492
1493
1494 if (cmd == cmd->mrq->sbc) {
1495 sdhci_send_command(host, cmd->mrq->cmd);
1496 } else {
1497
1498
1499 if (host->data && host->data_early)
1500 sdhci_finish_data(host);
1501
1502 if (!cmd->data)
1503 __sdhci_finish_mrq(host, cmd->mrq);
1504 }
1505 }
1506
1507 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1508 {
1509 u16 preset = 0;
1510
1511 switch (host->timing) {
1512 case MMC_TIMING_UHS_SDR12:
1513 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1514 break;
1515 case MMC_TIMING_UHS_SDR25:
1516 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1517 break;
1518 case MMC_TIMING_UHS_SDR50:
1519 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1520 break;
1521 case MMC_TIMING_UHS_SDR104:
1522 case MMC_TIMING_MMC_HS200:
1523 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1524 break;
1525 case MMC_TIMING_UHS_DDR50:
1526 case MMC_TIMING_MMC_DDR52:
1527 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1528 break;
1529 case MMC_TIMING_MMC_HS400:
1530 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1531 break;
1532 default:
1533 pr_warn("%s: Invalid UHS-I mode selected\n",
1534 mmc_hostname(host->mmc));
1535 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1536 break;
1537 }
1538 return preset;
1539 }
1540
1541 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1542 unsigned int *actual_clock)
1543 {
1544 int div = 0;
1545 int real_div = div, clk_mul = 1;
1546 u16 clk = 0;
1547 bool switch_base_clk = false;
1548
1549 if (host->version >= SDHCI_SPEC_300) {
1550 if (host->preset_enabled) {
1551 u16 pre_val;
1552
1553 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1554 pre_val = sdhci_get_preset_value(host);
1555 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1556 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1557 if (host->clk_mul &&
1558 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1559 clk = SDHCI_PROG_CLOCK_MODE;
1560 real_div = div + 1;
1561 clk_mul = host->clk_mul;
1562 } else {
1563 real_div = max_t(int, 1, div << 1);
1564 }
1565 goto clock_set;
1566 }
1567
1568
1569
1570
1571
1572 if (host->clk_mul) {
1573 for (div = 1; div <= 1024; div++) {
1574 if ((host->max_clk * host->clk_mul / div)
1575 <= clock)
1576 break;
1577 }
1578 if ((host->max_clk * host->clk_mul / div) <= clock) {
1579
1580
1581
1582
1583 clk = SDHCI_PROG_CLOCK_MODE;
1584 real_div = div;
1585 clk_mul = host->clk_mul;
1586 div--;
1587 } else {
1588
1589
1590
1591
1592 switch_base_clk = true;
1593 }
1594 }
1595
1596 if (!host->clk_mul || switch_base_clk) {
1597
1598 if (host->max_clk <= clock)
1599 div = 1;
1600 else {
1601 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1602 div += 2) {
1603 if ((host->max_clk / div) <= clock)
1604 break;
1605 }
1606 }
1607 real_div = div;
1608 div >>= 1;
1609 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1610 && !div && host->max_clk <= 25000000)
1611 div = 1;
1612 }
1613 } else {
1614
1615 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1616 if ((host->max_clk / div) <= clock)
1617 break;
1618 }
1619 real_div = div;
1620 div >>= 1;
1621 }
1622
1623 clock_set:
1624 if (real_div)
1625 *actual_clock = (host->max_clk * clk_mul) / real_div;
1626 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1627 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1628 << SDHCI_DIVIDER_HI_SHIFT;
1629
1630 return clk;
1631 }
1632 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1633
1634 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1635 {
1636 ktime_t timeout;
1637
1638 clk |= SDHCI_CLOCK_INT_EN;
1639 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1640
1641
1642 timeout = ktime_add_ms(ktime_get(), 150);
1643 while (1) {
1644 bool timedout = ktime_after(ktime_get(), timeout);
1645
1646 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1647 if (clk & SDHCI_CLOCK_INT_STABLE)
1648 break;
1649 if (timedout) {
1650 pr_err("%s: Internal clock never stabilised.\n",
1651 mmc_hostname(host->mmc));
1652 sdhci_dumpregs(host);
1653 return;
1654 }
1655 udelay(10);
1656 }
1657
1658 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1659 clk |= SDHCI_CLOCK_PLL_EN;
1660 clk &= ~SDHCI_CLOCK_INT_STABLE;
1661 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1662
1663
1664 timeout = ktime_add_ms(ktime_get(), 150);
1665 while (1) {
1666 bool timedout = ktime_after(ktime_get(), timeout);
1667
1668 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1669 if (clk & SDHCI_CLOCK_INT_STABLE)
1670 break;
1671 if (timedout) {
1672 pr_err("%s: PLL clock never stabilised.\n",
1673 mmc_hostname(host->mmc));
1674 sdhci_dumpregs(host);
1675 return;
1676 }
1677 udelay(10);
1678 }
1679 }
1680
1681 clk |= SDHCI_CLOCK_CARD_EN;
1682 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1683 }
1684 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1685
1686 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1687 {
1688 u16 clk;
1689
1690 host->mmc->actual_clock = 0;
1691
1692 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1693
1694 if (clock == 0)
1695 return;
1696
1697 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1698 sdhci_enable_clk(host, clk);
1699 }
1700 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1701
1702 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1703 unsigned short vdd)
1704 {
1705 struct mmc_host *mmc = host->mmc;
1706
1707 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1708
1709 if (mode != MMC_POWER_OFF)
1710 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1711 else
1712 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1713 }
1714
1715 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1716 unsigned short vdd)
1717 {
1718 u8 pwr = 0;
1719
1720 if (mode != MMC_POWER_OFF) {
1721 switch (1 << vdd) {
1722 case MMC_VDD_165_195:
1723
1724
1725
1726
1727
1728
1729 case MMC_VDD_20_21:
1730 pwr = SDHCI_POWER_180;
1731 break;
1732 case MMC_VDD_29_30:
1733 case MMC_VDD_30_31:
1734 pwr = SDHCI_POWER_300;
1735 break;
1736 case MMC_VDD_32_33:
1737 case MMC_VDD_33_34:
1738 pwr = SDHCI_POWER_330;
1739 break;
1740 default:
1741 WARN(1, "%s: Invalid vdd %#x\n",
1742 mmc_hostname(host->mmc), vdd);
1743 break;
1744 }
1745 }
1746
1747 if (host->pwr == pwr)
1748 return;
1749
1750 host->pwr = pwr;
1751
1752 if (pwr == 0) {
1753 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1754 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1755 sdhci_runtime_pm_bus_off(host);
1756 } else {
1757
1758
1759
1760
1761 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1762 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1763
1764
1765
1766
1767
1768
1769 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1770 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1771
1772 pwr |= SDHCI_POWER_ON;
1773
1774 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1775
1776 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1777 sdhci_runtime_pm_bus_on(host);
1778
1779
1780
1781
1782
1783 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1784 mdelay(10);
1785 }
1786 }
1787 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1788
1789 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1790 unsigned short vdd)
1791 {
1792 if (IS_ERR(host->mmc->supply.vmmc))
1793 sdhci_set_power_noreg(host, mode, vdd);
1794 else
1795 sdhci_set_power_reg(host, mode, vdd);
1796 }
1797 EXPORT_SYMBOL_GPL(sdhci_set_power);
1798
1799
1800
1801
1802
1803
1804
1805 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1806 {
1807 struct sdhci_host *host;
1808 int present;
1809 unsigned long flags;
1810
1811 host = mmc_priv(mmc);
1812
1813
1814 present = mmc->ops->get_cd(mmc);
1815
1816 spin_lock_irqsave(&host->lock, flags);
1817
1818 sdhci_led_activate(host);
1819
1820
1821
1822
1823
1824 if (sdhci_auto_cmd12(host, mrq)) {
1825 if (mrq->stop) {
1826 mrq->data->stop = NULL;
1827 mrq->stop = NULL;
1828 }
1829 }
1830
1831 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1832 mrq->cmd->error = -ENOMEDIUM;
1833 sdhci_finish_mrq(host, mrq);
1834 } else {
1835 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1836 sdhci_send_command(host, mrq->sbc);
1837 else
1838 sdhci_send_command(host, mrq->cmd);
1839 }
1840
1841 spin_unlock_irqrestore(&host->lock, flags);
1842 }
1843 EXPORT_SYMBOL_GPL(sdhci_request);
1844
1845 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1846 {
1847 u8 ctrl;
1848
1849 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1850 if (width == MMC_BUS_WIDTH_8) {
1851 ctrl &= ~SDHCI_CTRL_4BITBUS;
1852 ctrl |= SDHCI_CTRL_8BITBUS;
1853 } else {
1854 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
1855 ctrl &= ~SDHCI_CTRL_8BITBUS;
1856 if (width == MMC_BUS_WIDTH_4)
1857 ctrl |= SDHCI_CTRL_4BITBUS;
1858 else
1859 ctrl &= ~SDHCI_CTRL_4BITBUS;
1860 }
1861 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1862 }
1863 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1864
1865 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1866 {
1867 u16 ctrl_2;
1868
1869 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1870
1871 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1872 if ((timing == MMC_TIMING_MMC_HS200) ||
1873 (timing == MMC_TIMING_UHS_SDR104))
1874 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1875 else if (timing == MMC_TIMING_UHS_SDR12)
1876 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1877 else if (timing == MMC_TIMING_UHS_SDR25)
1878 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1879 else if (timing == MMC_TIMING_UHS_SDR50)
1880 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1881 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1882 (timing == MMC_TIMING_MMC_DDR52))
1883 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1884 else if (timing == MMC_TIMING_MMC_HS400)
1885 ctrl_2 |= SDHCI_CTRL_HS400;
1886 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1887 }
1888 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1889
1890 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1891 {
1892 struct sdhci_host *host = mmc_priv(mmc);
1893 u8 ctrl;
1894
1895 if (ios->power_mode == MMC_POWER_UNDEFINED)
1896 return;
1897
1898 if (host->flags & SDHCI_DEVICE_DEAD) {
1899 if (!IS_ERR(mmc->supply.vmmc) &&
1900 ios->power_mode == MMC_POWER_OFF)
1901 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1902 return;
1903 }
1904
1905
1906
1907
1908
1909 if (ios->power_mode == MMC_POWER_OFF) {
1910 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1911 sdhci_reinit(host);
1912 }
1913
1914 if (host->version >= SDHCI_SPEC_300 &&
1915 (ios->power_mode == MMC_POWER_UP) &&
1916 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1917 sdhci_enable_preset_value(host, false);
1918
1919 if (!ios->clock || ios->clock != host->clock) {
1920 host->ops->set_clock(host, ios->clock);
1921 host->clock = ios->clock;
1922
1923 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1924 host->clock) {
1925 host->timeout_clk = host->mmc->actual_clock ?
1926 host->mmc->actual_clock / 1000 :
1927 host->clock / 1000;
1928 host->mmc->max_busy_timeout =
1929 host->ops->get_max_timeout_count ?
1930 host->ops->get_max_timeout_count(host) :
1931 1 << 27;
1932 host->mmc->max_busy_timeout /= host->timeout_clk;
1933 }
1934 }
1935
1936 if (host->ops->set_power)
1937 host->ops->set_power(host, ios->power_mode, ios->vdd);
1938 else
1939 sdhci_set_power(host, ios->power_mode, ios->vdd);
1940
1941 if (host->ops->platform_send_init_74_clocks)
1942 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1943
1944 host->ops->set_bus_width(host, ios->bus_width);
1945
1946 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1947
1948 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
1949 if (ios->timing == MMC_TIMING_SD_HS ||
1950 ios->timing == MMC_TIMING_MMC_HS ||
1951 ios->timing == MMC_TIMING_MMC_HS400 ||
1952 ios->timing == MMC_TIMING_MMC_HS200 ||
1953 ios->timing == MMC_TIMING_MMC_DDR52 ||
1954 ios->timing == MMC_TIMING_UHS_SDR50 ||
1955 ios->timing == MMC_TIMING_UHS_SDR104 ||
1956 ios->timing == MMC_TIMING_UHS_DDR50 ||
1957 ios->timing == MMC_TIMING_UHS_SDR25)
1958 ctrl |= SDHCI_CTRL_HISPD;
1959 else
1960 ctrl &= ~SDHCI_CTRL_HISPD;
1961 }
1962
1963 if (host->version >= SDHCI_SPEC_300) {
1964 u16 clk, ctrl_2;
1965
1966 if (!host->preset_enabled) {
1967 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1968
1969
1970
1971
1972 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1973 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1974 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1975 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1976 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1977 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1978 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1979 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1980 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1981 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1982 else {
1983 pr_warn("%s: invalid driver type, default to driver type B\n",
1984 mmc_hostname(mmc));
1985 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1986 }
1987
1988 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1989 } else {
1990
1991
1992
1993
1994
1995
1996
1997
1998 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1999 clk &= ~SDHCI_CLOCK_CARD_EN;
2000 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2001
2002 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2003
2004
2005 host->ops->set_clock(host, host->clock);
2006 }
2007
2008
2009 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2010 clk &= ~SDHCI_CLOCK_CARD_EN;
2011 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2012
2013 host->ops->set_uhs_signaling(host, ios->timing);
2014 host->timing = ios->timing;
2015
2016 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2017 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
2018 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2019 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2020 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2021 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2022 (ios->timing == MMC_TIMING_MMC_DDR52))) {
2023 u16 preset;
2024
2025 sdhci_enable_preset_value(host, true);
2026 preset = sdhci_get_preset_value(host);
2027 ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
2028 >> SDHCI_PRESET_DRV_SHIFT;
2029 }
2030
2031
2032 host->ops->set_clock(host, host->clock);
2033 } else
2034 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2035
2036
2037
2038
2039
2040
2041 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2042 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2043 }
2044 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2045
2046 static int sdhci_get_cd(struct mmc_host *mmc)
2047 {
2048 struct sdhci_host *host = mmc_priv(mmc);
2049 int gpio_cd = mmc_gpio_get_cd(mmc);
2050
2051 if (host->flags & SDHCI_DEVICE_DEAD)
2052 return 0;
2053
2054
2055 if (!mmc_card_is_removable(host->mmc))
2056 return 1;
2057
2058
2059
2060
2061
2062 if (gpio_cd >= 0)
2063 return !!gpio_cd;
2064
2065
2066 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2067 return 1;
2068
2069
2070 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2071 }
2072
2073 static int sdhci_check_ro(struct sdhci_host *host)
2074 {
2075 unsigned long flags;
2076 int is_readonly;
2077
2078 spin_lock_irqsave(&host->lock, flags);
2079
2080 if (host->flags & SDHCI_DEVICE_DEAD)
2081 is_readonly = 0;
2082 else if (host->ops->get_ro)
2083 is_readonly = host->ops->get_ro(host);
2084 else if (mmc_can_gpio_ro(host->mmc))
2085 is_readonly = mmc_gpio_get_ro(host->mmc);
2086 else
2087 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2088 & SDHCI_WRITE_PROTECT);
2089
2090 spin_unlock_irqrestore(&host->lock, flags);
2091
2092
2093 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2094 !is_readonly : is_readonly;
2095 }
2096
2097 #define SAMPLE_COUNT 5
2098
2099 static int sdhci_get_ro(struct mmc_host *mmc)
2100 {
2101 struct sdhci_host *host = mmc_priv(mmc);
2102 int i, ro_count;
2103
2104 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2105 return sdhci_check_ro(host);
2106
2107 ro_count = 0;
2108 for (i = 0; i < SAMPLE_COUNT; i++) {
2109 if (sdhci_check_ro(host)) {
2110 if (++ro_count > SAMPLE_COUNT / 2)
2111 return 1;
2112 }
2113 msleep(30);
2114 }
2115 return 0;
2116 }
2117
2118 static void sdhci_hw_reset(struct mmc_host *mmc)
2119 {
2120 struct sdhci_host *host = mmc_priv(mmc);
2121
2122 if (host->ops && host->ops->hw_reset)
2123 host->ops->hw_reset(host);
2124 }
2125
2126 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2127 {
2128 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2129 if (enable)
2130 host->ier |= SDHCI_INT_CARD_INT;
2131 else
2132 host->ier &= ~SDHCI_INT_CARD_INT;
2133
2134 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2135 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2136 }
2137 }
2138
2139 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2140 {
2141 struct sdhci_host *host = mmc_priv(mmc);
2142 unsigned long flags;
2143
2144 if (enable)
2145 pm_runtime_get_noresume(host->mmc->parent);
2146
2147 spin_lock_irqsave(&host->lock, flags);
2148 sdhci_enable_sdio_irq_nolock(host, enable);
2149 spin_unlock_irqrestore(&host->lock, flags);
2150
2151 if (!enable)
2152 pm_runtime_put_noidle(host->mmc->parent);
2153 }
2154 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2155
2156 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2157 {
2158 struct sdhci_host *host = mmc_priv(mmc);
2159 unsigned long flags;
2160
2161 spin_lock_irqsave(&host->lock, flags);
2162 sdhci_enable_sdio_irq_nolock(host, true);
2163 spin_unlock_irqrestore(&host->lock, flags);
2164 }
2165
2166 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2167 struct mmc_ios *ios)
2168 {
2169 struct sdhci_host *host = mmc_priv(mmc);
2170 u16 ctrl;
2171 int ret;
2172
2173
2174
2175
2176
2177 if (host->version < SDHCI_SPEC_300)
2178 return 0;
2179
2180 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2181
2182 switch (ios->signal_voltage) {
2183 case MMC_SIGNAL_VOLTAGE_330:
2184 if (!(host->flags & SDHCI_SIGNALING_330))
2185 return -EINVAL;
2186
2187 ctrl &= ~SDHCI_CTRL_VDD_180;
2188 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2189
2190 if (!IS_ERR(mmc->supply.vqmmc)) {
2191 ret = mmc_regulator_set_vqmmc(mmc, ios);
2192 if (ret) {
2193 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2194 mmc_hostname(mmc));
2195 return -EIO;
2196 }
2197 }
2198
2199 usleep_range(5000, 5500);
2200
2201
2202 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2203 if (!(ctrl & SDHCI_CTRL_VDD_180))
2204 return 0;
2205
2206 pr_warn("%s: 3.3V regulator output did not became stable\n",
2207 mmc_hostname(mmc));
2208
2209 return -EAGAIN;
2210 case MMC_SIGNAL_VOLTAGE_180:
2211 if (!(host->flags & SDHCI_SIGNALING_180))
2212 return -EINVAL;
2213 if (!IS_ERR(mmc->supply.vqmmc)) {
2214 ret = mmc_regulator_set_vqmmc(mmc, ios);
2215 if (ret) {
2216 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2217 mmc_hostname(mmc));
2218 return -EIO;
2219 }
2220 }
2221
2222
2223
2224
2225
2226 ctrl |= SDHCI_CTRL_VDD_180;
2227 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2228
2229
2230 if (host->ops->voltage_switch)
2231 host->ops->voltage_switch(host);
2232
2233
2234 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2235 if (ctrl & SDHCI_CTRL_VDD_180)
2236 return 0;
2237
2238 pr_warn("%s: 1.8V regulator output did not became stable\n",
2239 mmc_hostname(mmc));
2240
2241 return -EAGAIN;
2242 case MMC_SIGNAL_VOLTAGE_120:
2243 if (!(host->flags & SDHCI_SIGNALING_120))
2244 return -EINVAL;
2245 if (!IS_ERR(mmc->supply.vqmmc)) {
2246 ret = mmc_regulator_set_vqmmc(mmc, ios);
2247 if (ret) {
2248 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2249 mmc_hostname(mmc));
2250 return -EIO;
2251 }
2252 }
2253 return 0;
2254 default:
2255
2256 return 0;
2257 }
2258 }
2259 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2260
2261 static int sdhci_card_busy(struct mmc_host *mmc)
2262 {
2263 struct sdhci_host *host = mmc_priv(mmc);
2264 u32 present_state;
2265
2266
2267 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2268
2269 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2270 }
2271
2272 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2273 {
2274 struct sdhci_host *host = mmc_priv(mmc);
2275 unsigned long flags;
2276
2277 spin_lock_irqsave(&host->lock, flags);
2278 host->flags |= SDHCI_HS400_TUNING;
2279 spin_unlock_irqrestore(&host->lock, flags);
2280
2281 return 0;
2282 }
2283
2284 void sdhci_start_tuning(struct sdhci_host *host)
2285 {
2286 u16 ctrl;
2287
2288 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2289 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2290 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2291 ctrl |= SDHCI_CTRL_TUNED_CLK;
2292 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2305 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2306 }
2307 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2308
2309 void sdhci_end_tuning(struct sdhci_host *host)
2310 {
2311 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2312 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2313 }
2314 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2315
2316 void sdhci_reset_tuning(struct sdhci_host *host)
2317 {
2318 u16 ctrl;
2319
2320 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2321 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2322 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2323 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2324 }
2325 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2326
2327 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2328 {
2329 sdhci_reset_tuning(host);
2330
2331 sdhci_do_reset(host, SDHCI_RESET_CMD);
2332 sdhci_do_reset(host, SDHCI_RESET_DATA);
2333
2334 sdhci_end_tuning(host);
2335
2336 mmc_abort_tuning(host->mmc, opcode);
2337 }
2338 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2339
2340
2341
2342
2343
2344
2345
2346
2347 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2348 {
2349 struct mmc_host *mmc = host->mmc;
2350 struct mmc_command cmd = {};
2351 struct mmc_request mrq = {};
2352 unsigned long flags;
2353 u32 b = host->sdma_boundary;
2354
2355 spin_lock_irqsave(&host->lock, flags);
2356
2357 cmd.opcode = opcode;
2358 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2359 cmd.mrq = &mrq;
2360
2361 mrq.cmd = &cmd;
2362
2363
2364
2365
2366
2367 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2368 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2369 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2370 else
2371 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2372
2373
2374
2375
2376
2377
2378
2379 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2380
2381 sdhci_send_command(host, &cmd);
2382
2383 host->cmd = NULL;
2384
2385 sdhci_del_timer(host, &mrq);
2386
2387 host->tuning_done = 0;
2388
2389 spin_unlock_irqrestore(&host->lock, flags);
2390
2391
2392 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2393 msecs_to_jiffies(50));
2394
2395 }
2396 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2397
2398 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2399 {
2400 int i;
2401
2402
2403
2404
2405
2406 for (i = 0; i < host->tuning_loop_count; i++) {
2407 u16 ctrl;
2408
2409 sdhci_send_tuning(host, opcode);
2410
2411 if (!host->tuning_done) {
2412 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2413 mmc_hostname(host->mmc));
2414 sdhci_abort_tuning(host, opcode);
2415 return -ETIMEDOUT;
2416 }
2417
2418
2419 if (host->tuning_delay > 0)
2420 mdelay(host->tuning_delay);
2421
2422 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2423 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2424 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2425 return 0;
2426 break;
2427 }
2428
2429 }
2430
2431 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2432 mmc_hostname(host->mmc));
2433 sdhci_reset_tuning(host);
2434 return -EAGAIN;
2435 }
2436
2437 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2438 {
2439 struct sdhci_host *host = mmc_priv(mmc);
2440 int err = 0;
2441 unsigned int tuning_count = 0;
2442 bool hs400_tuning;
2443
2444 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2445
2446 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2447 tuning_count = host->tuning_count;
2448
2449
2450
2451
2452
2453
2454
2455
2456 switch (host->timing) {
2457
2458 case MMC_TIMING_MMC_HS400:
2459 err = -EINVAL;
2460 goto out;
2461
2462 case MMC_TIMING_MMC_HS200:
2463
2464
2465
2466
2467 if (hs400_tuning)
2468 tuning_count = 0;
2469 break;
2470
2471 case MMC_TIMING_UHS_SDR104:
2472 case MMC_TIMING_UHS_DDR50:
2473 break;
2474
2475 case MMC_TIMING_UHS_SDR50:
2476 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2477 break;
2478
2479
2480 default:
2481 goto out;
2482 }
2483
2484 if (host->ops->platform_execute_tuning) {
2485 err = host->ops->platform_execute_tuning(host, opcode);
2486 goto out;
2487 }
2488
2489 host->mmc->retune_period = tuning_count;
2490
2491 if (host->tuning_delay < 0)
2492 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2493
2494 sdhci_start_tuning(host);
2495
2496 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2497
2498 sdhci_end_tuning(host);
2499 out:
2500 host->flags &= ~SDHCI_HS400_TUNING;
2501
2502 return err;
2503 }
2504 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2505
2506 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2507 {
2508
2509 if (host->version < SDHCI_SPEC_300)
2510 return;
2511
2512
2513
2514
2515
2516 if (host->preset_enabled != enable) {
2517 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2518
2519 if (enable)
2520 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2521 else
2522 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2523
2524 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2525
2526 if (enable)
2527 host->flags |= SDHCI_PV_ENABLED;
2528 else
2529 host->flags &= ~SDHCI_PV_ENABLED;
2530
2531 host->preset_enabled = enable;
2532 }
2533 }
2534
2535 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2536 int err)
2537 {
2538 struct sdhci_host *host = mmc_priv(mmc);
2539 struct mmc_data *data = mrq->data;
2540
2541 if (data->host_cookie != COOKIE_UNMAPPED)
2542 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2543 mmc_get_dma_dir(data));
2544
2545 data->host_cookie = COOKIE_UNMAPPED;
2546 }
2547
2548 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2549 {
2550 struct sdhci_host *host = mmc_priv(mmc);
2551
2552 mrq->data->host_cookie = COOKIE_UNMAPPED;
2553
2554
2555
2556
2557
2558
2559 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2560 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2561 }
2562
2563 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2564 {
2565 if (host->data_cmd) {
2566 host->data_cmd->error = err;
2567 sdhci_finish_mrq(host, host->data_cmd->mrq);
2568 }
2569
2570 if (host->cmd) {
2571 host->cmd->error = err;
2572 sdhci_finish_mrq(host, host->cmd->mrq);
2573 }
2574 }
2575
2576 static void sdhci_card_event(struct mmc_host *mmc)
2577 {
2578 struct sdhci_host *host = mmc_priv(mmc);
2579 unsigned long flags;
2580 int present;
2581
2582
2583 if (host->ops->card_event)
2584 host->ops->card_event(host);
2585
2586 present = mmc->ops->get_cd(mmc);
2587
2588 spin_lock_irqsave(&host->lock, flags);
2589
2590
2591 if (sdhci_has_requests(host) && !present) {
2592 pr_err("%s: Card removed during transfer!\n",
2593 mmc_hostname(host->mmc));
2594 pr_err("%s: Resetting controller.\n",
2595 mmc_hostname(host->mmc));
2596
2597 sdhci_do_reset(host, SDHCI_RESET_CMD);
2598 sdhci_do_reset(host, SDHCI_RESET_DATA);
2599
2600 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2601 }
2602
2603 spin_unlock_irqrestore(&host->lock, flags);
2604 }
2605
2606 static const struct mmc_host_ops sdhci_ops = {
2607 .request = sdhci_request,
2608 .post_req = sdhci_post_req,
2609 .pre_req = sdhci_pre_req,
2610 .set_ios = sdhci_set_ios,
2611 .get_cd = sdhci_get_cd,
2612 .get_ro = sdhci_get_ro,
2613 .hw_reset = sdhci_hw_reset,
2614 .enable_sdio_irq = sdhci_enable_sdio_irq,
2615 .ack_sdio_irq = sdhci_ack_sdio_irq,
2616 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2617 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2618 .execute_tuning = sdhci_execute_tuning,
2619 .card_event = sdhci_card_event,
2620 .card_busy = sdhci_card_busy,
2621 };
2622
2623
2624
2625
2626
2627
2628
2629 static bool sdhci_request_done(struct sdhci_host *host)
2630 {
2631 unsigned long flags;
2632 struct mmc_request *mrq;
2633 int i;
2634
2635 spin_lock_irqsave(&host->lock, flags);
2636
2637 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2638 mrq = host->mrqs_done[i];
2639 if (mrq)
2640 break;
2641 }
2642
2643 if (!mrq) {
2644 spin_unlock_irqrestore(&host->lock, flags);
2645 return true;
2646 }
2647
2648
2649
2650
2651
2652
2653 if (host->flags & SDHCI_REQ_USE_DMA) {
2654 struct mmc_data *data = mrq->data;
2655
2656 if (data && data->host_cookie == COOKIE_MAPPED) {
2657 if (host->bounce_buffer) {
2658
2659
2660
2661
2662 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2663 unsigned int length = data->bytes_xfered;
2664
2665 if (length > host->bounce_buffer_size) {
2666 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2667 mmc_hostname(host->mmc),
2668 host->bounce_buffer_size,
2669 data->bytes_xfered);
2670
2671 length = host->bounce_buffer_size;
2672 }
2673 dma_sync_single_for_cpu(
2674 host->mmc->parent,
2675 host->bounce_addr,
2676 host->bounce_buffer_size,
2677 DMA_FROM_DEVICE);
2678 sg_copy_from_buffer(data->sg,
2679 data->sg_len,
2680 host->bounce_buffer,
2681 length);
2682 } else {
2683
2684 dma_sync_single_for_cpu(
2685 host->mmc->parent,
2686 host->bounce_addr,
2687 host->bounce_buffer_size,
2688 mmc_get_dma_dir(data));
2689 }
2690 } else {
2691
2692 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2693 data->sg_len,
2694 mmc_get_dma_dir(data));
2695 }
2696 data->host_cookie = COOKIE_UNMAPPED;
2697 }
2698 }
2699
2700
2701
2702
2703
2704 if (sdhci_needs_reset(host, mrq)) {
2705
2706
2707
2708
2709
2710
2711 if (host->cmd || host->data_cmd) {
2712 spin_unlock_irqrestore(&host->lock, flags);
2713 return true;
2714 }
2715
2716
2717 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2718
2719 host->ops->set_clock(host, host->clock);
2720
2721
2722
2723 sdhci_do_reset(host, SDHCI_RESET_CMD);
2724 sdhci_do_reset(host, SDHCI_RESET_DATA);
2725
2726 host->pending_reset = false;
2727 }
2728
2729 host->mrqs_done[i] = NULL;
2730
2731 spin_unlock_irqrestore(&host->lock, flags);
2732
2733 mmc_request_done(host->mmc, mrq);
2734
2735 return false;
2736 }
2737
2738 static void sdhci_complete_work(struct work_struct *work)
2739 {
2740 struct sdhci_host *host = container_of(work, struct sdhci_host,
2741 complete_work);
2742
2743 while (!sdhci_request_done(host))
2744 ;
2745 }
2746
2747 static void sdhci_timeout_timer(struct timer_list *t)
2748 {
2749 struct sdhci_host *host;
2750 unsigned long flags;
2751
2752 host = from_timer(host, t, timer);
2753
2754 spin_lock_irqsave(&host->lock, flags);
2755
2756 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2757 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2758 mmc_hostname(host->mmc));
2759 sdhci_dumpregs(host);
2760
2761 host->cmd->error = -ETIMEDOUT;
2762 sdhci_finish_mrq(host, host->cmd->mrq);
2763 }
2764
2765 spin_unlock_irqrestore(&host->lock, flags);
2766 }
2767
2768 static void sdhci_timeout_data_timer(struct timer_list *t)
2769 {
2770 struct sdhci_host *host;
2771 unsigned long flags;
2772
2773 host = from_timer(host, t, data_timer);
2774
2775 spin_lock_irqsave(&host->lock, flags);
2776
2777 if (host->data || host->data_cmd ||
2778 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2779 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2780 mmc_hostname(host->mmc));
2781 sdhci_dumpregs(host);
2782
2783 if (host->data) {
2784 host->data->error = -ETIMEDOUT;
2785 sdhci_finish_data(host);
2786 queue_work(host->complete_wq, &host->complete_work);
2787 } else if (host->data_cmd) {
2788 host->data_cmd->error = -ETIMEDOUT;
2789 sdhci_finish_mrq(host, host->data_cmd->mrq);
2790 } else {
2791 host->cmd->error = -ETIMEDOUT;
2792 sdhci_finish_mrq(host, host->cmd->mrq);
2793 }
2794 }
2795
2796 spin_unlock_irqrestore(&host->lock, flags);
2797 }
2798
2799
2800
2801
2802
2803
2804
2805 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
2806 {
2807
2808 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
2809 struct mmc_request *mrq = host->data_cmd->mrq;
2810 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2811 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2812 SDHCI_INT_DATA_TIMEOUT :
2813 SDHCI_INT_DATA_CRC;
2814
2815
2816 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
2817 *intmask_p |= data_err_bit;
2818 return;
2819 }
2820 }
2821
2822 if (!host->cmd) {
2823
2824
2825
2826
2827
2828 if (host->pending_reset)
2829 return;
2830 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2831 mmc_hostname(host->mmc), (unsigned)intmask);
2832 sdhci_dumpregs(host);
2833 return;
2834 }
2835
2836 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2837 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2838 if (intmask & SDHCI_INT_TIMEOUT)
2839 host->cmd->error = -ETIMEDOUT;
2840 else
2841 host->cmd->error = -EILSEQ;
2842
2843
2844 if (host->cmd->data &&
2845 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2846 SDHCI_INT_CRC) {
2847 host->cmd = NULL;
2848 *intmask_p |= SDHCI_INT_DATA_CRC;
2849 return;
2850 }
2851
2852 __sdhci_finish_mrq(host, host->cmd->mrq);
2853 return;
2854 }
2855
2856
2857 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
2858 struct mmc_request *mrq = host->cmd->mrq;
2859 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2860 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2861 -ETIMEDOUT :
2862 -EILSEQ;
2863
2864 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
2865 mrq->sbc->error = err;
2866 __sdhci_finish_mrq(host, mrq);
2867 return;
2868 }
2869 }
2870
2871 if (intmask & SDHCI_INT_RESPONSE)
2872 sdhci_finish_command(host);
2873 }
2874
2875 static void sdhci_adma_show_error(struct sdhci_host *host)
2876 {
2877 void *desc = host->adma_table;
2878 dma_addr_t dma = host->adma_addr;
2879
2880 sdhci_dumpregs(host);
2881
2882 while (true) {
2883 struct sdhci_adma2_64_desc *dma_desc = desc;
2884
2885 if (host->flags & SDHCI_USE_64_BIT_DMA)
2886 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2887 (unsigned long long)dma,
2888 le32_to_cpu(dma_desc->addr_hi),
2889 le32_to_cpu(dma_desc->addr_lo),
2890 le16_to_cpu(dma_desc->len),
2891 le16_to_cpu(dma_desc->cmd));
2892 else
2893 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2894 (unsigned long long)dma,
2895 le32_to_cpu(dma_desc->addr_lo),
2896 le16_to_cpu(dma_desc->len),
2897 le16_to_cpu(dma_desc->cmd));
2898
2899 desc += host->desc_sz;
2900 dma += host->desc_sz;
2901
2902 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2903 break;
2904 }
2905 }
2906
2907 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2908 {
2909 u32 command;
2910
2911
2912 if (intmask & SDHCI_INT_DATA_AVAIL) {
2913 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2914 if (command == MMC_SEND_TUNING_BLOCK ||
2915 command == MMC_SEND_TUNING_BLOCK_HS200) {
2916 host->tuning_done = 1;
2917 wake_up(&host->buf_ready_int);
2918 return;
2919 }
2920 }
2921
2922 if (!host->data) {
2923 struct mmc_command *data_cmd = host->data_cmd;
2924
2925
2926
2927
2928
2929
2930 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2931 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2932 host->data_cmd = NULL;
2933 data_cmd->error = -ETIMEDOUT;
2934 __sdhci_finish_mrq(host, data_cmd->mrq);
2935 return;
2936 }
2937 if (intmask & SDHCI_INT_DATA_END) {
2938 host->data_cmd = NULL;
2939
2940
2941
2942
2943
2944 if (host->cmd == data_cmd)
2945 return;
2946
2947 __sdhci_finish_mrq(host, data_cmd->mrq);
2948 return;
2949 }
2950 }
2951
2952
2953
2954
2955
2956
2957 if (host->pending_reset)
2958 return;
2959
2960 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2961 mmc_hostname(host->mmc), (unsigned)intmask);
2962 sdhci_dumpregs(host);
2963
2964 return;
2965 }
2966
2967 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2968 host->data->error = -ETIMEDOUT;
2969 else if (intmask & SDHCI_INT_DATA_END_BIT)
2970 host->data->error = -EILSEQ;
2971 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2972 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2973 != MMC_BUS_TEST_R)
2974 host->data->error = -EILSEQ;
2975 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2976 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
2977 intmask);
2978 sdhci_adma_show_error(host);
2979 host->data->error = -EIO;
2980 if (host->ops->adma_workaround)
2981 host->ops->adma_workaround(host, intmask);
2982 }
2983
2984 if (host->data->error)
2985 sdhci_finish_data(host);
2986 else {
2987 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2988 sdhci_transfer_pio(host);
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999 if (intmask & SDHCI_INT_DMA_END) {
3000 dma_addr_t dmastart, dmanow;
3001
3002 dmastart = sdhci_sdma_address(host);
3003 dmanow = dmastart + host->data->bytes_xfered;
3004
3005
3006
3007 dmanow = (dmanow &
3008 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3009 SDHCI_DEFAULT_BOUNDARY_SIZE;
3010 host->data->bytes_xfered = dmanow - dmastart;
3011 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3012 &dmastart, host->data->bytes_xfered, &dmanow);
3013 sdhci_set_sdma_addr(host, dmanow);
3014 }
3015
3016 if (intmask & SDHCI_INT_DATA_END) {
3017 if (host->cmd == host->data_cmd) {
3018
3019
3020
3021
3022
3023 host->data_early = 1;
3024 } else {
3025 sdhci_finish_data(host);
3026 }
3027 }
3028 }
3029 }
3030
3031 static inline bool sdhci_defer_done(struct sdhci_host *host,
3032 struct mmc_request *mrq)
3033 {
3034 struct mmc_data *data = mrq->data;
3035
3036 return host->pending_reset ||
3037 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3038 data->host_cookie == COOKIE_MAPPED);
3039 }
3040
3041 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3042 {
3043 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3044 irqreturn_t result = IRQ_NONE;
3045 struct sdhci_host *host = dev_id;
3046 u32 intmask, mask, unexpected = 0;
3047 int max_loops = 16;
3048 int i;
3049
3050 spin_lock(&host->lock);
3051
3052 if (host->runtime_suspended) {
3053 spin_unlock(&host->lock);
3054 return IRQ_NONE;
3055 }
3056
3057 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3058 if (!intmask || intmask == 0xffffffff) {
3059 result = IRQ_NONE;
3060 goto out;
3061 }
3062
3063 do {
3064 DBG("IRQ status 0x%08x\n", intmask);
3065
3066 if (host->ops->irq) {
3067 intmask = host->ops->irq(host, intmask);
3068 if (!intmask)
3069 goto cont;
3070 }
3071
3072
3073 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3074 SDHCI_INT_BUS_POWER);
3075 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3076
3077 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3078 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3079 SDHCI_CARD_PRESENT;
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3093 SDHCI_INT_CARD_REMOVE);
3094 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3095 SDHCI_INT_CARD_INSERT;
3096 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3097 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3098
3099 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3100 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3101
3102 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3103 SDHCI_INT_CARD_REMOVE);
3104 result = IRQ_WAKE_THREAD;
3105 }
3106
3107 if (intmask & SDHCI_INT_CMD_MASK)
3108 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3109
3110 if (intmask & SDHCI_INT_DATA_MASK)
3111 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3112
3113 if (intmask & SDHCI_INT_BUS_POWER)
3114 pr_err("%s: Card is consuming too much power!\n",
3115 mmc_hostname(host->mmc));
3116
3117 if (intmask & SDHCI_INT_RETUNE)
3118 mmc_retune_needed(host->mmc);
3119
3120 if ((intmask & SDHCI_INT_CARD_INT) &&
3121 (host->ier & SDHCI_INT_CARD_INT)) {
3122 sdhci_enable_sdio_irq_nolock(host, false);
3123 sdio_signal_irq(host->mmc);
3124 }
3125
3126 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3127 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3128 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3129 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3130
3131 if (intmask) {
3132 unexpected |= intmask;
3133 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3134 }
3135 cont:
3136 if (result == IRQ_NONE)
3137 result = IRQ_HANDLED;
3138
3139 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3140 } while (intmask && --max_loops);
3141
3142
3143 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3144 struct mmc_request *mrq = host->mrqs_done[i];
3145
3146 if (!mrq)
3147 continue;
3148
3149 if (sdhci_defer_done(host, mrq)) {
3150 result = IRQ_WAKE_THREAD;
3151 } else {
3152 mrqs_done[i] = mrq;
3153 host->mrqs_done[i] = NULL;
3154 }
3155 }
3156 out:
3157 spin_unlock(&host->lock);
3158
3159
3160 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3161 if (mrqs_done[i])
3162 mmc_request_done(host->mmc, mrqs_done[i]);
3163 }
3164
3165 if (unexpected) {
3166 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3167 mmc_hostname(host->mmc), unexpected);
3168 sdhci_dumpregs(host);
3169 }
3170
3171 return result;
3172 }
3173
3174 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3175 {
3176 struct sdhci_host *host = dev_id;
3177 unsigned long flags;
3178 u32 isr;
3179
3180 while (!sdhci_request_done(host))
3181 ;
3182
3183 spin_lock_irqsave(&host->lock, flags);
3184 isr = host->thread_isr;
3185 host->thread_isr = 0;
3186 spin_unlock_irqrestore(&host->lock, flags);
3187
3188 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3189 struct mmc_host *mmc = host->mmc;
3190
3191 mmc->ops->card_event(mmc);
3192 mmc_detect_change(mmc, msecs_to_jiffies(200));
3193 }
3194
3195 return IRQ_HANDLED;
3196 }
3197
3198
3199
3200
3201
3202
3203
3204 #ifdef CONFIG_PM
3205
3206 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3207 {
3208 return mmc_card_is_removable(host->mmc) &&
3209 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3210 !mmc_can_gpio_cd(host->mmc);
3211 }
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3222 {
3223 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3224 SDHCI_WAKE_ON_INT;
3225 u32 irq_val = 0;
3226 u8 wake_val = 0;
3227 u8 val;
3228
3229 if (sdhci_cd_irq_can_wakeup(host)) {
3230 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3231 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3232 }
3233
3234 if (mmc_card_wake_sdio_irq(host->mmc)) {
3235 wake_val |= SDHCI_WAKE_ON_INT;
3236 irq_val |= SDHCI_INT_CARD_INT;
3237 }
3238
3239 if (!irq_val)
3240 return false;
3241
3242 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3243 val &= ~mask;
3244 val |= wake_val;
3245 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3246
3247 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3248
3249 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3250
3251 return host->irq_wake_enabled;
3252 }
3253
3254 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3255 {
3256 u8 val;
3257 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3258 | SDHCI_WAKE_ON_INT;
3259
3260 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3261 val &= ~mask;
3262 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3263
3264 disable_irq_wake(host->irq);
3265
3266 host->irq_wake_enabled = false;
3267 }
3268
3269 int sdhci_suspend_host(struct sdhci_host *host)
3270 {
3271 sdhci_disable_card_detection(host);
3272
3273 mmc_retune_timer_stop(host->mmc);
3274
3275 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3276 !sdhci_enable_irq_wakeups(host)) {
3277 host->ier = 0;
3278 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3279 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3280 free_irq(host->irq, host);
3281 }
3282
3283 return 0;
3284 }
3285
3286 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3287
3288 int sdhci_resume_host(struct sdhci_host *host)
3289 {
3290 struct mmc_host *mmc = host->mmc;
3291 int ret = 0;
3292
3293 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3294 if (host->ops->enable_dma)
3295 host->ops->enable_dma(host);
3296 }
3297
3298 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3299 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3300
3301 sdhci_init(host, 0);
3302 host->pwr = 0;
3303 host->clock = 0;
3304 mmc->ops->set_ios(mmc, &mmc->ios);
3305 } else {
3306 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3307 }
3308
3309 if (host->irq_wake_enabled) {
3310 sdhci_disable_irq_wakeups(host);
3311 } else {
3312 ret = request_threaded_irq(host->irq, sdhci_irq,
3313 sdhci_thread_irq, IRQF_SHARED,
3314 mmc_hostname(host->mmc), host);
3315 if (ret)
3316 return ret;
3317 }
3318
3319 sdhci_enable_card_detection(host);
3320
3321 return ret;
3322 }
3323
3324 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3325
3326 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3327 {
3328 unsigned long flags;
3329
3330 mmc_retune_timer_stop(host->mmc);
3331
3332 spin_lock_irqsave(&host->lock, flags);
3333 host->ier &= SDHCI_INT_CARD_INT;
3334 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3335 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3336 spin_unlock_irqrestore(&host->lock, flags);
3337
3338 synchronize_hardirq(host->irq);
3339
3340 spin_lock_irqsave(&host->lock, flags);
3341 host->runtime_suspended = true;
3342 spin_unlock_irqrestore(&host->lock, flags);
3343
3344 return 0;
3345 }
3346 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3347
3348 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3349 {
3350 struct mmc_host *mmc = host->mmc;
3351 unsigned long flags;
3352 int host_flags = host->flags;
3353
3354 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3355 if (host->ops->enable_dma)
3356 host->ops->enable_dma(host);
3357 }
3358
3359 sdhci_init(host, soft_reset);
3360
3361 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3362 mmc->ios.power_mode != MMC_POWER_OFF) {
3363
3364 host->pwr = 0;
3365 host->clock = 0;
3366 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3367 mmc->ops->set_ios(mmc, &mmc->ios);
3368
3369 if ((host_flags & SDHCI_PV_ENABLED) &&
3370 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3371 spin_lock_irqsave(&host->lock, flags);
3372 sdhci_enable_preset_value(host, true);
3373 spin_unlock_irqrestore(&host->lock, flags);
3374 }
3375
3376 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3377 mmc->ops->hs400_enhanced_strobe)
3378 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3379 }
3380
3381 spin_lock_irqsave(&host->lock, flags);
3382
3383 host->runtime_suspended = false;
3384
3385
3386 if (sdio_irq_claimed(mmc))
3387 sdhci_enable_sdio_irq_nolock(host, true);
3388
3389
3390 sdhci_enable_card_detection(host);
3391
3392 spin_unlock_irqrestore(&host->lock, flags);
3393
3394 return 0;
3395 }
3396 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3397
3398 #endif
3399
3400
3401
3402
3403
3404
3405
3406 void sdhci_cqe_enable(struct mmc_host *mmc)
3407 {
3408 struct sdhci_host *host = mmc_priv(mmc);
3409 unsigned long flags;
3410 u8 ctrl;
3411
3412 spin_lock_irqsave(&host->lock, flags);
3413
3414 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3415 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3416
3417
3418
3419
3420
3421 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3422 ctrl |= SDHCI_CTRL_ADMA3;
3423 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3424 ctrl |= SDHCI_CTRL_ADMA64;
3425 else
3426 ctrl |= SDHCI_CTRL_ADMA32;
3427 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3428
3429 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3430 SDHCI_BLOCK_SIZE);
3431
3432
3433 sdhci_set_timeout(host, NULL);
3434
3435 host->ier = host->cqe_ier;
3436
3437 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3438 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3439
3440 host->cqe_on = true;
3441
3442 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3443 mmc_hostname(mmc), host->ier,
3444 sdhci_readl(host, SDHCI_INT_STATUS));
3445
3446 spin_unlock_irqrestore(&host->lock, flags);
3447 }
3448 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3449
3450 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3451 {
3452 struct sdhci_host *host = mmc_priv(mmc);
3453 unsigned long flags;
3454
3455 spin_lock_irqsave(&host->lock, flags);
3456
3457 sdhci_set_default_irqs(host);
3458
3459 host->cqe_on = false;
3460
3461 if (recovery) {
3462 sdhci_do_reset(host, SDHCI_RESET_CMD);
3463 sdhci_do_reset(host, SDHCI_RESET_DATA);
3464 }
3465
3466 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3467 mmc_hostname(mmc), host->ier,
3468 sdhci_readl(host, SDHCI_INT_STATUS));
3469
3470 spin_unlock_irqrestore(&host->lock, flags);
3471 }
3472 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3473
3474 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3475 int *data_error)
3476 {
3477 u32 mask;
3478
3479 if (!host->cqe_on)
3480 return false;
3481
3482 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3483 *cmd_error = -EILSEQ;
3484 else if (intmask & SDHCI_INT_TIMEOUT)
3485 *cmd_error = -ETIMEDOUT;
3486 else
3487 *cmd_error = 0;
3488
3489 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3490 *data_error = -EILSEQ;
3491 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3492 *data_error = -ETIMEDOUT;
3493 else if (intmask & SDHCI_INT_ADMA_ERROR)
3494 *data_error = -EIO;
3495 else
3496 *data_error = 0;
3497
3498
3499 mask = intmask & host->cqe_ier;
3500 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3501
3502 if (intmask & SDHCI_INT_BUS_POWER)
3503 pr_err("%s: Card is consuming too much power!\n",
3504 mmc_hostname(host->mmc));
3505
3506 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3507 if (intmask) {
3508 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3509 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3510 mmc_hostname(host->mmc), intmask);
3511 sdhci_dumpregs(host);
3512 }
3513
3514 return true;
3515 }
3516 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3517
3518
3519
3520
3521
3522
3523
3524 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3525 size_t priv_size)
3526 {
3527 struct mmc_host *mmc;
3528 struct sdhci_host *host;
3529
3530 WARN_ON(dev == NULL);
3531
3532 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3533 if (!mmc)
3534 return ERR_PTR(-ENOMEM);
3535
3536 host = mmc_priv(mmc);
3537 host->mmc = mmc;
3538 host->mmc_host_ops = sdhci_ops;
3539 mmc->ops = &host->mmc_host_ops;
3540
3541 host->flags = SDHCI_SIGNALING_330;
3542
3543 host->cqe_ier = SDHCI_CQE_INT_MASK;
3544 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3545
3546 host->tuning_delay = -1;
3547 host->tuning_loop_count = MAX_TUNING_LOOP;
3548
3549 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3550
3551
3552
3553
3554
3555
3556 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
3557
3558 return host;
3559 }
3560
3561 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3562
3563 static int sdhci_set_dma_mask(struct sdhci_host *host)
3564 {
3565 struct mmc_host *mmc = host->mmc;
3566 struct device *dev = mmc_dev(mmc);
3567 int ret = -EINVAL;
3568
3569 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3570 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3571
3572
3573 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3574 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3575 if (ret) {
3576 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3577 mmc_hostname(mmc));
3578 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3579 }
3580 }
3581
3582
3583 if (ret) {
3584 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3585 if (ret)
3586 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3587 mmc_hostname(mmc));
3588 }
3589
3590 return ret;
3591 }
3592
3593 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
3594 const u32 *caps, const u32 *caps1)
3595 {
3596 u16 v;
3597 u64 dt_caps_mask = 0;
3598 u64 dt_caps = 0;
3599
3600 if (host->read_caps)
3601 return;
3602
3603 host->read_caps = true;
3604
3605 if (debug_quirks)
3606 host->quirks = debug_quirks;
3607
3608 if (debug_quirks2)
3609 host->quirks2 = debug_quirks2;
3610
3611 sdhci_do_reset(host, SDHCI_RESET_ALL);
3612
3613 if (host->v4_mode)
3614 sdhci_do_enable_v4_mode(host);
3615
3616 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3617 "sdhci-caps-mask", &dt_caps_mask);
3618 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3619 "sdhci-caps", &dt_caps);
3620
3621 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3622 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3623
3624 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3625 return;
3626
3627 if (caps) {
3628 host->caps = *caps;
3629 } else {
3630 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3631 host->caps &= ~lower_32_bits(dt_caps_mask);
3632 host->caps |= lower_32_bits(dt_caps);
3633 }
3634
3635 if (host->version < SDHCI_SPEC_300)
3636 return;
3637
3638 if (caps1) {
3639 host->caps1 = *caps1;
3640 } else {
3641 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3642 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3643 host->caps1 |= upper_32_bits(dt_caps);
3644 }
3645 }
3646 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3647
3648 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3649 {
3650 struct mmc_host *mmc = host->mmc;
3651 unsigned int max_blocks;
3652 unsigned int bounce_size;
3653 int ret;
3654
3655
3656
3657
3658
3659
3660 bounce_size = SZ_64K;
3661
3662
3663
3664
3665
3666 if (mmc->max_req_size < bounce_size)
3667 bounce_size = mmc->max_req_size;
3668 max_blocks = bounce_size / 512;
3669
3670
3671
3672
3673
3674
3675 host->bounce_buffer = devm_kmalloc(mmc->parent,
3676 bounce_size,
3677 GFP_KERNEL);
3678 if (!host->bounce_buffer) {
3679 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3680 mmc_hostname(mmc),
3681 bounce_size);
3682
3683
3684
3685
3686 return;
3687 }
3688
3689 host->bounce_addr = dma_map_single(mmc->parent,
3690 host->bounce_buffer,
3691 bounce_size,
3692 DMA_BIDIRECTIONAL);
3693 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3694 if (ret)
3695
3696 return;
3697 host->bounce_buffer_size = bounce_size;
3698
3699
3700 mmc->max_segs = max_blocks;
3701 mmc->max_seg_size = bounce_size;
3702 mmc->max_req_size = bounce_size;
3703
3704 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3705 mmc_hostname(mmc), max_blocks, bounce_size);
3706 }
3707
3708 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
3709 {
3710
3711
3712
3713
3714
3715 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
3716 return host->caps & SDHCI_CAN_64BIT_V4;
3717
3718 return host->caps & SDHCI_CAN_64BIT;
3719 }
3720
3721 int sdhci_setup_host(struct sdhci_host *host)
3722 {
3723 struct mmc_host *mmc;
3724 u32 max_current_caps;
3725 unsigned int ocr_avail;
3726 unsigned int override_timeout_clk;
3727 u32 max_clk;
3728 int ret;
3729
3730 WARN_ON(host == NULL);
3731 if (host == NULL)
3732 return -EINVAL;
3733
3734 mmc = host->mmc;
3735
3736
3737
3738
3739
3740
3741
3742 ret = mmc_regulator_get_supply(mmc);
3743 if (ret)
3744 return ret;
3745
3746 DBG("Version: 0x%08x | Present: 0x%08x\n",
3747 sdhci_readw(host, SDHCI_HOST_VERSION),
3748 sdhci_readl(host, SDHCI_PRESENT_STATE));
3749 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
3750 sdhci_readl(host, SDHCI_CAPABILITIES),
3751 sdhci_readl(host, SDHCI_CAPABILITIES_1));
3752
3753 sdhci_read_caps(host);
3754
3755 override_timeout_clk = host->timeout_clk;
3756
3757 if (host->version > SDHCI_SPEC_420) {
3758 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3759 mmc_hostname(mmc), host->version);
3760 }
3761
3762 if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
3763 mmc->caps2 &= ~MMC_CAP2_CQE;
3764
3765 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3766 host->flags |= SDHCI_USE_SDMA;
3767 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3768 DBG("Controller doesn't have SDMA capability\n");
3769 else
3770 host->flags |= SDHCI_USE_SDMA;
3771
3772 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3773 (host->flags & SDHCI_USE_SDMA)) {
3774 DBG("Disabling DMA as it is marked broken\n");
3775 host->flags &= ~SDHCI_USE_SDMA;
3776 }
3777
3778 if ((host->version >= SDHCI_SPEC_200) &&
3779 (host->caps & SDHCI_CAN_DO_ADMA2))
3780 host->flags |= SDHCI_USE_ADMA;
3781
3782 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3783 (host->flags & SDHCI_USE_ADMA)) {
3784 DBG("Disabling ADMA as it is marked broken\n");
3785 host->flags &= ~SDHCI_USE_ADMA;
3786 }
3787
3788 if (sdhci_can_64bit_dma(host))
3789 host->flags |= SDHCI_USE_64_BIT_DMA;
3790
3791 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3792 if (host->ops->set_dma_mask)
3793 ret = host->ops->set_dma_mask(host);
3794 else
3795 ret = sdhci_set_dma_mask(host);
3796
3797 if (!ret && host->ops->enable_dma)
3798 ret = host->ops->enable_dma(host);
3799
3800 if (ret) {
3801 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3802 mmc_hostname(mmc));
3803 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3804
3805 ret = 0;
3806 }
3807 }
3808
3809
3810 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
3811 host->flags &= ~SDHCI_USE_SDMA;
3812
3813 if (host->flags & SDHCI_USE_ADMA) {
3814 dma_addr_t dma;
3815 void *buf;
3816
3817 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3818 host->adma_table_sz = host->adma_table_cnt *
3819 SDHCI_ADMA2_64_DESC_SZ(host);
3820 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
3821 } else {
3822 host->adma_table_sz = host->adma_table_cnt *
3823 SDHCI_ADMA2_32_DESC_SZ;
3824 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3825 }
3826
3827 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3828
3829
3830
3831
3832 buf = dma_alloc_coherent(mmc_dev(mmc),
3833 host->align_buffer_sz + host->adma_table_sz,
3834 &dma, GFP_KERNEL);
3835 if (!buf) {
3836 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3837 mmc_hostname(mmc));
3838 host->flags &= ~SDHCI_USE_ADMA;
3839 } else if ((dma + host->align_buffer_sz) &
3840 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3841 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3842 mmc_hostname(mmc));
3843 host->flags &= ~SDHCI_USE_ADMA;
3844 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3845 host->adma_table_sz, buf, dma);
3846 } else {
3847 host->align_buffer = buf;
3848 host->align_addr = dma;
3849
3850 host->adma_table = buf + host->align_buffer_sz;
3851 host->adma_addr = dma + host->align_buffer_sz;
3852 }
3853 }
3854
3855
3856
3857
3858
3859
3860 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3861 host->dma_mask = DMA_BIT_MASK(64);
3862 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3863 }
3864
3865 if (host->version >= SDHCI_SPEC_300)
3866 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3867 >> SDHCI_CLOCK_BASE_SHIFT;
3868 else
3869 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3870 >> SDHCI_CLOCK_BASE_SHIFT;
3871
3872 host->max_clk *= 1000000;
3873 if (host->max_clk == 0 || host->quirks &
3874 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3875 if (!host->ops->get_max_clock) {
3876 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3877 mmc_hostname(mmc));
3878 ret = -ENODEV;
3879 goto undma;
3880 }
3881 host->max_clk = host->ops->get_max_clock(host);
3882 }
3883
3884
3885
3886
3887
3888 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3889 SDHCI_CLOCK_MUL_SHIFT;
3890
3891
3892
3893
3894
3895
3896
3897 if (host->clk_mul)
3898 host->clk_mul += 1;
3899
3900
3901
3902
3903 max_clk = host->max_clk;
3904
3905 if (host->ops->get_min_clock)
3906 mmc->f_min = host->ops->get_min_clock(host);
3907 else if (host->version >= SDHCI_SPEC_300) {
3908 if (host->clk_mul)
3909 max_clk = host->max_clk * host->clk_mul;
3910
3911
3912
3913
3914 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3915 } else
3916 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3917
3918 if (!mmc->f_max || mmc->f_max > max_clk)
3919 mmc->f_max = max_clk;
3920
3921 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3922 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3923 SDHCI_TIMEOUT_CLK_SHIFT;
3924
3925 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3926 host->timeout_clk *= 1000;
3927
3928 if (host->timeout_clk == 0) {
3929 if (!host->ops->get_timeout_clock) {
3930 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3931 mmc_hostname(mmc));
3932 ret = -ENODEV;
3933 goto undma;
3934 }
3935
3936 host->timeout_clk =
3937 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3938 1000);
3939 }
3940
3941 if (override_timeout_clk)
3942 host->timeout_clk = override_timeout_clk;
3943
3944 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3945 host->ops->get_max_timeout_count(host) : 1 << 27;
3946 mmc->max_busy_timeout /= host->timeout_clk;
3947 }
3948
3949 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
3950 !host->ops->get_max_timeout_count)
3951 mmc->max_busy_timeout = 0;
3952
3953 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3954 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3955
3956 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3957 host->flags |= SDHCI_AUTO_CMD12;
3958
3959
3960
3961
3962
3963 if ((host->version >= SDHCI_SPEC_300) &&
3964 ((host->flags & SDHCI_USE_ADMA) ||
3965 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
3966 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3967 host->flags |= SDHCI_AUTO_CMD23;
3968 DBG("Auto-CMD23 available\n");
3969 } else {
3970 DBG("Auto-CMD23 unavailable\n");
3971 }
3972
3973
3974
3975
3976
3977
3978
3979
3980 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3981 mmc->caps |= MMC_CAP_4_BIT_DATA;
3982
3983 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3984 mmc->caps &= ~MMC_CAP_CMD23;
3985
3986 if (host->caps & SDHCI_CAN_DO_HISPD)
3987 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3988
3989 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3990 mmc_card_is_removable(mmc) &&
3991 mmc_gpio_get_cd(host->mmc) < 0)
3992 mmc->caps |= MMC_CAP_NEEDS_POLL;
3993
3994 if (!IS_ERR(mmc->supply.vqmmc)) {
3995 ret = regulator_enable(mmc->supply.vqmmc);
3996
3997
3998 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3999 1950000))
4000 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4001 SDHCI_SUPPORT_SDR50 |
4002 SDHCI_SUPPORT_DDR50);
4003
4004
4005 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4006 3600000))
4007 host->flags &= ~SDHCI_SIGNALING_330;
4008
4009 if (ret) {
4010 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4011 mmc_hostname(mmc), ret);
4012 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4013 }
4014 }
4015
4016 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4017 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4018 SDHCI_SUPPORT_DDR50);
4019
4020
4021
4022
4023
4024
4025
4026
4027 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4028 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4029 }
4030
4031
4032 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4033 SDHCI_SUPPORT_DDR50))
4034 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4035
4036
4037 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4038 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4039
4040
4041
4042 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4043 mmc->caps2 |= MMC_CAP2_HS200;
4044 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4045 mmc->caps |= MMC_CAP_UHS_SDR50;
4046 }
4047
4048 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4049 (host->caps1 & SDHCI_SUPPORT_HS400))
4050 mmc->caps2 |= MMC_CAP2_HS400;
4051
4052 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4053 (IS_ERR(mmc->supply.vqmmc) ||
4054 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4055 1300000)))
4056 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4057
4058 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4059 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4060 mmc->caps |= MMC_CAP_UHS_DDR50;
4061
4062
4063 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4064 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4065
4066
4067 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4068 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4069 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4070 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4071 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4072 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4073
4074
4075 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
4076 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
4077
4078
4079
4080
4081
4082 if (host->tuning_count)
4083 host->tuning_count = 1 << (host->tuning_count - 1);
4084
4085
4086 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
4087 SDHCI_RETUNING_MODE_SHIFT;
4088
4089 ocr_avail = 0;
4090
4091
4092
4093
4094
4095
4096
4097
4098 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4099 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4100 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4101 if (curr > 0) {
4102
4103
4104 curr = curr/1000;
4105 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4106
4107 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4108 max_current_caps =
4109 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
4110 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
4111 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
4112 }
4113 }
4114
4115 if (host->caps & SDHCI_CAN_VDD_330) {
4116 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4117
4118 mmc->max_current_330 = ((max_current_caps &
4119 SDHCI_MAX_CURRENT_330_MASK) >>
4120 SDHCI_MAX_CURRENT_330_SHIFT) *
4121 SDHCI_MAX_CURRENT_MULTIPLIER;
4122 }
4123 if (host->caps & SDHCI_CAN_VDD_300) {
4124 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4125
4126 mmc->max_current_300 = ((max_current_caps &
4127 SDHCI_MAX_CURRENT_300_MASK) >>
4128 SDHCI_MAX_CURRENT_300_SHIFT) *
4129 SDHCI_MAX_CURRENT_MULTIPLIER;
4130 }
4131 if (host->caps & SDHCI_CAN_VDD_180) {
4132 ocr_avail |= MMC_VDD_165_195;
4133
4134 mmc->max_current_180 = ((max_current_caps &
4135 SDHCI_MAX_CURRENT_180_MASK) >>
4136 SDHCI_MAX_CURRENT_180_SHIFT) *
4137 SDHCI_MAX_CURRENT_MULTIPLIER;
4138 }
4139
4140
4141 if (host->ocr_mask)
4142 ocr_avail = host->ocr_mask;
4143
4144
4145 if (mmc->ocr_avail)
4146 ocr_avail = mmc->ocr_avail;
4147
4148 mmc->ocr_avail = ocr_avail;
4149 mmc->ocr_avail_sdio = ocr_avail;
4150 if (host->ocr_avail_sdio)
4151 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4152 mmc->ocr_avail_sd = ocr_avail;
4153 if (host->ocr_avail_sd)
4154 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4155 else
4156 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4157 mmc->ocr_avail_mmc = ocr_avail;
4158 if (host->ocr_avail_mmc)
4159 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4160
4161 if (mmc->ocr_avail == 0) {
4162 pr_err("%s: Hardware doesn't report any support voltages.\n",
4163 mmc_hostname(mmc));
4164 ret = -ENODEV;
4165 goto unreg;
4166 }
4167
4168 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4169 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4170 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4171 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4172 host->flags |= SDHCI_SIGNALING_180;
4173
4174 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4175 host->flags |= SDHCI_SIGNALING_120;
4176
4177 spin_lock_init(&host->lock);
4178
4179
4180
4181
4182
4183
4184 mmc->max_req_size = 524288;
4185
4186
4187
4188
4189
4190 if (host->flags & SDHCI_USE_ADMA) {
4191 mmc->max_segs = SDHCI_MAX_SEGS;
4192 } else if (host->flags & SDHCI_USE_SDMA) {
4193 mmc->max_segs = 1;
4194 if (swiotlb_max_segment()) {
4195 unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
4196 IO_TLB_SEGSIZE;
4197 mmc->max_req_size = min(mmc->max_req_size,
4198 max_req_size);
4199 }
4200 } else {
4201 mmc->max_segs = SDHCI_MAX_SEGS;
4202 }
4203
4204
4205
4206
4207
4208
4209 if (host->flags & SDHCI_USE_ADMA) {
4210 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4211 mmc->max_seg_size = 65535;
4212 else
4213 mmc->max_seg_size = 65536;
4214 } else {
4215 mmc->max_seg_size = mmc->max_req_size;
4216 }
4217
4218
4219
4220
4221
4222 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4223 mmc->max_blk_size = 2;
4224 } else {
4225 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4226 SDHCI_MAX_BLOCK_SHIFT;
4227 if (mmc->max_blk_size >= 3) {
4228 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4229 mmc_hostname(mmc));
4230 mmc->max_blk_size = 0;
4231 }
4232 }
4233
4234 mmc->max_blk_size = 512 << mmc->max_blk_size;
4235
4236
4237
4238
4239 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4240
4241 if (mmc->max_segs == 1)
4242
4243 sdhci_allocate_bounce_buffer(host);
4244
4245 return 0;
4246
4247 unreg:
4248 if (!IS_ERR(mmc->supply.vqmmc))
4249 regulator_disable(mmc->supply.vqmmc);
4250 undma:
4251 if (host->align_buffer)
4252 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4253 host->adma_table_sz, host->align_buffer,
4254 host->align_addr);
4255 host->adma_table = NULL;
4256 host->align_buffer = NULL;
4257
4258 return ret;
4259 }
4260 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4261
4262 void sdhci_cleanup_host(struct sdhci_host *host)
4263 {
4264 struct mmc_host *mmc = host->mmc;
4265
4266 if (!IS_ERR(mmc->supply.vqmmc))
4267 regulator_disable(mmc->supply.vqmmc);
4268
4269 if (host->align_buffer)
4270 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4271 host->adma_table_sz, host->align_buffer,
4272 host->align_addr);
4273 host->adma_table = NULL;
4274 host->align_buffer = NULL;
4275 }
4276 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4277
4278 int __sdhci_add_host(struct sdhci_host *host)
4279 {
4280 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4281 struct mmc_host *mmc = host->mmc;
4282 int ret;
4283
4284 host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4285 if (!host->complete_wq)
4286 return -ENOMEM;
4287
4288 INIT_WORK(&host->complete_work, sdhci_complete_work);
4289
4290 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4291 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4292
4293 init_waitqueue_head(&host->buf_ready_int);
4294
4295 sdhci_init(host, 0);
4296
4297 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4298 IRQF_SHARED, mmc_hostname(mmc), host);
4299 if (ret) {
4300 pr_err("%s: Failed to request IRQ %d: %d\n",
4301 mmc_hostname(mmc), host->irq, ret);
4302 goto unwq;
4303 }
4304
4305 ret = sdhci_led_register(host);
4306 if (ret) {
4307 pr_err("%s: Failed to register LED device: %d\n",
4308 mmc_hostname(mmc), ret);
4309 goto unirq;
4310 }
4311
4312 ret = mmc_add_host(mmc);
4313 if (ret)
4314 goto unled;
4315
4316 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4317 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4318 (host->flags & SDHCI_USE_ADMA) ?
4319 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4320 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4321
4322 sdhci_enable_card_detection(host);
4323
4324 return 0;
4325
4326 unled:
4327 sdhci_led_unregister(host);
4328 unirq:
4329 sdhci_do_reset(host, SDHCI_RESET_ALL);
4330 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4331 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4332 free_irq(host->irq, host);
4333 unwq:
4334 destroy_workqueue(host->complete_wq);
4335
4336 return ret;
4337 }
4338 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4339
4340 int sdhci_add_host(struct sdhci_host *host)
4341 {
4342 int ret;
4343
4344 ret = sdhci_setup_host(host);
4345 if (ret)
4346 return ret;
4347
4348 ret = __sdhci_add_host(host);
4349 if (ret)
4350 goto cleanup;
4351
4352 return 0;
4353
4354 cleanup:
4355 sdhci_cleanup_host(host);
4356
4357 return ret;
4358 }
4359 EXPORT_SYMBOL_GPL(sdhci_add_host);
4360
4361 void sdhci_remove_host(struct sdhci_host *host, int dead)
4362 {
4363 struct mmc_host *mmc = host->mmc;
4364 unsigned long flags;
4365
4366 if (dead) {
4367 spin_lock_irqsave(&host->lock, flags);
4368
4369 host->flags |= SDHCI_DEVICE_DEAD;
4370
4371 if (sdhci_has_requests(host)) {
4372 pr_err("%s: Controller removed during "
4373 " transfer!\n", mmc_hostname(mmc));
4374 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4375 }
4376
4377 spin_unlock_irqrestore(&host->lock, flags);
4378 }
4379
4380 sdhci_disable_card_detection(host);
4381
4382 mmc_remove_host(mmc);
4383
4384 sdhci_led_unregister(host);
4385
4386 if (!dead)
4387 sdhci_do_reset(host, SDHCI_RESET_ALL);
4388
4389 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4390 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4391 free_irq(host->irq, host);
4392
4393 del_timer_sync(&host->timer);
4394 del_timer_sync(&host->data_timer);
4395
4396 destroy_workqueue(host->complete_wq);
4397
4398 if (!IS_ERR(mmc->supply.vqmmc))
4399 regulator_disable(mmc->supply.vqmmc);
4400
4401 if (host->align_buffer)
4402 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4403 host->adma_table_sz, host->align_buffer,
4404 host->align_addr);
4405
4406 host->adma_table = NULL;
4407 host->align_buffer = NULL;
4408 }
4409
4410 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4411
4412 void sdhci_free_host(struct sdhci_host *host)
4413 {
4414 mmc_free_host(host->mmc);
4415 }
4416
4417 EXPORT_SYMBOL_GPL(sdhci_free_host);
4418
4419
4420
4421
4422
4423
4424
4425 static int __init sdhci_drv_init(void)
4426 {
4427 pr_info(DRIVER_NAME
4428 ": Secure Digital Host Controller Interface driver\n");
4429 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4430
4431 return 0;
4432 }
4433
4434 static void __exit sdhci_drv_exit(void)
4435 {
4436 }
4437
4438 module_init(sdhci_drv_init);
4439 module_exit(sdhci_drv_exit);
4440
4441 module_param(debug_quirks, uint, 0444);
4442 module_param(debug_quirks2, uint, 0444);
4443
4444 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4445 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4446 MODULE_LICENSE("GPL");
4447
4448 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4449 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");