This source file includes following definitions.
- mmc_cs_off
- mmc_spi_readbytes
- mmc_spi_skip
- mmc_spi_wait_unbusy
- mmc_spi_readtoken
- maptype
- mmc_spi_response_get
- mmc_spi_command_send
- mmc_spi_setup_data_message
- mmc_spi_writeblock
- mmc_spi_readblock
- mmc_spi_data_do
- mmc_spi_request
- mmc_spi_initsequence
- mmc_powerstring
- mmc_spi_set_ios
- mmc_spi_detect_irq
- mmc_spi_probe
- mmc_spi_remove
1
2
3
4
5
6
7
8
9
10
11
12
13 #include <linux/sched.h>
14 #include <linux/delay.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
17 #include <linux/bio.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/crc7.h>
20 #include <linux/crc-itu-t.h>
21 #include <linux/scatterlist.h>
22
23 #include <linux/mmc/host.h>
24 #include <linux/mmc/mmc.h>
25 #include <linux/mmc/slot-gpio.h>
26
27 #include <linux/spi/spi.h>
28 #include <linux/spi/mmc_spi.h>
29
30 #include <asm/unaligned.h>
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
/* The SPI data-response token carries its code in the low five bits:
 * a leading 0 bit, a 3-bit status, and a trailing 1 bit -> (code<<1)|1. */
#define SPI_MMC_RESPONSE_CODE(x)	((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED		((2 << 1)|1)	/* data accepted */
#define SPI_RESPONSE_CRC_ERR		((5 << 1)|1)	/* rejected: CRC error */
#define SPI_RESPONSE_WRITE_ERR		((6 << 1)|1)	/* rejected: write error */

/* Data-phase tokens (SD/MMC SPI mode): a start token precedes each
 * data block; STOP_TRAN terminates a multiblock write. */
#define SPI_TOKEN_SINGLE	0xfe	/* single block r/w, multiblock read */
#define SPI_TOKEN_MULTI_WRITE	0xfc	/* multiblock write */
#define SPI_TOKEN_STOP_TRAN	0xfd	/* terminate multiblock write */

/* MMC-over-SPI transfers use 512 byte blocks */
#define MMC_SPI_BLOCKSIZE	512

/* Upper bound on waiting out the busy signalling after an R1B
 * response; presumably generous versus real card worst cases. */
#define r1b_timeout		(HZ * 3)

/* Cap on blocks per request; bounds max_segs/max_req_size/max_blk_count
 * set up in probe(). */
#define MMC_SPI_BLOCKSATONCE	128
97
98
99
100
101
102
103
104
/* Scratch buffer used (and DMA-mapped, when possible) for command
 * frames, responses, status polling and data-phase tokens/CRCs.
 * Members are located via offsetof() when setting up DMA transfers,
 * so the layout is significant. */
struct scratch {
	u8			status[29];	/* command frame / response / busy bytes */
	u8			data_token;	/* start-of-block token for writes */
	__be16			crc_val;	/* CRC16: sent on write, received on read */
};
110
/* Per-slot driver state, allocated as mmc_priv() of the mmc_host. */
struct mmc_spi_host {
	struct mmc_host		*mmc;		/* MMC core handle */
	struct spi_device	*spi;		/* underlying SPI slave device */

	unsigned char		power_mode;	/* last MMC_POWER_* applied in set_ios */
	u16			powerup_msecs;	/* post-power-up delay, capped at 250 in probe */

	struct mmc_spi_platform_data	*pdata;	/* board hooks: power, CD irq, caps */

	/* Transfers and message rebuilt for each command/data phase */
	struct spi_transfer	token, t, crc, early_status;
	struct spi_message	m;

	/* Prebuilt message used by mmc_spi_readbytes() to poll the card */
	struct spi_transfer	status;
	struct spi_message	readback;

	/* SPI controller's DMA-capable parent device, or NULL */
	struct device		*dma_dev;

	/* Scratch space for commands/responses, DMA-mapped when dma_dev set */
	struct scratch		*data;
	dma_addr_t		data_dma;

	/* One block's worth of 0xff bytes: SPI is full duplex, so this is
	 * what we clock out whenever we only want to receive. */
	void			*ones;
	dma_addr_t		ones_dma;
};
142
143
144
145
146
147
148
149
/* Deselect the card: re-running spi_setup() is relied on to leave the
 * chip select deasserted for this device. */
static inline int mmc_cs_off(struct mmc_spi_host *host)
{
	/* chipselect will always be inactive after setup() */
	return spi_setup(host->spi);
}
155
/* Clock out @len bytes of all-ones while capturing the card's output
 * into host->data->status[], using the prebuilt "readback" message.
 * Returns 0 or a negative errno from the SPI core.
 */
static int
mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
{
	int status;

	/* NOTE(review): the bound is sizeof(struct scratch), while the
	 * rx buffer is the status[] member; a len between sizeof(status)
	 * and sizeof(scratch) would clobber data_token/crc_val.  All
	 * current callers pass len <= sizeof(status) — verify if adding
	 * callers. */
	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	/* hand the scratch buffer to the device before the transfer and
	 * back to the CPU afterwards, when the controller does DMA */
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
			host->data_dma, sizeof(*host->data),
			DMA_FROM_DEVICE);

	status = spi_sync_locked(host->spi, &host->readback);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
			host->data_dma, sizeof(*host->data),
			DMA_FROM_DEVICE);

	return status;
}
182
/* Poll the card, @n bytes at a time, until some byte other than @byte
 * shows up.  Returns that first differing byte (>= 0), a negative errno
 * from the SPI layer, or -ETIMEDOUT after @timeout jiffies.
 */
static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
			unsigned n, u8 byte)
{
	u8 *cp = host->data->status;	/* where readbytes() deposits data */
	unsigned long start = jiffies;

	while (1) {
		int status;
		unsigned i;

		status = mmc_spi_readbytes(host, n);
		if (status < 0)
			return status;

		for (i = 0; i < n; i++) {
			if (cp[i] != byte)
				return cp[i];
		}

		if (time_is_before_jiffies(start + timeout))
			break;

		/* If we need long timeouts, we may have been spinning for
		 * a while; be nice to other tasks once the first jiffy of
		 * busy-waiting has elapsed.
		 */
		if (time_is_before_jiffies(start + 1))
			schedule();
	}
	return -ETIMEDOUT;
}
214
/* Wait until the card stops signalling busy (stops sending 0x00 bytes),
 * scanning a full status buffer per poll. */
static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}
220
/* Wait for a data-phase token: the bus idles at 0xff until the card
 * sends its (non-0xff) start-of-block token.  Returns the token byte
 * or a negative error. */
static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, 1, 0xff);
}
225
226
227
228
229
230
231
232
233
234
235
236 static char *maptype(struct mmc_command *cmd)
237 {
238 switch (mmc_spi_resp_type(cmd)) {
239 case MMC_RSP_SPI_R1: return "R1";
240 case MMC_RSP_SPI_R1B: return "R1B";
241 case MMC_RSP_SPI_R2: return "R2/R5";
242 case MMC_RSP_SPI_R3: return "R3/R4/R7";
243 default: return "?";
244 }
245 }
246
247
/* Read and decode the card's SPI-format response for @cmd.
 *
 * The command frame plus trailing all-ones bytes have just been clocked
 * out; host->data->status holds what the card sent back during those
 * bytes.  Scan past the 0xff filler to find the R1 status byte (some
 * cards return it shifted by a few bits), then collect any remaining
 * response bytes (R2/R3/R7) or wait out busy signalling (R1B).
 *
 * Returns zero or a negative error.  On success with @cs_on set, the
 * chip select is left asserted for the data phase that follows;
 * otherwise the card is deselected.
 */
static int mmc_spi_response_get(struct mmc_spi_host *host,
		struct mmc_command *cmd, int cs_on)
{
	u8 *cp = host->data->status;
	u8 *end = cp + host->t.len;
	int value = 0;
	int bitshift;		/* nonzero for bit-misaligned responses */
	u8 leftover = 0;	/* carry bits between misaligned bytes */
	unsigned short rotator;
	int i;
	char tag[32];

	snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
		cmd->opcode, maptype(cmd));

	/* Except for data block reads, the whole response will already
	 * be stored in the scratch buffer.  It's somewhere after the
	 * 7-byte command frame; skip that, then scan past 0xff filler
	 * to the first response byte.
	 */
	cp += 8;
	while (cp < end && *cp == 0xff)
		cp++;

	/* Nothing in the buffer?  Poll for the response one byte at a
	 * time (cards may take a variable number of all-ones bytes
	 * before answering; the loop bound caps that at 14 more reads).
	 */
	if (cp == end) {
		cp = host->data->status;
		end = cp+1;

		for (i = 2; i < 16; i++) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			if (*cp != 0xff)
				goto checkstatus;
		}
		value = -ETIMEDOUT;
		goto done;
	}

checkstatus:
	bitshift = 0;
	/* R1 always has its MSB clear; a set MSB means the card shifted
	 * its response by some bits — realign via a 16-bit rotator. */
	if (*cp & 0x80) {
		rotator = *cp++ << 8;
		/* pull in the next byte, refilling the buffer if needed */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		rotator |= *cp++;
		/* shift until the leading 0 bit reaches the MSB */
		while (rotator & 0x8000) {
			bitshift++;
			rotator <<= 1;
		}
		cmd->resp[0] = rotator >> 8;
		leftover = rotator;	/* low bits belong to the next byte */
	} else {
		cmd->resp[0] = *cp++;
	}
	cmd->error = 0;

	/* Map R1 error bits to errno values */
	if (cmd->resp[0] != 0) {
		if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
				& cmd->resp[0])
			value = -EFAULT;
		else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
			value = -ENOSYS;
		else if (R1_SPI_COM_CRC & cmd->resp[0])
			value = -EILSEQ;
		else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
				& cmd->resp[0])
			value = -EIO;
		/* else bits like R1_SPI_IDLE are not errors here */
	}

	switch (mmc_spi_resp_type(cmd)) {

	/* SPI R1B == R1 + busy signalling (all-zero bytes); e.g.
	 * STOP_TRANSMISSION and erase operations.
	 */
	case MMC_RSP_SPI_R1B:
		/* maybe the buffered bytes already cover the busy period */
		while (cp < end && *cp == 0)
			cp++;
		if (cp == end)
			mmc_spi_wait_unbusy(host, r1b_timeout);
		break;

	/* SPI R2 == R1 + a second status byte (also used for R5) */
	case MMC_RSP_SPI_R2:
		/* read the next byte if the buffer is exhausted */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		if (bitshift) {
			rotator = leftover << 8;
			rotator |= *cp << bitshift;
			cmd->resp[0] |= (rotator & 0xFF00);
		} else {
			cmd->resp[0] |= *cp << 8;
		}
		break;

	/* SPI R3 == R1 + 4 data bytes (also R4/R7) */
	case MMC_RSP_SPI_R3:
		rotator = leftover << 8;
		cmd->resp[1] = 0;
		for (i = 0; i < 4; i++) {
			cmd->resp[1] <<= 8;
			/* refill the one-byte buffer as needed */
			if (cp == end) {
				value = mmc_spi_readbytes(host, 1);
				if (value < 0)
					goto done;
				cp = host->data->status;
				end = cp+1;
			}
			if (bitshift) {
				rotator |= *cp++ << bitshift;
				cmd->resp[1] |= (rotator >> 8);
				rotator <<= 8;
			} else {
				cmd->resp[1] |= *cp++;
			}
		}
		break;

	/* SPI R1 == just the status byte already captured */
	case MMC_RSP_SPI_R1:
		break;

	default:
		dev_dbg(&host->spi->dev, "bad response type %04x\n",
			mmc_spi_resp_type(cmd));
		if (value >= 0)
			value = -EINVAL;
		goto done;
	}

	if (value < 0)
		dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
			tag, cmd->resp[0], cmd->resp[1]);

	/* keep chipselect asserted only on success when a data phase follows */
	if (value >= 0 && cs_on)
		return value;
done:
	if (value < 0)
		cmd->error = value;
	mmc_cs_off(host);
	return value;
}
423
424
425
426
427
428
429
/* Build and transmit one command frame, then decode its response.
 *
 * The whole exchange is staged in the scratch buffer and clocked out as
 * a single full-duplex transfer: one 0xff lead-in byte, the six command
 * bytes (0x40|opcode, 32-bit big-endian arg, CRC7 with end bit), then
 * enough 0xff bytes to cover the card's response delay plus the
 * response itself.  Whatever the card drives during those bytes lands
 * back in the same buffer for mmc_spi_response_get() to parse.
 *
 * @cs_on: keep chip select asserted afterwards (a data phase follows).
 */
static int
mmc_spi_command_send(struct mmc_spi_host *host,
		struct mmc_request *mrq,
		struct mmc_command *cmd, int cs_on)
{
	struct scratch *data = host->data;
	u8 *cp = data->status;
	int status;
	struct spi_transfer *t;

	/* All-ones everywhere the card should see MOSI high */
	memset(cp, 0xff, sizeof(data->status));

	cp[1] = 0x40 | cmd->opcode;		/* start + transmission bits */
	put_unaligned_be32(cmd->arg, cp + 2);
	cp[6] = crc7_be(0, cp + 1, 5) | 0x01;	/* CRC7 + end bit */
	cp += 7;

	/* Size the rest of the transfer by response type:
	 *  - before a data READ, stop just past the minimum delay plus
	 *    status byte, so a fixed-size read can't eat the start of
	 *    the data block;
	 *  - otherwise allow the maximum response delay (+10), plus one
	 *    extra byte for R2, four for R3/R4/R7, or the whole buffer
	 *    for R1B so busy bytes are captured in the same shot.
	 */
	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
		cp += 2;
		/* R1 only */
	} else {
		cp += 10;
		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
			cp++;
		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
			cp += 4;
		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
			cp = data->status + sizeof(data->status);
		/* else: plain R1, most commands */
	}

	dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n",
		cmd->opcode, maptype(cmd));

	/* send command, leaving chipselect active */
	spi_message_init(&host->m);

	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = t->rx_buf = data->status;
	t->tx_dma = t->rx_dma = host->data_dma;
	t->len = cp - data->status;
	t->cs_change = 1;
	spi_message_add_tail(t, &host->m);

	if (host->dma_dev) {
		host->m.is_dma_mapped = 1;
		dma_sync_single_for_device(host->dma_dev,
			host->data_dma, sizeof(*host->data),
			DMA_BIDIRECTIONAL);
	}
	status = spi_sync_locked(host->spi, &host->m);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
			host->data_dma, sizeof(*host->data),
			DMA_BIDIRECTIONAL);
	if (status < 0) {
		dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* chipselect still asserted; response is in the scratch buffer */
	return mmc_spi_response_get(host, cmd, cs_on);
}
544
545
546
547
548
549
550
551
552
553
554
555
/* Build host->m for one data block in @direction:
 *
 *   [write only: start token] + payload (host->t) + CRC16 +
 *   [write, or multiblock read: early status/busy capture]
 *
 * The caller later points host->t at the actual data buffer and sets
 * its length; everything else here references the scratch buffer or
 * the all-ones buffer, with DMA addresses filled in when the
 * controller can DMA.
 */
static void
mmc_spi_setup_data_message(
	struct mmc_spi_host	*host,
	int			multiple,
	enum dma_data_direction	direction)
{
	struct spi_transfer	*t;
	struct scratch		*scratch = host->data;
	dma_addr_t		dma = host->data_dma;

	spi_message_init(&host->m);
	if (dma)
		host->m.is_dma_mapped = 1;

	/* for reads, readblock() skips leading all-ones bytes itself;
	 * for writes, this first transfer issues the start token */
	if (direction == DMA_TO_DEVICE) {
		t = &host->token;
		memset(t, 0, sizeof(*t));
		t->len = 1;
		if (multiple)
			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
		else
			scratch->data_token = SPI_TOKEN_SINGLE;
		t->tx_buf = &scratch->data_token;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, data_token);
		spi_message_add_tail(t, &host->m);
	}

	/* Body of the transfer is the data buffer, then CRC; either
	 * TX-only (write) or RX while transmitting all-ones (read).
	 * Buffer pointer/length for host->t are filled in by the caller.
	 */
	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = host->ones;
	t->tx_dma = host->ones_dma;

	spi_message_add_tail(t, &host->m);

	t = &host->crc;
	memset(t, 0, sizeof(*t));
	t->len = 2;
	if (direction == DMA_TO_DEVICE) {
		/* the actual CRC value is written just before the transfer */
		t->tx_buf = &scratch->crc_val;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, crc_val);
	} else {
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = &scratch->crc_val;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, crc_val);
	}
	spi_message_add_tail(t, &host->m);

	/* For writes: the one-byte data response follows immediately,
	 * then busy signalling; grab a whole status buffer's worth so
	 * writeblock() can usually avoid a separate unbusy poll.
	 * For multiblock reads: capture one extra byte so a STOP command
	 * can be issued before the next block's token arrives.
	 */
	if (multiple || direction == DMA_TO_DEVICE) {
		t = &host->early_status;
		memset(t, 0, sizeof(*t));
		t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = scratch->status;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, status);
		t->cs_change = 1;
		spi_message_add_tail(t, &host->m);
	}
}
641
642
643
644
645
646
647
648
649
650
651
652
653
/* Write one data block from t->tx_buf (t->len bytes) using the message
 * prepared by mmc_spi_setup_data_message(): token + payload + CRC +
 * early status capture.  Decodes the card's data-response token and, if
 * the early status bytes don't already show not-busy, polls until the
 * card releases busy.  Advances t->tx_buf (and tx_dma) past the block
 * on success.  Returns 0 or a negative error.
 */
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device *spi = host->spi;
	int status, i;
	struct scratch *scratch = host->data;
	u32 pattern;

	if (host->mmc->use_spi_crc)
		scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
			host->data_dma, sizeof(*scratch),
			DMA_BIDIRECTIONAL);

	status = spi_sync_locked(spi, &host->m);

	if (status != 0) {
		dev_dbg(&spi->dev, "write error (%d)\n", status);
		return status;
	}

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
			host->data_dma, sizeof(*scratch),
			DMA_BIDIRECTIONAL);

	/* The data-response token should follow immediately after the
	 * block, but some cards shift it by a few bits; normalize the
	 * first four status bytes so the 5-bit token ends up in the low
	 * bits of "pattern".
	 */
	pattern = get_unaligned_be32(scratch->status);

	/* undefined leading bits: force them high so the left-align
	 * loop below stops at the token's leading 0 bit */
	pattern |= 0xE0000000;

	/* left-align the token's leading 0 bit ... */
	while (pattern & 0x80000000)
		pattern <<= 1;
	/* ... then keep just the top five bits (the token) */
	pattern >>= 27;

	switch (pattern) {
	case SPI_RESPONSE_ACCEPTED:
		status = 0;
		break;
	case SPI_RESPONSE_CRC_ERR:
		/* data rejected; host may retry the block */
		status = -EILSEQ;
		break;
	case SPI_RESPONSE_WRITE_ERR:
		/* card rejected the write for an internal reason */
		status = -EIO;
		break;
	default:
		status = -EPROTO;
		break;
	}
	if (status != 0) {
		dev_dbg(&spi->dev, "write error %02x (%d)\n",
			scratch->status[0], status);
		return status;
	}

	t->tx_buf += t->len;
	if (host->dma_dev)
		t->tx_dma += t->len;

	/* Busy shows up as zero bits; any set bit in the remaining early
	 * status bytes means the card is already done, so a separate
	 * unbusy poll isn't needed.
	 */
	for (i = 4; i < sizeof(scratch->status); i++) {
		if (scratch->status[i] & 0x01)
			return 0;
	}
	return mmc_spi_wait_unbusy(host, timeout);
}
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
/* Read one data block into t->rx_buf (t->len bytes): find the start
 * token (tolerating bit-shifted tokens), clock in the payload plus
 * CRC16 via the prebuilt message, realign the data if the token was
 * misaligned, and verify the CRC when the core enabled SPI CRCs.
 * Advances t->rx_buf (and rx_dma) past the block on success.
 */
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device *spi = host->spi;
	int status;
	struct scratch *scratch = host->data;
	unsigned int bitshift;
	u8 leftover;

	/* The command's response may have left one extra byte in the
	 * scratch buffer; a 0x00 here is also tolerated (some cards
	 * apparently send one before the all-ones filler).
	 */
	status = mmc_spi_readbytes(host, 1);
	if (status < 0)
		return status;
	status = scratch->status[0];
	if (status == 0xff || status == 0)
		status = mmc_spi_readtoken(host, timeout);

	if (status < 0) {
		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
		return status;
	}

	/* The token's leading bits may still be 1s if the card shifted
	 * its output; count them to learn the misalignment, and keep the
	 * bits that already belong to the data block.
	 */
	bitshift = 7;
	while (status & 0x80) {
		status <<= 1;
		bitshift--;
	}
	leftover = status << 1;

	if (host->dma_dev) {
		dma_sync_single_for_device(host->dma_dev,
			host->data_dma, sizeof(*scratch),
			DMA_BIDIRECTIONAL);
		dma_sync_single_for_device(host->dma_dev,
			t->rx_dma, t->len,
			DMA_FROM_DEVICE);
	}

	status = spi_sync_locked(spi, &host->m);
	if (status < 0) {
		dev_dbg(&spi->dev, "read error %d\n", status);
		return status;
	}

	if (host->dma_dev) {
		dma_sync_single_for_cpu(host->dma_dev,
			host->data_dma, sizeof(*scratch),
			DMA_BIDIRECTIONAL);
		dma_sync_single_for_cpu(host->dma_dev,
			t->rx_dma, t->len,
			DMA_FROM_DEVICE);
	}

	if (bitshift) {
		/* Walk through the data and the CRC, shifting every byte
		 * left by "bitshift" bits and carrying the displaced bits
		 * forward, to recover byte-aligned data.
		 */
		u8 *cp = t->rx_buf;
		unsigned int len;
		unsigned int bitright = 8 - bitshift;
		u8 temp;
		for (len = t->len; len; len--) {
			temp = *cp;
			*cp++ = leftover | (temp >> bitshift);
			leftover = temp << bitright;
		}
		cp = (u8 *) &scratch->crc_val;
		temp = *cp;
		*cp++ = leftover | (temp >> bitshift);
		leftover = temp << bitright;
		temp = *cp;
		*cp = leftover | (temp >> bitshift);
	}

	if (host->mmc->use_spi_crc) {
		u16 crc = crc_itu_t(0, t->rx_buf, t->len);

		be16_to_cpus(&scratch->crc_val);
		if (scratch->crc_val != crc) {
			dev_dbg(&spi->dev,
				"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
				scratch->crc_val, crc, t->len);
			return -EILSEQ;
		}
	}

	t->rx_buf += t->len;
	if (host->dma_dev)
		t->rx_dma += t->len;

	return 0;
}
858
859
860
861
862
863
/* Run the data phase for @cmd: transfer all of @data's scatterlist in
 * @blk_size chunks, reading or writing per data->flags.  Errors are
 * reported via data->error.  Multiblock writes finish with a STOP_TRAN
 * token followed by an unbusy wait.
 */
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
		struct mmc_data *data, u32 blk_size)
{
	struct spi_device	*spi = host->spi;
	struct device		*dma_dev = host->dma_dev;
	struct spi_transfer	*t;
	enum dma_data_direction	direction;
	struct scatterlist	*sg;
	unsigned		n_sg;
	int			multiple = (data->blocks > 1);
	u32			clock_rate;
	unsigned long		timeout;

	direction = mmc_get_dma_dir(data);
	mmc_spi_setup_data_message(host, multiple, direction);
	t = &host->t;

	if (t->speed_hz)
		clock_rate = t->speed_hz;
	else
		clock_rate = spi->max_speed_hz;

	/* convert the card's ns + clocks timeout budget to jiffies */
	timeout = data->timeout_ns +
		  data->timeout_clks * 1000000 / clock_rate;
	timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;

	/* Handle scatterlist segments one at a time, with synchronous
	 * block I/O inside each; point host->t at each segment in turn.
	 */
	for_each_sg(data->sg, sg, data->sg_len, n_sg) {
		int			status = 0;
		dma_addr_t		dma_addr = 0;
		void			*kmap_addr;
		unsigned		length = sg->length;
		enum dma_data_direction	dir = direction;

		/* set up dma mapping for controller drivers that might
		 * use DMA ... though they may fall back to PIO
		 */
		if (dma_dev) {
			/* partial-page reads also need CPU writability for
			 * the realignment pass, hence BIDIRECTIONAL */
			if ((sg->offset != 0 || length != PAGE_SIZE)
					&& dir == DMA_FROM_DEVICE)
				dir = DMA_BIDIRECTIONAL;

			dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
						PAGE_SIZE, dir);
			if (dma_mapping_error(dma_dev, dma_addr)) {
				data->error = -EFAULT;
				break;
			}
			if (direction == DMA_TO_DEVICE)
				t->tx_dma = dma_addr + sg->offset;
			else
				t->rx_dma = dma_addr + sg->offset;
		}

		/* allow pio too; highmem pages need kmap */
		kmap_addr = kmap(sg_page(sg));
		if (direction == DMA_TO_DEVICE)
			t->tx_buf = kmap_addr + sg->offset;
		else
			t->rx_buf = kmap_addr + sg->offset;

		/* transfer each block, advancing buffers through the segment */
		while (length) {
			t->len = min(length, blk_size);

			dev_dbg(&host->spi->dev,
				" mmc_spi: %s block, %d bytes\n",
				(direction == DMA_TO_DEVICE) ? "write" : "read",
				t->len);

			if (direction == DMA_TO_DEVICE)
				status = mmc_spi_writeblock(host, t, timeout);
			else
				status = mmc_spi_readblock(host, t, timeout);
			if (status < 0)
				break;

			data->bytes_xfered += t->len;
			length -= t->len;

			if (!multiple)
				break;
		}

		/* discard mappings */
		if (direction == DMA_FROM_DEVICE)
			flush_kernel_dcache_page(sg_page(sg));
		kunmap(sg_page(sg));
		if (dma_dev)
			dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);

		if (status < 0) {
			data->error = status;
			dev_dbg(&spi->dev, "%s status %d\n",
				(direction == DMA_TO_DEVICE) ? "write" : "read",
				status);
			break;
		}
	}

	/* For multiblock writes the card expects a STOP_TRAN token after
	 * the last block, then signals busy.  Reuse the early_status
	 * transfer (as TX of the scratch status buffer, which now holds
	 * the token followed by all-ones) and capture the busy bytes in
	 * the same shot.
	 */
	if (direction == DMA_TO_DEVICE && multiple) {
		struct scratch	*scratch = host->data;
		int		tmp;
		const unsigned	statlen = sizeof(scratch->status);

		dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n");

		/* rebuild the message around just the early_status transfer */
		INIT_LIST_HEAD(&host->m.transfers);
		list_add(&host->early_status.transfer_list,
				&host->m.transfers);

		memset(scratch->status, 0xff, statlen);
		scratch->status[0] = SPI_TOKEN_STOP_TRAN;

		host->early_status.tx_buf = host->early_status.rx_buf;
		host->early_status.tx_dma = host->early_status.rx_dma;
		host->early_status.len = statlen;

		if (host->dma_dev)
			dma_sync_single_for_device(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		tmp = spi_sync_locked(spi, &host->m);

		if (host->dma_dev)
			dma_sync_single_for_cpu(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		if (tmp < 0) {
			if (!data->error)
				data->error = tmp;
			return;
		}

		/* Ideally being unbusy can be observed in the bytes we
		 * already captured; otherwise poll explicitly.
		 */
		for (tmp = 2; tmp < statlen; tmp++) {
			if (scratch->status[tmp] != 0)
				return;
		}
		tmp = mmc_spi_wait_unbusy(host, timeout);
		if (tmp < 0 && !data->error)
			data->error = tmp;
	}
}
1028
1029
1030
1031
1032
1033
1034
/* mmc_host_ops.request handler: send the command, run the data phase
 * (if any), then the STOP command or just deselect.  Runs with the SPI
 * bus locked so other devices' traffic can't interleave into the
 * middle of an MMC transaction.
 */
static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_spi_host *host = mmc_priv(mmc);
	int status = -EINVAL;
	int crc_retry = 5;	/* bounded retries on data CRC errors */
	struct mmc_command stop;

#ifdef DEBUG
	/* sanity check: the core must hand us SPI-mapped response types */
	{
		struct mmc_command *cmd;
		int invalid = 0;

		cmd = mrq->cmd;
		if (!mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		cmd = mrq->stop;
		if (cmd && !mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus STOP command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		if (invalid) {
			dump_stack();
			mmc_request_done(host->mmc, mrq);
			return;
		}
	}
#endif

	/* request exclusive bus access for the whole transaction */
	spi_bus_lock(host->spi->master);

crc_recover:
	/* issue command; then optionally data and stop */
	status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
	if (status == 0 && mrq->data) {
		mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);

		/* The SPI bus can be unreliable: on a data CRC error,
		 * stop the ongoing transfer with an explicit
		 * STOP_TRANSMISSION and retry the whole request, up to
		 * crc_retry times, before giving up.
		 */
		if (mrq->data->error == -EILSEQ && crc_retry) {
			stop.opcode = MMC_STOP_TRANSMISSION;
			stop.arg = 0;
			stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
			status = mmc_spi_command_send(host, mrq, &stop, 0);
			crc_retry--;
			mrq->data->error = 0;
			goto crc_recover;
		}

		if (mrq->stop)
			status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
		else
			mmc_cs_off(host);
	}

	/* release the bus */
	spi_bus_unlock(host->spi->master);

	mmc_request_done(host->mmc, mrq);
}
1107
1108
1109
1110
1111
1112
1113
1114
1115
/* Card power-on initialization: clear out stale state, then clock the
 * card with chip select deasserted so it is ready to accept commands.
 * (Cards want a run of clock cycles with CS and MOSI high before the
 * first command — hence the 10 + 18 bytes of all-ones clocked here;
 * exact cycle requirement is spec-based.)
 */
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
	/* Try to be very sure any previous command has completed; wait
	 * till the card is not busy, then flush leftover bytes. */
	mmc_spi_wait_unbusy(host, r1b_timeout);
	mmc_spi_readbytes(host, 10);

	/* Do a burst with chipselect deasserted: SPI_CS_HIGH inverts the
	 * chip-select polarity, so while the flag is toggled the
	 * controller holds CS inactive during the 18-byte read below.
	 * This assumes the controller honours SPI_CS_HIGH; if spi_setup()
	 * rejects the change, skip the burst and restore the mode.
	 */
	host->spi->mode ^= SPI_CS_HIGH;
	if (spi_setup(host->spi) != 0) {
		/* just warn; most cards work without it */
		dev_warn(&host->spi->dev,
				"can't change chip-select polarity\n");
		host->spi->mode ^= SPI_CS_HIGH;
	} else {
		mmc_spi_readbytes(host, 18);

		host->spi->mode ^= SPI_CS_HIGH;
		if (spi_setup(host->spi) != 0) {
			/* wot, we can't get the same setup we had before? */
			dev_err(&host->spi->dev,
					"can't restore chip-select polarity\n");
		}
	}
}
1160
1161 static char *mmc_powerstring(u8 power_mode)
1162 {
1163 switch (power_mode) {
1164 case MMC_POWER_OFF: return "off";
1165 case MMC_POWER_UP: return "up";
1166 case MMC_POWER_ON: return "on";
1167 }
1168 return "?";
1169 }
1170
/* mmc_host_ops.set_ios: apply power-mode and clock changes.  Actual
 * power switching only happens when the platform supplied a setpower
 * hook; the clock change just updates the SPI device's max speed.
 */
static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->power_mode != ios->power_mode) {
		int canpower;

		canpower = host->pdata && host->pdata->setpower;

		dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
				mmc_powerstring(ios->power_mode),
				ios->vdd,
				canpower ? ", can switch" : "");

		/* switch power on/off if possible, waiting out the
		 * (capped) powerup delay after turning on */
		if (canpower) {
			switch (ios->power_mode) {
			case MMC_POWER_OFF:
			case MMC_POWER_UP:
				host->pdata->setpower(&host->spi->dev,
						ios->vdd);
				if (ios->power_mode == MMC_POWER_UP)
					msleep(host->powerup_msecs);
			}
		}

		/* run the card's SPI-mode init sequence once powered */
		if (ios->power_mode == MMC_POWER_ON)
			mmc_spi_initsequence(host);

		/* If powering down, drive the card's inputs low so the
		 * card can't be back-powered through the data lines:
		 * switch to mode 0 (clock idles low), clock out a zero
		 * byte, and hold that for 10 ms before restoring the
		 * clock-idle-high mode.
		 * NOTE(review): the restore sets mode 3 unconditionally,
		 * even if the device was probed in mode 0 — verify this
		 * is intended for affected boards.
		 */
		if (canpower && ios->power_mode == MMC_POWER_OFF) {
			int mres;
			u8 nullbyte = 0;

			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
			mres = spi_setup(host->spi);
			if (mres < 0)
				dev_dbg(&host->spi->dev,
					"switch to SPI mode 0 failed\n");

			if (spi_write(host->spi, &nullbyte, 1) < 0)
				dev_dbg(&host->spi->dev,
					"put spi signals to low failed\n");

			msleep(10);
			if (mres == 0) {
				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
				mres = spi_setup(host->spi);
				if (mres < 0)
					dev_dbg(&host->spi->dev,
						"switch back to SPI mode 3 failed\n");
			}
		}

		host->power_mode = ios->power_mode;
	}

	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
		int		status;

		host->spi->max_speed_hz = ios->clock;
		status = spi_setup(host->spi);
		dev_dbg(&host->spi->dev,
			"mmc_spi: clock to %d Hz, %d\n",
			host->spi->max_speed_hz, status);
	}
}
1258
/* Host operations.  Card-detect and write-protect sensing are handled
 * by the MMC core's GPIO helpers (descriptors requested in probe). */
static const struct mmc_host_ops mmc_spi_ops = {
	.request	= mmc_spi_request,
	.set_ios	= mmc_spi_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
};
1265
1266
1267
1268
1269
1270
1271
1272
/* Card-detect interrupt handler registered through pdata->init(); asks
 * the MMC core to rescan after a debounce delay of at least 100 ms. */
static irqreturn_t
mmc_spi_detect_irq(int irq, void *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);
	u16 delay_msec = max(host->pdata->detect_delay, (u16)100);

	mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
	return IRQ_HANDLED;
}
1282
1283 static int mmc_spi_probe(struct spi_device *spi)
1284 {
1285 void *ones;
1286 struct mmc_host *mmc;
1287 struct mmc_spi_host *host;
1288 int status;
1289 bool has_ro = false;
1290
1291
1292
1293
1294 if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
1295 return -EINVAL;
1296
1297
1298
1299
1300
1301
1302
1303 if (spi->mode != SPI_MODE_3)
1304 spi->mode = SPI_MODE_0;
1305 spi->bits_per_word = 8;
1306
1307 status = spi_setup(spi);
1308 if (status < 0) {
1309 dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
1310 spi->mode, spi->max_speed_hz / 1000,
1311 status);
1312 return status;
1313 }
1314
1315
1316
1317
1318
1319
1320
1321 status = -ENOMEM;
1322 ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
1323 if (!ones)
1324 goto nomem;
1325 memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
1326
1327 mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
1328 if (!mmc)
1329 goto nomem;
1330
1331 mmc->ops = &mmc_spi_ops;
1332 mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1333 mmc->max_segs = MMC_SPI_BLOCKSATONCE;
1334 mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
1335 mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
1336
1337 mmc->caps = MMC_CAP_SPI;
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347 mmc->f_min = 400000;
1348 mmc->f_max = spi->max_speed_hz;
1349
1350 host = mmc_priv(mmc);
1351 host->mmc = mmc;
1352 host->spi = spi;
1353
1354 host->ones = ones;
1355
1356
1357
1358
1359 host->pdata = mmc_spi_get_pdata(spi);
1360 if (host->pdata)
1361 mmc->ocr_avail = host->pdata->ocr_mask;
1362 if (!mmc->ocr_avail) {
1363 dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
1364 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1365 }
1366 if (host->pdata && host->pdata->setpower) {
1367 host->powerup_msecs = host->pdata->powerup_msecs;
1368 if (!host->powerup_msecs || host->powerup_msecs > 250)
1369 host->powerup_msecs = 250;
1370 }
1371
1372 dev_set_drvdata(&spi->dev, mmc);
1373
1374
1375 host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
1376 if (!host->data)
1377 goto fail_nobuf1;
1378
1379 if (spi->master->dev.parent->dma_mask) {
1380 struct device *dev = spi->master->dev.parent;
1381
1382 host->dma_dev = dev;
1383 host->ones_dma = dma_map_single(dev, ones,
1384 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1385 if (dma_mapping_error(dev, host->ones_dma))
1386 goto fail_ones_dma;
1387 host->data_dma = dma_map_single(dev, host->data,
1388 sizeof(*host->data), DMA_BIDIRECTIONAL);
1389 if (dma_mapping_error(dev, host->data_dma))
1390 goto fail_data_dma;
1391
1392 dma_sync_single_for_cpu(host->dma_dev,
1393 host->data_dma, sizeof(*host->data),
1394 DMA_BIDIRECTIONAL);
1395 }
1396
1397
1398 spi_message_init(&host->readback);
1399 host->readback.is_dma_mapped = (host->dma_dev != NULL);
1400
1401 spi_message_add_tail(&host->status, &host->readback);
1402 host->status.tx_buf = host->ones;
1403 host->status.tx_dma = host->ones_dma;
1404 host->status.rx_buf = &host->data->status;
1405 host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
1406 host->status.cs_change = 1;
1407
1408
1409 if (host->pdata && host->pdata->init) {
1410 status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
1411 if (status != 0)
1412 goto fail_glue_init;
1413 }
1414
1415
1416 if (host->pdata) {
1417 mmc->caps |= host->pdata->caps;
1418 mmc->caps2 |= host->pdata->caps2;
1419 }
1420
1421 status = mmc_add_host(mmc);
1422 if (status != 0)
1423 goto fail_add_host;
1424
1425
1426
1427
1428
1429 status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1, NULL);
1430 if (status == -EPROBE_DEFER)
1431 goto fail_add_host;
1432 if (!status) {
1433
1434
1435
1436
1437
1438 mmc->caps &= ~MMC_CAP_NEEDS_POLL;
1439 mmc_gpiod_request_cd_irq(mmc);
1440 }
1441 mmc_detect_change(mmc, 0);
1442
1443
1444 status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
1445 if (status == -EPROBE_DEFER)
1446 goto fail_add_host;
1447 if (!status)
1448 has_ro = true;
1449
1450 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
1451 dev_name(&mmc->class_dev),
1452 host->dma_dev ? "" : ", no DMA",
1453 has_ro ? "" : ", no WP",
1454 (host->pdata && host->pdata->setpower)
1455 ? "" : ", no poweroff",
1456 (mmc->caps & MMC_CAP_NEEDS_POLL)
1457 ? ", cd polling" : "");
1458 return 0;
1459
1460 fail_add_host:
1461 mmc_remove_host(mmc);
1462 fail_glue_init:
1463 if (host->dma_dev)
1464 dma_unmap_single(host->dma_dev, host->data_dma,
1465 sizeof(*host->data), DMA_BIDIRECTIONAL);
1466 fail_data_dma:
1467 if (host->dma_dev)
1468 dma_unmap_single(host->dma_dev, host->ones_dma,
1469 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1470 fail_ones_dma:
1471 kfree(host->data);
1472
1473 fail_nobuf1:
1474 mmc_free_host(mmc);
1475 mmc_spi_put_pdata(spi);
1476
1477 nomem:
1478 kfree(ones);
1479 return status;
1480 }
1481
1482
/* Tear down one slot: stop platform CD notifications, unregister from
 * the MMC core, undo DMA mappings, and free buffers/host state. */
static int mmc_spi_remove(struct spi_device *spi)
{
	struct mmc_host		*mmc = dev_get_drvdata(&spi->dev);
	struct mmc_spi_host	*host = mmc_priv(mmc);

	/* prevent new mmc_detect_change() calls from the platform glue */
	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&spi->dev, mmc);

	mmc_remove_host(mmc);

	if (host->dma_dev) {
		dma_unmap_single(host->dma_dev, host->ones_dma,
			MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
		dma_unmap_single(host->dma_dev, host->data_dma,
			sizeof(*host->data), DMA_BIDIRECTIONAL);
	}

	kfree(host->data);
	kfree(host->ones);

	/* undo set_ios() clock changes: probe() saved the original
	 * max_speed_hz in mmc->f_max */
	spi->max_speed_hz = mmc->f_max;
	mmc_free_host(mmc);
	mmc_spi_put_pdata(spi);
	return 0;
}
1509
/* Device-tree binding for SPI-attached MMC/SD slots */
static const struct of_device_id mmc_spi_of_match_table[] = {
	{ .compatible = "mmc-spi-slot", },
	{},
};
MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);

static struct spi_driver mmc_spi_driver = {
	.driver = {
		.name =		"mmc_spi",
		.of_match_table = mmc_spi_of_match_table,
	},
	.probe =	mmc_spi_probe,
	.remove =	mmc_spi_remove,
};

module_spi_driver(mmc_spi_driver);

MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");