This source file includes the following definitions:
- clear_poll_bit
- gpmi_reset_block
- __gpmi_enable_clk
- gpmi_init
- gpmi_dump_info
- gpmi_check_ecc
- set_geometry_by_ecc_info
- get_ecc_strength
- legacy_set_geometry
- common_nfc_set_geometry
- bch_set_geometry
- gpmi_nfc_compute_timings
- gpmi_nfc_apply_timings
- gpmi_setup_data_interface
- gpmi_clear_bch
- get_dma_chan
- dma_irq_callback
- bch_irq
- gpmi_raw_len_to_len
- prepare_data_dma
- gpmi_copy_bits
- gpmi_ooblayout_ecc
- gpmi_ooblayout_free
- acquire_register_block
- acquire_bch_irq
- release_dma_channels
- acquire_dma_channels
- gpmi_get_clks
- acquire_resources
- release_resources
- gpmi_free_dma_buffer
- gpmi_alloc_dma_buffer
- block_mark_swapping
- gpmi_count_bitflips
- gpmi_bch_layout_std
- gpmi_ecc_read_page
- gpmi_ecc_read_subpage
- gpmi_ecc_write_page
- gpmi_ecc_read_oob
- gpmi_ecc_write_oob
- gpmi_ecc_read_page_raw
- gpmi_ecc_write_page_raw
- gpmi_ecc_read_oob_raw
- gpmi_ecc_write_oob_raw
- gpmi_block_markbad
- nand_boot_set_geometry
- mx23_check_transcription_stamp
- mx23_write_transcription_stamp
- mx23_boot_init
- nand_boot_init
- gpmi_set_geometry
- gpmi_init_last
- gpmi_nand_attach_chip
- get_next_transfer
- gpmi_chain_command
- gpmi_chain_wait_ready
- gpmi_chain_data_read
- gpmi_chain_data_write
- gpmi_nfc_exec_op
- gpmi_nand_init
- gpmi_nand_probe
- gpmi_nand_remove
- gpmi_pm_suspend
- gpmi_pm_resume
- gpmi_runtime_suspend
- gpmi_runtime_resume
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/sched/task_stack.h>
12 #include <linux/interrupt.h>
13 #include <linux/module.h>
14 #include <linux/mtd/partitions.h>
15 #include <linux/of.h>
16 #include <linux/of_device.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/dma/mxs-dma.h>
19 #include "gpmi-nand.h"
20 #include "gpmi-regs.h"
21 #include "bch-regs.h"
22
23
24 #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
25 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
26 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
27
28
29 #define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
30
31 #define MXS_SET_ADDR 0x4
32 #define MXS_CLR_ADDR 0x8
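/*
 * Clear @mask through the block's CLR address, then poll until the bit
 * reads back as zero.  Returns non-zero if the poll loop times out.
 */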
38 static int clear_poll_bit(void __iomem *addr, u32 mask)
39 {
40 int timeout = 0x400;
41
42
43 writel(mask, addr + MXS_CLR_ADDR);
49 udelay(1);
50
51
52 while ((readl(addr) & mask) && --timeout)
53 ;
54
55 return !timeout;
56 }
57
58 #define MODULE_CLKGATE (1 << 30)
59 #define MODULE_SFTRST (1 << 31)
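/*
 * Bring a GPMI/BCH register block out of reset using the usual MXS
 * sequence: clear SFTRST, ungate the clock, optionally pulse SFTRST again
 * (waiting for CLKGATE to assert), then clear SFTRST and CLKGATE.  With
 * @just_enable set, the soft-reset pulse is skipped and the block is only
 * ungated and enabled.
 */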
77 static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
78 {
79 int ret;
80 int timeout = 0x400;
81
82
83 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
84 if (unlikely(ret))
85 goto error;
86
87
88 writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
89
90 if (!just_enable) {
91
92 writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
93 udelay(1);
94
95
96 while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
97 ;
98 if (unlikely(!timeout))
99 goto error;
100 }
101
102
103 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
104 if (unlikely(ret))
105 goto error;
106
107
108 ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
109 if (unlikely(ret))
110 goto error;
111
112 return 0;
113
114 error:
115 pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
116 return -ETIMEDOUT;
117 }
118
119 static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
120 {
121 struct clk *clk;
122 int ret;
123 int i;
124
125 for (i = 0; i < GPMI_CLK_MAX; i++) {
126 clk = this->resources.clock[i];
127 if (!clk)
128 break;
129
130 if (v) {
131 ret = clk_prepare_enable(clk);
132 if (ret)
133 goto err_clk;
134 } else {
135 clk_disable_unprepare(clk);
136 }
137 }
138 return 0;
139
140 err_clk:
141 for (; i > 0; i--)
142 clk_disable_unprepare(this->resources.clock[i - 1]);
143 return ret;
144 }
145
146 static int gpmi_init(struct gpmi_nand_data *this)
147 {
148 struct resources *r = &this->resources;
149 int ret;
150
151 ret = pm_runtime_get_sync(this->dev);
152 if (ret < 0)
153 return ret;
154
155 ret = gpmi_reset_block(r->gpmi_regs, false);
156 if (ret)
157 goto err_out;
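/*
 * Reset the BCH block as well; on i.MX23/i.MX28 (GPMI_IS_MXS) it is only
 * re-enabled instead of being fully soft reset.
 */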
163 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
164 if (ret)
165 goto err_out;
166
167
168 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
169
170
171 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
172 r->gpmi_regs + HW_GPMI_CTRL1_SET);
173
174
175 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
176
177
178 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
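/*
 * Decouple the chip select from the DMA channel: the driver uses a single
 * DMA channel for all chip selects.
 */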
184 writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
185
186 err_out:
187 pm_runtime_mark_last_busy(this->dev);
188 pm_runtime_put_autosuspend(this->dev);
189 return ret;
190 }
191
192
193 static void gpmi_dump_info(struct gpmi_nand_data *this)
194 {
195 struct resources *r = &this->resources;
196 struct bch_geometry *geo = &this->bch_geometry;
197 u32 reg;
198 int i;
199
200 dev_err(this->dev, "Show GPMI registers :\n");
201 for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
202 reg = readl(r->gpmi_regs + i * 0x10);
203 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
204 }
205
206
207 dev_err(this->dev, "Show BCH registers :\n");
208 for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
209 reg = readl(r->bch_regs + i * 0x10);
210 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
211 }
212 dev_err(this->dev, "BCH Geometry :\n"
213 "GF length : %u\n"
214 "ECC Strength : %u\n"
215 "Page Size in Bytes : %u\n"
216 "Metadata Size in Bytes : %u\n"
217 "ECC Chunk Size in Bytes: %u\n"
218 "ECC Chunk Count : %u\n"
219 "Payload Size in Bytes : %u\n"
220 "Auxiliary Size in Bytes: %u\n"
221 "Auxiliary Status Offset: %u\n"
222 "Block Mark Byte Offset : %u\n"
223 "Block Mark Bit Offset : %u\n",
224 geo->gf_len,
225 geo->ecc_strength,
226 geo->page_size,
227 geo->metadata_size,
228 geo->ecc_chunk_size,
229 geo->ecc_chunk_count,
230 geo->payload_size,
231 geo->auxiliary_size,
232 geo->auxiliary_status_offset,
233 geo->block_mark_byte_offset,
234 geo->block_mark_bit_offset);
235 }
236
237 static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
238 {
239 struct bch_geometry *geo = &this->bch_geometry;
240
241
242 if (GPMI_IS_MXS(this)) {
243
244 if (geo->gf_len == 14)
245 return false;
246 }
247 return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
248 }
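/*
 * Derive the BCH geometry from an explicit ECC strength and step size
 * (either requested by the caller or reported by the chip), as opposed to
 * legacy_set_geometry(), which infers it from the OOB size.
 */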
256 static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
257 unsigned int ecc_strength,
258 unsigned int ecc_step)
259 {
260 struct bch_geometry *geo = &this->bch_geometry;
261 struct nand_chip *chip = &this->nand;
262 struct mtd_info *mtd = nand_to_mtd(chip);
263 unsigned int block_mark_bit_offset;
264
265 switch (ecc_step) {
266 case SZ_512:
267 geo->gf_len = 13;
268 break;
269 case SZ_1K:
270 geo->gf_len = 14;
271 break;
272 default:
273 dev_err(this->dev,
274 "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
275 chip->base.eccreq.strength,
276 chip->base.eccreq.step_size);
277 return -EINVAL;
278 }
279 geo->ecc_chunk_size = ecc_step;
280 geo->ecc_strength = round_up(ecc_strength, 2);
281 if (!gpmi_check_ecc(this))
282 return -EINVAL;
283
284
285 if (geo->ecc_chunk_size < mtd->oobsize) {
286 dev_err(this->dev,
287 "unsupported nand chip. ecc size: %d, oob size : %d\n",
288 ecc_step, mtd->oobsize);
289 return -EINVAL;
290 }
291
292
293 geo->metadata_size = 10;
294
295 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
296
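/*
 * A BCH-formatted page is the metadata followed by ecc_chunk_count chunks,
 * each carrying ecc_chunk_size data bytes plus gf_len * ecc_strength parity
 * bits, hence the division by 8 below.
 */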
345 geo->page_size = mtd->writesize + geo->metadata_size +
346 (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
347
348 geo->payload_size = mtd->writesize;
349
350 geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
351 geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
352 + ALIGN(geo->ecc_chunk_count, 4);
353
354 if (!this->swap_block_mark)
355 return 0;
356
357
358 block_mark_bit_offset = mtd->writesize * 8 -
359 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
360 + geo->metadata_size * 8);
361
362 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
363 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
364 return 0;
365 }
366
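/*
 * Compute the strongest ECC that fits: the OOB bytes left after the
 * metadata provide (oobsize - metadata_size) * 8 parity bits, shared by
 * ecc_chunk_count chunks at gf_len bits per level of correction.  The
 * result is rounded down to an even value, since the hardware encodes the
 * strength in steps of two.
 */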
385 static inline int get_ecc_strength(struct gpmi_nand_data *this)
386 {
387 struct bch_geometry *geo = &this->bch_geometry;
388 struct mtd_info *mtd = nand_to_mtd(&this->nand);
389 int ecc_strength;
390
391 ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
392 / (geo->gf_len * geo->ecc_chunk_count);
393
394
395 return round_down(ecc_strength, 2);
396 }
397
398 static int legacy_set_geometry(struct gpmi_nand_data *this)
399 {
400 struct bch_geometry *geo = &this->bch_geometry;
401 struct mtd_info *mtd = nand_to_mtd(&this->nand);
402 unsigned int metadata_size;
403 unsigned int status_size;
404 unsigned int block_mark_bit_offset;
405
411 geo->metadata_size = 10;
412
413
414 geo->gf_len = 13;
415
416
417 geo->ecc_chunk_size = 512;
418 while (geo->ecc_chunk_size < mtd->oobsize) {
419 geo->ecc_chunk_size *= 2;
420 geo->gf_len = 14;
421 }
422
423 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
424
425
426 geo->ecc_strength = get_ecc_strength(this);
427 if (!gpmi_check_ecc(this)) {
428 dev_err(this->dev,
429 "ecc strength: %d cannot be supported by the controller (%d)\n"
430 "try to use minimum ecc strength that NAND chip required\n",
431 geo->ecc_strength,
432 this->devdata->bch_max_ecc_strength);
433 return -EINVAL;
434 }
435
436 geo->page_size = mtd->writesize + geo->metadata_size +
437 (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
438 geo->payload_size = mtd->writesize;
439
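/*
 * The auxiliary buffer holds the metadata, padded to a 32-bit boundary,
 * followed by one ECC status byte per chunk (also padded to 32 bits).
 */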
446 metadata_size = ALIGN(geo->metadata_size, 4);
447 status_size = ALIGN(geo->ecc_chunk_count, 4);
448
449 geo->auxiliary_size = metadata_size + status_size;
450 geo->auxiliary_status_offset = metadata_size;
451
452 if (!this->swap_block_mark)
453 return 0;
454
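/*
 * Work out where the physical block mark (the first OOB byte) ends up once
 * the metadata, data chunks and interleaved parity bits are laid out by the
 * BCH, so that reads and writes can swap it with the data byte it displaces.
 */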
501 block_mark_bit_offset = mtd->writesize * 8 -
502 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
503 + geo->metadata_size * 8);
504
505 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
506 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
507 return 0;
508 }
509
510 static int common_nfc_set_geometry(struct gpmi_nand_data *this)
511 {
512 struct nand_chip *chip = &this->nand;
513
514 if (chip->ecc.strength > 0 && chip->ecc.size > 0)
515 return set_geometry_by_ecc_info(this, chip->ecc.strength,
516 chip->ecc.size);
517
518 if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
519 || legacy_set_geometry(this)) {
520 if (!(chip->base.eccreq.strength > 0 &&
521 chip->base.eccreq.step_size > 0))
522 return -EINVAL;
523
524 return set_geometry_by_ecc_info(this,
525 chip->base.eccreq.strength,
526 chip->base.eccreq.step_size);
527 }
528
529 return 0;
530 }
531
532
533 static int bch_set_geometry(struct gpmi_nand_data *this)
534 {
535 struct resources *r = &this->resources;
536 int ret;
537
538 ret = common_nfc_set_geometry(this);
539 if (ret)
540 return ret;
541
542 ret = pm_runtime_get_sync(this->dev);
543 if (ret < 0)
544 return ret;
545
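/*
 * Reset the BCH block; as in gpmi_init(), i.MX23/i.MX28 only get re-enabled
 * here rather than fully soft reset.
 */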
551 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
552 if (ret)
553 goto err_out;
554
555
556 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
557
558 ret = 0;
559 err_out:
560 pm_runtime_mark_last_busy(this->dev);
561 pm_runtime_put_autosuspend(this->dev);
562
563 return ret;
564 }
565
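/*
 * Translate the SDR timings requested by the NAND core into a GPMI clock
 * rate, TIMING0/TIMING1 cycle counts and CTRL1 delay fields.  The results
 * are only cached in this->hw; gpmi_nfc_apply_timings() writes them to the
 * hardware later, once the controller is clocked.
 */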
640 static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
641 const struct nand_sdr_timings *sdr)
642 {
643 struct gpmi_nfc_hardware_timing *hw = &this->hw;
644 unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
645 unsigned int period_ps, reference_period_ps;
646 unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
647 unsigned int tRP_ps;
648 bool use_half_period;
649 int sample_delay_ps, sample_delay_factor;
650 u16 busy_timeout_cycles;
651 u8 wrn_dly_sel;
652
653 if (sdr->tRC_min >= 30000) {
654
655 hw->clk_rate = 22000000;
656 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
657 } else if (sdr->tRC_min >= 25000) {
658
659 hw->clk_rate = 80000000;
660 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
661 } else {
662
663 hw->clk_rate = 100000000;
664 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
665 }
666
667
668 period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
669
670 addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
671 data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
672 data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
673 busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
674
675 hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
676 BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
677 BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
678 hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
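/*
 * Example with made-up numbers: at hw->clk_rate = 100 MHz the period is
 * 10000 ps, so a tDS_min of 25000 ps yields
 * TO_CYCLES(25000, 10000) = DIV_ROUND_UP(25000, 10000) = 3 setup cycles.
 */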
679
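/*
 * When the GPMI clock period exceeds the controller's maximum chain delay,
 * the sample delay is expressed in half periods (HALF_PERIOD) and the
 * reference period used below is halved accordingly.
 */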
687 if (period_ps > dll_threshold_ps) {
688 use_half_period = true;
689 reference_period_ps = period_ps / 2;
690 } else {
691 use_half_period = false;
692 reference_period_ps = period_ps;
693 }
694
695 tRP_ps = data_setup_cycles * period_ps;
696 sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
697 if (sample_delay_ps > 0)
698 sample_delay_factor = sample_delay_ps / reference_period_ps;
699 else
700 sample_delay_factor = 0;
701
702 hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
703 if (sample_delay_factor)
704 hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
705 BM_GPMI_CTRL1_DLL_ENABLE |
706 (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
707 }
708
709 static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
710 {
711 struct gpmi_nfc_hardware_timing *hw = &this->hw;
712 struct resources *r = &this->resources;
713 void __iomem *gpmi_regs = r->gpmi_regs;
714 unsigned int dll_wait_time_us;
715
716 clk_set_rate(r->clock[0], hw->clk_rate);
717
718 writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
719 writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
720
721
722
723
724
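/*
 * Clear the timing-related CTRL1 fields before setting the new values;
 * RDN_DELAY and HALF_PERIOD are not supposed to change while the DLL is
 * enabled.
 */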
725 writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
726 writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
727
728
729 dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
730 if (!dll_wait_time_us)
731 dll_wait_time_us = 1;
732
733
734 udelay(dll_wait_time_us);
735 }
736
737 static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
738 const struct nand_data_interface *conf)
739 {
740 struct gpmi_nand_data *this = nand_get_controller_data(chip);
741 const struct nand_sdr_timings *sdr;
742
743
744 sdr = nand_get_sdr_timings(conf);
745 if (IS_ERR(sdr))
746 return PTR_ERR(sdr);
747
748
749 if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
750 return -ENOTSUPP;
751
752
753 if (chipnr < 0)
754 return 0;
755
756
757 gpmi_nfc_compute_timings(this, sdr);
758
759 this->hw.must_apply_timings = true;
760
761 return 0;
762 }
763
764
765 static void gpmi_clear_bch(struct gpmi_nand_data *this)
766 {
767 struct resources *r = &this->resources;
768 writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
769 }
770
771 static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
772 {
773
774 return this->dma_chans[0];
775 }
776
777
778 static void dma_irq_callback(void *param)
779 {
780 struct gpmi_nand_data *this = param;
781 struct completion *dma_c = &this->dma_done;
782
783 complete(dma_c);
784 }
785
786 static irqreturn_t bch_irq(int irq, void *cookie)
787 {
788 struct gpmi_nand_data *this = cookie;
789
790 gpmi_clear_bch(this);
791 complete(&this->bch_done);
792 return IRQ_HANDLED;
793 }
794
795 static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
796 {
797
798
799
800
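/*
 * With the BCH engine active, the DMA moves decoded payload data only, so
 * the in-memory length is the raw transfer length rounded down to a whole
 * number of ECC chunks; otherwise the raw length is used as-is.
 */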
801 if (this->bch)
802 return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
803 else
804 return raw_len;
805 }
806
807
808 static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
809 int raw_len, struct scatterlist *sgl,
810 enum dma_data_direction dr)
811 {
812 int ret;
813 int len = gpmi_raw_len_to_len(this, raw_len);
814
815
816 if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
817 sg_init_one(sgl, buf, len);
818 ret = dma_map_sg(this->dev, sgl, 1, dr);
819 if (ret == 0)
820 goto map_fail;
821
822 return true;
823 }
824
825 map_fail:
826
827 sg_init_one(sgl, this->data_buffer_dma, len);
828
829 if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
830 memcpy(this->data_buffer_dma, buf, len);
831
832 dma_map_sg(this->dev, sgl, 1, dr);
833
834 return false;
835 }
836
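/*
 * Copy @nbits bits from @src (starting at bit @src_bit_off) into @dst
 * (starting at bit @dst_bit_off).  Used by the raw page accessors to
 * (de)interleave data chunks and ECC parity that are not byte aligned.
 */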
852 static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
853 size_t src_bit_off, size_t nbits)
854 {
855 size_t i;
856 size_t nbytes;
857 u32 src_buffer = 0;
858 size_t bits_in_src_buffer = 0;
859
860 if (!nbits)
861 return;
862
863
864
865
866
867 src += src_bit_off / 8;
868 src_bit_off %= 8;
869
870 dst += dst_bit_off / 8;
871 dst_bit_off %= 8;
872
873
874
875
876
877 if (src_bit_off) {
878 src_buffer = src[0] >> src_bit_off;
879 if (nbits >= (8 - src_bit_off)) {
880 bits_in_src_buffer += 8 - src_bit_off;
881 } else {
882 src_buffer &= GENMASK(nbits - 1, 0);
883 bits_in_src_buffer += nbits;
884 }
885 nbits -= bits_in_src_buffer;
886 src++;
887 }
888
889
890 nbytes = nbits / 8;
891
892
893 if (dst_bit_off) {
894 if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
895 src_buffer |= src[0] << bits_in_src_buffer;
896 bits_in_src_buffer += 8;
897 src++;
898 nbytes--;
899 }
900
901 if (bits_in_src_buffer >= (8 - dst_bit_off)) {
902 dst[0] &= GENMASK(dst_bit_off - 1, 0);
903 dst[0] |= src_buffer << dst_bit_off;
904 src_buffer >>= (8 - dst_bit_off);
905 bits_in_src_buffer -= (8 - dst_bit_off);
906 dst_bit_off = 0;
907 dst++;
908 if (bits_in_src_buffer > 7) {
909 bits_in_src_buffer -= 8;
910 dst[0] = src_buffer;
911 dst++;
912 src_buffer >>= 8;
913 }
914 }
915 }
916
917 if (!bits_in_src_buffer && !dst_bit_off) {
918
919
920
921
922 if (nbytes)
923 memcpy(dst, src, nbytes);
924 } else {
930 for (i = 0; i < nbytes; i++) {
931 src_buffer |= src[i] << bits_in_src_buffer;
932 dst[i] = src_buffer;
933 src_buffer >>= 8;
934 }
935 }
936
937 dst += nbytes;
938 src += nbytes;
939
940
941
942
943
944 nbits %= 8;
945
946
947
948
949
950 if (!nbits && !bits_in_src_buffer)
951 return;
952
953
954 if (nbits)
955 src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
956 bits_in_src_buffer;
957 bits_in_src_buffer += nbits;
958
965 if (dst_bit_off)
966 src_buffer = (src_buffer << dst_bit_off) |
967 (*dst & GENMASK(dst_bit_off - 1, 0));
968 bits_in_src_buffer += dst_bit_off;
974 nbytes = bits_in_src_buffer / 8;
975 if (bits_in_src_buffer % 8) {
976 src_buffer |= (dst[nbytes] &
977 GENMASK(7, bits_in_src_buffer % 8)) <<
978 (nbytes * 8);
979 nbytes++;
980 }
981
982
983 for (i = 0; i < nbytes; i++) {
984 dst[i] = src_buffer;
985 src_buffer >>= 8;
986 }
987 }
988
989
990 static uint8_t scan_ff_pattern[] = { 0xff };
991 static struct nand_bbt_descr gpmi_bbt_descr = {
992 .options = 0,
993 .offs = 0,
994 .len = 1,
995 .pattern = scan_ff_pattern
996 };
1002 static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
1003 struct mtd_oob_region *oobregion)
1004 {
1005 struct nand_chip *chip = mtd_to_nand(mtd);
1006 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1007 struct bch_geometry *geo = &this->bch_geometry;
1008
1009 if (section)
1010 return -ERANGE;
1011
1012 oobregion->offset = 0;
1013 oobregion->length = geo->page_size - mtd->writesize;
1014
1015 return 0;
1016 }
1017
1018 static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
1019 struct mtd_oob_region *oobregion)
1020 {
1021 struct nand_chip *chip = mtd_to_nand(mtd);
1022 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1023 struct bch_geometry *geo = &this->bch_geometry;
1024
1025 if (section)
1026 return -ERANGE;
1027
1028
1029 if (geo->page_size < mtd->writesize + mtd->oobsize) {
1030 oobregion->offset = geo->page_size - mtd->writesize;
1031 oobregion->length = mtd->oobsize - oobregion->offset;
1032 }
1033
1034 return 0;
1035 }
1036
1037 static const char * const gpmi_clks_for_mx2x[] = {
1038 "gpmi_io",
1039 };
1040
1041 static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
1042 .ecc = gpmi_ooblayout_ecc,
1043 .free = gpmi_ooblayout_free,
1044 };
1045
1046 static const struct gpmi_devdata gpmi_devdata_imx23 = {
1047 .type = IS_MX23,
1048 .bch_max_ecc_strength = 20,
1049 .max_chain_delay = 16000,
1050 .clks = gpmi_clks_for_mx2x,
1051 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1052 };
1053
1054 static const struct gpmi_devdata gpmi_devdata_imx28 = {
1055 .type = IS_MX28,
1056 .bch_max_ecc_strength = 20,
1057 .max_chain_delay = 16000,
1058 .clks = gpmi_clks_for_mx2x,
1059 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1060 };
1061
1062 static const char * const gpmi_clks_for_mx6[] = {
1063 "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
1064 };
1065
1066 static const struct gpmi_devdata gpmi_devdata_imx6q = {
1067 .type = IS_MX6Q,
1068 .bch_max_ecc_strength = 40,
1069 .max_chain_delay = 12000,
1070 .clks = gpmi_clks_for_mx6,
1071 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1072 };
1073
1074 static const struct gpmi_devdata gpmi_devdata_imx6sx = {
1075 .type = IS_MX6SX,
1076 .bch_max_ecc_strength = 62,
1077 .max_chain_delay = 12000,
1078 .clks = gpmi_clks_for_mx6,
1079 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1080 };
1081
1082 static const char * const gpmi_clks_for_mx7d[] = {
1083 "gpmi_io", "gpmi_bch_apb",
1084 };
1085
1086 static const struct gpmi_devdata gpmi_devdata_imx7d = {
1087 .type = IS_MX7D,
1088 .bch_max_ecc_strength = 62,
1089 .max_chain_delay = 12000,
1090 .clks = gpmi_clks_for_mx7d,
1091 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
1092 };
1093
1094 static int acquire_register_block(struct gpmi_nand_data *this,
1095 const char *res_name)
1096 {
1097 struct platform_device *pdev = this->pdev;
1098 struct resources *res = &this->resources;
1099 struct resource *r;
1100 void __iomem *p;
1101
1102 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
1103 p = devm_ioremap_resource(&pdev->dev, r);
1104 if (IS_ERR(p))
1105 return PTR_ERR(p);
1106
1107 if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
1108 res->gpmi_regs = p;
1109 else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
1110 res->bch_regs = p;
1111 else
1112 dev_err(this->dev, "unknown resource name : %s\n", res_name);
1113
1114 return 0;
1115 }
1116
1117 static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
1118 {
1119 struct platform_device *pdev = this->pdev;
1120 const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
1121 struct resource *r;
1122 int err;
1123
1124 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
1125 if (!r) {
1126 dev_err(this->dev, "Can't get resource for %s\n", res_name);
1127 return -ENODEV;
1128 }
1129
1130 err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
1131 if (err)
1132 dev_err(this->dev, "error requesting BCH IRQ\n");
1133
1134 return err;
1135 }
1136
1137 static void release_dma_channels(struct gpmi_nand_data *this)
1138 {
1139 unsigned int i;
1140 for (i = 0; i < DMA_CHANS; i++)
1141 if (this->dma_chans[i]) {
1142 dma_release_channel(this->dma_chans[i]);
1143 this->dma_chans[i] = NULL;
1144 }
1145 }
1146
1147 static int acquire_dma_channels(struct gpmi_nand_data *this)
1148 {
1149 struct platform_device *pdev = this->pdev;
1150 struct dma_chan *dma_chan;
1151
1152
1153 dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
1154 if (!dma_chan) {
1155 dev_err(this->dev, "Failed to request DMA channel.\n");
1156 goto acquire_err;
1157 }
1158
1159 this->dma_chans[0] = dma_chan;
1160 return 0;
1161
1162 acquire_err:
1163 release_dma_channels(this);
1164 return -EINVAL;
1165 }
1166
1167 static int gpmi_get_clks(struct gpmi_nand_data *this)
1168 {
1169 struct resources *r = &this->resources;
1170 struct clk *clk;
1171 int err, i;
1172
1173 for (i = 0; i < this->devdata->clks_count; i++) {
1174 clk = devm_clk_get(this->dev, this->devdata->clks[i]);
1175 if (IS_ERR(clk)) {
1176 err = PTR_ERR(clk);
1177 goto err_clock;
1178 }
1179
1180 r->clock[i] = clk;
1181 }
1182
1183 if (GPMI_IS_MX6(this))
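/* Set a sane default GPMI clock; the real rate is set later from the NAND timings. */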
1190 clk_set_rate(r->clock[0], 22000000);
1191
1192 return 0;
1193
1194 err_clock:
1195 dev_dbg(this->dev, "failed in finding the clocks.\n");
1196 return err;
1197 }
1198
1199 static int acquire_resources(struct gpmi_nand_data *this)
1200 {
1201 int ret;
1202
1203 ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
1204 if (ret)
1205 goto exit_regs;
1206
1207 ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
1208 if (ret)
1209 goto exit_regs;
1210
1211 ret = acquire_bch_irq(this, bch_irq);
1212 if (ret)
1213 goto exit_regs;
1214
1215 ret = acquire_dma_channels(this);
1216 if (ret)
1217 goto exit_regs;
1218
1219 ret = gpmi_get_clks(this);
1220 if (ret)
1221 goto exit_clock;
1222 return 0;
1223
1224 exit_clock:
1225 release_dma_channels(this);
1226 exit_regs:
1227 return ret;
1228 }
1229
1230 static void release_resources(struct gpmi_nand_data *this)
1231 {
1232 release_dma_channels(this);
1233 }
1234
1235 static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
1236 {
1237 struct device *dev = this->dev;
1238 struct bch_geometry *geo = &this->bch_geometry;
1239
1240 if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
1241 dma_free_coherent(dev, geo->auxiliary_size,
1242 this->auxiliary_virt,
1243 this->auxiliary_phys);
1244 kfree(this->data_buffer_dma);
1245 kfree(this->raw_buffer);
1246
1247 this->data_buffer_dma = NULL;
1248 this->raw_buffer = NULL;
1249 }
1250
1251
1252 static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
1253 {
1254 struct bch_geometry *geo = &this->bch_geometry;
1255 struct device *dev = this->dev;
1256 struct mtd_info *mtd = nand_to_mtd(&this->nand);
1257
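/*
 * mtd->writesize is still zero before nand_scan() has identified the chip,
 * so fall back to PAGE_SIZE for the early ID/parameter-page reads.
 */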
1266 this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
1267 GFP_DMA | GFP_KERNEL);
1268 if (this->data_buffer_dma == NULL)
1269 goto error_alloc;
1270
1271 this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
1272 &this->auxiliary_phys, GFP_DMA);
1273 if (!this->auxiliary_virt)
1274 goto error_alloc;
1275
1276 this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
1277 if (!this->raw_buffer)
1278 goto error_alloc;
1279
1280 return 0;
1281
1282 error_alloc:
1283 gpmi_free_dma_buffer(this);
1284 return -ENOMEM;
1285 }
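/*
 * Exchange the payload byte that maps onto the physical block mark position
 * with byte 0 of the auxiliary (metadata) buffer.  This keeps the factory
 * bad block mark usable even though the BCH layout places data bytes over
 * the first OOB byte.
 */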
1292 static void block_mark_swapping(struct gpmi_nand_data *this,
1293 void *payload, void *auxiliary)
1294 {
1295 struct bch_geometry *nfc_geo = &this->bch_geometry;
1296 unsigned char *p;
1297 unsigned char *a;
1298 unsigned int bit;
1299 unsigned char mask;
1300 unsigned char from_data;
1301 unsigned char from_oob;
1302
1303 if (!this->swap_block_mark)
1304 return;
1310 bit = nfc_geo->block_mark_bit_offset;
1311 p = payload + nfc_geo->block_mark_byte_offset;
1312 a = auxiliary;
1320 from_data = (p[0] >> bit) | (p[1] << (8 - bit));
1321
1322
1323 from_oob = a[0];
1324
1325
1326 a[0] = from_data;
1327
1328 mask = (0x1 << bit) - 1;
1329 p[0] = (p[0] & mask) | (from_oob << bit);
1330
1331 mask = ~0 << bit;
1332 p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
1333 }
1334
1335 static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
1336 int last, int meta)
1337 {
1338 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1339 struct bch_geometry *nfc_geo = &this->bch_geometry;
1340 struct mtd_info *mtd = nand_to_mtd(chip);
1341 int i;
1342 unsigned char *status;
1343 unsigned int max_bitflips = 0;
1344
1345
1346 status = this->auxiliary_virt + ALIGN(meta, 4);
1347
1348 for (i = first; i < last; i++, status++) {
1349 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1350 continue;
1351
1352 if (*status == STATUS_UNCORRECTABLE) {
1353 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1354 u8 *eccbuf = this->raw_buffer;
1355 int offset, bitoffset;
1356 int eccbytes;
1357 int flips;
1358
1359
1360 offset = nfc_geo->metadata_size * 8;
1361 offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
1362 offset -= eccbits;
1363 bitoffset = offset % 8;
1364 eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1365 offset /= 8;
1366 eccbytes -= offset;
1367 nand_change_read_column_op(chip, offset, eccbuf,
1368 eccbytes, false);
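/*
 * The ECC parity is not byte aligned; force the stray bits at either end of
 * the read-back parity bytes to 1 so they are not counted as bitflips by
 * the erased-chunk check below.
 */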
1377 if (bitoffset)
1378 eccbuf[0] |= GENMASK(bitoffset - 1, 0);
1379
1380 bitoffset = (bitoffset + eccbits) % 8;
1381 if (bitoffset)
1382 eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
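/*
 * Chunk 0 also covers the metadata, so include it in the erased-chunk
 * check; the other chunks carry data only.
 */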
1395 if (i == 0) {
1396
1397 flips = nand_check_erased_ecc_chunk(
1398 buf + i * nfc_geo->ecc_chunk_size,
1399 nfc_geo->ecc_chunk_size,
1400 eccbuf, eccbytes,
1401 this->auxiliary_virt,
1402 nfc_geo->metadata_size,
1403 nfc_geo->ecc_strength);
1404 } else {
1405 flips = nand_check_erased_ecc_chunk(
1406 buf + i * nfc_geo->ecc_chunk_size,
1407 nfc_geo->ecc_chunk_size,
1408 eccbuf, eccbytes,
1409 NULL, 0,
1410 nfc_geo->ecc_strength);
1411 }
1412
1413 if (flips > 0) {
1414 max_bitflips = max_t(unsigned int, max_bitflips,
1415 flips);
1416 mtd->ecc_stats.corrected += flips;
1417 continue;
1418 }
1419
1420 mtd->ecc_stats.failed++;
1421 continue;
1422 }
1423
1424 mtd->ecc_stats.corrected += *status;
1425 max_bitflips = max_t(unsigned int, max_bitflips, *status);
1426 }
1427
1428 return max_bitflips;
1429 }
1430
1431 static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
1432 {
1433 struct bch_geometry *geo = &this->bch_geometry;
1434 unsigned int ecc_strength = geo->ecc_strength >> 1;
1435 unsigned int gf_len = geo->gf_len;
1436 unsigned int block_size = geo->ecc_chunk_size;
1437
1438 this->bch_flashlayout0 =
1439 BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
1440 BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
1441 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1442 BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
1443 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
1444
1445 this->bch_flashlayout1 =
1446 BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
1447 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1448 BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
1449 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
1450 }
1451
1452 static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1453 int oob_required, int page)
1454 {
1455 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1456 struct mtd_info *mtd = nand_to_mtd(chip);
1457 struct bch_geometry *geo = &this->bch_geometry;
1458 unsigned int max_bitflips;
1459 int ret;
1460
1461 gpmi_bch_layout_std(this);
1462 this->bch = true;
1463
1464 ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
1465 if (ret)
1466 return ret;
1467
1468 max_bitflips = gpmi_count_bitflips(chip, buf, 0,
1469 geo->ecc_chunk_count,
1470 geo->auxiliary_status_offset);
1471
1472
1473 block_mark_swapping(this, buf, this->auxiliary_virt);
1474
1475 if (oob_required) {
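/*
 * Only the block mark byte of the OOB carries real data here; the rest of
 * the OOB is consumed by the controller's metadata/ECC layout, so report it
 * as 0xff.
 */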
1486 memset(chip->oob_poi, ~0, mtd->oobsize);
1487 chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
1488 }
1489
1490 return max_bitflips;
1491 }
1492
1493
1494 static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1495 uint32_t len, uint8_t *buf, int page)
1496 {
1497 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1498 struct bch_geometry *geo = &this->bch_geometry;
1499 int size = chip->ecc.size;
1500 int meta, n, page_size;
1501 unsigned int max_bitflips;
1502 unsigned int ecc_strength;
1503 int first, last, marker_pos;
1504 int ecc_parity_size;
1505 int col = 0;
1506 int ret;
1507
1508
1509 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1510
1511
1512 first = offs / size;
1513 last = (offs + len - 1) / size;
1514
1515 if (this->swap_block_mark) {
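/*
 * If the requested range covers the chunk that holds the swapped block
 * mark, fall back to a full ECC page read so the swap is handled.
 */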
1523 marker_pos = geo->block_mark_byte_offset / size;
1524 if (last >= marker_pos && first <= marker_pos) {
1525 dev_dbg(this->dev,
1526 "page:%d, first:%d, last:%d, marker at:%d\n",
1527 page, first, last, marker_pos);
1528 return gpmi_ecc_read_page(chip, buf, 0, page);
1529 }
1530 }
1531
1532 meta = geo->metadata_size;
1533 if (first) {
1534 col = meta + (size + ecc_parity_size) * first;
1535 meta = 0;
1536 buf = buf + first * size;
1537 }
1538
1539 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1540
1541 n = last - first + 1;
1542 page_size = meta + (size + ecc_parity_size) * n;
1543 ecc_strength = geo->ecc_strength >> 1;
1544
1545 this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1546 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1547 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1548 BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1549 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
1550
1551 this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1552 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1553 BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1554 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
1555
1556 this->bch = true;
1557
1558 ret = nand_read_page_op(chip, page, col, buf, page_size);
1559 if (ret)
1560 return ret;
1561
1562 dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1563 page, offs, len, col, first, n, page_size);
1564
1565 max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1566
1567 return max_bitflips;
1568 }
1569
1570 static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1571 int oob_required, int page)
1572 {
1573 struct mtd_info *mtd = nand_to_mtd(chip);
1574 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1575 struct bch_geometry *nfc_geo = &this->bch_geometry;
1576 int ret;
1577
1578 dev_dbg(this->dev, "ecc write page.\n");
1579
1580 gpmi_bch_layout_std(this);
1581 this->bch = true;
1582
1583 memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
1584
1585 if (this->swap_block_mark) {
1586
1587
1588
1589
1590 memcpy(this->data_buffer_dma, buf, mtd->writesize);
1591 buf = this->data_buffer_dma;
1592 block_mark_swapping(this, this->data_buffer_dma,
1593 this->auxiliary_virt);
1594 }
1595
1596 ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
1597
1598 return ret;
1599 }
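/*
 * gpmi_ecc_read_oob() - read the conventional OOB area as-is, without
 * running it through the BCH engine.
 */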
1661 static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1662 {
1663 struct mtd_info *mtd = nand_to_mtd(chip);
1664 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1665 int ret;
1666
1667
1668 memset(chip->oob_poi, ~0, mtd->oobsize);
1669
1670
1671 ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1672 mtd->oobsize);
1673 if (ret)
1674 return ret;
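/*
 * On i.MX23 block mark swapping is disabled and the mark lives in byte 0
 * of the data area, so fetch that byte and report it as oob_poi[0].
 */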
1681 if (GPMI_IS_MX23(this)) {
1682
1683 ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1684 if (ret)
1685 return ret;
1686 }
1687
1688 return 0;
1689 }
1690
1691 static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
1692 {
1693 struct mtd_info *mtd = nand_to_mtd(chip);
1694 struct mtd_oob_region of = { };
1695
1696
1697 mtd_ooblayout_free(mtd, 0, &of);
1698 if (!of.length)
1699 return -EPERM;
1700
1701 if (!nand_is_slc(chip))
1702 return -EPERM;
1703
1704 return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1705 chip->oob_poi + of.offset, of.length);
1706 }
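/*
 * Raw page read: fetch the whole page (data + OOB) with the ECC engine off,
 * then de-interleave the metadata, data chunks and (non byte-aligned) ECC
 * parity bits into buf and chip->oob_poi using gpmi_copy_bits().
 */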
1720 static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1721 int oob_required, int page)
1722 {
1723 struct mtd_info *mtd = nand_to_mtd(chip);
1724 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1725 struct bch_geometry *nfc_geo = &this->bch_geometry;
1726 int eccsize = nfc_geo->ecc_chunk_size;
1727 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1728 u8 *tmp_buf = this->raw_buffer;
1729 size_t src_bit_off;
1730 size_t oob_bit_off;
1731 size_t oob_byte_off;
1732 uint8_t *oob = chip->oob_poi;
1733 int step;
1734 int ret;
1735
1736 ret = nand_read_page_op(chip, page, 0, tmp_buf,
1737 mtd->writesize + mtd->oobsize);
1738 if (ret)
1739 return ret;
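/*
 * With block mark swapping in use, exchange the physical bad block marker
 * (first OOB byte) with metadata byte 0 so the de-interleaved result
 * matches the logical layout.
 */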
1748 if (this->swap_block_mark)
1749 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1755 if (oob_required)
1756 memcpy(oob, tmp_buf, nfc_geo->metadata_size);
1757
1758 oob_bit_off = nfc_geo->metadata_size * 8;
1759 src_bit_off = oob_bit_off;
1760
1761
1762 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1763 if (buf)
1764 gpmi_copy_bits(buf, step * eccsize * 8,
1765 tmp_buf, src_bit_off,
1766 eccsize * 8);
1767 src_bit_off += eccsize * 8;
1768
1769
1770 if (step == nfc_geo->ecc_chunk_count - 1 &&
1771 (oob_bit_off + eccbits) % 8)
1772 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1773
1774 if (oob_required)
1775 gpmi_copy_bits(oob, oob_bit_off,
1776 tmp_buf, src_bit_off,
1777 eccbits);
1778
1779 src_bit_off += eccbits;
1780 oob_bit_off += eccbits;
1781 }
1782
1783 if (oob_required) {
1784 oob_byte_off = oob_bit_off / 8;
1785
1786 if (oob_byte_off < mtd->oobsize)
1787 memcpy(oob + oob_byte_off,
1788 tmp_buf + mtd->writesize + oob_byte_off,
1789 mtd->oobsize - oob_byte_off);
1790 }
1791
1792 return 0;
1793 }
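/*
 * Raw page write: the mirror of gpmi_ecc_read_page_raw().  Interleave the
 * metadata, data chunks and caller-supplied ECC bits into a single raw
 * buffer, apply the block mark swap, and program the whole page.
 */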
1807 static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1808 int oob_required, int page)
1809 {
1810 struct mtd_info *mtd = nand_to_mtd(chip);
1811 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1812 struct bch_geometry *nfc_geo = &this->bch_geometry;
1813 int eccsize = nfc_geo->ecc_chunk_size;
1814 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1815 u8 *tmp_buf = this->raw_buffer;
1816 uint8_t *oob = chip->oob_poi;
1817 size_t dst_bit_off;
1818 size_t oob_bit_off;
1819 size_t oob_byte_off;
1820 int step;
1827 if (!buf || !oob_required)
1828 memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
1834 memcpy(tmp_buf, oob, nfc_geo->metadata_size);
1835 oob_bit_off = nfc_geo->metadata_size * 8;
1836 dst_bit_off = oob_bit_off;
1837
1838
1839 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1840 if (buf)
1841 gpmi_copy_bits(tmp_buf, dst_bit_off,
1842 buf, step * eccsize * 8, eccsize * 8);
1843 dst_bit_off += eccsize * 8;
1844
1845
1846 if (step == nfc_geo->ecc_chunk_count - 1 &&
1847 (oob_bit_off + eccbits) % 8)
1848 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1849
1850 if (oob_required)
1851 gpmi_copy_bits(tmp_buf, dst_bit_off,
1852 oob, oob_bit_off, eccbits);
1853
1854 dst_bit_off += eccbits;
1855 oob_bit_off += eccbits;
1856 }
1857
1858 oob_byte_off = oob_bit_off / 8;
1859
1860 if (oob_required && oob_byte_off < mtd->oobsize)
1861 memcpy(tmp_buf + mtd->writesize + oob_byte_off,
1862 oob + oob_byte_off, mtd->oobsize - oob_byte_off);
1871 if (this->swap_block_mark)
1872 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1873
1874 return nand_prog_page_op(chip, page, 0, tmp_buf,
1875 mtd->writesize + mtd->oobsize);
1876 }
1877
1878 static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
1879 {
1880 return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
1881 }
1882
1883 static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
1884 {
1885 return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
1886 }
1887
1888 static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
1889 {
1890 struct mtd_info *mtd = nand_to_mtd(chip);
1891 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1892 int ret = 0;
1893 uint8_t *block_mark;
1894 int column, page, chipnr;
1895
1896 chipnr = (int)(ofs >> chip->chip_shift);
1897 nand_select_target(chip, chipnr);
1898
1899 column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
1900
1901
1902 block_mark = this->data_buffer_dma;
1903 block_mark[0] = 0;
1904
1905
1906 page = (int)(ofs >> chip->page_shift);
1907
1908 ret = nand_prog_page_op(chip, page, column, block_mark, 1);
1909
1910 nand_deselect_target(chip);
1911
1912 return ret;
1913 }
1914
1915 static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1916 {
1917 struct boot_rom_geometry *geometry = &this->rom_geometry;
1927 geometry->stride_size_in_pages = 64;
1937 geometry->search_area_stride_exponent = 2;
1938 return 0;
1939 }
1940
1941 static const char *fingerprint = "STMP";
1942 static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1943 {
1944 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1945 struct device *dev = this->dev;
1946 struct nand_chip *chip = &this->nand;
1947 unsigned int search_area_size_in_strides;
1948 unsigned int stride;
1949 unsigned int page;
1950 u8 *buffer = nand_get_data_buf(chip);
1951 int found_an_ncb_fingerprint = false;
1952 int ret;
1953
1954
1955 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1956
1957 nand_select_target(chip, 0);
1958
1959
1960
1961
1962 dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1963
1964 for (stride = 0; stride < search_area_size_in_strides; stride++) {
1965
1966 page = stride * rom_geo->stride_size_in_pages;
1967
1968 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1974 ret = nand_read_page_op(chip, page, 12, buffer,
1975 strlen(fingerprint));
1976 if (ret)
1977 continue;
1978
1979
1980 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1981 found_an_ncb_fingerprint = true;
1982 break;
1983 }
1984
1985 }
1986
1987 nand_deselect_target(chip);
1988
1989 if (found_an_ncb_fingerprint)
1990 dev_dbg(dev, "\tFound a fingerprint\n");
1991 else
1992 dev_dbg(dev, "\tNo fingerprint found\n");
1993 return found_an_ncb_fingerprint;
1994 }
1995
1996
1997 static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1998 {
1999 struct device *dev = this->dev;
2000 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
2001 struct nand_chip *chip = &this->nand;
2002 struct mtd_info *mtd = nand_to_mtd(chip);
2003 unsigned int block_size_in_pages;
2004 unsigned int search_area_size_in_strides;
2005 unsigned int search_area_size_in_pages;
2006 unsigned int search_area_size_in_blocks;
2007 unsigned int block;
2008 unsigned int stride;
2009 unsigned int page;
2010 u8 *buffer = nand_get_data_buf(chip);
2011 int status;
2012
2013
2014 block_size_in_pages = mtd->erasesize / mtd->writesize;
2015 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
2016 search_area_size_in_pages = search_area_size_in_strides *
2017 rom_geo->stride_size_in_pages;
2018 search_area_size_in_blocks =
2019 (search_area_size_in_pages + (block_size_in_pages - 1)) /
2020 block_size_in_pages;
2021
2022 dev_dbg(dev, "Search Area Geometry :\n");
2023 dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
2024 dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
2025 dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
2026
2027 nand_select_target(chip, 0);
2028
2029
2030 dev_dbg(dev, "Erasing the search area...\n");
2031
2032 for (block = 0; block < search_area_size_in_blocks; block++) {
2033
2034 dev_dbg(dev, "\tErasing block 0x%x\n", block);
2035 status = nand_erase_op(chip, block);
2036 if (status)
2037 dev_err(dev, "[%s] Erase failed.\n", __func__);
2038 }
2039
2040
2041 memset(buffer, ~0, mtd->writesize);
2042 memcpy(buffer + 12, fingerprint, strlen(fingerprint));
2043
2044
2045 dev_dbg(dev, "Writing NCB fingerprints...\n");
2046 for (stride = 0; stride < search_area_size_in_strides; stride++) {
2047
2048 page = stride * rom_geo->stride_size_in_pages;
2049
2050
2051 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
2052
2053 status = chip->ecc.write_page_raw(chip, buffer, 0, page);
2054 if (status)
2055 dev_err(dev, "[%s] Write failed.\n", __func__);
2056 }
2057
2058 nand_deselect_target(chip);
2059
2060 return 0;
2061 }
2062
2063 static int mx23_boot_init(struct gpmi_nand_data *this)
2064 {
2065 struct device *dev = this->dev;
2066 struct nand_chip *chip = &this->nand;
2067 struct mtd_info *mtd = nand_to_mtd(chip);
2068 unsigned int block_count;
2069 unsigned int block;
2070 int chipnr;
2071 int page;
2072 loff_t byte;
2073 uint8_t block_mark;
2074 int ret = 0;
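/*
 * If a transcription stamp is already present, the factory bad block marks
 * were transcribed on a previous boot and nothing more needs to be done.
 */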
2082 if (mx23_check_transcription_stamp(this))
2083 return 0;
2089 dev_dbg(dev, "Transcribing bad block marks...\n");
2090
2091
2092 block_count = nanddev_eraseblocks_per_target(&chip->base);
2098 for (block = 0; block < block_count; block++) {
2099
2100
2101
2102
2103 chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
2104 page = block << (chip->phys_erase_shift - chip->page_shift);
2105 byte = block << chip->phys_erase_shift;
2106
2107
2108 nand_select_target(chip, chipnr);
2109 ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
2110 1);
2111 nand_deselect_target(chip);
2112
2113 if (ret)
2114 continue;
2121 if (block_mark != 0xff) {
2122 dev_dbg(dev, "Transcribing mark in block %u\n", block);
2123 ret = chip->legacy.block_markbad(chip, byte);
2124 if (ret)
2125 dev_err(dev,
2126 "Failed to mark block bad with ret %d\n",
2127 ret);
2128 }
2129 }
2130
2131
2132 mx23_write_transcription_stamp(this);
2133 return 0;
2134 }
2135
2136 static int nand_boot_init(struct gpmi_nand_data *this)
2137 {
2138 nand_boot_set_geometry(this);
2139
2140
2141 if (GPMI_IS_MX23(this))
2142 return mx23_boot_init(this);
2143 return 0;
2144 }
2145
2146 static int gpmi_set_geometry(struct gpmi_nand_data *this)
2147 {
2148 int ret;
2149
2150
2151 gpmi_free_dma_buffer(this);
2152
2153
2154 ret = bch_set_geometry(this);
2155 if (ret) {
2156 dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
2157 return ret;
2158 }
2159
2160
2161 return gpmi_alloc_dma_buffer(this);
2162 }
2163
2164 static int gpmi_init_last(struct gpmi_nand_data *this)
2165 {
2166 struct nand_chip *chip = &this->nand;
2167 struct mtd_info *mtd = nand_to_mtd(chip);
2168 struct nand_ecc_ctrl *ecc = &chip->ecc;
2169 struct bch_geometry *bch_geo = &this->bch_geometry;
2170 int ret;
2171
2172
2173 ret = gpmi_set_geometry(this);
2174 if (ret)
2175 return ret;
2176
2177
2178 ecc->read_page = gpmi_ecc_read_page;
2179 ecc->write_page = gpmi_ecc_write_page;
2180 ecc->read_oob = gpmi_ecc_read_oob;
2181 ecc->write_oob = gpmi_ecc_write_oob;
2182 ecc->read_page_raw = gpmi_ecc_read_page_raw;
2183 ecc->write_page_raw = gpmi_ecc_write_page_raw;
2184 ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
2185 ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
2186 ecc->mode = NAND_ECC_HW;
2187 ecc->size = bch_geo->ecc_chunk_size;
2188 ecc->strength = bch_geo->ecc_strength;
2189 mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
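/*
 * Subpage reads are enabled only on i.MX6 and only when the per-chunk ECC
 * parity is a whole number of bytes, so chunks can be read back
 * individually.
 */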
2196 if (GPMI_IS_MX6(this) &&
2197 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
2198 ecc->read_subpage = gpmi_ecc_read_subpage;
2199 chip->options |= NAND_SUBPAGE_READ;
2200 }
2201
2202 return 0;
2203 }
2204
2205 static int gpmi_nand_attach_chip(struct nand_chip *chip)
2206 {
2207 struct gpmi_nand_data *this = nand_get_controller_data(chip);
2208 int ret;
2209
2210 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2211 chip->bbt_options |= NAND_BBT_NO_OOB;
2212
2213 if (of_property_read_bool(this->dev->of_node,
2214 "fsl,no-blockmark-swap"))
2215 this->swap_block_mark = false;
2216 }
2217 dev_dbg(this->dev, "Blockmark swapping %sabled\n",
2218 this->swap_block_mark ? "en" : "dis");
2219
2220 ret = gpmi_init_last(this);
2221 if (ret)
2222 return ret;
2223
2224 chip->options |= NAND_SKIP_BBTSCAN;
2225
2226 return 0;
2227 }
2228
2229 static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2230 {
2231 struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2232
2233 this->ntransfers++;
2234
2235 if (this->ntransfers == GPMI_MAX_TRANSFERS)
2236 return NULL;
2237
2238 return transfer;
2239 }
2240
2241 static struct dma_async_tx_descriptor *gpmi_chain_command(
2242 struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
2243 {
2244 struct dma_chan *channel = get_dma_chan(this);
2245 struct dma_async_tx_descriptor *desc;
2246 struct gpmi_transfer *transfer;
2247 int chip = this->nand.cur_cs;
2248 u32 pio[3];
2249
2250
2251 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2252 | BM_GPMI_CTRL0_WORD_LENGTH
2253 | BF_GPMI_CTRL0_CS(chip, this)
2254 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2255 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
2256 | BM_GPMI_CTRL0_ADDRESS_INCREMENT
2257 | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
2258 pio[1] = 0;
2259 pio[2] = 0;
2260 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2261 DMA_TRANS_NONE, 0);
2262 if (!desc)
2263 return NULL;
2264
2265 transfer = get_next_transfer(this);
2266 if (!transfer)
2267 return NULL;
2268
2269 transfer->cmdbuf[0] = cmd;
2270 if (naddr)
2271 memcpy(&transfer->cmdbuf[1], addr, naddr);
2272
2273 sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
2274 dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
2275
2276 transfer->direction = DMA_TO_DEVICE;
2277
2278 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
2279 MXS_DMA_CTRL_WAIT4END);
2280 return desc;
2281 }
2282
2283 static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
2284 struct gpmi_nand_data *this)
2285 {
2286 struct dma_chan *channel = get_dma_chan(this);
2287 u32 pio[2];
2288
2289 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
2290 | BM_GPMI_CTRL0_WORD_LENGTH
2291 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2292 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2293 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2294 | BF_GPMI_CTRL0_XFER_COUNT(0);
2295 pio[1] = 0;
2296
2297 return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
2298 MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
2299 }
2300
2301 static struct dma_async_tx_descriptor *gpmi_chain_data_read(
2302 struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
2303 {
2304 struct dma_async_tx_descriptor *desc;
2305 struct dma_chan *channel = get_dma_chan(this);
2306 struct gpmi_transfer *transfer;
2307 u32 pio[6] = {};
2308
2309 transfer = get_next_transfer(this);
2310 if (!transfer)
2311 return NULL;
2312
2313 transfer->direction = DMA_FROM_DEVICE;
2314
2315 *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
2316 DMA_FROM_DEVICE);
2317
2318 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
2319 | BM_GPMI_CTRL0_WORD_LENGTH
2320 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2321 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2322 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2323 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2324
2325 if (this->bch) {
2326 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2327 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
2328 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
2329 | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2330 pio[3] = raw_len;
2331 pio[4] = transfer->sgl.dma_address;
2332 pio[5] = this->auxiliary_phys;
2333 }
2334
2335 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2336 DMA_TRANS_NONE, 0);
2337 if (!desc)
2338 return NULL;
2339
2340 if (!this->bch)
2341 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2342 DMA_DEV_TO_MEM,
2343 MXS_DMA_CTRL_WAIT4END);
2344
2345 return desc;
2346 }
2347
2348 static struct dma_async_tx_descriptor *gpmi_chain_data_write(
2349 struct gpmi_nand_data *this, const void *buf, int raw_len)
2350 {
2351 struct dma_chan *channel = get_dma_chan(this);
2352 struct dma_async_tx_descriptor *desc;
2353 struct gpmi_transfer *transfer;
2354 u32 pio[6] = {};
2355
2356 transfer = get_next_transfer(this);
2357 if (!transfer)
2358 return NULL;
2359
2360 transfer->direction = DMA_TO_DEVICE;
2361
2362 prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
2363
2364 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2365 | BM_GPMI_CTRL0_WORD_LENGTH
2366 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2367 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2368 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2369 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2370
2371 if (this->bch) {
2372 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2373 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
2374 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
2375 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2376 pio[3] = raw_len;
2377 pio[4] = transfer->sgl.dma_address;
2378 pio[5] = this->auxiliary_phys;
2379 }
2380
2381 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2382 DMA_TRANS_NONE,
2383 (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
2384 if (!desc)
2385 return NULL;
2386
2387 if (!this->bch)
2388 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2389 DMA_MEM_TO_DEV,
2390 MXS_DMA_CTRL_WAIT4END);
2391
2392 return desc;
2393 }
2394
2395 static int gpmi_nfc_exec_op(struct nand_chip *chip,
2396 const struct nand_operation *op,
2397 bool check_only)
2398 {
2399 const struct nand_op_instr *instr;
2400 struct gpmi_nand_data *this = nand_get_controller_data(chip);
2401 struct dma_async_tx_descriptor *desc = NULL;
2402 int i, ret, buf_len = 0, nbufs = 0;
2403 u8 cmd = 0;
2404 void *buf_read = NULL;
2405 const void *buf_write = NULL;
2406 bool direct = false;
2407 struct completion *completion;
2408 unsigned long to;
2409
2410 this->ntransfers = 0;
2411 for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2412 this->transfers[i].direction = DMA_NONE;
2413
2414 ret = pm_runtime_get_sync(this->dev);
2415 if (ret < 0)
2416 return ret;
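/*
 * Apply any timings computed by gpmi_setup_data_interface() now that the
 * controller is powered and its clock is running.
 */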
2424 if (this->hw.must_apply_timings) {
2425 this->hw.must_apply_timings = false;
2426 gpmi_nfc_apply_timings(this);
2427 }
2428
2429 dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2430
2431 for (i = 0; i < op->ninstrs; i++) {
2432 instr = &op->instrs[i];
2433
2434 nand_op_trace(" ", instr);
2435
2436 switch (instr->type) {
2437 case NAND_OP_WAITRDY_INSTR:
2438 desc = gpmi_chain_wait_ready(this);
2439 break;
2440 case NAND_OP_CMD_INSTR:
2441 cmd = instr->ctx.cmd.opcode;
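/*
 * When the command is immediately followed by an address cycle, defer
 * issuing it so the opcode and address bytes go out in a single DMA
 * transfer (see gpmi_chain_command()).
 */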
2447 if (i + 1 != op->ninstrs &&
2448 op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2449 continue;
2450
2451 desc = gpmi_chain_command(this, cmd, NULL, 0);
2452
2453 break;
2454 case NAND_OP_ADDR_INSTR:
2455 desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2456 instr->ctx.addr.naddrs);
2457 break;
2458 case NAND_OP_DATA_OUT_INSTR:
2459 buf_write = instr->ctx.data.buf.out;
2460 buf_len = instr->ctx.data.len;
2461 nbufs++;
2462
2463 desc = gpmi_chain_data_write(this, buf_write, buf_len);
2464
2465 break;
2466 case NAND_OP_DATA_IN_INSTR:
2467 if (!instr->ctx.data.len)
2468 break;
2469 buf_read = instr->ctx.data.buf.in;
2470 buf_len = instr->ctx.data.len;
2471 nbufs++;
2472
2473 desc = gpmi_chain_data_read(this, buf_read, buf_len,
2474 &direct);
2475 break;
2476 }
2477
2478 if (!desc) {
2479 ret = -ENXIO;
2480 goto unmap;
2481 }
2482 }
2483
2484 dev_dbg(this->dev, "%s setup done\n", __func__);
2485
2486 if (nbufs > 1) {
2487 dev_err(this->dev, "Multiple data instructions not supported\n");
2488 ret = -EINVAL;
2489 goto unmap;
2490 }
2491
2492 if (this->bch) {
2493 writel(this->bch_flashlayout0,
2494 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2495 writel(this->bch_flashlayout1,
2496 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2497 }
2498
2499 if (this->bch && buf_read) {
2500 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2501 this->resources.bch_regs + HW_BCH_CTRL_SET);
2502 completion = &this->bch_done;
2503 } else {
2504 desc->callback = dma_irq_callback;
2505 desc->callback_param = this;
2506 completion = &this->dma_done;
2507 }
2508
2509 init_completion(completion);
2510
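/* Kick off the descriptor chain and give it one second to complete. */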
2511 dmaengine_submit(desc);
2512 dma_async_issue_pending(get_dma_chan(this));
2513
2514 to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
2515 if (!to) {
2516 dev_err(this->dev, "DMA timeout waiting for the last descriptor\n");
2517 gpmi_dump_info(this);
2518 ret = -ETIMEDOUT;
2519 goto unmap;
2520 }
2521
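/* Mask the BCH completion interrupt again and clear its status. */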
2522 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2523 this->resources.bch_regs + HW_BCH_CTRL_CLR);
2524 gpmi_clear_bch(this);
2525
2526 ret = 0;
2527
2528 unmap:
2529 for (i = 0; i < this->ntransfers; i++) {
2530 struct gpmi_transfer *transfer = &this->transfers[i];
2531
2532 if (transfer->direction != DMA_NONE)
2533 dma_unmap_sg(this->dev, &transfer->sgl, 1,
2534 transfer->direction);
2535 }
2536
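/*
 * Reads that could not DMA straight into the caller's buffer went
 * through the driver's bounce buffer; copy the payload back now.
 */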
2537 if (!ret && buf_read && !direct)
2538 memcpy(buf_read, this->data_buffer_dma,
2539 gpmi_raw_len_to_len(this, buf_len));
2540
2541 this->bch = false;
2542
2543 pm_runtime_mark_last_busy(this->dev);
2544 pm_runtime_put_autosuspend(this->dev);
2545
2546 return ret;
2547 }
2548
2549 static const struct nand_controller_ops gpmi_nand_controller_ops = {
2550 .attach_chip = gpmi_nand_attach_chip,
2551 .setup_data_interface = gpmi_setup_data_interface,
2552 .exec_op = gpmi_nfc_exec_op,
2553 };
2554
2555 static int gpmi_nand_init(struct gpmi_nand_data *this)
2556 {
2557 struct nand_chip *chip = &this->nand;
2558 struct mtd_info *mtd = nand_to_mtd(chip);
2559 int ret;
2560
2561 /* Init the MTD data structures. */
2562 mtd->name = "gpmi-nand";
2563 mtd->dev.parent = this->dev;
2564
2565 /* Init the nand_chip; a 16-bit NAND bus is not supported. */
2566 nand_set_controller_data(chip, this);
2567 nand_set_flash_node(chip, this->pdev->dev.of_node);
2568 chip->legacy.block_markbad = gpmi_block_markbad;
2569 chip->badblock_pattern = &gpmi_bbt_descr;
2570 chip->options |= NAND_NO_SUBPAGE_WRITE;
2571
2572 /* Set up swap_block_mark; it must be set before the geometry is chosen. */
2573 this->swap_block_mark = !GPMI_IS_MX23(this);
2574
2575 /*
2576 * Allocate a temporary DMA buffer that is large enough for reading the
2577 * ID bytes during nand_scan(), before the real geometry is known.
2578 */
2579 this->bch_geometry.payload_size = 1024;
2580 this->bch_geometry.auxiliary_size = 128;
2581 ret = gpmi_alloc_dma_buffer(this);
2582 if (ret)
2583 goto err_out;
2584
2585 nand_controller_init(&this->base);
2586 this->base.ops = &gpmi_nand_controller_ops;
2587 chip->controller = &this->base;
2588
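/* Scan two chip selects on controllers matched by GPMI_IS_MX6(), one elsewhere. */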
2589 ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
2590 if (ret)
2591 goto err_out;
2592
2593 ret = nand_boot_init(this);
2594 if (ret)
2595 goto err_nand_cleanup;
2596 ret = nand_create_bbt(chip);
2597 if (ret)
2598 goto err_nand_cleanup;
2599
2600 ret = mtd_device_register(mtd, NULL, 0);
2601 if (ret)
2602 goto err_nand_cleanup;
2603 return 0;
2604
2605 err_nand_cleanup:
2606 nand_cleanup(chip);
2607 err_out:
2608 gpmi_free_dma_buffer(this);
2609 return ret;
2610 }
2611
2612 static const struct of_device_id gpmi_nand_id_table[] = {
2613 {
2614 .compatible = "fsl,imx23-gpmi-nand",
2615 .data = &gpmi_devdata_imx23,
2616 }, {
2617 .compatible = "fsl,imx28-gpmi-nand",
2618 .data = &gpmi_devdata_imx28,
2619 }, {
2620 .compatible = "fsl,imx6q-gpmi-nand",
2621 .data = &gpmi_devdata_imx6q,
2622 }, {
2623 .compatible = "fsl,imx6sx-gpmi-nand",
2624 .data = &gpmi_devdata_imx6sx,
2625 }, {
2626 .compatible = "fsl,imx7d-gpmi-nand",
2627 .data = &gpmi_devdata_imx7d,
2628 }, {}
2629 };
2630 MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2631
2632 static int gpmi_nand_probe(struct platform_device *pdev)
2633 {
2634 struct gpmi_nand_data *this;
2635 const struct of_device_id *of_id;
2636 int ret;
2637
2638 this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
2639 if (!this)
2640 return -ENOMEM;
2641
2642 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
2643 if (of_id) {
2644 this->devdata = of_id->data;
2645 } else {
2646 dev_err(&pdev->dev, "Failed to find the right device id.\n");
2647 return -ENODEV;
2648 }
2649
2650 platform_set_drvdata(pdev, this);
2651 this->pdev = pdev;
2652 this->dev = &pdev->dev;
2653
2654 ret = acquire_resources(this);
2655 if (ret)
2656 goto exit_acquire_resources;
2657
2658 ret = __gpmi_enable_clk(this, true);
2659 if (ret)
2660 goto exit_nfc_init;
2661
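/*
 * Enable runtime PM with a 500 ms autosuspend delay; the controller
 * clocks are gated whenever the device is runtime suspended.
 */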
2662 pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2663 pm_runtime_use_autosuspend(&pdev->dev);
2664 pm_runtime_set_active(&pdev->dev);
2665 pm_runtime_enable(&pdev->dev);
2666 pm_runtime_get_sync(&pdev->dev);
2667
2668 ret = gpmi_init(this);
2669 if (ret)
2670 goto exit_nfc_init;
2671
2672 ret = gpmi_nand_init(this);
2673 if (ret)
2674 goto exit_nfc_init;
2675
2676 pm_runtime_mark_last_busy(&pdev->dev);
2677 pm_runtime_put_autosuspend(&pdev->dev);
2678
2679 dev_info(this->dev, "driver registered.\n");
2680
2681 return 0;
2682
2683 exit_nfc_init:
2684 pm_runtime_put(&pdev->dev);
2685 pm_runtime_disable(&pdev->dev);
2686 release_resources(this);
2687 exit_acquire_resources:
2688
2689 return ret;
2690 }
2691
2692 static int gpmi_nand_remove(struct platform_device *pdev)
2693 {
2694 struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2695
2696 pm_runtime_put_sync(&pdev->dev);
2697 pm_runtime_disable(&pdev->dev);
2698
2699 nand_release(&this->nand);
2700 gpmi_free_dma_buffer(this);
2701 release_resources(this);
2702 return 0;
2703 }
2704
2705 #ifdef CONFIG_PM_SLEEP
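/*
 * System sleep: release the DMA channels on suspend, reacquire them on
 * resume, and redo the GPMI and BCH setup from scratch afterwards.
 */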
2706 static int gpmi_pm_suspend(struct device *dev)
2707 {
2708 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2709
2710 release_dma_channels(this);
2711 return 0;
2712 }
2713
2714 static int gpmi_pm_resume(struct device *dev)
2715 {
2716 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2717 int ret;
2718
2719 ret = acquire_dma_channels(this);
2720 if (ret < 0)
2721 return ret;
2722
2723 /* Re-init the GPMI block. */
2724 ret = gpmi_init(this);
2725 if (ret) {
2726 dev_err(this->dev, "Error setting GPMI: %d\n", ret);
2727 return ret;
2728 }
2729
2730 /* Flag that the timings must be re-applied on the next exec_op(). */
2731 if (this->hw.clk_rate)
2732 this->hw.must_apply_timings = true;
2733
2734 /* Re-init the BCH block. */
2735 ret = bch_set_geometry(this);
2736 if (ret) {
2737 dev_err(this->dev, "Error setting BCH: %d\n", ret);
2738 return ret;
2739 }
2740
2741 return 0;
2742 }
2743 #endif
2744
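/* Runtime PM only gates and ungates the controller clocks. */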
2745 static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2746 {
2747 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2748
2749 return __gpmi_enable_clk(this, false);
2750 }
2751
2752 static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2753 {
2754 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2755
2756 return __gpmi_enable_clk(this, true);
2757 }
2758
2759 static const struct dev_pm_ops gpmi_pm_ops = {
2760 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2761 SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
2762 };
2763
2764 static struct platform_driver gpmi_nand_driver = {
2765 .driver = {
2766 .name = "gpmi-nand",
2767 .pm = &gpmi_pm_ops,
2768 .of_match_table = gpmi_nand_id_table,
2769 },
2770 .probe = gpmi_nand_probe,
2771 .remove = gpmi_nand_remove,
2772 };
2773 module_platform_driver(gpmi_nand_driver);
2774
2775 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2776 MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2777 MODULE_LICENSE("GPL");