/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>

#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
#define ARCH_HAS_DMA
#endif

#ifdef ARCH_HAS_DMA
#include <mach/dma.h>
#endif

#include <linux/platform_data/mtd-nand-pxa3xx.h>

#define CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM. The largest of these is the PARAM command,
 * needing 256 bytes.
 */
#define INIT_BUFFER_SIZE	256

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)
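
/*
 * Worked example (illustrative only, derived from the definitions above):
 * a monolithic READ0 on a large-page chip with five address cycles is
 * encoded in NDCB0 as
 *
 *	NDCB0_CMD_TYPE(0) | NDCB0_ADDR_CYC(5) | NDCB0_DBC |
 *		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0
 *	= 0x00050000 | 0x00080000 | 0x00003000 | 0x00 = 0x000d3000
 *
 * i.e. the double-byte command pair 0x00/0x30 with five address cycles,
 * which is what prepare_set_command() below builds for NAND_CMD_READ0.
 */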

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel_relaxed((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl_relaxed((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	void			*info_data;

	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
	size_t			read_id_bytes;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	*pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/* cached register values */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static bool use_dma = true;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
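
/*
 * use_dma is read-only at runtime (permissions 0444): it can only be
 * set at load time, e.g. "modprobe pxa3xx_nand use_dma=0", or with
 * "pxa3xx_nand.use_dma=0" on the kernel command line when built in.
 */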

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
	{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
	{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
	{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
	{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
	{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
	{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
	{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
	{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};
/* Default flash type; used only for initial flash detection */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nanoseconds to NAND flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
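
/*
 * Worked example (with an assumed 156 MHz controller clock):
 * clk / 1000000 is 156, so ns2cycle(40, 156000000) = 40 * 156 / 1000
 * = 6 cycles. The integer division truncates, and the NDTRx_t*()
 * macros above additionally clamp the result to their field width.
 */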

static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	if (!of_id)
		return PXA3XX_NAND_VARIANT_PXA;
	return (enum pxa3xx_nand_variant)of_id->data;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = mtd->writesize;
	if (!oob_enable)
		return;

	info->oob_size = info->spare_size;
	if (!info->use_ecc)
		info->oob_size += info->ecc_size;
}
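
/*
 * Example (values taken from pxa_ecc_init() below): with 2048-byte
 * pages and BCH-4, spare_size is 32 and ecc_size is 32, so a READ0
 * with spare enabled transfers 2048 data bytes plus a 32-byte OOB
 * area, or 64 OOB bytes when the ECC engine is off, since the raw
 * ECC bytes are then read out as well.
 */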

/*
 * NOTE: ND_RUN must be set first, and only then may the command
 * buffer be written; otherwise the controller does not work.
 * We enable all interrupts at the same time and let pxa3xx_nand_irq()
 * handle the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait for the ND_RUN bit in NDCR to become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}

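/*
 * The NDCR interrupt fields are mask bits: as the two helpers below
 * show, clearing a bit enables the corresponding interrupt, while
 * setting it masks (disables) the interrupt.
 */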
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

/* drain the data FIFO; len is in units of 32-bit words */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		int timeout;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read, we have
		 * to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			__raw_readsl(info->mmio_base + NDDB, data, 8);

			for (timeout = 0;
			     !(nand_readl(info, NDSR) & NDSR_RDDREQ);
			     timeout++) {
				if (timeout >= 5) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}

				mdelay(1);
			}

			data += 32;
			len -= 8;
		}
	}

	__raw_readsl(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		__raw_writesl(info->mmio_base + NDDB,
			      info->data_buff + info->data_buff_pos,
			      DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			__raw_writesl(info->mmio_base + NDDB,
				      info->oob_buff + info->oob_buff_pos,
				      DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}

#ifdef ARCH_HAS_DMA
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}

static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR)
		info->retcode = ERR_DMABUSERR;

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
#else
static void start_data_dma(struct pxa3xx_nand_info *info)
{}
#endif

static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* transfer the data either by DMA or by PIO */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored,
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}
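
/*
 * Illustrative large-page encoding: column 0 of page 0x12345 yields
 * ndcb1 = (0x2345 << 16) | 0x0000 = 0x23450000, while ndcb2 = 0x01
 * carries the third row-address cycle.
 */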

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset the data and oob buffer positions */
	info->buf_start = 0;
	info->buf_count = 0;
	info->oob_size = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:
		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a
			 * chunked page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = 256;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = 256;
		info->data_size = 256;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;
		break;

	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;
		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "unsupported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if it
	 * has, reload the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
						 CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if it
	 * has, reload the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion is completed immediately, see below.
	 *
	 * We can do that outside the loop because the command variable
	 * is invariant and thus so is exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
						 CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff), the HW calculates its ECC
		 * as 0, which differs from the ECC information within the
		 * OOB; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just sent a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff + info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
						 CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* The command transaction has already waited for command completion */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
		const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct pxa3xx_nand_host *host = info->host[info->cs];
	uint32_t ndcr = 0x0; /* enable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512) {
		dev_err(&pdev->dev, "Only 2048 and 512 page sizes are supported\n");
		return -EINVAL;
	}

	if (f->flash_width != 16 && f->flash_width != 8) {
		dev_err(&pdev->dev, "Only 8-bit and 16-bit bus widths are supported\n");
		return -EINVAL;
	}

	/* calculate flash information */
	host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

	/* calculate addressing information */
	host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	if (f->num_blocks * f->page_per_block > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(host, f->timing);
	return 0;
}

static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	/*
	 * Hard-code chip select 0 here: keep_config is not supported
	 * when more than one chip is attached to the controller.
	 */
	struct pxa3xx_nand_host *host = info->host[0];
	uint32_t ndcr = nand_readl(info, NDCR);

	if (ndcr & NDCR_PAGE_SZ) {
		/* Controller's FIFO size */
		info->chunk_size = 2048;
		host->read_id_bytes = 4;
	} else {
		info->chunk_size = 512;
		host->read_id_bytes = 2;
	}

	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
	return 0;
}

#ifdef ARCH_HAS_DMA
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
					     &info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
					    pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->buf_size,
				  info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;

	if (info->use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->buf_size,
				  info->data_buff, info->data_buff_phys);
	} else {
		kfree(info->data_buff);
	}
}
#else
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	kfree(info->data_buff);
}
#endif

static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;

	mtd = info->host[info->cs]->mtd;
	chip = mtd->priv;

	/* configure the default timing and issue a reset to probe the chip */
	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
	if (ret)
		return ret;

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}

static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
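
/*
 * Summary of the configurations pxa_ecc_init() accepts (all values
 * copied from the branches above):
 *
 *	strength  step  page   chunk  spare  ecc  BCH
 *	    1      512   512     512      8    8   no
 *	    1      512  2048    2048     40   24   no
 *	    4      512  2048    2048     32   32   yes
 *	    4      512  4096    2048     32   32   yes
 *	    8      512  4096    1024      0   32   yes
 */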

static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	ret = pxa3xx_nand_sensing(info);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);
		return ret;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detected a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, the timing setup is probably wrong\n");
		return -EINVAL;
	}

	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
		return ret;
	}

	memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));

	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, check that
	 * we were given the right variant and then switch to the
	 * extended (aka split) command handling.
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	return nand_scan_tail(mtd);
}

static int alloc_nand_resource(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq, cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata->num_cs <= 0)
		return -ENODEV;
	info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
			    sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->pdev = pdev;
	info->variant = pxa3xx_nand_get_variant(pdev);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = (void *)&info[1] +
		      (sizeof(*mtd) + sizeof(*host)) * cs;
		chip = (struct nand_chip *)(&mtd[1]);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->mtd = mtd;
		host->cs = cs;
		host->info_data = info;
		mtd->priv = host;
		mtd->owner = THIS_MODULE;

		chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
		chip->controller = &info->controller;
		chip->waitfunc = pxa3xx_nand_waitfunc;
		chip->select_chip = pxa3xx_nand_select_chip;
		chip->read_word = pxa3xx_nand_read_word;
		chip->read_byte = pxa3xx_nand_read_byte;
		chip->read_buf = pxa3xx_nand_read_buf;
		chip->write_buf = pxa3xx_nand_write_buf;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc = nand_cmdfunc;
	}

	spin_lock_init(&chip->controller->lock);
	init_waitqueue_head(&chip->controller->wq);
	info->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		return PTR_ERR(info->clk);
	}
	ret = clk_prepare_enable(info->clk);
	if (ret < 0)
		return ret;

	if (use_dma) {
		/*
		 * This is a dirty hack to make this driver work from
		 * devicetree bindings. It can be removed once we have
		 * a proper DMA controller framework for DT.
		 */
		if (pdev->dev.of_node &&
		    of_machine_is_compatible("marvell,pxa3xx")) {
			info->drcmr_dat = 97;
			info->drcmr_cmd = 99;
		} else {
			r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
			if (r == NULL) {
				dev_err(&pdev->dev,
					"no resource defined for data DMA\n");
				ret = -ENXIO;
				goto fail_disable_clk;
			}
			info->drcmr_dat = r->start;

			r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
			if (r == NULL) {
				dev_err(&pdev->dev,
					"no resource defined for cmd DMA\n");
				ret = -ENXIO;
				goto fail_disable_clk;
			}
			info->drcmr_cmd = r->start;
		}
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_disable_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(info->mmio_base)) {
		ret = PTR_ERR(info->mmio_base);
		goto fail_disable_clk;
	}
	info->mmio_phys = r->start;

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
				   pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);

	return 0;

fail_free_buf:
	/* the IRQ was never requested on this path, so don't free it */
	kfree(info->data_buff);
fail_disable_clk:
	clk_disable_unprepare(info->clk);
	return ret;
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);
	return 0;
}

static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
		of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

	pdata->ecc_strength = of_get_nand_ecc_strength(np);
	if (pdata->ecc_strength < 0)
		pdata->ecc_strength = 0;

	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
	if (pdata->ecc_step_size < 0)
		pdata->ecc_step_size = 0;

	pdev->dev.platform_data = pdata;

	return 0;
}
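
/*
 * For reference, a minimal (hypothetical) device-tree node consumed by
 * the parser above might look like this; only the property names are
 * taken from the code and the of_mtd helpers, the unit address and the
 * values are illustrative:
 *
 *	nand@d00d0000 {
 *		compatible = "marvell,armada370-nand";
 *		num-cs = <1>;
 *		marvell,nand-keep-config;
 *		nand-on-flash-bbt;
 *		nand-ecc-strength = <4>;
 *		nand-ecc-step-size = <512>;
 *	};
 */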

static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_part_parser_data ppdata = {};
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success;

#ifndef ARCH_HAS_DMA
	if (use_dma) {
		use_dma = false;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}
#endif
	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = info->host[cs]->mtd;

		/*
		 * The mtd name matches the one used in the 'mtdparts'
		 * kernel parameter. This name cannot be changed, or the
		 * user's mtd partition configuration would break.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		ppdata.of_node = pdev->dev.of_node;
		ret = mtd_device_parse_register(mtd, NULL,
						&ppdata, pdata->parts[cs],
						pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (info->state) {
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_suspend(mtd);
	}

	return 0;
}

static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	/* We don't want to handle interrupts before mtd_resume() is called */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Set the chip select to an invalid value; the driver will then
	 * reload the timing for the current chip select at the beginning
	 * of cmdfunc().
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, NDSR is updated to 0x1800 when the nand_clk
	 * is disabled and re-enabled. To keep this from corrupting the
	 * driver's state machine, clear all status bits before resuming.
	 */
	nand_writel(info, NDSR, NDSR_MASK);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_resume(mtd);
	}

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");