root/drivers/mtd/nand/raw/lpc32xx_mlc.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. lpc32xx_ooblayout_ecc
  2. lpc32xx_ooblayout_free
  3. lpc32xx_nand_setup
  4. lpc32xx_nand_cmd_ctrl
  5. lpc32xx_nand_device_ready
  6. lpc3xxx_nand_irq
  7. lpc32xx_waitfunc_nand
  8. lpc32xx_waitfunc_controller
  9. lpc32xx_waitfunc
  10. lpc32xx_wp_enable
  11. lpc32xx_wp_disable
  12. lpc32xx_dma_complete_func
  13. lpc32xx_xmit_dma
  14. lpc32xx_read_page
  15. lpc32xx_write_page_lowlevel
  16. lpc32xx_read_oob
  17. lpc32xx_write_oob
  18. lpc32xx_ecc_enable
  19. lpc32xx_dma_setup
  20. lpc32xx_parse_dt
  21. lpc32xx_nand_attach_chip
  22. lpc32xx_nand_probe
  23. lpc32xx_nand_remove
  24. lpc32xx_nand_resume
  25. lpc32xx_nand_suspend

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * Driver for NAND MLC Controller in LPC32xx
   4  *
   5  * Author: Roland Stigge <stigge@antcom.de>
   6  *
   7  * Copyright © 2011 WORK Microwave GmbH
   8  * Copyright © 2011, 2012 Roland Stigge
   9  *
  10  * NAND Flash Controller Operation:
  11  * - Read: Auto Decode
  12  * - Write: Auto Encode
  13  * - Tested Page Sizes: 2048, 4096
  14  */
  15 
  16 #include <linux/slab.h>
  17 #include <linux/module.h>
  18 #include <linux/platform_device.h>
  19 #include <linux/mtd/mtd.h>
  20 #include <linux/mtd/rawnand.h>
  21 #include <linux/mtd/partitions.h>
  22 #include <linux/clk.h>
  23 #include <linux/err.h>
  24 #include <linux/delay.h>
  25 #include <linux/completion.h>
  26 #include <linux/interrupt.h>
  27 #include <linux/of.h>
  28 #include <linux/of_gpio.h>
  29 #include <linux/mtd/lpc32xx_mlc.h>
  30 #include <linux/io.h>
  31 #include <linux/mm.h>
  32 #include <linux/dma-mapping.h>
  33 #include <linux/dmaengine.h>
  34 #include <linux/mtd/nand_ecc.h>
  35 
  36 #define DRV_NAME "lpc32xx_mlc"
  37 
  38 /**********************************************************************
  39 * MLC NAND controller register offsets
  40 **********************************************************************/
  41 
  42 #define MLC_BUFF(x)                     (x + 0x00000)
  43 #define MLC_DATA(x)                     (x + 0x08000)
  44 #define MLC_CMD(x)                      (x + 0x10000)
  45 #define MLC_ADDR(x)                     (x + 0x10004)
  46 #define MLC_ECC_ENC_REG(x)              (x + 0x10008)
  47 #define MLC_ECC_DEC_REG(x)              (x + 0x1000C)
  48 #define MLC_ECC_AUTO_ENC_REG(x)         (x + 0x10010)
  49 #define MLC_ECC_AUTO_DEC_REG(x)         (x + 0x10014)
  50 #define MLC_RPR(x)                      (x + 0x10018)
  51 #define MLC_WPR(x)                      (x + 0x1001C)
  52 #define MLC_RUBP(x)                     (x + 0x10020)
  53 #define MLC_ROBP(x)                     (x + 0x10024)
  54 #define MLC_SW_WP_ADD_LOW(x)            (x + 0x10028)
  55 #define MLC_SW_WP_ADD_HIG(x)            (x + 0x1002C)
  56 #define MLC_ICR(x)                      (x + 0x10030)
  57 #define MLC_TIME_REG(x)                 (x + 0x10034)
  58 #define MLC_IRQ_MR(x)                   (x + 0x10038)
  59 #define MLC_IRQ_SR(x)                   (x + 0x1003C)
  60 #define MLC_LOCK_PR(x)                  (x + 0x10044)
  61 #define MLC_ISR(x)                      (x + 0x10048)
  62 #define MLC_CEH(x)                      (x + 0x1004C)
  63 
  64 /**********************************************************************
  65 * MLC_CMD bit definitions
  66 **********************************************************************/
  67 #define MLCCMD_RESET                    0xFF
  68 
  69 /**********************************************************************
  70 * MLC_ICR bit definitions
  71 **********************************************************************/
  72 #define MLCICR_WPROT                    (1 << 3)
  73 #define MLCICR_LARGEBLOCK               (1 << 2)
  74 #define MLCICR_LONGADDR                 (1 << 1)
  75 #define MLCICR_16BIT                    (1 << 0)  /* unsupported by LPC32x0! */
  76 
  77 /**********************************************************************
  78 * MLC_TIME_REG bit definitions
  79 **********************************************************************/
  80 #define MLCTIMEREG_TCEA_DELAY(n)        (((n) & 0x03) << 24)
  81 #define MLCTIMEREG_BUSY_DELAY(n)        (((n) & 0x1F) << 19)
  82 #define MLCTIMEREG_NAND_TA(n)           (((n) & 0x07) << 16)
  83 #define MLCTIMEREG_RD_HIGH(n)           (((n) & 0x0F) << 12)
  84 #define MLCTIMEREG_RD_LOW(n)            (((n) & 0x0F) << 8)
  85 #define MLCTIMEREG_WR_HIGH(n)           (((n) & 0x0F) << 4)
  86 #define MLCTIMEREG_WR_LOW(n)            (((n) & 0x0F) << 0)
  87 
  88 /**********************************************************************
  89 * MLC_IRQ_MR and MLC_IRQ_SR bit definitions
  90 **********************************************************************/
  91 #define MLCIRQ_NAND_READY               (1 << 5)
  92 #define MLCIRQ_CONTROLLER_READY         (1 << 4)
  93 #define MLCIRQ_DECODE_FAILURE           (1 << 3)
  94 #define MLCIRQ_DECODE_ERROR             (1 << 2)
  95 #define MLCIRQ_ECC_READY                (1 << 1)
  96 #define MLCIRQ_WRPROT_FAULT             (1 << 0)
  97 
  98 /**********************************************************************
  99 * MLC_LOCK_PR bit definitions
 100 **********************************************************************/
 101 #define MLCLOCKPR_MAGIC                 0xA25E
 102 
 103 /**********************************************************************
 104 * MLC_ISR bit definitions
 105 **********************************************************************/
 106 #define MLCISR_DECODER_FAILURE          (1 << 6)
 107 #define MLCISR_ERRORS                   ((1 << 4) | (1 << 5))
 108 #define MLCISR_ERRORS_DETECTED          (1 << 3)
 109 #define MLCISR_ECC_READY                (1 << 2)
 110 #define MLCISR_CONTROLLER_READY         (1 << 1)
 111 #define MLCISR_NAND_READY               (1 << 0)
 112 
 113 /**********************************************************************
 114 * MLC_CEH bit definitions
 115 **********************************************************************/
 116 #define MLCCEH_NORMAL                   (1 << 0)
 117 
/*
 * Per-chip configuration, parsed from the device tree by lpc32xx_parse_dt().
 * The seven timing fields are used by lpc32xx_nand_setup() as divisors of
 * the MLC block clock rate when programming MLC_TIME_REG.
 */
struct lpc32xx_nand_cfg_mlc {
	uint32_t tcea_delay;	/* -> MLCTIMEREG_TCEA_DELAY */
	uint32_t busy_delay;	/* -> MLCTIMEREG_BUSY_DELAY */
	uint32_t nand_ta;	/* -> MLCTIMEREG_NAND_TA */
	uint32_t rd_high;	/* -> MLCTIMEREG_RD_HIGH */
	uint32_t rd_low;	/* -> MLCTIMEREG_RD_LOW */
	uint32_t wr_high;	/* -> MLCTIMEREG_WR_HIGH */
	uint32_t wr_low;	/* -> MLCTIMEREG_WR_LOW */
	int wp_gpio;		/* write-protect GPIO; may be invalid (optional) */
	struct mtd_partition *parts;	/* passed to mtd_device_register() */
	unsigned num_parts;
};
 130 
 131 static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
 132                                  struct mtd_oob_region *oobregion)
 133 {
 134         struct nand_chip *nand_chip = mtd_to_nand(mtd);
 135 
 136         if (section >= nand_chip->ecc.steps)
 137                 return -ERANGE;
 138 
 139         oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
 140         oobregion->length = nand_chip->ecc.bytes;
 141 
 142         return 0;
 143 }
 144 
 145 static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
 146                                   struct mtd_oob_region *oobregion)
 147 {
 148         struct nand_chip *nand_chip = mtd_to_nand(mtd);
 149 
 150         if (section >= nand_chip->ecc.steps)
 151                 return -ERANGE;
 152 
 153         oobregion->offset = 16 * section;
 154         oobregion->length = 16 - nand_chip->ecc.bytes;
 155 
 156         return 0;
 157 }
 158 
/* mtd_ooblayout_ops installed on the MTD in lpc32xx_nand_attach_chip() */
static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
	.ecc = lpc32xx_ooblayout_ecc,
	.free = lpc32xx_ooblayout_free,
};
 163 
/*
 * Bad-block table descriptors: the BBT lives at fixed absolute pages
 * (NAND_BBT_ABSPAGE) with no OOB marker, since the MLC controller's
 * automatic ECC occupies the OOB area.  The page numbers assume a
 * specific device size — NOTE(review): confirm against the target flash.
 */
static struct nand_bbt_descr lpc32xx_nand_bbt = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};
 175 
/* Driver-private state, one instance per probed controller. */
struct lpc32xx_nand_host {
	struct platform_device	*pdev;
	struct nand_chip	nand_chip;
	struct lpc32xx_mlc_platform_data *pdata;	/* supplies dma_filter */
	struct clk		*clk;
	void __iomem		*io_base;	/* mapped MLC register window */
	int			irq;
	struct lpc32xx_nand_cfg_mlc	*ncfg;	/* DT-parsed configuration */
	struct completion	comp_nand;	/* signalled on MLCIRQ_NAND_READY */
	struct completion	comp_controller; /* signalled on MLCIRQ_CONTROLLER_READY */
	uint32_t llptr;
	/*
	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	dma_addr_t		oob_buf_phy;
	/*
	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	uint8_t			*oob_buf;
	/* Physical address of DMA base address */
	dma_addr_t		io_base_phy;

	struct completion	comp_dma;	/* signalled by DMA callback */
	struct dma_chan		*dma_chan;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;
	uint8_t			*dma_buf;	/* bounce buffer for highmem pages */
	uint8_t			*dummy_buf;	/* scratch page for read_oob */
	int			mlcsubpages; /* number of 512bytes-subpages */
};
 206 
/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512 byte subpages
 * instead of doing readl() / writel() in a loop slows it down significantly.
 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * This applies to the transfer itself. In the DMA case: only the
 * wait_for_completion() (DMA setup _not_ included).
 *
 * Note that the 512 bytes subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
 * 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND
 * controller transferring data between its internal buffer to/from the NAND
 * chip.)
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 *
 */
/* 0 = PIO transfers (default), non-zero = use the PL080 DMA path */
static int use_dma;
 231 
/*
 * One-time hardware init: reset the MLC controller, program geometry
 * (large block / 5-byte address), derive the timing register from the
 * block clock rate and the DT-supplied divisors, and unmask the READY
 * interrupts.  Register write order matters (MLC_ICR and MLC_TIME_REG
 * must each be unlocked via MLC_LOCK_PR immediately beforehand).
 */
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset MLC controller */
	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
	udelay(1000);

	/* Get base clock for MLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = 104000000;	/* fallback if the clock rate is unknown */

	/* Unlock MLC_ICR
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Configure MLC Controller: Large Block, 5 Byte Address */
	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
	writel(tmp, MLC_ICR(host->io_base));

	/* Unlock MLC_TIME_REG
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Compute clock setup values, see LPC and NAND manual */
	tmp = 0;
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
	writel(tmp, MLC_TIME_REG(host->io_base));

	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
			MLC_IRQ_MR(host->io_base));

	/* Normal nCE operation: nCE controlled by controller */
	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}
 275 
 276 /*
 277  * Hardware specific access to control lines
 278  */
 279 static void lpc32xx_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
 280                                   unsigned int ctrl)
 281 {
 282         struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
 283 
 284         if (cmd != NAND_CMD_NONE) {
 285                 if (ctrl & NAND_CLE)
 286                         writel(cmd, MLC_CMD(host->io_base));
 287                 else
 288                         writel(cmd, MLC_ADDR(host->io_base));
 289         }
 290 }
 291 
 292 /*
 293  * Read Device Ready (NAND device _and_ controller ready)
 294  */
 295 static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
 296 {
 297         struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
 298 
 299         if ((readb(MLC_ISR(host->io_base)) &
 300              (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
 301             (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
 302                 return  1;
 303 
 304         return 0;
 305 }
 306 
 307 static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
 308 {
 309         uint8_t sr;
 310 
 311         /* Clear interrupt flag by reading status */
 312         sr = readb(MLC_IRQ_SR(host->io_base));
 313         if (sr & MLCIRQ_NAND_READY)
 314                 complete(&host->comp_nand);
 315         if (sr & MLCIRQ_CONTROLLER_READY)
 316                 complete(&host->comp_controller);
 317 
 318         return IRQ_HANDLED;
 319 }
 320 
 321 static int lpc32xx_waitfunc_nand(struct nand_chip *chip)
 322 {
 323         struct mtd_info *mtd = nand_to_mtd(chip);
 324         struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 325 
 326         if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
 327                 goto exit;
 328 
 329         wait_for_completion(&host->comp_nand);
 330 
 331         while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
 332                 /* Seems to be delayed sometimes by controller */
 333                 dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
 334                 cpu_relax();
 335         }
 336 
 337 exit:
 338         return NAND_STATUS_READY;
 339 }
 340 
 341 static int lpc32xx_waitfunc_controller(struct nand_chip *chip)
 342 {
 343         struct mtd_info *mtd = nand_to_mtd(chip);
 344         struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 345 
 346         if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
 347                 goto exit;
 348 
 349         wait_for_completion(&host->comp_controller);
 350 
 351         while (!(readb(MLC_ISR(host->io_base)) &
 352                  MLCISR_CONTROLLER_READY)) {
 353                 dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
 354                 cpu_relax();
 355         }
 356 
 357 exit:
 358         return NAND_STATUS_READY;
 359 }
 360 
 361 static int lpc32xx_waitfunc(struct nand_chip *chip)
 362 {
 363         lpc32xx_waitfunc_nand(chip);
 364         lpc32xx_waitfunc_controller(chip);
 365 
 366         return NAND_STATUS_READY;
 367 }
 368 
 369 /*
 370  * Enable NAND write protect
 371  */
 372 static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
 373 {
 374         if (gpio_is_valid(host->ncfg->wp_gpio))
 375                 gpio_set_value(host->ncfg->wp_gpio, 0);
 376 }
 377 
 378 /*
 379  * Disable NAND write protect
 380  */
 381 static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
 382 {
 383         if (gpio_is_valid(host->ncfg->wp_gpio))
 384                 gpio_set_value(host->ncfg->wp_gpio, 1);
 385 }
 386 
 387 static void lpc32xx_dma_complete_func(void *completion)
 388 {
 389         complete(completion);
 390 }
 391 
 392 static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
 393                             enum dma_transfer_direction dir)
 394 {
 395         struct nand_chip *chip = mtd_to_nand(mtd);
 396         struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 397         struct dma_async_tx_descriptor *desc;
 398         int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 399         int res;
 400 
 401         sg_init_one(&host->sgl, mem, len);
 402 
 403         res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
 404                          DMA_BIDIRECTIONAL);
 405         if (res != 1) {
 406                 dev_err(mtd->dev.parent, "Failed to map sg list\n");
 407                 return -ENXIO;
 408         }
 409         desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
 410                                        flags);
 411         if (!desc) {
 412                 dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
 413                 goto out1;
 414         }
 415 
 416         init_completion(&host->comp_dma);
 417         desc->callback = lpc32xx_dma_complete_func;
 418         desc->callback_param = &host->comp_dma;
 419 
 420         dmaengine_submit(desc);
 421         dma_async_issue_pending(host->dma_chan);
 422 
 423         wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
 424 
 425         dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
 426                      DMA_BIDIRECTIONAL);
 427         return 0;
 428 out1:
 429         dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
 430                      DMA_BIDIRECTIONAL);
 431         return -ENXIO;
 432 }
 433 
/*
 * Read one page with hardware ECC ("Auto Decode").
 *
 * For each 512-byte subpage: trigger the decoder, wait for the controller,
 * record ECC statistics from MLC_ISR, then drain 512 data bytes (DMA or
 * PIO) plus 16 OOB bytes (always PIO) from the controller FIFO.
 *
 * Always returns 0; uncorrectable errors are only reported via
 * mtd->ecc_stats.failed.  A DMA transfer error is returned as-is.
 */
static int lpc32xx_read_page(struct nand_chip *chip, uint8_t *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, j;
	uint8_t *oobbuf = chip->oob_poi;
	uint32_t mlc_isr;
	int res;
	uint8_t *dma_buf;
	bool dma_mapped;

	/*
	 * Lowmem buffers can be DMA-mapped directly; anything else is
	 * bounced through host->dma_buf and copied out at the end.
	 */
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->dma_buf;
		dma_mapped = false;
	}

	/* Writing Command and Address */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* For all sub-pages */
	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Auto Decode Command */
		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(chip);

		/* Check ECC Error status */
		mlc_isr = readl(MLC_ISR(host->io_base));
		if (mlc_isr & MLCISR_DECODER_FAILURE) {
			mtd->ecc_stats.failed++;
			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
			/* Bits [5:4] + 1 = number of corrected symbols */
			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
		}

		/* Read 512 + 16 Bytes */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_DEV_TO_MEM);
			if (res)
				return res;
		} else {
			/* PIO: 128 32-bit reads per subpage; advances buf */
			for (j = 0; j < (512 >> 2); j++) {
				*((uint32_t *)(buf)) =
					readl(MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		/* OOB is always transferred by PIO (4 words per subpage) */
		for (j = 0; j < (16 >> 2); j++) {
			*((uint32_t *)(oobbuf)) =
				readl(MLC_BUFF(host->io_base));
			oobbuf += 4;
		}
	}

	/* Copy the bounce buffer back for non-mappable destinations */
	if (use_dma && !dma_mapped)
		memcpy(buf, dma_buf, mtd->writesize);

	return 0;
}
 499 
/*
 * Write one page with hardware ECC ("Auto Encode").
 *
 * For each 512-byte subpage: start the encoder, push 512 data bytes (DMA
 * or PIO) plus the first 6 user OOB bytes into the FIFO (the remaining 10
 * OOB bytes per subpage are generated by the hardware ECC encoder), then
 * trigger Auto Encode and wait for the controller.
 */
static int lpc32xx_write_page_lowlevel(struct nand_chip *chip,
				       const uint8_t *buf, int oob_required,
				       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	const uint8_t *oobbuf = chip->oob_poi;
	uint8_t *dma_buf = (uint8_t *)buf;
	int res;
	int i, j;

	/* Highmem source buffers are bounced through host->dma_buf */
	if (use_dma && (void *)buf >= high_memory) {
		dma_buf = host->dma_buf;
		memcpy(dma_buf, buf, mtd->writesize);
	}

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Encode */
		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

		/* Write 512 + 6 Bytes to Buffer */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_MEM_TO_DEV);
			if (res)
				return res;
		} else {
			/* PIO: 128 32-bit writes per subpage; advances buf */
			for (j = 0; j < (512 >> 2); j++) {
				writel(*((uint32_t *)(buf)),
				       MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		/* 4 + 2 = 6 user OOB bytes; skip to the next 16-byte chunk */
		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 4;
		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 12;

		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(chip);
	}

	return nand_prog_page_end_op(chip);
}
 549 
 550 static int lpc32xx_read_oob(struct nand_chip *chip, int page)
 551 {
 552         struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 553 
 554         /* Read whole page - necessary with MLC controller! */
 555         lpc32xx_read_page(chip, host->dummy_buf, 1, page);
 556 
 557         return 0;
 558 }
 559 
 560 static int lpc32xx_write_oob(struct nand_chip *chip, int page)
 561 {
 562         /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
 563         return 0;
 564 }
 565 
/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct nand_chip *chip, int mode)
{
	/* Always enabled! Hardware ECC cannot be switched off on the MLC. */
}
 571 
/*
 * Acquire and configure a DMA channel for FIFO transfers.
 *
 * Requires platform data with a dma_filter callback; both source and
 * destination slave addresses point at the MLC data FIFO (MLC_BUFF),
 * the memory side is described per-transfer in lpc32xx_xmit_dma().
 *
 * Returns 0 on success, -ENOENT/-EBUSY/-ENXIO on failure.
 */
static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-mlc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	/*
	 * Set direction to a sensible value even if the dmaengine driver
	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
	 * driver criticizes it as "alien transfer direction".
	 */
	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 128;
	host->dma_slave_config.dst_maxburst = 128;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		goto out1;
	}

	return 0;
out1:
	dma_release_channel(host->dma_chan);
	return -ENXIO;
}
 615 
 616 static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
 617 {
 618         struct lpc32xx_nand_cfg_mlc *ncfg;
 619         struct device_node *np = dev->of_node;
 620 
 621         ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
 622         if (!ncfg)
 623                 return NULL;
 624 
 625         of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
 626         of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
 627         of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
 628         of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
 629         of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
 630         of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
 631         of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
 632 
 633         if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
 634             !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
 635             !ncfg->wr_low) {
 636                 dev_err(dev, "chip parameters not specified correctly\n");
 637                 return NULL;
 638         }
 639 
 640         ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
 641 
 642         return ncfg;
 643 }
 644 
 645 static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
 646 {
 647         struct mtd_info *mtd = nand_to_mtd(chip);
 648         struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 649         struct device *dev = &host->pdev->dev;
 650 
 651         host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
 652         if (!host->dma_buf)
 653                 return -ENOMEM;
 654 
 655         host->dummy_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
 656         if (!host->dummy_buf)
 657                 return -ENOMEM;
 658 
 659         chip->ecc.mode = NAND_ECC_HW;
 660         chip->ecc.size = 512;
 661         mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
 662         host->mlcsubpages = mtd->writesize / 512;
 663 
 664         return 0;
 665 }
 666 
/* Controller hooks handed to the NAND core via the dummy controller */
static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
	.attach_chip = lpc32xx_nand_attach_chip,
};
 670 
/*
 * Probe for NAND controller
 *
 * Order: map registers, parse DT config, claim the optional WP GPIO,
 * enable the clock, initialize the controller, optionally set up DMA,
 * hook the IRQ, then scan and register the chip.  Errors unwind in
 * reverse via the goto labels at the bottom.
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->pdev = pdev;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	/* Physical base is needed later as the DMA slave address */
	host->io_base_phy = rc->start;

	nand_chip = &host->nand_chip;
	mtd = nand_to_mtd(nand_chip);
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	/* WP GPIO may not be ready yet — defer rather than run unprotected */
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
			gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	/* link the private data structures */
	nand_set_controller_data(nand_chip, host);
	nand_set_flash_node(nand_chip, pdev->dev.of_node);
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock initialization failure\n");
		res = -ENOENT;
		goto free_gpio;
	}
	res = clk_prepare_enable(host->clk);
	if (res)
		goto put_clk;

	nand_chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->legacy.dev_ready = lpc32xx_nand_device_ready;
	nand_chip->legacy.chip_delay = 25; /* us */
	nand_chip->legacy.IO_ADDR_R = MLC_DATA(host->io_base);
	nand_chip->legacy.IO_ADDR_W = MLC_DATA(host->io_base);

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* Initialize function pointers */
	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
	nand_chip->ecc.read_page = lpc32xx_read_page;
	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_oob = lpc32xx_write_oob;
	nand_chip->ecc.read_oob = lpc32xx_read_oob;
	nand_chip->ecc.strength = 4;
	nand_chip->ecc.bytes = 10;	/* ECC bytes per 512-byte subpage */
	nand_chip->legacy.waitfunc = lpc32xx_waitfunc;

	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
	/* BBT in flash, no OOB markers (OOB is owned by the HW ECC) */
	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	nand_chip->bbt_td = &lpc32xx_nand_bbt;
	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

	if (use_dma) {
		res = lpc32xx_dma_setup(host);
		if (res) {
			res = -EIO;
			goto unprepare_clk;
		}
	}

	/* initially clear interrupt status */
	readb(MLC_IRQ_SR(host->io_base));

	init_completion(&host->comp_nand);
	init_completion(&host->comp_controller);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		dev_err(&pdev->dev, "failed to get platform irq\n");
		res = -EINVAL;
		goto release_dma_chan;
	}

	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
		res = -ENXIO;
		goto release_dma_chan;
	}

	/*
	 * Scan to find existence of the device and get the type of NAND device:
	 * SMALL block or LARGE block.
	 */
	nand_chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
	res = nand_scan(nand_chip, 1);
	if (res)
		goto free_irq;

	mtd->name = DRV_NAME;

	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (res)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand_chip);
free_irq:
	free_irq(host->irq, host);
release_dma_chan:
	if (use_dma)
		dma_release_channel(host->dma_chan);
unprepare_clk:
	clk_disable_unprepare(host->clk);
put_clk:
	clk_put(host->clk);
free_gpio:
	/* Re-assert write protect before releasing the GPIO */
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return res;
}
 823 
 824 /*
 825  * Remove NAND device
 826  */
 827 static int lpc32xx_nand_remove(struct platform_device *pdev)
 828 {
 829         struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
 830 
 831         nand_release(&host->nand_chip);
 832         free_irq(host->irq, host);
 833         if (use_dma)
 834                 dma_release_channel(host->dma_chan);
 835 
 836         clk_disable_unprepare(host->clk);
 837         clk_put(host->clk);
 838 
 839         lpc32xx_wp_enable(host);
 840         gpio_free(host->ncfg->wp_gpio);
 841 
 842         return 0;
 843 }
 844 
 845 #ifdef CONFIG_PM
 846 static int lpc32xx_nand_resume(struct platform_device *pdev)
 847 {
 848         struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
 849         int ret;
 850 
 851         /* Re-enable NAND clock */
 852         ret = clk_prepare_enable(host->clk);
 853         if (ret)
 854                 return ret;
 855 
 856         /* Fresh init of NAND controller */
 857         lpc32xx_nand_setup(host);
 858 
 859         /* Disable write protect */
 860         lpc32xx_wp_disable(host);
 861 
 862         return 0;
 863 }
 864 
 865 static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
 866 {
 867         struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
 868 
 869         /* Enable write protect for safety */
 870         lpc32xx_wp_enable(host);
 871 
 872         /* Disable clock */
 873         clk_disable_unprepare(host->clk);
 874         return 0;
 875 }
 876 
 877 #else
 878 #define lpc32xx_nand_resume NULL
 879 #define lpc32xx_nand_suspend NULL
 880 #endif
 881 
/* Devicetree match table: binds to the MLC NAND controller node. */
static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-mlc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
 887 
/*
 * Platform driver glue. Uses the legacy platform suspend/resume
 * callbacks; they compile to NULL when CONFIG_PM is not set.
 */
static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= DRV_NAME,
		.of_match_table = lpc32xx_nand_match,
	},
};
 898 
 899 module_platform_driver(lpc32xx_nand_driver);
 900 
 901 MODULE_LICENSE("GPL");
 902 MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
 903 MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");

/* [<][>][^][v][top][bottom][index][help] */