root/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c


DEFINITIONS

This source file includes the following definitions.
  1. clear_poll_bit
  2. gpmi_reset_block
  3. __gpmi_enable_clk
  4. gpmi_init
  5. gpmi_dump_info
  6. gpmi_check_ecc
  7. set_geometry_by_ecc_info
  8. get_ecc_strength
  9. legacy_set_geometry
  10. common_nfc_set_geometry
  11. bch_set_geometry
  12. gpmi_nfc_compute_timings
  13. gpmi_nfc_apply_timings
  14. gpmi_setup_data_interface
  15. gpmi_clear_bch
  16. get_dma_chan
  17. dma_irq_callback
  18. bch_irq
  19. gpmi_raw_len_to_len
  20. prepare_data_dma
  21. gpmi_copy_bits
  22. gpmi_ooblayout_ecc
  23. gpmi_ooblayout_free
  24. acquire_register_block
  25. acquire_bch_irq
  26. release_dma_channels
  27. acquire_dma_channels
  28. gpmi_get_clks
  29. acquire_resources
  30. release_resources
  31. gpmi_free_dma_buffer
  32. gpmi_alloc_dma_buffer
  33. block_mark_swapping
  34. gpmi_count_bitflips
  35. gpmi_bch_layout_std
  36. gpmi_ecc_read_page
  37. gpmi_ecc_read_subpage
  38. gpmi_ecc_write_page
  39. gpmi_ecc_read_oob
  40. gpmi_ecc_write_oob
  41. gpmi_ecc_read_page_raw
  42. gpmi_ecc_write_page_raw
  43. gpmi_ecc_read_oob_raw
  44. gpmi_ecc_write_oob_raw
  45. gpmi_block_markbad
  46. nand_boot_set_geometry
  47. mx23_check_transcription_stamp
  48. mx23_write_transcription_stamp
  49. mx23_boot_init
  50. nand_boot_init
  51. gpmi_set_geometry
  52. gpmi_init_last
  53. gpmi_nand_attach_chip
  54. get_next_transfer
  55. gpmi_chain_command
  56. gpmi_chain_wait_ready
  57. gpmi_chain_data_read
  58. gpmi_chain_data_write
  59. gpmi_nfc_exec_op
  60. gpmi_nand_init
  61. gpmi_nand_probe
  62. gpmi_nand_remove
  63. gpmi_pm_suspend
  64. gpmi_pm_resume
  65. gpmi_runtime_suspend
  66. gpmi_runtime_resume

   1 // SPDX-License-Identifier: GPL-2.0+
   2 /*
   3  * Freescale GPMI NAND Flash Driver
   4  *
   5  * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
   6  * Copyright (C) 2008 Embedded Alley Solutions, Inc.
   7  */
   8 #include <linux/clk.h>
   9 #include <linux/delay.h>
  10 #include <linux/slab.h>
  11 #include <linux/sched/task_stack.h>
  12 #include <linux/interrupt.h>
  13 #include <linux/module.h>
  14 #include <linux/mtd/partitions.h>
  15 #include <linux/of.h>
  16 #include <linux/of_device.h>
  17 #include <linux/pm_runtime.h>
  18 #include <linux/dma/mxs-dma.h>
  19 #include "gpmi-nand.h"
  20 #include "gpmi-regs.h"
  21 #include "bch-regs.h"
  22 
  23 /* Resource names for the GPMI NAND driver. */
  24 #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
  25 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
  26 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
  27 
  28 /* Converts time to clock cycles */
  29 #define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
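     /*
      * For illustration (hypothetical numbers): with a 100MHz GPMI clock the
      * period is 10000ps, so a 12000ps setup time converts to
      * TO_CYCLES(12000, 10000) = DIV_ROUND_UP_ULL(12000, 10000) = 2 cycles.
      */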
  30 
  31 #define MXS_SET_ADDR            0x4
  32 #define MXS_CLR_ADDR            0x8
  33 /*
  34  * Clear the bit and poll until it is cleared.  This is usually called
  35  * with a reset address and a mask of either SFTRST (bit 31) or CLKGATE
  36  * (bit 30).
  37  */
  38 static int clear_poll_bit(void __iomem *addr, u32 mask)
  39 {
  40         int timeout = 0x400;
  41 
  42         /* clear the bit */
  43         writel(mask, addr + MXS_CLR_ADDR);
  44 
  45         /*
  46          * SFTRST needs 3 GPMI clocks to settle, the reference manual
  47          * recommends waiting 1us.
  48          */
  49         udelay(1);
  50 
  51         /* poll the bit becoming clear */
  52         while ((readl(addr) & mask) && --timeout)
  53                 /* nothing */;
  54 
  55         return !timeout;
  56 }
  57 
  58 #define MODULE_CLKGATE          (1 << 30)
  59 #define MODULE_SFTRST           (1 << 31)
  60 /*
  61  * The current mxs_reset_block() will do two things:
  62  *  [1] enable the module.
  63  *  [2] reset the module.
  64  *
  65  * In most of the cases, it's ok.
  66  * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
  67  * If you try to soft reset the BCH block, it becomes unusable until
  68  * the next hard reset. This case occurs in the NAND boot mode. When the board
  69  * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
  70  * So if the driver tries to reset the BCH again, the BCH will not work anymore.
  71  * You will see a DMA timeout in this case. The bug has been fixed
  72  * in the following chips, such as MX28.
  73  *
  74  * To avoid this bug, we add a new parameter `just_enable` to
  75  * mxs_reset_block(), and rewrite it here.
  76  */
  77 static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
  78 {
  79         int ret;
  80         int timeout = 0x400;
  81 
  82         /* clear and poll SFTRST */
  83         ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
  84         if (unlikely(ret))
  85                 goto error;
  86 
  87         /* clear CLKGATE */
  88         writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
  89 
  90         if (!just_enable) {
  91                 /* set SFTRST to reset the block */
  92                 writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
  93                 udelay(1);
  94 
  95                 /* poll CLKGATE becoming set */
  96                 while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
  97                         /* nothing */;
  98                 if (unlikely(!timeout))
  99                         goto error;
 100         }
 101 
 102         /* clear and poll SFTRST */
 103         ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
 104         if (unlikely(ret))
 105                 goto error;
 106 
 107         /* clear and poll CLKGATE */
 108         ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
 109         if (unlikely(ret))
 110                 goto error;
 111 
 112         return 0;
 113 
 114 error:
 115         pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
 116         return -ETIMEDOUT;
 117 }
 118 
 119 static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
 120 {
 121         struct clk *clk;
 122         int ret;
 123         int i;
 124 
 125         for (i = 0; i < GPMI_CLK_MAX; i++) {
 126                 clk = this->resources.clock[i];
 127                 if (!clk)
 128                         break;
 129 
 130                 if (v) {
 131                         ret = clk_prepare_enable(clk);
 132                         if (ret)
 133                                 goto err_clk;
 134                 } else {
 135                         clk_disable_unprepare(clk);
 136                 }
 137         }
 138         return 0;
 139 
 140 err_clk:
 141         for (; i > 0; i--)
 142                 clk_disable_unprepare(this->resources.clock[i - 1]);
 143         return ret;
 144 }
 145 
 146 static int gpmi_init(struct gpmi_nand_data *this)
 147 {
 148         struct resources *r = &this->resources;
 149         int ret;
 150 
 151         ret = pm_runtime_get_sync(this->dev);
 152         if (ret < 0)
 153                 return ret;
 154 
 155         ret = gpmi_reset_block(r->gpmi_regs, false);
 156         if (ret)
 157                 goto err_out;
 158 
 159         /*
 160          * Reset BCH here, too. We got failures otherwise :(
 161          * See later BCH reset for explanation of MX23 and MX28 handling
 162          */
 163         ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
 164         if (ret)
 165                 goto err_out;
 166 
 167         /* Choose NAND mode. */
 168         writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
 169 
 170         /* Set the IRQ polarity. */
 171         writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
 172                                 r->gpmi_regs + HW_GPMI_CTRL1_SET);
 173 
 174         /* Disable Write-Protection. */
 175         writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 176 
 177         /* Select BCH ECC. */
 178         writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 179 
 180         /*
 181          * Decouple the chip select from dma channel. We use dma0 for all
 182          * the chips.
 183          */
 184         writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 185 
 186 err_out:
 187         pm_runtime_mark_last_busy(this->dev);
 188         pm_runtime_put_autosuspend(this->dev);
 189         return ret;
 190 }
 191 
 192 /* This function is very useful. It is called only when a bug occurs. */
 193 static void gpmi_dump_info(struct gpmi_nand_data *this)
 194 {
 195         struct resources *r = &this->resources;
 196         struct bch_geometry *geo = &this->bch_geometry;
 197         u32 reg;
 198         int i;
 199 
 200         dev_err(this->dev, "Show GPMI registers :\n");
 201         for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
 202                 reg = readl(r->gpmi_regs + i * 0x10);
 203                 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
 204         }
 205 
 206         /* start to print out the BCH info */
 207         dev_err(this->dev, "Show BCH registers :\n");
 208         for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
 209                 reg = readl(r->bch_regs + i * 0x10);
 210                 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
 211         }
 212         dev_err(this->dev, "BCH Geometry :\n"
 213                 "GF length              : %u\n"
 214                 "ECC Strength           : %u\n"
 215                 "Page Size in Bytes     : %u\n"
 216                 "Metadata Size in Bytes : %u\n"
 217                 "ECC Chunk Size in Bytes: %u\n"
 218                 "ECC Chunk Count        : %u\n"
 219                 "Payload Size in Bytes  : %u\n"
 220                 "Auxiliary Size in Bytes: %u\n"
 221                 "Auxiliary Status Offset: %u\n"
 222                 "Block Mark Byte Offset : %u\n"
 223                 "Block Mark Bit Offset  : %u\n",
 224                 geo->gf_len,
 225                 geo->ecc_strength,
 226                 geo->page_size,
 227                 geo->metadata_size,
 228                 geo->ecc_chunk_size,
 229                 geo->ecc_chunk_count,
 230                 geo->payload_size,
 231                 geo->auxiliary_size,
 232                 geo->auxiliary_status_offset,
 233                 geo->block_mark_byte_offset,
 234                 geo->block_mark_bit_offset);
 235 }
 236 
 237 static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
 238 {
 239         struct bch_geometry *geo = &this->bch_geometry;
 240 
 241         /* Do the sanity check. */
 242         if (GPMI_IS_MXS(this)) {
 243                 /* The mx23/mx28 only support the GF13. */
 244                 if (geo->gf_len == 14)
 245                         return false;
 246         }
 247         return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
 248 }
 249 
 250 /*
 251  * If we can get the ECC information from the nand chip, we do not
 252  * need to calculate it ourselves.
 253  *
 254  * We may have some free OOB space in this case.
 255  */
 256 static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
 257                                     unsigned int ecc_strength,
 258                                     unsigned int ecc_step)
 259 {
 260         struct bch_geometry *geo = &this->bch_geometry;
 261         struct nand_chip *chip = &this->nand;
 262         struct mtd_info *mtd = nand_to_mtd(chip);
 263         unsigned int block_mark_bit_offset;
 264 
 265         switch (ecc_step) {
 266         case SZ_512:
 267                 geo->gf_len = 13;
 268                 break;
 269         case SZ_1K:
 270                 geo->gf_len = 14;
 271                 break;
 272         default:
 273                 dev_err(this->dev,
 274                         "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
 275                         chip->base.eccreq.strength,
 276                         chip->base.eccreq.step_size);
 277                 return -EINVAL;
 278         }
 279         geo->ecc_chunk_size = ecc_step;
 280         geo->ecc_strength = round_up(ecc_strength, 2);
 281         if (!gpmi_check_ecc(this))
 282                 return -EINVAL;
 283 
 284         /* Keep the C >= O */
 285         if (geo->ecc_chunk_size < mtd->oobsize) {
 286                 dev_err(this->dev,
 287                         "unsupported nand chip. ecc size: %d, oob size : %d\n",
 288                         ecc_step, mtd->oobsize);
 289                 return -EINVAL;
 290         }
 291 
 292         /* The default value, see comment in the legacy_set_geometry(). */
 293         geo->metadata_size = 10;
 294 
 295         geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
 296 
 297         /*
 298          * Now, the NAND chip with 2K page(data chunk is 512byte) shows below:
 299          *
 300          *    |                          P                            |
 301          *    |<----------------------------------------------------->|
 302          *    |                                                       |
 303          *    |                                        (Block Mark)   |
 304          *    |                      P'                      |      | |     |
 305          *    |<-------------------------------------------->|  D   | |  O' |
 306          *    |                                              |<---->| |<--->|
 307          *    V                                              V      V V     V
 308          *    +---+----------+-+----------+-+----------+-+----------+-+-----+
 309          *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
 310          *    +---+----------+-+----------+-+----------+-+----------+-+-----+
 311          *                                                   ^              ^
 312          *                                                   |      O       |
 313          *                                                   |<------------>|
 314          *                                                   |              |
 315          *
 316          *      P : the page size for BCH module.
 317          *      E : The ECC strength.
 318  *      G : the length of the Galois Field.
 319  *      N : the chunk count per page.
 320  *      M : the metadata size per page.
 321          *      C : the ecc chunk size, aka the "data" above.
 322          *      P': the nand chip's page size.
 323          *      O : the nand chip's oob size.
 324          *      O': the free oob.
 325          *
 326          *      The formula for P is :
 327          *
 328          *                  E * G * N
 329          *             P = ------------ + P' + M
 330          *                      8
 331          *
 332          * The position of block mark moves forward in the ECC-based view
 333          * of page, and the delta is:
 334          *
 335          *                   E * G * (N - 1)
 336          *             D = (---------------- + M)
 337          *                          8
 338          *
 339          * Please see the comment in legacy_set_geometry().
 340          * With the condition C >= O, we can still get the same result.
 341          * So the bit position of the physical block mark within the ECC-based
 342          * view of the page is :
 343          *             (P' - D) * 8
 344          */
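             /*
              * For illustration only (hypothetical numbers): a 2KiB page
              * (P' = 2048) with O = 64, 512-byte chunks (C = 512, N = 4),
              * E = 8, G = 13 and M = 10 gives
              *
              *     P = (8 * 13 * 4) / 8 + 2048 + 10 = 2110
              *     D = (8 * 13 * 3) / 8 + 10        = 49
              *
              * so the block mark sits at bit (2048 - 49) * 8 = 15992 of the
              * ECC-based view, i.e. byte offset 1999, bit offset 0.
              */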
 345         geo->page_size = mtd->writesize + geo->metadata_size +
 346                 (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
 347 
 348         geo->payload_size = mtd->writesize;
 349 
 350         geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
 351         geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
 352                                 + ALIGN(geo->ecc_chunk_count, 4);
 353 
 354         if (!this->swap_block_mark)
 355                 return 0;
 356 
 357         /* For bit swap. */
 358         block_mark_bit_offset = mtd->writesize * 8 -
 359                 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
 360                                 + geo->metadata_size * 8);
 361 
 362         geo->block_mark_byte_offset = block_mark_bit_offset / 8;
 363         geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
 364         return 0;
 365 }
 366 
 367 /*
 368  *  Calculate the ECC strength by hand:
 369  *      E : The ECC strength.
 370  *      G : the length of the Galois Field.
 371  *      N : the chunk count per page.
 372  *      O : the oobsize of the NAND chip.
 373  *      M : the metadata size per page.
 374  *
 375  *      The formula is :
 376  *              E * G * N
 377  *            ------------ <= (O - M)
 378  *                  8
 379  *
 380  *      So, we get E by:
 381  *                    (O - M) * 8
 382  *              E <= -------------
 383  *                       G * N
 384  */
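     /*
      * For illustration (hypothetical numbers): O = 64, M = 10, G = 13 and
      * N = 4 give E <= (64 - 10) * 8 / (13 * 4) = 432 / 52 ~= 8.3, which is
      * then rounded down to the even value 8.
      */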
 385 static inline int get_ecc_strength(struct gpmi_nand_data *this)
 386 {
 387         struct bch_geometry *geo = &this->bch_geometry;
 388         struct mtd_info *mtd = nand_to_mtd(&this->nand);
 389         int ecc_strength;
 390 
 391         ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
 392                         / (geo->gf_len * geo->ecc_chunk_count);
 393 
 394         /* We need the next lower even number. */
 395         return round_down(ecc_strength, 2);
 396 }
 397 
 398 static int legacy_set_geometry(struct gpmi_nand_data *this)
 399 {
 400         struct bch_geometry *geo = &this->bch_geometry;
 401         struct mtd_info *mtd = nand_to_mtd(&this->nand);
 402         unsigned int metadata_size;
 403         unsigned int status_size;
 404         unsigned int block_mark_bit_offset;
 405 
 406         /*
 407          * The size of the metadata can be changed, though we set it to 10
 408          * bytes now. But it can't be too large, because we have to save
 409          * enough space for BCH.
 410          */
 411         geo->metadata_size = 10;
 412 
 413         /* The default for the length of Galois Field. */
 414         geo->gf_len = 13;
 415 
 416         /* The default for chunk size. */
 417         geo->ecc_chunk_size = 512;
 418         while (geo->ecc_chunk_size < mtd->oobsize) {
 419                 geo->ecc_chunk_size *= 2; /* keep C >= O */
 420                 geo->gf_len = 14;
 421         }
 422 
 423         geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
 424 
 425         /* We use the same ECC strength for all chunks. */
 426         geo->ecc_strength = get_ecc_strength(this);
 427         if (!gpmi_check_ecc(this)) {
 428                 dev_err(this->dev,
 429                         "ecc strength: %d cannot be supported by the controller (%d)\n"
 430                         "try to use minimum ecc strength that NAND chip required\n",
 431                         geo->ecc_strength,
 432                         this->devdata->bch_max_ecc_strength);
 433                 return -EINVAL;
 434         }
 435 
 436         geo->page_size = mtd->writesize + geo->metadata_size +
 437                 (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
 438         geo->payload_size = mtd->writesize;
 439 
 440         /*
 441          * The auxiliary buffer contains the metadata and the ECC status. The
 442          * metadata is padded to the nearest 32-bit boundary. The ECC status
 443          * contains one byte for every ECC chunk, and is also padded to the
 444          * nearest 32-bit boundary.
 445          */
 446         metadata_size = ALIGN(geo->metadata_size, 4);
 447         status_size   = ALIGN(geo->ecc_chunk_count, 4);
 448 
 449         geo->auxiliary_size = metadata_size + status_size;
 450         geo->auxiliary_status_offset = metadata_size;
 451 
 452         if (!this->swap_block_mark)
 453                 return 0;
 454 
 455         /*
 456          * We need to compute the byte and bit offsets of
 457          * the physical block mark within the ECC-based view of the page.
 458          *
 459          * NAND chip with 2K page shows below:
 460          *                                             (Block Mark)
 461          *                                                   |      |
 462          *                                                   |  D   |
 463          *                                                   |<---->|
 464          *                                                   V      V
 465          *    +---+----------+-+----------+-+----------+-+----------+-+
 466          *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
 467          *    +---+----------+-+----------+-+----------+-+----------+-+
 468          *
 469          * The position of block mark moves forward in the ECC-based view
 470          * of page, and the delta is:
 471          *
 472          *                   E * G * (N - 1)
 473          *             D = (---------------- + M)
 474          *                          8
 475          *
 476          * With the formula to compute the ECC strength, and the condition
 477          *       : C >= O         (C is the ecc chunk size)
 478          *
 479          * It's easy to deduce the following result:
 480          *
 481          *         E * G       (O - M)      C - M         C - M
 482          *      ----------- <= ------- <=  --------  <  ---------
 483          *           8            N           N          (N - 1)
 484          *
 485          *  So, we get:
 486          *
 487          *                   E * G * (N - 1)
 488          *             D = (---------------- + M) < C
 489          *                          8
 490          *
 491          *  The above inequality means the position of block mark
 492          *  within the ECC-based view of the page is still in the data chunk,
 493          *  and it's NOT in the ECC bits of the chunk.
 494          *
 495          *  Use the following to compute the bit position of the
 496          *  physical block mark within the ECC-based view of the page:
 497          *          (page_size - D) * 8
 498          *
 499          *  --Huang Shijie
 500          */
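             /*
              * Checking the inequality above with the same hypothetical
              * numbers (C = 512, N = 4, E = 8, G = 13, M = 10):
              *     D = (8 * 13 * 3) / 8 + 10 = 49 < C = 512,
              * so the block mark indeed falls inside a data chunk, at bit
              * (2048 - 49) * 8 = 15992, i.e. byte offset 1999, bit offset 0.
              */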
 501         block_mark_bit_offset = mtd->writesize * 8 -
 502                 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
 503                                 + geo->metadata_size * 8);
 504 
 505         geo->block_mark_byte_offset = block_mark_bit_offset / 8;
 506         geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
 507         return 0;
 508 }
 509 
 510 static int common_nfc_set_geometry(struct gpmi_nand_data *this)
 511 {
 512         struct nand_chip *chip = &this->nand;
 513 
 514         if (chip->ecc.strength > 0 && chip->ecc.size > 0)
 515                 return set_geometry_by_ecc_info(this, chip->ecc.strength,
 516                                                 chip->ecc.size);
 517 
 518         if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
 519                                 || legacy_set_geometry(this)) {
 520                 if (!(chip->base.eccreq.strength > 0 &&
 521                       chip->base.eccreq.step_size > 0))
 522                         return -EINVAL;
 523 
 524                 return set_geometry_by_ecc_info(this,
 525                                                 chip->base.eccreq.strength,
 526                                                 chip->base.eccreq.step_size);
 527         }
 528 
 529         return 0;
 530 }
 531 
 532 /* Configures the geometry for BCH.  */
 533 static int bch_set_geometry(struct gpmi_nand_data *this)
 534 {
 535         struct resources *r = &this->resources;
 536         int ret;
 537 
 538         ret = common_nfc_set_geometry(this);
 539         if (ret)
 540                 return ret;
 541 
 542         ret = pm_runtime_get_sync(this->dev);
 543         if (ret < 0)
 544                 return ret;
 545 
 546         /*
 547          * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on
 548          * this chip, otherwise it will lock up. So we skip resetting the BCH
 549          * on the MX23 and MX28.
 550          */
 551         ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
 552         if (ret)
 553                 goto err_out;
 554 
 555         /* Set *all* chip selects to use layout 0. */
 556         writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
 557 
 558         ret = 0;
 559 err_out:
 560         pm_runtime_mark_last_busy(this->dev);
 561         pm_runtime_put_autosuspend(this->dev);
 562 
 563         return ret;
 564 }
 565 
 566 /*
 567  * <1> Firstly, we should know what the GPMI-clock means.
 568  *     The GPMI-clock is the internal clock in the gpmi nand controller.
 569  *     If you set the gpmi nand controller to 100MHz, the GPMI-clock's
 570  *     period is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
 571  *
 572  * <2> Secondly, we should know the frequency on the nand chip pins.
 573  *     The frequency on the nand chip pins is derived from the GPMI-clock.
 574  *     We can get it from the following equation:
 575  *
 576  *         F = G / (DS + DH)
 577  *
 578  *         F  : the frequency on the nand chip pins.
 579  *         G  : the GPMI clock, such as 100MHz.
 580  *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
 581  *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
 582  *
 583  * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
 584  *     the nand EDO (Extended Data Out) timing can be applied.
 585  *     The GPMI implements a feedback read strobe to sample the read data.
 586  *     The feedback read strobe can be delayed to support the nand EDO timing
 587  *     where the read strobe may deassert before the read data is valid, and
 588  *     the read data stays valid for some time after the read strobe.
 589  *
 590  *     The following figure illustrates some aspects of a NAND Flash read:
 591  *
 592  *                   |<---tREA---->|
 593  *                   |             |
 594  *                   |         |   |
 595  *                   |<--tRP-->|   |
 596  *                   |         |   |
 597  *                  __          ___|__________________________________
 598  *     RDN            \________/   |
 599  *                                 |
 600  *                                 /---------\
 601  *     Read Data    --------------<           >---------
 602  *                                 \---------/
 603  *                                |     |
 604  *                                |<-D->|
 605  *     FeedbackRDN  ________             ____________
 606  *                          \___________/
 607  *
 608  *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
 609  *
 610  *
 611  * <4> Now, we begin to describe how to compute the right RDN_DELAY.
 612  *
 613  *  4.1) From the aspect of the nand chip pins:
 614  *        Delay = (tREA + C - tRP)               {1}
 615  *
 616  *        tREA : the maximum read access time.
 617  *        C    : a constant to adjust the delay; the default is 4000ps.
 618  *        tRP  : the read pulse width, which is exactly:
 619  *                   tRP = (GPMI-clock-period) * DATA_SETUP
 620  *
 621  *  4.2) From the aspect of the GPMI nand controller:
 622  *         Delay = RDN_DELAY * 0.125 * RP        {2}
 623  *
 624  *         RP   : the DLL reference period.
 625  *            if (GPMI-clock-period > DLL_THRESHOLD)
 626  *                   RP = GPMI-clock-period / 2;
 627  *            else
 628  *                   RP = GPMI-clock-period;
 629  *
 630  *            Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
 631  *            is greater than DLL_THRESHOLD. On other SoCs, the DLL_THRESHOLD
 632  *            is 16000ps, but on mx6q, we use 12000ps.
 633  *
 634  *  4.3) Since {1} equals {2}, we get:
 635  *
 636  *                     (tREA + 4000 - tRP) * 8
 637  *         RDN_DELAY = -----------------------     {3}
 638  *                           RP
 639  */
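     /*
      * A worked example of {3} with hypothetical numbers: a 100MHz GPMI clock
      * (period 10000ps), tREA = 16000ps and DATA_SETUP = 1 cycle (so
      * tRP = 10000ps); the period is below the 12000ps mx6q threshold, hence
      * RP = 10000ps and
      *
      *     RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8
      */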
 640 static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
 641                                      const struct nand_sdr_timings *sdr)
 642 {
 643         struct gpmi_nfc_hardware_timing *hw = &this->hw;
 644         unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
 645         unsigned int period_ps, reference_period_ps;
 646         unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
 647         unsigned int tRP_ps;
 648         bool use_half_period;
 649         int sample_delay_ps, sample_delay_factor;
 650         u16 busy_timeout_cycles;
 651         u8 wrn_dly_sel;
 652 
 653         if (sdr->tRC_min >= 30000) {
 654                 /* ONFI non-EDO modes [0-3] */
 655                 hw->clk_rate = 22000000;
 656                 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
 657         } else if (sdr->tRC_min >= 25000) {
 658                 /* ONFI EDO mode 4 */
 659                 hw->clk_rate = 80000000;
 660                 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 661         } else {
 662                 /* ONFI EDO mode 5 */
 663                 hw->clk_rate = 100000000;
 664                 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 665         }
 666 
 667         /* SDR core timings are given in picoseconds */
 668         period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
 669 
 670         addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
 671         data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
 672         data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
 673         busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
 674 
 675         hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
 676                       BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
 677                       BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
 678         hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
 679 
 680         /*
 681          * Derive NFC ideal delay from {3}:
 682          *
 683          *                     (tREA + 4000 - tRP) * 8
 684          *         RDN_DELAY = -----------------------
 685          *                                RP
 686          */
 687         if (period_ps > dll_threshold_ps) {
 688                 use_half_period = true;
 689                 reference_period_ps = period_ps / 2;
 690         } else {
 691                 use_half_period = false;
 692                 reference_period_ps = period_ps;
 693         }
 694 
 695         tRP_ps = data_setup_cycles * period_ps;
 696         sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
 697         if (sample_delay_ps > 0)
 698                 sample_delay_factor = sample_delay_ps / reference_period_ps;
 699         else
 700                 sample_delay_factor = 0;
 701 
 702         hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
 703         if (sample_delay_factor)
 704                 hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
 705                               BM_GPMI_CTRL1_DLL_ENABLE |
 706                               (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
 707 }
 708 
 709 static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
 710 {
 711         struct gpmi_nfc_hardware_timing *hw = &this->hw;
 712         struct resources *r = &this->resources;
 713         void __iomem *gpmi_regs = r->gpmi_regs;
 714         unsigned int dll_wait_time_us;
 715 
 716         clk_set_rate(r->clock[0], hw->clk_rate);
 717 
 718         writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
 719         writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
 720 
 721         /*
 722          * Clear several CTRL1 fields, DLL must be disabled when setting
 723          * RDN_DELAY or HALF_PERIOD.
 724          */
 725         writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
 726         writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
 727 
 728         /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
 729         dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
 730         if (!dll_wait_time_us)
 731                 dll_wait_time_us = 1;
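             /*
              * With integer math, USEC_PER_SEC / hw->clk_rate is 0 for any
              * clock faster than 1MHz (e.g. 1000000 / 100000000 == 0), so in
              * practice this clamps to the 1us minimum; at 100MHz, 64 cycles
              * take only 0.64us.
              */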
 732 
 733         /* Wait for the DLL to settle. */
 734         udelay(dll_wait_time_us);
 735 }
 736 
 737 static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
 738                                      const struct nand_data_interface *conf)
 739 {
 740         struct gpmi_nand_data *this = nand_get_controller_data(chip);
 741         const struct nand_sdr_timings *sdr;
 742 
 743         /* Retrieve required NAND timings */
 744         sdr = nand_get_sdr_timings(conf);
 745         if (IS_ERR(sdr))
 746                 return PTR_ERR(sdr);
 747 
 748         /* Only MX6 GPMI controller can reach EDO timings */
 749         if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
 750                 return -ENOTSUPP;
 751 
 752         /* Stop here if this call was just a check */
 753         if (chipnr < 0)
 754                 return 0;
 755 
 756         /* Do the actual derivation of the controller timings */
 757         gpmi_nfc_compute_timings(this, sdr);
 758 
 759         this->hw.must_apply_timings = true;
 760 
 761         return 0;
 762 }
 763 
 764 /* Clears a BCH interrupt. */
 765 static void gpmi_clear_bch(struct gpmi_nand_data *this)
 766 {
 767         struct resources *r = &this->resources;
 768         writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
 769 }
 770 
 771 static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
 772 {
 773         /* We use the DMA channel 0 to access all the nand chips. */
 774         return this->dma_chans[0];
 775 }
 776 
 777 /* This will be called after the DMA operation is finished. */
 778 static void dma_irq_callback(void *param)
 779 {
 780         struct gpmi_nand_data *this = param;
 781         struct completion *dma_c = &this->dma_done;
 782 
 783         complete(dma_c);
 784 }
 785 
 786 static irqreturn_t bch_irq(int irq, void *cookie)
 787 {
 788         struct gpmi_nand_data *this = cookie;
 789 
 790         gpmi_clear_bch(this);
 791         complete(&this->bch_done);
 792         return IRQ_HANDLED;
 793 }
 794 
 795 static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
 796 {
 797         /*
 798          * raw_len is the length to read/write including bch data which
 799          * we are passed in exec_op. Calculate the data length from it.
 800          */
 801         if (this->bch)
 802                 return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
 803         else
 804                 return raw_len;
 805 }
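     /*
      * For illustration, with a hypothetical 2KiB + 64B page and 512-byte
      * chunks: a raw_len of 2112 is trimmed to ALIGN_DOWN(2112, 512) = 2048
      * bytes of user data.
      */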
 806 
 807 /* Can we use the upper layer's buffer directly for DMA? */
 808 static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
 809                              int raw_len, struct scatterlist *sgl,
 810                              enum dma_data_direction dr)
 811 {
 812         int ret;
 813         int len = gpmi_raw_len_to_len(this, raw_len);
 814 
 815         /* first try to map the upper buffer directly */
 816         if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
 817                 sg_init_one(sgl, buf, len);
 818                 ret = dma_map_sg(this->dev, sgl, 1, dr);
 819                 if (ret == 0)
 820                         goto map_fail;
 821 
 822                 return true;
 823         }
 824 
 825 map_fail:
 826         /* We have to use our own DMA buffer. */
 827         sg_init_one(sgl, this->data_buffer_dma, len);
 828 
 829         if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
 830                 memcpy(this->data_buffer_dma, buf, len);
 831 
 832         dma_map_sg(this->dev, sgl, 1, dr);
 833 
 834         return false;
 835 }
 836 
 837 /**
 838  * gpmi_copy_bits - copy bits from one memory region to another
 839  * @dst: destination buffer
 840  * @dst_bit_off: bit offset we're starting to write at
 841  * @src: source buffer
 842  * @src_bit_off: bit offset we're starting to read from
 843  * @nbits: number of bits to copy
 844  *
 845  * This function copies bits from one memory region to another, and is used by
 846  * the GPMI driver to copy ECC sections which are not guaranteed to be byte
 847  * aligned.
 848  *
 849  * src and dst should not overlap.
 850  *
 851  */
 852 static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
 853                            size_t src_bit_off, size_t nbits)
 854 {
 855         size_t i;
 856         size_t nbytes;
 857         u32 src_buffer = 0;
 858         size_t bits_in_src_buffer = 0;
 859 
 860         if (!nbits)
 861                 return;
 862 
 863         /*
 864          * Move src and dst pointers to the closest byte pointer and store bit
 865          * offsets within a byte.
 866          */
 867         src += src_bit_off / 8;
 868         src_bit_off %= 8;
 869 
 870         dst += dst_bit_off / 8;
 871         dst_bit_off %= 8;
 872 
 873         /*
 874          * Initialize the src_buffer value with bits available in the first
 875          * byte of data so that we end up with a byte aligned src pointer.
 876          */
 877         if (src_bit_off) {
 878                 src_buffer = src[0] >> src_bit_off;
 879                 if (nbits >= (8 - src_bit_off)) {
 880                         bits_in_src_buffer += 8 - src_bit_off;
 881                 } else {
 882                         src_buffer &= GENMASK(nbits - 1, 0);
 883                         bits_in_src_buffer += nbits;
 884                 }
 885                 nbits -= bits_in_src_buffer;
 886                 src++;
 887         }
 888 
 889         /* Calculate the number of bytes that can be copied from src to dst. */
 890         nbytes = nbits / 8;
 891 
 892         /* Try to align dst to a byte boundary. */
 893         if (dst_bit_off) {
 894                 if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
 895                         src_buffer |= src[0] << bits_in_src_buffer;
 896                         bits_in_src_buffer += 8;
 897                         src++;
 898                         nbytes--;
 899                 }
 900 
 901                 if (bits_in_src_buffer >= (8 - dst_bit_off)) {
 902                         dst[0] &= GENMASK(dst_bit_off - 1, 0);
 903                         dst[0] |= src_buffer << dst_bit_off;
 904                         src_buffer >>= (8 - dst_bit_off);
 905                         bits_in_src_buffer -= (8 - dst_bit_off);
 906                         dst_bit_off = 0;
 907                         dst++;
 908                         if (bits_in_src_buffer > 7) {
 909                                 bits_in_src_buffer -= 8;
 910                                 dst[0] = src_buffer;
 911                                 dst++;
 912                                 src_buffer >>= 8;
 913                         }
 914                 }
 915         }
 916 
 917         if (!bits_in_src_buffer && !dst_bit_off) {
 918                 /*
 919                  * Both src and dst pointers are byte aligned, thus we can
 920                  * just use the optimized memcpy function.
 921                  */
 922                 if (nbytes)
 923                         memcpy(dst, src, nbytes);
 924         } else {
 925                 /*
 926                  * src buffer is not byte aligned, hence we have to copy each
 927                  * src byte to the src_buffer variable before extracting a byte
 928                  * to store in dst.
 929                  */
 930                 for (i = 0; i < nbytes; i++) {
 931                         src_buffer |= src[i] << bits_in_src_buffer;
 932                         dst[i] = src_buffer;
 933                         src_buffer >>= 8;
 934                 }
 935         }
 936         /* Update dst and src pointers */
 937         dst += nbytes;
 938         src += nbytes;
 939 
 940         /*
 941          * nbits is the number of remaining bits. It should not exceed 8 as
 942          * we've already copied as many bytes as possible.
 943          */
 944         nbits %= 8;
 945 
 946         /*
 947          * If there's no more bits to copy to the destination and src buffer
 948          * was already byte aligned, then we're done.
 949          */
 950         if (!nbits && !bits_in_src_buffer)
 951                 return;
 952 
 953         /* Copy the remaining bits to src_buffer */
 954         if (nbits)
 955                 src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
 956                               bits_in_src_buffer;
 957         bits_in_src_buffer += nbits;
 958 
 959         /*
 960          * In case there were not enough bits to get a byte aligned dst buffer
 961          * prepare the src_buffer variable to match the dst organization (shift
 962          * src_buffer by dst_bit_off and retrieve the least significant bits
 963          * from dst).
 964          */
 965         if (dst_bit_off)
 966                 src_buffer = (src_buffer << dst_bit_off) |
 967                              (*dst & GENMASK(dst_bit_off - 1, 0));
 968         bits_in_src_buffer += dst_bit_off;
 969 
 970         /*
 971          * Keep most significant bits from dst if we end up with an unaligned
 972          * number of bits.
 973          */
 974         nbytes = bits_in_src_buffer / 8;
 975         if (bits_in_src_buffer % 8) {
 976                 src_buffer |= (dst[nbytes] &
 977                                GENMASK(7, bits_in_src_buffer % 8)) <<
 978                               (nbytes * 8);
 979                 nbytes++;
 980         }
 981 
 982         /* Copy the remaining bytes to dst */
 983         for (i = 0; i < nbytes; i++) {
 984                 dst[i] = src_buffer;
 985                 src_buffer >>= 8;
 986         }
 987 }
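     /*
      * A hypothetical usage sketch: to move 13 ECC bits starting at bit 3 of
      * a raw page buffer into a destination buffer starting at bit 6, one
      * would call
      *
      *     gpmi_copy_bits(dst, 6, src, 3, 13);
      *
      * i.e. neither offset needs to be byte aligned.
      */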
 988 
 989 /* add our own bbt descriptor */
 990 static uint8_t scan_ff_pattern[] = { 0xff };
 991 static struct nand_bbt_descr gpmi_bbt_descr = {
 992         .options        = 0,
 993         .offs           = 0,
 994         .len            = 1,
 995         .pattern        = scan_ff_pattern
 996 };
 997 
 998 /*
 999  * We may change the layout if we can get the ECC info from the datasheet,
1000  * else we will use all of the (page + OOB).
1001  */
1002 static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
1003                               struct mtd_oob_region *oobregion)
1004 {
1005         struct nand_chip *chip = mtd_to_nand(mtd);
1006         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1007         struct bch_geometry *geo = &this->bch_geometry;
1008 
1009         if (section)
1010                 return -ERANGE;
1011 
1012         oobregion->offset = 0;
1013         oobregion->length = geo->page_size - mtd->writesize;
1014 
1015         return 0;
1016 }
1017 
1018 static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
1019                                struct mtd_oob_region *oobregion)
1020 {
1021         struct nand_chip *chip = mtd_to_nand(mtd);
1022         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1023         struct bch_geometry *geo = &this->bch_geometry;
1024 
1025         if (section)
1026                 return -ERANGE;
1027 
1028         /* The available oob size we have. */
1029         if (geo->page_size < mtd->writesize + mtd->oobsize) {
1030                 oobregion->offset = geo->page_size - mtd->writesize;
1031                 oobregion->length = mtd->oobsize - oobregion->offset;
1032         }
1033 
1034         return 0;
1035 }
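     /*
      * For illustration, with the hypothetical geometry used above
      * (writesize = 2048, oobsize = 64, BCH page_size = 2110): the ECC
      * region reported is offset 0, length 2110 - 2048 = 62, and the free
      * region is offset 62, length 64 - 62 = 2 bytes.
      */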
1036 
1037 static const char * const gpmi_clks_for_mx2x[] = {
1038         "gpmi_io",
1039 };
1040 
1041 static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
1042         .ecc = gpmi_ooblayout_ecc,
1043         .free = gpmi_ooblayout_free,
1044 };
1045 
1046 static const struct gpmi_devdata gpmi_devdata_imx23 = {
1047         .type = IS_MX23,
1048         .bch_max_ecc_strength = 20,
1049         .max_chain_delay = 16000,
1050         .clks = gpmi_clks_for_mx2x,
1051         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1052 };
1053 
1054 static const struct gpmi_devdata gpmi_devdata_imx28 = {
1055         .type = IS_MX28,
1056         .bch_max_ecc_strength = 20,
1057         .max_chain_delay = 16000,
1058         .clks = gpmi_clks_for_mx2x,
1059         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1060 };
1061 
1062 static const char * const gpmi_clks_for_mx6[] = {
1063         "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
1064 };
1065 
1066 static const struct gpmi_devdata gpmi_devdata_imx6q = {
1067         .type = IS_MX6Q,
1068         .bch_max_ecc_strength = 40,
1069         .max_chain_delay = 12000,
1070         .clks = gpmi_clks_for_mx6,
1071         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1072 };
1073 
1074 static const struct gpmi_devdata gpmi_devdata_imx6sx = {
1075         .type = IS_MX6SX,
1076         .bch_max_ecc_strength = 62,
1077         .max_chain_delay = 12000,
1078         .clks = gpmi_clks_for_mx6,
1079         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1080 };
1081 
1082 static const char * const gpmi_clks_for_mx7d[] = {
1083         "gpmi_io", "gpmi_bch_apb",
1084 };
1085 
1086 static const struct gpmi_devdata gpmi_devdata_imx7d = {
1087         .type = IS_MX7D,
1088         .bch_max_ecc_strength = 62,
1089         .max_chain_delay = 12000,
1090         .clks = gpmi_clks_for_mx7d,
1091         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
1092 };
1093 
1094 static int acquire_register_block(struct gpmi_nand_data *this,
1095                                   const char *res_name)
1096 {
1097         struct platform_device *pdev = this->pdev;
1098         struct resources *res = &this->resources;
1099         struct resource *r;
1100         void __iomem *p;
1101 
1102         r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
1103         p = devm_ioremap_resource(&pdev->dev, r);
1104         if (IS_ERR(p))
1105                 return PTR_ERR(p);
1106 
1107         if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
1108                 res->gpmi_regs = p;
1109         else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
1110                 res->bch_regs = p;
1111         else
1112                 dev_err(this->dev, "unknown resource name : %s\n", res_name);
1113 
1114         return 0;
1115 }
1116 
1117 static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
1118 {
1119         struct platform_device *pdev = this->pdev;
1120         const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
1121         struct resource *r;
1122         int err;
1123 
1124         r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
1125         if (!r) {
1126                 dev_err(this->dev, "Can't get resource for %s\n", res_name);
1127                 return -ENODEV;
1128         }
1129 
1130         err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
1131         if (err)
1132                 dev_err(this->dev, "error requesting BCH IRQ\n");
1133 
1134         return err;
1135 }
1136 
1137 static void release_dma_channels(struct gpmi_nand_data *this)
1138 {
1139         unsigned int i;
1140         for (i = 0; i < DMA_CHANS; i++)
1141                 if (this->dma_chans[i]) {
1142                         dma_release_channel(this->dma_chans[i]);
1143                         this->dma_chans[i] = NULL;
1144                 }
1145 }
1146 
1147 static int acquire_dma_channels(struct gpmi_nand_data *this)
1148 {
1149         struct platform_device *pdev = this->pdev;
1150         struct dma_chan *dma_chan;
1151 
1152         /* request dma channel */
1153         dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
1154         if (!dma_chan) {
1155                 dev_err(this->dev, "Failed to request DMA channel.\n");
1156                 goto acquire_err;
1157         }
1158 
1159         this->dma_chans[0] = dma_chan;
1160         return 0;
1161 
1162 acquire_err:
1163         release_dma_channels(this);
1164         return -EINVAL;
1165 }
1166 
1167 static int gpmi_get_clks(struct gpmi_nand_data *this)
1168 {
1169         struct resources *r = &this->resources;
1170         struct clk *clk;
1171         int err, i;
1172 
1173         for (i = 0; i < this->devdata->clks_count; i++) {
1174                 clk = devm_clk_get(this->dev, this->devdata->clks[i]);
1175                 if (IS_ERR(clk)) {
1176                         err = PTR_ERR(clk);
1177                         goto err_clock;
1178                 }
1179 
1180                 r->clock[i] = clk;
1181         }
1182 
1183         if (GPMI_IS_MX6(this))
1184                 /*
1185                  * Set the default value for the gpmi clock.
1186                  *
1187                  * If you want to use an ONFI nand chip in synchronous mode,
1188                  * you should change the clock as needed.
1189                  */
1190                 clk_set_rate(r->clock[0], 22000000);
1191 
1192         return 0;
1193 
1194 err_clock:
1195         dev_dbg(this->dev, "failed in finding the clocks.\n");
1196         return err;
1197 }
1198 
1199 static int acquire_resources(struct gpmi_nand_data *this)
1200 {
1201         int ret;
1202 
1203         ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
1204         if (ret)
1205                 goto exit_regs;
1206 
1207         ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
1208         if (ret)
1209                 goto exit_regs;
1210 
1211         ret = acquire_bch_irq(this, bch_irq);
1212         if (ret)
1213                 goto exit_regs;
1214 
1215         ret = acquire_dma_channels(this);
1216         if (ret)
1217                 goto exit_regs;
1218 
1219         ret = gpmi_get_clks(this);
1220         if (ret)
1221                 goto exit_clock;
1222         return 0;
1223 
1224 exit_clock:
1225         release_dma_channels(this);
1226 exit_regs:
1227         return ret;
1228 }
1229 
1230 static void release_resources(struct gpmi_nand_data *this)
1231 {
1232         release_dma_channels(this);
1233 }
1234 
1235 static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
1236 {
1237         struct device *dev = this->dev;
1238         struct bch_geometry *geo = &this->bch_geometry;
1239 
1240         if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
1241                 dma_free_coherent(dev, geo->auxiliary_size,
1242                                         this->auxiliary_virt,
1243                                         this->auxiliary_phys);
1244         kfree(this->data_buffer_dma);
1245         kfree(this->raw_buffer);
1246 
1247         this->data_buffer_dma   = NULL;
1248         this->raw_buffer        = NULL;
1249 }
1250 
1251 /* Allocate the DMA buffers */
1252 static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
1253 {
1254         struct bch_geometry *geo = &this->bch_geometry;
1255         struct device *dev = this->dev;
1256         struct mtd_info *mtd = nand_to_mtd(&this->nand);
1257 
1258         /*
1259          * Allocate a read/write data buffer.
1260          *     gpmi_alloc_dma_buffer can be called twice.
1261          *     We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
1262          *     is called before the NAND identification; and we allocate a
1263          *     buffer of the real NAND page size when gpmi_alloc_dma_buffer
1264          *     is called afterwards.
1265          */
1266         this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
1267                                         GFP_DMA | GFP_KERNEL);
1268         if (this->data_buffer_dma == NULL)
1269                 goto error_alloc;
1270 
1271         this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
1272                                         &this->auxiliary_phys, GFP_DMA);
1273         if (!this->auxiliary_virt)
1274                 goto error_alloc;
1275 
1276         this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
1277         if (!this->raw_buffer)
1278                 goto error_alloc;
1279 
1280         return 0;
1281 
1282 error_alloc:
1283         gpmi_free_dma_buffer(this);
1284         return -ENOMEM;
1285 }
1286 
1287 /*
1288  * Handles block mark swapping.
1289  * It can be called to swap the block mark or to swap it back,
1290  * because the operations are the same.
1291  */
1292 static void block_mark_swapping(struct gpmi_nand_data *this,
1293                                 void *payload, void *auxiliary)
1294 {
1295         struct bch_geometry *nfc_geo = &this->bch_geometry;
1296         unsigned char *p;
1297         unsigned char *a;
1298         unsigned int  bit;
1299         unsigned char mask;
1300         unsigned char from_data;
1301         unsigned char from_oob;
1302 
1303         if (!this->swap_block_mark)
1304                 return;
1305 
1306         /*
1307          * If control arrives here, we're swapping. Make some convenience
1308          * variables.
1309          */
1310         bit = nfc_geo->block_mark_bit_offset;
1311         p   = payload + nfc_geo->block_mark_byte_offset;
1312         a   = auxiliary;
1313 
1314         /*
1315          * Get the byte from the data area that overlays the block mark. Since
1316          * the ECC engine applies its own view to the bits in the page, the
1317          * physical block mark won't (in general) appear on a byte boundary in
1318          * the data.
1319          */
1320         from_data = (p[0] >> bit) | (p[1] << (8 - bit));
1321 
1322         /* Get the byte from the OOB. */
1323         from_oob = a[0];
1324 
1325         /* Swap them. */
1326         a[0] = from_data;
1327 
1328         mask = (0x1 << bit) - 1;
1329         p[0] = (p[0] & mask) | (from_oob << bit);
1330 
1331         mask = ~0 << bit;
1332         p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
1333 }
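     /*
      * For illustration, assume a hypothetical block_mark_bit_offset of 4:
      * the byte overlaying the block mark is then reassembled as
      *
      *     from_data = (p[0] >> 4) | (p[1] << 4);
      *
      * and on the way back the OOB byte is split, its low nibble going into
      * the upper bits of p[0] and its high nibble into the lower bits of p[1].
      */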
1334 
1335 static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
1336                                int last, int meta)
1337 {
1338         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1339         struct bch_geometry *nfc_geo = &this->bch_geometry;
1340         struct mtd_info *mtd = nand_to_mtd(chip);
1341         int i;
1342         unsigned char *status;
1343         unsigned int max_bitflips = 0;
1344 
1345         /* Loop over status bytes, accumulating ECC status. */
1346         status = this->auxiliary_virt + ALIGN(meta, 4);
1347 
1348         for (i = first; i < last; i++, status++) {
1349                 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1350                         continue;
1351 
1352                 if (*status == STATUS_UNCORRECTABLE) {
1353                         int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1354                         u8 *eccbuf = this->raw_buffer;
1355                         int offset, bitoffset;
1356                         int eccbytes;
1357                         int flips;
1358 
1359                         /* Read ECC bytes into our internal raw_buffer */
1360                         offset = nfc_geo->metadata_size * 8;
1361                         offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
1362                         offset -= eccbits;
1363                         bitoffset = offset % 8;
1364                         eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1365                         offset /= 8;
1366                         eccbytes -= offset;
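                             /*
                              * For illustration, with hypothetical geometry
                              * (M = 10, 512-byte chunks, E = 8, G = 13) and
                              * i = 0: offset = 80 + 4200 - 104 = 4176 bits,
                              * i.e. byte 522 with bitoffset 0, and eccbytes =
                              * DIV_ROUND_UP(4280, 8) - 522 = 13.
                              */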
1367                         nand_change_read_column_op(chip, offset, eccbuf,
1368                                                    eccbytes, false);
1369 
1370                         /*
1371                          * ECC data are not byte aligned and we may have
1372                          * in-band data in the first and last byte of
1373                          * eccbuf. Set non-eccbits to one so that
1374                          * nand_check_erased_ecc_chunk() does not count them
1375                          * as bitflips.
1376                          */
1377                         if (bitoffset)
1378                                 eccbuf[0] |= GENMASK(bitoffset - 1, 0);
1379 
1380                         bitoffset = (bitoffset + eccbits) % 8;
1381                         if (bitoffset)
1382                                 eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
1383 
1384                         /*
1385                          * The ECC hardware has an uncorrectable ECC status
1386                          * code in case we have bitflips in an erased page. As
1387                          * nothing was written into this subpage the ECC is
1388                          * obviously wrong and we can not trust it. We assume
1389                          * at this point that we are reading an erased page and
1390          * try to correct the bitflips in the buffer up to
1391          * ecc_strength bitflips. If this is a page with random
1392          * data, we exceed this number of bitflips and have an
1393          * ECC failure. Otherwise we use the corrected buffer.
1394                          */
1395                         if (i == 0) {
1396                                 /* The first block includes metadata */
1397                                 flips = nand_check_erased_ecc_chunk(
1398                                                 buf + i * nfc_geo->ecc_chunk_size,
1399                                                 nfc_geo->ecc_chunk_size,
1400                                                 eccbuf, eccbytes,
1401                                                 this->auxiliary_virt,
1402                                                 nfc_geo->metadata_size,
1403                                                 nfc_geo->ecc_strength);
1404                         } else {
1405                                 flips = nand_check_erased_ecc_chunk(
1406                                                 buf + i * nfc_geo->ecc_chunk_size,
1407                                                 nfc_geo->ecc_chunk_size,
1408                                                 eccbuf, eccbytes,
1409                                                 NULL, 0,
1410                                                 nfc_geo->ecc_strength);
1411                         }
1412 
1413                         if (flips > 0) {
1414                                 max_bitflips = max_t(unsigned int, max_bitflips,
1415                                                      flips);
1416                                 mtd->ecc_stats.corrected += flips;
1417                                 continue;
1418                         }
1419 
1420                         mtd->ecc_stats.failed++;
1421                         continue;
1422                 }
1423 
1424                 mtd->ecc_stats.corrected += *status;
1425                 max_bitflips = max_t(unsigned int, max_bitflips, *status);
1426         }
1427 
1428         return max_bitflips;
1429 }
1430 
1431 static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
1432 {
1433         struct bch_geometry *geo = &this->bch_geometry;
1434         unsigned int ecc_strength = geo->ecc_strength >> 1;
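             /*
              * Editorial note (assumption based on the BCH register layout):
              * the ECC0/ECCN fields encode the correction strength in units
              * of two bits (ECC2 = 1, ECC4 = 2, ...), which is why the
              * strength is halved here.
              */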
1435         unsigned int gf_len = geo->gf_len;
1436         unsigned int block_size = geo->ecc_chunk_size;
1437 
1438         this->bch_flashlayout0 =
1439                 BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
1440                 BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
1441                 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1442                 BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
1443                 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
1444 
1445         this->bch_flashlayout1 =
1446                 BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
1447                 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1448                 BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
1449                 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
1450 }
1451 
1452 static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1453                               int oob_required, int page)
1454 {
1455         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1456         struct mtd_info *mtd = nand_to_mtd(chip);
1457         struct bch_geometry *geo = &this->bch_geometry;
1458         unsigned int max_bitflips;
1459         int ret;
1460 
1461         gpmi_bch_layout_std(this);
1462         this->bch = true;
1463 
1464         ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
1465         if (ret)
1466                 return ret;
1467 
1468         max_bitflips = gpmi_count_bitflips(chip, buf, 0,
1469                                            geo->ecc_chunk_count,
1470                                            geo->auxiliary_status_offset);
1471 
1472         /* handle the block mark swapping */
1473         block_mark_swapping(this, buf, this->auxiliary_virt);
1474 
1475         if (oob_required) {
1476                 /*
1477                  * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
1478                  * for details about our policy for delivering the OOB.
1479                  *
1480                  * We fill the caller's buffer with set bits, and then copy the
1481                  * block mark to the caller's buffer. Note that, if block mark
1482                  * swapping was necessary, it has already been done, so we can
1483                  * rely on the first byte of the auxiliary buffer to contain
1484                  * the block mark.
1485                  */
1486                 memset(chip->oob_poi, ~0, mtd->oobsize);
1487                 chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
1488         }
1489 
1490         return max_bitflips;
1491 }
1492 
1493 /* Fake a virtual small page for the subpage read */
1494 static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1495                                  uint32_t len, uint8_t *buf, int page)
1496 {
1497         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1498         struct bch_geometry *geo = &this->bch_geometry;
1499         int size = chip->ecc.size; /* ECC chunk size */
1500         int meta, n, page_size;
1501         unsigned int max_bitflips;
1502         unsigned int ecc_strength;
1503         int first, last, marker_pos;
1504         int ecc_parity_size;
1505         int col = 0;
1506         int ret;
1507 
1508         /* The size of ECC parity */
1509         ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1510 
1511         /* Align it with the chunk size */
1512         first = offs / size;
1513         last = (offs + len - 1) / size;
1514 
1515         if (this->swap_block_mark) {
1516                 /*
1517                  * Find the chunk which contains the Block Marker.
1518                  * If this chunk is in the range of [first, last],
1519                  * we have to read out the whole page. Why? Because the
1520                  * data at the Block Marker position has been swapped with
1521                  * the metadata, which is bound to chunk 0.
1522                  */
1523                 marker_pos = geo->block_mark_byte_offset / size;
1524                 if (last >= marker_pos && first <= marker_pos) {
1525                         dev_dbg(this->dev,
1526                                 "page:%d, first:%d, last:%d, marker at:%d\n",
1527                                 page, first, last, marker_pos);
1528                         return gpmi_ecc_read_page(chip, buf, 0, page);
1529                 }
1530         }
1531 
1532         meta = geo->metadata_size;
1533         if (first) {
1534                 col = meta + (size + ecc_parity_size) * first;
1535                 meta = 0;
1536                 buf = buf + first * size;
1537         }
1538 
1539         ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1540 
1541         n = last - first + 1;
1542         page_size = meta + (size + ecc_parity_size) * n;
1543         ecc_strength = geo->ecc_strength >> 1;
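             /*
              * Worked example, assuming an illustrative geometry of
              * metadata_size = 10, ecc_chunk_size = 512, gf_len = 13 and
              * ecc_strength = 8 (13 parity bytes per chunk), for a request
              * of offs = 1024, len = 512:
              *   first = 2, last = 2, n = 1
              *   col       = 10 + (512 + 13) * 2 = 1060
              *   meta      = 0
              *   page_size = 0 + (512 + 13) * 1 = 525
              * The BCH block is then told to expect a 525-byte "page" with
              * no metadata and a single 512-byte chunk, starting at column
              * 1060.
              */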
1544 
1545         this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1546                 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1547                 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1548                 BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1549                 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
1550 
1551         this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1552                 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1553                 BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1554                 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
1555 
1556         this->bch = true;
1557 
1558         ret = nand_read_page_op(chip, page, col, buf, page_size);
1559         if (ret)
1560                 return ret;
1561 
1562         dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1563                 page, offs, len, col, first, n, page_size);
1564 
1565         max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1566 
1567         return max_bitflips;
1568 }
1569 
1570 static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1571                                int oob_required, int page)
1572 {
1573         struct mtd_info *mtd = nand_to_mtd(chip);
1574         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1575         struct bch_geometry *nfc_geo = &this->bch_geometry;
1576         int ret;
1577 
1578         dev_dbg(this->dev, "ecc write page.\n");
1579 
1580         gpmi_bch_layout_std(this);
1581         this->bch = true;
1582 
1583         memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
1584 
1585         if (this->swap_block_mark) {
1586                 /*
1587                  * When doing bad block marker swapping we must always copy the
1588                  * input buffer as we can't modify the const buffer.
1589                  */
1590                 memcpy(this->data_buffer_dma, buf, mtd->writesize);
1591                 buf = this->data_buffer_dma;
1592                 block_mark_swapping(this, this->data_buffer_dma,
1593                                     this->auxiliary_virt);
1594         }
1595 
1596         ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
1597 
1598         return ret;
1599 }
1600 
1601 /*
1602  * There are several places in this driver where we have to handle the OOB and
1603  * block marks. This is the function where things are the most complicated, so
1604  * this is where we try to explain it all. All the other places refer back to
1605  * here.
1606  *
1607  * These are the rules, in order of decreasing importance:
1608  *
1609  * 1) Nothing the caller does can be allowed to imperil the block mark.
1610  *
1611  * 2) In read operations, the first byte of the OOB we return must reflect the
1612  *    true state of the block mark, no matter where that block mark appears in
1613  *    the physical page.
1614  *
1615  * 3) ECC-based read operations return an OOB full of set bits (since we never
1616  *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1617  *    return).
1618  *
1619  * 4) "Raw" read operations return a direct view of the physical bytes in the
1620  *    page, using the conventional definition of which bytes are data and which
1621  *    are OOB. This gives the caller a way to see the actual, physical bytes
1622  *    in the page, without the distortions applied by our ECC engine.
1623  *
1624  *
1625  * What we do for this specific read operation depends on two questions:
1626  *
1627  * 1) Are we doing a "raw" read, or an ECC-based read?
1628  *
1629  * 2) Are we using block mark swapping or transcription?
1630  *
1631  * There are four cases, illustrated by the following Karnaugh map:
1632  *
1633  *                    |           Raw           |         ECC-based       |
1634  *       -------------+-------------------------+-------------------------+
1635  *                    | Read the conventional   |                         |
1636  *                    | OOB at the end of the   |                         |
1637  *       Swapping     | page and return it. It  |                         |
1638  *                    | contains exactly what   |                         |
1639  *                    | we want.                | Read the block mark and |
1640  *       -------------+-------------------------+ return it in a buffer   |
1641  *                    | Read the conventional   | full of set bits.       |
1642  *                    | OOB at the end of the   |                         |
1643  *                    | page and also the block |                         |
1644  *       Transcribing | mark in the metadata.   |                         |
1645  *                    | Copy the block mark     |                         |
1646  *                    | into the first byte of  |                         |
1647  *                    | the OOB.                |                         |
1648  *       -------------+-------------------------+-------------------------+
1649  *
1650  * Note that we break rule #4 in the Transcribing/Raw case because we're not
1651  * giving an accurate view of the actual, physical bytes in the page (we're
1652  * overwriting the block mark). That's OK because it's more important to follow
1653  * rule #2.
1654  *
1655  * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1656  * easy. When reading a page, for example, the NAND Flash MTD code calls our
1657  * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1658  * ECC-based or raw view of the page is implicit in which function it calls
1659  * (there is a similar pair of ECC-based/raw functions for writing).
1660  */
1661 static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1662 {
1663         struct mtd_info *mtd = nand_to_mtd(chip);
1664         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1665         int ret;
1666 
1667         /* clear the OOB buffer */
1668         memset(chip->oob_poi, ~0, mtd->oobsize);
1669 
1670         /* Read out the conventional OOB. */
1671         ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1672                                 mtd->oobsize);
1673         if (ret)
1674                 return ret;
1675 
1676         /*
1677          * Now, we want to make sure the block mark is correct. In the
1678          * non-transcribing case (!GPMI_IS_MX23()), we already have it.
1679          * Otherwise, we need to explicitly read it.
1680          */
1681         if (GPMI_IS_MX23(this)) {
1682                 /* Read the block mark into the first byte of the OOB buffer. */
1683                 ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1684                 if (ret)
1685                         return ret;
1686         }
1687 
1688         return 0;
1689 }
1690 
1691 static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
1692 {
1693         struct mtd_info *mtd = nand_to_mtd(chip);
1694         struct mtd_oob_region of = { };
1695 
1696         /* Do we have available oob area? */
1697         mtd_ooblayout_free(mtd, 0, &of);
1698         if (!of.length)
1699                 return -EPERM;
1700 
1701         if (!nand_is_slc(chip))
1702                 return -EPERM;
1703 
1704         return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1705                                  chip->oob_poi + of.offset, of.length);
1706 }
1707 
1708 /*
1709  * This function reads a NAND page without involving the ECC engine (no HW
1710  * ECC correction).
1711  * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1712  * inline (interleaved with payload DATA), and does not align data chunks on
1713  * byte boundaries.
1714  * We thus need to take care moving the payload data and ECC bits stored in the
1715  * page into the provided buffers, which is why we're using gpmi_copy_bits.
1716  *
1717  * See set_geometry_by_ecc_info inline comments to have a full description
1718  * of the layout used by the GPMI controller.
1719  */
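     /*
      * As a concrete illustration (assumed values only): with gf_len = 13 and
      * ecc_strength = 14, each data chunk is followed by 182 ECC bits, i.e.
      * 22.75 bytes, so every subsequent chunk starts in the middle of a byte
      * and a plain memcpy() cannot be used for the payload.
      */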
1720 static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1721                                   int oob_required, int page)
1722 {
1723         struct mtd_info *mtd = nand_to_mtd(chip);
1724         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1725         struct bch_geometry *nfc_geo = &this->bch_geometry;
1726         int eccsize = nfc_geo->ecc_chunk_size;
1727         int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1728         u8 *tmp_buf = this->raw_buffer;
1729         size_t src_bit_off;
1730         size_t oob_bit_off;
1731         size_t oob_byte_off;
1732         uint8_t *oob = chip->oob_poi;
1733         int step;
1734         int ret;
1735 
1736         ret = nand_read_page_op(chip, page, 0, tmp_buf,
1737                                 mtd->writesize + mtd->oobsize);
1738         if (ret)
1739                 return ret;
1740 
1741         /*
1742          * If required, swap the bad block marker and the data stored in the
1743          * metadata section, so that we don't wrongly consider a block as bad.
1744          *
1745          * See the layout description for a detailed explanation on why this
1746          * is needed.
1747          */
1748         if (this->swap_block_mark)
1749                 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1750 
1751         /*
1752          * Copy the metadata section into the oob buffer (this section is
1753          * guaranteed to be aligned on a byte boundary).
1754          */
1755         if (oob_required)
1756                 memcpy(oob, tmp_buf, nfc_geo->metadata_size);
1757 
1758         oob_bit_off = nfc_geo->metadata_size * 8;
1759         src_bit_off = oob_bit_off;
1760 
1761         /* Extract interleaved payload data and ECC bits */
1762         for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1763                 if (buf)
1764                         gpmi_copy_bits(buf, step * eccsize * 8,
1765                                        tmp_buf, src_bit_off,
1766                                        eccsize * 8);
1767                 src_bit_off += eccsize * 8;
1768 
1769                 /* Pad the last ECC block so it ends on a byte boundary */
1770                 if (step == nfc_geo->ecc_chunk_count - 1 &&
1771                     (oob_bit_off + eccbits) % 8)
1772                         eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1773 
1774                 if (oob_required)
1775                         gpmi_copy_bits(oob, oob_bit_off,
1776                                        tmp_buf, src_bit_off,
1777                                        eccbits);
1778 
1779                 src_bit_off += eccbits;
1780                 oob_bit_off += eccbits;
1781         }
1782 
1783         if (oob_required) {
1784                 oob_byte_off = oob_bit_off / 8;
1785 
1786                 if (oob_byte_off < mtd->oobsize)
1787                         memcpy(oob + oob_byte_off,
1788                                tmp_buf + mtd->writesize + oob_byte_off,
1789                                mtd->oobsize - oob_byte_off);
1790         }
1791 
1792         return 0;
1793 }
1794 
1795 /*
1796  * This function writes a NAND page without involving the ECC engine (no HW
1797  * ECC generation).
1798  * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1799  * inline (interleaved with payload DATA), and does not align data chunks on
1800  * byte boundaries.
1801  * We thus need to take care moving the OOB area at the right place in the
1802  * final page, which is why we're using gpmi_copy_bits.
1803  *
1804  * See set_geometry_by_ecc_info inline comments to have a full description
1805  * of the layout used by the GPMI controller.
1806  */
1807 static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1808                                    int oob_required, int page)
1809 {
1810         struct mtd_info *mtd = nand_to_mtd(chip);
1811         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1812         struct bch_geometry *nfc_geo = &this->bch_geometry;
1813         int eccsize = nfc_geo->ecc_chunk_size;
1814         int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1815         u8 *tmp_buf = this->raw_buffer;
1816         uint8_t *oob = chip->oob_poi;
1817         size_t dst_bit_off;
1818         size_t oob_bit_off;
1819         size_t oob_byte_off;
1820         int step;
1821 
1822         /*
1823          * Initialize all bits to 1 in case we don't have a buffer for the
1824          * payload or oob data, so that unspecified bits of data are left
1825          * in their initial (erased) state.
1826          */
1827         if (!buf || !oob_required)
1828                 memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
1829 
1830         /*
1831          * First copy the metadata section (stored in oob buffer) at the
1832          * beginning of the page, as imposed by the GPMI layout.
1833          */
1834         memcpy(tmp_buf, oob, nfc_geo->metadata_size);
1835         oob_bit_off = nfc_geo->metadata_size * 8;
1836         dst_bit_off = oob_bit_off;
1837 
1838         /* Interleave payload data and ECC bits */
1839         for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1840                 if (buf)
1841                         gpmi_copy_bits(tmp_buf, dst_bit_off,
1842                                        buf, step * eccsize * 8, eccsize * 8);
1843                 dst_bit_off += eccsize * 8;
1844 
1845                 /* Pad the last ECC block so it ends on a byte boundary */
1846                 if (step == nfc_geo->ecc_chunk_count - 1 &&
1847                     (oob_bit_off + eccbits) % 8)
1848                         eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1849 
1850                 if (oob_required)
1851                         gpmi_copy_bits(tmp_buf, dst_bit_off,
1852                                        oob, oob_bit_off, eccbits);
1853 
1854                 dst_bit_off += eccbits;
1855                 oob_bit_off += eccbits;
1856         }
1857 
1858         oob_byte_off = oob_bit_off / 8;
1859 
1860         if (oob_required && oob_byte_off < mtd->oobsize)
1861                 memcpy(tmp_buf + mtd->writesize + oob_byte_off,
1862                        oob + oob_byte_off, mtd->oobsize - oob_byte_off);
1863 
1864         /*
1865          * If required, swap the bad block marker and the first byte of the
1866          * metadata section, so that we don't modify the bad block marker.
1867          *
1868          * See the layout description for a detailed explanation on why this
1869          * is needed.
1870          */
1871         if (this->swap_block_mark)
1872                 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1873 
1874         return nand_prog_page_op(chip, page, 0, tmp_buf,
1875                                  mtd->writesize + mtd->oobsize);
1876 }
1877 
1878 static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
1879 {
1880         return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
1881 }
1882 
1883 static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
1884 {
1885         return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
1886 }
1887 
1888 static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
1889 {
1890         struct mtd_info *mtd = nand_to_mtd(chip);
1891         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1892         int ret = 0;
1893         uint8_t *block_mark;
1894         int column, page, chipnr;
1895 
1896         chipnr = (int)(ofs >> chip->chip_shift);
1897         nand_select_target(chip, chipnr);
1898 
1899         column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
1900 
1901         /* Write the block mark. */
1902         block_mark = this->data_buffer_dma;
1903         block_mark[0] = 0; /* bad block marker */
1904 
1905         /* Shift to get page */
1906         page = (int)(ofs >> chip->page_shift);
1907 
1908         ret = nand_prog_page_op(chip, page, column, block_mark, 1);
1909 
1910         nand_deselect_target(chip);
1911 
1912         return ret;
1913 }
1914 
1915 static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1916 {
1917         struct boot_rom_geometry *geometry = &this->rom_geometry;
1918 
1919         /*
1920          * Set the boot block stride size.
1921          *
1922          * In principle, we should be reading this from the OTP bits, since
1923          * that's where the ROM is going to get it. In fact, we don't have any
1924          * way to read the OTP bits, so we go with the default and hope for the
1925          * best.
1926          */
1927         geometry->stride_size_in_pages = 64;
1928 
1929         /*
1930          * Set the search area stride exponent.
1931          *
1932          * In principle, we should be reading this from the OTP bits, since
1933          * that's where the ROM is going to get it. In fact, we don't have any
1934          * way to read the OTP bits, so we go with the default and hope for the
1935          * best.
1936          */
1937         geometry->search_area_stride_exponent = 2;
1938         return 0;
1939 }
1940 
1941 static const char  *fingerprint = "STMP";
1942 static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1943 {
1944         struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1945         struct device *dev = this->dev;
1946         struct nand_chip *chip = &this->nand;
1947         unsigned int search_area_size_in_strides;
1948         unsigned int stride;
1949         unsigned int page;
1950         u8 *buffer = nand_get_data_buf(chip);
1951         int found_an_ncb_fingerprint = false;
1952         int ret;
1953 
1954         /* Compute the number of strides in a search area. */
1955         search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
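             /*
              * With the defaults chosen in nand_boot_set_geometry() (a stride
              * of 64 pages and a search area stride exponent of 2) this scans
              * four strides, i.e. pages 0, 64, 128 and 192.
              */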
1956 
1957         nand_select_target(chip, 0);
1958 
1959         /*
1960          * Loop through the first search area, looking for the NCB fingerprint.
1961          */
1962         dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1963 
1964         for (stride = 0; stride < search_area_size_in_strides; stride++) {
1965                 /* Compute the page addresses. */
1966                 page = stride * rom_geo->stride_size_in_pages;
1967 
1968                 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1969 
1970                 /*
1971                  * Read the NCB fingerprint. The fingerprint is four bytes long
1972                  * and starts at byte offset 12 of the page.
1973                  */
1974                 ret = nand_read_page_op(chip, page, 12, buffer,
1975                                         strlen(fingerprint));
1976                 if (ret)
1977                         continue;
1978 
1979                 /* Look for the fingerprint. */
1980                 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1981                         found_an_ncb_fingerprint = true;
1982                         break;
1983                 }
1984 
1985         }
1986 
1987         nand_deselect_target(chip);
1988 
1989         if (found_an_ncb_fingerprint)
1990                 dev_dbg(dev, "\tFound a fingerprint\n");
1991         else
1992                 dev_dbg(dev, "\tNo fingerprint found\n");
1993         return found_an_ncb_fingerprint;
1994 }
1995 
1996 /* Writes a transcription stamp. */
1997 static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1998 {
1999         struct device *dev = this->dev;
2000         struct boot_rom_geometry *rom_geo = &this->rom_geometry;
2001         struct nand_chip *chip = &this->nand;
2002         struct mtd_info *mtd = nand_to_mtd(chip);
2003         unsigned int block_size_in_pages;
2004         unsigned int search_area_size_in_strides;
2005         unsigned int search_area_size_in_pages;
2006         unsigned int search_area_size_in_blocks;
2007         unsigned int block;
2008         unsigned int stride;
2009         unsigned int page;
2010         u8 *buffer = nand_get_data_buf(chip);
2011         int status;
2012 
2013         /* Compute the search area geometry. */
2014         block_size_in_pages = mtd->erasesize / mtd->writesize;
2015         search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
2016         search_area_size_in_pages = search_area_size_in_strides *
2017                                         rom_geo->stride_size_in_pages;
2018         search_area_size_in_blocks =
2019                   (search_area_size_in_pages + (block_size_in_pages - 1)) /
2020                                     block_size_in_pages;
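             /*
              * Illustration with assumed sizes (2 KiB pages, 128 KiB blocks,
              * i.e. 64 pages per block): 4 strides * 64 pages = 256 pages,
              * which rounds up to 4 blocks to erase.
              */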
2021 
2022         dev_dbg(dev, "Search Area Geometry :\n");
2023         dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
2024         dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
2025         dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
2026 
2027         nand_select_target(chip, 0);
2028 
2029         /* Loop over blocks in the first search area, erasing them. */
2030         dev_dbg(dev, "Erasing the search area...\n");
2031 
2032         for (block = 0; block < search_area_size_in_blocks; block++) {
2033                 /* Erase this block. */
2034                 dev_dbg(dev, "\tErasing block 0x%x\n", block);
2035                 status = nand_erase_op(chip, block);
2036                 if (status)
2037                         dev_err(dev, "[%s] Erase failed.\n", __func__);
2038         }
2039 
2040         /* Write the NCB fingerprint into the page buffer. */
2041         memset(buffer, ~0, mtd->writesize);
2042         memcpy(buffer + 12, fingerprint, strlen(fingerprint));
2043 
2044         /* Loop through the first search area, writing NCB fingerprints. */
2045         dev_dbg(dev, "Writing NCB fingerprints...\n");
2046         for (stride = 0; stride < search_area_size_in_strides; stride++) {
2047                 /* Compute the page addresses. */
2048                 page = stride * rom_geo->stride_size_in_pages;
2049 
2050                 /* Write the first page of the current stride. */
2051                 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
2052 
2053                 status = chip->ecc.write_page_raw(chip, buffer, 0, page);
2054                 if (status)
2055                         dev_err(dev, "[%s] Write failed.\n", __func__);
2056         }
2057 
2058         nand_deselect_target(chip);
2059 
2060         return 0;
2061 }
2062 
2063 static int mx23_boot_init(struct gpmi_nand_data  *this)
2064 {
2065         struct device *dev = this->dev;
2066         struct nand_chip *chip = &this->nand;
2067         struct mtd_info *mtd = nand_to_mtd(chip);
2068         unsigned int block_count;
2069         unsigned int block;
2070         int     chipnr;
2071         int     page;
2072         loff_t  byte;
2073         uint8_t block_mark;
2074         int     ret = 0;
2075 
2076         /*
2077          * If control arrives here, we can't use block mark swapping, which
2078          * means we're forced to use transcription. First, scan for the
2079          * transcription stamp. If we find it, then we don't have to do
2080          * anything -- the block marks are already transcribed.
2081          */
2082         if (mx23_check_transcription_stamp(this))
2083                 return 0;
2084 
2085         /*
2086          * If control arrives here, we couldn't find a transcription stamp, so
2087          * we presume the block marks are in the conventional location.
2088          */
2089         dev_dbg(dev, "Transcribing bad block marks...\n");
2090 
2091         /* Compute the number of blocks in the entire medium. */
2092         block_count = nanddev_eraseblocks_per_target(&chip->base);
2093 
2094         /*
2095          * Loop over all the blocks in the medium, transcribing block marks as
2096          * we go.
2097          */
2098         for (block = 0; block < block_count; block++) {
2099                 /*
2100                  * Compute the chip, page and byte addresses for this block's
2101                  * conventional mark.
2102                  */
2103                 chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
2104                 page = block << (chip->phys_erase_shift - chip->page_shift);
2105                 byte = block <<  chip->phys_erase_shift;
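                     /*
                      * For example, with assumed 2 KiB pages (page_shift = 11)
                      * and 128 KiB blocks (phys_erase_shift = 17), block 5
                      * maps to page 5 << 6 = 320 and byte offset
                      * 5 << 17 = 0xa0000.
                      */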
2106 
2107                 /* Send the command to read the conventional block mark. */
2108                 nand_select_target(chip, chipnr);
2109                 ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
2110                                         1);
2111                 nand_deselect_target(chip);
2112 
2113                 if (ret)
2114                         continue;
2115 
2116                 /*
2117                  * Check if the block is marked bad. If so, we need to mark it
2118                  * again, but this time the result will be a mark in the
2119                  * location where we transcribe block marks.
2120                  */
2121                 if (block_mark != 0xff) {
2122                         dev_dbg(dev, "Transcribing mark in block %u\n", block);
2123                         ret = chip->legacy.block_markbad(chip, byte);
2124                         if (ret)
2125                                 dev_err(dev,
2126                                         "Failed to mark block bad with ret %d\n",
2127                                         ret);
2128                 }
2129         }
2130 
2131         /* Write the stamp that indicates we've transcribed the block marks. */
2132         mx23_write_transcription_stamp(this);
2133         return 0;
2134 }
2135 
2136 static int nand_boot_init(struct gpmi_nand_data  *this)
2137 {
2138         nand_boot_set_geometry(this);
2139 
2140         /* This is ROM arch-specific initialization before the BBT scanning. */
2141         if (GPMI_IS_MX23(this))
2142                 return mx23_boot_init(this);
2143         return 0;
2144 }
2145 
2146 static int gpmi_set_geometry(struct gpmi_nand_data *this)
2147 {
2148         int ret;
2149 
2150         /* Free the temporary DMA memory for reading ID. */
2151         gpmi_free_dma_buffer(this);
2152 
2153         /* Set up the NFC geometry which is used by BCH. */
2154         ret = bch_set_geometry(this);
2155         if (ret) {
2156                 dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
2157                 return ret;
2158         }
2159 
2160         /* Alloc the new DMA buffers according to the pagesize and oobsize */
2161         return gpmi_alloc_dma_buffer(this);
2162 }
2163 
2164 static int gpmi_init_last(struct gpmi_nand_data *this)
2165 {
2166         struct nand_chip *chip = &this->nand;
2167         struct mtd_info *mtd = nand_to_mtd(chip);
2168         struct nand_ecc_ctrl *ecc = &chip->ecc;
2169         struct bch_geometry *bch_geo = &this->bch_geometry;
2170         int ret;
2171 
2172         /* Set up the medium geometry */
2173         ret = gpmi_set_geometry(this);
2174         if (ret)
2175                 return ret;
2176 
2177         /* Init the nand_ecc_ctrl{} */
2178         ecc->read_page  = gpmi_ecc_read_page;
2179         ecc->write_page = gpmi_ecc_write_page;
2180         ecc->read_oob   = gpmi_ecc_read_oob;
2181         ecc->write_oob  = gpmi_ecc_write_oob;
2182         ecc->read_page_raw = gpmi_ecc_read_page_raw;
2183         ecc->write_page_raw = gpmi_ecc_write_page_raw;
2184         ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
2185         ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
2186         ecc->mode       = NAND_ECC_HW;
2187         ecc->size       = bch_geo->ecc_chunk_size;
2188         ecc->strength   = bch_geo->ecc_strength;
2189         mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
2190 
2191         /*
2192          * We only enable the subpage read when:
2193          *  (1) the chip is imx6, and
2194          *  (2) the size of the ECC parity is byte aligned.
2195          */
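             /*
              * For instance (assumed values), gf_len = 13 with ecc_strength =
              * 8 gives 104 parity bits (13 whole bytes), so subpage reads are
              * enabled; a strength of 14 would give 182 bits and leave them
              * disabled.
              */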
2196         if (GPMI_IS_MX6(this) &&
2197                 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
2198                 ecc->read_subpage = gpmi_ecc_read_subpage;
2199                 chip->options |= NAND_SUBPAGE_READ;
2200         }
2201 
2202         return 0;
2203 }
2204 
2205 static int gpmi_nand_attach_chip(struct nand_chip *chip)
2206 {
2207         struct gpmi_nand_data *this = nand_get_controller_data(chip);
2208         int ret;
2209 
2210         if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2211                 chip->bbt_options |= NAND_BBT_NO_OOB;
2212 
2213                 if (of_property_read_bool(this->dev->of_node,
2214                                           "fsl,no-blockmark-swap"))
2215                         this->swap_block_mark = false;
2216         }
2217         dev_dbg(this->dev, "Blockmark swapping %sabled\n",
2218                 this->swap_block_mark ? "en" : "dis");
2219 
2220         ret = gpmi_init_last(this);
2221         if (ret)
2222                 return ret;
2223 
2224         chip->options |= NAND_SKIP_BBTSCAN;
2225 
2226         return 0;
2227 }
2228 
2229 static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2230 {
2231         struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2232 
2233         this->ntransfers++;
2234 
2235         if (this->ntransfers == GPMI_MAX_TRANSFERS)
2236                 return NULL;
2237 
2238         return transfer;
2239 }
2240 
2241 static struct dma_async_tx_descriptor *gpmi_chain_command(
2242         struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
2243 {
2244         struct dma_chan *channel = get_dma_chan(this);
2245         struct dma_async_tx_descriptor *desc;
2246         struct gpmi_transfer *transfer;
2247         int chip = this->nand.cur_cs;
2248         u32 pio[3];
2249 
2250         /* [1] send out the PIO words */
2251         pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2252                 | BM_GPMI_CTRL0_WORD_LENGTH
2253                 | BF_GPMI_CTRL0_CS(chip, this)
2254                 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2255                 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
2256                 | BM_GPMI_CTRL0_ADDRESS_INCREMENT
2257                 | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
2258         pio[1] = 0;
2259         pio[2] = 0;
2260         desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2261                                       DMA_TRANS_NONE, 0);
2262         if (!desc)
2263                 return NULL;
2264 
2265         transfer = get_next_transfer(this);
2266         if (!transfer)
2267                 return NULL;
2268 
2269         transfer->cmdbuf[0] = cmd;
2270         if (naddr)
2271                 memcpy(&transfer->cmdbuf[1], addr, naddr);
2272 
2273         sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
2274         dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
2275 
2276         transfer->direction = DMA_TO_DEVICE;
2277 
2278         desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
2279                                        MXS_DMA_CTRL_WAIT4END);
2280         return desc;
2281 }
2282 
2283 static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
2284         struct gpmi_nand_data *this)
2285 {
2286         struct dma_chan *channel = get_dma_chan(this);
2287         u32 pio[2];
2288 
2289         pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
2290                 | BM_GPMI_CTRL0_WORD_LENGTH
2291                 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2292                 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2293                 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2294                 | BF_GPMI_CTRL0_XFER_COUNT(0);
2295         pio[1] = 0;
2296 
2297         return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
2298                                 MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
2299 }
2300 
2301 static struct dma_async_tx_descriptor *gpmi_chain_data_read(
2302         struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
2303 {
2304         struct dma_async_tx_descriptor *desc;
2305         struct dma_chan *channel = get_dma_chan(this);
2306         struct gpmi_transfer *transfer;
2307         u32 pio[6] = {};
2308 
2309         transfer = get_next_transfer(this);
2310         if (!transfer)
2311                 return NULL;
2312 
2313         transfer->direction = DMA_FROM_DEVICE;
2314 
2315         *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
2316                                    DMA_FROM_DEVICE);
2317 
2318         pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
2319                 | BM_GPMI_CTRL0_WORD_LENGTH
2320                 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2321                 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2322                 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2323                 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2324 
2325         if (this->bch) {
2326                 pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
2327                         | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
2328                         | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
2329                                 | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2330                 pio[3] = raw_len;
2331                 pio[4] = transfer->sgl.dma_address;
2332                 pio[5] = this->auxiliary_phys;
2333         }
2334 
2335         desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2336                                       DMA_TRANS_NONE, 0);
2337         if (!desc)
2338                 return NULL;
2339 
2340         if (!this->bch)
2341                 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2342                                              DMA_DEV_TO_MEM,
2343                                              MXS_DMA_CTRL_WAIT4END);
2344 
2345         return desc;
2346 }
2347 
2348 static struct dma_async_tx_descriptor *gpmi_chain_data_write(
2349         struct gpmi_nand_data *this, const void *buf, int raw_len)
2350 {
2351         struct dma_chan *channel = get_dma_chan(this);
2352         struct dma_async_tx_descriptor *desc;
2353         struct gpmi_transfer *transfer;
2354         u32 pio[6] = {};
2355 
2356         transfer = get_next_transfer(this);
2357         if (!transfer)
2358                 return NULL;
2359 
2360         transfer->direction = DMA_TO_DEVICE;
2361 
2362         prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
2363 
2364         pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2365                 | BM_GPMI_CTRL0_WORD_LENGTH
2366                 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2367                 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2368                 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2369                 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2370 
2371         if (this->bch) {
2372                 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2373                         | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
2374                         | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
2375                                         BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2376                 pio[3] = raw_len;
2377                 pio[4] = transfer->sgl.dma_address;
2378                 pio[5] = this->auxiliary_phys;
2379         }
2380 
2381         desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2382                                       DMA_TRANS_NONE,
2383                                       (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
2384         if (!desc)
2385                 return NULL;
2386 
2387         if (!this->bch)
2388                 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2389                                                DMA_MEM_TO_DEV,
2390                                                MXS_DMA_CTRL_WAIT4END);
2391 
2392         return desc;
2393 }
2394 
2395 static int gpmi_nfc_exec_op(struct nand_chip *chip,
2396                              const struct nand_operation *op,
2397                              bool check_only)
2398 {
2399         const struct nand_op_instr *instr;
2400         struct gpmi_nand_data *this = nand_get_controller_data(chip);
2401         struct dma_async_tx_descriptor *desc = NULL;
2402         int i, ret, buf_len = 0, nbufs = 0;
2403         u8 cmd = 0;
2404         void *buf_read = NULL;
2405         const void *buf_write = NULL;
2406         bool direct = false;
2407         struct completion *completion;
2408         unsigned long to;
2409 
2410         this->ntransfers = 0;
2411         for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2412                 this->transfers[i].direction = DMA_NONE;
2413 
2414         ret = pm_runtime_get_sync(this->dev);
2415         if (ret < 0)
2416                 return ret;
2417 
2418         /*
2419          * This driver currently supports only one NAND chip. Plus, dies share
2420          * the same configuration. So once timings have been applied on the
2421          * controller side, they will not change anymore. When that time
2422          * comes, the check on must_apply_timings will have to be dropped.
2423          */
2424         if (this->hw.must_apply_timings) {
2425                 this->hw.must_apply_timings = false;
2426                 gpmi_nfc_apply_timings(this);
2427         }
2428 
2429         dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2430 
2431         for (i = 0; i < op->ninstrs; i++) {
2432                 instr = &op->instrs[i];
2433 
2434                 nand_op_trace("  ", instr);
2435 
2436                 switch (instr->type) {
2437                 case NAND_OP_WAITRDY_INSTR:
2438                         desc = gpmi_chain_wait_ready(this);
2439                         break;
2440                 case NAND_OP_CMD_INSTR:
2441                         cmd = instr->ctx.cmd.opcode;
2442 
2443                         /*
2444                          * When this command has an address cycle, chain it
2445                          * together with the address cycle.
2446                          */
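                            /*
                             * For example, a typical page read issues CMD
                             * 0x00, the address cycles, then CMD 0x30: the
                             * 0x00 opcode is sent together with the addresses
                             * in one CLE+ALE transfer, while 0x30 goes out on
                             * its own.
                             */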
2447                         if (i + 1 != op->ninstrs &&
2448                             op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2449                                 continue;
2450 
2451                         desc = gpmi_chain_command(this, cmd, NULL, 0);
2452 
2453                         break;
2454                 case NAND_OP_ADDR_INSTR:
2455                         desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2456                                                   instr->ctx.addr.naddrs);
2457                         break;
2458                 case NAND_OP_DATA_OUT_INSTR:
2459                         buf_write = instr->ctx.data.buf.out;
2460                         buf_len = instr->ctx.data.len;
2461                         nbufs++;
2462 
2463                         desc = gpmi_chain_data_write(this, buf_write, buf_len);
2464 
2465                         break;
2466                 case NAND_OP_DATA_IN_INSTR:
2467                         if (!instr->ctx.data.len)
2468                                 break;
2469                         buf_read = instr->ctx.data.buf.in;
2470                         buf_len = instr->ctx.data.len;
2471                         nbufs++;
2472 
2473                         desc = gpmi_chain_data_read(this, buf_read, buf_len,
2474                                                    &direct);
2475                         break;
2476                 }
2477 
2478                 if (!desc) {
2479                         ret = -ENXIO;
2480                         goto unmap;
2481                 }
2482         }
2483 
2484         dev_dbg(this->dev, "%s setup done\n", __func__);
2485 
2486         if (nbufs > 1) {
2487                 dev_err(this->dev, "Multiple data instructions not supported\n");
2488                 ret = -EINVAL;
2489                 goto unmap;
2490         }
2491 
2492         if (this->bch) {
2493                 writel(this->bch_flashlayout0,
2494                        this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2495                 writel(this->bch_flashlayout1,
2496                        this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2497         }
2498 
2499         if (this->bch && buf_read) {
2500                 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2501                        this->resources.bch_regs + HW_BCH_CTRL_SET);
2502                 completion = &this->bch_done;
2503         } else {
2504                 desc->callback = dma_irq_callback;
2505                 desc->callback_param = this;
2506                 completion = &this->dma_done;
2507         }
2508 
2509         init_completion(completion);
2510 
2511         dmaengine_submit(desc);
2512         dma_async_issue_pending(get_dma_chan(this));
2513 
2514         to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
2515         if (!to) {
2516                 dev_err(this->dev, "DMA timeout, last DMA\n");
2517                 gpmi_dump_info(this);
2518                 ret = -ETIMEDOUT;
2519                 goto unmap;
2520         }
2521 
2522         writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2523                this->resources.bch_regs + HW_BCH_CTRL_CLR);
2524         gpmi_clear_bch(this);
2525 
2526         ret = 0;
2527 
2528 unmap:
2529         for (i = 0; i < this->ntransfers; i++) {
2530                 struct gpmi_transfer *transfer = &this->transfers[i];
2531 
2532                 if (transfer->direction != DMA_NONE)
2533                         dma_unmap_sg(this->dev, &transfer->sgl, 1,
2534                                      transfer->direction);
2535         }
2536 
2537         if (!ret && buf_read && !direct)
2538                 memcpy(buf_read, this->data_buffer_dma,
2539                        gpmi_raw_len_to_len(this, buf_len));
2540 
2541         this->bch = false;
2542 
2543         pm_runtime_mark_last_busy(this->dev);
2544         pm_runtime_put_autosuspend(this->dev);
2545 
2546         return ret;
2547 }
2548 
2549 static const struct nand_controller_ops gpmi_nand_controller_ops = {
2550         .attach_chip = gpmi_nand_attach_chip,
2551         .setup_data_interface = gpmi_setup_data_interface,
2552         .exec_op = gpmi_nfc_exec_op,
2553 };
2554 
2555 static int gpmi_nand_init(struct gpmi_nand_data *this)
2556 {
2557         struct nand_chip *chip = &this->nand;
2558         struct mtd_info  *mtd = nand_to_mtd(chip);
2559         int ret;
2560 
2561         /* init the MTD data structures */
2562         mtd->name               = "gpmi-nand";
2563         mtd->dev.parent         = this->dev;
2564 
2565         /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
2566         nand_set_controller_data(chip, this);
2567         nand_set_flash_node(chip, this->pdev->dev.of_node);
2568         chip->legacy.block_markbad = gpmi_block_markbad;
2569         chip->badblock_pattern  = &gpmi_bbt_descr;
2570         chip->options           |= NAND_NO_SUBPAGE_WRITE;
2571 
2572         /* Set up swap_block_mark; it must be set before gpmi_set_geometry() */
2573         this->swap_block_mark = !GPMI_IS_MX23(this);
2574 
2575         /*
2576          * Allocate a temporary DMA buffer for reading ID in the
2577          * nand_scan_ident().
2578          */
2579         this->bch_geometry.payload_size = 1024;
2580         this->bch_geometry.auxiliary_size = 128;
2581         ret = gpmi_alloc_dma_buffer(this);
2582         if (ret)
2583                 goto err_out;
2584 
2585         nand_controller_init(&this->base);
2586         this->base.ops = &gpmi_nand_controller_ops;
2587         chip->controller = &this->base;
2588 
2589         ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
2590         if (ret)
2591                 goto err_out;
2592 
2593         ret = nand_boot_init(this);
2594         if (ret)
2595                 goto err_nand_cleanup;
2596         ret = nand_create_bbt(chip);
2597         if (ret)
2598                 goto err_nand_cleanup;
2599 
2600         ret = mtd_device_register(mtd, NULL, 0);
2601         if (ret)
2602                 goto err_nand_cleanup;
2603         return 0;
2604 
2605 err_nand_cleanup:
2606         nand_cleanup(chip);
2607 err_out:
2608         gpmi_free_dma_buffer(this);
2609         return ret;
2610 }
2611 
2612 static const struct of_device_id gpmi_nand_id_table[] = {
2613         {
2614                 .compatible = "fsl,imx23-gpmi-nand",
2615                 .data = &gpmi_devdata_imx23,
2616         }, {
2617                 .compatible = "fsl,imx28-gpmi-nand",
2618                 .data = &gpmi_devdata_imx28,
2619         }, {
2620                 .compatible = "fsl,imx6q-gpmi-nand",
2621                 .data = &gpmi_devdata_imx6q,
2622         }, {
2623                 .compatible = "fsl,imx6sx-gpmi-nand",
2624                 .data = &gpmi_devdata_imx6sx,
2625         }, {
2626                 .compatible = "fsl,imx7d-gpmi-nand",
2627                 .data = &gpmi_devdata_imx7d,
2628         }, {}
2629 };
2630 MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2631 
2632 static int gpmi_nand_probe(struct platform_device *pdev)
2633 {
2634         struct gpmi_nand_data *this;
2635         const struct of_device_id *of_id;
2636         int ret;
2637 
2638         this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
2639         if (!this)
2640                 return -ENOMEM;
2641 
2642         of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
2643         if (of_id) {
2644                 this->devdata = of_id->data;
2645         } else {
2646                 dev_err(&pdev->dev, "Failed to find the right device id.\n");
2647                 return -ENODEV;
2648         }
2649 
2650         platform_set_drvdata(pdev, this);
2651         this->pdev  = pdev;
2652         this->dev   = &pdev->dev;
2653 
2654         ret = acquire_resources(this);
2655         if (ret)
2656                 goto exit_acquire_resources;
2657 
2658         ret = __gpmi_enable_clk(this, true);
2659         if (ret)
2660                 goto exit_nfc_init;
2661 
2662         pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2663         pm_runtime_use_autosuspend(&pdev->dev);
2664         pm_runtime_set_active(&pdev->dev);
2665         pm_runtime_enable(&pdev->dev);
2666         pm_runtime_get_sync(&pdev->dev);
2667 
2668         ret = gpmi_init(this);
2669         if (ret)
2670                 goto exit_nfc_init;
2671 
2672         ret = gpmi_nand_init(this);
2673         if (ret)
2674                 goto exit_nfc_init;
2675 
2676         pm_runtime_mark_last_busy(&pdev->dev);
2677         pm_runtime_put_autosuspend(&pdev->dev);
2678 
2679         dev_info(this->dev, "driver registered.\n");
2680 
2681         return 0;
2682 
2683 exit_nfc_init:
2684         pm_runtime_put(&pdev->dev);
2685         pm_runtime_disable(&pdev->dev);
2686         release_resources(this);
2687 exit_acquire_resources:
2688 
2689         return ret;
2690 }
2691 
2692 static int gpmi_nand_remove(struct platform_device *pdev)
2693 {
2694         struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2695 
2696         pm_runtime_put_sync(&pdev->dev);
2697         pm_runtime_disable(&pdev->dev);
2698 
2699         nand_release(&this->nand);
2700         gpmi_free_dma_buffer(this);
2701         release_resources(this);
2702         return 0;
2703 }
2704 
2705 #ifdef CONFIG_PM_SLEEP
2706 static int gpmi_pm_suspend(struct device *dev)
2707 {
2708         struct gpmi_nand_data *this = dev_get_drvdata(dev);
2709 
2710         release_dma_channels(this);
2711         return 0;
2712 }
2713 
2714 static int gpmi_pm_resume(struct device *dev)
2715 {
2716         struct gpmi_nand_data *this = dev_get_drvdata(dev);
2717         int ret;
2718 
2719         ret = acquire_dma_channels(this);
2720         if (ret < 0)
2721                 return ret;
2722 
2723         /* re-init the GPMI registers */
2724         ret = gpmi_init(this);
2725         if (ret) {
2726                 dev_err(this->dev, "Error setting GPMI : %d\n", ret);
2727                 return ret;
2728         }
2729 
2730         /* Set flag to get timing setup restored for next exec_op */
2731         if (this->hw.clk_rate)
2732                 this->hw.must_apply_timings = true;
2733 
2734         /* re-init the BCH registers */
2735         ret = bch_set_geometry(this);
2736         if (ret) {
2737                 dev_err(this->dev, "Error setting BCH : %d\n", ret);
2738                 return ret;
2739         }
2740 
2741         return 0;
2742 }
2743 #endif /* CONFIG_PM_SLEEP */
2744 
2745 static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2746 {
2747         struct gpmi_nand_data *this = dev_get_drvdata(dev);
2748 
2749         return __gpmi_enable_clk(this, false);
2750 }
2751 
2752 static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2753 {
2754         struct gpmi_nand_data *this = dev_get_drvdata(dev);
2755 
2756         return __gpmi_enable_clk(this, true);
2757 }
2758 
2759 static const struct dev_pm_ops gpmi_pm_ops = {
2760         SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2761         SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
2762 };
2763 
2764 static struct platform_driver gpmi_nand_driver = {
2765         .driver = {
2766                 .name = "gpmi-nand",
2767                 .pm = &gpmi_pm_ops,
2768                 .of_match_table = gpmi_nand_id_table,
2769         },
2770         .probe   = gpmi_nand_probe,
2771         .remove  = gpmi_nand_remove,
2772 };
2773 module_platform_driver(gpmi_nand_driver);
2774 
2775 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2776 MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2777 MODULE_LICENSE("GPL");
