root/drivers/mtd/nand/spi/core.c


DEFINITIONS

This source file includes the following definitions.
  1. spinand_read_reg_op
  2. spinand_write_reg_op
  3. spinand_read_status
  4. spinand_get_cfg
  5. spinand_set_cfg
  6. spinand_upd_cfg
  7. spinand_select_target
  8. spinand_init_cfg_cache
  9. spinand_init_quad_enable
  10. spinand_ecc_enable
  11. spinand_write_enable_op
  12. spinand_load_page_op
  13. spinand_read_from_cache_op
  14. spinand_write_to_cache_op
  15. spinand_program_op
  16. spinand_erase_op
  17. spinand_wait
  18. spinand_read_id_op
  19. spinand_reset_op
  20. spinand_lock_block
  21. spinand_check_ecc_status
  22. spinand_read_page
  23. spinand_write_page
  24. spinand_mtd_read
  25. spinand_mtd_write
  26. spinand_isbad
  27. spinand_mtd_block_isbad
  28. spinand_markbad
  29. spinand_mtd_block_markbad
  30. spinand_erase
  31. spinand_mtd_erase
  32. spinand_mtd_block_isreserved
  33. spinand_create_dirmap
  34. spinand_create_dirmaps
  35. spinand_manufacturer_detect
  36. spinand_manufacturer_init
  37. spinand_manufacturer_cleanup
  38. spinand_select_op_variant
  39. spinand_match_and_init
  40. spinand_detect
  41. spinand_noecc_ooblayout_ecc
  42. spinand_noecc_ooblayout_free
  43. spinand_init
  44. spinand_cleanup
  45. spinand_probe
  46. spinand_remove

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (C) 2016-2017 Micron Technology, Inc.
   4  *
   5  * Authors:
   6  *      Peter Pan <peterpandong@micron.com>
   7  *      Boris Brezillon <boris.brezillon@bootlin.com>
   8  */
   9 
  10 #define pr_fmt(fmt)     "spi-nand: " fmt
  11 
  12 #include <linux/device.h>
  13 #include <linux/jiffies.h>
  14 #include <linux/kernel.h>
  15 #include <linux/module.h>
  16 #include <linux/mtd/spinand.h>
  17 #include <linux/of.h>
  18 #include <linux/slab.h>
  19 #include <linux/spi/spi.h>
  20 #include <linux/spi/spi-mem.h>
  21 
  22 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
  23 {
  24         struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
  25                                                       spinand->scratchbuf);
  26         int ret;
  27 
  28         ret = spi_mem_exec_op(spinand->spimem, &op);
  29         if (ret)
  30                 return ret;
  31 
  32         *val = *spinand->scratchbuf;
  33         return 0;
  34 }
  35 
  36 static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
  37 {
  38         struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
  39                                                       spinand->scratchbuf);
  40 
  41         *spinand->scratchbuf = val;
  42         return spi_mem_exec_op(spinand->spimem, &op);
  43 }
  44 
  45 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
  46 {
  47         return spinand_read_reg_op(spinand, REG_STATUS, status);
  48 }
  49 
  50 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
  51 {
  52         struct nand_device *nand = spinand_to_nand(spinand);
  53 
  54         if (WARN_ON(spinand->cur_target < 0 ||
  55                     spinand->cur_target >= nand->memorg.ntargets))
  56                 return -EINVAL;
  57 
  58         *cfg = spinand->cfg_cache[spinand->cur_target];
  59         return 0;
  60 }
  61 
  62 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
  63 {
  64         struct nand_device *nand = spinand_to_nand(spinand);
  65         int ret;
  66 
  67         if (WARN_ON(spinand->cur_target < 0 ||
  68                     spinand->cur_target >= nand->memorg.ntargets))
  69                 return -EINVAL;
  70 
  71         if (spinand->cfg_cache[spinand->cur_target] == cfg)
  72                 return 0;
  73 
  74         ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
  75         if (ret)
  76                 return ret;
  77 
  78         spinand->cfg_cache[spinand->cur_target] = cfg;
  79         return 0;
  80 }
  81 
  82 /**
  83  * spinand_upd_cfg() - Update the configuration register
  84  * @spinand: the spinand device
  85  * @mask: the mask encoding the bits to update in the config reg
  86  * @val: the new value to apply
  87  *
  88  * Update the configuration register.
  89  *
  90  * Return: 0 on success, a negative error code otherwise.
  91  */
  92 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
  93 {
  94         int ret;
  95         u8 cfg;
  96 
  97         ret = spinand_get_cfg(spinand, &cfg);
  98         if (ret)
  99                 return ret;
 100 
 101         cfg &= ~mask;
 102         cfg |= val;
 103 
 104         return spinand_set_cfg(spinand, cfg);
 105 }
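
     /*
      * Typical use is a read-modify-write of a single field: spinand_init()
      * below, for instance, clears the OTP bit with
      * spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0) while leaving the other
      * CFG bits untouched.
      */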
 106 
 107 /**
 108  * spinand_select_target() - Select a specific NAND target/die
 109  * @spinand: the spinand device
 110  * @target: the target/die to select
 111  *
 112  * Select a new target/die. If the chip only has one die, this function is a no-op.
 113  *
 114  * Return: 0 on success, a negative error code otherwise.
 115  */
 116 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
 117 {
 118         struct nand_device *nand = spinand_to_nand(spinand);
 119         int ret;
 120 
 121         if (WARN_ON(target >= nand->memorg.ntargets))
 122                 return -EINVAL;
 123 
 124         if (spinand->cur_target == target)
 125                 return 0;
 126 
 127         if (nand->memorg.ntargets == 1) {
 128                 spinand->cur_target = target;
 129                 return 0;
 130         }
 131 
 132         ret = spinand->select_target(spinand, target);
 133         if (ret)
 134                 return ret;
 135 
 136         spinand->cur_target = target;
 137         return 0;
 138 }
 139 
 140 static int spinand_init_cfg_cache(struct spinand_device *spinand)
 141 {
 142         struct nand_device *nand = spinand_to_nand(spinand);
 143         struct device *dev = &spinand->spimem->spi->dev;
 144         unsigned int target;
 145         int ret;
 146 
 147         spinand->cfg_cache = devm_kcalloc(dev,
 148                                           nand->memorg.ntargets,
 149                                           sizeof(*spinand->cfg_cache),
 150                                           GFP_KERNEL);
 151         if (!spinand->cfg_cache)
 152                 return -ENOMEM;
 153 
 154         for (target = 0; target < nand->memorg.ntargets; target++) {
 155                 ret = spinand_select_target(spinand, target);
 156                 if (ret)
 157                         return ret;
 158 
 159                 /*
 160                  * We use spinand_read_reg_op() instead of spinand_get_cfg()
 161                  * here to bypass the config cache.
 162                  */
 163                 ret = spinand_read_reg_op(spinand, REG_CFG,
 164                                           &spinand->cfg_cache[target]);
 165                 if (ret)
 166                         return ret;
 167         }
 168 
 169         return 0;
 170 }
 171 
 172 static int spinand_init_quad_enable(struct spinand_device *spinand)
 173 {
 174         bool enable = false;
 175 
 176         if (!(spinand->flags & SPINAND_HAS_QE_BIT))
 177                 return 0;
 178 
 179         if (spinand->op_templates.read_cache->data.buswidth == 4 ||
 180             spinand->op_templates.write_cache->data.buswidth == 4 ||
 181             spinand->op_templates.update_cache->data.buswidth == 4)
 182                 enable = true;
 183 
 184         return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
 185                                enable ? CFG_QUAD_ENABLE : 0);
 186 }
 187 
 188 static int spinand_ecc_enable(struct spinand_device *spinand,
 189                               bool enable)
 190 {
 191         return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
 192                                enable ? CFG_ECC_ENABLE : 0);
 193 }
 194 
 195 static int spinand_write_enable_op(struct spinand_device *spinand)
 196 {
 197         struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
 198 
 199         return spi_mem_exec_op(spinand->spimem, &op);
 200 }
 201 
 202 static int spinand_load_page_op(struct spinand_device *spinand,
 203                                 const struct nand_page_io_req *req)
 204 {
 205         struct nand_device *nand = spinand_to_nand(spinand);
 206         unsigned int row = nanddev_pos_to_row(nand, &req->pos);
 207         struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
 208 
 209         return spi_mem_exec_op(spinand->spimem, &op);
 210 }
 211 
 212 static int spinand_read_from_cache_op(struct spinand_device *spinand,
 213                                       const struct nand_page_io_req *req)
 214 {
 215         struct nand_device *nand = spinand_to_nand(spinand);
 216         struct mtd_info *mtd = nanddev_to_mtd(nand);
 217         struct spi_mem_dirmap_desc *rdesc;
 218         unsigned int nbytes = 0;
 219         void *buf = NULL;
 220         u16 column = 0;
 221         ssize_t ret;
 222 
 223         if (req->datalen) {
 224                 buf = spinand->databuf;
 225                 nbytes = nanddev_page_size(nand);
 226                 column = 0;
 227         }
 228 
 229         if (req->ooblen) {
 230                 nbytes += nanddev_per_page_oobsize(nand);
 231                 if (!buf) {
 232                         buf = spinand->oobbuf;
 233                         column = nanddev_page_size(nand);
 234                 }
 235         }
 236 
 237         rdesc = spinand->dirmaps[req->pos.plane].rdesc;
 238 
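             /*
              * A dirmap read may complete partially: spi_mem_dirmap_read()
              * can return fewer bytes than requested, so keep issuing reads
              * until the whole page + OOB cache content has been fetched.
              */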
 239         while (nbytes) {
 240                 ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
 241                 if (ret < 0)
 242                         return ret;
 243 
 244                 if (!ret || ret > nbytes)
 245                         return -EIO;
 246 
 247                 nbytes -= ret;
 248                 column += ret;
 249                 buf += ret;
 250         }
 251 
 252         if (req->datalen)
 253                 memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
 254                        req->datalen);
 255 
 256         if (req->ooblen) {
 257                 if (req->mode == MTD_OPS_AUTO_OOB)
 258                         mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
 259                                                     spinand->oobbuf,
 260                                                     req->ooboffs,
 261                                                     req->ooblen);
 262                 else
 263                         memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
 264                                req->ooblen);
 265         }
 266 
 267         return 0;
 268 }
 269 
 270 static int spinand_write_to_cache_op(struct spinand_device *spinand,
 271                                      const struct nand_page_io_req *req)
 272 {
 273         struct nand_device *nand = spinand_to_nand(spinand);
 274         struct mtd_info *mtd = nanddev_to_mtd(nand);
 275         struct spi_mem_dirmap_desc *wdesc;
 276         unsigned int nbytes, column = 0;
 277         void *buf = spinand->databuf;
 278         ssize_t ret;
 279 
 280         /*
 281          * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
 282          * the cache content to 0xFF (depends on vendor implementation), so we
 283          * must fill the page cache entirely even if we only want to program
 284          * the data portion of the page, otherwise we might corrupt the BBM or
 285          * user data previously programmed in OOB area.
 286          */
 287         nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
 288         memset(spinand->databuf, 0xff, nbytes);
 289 
 290         if (req->datalen)
 291                 memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
 292                        req->datalen);
 293 
 294         if (req->ooblen) {
 295                 if (req->mode == MTD_OPS_AUTO_OOB)
 296                         mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
 297                                                     spinand->oobbuf,
 298                                                     req->ooboffs,
 299                                                     req->ooblen);
 300                 else
 301                         memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
 302                                req->ooblen);
 303         }
 304 
 305         wdesc = spinand->dirmaps[req->pos.plane].wdesc;
 306 
 307         while (nbytes) {
 308                 ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
 309                 if (ret < 0)
 310                         return ret;
 311 
 312                 if (!ret || ret > nbytes)
 313                         return -EIO;
 314 
 315                 nbytes -= ret;
 316                 column += ret;
 317                 buf += ret;
 318         }
 319 
 320         return 0;
 321 }
 322 
 323 static int spinand_program_op(struct spinand_device *spinand,
 324                               const struct nand_page_io_req *req)
 325 {
 326         struct nand_device *nand = spinand_to_nand(spinand);
 327         unsigned int row = nanddev_pos_to_row(nand, &req->pos);
 328         struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
 329 
 330         return spi_mem_exec_op(spinand->spimem, &op);
 331 }
 332 
 333 static int spinand_erase_op(struct spinand_device *spinand,
 334                             const struct nand_pos *pos)
 335 {
 336         struct nand_device *nand = spinand_to_nand(spinand);
 337         unsigned int row = nanddev_pos_to_row(nand, pos);
 338         struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
 339 
 340         return spi_mem_exec_op(spinand->spimem, &op);
 341 }
 342 
 343 static int spinand_wait(struct spinand_device *spinand, u8 *s)
 344 {
 345         unsigned long timeo = jiffies + msecs_to_jiffies(400);
 346         u8 status;
 347         int ret;
 348 
 349         do {
 350                 ret = spinand_read_status(spinand, &status);
 351                 if (ret)
 352                         return ret;
 353 
 354                 if (!(status & STATUS_BUSY))
 355                         goto out;
 356         } while (time_before(jiffies, timeo));
 357 
 358         /*
 359          * Extra read, just in case the STATUS_BUSY bit has changed
 360          * since our last check
 361          */
 362         ret = spinand_read_status(spinand, &status);
 363         if (ret)
 364                 return ret;
 365 
 366 out:
 367         if (s)
 368                 *s = status;
 369 
 370         return status & STATUS_BUSY ? -ETIMEDOUT : 0;
 371 }
 372 
 373 static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
 374 {
 375         struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
 376                                                  SPINAND_MAX_ID_LEN);
 377         int ret;
 378 
 379         ret = spi_mem_exec_op(spinand->spimem, &op);
 380         if (!ret)
 381                 memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
 382 
 383         return ret;
 384 }
 385 
 386 static int spinand_reset_op(struct spinand_device *spinand)
 387 {
 388         struct spi_mem_op op = SPINAND_RESET_OP;
 389         int ret;
 390 
 391         ret = spi_mem_exec_op(spinand->spimem, &op);
 392         if (ret)
 393                 return ret;
 394 
 395         return spinand_wait(spinand, NULL);
 396 }
 397 
 398 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
 399 {
 400         return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
 401 }
 402 
 403 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
 404 {
 405         struct nand_device *nand = spinand_to_nand(spinand);
 406 
 407         if (spinand->eccinfo.get_status)
 408                 return spinand->eccinfo.get_status(spinand, status);
 409 
 410         switch (status & STATUS_ECC_MASK) {
 411         case STATUS_ECC_NO_BITFLIPS:
 412                 return 0;
 413 
 414         case STATUS_ECC_HAS_BITFLIPS:
 415                 /*
 416                  * We have no way to know exactly how many bitflips have been
 417                  * fixed, so let's return the maximum possible value so that
 418                  * wear-leveling layers move the data immediately.
 419                  */
 420                 return nand->eccreq.strength;
 421 
 422         case STATUS_ECC_UNCOR_ERROR:
 423                 return -EBADMSG;
 424 
 425         default:
 426                 break;
 427         }
 428 
 429         return -EINVAL;
 430 }
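
     /*
      * Sketch of a vendor ->get_status() hook (hypothetical, not part of
      * this driver): such hooks, registered through SPINAND_ECCINFO() in the
      * device table, decode vendor-specific status codes to report a more
      * precise bitflip count than the worst-case eccreq.strength used above.
      */
     #if 0
     static int example_ecc_get_status(struct spinand_device *spinand,
                                       u8 status)
     {
             struct nand_device *nand = spinand_to_nand(spinand);

             switch (status & STATUS_ECC_MASK) {
             case STATUS_ECC_NO_BITFLIPS:
                     return 0;

             case STATUS_ECC_UNCOR_ERROR:
                     return -EBADMSG;

             case STATUS_ECC_HAS_BITFLIPS:
                     /*
                      * A real hook would read a vendor register here to
                      * refine the estimate instead of assuming the maximum.
                      */
                     return nand->eccreq.strength;

             default:
                     break;
             }

             return -EINVAL;
     }
     #endif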
 431 
 432 static int spinand_read_page(struct spinand_device *spinand,
 433                              const struct nand_page_io_req *req,
 434                              bool ecc_enabled)
 435 {
 436         u8 status;
 437         int ret;
 438 
 439         ret = spinand_load_page_op(spinand, req);
 440         if (ret)
 441                 return ret;
 442 
 443         ret = spinand_wait(spinand, &status);
 444         if (ret < 0)
 445                 return ret;
 446 
 447         ret = spinand_read_from_cache_op(spinand, req);
 448         if (ret)
 449                 return ret;
 450 
 451         if (!ecc_enabled)
 452                 return 0;
 453 
 454         return spinand_check_ecc_status(spinand, status);
 455 }
 456 
 457 static int spinand_write_page(struct spinand_device *spinand,
 458                               const struct nand_page_io_req *req)
 459 {
 460         u8 status;
 461         int ret;
 462 
 463         ret = spinand_write_enable_op(spinand);
 464         if (ret)
 465                 return ret;
 466 
 467         ret = spinand_write_to_cache_op(spinand, req);
 468         if (ret)
 469                 return ret;
 470 
 471         ret = spinand_program_op(spinand, req);
 472         if (ret)
 473                 return ret;
 474 
 475         ret = spinand_wait(spinand, &status);
 476         if (!ret && (status & STATUS_PROG_FAILED))
 477                 ret = -EIO;
 478 
 479         return ret;
 480 }
 481 
 482 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 483                             struct mtd_oob_ops *ops)
 484 {
 485         struct spinand_device *spinand = mtd_to_spinand(mtd);
 486         struct nand_device *nand = mtd_to_nanddev(mtd);
 487         unsigned int max_bitflips = 0;
 488         struct nand_io_iter iter;
 489         bool enable_ecc = false;
 490         bool ecc_failed = false;
 491         int ret = 0;
 492 
 493         if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
 494                 enable_ecc = true;
 495 
 496         mutex_lock(&spinand->lock);
 497 
 498         nanddev_io_for_each_page(nand, from, ops, &iter) {
 499                 ret = spinand_select_target(spinand, iter.req.pos.target);
 500                 if (ret)
 501                         break;
 502 
 503                 ret = spinand_ecc_enable(spinand, enable_ecc);
 504                 if (ret)
 505                         break;
 506 
 507                 ret = spinand_read_page(spinand, &iter.req, enable_ecc);
 508                 if (ret < 0 && ret != -EBADMSG)
 509                         break;
 510 
 511                 if (ret == -EBADMSG) {
 512                         ecc_failed = true;
 513                         mtd->ecc_stats.failed++;
 514                 } else {
 515                         mtd->ecc_stats.corrected += ret;
 516                         max_bitflips = max_t(unsigned int, max_bitflips, ret);
 517                 }
 518 
 519                 ret = 0;
 520                 ops->retlen += iter.req.datalen;
 521                 ops->oobretlen += iter.req.ooblen;
 522         }
 523 
 524         mutex_unlock(&spinand->lock);
 525 
 526         if (ecc_failed && !ret)
 527                 ret = -EBADMSG;
 528 
 529         return ret ? ret : max_bitflips;
 530 }
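
     /*
      * Consumer-side sketch (hypothetical, not part of this driver): a plain
      * mtd_read() lands here through the hooks installed in spinand_init().
      * The bitflip count returned above is folded into mtd_read()'s return
      * convention: 0, -EUCLEAN (bitflips at or above mtd->bitflip_threshold),
      * or -EBADMSG for uncorrectable data.
      */
     #if 0
     static int example_read_first_page(struct mtd_info *mtd, u8 *buf)
     {
             size_t retlen;
             int ret;

             ret = mtd_read(mtd, 0, mtd->writesize, &retlen, buf);
             if (ret == -EUCLEAN)
                     pr_warn("bitflips corrected, data should be moved\n");

             return ret;
     }
     #endif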
 531 
 532 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
 533                              struct mtd_oob_ops *ops)
 534 {
 535         struct spinand_device *spinand = mtd_to_spinand(mtd);
 536         struct nand_device *nand = mtd_to_nanddev(mtd);
 537         struct nand_io_iter iter;
 538         bool enable_ecc = false;
 539         int ret = 0;
 540 
 541         if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
 542                 enable_ecc = true;
 543 
 544         mutex_lock(&spinand->lock);
 545 
 546         nanddev_io_for_each_page(nand, to, ops, &iter) {
 547                 ret = spinand_select_target(spinand, iter.req.pos.target);
 548                 if (ret)
 549                         break;
 550 
 551                 ret = spinand_ecc_enable(spinand, enable_ecc);
 552                 if (ret)
 553                         break;
 554 
 555                 ret = spinand_write_page(spinand, &iter.req);
 556                 if (ret)
 557                         break;
 558 
 559                 ops->retlen += iter.req.datalen;
 560                 ops->oobretlen += iter.req.ooblen;
 561         }
 562 
 563         mutex_unlock(&spinand->lock);
 564 
 565         return ret;
 566 }
 567 
 568 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
 569 {
 570         struct spinand_device *spinand = nand_to_spinand(nand);
 571         u8 marker[2] = { };
 572         struct nand_page_io_req req = {
 573                 .pos = *pos,
 574                 .ooblen = sizeof(marker),
 575                 .ooboffs = 0,
 576                 .oobbuf.in = marker,
 577                 .mode = MTD_OPS_RAW,
 578         };
 579 
 580         spinand_select_target(spinand, pos->target);
 581         spinand_read_page(spinand, &req, false);
 582         if (marker[0] != 0xff || marker[1] != 0xff)
 583                 return true;
 584 
 585         return false;
 586 }
 587 
 588 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
 589 {
 590         struct nand_device *nand = mtd_to_nanddev(mtd);
 591         struct spinand_device *spinand = nand_to_spinand(nand);
 592         struct nand_pos pos;
 593         int ret;
 594 
 595         nanddev_offs_to_pos(nand, offs, &pos);
 596         mutex_lock(&spinand->lock);
 597         ret = nanddev_isbad(nand, &pos);
 598         mutex_unlock(&spinand->lock);
 599 
 600         return ret;
 601 }
 602 
 603 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
 604 {
 605         struct spinand_device *spinand = nand_to_spinand(nand);
 606         u8 marker[2] = { };
 607         struct nand_page_io_req req = {
 608                 .pos = *pos,
 609                 .ooboffs = 0,
 610                 .ooblen = sizeof(marker),
 611                 .oobbuf.out = marker,
 612                 .mode = MTD_OPS_RAW,
 613         };
 614         int ret;
 615 
 616         ret = spinand_select_target(spinand, pos->target);
 617         if (ret)
 618                 return ret;
 619 
 620         ret = spinand_write_enable_op(spinand);
 621         if (ret)
 622                 return ret;
 623 
 624         return spinand_write_page(spinand, &req);
 625 }
 626 
 627 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
 628 {
 629         struct nand_device *nand = mtd_to_nanddev(mtd);
 630         struct spinand_device *spinand = nand_to_spinand(nand);
 631         struct nand_pos pos;
 632         int ret;
 633 
 634         nanddev_offs_to_pos(nand, offs, &pos);
 635         mutex_lock(&spinand->lock);
 636         ret = nanddev_markbad(nand, &pos);
 637         mutex_unlock(&spinand->lock);
 638 
 639         return ret;
 640 }
 641 
 642 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
 643 {
 644         struct spinand_device *spinand = nand_to_spinand(nand);
 645         u8 status;
 646         int ret;
 647 
 648         ret = spinand_select_target(spinand, pos->target);
 649         if (ret)
 650                 return ret;
 651 
 652         ret = spinand_write_enable_op(spinand);
 653         if (ret)
 654                 return ret;
 655 
 656         ret = spinand_erase_op(spinand, pos);
 657         if (ret)
 658                 return ret;
 659 
 660         ret = spinand_wait(spinand, &status);
 661         if (!ret && (status & STATUS_ERASE_FAILED))
 662                 ret = -EIO;
 663 
 664         return ret;
 665 }
 666 
 667 static int spinand_mtd_erase(struct mtd_info *mtd,
 668                              struct erase_info *einfo)
 669 {
 670         struct spinand_device *spinand = mtd_to_spinand(mtd);
 671         int ret;
 672 
 673         mutex_lock(&spinand->lock);
 674         ret = nanddev_mtd_erase(mtd, einfo);
 675         mutex_unlock(&spinand->lock);
 676 
 677         return ret;
 678 }
 679 
 680 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
 681 {
 682         struct spinand_device *spinand = mtd_to_spinand(mtd);
 683         struct nand_device *nand = mtd_to_nanddev(mtd);
 684         struct nand_pos pos;
 685         int ret;
 686 
 687         nanddev_offs_to_pos(nand, offs, &pos);
 688         mutex_lock(&spinand->lock);
 689         ret = nanddev_isreserved(nand, &pos);
 690         mutex_unlock(&spinand->lock);
 691 
 692         return ret;
 693 }
 694 
 695 static int spinand_create_dirmap(struct spinand_device *spinand,
 696                                  unsigned int plane)
 697 {
 698         struct nand_device *nand = spinand_to_nand(spinand);
 699         struct spi_mem_dirmap_info info = {
 700                 .length = nanddev_page_size(nand) +
 701                           nanddev_per_page_oobsize(nand),
 702         };
 703         struct spi_mem_dirmap_desc *desc;
 704 
 705         /* The plane number is passed in the MSBs just above the column address. */
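             /*
              * Worked example: with a 2 KiB page, fls(2048) = 12, so plane 1
              * maps to offset 0x1000 while the column address (page + OOB,
              * bytes 0-2111 for a 64-byte OOB) stays in the low 12 bits.
              */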
 706         info.offset = plane << fls(nand->memorg.pagesize);
 707 
 708         info.op_tmpl = *spinand->op_templates.update_cache;
 709         desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
 710                                           spinand->spimem, &info);
 711         if (IS_ERR(desc))
 712                 return PTR_ERR(desc);
 713 
 714         spinand->dirmaps[plane].wdesc = desc;
 715 
 716         info.op_tmpl = *spinand->op_templates.read_cache;
 717         desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
 718                                           spinand->spimem, &info);
 719         if (IS_ERR(desc))
 720                 return PTR_ERR(desc);
 721 
 722         spinand->dirmaps[plane].rdesc = desc;
 723 
 724         return 0;
 725 }
 726 
 727 static int spinand_create_dirmaps(struct spinand_device *spinand)
 728 {
 729         struct nand_device *nand = spinand_to_nand(spinand);
 730         int i, ret;
 731 
 732         spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
 733                                         sizeof(*spinand->dirmaps) *
 734                                         nand->memorg.planes_per_lun,
 735                                         GFP_KERNEL);
 736         if (!spinand->dirmaps)
 737                 return -ENOMEM;
 738 
 739         for (i = 0; i < nand->memorg.planes_per_lun; i++) {
 740                 ret = spinand_create_dirmap(spinand, i);
 741                 if (ret)
 742                         return ret;
 743         }
 744 
 745         return 0;
 746 }
 747 
 748 static const struct nand_ops spinand_ops = {
 749         .erase = spinand_erase,
 750         .markbad = spinand_markbad,
 751         .isbad = spinand_isbad,
 752 };
 753 
 754 static const struct spinand_manufacturer *spinand_manufacturers[] = {
 755         &gigadevice_spinand_manufacturer,
 756         &macronix_spinand_manufacturer,
 757         &micron_spinand_manufacturer,
 758         &paragon_spinand_manufacturer,
 759         &toshiba_spinand_manufacturer,
 760         &winbond_spinand_manufacturer,
 761 };
 762 
 763 static int spinand_manufacturer_detect(struct spinand_device *spinand)
 764 {
 765         unsigned int i;
 766         int ret;
 767 
 768         for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
 769                 ret = spinand_manufacturers[i]->ops->detect(spinand);
 770                 if (ret > 0) {
 771                         spinand->manufacturer = spinand_manufacturers[i];
 772                         return 0;
 773                 } else if (ret < 0) {
 774                         return ret;
 775                 }
 776         }
 777 
 778         return -ENOTSUPP;
 779 }
 780 
 781 static int spinand_manufacturer_init(struct spinand_device *spinand)
 782 {
 783         if (spinand->manufacturer->ops->init)
 784                 return spinand->manufacturer->ops->init(spinand);
 785 
 786         return 0;
 787 }
 788 
 789 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
 790 {
 791         /* Release manufacturer private data */
 792         if (spinand->manufacturer->ops->cleanup)
 793                 return spinand->manufacturer->ops->cleanup(spinand);
 794 }
 795 
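     /*
      * Variant lists are conventionally declared fastest-first (e.g. quad I/O
      * before dual before single I/O), so returning the first variant the
      * controller supports also selects the most efficient one.
      */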
 796 static const struct spi_mem_op *
 797 spinand_select_op_variant(struct spinand_device *spinand,
 798                           const struct spinand_op_variants *variants)
 799 {
 800         struct nand_device *nand = spinand_to_nand(spinand);
 801         unsigned int i;
 802 
 803         for (i = 0; i < variants->nops; i++) {
 804                 struct spi_mem_op op = variants->ops[i];
 805                 unsigned int nbytes;
 806                 int ret;
 807 
 808                 nbytes = nanddev_per_page_oobsize(nand) +
 809                          nanddev_page_size(nand);
 810 
 811                 while (nbytes) {
 812                         op.data.nbytes = nbytes;
 813                         ret = spi_mem_adjust_op_size(spinand->spimem, &op);
 814                         if (ret)
 815                                 break;
 816 
 817                         if (!spi_mem_supports_op(spinand->spimem, &op))
 818                                 break;
 819 
 820                         nbytes -= op.data.nbytes;
 821                 }
 822 
 823                 if (!nbytes)
 824                         return &variants->ops[i];
 825         }
 826 
 827         return NULL;
 828 }
 829 
 830 /**
 831  * spinand_match_and_init() - Try to find a match between a device ID and an
 832  *                            entry in a spinand_info table
 833  * @spinand: SPI NAND object
 834  * @table: SPI NAND device description table
 835  * @table_size: size of the device description table
      * @devid: device ID to match against the table entries
 836  *
 837  * Should be used by SPI NAND manufacturer drivers when they want to find a
 838  * match between a device ID retrieved through the READ_ID command and an
 839  * entry in the SPI NAND description table. If a match is found, the spinand
 840  * object will be initialized with information provided by the matching
 841  * spinand_info entry.
 842  *
 843  * Return: 0 on success, a negative error code otherwise.
 844  */
 845 int spinand_match_and_init(struct spinand_device *spinand,
 846                            const struct spinand_info *table,
 847                            unsigned int table_size, u16 devid)
 848 {
 849         struct nand_device *nand = spinand_to_nand(spinand);
 850         unsigned int i;
 851 
 852         for (i = 0; i < table_size; i++) {
 853                 const struct spinand_info *info = &table[i];
 854                 const struct spi_mem_op *op;
 855 
 856                 if (devid != info->devid)
 857                         continue;
 858 
 859                 nand->memorg = table[i].memorg;
 860                 nand->eccreq = table[i].eccreq;
 861                 spinand->eccinfo = table[i].eccinfo;
 862                 spinand->flags = table[i].flags;
 863                 spinand->select_target = table[i].select_target;
 864 
 865                 op = spinand_select_op_variant(spinand,
 866                                                info->op_variants.read_cache);
 867                 if (!op)
 868                         return -ENOTSUPP;
 869 
 870                 spinand->op_templates.read_cache = op;
 871 
 872                 op = spinand_select_op_variant(spinand,
 873                                                info->op_variants.write_cache);
 874                 if (!op)
 875                         return -ENOTSUPP;
 876 
 877                 spinand->op_templates.write_cache = op;
 878 
 879                 op = spinand_select_op_variant(spinand,
 880                                                info->op_variants.update_cache);
 881                 spinand->op_templates.update_cache = op;
 882 
 883                 return 0;
 884         }
 885 
 886         return -ENOTSUPP;
 887 }
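
     /*
      * Condensed sketch of how a manufacturer driver uses
      * spinand_match_and_init(), modeled on the in-tree drivers. Every name
      * below (IDs, geometry, table entry) is an illustrative placeholder,
      * not a real chip description.
      */
     #if 0
     static SPINAND_OP_VARIANTS(read_cache_variants,
                     SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
                     SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
                     SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

     static SPINAND_OP_VARIANTS(write_cache_variants,
                     SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
                     SPINAND_PROG_LOAD(true, 0, NULL, 0));

     static SPINAND_OP_VARIANTS(update_cache_variants,
                     SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
                     SPINAND_PROG_LOAD(false, 0, NULL, 0));

     /* 1 Gbit: 2 KiB pages, 64-byte OOB, 64 pages/block, 1024 blocks */
     static const struct spinand_info example_spinand_table[] = {
             SPINAND_INFO("EXAMPLE1G", 0x12,
                          NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
                          NAND_ECCREQ(8, 512),
                          SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
                                                   &write_cache_variants,
                                                   &update_cache_variants),
                          0,
                          SPINAND_ECCINFO(NULL, NULL)),
     };

     static int example_spinand_detect(struct spinand_device *spinand)
     {
             u8 *id = spinand->id.data;
             int ret;

             if (id[0] != 0xAB)      /* hypothetical manufacturer ID */
                     return 0;

             ret = spinand_match_and_init(spinand, example_spinand_table,
                                          ARRAY_SIZE(example_spinand_table),
                                          id[1]);
             if (ret)
                     return ret;

             return 1;
     }
     #endif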
 888 
 889 static int spinand_detect(struct spinand_device *spinand)
 890 {
 891         struct device *dev = &spinand->spimem->spi->dev;
 892         struct nand_device *nand = spinand_to_nand(spinand);
 893         int ret;
 894 
 895         ret = spinand_reset_op(spinand);
 896         if (ret)
 897                 return ret;
 898 
 899         ret = spinand_read_id_op(spinand, spinand->id.data);
 900         if (ret)
 901                 return ret;
 902 
 903         spinand->id.len = SPINAND_MAX_ID_LEN;
 904 
 905         ret = spinand_manufacturer_detect(spinand);
 906         if (ret) {
 907                 dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
 908                         spinand->id.data);
 909                 return ret;
 910         }
 911 
 912         if (nand->memorg.ntargets > 1 && !spinand->select_target) {
 913                 dev_err(dev,
 914                         "SPI NANDs with more than one die must implement ->select_target()\n");
 915                 return -EINVAL;
 916         }
 917 
 918         dev_info(dev, "%s SPI NAND was found.\n",
 919                  spinand->manufacturer->name);
 920         dev_info(dev,
 921                  "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
 922                  nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
 923                  nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
 924 
 925         return 0;
 926 }
 927 
 928 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
 929                                        struct mtd_oob_region *region)
 930 {
 931         return -ERANGE;
 932 }
 933 
 934 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
 935                                         struct mtd_oob_region *region)
 936 {
 937         if (section)
 938                 return -ERANGE;
 939 
 940         /* Reserve 2 bytes for the BBM. */
 941         region->offset = 2;
 942         region->length = 62;
 943 
 944         return 0;
 945 }
 946 
 947 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
 948         .ecc = spinand_noecc_ooblayout_ecc,
 949         .free = spinand_noecc_ooblayout_free,
 950 };
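
     /*
      * Sketch of a vendor-specific layout (hypothetical geometry): chips with
      * on-die ECC often reserve the upper half of a 64-byte OOB for ECC bytes
      * and expose the rest, minus the 2 BBM bytes, as free space. Such ops
      * are registered through SPINAND_ECCINFO() in the device table.
      */
     #if 0
     static int example_ooblayout_ecc(struct mtd_info *mtd, int section,
                                      struct mtd_oob_region *region)
     {
             if (section)
                     return -ERANGE;

             region->offset = 32;    /* hypothetical: ECC in the upper half */
             region->length = 32;

             return 0;
     }

     static int example_ooblayout_free(struct mtd_info *mtd, int section,
                                       struct mtd_oob_region *region)
     {
             if (section)
                     return -ERANGE;

             region->offset = 2;     /* skip the 2 BBM bytes */
             region->length = 30;

             return 0;
     }

     static const struct mtd_ooblayout_ops example_ooblayout = {
             .ecc = example_ooblayout_ecc,
             .free = example_ooblayout_free,
     };
     #endif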
 951 
 952 static int spinand_init(struct spinand_device *spinand)
 953 {
 954         struct device *dev = &spinand->spimem->spi->dev;
 955         struct mtd_info *mtd = spinand_to_mtd(spinand);
 956         struct nand_device *nand = mtd_to_nanddev(mtd);
 957         int ret, i;
 958 
 959         /*
 960          * We need a scratch buffer because the spi_mem interface requires that
 961          * the buffer passed in spi_mem_op->data.buf be DMA-able.
 962          */
 963         spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
 964         if (!spinand->scratchbuf)
 965                 return -ENOMEM;
 966 
 967         ret = spinand_detect(spinand);
 968         if (ret)
 969                 goto err_free_bufs;
 970 
 971         /*
 972          * Use kzalloc() instead of devm_kzalloc() here, because some drivers
 973          * may use this buffer for DMA access, and memory allocated with
 974          * the devm_* helpers is not guaranteed to be suitably aligned for DMA.
 975          */
 976         spinand->databuf = kzalloc(nanddev_page_size(nand) +
 977                                nanddev_per_page_oobsize(nand),
 978                                GFP_KERNEL);
 979         if (!spinand->databuf) {
 980                 ret = -ENOMEM;
 981                 goto err_free_bufs;
 982         }
 983 
 984         spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
 985 
 986         ret = spinand_init_cfg_cache(spinand);
 987         if (ret)
 988                 goto err_free_bufs;
 989 
 990         ret = spinand_init_quad_enable(spinand);
 991         if (ret)
 992                 goto err_free_bufs;
 993 
 994         ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
 995         if (ret)
 996                 goto err_free_bufs;
 997 
 998         ret = spinand_manufacturer_init(spinand);
 999         if (ret) {
1000                 dev_err(dev,
1001                         "Failed to initialize the SPI NAND chip (err = %d)\n",
1002                         ret);
1003                 goto err_free_bufs;
1004         }
1005 
1006         ret = spinand_create_dirmaps(spinand);
1007         if (ret) {
1008                 dev_err(dev,
1009                         "Failed to create direct mappings for read/write operations (err = %d)\n",
1010                         ret);
1011                 goto err_manuf_cleanup;
1012         }
1013 
1014         /* After power up, all blocks are locked, so unlock them here. */
1015         for (i = 0; i < nand->memorg.ntargets; i++) {
1016                 ret = spinand_select_target(spinand, i);
1017                 if (ret)
1018                         goto err_manuf_cleanup;
1019 
1020                 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1021                 if (ret)
1022                         goto err_manuf_cleanup;
1023         }
1024 
1025         ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
1026         if (ret)
1027                 goto err_manuf_cleanup;
1028 
1029         /*
1030          * When the chip provides no ECC info, the whole OOB area is made
1031          * available to the user (see the no-ECC ooblayout fallback below).
1032          */
1033         mtd->_read_oob = spinand_mtd_read;
1034         mtd->_write_oob = spinand_mtd_write;
1035         mtd->_block_isbad = spinand_mtd_block_isbad;
1036         mtd->_block_markbad = spinand_mtd_block_markbad;
1037         mtd->_block_isreserved = spinand_mtd_block_isreserved;
1038         mtd->_erase = spinand_mtd_erase;
1039         mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
1040 
1041         if (spinand->eccinfo.ooblayout)
1042                 mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
1043         else
1044                 mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
1045 
1046         ret = mtd_ooblayout_count_freebytes(mtd);
1047         if (ret < 0)
1048                 goto err_cleanup_nanddev;
1049 
1050         mtd->oobavail = ret;
1051 
1052         /* Propagate ECC information to mtd_info */
1053         mtd->ecc_strength = nand->eccreq.strength;
1054         mtd->ecc_step_size = nand->eccreq.step_size;
1055 
1056         return 0;
1057 
1058 err_cleanup_nanddev:
1059         nanddev_cleanup(nand);
1060 
1061 err_manuf_cleanup:
1062         spinand_manufacturer_cleanup(spinand);
1063 
1064 err_free_bufs:
1065         kfree(spinand->databuf);
1066         kfree(spinand->scratchbuf);
1067         return ret;
1068 }
1069 
1070 static void spinand_cleanup(struct spinand_device *spinand)
1071 {
1072         struct nand_device *nand = spinand_to_nand(spinand);
1073 
1074         nanddev_cleanup(nand);
1075         spinand_manufacturer_cleanup(spinand);
1076         kfree(spinand->databuf);
1077         kfree(spinand->scratchbuf);
1078 }
1079 
1080 static int spinand_probe(struct spi_mem *mem)
1081 {
1082         struct spinand_device *spinand;
1083         struct mtd_info *mtd;
1084         int ret;
1085 
1086         spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1087                                GFP_KERNEL);
1088         if (!spinand)
1089                 return -ENOMEM;
1090 
1091         spinand->spimem = mem;
1092         spi_mem_set_drvdata(mem, spinand);
1093         spinand_set_of_node(spinand, mem->spi->dev.of_node);
1094         mutex_init(&spinand->lock);
1095         mtd = spinand_to_mtd(spinand);
1096         mtd->dev.parent = &mem->spi->dev;
1097 
1098         ret = spinand_init(spinand);
1099         if (ret)
1100                 return ret;
1101 
1102         ret = mtd_device_register(mtd, NULL, 0);
1103         if (ret)
1104                 goto err_spinand_cleanup;
1105 
1106         return 0;
1107 
1108 err_spinand_cleanup:
1109         spinand_cleanup(spinand);
1110 
1111         return ret;
1112 }
1113 
1114 static int spinand_remove(struct spi_mem *mem)
1115 {
1116         struct spinand_device *spinand;
1117         struct mtd_info *mtd;
1118         int ret;
1119 
1120         spinand = spi_mem_get_drvdata(mem);
1121         mtd = spinand_to_mtd(spinand);
1122 
1123         ret = mtd_device_unregister(mtd);
1124         if (ret)
1125                 return ret;
1126 
1127         spinand_cleanup(spinand);
1128 
1129         return 0;
1130 }
1131 
1132 static const struct spi_device_id spinand_ids[] = {
1133         { .name = "spi-nand" },
1134         { /* sentinel */ },
1135 };
1136 
1137 #ifdef CONFIG_OF
1138 static const struct of_device_id spinand_of_ids[] = {
1139         { .compatible = "spi-nand" },
1140         { /* sentinel */ },
1141 };
1142 #endif
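
     /*
      * Device tree sketch (assumes a standard SPI controller binding; the
      * frequency is illustrative). The generic "spi-nand" compatible above
      * is all this driver matches on:
      *
      *      flash@0 {
      *              compatible = "spi-nand";
      *              reg = <0>;
      *              spi-max-frequency = <104000000>;
      *      };
      */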
1143 
1144 static struct spi_mem_driver spinand_drv = {
1145         .spidrv = {
1146                 .id_table = spinand_ids,
1147                 .driver = {
1148                         .name = "spi-nand",
1149                         .of_match_table = of_match_ptr(spinand_of_ids),
1150                 },
1151         },
1152         .probe = spinand_probe,
1153         .remove = spinand_remove,
1154 };
1155 module_spi_mem_driver(spinand_drv);
1156 
1157 MODULE_DESCRIPTION("SPI NAND framework");
1158 MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
1159 MODULE_LICENSE("GPL v2");
