root/drivers/mtd/spi-nor/spi-nor.c


DEFINITIONS

This source file includes the following definitions.
  1. spi_nor_spimem_xfer_data
  2. spi_nor_spimem_read_data
  3. spi_nor_read_data
  4. spi_nor_spimem_write_data
  5. spi_nor_write_data
  6. read_sr
  7. read_fsr
  8. read_cr
  9. write_sr
  10. write_enable
  11. write_disable
  12. mtd_to_spi_nor
  13. spi_nor_convert_opcode
  14. spi_nor_convert_3to4_read
  15. spi_nor_convert_3to4_program
  16. spi_nor_convert_3to4_erase
  17. spi_nor_set_4byte_opcodes
  18. macronix_set_4byte
  19. st_micron_set_4byte
  20. spansion_set_4byte
  21. spi_nor_write_ear
  22. winbond_set_4byte
  23. spi_nor_xread_sr
  24. s3an_sr_ready
  25. spi_nor_clear_sr
  26. spi_nor_sr_ready
  27. spi_nor_clear_fsr
  28. spi_nor_fsr_ready
  29. spi_nor_ready
  30. spi_nor_wait_till_ready_with_timeout
  31. spi_nor_wait_till_ready
  32. erase_chip
  33. spi_nor_lock_and_prep
  34. spi_nor_unlock_and_unprep
  35. s3an_convert_addr
  36. spi_nor_convert_addr
  37. spi_nor_erase_sector
  38. spi_nor_div_by_erase_size
  39. spi_nor_find_best_erase_type
  40. spi_nor_region_next
  41. spi_nor_find_erase_region
  42. spi_nor_init_erase_cmd
  43. spi_nor_destroy_erase_cmd_list
  44. spi_nor_init_erase_cmd_list
  45. spi_nor_erase_multi_sectors
  46. spi_nor_erase
  47. write_sr_and_check
  48. stm_get_locked_range
  49. stm_check_lock_status_sr
  50. stm_is_locked_sr
  51. stm_is_unlocked_sr
  52. stm_lock
  53. stm_unlock
  54. stm_is_locked
  55. spi_nor_lock
  56. spi_nor_unlock
  57. spi_nor_is_locked
  58. write_sr_cr
  59. macronix_quad_enable
  60. spansion_quad_enable
  61. spansion_no_read_cr_quad_enable
  62. spansion_read_cr_quad_enable
  63. spi_nor_write_sr2
  64. spi_nor_read_sr2
  65. sr2_bit7_quad_enable
  66. spi_nor_clear_sr_bp
  67. spi_nor_spansion_clear_sr_bp
  68. is25lp256_post_bfpt_fixups
  69. mx25l25635_post_bfpt_fixups
  70. gd25q256_default_init
  71. spi_nor_read_id
  72. spi_nor_read
  73. sst_write
  74. spi_nor_write
  75. spi_nor_check
  76. s3an_nor_setup
  77. spi_nor_set_read_settings
  78. spi_nor_set_pp_settings
  79. spi_nor_hwcaps2cmd
  80. spi_nor_hwcaps_read2cmd
  81. spi_nor_hwcaps_pp2cmd
  82. spi_nor_read_raw
  83. spi_nor_read_sfdp
  84. spi_nor_spimem_check_op
  85. spi_nor_spimem_check_readop
  86. spi_nor_spimem_check_pp
  87. spi_nor_spimem_adjust_hwcaps
  88. spi_nor_read_sfdp_dma_unsafe
  89. spi_nor_set_read_settings_from_bfpt
  90. spi_nor_set_erase_type
  91. spi_nor_set_erase_settings_from_bfpt
  92. spi_nor_map_cmp_erase_type
  93. spi_nor_sort_erase_mask
  94. spi_nor_regions_sort_erase_types
  95. spi_nor_init_uniform_erase_map
  96. spi_nor_post_bfpt_fixups
  97. spi_nor_parse_bfpt
  98. spi_nor_smpt_addr_width
  99. spi_nor_smpt_read_dummy
  100. spi_nor_get_map_in_use
  101. spi_nor_region_check_overlay
  102. spi_nor_init_non_uniform_erase_map
  103. spi_nor_parse_smpt
  104. spi_nor_parse_4bait
  105. spi_nor_parse_sfdp
  106. spi_nor_select_read
  107. spi_nor_select_pp
  108. spi_nor_select_uniform_erase
  109. spi_nor_select_erase
  110. spi_nor_default_setup
  111. spi_nor_setup
  112. macronix_set_default_init
  113. st_micron_set_default_init
  114. winbond_set_default_init
  115. spi_nor_manufacturer_init_params
  116. spi_nor_sfdp_init_params
  117. spi_nor_info_init_params
  118. spansion_post_sfdp_fixups
  119. s3an_post_sfdp_fixups
  120. spi_nor_post_sfdp_fixups
  121. spi_nor_late_init_params
  122. spi_nor_init_params
  123. spi_nor_quad_enable
  124. spi_nor_init
  125. spi_nor_resume
  126. spi_nor_restore
  127. spi_nor_match_id
  128. spi_nor_set_addr_width
  129. spi_nor_debugfs_init
  130. spi_nor_get_flash_info
  131. spi_nor_scan
  132. spi_nor_probe
  133. spi_nor_remove
  134. spi_nor_shutdown

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
   4  * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
   5  *
   6  * Copyright (C) 2005, Intec Automation Inc.
   7  * Copyright (C) 2014, Freescale Semiconductor, Inc.
   8  */
   9 
  10 #include <linux/err.h>
  11 #include <linux/errno.h>
  12 #include <linux/module.h>
  13 #include <linux/device.h>
  14 #include <linux/mutex.h>
  15 #include <linux/math64.h>
  16 #include <linux/sizes.h>
  17 #include <linux/slab.h>
  18 #include <linux/sort.h>
  19 
  20 #include <linux/mtd/mtd.h>
  21 #include <linux/of_platform.h>
  22 #include <linux/sched/task_stack.h>
  23 #include <linux/spi/flash.h>
  24 #include <linux/mtd/spi-nor.h>
  25 
  26 /* Define max times to check status register before we give up. */
  27 
  28 /*
  29  * For everything but full-chip erase; probably could be much smaller, but kept
  30  * around for safety for now
  31  */
  32 #define DEFAULT_READY_WAIT_JIFFIES              (40UL * HZ)
  33 
  34 /*
  35  * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
  36  * for larger flash
  37  */
  38 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES       (40UL * HZ)
  39 
  40 #define SPI_NOR_MAX_ID_LEN      6
  41 #define SPI_NOR_MAX_ADDR_WIDTH  4
  42 
  43 struct sfdp_parameter_header {
  44         u8              id_lsb;
  45         u8              minor;
  46         u8              major;
  47         u8              length; /* in double words */
  48         u8              parameter_table_pointer[3]; /* byte address */
  49         u8              id_msb;
  50 };
  51 
  52 #define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
  53 #define SFDP_PARAM_HEADER_PTP(p) \
  54         (((p)->parameter_table_pointer[2] << 16) | \
  55          ((p)->parameter_table_pointer[1] <<  8) | \
  56          ((p)->parameter_table_pointer[0] <<  0))
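      /*
       * For example, a parameter header whose parameter_table_pointer bytes
       * are {0x30, 0x01, 0x00} points to byte address 0x000130: the pointer
       * is stored little-endian, least significant byte first.
       */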
  57 
  58 #define SFDP_BFPT_ID            0xff00  /* Basic Flash Parameter Table */
  59 #define SFDP_SECTOR_MAP_ID      0xff81  /* Sector Map Table */
  60 #define SFDP_4BAIT_ID           0xff84  /* 4-byte Address Instruction Table */
  61 
  62 #define SFDP_SIGNATURE          0x50444653U
  63 #define SFDP_JESD216_MAJOR      1
  64 #define SFDP_JESD216_MINOR      0
  65 #define SFDP_JESD216A_MINOR     5
  66 #define SFDP_JESD216B_MINOR     6
  67 
  68 struct sfdp_header {
   69         u32             signature; /* 0x50444653U <=> "SFDP" */
  70         u8              minor;
  71         u8              major;
   72         u8              nph; /* 0-based number of parameter headers */
  73         u8              unused;
  74 
  75         /* Basic Flash Parameter Table. */
  76         struct sfdp_parameter_header    bfpt_header;
  77 };
  78 
  79 /* Basic Flash Parameter Table */
  80 
  81 /*
  82  * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
  83  * They are indexed from 1 but C arrays are indexed from 0.
  84  */
  85 #define BFPT_DWORD(i)           ((i) - 1)
  86 #define BFPT_DWORD_MAX          16
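      /*
       * For example, the Quad Enable Requirements field defined below lives
       * in the 15th BFPT DWORD, which is read as dwords[BFPT_DWORD(15)],
       * i.e. array index 14.
       */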
  87 
   88 /* The first version of JESD216 defined only 9 DWORDs. */
  89 #define BFPT_DWORD_MAX_JESD216                  9
  90 
  91 /* 1st DWORD. */
  92 #define BFPT_DWORD1_FAST_READ_1_1_2             BIT(16)
  93 #define BFPT_DWORD1_ADDRESS_BYTES_MASK          GENMASK(18, 17)
  94 #define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY        (0x0UL << 17)
  95 #define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4        (0x1UL << 17)
  96 #define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY        (0x2UL << 17)
  97 #define BFPT_DWORD1_DTR                         BIT(19)
  98 #define BFPT_DWORD1_FAST_READ_1_2_2             BIT(20)
  99 #define BFPT_DWORD1_FAST_READ_1_4_4             BIT(21)
 100 #define BFPT_DWORD1_FAST_READ_1_1_4             BIT(22)
 101 
 102 /* 5th DWORD. */
 103 #define BFPT_DWORD5_FAST_READ_2_2_2             BIT(0)
 104 #define BFPT_DWORD5_FAST_READ_4_4_4             BIT(4)
 105 
 106 /* 11th DWORD. */
 107 #define BFPT_DWORD11_PAGE_SIZE_SHIFT            4
 108 #define BFPT_DWORD11_PAGE_SIZE_MASK             GENMASK(7, 4)
 109 
 110 /* 15th DWORD. */
 111 
 112 /*
 113  * (from JESD216 rev B)
 114  * Quad Enable Requirements (QER):
 115  * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
 116  *         reads based on instruction. DQ3/HOLD# functions are hold during
 117  *         instruction phase.
 118  * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
 119  *         two data bytes where bit 1 of the second byte is one.
 120  *         [...]
 121  *         Writing only one byte to the status register has the side-effect of
 122  *         clearing status register 2, including the QE bit. The 100b code is
 123  *         used if writing one byte to the status register does not modify
 124  *         status register 2.
 125  * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
 126  *         one data byte where bit 6 is one.
 127  *         [...]
 128  * - 011b: QE is bit 7 of status register 2. It is set via Write status
 129  *         register 2 instruction 3Eh with one data byte where bit 7 is one.
 130  *         [...]
 131  *         The status register 2 is read using instruction 3Fh.
 132  * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
 133  *         two data bytes where bit 1 of the second byte is one.
 134  *         [...]
 135  *         In contrast to the 001b code, writing one byte to the status
 136  *         register does not modify status register 2.
 137  * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
  138  *         Read Status instruction 05h. Status register 2 is read using
 139  *         instruction 35h. QE is set via Write Status instruction 01h with
 140  *         two data bytes where bit 1 of the second byte is one.
 141  *         [...]
 142  */
 143 #define BFPT_DWORD15_QER_MASK                   GENMASK(22, 20)
 144 #define BFPT_DWORD15_QER_NONE                   (0x0UL << 20) /* Micron */
 145 #define BFPT_DWORD15_QER_SR2_BIT1_BUGGY         (0x1UL << 20)
 146 #define BFPT_DWORD15_QER_SR1_BIT6               (0x2UL << 20) /* Macronix */
 147 #define BFPT_DWORD15_QER_SR2_BIT7               (0x3UL << 20)
 148 #define BFPT_DWORD15_QER_SR2_BIT1_NO_RD         (0x4UL << 20)
 149 #define BFPT_DWORD15_QER_SR2_BIT1               (0x5UL << 20) /* Spansion */
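      /*
       * Each QER code is handled by a matching quad-enable helper defined
       * later in this file, e.g. BFPT_DWORD15_QER_SR1_BIT6 by
       * macronix_quad_enable() and BFPT_DWORD15_QER_SR2_BIT7 by
       * sr2_bit7_quad_enable(); the selection is made while parsing the BFPT.
       */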
 150 
 151 struct sfdp_bfpt {
 152         u32     dwords[BFPT_DWORD_MAX];
 153 };
 154 
 155 /**
 156  * struct spi_nor_fixups - SPI NOR fixup hooks
 157  * @default_init: called after default flash parameters init. Used to tweak
 158  *                flash parameters when information provided by the flash_info
 159  *                table is incomplete or wrong.
 160  * @post_bfpt: called after the BFPT table has been parsed
 161  * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
 162  *             that do not support RDSFDP). Typically used to tweak various
 163  *             parameters that could not be extracted by other means (i.e.
  164  *             when information provided by the SFDP/flash_info tables is
 165  *             incomplete or wrong).
 166  *
 167  * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
 168  * table is broken or not available.
 169  */
 170 struct spi_nor_fixups {
 171         void (*default_init)(struct spi_nor *nor);
 172         int (*post_bfpt)(struct spi_nor *nor,
 173                          const struct sfdp_parameter_header *bfpt_header,
 174                          const struct sfdp_bfpt *bfpt,
 175                          struct spi_nor_flash_parameter *params);
 176         void (*post_sfdp)(struct spi_nor *nor);
 177 };
 178 
 179 struct flash_info {
 180         char            *name;
 181 
 182         /*
 183          * This array stores the ID bytes.
  184          * The first three bytes are the JEDEC ID.
 185          * JEDEC ID zero means "no ID" (mostly older chips).
 186          */
 187         u8              id[SPI_NOR_MAX_ID_LEN];
 188         u8              id_len;
 189 
 190         /* The size listed here is what works with SPINOR_OP_SE, which isn't
 191          * necessarily called a "sector" by the vendor.
 192          */
 193         unsigned        sector_size;
 194         u16             n_sectors;
 195 
 196         u16             page_size;
 197         u16             addr_width;
 198 
 199         u16             flags;
 200 #define SECT_4K                 BIT(0)  /* SPINOR_OP_BE_4K works uniformly */
 201 #define SPI_NOR_NO_ERASE        BIT(1)  /* No erase command needed */
 202 #define SST_WRITE               BIT(2)  /* use SST byte programming */
 203 #define SPI_NOR_NO_FR           BIT(3)  /* Can't do fastread */
 204 #define SECT_4K_PMC             BIT(4)  /* SPINOR_OP_BE_4K_PMC works uniformly */
 205 #define SPI_NOR_DUAL_READ       BIT(5)  /* Flash supports Dual Read */
 206 #define SPI_NOR_QUAD_READ       BIT(6)  /* Flash supports Quad Read */
 207 #define USE_FSR                 BIT(7)  /* use flag status register */
 208 #define SPI_NOR_HAS_LOCK        BIT(8)  /* Flash supports lock/unlock via SR */
 209 #define SPI_NOR_HAS_TB          BIT(9)  /*
 210                                          * Flash SR has Top/Bottom (TB) protect
 211                                          * bit. Must be used with
 212                                          * SPI_NOR_HAS_LOCK.
 213                                          */
 214 #define SPI_NOR_XSR_RDY         BIT(10) /*
 215                                          * S3AN flashes have specific opcode to
 216                                          * read the status register.
 217                                          * Flags SPI_NOR_XSR_RDY and SPI_S3AN
 218                                          * use the same bit as one implies the
 219                                          * other, but we will get rid of
 220                                          * SPI_S3AN soon.
 221                                          */
 222 #define SPI_S3AN                BIT(10) /*
 223                                          * Xilinx Spartan 3AN In-System Flash
 224                                          * (MFR cannot be used for probing
 225                                          * because it has the same value as
 226                                          * ATMEL flashes)
 227                                          */
 228 #define SPI_NOR_4B_OPCODES      BIT(11) /*
 229                                          * Use dedicated 4byte address op codes
 230                                          * to support memory size above 128Mib.
 231                                          */
 232 #define NO_CHIP_ERASE           BIT(12) /* Chip does not support chip erase */
 233 #define SPI_NOR_SKIP_SFDP       BIT(13) /* Skip parsing of SFDP tables */
 234 #define USE_CLSR                BIT(14) /* use CLSR command */
 235 #define SPI_NOR_OCTAL_READ      BIT(15) /* Flash supports Octal Read */
 236 
 237         /* Part specific fixup hooks. */
 238         const struct spi_nor_fixups *fixups;
 239 };
 240 
 241 #define JEDEC_MFR(info) ((info)->id[0])
 242 
 243 /**
 244  * spi_nor_spimem_xfer_data() - helper function to read/write data to
 245  *                              flash's memory region
 246  * @nor:        pointer to 'struct spi_nor'
 247  * @op:         pointer to 'struct spi_mem_op' template for transfer
 248  *
 249  * Return: number of bytes transferred on success, -errno otherwise
 250  */
 251 static ssize_t spi_nor_spimem_xfer_data(struct spi_nor *nor,
 252                                         struct spi_mem_op *op)
 253 {
 254         bool usebouncebuf = false;
 255         void *rdbuf = NULL;
 256         const void *buf;
 257         int ret;
 258 
 259         if (op->data.dir == SPI_MEM_DATA_IN)
 260                 buf = op->data.buf.in;
 261         else
 262                 buf = op->data.buf.out;
 263 
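              /*
               * Buffers on the stack or outside the kernel linear mapping may
               * not be DMA-safe, so such transfers are bounced through
               * nor->bouncebuf (and clamped to nor->bouncebuf_size below).
               */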
 264         if (object_is_on_stack(buf) || !virt_addr_valid(buf))
 265                 usebouncebuf = true;
 266 
 267         if (usebouncebuf) {
 268                 if (op->data.nbytes > nor->bouncebuf_size)
 269                         op->data.nbytes = nor->bouncebuf_size;
 270 
 271                 if (op->data.dir == SPI_MEM_DATA_IN) {
 272                         rdbuf = op->data.buf.in;
 273                         op->data.buf.in = nor->bouncebuf;
 274                 } else {
 275                         op->data.buf.out = nor->bouncebuf;
 276                         memcpy(nor->bouncebuf, buf,
 277                                op->data.nbytes);
 278                 }
 279         }
 280 
 281         ret = spi_mem_adjust_op_size(nor->spimem, op);
 282         if (ret)
 283                 return ret;
 284 
 285         ret = spi_mem_exec_op(nor->spimem, op);
 286         if (ret)
 287                 return ret;
 288 
 289         if (usebouncebuf && op->data.dir == SPI_MEM_DATA_IN)
 290                 memcpy(rdbuf, nor->bouncebuf, op->data.nbytes);
 291 
 292         return op->data.nbytes;
 293 }
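      /*
       * Note that the number of bytes actually transferred may be smaller
       * than requested, either because the transfer was clamped to the
       * bounce buffer size above or because spi_mem_adjust_op_size()
       * shortened it, so callers must be prepared to loop.
       */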
 294 
 295 /**
 296  * spi_nor_spimem_read_data() - read data from flash's memory region via
 297  *                              spi-mem
 298  * @nor:        pointer to 'struct spi_nor'
 299  * @from:       offset to read from
 300  * @len:        number of bytes to read
 301  * @buf:        pointer to dst buffer
 302  *
 303  * Return: number of bytes read successfully, -errno otherwise
 304  */
 305 static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
 306                                         size_t len, u8 *buf)
 307 {
 308         struct spi_mem_op op =
 309                 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
 310                            SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
 311                            SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
 312                            SPI_MEM_OP_DATA_IN(len, buf, 1));
 313 
 314         /* get transfer protocols. */
 315         op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
 316         op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
 317         op.dummy.buswidth = op.addr.buswidth;
 318         op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
 319 
 320         /* convert the dummy cycles to the number of bytes */
 321         op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
 322 
 323         return spi_nor_spimem_xfer_data(nor, &op);
 324 }
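      /*
       * For example, with nor->read_dummy == 8 dummy clock cycles on a quad
       * (4-bit wide) address/dummy bus, the conversion above yields
       * op.dummy.nbytes = (8 * 4) / 8 = 4 bytes.
       */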
 325 
 326 /**
 327  * spi_nor_read_data() - read data from flash memory
 328  * @nor:        pointer to 'struct spi_nor'
 329  * @from:       offset to read from
 330  * @len:        number of bytes to read
 331  * @buf:        pointer to dst buffer
 332  *
 333  * Return: number of bytes read successfully, -errno otherwise
 334  */
 335 static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
 336                                  u8 *buf)
 337 {
 338         if (nor->spimem)
 339                 return spi_nor_spimem_read_data(nor, from, len, buf);
 340 
 341         return nor->read(nor, from, len, buf);
 342 }
 343 
 344 /**
 345  * spi_nor_spimem_write_data() - write data to flash memory via
 346  *                               spi-mem
 347  * @nor:        pointer to 'struct spi_nor'
 348  * @to:         offset to write to
 349  * @len:        number of bytes to write
 350  * @buf:        pointer to src buffer
 351  *
 352  * Return: number of bytes written successfully, -errno otherwise
 353  */
 354 static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
 355                                          size_t len, const u8 *buf)
 356 {
 357         struct spi_mem_op op =
 358                 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
 359                            SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
 360                            SPI_MEM_OP_NO_DUMMY,
 361                            SPI_MEM_OP_DATA_OUT(len, buf, 1));
 362 
 363         op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
 364         op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
 365         op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
 366 
 367         if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
 368                 op.addr.nbytes = 0;
 369 
 370         return spi_nor_spimem_xfer_data(nor, &op);
 371 }
 372 
 373 /**
 374  * spi_nor_write_data() - write data to flash memory
 375  * @nor:        pointer to 'struct spi_nor'
 376  * @to:         offset to write to
 377  * @len:        number of bytes to write
 378  * @buf:        pointer to src buffer
 379  *
 380  * Return: number of bytes written successfully, -errno otherwise
 381  */
 382 static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
 383                                   const u8 *buf)
 384 {
 385         if (nor->spimem)
 386                 return spi_nor_spimem_write_data(nor, to, len, buf);
 387 
 388         return nor->write(nor, to, len, buf);
 389 }
 390 
 391 /*
  392  * Read the status register.
  393  * Return the status register value.
  394  * Returns negative if an error occurred.
 395  */
 396 static int read_sr(struct spi_nor *nor)
 397 {
 398         int ret;
 399 
 400         if (nor->spimem) {
 401                 struct spi_mem_op op =
 402                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
 403                                    SPI_MEM_OP_NO_ADDR,
 404                                    SPI_MEM_OP_NO_DUMMY,
 405                                    SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
 406 
 407                 ret = spi_mem_exec_op(nor->spimem, &op);
 408         } else {
 409                 ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, 1);
 410         }
 411 
 412         if (ret < 0) {
 413                 pr_err("error %d reading SR\n", (int) ret);
 414                 return ret;
 415         }
 416 
 417         return nor->bouncebuf[0];
 418 }
 419 
 420 /*
  421  * Read the flag status register.
  422  * Return the flag status register value.
  423  * Returns negative if an error occurred.
 424  */
 425 static int read_fsr(struct spi_nor *nor)
 426 {
 427         int ret;
 428 
 429         if (nor->spimem) {
 430                 struct spi_mem_op op =
 431                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
 432                                    SPI_MEM_OP_NO_ADDR,
 433                                    SPI_MEM_OP_NO_DUMMY,
 434                                    SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
 435 
 436                 ret = spi_mem_exec_op(nor->spimem, &op);
 437         } else {
 438                 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, 1);
 439         }
 440 
 441         if (ret < 0) {
 442                 pr_err("error %d reading FSR\n", ret);
 443                 return ret;
 444         }
 445 
 446         return nor->bouncebuf[0];
 447 }
 448 
 449 /*
  450  * Read the configuration register.
  451  * Return the configuration register value.
  452  * Returns negative if an error occurred.
 453  */
 454 static int read_cr(struct spi_nor *nor)
 455 {
 456         int ret;
 457 
 458         if (nor->spimem) {
 459                 struct spi_mem_op op =
 460                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
 461                                    SPI_MEM_OP_NO_ADDR,
 462                                    SPI_MEM_OP_NO_DUMMY,
 463                                    SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
 464 
 465                 ret = spi_mem_exec_op(nor->spimem, &op);
 466         } else {
 467                 ret = nor->read_reg(nor, SPINOR_OP_RDCR, nor->bouncebuf, 1);
 468         }
 469 
 470         if (ret < 0) {
 471                 dev_err(nor->dev, "error %d reading CR\n", ret);
 472                 return ret;
 473         }
 474 
 475         return nor->bouncebuf[0];
 476 }
 477 
 478 /*
  479  * Write one byte to the status register.
  480  * Returns negative if an error occurred.
 481  */
 482 static int write_sr(struct spi_nor *nor, u8 val)
 483 {
 484         nor->bouncebuf[0] = val;
 485         if (nor->spimem) {
 486                 struct spi_mem_op op =
 487                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
 488                                    SPI_MEM_OP_NO_ADDR,
 489                                    SPI_MEM_OP_NO_DUMMY,
 490                                    SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
 491 
 492                 return spi_mem_exec_op(nor->spimem, &op);
 493         }
 494 
 495         return nor->write_reg(nor, SPINOR_OP_WRSR, nor->bouncebuf, 1);
 496 }
 497 
 498 /*
 499  * Set write enable latch with Write Enable command.
  500  * Returns negative if an error occurred.
 501  */
 502 static int write_enable(struct spi_nor *nor)
 503 {
 504         if (nor->spimem) {
 505                 struct spi_mem_op op =
 506                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
 507                                    SPI_MEM_OP_NO_ADDR,
 508                                    SPI_MEM_OP_NO_DUMMY,
 509                                    SPI_MEM_OP_NO_DATA);
 510 
 511                 return spi_mem_exec_op(nor->spimem, &op);
 512         }
 513 
 514         return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
 515 }
 516 
 517 /*
 518  * Send write disable instruction to the chip.
 519  */
 520 static int write_disable(struct spi_nor *nor)
 521 {
 522         if (nor->spimem) {
 523                 struct spi_mem_op op =
 524                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
 525                                    SPI_MEM_OP_NO_ADDR,
 526                                    SPI_MEM_OP_NO_DUMMY,
 527                                    SPI_MEM_OP_NO_DATA);
 528 
 529                 return spi_mem_exec_op(nor->spimem, &op);
 530         }
 531 
 532         return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
 533 }
 534 
 535 static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
 536 {
 537         return mtd->priv;
 538 }
 539 
 540 
 541 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
 542 {
 543         size_t i;
 544 
 545         for (i = 0; i < size; i++)
 546                 if (table[i][0] == opcode)
 547                         return table[i][1];
 548 
 549         /* No conversion found, keep input op code. */
 550         return opcode;
 551 }
 552 
 553 static u8 spi_nor_convert_3to4_read(u8 opcode)
 554 {
 555         static const u8 spi_nor_3to4_read[][2] = {
 556                 { SPINOR_OP_READ,       SPINOR_OP_READ_4B },
 557                 { SPINOR_OP_READ_FAST,  SPINOR_OP_READ_FAST_4B },
 558                 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
 559                 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
 560                 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
 561                 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
 562                 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
 563                 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
 564 
 565                 { SPINOR_OP_READ_1_1_1_DTR,     SPINOR_OP_READ_1_1_1_DTR_4B },
 566                 { SPINOR_OP_READ_1_2_2_DTR,     SPINOR_OP_READ_1_2_2_DTR_4B },
 567                 { SPINOR_OP_READ_1_4_4_DTR,     SPINOR_OP_READ_1_4_4_DTR_4B },
 568         };
 569 
 570         return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
 571                                       ARRAY_SIZE(spi_nor_3to4_read));
 572 }
 573 
 574 static u8 spi_nor_convert_3to4_program(u8 opcode)
 575 {
 576         static const u8 spi_nor_3to4_program[][2] = {
 577                 { SPINOR_OP_PP,         SPINOR_OP_PP_4B },
 578                 { SPINOR_OP_PP_1_1_4,   SPINOR_OP_PP_1_1_4_4B },
 579                 { SPINOR_OP_PP_1_4_4,   SPINOR_OP_PP_1_4_4_4B },
 580                 { SPINOR_OP_PP_1_1_8,   SPINOR_OP_PP_1_1_8_4B },
 581                 { SPINOR_OP_PP_1_8_8,   SPINOR_OP_PP_1_8_8_4B },
 582         };
 583 
 584         return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
 585                                       ARRAY_SIZE(spi_nor_3to4_program));
 586 }
 587 
 588 static u8 spi_nor_convert_3to4_erase(u8 opcode)
 589 {
 590         static const u8 spi_nor_3to4_erase[][2] = {
 591                 { SPINOR_OP_BE_4K,      SPINOR_OP_BE_4K_4B },
 592                 { SPINOR_OP_BE_32K,     SPINOR_OP_BE_32K_4B },
 593                 { SPINOR_OP_SE,         SPINOR_OP_SE_4B },
 594         };
 595 
 596         return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
 597                                       ARRAY_SIZE(spi_nor_3to4_erase));
 598 }
 599 
 600 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
 601 {
 602         nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
 603         nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
 604         nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
 605 
 606         if (!spi_nor_has_uniform_erase(nor)) {
 607                 struct spi_nor_erase_map *map = &nor->params.erase_map;
 608                 struct spi_nor_erase_type *erase;
 609                 int i;
 610 
 611                 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
 612                         erase = &map->erase_type[i];
 613                         erase->opcode =
 614                                 spi_nor_convert_3to4_erase(erase->opcode);
 615                 }
 616         }
 617 }
 618 
 619 static int macronix_set_4byte(struct spi_nor *nor, bool enable)
 620 {
 621         if (nor->spimem) {
 622                 struct spi_mem_op op =
 623                         SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
 624                                                   SPINOR_OP_EN4B :
 625                                                   SPINOR_OP_EX4B,
 626                                                   1),
 627                                   SPI_MEM_OP_NO_ADDR,
 628                                   SPI_MEM_OP_NO_DUMMY,
 629                                   SPI_MEM_OP_NO_DATA);
 630 
 631                 return spi_mem_exec_op(nor->spimem, &op);
 632         }
 633 
 634         return nor->write_reg(nor, enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B,
 635                               NULL, 0);
 636 }
 637 
 638 static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
 639 {
 640         int ret;
 641 
 642         write_enable(nor);
 643         ret = macronix_set_4byte(nor, enable);
 644         write_disable(nor);
 645 
 646         return ret;
 647 }
 648 
 649 static int spansion_set_4byte(struct spi_nor *nor, bool enable)
 650 {
 651         nor->bouncebuf[0] = enable << 7;
 652 
 653         if (nor->spimem) {
 654                 struct spi_mem_op op =
 655                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
 656                                    SPI_MEM_OP_NO_ADDR,
 657                                    SPI_MEM_OP_NO_DUMMY,
 658                                    SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
 659 
 660                 return spi_mem_exec_op(nor->spimem, &op);
 661         }
 662 
 663         return nor->write_reg(nor, SPINOR_OP_BRWR, nor->bouncebuf, 1);
 664 }
 665 
 666 static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
 667 {
 668         nor->bouncebuf[0] = ear;
 669 
 670         if (nor->spimem) {
 671                 struct spi_mem_op op =
 672                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
 673                                    SPI_MEM_OP_NO_ADDR,
 674                                    SPI_MEM_OP_NO_DUMMY,
 675                                    SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
 676 
 677                 return spi_mem_exec_op(nor->spimem, &op);
 678         }
 679 
 680         return nor->write_reg(nor, SPINOR_OP_WREAR, nor->bouncebuf, 1);
 681 }
 682 
 683 static int winbond_set_4byte(struct spi_nor *nor, bool enable)
 684 {
 685         int ret;
 686 
 687         ret = macronix_set_4byte(nor, enable);
 688         if (ret || enable)
 689                 return ret;
 690 
 691         /*
 692          * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
 693          * Register to be set to 1, so all 3-byte-address reads come from the
 694          * second 16M. We must clear the register to enable normal behavior.
 695          */
 696         write_enable(nor);
 697         ret = spi_nor_write_ear(nor, 0);
 698         write_disable(nor);
 699 
 700         return ret;
 701 }
 702 
 703 static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
 704 {
 705         if (nor->spimem) {
 706                 struct spi_mem_op op =
 707                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
 708                                    SPI_MEM_OP_NO_ADDR,
 709                                    SPI_MEM_OP_NO_DUMMY,
 710                                    SPI_MEM_OP_DATA_IN(1, sr, 1));
 711 
 712                 return spi_mem_exec_op(nor->spimem, &op);
 713         }
 714 
 715         return nor->read_reg(nor, SPINOR_OP_XRDSR, sr, 1);
 716 }
 717 
 718 static int s3an_sr_ready(struct spi_nor *nor)
 719 {
 720         int ret;
 721 
 722         ret = spi_nor_xread_sr(nor, nor->bouncebuf);
 723         if (ret < 0) {
 724                 dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
 725                 return ret;
 726         }
 727 
 728         return !!(nor->bouncebuf[0] & XSR_RDY);
 729 }
 730 
 731 static int spi_nor_clear_sr(struct spi_nor *nor)
 732 {
 733         if (nor->spimem) {
 734                 struct spi_mem_op op =
 735                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
 736                                    SPI_MEM_OP_NO_ADDR,
 737                                    SPI_MEM_OP_NO_DUMMY,
 738                                    SPI_MEM_OP_NO_DATA);
 739 
 740                 return spi_mem_exec_op(nor->spimem, &op);
 741         }
 742 
 743         return nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
 744 }
 745 
 746 static int spi_nor_sr_ready(struct spi_nor *nor)
 747 {
 748         int sr = read_sr(nor);
 749         if (sr < 0)
 750                 return sr;
 751 
 752         if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
 753                 if (sr & SR_E_ERR)
 754                         dev_err(nor->dev, "Erase Error occurred\n");
 755                 else
 756                         dev_err(nor->dev, "Programming Error occurred\n");
 757 
 758                 spi_nor_clear_sr(nor);
 759                 return -EIO;
 760         }
 761 
 762         return !(sr & SR_WIP);
 763 }
 764 
 765 static int spi_nor_clear_fsr(struct spi_nor *nor)
 766 {
 767         if (nor->spimem) {
 768                 struct spi_mem_op op =
 769                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
 770                                    SPI_MEM_OP_NO_ADDR,
 771                                    SPI_MEM_OP_NO_DUMMY,
 772                                    SPI_MEM_OP_NO_DATA);
 773 
 774                 return spi_mem_exec_op(nor->spimem, &op);
 775         }
 776 
 777         return nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
 778 }
 779 
 780 static int spi_nor_fsr_ready(struct spi_nor *nor)
 781 {
 782         int fsr = read_fsr(nor);
 783         if (fsr < 0)
 784                 return fsr;
 785 
 786         if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
 787                 if (fsr & FSR_E_ERR)
 788                         dev_err(nor->dev, "Erase operation failed.\n");
 789                 else
 790                         dev_err(nor->dev, "Program operation failed.\n");
 791 
 792                 if (fsr & FSR_PT_ERR)
 793                         dev_err(nor->dev,
 794                         "Attempted to modify a protected sector.\n");
 795 
 796                 spi_nor_clear_fsr(nor);
 797                 return -EIO;
 798         }
 799 
 800         return fsr & FSR_READY;
 801 }
 802 
 803 static int spi_nor_ready(struct spi_nor *nor)
 804 {
 805         int sr, fsr;
 806 
 807         if (nor->flags & SNOR_F_READY_XSR_RDY)
 808                 sr = s3an_sr_ready(nor);
 809         else
 810                 sr = spi_nor_sr_ready(nor);
 811         if (sr < 0)
 812                 return sr;
 813         fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
 814         if (fsr < 0)
 815                 return fsr;
 816         return sr && fsr;
 817 }
 818 
 819 /*
 820  * Service routine to read status register until ready, or timeout occurs.
 821  * Returns non-zero if error.
 822  */
 823 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
 824                                                 unsigned long timeout_jiffies)
 825 {
 826         unsigned long deadline;
 827         int timeout = 0, ret;
 828 
 829         deadline = jiffies + timeout_jiffies;
 830 
 831         while (!timeout) {
 832                 if (time_after_eq(jiffies, deadline))
 833                         timeout = 1;
 834 
 835                 ret = spi_nor_ready(nor);
 836                 if (ret < 0)
 837                         return ret;
 838                 if (ret)
 839                         return 0;
 840 
 841                 cond_resched();
 842         }
 843 
 844         dev_err(nor->dev, "flash operation timed out\n");
 845 
 846         return -ETIMEDOUT;
 847 }
 848 
 849 static int spi_nor_wait_till_ready(struct spi_nor *nor)
 850 {
 851         return spi_nor_wait_till_ready_with_timeout(nor,
 852                                                     DEFAULT_READY_WAIT_JIFFIES);
 853 }
 854 
 855 /*
 856  * Erase the whole flash memory
 857  *
 858  * Returns 0 if successful, non-zero otherwise.
 859  */
 860 static int erase_chip(struct spi_nor *nor)
 861 {
 862         dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
 863 
 864         if (nor->spimem) {
 865                 struct spi_mem_op op =
 866                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
 867                                    SPI_MEM_OP_NO_ADDR,
 868                                    SPI_MEM_OP_NO_DUMMY,
 869                                    SPI_MEM_OP_NO_DATA);
 870 
 871                 return spi_mem_exec_op(nor->spimem, &op);
 872         }
 873 
 874         return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
 875 }
 876 
 877 static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
 878 {
 879         int ret = 0;
 880 
 881         mutex_lock(&nor->lock);
 882 
 883         if (nor->prepare) {
 884                 ret = nor->prepare(nor, ops);
 885                 if (ret) {
 886                         dev_err(nor->dev, "failed in the preparation.\n");
 887                         mutex_unlock(&nor->lock);
 888                         return ret;
 889                 }
 890         }
 891         return ret;
 892 }
 893 
 894 static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
 895 {
 896         if (nor->unprepare)
 897                 nor->unprepare(nor, ops);
 898         mutex_unlock(&nor->lock);
 899 }
 900 
 901 /*
  902  * This code converts an address to the Default Address Mode, which has
  903  * non-power-of-two page sizes. We must support this mode because it is the
  904  * default mode supported by the Xilinx tools, it can access the whole flash
  905  * area, and changing over to the Power-of-two mode is irreversible and
  906  * corrupts the original data.
 907  * Addr can safely be unsigned int, the biggest S3AN device is smaller than
 908  * 4 MiB.
 909  */
 910 static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
 911 {
 912         u32 offset, page;
 913 
 914         offset = addr % nor->page_size;
 915         page = addr / nor->page_size;
 916         page <<= (nor->page_size > 512) ? 10 : 9;
 917 
 918         return page | offset;
 919 }
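      /*
       * Worked example, assuming a device with 264-byte pages: linear address
       * 1000 is page 3, offset 208, so the converted address is
       * (3 << 9) | 208 = 0x6d0.
       */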
 920 
 921 static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
 922 {
 923         if (!nor->params.convert_addr)
 924                 return addr;
 925 
 926         return nor->params.convert_addr(nor, addr);
 927 }
 928 
 929 /*
 930  * Initiate the erasure of a single sector
 931  */
 932 static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
 933 {
 934         int i;
 935 
 936         addr = spi_nor_convert_addr(nor, addr);
 937 
 938         if (nor->erase)
 939                 return nor->erase(nor, addr);
 940 
 941         if (nor->spimem) {
 942                 struct spi_mem_op op =
 943                         SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
 944                                    SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
 945                                    SPI_MEM_OP_NO_DUMMY,
 946                                    SPI_MEM_OP_NO_DATA);
 947 
 948                 return spi_mem_exec_op(nor->spimem, &op);
 949         }
 950 
 951         /*
 952          * Default implementation, if driver doesn't have a specialized HW
 953          * control
 954          */
 955         for (i = nor->addr_width - 1; i >= 0; i--) {
 956                 nor->bouncebuf[i] = addr & 0xff;
 957                 addr >>= 8;
 958         }
 959 
 960         return nor->write_reg(nor, nor->erase_opcode, nor->bouncebuf,
 961                               nor->addr_width);
 962 }
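      /*
       * For example, with nor->addr_width == 3 and addr == 0x123456, the
       * fallback above sends the erase opcode followed by the address bytes
       * 0x12, 0x34 and 0x56 (most significant byte first).
       */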
 963 
 964 /**
 965  * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 966  * @erase:      pointer to a structure that describes a SPI NOR erase type
 967  * @dividend:   dividend value
 968  * @remainder:  pointer to u32 remainder (will be updated)
 969  *
 970  * Return: the result of the division
 971  */
 972 static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
 973                                      u64 dividend, u32 *remainder)
 974 {
 975         /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
 976         *remainder = (u32)dividend & erase->size_mask;
 977         return dividend >> erase->size_shift;
 978 }
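      /*
       * For example, for a 64 KiB erase type (size_shift == 16,
       * size_mask == 0xffff), dividing 0x12345 yields 1 with a remainder of
       * 0x2345.
       */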
 979 
 980 /**
 981  * spi_nor_find_best_erase_type() - find the best erase type for the given
 982  *                                  offset in the serial flash memory and the
 983  *                                  number of bytes to erase. The region in
 984  *                                  which the address fits is expected to be
 985  *                                  provided.
 986  * @map:        the erase map of the SPI NOR
 987  * @region:     pointer to a structure that describes a SPI NOR erase region
 988  * @addr:       offset in the serial flash memory
 989  * @len:        number of bytes to erase
 990  *
 991  * Return: a pointer to the best fitted erase type, NULL otherwise.
 992  */
 993 static const struct spi_nor_erase_type *
 994 spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
 995                              const struct spi_nor_erase_region *region,
 996                              u64 addr, u32 len)
 997 {
 998         const struct spi_nor_erase_type *erase;
 999         u32 rem;
1000         int i;
1001         u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
1002 
1003         /*
1004          * Erase types are ordered by size, with the smallest erase type at
1005          * index 0.
1006          */
1007         for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
1008                 /* Does the erase region support the tested erase type? */
1009                 if (!(erase_mask & BIT(i)))
1010                         continue;
1011 
1012                 erase = &map->erase_type[i];
1013 
1014                 /* Don't erase more than what the user has asked for. */
1015                 if (erase->size > len)
1016                         continue;
1017 
1018                 /* Alignment is not mandatory for overlaid regions */
1019                 if (region->offset & SNOR_OVERLAID_REGION)
1020                         return erase;
1021 
1022                 spi_nor_div_by_erase_size(erase, addr, &rem);
1023                 if (rem)
1024                         continue;
1025                 else
1026                         return erase;
1027         }
1028 
1029         return NULL;
1030 }
1031 
1032 /**
1033  * spi_nor_region_next() - get the next spi nor region
1034  * @region:     pointer to a structure that describes a SPI NOR erase region
1035  *
1036  * Return: the next spi nor region or NULL if last region.
1037  */
1038 static struct spi_nor_erase_region *
1039 spi_nor_region_next(struct spi_nor_erase_region *region)
1040 {
1041         if (spi_nor_region_is_last(region))
1042                 return NULL;
1043         region++;
1044         return region;
1045 }
1046 
1047 /**
1048  * spi_nor_find_erase_region() - find the region of the serial flash memory in
1049  *                               which the offset fits
1050  * @map:        the erase map of the SPI NOR
1051  * @addr:       offset in the serial flash memory
1052  *
1053  * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
1054  *         otherwise.
1055  */
1056 static struct spi_nor_erase_region *
1057 spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
1058 {
1059         struct spi_nor_erase_region *region = map->regions;
1060         u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1061         u64 region_end = region_start + region->size;
1062 
1063         while (addr < region_start || addr >= region_end) {
1064                 region = spi_nor_region_next(region);
1065                 if (!region)
1066                         return ERR_PTR(-EINVAL);
1067 
1068                 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1069                 region_end = region_start + region->size;
1070         }
1071 
1072         return region;
1073 }
1074 
1075 /**
1076  * spi_nor_init_erase_cmd() - initialize an erase command
1077  * @region:     pointer to a structure that describes a SPI NOR erase region
1078  * @erase:      pointer to a structure that describes a SPI NOR erase type
1079  *
1080  * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1081  *         otherwise.
1082  */
1083 static struct spi_nor_erase_command *
1084 spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1085                        const struct spi_nor_erase_type *erase)
1086 {
1087         struct spi_nor_erase_command *cmd;
1088 
1089         cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1090         if (!cmd)
1091                 return ERR_PTR(-ENOMEM);
1092 
1093         INIT_LIST_HEAD(&cmd->list);
1094         cmd->opcode = erase->opcode;
1095         cmd->count = 1;
1096 
1097         if (region->offset & SNOR_OVERLAID_REGION)
1098                 cmd->size = region->size;
1099         else
1100                 cmd->size = erase->size;
1101 
1102         return cmd;
1103 }
1104 
1105 /**
1106  * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1107  * @erase_list: list of erase commands
1108  */
1109 static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1110 {
1111         struct spi_nor_erase_command *cmd, *next;
1112 
1113         list_for_each_entry_safe(cmd, next, erase_list, list) {
1114                 list_del(&cmd->list);
1115                 kfree(cmd);
1116         }
1117 }
1118 
1119 /**
1120  * spi_nor_init_erase_cmd_list() - initialize erase command list
1121  * @nor:        pointer to a 'struct spi_nor'
1122  * @erase_list: list of erase commands to be executed once we validate that the
1123  *              erase can be performed
1124  * @addr:       offset in the serial flash memory
1125  * @len:        number of bytes to erase
1126  *
1127  * Builds the list of best fitted erase commands and verifies if the erase can
1128  * be performed.
1129  *
1130  * Return: 0 on success, -errno otherwise.
1131  */
1132 static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
1133                                        struct list_head *erase_list,
1134                                        u64 addr, u32 len)
1135 {
1136         const struct spi_nor_erase_map *map = &nor->params.erase_map;
1137         const struct spi_nor_erase_type *erase, *prev_erase = NULL;
1138         struct spi_nor_erase_region *region;
1139         struct spi_nor_erase_command *cmd = NULL;
1140         u64 region_end;
1141         int ret = -EINVAL;
1142 
1143         region = spi_nor_find_erase_region(map, addr);
1144         if (IS_ERR(region))
1145                 return PTR_ERR(region);
1146 
1147         region_end = spi_nor_region_end(region);
1148 
1149         while (len) {
1150                 erase = spi_nor_find_best_erase_type(map, region, addr, len);
1151                 if (!erase)
1152                         goto destroy_erase_cmd_list;
1153 
1154                 if (prev_erase != erase ||
1155                     region->offset & SNOR_OVERLAID_REGION) {
1156                         cmd = spi_nor_init_erase_cmd(region, erase);
1157                         if (IS_ERR(cmd)) {
1158                                 ret = PTR_ERR(cmd);
1159                                 goto destroy_erase_cmd_list;
1160                         }
1161 
1162                         list_add_tail(&cmd->list, erase_list);
1163                 } else {
1164                         cmd->count++;
1165                 }
1166 
1167                 addr += cmd->size;
1168                 len -= cmd->size;
1169 
1170                 if (len && addr >= region_end) {
1171                         region = spi_nor_region_next(region);
1172                         if (!region)
1173                                 goto destroy_erase_cmd_list;
1174                         region_end = spi_nor_region_end(region);
1175                 }
1176 
1177                 prev_erase = erase;
1178         }
1179 
1180         return 0;
1181 
1182 destroy_erase_cmd_list:
1183         spi_nor_destroy_erase_cmd_list(erase_list);
1184         return ret;
1185 }
1186 
1187 /**
1188  * spi_nor_erase_multi_sectors() - perform a non-uniform erase
1189  * @nor:        pointer to a 'struct spi_nor'
1190  * @addr:       offset in the serial flash memory
1191  * @len:        number of bytes to erase
1192  *
1193  * Build a list of best fitted erase commands and execute it once we validate
1194  * that the erase can be performed.
1195  *
1196  * Return: 0 on success, -errno otherwise.
1197  */
1198 static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
1199 {
1200         LIST_HEAD(erase_list);
1201         struct spi_nor_erase_command *cmd, *next;
1202         int ret;
1203 
1204         ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
1205         if (ret)
1206                 return ret;
1207 
1208         list_for_each_entry_safe(cmd, next, &erase_list, list) {
1209                 nor->erase_opcode = cmd->opcode;
1210                 while (cmd->count) {
1211                         write_enable(nor);
1212 
1213                         ret = spi_nor_erase_sector(nor, addr);
1214                         if (ret)
1215                                 goto destroy_erase_cmd_list;
1216 
1217                         addr += cmd->size;
1218                         cmd->count--;
1219 
1220                         ret = spi_nor_wait_till_ready(nor);
1221                         if (ret)
1222                                 goto destroy_erase_cmd_list;
1223                 }
1224                 list_del(&cmd->list);
1225                 kfree(cmd);
1226         }
1227 
1228         return 0;
1229 
1230 destroy_erase_cmd_list:
1231         spi_nor_destroy_erase_cmd_list(&erase_list);
1232         return ret;
1233 }
1234 
1235 /*
 1236  * Erase an address range on the NOR chip.  The address range may span
 1237  * one or more erase sectors.  Return an error if there is a problem erasing.
1238  */
1239 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
1240 {
1241         struct spi_nor *nor = mtd_to_spi_nor(mtd);
1242         u32 addr, len;
1243         uint32_t rem;
1244         int ret;
1245 
1246         dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
1247                         (long long)instr->len);
1248 
1249         if (spi_nor_has_uniform_erase(nor)) {
1250                 div_u64_rem(instr->len, mtd->erasesize, &rem);
1251                 if (rem)
1252                         return -EINVAL;
1253         }
1254 
1255         addr = instr->addr;
1256         len = instr->len;
1257 
1258         ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
1259         if (ret)
1260                 return ret;
1261 
1262         /* whole-chip erase? */
1263         if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
1264                 unsigned long timeout;
1265 
1266                 write_enable(nor);
1267 
1268                 if (erase_chip(nor)) {
1269                         ret = -EIO;
1270                         goto erase_err;
1271                 }
1272 
1273                 /*
1274                  * Scale the timeout linearly with the size of the flash, with
1275                  * a minimum calibrated to an old 2MB flash. We could try to
1276                  * pull these from CFI/SFDP, but these values should be good
1277                  * enough for now.
1278                  */
1279                 timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
1280                               CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
1281                               (unsigned long)(mtd->size / SZ_2M));
1282                 ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
1283                 if (ret)
1284                         goto erase_err;
1285 
1286         /* REVISIT in some cases we could speed up erasing large regions
1287          * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
1288          * to use "small sector erase", but that's not always optimal.
1289          */
1290 
1291         /* "sector"-at-a-time erase */
1292         } else if (spi_nor_has_uniform_erase(nor)) {
1293                 while (len) {
1294                         write_enable(nor);
1295 
1296                         ret = spi_nor_erase_sector(nor, addr);
1297                         if (ret)
1298                                 goto erase_err;
1299 
1300                         addr += mtd->erasesize;
1301                         len -= mtd->erasesize;
1302 
1303                         ret = spi_nor_wait_till_ready(nor);
1304                         if (ret)
1305                                 goto erase_err;
1306                 }
1307 
1308         /* erase multiple sectors */
1309         } else {
1310                 ret = spi_nor_erase_multi_sectors(nor, addr, len);
1311                 if (ret)
1312                         goto erase_err;
1313         }
1314 
1315         write_disable(nor);
1316 
1317 erase_err:
1318         spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
1319 
1320         return ret;
1321 }
1322 
1323 /* Write status register and ensure bits in mask match written values */
1324 static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
1325 {
1326         int ret;
1327 
1328         write_enable(nor);
1329         ret = write_sr(nor, status_new);
1330         if (ret)
1331                 return ret;
1332 
1333         ret = spi_nor_wait_till_ready(nor);
1334         if (ret)
1335                 return ret;
1336 
1337         ret = read_sr(nor);
1338         if (ret < 0)
1339                 return ret;
1340 
1341         return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
1342 }
1343 
1344 static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
1345                                  uint64_t *len)
1346 {
1347         struct mtd_info *mtd = &nor->mtd;
1348         u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1349         int shift = ffs(mask) - 1;
1350         int pow;
1351 
1352         if (!(sr & mask)) {
1353                 /* No protection */
1354                 *ofs = 0;
1355                 *len = 0;
1356         } else {
1357                 pow = ((sr & mask) ^ mask) >> shift;
1358                 *len = mtd->size >> pow;
1359                 if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
1360                         *ofs = 0;
1361                 else
1362                         *ofs = mtd->size - *len;
1363         }
1364 }
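      /*
       * For example, on an 8 MiB part with BP2:BP1:BP0 == 1:0:1 and TB clear,
       * pow == 2, so len == 2 MiB and the upper quarter of the flash is
       * reported as protected, matching the table in the stm_lock() comment
       * below.
       */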
1365 
1366 /*
1367  * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
1368  * @locked is false); 0 otherwise
1369  */
1370 static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1371                                     u8 sr, bool locked)
1372 {
1373         loff_t lock_offs;
1374         uint64_t lock_len;
1375 
1376         if (!len)
1377                 return 1;
1378 
1379         stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
1380 
1381         if (locked)
1382                 /* Requested range is a sub-range of locked range */
1383                 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1384         else
1385                 /* Requested range does not overlap with locked range */
1386                 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
1387 }
1388 
1389 static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1390                             u8 sr)
1391 {
1392         return stm_check_lock_status_sr(nor, ofs, len, sr, true);
1393 }
1394 
1395 static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1396                               u8 sr)
1397 {
1398         return stm_check_lock_status_sr(nor, ofs, len, sr, false);
1399 }
1400 
1401 /*
1402  * Lock a region of the flash. Compatible with ST Micro and similar flash.
1403  * Supports the block protection bits BP{0,1,2} in the status register
1404  * (SR). Does not support these features found in newer SR bitfields:
1405  *   - SEC: sector/block protect - only handle SEC=0 (block protect)
1406  *   - CMP: complement protect - only support CMP=0 (range is not complemented)
1407  *
1408  * Support for the following is provided conditionally for some flash:
1409  *   - TB: top/bottom protect
1410  *
1411  * Sample table portion for 8MB flash (Winbond w25q64fw):
1412  *
1413  *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
1414  *  --------------------------------------------------------------------------
1415  *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
1416  *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
1417  *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
1418  *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
1419  *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
1420  *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
1421  *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
1422  *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
1423  *  ------|-------|-------|-------|-------|---------------|-------------------
1424  *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
1425  *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
1426  *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
1427  *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
1428  *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
1429  *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
1430  *
1431  * Returns negative on errors, 0 on success.
1432  */
1433 static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1434 {
1435         struct mtd_info *mtd = &nor->mtd;
1436         int status_old, status_new;
1437         u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1438         u8 shift = ffs(mask) - 1, pow, val;
1439         loff_t lock_len;
1440         bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1441         bool use_top;
1442 
1443         status_old = read_sr(nor);
1444         if (status_old < 0)
1445                 return status_old;
1446 
1447         /* If nothing in our range is unlocked, we don't need to do anything */
1448         if (stm_is_locked_sr(nor, ofs, len, status_old))
1449                 return 0;
1450 
1451         /* If anything below us is unlocked, we can't use 'bottom' protection */
1452         if (!stm_is_locked_sr(nor, 0, ofs, status_old))
1453                 can_be_bottom = false;
1454 
1455         /* If anything above us is unlocked, we can't use 'top' protection */
1456         if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
1457                                 status_old))
1458                 can_be_top = false;
1459 
1460         if (!can_be_bottom && !can_be_top)
1461                 return -EINVAL;
1462 
1463         /* Prefer top, if both are valid */
1464         use_top = can_be_top;
1465 
1466         /* lock_len: length of region that should end up locked */
1467         if (use_top)
1468                 lock_len = mtd->size - ofs;
1469         else
1470                 lock_len = ofs + len;
1471 
1472         /*
1473          * Need smallest pow such that:
1474          *
1475          *   1 / (2^pow) <= (len / size)
1476          *
1477          * so (assuming power-of-2 size) we do:
1478          *
1479          *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
1480          */
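        /*
         * Worked example (illustrative): locking the top 2 MB of an 8 MB
         * part gives pow = ilog2(8 MB) - ilog2(2 MB) = 23 - 21 = 2 and
         * val = mask - (2 << shift), i.e. BP2:BP0 = 0b101, which is the
         * "Upper 1/4" row of the table above.
         */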
1481         pow = ilog2(mtd->size) - ilog2(lock_len);
1482         val = mask - (pow << shift);
1483         if (val & ~mask)
1484                 return -EINVAL;
1485         /* Don't "lock" with no region! */
1486         if (!(val & mask))
1487                 return -EINVAL;
1488 
1489         status_new = (status_old & ~mask & ~SR_TB) | val;
1490 
1491         /* Disallow further writes if WP pin is asserted */
1492         status_new |= SR_SRWD;
1493 
1494         if (!use_top)
1495                 status_new |= SR_TB;
1496 
1497         /* Don't bother if they're the same */
1498         if (status_new == status_old)
1499                 return 0;
1500 
1501         /* Only modify protection if it will not unlock other areas */
1502         if ((status_new & mask) < (status_old & mask))
1503                 return -EINVAL;
1504 
1505         return write_sr_and_check(nor, status_new, mask);
1506 }
1507 
1508 /*
1509  * Unlock a region of the flash. See stm_lock() for more info
1510  *
1511  * Returns negative on errors, 0 on success.
1512  */
1513 static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1514 {
1515         struct mtd_info *mtd = &nor->mtd;
1516         int status_old, status_new;
1517         u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1518         u8 shift = ffs(mask) - 1, pow, val;
1519         loff_t lock_len;
1520         bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1521         bool use_top;
1522 
1523         status_old = read_sr(nor);
1524         if (status_old < 0)
1525                 return status_old;
1526 
1527         /* If nothing in our range is locked, we don't need to do anything */
1528         if (stm_is_unlocked_sr(nor, ofs, len, status_old))
1529                 return 0;
1530 
1531         /* If anything below us is locked, we can't use 'top' protection */
1532         if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
1533                 can_be_top = false;
1534 
1535         /* If anything above us is locked, we can't use 'bottom' protection */
1536         if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
1537                                 status_old))
1538                 can_be_bottom = false;
1539 
1540         if (!can_be_bottom && !can_be_top)
1541                 return -EINVAL;
1542 
1543         /* Prefer top, if both are valid */
1544         use_top = can_be_top;
1545 
1546         /* lock_len: length of region that should remain locked */
1547         if (use_top)
1548                 lock_len = mtd->size - (ofs + len);
1549         else
1550                 lock_len = ofs;
1551 
1552         /*
1553          * Need largest pow such that:
1554          *
1555          *   1 / (2^pow) >= (len / size)
1556          *
1557          * so (assuming power-of-2 size) we do:
1558          *
1559          *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
1560          */
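        /*
         * Worked example (illustrative): leaving only the top 1 MB of an
         * 8 MB part locked gives pow = ilog2(8 MB) - order_base_2(1 MB) =
         * 23 - 20 = 3 and val encodes BP2:BP0 = 0b100, which is the
         * "Upper 1/8" row of the table above.
         */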
1561         pow = ilog2(mtd->size) - order_base_2(lock_len);
1562         if (lock_len == 0) {
1563                 val = 0; /* fully unlocked */
1564         } else {
1565                 val = mask - (pow << shift);
1566                 /* Some power-of-two sizes are not supported */
1567                 if (val & ~mask)
1568                         return -EINVAL;
1569         }
1570 
1571         status_new = (status_old & ~mask & ~SR_TB) | val;
1572 
1573         /* Don't protect status register if we're fully unlocked */
1574         if (lock_len == 0)
1575                 status_new &= ~SR_SRWD;
1576 
1577         if (!use_top)
1578                 status_new |= SR_TB;
1579 
1580         /* Don't bother if they're the same */
1581         if (status_new == status_old)
1582                 return 0;
1583 
1584         /* Only modify protection if it will not lock other areas */
1585         if ((status_new & mask) > (status_old & mask))
1586                 return -EINVAL;
1587 
1588         return write_sr_and_check(nor, status_new, mask);
1589 }
1590 
1591 /*
1592  * Check if a region of the flash is (completely) locked. See stm_lock() for
1593  * more info.
1594  *
1595  * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
1596  * negative on errors.
1597  */
1598 static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1599 {
1600         int status;
1601 
1602         status = read_sr(nor);
1603         if (status < 0)
1604                 return status;
1605 
1606         return stm_is_locked_sr(nor, ofs, len, status);
1607 }
1608 
1609 static const struct spi_nor_locking_ops stm_locking_ops = {
1610         .lock = stm_lock,
1611         .unlock = stm_unlock,
1612         .is_locked = stm_is_locked,
1613 };
1614 
1615 static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1616 {
1617         struct spi_nor *nor = mtd_to_spi_nor(mtd);
1618         int ret;
1619 
1620         ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
1621         if (ret)
1622                 return ret;
1623 
1624         ret = nor->params.locking_ops->lock(nor, ofs, len);
1625 
1626         spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
1627         return ret;
1628 }
1629 
1630 static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1631 {
1632         struct spi_nor *nor = mtd_to_spi_nor(mtd);
1633         int ret;
1634 
1635         ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1636         if (ret)
1637                 return ret;
1638 
1639         ret = nor->params.locking_ops->unlock(nor, ofs, len);
1640 
1641         spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1642         return ret;
1643 }
1644 
1645 static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1646 {
1647         struct spi_nor *nor = mtd_to_spi_nor(mtd);
1648         int ret;
1649 
1650         ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1651         if (ret)
1652                 return ret;
1653 
1654         ret = nor->params.locking_ops->is_locked(nor, ofs, len);
1655 
1656         spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1657         return ret;
1658 }
1659 
1660 /*
1661  * Write the Status Register and the Configuration Register with 2 bytes.
1662  * The first byte will be written to the Status Register, while the
1663  * second byte will be written to the Configuration Register.
1664  * Return negative if an error occurred.
1665  */
1666 static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
1667 {
1668         int ret;
1669 
1670         write_enable(nor);
1671 
1672         if (nor->spimem) {
1673                 struct spi_mem_op op =
1674                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
1675                                    SPI_MEM_OP_NO_ADDR,
1676                                    SPI_MEM_OP_NO_DUMMY,
1677                                    SPI_MEM_OP_DATA_OUT(2, sr_cr, 1));
1678 
1679                 ret = spi_mem_exec_op(nor->spimem, &op);
1680         } else {
1681                 ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
1682         }
1683 
1684         if (ret < 0) {
1685                 dev_err(nor->dev,
1686                         "error while writing configuration register\n");
1687                 return -EINVAL;
1688         }
1689 
1690         ret = spi_nor_wait_till_ready(nor);
1691         if (ret) {
1692                 dev_err(nor->dev,
1693                         "timeout while writing configuration register\n");
1694                 return ret;
1695         }
1696 
1697         return 0;
1698 }
1699 
1700 /**
1701  * macronix_quad_enable() - set QE bit in Status Register.
1702  * @nor:        pointer to a 'struct spi_nor'
1703  *
1704  * Set the Quad Enable (QE) bit in the Status Register.
1705  *
1706  * bit 6 of the Status Register is the QE bit for Macronix-like QSPI memories.
1707  *
1708  * Return: 0 on success, -errno otherwise.
1709  */
1710 static int macronix_quad_enable(struct spi_nor *nor)
1711 {
1712         int ret, val;
1713 
1714         val = read_sr(nor);
1715         if (val < 0)
1716                 return val;
1717         if (val & SR_QUAD_EN_MX)
1718                 return 0;
1719 
1720         write_enable(nor);
1721 
1722         write_sr(nor, val | SR_QUAD_EN_MX);
1723 
1724         ret = spi_nor_wait_till_ready(nor);
1725         if (ret)
1726                 return ret;
1727 
1728         ret = read_sr(nor);
1729         if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1730                 dev_err(nor->dev, "Macronix Quad bit not set\n");
1731                 return -EINVAL;
1732         }
1733 
1734         return 0;
1735 }
1736 
1737 /**
1738  * spansion_quad_enable() - set QE bit in Configuration Register.
1739  * @nor:        pointer to a 'struct spi_nor'
1740  *
1741  * Set the Quad Enable (QE) bit in the Configuration Register.
1742  * This function is kept for legacy purposes because it has been used for a
1743  * long time without anybody complaining, but it should be considered
1744  * deprecated and possibly buggy.
1745  * First, this function doesn't care about the previous values of the Status
1746  * and Configuration Registers when it sets the QE bit (bit 1) in the
1747  * Configuration Register: all other bits are cleared, which may have unwanted
1748  * side effects like removing some block protections.
1749  * Secondly, it uses the Read Configuration Register (35h) instruction though
1750  * a few very old memories don't support this instruction. If a pull-up
1751  * resistor is present on the MISO/IO1 line, we might still be able to pass the
1752  * "read back" test because the QSPI memory doesn't recognize the command,
1753  * so it leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
1754  *
1755  * bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
1756  * memories.
1757  *
1758  * Return: 0 on success, -errno otherwise.
1759  */
1760 static int spansion_quad_enable(struct spi_nor *nor)
1761 {
1762         u8 *sr_cr = nor->bouncebuf;
1763         int ret;
1764 
1765         sr_cr[0] = 0;
1766         sr_cr[1] = CR_QUAD_EN_SPAN;
1767         ret = write_sr_cr(nor, sr_cr);
1768         if (ret)
1769                 return ret;
1770 
1771         /* read back and check it */
1772         ret = read_cr(nor);
1773         if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1774                 dev_err(nor->dev, "Spansion Quad bit not set\n");
1775                 return -EINVAL;
1776         }
1777 
1778         return 0;
1779 }
1780 
1781 /**
1782  * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
1783  * @nor:        pointer to a 'struct spi_nor'
1784  *
1785  * Set the Quad Enable (QE) bit in the Configuration Register.
1786  * This function should be used with QSPI memories not supporting the Read
1787  * Configuration Register (35h) instruction.
1788  *
1789  * bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
1790  * memories.
1791  *
1792  * Return: 0 on success, -errno otherwise.
1793  */
1794 static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
1795 {
1796         u8 *sr_cr = nor->bouncebuf;
1797         int ret;
1798 
1799         /* Keep the current value of the Status Register. */
1800         ret = read_sr(nor);
1801         if (ret < 0) {
1802                 dev_err(nor->dev, "error while reading status register\n");
1803                 return -EINVAL;
1804         }
1805         sr_cr[0] = ret;
1806         sr_cr[1] = CR_QUAD_EN_SPAN;
1807 
1808         return write_sr_cr(nor, sr_cr);
1809 }
1810 
1811 /**
1812  * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
1813  * @nor:        pointer to a 'struct spi_nor'
1814  *
1815  * Set the Quad Enable (QE) bit in the Configuration Register.
1816  * This function should be used with QSPI memories supporting the Read
1817  * Configuration Register (35h) instruction.
1818  *
1819  * bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
1820  * memories.
1821  *
1822  * Return: 0 on success, -errno otherwise.
1823  */
1824 static int spansion_read_cr_quad_enable(struct spi_nor *nor)
1825 {
1826         struct device *dev = nor->dev;
1827         u8 *sr_cr = nor->bouncebuf;
1828         int ret;
1829 
1830         /* Check current Quad Enable bit value. */
1831         ret = read_cr(nor);
1832         if (ret < 0) {
1833                 dev_err(dev, "error while reading configuration register\n");
1834                 return -EINVAL;
1835         }
1836 
1837         if (ret & CR_QUAD_EN_SPAN)
1838                 return 0;
1839 
1840         sr_cr[1] = ret | CR_QUAD_EN_SPAN;
1841 
1842         /* Keep the current value of the Status Register. */
1843         ret = read_sr(nor);
1844         if (ret < 0) {
1845                 dev_err(dev, "error while reading status register\n");
1846                 return -EINVAL;
1847         }
1848         sr_cr[0] = ret;
1849 
1850         ret = write_sr_cr(nor, sr_cr);
1851         if (ret)
1852                 return ret;
1853 
1854         /* Read back and check it. */
1855         ret = read_cr(nor);
1856         if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1857                 dev_err(nor->dev, "Spansion Quad bit not set\n");
1858                 return -EINVAL;
1859         }
1860 
1861         return 0;
1862 }
1863 
1864 static int spi_nor_write_sr2(struct spi_nor *nor, u8 *sr2)
1865 {
1866         if (nor->spimem) {
1867                 struct spi_mem_op op =
1868                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
1869                                    SPI_MEM_OP_NO_ADDR,
1870                                    SPI_MEM_OP_NO_DUMMY,
1871                                    SPI_MEM_OP_DATA_OUT(1, sr2, 1));
1872 
1873                 return spi_mem_exec_op(nor->spimem, &op);
1874         }
1875 
1876         return nor->write_reg(nor, SPINOR_OP_WRSR2, sr2, 1);
1877 }
1878 
1879 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
1880 {
1881         if (nor->spimem) {
1882                 struct spi_mem_op op =
1883                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
1884                                    SPI_MEM_OP_NO_ADDR,
1885                                    SPI_MEM_OP_NO_DUMMY,
1886                                    SPI_MEM_OP_DATA_IN(1, sr2, 1));
1887 
1888                 return spi_mem_exec_op(nor->spimem, &op);
1889         }
1890 
1891         return nor->read_reg(nor, SPINOR_OP_RDSR2, sr2, 1);
1892 }
1893 
1894 /**
1895  * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1896  * @nor:        pointer to a 'struct spi_nor'
1897  *
1898  * Set the Quad Enable (QE) bit in the Status Register 2.
1899  *
1900  * This is one of the procedures to set the QE bit described in the SFDP
1901  * (JESD216 rev B) specification but no manufacturer using this procedure has
1902  * been identified yet, hence the name of the function.
1903  *
1904  * Return: 0 on success, -errno otherwise.
1905  */
1906 static int sr2_bit7_quad_enable(struct spi_nor *nor)
1907 {
1908         u8 *sr2 = nor->bouncebuf;
1909         int ret;
1910 
1911         /* Check current Quad Enable bit value. */
1912         ret = spi_nor_read_sr2(nor, sr2);
1913         if (ret)
1914                 return ret;
1915         if (*sr2 & SR2_QUAD_EN_BIT7)
1916                 return 0;
1917 
1918         /* Update the Quad Enable bit. */
1919         *sr2 |= SR2_QUAD_EN_BIT7;
1920 
1921         write_enable(nor);
1922 
1923         ret = spi_nor_write_sr2(nor, sr2);
1924         if (ret < 0) {
1925                 dev_err(nor->dev, "error while writing status register 2\n");
1926                 return -EINVAL;
1927         }
1928 
1929         ret = spi_nor_wait_till_ready(nor);
1930         if (ret < 0) {
1931                 dev_err(nor->dev, "timeout while writing status register 2\n");
1932                 return ret;
1933         }
1934 
1935         /* Read back and check it. */
1936         ret = spi_nor_read_sr2(nor, sr2);
1937         if (!(ret > 0 && (*sr2 & SR2_QUAD_EN_BIT7))) {
1938                 dev_err(nor->dev, "SR2 Quad bit not set\n");
1939                 return -EINVAL;
1940         }
1941 
1942         return 0;
1943 }
1944 
1945 /**
1946  * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
1947  * @nor:        pointer to a 'struct spi_nor'
1948  *
1949  * Read-modify-write function that clears the Block Protection bits from the
1950  * Status Register without affecting other bits.
1951  *
1952  * Return: 0 on success, -errno otherwise.
1953  */
1954 static int spi_nor_clear_sr_bp(struct spi_nor *nor)
1955 {
1956         int ret;
1957         u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1958 
1959         ret = read_sr(nor);
1960         if (ret < 0) {
1961                 dev_err(nor->dev, "error while reading status register\n");
1962                 return ret;
1963         }
1964 
1965         write_enable(nor);
1966 
1967         ret = write_sr(nor, ret & ~mask);
1968         if (ret) {
1969                 dev_err(nor->dev, "write to status register failed\n");
1970                 return ret;
1971         }
1972 
1973         ret = spi_nor_wait_till_ready(nor);
1974         if (ret)
1975                 dev_err(nor->dev, "timeout while writing status register\n");
1976         return ret;
1977 }
1978 
1979 /**
1980  * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
1981  * bits on spansion flashes.
1982  * @nor:        pointer to a 'struct spi_nor'
1983  *
1984  * Read-modify-write function that clears the Block Protection bits from the
1985  * Status Register without affecting other bits. The function is tightly
1986  * coupled with the spansion_quad_enable() function. Both assume that the
1987  * 16-bit Write Status Register (01h) command and the Read Configuration
1988  * Register (35h) instruction are supported.
1989  *
1990  * Return: 0 on success, -errno otherwise.
1991  */
1992 static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
1993 {
1994         int ret;
1995         u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1996         u8 *sr_cr =  nor->bouncebuf;
1997 
1998         /* Check current Quad Enable bit value. */
1999         ret = read_cr(nor);
2000         if (ret < 0) {
2001                 dev_err(nor->dev,
2002                         "error while reading configuration register\n");
2003                 return ret;
2004         }
2005 
2006         /*
2007          * When the configuration register Quad Enable bit is one, only the
2008          * Write Status (01h) command with two data bytes may be used.
2009          */
2010         if (ret & CR_QUAD_EN_SPAN) {
2011                 sr_cr[1] = ret;
2012 
2013                 ret = read_sr(nor);
2014                 if (ret < 0) {
2015                         dev_err(nor->dev,
2016                                 "error while reading status register\n");
2017                         return ret;
2018                 }
2019                 sr_cr[0] = ret & ~mask;
2020 
2021                 ret = write_sr_cr(nor, sr_cr);
2022                 if (ret)
2023                         dev_err(nor->dev, "16-bit write register failed\n");
2024                 return ret;
2025         }
2026 
2027         /*
2028          * If the Quad Enable bit is zero, use the Write Status (01h) command
2029          * with one data byte.
2030          */
2031         return spi_nor_clear_sr_bp(nor);
2032 }
2033 
2034 /* Used when the "_ext_id" is two bytes at most */
2035 #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)      \
2036                 .id = {                                                 \
2037                         ((_jedec_id) >> 16) & 0xff,                     \
2038                         ((_jedec_id) >> 8) & 0xff,                      \
2039                         (_jedec_id) & 0xff,                             \
2040                         ((_ext_id) >> 8) & 0xff,                        \
2041                         (_ext_id) & 0xff,                               \
2042                         },                                              \
2043                 .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),       \
2044                 .sector_size = (_sector_size),                          \
2045                 .n_sectors = (_n_sectors),                              \
2046                 .page_size = 256,                                       \
2047                 .flags = (_flags),
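/*
 * Illustrative expansion (values chosen for the example only):
 * INFO(0xc84015, 0, 64 * 1024, 32, SECT_4K) yields
 * .id = { 0xc8, 0x40, 0x15, 0x00, 0x00 }, .id_len = 3,
 * .sector_size = 64 * 1024, .n_sectors = 32 and .page_size = 256.
 */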
2048 
2049 #define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)     \
2050                 .id = {                                                 \
2051                         ((_jedec_id) >> 16) & 0xff,                     \
2052                         ((_jedec_id) >> 8) & 0xff,                      \
2053                         (_jedec_id) & 0xff,                             \
2054                         ((_ext_id) >> 16) & 0xff,                       \
2055                         ((_ext_id) >> 8) & 0xff,                        \
2056                         (_ext_id) & 0xff,                               \
2057                         },                                              \
2058                 .id_len = 6,                                            \
2059                 .sector_size = (_sector_size),                          \
2060                 .n_sectors = (_n_sectors),                              \
2061                 .page_size = 256,                                       \
2062                 .flags = (_flags),
2063 
2064 #define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)   \
2065                 .sector_size = (_sector_size),                          \
2066                 .n_sectors = (_n_sectors),                              \
2067                 .page_size = (_page_size),                              \
2068                 .addr_width = (_addr_width),                            \
2069                 .flags = (_flags),
2070 
2071 #define S3AN_INFO(_jedec_id, _n_sectors, _page_size)                    \
2072                 .id = {                                                 \
2073                         ((_jedec_id) >> 16) & 0xff,                     \
2074                         ((_jedec_id) >> 8) & 0xff,                      \
2075                         (_jedec_id) & 0xff                              \
2076                         },                                              \
2077                 .id_len = 3,                                            \
2078                 .sector_size = (8*_page_size),                          \
2079                 .n_sectors = (_n_sectors),                              \
2080                 .page_size = _page_size,                                \
2081                 .addr_width = 3,                                        \
2082                 .flags = SPI_NOR_NO_FR | SPI_S3AN,
2083 
2084 static int
2085 is25lp256_post_bfpt_fixups(struct spi_nor *nor,
2086                            const struct sfdp_parameter_header *bfpt_header,
2087                            const struct sfdp_bfpt *bfpt,
2088                            struct spi_nor_flash_parameter *params)
2089 {
2090         /*
2091          * IS25LP256 supports 4B opcodes, but the BFPT advertises a
2092          * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
2093          * Overwrite the address width advertised by the BFPT.
2094          */
2095         if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
2096                 BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
2097                 nor->addr_width = 4;
2098 
2099         return 0;
2100 }
2101 
2102 static struct spi_nor_fixups is25lp256_fixups = {
2103         .post_bfpt = is25lp256_post_bfpt_fixups,
2104 };
2105 
2106 static int
2107 mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
2108                             const struct sfdp_parameter_header *bfpt_header,
2109                             const struct sfdp_bfpt *bfpt,
2110                             struct spi_nor_flash_parameter *params)
2111 {
2112         /*
2113          * MX25L25635F supports 4B opcodes but MX25L25635E does not.
2114          * Unfortunately, Macronix has re-used the same JEDEC ID for both
2115          * variants which prevents us from defining a new entry in the parts
2116          * table.
2117          * We need a way to differentiate MX25L25635E and MX25L25635F, and it
2118          * seems that the F version advertises support for Fast Read 4-4-4 in
2119          * its BFPT table.
2120          */
2121         if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
2122                 nor->flags |= SNOR_F_4B_OPCODES;
2123 
2124         return 0;
2125 }
2126 
2127 static struct spi_nor_fixups mx25l25635_fixups = {
2128         .post_bfpt = mx25l25635_post_bfpt_fixups,
2129 };
2130 
2131 static void gd25q256_default_init(struct spi_nor *nor)
2132 {
2133         /*
2134          * Some manufacturers, like GigaDevice, may use different
2135          * bits to set QE on different memories, so the MFR alone
2136          * can't indicate the quad_enable method for this case; we
2137          * need to set it in the default_init fixup hook.
2138          */
2139         nor->params.quad_enable = macronix_quad_enable;
2140 }
2141 
2142 static struct spi_nor_fixups gd25q256_fixups = {
2143         .default_init = gd25q256_default_init,
2144 };
2145 
2146 /* NOTE: double check command sets and memory organization when you add
2147  * more nor chips.  This current list focuses on newer chips, which
2148  * have been converging on command sets that include JEDEC ID.
2149  *
2150  * All newly added entries should describe *hardware* and should use SECT_4K
2151  * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
2152  * scenarios excluding small sectors there is a config option that can be
2153  * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
2154  * For historical (and compatibility) reasons (before we got the above config)
2155  * some old entries may be missing the 4K flag.
2156  */
2157 static const struct flash_info spi_nor_ids[] = {
2158         /* Atmel -- some are (confusingly) marketed as "DataFlash" */
2159         { "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K) },
2160         { "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) },
2161 
2162         { "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) },
2163         { "at25df321",  INFO(0x1f4700, 0, 64 * 1024,  64, SECT_4K) },
2164         { "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) },
2165         { "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
2166 
2167         { "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
2168         { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
2169         { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
2170         { "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
2171 
2172         { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
2173 
2174         /* EON -- en25xxx */
2175         { "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64, SECT_4K) },
2176         { "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64, 0) },
2177         { "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
2178         { "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) },
2179         { "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
2180         { "en25q80a",   INFO(0x1c3014, 0, 64 * 1024,   16,
2181                         SECT_4K | SPI_NOR_DUAL_READ) },
2182         { "en25qh32",   INFO(0x1c7016, 0, 64 * 1024,   64, 0) },
2183         { "en25qh64",   INFO(0x1c7017, 0, 64 * 1024,  128,
2184                         SECT_4K | SPI_NOR_DUAL_READ) },
2185         { "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256, 0) },
2186         { "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) },
2187         { "en25s64",    INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },
2188 
2189         /* ESMT */
2190         { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
2191         { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
2192         { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
2193 
2194         /* Everspin */
2195         { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2196         { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2197         { "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2198         { "mr25h40",  CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2199 
2200         /* Fujitsu */
2201         { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
2202 
2203         /* GigaDevice */
2204         {
2205                 "gd25q16", INFO(0xc84015, 0, 64 * 1024,  32,
2206                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2207                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2208         },
2209         {
2210                 "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64,
2211                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2212                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2213         },
2214         {
2215                 "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
2216                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2217                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2218         },
2219         {
2220                 "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
2221                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2222                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2223         },
2224         {
2225                 "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
2226                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2227                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2228         },
2229         {
2230                 "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
2231                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2232                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2233         },
2234         {
2235                 "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
2236                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2237                         SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2238                         .fixups = &gd25q256_fixups,
2239         },
2240 
2241         /* Intel/Numonyx -- xxxs33b */
2242         { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
2243         { "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
2244         { "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
2245 
2246         /* ISSI */
2247         { "is25cd512",  INFO(0x7f9d20, 0, 32 * 1024,   2, SECT_4K) },
2248         { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024,   8,
2249                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2250         { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024,  32,
2251                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2252         { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024,  16,
2253                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2254         { "is25lp032",  INFO(0x9d6016, 0, 64 * 1024,  64,
2255                         SECT_4K | SPI_NOR_DUAL_READ) },
2256         { "is25lp064",  INFO(0x9d6017, 0, 64 * 1024, 128,
2257                         SECT_4K | SPI_NOR_DUAL_READ) },
2258         { "is25lp128",  INFO(0x9d6018, 0, 64 * 1024, 256,
2259                         SECT_4K | SPI_NOR_DUAL_READ) },
2260         { "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512,
2261                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2262                         SPI_NOR_4B_OPCODES)
2263                         .fixups = &is25lp256_fixups },
2264         { "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64,
2265                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2266         { "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128,
2267                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2268         { "is25wp128",  INFO(0x9d7018, 0, 64 * 1024, 256,
2269                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2270 
2271         /* Macronix */
2272         { "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1, SECT_4K) },
2273         { "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
2274         { "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
2275         { "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
2276         { "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
2277         { "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, SECT_4K) },
2278         { "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) },
2279         { "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
2280         { "mx25u2033e",  INFO(0xc22532, 0, 64 * 1024,   4, SECT_4K) },
2281         { "mx25u3235f",  INFO(0xc22536, 0, 64 * 1024,  64,
2282                          SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2283         { "mx25u4035",   INFO(0xc22533, 0, 64 * 1024,   8, SECT_4K) },
2284         { "mx25u8035",   INFO(0xc22534, 0, 64 * 1024,  16, SECT_4K) },
2285         { "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
2286         { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
2287         { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
2288         { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
2289                          SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2290         { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
2291                          SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
2292                          .fixups = &mx25l25635_fixups },
2293         { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
2294         { "mx25v8035f",  INFO(0xc22314, 0, 64 * 1024,  16,
2295                          SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2296         { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
2297         { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2298         { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2299         { "mx66l1g45g",  INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2300         { "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
2301 
2302         /* Micron <--> ST Micro */
2303         { "n25q016a",    INFO(0x20bb15, 0, 64 * 1024,   32, SECT_4K | SPI_NOR_QUAD_READ) },
2304         { "n25q032",     INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
2305         { "n25q032a",    INFO(0x20bb16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
2306         { "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
2307         { "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
2308         { "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
2309         { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
2310         { "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2311         { "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
2312         { "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
2313         { "mt25qu512a",  INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
2314                                SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
2315                                SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2316         { "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
2317                               SPI_NOR_QUAD_READ) },
2318         { "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
2319         { "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
2320         { "mt25ql02g",   INFO(0x20ba22, 0, 64 * 1024, 4096,
2321                               SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
2322                               NO_CHIP_ERASE) },
2323         { "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
2324 
2325         /* Micron */
2326         {
2327                 "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
2328                         SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
2329                         SPI_NOR_4B_OPCODES)
2330         },
2331         { "mt35xu02g",  INFO(0x2c5b1c, 0, 128 * 1024, 2048,
2332                              SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
2333                              SPI_NOR_4B_OPCODES) },
2334 
2335         /* PMC */
2336         { "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
2337         { "pm25lv010",   INFO(0,        0, 32 * 1024,    4, SECT_4K_PMC) },
2338         { "pm25lq032",   INFO(0x7f9d46, 0, 64 * 1024,   64, SECT_4K) },
2339 
2340         /* Spansion/Cypress -- single (large) sector size only, at least
2341          * for the chips listed here (without boot sectors).
2342          */
2343         { "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2344         { "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2345         { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
2346                         SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2347         { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
2348                         SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2349         { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
2350         { "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2351         { "s25fl512s",  INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
2352                         SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2353                         SPI_NOR_HAS_LOCK | USE_CLSR) },
2354         { "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2355         { "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
2356         { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
2357         { "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
2358         { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2359         { "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2360         { "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
2361         { "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
2362         { "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) },
2363         { "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) },
2364         { "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) },
2365         { "s25fl004k",  INFO(0xef4013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2366         { "s25fl008k",  INFO(0xef4014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2367         { "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2368         { "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
2369         { "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2370         { "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
2371         { "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
2372         { "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ) },
2373         { "s25fl208k",  INFO(0x014014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ) },
2374         { "s25fl064l",  INFO(0x016017,      0,  64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2375         { "s25fl128l",  INFO(0x016018,      0,  64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2376         { "s25fl256l",  INFO(0x016019,      0,  64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2377 
2378         /* SST -- large erase sizes are "overlays", "sectors" are 4K */
2379         { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
2380         { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
2381         { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
2382         { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
2383         { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
2384         { "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) },
2385         { "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) },
2386         { "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) },
2387         { "sst25wf020a", INFO(0x621612, 0, 64 * 1024,  4, SECT_4K) },
2388         { "sst25wf040b", INFO(0x621613, 0, 64 * 1024,  8, SECT_4K) },
2389         { "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
2390         { "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
2391         { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
2392                               SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2393         { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2394 
2395         /* ST Microelectronics -- newer production may have feature updates */
2396         { "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) },
2397         { "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) },
2398         { "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) },
2399         { "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) },
2400         { "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) },
2401         { "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) },
2402         { "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) },
2403         { "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) },
2404         { "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) },
2405 
2406         { "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) },
2407         { "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) },
2408         { "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4, 0) },
2409         { "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8, 0) },
2410         { "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16, 0) },
2411         { "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32, 0) },
2412         { "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64, 0) },
2413         { "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128, 0) },
2414         { "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64, 0) },
2415 
2416         { "m45pe10", INFO(0x204011,  0, 64 * 1024,    2, 0) },
2417         { "m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) },
2418         { "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) },
2419 
2420         { "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) },
2421         { "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) },
2422         { "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },
2423 
2424         { "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K) },
2425         { "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) },
2426         { "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) },
2427         { "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) },
2428         { "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) },
2429         { "m25px80",    INFO(0x207114,  0, 64 * 1024, 16, 0) },
2430 
2431         /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
2432         { "w25x05", INFO(0xef3010, 0, 64 * 1024,  1,  SECT_4K) },
2433         { "w25x10", INFO(0xef3011, 0, 64 * 1024,  2,  SECT_4K) },
2434         { "w25x20", INFO(0xef3012, 0, 64 * 1024,  4,  SECT_4K) },
2435         { "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) },
2436         { "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
2437         { "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
2438         {
2439                 "w25q16dw", INFO(0xef6015, 0, 64 * 1024,  32,
2440                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2441                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2442         },
2443         { "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
2444         {
2445                 "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024,  32,
2446                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2447                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2448         },
2449         { "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4, SECT_4K) },
2450         { "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4, SECT_4K) },
2451         { "w25q20ew", INFO(0xef6012, 0, 64 * 1024,  4, SECT_4K) },
2452         { "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
2453         {
2454                 "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64,
2455                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2456                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2457         },
2458         {
2459                 "w25q32jv", INFO(0xef7016, 0, 64 * 1024,  64,
2460                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2461                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2462         },
2463         { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
2464         { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
2465         {
2466                 "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
2467                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2468                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2469         },
2470         {
2471                 "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
2472                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2473                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2474         },
2475         {
2476                 "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
2477                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2478                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2479         },
2480         { "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
2481         { "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
2482         { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
2483         { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2484         { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
2485                              SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2486         { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
2487                         SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
2488 
2489         /* Catalyst / On Semiconductor -- non-JEDEC */
2490         { "cat25c11", CAT25_INFO(  16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2491         { "cat25c03", CAT25_INFO(  32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2492         { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2493         { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2494         { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2495 
2496         /* Xilinx S3AN Internal Flash */
2497         { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
2498         { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
2499         { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
2500         { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
2501         { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
2502 
2503         /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
2504         { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2505         { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2506         { },
2507 };
2508 
2509 static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
2510 {
2511         int                     tmp;
2512         u8                      *id = nor->bouncebuf;
2513         const struct flash_info *info;
2514 
2515         if (nor->spimem) {
2516                 struct spi_mem_op op =
2517                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
2518                                    SPI_MEM_OP_NO_ADDR,
2519                                    SPI_MEM_OP_NO_DUMMY,
2520                                    SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
2521 
2522                 tmp = spi_mem_exec_op(nor->spimem, &op);
2523         } else {
2524                 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id,
2525                                     SPI_NOR_MAX_ID_LEN);
2526         }
2527         if (tmp < 0) {
2528                 dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
2529                 return ERR_PTR(tmp);
2530         }
2531 
2532         for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
2533                 info = &spi_nor_ids[tmp];
2534                 if (info->id_len) {
2535                         if (!memcmp(info->id, id, info->id_len))
2536                                 return &spi_nor_ids[tmp];
2537                 }
2538         }
2539         dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
2540                 SPI_NOR_MAX_ID_LEN, id);
2541         return ERR_PTR(-ENODEV);
2542 }
2543 
2544 static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
2545                         size_t *retlen, u_char *buf)
2546 {
2547         struct spi_nor *nor = mtd_to_spi_nor(mtd);
2548         ssize_t ret;
2549 
2550         dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
2551 
2552         ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
2553         if (ret)
2554                 return ret;
2555 
2556         while (len) {
2557                 loff_t addr = from;
2558 
2559                 addr = spi_nor_convert_addr(nor, addr);
2560 
2561                 ret = spi_nor_read_data(nor, addr, len, buf);
2562                 if (ret == 0) {
2563                         /* We shouldn't see 0-length reads */
2564                         ret = -EIO;
2565                         goto read_err;
2566                 }
2567                 if (ret < 0)
2568                         goto read_err;
2569 
2570                 WARN_ON(ret > len);
2571                 *retlen += ret;
2572                 buf += ret;
2573                 from += ret;
2574                 len -= ret;
2575         }
2576         ret = 0;
2577 
2578 read_err:
2579         spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
2580         return ret;
2581 }
2582 
2583 static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
2584                 size_t *retlen, const u_char *buf)
2585 {
2586         struct spi_nor *nor = mtd_to_spi_nor(mtd);
2587         size_t actual;
2588         int ret;
2589 
2590         dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2591 
2592         ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
2593         if (ret)
2594                 return ret;
2595 
2596         write_enable(nor);
2597 
2598         nor->sst_write_second = false;
2599 
2600         actual = to % 2;
2601         /* Start write from odd address. */
2602         if (actual) {
2603                 nor->program_opcode = SPINOR_OP_BP;
2604 
2605                 /* write one byte. */
2606                 ret = spi_nor_write_data(nor, to, 1, buf);
2607                 if (ret < 0)
2608                         goto sst_write_err;
2609                 WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2610                      (int)ret);
2611                 ret = spi_nor_wait_till_ready(nor);
2612                 if (ret)
2613                         goto sst_write_err;
2614         }
2615         to += actual;
2616 
2617         /* Write out most of the data here. */
2618         for (; actual < len - 1; actual += 2) {
2619                 nor->program_opcode = SPINOR_OP_AAI_WP;
2620 
2621                 /* write two bytes. */
2622                 ret = spi_nor_write_data(nor, to, 2, buf + actual);
2623                 if (ret < 0)
2624                         goto sst_write_err;
2625                 WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
2626                      (int)ret);
2627                 ret = spi_nor_wait_till_ready(nor);
2628                 if (ret)
2629                         goto sst_write_err;
2630                 to += 2;
2631                 nor->sst_write_second = true;
2632         }
2633         nor->sst_write_second = false;
2634 
2635         write_disable(nor);
2636         ret = spi_nor_wait_till_ready(nor);
2637         if (ret)
2638                 goto sst_write_err;
2639 
2640         /* Write out trailing byte if it exists. */
2641         if (actual != len) {
2642                 write_enable(nor);
2643 
2644                 nor->program_opcode = SPINOR_OP_BP;
2645                 ret = spi_nor_write_data(nor, to, 1, buf + actual);
2646                 if (ret < 0)
2647                         goto sst_write_err;
2648                 WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2649                      (int)ret);
2650                 ret = spi_nor_wait_till_ready(nor);
2651                 if (ret)
2652                         goto sst_write_err;
2653                 write_disable(nor);
2654                 actual += 1;
2655         }
2656 sst_write_err:
2657         *retlen += actual;
2658         spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
2659         return ret;
2660 }
2661 
2662 /*
2663  * Write an address range to the nor chip.  Data must be written in
2664  * FLASH_PAGESIZE chunks.  The address range may be any size provided
2665  * it is within the physical boundaries.
2666  */
2667 static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
2668         size_t *retlen, const u_char *buf)
2669 {
2670         struct spi_nor *nor = mtd_to_spi_nor(mtd);
2671         size_t page_offset, page_remain, i;
2672         ssize_t ret;
2673 
2674         dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2675 
2676         ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
2677         if (ret)
2678                 return ret;
2679 
2680         for (i = 0; i < len; ) {
2681                 ssize_t written;
2682                 loff_t addr = to + i;
2683 
2684                 /*
2685                  * If page_size is a power of two, the offset can be quickly
2686                  * calculated with an AND operation. In the other cases we
2687                  * need to do a modulus operation (more expensive).
2688                  * Power of two numbers have only one bit set, so we can use
2689                  * hweight32() to detect whether we need to do a
2690                  * modulus (do_div()) or not.
2691                  */
2692                 if (hweight32(nor->page_size) == 1) {
2693                         page_offset = addr & (nor->page_size - 1);
2694                 } else {
2695                         uint64_t aux = addr;
2696 
2697                         page_offset = do_div(aux, nor->page_size);
2698                 }
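                /*
                 * Illustrative example (values picked for the sketch only):
                 * with page_size = 256 (a power of two) and addr = 0x1234,
                 * page_offset = 0x1234 & 0xff = 0x34; with a non-power-of-two
                 * page_size of 264, do_div() yields 0x1234 % 264 = 172.
                 */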
2699                 /* the size of data remaining on the first page */
2700                 page_remain = min_t(size_t,
2701                                     nor->page_size - page_offset, len - i);
2702 
2703                 addr = spi_nor_convert_addr(nor, addr);
2704 
2705                 write_enable(nor);
2706                 ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
2707                 if (ret < 0)
2708                         goto write_err;
2709                 written = ret;
2710 
2711                 ret = spi_nor_wait_till_ready(nor);
2712                 if (ret)
2713                         goto write_err;
2714                 *retlen += written;
2715                 i += written;
2716         }
2717 
2718 write_err:
2719         spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
2720         return ret;
2721 }
2722 
2723 static int spi_nor_check(struct spi_nor *nor)
2724 {
2725         if (!nor->dev ||
2726             (!nor->spimem &&
2727             (!nor->read || !nor->write || !nor->read_reg ||
2728               !nor->write_reg))) {
2729                 pr_err("spi-nor: please fill all the necessary fields!\n");
2730                 return -EINVAL;
2731         }
2732 
2733         return 0;
2734 }
2735 
2736 static int s3an_nor_setup(struct spi_nor *nor,
2737                           const struct spi_nor_hwcaps *hwcaps)
2738 {
2739         int ret;
2740 
2741         ret = spi_nor_xread_sr(nor, nor->bouncebuf);
2742         if (ret < 0) {
2743                 dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
2744                 return ret;
2745         }
2746 
2747         nor->erase_opcode = SPINOR_OP_XSE;
2748         nor->program_opcode = SPINOR_OP_XPP;
2749         nor->read_opcode = SPINOR_OP_READ;
2750         nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
2751 
2752         /*
2753          * These flashes have a page size of 264 or 528 bytes (known as
2754          * Default addressing mode). It can be changed to the more standard
2755          * Power of two mode, where the page size is 256 or 512 bytes. This
2756          * comes at a price: there is 3% less space, any data already
2757          * stored is corrupted, and the page size cannot be changed back to
2758          * Default addressing mode.
2759          *
2760          * The current addressing mode can be read from the XRDSR register
2761          * and should not be changed, as doing so is a destructive operation.
2762          */
2763         if (nor->bouncebuf[0] & XSR_PAGESIZE) {
2764                 /* Flash in Power of 2 mode */
2765                 nor->page_size = (nor->page_size == 264) ? 256 : 512;
2766                 nor->mtd.writebufsize = nor->page_size;
2767                 nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
2768                 nor->mtd.erasesize = 8 * nor->page_size;
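                /*
                 * Illustrative example based on the ID table above: a 3S700AN
                 * (512 sectors, 264-byte default pages) in Power of 2 mode
                 * ends up with page_size = 256, erasesize = 2048 and
                 * size = 8 * 256 * 512 = 1 MiB.
                 */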
2769         } else {
2770                 /* Flash in Default addressing mode */
2771                 nor->params.convert_addr = s3an_convert_addr;
2772                 nor->mtd.erasesize = nor->info->sector_size;
2773         }
2774 
2775         return 0;
2776 }
2777 
2778 static void
2779 spi_nor_set_read_settings(struct spi_nor_read_command *read,
2780                           u8 num_mode_clocks,
2781                           u8 num_wait_states,
2782                           u8 opcode,
2783                           enum spi_nor_protocol proto)
2784 {
2785         read->num_mode_clocks = num_mode_clocks;
2786         read->num_wait_states = num_wait_states;
2787         read->opcode = opcode;
2788         read->proto = proto;
2789 }
2790 
2791 static void
2792 spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
2793                         u8 opcode,
2794                         enum spi_nor_protocol proto)
2795 {
2796         pp->opcode = opcode;
2797         pp->proto = proto;
2798 }
2799 
2800 static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2801 {
2802         size_t i;
2803 
2804         for (i = 0; i < size; i++)
2805                 if (table[i][0] == (int)hwcaps)
2806                         return table[i][1];
2807 
2808         return -EINVAL;
2809 }
2810 
2811 static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
2812 {
2813         static const int hwcaps_read2cmd[][2] = {
2814                 { SNOR_HWCAPS_READ,             SNOR_CMD_READ },
2815                 { SNOR_HWCAPS_READ_FAST,        SNOR_CMD_READ_FAST },
2816                 { SNOR_HWCAPS_READ_1_1_1_DTR,   SNOR_CMD_READ_1_1_1_DTR },
2817                 { SNOR_HWCAPS_READ_1_1_2,       SNOR_CMD_READ_1_1_2 },
2818                 { SNOR_HWCAPS_READ_1_2_2,       SNOR_CMD_READ_1_2_2 },
2819                 { SNOR_HWCAPS_READ_2_2_2,       SNOR_CMD_READ_2_2_2 },
2820                 { SNOR_HWCAPS_READ_1_2_2_DTR,   SNOR_CMD_READ_1_2_2_DTR },
2821                 { SNOR_HWCAPS_READ_1_1_4,       SNOR_CMD_READ_1_1_4 },
2822                 { SNOR_HWCAPS_READ_1_4_4,       SNOR_CMD_READ_1_4_4 },
2823                 { SNOR_HWCAPS_READ_4_4_4,       SNOR_CMD_READ_4_4_4 },
2824                 { SNOR_HWCAPS_READ_1_4_4_DTR,   SNOR_CMD_READ_1_4_4_DTR },
2825                 { SNOR_HWCAPS_READ_1_1_8,       SNOR_CMD_READ_1_1_8 },
2826                 { SNOR_HWCAPS_READ_1_8_8,       SNOR_CMD_READ_1_8_8 },
2827                 { SNOR_HWCAPS_READ_8_8_8,       SNOR_CMD_READ_8_8_8 },
2828                 { SNOR_HWCAPS_READ_1_8_8_DTR,   SNOR_CMD_READ_1_8_8_DTR },
2829         };
2830 
2831         return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
2832                                   ARRAY_SIZE(hwcaps_read2cmd));
2833 }
2834 
2835 static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
2836 {
2837         static const int hwcaps_pp2cmd[][2] = {
2838                 { SNOR_HWCAPS_PP,               SNOR_CMD_PP },
2839                 { SNOR_HWCAPS_PP_1_1_4,         SNOR_CMD_PP_1_1_4 },
2840                 { SNOR_HWCAPS_PP_1_4_4,         SNOR_CMD_PP_1_4_4 },
2841                 { SNOR_HWCAPS_PP_4_4_4,         SNOR_CMD_PP_4_4_4 },
2842                 { SNOR_HWCAPS_PP_1_1_8,         SNOR_CMD_PP_1_1_8 },
2843                 { SNOR_HWCAPS_PP_1_8_8,         SNOR_CMD_PP_1_8_8 },
2844                 { SNOR_HWCAPS_PP_8_8_8,         SNOR_CMD_PP_8_8_8 },
2845         };
2846 
2847         return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
2848                                   ARRAY_SIZE(hwcaps_pp2cmd));
2849 }
2850 
2851 /*
2852  * Serial Flash Discoverable Parameters (SFDP) parsing.
2853  */
2854 
2855 /**
2856  * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
2857  *                      addr_width and read_dummy members of the struct
2858  *                      spi_nor should be previously set.
2859  *
2860  * @nor:        pointer to a 'struct spi_nor'
2861  * @addr:       offset in the serial flash memory
2862  * @len:        number of bytes to read
2863  * @buf:        buffer where the data is copied into (dma-safe memory)
2864  *
2865  * Return: 0 on success, -errno otherwise.
2866  */
2867 static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
2868 {
2869         ssize_t ret;
2870 
2871         while (len) {
2872                 ret = spi_nor_read_data(nor, addr, len, buf);
2873                 if (ret < 0)
2874                         return ret;
2875                 if (!ret || ret > len)
2876                         return -EIO;
2877 
2878                 buf += ret;
2879                 addr += ret;
2880                 len -= ret;
2881         }
2882         return 0;
2883 }
2884 
2885 /**
2886  * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
2887  * @nor:        pointer to a 'struct spi_nor'
2888  * @addr:       offset in the SFDP area to start reading data from
2889  * @len:        number of bytes to read
2890  * @buf:        buffer where the SFDP data are copied into (dma-safe memory)
2891  *
2892  * Whatever the actual numbers of bytes for address and dummy cycles are
2893  * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
2894  * followed by a 3-byte address and 8 dummy clock cycles.
2895  *
2896  * Return: 0 on success, -errno otherwise.
2897  */
2898 static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
2899                              size_t len, void *buf)
2900 {
2901         u8 addr_width, read_opcode, read_dummy;
2902         int ret;
2903 
2904         read_opcode = nor->read_opcode;
2905         addr_width = nor->addr_width;
2906         read_dummy = nor->read_dummy;
2907 
2908         nor->read_opcode = SPINOR_OP_RDSFDP;
2909         nor->addr_width = 3;
2910         nor->read_dummy = 8;
2911 
2912         ret = spi_nor_read_raw(nor, addr, len, buf);
2913 
2914         nor->read_opcode = read_opcode;
2915         nor->addr_width = addr_width;
2916         nor->read_dummy = read_dummy;
2917 
2918         return ret;
2919 }
2920 
2921 /**
2922  * spi_nor_spimem_check_op() - check if the operation is supported
2923  *                             by the controller
2924  * @nor:        pointer to a 'struct spi_nor'
2925  * @op:         pointer to op template to be checked
2926  *
2927  * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2928  */
2929 static int spi_nor_spimem_check_op(struct spi_nor *nor,
2930                                    struct spi_mem_op *op)
2931 {
2932         /*
2933          * First test with 4 address bytes. The opcode itself might
2934          * be a 3B addressing opcode but we don't care, because the
2935          * SPI controller implementation should not check the opcode,
2936          * only the sequence.
2937          */
2938         op->addr.nbytes = 4;
2939         if (!spi_mem_supports_op(nor->spimem, op)) {
2940                 if (nor->mtd.size > SZ_16M)
2941                         return -ENOTSUPP;
2942 
2943                 /* If flash size <= 16MB, 3 address bytes are sufficient */
2944                 op->addr.nbytes = 3;
2945                 if (!spi_mem_supports_op(nor->spimem, op))
2946                         return -ENOTSUPP;
2947         }
2948 
2949         return 0;
2950 }
2951 
2952 /**
2953  * spi_nor_spimem_check_readop() - check if the read op is supported
2954  *                                 by the controller
2955  * @nor:         pointer to a 'struct spi_nor'
2956  * @read:        pointer to op template to be checked
2957  *
2958  * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2959  */
2960 static int spi_nor_spimem_check_readop(struct spi_nor *nor,
2961                                        const struct spi_nor_read_command *read)
2962 {
2963         struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
2964                                           SPI_MEM_OP_ADDR(3, 0, 1),
2965                                           SPI_MEM_OP_DUMMY(0, 1),
2966                                           SPI_MEM_OP_DATA_IN(0, NULL, 1));
2967 
2968         op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
2969         op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
2970         op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
2971         op.dummy.buswidth = op.addr.buswidth;
2972         op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
2973                           op.dummy.buswidth / 8;
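        /*
         * Illustrative example: a 1-4-4 read with 2 mode clocks and 4 wait
         * states on a 4-bit wide dummy phase gives (2 + 4) * 4 / 8 = 3
         * dummy bytes.
         */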
2974 
2975         return spi_nor_spimem_check_op(nor, &op);
2976 }
2977 
2978 /**
2979  * spi_nor_spimem_check_pp() - check if the page program op is supported
2980  *                             by the controller
2981  * @nor:         pointer to a 'struct spi_nor'
2982  * @pp:          pointer to op template to be checked
2983  *
2984  * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2985  */
2986 static int spi_nor_spimem_check_pp(struct spi_nor *nor,
2987                                    const struct spi_nor_pp_command *pp)
2988 {
2989         struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
2990                                           SPI_MEM_OP_ADDR(3, 0, 1),
2991                                           SPI_MEM_OP_NO_DUMMY,
2992                                           SPI_MEM_OP_DATA_OUT(0, NULL, 1));
2993 
2994         op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
2995         op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
2996         op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
2997 
2998         return spi_nor_spimem_check_op(nor, &op);
2999 }
3000 
3001 /**
3002  * spi_nor_spimem_adjust_hwcaps() - Find optimal Read/Write protocol
3003  *                                based on SPI controller capabilities
3004  * @nor:        pointer to a 'struct spi_nor'
3005  * @hwcaps:     pointer to resulting capabilities after adjusting
3006  *              according to controller and flash's capability
3007  */
3008 static void
3009 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
3010 {
3011         struct spi_nor_flash_parameter *params =  &nor->params;
3012         unsigned int cap;
3013 
3014         /* DTR modes are not supported yet, mask them all. */
3015         *hwcaps &= ~SNOR_HWCAPS_DTR;
3016 
3017         /* X-X-X modes are not supported yet, mask them all. */
3018         *hwcaps &= ~SNOR_HWCAPS_X_X_X;
3019 
3020         for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
3021                 int rdidx, ppidx;
3022 
3023                 if (!(*hwcaps & BIT(cap)))
3024                         continue;
3025 
3026                 rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
3027                 if (rdidx >= 0 &&
3028                     spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
3029                         *hwcaps &= ~BIT(cap);
3030 
3031                 ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
3032                 if (ppidx < 0)
3033                         continue;
3034 
3035                 if (spi_nor_spimem_check_pp(nor,
3036                                             &params->page_programs[ppidx]))
3037                         *hwcaps &= ~BIT(cap);
3038         }
3039 }
3040 
3041 /**
3042  * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
3043  * @nor:        pointer to a 'struct spi_nor'
3044  * @addr:       offset in the SFDP area to start reading data from
3045  * @len:        number of bytes to read
3046  * @buf:        buffer where the SFDP data are copied into
3047  *
3048  * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer, since @buf is
3049  * not guaranteed to be dma-safe.
3050  *
3051  * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
3052  *          otherwise.
3053  */
3054 static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
3055                                         size_t len, void *buf)
3056 {
3057         void *dma_safe_buf;
3058         int ret;
3059 
3060         dma_safe_buf = kmalloc(len, GFP_KERNEL);
3061         if (!dma_safe_buf)
3062                 return -ENOMEM;
3063 
3064         ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
3065         memcpy(buf, dma_safe_buf, len);
3066         kfree(dma_safe_buf);
3067 
3068         return ret;
3069 }
3070 
3071 /* Fast Read settings. */
3072 
3073 static void
3074 spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
3075                                     u16 half,
3076                                     enum spi_nor_protocol proto)
3077 {
3078         read->num_mode_clocks = (half >> 5) & 0x07;
3079         read->num_wait_states = (half >> 0) & 0x1f;
3080         read->opcode = (half >> 8) & 0xff;
3081         read->proto = proto;
3082 }
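/*
 * Illustrative decode (example value only, not from any particular BFPT):
 * a half-word of 0xeb44 yields opcode = 0xeb,
 * num_mode_clocks = (0x44 >> 5) & 0x07 = 2 and
 * num_wait_states = 0x44 & 0x1f = 4.
 */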
3083 
3084 struct sfdp_bfpt_read {
3085         /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
3086         u32                     hwcaps;
3087 
3088         /*
3089          * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
3090          * whether the Fast Read x-y-z command is supported.
3091          */
3092         u32                     supported_dword;
3093         u32                     supported_bit;
3094 
3095         /*
3096          * The half-word at offset <settings_shift> in <settings_dword> BFPT DWORD
3097          * encodes the op code, the number of mode clocks and the number of wait
3098          * states to be used by Fast Read x-y-z command.
3099          */
3100         u32                     settings_dword;
3101         u32                     settings_shift;
3102 
3103         /* The SPI protocol for this Fast Read x-y-z command. */
3104         enum spi_nor_protocol   proto;
3105 };
3106 
3107 static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
3108         /* Fast Read 1-1-2 */
3109         {
3110                 SNOR_HWCAPS_READ_1_1_2,
3111                 BFPT_DWORD(1), BIT(16), /* Supported bit */
3112                 BFPT_DWORD(4), 0,       /* Settings */
3113                 SNOR_PROTO_1_1_2,
3114         },
3115 
3116         /* Fast Read 1-2-2 */
3117         {
3118                 SNOR_HWCAPS_READ_1_2_2,
3119                 BFPT_DWORD(1), BIT(20), /* Supported bit */
3120                 BFPT_DWORD(4), 16,      /* Settings */
3121                 SNOR_PROTO_1_2_2,
3122         },
3123 
3124         /* Fast Read 2-2-2 */
3125         {
3126                 SNOR_HWCAPS_READ_2_2_2,
3127                 BFPT_DWORD(5),  BIT(0), /* Supported bit */
3128                 BFPT_DWORD(6), 16,      /* Settings */
3129                 SNOR_PROTO_2_2_2,
3130         },
3131 
3132         /* Fast Read 1-1-4 */
3133         {
3134                 SNOR_HWCAPS_READ_1_1_4,
3135                 BFPT_DWORD(1), BIT(22), /* Supported bit */
3136                 BFPT_DWORD(3), 16,      /* Settings */
3137                 SNOR_PROTO_1_1_4,
3138         },
3139 
3140         /* Fast Read 1-4-4 */
3141         {
3142                 SNOR_HWCAPS_READ_1_4_4,
3143                 BFPT_DWORD(1), BIT(21), /* Supported bit */
3144                 BFPT_DWORD(3), 0,       /* Settings */
3145                 SNOR_PROTO_1_4_4,
3146         },
3147 
3148         /* Fast Read 4-4-4 */
3149         {
3150                 SNOR_HWCAPS_READ_4_4_4,
3151                 BFPT_DWORD(5), BIT(4),  /* Supported bit */
3152                 BFPT_DWORD(7), 16,      /* Settings */
3153                 SNOR_PROTO_4_4_4,
3154         },
3155 };
3156 
3157 struct sfdp_bfpt_erase {
3158          * The half-word at offset <shift> in DWORD <dword> encodes the
3159          * The half-word at offset <shift> in DWORD <dwoard> encodes the
3160          * op code and erase sector size to be used by Sector Erase commands.
3161          */
3162         u32                     dword;
3163         u32                     shift;
3164 };
3165 
3166 static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
3167         /* Erase Type 1 in DWORD8 bits[15:0] */
3168         {BFPT_DWORD(8), 0},
3169 
3170         /* Erase Type 2 in DWORD8 bits[31:16] */
3171         {BFPT_DWORD(8), 16},
3172 
3173         /* Erase Type 3 in DWORD9 bits[15:0] */
3174         {BFPT_DWORD(9), 0},
3175 
3176         /* Erase Type 4 in DWORD9 bits[31:16] */
3177         {BFPT_DWORD(9), 16},
3178 };
3179 
3180 /**
3181  * spi_nor_set_erase_type() - set a SPI NOR erase type
3182  * @erase:      pointer to a structure that describes a SPI NOR erase type
3183  * @size:       the size of the sector/block erased by the erase type
3184  * @opcode:     the SPI command op code to erase the sector/block
3185  */
3186 static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
3187                                    u32 size, u8 opcode)
3188 {
3189         erase->size = size;
3190         erase->opcode = opcode;
3191         /* JEDEC JESD216B Standard requires erase sizes to be a power of 2. */
3192         erase->size_shift = ffs(erase->size) - 1;
3193         erase->size_mask = (1 << erase->size_shift) - 1;
3194 }
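/*
 * Illustrative example: a 4 KiB erase type (size = 0x1000) gets
 * size_shift = ffs(0x1000) - 1 = 12 and size_mask = 0xfff.
 */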
3195 
3196 /**
3197  * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
3198  * @erase:      pointer to a structure that describes a SPI NOR erase type
3199  * @size:       the size of the sector/block erased by the erase type
3200  * @opcode:     the SPI command op code to erase the sector/block
3201  * @i:          erase type index as sorted in the Basic Flash Parameter Table
3202  *
3203  * The supported Erase Types will be sorted at init in ascending order, with
3204  * the smallest Erase Type size being the first member in the erase_type array
3205  * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
3206  * the Basic Flash Parameter Table since it will be used later on to
3207  * synchronize with the supported Erase Types defined in SFDP optional tables.
3208  */
3209 static void
3210 spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
3211                                      u32 size, u8 opcode, u8 i)
3212 {
3213         erase->idx = i;
3214         spi_nor_set_erase_type(erase, size, opcode);
3215 }
3216 
3217 /**
3218  * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
3219  * @l:  member in the left half of the map's erase_type array
3220  * @r:  member in the right half of the map's erase_type array
3221  *
3222  * Comparison function used in the sort() call to sort in ascending order the
3223  * map's erase types, the smallest erase type size being the first member in the
3224  * sorted erase_type array.
3225  *
3226  * Return: the result of @l->size - @r->size
3227  */
3228 static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
3229 {
3230         const struct spi_nor_erase_type *left = l, *right = r;
3231 
3232         return left->size - right->size;
3233 }
3234 
3235 /**
3236  * spi_nor_sort_erase_mask() - sort erase mask
3237  * @map:        the erase map of the SPI NOR
3238  * @erase_mask: the erase type mask to be sorted
3239  *
3240  * Replicate the sort done for the map's erase types in BFPT: sort the erase
3241  * mask in ascending order with the smallest erase type size starting from
3242  * BIT(0) in the sorted erase mask.
3243  *
3244  * Return: sorted erase mask.
3245  */
3246 static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
3247 {
3248         struct spi_nor_erase_type *erase_type = map->erase_type;
3249         int i;
3250         u8 sorted_erase_mask = 0;
3251 
3252         if (!erase_mask)
3253                 return 0;
3254 
3255         /* Replicate the sort done for the map's erase types. */
3256         for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3257                 if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
3258                         sorted_erase_mask |= BIT(i);
3259 
3260         return sorted_erase_mask;
3261 }
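/*
 * Illustrative example: if erase_type[0] is a 4 KiB type saved at BFPT
 * index 2 and erase_type[1] is a 64 KiB type saved at index 0, an input
 * mask of BIT(0) | BIT(2) is returned as BIT(0) | BIT(1).
 */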
3262 
3263 /**
3264  * spi_nor_regions_sort_erase_types() - sort erase types in each region
3265  * @map:        the erase map of the SPI NOR
3266  *
3267  * Function assumes that the erase types defined in the erase map are already
3268  * sorted in ascending order, with the smallest erase type size being the first
3269  * member in the erase_type array. It replicates the sort done for the map's
3270  * erase types. Each region's erase bitmask will indicate which erase types are
3271  * supported from the sorted erase types defined in the erase map.
3272  * Sort all the regions' erase types at init in order to speed up the process of
3273  * finding the best erase command at runtime.
3274  */
3275 static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
3276 {
3277         struct spi_nor_erase_region *region = map->regions;
3278         u8 region_erase_mask, sorted_erase_mask;
3279 
3280         while (region) {
3281                 region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
3282 
3283                 sorted_erase_mask = spi_nor_sort_erase_mask(map,
3284                                                             region_erase_mask);
3285 
3286                 /* Overwrite erase mask. */
3287                 region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
3288                                  sorted_erase_mask;
3289 
3290                 region = spi_nor_region_next(region);
3291         }
3292 }
3293 
3294 /**
3295  * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
3296  * @map:                the erase map of the SPI NOR
3297  * @erase_mask:         bitmask encoding erase types that can erase the entire
3298  *                      flash memory
3299  * @flash_size:         the spi nor flash memory size
3300  */
3301 static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
3302                                            u8 erase_mask, u64 flash_size)
3303 {
3304         /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
3305         map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
3306                                      SNOR_LAST_REGION;
3307         map->uniform_region.size = flash_size;
3308         map->regions = &map->uniform_region;
3309         map->uniform_erase_type = erase_mask;
3310 }
3311 
3312 static int
3313 spi_nor_post_bfpt_fixups(struct spi_nor *nor,
3314                          const struct sfdp_parameter_header *bfpt_header,
3315                          const struct sfdp_bfpt *bfpt,
3316                          struct spi_nor_flash_parameter *params)
3317 {
3318         if (nor->info->fixups && nor->info->fixups->post_bfpt)
3319                 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
3320                                                     params);
3321 
3322         return 0;
3323 }
3324 
3325 /**
3326  * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
3327  * @nor:                pointer to a 'struct spi_nor'
3328  * @bfpt_header:        pointer to the 'struct sfdp_parameter_header' describing
3329  *                      the Basic Flash Parameter Table length and version
3330  * @params:             pointer to the 'struct spi_nor_flash_parameter' to be
3331  *                      filled
3332  *
3333  * The Basic Flash Parameter Table is the main and only mandatory table as
3334  * defined by the SFDP (JESD216) specification.
3335  * It provides us with the total size (memory density) of the data array and
3336  * the number of address bytes for Fast Read, Page Program and Sector Erase
3337  * commands.
3338  * For Fast READ commands, it also gives the number of mode clock cycles and
3339  * wait states (regrouped in the number of dummy clock cycles) for each
3340  * supported instruction op code.
3341  * For Page Program, the page size is available since JESD216 rev A; however,
3342  * the supported instruction op codes are still not provided.
3343  * For Sector Erase commands, this table stores the supported instruction op
3344  * codes and the associated sector sizes.
3345  * Finally, the Quad Enable Requirements (QER) are also available since JESD216
3346  * rev A. The QER bits encode the manufacturer dependent procedure to be
3347  * executed to set the Quad Enable (QE) bit in some internal register of the
3348  * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
3349  * sending any Quad SPI command to the memory. Actually, setting the QE bit
3350  * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
3351  * and IO3 hence enabling 4 (Quad) I/O lines.
3352  *
3353  * Return: 0 on success, -errno otherwise.
3354  */
3355 static int spi_nor_parse_bfpt(struct spi_nor *nor,
3356                               const struct sfdp_parameter_header *bfpt_header,
3357                               struct spi_nor_flash_parameter *params)
3358 {
3359         struct spi_nor_erase_map *map = &params->erase_map;
3360         struct spi_nor_erase_type *erase_type = map->erase_type;
3361         struct sfdp_bfpt bfpt;
3362         size_t len;
3363         int i, cmd, err;
3364         u32 addr;
3365         u16 half;
3366         u8 erase_mask;
3367 
3368         /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
3369         if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
3370                 return -EINVAL;
3371 
3372         /* Read the Basic Flash Parameter Table. */
3373         len = min_t(size_t, sizeof(bfpt),
3374                     bfpt_header->length * sizeof(u32));
3375         addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
3376         memset(&bfpt, 0, sizeof(bfpt));
3377         err = spi_nor_read_sfdp_dma_unsafe(nor,  addr, len, &bfpt);
3378         if (err < 0)
3379                 return err;
3380 
3381         /* Fix endianness of the BFPT DWORDs. */
3382         for (i = 0; i < BFPT_DWORD_MAX; i++)
3383                 bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
3384 
3385         /* Number of address bytes. */
3386         switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
3387         case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
3388                 nor->addr_width = 3;
3389                 break;
3390 
3391         case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
3392                 nor->addr_width = 4;
3393                 break;
3394 
3395         default:
3396                 break;
3397         }
3398 
3399         /* Flash Memory Density (in bits). */
3400         params->size = bfpt.dwords[BFPT_DWORD(2)];
3401         if (params->size & BIT(31)) {
3402                 params->size &= ~BIT(31);
3403 
3404                 /*
3405                  * Prevent overflows on params->size. Anyway, a NOR of 2^64
3406                  * bits is unlikely to exist so this error probably means
3407                  * the BFPT we are reading is corrupted/wrong.
3408                  */
3409                 if (params->size > 63)
3410                         return -EINVAL;
3411 
3412                 params->size = 1ULL << params->size;
3413         } else {
3414                 params->size++;
3415         }
3416         params->size >>= 3; /* Convert to bytes. */
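        /*
         * Illustrative examples (values not tied to any particular part):
         * DWORD2 = 0x0fffffff (density - 1) gives 2^28 bits = 32 MiB, while
         * DWORD2 = 0x80000021 (bit 31 set, N = 33) gives 2^33 bits = 1 GiB.
         */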
3417 
3418         /* Fast Read settings. */
3419         for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
3420                 const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
3421                 struct spi_nor_read_command *read;
3422 
3423                 if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
3424                         params->hwcaps.mask &= ~rd->hwcaps;
3425                         continue;
3426                 }
3427 
3428                 params->hwcaps.mask |= rd->hwcaps;
3429                 cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
3430                 read = &params->reads[cmd];
3431                 half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
3432                 spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
3433         }
3434 
3435         /*
3436          * Sector Erase settings. Reinitialize the uniform erase map using the
3437          * Erase Types defined in the bfpt table.
3438          */
3439         erase_mask = 0;
3440         memset(&params->erase_map, 0, sizeof(params->erase_map));
3441         for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
3442                 const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
3443                 u32 erasesize;
3444                 u8 opcode;
3445 
3446                 half = bfpt.dwords[er->dword] >> er->shift;
3447                 erasesize = half & 0xff;
3448 
3449                 /* erasesize == 0 means this Erase Type is not supported. */
3450                 if (!erasesize)
3451                         continue;
3452 
3453                 erasesize = 1U << erasesize;
3454                 opcode = (half >> 8) & 0xff;
3455                 erase_mask |= BIT(i);
3456                 spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
3457                                                      opcode, i);
3458         }
3459         spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
3460         /*
3461          * Sort all the map's Erase Types in ascending order with the smallest
3462          * erase size being the first member in the erase_type array.
3463          */
3464         sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
3465              spi_nor_map_cmp_erase_type, NULL);
3466         /*
3467          * Sort the erase types in the uniform region in order to update the
3468          * uniform_erase_type bitmask. The bitmask will be used later on when
3469          * selecting the uniform erase.
3470          */
3471         spi_nor_regions_sort_erase_types(map);
3472         map->uniform_erase_type = map->uniform_region.offset &
3473                                   SNOR_ERASE_TYPE_MASK;
3474 
3475         /* Stop here if not JESD216 rev A or later. */
3476         if (bfpt_header->length < BFPT_DWORD_MAX)
3477                 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
3478                                                 params);
3479 
3480         /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
3481         params->page_size = bfpt.dwords[BFPT_DWORD(11)];
3482         params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
3483         params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
3484         params->page_size = 1U << params->page_size;
3485 
3486         /* Quad Enable Requirements. */
3487         switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
3488         case BFPT_DWORD15_QER_NONE:
3489                 params->quad_enable = NULL;
3490                 break;
3491 
3492         case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
3493         case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
3494                 params->quad_enable = spansion_no_read_cr_quad_enable;
3495                 break;
3496 
3497         case BFPT_DWORD15_QER_SR1_BIT6:
3498                 params->quad_enable = macronix_quad_enable;
3499                 break;
3500 
3501         case BFPT_DWORD15_QER_SR2_BIT7:
3502                 params->quad_enable = sr2_bit7_quad_enable;
3503                 break;
3504 
3505         case BFPT_DWORD15_QER_SR2_BIT1:
3506                 params->quad_enable = spansion_read_cr_quad_enable;
3507                 break;
3508 
3509         default:
3510                 return -EINVAL;
3511         }
3512 
3513         return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
3514 }
3515 
3516 #define SMPT_CMD_ADDRESS_LEN_MASK               GENMASK(23, 22)
3517 #define SMPT_CMD_ADDRESS_LEN_0                  (0x0UL << 22)
3518 #define SMPT_CMD_ADDRESS_LEN_3                  (0x1UL << 22)
3519 #define SMPT_CMD_ADDRESS_LEN_4                  (0x2UL << 22)
3520 #define SMPT_CMD_ADDRESS_LEN_USE_CURRENT        (0x3UL << 22)
3521 
3522 #define SMPT_CMD_READ_DUMMY_MASK                GENMASK(19, 16)
3523 #define SMPT_CMD_READ_DUMMY_SHIFT               16
3524 #define SMPT_CMD_READ_DUMMY(_cmd) \
3525         (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
3526 #define SMPT_CMD_READ_DUMMY_IS_VARIABLE         0xfUL
3527 
3528 #define SMPT_CMD_READ_DATA_MASK                 GENMASK(31, 24)
3529 #define SMPT_CMD_READ_DATA_SHIFT                24
3530 #define SMPT_CMD_READ_DATA(_cmd) \
3531         (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
3532 
3533 #define SMPT_CMD_OPCODE_MASK                    GENMASK(15, 8)
3534 #define SMPT_CMD_OPCODE_SHIFT                   8
3535 #define SMPT_CMD_OPCODE(_cmd) \
3536         (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
3537 
3538 #define SMPT_MAP_REGION_COUNT_MASK              GENMASK(23, 16)
3539 #define SMPT_MAP_REGION_COUNT_SHIFT             16
3540 #define SMPT_MAP_REGION_COUNT(_header) \
3541         ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
3542           SMPT_MAP_REGION_COUNT_SHIFT) + 1)
3543 
3544 #define SMPT_MAP_ID_MASK                        GENMASK(15, 8)
3545 #define SMPT_MAP_ID_SHIFT                       8
3546 #define SMPT_MAP_ID(_header) \
3547         (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
3548 
3549 #define SMPT_MAP_REGION_SIZE_MASK               GENMASK(31, 8)
3550 #define SMPT_MAP_REGION_SIZE_SHIFT              8
3551 #define SMPT_MAP_REGION_SIZE(_region) \
3552         (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
3553            SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
3554 
3555 #define SMPT_MAP_REGION_ERASE_TYPE_MASK         GENMASK(3, 0)
3556 #define SMPT_MAP_REGION_ERASE_TYPE(_region) \
3557         ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
3558 
3559 #define SMPT_DESC_TYPE_MAP                      BIT(1)
3560 #define SMPT_DESC_END                           BIT(0)
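/*
 * Illustrative decode of the macros above (example values only): a map
 * header DWORD of 0x00040102 gives SMPT_MAP_ID() = 0x01 and
 * SMPT_MAP_REGION_COUNT() = 0x04 + 1 = 5 regions; a region DWORD of
 * 0x0001ff03 gives SMPT_MAP_REGION_SIZE() = (0x01ff + 1) * 256 = 128 KiB
 * and SMPT_MAP_REGION_ERASE_TYPE() = 0x3 (Erase Types 1 and 2).
 */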
3561 
3562 /**
3563  * spi_nor_smpt_addr_width() - return the address width used in the
3564  *                             configuration detection command.
3565  * @nor:        pointer to a 'struct spi_nor'
3566  * @settings:   configuration detection command descriptor, dword1
3567  */
3568 static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
3569 {
3570         switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
3571         case SMPT_CMD_ADDRESS_LEN_0:
3572                 return 0;
3573         case SMPT_CMD_ADDRESS_LEN_3:
3574                 return 3;
3575         case SMPT_CMD_ADDRESS_LEN_4:
3576                 return 4;
3577         case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
3578                 /* fall through */
3579         default:
3580                 return nor->addr_width;
3581         }
3582 }
3583 
3584 /**
3585  * spi_nor_smpt_read_dummy() - return the configuration detection command read
3586  *                             latency, in clock cycles.
3587  * @nor:        pointer to a 'struct spi_nor'
3588  * @settings:   configuration detection command descriptor, dword1
3589  *
3590  * Return: the number of dummy cycles for an SMPT read
3591  */
3592 static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
3593 {
3594         u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
3595 
3596         if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
3597                 return nor->read_dummy;
3598         return read_dummy;
3599 }
3600 
3601 /**
3602  * spi_nor_get_map_in_use() - get the configuration map in use
3603  * @nor:        pointer to a 'struct spi_nor'
3604  * @smpt:       pointer to the sector map parameter table
3605  * @smpt_len:   sector map parameter table length
3606  *
3607  * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
3608  */
3609 static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
3610                                          u8 smpt_len)
3611 {
3612         const u32 *ret;
3613         u8 *buf;
3614         u32 addr;
3615         int err;
3616         u8 i;
3617         u8 addr_width, read_opcode, read_dummy;
3618         u8 read_data_mask, map_id;
3619 
3620         /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
3621         buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3622         if (!buf)
3623                 return ERR_PTR(-ENOMEM);
3624 
3625         addr_width = nor->addr_width;
3626         read_dummy = nor->read_dummy;
3627         read_opcode = nor->read_opcode;
3628 
3629         map_id = 0;
3630         /* Determine if there are any optional Detection Command Descriptors */
3631         for (i = 0; i < smpt_len; i += 2) {
3632                 if (smpt[i] & SMPT_DESC_TYPE_MAP)
3633                         break;
3634 
3635                 read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
3636                 nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
3637                 nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
3638                 nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
3639                 addr = smpt[i + 1];
3640 
3641                 err = spi_nor_read_raw(nor, addr, 1, buf);
3642                 if (err) {
3643                         ret = ERR_PTR(err);
3644                         goto out;
3645                 }
3646 
3647                 /*
3648                  * Build an index value that is used to select the Sector Map
3649                  * Configuration that is currently in use.
3650                  */
3651                 map_id = map_id << 1 | !!(*buf & read_data_mask);
3652         }
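        /*
         * Illustrative example: with two detection commands whose selected
         * data bits read back as 1 and then 0, map_id ends up as 0b10 = 2.
         */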
3653 
3654         /*
3655          * If command descriptors are provided, they always precede map
3656          * descriptors in the table. There is no need to start the iteration
3657          * over the smpt array all over again.
3658          *
3659          * Find the matching configuration map.
3660          */
3661         ret = ERR_PTR(-EINVAL);
3662         while (i < smpt_len) {
3663                 if (SMPT_MAP_ID(smpt[i]) == map_id) {
3664                         ret = smpt + i;
3665                         break;
3666                 }
3667 
3668                 /*
3669                  * If there are no more configuration map descriptors and no
3670                  * configuration ID matched the configuration identifier, the
3671                  * sector address map is unknown.
3672                  */
3673                 if (smpt[i] & SMPT_DESC_END)
3674                         break;
3675 
3676                 /* increment the table index to the next map */
3677                 i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
3678         }
3679 
3680         /* fall through */
3681 out:
3682         kfree(buf);
3683         nor->addr_width = addr_width;
3684         nor->read_dummy = read_dummy;
3685         nor->read_opcode = read_opcode;
3686         return ret;
3687 }
3688 
3689 /**
3690  * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
3691  * @region:     pointer to a structure that describes a SPI NOR erase region
3692  * @erase:      pointer to a structure that describes a SPI NOR erase type
3693  * @erase_type: erase type bitmask
3694  */
3695 static void
3696 spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
3697                              const struct spi_nor_erase_type *erase,
3698                              const u8 erase_type)
3699 {
3700         int i;
3701 
3702         for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3703                 if (!(erase_type & BIT(i)))
3704                         continue;
3705                 if (region->size & erase[i].size_mask) {
3706                         spi_nor_region_mark_overlay(region);
3707                         return;
3708                 }
3709         }
3710 }
3711 
3712 /**
3713  * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
3714  * @nor:        pointer to a 'struct spi_nor'
3715  * @params:     pointer to a duplicate 'struct spi_nor_flash_parameter' that is
3716  *              used for storing SFDP parsed data
3717  * @smpt:       pointer to the sector map parameter table
3718  *
3719  * Return: 0 on success, -errno otherwise.
3720  */
3721 static int
3722 spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
3723                                    struct spi_nor_flash_parameter *params,
3724                                    const u32 *smpt)
3725 {
3726         struct spi_nor_erase_map *map = &params->erase_map;
3727         struct spi_nor_erase_type *erase = map->erase_type;
3728         struct spi_nor_erase_region *region;
3729         u64 offset;
3730         u32 region_count;
3731         int i, j;
3732         u8 uniform_erase_type, save_uniform_erase_type;
3733         u8 erase_type, regions_erase_type;
3734 
3735         region_count = SMPT_MAP_REGION_COUNT(*smpt);
3736         /*
3737          * The regions will be freed when the driver detaches from the
3738          * device.
3739          */
3740         region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
3741                               GFP_KERNEL);
3742         if (!region)
3743                 return -ENOMEM;
3744         map->regions = region;
3745 
3746         uniform_erase_type = 0xff;
3747         regions_erase_type = 0;
3748         offset = 0;
3749         /* Populate regions. */
3750         for (i = 0; i < region_count; i++) {
3751                 j = i + 1; /* index for the region dword */
3752                 region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
3753                 erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
3754                 region[i].offset = offset | erase_type;
3755 
3756                 spi_nor_region_check_overlay(&region[i], erase, erase_type);
3757 
3758                 /*
3759                  * Save the erase types that are supported in all regions and
3760                  * can erase the entire flash memory.
3761                  */
3762                 uniform_erase_type &= erase_type;
3763 
3764                 /*
3765                  * regions_erase_type mask will indicate all the erase types
3766                  * supported in this configuration map.
3767                  */
3768                 regions_erase_type |= erase_type;
3769 
3770                 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
3771                          region[i].size;
3772         }
3773 
3774         save_uniform_erase_type = map->uniform_erase_type;
3775         map->uniform_erase_type = spi_nor_sort_erase_mask(map,
3776                                                           uniform_erase_type);
3777 
3778         if (!regions_erase_type) {
3779                 /*
3780                  * Roll back to the previous uniform_erase_type mask, SMPT is
3781                  * broken.
3782                  */
3783                 map->uniform_erase_type = save_uniform_erase_type;
3784                 return -EINVAL;
3785         }
3786 
3787         /*
3788          * BFPT advertises all the erase types supported by all the possible
3789          * map configurations. Mask out the erase types that are not supported
3790          * by the current map configuration.
3791          */
3792         for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3793                 if (!(regions_erase_type & BIT(erase[i].idx)))
3794                         spi_nor_set_erase_type(&erase[i], 0, 0xFF);
3795 
3796         spi_nor_region_mark_end(&region[i - 1]);
3797 
3798         return 0;
3799 }
3800 
3801 /**
3802  * spi_nor_parse_smpt() - parse Sector Map Parameter Table
3803  * @nor:                pointer to a 'struct spi_nor'
3804  * @smpt_header:        sector map parameter table header
3805  * @params:             pointer to a duplicate 'struct spi_nor_flash_parameter'
3806  *                      that is used for storing SFDP parsed data
3807  *
3808  * This table is optional, but when available, we parse it to identify the
3809  * location and size of sectors within the main data array of the flash memory
3810  * device and to identify which Erase Types are supported by each sector.
3811  *
3812  * Return: 0 on success, -errno otherwise.
3813  */
3814 static int spi_nor_parse_smpt(struct spi_nor *nor,
3815                               const struct sfdp_parameter_header *smpt_header,
3816                               struct spi_nor_flash_parameter *params)
3817 {
3818         const u32 *sector_map;
3819         u32 *smpt;
3820         size_t len;
3821         u32 addr;
3822         int i, ret;
3823 
3824         /* Read the Sector Map Parameter Table. */
3825         len = smpt_header->length * sizeof(*smpt);
3826         smpt = kmalloc(len, GFP_KERNEL);
3827         if (!smpt)
3828                 return -ENOMEM;
3829 
3830         addr = SFDP_PARAM_HEADER_PTP(smpt_header);
3831         ret = spi_nor_read_sfdp(nor, addr, len, smpt);
3832         if (ret)
3833                 goto out;
3834 
3835         /* Fix endianness of the SMPT DWORDs. */
3836         for (i = 0; i < smpt_header->length; i++)
3837                 smpt[i] = le32_to_cpu(smpt[i]);
3838 
3839         sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
3840         if (IS_ERR(sector_map)) {
3841                 ret = PTR_ERR(sector_map);
3842                 goto out;
3843         }
3844 
3845         ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
3846         if (ret)
3847                 goto out;
3848 
3849         spi_nor_regions_sort_erase_types(&params->erase_map);
3850         /* fall through */
3851 out:
3852         kfree(smpt);
3853         return ret;
3854 }
3855 
3856 #define SFDP_4BAIT_DWORD_MAX    2
3857 
3858 struct sfdp_4bait {
3859         /* The hardware capability. */
3860         u32             hwcaps;
3861 
3862         /*
3863          * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
3864          * the associated 4-byte address op code is supported.
3865          */
3866         u32             supported_bit;
3867 };
3868 
3869 /**
3870  * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
3871  * @nor:                pointer to a 'struct spi_nor'.
3872  * @param_header:       pointer to the 'struct sfdp_parameter_header' describing
3873  *                      the 4-Byte Address Instruction Table length and version.
3874  * @params:             pointer to the 'struct spi_nor_flash_parameter' to be filled.
3875  *
3876  * Return: 0 on success, -errno otherwise.
3877  */
3878 static int spi_nor_parse_4bait(struct spi_nor *nor,
3879                                const struct sfdp_parameter_header *param_header,
3880                                struct spi_nor_flash_parameter *params)
3881 {
3882         static const struct sfdp_4bait reads[] = {
3883                 { SNOR_HWCAPS_READ,             BIT(0) },
3884                 { SNOR_HWCAPS_READ_FAST,        BIT(1) },
3885                 { SNOR_HWCAPS_READ_1_1_2,       BIT(2) },
3886                 { SNOR_HWCAPS_READ_1_2_2,       BIT(3) },
3887                 { SNOR_HWCAPS_READ_1_1_4,       BIT(4) },
3888                 { SNOR_HWCAPS_READ_1_4_4,       BIT(5) },
3889                 { SNOR_HWCAPS_READ_1_1_1_DTR,   BIT(13) },
3890                 { SNOR_HWCAPS_READ_1_2_2_DTR,   BIT(14) },
3891                 { SNOR_HWCAPS_READ_1_4_4_DTR,   BIT(15) },
3892         };
3893         static const struct sfdp_4bait programs[] = {
3894                 { SNOR_HWCAPS_PP,               BIT(6) },
3895                 { SNOR_HWCAPS_PP_1_1_4,         BIT(7) },
3896                 { SNOR_HWCAPS_PP_1_4_4,         BIT(8) },
3897         };
3898         static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
3899                 { 0u /* not used */,            BIT(9) },
3900                 { 0u /* not used */,            BIT(10) },
3901                 { 0u /* not used */,            BIT(11) },
3902                 { 0u /* not used */,            BIT(12) },
3903         };
3904         struct spi_nor_pp_command *params_pp = params->page_programs;
3905         struct spi_nor_erase_map *map = &params->erase_map;
3906         struct spi_nor_erase_type *erase_type = map->erase_type;
3907         u32 *dwords;
3908         size_t len;
3909         u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
3910         int i, ret;
3911 
3912         if (param_header->major != SFDP_JESD216_MAJOR ||
3913             param_header->length < SFDP_4BAIT_DWORD_MAX)
3914                 return -EINVAL;
3915 
3916         /* Read the 4-byte Address Instruction Table. */
3917         len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
3918 
3919         /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
3920         dwords = kmalloc(len, GFP_KERNEL);
3921         if (!dwords)
3922                 return -ENOMEM;
3923 
3924         addr = SFDP_PARAM_HEADER_PTP(param_header);
3925         ret = spi_nor_read_sfdp(nor, addr, len, dwords);
3926         if (ret)
3927                 goto out;
3928 
3929         /* Fix endianness of the 4BAIT DWORDs. */
3930         for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
3931                 dwords[i] = le32_to_cpu(dwords[i]);
3932 
3933         /*
3934          * Compute the subset of (Fast) Read commands for which the 4-byte
3935          * version is supported.
3936          */
3937         discard_hwcaps = 0;
3938         read_hwcaps = 0;
3939         for (i = 0; i < ARRAY_SIZE(reads); i++) {
3940                 const struct sfdp_4bait *read = &reads[i];
3941 
3942                 discard_hwcaps |= read->hwcaps;
3943                 if ((params->hwcaps.mask & read->hwcaps) &&
3944                     (dwords[0] & read->supported_bit))
3945                         read_hwcaps |= read->hwcaps;
3946         }
3947 
3948         /*
3949          * Compute the subset of Page Program commands for which the 4-byte
3950          * version is supported.
3951          */
3952         pp_hwcaps = 0;
3953         for (i = 0; i < ARRAY_SIZE(programs); i++) {
3954                 const struct sfdp_4bait *program = &programs[i];
3955 
3956                 /*
3957                  * The 4 Byte Address Instruction (Optional) Table is the only
3958                  * SFDP table that indicates support for Page Program Commands.
3959                  * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
3960                  * authority for specifying Page Program support.
3961                  */
3962                 discard_hwcaps |= program->hwcaps;
3963                 if (dwords[0] & program->supported_bit)
3964                         pp_hwcaps |= program->hwcaps;
3965         }
3966 
3967         /*
3968          * Compute the subset of Sector Erase commands for which the 4-byte
3969          * version is supported.
3970          */
3971         erase_mask = 0;
3972         for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3973                 const struct sfdp_4bait *erase = &erases[i];
3974 
3975                 if (dwords[0] & erase->supported_bit)
3976                         erase_mask |= BIT(i);
3977         }
3978 
3979         /* Replicate the sort done for the map's erase types in BFPT. */
3980         erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
3981 
3982         /*
3983          * We need at least one 4-byte op code per read, program and erase
3984          * operation; the .read(), .write() and .erase() hooks share the
3985          * nor->addr_width value.
3986          */
3987         if (!read_hwcaps || !pp_hwcaps || !erase_mask)
3988                 goto out;
3989 
3990         /*
3991          * Discard all operations from the 4-byte instruction set which are
3992          * not supported by this memory.
3993          */
3994         params->hwcaps.mask &= ~discard_hwcaps;
3995         params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
3996 
3997         /* Use the 4-byte address instruction set. */
3998         for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
3999                 struct spi_nor_read_command *read_cmd = &params->reads[i];
4000 
4001                 read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
4002         }
4003 
4004         /* 4BAIT is the only SFDP table that indicates page program support. */
4005         if (pp_hwcaps & SNOR_HWCAPS_PP)
4006                 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
4007                                         SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
4008         if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
4009                 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
4010                                         SPINOR_OP_PP_1_1_4_4B,
4011                                         SNOR_PROTO_1_1_4);
4012         if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
4013                 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
4014                                         SPINOR_OP_PP_1_4_4_4B,
4015                                         SNOR_PROTO_1_4_4);
4016 
4017         for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
4018                 if (erase_mask & BIT(i))
4019                         erase_type[i].opcode = (dwords[1] >>
4020                                                 erase_type[i].idx * 8) & 0xFF;
4021                 else
4022                         spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
4023         }
4024 
4025         /*
4026          * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
4027          * later, because the conversion to 4-byte opcodes has already been done
4028          * here. Also, spi_nor_set_4byte_opcodes() implements a legacy quirk for
4029          * the erase size of Spansion memories, which is no longer needed with
4030          * newer, SFDP-compliant memories.
4031          */
4032         nor->addr_width = 4;
4033         nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
4034 
4035         /* fall through */
4036 out:
4037         kfree(dwords);
4038         return ret;
4039 }
4040 
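/*
 * For illustration only (not part of the driver), a sketch of how the two
 * 4BAIT DWORDs consumed above are laid out per JESD216B: dwords[0] is a
 * bitmap of the 4-byte address instructions the flash supports, and
 * dwords[1] packs the 4-byte opcodes of Erase Types 1 to 4, one per byte,
 * least significant byte first:
 *
 *	if (dwords[0] & BIT(1))		// 4-byte Fast Read is supported
 *		opcode = SPINOR_OP_READ_FAST_4B;
 *	if (dwords[0] & BIT(9))		// Erase Type 1 has a 4-byte opcode
 *		opcode = (dwords[1] >> 0) & 0xFF;	// byte 0 of DWORD 2
 */
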
4041 /**
4042  * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
4043  * @nor:                pointer to a 'struct spi_nor'
4044  * @params:             pointer to the 'struct spi_nor_flash_parameter' to be
4045  *                      filled
4046  *
4047  * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
4048  * specification. This is a standard which tends to be supported by almost all
4049  * (Q)SPI memory manufacturers. These standardized tables allow us to learn at
4050  * runtime the main parameters needed to perform basic SPI flash operations such
4051  * as Fast Read, Page Program or Sector Erase commands.
4052  *
4053  * Return: 0 on success, -errno otherwise.
4054  */
4055 static int spi_nor_parse_sfdp(struct spi_nor *nor,
4056                               struct spi_nor_flash_parameter *params)
4057 {
4058         const struct sfdp_parameter_header *param_header, *bfpt_header;
4059         struct sfdp_parameter_header *param_headers = NULL;
4060         struct sfdp_header header;
4061         struct device *dev = nor->dev;
4062         size_t psize;
4063         int i, err;
4064 
4065         /* Get the SFDP header. */
4066         err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
4067         if (err < 0)
4068                 return err;
4069 
4070         /* Check the SFDP header version. */
4071         if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
4072             header.major != SFDP_JESD216_MAJOR)
4073                 return -EINVAL;
4074 
4075         /*
4076          * Verify that the first and only mandatory parameter header is a
4077          * Basic Flash Parameter Table header as specified in JESD216.
4078          */
4079         bfpt_header = &header.bfpt_header;
4080         if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
4081             bfpt_header->major != SFDP_JESD216_MAJOR)
4082                 return -EINVAL;
4083 
4084         /*
4085          * Allocate memory then read all parameter headers with a single
4086          * Read SFDP command. These parameter headers will actually be parsed
4087          * twice: a first time to get the latest revision of the basic flash
4088          * parameter table, then a second time to handle the supported optional
4089          * tables.
4090  * Hence we read the parameter headers once and for all to reduce the
4091          * processing time. Also we use kmalloc() instead of devm_kmalloc()
4092          * because we don't need to keep these parameter headers: the allocated
4093          * memory is always released with kfree() before exiting this function.
4094          */
4095         if (header.nph) {
4096                 psize = header.nph * sizeof(*param_headers);
4097 
4098                 param_headers = kmalloc(psize, GFP_KERNEL);
4099                 if (!param_headers)
4100                         return -ENOMEM;
4101 
4102                 err = spi_nor_read_sfdp(nor, sizeof(header),
4103                                         psize, param_headers);
4104                 if (err < 0) {
4105                         dev_err(dev, "failed to read SFDP parameter headers\n");
4106                         goto exit;
4107                 }
4108         }
4109 
4110         /*
4111          * Check other parameter headers to get the latest revision of
4112          * the basic flash parameter table.
4113          */
4114         for (i = 0; i < header.nph; i++) {
4115                 param_header = &param_headers[i];
4116 
4117                 if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
4118                     param_header->major == SFDP_JESD216_MAJOR &&
4119                     (param_header->minor > bfpt_header->minor ||
4120                      (param_header->minor == bfpt_header->minor &&
4121                       param_header->length > bfpt_header->length)))
4122                         bfpt_header = param_header;
4123         }
4124 
4125         err = spi_nor_parse_bfpt(nor, bfpt_header, params);
4126         if (err)
4127                 goto exit;
4128 
4129         /* Parse optional parameter tables. */
4130         for (i = 0; i < header.nph; i++) {
4131                 param_header = &param_headers[i];
4132 
4133                 switch (SFDP_PARAM_HEADER_ID(param_header)) {
4134                 case SFDP_SECTOR_MAP_ID:
4135                         err = spi_nor_parse_smpt(nor, param_header, params);
4136                         break;
4137 
4138                 case SFDP_4BAIT_ID:
4139                         err = spi_nor_parse_4bait(nor, param_header, params);
4140                         break;
4141 
4142                 default:
4143                         break;
4144                 }
4145 
4146                 if (err) {
4147                         dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
4148                                  SFDP_PARAM_HEADER_ID(param_header));
4149                         /*
4150                          * Let's not drop all information we extracted so far
4151                          * if optional table parsers fail. In case of failing,
4152                          * each optional parser is responsible to roll back to
4153                          * the previously known spi_nor data.
4154                          */
4155                         err = 0;
4156                 }
4157         }
4158 
4159 exit:
4160         kfree(param_headers);
4161         return err;
4162 }
4163 
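/*
 * For illustration only: the SFDP area walked above starts with the SFDP
 * header at byte 0, immediately followed by 1 + header.nph parameter
 * headers. Each header identifies a table (BFPT, SMPT, 4BAIT, ...) and
 * points at it, so reading any table boils down to (a sketch, with error
 * handling omitted and 'table' standing for any DMA-able buffer):
 *
 *	u32 addr = SFDP_PARAM_HEADER_PTP(param_header);
 *	size_t len = param_header->length * sizeof(u32);  // length is in DWORDs
 *	err = spi_nor_read_sfdp(nor, addr, len, table);
 */
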
4164 static int spi_nor_select_read(struct spi_nor *nor,
4165                                u32 shared_hwcaps)
4166 {
4167         int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
4168         const struct spi_nor_read_command *read;
4169 
4170         if (best_match < 0)
4171                 return -EINVAL;
4172 
4173         cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
4174         if (cmd < 0)
4175                 return -EINVAL;
4176 
4177         read = &nor->params.reads[cmd];
4178         nor->read_opcode = read->opcode;
4179         nor->read_proto = read->proto;
4180 
4181         /*
4182          * In the spi-nor framework, we don't need to distinguish
4183          * between mode clock cycles and wait state clock cycles.
4184          * Indeed, the value of the mode clock cycles is used by a QSPI
4185          * flash memory to know whether it should enter or leave its 0-4-4
4186          * (Continuous Read / XIP) mode.
4187          * eXecution In Place is out of the scope of the mtd sub-system.
4188          * Hence we choose to merge both mode and wait state clock cycles
4189          * into the so-called dummy clock cycles.
4190          */
4191         nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
4192         return 0;
4193 }
4194 
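/*
 * For illustration only, a worked example of the selection done above: with
 * shared_hwcaps = SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |
 * SNOR_HWCAPS_READ_1_1_4, fls() - 1 returns the bit index of
 * SNOR_HWCAPS_READ_1_1_4, i.e. the highest-order (fastest) read capability
 * both sides support; spi_nor_hwcaps_read2cmd() maps it to
 * SNOR_CMD_READ_1_1_4, and the opcode, protocol and dummy cycles are then
 * taken from nor->params.reads[SNOR_CMD_READ_1_1_4].
 */
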
4195 static int spi_nor_select_pp(struct spi_nor *nor,
4196                              u32 shared_hwcaps)
4197 {
4198         int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
4199         const struct spi_nor_pp_command *pp;
4200 
4201         if (best_match < 0)
4202                 return -EINVAL;
4203 
4204         cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
4205         if (cmd < 0)
4206                 return -EINVAL;
4207 
4208         pp = &nor->params.page_programs[cmd];
4209         nor->program_opcode = pp->opcode;
4210         nor->write_proto = pp->proto;
4211         return 0;
4212 }
4213 
4214 /**
4215  * spi_nor_select_uniform_erase() - select optimum uniform erase type
4216  * @map:                the erase map of the SPI NOR
4217  * @wanted_size:        the erase type size to search for. Contains the value of
4218  *                      info->sector_size or of the "small sector" size in case
4219  *                      CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
4220  *
4221  * Once the optimum uniform sector erase command is found, disable all the
4222  * others.
4223  *
4224  * Return: pointer to erase type on success, NULL otherwise.
4225  */
4226 static const struct spi_nor_erase_type *
4227 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
4228                              const u32 wanted_size)
4229 {
4230         const struct spi_nor_erase_type *tested_erase, *erase = NULL;
4231         int i;
4232         u8 uniform_erase_type = map->uniform_erase_type;
4233 
4234         for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
4235                 if (!(uniform_erase_type & BIT(i)))
4236                         continue;
4237 
4238                 tested_erase = &map->erase_type[i];
4239 
4240                 /*
4241                  * If the current erase size is the wanted size, stop here:
4242                  * we have found the right uniform Sector Erase command.
4243                  */
4244                 if (tested_erase->size == wanted_size) {
4245                         erase = tested_erase;
4246                         break;
4247                 }
4248 
4249                 /*
4250                  * Otherwise, the current erase size is still a valid candidate.
4251                  * Select the biggest valid candidate.
4252                  */
4253                 if (!erase && tested_erase->size)
4254                         erase = tested_erase;
4255                         /* keep iterating to find the wanted_size */
4256         }
4257 
4258         if (!erase)
4259                 return NULL;
4260 
4261         /* Disable all other Sector Erase commands. */
4262         map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
4263         map->uniform_erase_type |= BIT(erase - map->erase_type);
4264         return erase;
4265 }
4266 
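/*
 * For illustration only, a worked example of the selection above: on a flash
 * whose map advertises a 4 KiB erase (SPINOR_OP_BE_4K) and a 64 KiB erase
 * (SPINOR_OP_SE), with CONFIG_MTD_SPI_NOR_USE_4K_SECTORS enabled the caller
 * passes wanted_size = 4096, so the 4 KiB type is returned and the 64 KiB
 * type's bit is cleared from map->uniform_erase_type; without that option,
 * wanted_size is info->sector_size (typically 64 KiB) and the larger type
 * wins instead.
 */
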
4267 static int spi_nor_select_erase(struct spi_nor *nor)
4268 {
4269         struct spi_nor_erase_map *map = &nor->params.erase_map;
4270         const struct spi_nor_erase_type *erase = NULL;
4271         struct mtd_info *mtd = &nor->mtd;
4272         u32 wanted_size = nor->info->sector_size;
4273         int i;
4274 
4275         /*
4276          * The previous implementation handling Sector Erase commands assumed
4277          * that the SPI flash memory has a uniform layout and then used only one
4278          * of the supported erase sizes for all Sector Erase commands.
4279          * So to be backward compatible, the new implementation also tries to
4280          * manage the SPI flash memory as uniform with a single erase sector
4281          * size, when possible.
4282          */
4283 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
4284         /* prefer "small sector" erase if possible */
4285         wanted_size = 4096u;
4286 #endif
4287 
4288         if (spi_nor_has_uniform_erase(nor)) {
4289                 erase = spi_nor_select_uniform_erase(map, wanted_size);
4290                 if (!erase)
4291                         return -EINVAL;
4292                 nor->erase_opcode = erase->opcode;
4293                 mtd->erasesize = erase->size;
4294                 return 0;
4295         }
4296 
4297         /*
4298          * For non-uniform SPI flash memory, set mtd->erasesize to the
4299          * maximum erase sector size. No need to set nor->erase_opcode.
4300          */
4301         for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
4302                 if (map->erase_type[i].size) {
4303                         erase = &map->erase_type[i];
4304                         break;
4305                 }
4306         }
4307 
4308         if (!erase)
4309                 return -EINVAL;
4310 
4311         mtd->erasesize = erase->size;
4312         return 0;
4313 }
4314 
4315 static int spi_nor_default_setup(struct spi_nor *nor,
4316                                  const struct spi_nor_hwcaps *hwcaps)
4317 {
4318         struct spi_nor_flash_parameter *params = &nor->params;
4319         u32 ignored_mask, shared_mask;
4320         int err;
4321 
4322         /*
4323          * Keep only the hardware capabilities supported by both the SPI
4324          * controller and the SPI flash memory.
4325          */
4326         shared_mask = hwcaps->mask & params->hwcaps.mask;
4327 
4328         if (nor->spimem) {
4329                 /*
4330                  * When called from spi_nor_probe(), all caps are set and we
4331                  * need to discard some of them based on what the SPI
4332                  * controller actually supports (using spi_mem_supports_op()).
4333                  */
4334                 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
4335         } else {
4336                 /*
4337                  * SPI n-n-n protocols are not supported when the SPI
4338                  * controller directly implements the spi_nor interface.
4339                  * Yet another reason to switch to spi-mem.
4340                  */
4341                 ignored_mask = SNOR_HWCAPS_X_X_X;
4342                 if (shared_mask & ignored_mask) {
4343                         dev_dbg(nor->dev,
4344                                 "SPI n-n-n protocols are not supported.\n");
4345                         shared_mask &= ~ignored_mask;
4346                 }
4347         }
4348 
4349         /* Select the (Fast) Read command. */
4350         err = spi_nor_select_read(nor, shared_mask);
4351         if (err) {
4352                 dev_err(nor->dev,
4353                         "can't select read settings supported by both the SPI controller and memory.\n");
4354                 return err;
4355         }
4356 
4357         /* Select the Page Program command. */
4358         err = spi_nor_select_pp(nor, shared_mask);
4359         if (err) {
4360                 dev_err(nor->dev,
4361                         "can't select write settings supported by both the SPI controller and memory.\n");
4362                 return err;
4363         }
4364 
4365         /* Select the Sector Erase command. */
4366         err = spi_nor_select_erase(nor);
4367         if (err) {
4368                 dev_err(nor->dev,
4369                         "can't select erase settings supported by both the SPI controller and memory.\n");
4370                 return err;
4371         }
4372 
4373         return 0;
4374 }
4375 
4376 static int spi_nor_setup(struct spi_nor *nor,
4377                          const struct spi_nor_hwcaps *hwcaps)
4378 {
4379         if (!nor->params.setup)
4380                 return 0;
4381 
4382         return nor->params.setup(nor, hwcaps);
4383 }
4384 
4385 static void macronix_set_default_init(struct spi_nor *nor)
4386 {
4387         nor->params.quad_enable = macronix_quad_enable;
4388         nor->params.set_4byte = macronix_set_4byte;
4389 }
4390 
4391 static void st_micron_set_default_init(struct spi_nor *nor)
4392 {
4393         nor->flags |= SNOR_F_HAS_LOCK;
4394         nor->params.quad_enable = NULL;
4395         nor->params.set_4byte = st_micron_set_4byte;
4396 }
4397 
4398 static void winbond_set_default_init(struct spi_nor *nor)
4399 {
4400         nor->params.set_4byte = winbond_set_4byte;
4401 }
4402 
4403 /**
4404  * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
4405  * settings based on the MFR register and the ->default_init() hook.
4406  * @nor:        pointer to a 'struct spi_nor'.
4407  */
4408 static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
4409 {
4410         /* Init flash parameters based on MFR */
4411         switch (JEDEC_MFR(nor->info)) {
4412         case SNOR_MFR_MACRONIX:
4413                 macronix_set_default_init(nor);
4414                 break;
4415 
4416         case SNOR_MFR_ST:
4417         case SNOR_MFR_MICRON:
4418                 st_micron_set_default_init(nor);
4419                 break;
4420 
4421         case SNOR_MFR_WINBOND:
4422                 winbond_set_default_init(nor);
4423                 break;
4424 
4425         default:
4426                 break;
4427         }
4428 
4429         if (nor->info->fixups && nor->info->fixups->default_init)
4430                 nor->info->fixups->default_init(nor);
4431 }
4432 
4433 /**
4434  * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
4435  * based on JESD216 SFDP standard.
4436  * @nor:        pointer to a 'struct spi_nor'.
4437  *
4438  * The method has a roll-back mechanism: in case the SFDP parsing fails, the
4439  * legacy flash parameters and settings will be restored.
4440  */
4441 static void spi_nor_sfdp_init_params(struct spi_nor *nor)
4442 {
4443         struct spi_nor_flash_parameter sfdp_params;
4444 
4445         memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params));
4446 
4447         if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
4448                 nor->addr_width = 0;
4449                 nor->flags &= ~SNOR_F_4B_OPCODES;
4450         } else {
4451                 memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
4452         }
4453 }
4454 
4455 /**
4456  * spi_nor_info_init_params() - Initialize the flash's parameters and settings
4457  * based on nor->info data.
4458  * @nor:        pointer to a 'struct spi_nor'.
4459  */
4460 static void spi_nor_info_init_params(struct spi_nor *nor)
4461 {
4462         struct spi_nor_flash_parameter *params = &nor->params;
4463         struct spi_nor_erase_map *map = &params->erase_map;
4464         const struct flash_info *info = nor->info;
4465         struct device_node *np = spi_nor_get_flash_node(nor);
4466         u8 i, erase_mask;
4467 
4468         /* Initialize legacy flash parameters and settings. */
4469         params->quad_enable = spansion_quad_enable;
4470         params->set_4byte = spansion_set_4byte;
4471         params->setup = spi_nor_default_setup;
4472 
4473         /* Set SPI NOR sizes. */
4474         params->size = (u64)info->sector_size * info->n_sectors;
4475         params->page_size = info->page_size;
4476 
4477         if (!(info->flags & SPI_NOR_NO_FR)) {
4478                 /* Default to Fast Read for DT and non-DT platform devices. */
4479                 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
4480 
4481                 /* Mask out Fast Read if not requested at DT instantiation. */
4482                 if (np && !of_property_read_bool(np, "m25p,fast-read"))
4483                         params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
4484         }
4485 
4486         /* (Fast) Read settings. */
4487         params->hwcaps.mask |= SNOR_HWCAPS_READ;
4488         spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
4489                                   0, 0, SPINOR_OP_READ,
4490                                   SNOR_PROTO_1_1_1);
4491 
4492         if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
4493                 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
4494                                           0, 8, SPINOR_OP_READ_FAST,
4495                                           SNOR_PROTO_1_1_1);
4496 
4497         if (info->flags & SPI_NOR_DUAL_READ) {
4498                 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
4499                 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
4500                                           0, 8, SPINOR_OP_READ_1_1_2,
4501                                           SNOR_PROTO_1_1_2);
4502         }
4503 
4504         if (info->flags & SPI_NOR_QUAD_READ) {
4505                 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
4506                 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
4507                                           0, 8, SPINOR_OP_READ_1_1_4,
4508                                           SNOR_PROTO_1_1_4);
4509         }
4510 
4511         if (info->flags & SPI_NOR_OCTAL_READ) {
4512                 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
4513                 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
4514                                           0, 8, SPINOR_OP_READ_1_1_8,
4515                                           SNOR_PROTO_1_1_8);
4516         }
4517 
4518         /* Page Program settings. */
4519         params->hwcaps.mask |= SNOR_HWCAPS_PP;
4520         spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
4521                                 SPINOR_OP_PP, SNOR_PROTO_1_1_1);
4522 
4523         /*
4524          * Sector Erase settings. Sort Erase Types in ascending order, with the
4525          * smallest erase size starting at BIT(0).
4526          */
4527         erase_mask = 0;
4528         i = 0;
4529         if (info->flags & SECT_4K_PMC) {
4530                 erase_mask |= BIT(i);
4531                 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
4532                                        SPINOR_OP_BE_4K_PMC);
4533                 i++;
4534         } else if (info->flags & SECT_4K) {
4535                 erase_mask |= BIT(i);
4536                 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
4537                                        SPINOR_OP_BE_4K);
4538                 i++;
4539         }
4540         erase_mask |= BIT(i);
4541         spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
4542                                SPINOR_OP_SE);
4543         spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
4544 }
4545 
4546 static void spansion_post_sfdp_fixups(struct spi_nor *nor)
4547 {
4548         if (nor->params.size <= SZ_16M)
4549                 return;
4550 
4551         nor->flags |= SNOR_F_4B_OPCODES;
4552         /* No small sector erase for 4-byte command set */
4553         nor->erase_opcode = SPINOR_OP_SE;
4554         nor->mtd.erasesize = nor->info->sector_size;
4555 }
4556 
4557 static void s3an_post_sfdp_fixups(struct spi_nor *nor)
4558 {
4559         nor->params.setup = s3an_nor_setup;
4560 }
4561 
4562 /**
4563  * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
4564  * after SFDP has been parsed (is also called for SPI NORs that do not
4565  * support RDSFDP).
4566  * @nor:        pointer to a 'struct spi_nor'
4567  *
4568  * Typically used to tweak various parameters that could not be extracted by
4569  * other means (i.e. when information provided by the SFDP/flash_info tables
4570  * are incomplete or wrong).
4571  */
4572 static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
4573 {
4574         switch (JEDEC_MFR(nor->info)) {
4575         case SNOR_MFR_SPANSION:
4576                 spansion_post_sfdp_fixups(nor);
4577                 break;
4578 
4579         default:
4580                 break;
4581         }
4582 
4583         if (nor->info->flags & SPI_S3AN)
4584                 s3an_post_sfdp_fixups(nor);
4585 
4586         if (nor->info->fixups && nor->info->fixups->post_sfdp)
4587                 nor->info->fixups->post_sfdp(nor);
4588 }
4589 
4590 /**
4591  * spi_nor_late_init_params() - Late initialization of default flash parameters.
4592  * @nor:        pointer to a 'struct spi_nor'
4593  *
4594  * Used to set default flash parameters and settings when the ->default_init()
4595  * hook or the SFDP parser leave them unset.
4596  */
4597 static void spi_nor_late_init_params(struct spi_nor *nor)
4598 {
4599         /*
4600          * NOR protection support. When locking_ops are not provided, we pick
4601          * the default ones.
4602          */
4603         if (nor->flags & SNOR_F_HAS_LOCK && !nor->params.locking_ops)
4604                 nor->params.locking_ops = &stm_locking_ops;
4605 }
4606 
4607 /**
4608  * spi_nor_init_params() - Initialize the flash's parameters and settings.
4609  * @nor:        pointer to a 'struct spi_nor'.
4610  *
4611  * The flash parameters and settings are initialized based on a sequence of
4612  * calls that are ordered by priority:
4613  *
4614  * 1/ Default flash parameters initialization. The initializations are done
4615  *    based on nor->info data:
4616  *              spi_nor_info_init_params()
4617  *
4618  * which can be overwritten by:
4619  * 2/ Manufacturer flash parameters initialization. The initializations are
4620  *    done based on the MFR register, or when the decision cannot be made solely
4621  *    based on MFR, by using specific flash_info tweaks, ->default_init():
4622  *              spi_nor_manufacturer_init_params()
4623  *
4624  * which can be overwritten by:
4625  * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
4626  *    should be more accurate than the above.
4627  *              spi_nor_sfdp_init_params()
4628  *
4629  *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
4630  *    the flash parameters and settings immediately after parsing the Basic
4631  *    Flash Parameter Table.
4632  *
4633  * which can be overwritten by:
4634  * 4/ Post SFDP flash parameters initialization. Used to tweak various
4635  *    parameters that could not be extracted by other means (i.e. when
4636  *    information provided by the SFDP/flash_info tables is incomplete or
4637  *    wrong).
4638  *              spi_nor_post_sfdp_fixups()
4639  *
4640  * 5/ Late default flash parameters initialization, used when the
4641  * ->default_init() hook or the SFDP parser do not set specific params.
4642  *              spi_nor_late_init_params()
4643  */
4644 static void spi_nor_init_params(struct spi_nor *nor)
4645 {
4646         spi_nor_info_init_params(nor);
4647 
4648         spi_nor_manufacturer_init_params(nor);
4649 
4650         if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
4651             !(nor->info->flags & SPI_NOR_SKIP_SFDP))
4652                 spi_nor_sfdp_init_params(nor);
4653 
4654         spi_nor_post_sfdp_fixups(nor);
4655 
4656         spi_nor_late_init_params(nor);
4657 }
4658 
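/*
 * For illustration only, a hypothetical flash_info fixup showing where the
 * hooks described above plug into this sequence. The example_* names are
 * made up and do not correspond to real flash entries; sr2_bit7_quad_enable
 * is one of the quad-enable helpers defined earlier in this file:
 *
 *	static void example_default_init(struct spi_nor *nor)
 *	{
 *		// step 2: overrides the spi_nor_info_init_params() defaults
 *		nor->params.quad_enable = sr2_bit7_quad_enable;
 *	}
 *
 *	static void example_post_sfdp(struct spi_nor *nor)
 *	{
 *		// step 4: runs after (and can correct) the SFDP parser
 *		nor->flags |= SNOR_F_4B_OPCODES;
 *	}
 *
 *	static struct spi_nor_fixups example_fixups = {
 *		.default_init = example_default_init,
 *		.post_sfdp = example_post_sfdp,
 *	};
 */
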
4659 /**
4660  * spi_nor_quad_enable() - enable Quad I/O if needed.
4661  * @nor:                pointer to a 'struct spi_nor'
4662  *
4663  * Return: 0 on success, -errno otherwise.
4664  */
4665 static int spi_nor_quad_enable(struct spi_nor *nor)
4666 {
4667         if (!nor->params.quad_enable)
4668                 return 0;
4669 
4670         if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
4671               spi_nor_get_protocol_width(nor->write_proto) == 4))
4672                 return 0;
4673 
4674         return nor->params.quad_enable(nor);
4675 }
4676 
4677 static int spi_nor_init(struct spi_nor *nor)
4678 {
4679         int err;
4680 
4681         if (nor->clear_sr_bp) {
4682                 if (nor->params.quad_enable == spansion_quad_enable)
4683                         nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
4684 
4685                 err = nor->clear_sr_bp(nor);
4686                 if (err) {
4687                         dev_err(nor->dev,
4688                                 "fail to clear block protection bits\n");
4689                         return err;
4690                 }
4691         }
4692 
4693         err = spi_nor_quad_enable(nor);
4694         if (err) {
4695                 dev_err(nor->dev, "quad mode not supported\n");
4696                 return err;
4697         }
4698 
4699         if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
4700                 /*
4701                  * If the RESET# pin isn't hooked up properly, or the system
4702                  * otherwise doesn't perform a reset command in the boot
4703                  * sequence, it's impossible to 100% protect against unexpected
4704                  * reboots (e.g., crashes). Warn the user (or hopefully, system
4705                  * designer) that this is bad.
4706                  */
4707                 WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
4708                           "enabling reset hack; may not recover from unexpected reboots\n");
4709                 nor->params.set_4byte(nor, true);
4710         }
4711 
4712         return 0;
4713 }
4714 
4715 /* mtd resume handler */
4716 static void spi_nor_resume(struct mtd_info *mtd)
4717 {
4718         struct spi_nor *nor = mtd_to_spi_nor(mtd);
4719         struct device *dev = nor->dev;
4720         int ret;
4721 
4722         /* re-initialize the nor chip */
4723         ret = spi_nor_init(nor);
4724         if (ret)
4725                 dev_err(dev, "resume() failed\n");
4726 }
4727 
4728 void spi_nor_restore(struct spi_nor *nor)
4729 {
4730         /* restore the addressing mode */
4731         if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
4732             nor->flags & SNOR_F_BROKEN_RESET)
4733                 nor->params.set_4byte(nor, false);
4734 }
4735 EXPORT_SYMBOL_GPL(spi_nor_restore);
4736 
4737 static const struct flash_info *spi_nor_match_id(const char *name)
4738 {
4739         const struct flash_info *id = spi_nor_ids;
4740 
4741         while (id->name) {
4742                 if (!strcmp(name, id->name))
4743                         return id;
4744                 id++;
4745         }
4746         return NULL;
4747 }
4748 
4749 static int spi_nor_set_addr_width(struct spi_nor *nor)
4750 {
4751         if (nor->addr_width) {
4752                 /* already configured from SFDP */
4753         } else if (nor->info->addr_width) {
4754                 nor->addr_width = nor->info->addr_width;
4755         } else if (nor->mtd.size > 0x1000000) {
4756                 /* enable 4-byte addressing if the device exceeds 16MiB */
4757                 nor->addr_width = 4;
4758         } else {
4759                 nor->addr_width = 3;
4760         }
4761 
4762         if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4763                 dev_err(nor->dev, "address width is too large: %u\n",
4764                         nor->addr_width);
4765                 return -EINVAL;
4766         }
4767 
4768         /* Set 4byte opcodes when possible. */
4769         if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
4770             !(nor->flags & SNOR_F_HAS_4BAIT))
4771                 spi_nor_set_4byte_opcodes(nor);
4772 
4773         return 0;
4774 }
4775 
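/*
 * For illustration only, how the precedence above plays out: a 32 MiB part
 * with no SFDP-provided addr_width and no info->addr_width ends up with
 * nor->addr_width = 4 because mtd.size exceeds 16 MiB (0x1000000), while an
 * 8 MiB part falls back to the default of 3; anything larger than
 * SPI_NOR_MAX_ADDR_WIDTH is rejected with -EINVAL.
 */
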
4776 static void spi_nor_debugfs_init(struct spi_nor *nor,
4777                                  const struct flash_info *info)
4778 {
4779         struct mtd_info *mtd = &nor->mtd;
4780 
4781         mtd->dbg.partname = info->name;
4782         mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
4783                                          info->id_len, info->id);
4784 }
4785 
4786 static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
4787                                                        const char *name)
4788 {
4789         const struct flash_info *info = NULL;
4790 
4791         if (name)
4792                 info = spi_nor_match_id(name);
4793         /* Try to auto-detect if chip name wasn't specified or not found */
4794         if (!info)
4795                 info = spi_nor_read_id(nor);
4796         if (IS_ERR_OR_NULL(info))
4797                 return ERR_PTR(-ENOENT);
4798 
4799         /*
4800          * If the caller has specified the name of a flash model that can
4801          * normally be detected using JEDEC, let's verify it.
4802          */
4803         if (name && info->id_len) {
4804                 const struct flash_info *jinfo;
4805 
4806                 jinfo = spi_nor_read_id(nor);
4807                 if (IS_ERR(jinfo)) {
4808                         return jinfo;
4809                 } else if (jinfo != info) {
4810                         /*
4811                          * JEDEC knows better, so overwrite platform ID. We
4812                          * can't trust partitions any longer, but we'll let
4813                          * mtd apply them anyway, since some partitions may be
4814                          * marked read-only, and we don't want to lose that
4815                          * information, even if it's not 100% accurate.
4816                          */
4817                         dev_warn(nor->dev, "found %s, expected %s\n",
4818                                  jinfo->name, info->name);
4819                         info = jinfo;
4820                 }
4821         }
4822 
4823         return info;
4824 }
4825 
4826 int spi_nor_scan(struct spi_nor *nor, const char *name,
4827                  const struct spi_nor_hwcaps *hwcaps)
4828 {
4829         const struct flash_info *info;
4830         struct device *dev = nor->dev;
4831         struct mtd_info *mtd = &nor->mtd;
4832         struct device_node *np = spi_nor_get_flash_node(nor);
4833         struct spi_nor_flash_parameter *params = &nor->params;
4834         int ret;
4835         int i;
4836 
4837         ret = spi_nor_check(nor);
4838         if (ret)
4839                 return ret;
4840 
4841         /* Reset SPI protocol for all commands. */
4842         nor->reg_proto = SNOR_PROTO_1_1_1;
4843         nor->read_proto = SNOR_PROTO_1_1_1;
4844         nor->write_proto = SNOR_PROTO_1_1_1;
4845 
4846         /*
4847          * We need the bounce buffer early to read/write registers when going
4848          * through the spi-mem layer (buffers have to be DMA-able).
4849          * For spi-mem drivers, we'll reallocate a new buffer if
4850          * nor->page_size turns out to be greater than PAGE_SIZE (which
4851          * shouldn't happen anytime soon since NOR pages are usually less
4852          * than 1KB) after spi_nor_scan() returns.
4853          */
4854         nor->bouncebuf_size = PAGE_SIZE;
4855         nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
4856                                       GFP_KERNEL);
4857         if (!nor->bouncebuf)
4858                 return -ENOMEM;
4859 
4860         info = spi_nor_get_flash_info(nor, name);
4861         if (IS_ERR(info))
4862                 return PTR_ERR(info);
4863 
4864         nor->info = info;
4865 
4866         spi_nor_debugfs_init(nor, info);
4867 
4868         mutex_init(&nor->lock);
4869 
4870         /*
4871          * Make sure the XSR_RDY flag is set before calling
4872          * spi_nor_wait_till_ready(): Xilinx S3AN flashes share their MFR ID
4873          * with Atmel SPI NOR flashes.
4874          */
4875         if (info->flags & SPI_NOR_XSR_RDY)
4876                 nor->flags |=  SNOR_F_READY_XSR_RDY;
4877 
4878         if (info->flags & SPI_NOR_HAS_LOCK)
4879                 nor->flags |= SNOR_F_HAS_LOCK;
4880 
4881         /*
4882          * Atmel, SST, Intel/Numonyx, and other serial NOR flashes tend to power up
4883          * with the software protection bits set.
4884          */
4885         if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
4886             JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
4887             JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
4888             nor->info->flags & SPI_NOR_HAS_LOCK)
4889                 nor->clear_sr_bp = spi_nor_clear_sr_bp;
4890 
4891         /* Init flash parameters based on flash_info struct and SFDP */
4892         spi_nor_init_params(nor);
4893 
4894         if (!mtd->name)
4895                 mtd->name = dev_name(dev);
4896         mtd->priv = nor;
4897         mtd->type = MTD_NORFLASH;
4898         mtd->writesize = 1;
4899         mtd->flags = MTD_CAP_NORFLASH;
4900         mtd->size = params->size;
4901         mtd->_erase = spi_nor_erase;
4902         mtd->_read = spi_nor_read;
4903         mtd->_resume = spi_nor_resume;
4904 
4905         if (nor->params.locking_ops) {
4906                 mtd->_lock = spi_nor_lock;
4907                 mtd->_unlock = spi_nor_unlock;
4908                 mtd->_is_locked = spi_nor_is_locked;
4909         }
4910 
4911         /* SST NOR chips use AAI word program */
4912         if (info->flags & SST_WRITE)
4913                 mtd->_write = sst_write;
4914         else
4915                 mtd->_write = spi_nor_write;
4916 
4917         if (info->flags & USE_FSR)
4918                 nor->flags |= SNOR_F_USE_FSR;
4919         if (info->flags & SPI_NOR_HAS_TB)
4920                 nor->flags |= SNOR_F_HAS_SR_TB;
4921         if (info->flags & NO_CHIP_ERASE)
4922                 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
4923         if (info->flags & USE_CLSR)
4924                 nor->flags |= SNOR_F_USE_CLSR;
4925 
4926         if (info->flags & SPI_NOR_NO_ERASE)
4927                 mtd->flags |= MTD_NO_ERASE;
4928 
4929         mtd->dev.parent = dev;
4930         nor->page_size = params->page_size;
4931         mtd->writebufsize = nor->page_size;
4932 
4933         if (of_property_read_bool(np, "broken-flash-reset"))
4934                 nor->flags |= SNOR_F_BROKEN_RESET;
4935 
4936         /*
4937          * Configure the SPI memory:
4938          * - select op codes for (Fast) Read, Page Program and Sector Erase.
4939          * - set the number of dummy cycles (mode cycles + wait states).
4940          * - set the SPI protocols for register and memory accesses.
4941          */
4942         ret = spi_nor_setup(nor, hwcaps);
4943         if (ret)
4944                 return ret;
4945 
4946         if (info->flags & SPI_NOR_4B_OPCODES)
4947                 nor->flags |= SNOR_F_4B_OPCODES;
4948 
4949         ret = spi_nor_set_addr_width(nor);
4950         if (ret)
4951                 return ret;
4952 
4953         /* Send all the required SPI flash commands to initialize device */
4954         ret = spi_nor_init(nor);
4955         if (ret)
4956                 return ret;
4957 
4958         dev_info(dev, "%s (%lld Kbytes)\n", info->name,
4959                         (long long)mtd->size >> 10);
4960 
4961         dev_dbg(dev,
4962                 "mtd .name = %s, .size = 0x%llx (%lldMiB), "
4963                 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
4964                 mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
4965                 mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
4966 
4967         if (mtd->numeraseregions)
4968                 for (i = 0; i < mtd->numeraseregions; i++)
4969                         dev_dbg(dev,
4970                                 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
4971                                 ".erasesize = 0x%.8x (%uKiB), "
4972                                 ".numblocks = %d }\n",
4973                                 i, (long long)mtd->eraseregions[i].offset,
4974                                 mtd->eraseregions[i].erasesize,
4975                                 mtd->eraseregions[i].erasesize / 1024,
4976                                 mtd->eraseregions[i].numblocks);
4977         return 0;
4978 }
4979 EXPORT_SYMBOL_GPL(spi_nor_scan);
4980 
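/*
 * For illustration only, a minimal sketch of how a controller driver that
 * does not go through spi-mem might bring a flash up with this API. The
 * my_* names are hypothetical and error handling is omitted; the legacy
 * read/write/read_reg/write_reg hooks on struct spi_nor must be provided
 * by the controller before scanning:
 *
 *	static const struct spi_nor_hwcaps my_hwcaps = {
 *		.mask = SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |
 *			SNOR_HWCAPS_READ_1_1_4 | SNOR_HWCAPS_PP,
 *	};
 *
 *	nor->dev = dev;
 *	nor->priv = my_controller;
 *	nor->read = my_read;
 *	nor->write = my_write;
 *	nor->read_reg = my_read_reg;
 *	nor->write_reg = my_write_reg;
 *
 *	ret = spi_nor_scan(nor, NULL, &my_hwcaps);
 *	if (!ret)
 *		ret = mtd_device_register(&nor->mtd, NULL, 0);
 */
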
4981 static int spi_nor_probe(struct spi_mem *spimem)
4982 {
4983         struct spi_device *spi = spimem->spi;
4984         struct flash_platform_data *data = dev_get_platdata(&spi->dev);
4985         struct spi_nor *nor;
4986         /*
4987          * Enable all caps by default. The core will mask them after
4988          * checking what's really supported using spi_mem_supports_op().
4989          */
4990         const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
4991         char *flash_name;
4992         int ret;
4993 
4994         nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
4995         if (!nor)
4996                 return -ENOMEM;
4997 
4998         nor->spimem = spimem;
4999         nor->dev = &spi->dev;
5000         spi_nor_set_flash_node(nor, spi->dev.of_node);
5001 
5002         spi_mem_set_drvdata(spimem, nor);
5003 
5004         if (data && data->name)
5005                 nor->mtd.name = data->name;
5006 
5007         if (!nor->mtd.name)
5008                 nor->mtd.name = spi_mem_get_name(spimem);
5009 
5010         /*
5011          * For some (historical?) reason many platforms provide two different
5012          * names in flash_platform_data: "name" and "type". Quite often name is
5013          * set to "m25p80" and then "type" provides a real chip name.
5014          * If that's the case, respect "type" and ignore a "name".
5015          */
5016         if (data && data->type)
5017                 flash_name = data->type;
5018         else if (!strcmp(spi->modalias, "spi-nor"))
5019                 flash_name = NULL; /* auto-detect */
5020         else
5021                 flash_name = spi->modalias;
5022 
5023         ret = spi_nor_scan(nor, flash_name, &hwcaps);
5024         if (ret)
5025                 return ret;
5026 
5027         /*
5028          * None of the existing parts have > 512B pages, but let's play safe
5029          * and add this logic so that if anyone ever adds support for such
5030          * a NOR we don't end up with buffer overflows.
5031          */
5032         if (nor->page_size > PAGE_SIZE) {
5033                 nor->bouncebuf_size = nor->page_size;
5034                 devm_kfree(nor->dev, nor->bouncebuf);
5035                 nor->bouncebuf = devm_kmalloc(nor->dev,
5036                                               nor->bouncebuf_size,
5037                                               GFP_KERNEL);
5038                 if (!nor->bouncebuf)
5039                         return -ENOMEM;
5040         }
5041 
5042         return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
5043                                    data ? data->nr_parts : 0);
5044 }
5045 
5046 static int spi_nor_remove(struct spi_mem *spimem)
5047 {
5048         struct spi_nor *nor = spi_mem_get_drvdata(spimem);
5049 
5050         spi_nor_restore(nor);
5051 
5052         /* Clean up MTD stuff. */
5053         return mtd_device_unregister(&nor->mtd);
5054 }
5055 
5056 static void spi_nor_shutdown(struct spi_mem *spimem)
5057 {
5058         struct spi_nor *nor = spi_mem_get_drvdata(spimem);
5059 
5060         spi_nor_restore(nor);
5061 }
5062 
5063 /*
5064  * Do NOT add to this array without reading the following:
5065  *
5066  * Historically, many flash devices are bound to this driver by their name. But
5067  * since most of these flashes are compatible to some extent, and can often
5068  * be distinguished by the JEDEC read-ID command, we
5069  * encourage new users to add support to the spi-nor library, and simply bind
5070  * against a generic string here (e.g., "jedec,spi-nor").
5071  *
5072  * Many flash names are kept here in this list (as well as in spi-nor.c) to
5073  * keep them available as module aliases for existing platforms.
5074  */
5075 static const struct spi_device_id spi_nor_dev_ids[] = {
5076         /*
5077          * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
5078          * hack around the fact that the SPI core does not provide uevent
5079          * matching for .of_match_table
5080          */
5081         {"spi-nor"},
5082 
5083         /*
5084          * Entries not used in DTs that should be safe to drop after replacing
5085          * them with "spi-nor" in platform data.
5086          */
5087         {"s25sl064a"},  {"w25x16"},     {"m25p10"},     {"m25px64"},
5088 
5089         /*
5090          * Entries that were used in DTs without "jedec,spi-nor" fallback and
5091          * should be kept for backward compatibility.
5092          */
5093         {"at25df321a"}, {"at25df641"},  {"at26df081a"},
5094         {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
5095         {"mx25l25635e"},{"mx66l51235l"},
5096         {"n25q064"},    {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
5097         {"s25fl256s1"}, {"s25fl512s"},  {"s25sl12801"}, {"s25fl008k"},
5098         {"s25fl064k"},
5099         {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
5100         {"m25p40"},     {"m25p80"},     {"m25p16"},     {"m25p32"},
5101         {"m25p64"},     {"m25p128"},
5102         {"w25x80"},     {"w25x32"},     {"w25q32"},     {"w25q32dw"},
5103         {"w25q80bl"},   {"w25q128"},    {"w25q256"},
5104 
5105         /* Flashes that can't be detected using JEDEC */
5106         {"m25p05-nonjedec"},    {"m25p10-nonjedec"},    {"m25p20-nonjedec"},
5107         {"m25p40-nonjedec"},    {"m25p80-nonjedec"},    {"m25p16-nonjedec"},
5108         {"m25p32-nonjedec"},    {"m25p64-nonjedec"},    {"m25p128-nonjedec"},
5109 
5110         /* Everspin MRAMs (non-JEDEC) */
5111         { "mr25h128" }, /* 128 Kib, 40 MHz */
5112         { "mr25h256" }, /* 256 Kib, 40 MHz */
5113         { "mr25h10" },  /*   1 Mib, 40 MHz */
5114         { "mr25h40" },  /*   4 Mib, 40 MHz */
5115 
5116         { },
5117 };
5118 MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
5119 
5120 static const struct of_device_id spi_nor_of_table[] = {
5121         /*
5122          * Generic compatibility for SPI NOR that can be identified by the
5123          * JEDEC READ ID opcode (0x9F). Use this, if possible.
5124          */
5125         { .compatible = "jedec,spi-nor" },
5126         { /* sentinel */ },
5127 };
5128 MODULE_DEVICE_TABLE(of, spi_nor_of_table);
5129 
5130 /*
5131  * REVISIT: many of these chips have deep power-down modes, which
5132  * should clearly be entered on suspend() to minimize power use.
5133  * And also when they're otherwise idle...
5134  */
5135 static struct spi_mem_driver spi_nor_driver = {
5136         .spidrv = {
5137                 .driver = {
5138                         .name = "spi-nor",
5139                         .of_match_table = spi_nor_of_table,
5140                 },
5141                 .id_table = spi_nor_dev_ids,
5142         },
5143         .probe = spi_nor_probe,
5144         .remove = spi_nor_remove,
5145         .shutdown = spi_nor_shutdown,
5146 };
5147 module_spi_mem_driver(spi_nor_driver);
5148 
5149 MODULE_LICENSE("GPL v2");
5150 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
5151 MODULE_AUTHOR("Mike Lavender");
5152 MODULE_DESCRIPTION("framework for SPI NOR");
