This source file includes following definitions.
- spi_nor_spimem_xfer_data
- spi_nor_spimem_read_data
- spi_nor_read_data
- spi_nor_spimem_write_data
- spi_nor_write_data
- read_sr
- read_fsr
- read_cr
- write_sr
- write_enable
- write_disable
- mtd_to_spi_nor
- spi_nor_convert_opcode
- spi_nor_convert_3to4_read
- spi_nor_convert_3to4_program
- spi_nor_convert_3to4_erase
- spi_nor_set_4byte_opcodes
- macronix_set_4byte
- st_micron_set_4byte
- spansion_set_4byte
- spi_nor_write_ear
- winbond_set_4byte
- spi_nor_xread_sr
- s3an_sr_ready
- spi_nor_clear_sr
- spi_nor_sr_ready
- spi_nor_clear_fsr
- spi_nor_fsr_ready
- spi_nor_ready
- spi_nor_wait_till_ready_with_timeout
- spi_nor_wait_till_ready
- erase_chip
- spi_nor_lock_and_prep
- spi_nor_unlock_and_unprep
- s3an_convert_addr
- spi_nor_convert_addr
- spi_nor_erase_sector
- spi_nor_div_by_erase_size
- spi_nor_find_best_erase_type
- spi_nor_region_next
- spi_nor_find_erase_region
- spi_nor_init_erase_cmd
- spi_nor_destroy_erase_cmd_list
- spi_nor_init_erase_cmd_list
- spi_nor_erase_multi_sectors
- spi_nor_erase
- write_sr_and_check
- stm_get_locked_range
- stm_check_lock_status_sr
- stm_is_locked_sr
- stm_is_unlocked_sr
- stm_lock
- stm_unlock
- stm_is_locked
- spi_nor_lock
- spi_nor_unlock
- spi_nor_is_locked
- write_sr_cr
- macronix_quad_enable
- spansion_quad_enable
- spansion_no_read_cr_quad_enable
- spansion_read_cr_quad_enable
- spi_nor_write_sr2
- spi_nor_read_sr2
- sr2_bit7_quad_enable
- spi_nor_clear_sr_bp
- spi_nor_spansion_clear_sr_bp
- is25lp256_post_bfpt_fixups
- mx25l25635_post_bfpt_fixups
- gd25q256_default_init
- spi_nor_read_id
- spi_nor_read
- sst_write
- spi_nor_write
- spi_nor_check
- s3an_nor_setup
- spi_nor_set_read_settings
- spi_nor_set_pp_settings
- spi_nor_hwcaps2cmd
- spi_nor_hwcaps_read2cmd
- spi_nor_hwcaps_pp2cmd
- spi_nor_read_raw
- spi_nor_read_sfdp
- spi_nor_spimem_check_op
- spi_nor_spimem_check_readop
- spi_nor_spimem_check_pp
- spi_nor_spimem_adjust_hwcaps
- spi_nor_read_sfdp_dma_unsafe
- spi_nor_set_read_settings_from_bfpt
- spi_nor_set_erase_type
- spi_nor_set_erase_settings_from_bfpt
- spi_nor_map_cmp_erase_type
- spi_nor_sort_erase_mask
- spi_nor_regions_sort_erase_types
- spi_nor_init_uniform_erase_map
- spi_nor_post_bfpt_fixups
- spi_nor_parse_bfpt
- spi_nor_smpt_addr_width
- spi_nor_smpt_read_dummy
- spi_nor_get_map_in_use
- spi_nor_region_check_overlay
- spi_nor_init_non_uniform_erase_map
- spi_nor_parse_smpt
- spi_nor_parse_4bait
- spi_nor_parse_sfdp
- spi_nor_select_read
- spi_nor_select_pp
- spi_nor_select_uniform_erase
- spi_nor_select_erase
- spi_nor_default_setup
- spi_nor_setup
- macronix_set_default_init
- st_micron_set_default_init
- winbond_set_default_init
- spi_nor_manufacturer_init_params
- spi_nor_sfdp_init_params
- spi_nor_info_init_params
- spansion_post_sfdp_fixups
- s3an_post_sfdp_fixups
- spi_nor_post_sfdp_fixups
- spi_nor_late_init_params
- spi_nor_init_params
- spi_nor_quad_enable
- spi_nor_init
- spi_nor_resume
- spi_nor_restore
- spi_nor_match_id
- spi_nor_set_addr_width
- spi_nor_debugfs_init
- spi_nor_get_flash_info
- spi_nor_scan
- spi_nor_probe
- spi_nor_remove
- spi_nor_shutdown
1
2
3
4
5
6
7
8
9
10 #include <linux/err.h>
11 #include <linux/errno.h>
12 #include <linux/module.h>
13 #include <linux/device.h>
14 #include <linux/mutex.h>
15 #include <linux/math64.h>
16 #include <linux/sizes.h>
17 #include <linux/slab.h>
18 #include <linux/sort.h>
19
20 #include <linux/mtd/mtd.h>
21 #include <linux/of_platform.h>
22 #include <linux/sched/task_stack.h>
23 #include <linux/spi/flash.h>
24 #include <linux/mtd/spi-nor.h>
25
26
27
28
29
30
31
32 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
33
34
35
36
37
38 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
39
40 #define SPI_NOR_MAX_ID_LEN 6
41 #define SPI_NOR_MAX_ADDR_WIDTH 4
42
/*
 * SFDP (JESD216) parameter table header. The 24-bit table pointer is
 * stored little-endian across parameter_table_pointer[0..2] (see
 * SFDP_PARAM_HEADER_PTP below); the table id is split across id_lsb/id_msb
 * (see SFDP_PARAM_HEADER_ID).
 */
struct sfdp_parameter_header {
	u8		id_lsb;
	u8		minor;
	u8		major;
	u8		length;	/* table length, presumably in DWORDs — per JESD216 */
	u8		parameter_table_pointer[3];	/* byte address, little-endian */
	u8		id_msb;
};
51
52 #define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
53 #define SFDP_PARAM_HEADER_PTP(p) \
54 (((p)->parameter_table_pointer[2] << 16) | \
55 ((p)->parameter_table_pointer[1] << 8) | \
56 ((p)->parameter_table_pointer[0] << 0))
57
58 #define SFDP_BFPT_ID 0xff00
59 #define SFDP_SECTOR_MAP_ID 0xff81
60 #define SFDP_4BAIT_ID 0xff84
61
62 #define SFDP_SIGNATURE 0x50444653U
63 #define SFDP_JESD216_MAJOR 1
64 #define SFDP_JESD216_MINOR 0
65 #define SFDP_JESD216A_MINOR 5
66 #define SFDP_JESD216B_MINOR 6
67
/*
 * Top-level SFDP header, read from the start of the SFDP area. It is
 * checked against SFDP_SIGNATURE and immediately followed by the
 * mandatory Basic Flash Parameter Table header.
 */
struct sfdp_header {
	u32		signature;	/* expected: SFDP_SIGNATURE ("SFDP") */
	u8		minor;
	u8		major;
	u8		nph;		/* 0-based number of parameter headers */
	u8		unused;

	/* Basic Flash Parameter Table header: always present, always first. */
	struct sfdp_parameter_header	bfpt_header;
};
78
79
80
81
82
83
84
85 #define BFPT_DWORD(i) ((i) - 1)
86 #define BFPT_DWORD_MAX 16
87
88
89 #define BFPT_DWORD_MAX_JESD216 9
90
91
92 #define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
93 #define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
94 #define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
95 #define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
96 #define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
97 #define BFPT_DWORD1_DTR BIT(19)
98 #define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
99 #define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
100 #define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
101
102
103 #define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
104 #define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
105
106
107 #define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
108 #define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143 #define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
144 #define BFPT_DWORD15_QER_NONE (0x0UL << 20)
145 #define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
146 #define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20)
147 #define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
148 #define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
149 #define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20)
150
/* In-memory copy of the Basic Flash Parameter Table (up to 16 DWORDs). */
struct sfdp_bfpt {
	u32	dwords[BFPT_DWORD_MAX];
};
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
/*
 * struct spi_nor_fixups - per-flash hooks to patch generic behavior.
 * @default_init: called before SFDP parsing to seed default parameters.
 * @post_bfpt: called after the BFPT has been parsed, with the raw table,
 *             to override anything the table got wrong. Returns -errno on
 *             failure.
 * @post_sfdp: called after all SFDP tables have been parsed.
 */
struct spi_nor_fixups {
	void (*default_init)(struct spi_nor *nor);
	int (*post_bfpt)(struct spi_nor *nor,
			 const struct sfdp_parameter_header *bfpt_header,
			 const struct sfdp_bfpt *bfpt,
			 struct spi_nor_flash_parameter *params);
	void (*post_sfdp)(struct spi_nor *nor);
};
178
/*
 * struct flash_info - static description of one supported flash part,
 * matched against the id bytes read from the chip at probe time
 * (JEDEC_MFR() extracts the manufacturer byte, id[0]).
 */
struct flash_info {
	char		*name;

	/*
	 * Device id bytes; id_len gives how many are significant.
	 * An id_len of zero presumably means "match by name only" —
	 * confirm against the table users outside this chunk.
	 */
	u8		id[SPI_NOR_MAX_ID_LEN];
	u8		id_len;

	/* Erase-sector geometry: total size = sector_size * n_sectors. */
	unsigned	sector_size;
	u16		n_sectors;

	u16		page_size;
	u16		addr_width;

	u16		flags;
#define SECT_4K			BIT(0)	/* 4 KiB erase (SPINOR_OP_BE_4K) usable */
#define SPI_NOR_NO_ERASE	BIT(1)	/* no erase command needed */
#define SST_WRITE		BIT(2)	/* use SST byte/AAI programming */
#define SPI_NOR_NO_FR		BIT(3)	/* fast read not supported */
#define SECT_4K_PMC		BIT(4)	/* PMC-specific 4 KiB erase opcode */
#define SPI_NOR_DUAL_READ	BIT(5)	/* dual-output read supported */
#define SPI_NOR_QUAD_READ	BIT(6)	/* quad-output read supported */
#define USE_FSR			BIT(7)	/* poll the Flag Status Register */
#define SPI_NOR_HAS_LOCK	BIT(8)	/* SR block-protect lock supported */
#define SPI_NOR_HAS_TB		BIT(9)	/* SR has a Top/Bottom protect bit */

	/*
	 * NOTE(review): SPI_NOR_XSR_RDY and SPI_S3AN both use BIT(10).
	 * This looks deliberate (both only apply to Xilinx S3AN parts),
	 * but confirm before adding any new BIT(10) user.
	 */
#define SPI_NOR_XSR_RDY	BIT(10)	/* poll readiness via XRDSR (s3an_sr_ready) */

#define SPI_S3AN	BIT(10)	/*
				 * Xilinx Spartan-3AN in-system flash; cannot
				 * be probed by manufacturer id alone.
				 */

	/* Use dedicated 4-byte-address opcodes (>128 Mib parts). */
#define SPI_NOR_4B_OPCODES	BIT(11)

#define NO_CHIP_ERASE		BIT(12)	/* chip-erase opcode not supported */
#define SPI_NOR_SKIP_SFDP	BIT(13)	/* do not parse SFDP tables */
#define USE_CLSR		BIT(14)	/* clear SR errors with SPINOR_OP_CLSR */
#define SPI_NOR_OCTAL_READ	BIT(15)	/* octal-output read supported */

	/* Optional part-specific fixup hooks (see struct spi_nor_fixups). */
	const struct spi_nor_fixups *fixups;
};
240
241 #define JEDEC_MFR(info) ((info)->id[0])
242
243
244
245
246
247
248
249
250
/*
 * spi_nor_spimem_xfer_data() - execute a spi-mem data transfer, bouncing
 * the data through nor->bouncebuf when the caller's buffer is not DMA-able.
 * @nor:	pointer to 'struct spi_nor'
 * @op:		spi-mem operation template; op->data.nbytes may be shrunk
 *		by the bounce buffer size and/or the controller.
 *
 * Return: number of bytes actually transferred on success, -errno otherwise.
 */
static ssize_t spi_nor_spimem_xfer_data(struct spi_nor *nor,
					struct spi_mem_op *op)
{
	bool usebouncebuf = false;
	void *rdbuf = NULL;
	const void *buf;
	int ret;

	if (op->data.dir == SPI_MEM_DATA_IN)
		buf = op->data.buf.in;
	else
		buf = op->data.buf.out;

	/* On-stack or vmalloc'ed buffers cannot be DMA-mapped directly. */
	if (object_is_on_stack(buf) || !virt_addr_valid(buf))
		usebouncebuf = true;

	if (usebouncebuf) {
		/* Clamp the transfer to what the bounce buffer can hold. */
		if (op->data.nbytes > nor->bouncebuf_size)
			op->data.nbytes = nor->bouncebuf_size;

		if (op->data.dir == SPI_MEM_DATA_IN) {
			/* Remember the caller's buffer for the copy-back. */
			rdbuf = op->data.buf.in;
			op->data.buf.in = nor->bouncebuf;
		} else {
			op->data.buf.out = nor->bouncebuf;
			memcpy(nor->bouncebuf, buf,
			       op->data.nbytes);
		}
	}

	/* Let the controller shrink the op to its own limits. */
	ret = spi_mem_adjust_op_size(nor->spimem, op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(nor->spimem, op);
	if (ret)
		return ret;

	if (usebouncebuf && op->data.dir == SPI_MEM_DATA_IN)
		memcpy(rdbuf, nor->bouncebuf, op->data.nbytes);

	/* Short transfers are legal; report what actually moved. */
	return op->data.nbytes;
}
294
295
296
297
298
299
300
301
302
303
304
/*
 * spi_nor_spimem_read_data() - read flash memory through the spi-mem API.
 * @nor:	pointer to 'struct spi_nor'
 * @from:	flash offset to read from
 * @len:	number of bytes requested
 * @buf:	destination buffer
 *
 * Return: number of bytes read on success (may be less than @len),
 * -errno otherwise.
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
			   SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 1));

	/* Widths come from the negotiated read protocol, not the template. */
	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
	op.dummy.buswidth = op.addr.buswidth;
	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;

	return spi_nor_spimem_xfer_data(nor, &op);
}
325
326
327
328
329
330
331
332
333
334
335 static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
336 u8 *buf)
337 {
338 if (nor->spimem)
339 return spi_nor_spimem_read_data(nor, from, len, buf);
340
341 return nor->read(nor, from, len, buf);
342 }
343
344
345
346
347
348
349
350
351
352
353
/*
 * spi_nor_spimem_write_data() - program flash memory through spi-mem.
 * @nor:	pointer to 'struct spi_nor'
 * @to:		flash offset to write to
 * @len:	number of bytes requested
 * @buf:	source buffer
 *
 * Return: number of bytes written on success (may be less than @len),
 * -errno otherwise.
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
			   SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 1));

	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	/* SST AAI word programming: only the first word carries an address. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	return spi_nor_spimem_xfer_data(nor, &op);
}
372
373
374
375
376
377
378
379
380
381
382 static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
383 const u8 *buf)
384 {
385 if (nor->spimem)
386 return spi_nor_spimem_write_data(nor, to, len, buf);
387
388 return nor->write(nor, to, len, buf);
389 }
390
391
392
393
394
395
396 static int read_sr(struct spi_nor *nor)
397 {
398 int ret;
399
400 if (nor->spimem) {
401 struct spi_mem_op op =
402 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
403 SPI_MEM_OP_NO_ADDR,
404 SPI_MEM_OP_NO_DUMMY,
405 SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
406
407 ret = spi_mem_exec_op(nor->spimem, &op);
408 } else {
409 ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, 1);
410 }
411
412 if (ret < 0) {
413 pr_err("error %d reading SR\n", (int) ret);
414 return ret;
415 }
416
417 return nor->bouncebuf[0];
418 }
419
420
421
422
423
424
425 static int read_fsr(struct spi_nor *nor)
426 {
427 int ret;
428
429 if (nor->spimem) {
430 struct spi_mem_op op =
431 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
432 SPI_MEM_OP_NO_ADDR,
433 SPI_MEM_OP_NO_DUMMY,
434 SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
435
436 ret = spi_mem_exec_op(nor->spimem, &op);
437 } else {
438 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, 1);
439 }
440
441 if (ret < 0) {
442 pr_err("error %d reading FSR\n", ret);
443 return ret;
444 }
445
446 return nor->bouncebuf[0];
447 }
448
449
450
451
452
453
454 static int read_cr(struct spi_nor *nor)
455 {
456 int ret;
457
458 if (nor->spimem) {
459 struct spi_mem_op op =
460 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
461 SPI_MEM_OP_NO_ADDR,
462 SPI_MEM_OP_NO_DUMMY,
463 SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
464
465 ret = spi_mem_exec_op(nor->spimem, &op);
466 } else {
467 ret = nor->read_reg(nor, SPINOR_OP_RDCR, nor->bouncebuf, 1);
468 }
469
470 if (ret < 0) {
471 dev_err(nor->dev, "error %d reading CR\n", ret);
472 return ret;
473 }
474
475 return nor->bouncebuf[0];
476 }
477
478
479
480
481
482 static int write_sr(struct spi_nor *nor, u8 val)
483 {
484 nor->bouncebuf[0] = val;
485 if (nor->spimem) {
486 struct spi_mem_op op =
487 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
488 SPI_MEM_OP_NO_ADDR,
489 SPI_MEM_OP_NO_DUMMY,
490 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
491
492 return spi_mem_exec_op(nor->spimem, &op);
493 }
494
495 return nor->write_reg(nor, SPINOR_OP_WRSR, nor->bouncebuf, 1);
496 }
497
498
499
500
501
502 static int write_enable(struct spi_nor *nor)
503 {
504 if (nor->spimem) {
505 struct spi_mem_op op =
506 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
507 SPI_MEM_OP_NO_ADDR,
508 SPI_MEM_OP_NO_DUMMY,
509 SPI_MEM_OP_NO_DATA);
510
511 return spi_mem_exec_op(nor->spimem, &op);
512 }
513
514 return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
515 }
516
517
518
519
520 static int write_disable(struct spi_nor *nor)
521 {
522 if (nor->spimem) {
523 struct spi_mem_op op =
524 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
525 SPI_MEM_OP_NO_ADDR,
526 SPI_MEM_OP_NO_DUMMY,
527 SPI_MEM_OP_NO_DATA);
528
529 return spi_mem_exec_op(nor->spimem, &op);
530 }
531
532 return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
533 }
534
535 static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
536 {
537 return mtd->priv;
538 }
539
540
541 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
542 {
543 size_t i;
544
545 for (i = 0; i < size; i++)
546 if (table[i][0] == opcode)
547 return table[i][1];
548
549
550 return opcode;
551 }
552
/* Map a 3-byte-address read opcode to its dedicated 4-byte-address form. */
static u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		/* DTR (double transfer rate) variants. */
		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
573
/* Map a 3-byte-address program opcode to its 4-byte-address form. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
587
/* Map a 3-byte-address erase opcode to its 4-byte-address form. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
599
600 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
601 {
602 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
603 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
604 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
605
606 if (!spi_nor_has_uniform_erase(nor)) {
607 struct spi_nor_erase_map *map = &nor->params.erase_map;
608 struct spi_nor_erase_type *erase;
609 int i;
610
611 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
612 erase = &map->erase_type[i];
613 erase->opcode =
614 spi_nor_convert_3to4_erase(erase->opcode);
615 }
616 }
617 }
618
619 static int macronix_set_4byte(struct spi_nor *nor, bool enable)
620 {
621 if (nor->spimem) {
622 struct spi_mem_op op =
623 SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
624 SPINOR_OP_EN4B :
625 SPINOR_OP_EX4B,
626 1),
627 SPI_MEM_OP_NO_ADDR,
628 SPI_MEM_OP_NO_DUMMY,
629 SPI_MEM_OP_NO_DATA);
630
631 return spi_mem_exec_op(nor->spimem, &op);
632 }
633
634 return nor->write_reg(nor, enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B,
635 NULL, 0);
636 }
637
638 static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
639 {
640 int ret;
641
642 write_enable(nor);
643 ret = macronix_set_4byte(nor, enable);
644 write_disable(nor);
645
646 return ret;
647 }
648
649 static int spansion_set_4byte(struct spi_nor *nor, bool enable)
650 {
651 nor->bouncebuf[0] = enable << 7;
652
653 if (nor->spimem) {
654 struct spi_mem_op op =
655 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
656 SPI_MEM_OP_NO_ADDR,
657 SPI_MEM_OP_NO_DUMMY,
658 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
659
660 return spi_mem_exec_op(nor->spimem, &op);
661 }
662
663 return nor->write_reg(nor, SPINOR_OP_BRWR, nor->bouncebuf, 1);
664 }
665
666 static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
667 {
668 nor->bouncebuf[0] = ear;
669
670 if (nor->spimem) {
671 struct spi_mem_op op =
672 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
673 SPI_MEM_OP_NO_ADDR,
674 SPI_MEM_OP_NO_DUMMY,
675 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
676
677 return spi_mem_exec_op(nor->spimem, &op);
678 }
679
680 return nor->write_reg(nor, SPINOR_OP_WREAR, nor->bouncebuf, 1);
681 }
682
/*
 * winbond_set_4byte() - toggle 4-byte addressing, additionally clearing
 * the Extended Address Register when leaving 4-byte mode so later 3-byte
 * accesses address the bottom of the flash.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int winbond_set_4byte(struct spi_nor *nor, bool enable)
{
	int ret;

	ret = macronix_set_4byte(nor, enable);
	if (ret || enable)
		return ret;

	/*
	 * Disabling 4-byte mode alone is not enough: reset EAR to 0 so the
	 * chip does not keep addressing a high 16 MiB bank.
	 */
	write_enable(nor);
	ret = spi_nor_write_ear(nor, 0);
	write_disable(nor);

	return ret;
}
702
703 static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
704 {
705 if (nor->spimem) {
706 struct spi_mem_op op =
707 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
708 SPI_MEM_OP_NO_ADDR,
709 SPI_MEM_OP_NO_DUMMY,
710 SPI_MEM_OP_DATA_IN(1, sr, 1));
711
712 return spi_mem_exec_op(nor->spimem, &op);
713 }
714
715 return nor->read_reg(nor, SPINOR_OP_XRDSR, sr, 1);
716 }
717
718 static int s3an_sr_ready(struct spi_nor *nor)
719 {
720 int ret;
721
722 ret = spi_nor_xread_sr(nor, nor->bouncebuf);
723 if (ret < 0) {
724 dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
725 return ret;
726 }
727
728 return !!(nor->bouncebuf[0] & XSR_RDY);
729 }
730
731 static int spi_nor_clear_sr(struct spi_nor *nor)
732 {
733 if (nor->spimem) {
734 struct spi_mem_op op =
735 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
736 SPI_MEM_OP_NO_ADDR,
737 SPI_MEM_OP_NO_DUMMY,
738 SPI_MEM_OP_NO_DATA);
739
740 return spi_mem_exec_op(nor->spimem, &op);
741 }
742
743 return nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
744 }
745
/*
 * spi_nor_sr_ready() - readiness poll through the Status Register.
 *
 * Return: 1 if ready (WIP clear), 0 if busy, -errno on read failure or
 * when a latched erase/program error is detected (which is cleared with
 * CLSR before returning -EIO).
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int sr = read_sr(nor);
	if (sr < 0)
		return sr;

	/* Only CLSR-capable parts latch error bits in the SR. */
	if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
		if (sr & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		spi_nor_clear_sr(nor);
		return -EIO;
	}

	return !(sr & SR_WIP);
}
764
765 static int spi_nor_clear_fsr(struct spi_nor *nor)
766 {
767 if (nor->spimem) {
768 struct spi_mem_op op =
769 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
770 SPI_MEM_OP_NO_ADDR,
771 SPI_MEM_OP_NO_DUMMY,
772 SPI_MEM_OP_NO_DATA);
773
774 return spi_mem_exec_op(nor->spimem, &op);
775 }
776
777 return nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
778 }
779
/*
 * spi_nor_fsr_ready() - readiness poll through the Flag Status Register.
 *
 * Return: non-zero if ready (FSR_READY set), 0 if busy, -errno on read
 * failure or latched erase/program error (cleared with CLFSR before
 * returning -EIO).
 */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int fsr = read_fsr(nor);
	if (fsr < 0)
		return fsr;

	if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
		if (fsr & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (fsr & FSR_PT_ERR)
			dev_err(nor->dev,
				"Attempted to modify a protected sector.\n");

		spi_nor_clear_fsr(nor);
		return -EIO;
	}

	/* Note: returns the raw FSR_READY bit, not a normalized 0/1. */
	return fsr & FSR_READY;
}
802
/*
 * spi_nor_ready() - combined readiness check: the SR (or the S3AN XRDSR
 * when SNOR_F_READY_XSR_RDY is set) and, when SNOR_F_USE_FSR is set, the
 * FSR as well. The flash is ready only when both agree.
 *
 * Return: non-zero if ready, 0 if busy, -errno on failure.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int sr, fsr;

	if (nor->flags & SNOR_F_READY_XSR_RDY)
		sr = s3an_sr_ready(nor);
	else
		sr = spi_nor_sr_ready(nor);
	if (sr < 0)
		return sr;
	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
	if (fsr < 0)
		return fsr;
	return sr && fsr;
}
818
819
820
821
822
/*
 * spi_nor_wait_till_ready_with_timeout() - poll the flash until it
 * reports ready or @timeout_jiffies elapse.
 *
 * The ready check deliberately runs one more time after the deadline
 * passes, so a flash that becomes ready right at the deadline still
 * succeeds.
 *
 * Return: 0 when ready, -ETIMEDOUT on timeout, -errno on poll failure.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		/* Busy-poll, but let other tasks run between reads. */
		cond_resched();
	}

	dev_err(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}
848
849 static int spi_nor_wait_till_ready(struct spi_nor *nor)
850 {
851 return spi_nor_wait_till_ready_with_timeout(nor,
852 DEFAULT_READY_WAIT_JIFFIES);
853 }
854
855
856
857
858
859
860 static int erase_chip(struct spi_nor *nor)
861 {
862 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
863
864 if (nor->spimem) {
865 struct spi_mem_op op =
866 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
867 SPI_MEM_OP_NO_ADDR,
868 SPI_MEM_OP_NO_DUMMY,
869 SPI_MEM_OP_NO_DATA);
870
871 return spi_mem_exec_op(nor->spimem, &op);
872 }
873
874 return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
875 }
876
877 static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
878 {
879 int ret = 0;
880
881 mutex_lock(&nor->lock);
882
883 if (nor->prepare) {
884 ret = nor->prepare(nor, ops);
885 if (ret) {
886 dev_err(nor->dev, "failed in the preparation.\n");
887 mutex_unlock(&nor->lock);
888 return ret;
889 }
890 }
891 return ret;
892 }
893
894 static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
895 {
896 if (nor->unprepare)
897 nor->unprepare(nor, ops);
898 mutex_unlock(&nor->lock);
899 }
900
901
902
903
904
905
906
907
908
909
910 static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
911 {
912 u32 offset, page;
913
914 offset = addr % nor->page_size;
915 page = addr / nor->page_size;
916 page <<= (nor->page_size > 512) ? 10 : 9;
917
918 return page | offset;
919 }
920
921 static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
922 {
923 if (!nor->params.convert_addr)
924 return addr;
925
926 return nor->params.convert_addr(nor, addr);
927 }
928
929
930
931
/*
 * spi_nor_erase_sector() - erase one sector at @addr using
 * nor->erase_opcode. The caller must have issued write_enable() and is
 * responsible for waiting for completion.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	addr = spi_nor_convert_addr(nor, addr);

	/* A controller-provided erase hook takes precedence. */
	if (nor->erase)
		return nor->erase(nor, addr);

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		return spi_mem_exec_op(nor->spimem, &op);
	}

	/*
	 * Legacy path: serialize the address big-endian into the bounce
	 * buffer and send it as register-write payload.
	 */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return nor->write_reg(nor, nor->erase_opcode, nor->bouncebuf,
			      nor->addr_width);
}
963
964
965
966
967
968
969
970
971
/*
 * spi_nor_div_by_erase_size() - divide @dividend by the erase size using
 * the precomputed mask/shift; valid only because erase sizes are powers
 * of two.
 *
 * Return: the quotient; the remainder is stored in *@remainder.
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}
979
980
981
982
983
984
985
986
987
988
989
990
991
992
/*
 * spi_nor_find_best_erase_type() - pick the largest erase type that is
 * allowed in @region, fits within @len, and (for non-overlaid regions)
 * starts aligned at @addr.
 *
 * Return: the chosen erase type, or NULL if none qualifies.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	/* The low bits of region->offset encode the allowed-type mask. */
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Iterate from largest to smallest erase type so the fewest
	 * commands are issued.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Skip types this region does not support. */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];

		/* Must not erase past the requested range. */
		if (erase->size > len)
			continue;

		/* Overlaid regions are erased whole; alignment is moot. */
		if (region->offset & SNOR_OVERLAID_REGION)
			return erase;

		/* Alignment is not mandatory for overlaid regions */
		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (rem)
			continue;
		else
			return erase;
	}

	return NULL;
}
1031
1032
1033
1034
1035
1036
1037
1038 static struct spi_nor_erase_region *
1039 spi_nor_region_next(struct spi_nor_erase_region *region)
1040 {
1041 if (spi_nor_region_is_last(region))
1042 return NULL;
1043 region++;
1044 return region;
1045 }
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
/*
 * spi_nor_find_erase_region() - walk the (address-ordered) region array
 * until the region containing @addr is found.
 *
 * Return: the region, or ERR_PTR(-EINVAL) when @addr is past the last
 * region.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
	struct spi_nor_erase_region *region = map->regions;
	/* Mask off the flag bits packed into the low bits of offset. */
	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
	u64 region_end = region_start + region->size;

	while (addr < region_start || addr >= region_end) {
		region = spi_nor_region_next(region);
		if (!region)
			return ERR_PTR(-EINVAL);

		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		region_end = region_start + region->size;
	}

	return region;
}
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083 static struct spi_nor_erase_command *
1084 spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1085 const struct spi_nor_erase_type *erase)
1086 {
1087 struct spi_nor_erase_command *cmd;
1088
1089 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1090 if (!cmd)
1091 return ERR_PTR(-ENOMEM);
1092
1093 INIT_LIST_HEAD(&cmd->list);
1094 cmd->opcode = erase->opcode;
1095 cmd->count = 1;
1096
1097 if (region->offset & SNOR_OVERLAID_REGION)
1098 cmd->size = region->size;
1099 else
1100 cmd->size = erase->size;
1101
1102 return cmd;
1103 }
1104
1105
1106
1107
1108
1109 static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1110 {
1111 struct spi_nor_erase_command *cmd, *next;
1112
1113 list_for_each_entry_safe(cmd, next, erase_list, list) {
1114 list_del(&cmd->list);
1115 kfree(cmd);
1116 }
1117 }
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
/*
 * spi_nor_init_erase_cmd_list() - build the list of erase commands that
 * covers [@addr, @addr + @len) on a non-uniform erase map. Consecutive
 * sectors using the same erase type are merged into one command with an
 * incremented repeat count (except across overlaid regions).
 *
 * Return: 0 on success (commands appended to @erase_list), -errno
 * otherwise (the partially built list is destroyed).
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params.erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			/* No valid erase for this alignment/length: -EINVAL. */
			goto destroy_erase_cmd_list;

		/*
		 * Start a new command when the erase type changes or the
		 * region is overlaid (which always erases in one shot).
		 */
		if (prev_erase != erase ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			/* Same type as before: just repeat the command. */
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Cross into the next region when this one is exhausted. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
/*
 * spi_nor_erase_multi_sectors() - erase [@addr, @addr + @len) on a
 * non-uniform erase map by building and executing an erase command list.
 *
 * Return: 0 on success, -errno otherwise (remaining commands are freed).
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		/* spi_nor_erase_sector() uses the cached erase opcode. */
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			write_enable(nor);

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
1234
1235
1236
1237
1238
/*
 * spi_nor_erase() - mtd erase entry point. Picks among three strategies:
 * whole-chip erase, uniform sector-by-sector erase, or the non-uniform
 * multi-sector path.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Uniform maps only accept erasesize-multiple lengths. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		write_enable(nor);

		if (erase_chip(nor)) {
			ret = -EIO;
			goto erase_err;
		}

		/*
		 * Scale the timeout linearly with the size of the flash,
		 * with a minimum calibrated to an old 2 MB flash. This
		 * doesn't empirically scale exactly linearly, but it is at
		 * least enough for the largest supported parts.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/* "sector"-at-a-time erase for uniform maps */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			write_enable(nor);

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;
		}

	/* erase multiple sectors on a non-uniform map */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	/* Best-effort; its return value is deliberately ignored. */
	write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);

	return ret;
}
1322
1323
1324 static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
1325 {
1326 int ret;
1327
1328 write_enable(nor);
1329 ret = write_sr(nor, status_new);
1330 if (ret)
1331 return ret;
1332
1333 ret = spi_nor_wait_till_ready(nor);
1334 if (ret)
1335 return ret;
1336
1337 ret = read_sr(nor);
1338 if (ret < 0)
1339 return ret;
1340
1341 return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
1342 }
1343
/*
 * stm_get_locked_range() - decode the SR block-protect bits into the
 * protected byte range. BP encodes the protected fraction as a power of
 * two of the flash size; the TB bit (when supported) selects whether the
 * range anchors at the bottom or the top of the flash.
 */
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
				 uint64_t *len)
{
	struct mtd_info *mtd = &nor->mtd;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	int shift = ffs(mask) - 1;
	int pow;

	if (!(sr & mask)) {
		/* No protection */
		*ofs = 0;
		*len = 0;
	} else {
		/* BP value n protects the top (or bottom) size >> (mask - n). */
		pow = ((sr & mask) ^ mask) >> shift;
		*len = mtd->size >> pow;
		if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
			*ofs = 0;
		else
			*ofs = mtd->size - *len;
	}
}
1365
1366
1367
1368
1369
/*
 * stm_check_lock_status_sr() - given a status-register value @sr, report
 * whether [@ofs, @ofs + @len) is entirely locked (@locked true) or
 * entirely unlocked (@locked false). A zero-length range trivially
 * satisfies either condition.
 *
 * Return: 1 if the condition holds, 0 otherwise.
 */
static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
				    u8 sr, bool locked)
{
	loff_t lock_offs;
	uint64_t lock_len;

	if (!len)
		return 1;

	stm_get_locked_range(nor, sr, &lock_offs, &lock_len);

	if (locked)
		/* Requested range is a sub-range of locked range */
		return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
	else
		/* Requested range does not overlap with locked range */
		return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
}
1388
1389 static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1390 u8 sr)
1391 {
1392 return stm_check_lock_status_sr(nor, ofs, len, sr, true);
1393 }
1394
1395 static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1396 u8 sr)
1397 {
1398 return stm_check_lock_status_sr(nor, ofs, len, sr, false);
1399 }
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
/*
 * Lock a region of the flash by growing the Status Register BP[2:0]
 * protection range so that it covers at least [ofs, ofs + len).
 *
 * Protection anchors at the top of the device by default; flashes with a
 * Top/Bottom bit (SNOR_F_HAS_SR_TB) may anchor at the bottom instead.
 * Because the BP field can only describe power-of-two fractions of the
 * device anchored at one end, the request fails with -EINVAL when no such
 * range covers the requested region without unlocking something that is
 * currently locked.
 *
 * Returns negative on errors, 0 on success.
 */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is unlocked, we don't need to do anything */
	if (stm_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection */
	if (!stm_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection */
	if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
			      status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should end up locked */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	/*
	 * Need smallest pow such that:
	 *
	 *   1 / (2^pow) <= (len / size)
	 *
	 * so (assuming power-of-2 size) we do:
	 *
	 *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
	 *
	 * The BP field stores the inverted value, hence "mask - ...".
	 */
	pow = ilog2(mtd->size) - ilog2(lock_len);
	val = mask - (pow << shift);
	if (val & ~mask)
		return -EINVAL;

	/* Don't "lock" with no region! */
	if (!(val & mask))
		return -EINVAL;

	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Disallow further writes if WP pin is asserted */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	return write_sr_and_check(nor, status_new, mask);
}
1507
1508
1509
1510
1511
1512
1513 static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1514 {
1515 struct mtd_info *mtd = &nor->mtd;
1516 int status_old, status_new;
1517 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1518 u8 shift = ffs(mask) - 1, pow, val;
1519 loff_t lock_len;
1520 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1521 bool use_top;
1522
1523 status_old = read_sr(nor);
1524 if (status_old < 0)
1525 return status_old;
1526
1527
1528 if (stm_is_unlocked_sr(nor, ofs, len, status_old))
1529 return 0;
1530
1531
1532 if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
1533 can_be_top = false;
1534
1535
1536 if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
1537 status_old))
1538 can_be_bottom = false;
1539
1540 if (!can_be_bottom && !can_be_top)
1541 return -EINVAL;
1542
1543
1544 use_top = can_be_top;
1545
1546
1547 if (use_top)
1548 lock_len = mtd->size - (ofs + len);
1549 else
1550 lock_len = ofs;
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 pow = ilog2(mtd->size) - order_base_2(lock_len);
1562 if (lock_len == 0) {
1563 val = 0;
1564 } else {
1565 val = mask - (pow << shift);
1566
1567 if (val & ~mask)
1568 return -EINVAL;
1569 }
1570
1571 status_new = (status_old & ~mask & ~SR_TB) | val;
1572
1573
1574 if (lock_len == 0)
1575 status_new &= ~SR_SRWD;
1576
1577 if (!use_top)
1578 status_new |= SR_TB;
1579
1580
1581 if (status_new == status_old)
1582 return 0;
1583
1584
1585 if ((status_new & mask) > (status_old & mask))
1586 return -EINVAL;
1587
1588 return write_sr_and_check(nor, status_new, mask);
1589 }
1590
1591
1592
1593
1594
1595
1596
1597
1598 static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1599 {
1600 int status;
1601
1602 status = read_sr(nor);
1603 if (status < 0)
1604 return status;
1605
1606 return stm_is_locked_sr(nor, ofs, len, status);
1607 }
1608
/* SR BP-bit based locking operations used by ST Micro style flashes. */
static const struct spi_nor_locking_ops stm_locking_ops = {
	.lock = stm_lock,
	.unlock = stm_unlock,
	.is_locked = stm_is_locked,
};
1614
1615 static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1616 {
1617 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1618 int ret;
1619
1620 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
1621 if (ret)
1622 return ret;
1623
1624 ret = nor->params.locking_ops->lock(nor, ofs, len);
1625
1626 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
1627 return ret;
1628 }
1629
1630 static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1631 {
1632 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1633 int ret;
1634
1635 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1636 if (ret)
1637 return ret;
1638
1639 ret = nor->params.locking_ops->unlock(nor, ofs, len);
1640
1641 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1642 return ret;
1643 }
1644
1645 static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1646 {
1647 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1648 int ret;
1649
1650 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1651 if (ret)
1652 return ret;
1653
1654 ret = nor->params.locking_ops->is_locked(nor, ofs, len);
1655
1656 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1657 return ret;
1658 }
1659
1660
1661
1662
1663
1664
1665
/*
 * Write the Status Register and Configuration Register with one 16-bit
 * WRSR transaction: sr_cr[0] goes to the Status Register, sr_cr[1] to the
 * Configuration Register.  Waits for the internal write cycle to finish.
 *
 * Returns 0 on success, negative on error.
 */
static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
{
	int ret;

	write_enable(nor);

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(2, sr_cr, 1));

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Legacy controller hook when no spi-mem interface exists. */
		ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
	}

	if (ret < 0) {
		dev_err(nor->dev,
			"error while writing configuration register\n");
		return -EINVAL;
	}

	ret = spi_nor_wait_till_ready(nor);
	if (ret) {
		dev_err(nor->dev,
			"timeout while writing configuration register\n");
		return ret;
	}

	return 0;
}
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710 static int macronix_quad_enable(struct spi_nor *nor)
1711 {
1712 int ret, val;
1713
1714 val = read_sr(nor);
1715 if (val < 0)
1716 return val;
1717 if (val & SR_QUAD_EN_MX)
1718 return 0;
1719
1720 write_enable(nor);
1721
1722 write_sr(nor, val | SR_QUAD_EN_MX);
1723
1724 ret = spi_nor_wait_till_ready(nor);
1725 if (ret)
1726 return ret;
1727
1728 ret = read_sr(nor);
1729 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1730 dev_err(nor->dev, "Macronix Quad bit not set\n");
1731 return -EINVAL;
1732 }
1733
1734 return 0;
1735 }
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
/*
 * spansion_quad_enable() - set the QE bit (CR bit 1) for Spansion-like
 * QSPI memories via a 16-bit SR+CR write.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * NOTE(review): sr_cr[0] is written as 0, so this clobbers the current
 * Status Register contents (e.g. any block-protection bits) — this
 * matches the long-standing behavior but is a known sharp edge; see
 * spansion_read_cr_quad_enable() for the read-modify-write variant.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_quad_enable(struct spi_nor *nor)
{
	u8 *sr_cr = nor->bouncebuf;
	int ret;

	sr_cr[0] = 0;
	sr_cr[1] = CR_QUAD_EN_SPAN;
	ret = write_sr_cr(nor, sr_cr);
	if (ret)
		return ret;

	/* Read back and check it; read_cr() returns the CR value (>= 0). */
	ret = read_cr(nor);
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_err(nor->dev, "Spansion Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794 static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
1795 {
1796 u8 *sr_cr = nor->bouncebuf;
1797 int ret;
1798
1799
1800 ret = read_sr(nor);
1801 if (ret < 0) {
1802 dev_err(nor->dev, "error while reading status register\n");
1803 return -EINVAL;
1804 }
1805 sr_cr[0] = ret;
1806 sr_cr[1] = CR_QUAD_EN_SPAN;
1807
1808 return write_sr_cr(nor, sr_cr);
1809 }
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
/*
 * spansion_read_cr_quad_enable() - set the QE bit (CR bit 1) using a
 * read-modify-write of SR + CR.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Requires the Read Configuration Register (35h) instruction.  Unlike
 * spansion_quad_enable(), this preserves the current SR and CR contents.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_read_cr_quad_enable(struct spi_nor *nor)
{
	struct device *dev = nor->dev;
	u8 *sr_cr = nor->bouncebuf;
	int ret;

	/* Check current Quad Enable bit value. */
	ret = read_cr(nor);
	if (ret < 0) {
		dev_err(dev, "error while reading configuration register\n");
		return -EINVAL;
	}

	if (ret & CR_QUAD_EN_SPAN)
		return 0;

	sr_cr[1] = ret | CR_QUAD_EN_SPAN;

	/* Keep the current value of the Status Register. */
	ret = read_sr(nor);
	if (ret < 0) {
		dev_err(dev, "error while reading status register\n");
		return -EINVAL;
	}
	sr_cr[0] = ret;

	ret = write_sr_cr(nor, sr_cr);
	if (ret)
		return ret;

	/* Read back and check it. */
	ret = read_cr(nor);
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_err(nor->dev, "Spansion Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}
1863
1864 static int spi_nor_write_sr2(struct spi_nor *nor, u8 *sr2)
1865 {
1866 if (nor->spimem) {
1867 struct spi_mem_op op =
1868 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
1869 SPI_MEM_OP_NO_ADDR,
1870 SPI_MEM_OP_NO_DUMMY,
1871 SPI_MEM_OP_DATA_OUT(1, sr2, 1));
1872
1873 return spi_mem_exec_op(nor->spimem, &op);
1874 }
1875
1876 return nor->write_reg(nor, SPINOR_OP_WRSR2, sr2, 1);
1877 }
1878
1879 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
1880 {
1881 if (nor->spimem) {
1882 struct spi_mem_op op =
1883 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
1884 SPI_MEM_OP_NO_ADDR,
1885 SPI_MEM_OP_NO_DUMMY,
1886 SPI_MEM_OP_DATA_IN(1, sr2, 1));
1887
1888 return spi_mem_exec_op(nor->spimem, &op);
1889 }
1890
1891 return nor->read_reg(nor, SPINOR_OP_RDSR2, sr2, 1);
1892 }
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906 static int sr2_bit7_quad_enable(struct spi_nor *nor)
1907 {
1908 u8 *sr2 = nor->bouncebuf;
1909 int ret;
1910
1911
1912 ret = spi_nor_read_sr2(nor, sr2);
1913 if (ret)
1914 return ret;
1915 if (*sr2 & SR2_QUAD_EN_BIT7)
1916 return 0;
1917
1918
1919 *sr2 |= SR2_QUAD_EN_BIT7;
1920
1921 write_enable(nor);
1922
1923 ret = spi_nor_write_sr2(nor, sr2);
1924 if (ret < 0) {
1925 dev_err(nor->dev, "error while writing status register 2\n");
1926 return -EINVAL;
1927 }
1928
1929 ret = spi_nor_wait_till_ready(nor);
1930 if (ret < 0) {
1931 dev_err(nor->dev, "timeout while writing status register 2\n");
1932 return ret;
1933 }
1934
1935
1936 ret = spi_nor_read_sr2(nor, sr2);
1937 if (!(ret > 0 && (*sr2 & SR2_QUAD_EN_BIT7))) {
1938 dev_err(nor->dev, "SR2 Quad bit not set\n");
1939 return -EINVAL;
1940 }
1941
1942 return 0;
1943 }
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
/*
 * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Read-modify-write that clears BP2/BP1/BP0 from the Status Register
 * without affecting other bits.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_clear_sr_bp(struct spi_nor *nor)
{
	int ret;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;

	ret = read_sr(nor);
	if (ret < 0) {
		dev_err(nor->dev, "error while reading status register\n");
		return ret;
	}

	write_enable(nor);

	/* Write back the SR value with the BP bits cleared. */
	ret = write_sr(nor, ret & ~mask);
	if (ret) {
		dev_err(nor->dev, "write to status register failed\n");
		return ret;
	}

	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		dev_err(nor->dev, "timeout while writing status register\n");
	return ret;
}
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
/*
 * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block
 * Protection bits on Spansion flashes.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * When the QE bit is set in the Configuration Register, a plain one-byte
 * WRSR would clear CR (and thus QE), so in that case the BP bits are
 * cleared with a 16-bit SR+CR write that preserves the current CR value.
 * Otherwise falls back to the generic spi_nor_clear_sr_bp().
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
{
	int ret;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 *sr_cr = nor->bouncebuf;

	/* Check current Quad Enable bit value. */
	ret = read_cr(nor);
	if (ret < 0) {
		dev_err(nor->dev,
			"error while reading configuration register\n");
		return ret;
	}

	/*
	 * QE is set: use the 16-bit Write Status (01h) command to avoid
	 * clobbering the Configuration Register (and the QE bit with it).
	 */
	if (ret & CR_QUAD_EN_SPAN) {
		sr_cr[1] = ret;

		ret = read_sr(nor);
		if (ret < 0) {
			dev_err(nor->dev,
				"error while reading status register\n");
			return ret;
		}
		sr_cr[0] = ret & ~mask;

		ret = write_sr_cr(nor, sr_cr);
		if (ret)
			dev_err(nor->dev, "16-bit write register failed\n");
		return ret;
	}

	/*
	 * QE is not set: the one-byte Write Status is safe, use the
	 * generic helper.
	 */
	return spi_nor_clear_sr_bp(nor);
}
2033
2034
/*
 * INFO() - describe a flash by its 3-byte JEDEC ID plus an optional 2-byte
 * extended ID.  id_len is 0 for non-JEDEC parts (_jedec_id == 0), else 3
 * or 5.  Page size defaults to 256 bytes.
 */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff, \
		((_ext_id) >> 8) & 0xff, \
		(_ext_id) & 0xff, \
	}, \
	.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = 256, \
	.flags = (_flags),

/* INFO6() - like INFO() but with a 3-byte extended ID (6 ID bytes total). */
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff, \
		((_ext_id) >> 16) & 0xff, \
		((_ext_id) >> 8) & 0xff, \
		(_ext_id) & 0xff, \
	}, \
	.id_len = 6, \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = 256, \
	.flags = (_flags),

/* CAT25_INFO() - EEPROM-style parts with no JEDEC ID and explicit geometry. */
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = (_page_size), \
	.addr_width = (_addr_width), \
	.flags = (_flags),

/* S3AN_INFO() - Xilinx S3AN internal flash: sector = 8 pages, 3-byte addr. */
#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff \
	}, \
	.id_len = 3, \
	.sector_size = (8*_page_size), \
	.n_sectors = (_n_sectors), \
	.page_size = _page_size, \
	.addr_width = 3, \
	.flags = SPI_NOR_NO_FR | SPI_S3AN,
/*
 * BFPT fixup for the ISSI IS25LP256: if the BFPT advertises 3-byte-only
 * addressing, force a 4-byte address width anyway.
 * NOTE(review): rationale inferred from the override itself — presumably
 * the part's BFPT under-reports its addressing capability; confirm against
 * the ISSI datasheet.
 */
static int
is25lp256_post_bfpt_fixups(struct spi_nor *nor,
			   const struct sfdp_parameter_header *bfpt_header,
			   const struct sfdp_bfpt *bfpt,
			   struct spi_nor_flash_parameter *params)
{
	/* Override the address width parsed from BFPT DWORD1. */
	if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
	    BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
		nor->addr_width = 4;

	return 0;
}

static struct spi_nor_fixups is25lp256_fixups = {
	.post_bfpt = is25lp256_post_bfpt_fixups,
};
2105
/*
 * BFPT fixup for the Macronix MX25L25635: enable 4-byte opcodes when the
 * BFPT advertises the 4-4-4 fast-read capability.
 * NOTE(review): this bit is presumably used to tell apart chip variants
 * that share a JEDEC ID but differ in 4-byte opcode support — confirm
 * against the Macronix datasheets.
 */
static int
mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
			    const struct sfdp_parameter_header *bfpt_header,
			    const struct sfdp_bfpt *bfpt,
			    struct spi_nor_flash_parameter *params)
{
	/* BFPT DWORD5 bit: supports (4-4-4) fast read -> 4-byte opcodes. */
	if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
		nor->flags |= SNOR_F_4B_OPCODES;

	return 0;
}

static struct spi_nor_fixups mx25l25635_fixups = {
	.post_bfpt = mx25l25635_post_bfpt_fixups,
};
2130
/*
 * Default-init fixup for the GigaDevice GD25Q256: use the Macronix-style
 * quad-enable procedure (QE bit in the Status Register) instead of the
 * default one.
 * NOTE(review): implies the part's QE bit lives in SR like Macronix parts;
 * confirm against the GigaDevice datasheet.
 */
static void gd25q256_default_init(struct spi_nor *nor)
{
	nor->params.quad_enable = macronix_quad_enable;
}

static struct spi_nor_fixups gd25q256_fixups = {
	.default_init = gd25q256_default_init,
};
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
/*
 * Table of supported flash parts, matched by JEDEC ID (see
 * spi_nor_read_id()).  Grouped by manufacturer; terminated by a zeroed
 * sentinel entry.
 */
static const struct flash_info spi_nor_ids[] = {
	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
	{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
	{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },

	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
	{ "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
	{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },

	{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
	{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },

	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },

	/* EON -- en25xxx */
	{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
	{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
	{ "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
	{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
	{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },

	/* ESMT */
	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
	{ "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
	{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },

	/* Everspin MRAM -- no erase needed, no fast read */
	{ "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Fujitsu FRAM */
	{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },

	/* GigaDevice */
	{
		"gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
		.fixups = &gd25q256_fixups,
	},

	/* Intel/Numonyx -- xxxs33b */
	{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
	{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
	{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },

	/* ISSI */
	{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
	{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_4B_OPCODES)
		.fixups = &is25lp256_fixups },
	{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* Macronix */
	{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
	{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
	{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
	{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
	{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
	{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
	{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
	{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
	{ "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
	{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
	{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
	{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
			 SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
		.fixups = &mx25l25635_fixups },
	{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
	{ "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },

	/* Micron <--> ST Micro */
	{ "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
	{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
	{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
	{ "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
			      SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
			      SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
			   SPI_NOR_QUAD_READ) },
	{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
	{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
	{ "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
			    SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
			    NO_CHIP_ERASE) },
	{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },

	/* Micron */
	{
		"mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
			SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
			SPI_NOR_4B_OPCODES)
	},
	{ "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
			    SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
			    SPI_NOR_4B_OPCODES) },

	/* PMC */
	{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
	{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
	{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },

	/*
	 * Spansion/Cypress -- single (large) sector size only, at least
	 * for the chips listed here (without boot sectors).
	 */
	{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
	{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | USE_CLSR) },
	{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
	{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
	{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
	{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
	{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
	{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
	{ "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{ "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
	{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
	{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
	{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
	{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },

	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
	{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
	{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
	{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
	{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
	{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
	{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
	{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* ST Microelectronics -- newer production may have feature updates */
	{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
	{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
	{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
	{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
	{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
	{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
	{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
	{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
	{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },

	{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
	{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
	{ "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
	{ "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
	{ "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
	{ "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
	{ "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
	{ "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },

	{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
	{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
	{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },

	{ "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
	{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
	{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },

	{ "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
	{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
	{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },

	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
	{ "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
	{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
	{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
	{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
	{
		"w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
	{
		"w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
	{
		"w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{
		"w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
			SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },

	/* Catalyst / On Semiconductor -- non-JEDEC */
	{ "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Xilinx S3AN Internal Flash */
	{ "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
	{ "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
	{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
	{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
	{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },

	/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ },
};
2508
/*
 * spi_nor_read_id() - read the JEDEC ID and look the flash up in the table.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Issues the RDID command (via spi-mem when available, otherwise the legacy
 * read_reg hook) into the DMA-safe bounce buffer, then scans spi_nor_ids[]
 * for a matching id.
 *
 * Return: the matching flash_info entry, ERR_PTR(-errno) if the ID read
 * fails, or ERR_PTR(-ENODEV) if no table entry matches.
 */
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
	int tmp;
	u8 *id = nor->bouncebuf;	/* DMA-safe scratch buffer */
	const struct flash_info *info;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));

		tmp = spi_mem_exec_op(nor->spimem, &op);
	} else {
		tmp = nor->read_reg(nor, SPINOR_OP_RDID, id,
				    SPI_NOR_MAX_ID_LEN);
	}
	if (tmp < 0) {
		dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
		return ERR_PTR(tmp);
	}

	/* The last spi_nor_ids[] entry is a zeroed sentinel, hence "- 1". */
	for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
		info = &spi_nor_ids[tmp];
		if (info->id_len) {
			if (!memcmp(info->id, id, info->id_len))
				return &spi_nor_ids[tmp];
		}
	}
	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
		SPI_NOR_MAX_ID_LEN, id);
	return ERR_PTR(-ENODEV);
}
2543
/*
 * spi_nor_read() - MTD ->_read() callback.
 * @mtd:	MTD device
 * @from:	flash offset to start reading at
 * @len:	number of bytes to read
 * @retlen:	output: number of bytes actually read
 * @buf:	destination buffer
 *
 * Reads in as many chunks as the lower layer returns, accumulating the
 * running total into @retlen even on a partial failure.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		/* Translate to a device address (e.g. Xilinx S3AN pages). */
		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads. */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
	return ret;
}
2582
/*
 * sst_write() - MTD ->_write() callback for SST flashes.
 * @mtd:	MTD device
 * @to:		flash offset to start writing at
 * @len:	number of bytes to write
 * @retlen:	output: number of bytes actually written
 * @buf:	source buffer
 *
 * SST parts program in 2-byte units with the Auto Address Increment
 * (AAI_WP) command; an unaligned leading byte and/or a trailing odd byte
 * are programmed individually with the Byte-Program (BP) command.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		     size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t actual;		/* number of bytes written so far */
	int ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	write_enable(nor);

	nor->sst_write_second = false;

	/* Start write from odd address. */
	actual = to % 2;

	if (actual) {
		nor->program_opcode = SPINOR_OP_BP;

		/* write one byte. */
		ret = spi_nor_write_data(nor, to, 1, buf);
		if (ret < 0)
			goto sst_write_err;
		WARN(ret != 1, "While writing 1 byte written %i bytes\n",
		     (int)ret);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto sst_write_err;
	}
	to += actual;

	/* Write out most of the data here, a word at a time. */
	for (; actual < len - 1; actual += 2) {
		nor->program_opcode = SPINOR_OP_AAI_WP;

		/* write two bytes. */
		ret = spi_nor_write_data(nor, to, 2, buf + actual);
		if (ret < 0)
			goto sst_write_err;
		WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
		     (int)ret);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto sst_write_err;
		to += 2;
		nor->sst_write_second = true;
	}
	nor->sst_write_second = false;

	write_disable(nor);
	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		goto sst_write_err;

	/* Write out trailing byte if it exists. */
	if (actual != len) {
		write_enable(nor);

		nor->program_opcode = SPINOR_OP_BP;
		ret = spi_nor_write_data(nor, to, 1, buf + actual);
		if (ret < 0)
			goto sst_write_err;
		WARN(ret != 1, "While writing 1 byte written %i bytes\n",
		     (int)ret);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto sst_write_err;
		write_disable(nor);
		actual += 1;
	}
sst_write_err:
	*retlen += actual;
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
2661
2662
2663
2664
2665
2666
/*
 * spi_nor_write() - MTD ->_write() callback.
 * @mtd:	MTD device
 * @to:		flash offset to start writing at
 * @len:	number of bytes to write
 * @retlen:	output: number of bytes actually written
 * @buf:	source buffer
 *
 * Write an address range to the nor chip. Data must be written in
 * page-sized chunks; the address range may be any size provided it is
 * within the physical boundaries.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. In the other cases we
		 * need to do a modulus operation (more expensive).
		 * Power of two numbers have only one bit set and we can use
		 * hweight32() to detect if we need to do a modulus (do_div())
		 * or not.
		 */
		if (hweight32(nor->page_size) == 1) {
			page_offset = addr & (nor->page_size - 1);
		} else {
			uint64_t aux = addr;

			page_offset = do_div(aux, nor->page_size);
		}
		/* The size of data remaining on the first page. */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		addr = spi_nor_convert_addr(nor, addr);

		write_enable(nor);
		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
2722
2723 static int spi_nor_check(struct spi_nor *nor)
2724 {
2725 if (!nor->dev ||
2726 (!nor->spimem &&
2727 (!nor->read || !nor->write || !nor->read_reg ||
2728 !nor->write_reg))) {
2729 pr_err("spi-nor: please fill all the necessary fields!\n");
2730 return -EINVAL;
2731 }
2732
2733 return 0;
2734 }
2735
/*
 * s3an_nor_setup() - one-time setup for Xilinx Spartan-3AN In-System Flash.
 * @nor:	pointer to a 'struct spi_nor'
 * @hwcaps:	hardware capabilities (unused here)
 *
 * Reads the Xilinx-specific status register (XRDSR) to detect the current
 * addressing mode and sizes the MTD accordingly; also selects the Xilinx
 * erase/program/read op codes.
 *
 * Return: 0 on success, -errno if reading XRDSR fails.
 */
static int s3an_nor_setup(struct spi_nor *nor,
			  const struct spi_nor_hwcaps *hwcaps)
{
	int ret;

	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
	if (ret < 0) {
		dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
		return ret;
	}

	nor->erase_opcode = SPINOR_OP_XSE;
	nor->program_opcode = SPINOR_OP_XPP;
	nor->read_opcode = SPINOR_OP_READ;
	nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;

	/*
	 * These flashes have a page size of 264 or 528 bytes (known as
	 * Default addressing mode). It can be changed to a more standard
	 * Power-of-two mode where the page size is 256/512. This comes
	 * with a price: there is 3% less space, the data is corrupted,
	 * and the page size cannot be changed back to default addressing
	 * mode.
	 *
	 * The current addressing mode can be read from the XRDSR register
	 * and should not be changed, because changing it is a destructive
	 * operation.
	 */
	if (nor->bouncebuf[0] & XSR_PAGESIZE) {
		/* Flash in Power of 2 mode */
		nor->page_size = (nor->page_size == 264) ? 256 : 512;
		nor->mtd.writebufsize = nor->page_size;
		nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
		nor->mtd.erasesize = 8 * nor->page_size;
	} else {
		/* Flash in Default addressing mode */
		nor->params.convert_addr = s3an_convert_addr;
		nor->mtd.erasesize = nor->info->sector_size;
	}

	return 0;
}
2777
2778 static void
2779 spi_nor_set_read_settings(struct spi_nor_read_command *read,
2780 u8 num_mode_clocks,
2781 u8 num_wait_states,
2782 u8 opcode,
2783 enum spi_nor_protocol proto)
2784 {
2785 read->num_mode_clocks = num_mode_clocks;
2786 read->num_wait_states = num_wait_states;
2787 read->opcode = opcode;
2788 read->proto = proto;
2789 }
2790
2791 static void
2792 spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
2793 u8 opcode,
2794 enum spi_nor_protocol proto)
2795 {
2796 pp->opcode = opcode;
2797 pp->proto = proto;
2798 }
2799
2800 static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2801 {
2802 size_t i;
2803
2804 for (i = 0; i < size; i++)
2805 if (table[i][0] == (int)hwcaps)
2806 return table[i][1];
2807
2808 return -EINVAL;
2809 }
2810
2811 static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
2812 {
2813 static const int hwcaps_read2cmd[][2] = {
2814 { SNOR_HWCAPS_READ, SNOR_CMD_READ },
2815 { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
2816 { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
2817 { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
2818 { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
2819 { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
2820 { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
2821 { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
2822 { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
2823 { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
2824 { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
2825 { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
2826 { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
2827 { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
2828 { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
2829 };
2830
2831 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
2832 ARRAY_SIZE(hwcaps_read2cmd));
2833 }
2834
2835 static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
2836 {
2837 static const int hwcaps_pp2cmd[][2] = {
2838 { SNOR_HWCAPS_PP, SNOR_CMD_PP },
2839 { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
2840 { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
2841 { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
2842 { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
2843 { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
2844 { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
2845 };
2846
2847 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
2848 ARRAY_SIZE(hwcaps_pp2cmd));
2849 }
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867 static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
2868 {
2869 ssize_t ret;
2870
2871 while (len) {
2872 ret = spi_nor_read_data(nor, addr, len, buf);
2873 if (ret < 0)
2874 return ret;
2875 if (!ret || ret > len)
2876 return -EIO;
2877
2878 buf += ret;
2879 addr += ret;
2880 len -= ret;
2881 }
2882 return 0;
2883 }
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898 static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
2899 size_t len, void *buf)
2900 {
2901 u8 addr_width, read_opcode, read_dummy;
2902 int ret;
2903
2904 read_opcode = nor->read_opcode;
2905 addr_width = nor->addr_width;
2906 read_dummy = nor->read_dummy;
2907
2908 nor->read_opcode = SPINOR_OP_RDSFDP;
2909 nor->addr_width = 3;
2910 nor->read_dummy = 8;
2911
2912 ret = spi_nor_read_raw(nor, addr, len, buf);
2913
2914 nor->read_opcode = read_opcode;
2915 nor->addr_width = addr_width;
2916 nor->read_dummy = read_dummy;
2917
2918 return ret;
2919 }
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929 static int spi_nor_spimem_check_op(struct spi_nor *nor,
2930 struct spi_mem_op *op)
2931 {
2932
2933
2934
2935
2936
2937
2938 op->addr.nbytes = 4;
2939 if (!spi_mem_supports_op(nor->spimem, op)) {
2940 if (nor->mtd.size > SZ_16M)
2941 return -ENOTSUPP;
2942
2943
2944 op->addr.nbytes = 3;
2945 if (!spi_mem_supports_op(nor->spimem, op))
2946 return -ENOTSUPP;
2947 }
2948
2949 return 0;
2950 }
2951
2952
2953
2954
2955
2956
2957
2958
2959
/*
 * spi_nor_spimem_check_readop() - check if the read op is supported
 *				   by the controller.
 * @nor:	pointer to a 'struct spi_nor'
 * @read:	pointer to the read command settings to be checked
 *
 * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	/* Start from a 1-1-1 template; bus widths are patched from proto. */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
					  SPI_MEM_OP_ADDR(3, 0, 1),
					  SPI_MEM_OP_DUMMY(0, 1),
					  SPI_MEM_OP_DATA_IN(0, NULL, 1));

	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
	op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
	op.dummy.buswidth = op.addr.buswidth;
	/* Convert (mode clocks + wait states) cycles into dummy bytes. */
	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
			  op.dummy.buswidth / 8;

	return spi_nor_spimem_check_op(nor, &op);
}
2977
2978
2979
2980
2981
2982
2983
2984
2985
/*
 * spi_nor_spimem_check_pp() - check if the page program op is supported
 *			       by the controller.
 * @nor:	pointer to a 'struct spi_nor'
 * @pp:		pointer to the page program command settings to be checked
 *
 * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	/* Start from a 1-1-1 template; bus widths are patched from proto. */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
					  SPI_MEM_OP_ADDR(3, 0, 1),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(0, NULL, 1));

	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
	op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}
3000
3001
3002
3003
3004
3005
3006
3007
3008 static void
3009 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
3010 {
3011 struct spi_nor_flash_parameter *params = &nor->params;
3012 unsigned int cap;
3013
3014
3015 *hwcaps &= ~SNOR_HWCAPS_DTR;
3016
3017
3018 *hwcaps &= ~SNOR_HWCAPS_X_X_X;
3019
3020 for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
3021 int rdidx, ppidx;
3022
3023 if (!(*hwcaps & BIT(cap)))
3024 continue;
3025
3026 rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
3027 if (rdidx >= 0 &&
3028 spi_nor_spimem_check_readop(nor, ¶ms->reads[rdidx]))
3029 *hwcaps &= ~BIT(cap);
3030
3031 ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
3032 if (ppidx < 0)
3033 continue;
3034
3035 if (spi_nor_spimem_check_pp(nor,
3036 ¶ms->page_programs[ppidx]))
3037 *hwcaps &= ~BIT(cap);
3038 }
3039 }
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054 static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
3055 size_t len, void *buf)
3056 {
3057 void *dma_safe_buf;
3058 int ret;
3059
3060 dma_safe_buf = kmalloc(len, GFP_KERNEL);
3061 if (!dma_safe_buf)
3062 return -ENOMEM;
3063
3064 ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
3065 memcpy(buf, dma_safe_buf, len);
3066 kfree(dma_safe_buf);
3067
3068 return ret;
3069 }
3070
3071
3072
3073 static void
3074 spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
3075 u16 half,
3076 enum spi_nor_protocol proto)
3077 {
3078 read->num_mode_clocks = (half >> 5) & 0x07;
3079 read->num_wait_states = (half >> 0) & 0x1f;
3080 read->opcode = (half >> 8) & 0xff;
3081 read->proto = proto;
3082 }
3083
struct sfdp_bfpt_read {
	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
	u32 hwcaps;

	/*
	 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
	 * whether the Fast Read x-y-z command is supported.
	 */
	u32 supported_dword;
	u32 supported_bit;

	/*
	 * The half-word at offset <settings_shift> in <settings_dword> BFPT
	 * DWORD encodes the op code, the number of mode clocks and the
	 * number of wait states to be used by Fast Read x-y-z command.
	 */
	u32 settings_dword;
	u32 settings_shift;

	/* The SPI protocol for this Fast Read x-y-z command. */
	enum spi_nor_protocol proto;
};
3106
static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
	/* Fast Read 1-1-2 */
	{
		SNOR_HWCAPS_READ_1_1_2,
		BFPT_DWORD(1), BIT(16),	/* Supported bit */
		BFPT_DWORD(4), 0,	/* Settings */
		SNOR_PROTO_1_1_2,
	},

	/* Fast Read 1-2-2 */
	{
		SNOR_HWCAPS_READ_1_2_2,
		BFPT_DWORD(1), BIT(20),	/* Supported bit */
		BFPT_DWORD(4), 16,	/* Settings */
		SNOR_PROTO_1_2_2,
	},

	/* Fast Read 2-2-2 */
	{
		SNOR_HWCAPS_READ_2_2_2,
		BFPT_DWORD(5), BIT(0),	/* Supported bit */
		BFPT_DWORD(6), 16,	/* Settings */
		SNOR_PROTO_2_2_2,
	},

	/* Fast Read 1-1-4 */
	{
		SNOR_HWCAPS_READ_1_1_4,
		BFPT_DWORD(1), BIT(22),	/* Supported bit */
		BFPT_DWORD(3), 16,	/* Settings */
		SNOR_PROTO_1_1_4,
	},

	/* Fast Read 1-4-4 */
	{
		SNOR_HWCAPS_READ_1_4_4,
		BFPT_DWORD(1), BIT(21),	/* Supported bit */
		BFPT_DWORD(3), 0,	/* Settings */
		SNOR_PROTO_1_4_4,
	},

	/* Fast Read 4-4-4 */
	{
		SNOR_HWCAPS_READ_4_4_4,
		BFPT_DWORD(5), BIT(4),	/* Supported bit */
		BFPT_DWORD(7), 16,	/* Settings */
		SNOR_PROTO_4_4_4,
	},
};
3156
struct sfdp_bfpt_erase {
	/*
	 * The half-word at offset <shift> in DWORD <dword> encodes the
	 * op code and erase sector size to be used by Sector Erase commands.
	 */
	u32 dword;
	u32 shift;
};
3165
static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
	/* Erase Type 1 in BFPT DWORD8 bits[15:0] */
	{BFPT_DWORD(8), 0},

	/* Erase Type 2 in BFPT DWORD8 bits[31:16] */
	{BFPT_DWORD(8), 16},

	/* Erase Type 3 in BFPT DWORD9 bits[15:0] */
	{BFPT_DWORD(9), 0},

	/* Erase Type 4 in BFPT DWORD9 bits[31:16] */
	{BFPT_DWORD(9), 16},
};
3179
3180
3181
3182
3183
3184
3185
3186 static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
3187 u32 size, u8 opcode)
3188 {
3189 erase->size = size;
3190 erase->opcode = opcode;
3191
3192 erase->size_shift = ffs(erase->size) - 1;
3193 erase->size_mask = (1 << erase->size_shift) - 1;
3194 }
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209 static void
3210 spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
3211 u32 size, u8 opcode, u8 i)
3212 {
3213 erase->idx = i;
3214 spi_nor_set_erase_type(erase, size, opcode);
3215 }
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228 static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
3229 {
3230 const struct spi_nor_erase_type *left = l, *right = r;
3231
3232 return left->size - right->size;
3233 }
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246 static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
3247 {
3248 struct spi_nor_erase_type *erase_type = map->erase_type;
3249 int i;
3250 u8 sorted_erase_mask = 0;
3251
3252 if (!erase_mask)
3253 return 0;
3254
3255
3256 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3257 if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
3258 sorted_erase_mask |= BIT(i);
3259
3260 return sorted_erase_mask;
3261 }
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275 static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
3276 {
3277 struct spi_nor_erase_region *region = map->regions;
3278 u8 region_erase_mask, sorted_erase_mask;
3279
3280 while (region) {
3281 region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
3282
3283 sorted_erase_mask = spi_nor_sort_erase_mask(map,
3284 region_erase_mask);
3285
3286
3287 region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
3288 sorted_erase_mask;
3289
3290 region = spi_nor_region_next(region);
3291 }
3292 }
3293
3294
3295
3296
3297
3298
3299
3300
3301 static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
3302 u8 erase_mask, u64 flash_size)
3303 {
3304
3305 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
3306 SNOR_LAST_REGION;
3307 map->uniform_region.size = flash_size;
3308 map->regions = &map->uniform_region;
3309 map->uniform_erase_type = erase_mask;
3310 }
3311
3312 static int
3313 spi_nor_post_bfpt_fixups(struct spi_nor *nor,
3314 const struct sfdp_parameter_header *bfpt_header,
3315 const struct sfdp_bfpt *bfpt,
3316 struct spi_nor_flash_parameter *params)
3317 {
3318 if (nor->info->fixups && nor->info->fixups->post_bfpt)
3319 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
3320 params);
3321
3322 return 0;
3323 }
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355 static int spi_nor_parse_bfpt(struct spi_nor *nor,
3356 const struct sfdp_parameter_header *bfpt_header,
3357 struct spi_nor_flash_parameter *params)
3358 {
3359 struct spi_nor_erase_map *map = ¶ms->erase_map;
3360 struct spi_nor_erase_type *erase_type = map->erase_type;
3361 struct sfdp_bfpt bfpt;
3362 size_t len;
3363 int i, cmd, err;
3364 u32 addr;
3365 u16 half;
3366 u8 erase_mask;
3367
3368
3369 if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
3370 return -EINVAL;
3371
3372
3373 len = min_t(size_t, sizeof(bfpt),
3374 bfpt_header->length * sizeof(u32));
3375 addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
3376 memset(&bfpt, 0, sizeof(bfpt));
3377 err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
3378 if (err < 0)
3379 return err;
3380
3381
3382 for (i = 0; i < BFPT_DWORD_MAX; i++)
3383 bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
3384
3385
3386 switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
3387 case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
3388 nor->addr_width = 3;
3389 break;
3390
3391 case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
3392 nor->addr_width = 4;
3393 break;
3394
3395 default:
3396 break;
3397 }
3398
3399
3400 params->size = bfpt.dwords[BFPT_DWORD(2)];
3401 if (params->size & BIT(31)) {
3402 params->size &= ~BIT(31);
3403
3404
3405
3406
3407
3408
3409 if (params->size > 63)
3410 return -EINVAL;
3411
3412 params->size = 1ULL << params->size;
3413 } else {
3414 params->size++;
3415 }
3416 params->size >>= 3;
3417
3418
3419 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
3420 const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
3421 struct spi_nor_read_command *read;
3422
3423 if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
3424 params->hwcaps.mask &= ~rd->hwcaps;
3425 continue;
3426 }
3427
3428 params->hwcaps.mask |= rd->hwcaps;
3429 cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
3430 read = ¶ms->reads[cmd];
3431 half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
3432 spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
3433 }
3434
3435
3436
3437
3438
3439 erase_mask = 0;
3440 memset(¶ms->erase_map, 0, sizeof(params->erase_map));
3441 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
3442 const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
3443 u32 erasesize;
3444 u8 opcode;
3445
3446 half = bfpt.dwords[er->dword] >> er->shift;
3447 erasesize = half & 0xff;
3448
3449
3450 if (!erasesize)
3451 continue;
3452
3453 erasesize = 1U << erasesize;
3454 opcode = (half >> 8) & 0xff;
3455 erase_mask |= BIT(i);
3456 spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
3457 opcode, i);
3458 }
3459 spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
3460
3461
3462
3463
3464 sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
3465 spi_nor_map_cmp_erase_type, NULL);
3466
3467
3468
3469
3470
3471 spi_nor_regions_sort_erase_types(map);
3472 map->uniform_erase_type = map->uniform_region.offset &
3473 SNOR_ERASE_TYPE_MASK;
3474
3475
3476 if (bfpt_header->length < BFPT_DWORD_MAX)
3477 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
3478 params);
3479
3480
3481 params->page_size = bfpt.dwords[BFPT_DWORD(11)];
3482 params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
3483 params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
3484 params->page_size = 1U << params->page_size;
3485
3486
3487 switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
3488 case BFPT_DWORD15_QER_NONE:
3489 params->quad_enable = NULL;
3490 break;
3491
3492 case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
3493 case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
3494 params->quad_enable = spansion_no_read_cr_quad_enable;
3495 break;
3496
3497 case BFPT_DWORD15_QER_SR1_BIT6:
3498 params->quad_enable = macronix_quad_enable;
3499 break;
3500
3501 case BFPT_DWORD15_QER_SR2_BIT7:
3502 params->quad_enable = sr2_bit7_quad_enable;
3503 break;
3504
3505 case BFPT_DWORD15_QER_SR2_BIT1:
3506 params->quad_enable = spansion_read_cr_quad_enable;
3507 break;
3508
3509 default:
3510 return -EINVAL;
3511 }
3512
3513 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
3514 }
3515
/*
 * Sector Map Parameter Table (SMPT) field extractors.
 *
 * The SMPT is a sequence of DWORD descriptors: configuration-detection
 * command descriptors first (SMPT_CMD_*), then sector map descriptors
 * (SMPT_MAP_*). The SMPT_DESC_* bits tell the two kinds apart and mark
 * the last descriptor (see spi_nor_get_map_in_use()).
 */
#define SMPT_CMD_ADDRESS_LEN_MASK		GENMASK(23, 22)
#define SMPT_CMD_ADDRESS_LEN_0			(0x0UL << 22)
#define SMPT_CMD_ADDRESS_LEN_3			(0x1UL << 22)
#define SMPT_CMD_ADDRESS_LEN_4			(0x2UL << 22)
#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT	(0x3UL << 22)

#define SMPT_CMD_READ_DUMMY_MASK		GENMASK(19, 16)
#define SMPT_CMD_READ_DUMMY_SHIFT		16
#define SMPT_CMD_READ_DUMMY(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
#define SMPT_CMD_READ_DUMMY_IS_VARIABLE		0xfUL

#define SMPT_CMD_READ_DATA_MASK			GENMASK(31, 24)
#define SMPT_CMD_READ_DATA_SHIFT		24
#define SMPT_CMD_READ_DATA(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)

#define SMPT_CMD_OPCODE_MASK			GENMASK(15, 8)
#define SMPT_CMD_OPCODE_SHIFT			8
#define SMPT_CMD_OPCODE(_cmd) \
	(((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)

/* The region count field is encoded as (count - 1), hence the "+ 1". */
#define SMPT_MAP_REGION_COUNT_MASK		GENMASK(23, 16)
#define SMPT_MAP_REGION_COUNT_SHIFT		16
#define SMPT_MAP_REGION_COUNT(_header) \
	((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
	  SMPT_MAP_REGION_COUNT_SHIFT) + 1)

#define SMPT_MAP_ID_MASK			GENMASK(15, 8)
#define SMPT_MAP_ID_SHIFT			8
#define SMPT_MAP_ID(_header) \
	(((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)

/* Region size is stored in units of 256 bytes, minus one. */
#define SMPT_MAP_REGION_SIZE_MASK		GENMASK(31, 8)
#define SMPT_MAP_REGION_SIZE_SHIFT		8
#define SMPT_MAP_REGION_SIZE(_region) \
	(((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
	   SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)

#define SMPT_MAP_REGION_ERASE_TYPE_MASK		GENMASK(3, 0)
#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
	((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)

#define SMPT_DESC_TYPE_MAP			BIT(1)
#define SMPT_DESC_END				BIT(0)
3561
3562
3563
3564
3565
3566
3567
3568 static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
3569 {
3570 switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
3571 case SMPT_CMD_ADDRESS_LEN_0:
3572 return 0;
3573 case SMPT_CMD_ADDRESS_LEN_3:
3574 return 3;
3575 case SMPT_CMD_ADDRESS_LEN_4:
3576 return 4;
3577 case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
3578
3579 default:
3580 return nor->addr_width;
3581 }
3582 }
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592 static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
3593 {
3594 u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
3595
3596 if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
3597 return nor->read_dummy;
3598 return read_dummy;
3599 }
3600
3601
3602
3603
3604
3605
3606
3607
3608
/*
 * spi_nor_get_map_in_use() - get the configuration map in use.
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt:	pointer to the sector map parameter table
 * @smpt_len:	sector map parameter table length
 *
 * Runs each configuration-detection command found at the start of the SMPT,
 * folds the returned bits into a map ID, then walks the map descriptors
 * looking for the one whose ID matches.
 *
 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
 */
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
					 u8 smpt_len)
{
	const u32 *ret;
	u8 *buf;
	u32 addr;
	int err;
	u8 i;
	u8 addr_width, read_opcode, read_dummy;
	u8 read_data_mask, map_id;

	/* kmalloc() of a single byte: the read target must be DMA-safe. */
	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* Save the current read settings; they are clobbered below. */
	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;
	read_opcode = nor->read_opcode;

	map_id = 0;
	/* Determine if there are any optional Detection Command Descriptors */
	for (i = 0; i < smpt_len; i += 2) {
		if (smpt[i] & SMPT_DESC_TYPE_MAP)
			break;

		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
		addr = smpt[i + 1];

		err = spi_nor_read_raw(nor, addr, 1, buf);
		if (err) {
			ret = ERR_PTR(err);
			goto out;
		}

		/*
		 * Build an index value that is used to select the Sector Map
		 * Configuration that is currently in use.
		 */
		map_id = map_id << 1 | !!(*buf & read_data_mask);
	}

	/*
	 * If command descriptors are provided, they always precede map
	 * descriptors in the table, so there is no need to restart the
	 * iteration over the smpt array: find the matching configuration map.
	 */
	ret = ERR_PTR(-EINVAL);
	while (i < smpt_len) {
		if (SMPT_MAP_ID(smpt[i]) == map_id) {
			ret = smpt + i;
			break;
		}

		/*
		 * If there are no more configuration map descriptors and no
		 * configuration ID matched the configuration identifier, the
		 * sector address map is unknown.
		 */
		if (smpt[i] & SMPT_DESC_END)
			break;

		/* Increment the table index to the next map. */
		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
	}

	/* fall through */
out:
	kfree(buf);
	/* Restore the flash's regular read settings. */
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;
	nor->read_opcode = read_opcode;
	return ret;
}
3688
3689
3690
3691
3692
3693
3694
3695 static void
3696 spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
3697 const struct spi_nor_erase_type *erase,
3698 const u8 erase_type)
3699 {
3700 int i;
3701
3702 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3703 if (!(erase_type & BIT(i)))
3704 continue;
3705 if (region->size & erase[i].size_mask) {
3706 spi_nor_region_mark_overlay(region);
3707 return;
3708 }
3709 }
3710 }
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721 static int
3722 spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
3723 struct spi_nor_flash_parameter *params,
3724 const u32 *smpt)
3725 {
3726 struct spi_nor_erase_map *map = ¶ms->erase_map;
3727 struct spi_nor_erase_type *erase = map->erase_type;
3728 struct spi_nor_erase_region *region;
3729 u64 offset;
3730 u32 region_count;
3731 int i, j;
3732 u8 uniform_erase_type, save_uniform_erase_type;
3733 u8 erase_type, regions_erase_type;
3734
3735 region_count = SMPT_MAP_REGION_COUNT(*smpt);
3736
3737
3738
3739
3740 region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
3741 GFP_KERNEL);
3742 if (!region)
3743 return -ENOMEM;
3744 map->regions = region;
3745
3746 uniform_erase_type = 0xff;
3747 regions_erase_type = 0;
3748 offset = 0;
3749
3750 for (i = 0; i < region_count; i++) {
3751 j = i + 1;
3752 region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
3753 erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
3754 region[i].offset = offset | erase_type;
3755
3756 spi_nor_region_check_overlay(®ion[i], erase, erase_type);
3757
3758
3759
3760
3761
3762 uniform_erase_type &= erase_type;
3763
3764
3765
3766
3767
3768 regions_erase_type |= erase_type;
3769
3770 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
3771 region[i].size;
3772 }
3773
3774 save_uniform_erase_type = map->uniform_erase_type;
3775 map->uniform_erase_type = spi_nor_sort_erase_mask(map,
3776 uniform_erase_type);
3777
3778 if (!regions_erase_type) {
3779
3780
3781
3782
3783 map->uniform_erase_type = save_uniform_erase_type;
3784 return -EINVAL;
3785 }
3786
3787
3788
3789
3790
3791
3792 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3793 if (!(regions_erase_type & BIT(erase[i].idx)))
3794 spi_nor_set_erase_type(&erase[i], 0, 0xFF);
3795
3796 spi_nor_region_mark_end(®ion[i - 1]);
3797
3798 return 0;
3799 }
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814 static int spi_nor_parse_smpt(struct spi_nor *nor,
3815 const struct sfdp_parameter_header *smpt_header,
3816 struct spi_nor_flash_parameter *params)
3817 {
3818 const u32 *sector_map;
3819 u32 *smpt;
3820 size_t len;
3821 u32 addr;
3822 int i, ret;
3823
3824
3825 len = smpt_header->length * sizeof(*smpt);
3826 smpt = kmalloc(len, GFP_KERNEL);
3827 if (!smpt)
3828 return -ENOMEM;
3829
3830 addr = SFDP_PARAM_HEADER_PTP(smpt_header);
3831 ret = spi_nor_read_sfdp(nor, addr, len, smpt);
3832 if (ret)
3833 goto out;
3834
3835
3836 for (i = 0; i < smpt_header->length; i++)
3837 smpt[i] = le32_to_cpu(smpt[i]);
3838
3839 sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
3840 if (IS_ERR(sector_map)) {
3841 ret = PTR_ERR(sector_map);
3842 goto out;
3843 }
3844
3845 ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
3846 if (ret)
3847 goto out;
3848
3849 spi_nor_regions_sort_erase_types(¶ms->erase_map);
3850
3851 out:
3852 kfree(smpt);
3853 return ret;
3854 }
3855
#define SFDP_4BAIT_DWORD_MAX	2

/* Associates a hardware capability with a 4BAIT DWORD1 "supported" bit. */
struct sfdp_4bait {
	/* The hardware capability. */
	u32 hwcaps;

	/*
	 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
	 * the associated 4-byte address op code is supported.
	 */
	u32 supported_bit;
};
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878 static int spi_nor_parse_4bait(struct spi_nor *nor,
3879 const struct sfdp_parameter_header *param_header,
3880 struct spi_nor_flash_parameter *params)
3881 {
3882 static const struct sfdp_4bait reads[] = {
3883 { SNOR_HWCAPS_READ, BIT(0) },
3884 { SNOR_HWCAPS_READ_FAST, BIT(1) },
3885 { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
3886 { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
3887 { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
3888 { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
3889 { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
3890 { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
3891 { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
3892 };
3893 static const struct sfdp_4bait programs[] = {
3894 { SNOR_HWCAPS_PP, BIT(6) },
3895 { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
3896 { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
3897 };
3898 static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
3899 { 0u , BIT(9) },
3900 { 0u , BIT(10) },
3901 { 0u , BIT(11) },
3902 { 0u , BIT(12) },
3903 };
3904 struct spi_nor_pp_command *params_pp = params->page_programs;
3905 struct spi_nor_erase_map *map = ¶ms->erase_map;
3906 struct spi_nor_erase_type *erase_type = map->erase_type;
3907 u32 *dwords;
3908 size_t len;
3909 u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
3910 int i, ret;
3911
3912 if (param_header->major != SFDP_JESD216_MAJOR ||
3913 param_header->length < SFDP_4BAIT_DWORD_MAX)
3914 return -EINVAL;
3915
3916
3917 len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
3918
3919
3920 dwords = kmalloc(len, GFP_KERNEL);
3921 if (!dwords)
3922 return -ENOMEM;
3923
3924 addr = SFDP_PARAM_HEADER_PTP(param_header);
3925 ret = spi_nor_read_sfdp(nor, addr, len, dwords);
3926 if (ret)
3927 goto out;
3928
3929
3930 for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
3931 dwords[i] = le32_to_cpu(dwords[i]);
3932
3933
3934
3935
3936
3937 discard_hwcaps = 0;
3938 read_hwcaps = 0;
3939 for (i = 0; i < ARRAY_SIZE(reads); i++) {
3940 const struct sfdp_4bait *read = &reads[i];
3941
3942 discard_hwcaps |= read->hwcaps;
3943 if ((params->hwcaps.mask & read->hwcaps) &&
3944 (dwords[0] & read->supported_bit))
3945 read_hwcaps |= read->hwcaps;
3946 }
3947
3948
3949
3950
3951
3952 pp_hwcaps = 0;
3953 for (i = 0; i < ARRAY_SIZE(programs); i++) {
3954 const struct sfdp_4bait *program = &programs[i];
3955
3956
3957
3958
3959
3960
3961
3962 discard_hwcaps |= program->hwcaps;
3963 if (dwords[0] & program->supported_bit)
3964 pp_hwcaps |= program->hwcaps;
3965 }
3966
3967
3968
3969
3970
3971 erase_mask = 0;
3972 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3973 const struct sfdp_4bait *erase = &erases[i];
3974
3975 if (dwords[0] & erase->supported_bit)
3976 erase_mask |= BIT(i);
3977 }
3978
3979
3980 erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
3981
3982
3983
3984
3985
3986
3987 if (!read_hwcaps || !pp_hwcaps || !erase_mask)
3988 goto out;
3989
3990
3991
3992
3993
3994 params->hwcaps.mask &= ~discard_hwcaps;
3995 params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
3996
3997
3998 for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
3999 struct spi_nor_read_command *read_cmd = ¶ms->reads[i];
4000
4001 read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
4002 }
4003
4004
4005 if (pp_hwcaps & SNOR_HWCAPS_PP)
4006 spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP],
4007 SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
4008 if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
4009 spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_1_1_4],
4010 SPINOR_OP_PP_1_1_4_4B,
4011 SNOR_PROTO_1_1_4);
4012 if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
4013 spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_1_4_4],
4014 SPINOR_OP_PP_1_4_4_4B,
4015 SNOR_PROTO_1_4_4);
4016
4017 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
4018 if (erase_mask & BIT(i))
4019 erase_type[i].opcode = (dwords[1] >>
4020 erase_type[i].idx * 8) & 0xFF;
4021 else
4022 spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
4023 }
4024
4025
4026
4027
4028
4029
4030
4031
4032 nor->addr_width = 4;
4033 nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
4034
4035
4036 out:
4037 kfree(dwords);
4038 return ret;
4039 }
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
/**
 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to the 'struct spi_nor_flash_parameter' to be filled
 *		with the parsed SFDP data
 *
 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
 * specification. Parsing them lets us learn at run time most parameters
 * needed to perform basic SPI flash operations such as Fast Read, Page
 * Program or Sector Erase commands.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_sfdp(struct spi_nor *nor,
			      struct spi_nor_flash_parameter *params)
{
	const struct sfdp_parameter_header *param_header, *bfpt_header;
	struct sfdp_parameter_header *param_headers = NULL;
	struct sfdp_header header;
	struct device *dev = nor->dev;
	size_t psize;
	int i, err;

	/* Get the SFDP header. */
	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
	if (err < 0)
		return err;

	/* Check the SFDP signature and major version. */
	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
	    header.major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Verify that the first and only mandatory parameter header is a
	 * Basic Flash Parameter Table header as specified in JESD216.
	 */
	bfpt_header = &header.bfpt_header;
	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
	    bfpt_header->major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Read all the optional parameter headers in one go. They are parsed
	 * twice below: first to find the latest revision of the Basic Flash
	 * Parameter Table, then to handle the supported optional tables.
	 * kmalloc() (not devm_*) is used on purpose: the headers are only
	 * needed inside this function and are freed before returning.
	 */
	if (header.nph) {
		psize = header.nph * sizeof(*param_headers);

		param_headers = kmalloc(psize, GFP_KERNEL);
		if (!param_headers)
			return -ENOMEM;

		err = spi_nor_read_sfdp(nor, sizeof(header),
					psize, param_headers);
		if (err < 0) {
			dev_err(dev, "failed to read SFDP parameter headers\n");
			goto exit;
		}
	}

	/*
	 * Pass 1: check the other parameter headers for a more recent
	 * revision of the Basic Flash Parameter Table than the mandatory one.
	 */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
		    param_header->major == SFDP_JESD216_MAJOR &&
		    (param_header->minor > bfpt_header->minor ||
		     (param_header->minor == bfpt_header->minor &&
		      param_header->length > bfpt_header->length)))
			bfpt_header = param_header;
	}

	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
	if (err)
		goto exit;

	/* Pass 2: parse the optional parameter tables we know about. */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		switch (SFDP_PARAM_HEADER_ID(param_header)) {
		case SFDP_SECTOR_MAP_ID:
			err = spi_nor_parse_smpt(nor, param_header, params);
			break;

		case SFDP_4BAIT_ID:
			err = spi_nor_parse_4bait(nor, param_header, params);
			break;

		default:
			break;
		}

		if (err) {
			dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
				 SFDP_PARAM_HEADER_ID(param_header));
			/*
			 * Don't throw away everything extracted so far when an
			 * optional table parser fails: each optional parser is
			 * responsible for rolling back to the previously known
			 * spi_nor data, so the error is cleared here.
			 */
			err = 0;
		}
	}

exit:
	kfree(param_headers);
	return err;
}
4163
4164 static int spi_nor_select_read(struct spi_nor *nor,
4165 u32 shared_hwcaps)
4166 {
4167 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
4168 const struct spi_nor_read_command *read;
4169
4170 if (best_match < 0)
4171 return -EINVAL;
4172
4173 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
4174 if (cmd < 0)
4175 return -EINVAL;
4176
4177 read = &nor->params.reads[cmd];
4178 nor->read_opcode = read->opcode;
4179 nor->read_proto = read->proto;
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191 nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
4192 return 0;
4193 }
4194
4195 static int spi_nor_select_pp(struct spi_nor *nor,
4196 u32 shared_hwcaps)
4197 {
4198 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
4199 const struct spi_nor_pp_command *pp;
4200
4201 if (best_match < 0)
4202 return -EINVAL;
4203
4204 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
4205 if (cmd < 0)
4206 return -EINVAL;
4207
4208 pp = &nor->params.page_programs[cmd];
4209 nor->program_opcode = pp->opcode;
4210 nor->write_proto = pp->proto;
4211 return 0;
4212 }
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226 static const struct spi_nor_erase_type *
4227 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
4228 const u32 wanted_size)
4229 {
4230 const struct spi_nor_erase_type *tested_erase, *erase = NULL;
4231 int i;
4232 u8 uniform_erase_type = map->uniform_erase_type;
4233
4234 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
4235 if (!(uniform_erase_type & BIT(i)))
4236 continue;
4237
4238 tested_erase = &map->erase_type[i];
4239
4240
4241
4242
4243
4244 if (tested_erase->size == wanted_size) {
4245 erase = tested_erase;
4246 break;
4247 }
4248
4249
4250
4251
4252
4253 if (!erase && tested_erase->size)
4254 erase = tested_erase;
4255
4256 }
4257
4258 if (!erase)
4259 return NULL;
4260
4261
4262 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
4263 map->uniform_erase_type |= BIT(erase - map->erase_type);
4264 return erase;
4265 }
4266
4267 static int spi_nor_select_erase(struct spi_nor *nor)
4268 {
4269 struct spi_nor_erase_map *map = &nor->params.erase_map;
4270 const struct spi_nor_erase_type *erase = NULL;
4271 struct mtd_info *mtd = &nor->mtd;
4272 u32 wanted_size = nor->info->sector_size;
4273 int i;
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
4284
4285 wanted_size = 4096u;
4286 #endif
4287
4288 if (spi_nor_has_uniform_erase(nor)) {
4289 erase = spi_nor_select_uniform_erase(map, wanted_size);
4290 if (!erase)
4291 return -EINVAL;
4292 nor->erase_opcode = erase->opcode;
4293 mtd->erasesize = erase->size;
4294 return 0;
4295 }
4296
4297
4298
4299
4300
4301 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
4302 if (map->erase_type[i].size) {
4303 erase = &map->erase_type[i];
4304 break;
4305 }
4306 }
4307
4308 if (!erase)
4309 return -EINVAL;
4310
4311 mtd->erasesize = erase->size;
4312 return 0;
4313 }
4314
4315 static int spi_nor_default_setup(struct spi_nor *nor,
4316 const struct spi_nor_hwcaps *hwcaps)
4317 {
4318 struct spi_nor_flash_parameter *params = &nor->params;
4319 u32 ignored_mask, shared_mask;
4320 int err;
4321
4322
4323
4324
4325
4326 shared_mask = hwcaps->mask & params->hwcaps.mask;
4327
4328 if (nor->spimem) {
4329
4330
4331
4332
4333
4334 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
4335 } else {
4336
4337
4338
4339
4340
4341 ignored_mask = SNOR_HWCAPS_X_X_X;
4342 if (shared_mask & ignored_mask) {
4343 dev_dbg(nor->dev,
4344 "SPI n-n-n protocols are not supported.\n");
4345 shared_mask &= ~ignored_mask;
4346 }
4347 }
4348
4349
4350 err = spi_nor_select_read(nor, shared_mask);
4351 if (err) {
4352 dev_err(nor->dev,
4353 "can't select read settings supported by both the SPI controller and memory.\n");
4354 return err;
4355 }
4356
4357
4358 err = spi_nor_select_pp(nor, shared_mask);
4359 if (err) {
4360 dev_err(nor->dev,
4361 "can't select write settings supported by both the SPI controller and memory.\n");
4362 return err;
4363 }
4364
4365
4366 err = spi_nor_select_erase(nor);
4367 if (err) {
4368 dev_err(nor->dev,
4369 "can't select erase settings supported by both the SPI controller and memory.\n");
4370 return err;
4371 }
4372
4373 return 0;
4374 }
4375
4376 static int spi_nor_setup(struct spi_nor *nor,
4377 const struct spi_nor_hwcaps *hwcaps)
4378 {
4379 if (!nor->params.setup)
4380 return 0;
4381
4382 return nor->params.setup(nor, hwcaps);
4383 }
4384
4385 static void macronix_set_default_init(struct spi_nor *nor)
4386 {
4387 nor->params.quad_enable = macronix_quad_enable;
4388 nor->params.set_4byte = macronix_set_4byte;
4389 }
4390
4391 static void st_micron_set_default_init(struct spi_nor *nor)
4392 {
4393 nor->flags |= SNOR_F_HAS_LOCK;
4394 nor->params.quad_enable = NULL;
4395 nor->params.set_4byte = st_micron_set_4byte;
4396 }
4397
/* Winbond defaults: only the vendor 4-byte-mode hook is needed. */
static void winbond_set_default_init(struct spi_nor *nor)
{
	nor->params.set_4byte = winbond_set_4byte;
}
4402
4403
4404
4405
4406
4407
4408 static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
4409 {
4410
4411 switch (JEDEC_MFR(nor->info)) {
4412 case SNOR_MFR_MACRONIX:
4413 macronix_set_default_init(nor);
4414 break;
4415
4416 case SNOR_MFR_ST:
4417 case SNOR_MFR_MICRON:
4418 st_micron_set_default_init(nor);
4419 break;
4420
4421 case SNOR_MFR_WINBOND:
4422 winbond_set_default_init(nor);
4423 break;
4424
4425 default:
4426 break;
4427 }
4428
4429 if (nor->info->fixups && nor->info->fixups->default_init)
4430 nor->info->fixups->default_init(nor);
4431 }
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441 static void spi_nor_sfdp_init_params(struct spi_nor *nor)
4442 {
4443 struct spi_nor_flash_parameter sfdp_params;
4444
4445 memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params));
4446
4447 if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
4448 nor->addr_width = 0;
4449 nor->flags &= ~SNOR_F_4B_OPCODES;
4450 } else {
4451 memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
4452 }
4453 }
4454
4455
4456
4457
4458
4459
/*
 * Initialize the flash parameters and settings from the flash_info entry
 * and the device tree: legacy hooks, sizes, (Fast) Read commands, Page
 * Program command and the uniform erase map.
 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = &nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	u8 i, erase_mask;

	/* Initialize legacy flash parameters and settings. */
	params->quad_enable = spansion_quad_enable;
	params->set_4byte = spansion_set_4byte;
	params->setup = spi_nor_default_setup;

	/* Set SPI NOR sizes. */
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	/*
	 * Sector Erase settings. Erase types are registered in ascending
	 * size order, the smallest one at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
4545
4546 static void spansion_post_sfdp_fixups(struct spi_nor *nor)
4547 {
4548 if (nor->params.size <= SZ_16M)
4549 return;
4550
4551 nor->flags |= SNOR_F_4B_OPCODES;
4552
4553 nor->erase_opcode = SPINOR_OP_SE;
4554 nor->mtd.erasesize = nor->info->sector_size;
4555 }
4556
/* Xilinx S3AN parts use a dedicated setup() instead of the default one. */
static void s3an_post_sfdp_fixups(struct spi_nor *nor)
{
	nor->params.setup = s3an_nor_setup;
}
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572 static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
4573 {
4574 switch (JEDEC_MFR(nor->info)) {
4575 case SNOR_MFR_SPANSION:
4576 spansion_post_sfdp_fixups(nor);
4577 break;
4578
4579 default:
4580 break;
4581 }
4582
4583 if (nor->info->flags & SPI_S3AN)
4584 s3an_post_sfdp_fixups(nor);
4585
4586 if (nor->info->fixups && nor->info->fixups->post_sfdp)
4587 nor->info->fixups->post_sfdp(nor);
4588 }
4589
4590
4591
4592
4593
4594
4595
4596
4597 static void spi_nor_late_init_params(struct spi_nor *nor)
4598 {
4599
4600
4601
4602
4603 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params.locking_ops)
4604 nor->params.locking_ops = &stm_locking_ops;
4605 }
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
/*
 * Initialize all flash parameters in a fixed order:
 * 1/ defaults from flash_info and DT,
 * 2/ manufacturer-specific overrides,
 * 3/ SFDP tables (when the part advertises dual/quad read and does not
 *    opt out with SPI_NOR_SKIP_SFDP),
 * 4/ post-SFDP fixups,
 * 5/ late defaults that depend on the choices above.
 */
static void spi_nor_init_params(struct spi_nor *nor)
{
	spi_nor_info_init_params(nor);

	spi_nor_manufacturer_init_params(nor);

	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
		spi_nor_sfdp_init_params(nor);

	spi_nor_post_sfdp_fixups(nor);

	spi_nor_late_init_params(nor);
}
4658
4659
4660
4661
4662
4663
4664
4665 static int spi_nor_quad_enable(struct spi_nor *nor)
4666 {
4667 if (!nor->params.quad_enable)
4668 return 0;
4669
4670 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
4671 spi_nor_get_protocol_width(nor->write_proto) == 4))
4672 return 0;
4673
4674 return nor->params.quad_enable(nor);
4675 }
4676
/*
 * Send the SPI flash commands required to get the device into a usable
 * state: clear block protection bits, enable quad mode and, when needed,
 * switch to 4-byte addressing.
 */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	if (nor->clear_sr_bp) {
		/*
		 * Parts using the Spansion quad-enable sequence need the
		 * matching Spansion clear sequence; swap the hook before
		 * calling it.
		 */
		if (nor->params.quad_enable == spansion_quad_enable)
			nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;

		err = nor->clear_sr_bp(nor);
		if (err) {
			dev_err(nor->dev,
				"fail to clear block protection bits\n");
			return err;
		}
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_err(nor->dev, "quad mode not supported\n");
		return err;
	}

	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * The flash stays in stateful 4-byte mode. If the RESET# pin
		 * isn't hooked up properly, or the system otherwise doesn't
		 * send a reset command during boot, an unexpected reboot
		 * (e.g. a crash) can leave the flash in the wrong addressing
		 * mode. Warn the user/system designer about that.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		nor->params.set_4byte(nor, true);
	}

	return 0;
}
4714
4715
4716 static void spi_nor_resume(struct mtd_info *mtd)
4717 {
4718 struct spi_nor *nor = mtd_to_spi_nor(mtd);
4719 struct device *dev = nor->dev;
4720 int ret;
4721
4722
4723 ret = spi_nor_init(nor);
4724 if (ret)
4725 dev_err(dev, "resume() failed\n");
4726 }
4727
4728 void spi_nor_restore(struct spi_nor *nor)
4729 {
4730
4731 if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
4732 nor->flags & SNOR_F_BROKEN_RESET)
4733 nor->params.set_4byte(nor, false);
4734 }
4735 EXPORT_SYMBOL_GPL(spi_nor_restore);
4736
4737 static const struct flash_info *spi_nor_match_id(const char *name)
4738 {
4739 const struct flash_info *id = spi_nor_ids;
4740
4741 while (id->name) {
4742 if (!strcmp(name, id->name))
4743 return id;
4744 id++;
4745 }
4746 return NULL;
4747 }
4748
4749 static int spi_nor_set_addr_width(struct spi_nor *nor)
4750 {
4751 if (nor->addr_width) {
4752
4753 } else if (nor->info->addr_width) {
4754 nor->addr_width = nor->info->addr_width;
4755 } else if (nor->mtd.size > 0x1000000) {
4756
4757 nor->addr_width = 4;
4758 } else {
4759 nor->addr_width = 3;
4760 }
4761
4762 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4763 dev_err(nor->dev, "address width is too large: %u\n",
4764 nor->addr_width);
4765 return -EINVAL;
4766 }
4767
4768
4769 if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
4770 !(nor->flags & SNOR_F_HAS_4BAIT))
4771 spi_nor_set_4byte_opcodes(nor);
4772
4773 return 0;
4774 }
4775
/* Expose the flash part name and JEDEC ID via the mtd debug information. */
static void spi_nor_debugfs_init(struct spi_nor *nor,
				 const struct flash_info *info)
{
	struct mtd_info *mtd = &nor->mtd;

	mtd->dbg.partname = info->name;
	mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
					 info->id_len, info->id);
}
4785
/*
 * Resolve the flash_info entry for this device: try the platform-provided
 * @name first, fall back to JEDEC auto-detection, and when both are
 * available cross-check the named entry against the JEDEC ID.
 *
 * Return: a valid flash_info pointer, or an ERR_PTR() on failure.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_id(name);

	/* Auto-detect if the chip name wasn't specified or wasn't found. */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return ERR_PTR(-ENOENT);

	/*
	 * If the caller specified the name of a flash model that can
	 * normally be detected using JEDEC, verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite the platform ID.
			 * The platform-provided partitions can no longer be
			 * fully trusted, but they are still applied since
			 * some may be marked read-only and that information
			 * should not be lost, even if it's not 100% accurate.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}
4825
/**
 * spi_nor_scan() - identify and initialize a SPI NOR flash
 * @nor:	pointer to the 'struct spi_nor' to fill in
 * @name:	optional flash model name used to look up flash_info; when
 *		NULL the chip is identified by its JEDEC ID
 * @hwcaps:	hardware capabilities supported by the SPI controller
 *
 * Identifies the flash, initializes its parameters, populates the mtd_info
 * fields and callbacks, and sends the commands needed to bring the device
 * into a usable state.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	struct spi_nor_flash_parameter *params = &nor->params;
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * The bounce buffer is needed early to read registers (buffers must
	 * be DMA-able for spi-mem). It is reallocated later by the spi-mem
	 * probe path if nor->page_size turns out to be greater than
	 * PAGE_SIZE after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Set the READY_XSR_RDY flag before any call that may end up in
	 * spi_nor_wait_till_ready(), so the right ready-polling method is
	 * used from the start.
	 */
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	/*
	 * Atmel, SST, Intel/Numonyx and parts flagged with SPI_NOR_HAS_LOCK
	 * get a status-register block-protection clear hook.
	 */
	if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
	    JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
	    JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
	    nor->info->flags & SPI_NOR_HAS_LOCK)
		nor->clear_sr_bp = spi_nor_clear_sr_bp;

	/* Init flash parameters based on flash_info struct and SFDP. */
	spi_nor_init_params(nor);

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_resume = spi_nor_resume;

	if (nor->params.locking_ops) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* SST parts use word programming (AAI) via a dedicated write path. */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
		mtd->_write = spi_nor_write;

	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB)
		nor->flags |= SNOR_F_HAS_SR_TB;
	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = params->page_size;
	mtd->writebufsize = nor->page_size;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize the device. */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
		 (long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
4980
/*
 * spi-mem probe: allocate and identify the flash, then register it with
 * the MTD core (applying any platform-data partitions).
 */
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default; spi_nor_scan() narrows them down to
	 * what the controller and the flash actually support.
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = &spi->dev;
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	spi_mem_set_drvdata(spimem, nor);

	/* Platform-data name takes precedence, then the spi-mem name. */
	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * Many platforms provide two different names in flash_platform_data:
	 * "name" and "type". Quite often name is set to "m25p80" while
	 * "type" carries the real chip name. If that's the case, respect
	 * "type" and ignore "name". A plain "spi-nor" modalias means
	 * auto-detect.
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL;
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	/*
	 * Pages larger than PAGE_SIZE would overflow the bounce buffer
	 * allocated in spi_nor_scan(); reallocate it to the page size in
	 * that (currently hypothetical) case.
	 */
	if (nor->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->page_size;
		devm_kfree(nor->dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(nor->dev,
					      nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}
5045
5046 static int spi_nor_remove(struct spi_mem *spimem)
5047 {
5048 struct spi_nor *nor = spi_mem_get_drvdata(spimem);
5049
5050 spi_nor_restore(nor);
5051
5052
5053 return mtd_device_unregister(&nor->mtd);
5054 }
5055
/* spi-mem shutdown: restore the flash addressing mode before power-off. */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	spi_nor_restore(spi_mem_get_drvdata(spimem));
}
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
/* SPI device ID table for legacy (non-OF) binding. */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Generic modalias for platform devices that don't use DT; the chip
	 * itself is then identified by its JEDEC ID.
	 */
	{"spi-nor"},
	/*
	 * Chip-specific modaliases kept so existing platform data and DTs
	 * that name the exact part keep probing.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},
	/*
	 * Entries kept for boards/DTs that lack a "jedec,spi-nor" fallback
	 * compatible.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Non-JEDEC variants that skip ID auto-detection. */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAM parts. */
	{ "mr25h128" },
	{ "mr25h256" },
	{ "mr25h10" },
	{ "mr25h40" },

	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
5119
/* Device tree match table. */
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatible for any SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F).
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
5129
5130
5131
5132
5133
5134
/* spi-mem driver glue: probe/remove/shutdown plus both match tables. */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
		},
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};
module_spi_mem_driver(spi_nor_driver);
5148
5149 MODULE_LICENSE("GPL v2");
5150 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
5151 MODULE_AUTHOR("Mike Lavender");
5152 MODULE_DESCRIPTION("framework for SPI NOR");