This source file includes the following definitions:
- nandsim_show
- nandsim_debugfs_create
- alloc_device
- free_device
- get_partition_name
- init_nandsim
- free_nandsim
- parse_badblocks
- parse_weakblocks
- erase_error
- parse_weakpages
- write_error
- parse_gravepages
- read_error
- free_lists
- setup_wear_reporting
- update_wear
- get_state_name
- check_command
- get_state_by_command
- accept_addr_byte
- switch_to_ready_state
- find_operation
- put_pages
- get_pages
- read_file
- write_file
- NS_GET_PAGE
- NS_PAGE_BYTE_OFF
- do_read_error
- do_bit_flips
- read_page
- erase_sector
- prog_page
- do_state_action
- switch_state
- ns_nand_read_byte
- ns_nand_write_byte
- ns_nand_write_buf
- ns_nand_read_buf
- ns_exec_op
- ns_attach_chip
- ns_init_module
- ns_cleanup_module
1
2
3
4
5
6
7
8
9
10
11
12
13 #define pr_fmt(fmt) "[nandsim]" fmt
14
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <linux/vmalloc.h>
20 #include <linux/math64.h>
21 #include <linux/slab.h>
22 #include <linux/errno.h>
23 #include <linux/string.h>
24 #include <linux/mtd/mtd.h>
25 #include <linux/mtd/rawnand.h>
26 #include <linux/mtd/nand_bch.h>
27 #include <linux/mtd/partitions.h>
28 #include <linux/delay.h>
29 #include <linux/list.h>
30 #include <linux/random.h>
31 #include <linux/sched.h>
32 #include <linux/sched/mm.h>
33 #include <linux/fs.h>
34 #include <linux/pagemap.h>
35 #include <linux/seq_file.h>
36 #include <linux/debugfs.h>
37
38
39 #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
40 !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
41 !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
42 !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
43 #define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
44 #define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
45 #define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF
46 #define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF
47 #endif
48
49 #ifndef CONFIG_NANDSIM_ACCESS_DELAY
50 #define CONFIG_NANDSIM_ACCESS_DELAY 25
51 #endif
52 #ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
53 #define CONFIG_NANDSIM_PROGRAMM_DELAY 200
54 #endif
55 #ifndef CONFIG_NANDSIM_ERASE_DELAY
56 #define CONFIG_NANDSIM_ERASE_DELAY 2
57 #endif
58 #ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
59 #define CONFIG_NANDSIM_OUTPUT_CYCLE 40
60 #endif
61 #ifndef CONFIG_NANDSIM_INPUT_CYCLE
62 #define CONFIG_NANDSIM_INPUT_CYCLE 50
63 #endif
64 #ifndef CONFIG_NANDSIM_BUS_WIDTH
65 #define CONFIG_NANDSIM_BUS_WIDTH 8
66 #endif
67 #ifndef CONFIG_NANDSIM_DO_DELAYS
68 #define CONFIG_NANDSIM_DO_DELAYS 0
69 #endif
70 #ifndef CONFIG_NANDSIM_LOG
71 #define CONFIG_NANDSIM_LOG 0
72 #endif
73 #ifndef CONFIG_NANDSIM_DBG
74 #define CONFIG_NANDSIM_DBG 0
75 #endif
76 #ifndef CONFIG_NANDSIM_MAX_PARTS
77 #define CONFIG_NANDSIM_MAX_PARTS 32
78 #endif
79
80 static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
81 static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
82 static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
83 static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
84 static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
85 static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
86 static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
87 static uint log = CONFIG_NANDSIM_LOG;
88 static uint dbg = CONFIG_NANDSIM_DBG;
89 static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
90 static unsigned int parts_num;
91 static char *badblocks = NULL;
92 static char *weakblocks = NULL;
93 static char *weakpages = NULL;
94 static unsigned int bitflips = 0;
95 static char *gravepages = NULL;
96 static unsigned int overridesize = 0;
97 static char *cache_file = NULL;
98 static unsigned int bbt;
99 static unsigned int bch;
100 static u_char id_bytes[8] = {
101 [0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
102 [1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
103 [2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
104 [3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
105 [4 ... 7] = 0xFF,
106 };
107
108 module_param_array(id_bytes, byte, NULL, 0400);
109 module_param_named(first_id_byte, id_bytes[0], byte, 0400);
110 module_param_named(second_id_byte, id_bytes[1], byte, 0400);
111 module_param_named(third_id_byte, id_bytes[2], byte, 0400);
112 module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
113 module_param(access_delay, uint, 0400);
114 module_param(programm_delay, uint, 0400);
115 module_param(erase_delay, uint, 0400);
116 module_param(output_cycle, uint, 0400);
117 module_param(input_cycle, uint, 0400);
118 module_param(bus_width, uint, 0400);
119 module_param(do_delays, uint, 0400);
120 module_param(log, uint, 0400);
121 module_param(dbg, uint, 0400);
122 module_param_array(parts, ulong, &parts_num, 0400);
123 module_param(badblocks, charp, 0400);
124 module_param(weakblocks, charp, 0400);
125 module_param(weakpages, charp, 0400);
126 module_param(bitflips, uint, 0400);
127 module_param(gravepages, charp, 0400);
128 module_param(overridesize, uint, 0400);
129 module_param(cache_file, charp, 0400);
130 module_param(bbt, uint, 0400);
131 module_param(bch, uint, 0400);
132
133 MODULE_PARM_DESC(id_bytes, "The ID bytes returned by NAND Flash 'read ID' command");
134 MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
135 MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
136 MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command (obsolete)");
137 MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
138 MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
139 MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
140 MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
141 MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
142 MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
143 MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
144 MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
145 MODULE_PARM_DESC(log, "Perform logging if not zero");
146 MODULE_PARM_DESC(dbg, "Output debug information if not zero");
147 MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
148
149 MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
150 MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
151 " separated by commas e.g. 113:2 means eb 113"
152 " can be erased only twice before failing");
153 MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
154 " separated by commas e.g. 1401:2 means page 1401"
155 " can be written only twice before failing");
156 MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
157 MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
158 " separated by commas e.g. 1401:2 means page 1401"
159 " can be read only twice before failing");
160 MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
161 "The size is specified in erase blocks and as the exponent of a power of two"
162 " e.g. 5 means a size of 32 erase blocks");
163 MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
164 MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
165 MODULE_PARM_DESC(bch, "Enable BCH ECC and set how many bits should "
166 "be correctable in 512-byte blocks");
167
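/*
 * Illustrative usage (an assumed example, not part of this file): the
 * simulator is normally loaded with the ID bytes of the device to emulate,
 * e.g. a 256 MiB, 2048-byte page chip split into two partitions with one
 * weak erase block:
 *
 *   modprobe nandsim id_bytes=0x20,0xaa,0x00,0x15 parts=40,60 weakblocks=12:2
 *
 * Any of the other parameters described above can be combined the same way.
 */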
168
169 #define NS_LARGEST_PAGE_SIZE 4096
170
171
172 #define NS_LOG(args...) \
173 do { if (log) pr_debug(" log: " args); } while(0)
174 #define NS_DBG(args...) \
175 do { if (dbg) pr_debug(" debug: " args); } while(0)
176 #define NS_WARN(args...) \
177 do { pr_warn(" warning: " args); } while(0)
178 #define NS_ERR(args...) \
179 do { pr_err(" error: " args); } while(0)
180 #define NS_INFO(args...) \
181 do { pr_info(" " args); } while(0)
182
183
184 #define NS_UDELAY(us) \
185 do { if (do_delays) udelay(us); } while(0)
186 #define NS_MDELAY(us) \
187 do { if (do_delays) mdelay(us); } while(0)
188
189
190 #define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
191
192
193 #define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
194
195
196 #define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
197
198
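/*
 * Pages are stored with their OOB area appended, so the raw offset of the
 * currently addressed byte is row * (page size + OOB size) + column; the
 * OOB variant below simply skips past the data area of that page.
 */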
199 #define NS_RAW_OFFSET(ns) \
200 (((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
201
202
203 #define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
204
205
206 #define STATE_CMD_READ0 0x00000001
207 #define STATE_CMD_READ1 0x00000002
208 #define STATE_CMD_READSTART 0x00000003
209 #define STATE_CMD_PAGEPROG 0x00000004
210 #define STATE_CMD_READOOB 0x00000005
211 #define STATE_CMD_ERASE1 0x00000006
212 #define STATE_CMD_STATUS 0x00000007
213 #define STATE_CMD_SEQIN 0x00000009
214 #define STATE_CMD_READID 0x0000000A
215 #define STATE_CMD_ERASE2 0x0000000B
216 #define STATE_CMD_RESET 0x0000000C
217 #define STATE_CMD_RNDOUT 0x0000000D
218 #define STATE_CMD_RNDOUTSTART 0x0000000E
219 #define STATE_CMD_MASK 0x0000000F
220
221
222 #define STATE_ADDR_PAGE 0x00000010
223 #define STATE_ADDR_SEC 0x00000020
224 #define STATE_ADDR_COLUMN 0x00000030
225 #define STATE_ADDR_ZERO 0x00000040
226 #define STATE_ADDR_MASK 0x00000070
227
228
229 #define STATE_DATAIN 0x00000100
230 #define STATE_DATAIN_MASK 0x00000100
231
232 #define STATE_DATAOUT 0x00001000
233 #define STATE_DATAOUT_ID 0x00002000
234 #define STATE_DATAOUT_STATUS 0x00003000
235 #define STATE_DATAOUT_MASK 0x00007000
236
237
238 #define STATE_READY 0x00000000
239
240
241 #define STATE_UNKNOWN 0x10000000
242
243
244 #define ACTION_CPY 0x00100000
245 #define ACTION_PRGPAGE 0x00200000
246 #define ACTION_SECERASE 0x00300000
247 #define ACTION_ZEROOFF 0x00400000
248 #define ACTION_HALFOFF 0x00500000
249 #define ACTION_OOBOFF 0x00600000
250 #define ACTION_MASK 0x00700000
251
252 #define NS_OPER_NUM 13
253 #define NS_OPER_STATES 6
254
255 #define OPT_ANY 0xFFFFFFFF
256 #define OPT_PAGE512 0x00000002
257 #define OPT_PAGE2048 0x00000008
258 #define OPT_PAGE512_8BIT 0x00000040
259 #define OPT_PAGE4096 0x00000080
260 #define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096)
261 #define OPT_SMALLPAGE (OPT_PAGE512)
262
263
264 #define NS_STATE(x) ((x) & ~ACTION_MASK)
265
266
267
268
269
270
271 #define NS_MAX_PREVSTATES 1
272
273
274 #define NS_MAX_HELD_PAGES 16
275
276
277
278
279 union ns_mem {
280 u_char *byte;
281 uint16_t *word;
282 };
283
284
285
286
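/*
 * The overall state of the simulated chip.  It embeds the nand_chip and
 * nand_controller objects, the partition table built from the 'parts'
 * parameter, the simulated geometry and registers, the state-machine
 * bookkeeping (current, next and previous states) and the page storage,
 * which is either an in-RAM page array or a cache file plus a bounce buffer.
 */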
287 struct nandsim {
288 struct nand_chip chip;
289 struct nand_controller base;
290 struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
291 unsigned int nbparts;
292
293 uint busw;
294 u_char ids[8];
295 uint32_t options;
296 uint32_t state;
297 uint32_t nxstate;
298
299 uint32_t *op;
300 uint32_t pstates[NS_MAX_PREVSTATES];
301 uint16_t npstates;
302 uint16_t stateidx;
303
304
305 union ns_mem *pages;
306
307
308 struct kmem_cache *nand_pages_slab;
309
310
311 union ns_mem buf;
312
313
314 struct {
315 uint64_t totsz;
316 uint32_t secsz;
317 uint pgsz;
318 uint oobsz;
319 uint64_t totszoob;
320 uint pgszoob;
321 uint secszoob;
322 uint pgnum;
323 uint pgsec;
324 uint secshift;
325 uint pgshift;
326 uint pgaddrbytes;
327 uint secaddrbytes;
328 uint idbytes;
329 } geom;
330
331
332 struct {
333 unsigned command;
334 u_char status;
335 uint row;
336 uint column;
337 uint count;
338 uint num;
339 uint off;
340 } regs;
341
342
343 struct {
344 int ce;
345 int cle;
346 int ale;
347 int wp;
348 } lines;
349
350
351 struct file *cfile;
352 unsigned long *pages_written;
353 void *file_buf;
354 struct page *held_pages[NS_MAX_HELD_PAGES];
355 int held_cnt;
356 };
357
358
359
360
361
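/*
 * Table of supported operations.  Each entry lists the chip options it
 * requires and the sequence of states (command, address, data I/O) that
 * makes up the operation, some states carrying an ACTION_* to perform when
 * entered.  find_operation() matches the bytes received so far against
 * these templates.
 */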
362 static struct nandsim_operations {
363 uint32_t reqopts;
364 uint32_t states[NS_OPER_STATES];
365 } ops[NS_OPER_NUM] = {
366
367 {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
368 STATE_DATAOUT, STATE_READY}},
369
370 {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
371 STATE_DATAOUT, STATE_READY}},
372
373 {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
374 STATE_DATAOUT, STATE_READY}},
375
376 {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
377 STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
378
379 {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
380 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
381
382 {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
383 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
384
385 {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
386 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
387
388 {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
389
390 {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
391
392 {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
393
394 {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
395 STATE_DATAOUT, STATE_READY}},
396
397 {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
398 STATE_DATAOUT, STATE_READY}},
399 };
400
401 struct weak_block {
402 struct list_head list;
403 unsigned int erase_block_no;
404 unsigned int max_erases;
405 unsigned int erases_done;
406 };
407
408 static LIST_HEAD(weak_blocks);
409
410 struct weak_page {
411 struct list_head list;
412 unsigned int page_no;
413 unsigned int max_writes;
414 unsigned int writes_done;
415 };
416
417 static LIST_HEAD(weak_pages);
418
419 struct grave_page {
420 struct list_head list;
421 unsigned int page_no;
422 unsigned int max_reads;
423 unsigned int reads_done;
424 };
425
426 static LIST_HEAD(grave_pages);
427
428 static unsigned long *erase_block_wear = NULL;
429 static unsigned int wear_eb_count = 0;
430 static unsigned long total_wear = 0;
431
432
433 static struct mtd_info *nsmtd;
434
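/*
 * debugfs "wear report": compute the total, minimum, maximum and average
 * erase counts over all erase blocks and print a ten-bucket (decile)
 * histogram of their distribution.
 */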
435 static int nandsim_show(struct seq_file *m, void *private)
436 {
437 unsigned long wmin = -1, wmax = 0, avg;
438 unsigned long deciles[10], decile_max[10], tot = 0;
439 unsigned int i;
440
441
442 for (i = 0; i < wear_eb_count; ++i) {
443 unsigned long wear = erase_block_wear[i];
444 if (wear < wmin)
445 wmin = wear;
446 if (wear > wmax)
447 wmax = wear;
448 tot += wear;
449 }
450
451 for (i = 0; i < 9; ++i) {
452 deciles[i] = 0;
453 decile_max[i] = (wmax * (i + 1) + 5) / 10;
454 }
455 deciles[9] = 0;
456 decile_max[9] = wmax;
457 for (i = 0; i < wear_eb_count; ++i) {
458 int d;
459 unsigned long wear = erase_block_wear[i];
460 for (d = 0; d < 10; ++d)
461 if (wear <= decile_max[d]) {
462 deciles[d] += 1;
463 break;
464 }
465 }
466 avg = tot / wear_eb_count;
467
468
469 seq_printf(m, "Total numbers of erases: %lu\n", tot);
470 seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
471 seq_printf(m, "Average number of erases: %lu\n", avg);
472 seq_printf(m, "Maximum number of erases: %lu\n", wmax);
473 seq_printf(m, "Minimum number of erases: %lu\n", wmin);
474 for (i = 0; i < 10; ++i) {
475 unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
476 if (from > decile_max[i])
477 continue;
478 seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
479 from,
480 decile_max[i],
481 deciles[i]);
482 }
483
484 return 0;
485 }
486 DEFINE_SHOW_ATTRIBUTE(nandsim);
487
488
489
490
491
492
493
494
495 static int nandsim_debugfs_create(struct nandsim *dev)
496 {
497 struct dentry *root = nsmtd->dbg.dfs_dir;
498 struct dentry *dent;
499
500
501
502
503
504 if (IS_ERR_OR_NULL(root)) {
505 if (IS_ENABLED(CONFIG_DEBUG_FS) &&
506 !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
507 NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
508 return 0;
509 }
510
511 dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
512 root, dev, &nandsim_fops);
513 if (IS_ERR_OR_NULL(dent)) {
514 NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n");
515 return -1;
516 }
517
518 return 0;
519 }
520
521
522
523
524
525
526
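/*
 * Allocate the page storage.  With a cache file, only a bitmap of written
 * pages and one page-sized bounce buffer are kept in memory; otherwise an
 * array of page pointers is allocated and the pages themselves are taken
 * lazily from a dedicated kmem_cache in prog_page().
 */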
527 static int __init alloc_device(struct nandsim *ns)
528 {
529 struct file *cfile;
530 int i, err;
531
532 if (cache_file) {
533 cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
534 if (IS_ERR(cfile))
535 return PTR_ERR(cfile);
536 if (!(cfile->f_mode & FMODE_CAN_READ)) {
537 NS_ERR("alloc_device: cache file not readable\n");
538 err = -EINVAL;
539 goto err_close;
540 }
541 if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
542 NS_ERR("alloc_device: cache file not writeable\n");
543 err = -EINVAL;
544 goto err_close;
545 }
546 ns->pages_written =
547 vzalloc(array_size(sizeof(unsigned long),
548 BITS_TO_LONGS(ns->geom.pgnum)));
549 if (!ns->pages_written) {
550 NS_ERR("alloc_device: unable to allocate pages written array\n");
551 err = -ENOMEM;
552 goto err_close;
553 }
554 ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
555 if (!ns->file_buf) {
556 NS_ERR("alloc_device: unable to allocate file buf\n");
557 err = -ENOMEM;
558 goto err_free;
559 }
560 ns->cfile = cfile;
561 return 0;
562 }
563
564 ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
565 if (!ns->pages) {
566 NS_ERR("alloc_device: unable to allocate page array\n");
567 return -ENOMEM;
568 }
569 for (i = 0; i < ns->geom.pgnum; i++) {
570 ns->pages[i].byte = NULL;
571 }
572 ns->nand_pages_slab = kmem_cache_create("nandsim",
573 ns->geom.pgszoob, 0, 0, NULL);
574 if (!ns->nand_pages_slab) {
575 NS_ERR("cache_create: unable to create kmem_cache\n");
576 return -ENOMEM;
577 }
578
579 return 0;
580
581 err_free:
582 vfree(ns->pages_written);
583 err_close:
584 filp_close(cfile, NULL);
585 return err;
586 }
587
588
589
590
591 static void free_device(struct nandsim *ns)
592 {
593 int i;
594
595 if (ns->cfile) {
596 kfree(ns->file_buf);
597 vfree(ns->pages_written);
598 filp_close(ns->cfile, NULL);
599 return;
600 }
601
602 if (ns->pages) {
603 for (i = 0; i < ns->geom.pgnum; i++) {
604 if (ns->pages[i].byte)
605 kmem_cache_free(ns->nand_pages_slab,
606 ns->pages[i].byte);
607 }
608 kmem_cache_destroy(ns->nand_pages_slab);
609 vfree(ns->pages);
610 }
611 }
612
613 static char __init *get_partition_name(int i)
614 {
615 return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
616 }
617
618
619
620
621
622
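/*
 * Derive the simulated geometry from the values nand_scan() put into the
 * mtd/nand_chip structures, build the partition table from the 'parts'
 * parameter (any remaining space becomes one extra partition), print a
 * summary and allocate the backing storage plus the internal page buffer.
 */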
623 static int __init init_nandsim(struct mtd_info *mtd)
624 {
625 struct nand_chip *chip = mtd_to_nand(mtd);
626 struct nandsim *ns = nand_get_controller_data(chip);
627 int i, ret = 0;
628 uint64_t remains;
629 uint64_t next_offset;
630
631 if (NS_IS_INITIALIZED(ns)) {
632 NS_ERR("init_nandsim: nandsim is already initialized\n");
633 return -EIO;
634 }
635
636
637 ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
638 ns->geom.totsz = mtd->size;
639 ns->geom.pgsz = mtd->writesize;
640 ns->geom.oobsz = mtd->oobsize;
641 ns->geom.secsz = mtd->erasesize;
642 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
643 ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
644 ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
645 ns->geom.secshift = ffs(ns->geom.secsz) - 1;
646 ns->geom.pgshift = chip->page_shift;
647 ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
648 ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
649 ns->options = 0;
650
651 if (ns->geom.pgsz == 512) {
652 ns->options |= OPT_PAGE512;
653 if (ns->busw == 8)
654 ns->options |= OPT_PAGE512_8BIT;
655 } else if (ns->geom.pgsz == 2048) {
656 ns->options |= OPT_PAGE2048;
657 } else if (ns->geom.pgsz == 4096) {
658 ns->options |= OPT_PAGE4096;
659 } else {
660 NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
661 return -EIO;
662 }
663
664 if (ns->options & OPT_SMALLPAGE) {
665 if (ns->geom.totsz <= (32 << 20)) {
666 ns->geom.pgaddrbytes = 3;
667 ns->geom.secaddrbytes = 2;
668 } else {
669 ns->geom.pgaddrbytes = 4;
670 ns->geom.secaddrbytes = 3;
671 }
672 } else {
673 if (ns->geom.totsz <= (128 << 20)) {
674 ns->geom.pgaddrbytes = 4;
675 ns->geom.secaddrbytes = 2;
676 } else {
677 ns->geom.pgaddrbytes = 5;
678 ns->geom.secaddrbytes = 3;
679 }
680 }
681
682
683 if (parts_num > ARRAY_SIZE(ns->partitions)) {
684 NS_ERR("too many partitions.\n");
685 return -EINVAL;
686 }
687 remains = ns->geom.totsz;
688 next_offset = 0;
689 for (i = 0; i < parts_num; ++i) {
690 uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
691
692 if (!part_sz || part_sz > remains) {
693 NS_ERR("bad partition size.\n");
694 return -EINVAL;
695 }
696 ns->partitions[i].name = get_partition_name(i);
697 if (!ns->partitions[i].name) {
698 NS_ERR("unable to allocate memory.\n");
699 return -ENOMEM;
700 }
701 ns->partitions[i].offset = next_offset;
702 ns->partitions[i].size = part_sz;
703 next_offset += ns->partitions[i].size;
704 remains -= ns->partitions[i].size;
705 }
706 ns->nbparts = parts_num;
707 if (remains) {
708 if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
709 NS_ERR("too many partitions.\n");
710 return -EINVAL;
711 }
712 ns->partitions[i].name = get_partition_name(i);
713 if (!ns->partitions[i].name) {
714 NS_ERR("unable to allocate memory.\n");
715 return -ENOMEM;
716 }
717 ns->partitions[i].offset = next_offset;
718 ns->partitions[i].size = remains;
719 ns->nbparts += 1;
720 }
721
722 if (ns->busw == 16)
723 NS_WARN("16-bit flash support wasn't tested\n");
724
725 printk("flash size: %llu MiB\n",
726 (unsigned long long)ns->geom.totsz >> 20);
727 printk("page size: %u bytes\n", ns->geom.pgsz);
728 printk("OOB area size: %u bytes\n", ns->geom.oobsz);
729 printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
730 printk("pages number: %u\n", ns->geom.pgnum);
731 printk("pages per sector: %u\n", ns->geom.pgsec);
732 printk("bus width: %u\n", ns->busw);
733 printk("bits in sector size: %u\n", ns->geom.secshift);
734 printk("bits in page size: %u\n", ns->geom.pgshift);
735 printk("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
736 printk("flash size with OOB: %llu KiB\n",
737 (unsigned long long)ns->geom.totszoob >> 10);
738 printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
739 printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
740 printk("options: %#x\n", ns->options);
741
742 if ((ret = alloc_device(ns)) != 0)
743 return ret;
744
745
746 ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
747 if (!ns->buf.byte) {
748 NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
749 ns->geom.pgszoob);
750 return -ENOMEM;
751 }
752 memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
753
754 return 0;
755 }
756
757
758
759
760 static void free_nandsim(struct nandsim *ns)
761 {
762 kfree(ns->buf.byte);
763 free_device(ns);
764
765 return;
766 }
767
768 static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
769 {
770 char *w;
771 int zero_ok;
772 unsigned int erase_block_no;
773 loff_t offset;
774
775 if (!badblocks)
776 return 0;
777 w = badblocks;
778 do {
779 zero_ok = (*w == '0' ? 1 : 0);
780 erase_block_no = simple_strtoul(w, &w, 0);
781 if (!zero_ok && !erase_block_no) {
782 NS_ERR("invalid badblocks.\n");
783 return -EINVAL;
784 }
785 offset = (loff_t)erase_block_no * ns->geom.secsz;
786 if (mtd_block_markbad(mtd, offset)) {
787 NS_ERR("invalid badblocks.\n");
788 return -EINVAL;
789 }
790 if (*w == ',')
791 w += 1;
792 } while (*w);
793 return 0;
794 }
795
796 static int parse_weakblocks(void)
797 {
798 char *w;
799 int zero_ok;
800 unsigned int erase_block_no;
801 unsigned int max_erases;
802 struct weak_block *wb;
803
804 if (!weakblocks)
805 return 0;
806 w = weakblocks;
807 do {
808 zero_ok = (*w == '0' ? 1 : 0);
809 erase_block_no = simple_strtoul(w, &w, 0);
810 if (!zero_ok && !erase_block_no) {
811 NS_ERR("invalid weakblocks.\n");
812 return -EINVAL;
813 }
814 max_erases = 3;
815 if (*w == ':') {
816 w += 1;
817 max_erases = simple_strtoul(w, &w, 0);
818 }
819 if (*w == ',')
820 w += 1;
821 wb = kzalloc(sizeof(*wb), GFP_KERNEL);
822 if (!wb) {
823 NS_ERR("unable to allocate memory.\n");
824 return -ENOMEM;
825 }
826 wb->erase_block_no = erase_block_no;
827 wb->max_erases = max_erases;
828 list_add(&wb->list, &weak_blocks);
829 } while (*w);
830 return 0;
831 }
832
833 static int erase_error(unsigned int erase_block_no)
834 {
835 struct weak_block *wb;
836
837 list_for_each_entry(wb, &weak_blocks, list)
838 if (wb->erase_block_no == erase_block_no) {
839 if (wb->erases_done >= wb->max_erases)
840 return 1;
841 wb->erases_done += 1;
842 return 0;
843 }
844 return 0;
845 }
846
847 static int parse_weakpages(void)
848 {
849 char *w;
850 int zero_ok;
851 unsigned int page_no;
852 unsigned int max_writes;
853 struct weak_page *wp;
854
855 if (!weakpages)
856 return 0;
857 w = weakpages;
858 do {
859 zero_ok = (*w == '0' ? 1 : 0);
860 page_no = simple_strtoul(w, &w, 0);
861 if (!zero_ok && !page_no) {
862 NS_ERR("invalid weakpages.\n");
863 return -EINVAL;
864 }
865 max_writes = 3;
866 if (*w == ':') {
867 w += 1;
868 max_writes = simple_strtoul(w, &w, 0);
869 }
870 if (*w == ',')
871 w += 1;
872 wp = kzalloc(sizeof(*wp), GFP_KERNEL);
873 if (!wp) {
874 NS_ERR("unable to allocate memory.\n");
875 return -ENOMEM;
876 }
877 wp->page_no = page_no;
878 wp->max_writes = max_writes;
879 list_add(&wp->list, &weak_pages);
880 } while (*w);
881 return 0;
882 }
883
884 static int write_error(unsigned int page_no)
885 {
886 struct weak_page *wp;
887
888 list_for_each_entry(wp, &weak_pages, list)
889 if (wp->page_no == page_no) {
890 if (wp->writes_done >= wp->max_writes)
891 return 1;
892 wp->writes_done += 1;
893 return 0;
894 }
895 return 0;
896 }
897
898 static int parse_gravepages(void)
899 {
900 char *g;
901 int zero_ok;
902 unsigned int page_no;
903 unsigned int max_reads;
904 struct grave_page *gp;
905
906 if (!gravepages)
907 return 0;
908 g = gravepages;
909 do {
910 zero_ok = (*g == '0' ? 1 : 0);
911 page_no = simple_strtoul(g, &g, 0);
912 if (!zero_ok && !page_no) {
913 NS_ERR("invalid gravepages.\n");
914 return -EINVAL;
915 }
916 max_reads = 3;
917 if (*g == ':') {
918 g += 1;
919 max_reads = simple_strtoul(g, &g, 0);
920 }
921 if (*g == ',')
922 g += 1;
923 gp = kzalloc(sizeof(*gp), GFP_KERNEL);
924 if (!gp) {
925 NS_ERR("unable to allocate memory.\n");
926 return -ENOMEM;
927 }
928 gp->page_no = page_no;
929 gp->max_reads = max_reads;
930 list_add(&gp->list, &grave_pages);
931 } while (*g);
932 return 0;
933 }
934
935 static int read_error(unsigned int page_no)
936 {
937 struct grave_page *gp;
938
939 list_for_each_entry(gp, &grave_pages, list)
940 if (gp->page_no == page_no) {
941 if (gp->reads_done >= gp->max_reads)
942 return 1;
943 gp->reads_done += 1;
944 return 0;
945 }
946 return 0;
947 }
948
949 static void free_lists(void)
950 {
951 struct list_head *pos, *n;
952 list_for_each_safe(pos, n, &weak_blocks) {
953 list_del(pos);
954 kfree(list_entry(pos, struct weak_block, list));
955 }
956 list_for_each_safe(pos, n, &weak_pages) {
957 list_del(pos);
958 kfree(list_entry(pos, struct weak_page, list));
959 }
960 list_for_each_safe(pos, n, &grave_pages) {
961 list_del(pos);
962 kfree(list_entry(pos, struct grave_page, list));
963 }
964 kfree(erase_block_wear);
965 }
966
967 static int setup_wear_reporting(struct mtd_info *mtd)
968 {
969 size_t mem;
970
971 wear_eb_count = div_u64(mtd->size, mtd->erasesize);
972 mem = wear_eb_count * sizeof(unsigned long);
973 if (mem / sizeof(unsigned long) != wear_eb_count) {
974 NS_ERR("Too many erase blocks for wear reporting\n");
975 return -ENOMEM;
976 }
977 erase_block_wear = kzalloc(mem, GFP_KERNEL);
978 if (!erase_block_wear) {
979 NS_ERR("Unable to allocate memory for wear reporting\n");
980 return -ENOMEM;
981 }
982 return 0;
983 }
984
985 static void update_wear(unsigned int erase_block_no)
986 {
987 if (!erase_block_wear)
988 return;
989 total_wear += 1;
990
991
992
993
994 if (total_wear == 0)
995 NS_ERR("Erase counter total overflow\n");
996 erase_block_wear[erase_block_no] += 1;
997 if (erase_block_wear[erase_block_no] == 0)
998 NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
999 }
1000
1001
1002
1003
1004 static char *get_state_name(uint32_t state)
1005 {
1006 switch (NS_STATE(state)) {
1007 case STATE_CMD_READ0:
1008 return "STATE_CMD_READ0";
1009 case STATE_CMD_READ1:
1010 return "STATE_CMD_READ1";
1011 case STATE_CMD_PAGEPROG:
1012 return "STATE_CMD_PAGEPROG";
1013 case STATE_CMD_READOOB:
1014 return "STATE_CMD_READOOB";
1015 case STATE_CMD_READSTART:
1016 return "STATE_CMD_READSTART";
1017 case STATE_CMD_ERASE1:
1018 return "STATE_CMD_ERASE1";
1019 case STATE_CMD_STATUS:
1020 return "STATE_CMD_STATUS";
1021 case STATE_CMD_SEQIN:
1022 return "STATE_CMD_SEQIN";
1023 case STATE_CMD_READID:
1024 return "STATE_CMD_READID";
1025 case STATE_CMD_ERASE2:
1026 return "STATE_CMD_ERASE2";
1027 case STATE_CMD_RESET:
1028 return "STATE_CMD_RESET";
1029 case STATE_CMD_RNDOUT:
1030 return "STATE_CMD_RNDOUT";
1031 case STATE_CMD_RNDOUTSTART:
1032 return "STATE_CMD_RNDOUTSTART";
1033 case STATE_ADDR_PAGE:
1034 return "STATE_ADDR_PAGE";
1035 case STATE_ADDR_SEC:
1036 return "STATE_ADDR_SEC";
1037 case STATE_ADDR_ZERO:
1038 return "STATE_ADDR_ZERO";
1039 case STATE_ADDR_COLUMN:
1040 return "STATE_ADDR_COLUMN";
1041 case STATE_DATAIN:
1042 return "STATE_DATAIN";
1043 case STATE_DATAOUT:
1044 return "STATE_DATAOUT";
1045 case STATE_DATAOUT_ID:
1046 return "STATE_DATAOUT_ID";
1047 case STATE_DATAOUT_STATUS:
1048 return "STATE_DATAOUT_STATUS";
1049 case STATE_READY:
1050 return "STATE_READY";
1051 case STATE_UNKNOWN:
1052 return "STATE_UNKNOWN";
1053 }
1054
1055 NS_ERR("get_state_name: unknown state, BUG\n");
1056 return NULL;
1057 }
1058
1059
1060
1061
1062
1063
1064 static int check_command(int cmd)
1065 {
1066 switch (cmd) {
1067
1068 case NAND_CMD_READ0:
1069 case NAND_CMD_READ1:
1070 case NAND_CMD_READSTART:
1071 case NAND_CMD_PAGEPROG:
1072 case NAND_CMD_READOOB:
1073 case NAND_CMD_ERASE1:
1074 case NAND_CMD_STATUS:
1075 case NAND_CMD_SEQIN:
1076 case NAND_CMD_READID:
1077 case NAND_CMD_ERASE2:
1078 case NAND_CMD_RESET:
1079 case NAND_CMD_RNDOUT:
1080 case NAND_CMD_RNDOUTSTART:
1081 return 0;
1082
1083 default:
1084 return 1;
1085 }
1086 }
1087
1088
1089
1090
1091 static uint32_t get_state_by_command(unsigned command)
1092 {
1093 switch (command) {
1094 case NAND_CMD_READ0:
1095 return STATE_CMD_READ0;
1096 case NAND_CMD_READ1:
1097 return STATE_CMD_READ1;
1098 case NAND_CMD_PAGEPROG:
1099 return STATE_CMD_PAGEPROG;
1100 case NAND_CMD_READSTART:
1101 return STATE_CMD_READSTART;
1102 case NAND_CMD_READOOB:
1103 return STATE_CMD_READOOB;
1104 case NAND_CMD_ERASE1:
1105 return STATE_CMD_ERASE1;
1106 case NAND_CMD_STATUS:
1107 return STATE_CMD_STATUS;
1108 case NAND_CMD_SEQIN:
1109 return STATE_CMD_SEQIN;
1110 case NAND_CMD_READID:
1111 return STATE_CMD_READID;
1112 case NAND_CMD_ERASE2:
1113 return STATE_CMD_ERASE2;
1114 case NAND_CMD_RESET:
1115 return STATE_CMD_RESET;
1116 case NAND_CMD_RNDOUT:
1117 return STATE_CMD_RNDOUT;
1118 case NAND_CMD_RNDOUTSTART:
1119 return STATE_CMD_RNDOUTSTART;
1120 }
1121
1122 NS_ERR("get_state_by_command: unknown command, BUG\n");
1123 return 0;
1124 }
1125
1126
1127
1128
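/*
 * Accept one address byte.  The first (pgaddrbytes - secaddrbytes) bytes
 * form the column address, the remaining bytes form the row address; each
 * byte is shifted into place according to its position in the cycle.
 */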
1129 static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
1130 {
1131 uint byte = (uint)bt;
1132
1133 if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
1134 ns->regs.column |= (byte << 8 * ns->regs.count);
1135 else {
1136 ns->regs.row |= (byte << 8 * (ns->regs.count -
1137 ns->geom.pgaddrbytes +
1138 ns->geom.secaddrbytes));
1139 }
1140
1141 return;
1142 }
1143
1144
1145
1146
1147 static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
1148 {
1149 NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
1150
1151 ns->state = STATE_READY;
1152 ns->nxstate = STATE_UNKNOWN;
1153 ns->op = NULL;
1154 ns->npstates = 0;
1155 ns->stateidx = 0;
1156 ns->regs.num = 0;
1157 ns->regs.count = 0;
1158 ns->regs.off = 0;
1159 ns->regs.row = 0;
1160 ns->regs.column = 0;
1161 ns->regs.status = status;
1162 }
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
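/*
 * Try to identify the operation being performed from the states seen so
 * far.  Returns 0 if exactly one template matches (the operation is now
 * known and the current/next states are set from it), -1 if several
 * templates still match (the current state is remembered and more input is
 * needed), and -2 on failure (no template matches, or an address arrived
 * while the operation was still ambiguous).  When 'flag' is non-zero the
 * search is restricted to operations expecting an address at this point.
 */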
1206 static int find_operation(struct nandsim *ns, uint32_t flag)
1207 {
1208 int opsfound = 0;
1209 int i, j, idx = 0;
1210
1211 for (i = 0; i < NS_OPER_NUM; i++) {
1212
1213 int found = 1;
1214
1215 if (!(ns->options & ops[i].reqopts))
1216
1217 continue;
1218
1219 if (flag) {
1220 if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
1221 continue;
1222 } else {
1223 if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
1224 continue;
1225 }
1226
1227 for (j = 0; j < ns->npstates; j++)
1228 if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
1229 && (ns->options & ops[idx].reqopts)) {
1230 found = 0;
1231 break;
1232 }
1233
1234 if (found) {
1235 idx = i;
1236 opsfound += 1;
1237 }
1238 }
1239
1240 if (opsfound == 1) {
1241
1242 ns->op = &ops[idx].states[0];
1243 if (flag) {
1244
1245
1246
1247
1248
1249
1250
1251 ns->stateidx = ns->npstates - 1;
1252 } else {
1253 ns->stateidx = ns->npstates;
1254 }
1255 ns->npstates = 0;
1256 ns->state = ns->op[ns->stateidx];
1257 ns->nxstate = ns->op[ns->stateidx + 1];
1258 NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
1259 idx, get_state_name(ns->state), get_state_name(ns->nxstate));
1260 return 0;
1261 }
1262
1263 if (opsfound == 0) {
1264
1265 if (ns->npstates != 0) {
1266 NS_DBG("find_operation: no operation found, try again with state %s\n",
1267 get_state_name(ns->state));
1268 ns->npstates = 0;
1269 return find_operation(ns, 0);
1270
1271 }
1272 NS_DBG("find_operation: no operations found\n");
1273 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1274 return -2;
1275 }
1276
1277 if (flag) {
1278
1279 NS_DBG("find_operation: BUG, operation must be known if address is input\n");
1280 return -2;
1281 }
1282
1283 NS_DBG("find_operation: there is still ambiguity\n");
1284
1285 ns->pstates[ns->npstates++] = ns->state;
1286
1287 return -1;
1288 }
1289
1290 static void put_pages(struct nandsim *ns)
1291 {
1292 int i;
1293
1294 for (i = 0; i < ns->held_cnt; i++)
1295 put_page(ns->held_pages[i]);
1296 }
1297
1298
1299 static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
1300 {
1301 pgoff_t index, start_index, end_index;
1302 struct page *page;
1303 struct address_space *mapping = file->f_mapping;
1304
1305 start_index = pos >> PAGE_SHIFT;
1306 end_index = (pos + count - 1) >> PAGE_SHIFT;
1307 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
1308 return -EINVAL;
1309 ns->held_cnt = 0;
1310 for (index = start_index; index <= end_index; index++) {
1311 page = find_get_page(mapping, index);
1312 if (page == NULL) {
1313 page = find_or_create_page(mapping, index, GFP_NOFS);
1314 if (page == NULL) {
1315 write_inode_now(mapping->host, 1);
1316 page = find_or_create_page(mapping, index, GFP_NOFS);
1317 }
1318 if (page == NULL) {
1319 put_pages(ns);
1320 return -ENOMEM;
1321 }
1322 unlock_page(page);
1323 }
1324 ns->held_pages[ns->held_cnt++] = page;
1325 }
1326 return 0;
1327 }
1328
1329 static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
1330 {
1331 ssize_t tx;
1332 int err;
1333 unsigned int noreclaim_flag;
1334
1335 err = get_pages(ns, file, count, pos);
1336 if (err)
1337 return err;
1338 noreclaim_flag = memalloc_noreclaim_save();
1339 tx = kernel_read(file, buf, count, &pos);
1340 memalloc_noreclaim_restore(noreclaim_flag);
1341 put_pages(ns);
1342 return tx;
1343 }
1344
1345 static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
1346 {
1347 ssize_t tx;
1348 int err;
1349 unsigned int noreclaim_flag;
1350
1351 err = get_pages(ns, file, count, pos);
1352 if (err)
1353 return err;
1354 noreclaim_flag = memalloc_noreclaim_save();
1355 tx = kernel_write(file, buf, count, &pos);
1356 memalloc_noreclaim_restore(noreclaim_flag);
1357 put_pages(ns);
1358 return tx;
1359 }
1360
1361
1362
1363
1364 static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
1365 {
1366 return &(ns->pages[ns->regs.row]);
1367 }
1368
1369
1370
1371
1372 static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
1373 {
1374 return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
1375 }
1376
1377 static int do_read_error(struct nandsim *ns, int num)
1378 {
1379 unsigned int page_no = ns->regs.row;
1380
1381 if (read_error(page_no)) {
1382 prandom_bytes(ns->buf.byte, num);
1383 NS_WARN("simulating read error in page %u\n", page_no);
1384 return 1;
1385 }
1386 return 0;
1387 }
1388
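/*
 * Inject random bit flips: with a probability of 2^22 / 2^32 (about 1 in
 * 1024) per read, flip between 1 and 'bitflips' randomly chosen bits in
 * the buffer that was just read.
 */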
1389 static void do_bit_flips(struct nandsim *ns, int num)
1390 {
1391 if (bitflips && prandom_u32() < (1 << 22)) {
1392 int flips = 1;
1393 if (bitflips > 1)
1394 flips = (prandom_u32() % (int) bitflips) + 1;
1395 while (flips--) {
1396 int pos = prandom_u32() % (num * 8);
1397 ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1398 NS_WARN("read_page: flipping bit %d in page %d "
1399 "reading from %d ecc: corrected=%u failed=%u\n",
1400 pos, ns->regs.row, ns->regs.column + ns->regs.off,
1401 nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
1402 }
1403 }
1404 }
1405
1406
1407
1408
1409 static void read_page(struct nandsim *ns, int num)
1410 {
1411 union ns_mem *mypage;
1412
1413 if (ns->cfile) {
1414 if (!test_bit(ns->regs.row, ns->pages_written)) {
1415 NS_DBG("read_page: page %d not written\n", ns->regs.row);
1416 memset(ns->buf.byte, 0xFF, num);
1417 } else {
1418 loff_t pos;
1419 ssize_t tx;
1420
1421 NS_DBG("read_page: page %d written, reading from %d\n",
1422 ns->regs.row, ns->regs.column + ns->regs.off);
1423 if (do_read_error(ns, num))
1424 return;
1425 pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
1426 tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
1427 if (tx != num) {
1428 NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1429 return;
1430 }
1431 do_bit_flips(ns, num);
1432 }
1433 return;
1434 }
1435
1436 mypage = NS_GET_PAGE(ns);
1437 if (mypage->byte == NULL) {
1438 NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
1439 memset(ns->buf.byte, 0xFF, num);
1440 } else {
1441 NS_DBG("read_page: page %d allocated, reading from %d\n",
1442 ns->regs.row, ns->regs.column + ns->regs.off);
1443 if (do_read_error(ns, num))
1444 return;
1445 memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
1446 do_bit_flips(ns, num);
1447 }
1448 }
1449
1450
1451
1452
1453 static void erase_sector(struct nandsim *ns)
1454 {
1455 union ns_mem *mypage;
1456 int i;
1457
1458 if (ns->cfile) {
1459 for (i = 0; i < ns->geom.pgsec; i++)
1460 if (__test_and_clear_bit(ns->regs.row + i,
1461 ns->pages_written)) {
1462 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
1463 }
1464 return;
1465 }
1466
1467 mypage = NS_GET_PAGE(ns);
1468 for (i = 0; i < ns->geom.pgsec; i++) {
1469 if (mypage->byte != NULL) {
1470 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
1471 kmem_cache_free(ns->nand_pages_slab, mypage->byte);
1472 mypage->byte = NULL;
1473 }
1474 mypage++;
1475 }
1476 }
1477
1478
1479
1480
1481 static int prog_page(struct nandsim *ns, int num)
1482 {
1483 int i;
1484 union ns_mem *mypage;
1485 u_char *pg_off;
1486
1487 if (ns->cfile) {
1488 loff_t off;
1489 ssize_t tx;
1490 int all;
1491
1492 NS_DBG("prog_page: writing page %d\n", ns->regs.row);
1493 pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
1494 off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
1495 if (!test_bit(ns->regs.row, ns->pages_written)) {
1496 all = 1;
1497 memset(ns->file_buf, 0xff, ns->geom.pgszoob);
1498 } else {
1499 all = 0;
1500 tx = read_file(ns, ns->cfile, pg_off, num, off);
1501 if (tx != num) {
1502 NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1503 return -1;
1504 }
1505 }
1506 for (i = 0; i < num; i++)
1507 pg_off[i] &= ns->buf.byte[i];
1508 if (all) {
1509 loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
1510 tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
1511 if (tx != ns->geom.pgszoob) {
1512 NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1513 return -1;
1514 }
1515 __set_bit(ns->regs.row, ns->pages_written);
1516 } else {
1517 tx = write_file(ns, ns->cfile, pg_off, num, off);
1518 if (tx != num) {
1519 NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1520 return -1;
1521 }
1522 }
1523 return 0;
1524 }
1525
1526 mypage = NS_GET_PAGE(ns);
1527 if (mypage->byte == NULL) {
1528 NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
1529
1530
1531
1532
1533
1534
1535 mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
1536 if (mypage->byte == NULL) {
1537 NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
1538 return -1;
1539 }
1540 memset(mypage->byte, 0xFF, ns->geom.pgszoob);
1541 }
1542
1543 pg_off = NS_PAGE_BYTE_OFF(ns);
1544 for (i = 0; i < num; i++)
1545 pg_off[i] &= ns->buf.byte[i];
1546
1547 return 0;
1548 }
1549
1550
1551
1552
1553
1554
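/*
 * Perform the action encoded in the current state: copy a page (or part of
 * it) into the internal buffer, program a page, erase a sector, or adjust
 * the internal offset (start of page, second half, OOB).  Returns -1 when
 * the operation is rejected or simulated to fail, which makes the caller
 * switch the chip to the ready state with a failed status.
 */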
1555 static int do_state_action(struct nandsim *ns, uint32_t action)
1556 {
1557 int num;
1558 int busdiv = ns->busw == 8 ? 1 : 2;
1559 unsigned int erase_block_no, page_no;
1560
1561 action &= ACTION_MASK;
1562
1563
1564 if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
1565 NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
1566 return -1;
1567 }
1568
1569 switch (action) {
1570
1571 case ACTION_CPY:
1572
1573
1574
1575
1576
1577 if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
1578 NS_ERR("do_state_action: column number is too large\n");
1579 break;
1580 }
1581 num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1582 read_page(ns, num);
1583
1584 NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
1585 num, NS_RAW_OFFSET(ns) + ns->regs.off);
1586
1587 if (ns->regs.off == 0)
1588 NS_LOG("read page %d\n", ns->regs.row);
1589 else if (ns->regs.off < ns->geom.pgsz)
1590 NS_LOG("read page %d (second half)\n", ns->regs.row);
1591 else
1592 NS_LOG("read OOB of page %d\n", ns->regs.row);
1593
1594 NS_UDELAY(access_delay);
1595 NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
1596
1597 break;
1598
1599 case ACTION_SECERASE:
1600
1601
1602
1603
1604 if (ns->lines.wp) {
1605 NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
1606 return -1;
1607 }
1608
1609 if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
1610 || (ns->regs.row & ~(ns->geom.secsz - 1))) {
1611 NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
1612 return -1;
1613 }
1614
1615 ns->regs.row = (ns->regs.row <<
1616 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
1617 ns->regs.column = 0;
1618
1619 erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
1620
1621 NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
1622 ns->regs.row, NS_RAW_OFFSET(ns));
1623 NS_LOG("erase sector %u\n", erase_block_no);
1624
1625 erase_sector(ns);
1626
1627 NS_MDELAY(erase_delay);
1628
1629 if (erase_block_wear)
1630 update_wear(erase_block_no);
1631
1632 if (erase_error(erase_block_no)) {
1633 NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
1634 return -1;
1635 }
1636
1637 break;
1638
1639 case ACTION_PRGPAGE:
1640
1641
1642
1643
1644 if (ns->lines.wp) {
1645 NS_WARN("do_state_action: device is write-protected, ignore page program\n");
1646 return -1;
1647 }
1648
1649 num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1650 if (num != ns->regs.count) {
1651 NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
1652 ns->regs.count, num);
1653 return -1;
1654 }
1655
1656 if (prog_page(ns, num) == -1)
1657 return -1;
1658
1659 page_no = ns->regs.row;
1660
1661 NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
1662 num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
1663 NS_LOG("program page %d\n", ns->regs.row);
1664
1665 NS_UDELAY(programm_delay);
1666 NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
1667
1668 if (write_error(page_no)) {
1669 NS_WARN("simulating write failure in page %u\n", page_no);
1670 return -1;
1671 }
1672
1673 break;
1674
1675 case ACTION_ZEROOFF:
1676 NS_DBG("do_state_action: set internal offset to 0\n");
1677 ns->regs.off = 0;
1678 break;
1679
1680 case ACTION_HALFOFF:
1681 if (!(ns->options & OPT_PAGE512_8BIT)) {
1682 NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
1683 " byte page size 8x chips\n");
1684 return -1;
1685 }
1686 NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
1687 ns->regs.off = ns->geom.pgsz/2;
1688 break;
1689
1690 case ACTION_OOBOFF:
1691 NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
1692 ns->regs.off = ns->geom.pgsz;
1693 break;
1694
1695 default:
1696 NS_DBG("do_state_action: BUG! unknown action\n");
1697 }
1698
1699 return 0;
1700 }
1701
1702
1703
1704
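/*
 * Advance the state machine after a command byte or a complete address has
 * been received.  If the operation is already known, move to its next
 * state and perform the associated action; otherwise try to identify it
 * first.  Then set up regs.num/regs.count for the upcoming address or data
 * I/O cycles, or switch back to the ready state once the operation is
 * complete.
 */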
1705 static void switch_state(struct nandsim *ns)
1706 {
1707 if (ns->op) {
1708
1709
1710
1711
1712
1713 ns->stateidx += 1;
1714 ns->state = ns->nxstate;
1715 ns->nxstate = ns->op[ns->stateidx + 1];
1716
1717 NS_DBG("switch_state: operation is known, switch to the next state, "
1718 "state: %s, nxstate: %s\n",
1719 get_state_name(ns->state), get_state_name(ns->nxstate));
1720
1721
1722 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1723 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1724 return;
1725 }
1726
1727 } else {
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737 ns->state = get_state_by_command(ns->regs.command);
1738
1739 NS_DBG("switch_state: operation is unknown, try to find it\n");
1740
1741 if (find_operation(ns, 0) != 0)
1742 return;
1743
1744 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1745 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1746 return;
1747 }
1748 }
1749
1750
1751 if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
1752 NS_DBG("switch_state: double the column number for 16x device\n");
1753 ns->regs.column <<= 1;
1754 }
1755
1756 if (NS_STATE(ns->nxstate) == STATE_READY) {
1757
1758
1759
1760
1761 u_char status = NS_STATUS_OK(ns);
1762
1763
1764 if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
1765 && ns->regs.count != ns->regs.num) {
1766 NS_WARN("switch_state: not all bytes were processed, %d left\n",
1767 ns->regs.num - ns->regs.count);
1768 status = NS_STATUS_FAILED(ns);
1769 }
1770
1771 NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
1772
1773 switch_to_ready_state(ns, status);
1774
1775 return;
1776 } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
1777
1778
1779
1780
1781 ns->state = ns->nxstate;
1782 ns->nxstate = ns->op[++ns->stateidx + 1];
1783 ns->regs.num = ns->regs.count = 0;
1784
1785 NS_DBG("switch_state: the next state is data I/O, switch, "
1786 "state: %s, nxstate: %s\n",
1787 get_state_name(ns->state), get_state_name(ns->nxstate));
1788
1789
1790
1791
1792
1793 switch (NS_STATE(ns->state)) {
1794 case STATE_DATAIN:
1795 case STATE_DATAOUT:
1796 ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1797 break;
1798
1799 case STATE_DATAOUT_ID:
1800 ns->regs.num = ns->geom.idbytes;
1801 break;
1802
1803 case STATE_DATAOUT_STATUS:
1804 ns->regs.count = ns->regs.num = 0;
1805 break;
1806
1807 default:
1808 NS_ERR("switch_state: BUG! unknown data state\n");
1809 }
1810
1811 } else if (ns->nxstate & STATE_ADDR_MASK) {
1812
1813
1814
1815
1816
1817 ns->regs.count = 0;
1818
1819 switch (NS_STATE(ns->nxstate)) {
1820 case STATE_ADDR_PAGE:
1821 ns->regs.num = ns->geom.pgaddrbytes;
1822
1823 break;
1824 case STATE_ADDR_SEC:
1825 ns->regs.num = ns->geom.secaddrbytes;
1826 break;
1827
1828 case STATE_ADDR_ZERO:
1829 ns->regs.num = 1;
1830 break;
1831
1832 case STATE_ADDR_COLUMN:
1833
1834 ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
1835 break;
1836
1837 default:
1838 NS_ERR("switch_state: BUG! unknown address state\n");
1839 }
1840 } else {
1841
1842
1843
1844
1845 ns->regs.num = 0;
1846 ns->regs.count = 0;
1847 }
1848 }
1849
1850 static u_char ns_nand_read_byte(struct nand_chip *chip)
1851 {
1852 struct nandsim *ns = nand_get_controller_data(chip);
1853 u_char outb = 0x00;
1854
1855
1856 if (!ns->lines.ce) {
1857 NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
1858 return outb;
1859 }
1860 if (ns->lines.ale || ns->lines.cle) {
1861 NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
1862 return outb;
1863 }
1864 if (!(ns->state & STATE_DATAOUT_MASK)) {
1865 NS_WARN("read_byte: unexpected data output cycle, state is %s "
1866 "return %#x\n", get_state_name(ns->state), (uint)outb);
1867 return outb;
1868 }
1869
1870
1871 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
1872 NS_DBG("read_byte: return %#x status\n", ns->regs.status);
1873 return ns->regs.status;
1874 }
1875
1876
1877 if (ns->regs.count == ns->regs.num) {
1878 NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
1879 return outb;
1880 }
1881
1882 switch (NS_STATE(ns->state)) {
1883 case STATE_DATAOUT:
1884 if (ns->busw == 8) {
1885 outb = ns->buf.byte[ns->regs.count];
1886 ns->regs.count += 1;
1887 } else {
1888 outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
1889 ns->regs.count += 2;
1890 }
1891 break;
1892 case STATE_DATAOUT_ID:
1893 NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
1894 outb = ns->ids[ns->regs.count];
1895 ns->regs.count += 1;
1896 break;
1897 default:
1898 BUG();
1899 }
1900
1901 if (ns->regs.count == ns->regs.num) {
1902 NS_DBG("read_byte: all bytes were read\n");
1903
1904 if (NS_STATE(ns->nxstate) == STATE_READY)
1905 switch_state(ns);
1906 }
1907
1908 return outb;
1909 }
1910
1911 static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
1912 {
1913 struct nandsim *ns = nand_get_controller_data(chip);
1914
1915
1916 if (!ns->lines.ce) {
1917 NS_ERR("write_byte: chip is disabled, ignore write\n");
1918 return;
1919 }
1920 if (ns->lines.ale && ns->lines.cle) {
1921 NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
1922 return;
1923 }
1924
1925 if (ns->lines.cle == 1) {
1926
1927
1928
1929
1930 if (byte == NAND_CMD_RESET) {
1931 NS_LOG("reset chip\n");
1932 switch_to_ready_state(ns, NS_STATUS_OK(ns));
1933 return;
1934 }
1935
1936
1937 if (check_command(byte)) {
1938 NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
1939 return;
1940 }
1941
1942 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
1943 || NS_STATE(ns->state) == STATE_DATAOUT) {
1944 int row = ns->regs.row;
1945
1946 switch_state(ns);
1947 if (byte == NAND_CMD_RNDOUT)
1948 ns->regs.row = row;
1949 }
1950
1951
1952 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
1953
1954 if (!(ns->regs.command == NAND_CMD_READID &&
1955 NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
1956
1957
1958
1959
1960
1961 NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
1962 "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
1963 }
1964 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1965 }
1966
1967 NS_DBG("command byte corresponding to %s state accepted\n",
1968 get_state_name(get_state_by_command(byte)));
1969 ns->regs.command = byte;
1970 switch_state(ns);
1971
1972 } else if (ns->lines.ale == 1) {
1973
1974
1975
1976
1977 if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
1978
1979 NS_DBG("write_byte: operation isn't known yet, identify it\n");
1980
1981 if (find_operation(ns, 1) < 0)
1982 return;
1983
1984 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1985 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1986 return;
1987 }
1988
1989 ns->regs.count = 0;
1990 switch (NS_STATE(ns->nxstate)) {
1991 case STATE_ADDR_PAGE:
1992 ns->regs.num = ns->geom.pgaddrbytes;
1993 break;
1994 case STATE_ADDR_SEC:
1995 ns->regs.num = ns->geom.secaddrbytes;
1996 break;
1997 case STATE_ADDR_ZERO:
1998 ns->regs.num = 1;
1999 break;
2000 default:
2001 BUG();
2002 }
2003 }
2004
2005
2006 if (!(ns->nxstate & STATE_ADDR_MASK)) {
2007 NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
2008 "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
2009 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2010 return;
2011 }
2012
2013
2014 if (ns->regs.count == ns->regs.num) {
2015 NS_ERR("write_byte: no more address bytes expected\n");
2016 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2017 return;
2018 }
2019
2020 accept_addr_byte(ns, byte);
2021
2022 ns->regs.count += 1;
2023
2024 NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
2025 (uint)byte, ns->regs.count, ns->regs.num);
2026
2027 if (ns->regs.count == ns->regs.num) {
2028 NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
2029 switch_state(ns);
2030 }
2031
2032 } else {
2033
2034
2035
2036
2037
2038 if (!(ns->state & STATE_DATAIN_MASK)) {
2039 NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
2040 "switch to %s\n", (uint)byte,
2041 get_state_name(ns->state), get_state_name(STATE_READY));
2042 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2043 return;
2044 }
2045
2046
2047 if (ns->regs.count == ns->regs.num) {
2048 NS_WARN("write_byte: %u input bytes have already been accepted, ignore write\n",
2049 ns->regs.num);
2050 return;
2051 }
2052
2053 if (ns->busw == 8) {
2054 ns->buf.byte[ns->regs.count] = byte;
2055 ns->regs.count += 1;
2056 } else {
2057 ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
2058 ns->regs.count += 2;
2059 }
2060 }
2061
2062 return;
2063 }
2064
2065 static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
2066 int len)
2067 {
2068 struct nandsim *ns = nand_get_controller_data(chip);
2069
2070
2071 if (!(ns->state & STATE_DATAIN_MASK)) {
2072 NS_ERR("write_buf: data input isn't expected, state is %s, "
2073 "switch to STATE_READY\n", get_state_name(ns->state));
2074 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2075 return;
2076 }
2077
2078
2079 if (ns->regs.count + len > ns->regs.num) {
2080 NS_ERR("write_buf: too many input bytes\n");
2081 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2082 return;
2083 }
2084
2085 memcpy(ns->buf.byte + ns->regs.count, buf, len);
2086 ns->regs.count += len;
2087
2088 if (ns->regs.count == ns->regs.num) {
2089 NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
2090 }
2091 }
2092
2093 static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
2094 {
2095 struct nandsim *ns = nand_get_controller_data(chip);
2096
2097
2098 if (!ns->lines.ce) {
2099 NS_ERR("read_buf: chip is disabled\n");
2100 return;
2101 }
2102 if (ns->lines.ale || ns->lines.cle) {
2103 NS_ERR("read_buf: ALE or CLE pin is high\n");
2104 return;
2105 }
2106 if (!(ns->state & STATE_DATAOUT_MASK)) {
2107 NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
2108 get_state_name(ns->state));
2109 return;
2110 }
2111
2112 if (NS_STATE(ns->state) != STATE_DATAOUT) {
2113 int i;
2114
2115 for (i = 0; i < len; i++)
2116 buf[i] = ns_nand_read_byte(chip);
2117
2118 return;
2119 }
2120
2121
2122 if (ns->regs.count + len > ns->regs.num) {
2123 NS_ERR("read_buf: too many bytes to read\n");
2124 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2125 return;
2126 }
2127
2128 memcpy(buf, ns->buf.byte + ns->regs.count, len);
2129 ns->regs.count += len;
2130
2131 if (ns->regs.count == ns->regs.num) {
2132 if (NS_STATE(ns->nxstate) == STATE_READY)
2133 switch_state(ns);
2134 }
2135
2136 return;
2137 }
2138
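/*
 * ->exec_op() entry point called by the raw NAND core.  Each instruction
 * of the operation is translated into the byte/buffer accessors above,
 * with the simulated CLE/ALE lines set according to the instruction type;
 * wait-ready instructions are no-ops since the simulator is always ready.
 */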
2139 static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
2140 bool check_only)
2141 {
2142 int i;
2143 unsigned int op_id;
2144 const struct nand_op_instr *instr = NULL;
2145 struct nandsim *ns = nand_get_controller_data(chip);
2146
2147 ns->lines.ce = 1;
2148
2149 for (op_id = 0; op_id < op->ninstrs; op_id++) {
2150 instr = &op->instrs[op_id];
2151 ns->lines.cle = 0;
2152 ns->lines.ale = 0;
2153
2154 switch (instr->type) {
2155 case NAND_OP_CMD_INSTR:
2156 ns->lines.cle = 1;
2157 ns_nand_write_byte(chip, instr->ctx.cmd.opcode);
2158 break;
2159 case NAND_OP_ADDR_INSTR:
2160 ns->lines.ale = 1;
2161 for (i = 0; i < instr->ctx.addr.naddrs; i++)
2162 ns_nand_write_byte(chip, instr->ctx.addr.addrs[i]);
2163 break;
2164 case NAND_OP_DATA_IN_INSTR:
2165 ns_nand_read_buf(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
2166 break;
2167 case NAND_OP_DATA_OUT_INSTR:
2168 ns_nand_write_buf(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
2169 break;
2170 case NAND_OP_WAITRDY_INSTR:
2171
2172 break;
2173 }
2174 }
2175
2176 return 0;
2177 }
2178
2179 static int ns_attach_chip(struct nand_chip *chip)
2180 {
2181 unsigned int eccsteps, eccbytes;
2182
2183 if (!bch)
2184 return 0;
2185
2186 if (!mtd_nand_has_bch()) {
2187 NS_ERR("BCH ECC support is disabled\n");
2188 return -EINVAL;
2189 }
2190
2191
2192 eccsteps = nsmtd->writesize / 512;
2193 eccbytes = ((bch * 13) + 7) / 8;
2194
2195
2196 if (nsmtd->oobsize < 64 || !eccsteps) {
2197 NS_ERR("BCH not available on small page devices\n");
2198 return -EINVAL;
2199 }
2200
2201 if (((eccbytes * eccsteps) + 2) > nsmtd->oobsize) {
2202 NS_ERR("Invalid BCH value %u\n", bch);
2203 return -EINVAL;
2204 }
2205
2206 chip->ecc.mode = NAND_ECC_SOFT;
2207 chip->ecc.algo = NAND_ECC_BCH;
2208 chip->ecc.size = 512;
2209 chip->ecc.strength = bch;
2210 chip->ecc.bytes = eccbytes;
2211
2212 NS_INFO("Using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
2213
2214 return 0;
2215 }
2216
2217 static const struct nand_controller_ops ns_controller_ops = {
2218 .attach_chip = ns_attach_chip,
2219 .exec_op = ns_exec_op,
2220 };
2221
2222
2223
2224
2225 static int __init ns_init_module(void)
2226 {
2227 struct nand_chip *chip;
2228 struct nandsim *ns;
2229 int retval = -ENOMEM, i;
2230
2231 if (bus_width != 8 && bus_width != 16) {
2232 NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
2233 return -EINVAL;
2234 }
2235
2236 ns = kzalloc(sizeof(struct nandsim), GFP_KERNEL);
2237 if (!ns) {
2238 NS_ERR("unable to allocate core structures.\n");
2239 return -ENOMEM;
2240 }
2241 chip = &ns->chip;
2242 nsmtd = nand_to_mtd(chip);
2243 nand_set_controller_data(chip, (void *)ns);
2244
2245 chip->ecc.mode = NAND_ECC_SOFT;
2246 chip->ecc.algo = NAND_ECC_HAMMING;
2247
2248
2249 chip->options |= NAND_SKIP_BBTSCAN;
2250
2251 switch (bbt) {
2252 case 2:
2253 chip->bbt_options |= NAND_BBT_NO_OOB;
2254
2255 case 1:
2256 chip->bbt_options |= NAND_BBT_USE_FLASH;
2257
2258 case 0:
2259 break;
2260 default:
2261 NS_ERR("bbt has to be 0..2\n");
2262 retval = -EINVAL;
2263 goto error;
2264 }
2265
2266
2267
2268
2269 if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
2270 ns->geom.idbytes = 8;
2271 else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
2272 ns->geom.idbytes = 6;
2273 else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
2274 ns->geom.idbytes = 4;
2275 else
2276 ns->geom.idbytes = 2;
2277 ns->regs.status = NS_STATUS_OK(ns);
2278 ns->nxstate = STATE_UNKNOWN;
2279 ns->options |= OPT_PAGE512;
2280 memcpy(ns->ids, id_bytes, sizeof(ns->ids));
2281 if (bus_width == 16) {
2282 ns->busw = 16;
2283 chip->options |= NAND_BUSWIDTH_16;
2284 }
2285
2286 nsmtd->owner = THIS_MODULE;
2287
2288 if ((retval = parse_weakblocks()) != 0)
2289 goto error;
2290
2291 if ((retval = parse_weakpages()) != 0)
2292 goto error;
2293
2294 if ((retval = parse_gravepages()) != 0)
2295 goto error;
2296
2297 nand_controller_init(&ns->base);
2298 ns->base.ops = &ns_controller_ops;
2299 chip->controller = &ns->base;
2300
2301 retval = nand_scan(chip, 1);
2302 if (retval) {
2303 NS_ERR("Could not scan NAND Simulator device\n");
2304 goto error;
2305 }
2306
2307 if (overridesize) {
2308 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
2309 struct nand_memory_organization *memorg;
2310 u64 targetsize;
2311
2312 memorg = nanddev_get_memorg(&chip->base);
2313
2314 if (new_size >> overridesize != nsmtd->erasesize) {
2315 NS_ERR("overridesize is too big\n");
2316 retval = -EINVAL;
2317 goto err_exit;
2318 }
2319
2320
2321 nsmtd->size = new_size;
2322 memorg->eraseblocks_per_lun = 1 << overridesize;
2323 targetsize = nanddev_target_size(&chip->base);
2324 chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
2325 chip->pagemask = (targetsize >> chip->page_shift) - 1;
2326 }
2327
2328 if ((retval = setup_wear_reporting(nsmtd)) != 0)
2329 goto err_exit;
2330
2331 if ((retval = init_nandsim(nsmtd)) != 0)
2332 goto err_exit;
2333
2334 if ((retval = nand_create_bbt(chip)) != 0)
2335 goto err_exit;
2336
2337 if ((retval = parse_badblocks(ns, nsmtd)) != 0)
2338 goto err_exit;
2339
2340
2341 retval = mtd_device_register(nsmtd, &ns->partitions[0],
2342 ns->nbparts);
2343 if (retval != 0)
2344 goto err_exit;
2345
2346 if ((retval = nandsim_debugfs_create(ns)) != 0)
2347 goto err_exit;
2348
2349 return 0;
2350
2351 err_exit:
2352 free_nandsim(ns);
2353 nand_release(chip);
2354 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
2355 kfree(ns->partitions[i].name);
2356 error:
2357 kfree(ns);
2358 free_lists();
2359
2360 return retval;
2361 }
2362
2363 module_init(ns_init_module);
2364
2365
2366
2367
2368 static void __exit ns_cleanup_module(void)
2369 {
2370 struct nand_chip *chip = mtd_to_nand(nsmtd);
2371 struct nandsim *ns = nand_get_controller_data(chip);
2372 int i;
2373
2374 free_nandsim(ns);
2375 nand_release(chip);
2376 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
2377 kfree(ns->partitions[i].name);
2378 kfree(ns);
2379 free_lists();
2380 }
2381
2382 module_exit(ns_cleanup_module);
2383
2384 MODULE_LICENSE ("GPL");
2385 MODULE_AUTHOR ("Artem B. Bityuckiy");
2386 MODULE_DESCRIPTION ("The NAND flash simulator");