This source file includes the following definitions.
- mtd_cls_suspend
- mtd_cls_resume
- __mtd_next_device
- mtd_release
- mtd_type_show
- mtd_flags_show
- mtd_size_show
- mtd_erasesize_show
- mtd_writesize_show
- mtd_subpagesize_show
- mtd_oobsize_show
- mtd_oobavail_show
- mtd_numeraseregions_show
- mtd_name_show
- mtd_ecc_strength_show
- mtd_bitflip_threshold_show
- mtd_bitflip_threshold_store
- mtd_ecc_step_size_show
- mtd_ecc_stats_corrected_show
- mtd_ecc_stats_errors_show
- mtd_badblocks_show
- mtd_bbtblocks_show
- mtd_partid_show
- mtd_partid_debugfs_open
- mtd_partname_show
- mtd_partname_debugfs_open
- mtd_debugfs_populate
- mtd_mmap_capabilities
- mtd_reboot_notifier
- mtd_wunit_to_pairing_info
- mtd_pairing_info_to_wunit
- mtd_pairing_groups
- mtd_nvmem_reg_read
- mtd_nvmem_add
- add_mtd_device
- del_mtd_device
- mtd_set_dev_defaults
- mtd_device_parse_register
- mtd_device_unregister
- register_mtd_user
- unregister_mtd_user
- get_mtd_device
- __get_mtd_device
- get_mtd_device_nm
- put_mtd_device
- __put_mtd_device
- mtd_erase
- mtd_point
- mtd_unpoint
- mtd_get_unmapped_area
- mtd_read
- mtd_write
- mtd_panic_write
- mtd_check_oob_ops
- mtd_read_oob
- mtd_write_oob
- mtd_ooblayout_ecc
- mtd_ooblayout_free
- mtd_ooblayout_find_region
- mtd_ooblayout_find_eccregion
- mtd_ooblayout_get_bytes
- mtd_ooblayout_set_bytes
- mtd_ooblayout_count_bytes
- mtd_ooblayout_get_eccbytes
- mtd_ooblayout_set_eccbytes
- mtd_ooblayout_get_databytes
- mtd_ooblayout_set_databytes
- mtd_ooblayout_count_freebytes
- mtd_ooblayout_count_eccbytes
- mtd_get_fact_prot_info
- mtd_read_fact_prot_reg
- mtd_get_user_prot_info
- mtd_read_user_prot_reg
- mtd_write_user_prot_reg
- mtd_lock_user_prot_reg
- mtd_lock
- mtd_unlock
- mtd_is_locked
- mtd_block_isreserved
- mtd_block_isbad
- mtd_block_markbad
- default_mtd_writev
- mtd_writev
- mtd_kmalloc_up_to
- mtd_proc_show
- mtd_bdi_init
- init_mtd
- cleanup_mtd
1
2
3
4
5
6
7
8
9
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/ptrace.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/major.h>
17 #include <linux/fs.h>
18 #include <linux/err.h>
19 #include <linux/ioctl.h>
20 #include <linux/init.h>
21 #include <linux/of.h>
22 #include <linux/proc_fs.h>
23 #include <linux/idr.h>
24 #include <linux/backing-dev.h>
25 #include <linux/gfp.h>
26 #include <linux/slab.h>
27 #include <linux/reboot.h>
28 #include <linux/leds.h>
29 #include <linux/debugfs.h>
30 #include <linux/nvmem-provider.h>
31
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/partitions.h>
34
35 #include "mtdcore.h"
36
37 struct backing_dev_info *mtd_bdi;
38
39 #ifdef CONFIG_PM_SLEEP
40
41 static int mtd_cls_suspend(struct device *dev)
42 {
43 struct mtd_info *mtd = dev_get_drvdata(dev);
44
45 return mtd ? mtd_suspend(mtd) : 0;
46 }
47
48 static int mtd_cls_resume(struct device *dev)
49 {
50 struct mtd_info *mtd = dev_get_drvdata(dev);
51
52 if (mtd)
53 mtd_resume(mtd);
54 return 0;
55 }
56
57 static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58 #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
59 #else
60 #define MTD_CLS_PM_OPS NULL
61 #endif
62
63 static struct class mtd_class = {
64 .name = "mtd",
65 .owner = THIS_MODULE,
66 .pm = MTD_CLS_PM_OPS,
67 };
68
69 static DEFINE_IDR(mtd_idr);
70
71
72
73 DEFINE_MUTEX(mtd_table_mutex);
74 EXPORT_SYMBOL_GPL(mtd_table_mutex);
75
76 struct mtd_info *__mtd_next_device(int i)
77 {
78 return idr_get_next(&mtd_idr, &i);
79 }
80 EXPORT_SYMBOL_GPL(__mtd_next_device);
81
82 static LIST_HEAD(mtd_notifiers);
83
84
85 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
86
87
88
89
90 static void mtd_release(struct device *dev)
91 {
92 struct mtd_info *mtd = dev_get_drvdata(dev);
93 dev_t index = MTD_DEVT(mtd->index);
94
95
96 device_destroy(&mtd_class, index + 1);
97 }
98
99 static ssize_t mtd_type_show(struct device *dev,
100 struct device_attribute *attr, char *buf)
101 {
102 struct mtd_info *mtd = dev_get_drvdata(dev);
103 char *type;
104
105 switch (mtd->type) {
106 case MTD_ABSENT:
107 type = "absent";
108 break;
109 case MTD_RAM:
110 type = "ram";
111 break;
112 case MTD_ROM:
113 type = "rom";
114 break;
115 case MTD_NORFLASH:
116 type = "nor";
117 break;
118 case MTD_NANDFLASH:
119 type = "nand";
120 break;
121 case MTD_DATAFLASH:
122 type = "dataflash";
123 break;
124 case MTD_UBIVOLUME:
125 type = "ubi";
126 break;
127 case MTD_MLCNANDFLASH:
128 type = "mlc-nand";
129 break;
130 default:
131 type = "unknown";
132 }
133
134 return snprintf(buf, PAGE_SIZE, "%s\n", type);
135 }
136 static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
137
138 static ssize_t mtd_flags_show(struct device *dev,
139 struct device_attribute *attr, char *buf)
140 {
141 struct mtd_info *mtd = dev_get_drvdata(dev);
142
143 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
144 }
145 static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
146
147 static ssize_t mtd_size_show(struct device *dev,
148 struct device_attribute *attr, char *buf)
149 {
150 struct mtd_info *mtd = dev_get_drvdata(dev);
151
152 return snprintf(buf, PAGE_SIZE, "%llu\n",
153 (unsigned long long)mtd->size);
154 }
155 static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
156
157 static ssize_t mtd_erasesize_show(struct device *dev,
158 struct device_attribute *attr, char *buf)
159 {
160 struct mtd_info *mtd = dev_get_drvdata(dev);
161
162 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
163 }
164 static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
165
166 static ssize_t mtd_writesize_show(struct device *dev,
167 struct device_attribute *attr, char *buf)
168 {
169 struct mtd_info *mtd = dev_get_drvdata(dev);
170
171 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
172 }
173 static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
174
175 static ssize_t mtd_subpagesize_show(struct device *dev,
176 struct device_attribute *attr, char *buf)
177 {
178 struct mtd_info *mtd = dev_get_drvdata(dev);
179 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
180
181 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
182 }
183 static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
184
185 static ssize_t mtd_oobsize_show(struct device *dev,
186 struct device_attribute *attr, char *buf)
187 {
188 struct mtd_info *mtd = dev_get_drvdata(dev);
189
190 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
191 }
192 static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
193
194 static ssize_t mtd_oobavail_show(struct device *dev,
195 struct device_attribute *attr, char *buf)
196 {
197 struct mtd_info *mtd = dev_get_drvdata(dev);
198
199 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
200 }
201 static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);
202
203 static ssize_t mtd_numeraseregions_show(struct device *dev,
204 struct device_attribute *attr, char *buf)
205 {
206 struct mtd_info *mtd = dev_get_drvdata(dev);
207
208 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
209 }
210 static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
211 NULL);
212
213 static ssize_t mtd_name_show(struct device *dev,
214 struct device_attribute *attr, char *buf)
215 {
216 struct mtd_info *mtd = dev_get_drvdata(dev);
217
218 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
219 }
220 static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
221
222 static ssize_t mtd_ecc_strength_show(struct device *dev,
223 struct device_attribute *attr, char *buf)
224 {
225 struct mtd_info *mtd = dev_get_drvdata(dev);
226
227 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
228 }
229 static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
230
231 static ssize_t mtd_bitflip_threshold_show(struct device *dev,
232 struct device_attribute *attr,
233 char *buf)
234 {
235 struct mtd_info *mtd = dev_get_drvdata(dev);
236
237 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
238 }
239
240 static ssize_t mtd_bitflip_threshold_store(struct device *dev,
241 struct device_attribute *attr,
242 const char *buf, size_t count)
243 {
244 struct mtd_info *mtd = dev_get_drvdata(dev);
245 unsigned int bitflip_threshold;
246 int retval;
247
248 retval = kstrtouint(buf, 0, &bitflip_threshold);
249 if (retval)
250 return retval;
251
252 mtd->bitflip_threshold = bitflip_threshold;
253 return count;
254 }
255 static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
256 mtd_bitflip_threshold_show,
257 mtd_bitflip_threshold_store);
258
259 static ssize_t mtd_ecc_step_size_show(struct device *dev,
260 struct device_attribute *attr, char *buf)
261 {
262 struct mtd_info *mtd = dev_get_drvdata(dev);
263
264 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
265
266 }
267 static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
268
269 static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
270 struct device_attribute *attr, char *buf)
271 {
272 struct mtd_info *mtd = dev_get_drvdata(dev);
273 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
274
275 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
276 }
277 static DEVICE_ATTR(corrected_bits, S_IRUGO,
278 mtd_ecc_stats_corrected_show, NULL);
279
280 static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
281 struct device_attribute *attr, char *buf)
282 {
283 struct mtd_info *mtd = dev_get_drvdata(dev);
284 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
285
286 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
287 }
288 static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
289
290 static ssize_t mtd_badblocks_show(struct device *dev,
291 struct device_attribute *attr, char *buf)
292 {
293 struct mtd_info *mtd = dev_get_drvdata(dev);
294 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
295
296 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
297 }
298 static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
299
300 static ssize_t mtd_bbtblocks_show(struct device *dev,
301 struct device_attribute *attr, char *buf)
302 {
303 struct mtd_info *mtd = dev_get_drvdata(dev);
304 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
305
306 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
307 }
308 static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
309
310 static struct attribute *mtd_attrs[] = {
311 &dev_attr_type.attr,
312 &dev_attr_flags.attr,
313 &dev_attr_size.attr,
314 &dev_attr_erasesize.attr,
315 &dev_attr_writesize.attr,
316 &dev_attr_subpagesize.attr,
317 &dev_attr_oobsize.attr,
318 &dev_attr_oobavail.attr,
319 &dev_attr_numeraseregions.attr,
320 &dev_attr_name.attr,
321 &dev_attr_ecc_strength.attr,
322 &dev_attr_ecc_step_size.attr,
323 &dev_attr_corrected_bits.attr,
324 &dev_attr_ecc_failures.attr,
325 &dev_attr_bad_blocks.attr,
326 &dev_attr_bbt_blocks.attr,
327 &dev_attr_bitflip_threshold.attr,
328 NULL,
329 };
330 ATTRIBUTE_GROUPS(mtd);
331
332 static const struct device_type mtd_devtype = {
333 .name = "mtd",
334 .groups = mtd_groups,
335 .release = mtd_release,
336 };
337
338 static int mtd_partid_show(struct seq_file *s, void *p)
339 {
340 struct mtd_info *mtd = s->private;
341
342 seq_printf(s, "%s\n", mtd->dbg.partid);
343
344 return 0;
345 }
346
347 static int mtd_partid_debugfs_open(struct inode *inode, struct file *file)
348 {
349 return single_open(file, mtd_partid_show, inode->i_private);
350 }
351
352 static const struct file_operations mtd_partid_debug_fops = {
353 .open = mtd_partid_debugfs_open,
354 .read = seq_read,
355 .llseek = seq_lseek,
356 .release = single_release,
357 };
358
359 static int mtd_partname_show(struct seq_file *s, void *p)
360 {
361 struct mtd_info *mtd = s->private;
362
363 seq_printf(s, "%s\n", mtd->dbg.partname);
364
365 return 0;
366 }
367
368 static int mtd_partname_debugfs_open(struct inode *inode, struct file *file)
369 {
370 return single_open(file, mtd_partname_show, inode->i_private);
371 }
372
373 static const struct file_operations mtd_partname_debug_fops = {
374 .open = mtd_partname_debugfs_open,
375 .read = seq_read,
376 .llseek = seq_lseek,
377 .release = single_release,
378 };
379
380 static struct dentry *dfs_dir_mtd;
381
382 static void mtd_debugfs_populate(struct mtd_info *mtd)
383 {
384 struct device *dev = &mtd->dev;
385 struct dentry *root, *dent;
386
387 if (IS_ERR_OR_NULL(dfs_dir_mtd))
388 return;
389
390 root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
391 if (IS_ERR_OR_NULL(root)) {
392 dev_dbg(dev, "won't show data in debugfs\n");
393 return;
394 }
395
396 mtd->dbg.dfs_dir = root;
397
398 if (mtd->dbg.partid) {
399 dent = debugfs_create_file("partid", 0400, root, mtd,
400 &mtd_partid_debug_fops);
401 if (IS_ERR_OR_NULL(dent))
402 dev_err(dev, "can't create debugfs entry for partid\n");
403 }
404
405 if (mtd->dbg.partname) {
406 dent = debugfs_create_file("partname", 0400, root, mtd,
407 &mtd_partname_debug_fops);
408 if (IS_ERR_OR_NULL(dent))
409 dev_err(dev,
410 "can't create debugfs entry for partname\n");
411 }
412 }
413
414 #ifndef CONFIG_MMU
415 unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
416 {
417 switch (mtd->type) {
418 case MTD_RAM:
419 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
420 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
421 case MTD_ROM:
422 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
423 NOMMU_MAP_READ;
424 default:
425 return NOMMU_MAP_COPY;
426 }
427 }
428 EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
429 #endif
430
431 static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
432 void *cmd)
433 {
434 struct mtd_info *mtd;
435
436 mtd = container_of(n, struct mtd_info, reboot_notifier);
437 mtd->_reboot(mtd);
438
439 return NOTIFY_DONE;
440 }
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
469 struct mtd_pairing_info *info)
470 {
471 int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
472
473 if (wunit < 0 || wunit >= npairs)
474 return -EINVAL;
475
476 if (mtd->pairing && mtd->pairing->get_info)
477 return mtd->pairing->get_info(mtd, wunit, info);
478
479 info->group = 0;
480 info->pair = wunit;
481
482 return 0;
483 }
484 EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
511 const struct mtd_pairing_info *info)
512 {
513 int ngroups = mtd_pairing_groups(mtd);
514 int npairs = mtd_wunit_per_eb(mtd) / ngroups;
515
516 if (!info || info->pair < 0 || info->pair >= npairs ||
517 info->group < 0 || info->group >= ngroups)
518 return -EINVAL;
519
520 if (mtd->pairing && mtd->pairing->get_wunit)
521 return mtd->pairing->get_wunit(mtd, info);
522
523 return info->pair;
524 }
525 EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
526
527
528
529
530
531
532
533
534
535
536
537 int mtd_pairing_groups(struct mtd_info *mtd)
538 {
539 if (!mtd->pairing || !mtd->pairing->ngroups)
540 return 1;
541
542 return mtd->pairing->ngroups;
543 }
544 EXPORT_SYMBOL_GPL(mtd_pairing_groups);
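/*
 * Illustrative sketch (not part of the original file): converting a write
 * unit index to its pairing description and back with the helpers above.
 * The "mtd" pointer and the function name are hypothetical; for SLC-style
 * devices without a pairing scheme the round trip simply returns wunit.
 */
static int __maybe_unused example_pairing_roundtrip(struct mtd_info *mtd,
						    int wunit)
{
	struct mtd_pairing_info info;
	int ret;

	ret = mtd_wunit_to_pairing_info(mtd, wunit, &info);
	if (ret)
		return ret;

	/* Map the (group, pair) description back to a write unit index. */
	return mtd_pairing_info_to_wunit(mtd, &info);
}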
545
546 static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
547 void *val, size_t bytes)
548 {
549 struct mtd_info *mtd = priv;
550 size_t retlen;
551 int err;
552
553 err = mtd_read(mtd, offset, bytes, &retlen, val);
554 if (err && err != -EUCLEAN)
555 return err;
556
557 return retlen == bytes ? 0 : -EIO;
558 }
559
560 static int mtd_nvmem_add(struct mtd_info *mtd)
561 {
562 struct nvmem_config config = {};
563
564 config.id = -1;
565 config.dev = &mtd->dev;
566 config.name = dev_name(&mtd->dev);
567 config.owner = THIS_MODULE;
568 config.reg_read = mtd_nvmem_reg_read;
569 config.size = mtd->size;
570 config.word_size = 1;
571 config.stride = 1;
572 config.read_only = true;
573 config.root_only = true;
574 config.no_of_node = true;
575 config.priv = mtd;
576
577 mtd->nvmem = nvmem_register(&config);
578 if (IS_ERR(mtd->nvmem)) {
579
580 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
581 mtd->nvmem = NULL;
582 } else {
583 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
584 return PTR_ERR(mtd->nvmem);
585 }
586 }
587
588 return 0;
589 }
590
591
592
593
594
595
596
597
598
599
600 int add_mtd_device(struct mtd_info *mtd)
601 {
602 struct mtd_notifier *not;
603 int i, error;
604
605
606
607
608
609
610 if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
611 return -EEXIST;
612
613 BUG_ON(mtd->writesize == 0);
614
615
616
617
618
619 if (WARN_ON((mtd->_write && mtd->_write_oob) ||
620 (mtd->_read && mtd->_read_oob)))
621 return -EINVAL;
622
623 if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
624 !(mtd->flags & MTD_NO_ERASE)))
625 return -EINVAL;
626
627 mutex_lock(&mtd_table_mutex);
628
629 i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
630 if (i < 0) {
631 error = i;
632 goto fail_locked;
633 }
634
635 mtd->index = i;
636 mtd->usecount = 0;
637
638
639 if (mtd->bitflip_threshold == 0)
640 mtd->bitflip_threshold = mtd->ecc_strength;
641
642 if (is_power_of_2(mtd->erasesize))
643 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
644 else
645 mtd->erasesize_shift = 0;
646
647 if (is_power_of_2(mtd->writesize))
648 mtd->writesize_shift = ffs(mtd->writesize) - 1;
649 else
650 mtd->writesize_shift = 0;
651
652 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
653 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
654
655
656 if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
657 error = mtd_unlock(mtd, 0, mtd->size);
658 if (error && error != -EOPNOTSUPP)
659 printk(KERN_WARNING
660 "%s: unlock failed, writes may not work\n",
661 mtd->name);
662
663 error = 0;
664 }
665
666
667
668
669 mtd->dev.type = &mtd_devtype;
670 mtd->dev.class = &mtd_class;
671 mtd->dev.devt = MTD_DEVT(i);
672 dev_set_name(&mtd->dev, "mtd%d", i);
673 dev_set_drvdata(&mtd->dev, mtd);
674 of_node_get(mtd_get_of_node(mtd));
675 error = device_register(&mtd->dev);
676 if (error)
677 goto fail_added;
678
679
680 error = mtd_nvmem_add(mtd);
681 if (error)
682 goto fail_nvmem_add;
683
684 mtd_debugfs_populate(mtd);
685
686 device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
687 "mtd%dro", i);
688
689 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
690
691
692 list_for_each_entry(not, &mtd_notifiers, list)
693 not->add(mtd);
694
695 mutex_unlock(&mtd_table_mutex);
696
697
698
699
700 __module_get(THIS_MODULE);
701 return 0;
702
703 fail_nvmem_add:
704 device_unregister(&mtd->dev);
705 fail_added:
706 of_node_put(mtd_get_of_node(mtd));
707 idr_remove(&mtd_idr, i);
708 fail_locked:
709 mutex_unlock(&mtd_table_mutex);
710 return error;
711 }
712
713
714
715
716
717
718
719
720
721
722
723 int del_mtd_device(struct mtd_info *mtd)
724 {
725 int ret;
726 struct mtd_notifier *not;
727
728 mutex_lock(&mtd_table_mutex);
729
730 debugfs_remove_recursive(mtd->dbg.dfs_dir);
731
732 if (idr_find(&mtd_idr, mtd->index) != mtd) {
733 ret = -ENODEV;
734 goto out_error;
735 }
736
737
738
739 list_for_each_entry(not, &mtd_notifiers, list)
740 not->remove(mtd);
741
742 if (mtd->usecount) {
743 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
744 mtd->index, mtd->name, mtd->usecount);
745 ret = -EBUSY;
746 } else {
747
748 if (mtd->nvmem)
749 nvmem_unregister(mtd->nvmem);
750
751 device_unregister(&mtd->dev);
752
753 idr_remove(&mtd_idr, mtd->index);
754 of_node_put(mtd_get_of_node(mtd));
755
756 module_put(THIS_MODULE);
757 ret = 0;
758 }
759
760 out_error:
761 mutex_unlock(&mtd_table_mutex);
762 return ret;
763 }
764
765
766
767
768
769 static void mtd_set_dev_defaults(struct mtd_info *mtd)
770 {
771 if (mtd->dev.parent) {
772 if (!mtd->owner && mtd->dev.parent->driver)
773 mtd->owner = mtd->dev.parent->driver->owner;
774 if (!mtd->name)
775 mtd->name = dev_name(mtd->dev.parent);
776 } else {
777 pr_debug("mtd device won't show a device symlink in sysfs\n");
778 }
779
780 mtd->orig_flags = mtd->flags;
781 }
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811 int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
812 struct mtd_part_parser_data *parser_data,
813 const struct mtd_partition *parts,
814 int nr_parts)
815 {
816 int ret;
817
818 mtd_set_dev_defaults(mtd);
819
820 if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
821 ret = add_mtd_device(mtd);
822 if (ret)
823 return ret;
824 }
825
826
827 ret = parse_mtd_partitions(mtd, types, parser_data);
828 if (ret > 0)
829 ret = 0;
830 else if (nr_parts)
831 ret = add_mtd_partitions(mtd, parts, nr_parts);
832 else if (!device_is_registered(&mtd->dev))
833 ret = add_mtd_device(mtd);
834 else
835 ret = 0;
836
837 if (ret)
838 goto out;
839
840
841
842
843
844
845
846
847
848 WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
849 "MTD already registered\n");
850 if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
851 mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
852 register_reboot_notifier(&mtd->reboot_notifier);
853 }
854
855 out:
856 if (ret && device_is_registered(&mtd->dev))
857 del_mtd_device(mtd);
858
859 return ret;
860 }
861 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
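/*
 * Illustrative sketch (not part of the original file): how a flash
 * controller driver might hand a freshly initialised mtd_info to the core.
 * "example_register", "example_probe_types" and the fallback partitions are
 * hypothetical driver-side names; "cmdlinepart" and "ofpart" are existing
 * partition parsers.  The matching remove path would call
 * mtd_device_unregister().
 */
static const char * const example_probe_types[] __maybe_unused = {
	"cmdlinepart", "ofpart", NULL
};

static int __maybe_unused example_register(struct mtd_info *mtd)
{
	/* Static fallback layout used only when no parser claims the device. */
	static const struct mtd_partition example_parts[] = {
		{ .name = "boot", .offset = 0, .size = 0x100000 },
		{ .name = "data", .offset = MTDPART_OFS_APPEND,
		  .size = MTDPART_SIZ_FULL },
	};

	return mtd_device_parse_register(mtd, example_probe_types, NULL,
					 example_parts,
					 ARRAY_SIZE(example_parts));
}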
862
863
864
865
866
867
868
869 int mtd_device_unregister(struct mtd_info *master)
870 {
871 int err;
872
873 if (master->_reboot)
874 unregister_reboot_notifier(&master->reboot_notifier);
875
876 err = del_mtd_partitions(master);
877 if (err)
878 return err;
879
880 if (!device_is_registered(&master->dev))
881 return 0;
882
883 return del_mtd_device(master);
884 }
885 EXPORT_SYMBOL_GPL(mtd_device_unregister);
886
887
888
889
890
891
892
893
894
895 void register_mtd_user (struct mtd_notifier *new)
896 {
897 struct mtd_info *mtd;
898
899 mutex_lock(&mtd_table_mutex);
900
901 list_add(&new->list, &mtd_notifiers);
902
903 __module_get(THIS_MODULE);
904
905 mtd_for_each_device(mtd)
906 new->add(mtd);
907
908 mutex_unlock(&mtd_table_mutex);
909 }
910 EXPORT_SYMBOL_GPL(register_mtd_user);
911
912
913
914
915
916
917
918
919
920
921 int unregister_mtd_user (struct mtd_notifier *old)
922 {
923 struct mtd_info *mtd;
924
925 mutex_lock(&mtd_table_mutex);
926
927 module_put(THIS_MODULE);
928
929 mtd_for_each_device(mtd)
930 old->remove(mtd);
931
932 list_del(&old->list);
933 mutex_unlock(&mtd_table_mutex);
934 return 0;
935 }
936 EXPORT_SYMBOL_GPL(unregister_mtd_user);
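/*
 * Illustrative sketch (not part of the original file): a minimal MTD user
 * that wants to hear about every device that comes and goes.  The callback
 * bodies and the notifier name are hypothetical.  register_mtd_user()
 * replays ->add() for already-present devices; unregister_mtd_user()
 * replays ->remove() before dropping the notifier from the list.
 */
static void example_notify_add(struct mtd_info *mtd)
{
	pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
}

static void example_notify_remove(struct mtd_info *mtd)
{
	pr_info("mtd%d (%s) is going away\n", mtd->index, mtd->name);
}

static struct mtd_notifier example_notifier __maybe_unused = {
	.add	= example_notify_add,
	.remove	= example_notify_remove,
};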
937
938
939
940
941
942
943
944
945
946
947
948
949 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
950 {
951 struct mtd_info *ret = NULL, *other;
952 int err = -ENODEV;
953
954 mutex_lock(&mtd_table_mutex);
955
956 if (num == -1) {
957 mtd_for_each_device(other) {
958 if (other == mtd) {
959 ret = mtd;
960 break;
961 }
962 }
963 } else if (num >= 0) {
964 ret = idr_find(&mtd_idr, num);
965 if (mtd && mtd != ret)
966 ret = NULL;
967 }
968
969 if (!ret) {
970 ret = ERR_PTR(err);
971 goto out;
972 }
973
974 err = __get_mtd_device(ret);
975 if (err)
976 ret = ERR_PTR(err);
977 out:
978 mutex_unlock(&mtd_table_mutex);
979 return ret;
980 }
981 EXPORT_SYMBOL_GPL(get_mtd_device);
982
983
984 int __get_mtd_device(struct mtd_info *mtd)
985 {
986 int err;
987
988 if (!try_module_get(mtd->owner))
989 return -ENODEV;
990
991 if (mtd->_get_device) {
992 err = mtd->_get_device(mtd);
993
994 if (err) {
995 module_put(mtd->owner);
996 return err;
997 }
998 }
999 mtd->usecount++;
1000 return 0;
1001 }
1002 EXPORT_SYMBOL_GPL(__get_mtd_device);
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012 struct mtd_info *get_mtd_device_nm(const char *name)
1013 {
1014 int err = -ENODEV;
1015 struct mtd_info *mtd = NULL, *other;
1016
1017 mutex_lock(&mtd_table_mutex);
1018
1019 mtd_for_each_device(other) {
1020 if (!strcmp(name, other->name)) {
1021 mtd = other;
1022 break;
1023 }
1024 }
1025
1026 if (!mtd)
1027 goto out_unlock;
1028
1029 err = __get_mtd_device(mtd);
1030 if (err)
1031 goto out_unlock;
1032
1033 mutex_unlock(&mtd_table_mutex);
1034 return mtd;
1035
1036 out_unlock:
1037 mutex_unlock(&mtd_table_mutex);
1038 return ERR_PTR(err);
1039 }
1040 EXPORT_SYMBOL_GPL(get_mtd_device_nm);
1041
1042 void put_mtd_device(struct mtd_info *mtd)
1043 {
1044 mutex_lock(&mtd_table_mutex);
1045 __put_mtd_device(mtd);
1046 mutex_unlock(&mtd_table_mutex);
1047
1048 }
1049 EXPORT_SYMBOL_GPL(put_mtd_device);
1050
1051 void __put_mtd_device(struct mtd_info *mtd)
1052 {
1053 --mtd->usecount;
1054 BUG_ON(mtd->usecount < 0);
1055
1056 if (mtd->_put_device)
1057 mtd->_put_device(mtd);
1058
1059 module_put(mtd->owner);
1060 }
1061 EXPORT_SYMBOL_GPL(__put_mtd_device);
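/*
 * Illustrative sketch (not part of the original file): looking a device up
 * by name, which takes a reference that must be dropped again with
 * put_mtd_device().  The partition name "u-boot-env" and the helper name
 * are hypothetical.
 */
static int __maybe_unused example_peek_first_byte(u8 *out)
{
	struct mtd_info *mtd = get_mtd_device_nm("u-boot-env");
	size_t retlen;
	int ret;

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	ret = mtd_read(mtd, 0, 1, &retlen, out);
	put_mtd_device(mtd);

	return ret;
}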
1062
1063
1064
1065
1066
1067
1068 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1069 {
1070 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1071
1072 if (!mtd->erasesize || !mtd->_erase)
1073 return -ENOTSUPP;
1074
1075 if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1076 return -EINVAL;
1077 if (!(mtd->flags & MTD_WRITEABLE))
1078 return -EROFS;
1079
1080 if (!instr->len)
1081 return 0;
1082
1083 ledtrig_mtd_activity();
1084 return mtd->_erase(mtd, instr);
1085 }
1086 EXPORT_SYMBOL_GPL(mtd_erase);
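/*
 * Illustrative sketch (not part of the original file): erasing a single
 * erase block.  mtd_erase() is synchronous here; on failure instr.fail_addr
 * reports where the erase stopped.  The block index parameter is
 * hypothetical.
 */
static int __maybe_unused example_erase_block(struct mtd_info *mtd, int block)
{
	struct erase_info instr = {
		.addr = (u64)block * mtd->erasesize,
		.len  = mtd->erasesize,
	};

	return mtd_erase(mtd, &instr);
}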
1087
1088
1089
1090
1091 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1092 void **virt, resource_size_t *phys)
1093 {
1094 *retlen = 0;
1095 *virt = NULL;
1096 if (phys)
1097 *phys = 0;
1098 if (!mtd->_point)
1099 return -EOPNOTSUPP;
1100 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1101 return -EINVAL;
1102 if (!len)
1103 return 0;
1104 return mtd->_point(mtd, from, len, retlen, virt, phys);
1105 }
1106 EXPORT_SYMBOL_GPL(mtd_point);
1107
1108
1109 int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1110 {
1111 if (!mtd->_unpoint)
1112 return -EOPNOTSUPP;
1113 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1114 return -EINVAL;
1115 if (!len)
1116 return 0;
1117 return mtd->_unpoint(mtd, from, len);
1118 }
1119 EXPORT_SYMBOL_GPL(mtd_unpoint);
1120
1121
1122
1123
1124
1125
1126 unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1127 unsigned long offset, unsigned long flags)
1128 {
1129 size_t retlen;
1130 void *virt;
1131 int ret;
1132
1133 ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1134 if (ret)
1135 return ret;
1136 if (retlen != len) {
1137 mtd_unpoint(mtd, offset, retlen);
1138 return -ENOSYS;
1139 }
1140 return (unsigned long)virt;
1141 }
1142 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1143
1144 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1145 u_char *buf)
1146 {
1147 struct mtd_oob_ops ops = {
1148 .len = len,
1149 .datbuf = buf,
1150 };
1151 int ret;
1152
1153 ret = mtd_read_oob(mtd, from, &ops);
1154 *retlen = ops.retlen;
1155
1156 return ret;
1157 }
1158 EXPORT_SYMBOL_GPL(mtd_read);
1159
1160 int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1161 const u_char *buf)
1162 {
1163 struct mtd_oob_ops ops = {
1164 .len = len,
1165 .datbuf = (u8 *)buf,
1166 };
1167 int ret;
1168
1169 ret = mtd_write_oob(mtd, to, &ops);
1170 *retlen = ops.retlen;
1171
1172 return ret;
1173 }
1174 EXPORT_SYMBOL_GPL(mtd_write);
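/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * around mtd_read()/mtd_write().  -EUCLEAN from mtd_read() means bitflips
 * were corrected above the threshold, not a hard failure, and retlen must
 * be checked either way.  Copying assumes the destination range was erased
 * beforehand; all names are hypothetical.
 */
static int __maybe_unused example_copy_region(struct mtd_info *mtd,
					      loff_t src, loff_t dst,
					      u8 *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = mtd_read(mtd, src, len, &retlen, buf);
	if (ret && !mtd_is_bitflip(ret))
		return ret;
	if (retlen != len)
		return -EIO;

	ret = mtd_write(mtd, dst, len, &retlen, buf);
	if (ret)
		return ret;

	return retlen == len ? 0 : -EIO;
}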
1175
1176
1177
1178
1179
1180
1181
1182
1183 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1184 const u_char *buf)
1185 {
1186 *retlen = 0;
1187 if (!mtd->_panic_write)
1188 return -EOPNOTSUPP;
1189 if (to < 0 || to >= mtd->size || len > mtd->size - to)
1190 return -EINVAL;
1191 if (!(mtd->flags & MTD_WRITEABLE))
1192 return -EROFS;
1193 if (!len)
1194 return 0;
1195 if (!mtd->oops_panic_write)
1196 mtd->oops_panic_write = true;
1197
1198 return mtd->_panic_write(mtd, to, len, retlen, buf);
1199 }
1200 EXPORT_SYMBOL_GPL(mtd_panic_write);
1201
1202 static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1203 struct mtd_oob_ops *ops)
1204 {
1205
1206
1207
1208
1209
1210 if (!ops->datbuf)
1211 ops->len = 0;
1212
1213 if (!ops->oobbuf)
1214 ops->ooblen = 0;
1215
1216 if (offs < 0 || offs + ops->len > mtd->size)
1217 return -EINVAL;
1218
1219 if (ops->ooblen) {
1220 size_t maxooblen;
1221
1222 if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1223 return -EINVAL;
1224
1225 maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1226 mtd_div_by_ws(offs, mtd)) *
1227 mtd_oobavail(mtd, ops)) - ops->ooboffs;
1228 if (ops->ooblen > maxooblen)
1229 return -EINVAL;
1230 }
1231
1232 return 0;
1233 }
1234
1235 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1236 {
1237 int ret_code;
1238 ops->retlen = ops->oobretlen = 0;
1239
1240 ret_code = mtd_check_oob_ops(mtd, from, ops);
1241 if (ret_code)
1242 return ret_code;
1243
1244 ledtrig_mtd_activity();
1245
1246
1247 if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
1248 return -EOPNOTSUPP;
1249
1250 if (mtd->_read_oob)
1251 ret_code = mtd->_read_oob(mtd, from, ops);
1252 else
1253 ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
1254 ops->datbuf);
1255
1256
1257
1258
1259
1260
1261
1262 if (unlikely(ret_code < 0))
1263 return ret_code;
1264 if (mtd->ecc_strength == 0)
1265 return 0;
1266 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1267 }
1268 EXPORT_SYMBOL_GPL(mtd_read_oob);
1269
1270 int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1271 struct mtd_oob_ops *ops)
1272 {
1273 int ret;
1274
1275 ops->retlen = ops->oobretlen = 0;
1276
1277 if (!(mtd->flags & MTD_WRITEABLE))
1278 return -EROFS;
1279
1280 ret = mtd_check_oob_ops(mtd, to, ops);
1281 if (ret)
1282 return ret;
1283
1284 ledtrig_mtd_activity();
1285
1286
1287 if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
1288 return -EOPNOTSUPP;
1289
1290 if (mtd->_write_oob)
1291 return mtd->_write_oob(mtd, to, ops);
1292 else
1293 return mtd->_write(mtd, to, ops->len, &ops->retlen,
1294 ops->datbuf);
1295 }
1296 EXPORT_SYMBOL_GPL(mtd_write_oob);
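/*
 * Illustrative sketch (not part of the original file): reading one page of
 * in-band data together with its ECC-free OOB bytes in a single call.
 * The buffers and the page offset are hypothetical.
 */
static int __maybe_unused example_read_page_with_oob(struct mtd_info *mtd,
						     loff_t page_offs,
						     u8 *data, u8 *oob)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,	/* only the free OOB bytes */
		.len	= mtd->writesize,
		.ooblen	= mtd->oobavail,
		.datbuf	= data,
		.oobbuf	= oob,
	};

	return mtd_read_oob(mtd, page_offs, &ops);
}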
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1315 struct mtd_oob_region *oobecc)
1316 {
1317 memset(oobecc, 0, sizeof(*oobecc));
1318
1319 if (!mtd || section < 0)
1320 return -EINVAL;
1321
1322 if (!mtd->ooblayout || !mtd->ooblayout->ecc)
1323 return -ENOTSUPP;
1324
1325 return mtd->ooblayout->ecc(mtd, section, oobecc);
1326 }
1327 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1347 struct mtd_oob_region *oobfree)
1348 {
1349 memset(oobfree, 0, sizeof(*oobfree));
1350
1351 if (!mtd || section < 0)
1352 return -EINVAL;
1353
1354 if (!mtd->ooblayout || !mtd->ooblayout->free)
1355 return -ENOTSUPP;
1356
1357 return mtd->ooblayout->free(mtd, section, oobfree);
1358 }
1359 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378 static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1379 int *sectionp, struct mtd_oob_region *oobregion,
1380 int (*iter)(struct mtd_info *,
1381 int section,
1382 struct mtd_oob_region *oobregion))
1383 {
1384 int pos = 0, ret, section = 0;
1385
1386 memset(oobregion, 0, sizeof(*oobregion));
1387
1388 while (1) {
1389 ret = iter(mtd, section, oobregion);
1390 if (ret)
1391 return ret;
1392
1393 if (pos + oobregion->length > byte)
1394 break;
1395
1396 pos += oobregion->length;
1397 section++;
1398 }
1399
1400
1401
1402
1403
1404 oobregion->offset += byte - pos;
1405 oobregion->length -= byte - pos;
1406 *sectionp = section;
1407
1408 return 0;
1409 }
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1425 int *section,
1426 struct mtd_oob_region *oobregion)
1427 {
1428 return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1429 mtd_ooblayout_ecc);
1430 }
1431 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1448 const u8 *oobbuf, int start, int nbytes,
1449 int (*iter)(struct mtd_info *,
1450 int section,
1451 struct mtd_oob_region *oobregion))
1452 {
1453 struct mtd_oob_region oobregion;
1454 int section, ret;
1455
1456 ret = mtd_ooblayout_find_region(mtd, start, &section,
1457 &oobregion, iter);
1458
1459 while (!ret) {
1460 int cnt;
1461
1462 cnt = min_t(int, nbytes, oobregion.length);
1463 memcpy(buf, oobbuf + oobregion.offset, cnt);
1464 buf += cnt;
1465 nbytes -= cnt;
1466
1467 if (!nbytes)
1468 break;
1469
1470 ret = iter(mtd, ++section, &oobregion);
1471 }
1472
1473 return ret;
1474 }
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1491 u8 *oobbuf, int start, int nbytes,
1492 int (*iter)(struct mtd_info *,
1493 int section,
1494 struct mtd_oob_region *oobregion))
1495 {
1496 struct mtd_oob_region oobregion;
1497 int section, ret;
1498
1499 ret = mtd_ooblayout_find_region(mtd, start, &section,
1500 &oobregion, iter);
1501
1502 while (!ret) {
1503 int cnt;
1504
1505 cnt = min_t(int, nbytes, oobregion.length);
1506 memcpy(oobbuf + oobregion.offset, buf, cnt);
1507 buf += cnt;
1508 nbytes -= cnt;
1509
1510 if (!nbytes)
1511 break;
1512
1513 ret = iter(mtd, ++section, &oobregion);
1514 }
1515
1516 return ret;
1517 }
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1529 int (*iter)(struct mtd_info *,
1530 int section,
1531 struct mtd_oob_region *oobregion))
1532 {
1533 struct mtd_oob_region oobregion;
1534 int section = 0, ret, nbytes = 0;
1535
1536 while (1) {
1537 ret = iter(mtd, section++, &oobregion);
1538 if (ret) {
1539 if (ret == -ERANGE)
1540 ret = nbytes;
1541 break;
1542 }
1543
1544 nbytes += oobregion.length;
1545 }
1546
1547 return ret;
1548 }
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1563 const u8 *oobbuf, int start, int nbytes)
1564 {
1565 return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1566 mtd_ooblayout_ecc);
1567 }
1568 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1583 u8 *oobbuf, int start, int nbytes)
1584 {
1585 return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1586 mtd_ooblayout_ecc);
1587 }
1588 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1603 const u8 *oobbuf, int start, int nbytes)
1604 {
1605 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1606 mtd_ooblayout_free);
1607 }
1608 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1623 u8 *oobbuf, int start, int nbytes)
1624 {
1625 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1626 mtd_ooblayout_free);
1627 }
1628 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1639 {
1640 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1641 }
1642 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1653 {
1654 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1655 }
1656 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
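/*
 * Illustrative sketch (not part of the original file): walking the ECC
 * regions of a device's OOB layout.  The loop stops when the accessor
 * returns non-zero, typically -ERANGE once the section index runs past the
 * layout, or -ENOTSUPP when no layout is set.
 */
static void __maybe_unused example_dump_ecc_layout(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section = 0;

	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
		pr_info("ECC section %d: offset %u, length %u\n",
			section, region.offset, region.length);
		section++;
	}
}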
1657
1658
1659
1660
1661
1662
1663 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1664 struct otp_info *buf)
1665 {
1666 if (!mtd->_get_fact_prot_info)
1667 return -EOPNOTSUPP;
1668 if (!len)
1669 return 0;
1670 return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
1671 }
1672 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
1673
1674 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1675 size_t *retlen, u_char *buf)
1676 {
1677 *retlen = 0;
1678 if (!mtd->_read_fact_prot_reg)
1679 return -EOPNOTSUPP;
1680 if (!len)
1681 return 0;
1682 return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
1683 }
1684 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
1685
1686 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1687 struct otp_info *buf)
1688 {
1689 if (!mtd->_get_user_prot_info)
1690 return -EOPNOTSUPP;
1691 if (!len)
1692 return 0;
1693 return mtd->_get_user_prot_info(mtd, len, retlen, buf);
1694 }
1695 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
1696
1697 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1698 size_t *retlen, u_char *buf)
1699 {
1700 *retlen = 0;
1701 if (!mtd->_read_user_prot_reg)
1702 return -EOPNOTSUPP;
1703 if (!len)
1704 return 0;
1705 return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
1706 }
1707 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
1708
1709 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
1710 size_t *retlen, u_char *buf)
1711 {
1712 int ret;
1713
1714 *retlen = 0;
1715 if (!mtd->_write_user_prot_reg)
1716 return -EOPNOTSUPP;
1717 if (!len)
1718 return 0;
1719 ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
1720 if (ret)
1721 return ret;
1722
1723
1724
1725
1726
1727 return (*retlen) ? 0 : -ENOSPC;
1728 }
1729 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
1730
1731 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
1732 {
1733 if (!mtd->_lock_user_prot_reg)
1734 return -EOPNOTSUPP;
1735 if (!len)
1736 return 0;
1737 return mtd->_lock_user_prot_reg(mtd, from, len);
1738 }
1739 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
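/*
 * Illustrative sketch (not part of the original file): dumping the start of
 * the user OTP area into a caller-supplied buffer.  The helper name and the
 * fixed offset 0 are hypothetical.
 */
static int __maybe_unused example_read_user_otp(struct mtd_info *mtd,
						u8 *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, 0, len, &retlen, buf);
	if (ret)
		return ret;

	return retlen == len ? 0 : -EIO;
}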
1740
1741
1742 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1743 {
1744 if (!mtd->_lock)
1745 return -EOPNOTSUPP;
1746 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
1747 return -EINVAL;
1748 if (!len)
1749 return 0;
1750 return mtd->_lock(mtd, ofs, len);
1751 }
1752 EXPORT_SYMBOL_GPL(mtd_lock);
1753
1754 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1755 {
1756 if (!mtd->_unlock)
1757 return -EOPNOTSUPP;
1758 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
1759 return -EINVAL;
1760 if (!len)
1761 return 0;
1762 return mtd->_unlock(mtd, ofs, len);
1763 }
1764 EXPORT_SYMBOL_GPL(mtd_unlock);
1765
1766 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1767 {
1768 if (!mtd->_is_locked)
1769 return -EOPNOTSUPP;
1770 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
1771 return -EINVAL;
1772 if (!len)
1773 return 0;
1774 return mtd->_is_locked(mtd, ofs, len);
1775 }
1776 EXPORT_SYMBOL_GPL(mtd_is_locked);
1777
1778 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1779 {
1780 if (ofs < 0 || ofs >= mtd->size)
1781 return -EINVAL;
1782 if (!mtd->_block_isreserved)
1783 return 0;
1784 return mtd->_block_isreserved(mtd, ofs);
1785 }
1786 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
1787
1788 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
1789 {
1790 if (ofs < 0 || ofs >= mtd->size)
1791 return -EINVAL;
1792 if (!mtd->_block_isbad)
1793 return 0;
1794 return mtd->_block_isbad(mtd, ofs);
1795 }
1796 EXPORT_SYMBOL_GPL(mtd_block_isbad);
1797
1798 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
1799 {
1800 if (!mtd->_block_markbad)
1801 return -EOPNOTSUPP;
1802 if (ofs < 0 || ofs >= mtd->size)
1803 return -EINVAL;
1804 if (!(mtd->flags & MTD_WRITEABLE))
1805 return -EROFS;
1806 return mtd->_block_markbad(mtd, ofs);
1807 }
1808 EXPORT_SYMBOL_GPL(mtd_block_markbad);
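/*
 * Illustrative sketch (not part of the original file): counting bad blocks
 * by scanning every erase block of the device with mtd_block_isbad().
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	loff_t offs;
	int bad = 0;

	for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
		int ret = mtd_block_isbad(mtd, offs);

		if (ret < 0)
			return ret;
		if (ret)
			bad++;
	}

	return bad;
}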
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1822 unsigned long count, loff_t to, size_t *retlen)
1823 {
1824 unsigned long i;
1825 size_t totlen = 0, thislen;
1826 int ret = 0;
1827
1828 for (i = 0; i < count; i++) {
1829 if (!vecs[i].iov_len)
1830 continue;
1831 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
1832 vecs[i].iov_base);
1833 totlen += thislen;
1834 if (ret || thislen != vecs[i].iov_len)
1835 break;
1836 to += vecs[i].iov_len;
1837 }
1838 *retlen = totlen;
1839 return ret;
1840 }
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1854 unsigned long count, loff_t to, size_t *retlen)
1855 {
1856 *retlen = 0;
1857 if (!(mtd->flags & MTD_WRITEABLE))
1858 return -EROFS;
1859 if (!mtd->_writev)
1860 return default_mtd_writev(mtd, vecs, count, to, retlen);
1861 return mtd->_writev(mtd, vecs, count, to, retlen);
1862 }
1863 EXPORT_SYMBOL_GPL(mtd_writev);
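/*
 * Illustrative sketch (not part of the original file): writing a header and
 * a payload from two separate buffers in one mtd_writev() call.  All names
 * are hypothetical; the destination range is assumed to be erased.
 */
static int __maybe_unused example_write_vectored(struct mtd_info *mtd,
						 loff_t to,
						 void *hdr, size_t hdr_len,
						 void *payload, size_t payload_len)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,	.iov_len = hdr_len },
		{ .iov_base = payload,	.iov_len = payload_len },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (ret)
		return ret;

	return retlen == hdr_len + payload_len ? 0 : -EIO;
}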
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
1890 {
1891 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
1892 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
1893 void *kbuf;
1894
1895 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
1896
1897 while (*size > min_alloc) {
1898 kbuf = kmalloc(*size, flags);
1899 if (kbuf)
1900 return kbuf;
1901
1902 *size >>= 1;
1903 *size = ALIGN(*size, mtd->writesize);
1904 }
1905
1906
1907
1908
1909
1910 return kmalloc(*size, GFP_KERNEL);
1911 }
1912 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
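/*
 * Illustrative sketch (not part of the original file): asking for a large
 * transfer buffer but accepting whatever contiguous size the allocator can
 * provide, then covering the requested range in chunks of that size.  The
 * helper name and parameters are hypothetical.
 */
static int __maybe_unused example_read_in_chunks(struct mtd_info *mtd,
						 loff_t from, size_t total)
{
	size_t chunk = total;
	size_t retlen;
	u8 *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &chunk);	/* chunk may shrink */
	if (!buf)
		return -ENOMEM;

	while (total) {
		size_t len = min(total, chunk);

		ret = mtd_read(mtd, from, len, &retlen, buf);
		if (ret && !mtd_is_bitflip(ret))
			break;
		ret = 0;
		from += len;
		total -= len;
	}

	kfree(buf);
	return ret;
}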
1913
1914 #ifdef CONFIG_PROC_FS
1915
1916
1917
1918
1919 static int mtd_proc_show(struct seq_file *m, void *v)
1920 {
1921 struct mtd_info *mtd;
1922
1923 seq_puts(m, "dev: size erasesize name\n");
1924 mutex_lock(&mtd_table_mutex);
1925 mtd_for_each_device(mtd) {
1926 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
1927 mtd->index, (unsigned long long)mtd->size,
1928 mtd->erasesize, mtd->name);
1929 }
1930 mutex_unlock(&mtd_table_mutex);
1931 return 0;
1932 }
1933 #endif
1934
1935
1936
1937
1938 static struct backing_dev_info * __init mtd_bdi_init(char *name)
1939 {
1940 struct backing_dev_info *bdi;
1941 int ret;
1942
1943 bdi = bdi_alloc(GFP_KERNEL);
1944 if (!bdi)
1945 return ERR_PTR(-ENOMEM);
1946
1947 bdi->name = name;
1948
1949
1950
1951
1952 ret = bdi_register(bdi, "%.28s-0", name);
1953 if (ret)
1954 bdi_put(bdi);
1955
1956 return ret ? ERR_PTR(ret) : bdi;
1957 }
1958
1959 static struct proc_dir_entry *proc_mtd;
1960
1961 static int __init init_mtd(void)
1962 {
1963 int ret;
1964
1965 ret = class_register(&mtd_class);
1966 if (ret)
1967 goto err_reg;
1968
1969 mtd_bdi = mtd_bdi_init("mtd");
1970 if (IS_ERR(mtd_bdi)) {
1971 ret = PTR_ERR(mtd_bdi);
1972 goto err_bdi;
1973 }
1974
1975 proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
1976
1977 ret = init_mtdchar();
1978 if (ret)
1979 goto out_procfs;
1980
1981 dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
1982
1983 return 0;
1984
1985 out_procfs:
1986 if (proc_mtd)
1987 remove_proc_entry("mtd", NULL);
1988 bdi_put(mtd_bdi);
1989 err_bdi:
1990 class_unregister(&mtd_class);
1991 err_reg:
1992 pr_err("Error registering mtd class or bdi: %d\n", ret);
1993 return ret;
1994 }
1995
1996 static void __exit cleanup_mtd(void)
1997 {
1998 debugfs_remove_recursive(dfs_dir_mtd);
1999 cleanup_mtdchar();
2000 if (proc_mtd)
2001 remove_proc_entry("mtd", NULL);
2002 class_unregister(&mtd_class);
2003 bdi_put(mtd_bdi);
2004 idr_destroy(&mtd_idr);
2005 }
2006
2007 module_init(init_mtd);
2008 module_exit(cleanup_mtd);
2009
2010 MODULE_LICENSE("GPL");
2011 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2012 MODULE_DESCRIPTION("Core MTD registration and access routines");