This source file includes the following definitions:
- nanddev_isbad
- nanddev_markbad
- nanddev_isreserved
- nanddev_erase
- nanddev_mtd_erase
- nanddev_mtd_max_bad_blocks
- nanddev_init
- nanddev_cleanup
// SPDX-License-Identifier: GPL-2.0
/*
 * Core of the generic NAND framework.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>

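/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */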
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_bbt_is_initialized(nand)) {
		unsigned int entry;
		int status;

		entry = nanddev_bbt_pos_to_entry(nand, pos);
		status = nanddev_bbt_get_block_status(nand, entry);

		/* Lazy block status retrieval: ask the chip once and
		 * cache the answer in the BBT.
		 */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if (nand->ops->isbad(nand, pos))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			nanddev_bbt_set_block_status(nand, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);

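/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Marks the block bad through nand->ops->markbad() and, if a bad block
 * table is in use, records the block as worn out in the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */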
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	if (nanddev_isbad(nand, pos))
		return 0;

	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = nanddev_bbt_update(nand);

out:
	/* Update the statistics exposed through the MTD layer */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);

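/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */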
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
	unsigned int entry;
	int status;

	if (!nanddev_bbt_is_initialized(nand))
		return false;

	/* Return info from the table */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	status = nanddev_bbt_get_block_status(nand, entry);
	return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);

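/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the eraseblock if it's not bad or reserved.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */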
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
		pr_warn("attempt to erase a bad/reserved block @%llx\n",
			nanddev_pos_to_offs(nand, pos));
		return -EIO;
	}

	return nand->ops->erase(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_erase);

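/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * This is a simple mtd->_erase() implementation iterating over all blocks
 * concerned by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. NAND specialized layers should instead
 * implement their own wrapper around nanddev_mtd_erase() taking the
 * appropriate lock before calling nanddev_mtd_erase().
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */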
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, last;
	int ret;

	nanddev_offs_to_pos(nand, einfo->addr, &pos);
	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		ret = nanddev_erase(nand, &pos);
		if (ret) {
			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);

			return ret;
		}

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);

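/**
 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks
 *				  in a specific region of the NAND device
 * @mtd: MTD device
 * @offs: offset of the NAND region
 * @len: length of the NAND region
 *
 * Default implementation for mtd->_max_bad_blocks(). Only works if
 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
 *
 * Return: a positive number encoding the maximum number of eraseblocks on a
 * portion of memory, a negative error code otherwise.
 */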
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, end;
	unsigned int max_bb = 0;

	if (!nand->memorg.max_bad_eraseblocks_per_lun)
		return -ENOTSUPP;

	nanddev_offs_to_pos(nand, offs + len, &end);

	/* Assume the worst case: every LUN covered by the region contains
	 * its maximum number of bad eraseblocks.
	 */
	for (nanddev_offs_to_pos(nand, offs, &pos);
	     nanddev_pos_cmp(&pos, &end) < 0;
	     nanddev_pos_next_lun(nand, &pos))
		max_bb += nand->memorg.max_bad_eraseblocks_per_lun;

	return max_bb;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);

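/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */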
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

	if (!nand || !ops)
		return -EINVAL;

	if (!ops->erase || !ops->markbad || !ops->isbad)
		return -EINVAL;

	if (!memorg->bits_per_cell || !memorg->pagesize ||
	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
	    !memorg->planes_per_lun || !memorg->luns_per_target ||
	    !memorg->ntargets)
		return -EINVAL;

	/*
	 * Pre-compute the shifts used to convert NAND positions into
	 * row addresses.
	 */
	nand->rowconv.eraseblock_addr_shift =
		fls(memorg->pages_per_eraseblock - 1);
	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
				       nand->rowconv.eraseblock_addr_shift;

	nand->ops = ops;

	mtd->type = memorg->bits_per_cell == 1 ?
		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
	mtd->writesize = memorg->pagesize;
	mtd->writebufsize = memorg->pagesize;
	mtd->oobsize = memorg->oobsize;
	mtd->size = nanddev_size(nand);
	mtd->owner = owner;

	return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);

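/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */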
void nanddev_cleanup(struct nand_device *nand)
{
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);

MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");