This source file includes following definitions.
- cfi_udelay
- cfi_build_cmd_addr
- cfi_build_cmd
- cfi_merge_status
- cfi_send_gen_cmd
- cfi_qry_present
- cfi_qry_mode_on
- cfi_qry_mode_off
- cfi_read_pri
- cfi_fixup
- cfi_varsize_frob
1
2
3
4
5
6
7
8
9
10
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/kernel.h>
14 #include <asm/io.h>
15 #include <asm/byteorder.h>
16
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 #include <linux/interrupt.h>
21 #include <linux/mtd/xip.h>
22 #include <linux/mtd/mtd.h>
23 #include <linux/mtd/map.h>
24 #include <linux/mtd/cfi.h>
25
/*
 * Delay for roughly @us microseconds without hogging the CPU.
 * Short waits busy-spin and then offer a reschedule point; waits of a
 * millisecond or more sleep (rounded up to whole milliseconds).
 */
void cfi_udelay(int us)
{
	if (us < 1000) {
		udelay(us);
		cond_resched();
		return;
	}
	msleep((us + 999) / 1000);
}
EXPORT_SYMBOL(cfi_udelay);
36
37
38
39
/*
 * Translate a nominal CFI command offset @cmd_ofs into the actual map
 * offset to write to, scaled for the device word size and the number
 * of interleaved chips sharing the bus.
 */
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
				struct map_info *map, struct cfi_private *cfi)
{
	unsigned bankwidth = map_bankwidth(map);
	unsigned interleave = cfi_interleave(cfi);
	unsigned type = cfi->device_type;	/* bytes per chip word */
	uint32_t addr;

	/* Scale the per-chip offset by word size and interleave factor. */
	addr = (cmd_ofs * type) * interleave;

	/*
	 * If the chip word is wider than its share of the bus (e.g. an
	 * x16 device operated in x8 mode, so type*interleave exceeds the
	 * bankwidth), the 0x..aa unlock offsets additionally need the
	 * next-lower address bit set.  NOTE(review): this matches the
	 * usual x16-in-x8 unlock-address quirk; confirm against the
	 * flash datasheets for affected parts.
	 */
	if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
		addr |= (type >> 1)*interleave;

	return addr;
}
EXPORT_SYMBOL(cfi_build_cmd_addr);
61
62
63
64
65
66
/*
 * Build a bus-wide map_word from a single chip-level command @cmd:
 * the command is converted for the chip's endianness and replicated
 * once per interleaved chip, ready to be written with map_write().
 */
map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/*
	 * Work in units of machine words (unsigned long); a bus wider
	 * than one long is represented as words_per_bus identical longs
	 * in val.x[].
	 */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth;
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);	/* bytes each chip drives */
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine the bit pattern for a single device, taking
	   the endianness of the chip into account. */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(map, cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(map, cmd);
		break;
	}

	/* Then replicate it across the width of one machine word.  The
	   cascading ORs rely on deliberate switch fall-through: each case
	   doubles the number of copies already present in onecmd. */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* Finally, populate every machine word of the bus-wide value. */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
EXPORT_SYMBOL(cfi_build_cmd);
132
/*
 * Inverse companion of cfi_build_cmd(): fold a bus-wide value @val
 * (e.g. a status read from interleaved chips) down to a single chip's
 * word, OR-ing the per-chip copies together so a bit set on any chip
 * is visible in the result.
 */
unsigned long cfi_merge_status(map_word val, struct map_info *map,
		struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* Same machine-word decomposition as cfi_build_cmd(). */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth;
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);	/* bytes each chip drives */
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* Collapse the bus words into one machine word first... */
	onestat = val.x[0];

	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	/* ...then fold the interleaved per-chip copies onto each other.
	   The cascading ORs rely on deliberate switch fall-through,
	   mirroring the shifts in cfi_build_cmd(). */
	res = onestat;
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		/* fall through */
	case 2:
		res |= (onestat >> (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* Last, convert back to CPU byte order, taking the endianness of
	   the chip into account. */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(map, res);
		break;
	case 4:
		res = cfi32_to_cpu(map, res);
		break;
	default: BUG();
	}
	return res;
}
EXPORT_SYMBOL(cfi_merge_status);
195
196
197
198
199
200
201
202
203 uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
204 struct map_info *map, struct cfi_private *cfi,
205 int type, map_word *prev_val)
206 {
207 map_word val;
208 uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
209 val = cfi_build_cmd(cmd, map, cfi);
210
211 if (prev_val)
212 *prev_val = map_read(map, addr);
213
214 map_write(map, val, addr);
215
216 return addr - base;
217 }
218 EXPORT_SYMBOL(cfi_send_gen_cmd);
219
220 int __xipram cfi_qry_present(struct map_info *map, __u32 base,
221 struct cfi_private *cfi)
222 {
223 int osf = cfi->interleave * cfi->device_type;
224 map_word val[3];
225 map_word qry[3];
226
227 qry[0] = cfi_build_cmd('Q', map, cfi);
228 qry[1] = cfi_build_cmd('R', map, cfi);
229 qry[2] = cfi_build_cmd('Y', map, cfi);
230
231 val[0] = map_read(map, base + osf*0x10);
232 val[1] = map_read(map, base + osf*0x11);
233 val[2] = map_read(map, base + osf*0x12);
234
235 if (!map_word_equal(map, qry[0], val[0]))
236 return 0;
237
238 if (!map_word_equal(map, qry[1], val[1]))
239 return 0;
240
241 if (!map_word_equal(map, qry[2], val[2]))
242 return 0;
243
244 return 1;
245 }
246 EXPORT_SYMBOL_GPL(cfi_qry_present);
247
/*
 * Try to switch the chip(s) at @base into CFI query mode, probing the
 * reset/query-enter sequences of the common chip families in turn
 * until the "QRY" signature appears.  Returns 1 on success, 0 if no
 * sequence produced query mode.
 */
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	/* Standard CFI: AMD-style 0xF0 reset, query-enter (0x98) at 0x55. */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;

	/* Retry after adding an Intel-style 0xFF reset -- NOTE(review):
	   presumably for chips that ignore the 0xF0 reset; confirm. */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;

	/* Query-enter at the alternative 0x555 address some parts use. */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;

	/* Full 0xAA/0x55 unlock cycle at 0x5555/0x2AAA before the query
	   command (SST-style addressing). */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;

	/* Same unlock cycle at the shorter 0x555/0x2AA addresses. */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
285
286 void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
287 struct cfi_private *cfi)
288 {
289 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
290 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
291
292
293 if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
294 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
295 }
296 EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
297
/*
 * Read the vendor-specific Primary/Extended Query table of @size bytes
 * at query offset @adr from the first chip into a freshly allocated
 * buffer.  @name is only used for the log message.
 *
 * Returns the kmalloc'ed table (caller must kfree it), or NULL if
 * @adr is zero or the allocation fails.
 */
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0;		/* only the first chip is queried */
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	if (!adr)
		goto out;

	printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp)
		goto out;

#ifdef CONFIG_MTD_XIP
	/* The flash is unreadable while in query mode, so no XIP code
	 * fetches (hence no interrupts) may occur until we leave it. */
	local_irq_disable();
#endif

	/* Switch into query mode */
	cfi_qry_mode_on(base, map, cfi);

	/* Read the extended query table one byte at a time, scaling the
	 * offset for interleave and device word size. */
	for (i=0; i<size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base+((adr+i)*ofs_factor));
	}

	/* Put it back into read-array mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	(void) map_read(map, base);	/* dummy read before refetching code */
	xip_iprefetch();
	local_irq_enable();
#endif

 out:	return extp;
}

EXPORT_SYMBOL(cfi_read_pri);
341
342 void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
343 {
344 struct map_info *map = mtd->priv;
345 struct cfi_private *cfi = map->fldrv_priv;
346 struct cfi_fixup *f;
347
348 for (f=fixups; f->fixup; f++) {
349 if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
350 ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
351 f->fixup(mtd);
352 }
353 }
354 }
355
356 EXPORT_SYMBOL(cfi_fixup);
357
/*
 * Apply @frob (typically an erase or lock/unlock primitive) to every
 * erase block in [@ofs, @ofs + @len) of a device with variable-size
 * erase regions, walking regions and chips as the address advances.
 *
 * Both ends of the range must be aligned to the erase size of the
 * region they fall in; otherwise -EINVAL is returned.  The first
 * non-zero return from @frob aborts the walk and is propagated.
 */
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested range are
	 * aligned with the erasesize at the appropriate addresses.
	 */
	i = 0;

	/* Skip all erase regions which end before the start of the
	   requested range.  To save on the calculations, advance to the
	   first region which starts after the range start, then go back
	   one.
	*/
	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
		i++;
	i--;

	/* i now points at the erase region in which the range starts.
	   Check the start is aligned with the erase size in effect
	   there.
	*/
	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on. */
	first = i;

	/* Next, check that the end of the requested range is aligned
	 * with the erase region at that address (same skip-then-back-up
	 * scan as above).
	 */
	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which the
	   end address actually falls.
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	/* Split the absolute offset into a chip number and an offset
	 * within that chip. */
	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i=first;

	/* Walk the range one erase block at a time. */
	while(len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

		/* Reached the end of this erase region: move to the next. */
		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

		/* Crossed a chip boundary: restart at offset 0 on the
		 * next chip, or stop if there is none. */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);
444
445 MODULE_LICENSE("GPL");