/*
 * This file contains an ECC algorithm that detects and corrects 1 bit
 * errors in a 256 byte block of data.
 *
 * drivers/mtd/nand/nand_ecc.c
 *
 * Copyright © 2008 Koninklijke Philips Electronics NV.
 *                  Author: Frans Meulenbroeks
 *
 * Completely replaces the previous ECC implementation which was written by:
 *   Steven J. Hill (sjhill@realitydiluted.com)
 *   Thomas Gleixner (tglx@linutronix.de)
 *
 * Information on how this algorithm works and how it was developed
 * can be found in Documentation/mtd/nand_ecc.txt
 *
 * This file is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this file; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/*
 * The STANDALONE macro is useful when running the code outside the kernel
 * e.g. when running the code in a testbed or a benchmark program.
 * When STANDALONE is defined, the module related macros are stubbed out
 * and the linux include files are not used.
 * Instead a forward declaration of struct mtd_info is given to satisfy the
 * compiler (the code does not use mtd_info, so it does not care about the
 * contents of the struct).
 */
#ifndef STANDALONE
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/byteorder.h>
#else
#include <stdint.h>
#include <stdio.h>	/* pr_err maps to printf below */
struct mtd_info;
#define EXPORT_SYMBOL(x)  /* x */

#define MODULE_LICENSE(x)	/* x */
#define MODULE_AUTHOR(x)	/* x */
#define MODULE_DESCRIPTION(x)	/* x */

#define pr_err printf
#endif
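
/*
 * As an illustration only (not part of the driver): when this file is built
 * with -DSTANDALONE, a small test harness along the lines below (in a
 * separate file, or appended after the functions in this one) could
 * exercise the two exported helpers.  This is a sketch under that
 * assumption, not an interface this file provides:
 *
 *	int main(void)
 *	{
 *		unsigned char data[256] = { 0 }, stored[3], calc[3];
 *
 *		data[10] = 0x5a;
 *		__nand_calculate_ecc(data, 256, stored);
 *		data[10] ^= 0x04;	// inject a single bit error
 *		__nand_calculate_ecc(data, 256, calc);
 *		// __nand_correct_data() returns 1 when it repaired one bit
 *		return (__nand_correct_data(data, stored, calc, 256) == 1 &&
 *			data[10] == 0x5a) ? 0 : 1;
 *	}
 */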

/*
 * invparity is a 256 byte table that contains the odd parity
 * for each byte. So if the number of bits in a byte is even,
 * the array element is 1, and when the number of bits is odd
 * the array element is 0.
 */
static const char invparity[256] = {
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
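
/*
 * For example (values taken straight from the table above): 0x03 has two
 * bits set, so invparity[0x03] == 1, while 0x07 has three bits set, so
 * invparity[0x07] == 0.  In other words invparity[x] is the parity bit
 * that makes the total number of 1 bits in x odd.
 */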

/*
 * bitsperbyte contains the number of bits per byte
 * this is only used for testing and repairing parity
 * (a precalculated value slightly improves performance)
 */
static const char bitsperbyte[256] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
};
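
/*
 * For example bitsperbyte[0x03] == 2 and bitsperbyte[0xff] == 8; the table
 * is just a precomputed population count (what hweight8() would return in
 * kernel code).
 */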
109
110/*
111 * addressbits is a lookup table to filter out the bits from the xor-ed
112 * ECC data that identify the faulty location.
113 * this is only used for repairing parity
114 * see the comments in nand_correct_data for more details
115 */
116static const char addressbits[256] = {
117	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
118	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
119	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
120	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
121	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
122	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
123	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
124	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
125	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
126	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
127	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
128	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
129	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
130	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
131	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
132	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
133	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
134	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
135	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
136	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
137	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
138	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
139	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
140	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
141	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
142	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
143	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
144	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
145	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
146	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
147	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
148	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
149};
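
/*
 * addressbits[x] gathers the odd-numbered bits of x (bits 1, 3, 5 and 7)
 * into bits 0..3 of the result, e.g. addressbits[0x02] == 0x01 and
 * addressbits[0x82] == 0x09.  For a single bit error these odd bits of the
 * xor-ed ECC bytes spell out the faulty byte and bit address (see the
 * comments in nand_correct_data below).
 */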

/**
 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
 *			 block
 * @buf:	input buffer with raw data
 * @eccsize:	data bytes per ECC step (256 or 512)
 * @code:	output buffer with ECC
 */
void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
		       unsigned char *code)
{
	int i;
	const uint32_t *bp = (uint32_t *)buf;
	/* 256 or 512 bytes/ecc  */
	const uint32_t eccsize_mult = eccsize >> 8;
	uint32_t cur;		/* current value in buffer */
	/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
	uint32_t uninitialized_var(rp17);	/* to make compiler happy */
	uint32_t par;		/* the cumulative parity for all data */
	uint32_t tmppar;	/* the cumulative parity for this iteration;
				   for rp12, rp14 and rp16 at the end of the
				   loop */

	par = 0;
	rp4 = 0;
	rp6 = 0;
	rp8 = 0;
	rp10 = 0;
	rp12 = 0;
	rp14 = 0;
	rp16 = 0;

	/*
	 * The loop is unrolled a number of times; this avoids if statements
	 * that would otherwise decide which rp value to update.
	 * Also we process the data by longwords.
	 * Note: passing unaligned data might give a performance penalty.
	 * It is assumed that the buffers are aligned.
	 * tmppar is the cumulative sum of this iteration; it is needed for
	 * calculating rp12, rp14, rp16 and par, and is also used as a
	 * performance improvement for rp6, rp8 and rp10.
	 */
	for (i = 0; i < eccsize_mult << 2; i++) {
		cur = *bp++;
		tmppar = cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= tmppar;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp8 ^= tmppar;

		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp10 ^= tmppar;

		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp8 ^= cur;

		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;

		par ^= tmppar;
		if ((i & 0x1) == 0)
			rp12 ^= tmppar;
		if ((i & 0x2) == 0)
			rp14 ^= tmppar;
		if (eccsize_mult == 2 && (i & 0x4) == 0)
			rp16 ^= tmppar;
	}
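
	/*
	 * A note on what the loop above has accumulated (derived from the
	 * unrolled pattern): within every group of 16 longwords rp4 holds
	 * the parity of the even-numbered longwords, rp6 of those whose
	 * index has bit 1 clear, rp8 bit 2 clear and rp10 bit 3 clear,
	 * while rp12, rp14 (and rp16 for 512 byte blocks) accumulate whole
	 * groups based on bits of i.  The complementary halves rp5, rp7, ...
	 * are derived below as par ^ rpN.
	 */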

	/*
	 * handle the fact that we use longword operations:
	 * we'll bring rp4..rp14..rp16 back to single byte entities by
	 * shifting and xoring; first fold the upper and lower 16 bits,
	 * then the upper and lower 8 bits.
	 */
	rp4 ^= (rp4 >> 16);
	rp4 ^= (rp4 >> 8);
	rp4 &= 0xff;
	rp6 ^= (rp6 >> 16);
	rp6 ^= (rp6 >> 8);
	rp6 &= 0xff;
	rp8 ^= (rp8 >> 16);
	rp8 ^= (rp8 >> 8);
	rp8 &= 0xff;
	rp10 ^= (rp10 >> 16);
	rp10 ^= (rp10 >> 8);
	rp10 &= 0xff;
	rp12 ^= (rp12 >> 16);
	rp12 ^= (rp12 >> 8);
	rp12 &= 0xff;
	rp14 ^= (rp14 >> 16);
	rp14 ^= (rp14 >> 8);
	rp14 &= 0xff;
	if (eccsize_mult == 2) {
		rp16 ^= (rp16 >> 16);
		rp16 ^= (rp16 >> 8);
		rp16 &= 0xff;
	}

	/*
	 * we also need to calculate the row parity for rp0..rp3
	 * This is present in par, because par is now
	 * rp3 rp3 rp2 rp2 in little endian and
	 * rp2 rp2 rp3 rp3 in big endian
	 * as well as
	 * rp1 rp0 rp1 rp0 in little endian and
	 * rp0 rp1 rp0 rp1 in big endian
	 * First calculate rp2 and rp3
	 */
#ifdef __BIG_ENDIAN
	rp2 = (par >> 16);
	rp2 ^= (rp2 >> 8);
	rp2 &= 0xff;
	rp3 = par & 0xffff;
	rp3 ^= (rp3 >> 8);
	rp3 &= 0xff;
#else
	rp3 = (par >> 16);
	rp3 ^= (rp3 >> 8);
	rp3 &= 0xff;
	rp2 = par & 0xffff;
	rp2 ^= (rp2 >> 8);
	rp2 &= 0xff;
#endif

	/* reduce par to 16 bits then calculate rp1 and rp0 */
	par ^= (par >> 16);
#ifdef __BIG_ENDIAN
	rp0 = (par >> 8) & 0xff;
	rp1 = (par & 0xff);
#else
	rp1 = (par >> 8) & 0xff;
	rp0 = (par & 0xff);
#endif

	/* finally reduce par to 8 bits */
	par ^= (par >> 8);
	par &= 0xff;

	/*
	 * and calculate rp5..rp15..rp17
	 * note that par = rp4 ^ rp5; since xor-ing par with rp4 cancels out
	 * the rp4 contribution we can say:
	 * rp5 = (par ^ rp4);
	 * The & 0xff seems superfluous, but benchmarking showed that
	 * leaving it out gives slightly worse results. No idea why, probably
	 * it has to do with the way the pipeline in the Pentium is organized.
	 */
	rp5 = (par ^ rp4) & 0xff;
	rp7 = (par ^ rp6) & 0xff;
	rp9 = (par ^ rp8) & 0xff;
	rp11 = (par ^ rp10) & 0xff;
	rp13 = (par ^ rp12) & 0xff;
	rp15 = (par ^ rp14) & 0xff;
	if (eccsize_mult == 2)
		rp17 = (par ^ rp16) & 0xff;

	/*
	 * Finally calculate the ECC bits.
	 * Again here it might seem that there are performance optimisations
	 * possible, but benchmarks showed that on the development system
	 * the code below is the fastest.
	 */
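	/*
	 * A note on the masks used for the third ECC byte below: par & 0x55
	 * and par & 0xaa look at the even/odd bit positions of every byte,
	 * par & 0x33 and par & 0xcc at pairs of bit positions, and
	 * par & 0x0f and par & 0xf0 at the two nibbles; together they form
	 * the column parities cp0..cp5 described in
	 * Documentation/mtd/nand_ecc.txt.  For a 256 byte block the two
	 * lowest bits of code[2] carry no rp16/rp17 information and are
	 * simply set to 1.
	 */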
#ifdef CONFIG_MTD_NAND_ECC_SMC
	code[0] =
	    (invparity[rp7] << 7) |
	    (invparity[rp6] << 6) |
	    (invparity[rp5] << 5) |
	    (invparity[rp4] << 4) |
	    (invparity[rp3] << 3) |
	    (invparity[rp2] << 2) |
	    (invparity[rp1] << 1) |
	    (invparity[rp0]);
	code[1] =
	    (invparity[rp15] << 7) |
	    (invparity[rp14] << 6) |
	    (invparity[rp13] << 5) |
	    (invparity[rp12] << 4) |
	    (invparity[rp11] << 3) |
	    (invparity[rp10] << 2) |
	    (invparity[rp9] << 1)  |
	    (invparity[rp8]);
#else
	code[1] =
	    (invparity[rp7] << 7) |
	    (invparity[rp6] << 6) |
	    (invparity[rp5] << 5) |
	    (invparity[rp4] << 4) |
	    (invparity[rp3] << 3) |
	    (invparity[rp2] << 2) |
	    (invparity[rp1] << 1) |
	    (invparity[rp0]);
	code[0] =
	    (invparity[rp15] << 7) |
	    (invparity[rp14] << 6) |
	    (invparity[rp13] << 5) |
	    (invparity[rp12] << 4) |
	    (invparity[rp11] << 3) |
	    (invparity[rp10] << 2) |
	    (invparity[rp9] << 1)  |
	    (invparity[rp8]);
#endif
	if (eccsize_mult == 1)
		code[2] =
		    (invparity[par & 0xf0] << 7) |
		    (invparity[par & 0x0f] << 6) |
		    (invparity[par & 0xcc] << 5) |
		    (invparity[par & 0x33] << 4) |
		    (invparity[par & 0xaa] << 3) |
		    (invparity[par & 0x55] << 2) |
		    3;
	else
		code[2] =
		    (invparity[par & 0xf0] << 7) |
		    (invparity[par & 0x0f] << 6) |
		    (invparity[par & 0xcc] << 5) |
		    (invparity[par & 0x33] << 4) |
		    (invparity[par & 0xaa] << 3) |
		    (invparity[par & 0x55] << 2) |
		    (invparity[rp17] << 1) |
		    (invparity[rp16] << 0);
}
EXPORT_SYMBOL(__nand_calculate_ecc);

/**
 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
 *			 block
 * @mtd:	MTD block structure
 * @buf:	input buffer with raw data
 * @code:	output buffer with ECC
 */
int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
		       unsigned char *code)
{
	__nand_calculate_ecc(buf,
			((struct nand_chip *)mtd->priv)->ecc.size, code);

	return 0;
}
EXPORT_SYMBOL(nand_calculate_ecc);

/**
 * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
 * @buf:	raw data read from the chip
 * @read_ecc:	ECC from the chip
 * @calc_ecc:	the ECC calculated from raw data
 * @eccsize:	data bytes per ECC step (256 or 512)
 *
 * Detect and correct a 1 bit error for eccsize byte block
 */
int __nand_correct_data(unsigned char *buf,
			unsigned char *read_ecc, unsigned char *calc_ecc,
			unsigned int eccsize)
{
	unsigned char b0, b1, b2, bit_addr;
	unsigned int byte_addr;
	/* 256 or 512 bytes/ecc  */
	const uint32_t eccsize_mult = eccsize >> 8;

	/*
	 * b0 to b2 indicate which bit is faulty (if any)
	 * we might need the xor result more than once,
	 * so keep them in a local var
	 */
#ifdef CONFIG_MTD_NAND_ECC_SMC
	b0 = read_ecc[0] ^ calc_ecc[0];
	b1 = read_ecc[1] ^ calc_ecc[1];
#else
	b0 = read_ecc[1] ^ calc_ecc[1];
	b1 = read_ecc[0] ^ calc_ecc[0];
#endif
	b2 = read_ecc[2] ^ calc_ecc[2];

	/* check if there are any bitfaults */

	/* repeated if statements are slightly more efficient than switch ... */
	/* ordered in order of likelihood */

	if ((b0 | b1 | b2) == 0)
		return 0;	/* no error */

	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
	    ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
	     (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
		/*
		 * single bit error: in each parity pair of b0, b1 and b2
		 * exactly one bit differs (that is what the condition above
		 * checks for).
		 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
		 * byte, cp 5/3/1 indicate the faulty bit.
		 * A lookup table (called addressbits) is used to filter
		 * the bits from the byte they are in.
		 * A marginal optimisation is possible by having three
		 * different lookup tables.
		 * One as we have now (for b0), one for b2
		 * (that would avoid the >> 1), and one for b1 (with all values
		 * << 4). However it was felt that introducing two more tables
		 * hardly justifies the gain.
		 *
		 * The b2 shift is there to get rid of the lowest two bits.
		 * We could also do addressbits[b2] >> 1 but for the
		 * performance it does not make any difference
		 */
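		/*
		 * Worked example for a 256 byte block (values derived from
		 * the tables above, for illustration only): an error in bit 3
		 * of the byte at address 0x55 gives b0 == b1 == 0x66 and
		 * b2 == 0x68, so
		 *	byte_addr = (addressbits[0x66] << 4) + addressbits[0x66]
		 *		  = 0x55
		 *	bit_addr = addressbits[0x68 >> 2] = 3
		 */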
		if (eccsize_mult == 1)
			byte_addr = (addressbits[b1] << 4) + addressbits[b0];
		else
			byte_addr = (addressbits[b2 & 0x3] << 8) +
				    (addressbits[b1] << 4) + addressbits[b0];
		bit_addr = addressbits[b2 >> 2];
		/* flip the bit */
		buf[byte_addr] ^= (1 << bit_addr);
		return 1;

	}
	/* count nr of bits; use table lookup, faster than calculating it */
	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
		return 1;	/* error in ECC data; no action needed */

	pr_err("%s: uncorrectable ECC error\n", __func__);
	return -1;
}
EXPORT_SYMBOL(__nand_correct_data);

/**
 * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
 * @mtd:	MTD block structure
 * @buf:	raw data read from the chip
 * @read_ecc:	ECC from the chip
 * @calc_ecc:	the ECC calculated from raw data
 *
 * Detect and correct a 1 bit error for 256/512 byte block
 */
int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
		      unsigned char *read_ecc, unsigned char *calc_ecc)
{
	return __nand_correct_data(buf, read_ecc, calc_ecc,
				   ((struct nand_chip *)mtd->priv)->ecc.size);
}
EXPORT_SYMBOL(nand_correct_data);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
MODULE_DESCRIPTION("Generic NAND ECC support");