/*
 *  linux/drivers/mtd/onenand/omap2.c
 *
 *  OneNAND driver for OMAP2 / OMAP3
 *
 *  Copyright © 2005-2006 Nokia Corporation
 *
 *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 *  IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/flash.h>
#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/gpio.h>

#include <linux/omap-dma.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_BUFRAM_SIZE	(1024 * 5)

struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	unsigned int mem_size;
	int gpio_irq;
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int *freq_ptr);
	struct regulator *regulator;
	u8 flags;
};

static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

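/*
 * Wait for the current OneNAND command to complete.  For reset and erase
 * preparation/verification the interrupt register is polled briefly with
 * udelay(); for other non-read states the interrupt output is enabled and
 * we sleep on the GPIO interrupt completion (with retries while the
 * controller reports the operation as ongoing); for reads the interrupt
 * output is disabled and the register is polled until a 20 ms timeout.
 * ECC and controller status are checked before returning.
 */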
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (c->flags & ONENAND_IN_OMAP34XX)
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		reinit_completion(&c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

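/*
 * OMAP3 buffer RAM accessors: copy to/from the OneNAND buffer RAM with
 * system DMA when the transfer is large enough and word-aligned; small,
 * unaligned or atomic-context transfers fall back to memcpy().
 */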
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	reinit_completion(&c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	reinit_completion(&c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	return -ENOSYS;
}

static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	return -ENOSYS;
}

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

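/*
 * OMAP2 buffer RAM accessors.  The DMA path below is kept for reference
 * but is currently bypassed by the "if (1 || ...)" test, so all transfers
 * go through memcpy().
 */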
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	reinit_completion(&c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	reinit_completion(&c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	return -ENOSYS;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	return -ENOSYS;
}

#endif

static struct platform_driver omap2_onenand_driver;

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

static int omap2_onenand_enable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_enable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't enable regulator\n");

	return ret;
}

static int omap2_onenand_disable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_disable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't disable regulator\n");

	return ret;
}

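/*
 * Probe: map the OneNAND memory region, run the optional board setup
 * callback, claim the optional GPIO interrupt and DMA channel, then scan
 * the chip and register it with the MTD core.
 */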
static int omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	struct onenand_chip *this;
	int r;
	struct resource *res;
	struct mtd_part_parser_data ppdata = {};

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->flags = pdata->flags;
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		r = -EINVAL;
		dev_err(&pdev->dev, "error getting memory resource\n");
		goto err_kfree;
	}

	c->phys_base = res->start;
	c->mem_size = resource_size(res);

	if (request_mem_region(c->phys_base, c->mem_size,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
						c->phys_base, c->mem_size);
		r = -EBUSY;
		goto err_kfree;
	}
	c->onenand.base = ioremap(c->phys_base, c->mem_size);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, &c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base, c->freq);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	this = &c->onenand;
	if (c->dma_channel >= 0) {
		this->wait = omap2_onenand_wait;
		if (c->flags & ONENAND_IN_OMAP34XX) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if (pdata->regulator_can_sleep) {
		c->regulator = regulator_get(&pdev->dev, "vonenand");
		if (IS_ERR(c->regulator)) {
			dev_err(&pdev->dev, "Failed to get regulator\n");
			r = PTR_ERR(c->regulator);
			goto err_release_dma;
		}
		c->onenand.enable = omap2_onenand_enable;
		c->onenand.disable = omap2_onenand_disable;
	}

	if (pdata->skip_initial_unlocking)
		this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_regulator;

	ppdata.of_node = pdata->of_node;
	r = mtd_device_parse_register(&c->mtd, NULL, &ppdata,
				      pdata ? pdata->parts : NULL,
				      pdata ? pdata->nr_parts : 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_regulator:
	regulator_put(c->regulator);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, c->mem_size);
err_kfree:
	kfree(c);

	return r;
}

static int omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	regulator_put(c->regulator);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, c->mem_size);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= omap2_onenand_remove,
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");