/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjälä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gpio.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

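/* Value of bank->power_mode when the GPIO power domain may enter OFF */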
#define OFF_MODE	1
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

static LIST_HEAD(omap_gpio_list);

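/* Shadow copies of the bank registers, used to restore context after loss */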
struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	struct list_head node;
	void __iomem *base;
	int irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	raw_spinlock_t lock;
	raw_spinlock_t wa_lock;
	struct gpio_chip chip;
	struct clk *dbck;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	struct device *dev;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

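/* A bank is in use if any of its lines is requested as a GPIO or as an IRQ */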
#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	return container_of(chip, struct gpio_bank, chip);
}

static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = readl_relaxed(reg);
	if (is_input)
		l |= BIT(gpio);
	else
		l &= ~(BIT(gpio));
	writel_relaxed(l, reg);
	bank->context.oe = l;
}


/* set data out value using the dedicated set/clear registers */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}

/* set data out value using the mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = BIT(offset);
	u32 l;

	l = readl_relaxed(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	writel_relaxed(l, reg);
	bank->context.dataout = l;
}

static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

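/* Read-modify-write helper: set or clear @mask in the register at @base + @reg */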
static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
	int l = readl_relaxed(base + reg);

	if (set)
		l |= mask;
	else
		l &= ~mask;

	writel_relaxed(l, base + reg);
}

static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			     bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems unable
		 * to detect events and generate interrupts, at least on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit.
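 *
 * For example, debounce = 100 (us) is programmed as
 * DIV_ROUND_UP(100, 31) - 1 = 3, giving an effective (3 + 1) x 31 = 124 us.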
 */
static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				    unsigned debounce)
{
	void __iomem		*reg;
	u32			val;
	u32			l;
	bool			enable = !!debounce;

	if (!bank->dbck_flag)
		return;

	if (enable) {
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
	}

	l = BIT(offset);

	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	writel_relaxed(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = readl_relaxed(reg);

	if (enable)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	writel_relaxed(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable the debounce clock for the module here.  This call is
	 * mandatory because when omap_gpio_request() calls
	 * *_runtime_get_sync(), omap_gpio_dbck_enable() in the runtime
	 * callback fails to turn on dbck, since dbck_enable_mask is not
	 * yet initialized at that point. Therefore we have to enable
	 * dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled if this is the only
 * gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		     bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			     bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
						unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);
	omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_RISING);
	omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_FALLING);

	bank->context.leveldetect0 =
			readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			readl_relaxed(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log edge-triggered GPIOs so the IRQ can be triggered
		 * manually after resume if the input level changed, to
		 * avoid losing interrupts while PER is in RET/OFF mode.
		 * This applies to omap2 non-wakeup GPIOs and to all omap3
		 * GPIOs.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
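	/*
	 * Cache which lines are level triggered; the IRQ handler uses this
	 * mask to avoid clearing level-sensitive interrupts before their
	 * handlers have run.
	 */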
	bank->level_mask =
		readl_relaxed(bank->base + bank->regs->leveldetect0) |
		readl_relaxed(bank->base + bank->regs->leveldetect1);
}

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once.  For all other chips, this function is a noop.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	l = readl_relaxed(reg);
	if ((l >> gpio) & 1)
		l &= ~(BIT(gpio));
	else
		l |= BIT(gpio);

	writel_relaxed(l, reg);
}
#else
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif

static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
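		/*
		 * OMAP1-style edge control: two bits per GPIO, with lines
		 * 0-7 in EDGECTRL1 and lines 8-15 in EDGECTRL2.  In each
		 * two-bit field, bit 1 enables rising-edge and bit 0
		 * falling-edge detection.
		 */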
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
		writel_relaxed(l, reg);
	}
	return 0;
}

static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *base = bank->base;

	if (bank->regs->wkup_en &&
	    !LINE_USED(bank->mod_usage, offset) &&
	    !LINE_USED(bank->irq_usage, offset)) {
		/* Disable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		irq_set_handler_locked(d, handle_edge_irq);

	return 0;

error:
	return retval;
}

static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 |= gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 &= ~gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	if (enable)
		omap_enable_gpio_irqbank(bank, BIT(offset));
	else
		omap_disable_gpio_irqbank(bank, BIT(offset));
}

/*
 * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
 * The 1510 does not seem to have a wake-up register. If JTAG is connected
 * to the target, the system will always wake up on GPIO events. While the
 * system is running, all registered GPIO interrupts need to have wake-up
 * enabled. When the system is suspended, only selected GPIO interrupts need
 * to have wake-up enabled.
 */
static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
				int enable)
{
	u32 gpio_bit = BIT(offset);
	unsigned long flags;

	if (bank->non_wakeup_gpios & gpio_bit) {
		dev_err(bank->dev,
			"Unable to modify wakeup on non-wakeup GPIO%d\n",
			offset);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (enable)
		bank->context.wake_en |= gpio_bit;
	else
		bank->context.wake_en &= ~gpio_bit;

	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	int ret;

	ret = omap_set_gpio_wakeup(bank, offset, enable);
	if (!ret)
		ret = irq_set_irq_wake(bank->irq, enable);

	return ret;
}

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->dev);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_put(bank->dev);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	pm_runtime_get_sync(bank->dev);

	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr_saved = isr = readl_relaxed(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/*
		 * Clear edge-sensitive interrupts before the handler(s) are
		 * called so that we don't miss any interrupt that occurs
		 * while they are running.
		 */
		omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time.  If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
							    bit));

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	pm_runtime_put(bank->dev);
	return IRQ_HANDLED;
}

static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	else if (!omap_gpio_is_input(bank, offset))
		goto err;
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
err:
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return -EINVAL;
}

static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_irqenable(bank, offset, 0);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

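/*
 * If the bank is not yet in use, power it up for the duration of the
 * irqchip update; gpio_irq_bus_sync_unlock() drops the reference again.
 */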
static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->dev);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	/*
	 * If this is the last IRQ to be freed in the bank,
	 * disable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_put(bank->dev);
}

static void omap_gpio_ack_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;

	omap_clear_gpio_irqstatus(bank, offset);
}

static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 0);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	/*
	 * For level-triggered GPIOs, clearing must be done after the HW
	 * source is cleared, thus after the handler has run.
	 */
	if (bank->level_mask & BIT(offset)) {
		omap_set_gpio_irqenable(bank, offset, 0);
		omap_clear_gpio_irqstatus(bank, offset);
	}

	omap_set_gpio_irqenable(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;
	void __iomem *reg;
	int dir;

	bank = container_of(chip, struct gpio_bank, chip);
	reg = bank->base + bank->regs->direction;
	raw_spin_lock_irqsave(&bank->lock, flags);
	dir = !!(readl_relaxed(reg) & BIT(offset));
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return dir;
}

static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;

	bank = container_of(chip, struct gpio_bank, chip);

	if (omap_gpio_is_input(bank, offset))
		return omap_get_gpio_datain(bank, offset);
	else
		return omap_get_gpio_dataout(bank, offset);
}

static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap2_set_gpio_debounce(bank, offset, debounce);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base, bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base, bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);
}

static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
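	/* Base GPIO number to hand to the next non-MPUIO bank registered */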
	static int gpio;
	int irq_base = 0;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_debounce = omap_gpio_debounce;
	bank->chip.set = omap_gpio_set;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
	}
	bank->chip.ngpio = bank->width;

	ret = gpiochip_add(&bank->chip);
	if (ret) {
		dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
		return ret;
	}

	if (!bank->is_mpuio)
		gpio += bank->width;

#ifdef CONFIG_ARCH_OMAP1
	/*
	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
	 * irq_alloc_descs() since a base IRQ offset will no longer be needed.
	 */
	irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
	if (irq_base < 0) {
		dev_err(bank->dev, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}
#endif

	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio) {
		irqc->irq_ack = dummy_irq_chip.irq_ack;
		if (!bank->regs->wkup_en)
			irqc->irq_set_wake = NULL;
	}

	ret = gpiochip_irqchip_add(&bank->chip, irqc,
				   irq_base, handle_bad_irq,
				   IRQ_TYPE_NONE);

	if (ret) {
		dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret);
		gpiochip_remove(&bank->chip);
		return -ENODEV;
	}

	gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);

	ret = devm_request_irq(bank->dev, bank->irq, omap_gpio_irq_handler,
			       0, dev_name(bank->dev), bank);
	if (ret)
		gpiochip_remove(&bank->chip);

	return ret;
}

static const struct of_device_id omap_gpio_match[];

static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	const struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	struct irq_chip *irqc;
	int ret;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	pdata = match ? match->data : dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "Memory alloc failed\n");
		return -ENOMEM;
	}

	irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
	if (!irqc)
		return -ENOMEM;

	irqc->irq_startup = omap_gpio_irq_startup;
	irqc->irq_shutdown = omap_gpio_irq_shutdown;
	irqc->irq_ack = omap_gpio_ack_irq;
	irqc->irq_mask = omap_gpio_mask_irq;
	irqc->irq_unmask = omap_gpio_unmask_irq;
	irqc->irq_set_type = omap_gpio_irq_type;
	irqc->irq_set_wake = omap_gpio_wake_enable;
	irqc->irq_bus_lock = omap_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock;
	irqc->name = dev_name(&pdev->dev);

	bank->irq = platform_get_irq(pdev, 0);
	if (bank->irq <= 0) {
		if (!bank->irq)
			bank->irq = -ENXIO;
		if (bank->irq != -EPROBE_DEFER)
			dev_err(dev,
				"can't get irq resource ret=%d\n", bank->irq);
		return bank->irq;
	}

	bank->dev = dev;
	bank->chip.dev = dev;
	bank->chip.owner = THIS_MODULE;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif
	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = omap_set_gpio_dataout_reg;
	else
		bank->set_dataout = omap_set_gpio_dataout_mask;

	raw_spin_lock_init(&bank->lock);
	raw_spin_lock_init(&bank->wa_lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bank->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(bank->base))
		return PTR_ERR(bank->base);

	if (bank->dbck_flag) {
		bank->dbck = devm_clk_get(bank->dev, "dbclk");
		if (IS_ERR(bank->dbck)) {
			dev_err(bank->dev,
				"Could not get gpio dbck. Disable debounce\n");
			bank->dbck_flag = false;
		} else {
			clk_prepare(bank->dbck);
		}
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, irqc);
	if (ret) {
		pm_runtime_put_sync(bank->dev);
		pm_runtime_disable(bank->dev);
		return ret;
	}

	omap_gpio_show_rev(bank);

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return 0;
}

static int omap_gpio_remove(struct platform_device *pdev)
{
	struct gpio_bank *bank = platform_get_drvdata(pdev);

	list_del(&bank->node);
	gpiochip_remove(&bank->chip);
	pm_runtime_disable(bank->dev);
	if (bank->dbck_flag)
		clk_unprepare(bank->dbck);

	return 0;
}

#ifdef CONFIG_ARCH_OMAP2PLUS

#if defined(CONFIG_PM)
static void omap_gpio_restore_context(struct gpio_bank *bank);

static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;
	u32 wake_low, wake_hi;

	raw_spin_lock_irqsave(&bank->lock, flags);

	/*
	 * Only edges can generate a wakeup event to the PRCM.
	 *
	 * Therefore, ensure any wake-up capable GPIOs have
	 * edge-detection enabled before going idle to ensure a wakeup
	 * to the PRCM is generated on a GPIO transition. (c.f. 34xx
	 * NDA TRM 25.5.3.1)
	 *
	 * The normal values will be restored upon ->runtime_resume()
	 * by writing back the values saved in bank->context.
	 */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		writel_relaxed(wake_low | bank->context.fallingdetect,
			     bank->base + bank->regs->fallingdetect);
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		writel_relaxed(wake_hi | bank->context.risingdetect,
			     bank->base + bank->regs->risingdetect);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	bank->saved_datain = readl_relaxed(bank->base +
						bank->regs->datain);
	l1 = bank->context.fallingdetect;
	l2 = bank->context.risingdetect;

	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
	writel_relaxed(l2, bank->base + bank->regs->risingdetect);

	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	omap_gpio_dbck_disable(bank);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_init_context(struct gpio_bank *p);

static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;
	int c;

	raw_spin_lock_irqsave(&bank->lock, flags);

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);
	}

	omap_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup.  Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	writel_relaxed(bank->context.fallingdetect,
		     bank->base + bank->regs->fallingdetect);
	writel_relaxed(bank->context.risingdetect,
		     bank->base + bank->regs->risingdetect);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(bank->dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				raw_spin_unlock_irqrestore(&bank->lock, flags);
				return 0;
			}
		}
	}

	if (!bank->workaround_enabled) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
					 ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

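	/*
	 * Briefly force the level-detect bits for the lines that changed
	 * state so the module latches an interrupt status for them, then
	 * restore the original level-detect configuration.
	 */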
	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
						bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
						bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
						bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
						bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM */

#if IS_BUILTIN(CONFIG_GPIO_OMAP)
void omap2_gpio_prepare_for_idle(int pwr_mode)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!BANK_USED(bank) || !bank->loses_context)
			continue;

		bank->power_mode = pwr_mode;

		pm_runtime_put_sync_suspend(bank->dev);
	}
}

void omap2_gpio_resume_after_idle(void)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!BANK_USED(bank) || !bank->loses_context)
			continue;

		pm_runtime_get_sync(bank->dev);
	}
}
#endif

#if defined(CONFIG_PM)
static void omap_gpio_init_context(struct gpio_bank *p)
{
	struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.ctrl		= readl_relaxed(base + regs->ctrl);
	p->context.oe		= readl_relaxed(base + regs->direction);
	p->context.wake_en	= readl_relaxed(base + regs->wkup_en);
	p->context.leveldetect0	= readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1	= readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect	= readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1	= readl_relaxed(base + regs->irqenable);
	p->context.irqenable2	= readl_relaxed(base + regs->irqenable2);

	if (regs->set_dataout && p->regs->clr_dataout)
		p->context.dataout = readl_relaxed(base + regs->set_dataout);
	else
		p->context.dataout = readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}

static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	writel_relaxed(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
	writel_relaxed(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		writel_relaxed(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		writel_relaxed(bank->context.dataout,
				bank->base + bank->regs->dataout);
	writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);

	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, bank->base +
					bank->regs->debounce);
		writel_relaxed(bank->context.debounce_en,
					bank->base + bank->regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	writel_relaxed(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM */
#else
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
static inline void omap_gpio_init_context(struct gpio_bank *p) {}
#endif

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};

#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.remove		= omap_gpio_remove,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * The gpio driver needs to be registered before machine_init functions
 * access the gpio APIs. Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static void __exit omap_gpio_exit(void)
{
	platform_driver_unregister(&omap_gpio_driver);
}
module_exit(omap_gpio_exit);

MODULE_DESCRIPTION("omap gpio driver");
MODULE_ALIAS("platform:gpio-omap");
MODULE_LICENSE("GPL v2");