1 /*
2  *  Driver for Atmel AT91 / AT32 Serial ports
3  *  Copyright (C) 2003 Rick Bronson
4  *
5  *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
6  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
7  *
8  *  DMA support added by Chip Coldwell.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  */
25 #include <linux/module.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/slab.h>
29 #include <linux/init.h>
30 #include <linux/serial.h>
31 #include <linux/clk.h>
32 #include <linux/console.h>
33 #include <linux/sysrq.h>
34 #include <linux/tty_flip.h>
35 #include <linux/platform_device.h>
36 #include <linux/of.h>
37 #include <linux/of_device.h>
38 #include <linux/of_gpio.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/dmaengine.h>
41 #include <linux/atmel_pdc.h>
42 #include <linux/atmel_serial.h>
43 #include <linux/uaccess.h>
44 #include <linux/platform_data/atmel.h>
45 #include <linux/timer.h>
46 #include <linux/gpio.h>
47 #include <linux/gpio/consumer.h>
48 #include <linux/err.h>
49 #include <linux/irq.h>
50 #include <linux/suspend.h>
51 
52 #include <asm/io.h>
53 #include <asm/ioctls.h>
54 
55 #define PDC_BUFFER_SIZE		512
56 /* Revisit: We should calculate this based on the actual port settings */
57 #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
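/*
 * Note: the receiver time-out is counted in bit periods, and with common
 * 8N1 framing one character takes roughly 10 bit periods - which is where
 * the "3 * 10" above comes from (an approximation; the exact frame length
 * depends on the configured parity and stop bits).
 */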
58 
59 /* The minimum number of data entries the FIFOs should be able to contain */
60 #define ATMEL_MIN_FIFO_SIZE	8
61 /*
62  * These two offsets are subtracted from the RX FIFO size to define the RTS
63  * high and low thresholds
64  */
65 #define ATMEL_RTS_HIGH_OFFSET	16
66 #define ATMEL_RTS_LOW_OFFSET	20
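/*
 * Illustrative example (assuming a 32-byte RX FIFO): RTS would be driven
 * inactive once the FIFO holds 32 - 16 = 16 bytes and asserted again when
 * it drains below 32 - 20 = 12 bytes.
 */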
67 
68 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
69 #define SUPPORT_SYSRQ
70 #endif
71 
72 #include <linux/serial_core.h>
73 
74 #include "serial_mctrl_gpio.h"
75 
76 static void atmel_start_rx(struct uart_port *port);
77 static void atmel_stop_rx(struct uart_port *port);
78 
79 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
80 
81 /* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
82  * should coexist with the 8250 driver, such as if we have an external 16C550
83  * UART. */
84 #define SERIAL_ATMEL_MAJOR	204
85 #define MINOR_START		154
86 #define ATMEL_DEVICENAME	"ttyAT"
87 
88 #else
89 
90 /* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
91  * name, but it is legally reserved for the 8250 driver. */
92 #define SERIAL_ATMEL_MAJOR	TTY_MAJOR
93 #define MINOR_START		64
94 #define ATMEL_DEVICENAME	"ttyS"
95 
96 #endif
97 
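/*
 * Bounds how many times atmel_interrupt() loops over the status registers in
 * a single invocation, so that a flood of events cannot keep the handler
 * spinning forever.
 */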
98 #define ATMEL_ISR_PASS_LIMIT	256
99 
100 struct atmel_dma_buffer {
101 	unsigned char	*buf;
102 	dma_addr_t	dma_addr;
103 	unsigned int	dma_size;
104 	unsigned int	ofs;
105 };
106 
107 struct atmel_uart_char {
108 	u16		status;
109 	u16		ch;
110 };
111 
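/*
 * Must stay a power of two: the ring indices are advanced with
 * "& (ATMEL_SERIAL_RINGSIZE - 1)" (see atmel_buffer_rx_char() and
 * atmel_rx_from_ring()).
 */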
112 #define ATMEL_SERIAL_RINGSIZE 1024
113 
114 /*
115  * at91: 6 USARTs and one DBGU port (SAM9260)
116  * avr32: 4
117  */
118 #define ATMEL_MAX_UART		7
119 
120 /*
121  * We wrap our port structure around the generic uart_port.
122  */
123 struct atmel_uart_port {
124 	struct uart_port	uart;		/* uart */
125 	struct clk		*clk;		/* uart clock */
126 	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
127 	u32			backup_imr;	/* IMR saved during suspend */
128 	int			break_active;	/* break being received */
129 
130 	bool			use_dma_rx;	/* enable DMA receiver */
131 	bool			use_pdc_rx;	/* enable PDC receiver */
132 	short			pdc_rx_idx;	/* current PDC RX buffer */
133 	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */
134 
135 	bool			use_dma_tx;     /* enable DMA transmitter */
136 	bool			use_pdc_tx;	/* enable PDC transmitter */
137 	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */
138 
139 	spinlock_t			lock_tx;	/* port lock */
140 	spinlock_t			lock_rx;	/* port lock */
141 	struct dma_chan			*chan_tx;
142 	struct dma_chan			*chan_rx;
143 	struct dma_async_tx_descriptor	*desc_tx;
144 	struct dma_async_tx_descriptor	*desc_rx;
145 	dma_cookie_t			cookie_tx;
146 	dma_cookie_t			cookie_rx;
147 	struct scatterlist		sg_tx;
148 	struct scatterlist		sg_rx;
149 	struct tasklet_struct	tasklet;
150 	unsigned int		irq_status;
151 	unsigned int		irq_status_prev;
152 	unsigned int		status_change;
153 	unsigned int		tx_len;
154 
155 	struct circ_buf		rx_ring;
156 
157 	struct mctrl_gpios	*gpios;
158 	int			gpio_irq[UART_GPIO_MAX];
159 	unsigned int		tx_done_mask;
160 	u32			fifo_size;
161 	u32			rts_high;
162 	u32			rts_low;
163 	bool			ms_irq_enabled;
164 	bool			is_usart;	/* usart or uart */
165 	struct timer_list	uart_timer;	/* uart timer */
166 
167 	bool			suspended;
168 	unsigned int		pending;
169 	unsigned int		pending_status;
170 	spinlock_t		lock_suspended;
171 
172 	int (*prepare_rx)(struct uart_port *port);
173 	int (*prepare_tx)(struct uart_port *port);
174 	void (*schedule_rx)(struct uart_port *port);
175 	void (*schedule_tx)(struct uart_port *port);
176 	void (*release_rx)(struct uart_port *port);
177 	void (*release_tx)(struct uart_port *port);
178 };
179 
180 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
181 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
182 
183 #ifdef SUPPORT_SYSRQ
184 static struct console atmel_console;
185 #endif
186 
187 #if defined(CONFIG_OF)
188 static const struct of_device_id atmel_serial_dt_ids[] = {
189 	{ .compatible = "atmel,at91rm9200-usart" },
190 	{ .compatible = "atmel,at91sam9260-usart" },
191 	{ /* sentinel */ }
192 };
193 
194 MODULE_DEVICE_TABLE(of, atmel_serial_dt_ids);
195 #endif
196 
197 static inline struct atmel_uart_port *
198 to_atmel_uart_port(struct uart_port *uart)
199 {
200 	return container_of(uart, struct atmel_uart_port, uart);
201 }
202 
203 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
204 {
205 	return __raw_readl(port->membase + reg);
206 }
207 
208 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
209 {
210 	__raw_writel(value, port->membase + reg);
211 }
212 
213 #ifdef CONFIG_AVR32
214 
215 /* AVR32 cannot handle 8- or 16-bit I/O accesses, only 32-bit I/O accesses */
216 static inline u8 atmel_uart_read_char(struct uart_port *port)
217 {
218 	return __raw_readl(port->membase + ATMEL_US_RHR);
219 }
220 
221 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
222 {
223 	__raw_writel(value, port->membase + ATMEL_US_THR);
224 }
225 
226 #else
227 
228 static inline u8 atmel_uart_read_char(struct uart_port *port)
229 {
230 	return __raw_readb(port->membase + ATMEL_US_RHR);
231 }
232 
233 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
234 {
235 	__raw_writeb(value, port->membase + ATMEL_US_THR);
236 }
237 
238 #endif
239 
240 #ifdef CONFIG_SERIAL_ATMEL_PDC
241 static bool atmel_use_pdc_rx(struct uart_port *port)
242 {
243 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
244 
245 	return atmel_port->use_pdc_rx;
246 }
247 
248 static bool atmel_use_pdc_tx(struct uart_port *port)
249 {
250 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
251 
252 	return atmel_port->use_pdc_tx;
253 }
254 #else
255 static bool atmel_use_pdc_rx(struct uart_port *port)
256 {
257 	return false;
258 }
259 
260 static bool atmel_use_pdc_tx(struct uart_port *port)
261 {
262 	return false;
263 }
264 #endif
265 
266 static bool atmel_use_dma_tx(struct uart_port *port)
267 {
268 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
269 
270 	return atmel_port->use_dma_tx;
271 }
272 
273 static bool atmel_use_dma_rx(struct uart_port *port)
274 {
275 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
276 
277 	return atmel_port->use_dma_rx;
278 }
279 
280 static bool atmel_use_fifo(struct uart_port *port)
281 {
282 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
283 
284 	return atmel_port->fifo_size;
285 }
286 
287 static unsigned int atmel_get_lines_status(struct uart_port *port)
288 {
289 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
290 	unsigned int status, ret = 0;
291 
292 	status = atmel_uart_readl(port, ATMEL_US_CSR);
293 
294 	mctrl_gpio_get(atmel_port->gpios, &ret);
295 
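	/*
	 * The CSR status bits are active low (see atmel_get_mctrl()), so when
	 * a modem-control line is backed by a GPIO we fold its state back into
	 * the corresponding CSR bit: line asserted -> bit cleared.
	 */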
296 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
297 						UART_GPIO_CTS))) {
298 		if (ret & TIOCM_CTS)
299 			status &= ~ATMEL_US_CTS;
300 		else
301 			status |= ATMEL_US_CTS;
302 	}
303 
304 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
305 						UART_GPIO_DSR))) {
306 		if (ret & TIOCM_DSR)
307 			status &= ~ATMEL_US_DSR;
308 		else
309 			status |= ATMEL_US_DSR;
310 	}
311 
312 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
313 						UART_GPIO_RI))) {
314 		if (ret & TIOCM_RI)
315 			status &= ~ATMEL_US_RI;
316 		else
317 			status |= ATMEL_US_RI;
318 	}
319 
320 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
321 						UART_GPIO_DCD))) {
322 		if (ret & TIOCM_CD)
323 			status &= ~ATMEL_US_DCD;
324 		else
325 			status |= ATMEL_US_DCD;
326 	}
327 
328 	return status;
329 }
330 
331 /* Enable or disable the rs485 support */
332 static int atmel_config_rs485(struct uart_port *port,
333 			      struct serial_rs485 *rs485conf)
334 {
335 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
336 	unsigned int mode;
337 
338 	/* Disable interrupts */
339 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
340 
341 	mode = atmel_uart_readl(port, ATMEL_US_MR);
342 
343 	/* Resetting serial mode to RS232 (0x0) */
344 	mode &= ~ATMEL_US_USMODE;
345 
346 	port->rs485 = *rs485conf;
347 
348 	if (rs485conf->flags & SER_RS485_ENABLED) {
349 		dev_dbg(port->dev, "Setting UART to RS485\n");
350 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
351 		atmel_uart_writel(port, ATMEL_US_TTGR,
352 				  rs485conf->delay_rts_after_send);
353 		mode |= ATMEL_US_USMODE_RS485;
354 	} else {
355 		dev_dbg(port->dev, "Setting UART to RS232\n");
356 		if (atmel_use_pdc_tx(port))
357 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
358 				ATMEL_US_TXBUFE;
359 		else
360 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
361 	}
362 	atmel_uart_writel(port, ATMEL_US_MR, mode);
363 
364 	/* Enable interrupts */
365 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
366 
367 	return 0;
368 }
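/*
 * Assuming this function is registered as the port's rs485_config handler,
 * it is typically reached from user space through the TIOCSRS485 ioctl via
 * the serial core.
 */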
369 
370 /*
371  * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
372  */
373 static u_int atmel_tx_empty(struct uart_port *port)
374 {
375 	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
376 		TIOCSER_TEMT :
377 		0;
378 }
379 
380 /*
381  * Set state of the modem control output lines
382  */
383 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
384 {
385 	unsigned int control = 0;
386 	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
387 	unsigned int rts_paused, rts_ready;
388 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
389 
390 	/* override mode to RS485 if needed, otherwise keep the current mode */
391 	if (port->rs485.flags & SER_RS485_ENABLED) {
392 		atmel_uart_writel(port, ATMEL_US_TTGR,
393 				  port->rs485.delay_rts_after_send);
394 		mode &= ~ATMEL_US_USMODE;
395 		mode |= ATMEL_US_USMODE_RS485;
396 	}
397 
398 	/* set the RTS line state according to the mode */
399 	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
400 		/* force RTS line to high level */
401 		rts_paused = ATMEL_US_RTSEN;
402 
403 		/* give the control of the RTS line back to the hardware */
404 		rts_ready = ATMEL_US_RTSDIS;
405 	} else {
406 		/* force RTS line to high level */
407 		rts_paused = ATMEL_US_RTSDIS;
408 
409 		/* force RTS line to low level */
410 		rts_ready = ATMEL_US_RTSEN;
411 	}
412 
413 	if (mctrl & TIOCM_RTS)
414 		control |= rts_ready;
415 	else
416 		control |= rts_paused;
417 
418 	if (mctrl & TIOCM_DTR)
419 		control |= ATMEL_US_DTREN;
420 	else
421 		control |= ATMEL_US_DTRDIS;
422 
423 	atmel_uart_writel(port, ATMEL_US_CR, control);
424 
425 	mctrl_gpio_set(atmel_port->gpios, mctrl);
426 
427 	/* Local loopback mode? */
428 	mode &= ~ATMEL_US_CHMODE;
429 	if (mctrl & TIOCM_LOOP)
430 		mode |= ATMEL_US_CHMODE_LOC_LOOP;
431 	else
432 		mode |= ATMEL_US_CHMODE_NORMAL;
433 
434 	atmel_uart_writel(port, ATMEL_US_MR, mode);
435 }
436 
437 /*
438  * Get state of the modem control input lines
439  */
440 static u_int atmel_get_mctrl(struct uart_port *port)
441 {
442 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
443 	unsigned int ret = 0, status;
444 
445 	status = atmel_uart_readl(port, ATMEL_US_CSR);
446 
447 	/*
448 	 * The control signals are active low.
449 	 */
450 	if (!(status & ATMEL_US_DCD))
451 		ret |= TIOCM_CD;
452 	if (!(status & ATMEL_US_CTS))
453 		ret |= TIOCM_CTS;
454 	if (!(status & ATMEL_US_DSR))
455 		ret |= TIOCM_DSR;
456 	if (!(status & ATMEL_US_RI))
457 		ret |= TIOCM_RI;
458 
459 	return mctrl_gpio_get(atmel_port->gpios, &ret);
460 }
461 
462 /*
463  * Stop transmitting.
464  */
465 static void atmel_stop_tx(struct uart_port *port)
466 {
467 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
468 
469 	if (atmel_use_pdc_tx(port)) {
470 		/* disable PDC transmit */
471 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
472 	}
473 	/* Disable interrupts */
474 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
475 
476 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
477 	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
478 		atmel_start_rx(port);
479 }
480 
481 /*
482  * Start transmitting.
483  */
484 static void atmel_start_tx(struct uart_port *port)
485 {
486 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
487 
488 	if (atmel_use_pdc_tx(port)) {
489 		if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
490 			/* The transmitter is already running.  Yes, we
491 			   really need this.*/
492 			return;
493 
494 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
495 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
496 			atmel_stop_rx(port);
497 
498 		/* re-enable PDC transmit */
499 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
500 	}
501 	/* Enable interrupts */
502 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
503 }
504 
505 /*
506  * Start receiving - port is in the process of being opened.
507  */
508 static void atmel_start_rx(struct uart_port *port)
509 {
510 	/* reset status and receiver */
511 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
512 
513 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
514 
515 	if (atmel_use_pdc_rx(port)) {
516 		/* enable PDC controller */
517 		atmel_uart_writel(port, ATMEL_US_IER,
518 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
519 				  port->read_status_mask);
520 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
521 	} else {
522 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
523 	}
524 }
525 
526 /*
527  * Stop receiving - port is in the process of being closed.
528  */
529 static void atmel_stop_rx(struct uart_port *port)
530 {
531 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
532 
533 	if (atmel_use_pdc_rx(port)) {
534 		/* disable PDC receive */
535 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
536 		atmel_uart_writel(port, ATMEL_US_IDR,
537 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
538 				  port->read_status_mask);
539 	} else {
540 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
541 	}
542 }
543 
544 /*
545  * Enable modem status interrupts
546  */
547 static void atmel_enable_ms(struct uart_port *port)
548 {
549 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
550 	uint32_t ier = 0;
551 
552 	/*
553 	 * Interrupt should not be enabled twice
554 	 */
555 	if (atmel_port->ms_irq_enabled)
556 		return;
557 
558 	atmel_port->ms_irq_enabled = true;
559 
560 	if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0)
561 		enable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]);
562 	else
563 		ier |= ATMEL_US_CTSIC;
564 
565 	if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0)
566 		enable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]);
567 	else
568 		ier |= ATMEL_US_DSRIC;
569 
570 	if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0)
571 		enable_irq(atmel_port->gpio_irq[UART_GPIO_RI]);
572 	else
573 		ier |= ATMEL_US_RIIC;
574 
575 	if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0)
576 		enable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]);
577 	else
578 		ier |= ATMEL_US_DCDIC;
579 
580 	atmel_uart_writel(port, ATMEL_US_IER, ier);
581 }
582 
583 /*
584  * Disable modem status interrupts
585  */
586 static void atmel_disable_ms(struct uart_port *port)
587 {
588 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
589 	uint32_t idr = 0;
590 
591 	/*
592 	 * Interrupt should not be disabled twice
593 	 */
594 	if (!atmel_port->ms_irq_enabled)
595 		return;
596 
597 	atmel_port->ms_irq_enabled = false;
598 
599 	if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0)
600 		disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]);
601 	else
602 		idr |= ATMEL_US_CTSIC;
603 
604 	if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0)
605 		disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]);
606 	else
607 		idr |= ATMEL_US_DSRIC;
608 
609 	if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0)
610 		disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]);
611 	else
612 		idr |= ATMEL_US_RIIC;
613 
614 	if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0)
615 		disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]);
616 	else
617 		idr |= ATMEL_US_DCDIC;
618 
619 	atmel_uart_writel(port, ATMEL_US_IDR, idr);
620 }
621 
622 /*
623  * Control the transmission of a break signal
624  */
625 static void atmel_break_ctl(struct uart_port *port, int break_state)
626 {
627 	if (break_state != 0)
628 		/* start break */
629 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
630 	else
631 		/* stop break */
632 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
633 }
634 
635 /*
636  * Stores the incoming character in the ring buffer
637  */
638 static void
639 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
640 		     unsigned int ch)
641 {
642 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
643 	struct circ_buf *ring = &atmel_port->rx_ring;
644 	struct atmel_uart_char *c;
645 
646 	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
647 		/* Buffer overflow, ignore char */
648 		return;
649 
650 	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
651 	c->status	= status;
652 	c->ch		= ch;
653 
654 	/* Make sure the character is stored before we update head. */
655 	smp_wmb();
656 
657 	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
658 }
659 
660 /*
661  * Deal with parity, framing and overrun errors.
662  */
663 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
664 {
665 	/* clear error */
666 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
667 
668 	if (status & ATMEL_US_RXBRK) {
669 		/* ignore side-effect */
670 		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
671 		port->icount.brk++;
672 	}
673 	if (status & ATMEL_US_PARE)
674 		port->icount.parity++;
675 	if (status & ATMEL_US_FRAME)
676 		port->icount.frame++;
677 	if (status & ATMEL_US_OVRE)
678 		port->icount.overrun++;
679 }
680 
681 /*
682  * Characters received (called from interrupt handler)
683  */
684 static void atmel_rx_chars(struct uart_port *port)
685 {
686 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
687 	unsigned int status, ch;
688 
689 	status = atmel_uart_readl(port, ATMEL_US_CSR);
690 	while (status & ATMEL_US_RXRDY) {
691 		ch = atmel_uart_read_char(port);
692 
693 		/*
694 		 * note that the error handling code is
695 		 * out of the main execution path
696 		 */
697 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
698 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
699 			     || atmel_port->break_active)) {
700 
701 			/* clear error */
702 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
703 
704 			if (status & ATMEL_US_RXBRK
705 			    && !atmel_port->break_active) {
706 				atmel_port->break_active = 1;
707 				atmel_uart_writel(port, ATMEL_US_IER,
708 						  ATMEL_US_RXBRK);
709 			} else {
710 				/*
711 				 * This is either the end-of-break
712 				 * condition or we've received at
713 				 * least one character without RXBRK
714 				 * being set. In both cases, the next
715 				 * RXBRK will indicate start-of-break.
716 				 */
717 				atmel_uart_writel(port, ATMEL_US_IDR,
718 						  ATMEL_US_RXBRK);
719 				status &= ~ATMEL_US_RXBRK;
720 				atmel_port->break_active = 0;
721 			}
722 		}
723 
724 		atmel_buffer_rx_char(port, status, ch);
725 		status = atmel_uart_readl(port, ATMEL_US_CSR);
726 	}
727 
728 	tasklet_schedule(&atmel_port->tasklet);
729 }
730 
731 /*
732  * Transmit characters (called from tasklet with TXRDY interrupt
733  * disabled)
734  */
735 static void atmel_tx_chars(struct uart_port *port)
736 {
737 	struct circ_buf *xmit = &port->state->xmit;
738 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
739 
740 	if (port->x_char &&
741 	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
742 		atmel_uart_write_char(port, port->x_char);
743 		port->icount.tx++;
744 		port->x_char = 0;
745 	}
746 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
747 		return;
748 
749 	while (atmel_uart_readl(port, ATMEL_US_CSR) &
750 	       atmel_port->tx_done_mask) {
751 		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
752 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
753 		port->icount.tx++;
754 		if (uart_circ_empty(xmit))
755 			break;
756 	}
757 
758 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
759 		uart_write_wakeup(port);
760 
761 	if (!uart_circ_empty(xmit))
762 		/* Enable interrupts */
763 		atmel_uart_writel(port, ATMEL_US_IER,
764 				  atmel_port->tx_done_mask);
765 }
766 
767 static void atmel_complete_tx_dma(void *arg)
768 {
769 	struct atmel_uart_port *atmel_port = arg;
770 	struct uart_port *port = &atmel_port->uart;
771 	struct circ_buf *xmit = &port->state->xmit;
772 	struct dma_chan *chan = atmel_port->chan_tx;
773 	unsigned long flags;
774 
775 	spin_lock_irqsave(&port->lock, flags);
776 
777 	if (chan)
778 		dmaengine_terminate_all(chan);
779 	xmit->tail += atmel_port->tx_len;
780 	xmit->tail &= UART_XMIT_SIZE - 1;
781 
782 	port->icount.tx += atmel_port->tx_len;
783 
784 	spin_lock_irq(&atmel_port->lock_tx);
785 	async_tx_ack(atmel_port->desc_tx);
786 	atmel_port->cookie_tx = -EINVAL;
787 	atmel_port->desc_tx = NULL;
788 	spin_unlock_irq(&atmel_port->lock_tx);
789 
790 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
791 		uart_write_wakeup(port);
792 
793 	/*
794 	 * xmit is a circular buffer so, if we have just sent data from
795 	 * xmit->tail to the end of xmit->buf, now we have to transmit the
796 	 * remaining data from the beginning of xmit->buf to xmit->head.
797 	 */
798 	if (!uart_circ_empty(xmit))
799 		tasklet_schedule(&atmel_port->tasklet);
800 
801 	spin_unlock_irqrestore(&port->lock, flags);
802 }
803 
804 static void atmel_release_tx_dma(struct uart_port *port)
805 {
806 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
807 	struct dma_chan *chan = atmel_port->chan_tx;
808 
809 	if (chan) {
810 		dmaengine_terminate_all(chan);
811 		dma_release_channel(chan);
812 		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
813 				DMA_TO_DEVICE);
814 	}
815 
816 	atmel_port->desc_tx = NULL;
817 	atmel_port->chan_tx = NULL;
818 	atmel_port->cookie_tx = -EINVAL;
819 }
820 
821 /*
822  * Called from tasklet with TXRDY interrupt disabled.
823  */
824 static void atmel_tx_dma(struct uart_port *port)
825 {
826 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
827 	struct circ_buf *xmit = &port->state->xmit;
828 	struct dma_chan *chan = atmel_port->chan_tx;
829 	struct dma_async_tx_descriptor *desc;
830 	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
831 	unsigned int tx_len, part1_len, part2_len, sg_len;
832 	dma_addr_t phys_addr;
833 
834 	/* Make sure we have an idle channel */
835 	if (atmel_port->desc_tx != NULL)
836 		return;
837 
838 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
839 		/*
840 		 * DMA is idle now.
841 		 * Port xmit buffer is already mapped,
842 		 * and it is one page... Just adjust
843 		 * offsets and lengths. Since it is a circular buffer,
844 		 * we have to transmit till the end, and then the rest.
845 		 * Take the port lock to get a
846 		 * consistent xmit buffer state.
847 		 */
848 		tx_len = CIRC_CNT_TO_END(xmit->head,
849 					 xmit->tail,
850 					 UART_XMIT_SIZE);
851 
852 		if (atmel_port->fifo_size) {
853 			/* multi data mode */
854 			part1_len = (tx_len & ~0x3); /* DWORD access */
855 			part2_len = (tx_len & 0x3); /* BYTE access */
856 		} else {
857 			/* single data (legacy) mode */
858 			part1_len = 0;
859 			part2_len = tx_len; /* BYTE access only */
860 		}
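		/*
		 * e.g. tx_len = 13 in multi data mode gives part1_len = 12
		 * (three 32-bit accesses) and part2_len = 1.
		 */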
861 
862 		sg_init_table(sgl, 2);
863 		sg_len = 0;
864 		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
865 		if (part1_len) {
866 			sg = &sgl[sg_len++];
867 			sg_dma_address(sg) = phys_addr;
868 			sg_dma_len(sg) = part1_len;
869 
870 			phys_addr += part1_len;
871 		}
872 
873 		if (part2_len) {
874 			sg = &sgl[sg_len++];
875 			sg_dma_address(sg) = phys_addr;
876 			sg_dma_len(sg) = part2_len;
877 		}
878 
879 		/*
880 		 * save tx_len so atmel_complete_tx_dma() will increase
881 		 * xmit->tail correctly
882 		 */
883 		atmel_port->tx_len = tx_len;
884 
885 		desc = dmaengine_prep_slave_sg(chan,
886 					       sgl,
887 					       sg_len,
888 					       DMA_MEM_TO_DEV,
889 					       DMA_PREP_INTERRUPT |
890 					       DMA_CTRL_ACK);
891 		if (!desc) {
892 			dev_err(port->dev, "Failed to send via dma!\n");
893 			return;
894 		}
895 
896 		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
897 
898 		atmel_port->desc_tx = desc;
899 		desc->callback = atmel_complete_tx_dma;
900 		desc->callback_param = atmel_port;
901 		atmel_port->cookie_tx = dmaengine_submit(desc);
902 
903 	} else {
904 		if (port->rs485.flags & SER_RS485_ENABLED) {
905 			/* DMA done, stop TX, start RX for RS485 */
906 			atmel_start_rx(port);
907 		}
908 	}
909 
910 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
911 		uart_write_wakeup(port);
912 }
913 
914 static int atmel_prepare_tx_dma(struct uart_port *port)
915 {
916 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
917 	dma_cap_mask_t		mask;
918 	struct dma_slave_config config;
919 	int ret, nent;
920 
921 	dma_cap_zero(mask);
922 	dma_cap_set(DMA_SLAVE, mask);
923 
924 	atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
925 	if (atmel_port->chan_tx == NULL)
926 		goto chan_err;
927 	dev_info(port->dev, "using %s for tx DMA transfers\n",
928 		dma_chan_name(atmel_port->chan_tx));
929 
930 	spin_lock_init(&atmel_port->lock_tx);
931 	sg_init_table(&atmel_port->sg_tx, 1);
932 	/* UART circular tx buffer is an aligned page. */
933 	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
934 	sg_set_page(&atmel_port->sg_tx,
935 			virt_to_page(port->state->xmit.buf),
936 			UART_XMIT_SIZE,
937 			(unsigned long)port->state->xmit.buf & ~PAGE_MASK);
938 	nent = dma_map_sg(port->dev,
939 				&atmel_port->sg_tx,
940 				1,
941 				DMA_TO_DEVICE);
942 
943 	if (!nent) {
944 		dev_dbg(port->dev, "need to release resource of dma\n");
945 		goto chan_err;
946 	} else {
947 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
948 			sg_dma_len(&atmel_port->sg_tx),
949 			port->state->xmit.buf,
950 			&sg_dma_address(&atmel_port->sg_tx));
951 	}
952 
953 	/* Configure the slave DMA */
954 	memset(&config, 0, sizeof(config));
955 	config.direction = DMA_MEM_TO_DEV;
956 	config.dst_addr_width = (atmel_port->fifo_size) ?
957 				DMA_SLAVE_BUSWIDTH_4_BYTES :
958 				DMA_SLAVE_BUSWIDTH_1_BYTE;
959 	config.dst_addr = port->mapbase + ATMEL_US_THR;
960 	config.dst_maxburst = 1;
961 
962 	ret = dmaengine_slave_config(atmel_port->chan_tx,
963 				     &config);
964 	if (ret) {
965 		dev_err(port->dev, "DMA tx slave configuration failed\n");
966 		goto chan_err;
967 	}
968 
969 	return 0;
970 
971 chan_err:
972 	dev_err(port->dev, "TX channel not available, switch to pio\n");
973 	atmel_port->use_dma_tx = 0;
974 	if (atmel_port->chan_tx)
975 		atmel_release_tx_dma(port);
976 	return -EINVAL;
977 }
978 
979 static void atmel_complete_rx_dma(void *arg)
980 {
981 	struct uart_port *port = arg;
982 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
983 
984 	tasklet_schedule(&atmel_port->tasklet);
985 }
986 
987 static void atmel_release_rx_dma(struct uart_port *port)
988 {
989 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
990 	struct dma_chan *chan = atmel_port->chan_rx;
991 
992 	if (chan) {
993 		dmaengine_terminate_all(chan);
994 		dma_release_channel(chan);
995 		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
996 				DMA_FROM_DEVICE);
997 	}
998 
999 	atmel_port->desc_rx = NULL;
1000 	atmel_port->chan_rx = NULL;
1001 	atmel_port->cookie_rx = -EINVAL;
1002 }
1003 
1004 static void atmel_rx_from_dma(struct uart_port *port)
1005 {
1006 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1007 	struct tty_port *tport = &port->state->port;
1008 	struct circ_buf *ring = &atmel_port->rx_ring;
1009 	struct dma_chan *chan = atmel_port->chan_rx;
1010 	struct dma_tx_state state;
1011 	enum dma_status dmastat;
1012 	size_t count;
1013 
1014 
1015 	/* Reset the UART timeout early so that we don't miss one */
1016 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1017 	dmastat = dmaengine_tx_status(chan,
1018 				atmel_port->cookie_rx,
1019 				&state);
1020 	/* Reschedule the tasklet if the DMA status reports an error */
1021 	if (dmastat == DMA_ERROR) {
1022 		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1023 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1024 		tasklet_schedule(&atmel_port->tasklet);
1025 		return;
1026 	}
1027 
1028 	/* CPU claims ownership of RX DMA buffer */
1029 	dma_sync_sg_for_cpu(port->dev,
1030 			    &atmel_port->sg_rx,
1031 			    1,
1032 			    DMA_FROM_DEVICE);
1033 
1034 	/*
1035 	 * ring->head points to the end of data already written by the DMA.
1036 	 * ring->tail points to the beginning of data to be read by the
1037 	 * framework.
1038 	 * The current transfer size should not be larger than the dma buffer
1039 	 * length.
1040 	 */
1041 	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1042 	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1043 	/*
1044 	 * At this point ring->head may point to the first byte right after the
1045 	 * last byte of the dma buffer:
1046 	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1047 	 *
1048 	 * However, ring->tail must always point inside the dma buffer:
1049 	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1050 	 *
1051 	 * Since we use a ring buffer, we have to handle the case
1052 	 * where head is lower than tail. In such a case, we first read from
1053 	 * tail to the end of the buffer then reset tail.
1054 	 */
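	/*
	 * Worked example (hypothetical numbers): with a 4096 byte DMA buffer
	 * and state.residue == 1000, ring->head becomes 3096 and everything
	 * between ring->tail and 3096 is pushed to the tty layer below.
	 */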
1055 	if (ring->head < ring->tail) {
1056 		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1057 
1058 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1059 		ring->tail = 0;
1060 		port->icount.rx += count;
1061 	}
1062 
1063 	/* Finally we read data from tail to head */
1064 	if (ring->tail < ring->head) {
1065 		count = ring->head - ring->tail;
1066 
1067 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1068 		/* Wrap ring->head if needed */
1069 		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1070 			ring->head = 0;
1071 		ring->tail = ring->head;
1072 		port->icount.rx += count;
1073 	}
1074 
1075 	/* USART retrieves ownership of RX DMA buffer */
1076 	dma_sync_sg_for_device(port->dev,
1077 			       &atmel_port->sg_rx,
1078 			       1,
1079 			       DMA_FROM_DEVICE);
1080 
1081 	/*
1082 	 * Drop the lock here since it might end up calling
1083 	 * uart_start(), which takes the lock.
1084 	 */
1085 	spin_unlock(&port->lock);
1086 	tty_flip_buffer_push(tport);
1087 	spin_lock(&port->lock);
1088 
1089 	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1090 }
1091 
1092 static int atmel_prepare_rx_dma(struct uart_port *port)
1093 {
1094 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1095 	struct dma_async_tx_descriptor *desc;
1096 	dma_cap_mask_t		mask;
1097 	struct dma_slave_config config;
1098 	struct circ_buf		*ring;
1099 	int ret, nent;
1100 
1101 	ring = &atmel_port->rx_ring;
1102 
1103 	dma_cap_zero(mask);
1104 	dma_cap_set(DMA_CYCLIC, mask);
1105 
1106 	atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
1107 	if (atmel_port->chan_rx == NULL)
1108 		goto chan_err;
1109 	dev_info(port->dev, "using %s for rx DMA transfers\n",
1110 		dma_chan_name(atmel_port->chan_rx));
1111 
1112 	spin_lock_init(&atmel_port->lock_rx);
1113 	sg_init_table(&atmel_port->sg_rx, 1);
1114 	/* UART circular rx buffer is an aligned page. */
1115 	BUG_ON(!PAGE_ALIGNED(ring->buf));
1116 	sg_set_page(&atmel_port->sg_rx,
1117 		    virt_to_page(ring->buf),
1118 		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1119 		    (unsigned long)ring->buf & ~PAGE_MASK);
1120 	nent = dma_map_sg(port->dev,
1121 			  &atmel_port->sg_rx,
1122 			  1,
1123 			  DMA_FROM_DEVICE);
1124 
1125 	if (!nent) {
1126 		dev_dbg(port->dev, "need to release resource of dma\n");
1127 		goto chan_err;
1128 	} else {
1129 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1130 			sg_dma_len(&atmel_port->sg_rx),
1131 			ring->buf,
1132 			&sg_dma_address(&atmel_port->sg_rx));
1133 	}
1134 
1135 	/* Configure the slave DMA */
1136 	memset(&config, 0, sizeof(config));
1137 	config.direction = DMA_DEV_TO_MEM;
1138 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1139 	config.src_addr = port->mapbase + ATMEL_US_RHR;
1140 	config.src_maxburst = 1;
1141 
1142 	ret = dmaengine_slave_config(atmel_port->chan_rx,
1143 				     &config);
1144 	if (ret) {
1145 		dev_err(port->dev, "DMA rx slave configuration failed\n");
1146 		goto chan_err;
1147 	}
1148 	/*
1149 	 * Prepare a cyclic dma transfer, assign 2 descriptors,
1150 	 * each one is half ring buffer size
1151 	 */
1152 	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1153 					 sg_dma_address(&atmel_port->sg_rx),
1154 					 sg_dma_len(&atmel_port->sg_rx),
1155 					 sg_dma_len(&atmel_port->sg_rx)/2,
1156 					 DMA_DEV_TO_MEM,
1157 					 DMA_PREP_INTERRUPT);
	if (!desc) {
		/* dmaengine_prep_dma_cyclic() can fail and return NULL;
		 * fall back to PIO like the other error paths here. */
		dev_err(port->dev, "Preparing DMA cyclic failed\n");
		goto chan_err;
	}
1158 	desc->callback = atmel_complete_rx_dma;
1159 	desc->callback_param = port;
1160 	atmel_port->desc_rx = desc;
1161 	atmel_port->cookie_rx = dmaengine_submit(desc);
1162 
1163 	return 0;
1164 
1165 chan_err:
1166 	dev_err(port->dev, "RX channel not available, switch to pio\n");
1167 	atmel_port->use_dma_rx = 0;
1168 	if (atmel_port->chan_rx)
1169 		atmel_release_rx_dma(port);
1170 	return -EINVAL;
1171 }
1172 
1173 static void atmel_uart_timer_callback(unsigned long data)
1174 {
1175 	struct uart_port *port = (void *)data;
1176 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1177 
1178 	tasklet_schedule(&atmel_port->tasklet);
1179 	mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port));
1180 }
1181 
1182 /*
1183  * receive interrupt handler.
1184  */
1185 static void
1186 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1187 {
1188 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1189 
1190 	if (atmel_use_pdc_rx(port)) {
1191 		/*
1192 		 * PDC receive. Just schedule the tasklet and let it
1193 		 * figure out the details.
1194 		 *
1195 		 * TODO: We're not handling error flags correctly at
1196 		 * the moment.
1197 		 */
1198 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1199 			atmel_uart_writel(port, ATMEL_US_IDR,
1200 					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1201 			tasklet_schedule(&atmel_port->tasklet);
1202 		}
1203 
1204 		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1205 				ATMEL_US_FRAME | ATMEL_US_PARE))
1206 			atmel_pdc_rxerr(port, pending);
1207 	}
1208 
1209 	if (atmel_use_dma_rx(port)) {
1210 		if (pending & ATMEL_US_TIMEOUT) {
1211 			atmel_uart_writel(port, ATMEL_US_IDR,
1212 					  ATMEL_US_TIMEOUT);
1213 			tasklet_schedule(&atmel_port->tasklet);
1214 		}
1215 	}
1216 
1217 	/* Interrupt receive */
1218 	if (pending & ATMEL_US_RXRDY)
1219 		atmel_rx_chars(port);
1220 	else if (pending & ATMEL_US_RXBRK) {
1221 		/*
1222 		 * End of break detected. If it came along with a
1223 		 * character, atmel_rx_chars will handle it.
1224 		 */
1225 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1226 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1227 		atmel_port->break_active = 0;
1228 	}
1229 }
1230 
1231 /*
1232  * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1233  */
1234 static void
1235 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1236 {
1237 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1238 
1239 	if (pending & atmel_port->tx_done_mask) {
1240 		/* Either PDC or interrupt transmission */
1241 		atmel_uart_writel(port, ATMEL_US_IDR,
1242 				  atmel_port->tx_done_mask);
1243 		tasklet_schedule(&atmel_port->tasklet);
1244 	}
1245 }
1246 
1247 /*
1248  * status flags interrupt handler.
1249  */
1250 static void
1251 atmel_handle_status(struct uart_port *port, unsigned int pending,
1252 		    unsigned int status)
1253 {
1254 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1255 
1256 	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1257 				| ATMEL_US_CTSIC)) {
1258 		atmel_port->irq_status = status;
1259 		atmel_port->status_change = atmel_port->irq_status ^
1260 					    atmel_port->irq_status_prev;
1261 		atmel_port->irq_status_prev = status;
1262 		tasklet_schedule(&atmel_port->tasklet);
1263 	}
1264 }
1265 
1266 /*
1267  * Interrupt handler
1268  */
1269 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1270 {
1271 	struct uart_port *port = dev_id;
1272 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1273 	unsigned int status, pending, mask, pass_counter = 0;
1274 	bool gpio_handled = false;
1275 
1276 	spin_lock(&atmel_port->lock_suspended);
1277 
1278 	do {
1279 		status = atmel_get_lines_status(port);
1280 		mask = atmel_uart_readl(port, ATMEL_US_IMR);
1281 		pending = status & mask;
1282 		if (!gpio_handled) {
1283 			/*
1284 			 * Dealing with GPIO interrupt
1285 			 */
1286 			if (irq == atmel_port->gpio_irq[UART_GPIO_CTS])
1287 				pending |= ATMEL_US_CTSIC;
1288 
1289 			if (irq == atmel_port->gpio_irq[UART_GPIO_DSR])
1290 				pending |= ATMEL_US_DSRIC;
1291 
1292 			if (irq == atmel_port->gpio_irq[UART_GPIO_RI])
1293 				pending |= ATMEL_US_RIIC;
1294 
1295 			if (irq == atmel_port->gpio_irq[UART_GPIO_DCD])
1296 				pending |= ATMEL_US_DCDIC;
1297 
1298 			gpio_handled = true;
1299 		}
1300 		if (!pending)
1301 			break;
1302 
1303 		if (atmel_port->suspended) {
1304 			atmel_port->pending |= pending;
1305 			atmel_port->pending_status = status;
1306 			atmel_uart_writel(port, ATMEL_US_IDR, mask);
1307 			pm_system_wakeup();
1308 			break;
1309 		}
1310 
1311 		atmel_handle_receive(port, pending);
1312 		atmel_handle_status(port, pending, status);
1313 		atmel_handle_transmit(port, pending);
1314 	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1315 
1316 	spin_unlock(&atmel_port->lock_suspended);
1317 
1318 	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1319 }
1320 
1321 static void atmel_release_tx_pdc(struct uart_port *port)
1322 {
1323 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1324 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1325 
1326 	dma_unmap_single(port->dev,
1327 			 pdc->dma_addr,
1328 			 pdc->dma_size,
1329 			 DMA_TO_DEVICE);
1330 }
1331 
1332 /*
1333  * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1334  */
1335 static void atmel_tx_pdc(struct uart_port *port)
1336 {
1337 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1338 	struct circ_buf *xmit = &port->state->xmit;
1339 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1340 	int count;
1341 
1342 	/* nothing left to transmit? */
1343 	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1344 		return;
1345 
1346 	xmit->tail += pdc->ofs;
1347 	xmit->tail &= UART_XMIT_SIZE - 1;
1348 
1349 	port->icount.tx += pdc->ofs;
1350 	pdc->ofs = 0;
1351 
1352 	/* more to transmit - setup next transfer */
1353 
1354 	/* disable PDC transmit */
1355 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1356 
1357 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1358 		dma_sync_single_for_device(port->dev,
1359 					   pdc->dma_addr,
1360 					   pdc->dma_size,
1361 					   DMA_TO_DEVICE);
1362 
1363 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1364 		pdc->ofs = count;
1365 
1366 		atmel_uart_writel(port, ATMEL_PDC_TPR,
1367 				  pdc->dma_addr + xmit->tail);
1368 		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1369 		/* re-enable PDC transmit */
1370 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1371 		/* Enable interrupts */
1372 		atmel_uart_writel(port, ATMEL_US_IER,
1373 				  atmel_port->tx_done_mask);
1374 	} else {
1375 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
1376 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
1377 			/* DMA done, stop TX, start RX for RS485 */
1378 			atmel_start_rx(port);
1379 		}
1380 	}
1381 
1382 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1383 		uart_write_wakeup(port);
1384 }
1385 
1386 static int atmel_prepare_tx_pdc(struct uart_port *port)
1387 {
1388 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1389 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1390 	struct circ_buf *xmit = &port->state->xmit;
1391 
1392 	pdc->buf = xmit->buf;
1393 	pdc->dma_addr = dma_map_single(port->dev,
1394 					pdc->buf,
1395 					UART_XMIT_SIZE,
1396 					DMA_TO_DEVICE);
1397 	pdc->dma_size = UART_XMIT_SIZE;
1398 	pdc->ofs = 0;
1399 
1400 	return 0;
1401 }
1402 
1403 static void atmel_rx_from_ring(struct uart_port *port)
1404 {
1405 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1406 	struct circ_buf *ring = &atmel_port->rx_ring;
1407 	unsigned int flg;
1408 	unsigned int status;
1409 
1410 	while (ring->head != ring->tail) {
1411 		struct atmel_uart_char c;
1412 
1413 		/* Make sure c is loaded after head. */
1414 		smp_rmb();
1415 
1416 		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1417 
1418 		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1419 
1420 		port->icount.rx++;
1421 		status = c.status;
1422 		flg = TTY_NORMAL;
1423 
1424 		/*
1425 		 * note that the error handling code is
1426 		 * out of the main execution path
1427 		 */
1428 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1429 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1430 			if (status & ATMEL_US_RXBRK) {
1431 				/* ignore side-effect */
1432 				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1433 
1434 				port->icount.brk++;
1435 				if (uart_handle_break(port))
1436 					continue;
1437 			}
1438 			if (status & ATMEL_US_PARE)
1439 				port->icount.parity++;
1440 			if (status & ATMEL_US_FRAME)
1441 				port->icount.frame++;
1442 			if (status & ATMEL_US_OVRE)
1443 				port->icount.overrun++;
1444 
1445 			status &= port->read_status_mask;
1446 
1447 			if (status & ATMEL_US_RXBRK)
1448 				flg = TTY_BREAK;
1449 			else if (status & ATMEL_US_PARE)
1450 				flg = TTY_PARITY;
1451 			else if (status & ATMEL_US_FRAME)
1452 				flg = TTY_FRAME;
1453 		}
1454 
1455 
1456 		if (uart_handle_sysrq_char(port, c.ch))
1457 			continue;
1458 
1459 		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1460 	}
1461 
1462 	/*
1463 	 * Drop the lock here since it might end up calling
1464 	 * uart_start(), which takes the lock.
1465 	 */
1466 	spin_unlock(&port->lock);
1467 	tty_flip_buffer_push(&port->state->port);
1468 	spin_lock(&port->lock);
1469 }
1470 
1471 static void atmel_release_rx_pdc(struct uart_port *port)
1472 {
1473 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1474 	int i;
1475 
1476 	for (i = 0; i < 2; i++) {
1477 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1478 
1479 		dma_unmap_single(port->dev,
1480 				 pdc->dma_addr,
1481 				 pdc->dma_size,
1482 				 DMA_FROM_DEVICE);
1483 		kfree(pdc->buf);
1484 	}
1485 }
1486 
1487 static void atmel_rx_from_pdc(struct uart_port *port)
1488 {
1489 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1490 	struct tty_port *tport = &port->state->port;
1491 	struct atmel_dma_buffer *pdc;
1492 	int rx_idx = atmel_port->pdc_rx_idx;
1493 	unsigned int head;
1494 	unsigned int tail;
1495 	unsigned int count;
1496 
1497 	do {
1498 		/* Reset the UART timeout early so that we don't miss one */
1499 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1500 
1501 		pdc = &atmel_port->pdc_rx[rx_idx];
1502 		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1503 		tail = pdc->ofs;
1504 
1505 		/* If the PDC has switched buffers, RPR won't contain
1506 		 * any address within the current buffer. Since head
1507 		 * is unsigned, we just need a one-way comparison to
1508 		 * find out.
1509 		 *
1510 		 * In this case, we just need to consume the entire
1511 		 * buffer and resubmit it for DMA. This will clear the
1512 		 * ENDRX bit as well, so that we can safely re-enable
1513 		 * all interrupts below.
1514 		 */
1515 		head = min(head, pdc->dma_size);
1516 
1517 		if (likely(head != tail)) {
1518 			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1519 					pdc->dma_size, DMA_FROM_DEVICE);
1520 
1521 			/*
1522 			 * head will only wrap around when we recycle
1523 			 * the DMA buffer, and when that happens, we
1524 			 * explicitly set tail to 0. So head will
1525 			 * always be greater than tail.
1526 			 */
1527 			count = head - tail;
1528 
1529 			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1530 						count);
1531 
1532 			dma_sync_single_for_device(port->dev, pdc->dma_addr,
1533 					pdc->dma_size, DMA_FROM_DEVICE);
1534 
1535 			port->icount.rx += count;
1536 			pdc->ofs = head;
1537 		}
1538 
1539 		/*
1540 		 * If the current buffer is full, we need to check if
1541 		 * the next one contains any additional data.
1542 		 */
1543 		if (head >= pdc->dma_size) {
1544 			pdc->ofs = 0;
1545 			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1546 			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1547 
1548 			rx_idx = !rx_idx;
1549 			atmel_port->pdc_rx_idx = rx_idx;
1550 		}
1551 	} while (head >= pdc->dma_size);
1552 
1553 	/*
1554 	 * Drop the lock here since it might end up calling
1555 	 * uart_start(), which takes the lock.
1556 	 */
1557 	spin_unlock(&port->lock);
1558 	tty_flip_buffer_push(tport);
1559 	spin_lock(&port->lock);
1560 
1561 	atmel_uart_writel(port, ATMEL_US_IER,
1562 			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1563 }
1564 
1565 static int atmel_prepare_rx_pdc(struct uart_port *port)
1566 {
1567 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1568 	int i;
1569 
1570 	for (i = 0; i < 2; i++) {
1571 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1572 
1573 		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1574 		if (pdc->buf == NULL) {
1575 			if (i != 0) {
1576 				dma_unmap_single(port->dev,
1577 					atmel_port->pdc_rx[0].dma_addr,
1578 					PDC_BUFFER_SIZE,
1579 					DMA_FROM_DEVICE);
1580 				kfree(atmel_port->pdc_rx[0].buf);
1581 			}
1582 			atmel_port->use_pdc_rx = 0;
1583 			return -ENOMEM;
1584 		}
1585 		pdc->dma_addr = dma_map_single(port->dev,
1586 						pdc->buf,
1587 						PDC_BUFFER_SIZE,
1588 						DMA_FROM_DEVICE);
1589 		pdc->dma_size = PDC_BUFFER_SIZE;
1590 		pdc->ofs = 0;
1591 	}
1592 
1593 	atmel_port->pdc_rx_idx = 0;
1594 
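	/*
	 * Arm both PDC buffers: RPR/RCR describe the buffer currently being
	 * filled, RNPR/RNCR the one the controller switches to once the first
	 * is full (consumed again in atmel_rx_from_pdc()).
	 */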
1595 	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1596 	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1597 
1598 	atmel_uart_writel(port, ATMEL_PDC_RNPR,
1599 			  atmel_port->pdc_rx[1].dma_addr);
1600 	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1601 
1602 	return 0;
1603 }
1604 
1605 /*
1606  * tasklet handling tty stuff outside the interrupt handler.
1607  */
1608 static void atmel_tasklet_func(unsigned long data)
1609 {
1610 	struct uart_port *port = (struct uart_port *)data;
1611 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1612 	unsigned int status = atmel_port->irq_status;
1613 	unsigned int status_change = atmel_port->status_change;
1614 
1615 	/* The interrupt handler does not take the lock */
1616 	spin_lock(&port->lock);
1617 
1618 	atmel_port->schedule_tx(port);
1619 
1620 	if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1621 				| ATMEL_US_DCD | ATMEL_US_CTS)) {
1622 		/* TODO: All reads to CSR will clear these interrupts! */
1623 		if (status_change & ATMEL_US_RI)
1624 			port->icount.rng++;
1625 		if (status_change & ATMEL_US_DSR)
1626 			port->icount.dsr++;
1627 		if (status_change & ATMEL_US_DCD)
1628 			uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1629 		if (status_change & ATMEL_US_CTS)
1630 			uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1631 
1632 		wake_up_interruptible(&port->state->port.delta_msr_wait);
1633 
1634 		atmel_port->status_change = 0;
1635 	}
1636 
1637 	atmel_port->schedule_rx(port);
1638 
1639 	spin_unlock(&port->lock);
1640 }
1641 
1642 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1643 				struct platform_device *pdev)
1644 {
1645 	struct device_node *np = pdev->dev.of_node;
1646 	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
1647 
1648 	if (np) {
1649 		/* DMA/PDC usage specification */
1650 		if (of_get_property(np, "atmel,use-dma-rx", NULL)) {
1651 			if (of_get_property(np, "dmas", NULL)) {
1652 				atmel_port->use_dma_rx  = true;
1653 				atmel_port->use_pdc_rx  = false;
1654 			} else {
1655 				atmel_port->use_dma_rx  = false;
1656 				atmel_port->use_pdc_rx  = true;
1657 			}
1658 		} else {
1659 			atmel_port->use_dma_rx  = false;
1660 			atmel_port->use_pdc_rx  = false;
1661 		}
1662 
1663 		if (of_get_property(np, "atmel,use-dma-tx", NULL)) {
1664 			if (of_get_property(np, "dmas", NULL)) {
1665 				atmel_port->use_dma_tx  = true;
1666 				atmel_port->use_pdc_tx  = false;
1667 			} else {
1668 				atmel_port->use_dma_tx  = false;
1669 				atmel_port->use_pdc_tx  = true;
1670 			}
1671 		} else {
1672 			atmel_port->use_dma_tx  = false;
1673 			atmel_port->use_pdc_tx  = false;
1674 		}
1675 
1676 	} else {
1677 		atmel_port->use_pdc_rx  = pdata->use_dma_rx;
1678 		atmel_port->use_pdc_tx  = pdata->use_dma_tx;
1679 		atmel_port->use_dma_rx  = false;
1680 		atmel_port->use_dma_tx  = false;
1681 	}
1682 
1683 }
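/*
 * For reference (illustrative only): a device tree node carrying both
 * "atmel,use-dma-rx" and a "dmas" phandle ends up on the dmaengine path
 * (use_dma_rx = true), while "atmel,use-dma-rx" without "dmas" selects the
 * legacy PDC path; without "atmel,use-dma-rx" reception falls back to PIO.
 */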
1684 
1685 static void atmel_init_rs485(struct uart_port *port,
1686 				struct platform_device *pdev)
1687 {
1688 	struct device_node *np = pdev->dev.of_node;
1689 	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
1690 
1691 	if (np) {
1692 		struct serial_rs485 *rs485conf = &port->rs485;
1693 		u32 rs485_delay[2];
1694 		/* rs485 properties */
1695 		if (of_property_read_u32_array(np, "rs485-rts-delay",
1696 					rs485_delay, 2) == 0) {
1697 			rs485conf->delay_rts_before_send = rs485_delay[0];
1698 			rs485conf->delay_rts_after_send = rs485_delay[1];
1699 			rs485conf->flags = 0;
1700 		}
1701 
1702 		if (of_get_property(np, "rs485-rx-during-tx", NULL))
1703 			rs485conf->flags |= SER_RS485_RX_DURING_TX;
1704 
1705 		if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
1706 								NULL))
1707 			rs485conf->flags |= SER_RS485_ENABLED;
1708 	} else {
1709 		port->rs485       = pdata->rs485;
1710 	}
1711 
1712 }
1713 
1714 static void atmel_set_ops(struct uart_port *port)
1715 {
1716 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1717 
1718 	if (atmel_use_dma_rx(port)) {
1719 		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1720 		atmel_port->schedule_rx = &atmel_rx_from_dma;
1721 		atmel_port->release_rx = &atmel_release_rx_dma;
1722 	} else if (atmel_use_pdc_rx(port)) {
1723 		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1724 		atmel_port->schedule_rx = &atmel_rx_from_pdc;
1725 		atmel_port->release_rx = &atmel_release_rx_pdc;
1726 	} else {
1727 		atmel_port->prepare_rx = NULL;
1728 		atmel_port->schedule_rx = &atmel_rx_from_ring;
1729 		atmel_port->release_rx = NULL;
1730 	}
1731 
1732 	if (atmel_use_dma_tx(port)) {
1733 		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1734 		atmel_port->schedule_tx = &atmel_tx_dma;
1735 		atmel_port->release_tx = &atmel_release_tx_dma;
1736 	} else if (atmel_use_pdc_tx(port)) {
1737 		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1738 		atmel_port->schedule_tx = &atmel_tx_pdc;
1739 		atmel_port->release_tx = &atmel_release_tx_pdc;
1740 	} else {
1741 		atmel_port->prepare_tx = NULL;
1742 		atmel_port->schedule_tx = &atmel_tx_chars;
1743 		atmel_port->release_tx = NULL;
1744 	}
1745 }
1746 
1747 /*
1748  * Get ip name usart or uart
1749  */
1750 static void atmel_get_ip_name(struct uart_port *port)
1751 {
1752 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1753 	int name = atmel_uart_readl(port, ATMEL_US_NAME);
1754 	u32 version;
1755 	int usart, uart;
1756 	/* ASCII of "USAR" and "DBGU" as read back from the NAME register */
1757 	usart = 0x55534152;
1758 	uart = 0x44424755;
1759 
1760 	atmel_port->is_usart = false;
1761 
1762 	if (name == usart) {
1763 		dev_dbg(port->dev, "This is usart\n");
1764 		atmel_port->is_usart = true;
1765 	} else if (name == uart) {
1766 		dev_dbg(port->dev, "This is uart\n");
1767 		atmel_port->is_usart = false;
1768 	} else {
1769 		/* fallback for older SoCs: use version field */
1770 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1771 		switch (version) {
1772 		case 0x302:
1773 		case 0x10213:
1774 			dev_dbg(port->dev, "This version is usart\n");
1775 			atmel_port->is_usart = true;
1776 			break;
1777 		case 0x203:
1778 		case 0x10202:
1779 			dev_dbg(port->dev, "This version is uart\n");
1780 			atmel_port->is_usart = false;
1781 			break;
1782 		default:
1783 			dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
1784 		}
1785 	}
1786 }
1787 
1788 static void atmel_free_gpio_irq(struct uart_port *port)
1789 {
1790 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1791 	enum mctrl_gpio_idx i;
1792 
1793 	for (i = 0; i < UART_GPIO_MAX; i++)
1794 		if (atmel_port->gpio_irq[i] >= 0)
1795 			free_irq(atmel_port->gpio_irq[i], port);
1796 }
1797 
1798 static int atmel_request_gpio_irq(struct uart_port *port)
1799 {
1800 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1801 	int *irq = atmel_port->gpio_irq;
1802 	enum mctrl_gpio_idx i;
1803 	int err = 0;
1804 
1805 	for (i = 0; (i < UART_GPIO_MAX) && !err; i++) {
1806 		if (irq[i] < 0)
1807 			continue;
1808 
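		/*
		 * IRQ_NOAUTOEN keeps the line masked after request_irq();
		 * it is only unmasked later, when modem-status interrupts
		 * are actually wanted (see atmel_enable_ms()).
		 */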
1809 		irq_set_status_flags(irq[i], IRQ_NOAUTOEN);
1810 		err = request_irq(irq[i], atmel_interrupt, IRQ_TYPE_EDGE_BOTH,
1811 				  "atmel_serial", port);
1812 		if (err)
1813 			dev_err(port->dev, "atmel_startup - Can't get %d irq\n",
1814 				irq[i]);
1815 	}
1816 
1817 	/*
1818 	 * If something went wrong, rollback.
1819 	 */
1820 	while (err && (--i >= 0))
1821 		if (irq[i] >= 0)
1822 			free_irq(irq[i], port);
1823 
1824 	return err;
1825 }
1826 
1827 /*
1828  * Perform initialization and enable port for reception
1829  */
1830 static int atmel_startup(struct uart_port *port)
1831 {
1832 	struct platform_device *pdev = to_platform_device(port->dev);
1833 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1834 	struct tty_struct *tty = port->state->port.tty;
1835 	int retval;
1836 
1837 	/*
1838 	 * Ensure that no interrupts are enabled; otherwise, when
1839 	 * request_irq() is called, we could get stuck trying to
1840 	 * handle an unexpected interrupt.
1841 	 */
1842 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1843 	atmel_port->ms_irq_enabled = false;
1844 
1845 	/*
1846 	 * Allocate the IRQ
1847 	 */
1848 	retval = request_irq(port->irq, atmel_interrupt,
1849 			IRQF_SHARED | IRQF_COND_SUSPEND,
1850 			tty ? tty->name : "atmel_serial", port);
1851 	if (retval) {
1852 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
1853 		return retval;
1854 	}
1855 
1856 	/*
1857 	 * Get the GPIO lines IRQ
1858 	 */
1859 	retval = atmel_request_gpio_irq(port);
1860 	if (retval)
1861 		goto free_irq;
1862 
1863 	tasklet_enable(&atmel_port->tasklet);
1864 
1865 	/*
1866 	 * Initialize DMA (if necessary)
1867 	 */
1868 	atmel_init_property(atmel_port, pdev);
1869 	atmel_set_ops(port);
1870 
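	/*
	 * If DMA/PDC preparation fails, the prepare handler clears the
	 * corresponding use_dma/use_pdc flag, so calling atmel_set_ops()
	 * again drops back to the next available transfer mode.
	 */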
1871 	if (atmel_port->prepare_rx) {
1872 		retval = atmel_port->prepare_rx(port);
1873 		if (retval < 0)
1874 			atmel_set_ops(port);
1875 	}
1876 
1877 	if (atmel_port->prepare_tx) {
1878 		retval = atmel_port->prepare_tx(port);
1879 		if (retval < 0)
1880 			atmel_set_ops(port);
1881 	}
1882 
1883 	/*
1884 	 * Enable FIFO when available
1885 	 */
1886 	if (atmel_port->fifo_size) {
1887 		unsigned int txrdym = ATMEL_US_ONE_DATA;
1888 		unsigned int rxrdym = ATMEL_US_ONE_DATA;
1889 		unsigned int fmr;
1890 
1891 		atmel_uart_writel(port, ATMEL_US_CR,
1892 				  ATMEL_US_FIFOEN |
1893 				  ATMEL_US_RXFCLR |
1894 				  ATMEL_US_TXFLCLR);
1895 
1896 		if (atmel_use_dma_tx(port))
1897 			txrdym = ATMEL_US_FOUR_DATA;
1898 
1899 		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
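		/*
		 * With both thresholds set, let the FIFO drive the RTS pin:
		 * RTS is deasserted once the RX FIFO level reaches rts_high
		 * and reasserted when it drops back to rts_low.
		 */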
1900 		if (atmel_port->rts_high &&
1901 		    atmel_port->rts_low)
1902 			fmr |=	ATMEL_US_FRTSC |
1903 				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1904 				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1905 
1906 		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1907 	}
1908 
1909 	/* Save current CSR for comparison in atmel_tasklet_func() */
1910 	atmel_port->irq_status_prev = atmel_get_lines_status(port);
1911 	atmel_port->irq_status = atmel_port->irq_status_prev;
1912 
1913 	/*
1914 	 * Finally, enable the serial port
1915 	 */
1916 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1917 	/* enable xmit & rcvr */
1918 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1919 
1920 	setup_timer(&atmel_port->uart_timer,
1921 			atmel_uart_timer_callback,
1922 			(unsigned long)port);
1923 
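	/*
	 * Plain UART IPs (e.g. the DBGU) lack the receiver time-out feature,
	 * so a kernel timer periodically polls for received data instead of
	 * relying on the USART-only RTOR/TIMEOUT mechanism.
	 */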
1924 	if (atmel_use_pdc_rx(port)) {
1925 		/* set UART timeout */
1926 		if (!atmel_port->is_usart) {
1927 			mod_timer(&atmel_port->uart_timer,
1928 					jiffies + uart_poll_timeout(port));
1929 		/* set USART timeout */
1930 		} else {
1931 			atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
1932 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1933 
1934 			atmel_uart_writel(port, ATMEL_US_IER,
1935 					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1936 		}
1937 		/* enable PDC controller */
1938 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1939 	} else if (atmel_use_dma_rx(port)) {
1940 		/* set UART timeout */
1941 		if (!atmel_port->is_usart) {
1942 			mod_timer(&atmel_port->uart_timer,
1943 					jiffies + uart_poll_timeout(port));
1944 		/* set USART timeout */
1945 		} else {
1946 			atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
1947 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1948 
1949 			atmel_uart_writel(port, ATMEL_US_IER,
1950 					  ATMEL_US_TIMEOUT);
1951 		}
1952 	} else {
1953 		/* enable receive only */
1954 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
1955 	}
1956 
1957 	return 0;
1958 
1959 free_irq:
1960 	free_irq(port->irq, port);
1961 
1962 	return retval;
1963 }
1964 
1965 /*
1966  * Flush any TX data submitted for DMA. Called when the TX circular
1967  * buffer is reset.
1968  */
1969 static void atmel_flush_buffer(struct uart_port *port)
1970 {
1971 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1972 
1973 	if (atmel_use_pdc_tx(port)) {
1974 		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1975 		atmel_port->pdc_tx.ofs = 0;
1976 	}
1977 }
1978 
1979 /*
1980  * Disable the port
1981  */
1982 static void atmel_shutdown(struct uart_port *port)
1983 {
1984 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1985 
1986 	/*
1987 	 * Prevent any tasklets being scheduled during
1988 	 * cleanup
1989 	 */
1990 	del_timer_sync(&atmel_port->uart_timer);
1991 
1992 	/*
1993 	 * Clear out any scheduled tasklets before
1994 	 * we destroy the buffers
1995 	 */
1996 	tasklet_disable(&atmel_port->tasklet);
1997 	tasklet_kill(&atmel_port->tasklet);
1998 
1999 	/*
2000 	 * Ensure everything is stopped and
2001 	 * disable all interrupts, port and break condition.
2002 	 */
2003 	atmel_stop_rx(port);
2004 	atmel_stop_tx(port);
2005 
2006 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
2007 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2008 
2009 
2010 	/*
2011 	 * Shut-down the DMA.
2012 	 */
2013 	if (atmel_port->release_rx)
2014 		atmel_port->release_rx(port);
2015 	if (atmel_port->release_tx)
2016 		atmel_port->release_tx(port);
2017 
2018 	/*
2019 	 * Reset ring buffer pointers
2020 	 */
2021 	atmel_port->rx_ring.head = 0;
2022 	atmel_port->rx_ring.tail = 0;
2023 
2024 	/*
2025 	 * Free the interrupts
2026 	 */
2027 	free_irq(port->irq, port);
2028 	atmel_free_gpio_irq(port);
2029 
2030 	atmel_port->ms_irq_enabled = false;
2031 
2032 	atmel_flush_buffer(port);
2033 }
2034 
2035 /*
2036  * Power / Clock management.
2037  */
2038 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
2039 			    unsigned int oldstate)
2040 {
2041 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2042 
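	/* state values follow enum uart_pm_state: 0 is powered on, 3 is powered off */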
2043 	switch (state) {
2044 	case 0:
2045 		/*
2046 		 * Enable the peripheral clock for this serial port.
2047 		 * This is called on uart_open() or a resume event.
2048 		 */
2049 		clk_prepare_enable(atmel_port->clk);
2050 
2051 		/* re-enable interrupts if we disabled some on suspend */
2052 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
2053 		break;
2054 	case 3:
2055 		/* Back up the interrupt mask and disable all interrupts */
2056 		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
2057 		atmel_uart_writel(port, ATMEL_US_IDR, -1);
2058 
2059 		/*
2060 		 * Disable the peripheral clock for this serial port.
2061 		 * This is called on uart_close() or a suspend event.
2062 		 */
2063 		clk_disable_unprepare(atmel_port->clk);
2064 		break;
2065 	default:
2066 		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
2067 	}
2068 }
2069 
2070 /*
2071  * Change the port parameters
2072  */
2073 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2074 			      struct ktermios *old)
2075 {
2076 	unsigned long flags;
2077 	unsigned int old_mode, mode, imr, quot, baud;
2078 
2079 	/* save the current mode register */
2080 	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2081 
2082 	/* reset the mode, clock divisor, parity, stop bits and data size */
2083 	mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
2084 		  ATMEL_US_PAR | ATMEL_US_USMODE);
2085 
2086 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2087 	quot = uart_get_divisor(port, baud);
2088 
2089 	if (quot > 65535) {	/* BRGR is 16-bit, so switch to slower clock */
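		/*
		 * Dividing the divisor by 8 while selecting MCK/8 as the baud
		 * rate clock keeps the resulting baud rate unchanged and makes
		 * the value fit into the 16-bit BRGR field.
		 */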
2090 		quot /= 8;
2091 		mode |= ATMEL_US_USCLKS_MCK_DIV8;
2092 	}
2093 
2094 	/* byte size */
2095 	switch (termios->c_cflag & CSIZE) {
2096 	case CS5:
2097 		mode |= ATMEL_US_CHRL_5;
2098 		break;
2099 	case CS6:
2100 		mode |= ATMEL_US_CHRL_6;
2101 		break;
2102 	case CS7:
2103 		mode |= ATMEL_US_CHRL_7;
2104 		break;
2105 	default:
2106 		mode |= ATMEL_US_CHRL_8;
2107 		break;
2108 	}
2109 
2110 	/* stop bits */
2111 	if (termios->c_cflag & CSTOPB)
2112 		mode |= ATMEL_US_NBSTOP_2;
2113 
2114 	/* parity */
2115 	if (termios->c_cflag & PARENB) {
2116 		/* Mark or Space parity */
2117 		if (termios->c_cflag & CMSPAR) {
2118 			if (termios->c_cflag & PARODD)
2119 				mode |= ATMEL_US_PAR_MARK;
2120 			else
2121 				mode |= ATMEL_US_PAR_SPACE;
2122 		} else if (termios->c_cflag & PARODD)
2123 			mode |= ATMEL_US_PAR_ODD;
2124 		else
2125 			mode |= ATMEL_US_PAR_EVEN;
2126 	} else
2127 		mode |= ATMEL_US_PAR_NONE;
2128 
2129 	spin_lock_irqsave(&port->lock, flags);
2130 
2131 	port->read_status_mask = ATMEL_US_OVRE;
2132 	if (termios->c_iflag & INPCK)
2133 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2134 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2135 		port->read_status_mask |= ATMEL_US_RXBRK;
2136 
2137 	if (atmel_use_pdc_rx(port))
2138 		/* need to enable error interrupts */
2139 		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2140 
2141 	/*
2142 	 * Characters to ignore
2143 	 */
2144 	port->ignore_status_mask = 0;
2145 	if (termios->c_iflag & IGNPAR)
2146 		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2147 	if (termios->c_iflag & IGNBRK) {
2148 		port->ignore_status_mask |= ATMEL_US_RXBRK;
2149 		/*
2150 		 * If we're ignoring parity and break indicators,
2151 		 * ignore overruns too (for real raw support).
2152 		 */
2153 		if (termios->c_iflag & IGNPAR)
2154 			port->ignore_status_mask |= ATMEL_US_OVRE;
2155 	}
2156 	/* TODO: Ignore all characters if CREAD is not set. */
2157 
2158 	/* update the per-port timeout */
2159 	uart_update_timeout(port, termios->c_cflag, baud);
2160 
2161 	/*
2162 	 * save/disable interrupts. The tty layer will ensure that the
2163 	 * transmitter is empty if requested by the caller, so there's
2164 	 * no need to wait for it here.
2165 	 */
2166 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2167 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2168 
2169 	/* disable receiver and transmitter */
2170 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2171 
2172 	/* mode */
2173 	if (port->rs485.flags & SER_RS485_ENABLED) {
2174 		atmel_uart_writel(port, ATMEL_US_TTGR,
2175 				  port->rs485.delay_rts_after_send);
2176 		mode |= ATMEL_US_USMODE_RS485;
2177 	} else if (termios->c_cflag & CRTSCTS) {
2178 		/* RS232 with hardware handshake (RTS/CTS) */
2179 		if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
2180 			dev_info(port->dev, "not enabling hardware flow control because DMA is used\n");
2181 			termios->c_cflag &= ~CRTSCTS;
2182 		} else {
2183 			mode |= ATMEL_US_USMODE_HWHS;
2184 		}
2185 	} else {
2186 		/* RS232 without hardware handshake */
2187 		mode |= ATMEL_US_USMODE_NORMAL;
2188 	}
2189 
2190 	/* set the mode, clock divisor, parity, stop bits and data size */
2191 	atmel_uart_writel(port, ATMEL_US_MR, mode);
2192 
2193 	/*
2194 	 * when switching the mode, set the RTS line state according to the
2195 	 * new mode, otherwise keep the former state
2196 	 */
2197 	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2198 		unsigned int rts_state;
2199 
2200 		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2201 			/* let the hardware control the RTS line */
2202 			rts_state = ATMEL_US_RTSDIS;
2203 		} else {
2204 			/* force RTS line to low level */
2205 			rts_state = ATMEL_US_RTSEN;
2206 		}
2207 
2208 		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2209 	}
2210 
2211 	/* set the baud rate */
2212 	atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2213 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2214 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2215 
2216 	/* restore interrupts */
2217 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2218 
2219 	/* CTS flow-control and modem-status interrupts */
2220 	if (UART_ENABLE_MS(port, termios->c_cflag))
2221 		atmel_enable_ms(port);
2222 	else
2223 		atmel_disable_ms(port);
2224 
2225 	spin_unlock_irqrestore(&port->lock, flags);
2226 }
2227 
2228 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2229 {
2230 	if (termios->c_line == N_PPS) {
2231 		port->flags |= UPF_HARDPPS_CD;
2232 		spin_lock_irq(&port->lock);
2233 		atmel_enable_ms(port);
2234 		spin_unlock_irq(&port->lock);
2235 	} else {
2236 		port->flags &= ~UPF_HARDPPS_CD;
2237 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2238 			spin_lock_irq(&port->lock);
2239 			atmel_disable_ms(port);
2240 			spin_unlock_irq(&port->lock);
2241 		}
2242 	}
2243 }
2244 
2245 /*
2246  * Return string describing the specified port
2247  */
2248 static const char *atmel_type(struct uart_port *port)
2249 {
2250 	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2251 }
2252 
2253 /*
2254  * Release the memory region(s) being used by 'port'.
2255  */
2256 static void atmel_release_port(struct uart_port *port)
2257 {
2258 	struct platform_device *pdev = to_platform_device(port->dev);
2259 	int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2260 
2261 	release_mem_region(port->mapbase, size);
2262 
2263 	if (port->flags & UPF_IOREMAP) {
2264 		iounmap(port->membase);
2265 		port->membase = NULL;
2266 	}
2267 }
2268 
2269 /*
2270  * Request the memory region(s) being used by 'port'.
2271  */
2272 static int atmel_request_port(struct uart_port *port)
2273 {
2274 	struct platform_device *pdev = to_platform_device(port->dev);
2275 	int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2276 
2277 	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2278 		return -EBUSY;
2279 
2280 	if (port->flags & UPF_IOREMAP) {
2281 		port->membase = ioremap(port->mapbase, size);
2282 		if (port->membase == NULL) {
2283 			release_mem_region(port->mapbase, size);
2284 			return -ENOMEM;
2285 		}
2286 	}
2287 
2288 	return 0;
2289 }
2290 
2291 /*
2292  * Configure/autoconfigure the port.
2293  */
2294 static void atmel_config_port(struct uart_port *port, int flags)
2295 {
2296 	if (flags & UART_CONFIG_TYPE) {
2297 		port->type = PORT_ATMEL;
2298 		atmel_request_port(port);
2299 	}
2300 }
2301 
2302 /*
2303  * Verify the new serial_struct (for TIOCSSERIAL).
2304  */
2305 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2306 {
2307 	int ret = 0;
2308 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2309 		ret = -EINVAL;
2310 	if (port->irq != ser->irq)
2311 		ret = -EINVAL;
2312 	if (ser->io_type != SERIAL_IO_MEM)
2313 		ret = -EINVAL;
2314 	if (port->uartclk / 16 != ser->baud_base)
2315 		ret = -EINVAL;
2316 	if (port->mapbase != (unsigned long)ser->iomem_base)
2317 		ret = -EINVAL;
2318 	if (port->iobase != ser->port)
2319 		ret = -EINVAL;
2320 	if (ser->hub6 != 0)
2321 		ret = -EINVAL;
2322 	return ret;
2323 }
2324 
2325 #ifdef CONFIG_CONSOLE_POLL
2326 static int atmel_poll_get_char(struct uart_port *port)
2327 {
2328 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2329 		cpu_relax();
2330 
2331 	return atmel_uart_read_char(port);
2332 }
2333 
2334 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2335 {
2336 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2337 		cpu_relax();
2338 
2339 	atmel_uart_write_char(port, ch);
2340 }
2341 #endif
2342 
2343 static struct uart_ops atmel_pops = {
2344 	.tx_empty	= atmel_tx_empty,
2345 	.set_mctrl	= atmel_set_mctrl,
2346 	.get_mctrl	= atmel_get_mctrl,
2347 	.stop_tx	= atmel_stop_tx,
2348 	.start_tx	= atmel_start_tx,
2349 	.stop_rx	= atmel_stop_rx,
2350 	.enable_ms	= atmel_enable_ms,
2351 	.break_ctl	= atmel_break_ctl,
2352 	.startup	= atmel_startup,
2353 	.shutdown	= atmel_shutdown,
2354 	.flush_buffer	= atmel_flush_buffer,
2355 	.set_termios	= atmel_set_termios,
2356 	.set_ldisc	= atmel_set_ldisc,
2357 	.type		= atmel_type,
2358 	.release_port	= atmel_release_port,
2359 	.request_port	= atmel_request_port,
2360 	.config_port	= atmel_config_port,
2361 	.verify_port	= atmel_verify_port,
2362 	.pm		= atmel_serial_pm,
2363 #ifdef CONFIG_CONSOLE_POLL
2364 	.poll_get_char	= atmel_poll_get_char,
2365 	.poll_put_char	= atmel_poll_put_char,
2366 #endif
2367 };
2368 
2369 /*
2370  * Configure the port from the platform device resource info.
2371  */
2372 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2373 				      struct platform_device *pdev)
2374 {
2375 	int ret;
2376 	struct uart_port *port = &atmel_port->uart;
2377 	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
2378 
2379 	atmel_init_property(atmel_port, pdev);
2380 	atmel_set_ops(port);
2381 
2382 	atmel_init_rs485(port, pdev);
2383 
2384 	port->iotype		= UPIO_MEM;
2385 	port->flags		= UPF_BOOT_AUTOCONF;
2386 	port->ops		= &atmel_pops;
2387 	port->fifosize		= 1;
2388 	port->dev		= &pdev->dev;
2389 	port->mapbase	= pdev->resource[0].start;
2390 	port->irq	= pdev->resource[1].start;
2391 	port->rs485_config	= atmel_config_rs485;
2392 
2393 	tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
2394 			(unsigned long)port);
2395 	tasklet_disable(&atmel_port->tasklet);
2396 
2397 	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2398 
2399 	if (pdata && pdata->regs) {
2400 		/* Already mapped by setup code */
2401 		port->membase = pdata->regs;
2402 	} else {
2403 		port->flags	|= UPF_IOREMAP;
2404 		port->membase	= NULL;
2405 	}
2406 
2407 	/* for console, the clock could already be configured */
2408 	if (!atmel_port->clk) {
2409 		atmel_port->clk = clk_get(&pdev->dev, "usart");
2410 		if (IS_ERR(atmel_port->clk)) {
2411 			ret = PTR_ERR(atmel_port->clk);
2412 			atmel_port->clk = NULL;
2413 			return ret;
2414 		}
2415 		ret = clk_prepare_enable(atmel_port->clk);
2416 		if (ret) {
2417 			clk_put(atmel_port->clk);
2418 			atmel_port->clk = NULL;
2419 			return ret;
2420 		}
2421 		port->uartclk = clk_get_rate(atmel_port->clk);
2422 		clk_disable_unprepare(atmel_port->clk);
2423 		/* only enable clock when USART is in use */
2424 	}
2425 
2426 	/* TX-done interrupt: TXEMPTY for RS485, ENDTX|TXBUFE for PDC, TXRDY otherwise */
2427 	if (port->rs485.flags & SER_RS485_ENABLED)
2428 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2429 	else if (atmel_use_pdc_tx(port)) {
2430 		port->fifosize = PDC_BUFFER_SIZE;
2431 		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2432 	} else {
2433 		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2434 	}
2435 
2436 	return 0;
2437 }
2438 
2439 struct platform_device *atmel_default_console_device;	/* the serial console device */
2440 
2441 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2442 static void atmel_console_putchar(struct uart_port *port, int ch)
2443 {
2444 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2445 		cpu_relax();
2446 	atmel_uart_write_char(port, ch);
2447 }
2448 
2449 /*
2450  * Interrupts are disabled on entering
2451  */
2452 static void atmel_console_write(struct console *co, const char *s, u_int count)
2453 {
2454 	struct uart_port *port = &atmel_ports[co->index].uart;
2455 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2456 	unsigned int status, imr;
2457 	unsigned int pdc_tx;
2458 
2459 	/*
2460 	 * First, save IMR and then disable interrupts
2461 	 */
2462 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2463 	atmel_uart_writel(port, ATMEL_US_IDR,
2464 			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2465 
2466 	/* Store PDC transmit status and disable it */
2467 	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2468 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2469 
2470 	uart_console_write(port, s, count, atmel_console_putchar);
2471 
2472 	/*
2473 	 * Finally, wait for the transmitter to accept all data (TXRDY)
2474 	 * and restore IMR
2475 	 */
2476 	do {
2477 		status = atmel_uart_readl(port, ATMEL_US_CSR);
2478 	} while (!(status & ATMEL_US_TXRDY));
2479 
2480 	/* Restore PDC transmit status */
2481 	if (pdc_tx)
2482 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2483 
2484 	/* set interrupts back the way they were */
2485 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2486 }
2487 
2488 /*
2489  * If the port was already initialised (e.g. by a boot loader),
2490  * try to determine the current setup.
2491  */
2492 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2493 					     int *parity, int *bits)
2494 {
2495 	unsigned int mr, quot;
2496 
2497 	/*
2498 	 * If the baud rate generator isn't running, the port wasn't
2499 	 * initialized by the boot loader.
2500 	 */
2501 	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2502 	if (!quot)
2503 		return;
2504 
2505 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2506 	if (mr == ATMEL_US_CHRL_8)
2507 		*bits = 8;
2508 	else
2509 		*bits = 7;
2510 
2511 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2512 	if (mr == ATMEL_US_PAR_EVEN)
2513 		*parity = 'e';
2514 	else if (mr == ATMEL_US_PAR_ODD)
2515 		*parity = 'o';
2516 
2517 	/*
2518 	 * The serial core only rounds down when matching this to a
2519 	 * supported baud rate. Make sure we don't end up slightly
2520 	 * lower than one of those, as it would make us fall through
2521 	 * to a much lower baud rate than we really want.
2522 	 */
2523 	*baud = port->uartclk / (16 * (quot - 1));
2524 }
2525 
2526 static int __init atmel_console_setup(struct console *co, char *options)
2527 {
2528 	int ret;
2529 	struct uart_port *port = &atmel_ports[co->index].uart;
2530 	int baud = 115200;
2531 	int bits = 8;
2532 	int parity = 'n';
2533 	int flow = 'n';
2534 
2535 	if (port->membase == NULL) {
2536 		/* Port not initialized yet - delay setup */
2537 		return -ENODEV;
2538 	}
2539 
2540 	ret = clk_prepare_enable(atmel_ports[co->index].clk);
2541 	if (ret)
2542 		return ret;
2543 
2544 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2545 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2546 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2547 
2548 	if (options)
2549 		uart_parse_options(options, &baud, &parity, &bits, &flow);
2550 	else
2551 		atmel_console_get_options(port, &baud, &parity, &bits);
2552 
2553 	return uart_set_options(port, co, baud, parity, bits, flow);
2554 }
2555 
2556 static struct uart_driver atmel_uart;
2557 
2558 static struct console atmel_console = {
2559 	.name		= ATMEL_DEVICENAME,
2560 	.write		= atmel_console_write,
2561 	.device		= uart_console_device,
2562 	.setup		= atmel_console_setup,
2563 	.flags		= CON_PRINTBUFFER,
2564 	.index		= -1,
2565 	.data		= &atmel_uart,
2566 };
2567 
2568 #define ATMEL_CONSOLE_DEVICE	(&atmel_console)
2569 
2570 /*
2571  * Early console initialization (before VM subsystem initialized).
2572  */
2573 static int __init atmel_console_init(void)
2574 {
2575 	int ret;
2576 	if (atmel_default_console_device) {
2577 		struct atmel_uart_data *pdata =
2578 			dev_get_platdata(&atmel_default_console_device->dev);
2579 		int id = pdata->num;
2580 		struct atmel_uart_port *port = &atmel_ports[id];
2581 
2582 		port->backup_imr = 0;
2583 		port->uart.line = id;
2584 
2585 		add_preferred_console(ATMEL_DEVICENAME, id, NULL);
2586 		ret = atmel_init_port(port, atmel_default_console_device);
2587 		if (ret)
2588 			return ret;
2589 		register_console(&atmel_console);
2590 	}
2591 
2592 	return 0;
2593 }
2594 
2595 console_initcall(atmel_console_init);
2596 
2597 /*
2598  * Late console initialization.
2599  */
2600 static int __init atmel_late_console_init(void)
2601 {
2602 	if (atmel_default_console_device
2603 	    && !(atmel_console.flags & CON_ENABLED))
2604 		register_console(&atmel_console);
2605 
2606 	return 0;
2607 }
2608 
2609 core_initcall(atmel_late_console_init);
2610 
2611 static inline bool atmel_is_console_port(struct uart_port *port)
2612 {
2613 	return port->cons && port->cons->index == port->line;
2614 }
2615 
2616 #else
2617 #define ATMEL_CONSOLE_DEVICE	NULL
2618 
2619 static inline bool atmel_is_console_port(struct uart_port *port)
2620 {
2621 	return false;
2622 }
2623 #endif
2624 
2625 static struct uart_driver atmel_uart = {
2626 	.owner		= THIS_MODULE,
2627 	.driver_name	= "atmel_serial",
2628 	.dev_name	= ATMEL_DEVICENAME,
2629 	.major		= SERIAL_ATMEL_MAJOR,
2630 	.minor		= MINOR_START,
2631 	.nr		= ATMEL_MAX_UART,
2632 	.cons		= ATMEL_CONSOLE_DEVICE,
2633 };
2634 
2635 #ifdef CONFIG_PM
2636 static bool atmel_serial_clk_will_stop(void)
2637 {
2638 #ifdef CONFIG_ARCH_AT91
2639 	return at91_suspend_entering_slow_clock();
2640 #else
2641 	return false;
2642 #endif
2643 }
2644 
2645 static int atmel_serial_suspend(struct platform_device *pdev,
2646 				pm_message_t state)
2647 {
2648 	struct uart_port *port = platform_get_drvdata(pdev);
2649 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2650 
2651 	if (atmel_is_console_port(port) && console_suspend_enabled) {
2652 		/* Drain the TX shifter */
2653 		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2654 			 ATMEL_US_TXEMPTY))
2655 			cpu_relax();
2656 	}
2657 
2658 	/* we cannot wake up if we're running on the slow clock */
2659 	atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2660 	if (atmel_serial_clk_will_stop()) {
2661 		unsigned long flags;
2662 
2663 		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2664 		atmel_port->suspended = true;
2665 		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2666 		device_set_wakeup_enable(&pdev->dev, 0);
2667 	}
2668 
2669 	uart_suspend_port(&atmel_uart, port);
2670 
2671 	return 0;
2672 }
2673 
2674 static int atmel_serial_resume(struct platform_device *pdev)
2675 {
2676 	struct uart_port *port = platform_get_drvdata(pdev);
2677 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2678 	unsigned long flags;
2679 
2680 	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2681 	if (atmel_port->pending) {
2682 		atmel_handle_receive(port, atmel_port->pending);
2683 		atmel_handle_status(port, atmel_port->pending,
2684 				    atmel_port->pending_status);
2685 		atmel_handle_transmit(port, atmel_port->pending);
2686 		atmel_port->pending = 0;
2687 	}
2688 	atmel_port->suspended = false;
2689 	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2690 
2691 	uart_resume_port(&atmel_uart, port);
2692 	device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
2693 
2694 	return 0;
2695 }
2696 #else
2697 #define atmel_serial_suspend NULL
2698 #define atmel_serial_resume NULL
2699 #endif
2700 
2701 static int atmel_init_gpios(struct atmel_uart_port *p, struct device *dev)
2702 {
2703 	enum mctrl_gpio_idx i;
2704 	struct gpio_desc *gpiod;
2705 
2706 	p->gpios = mctrl_gpio_init_noauto(dev, 0);
2707 	if (IS_ERR(p->gpios))
2708 		return PTR_ERR(p->gpios);
2709 
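	/*
	 * Only modem-status inputs (CTS, DSR, DCD, RI) can raise an
	 * interrupt; output lines get -EINVAL here and are skipped by the
	 * request/free helpers above.
	 */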
2710 	for (i = 0; i < UART_GPIO_MAX; i++) {
2711 		gpiod = mctrl_gpio_to_gpiod(p->gpios, i);
2712 		if (gpiod && (gpiod_get_direction(gpiod) == GPIOF_DIR_IN))
2713 			p->gpio_irq[i] = gpiod_to_irq(gpiod);
2714 		else
2715 			p->gpio_irq[i] = -EINVAL;
2716 	}
2717 
2718 	return 0;
2719 }
2720 
2721 static void atmel_serial_probe_fifos(struct atmel_uart_port *port,
2722 				     struct platform_device *pdev)
2723 {
2724 	port->fifo_size = 0;
2725 	port->rts_low = 0;
2726 	port->rts_high = 0;
2727 
2728 	if (of_property_read_u32(pdev->dev.of_node,
2729 				 "atmel,fifo-size",
2730 				 &port->fifo_size))
2731 		return;
2732 
2733 	if (!port->fifo_size)
2734 		return;
2735 
2736 	if (port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2737 		port->fifo_size = 0;
2738 		dev_err(&pdev->dev, "Invalid FIFO size\n");
2739 		return;
2740 	}
2741 
2742 	/*
2743 	 * 0 <= rts_low <= rts_high <= fifo_size
2744 	 * Once their CTS line is asserted by the remote peer, some x86 UARTs tend
2745 	 * to flush their internal TX FIFO, commonly up to 16 data, before
2746 	 * actually stopping to send new data. So we try to set the RTS High
2747 	 * Threshold to a reasonably high value respecting this 16 data
2748 	 * empirical rule when possible.
2749 	 */
2750 	port->rts_high = max_t(int, port->fifo_size >> 1,
2751 			       port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2752 	port->rts_low  = max_t(int, port->fifo_size >> 2,
2753 			       port->fifo_size - ATMEL_RTS_LOW_OFFSET);
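	/*
	 * Example: with a 32-data RX FIFO this yields
	 * rts_high = max(16, 32 - 16) = 16 and rts_low = max(8, 32 - 20) = 12.
	 */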
2754 
2755 	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2756 		 port->fifo_size);
2757 	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2758 		port->rts_high);
2759 	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
2760 		port->rts_low);
2761 }
2762 
2763 static int atmel_serial_probe(struct platform_device *pdev)
2764 {
2765 	struct atmel_uart_port *port;
2766 	struct device_node *np = pdev->dev.of_node;
2767 	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
2768 	void *data;
2769 	int ret = -ENODEV;
2770 	bool rs485_enabled;
2771 
2772 	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2773 
2774 	if (np)
2775 		ret = of_alias_get_id(np, "serial");
2776 	else
2777 		if (pdata)
2778 			ret = pdata->num;
2779 
2780 	if (ret < 0)
2781 		/* port id not found in platform data or device-tree aliases:
2782 		 * auto-enumerate it */
2783 		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2784 
2785 	if (ret >= ATMEL_MAX_UART) {
2786 		ret = -ENODEV;
2787 		goto err;
2788 	}
2789 
2790 	if (test_and_set_bit(ret, atmel_ports_in_use)) {
2791 		/* port already in use */
2792 		ret = -EBUSY;
2793 		goto err;
2794 	}
2795 
2796 	port = &atmel_ports[ret];
2797 	port->backup_imr = 0;
2798 	port->uart.line = ret;
2799 	atmel_serial_probe_fifos(port, pdev);
2800 
2801 	spin_lock_init(&port->lock_suspended);
2802 
2803 	ret = atmel_init_gpios(port, &pdev->dev);
2804 	if (ret < 0) {
2805 		dev_err(&pdev->dev, "Failed to initialize GPIOs.");
2806 		goto err_clear_bit;
2807 	}
2808 
2809 	ret = atmel_init_port(port, pdev);
2810 	if (ret)
2811 		goto err_clear_bit;
2812 
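	/*
	 * PDC RX pushes data to the tty layer straight from its own DMA
	 * buffers, so the character ring buffer is only allocated for the
	 * other receive modes.
	 */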
2813 	if (!atmel_use_pdc_rx(&port->uart)) {
2814 		ret = -ENOMEM;
2815 		data = kmalloc(sizeof(struct atmel_uart_char)
2816 				* ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
2817 		if (!data)
2818 			goto err_alloc_ring;
2819 		port->rx_ring.buf = data;
2820 	}
2821 
2822 	rs485_enabled = port->uart.rs485.flags & SER_RS485_ENABLED;
2823 
2824 	ret = uart_add_one_port(&atmel_uart, &port->uart);
2825 	if (ret)
2826 		goto err_add_port;
2827 
2828 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2829 	if (atmel_is_console_port(&port->uart)
2830 			&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2831 		/*
2832 		 * The serial core enabled the clock for us, so undo
2833 		 * the clk_prepare_enable() in atmel_console_setup()
2834 		 */
2835 		clk_disable_unprepare(port->clk);
2836 	}
2837 #endif
2838 
2839 	device_init_wakeup(&pdev->dev, 1);
2840 	platform_set_drvdata(pdev, port);
2841 
2842 	/*
2843 	 * The peripheral clock has been disabled by atmel_init_port():
2844 	 * enable it before accessing I/O registers
2845 	 */
2846 	clk_prepare_enable(port->clk);
2847 
2848 	if (rs485_enabled) {
2849 		atmel_uart_writel(&port->uart, ATMEL_US_MR,
2850 				  ATMEL_US_USMODE_NORMAL);
2851 		atmel_uart_writel(&port->uart, ATMEL_US_CR, ATMEL_US_RTSEN);
2852 	}
2853 
2854 	/*
2855 	 * Get port name of usart or uart
2856 	 */
2857 	atmel_get_ip_name(&port->uart);
2858 
2859 	/*
2860 	 * The peripheral clock can now safely be disabled till the port
2861 	 * is used
2862 	 */
2863 	clk_disable_unprepare(port->clk);
2864 
2865 	return 0;
2866 
2867 err_add_port:
2868 	kfree(port->rx_ring.buf);
2869 	port->rx_ring.buf = NULL;
2870 err_alloc_ring:
2871 	if (!atmel_is_console_port(&port->uart)) {
2872 		clk_put(port->clk);
2873 		port->clk = NULL;
2874 	}
2875 err_clear_bit:
2876 	clear_bit(port->uart.line, atmel_ports_in_use);
2877 err:
2878 	return ret;
2879 }
2880 
2881 static int atmel_serial_remove(struct platform_device *pdev)
2882 {
2883 	struct uart_port *port = platform_get_drvdata(pdev);
2884 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2885 	int ret = 0;
2886 
2887 	tasklet_kill(&atmel_port->tasklet);
2888 
2889 	device_init_wakeup(&pdev->dev, 0);
2890 
2891 	ret = uart_remove_one_port(&atmel_uart, port);
2892 
2893 	kfree(atmel_port->rx_ring.buf);
2894 
2895 	/* "port" is allocated statically, so we shouldn't free it */
2896 
2897 	clear_bit(port->line, atmel_ports_in_use);
2898 
2899 	clk_put(atmel_port->clk);
2900 
2901 	return ret;
2902 }
2903 
2904 static struct platform_driver atmel_serial_driver = {
2905 	.probe		= atmel_serial_probe,
2906 	.remove		= atmel_serial_remove,
2907 	.suspend	= atmel_serial_suspend,
2908 	.resume		= atmel_serial_resume,
2909 	.driver		= {
2910 		.name	= "atmel_usart",
2911 		.of_match_table	= of_match_ptr(atmel_serial_dt_ids),
2912 	},
2913 };
2914 
2915 static int __init atmel_serial_init(void)
2916 {
2917 	int ret;
2918 
2919 	ret = uart_register_driver(&atmel_uart);
2920 	if (ret)
2921 		return ret;
2922 
2923 	ret = platform_driver_register(&atmel_serial_driver);
2924 	if (ret)
2925 		uart_unregister_driver(&atmel_uart);
2926 
2927 	return ret;
2928 }
2929 
2930 static void __exit atmel_serial_exit(void)
2931 {
2932 	platform_driver_unregister(&atmel_serial_driver);
2933 	uart_unregister_driver(&atmel_uart);
2934 }
2935 
2936 module_init(atmel_serial_init);
2937 module_exit(atmel_serial_exit);
2938 
2939 MODULE_AUTHOR("Rick Bronson");
2940 MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver");
2941 MODULE_LICENSE("GPL");
2942 MODULE_ALIAS("platform:atmel_usart");
2943