/*
 *  i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 *  Copyright (C) 2011 Weinmann Medical GmbH
 *  Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 *  Evolved from original work by:
 *  Copyright (C) 2004 Rick Bronson
 *  Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 *  Borrowed heavily from original work by:
 *  Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>

#define DEFAULT_TWI_CLK_HZ		100000		/* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT	msecs_to_jiffies(100)	/* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD	8			/* enable DMA if transfer size is bigger than this threshold */
#define AUTOSUSPEND_TIMEOUT		2000

/* AT91 TWI register definitions */
#define	AT91_TWI_CR		0x0000	/* Control Register */
#define	AT91_TWI_START		BIT(0)	/* Send a Start Condition */
#define	AT91_TWI_STOP		BIT(1)	/* Send a Stop Condition */
#define	AT91_TWI_MSEN		BIT(2)	/* Master Transfer Enable */
#define	AT91_TWI_MSDIS		BIT(3)	/* Master Transfer Disable */
#define	AT91_TWI_SVEN		BIT(4)	/* Slave Transfer Enable */
#define	AT91_TWI_SVDIS		BIT(5)	/* Slave Transfer Disable */
#define	AT91_TWI_QUICK		BIT(6)	/* SMBus quick command */
#define	AT91_TWI_SWRST		BIT(7)	/* Software Reset */
#define	AT91_TWI_ACMEN		BIT(16) /* Alternative Command Mode Enable */
#define	AT91_TWI_ACMDIS		BIT(17) /* Alternative Command Mode Disable */
#define	AT91_TWI_THRCLR		BIT(24) /* Transmit Holding Register Clear */
#define	AT91_TWI_RHRCLR		BIT(25) /* Receive Holding Register Clear */
#define	AT91_TWI_LOCKCLR	BIT(26) /* Lock Clear */
#define	AT91_TWI_FIFOEN		BIT(28) /* FIFO Enable */
#define	AT91_TWI_FIFODIS	BIT(29) /* FIFO Disable */

#define	AT91_TWI_MMR		0x0004	/* Master Mode Register */
#define	AT91_TWI_IADRSZ_1	0x0100	/* Internal Device Address Size */
#define	AT91_TWI_MREAD		BIT(12)	/* Master Read Direction */

#define	AT91_TWI_IADR		0x000c	/* Internal Address Register */

#define	AT91_TWI_CWGR		0x0010	/* Clock Waveform Generator Reg */

#define	AT91_TWI_SR		0x0020	/* Status Register */
#define	AT91_TWI_TXCOMP		BIT(0)	/* Transmission Complete */
#define	AT91_TWI_RXRDY		BIT(1)	/* Receive Holding Register Ready */
#define	AT91_TWI_TXRDY		BIT(2)	/* Transmit Holding Register Ready */
#define	AT91_TWI_OVRE		BIT(6)	/* Overrun Error */
#define	AT91_TWI_UNRE		BIT(7)	/* Underrun Error */
#define	AT91_TWI_NACK		BIT(8)	/* Not Acknowledged */
#define	AT91_TWI_LOCK		BIT(23) /* TWI Lock due to Frame Errors */

#define	AT91_TWI_INT_MASK \
	(AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)

#define	AT91_TWI_IER		0x0024	/* Interrupt Enable Register */
#define	AT91_TWI_IDR		0x0028	/* Interrupt Disable Register */
#define	AT91_TWI_IMR		0x002c	/* Interrupt Mask Register */
#define	AT91_TWI_RHR		0x0030	/* Receive Holding Register */
#define	AT91_TWI_THR		0x0034	/* Transmit Holding Register */

#define	AT91_TWI_ACR		0x0040	/* Alternative Command Register */
#define	AT91_TWI_ACR_DATAL(len)	((len) & 0xff)
#define	AT91_TWI_ACR_DIR	BIT(8)

#define	AT91_TWI_FMR		0x0050	/* FIFO Mode Register */
#define	AT91_TWI_FMR_TXRDYM(mode)	(((mode) & 0x3) << 0)
#define	AT91_TWI_FMR_TXRDYM_MASK	(0x3 << 0)
#define	AT91_TWI_FMR_RXRDYM(mode)	(((mode) & 0x3) << 4)
#define	AT91_TWI_FMR_RXRDYM_MASK	(0x3 << 4)
#define	AT91_TWI_ONE_DATA	0x0
#define	AT91_TWI_TWO_DATA	0x1
#define	AT91_TWI_FOUR_DATA	0x2

#define	AT91_TWI_FLR		0x0054	/* FIFO Level Register */

#define	AT91_TWI_FSR		0x0060	/* FIFO Status Register */
#define	AT91_TWI_FIER		0x0064	/* FIFO Interrupt Enable Register */
#define	AT91_TWI_FIDR		0x0068	/* FIFO Interrupt Disable Register */
#define	AT91_TWI_FIMR		0x006c	/* FIFO Interrupt Mask Register */

#define	AT91_TWI_VER		0x00fc	/* Version Register */

struct at91_twi_pdata {
	unsigned clk_max_div;
	unsigned clk_offset;
	bool has_unre_flag;
	bool has_alt_cmd;
	struct at_dma_slave dma_slave;
};

struct at91_twi_dma {
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor *data_desc;
	enum dma_data_direction direction;
	bool buf_mapped;
	bool xfer_in_progress;
};

struct at91_twi_dev {
	struct device *dev;
	void __iomem *base;
	struct completion cmd_complete;
	struct clk *clk;
	u8 *buf;
	size_t buf_len;
	struct i2c_msg *msg;
	int irq;
	unsigned imr;
	unsigned transfer_status;
	struct i2c_adapter adapter;
	unsigned twi_cwgr_reg;
	struct at91_twi_pdata *pdata;
	bool use_dma;
	bool recv_len_abort;
	u32 fifo_size;
	struct at91_twi_dma dma;
};

static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
{
	return readl_relaxed(dev->base + reg);
}

static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
{
	writel_relaxed(val, dev->base + reg);
}

static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
}

static void at91_twi_irq_save(struct at91_twi_dev *dev)
{
	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
	at91_disable_twi_interrupts(dev);
}

static void at91_twi_irq_restore(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IER, dev->imr);
}

static void at91_init_twi_bus(struct at91_twi_dev *dev)
{
	at91_disable_twi_interrupts(dev);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
}

/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
{
	int ckdiv, cdiv, div;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;

	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * twi_clk) - offset);
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv;
	dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
}
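
/*
 * Worked example (illustrative only; the 66 MHz peripheral clock rate is
 * just an assumed figure): with a requested twi_clk of 100 kHz and
 * clk_offset = 4, the algorithm above yields
 *	div   = DIV_ROUND_UP(66000000, 2 * 100000) - 4 = 326
 *	ckdiv = fls(326 >> 8) = 1
 *	cdiv  = 326 >> 1 = 163
 * so twi_clk = 66 MHz / (2 * (163 * (1 << 1) + 4)) = 100 kHz exactly.
 */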

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0)
		if (!dev->pdata->has_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, the THR/TX FIFO is likely not empty
	 * yet. So we have to wait for the TXCOMP or NACK bit to be set in the
	 * Status Register to be sure that the STOP bit has been sent and the
	 * transfer is complete. The NACK interrupt has already been enabled;
	 * we just have to enable the TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->pdata->has_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}
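
		/*
		 * Example (illustrative): for a 10-byte buffer, part1_len
		 * is 8 and part2_len is 2, so two sg entries are used: the
		 * first moved with 4-byte FIFO accesses, the second with
		 * single-byte accesses.
		 */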

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we get here, there is garbage data in the RHR, so read and
	 * discard it.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
					 dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}
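
	/*
	 * Example (illustrative): if the device reports a block length of
	 * 4, buf_len is raised from 0 to 4 and msg->len becomes 5 (the
	 * length byte plus four data bytes), so four more bytes will be
	 * read before the stop condition is sent.
	 */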

	/* send stop if the second-to-last byte has been read */
	if (!dev->pdata->has_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->pdata->has_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * read from the RX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird. There is some magic about the RXRDY flag! When a byte has
	 * almost been received, the reception of a new one is anticipated if
	 * there is no stop command to send. That is why we ask to send the
	 * stop command not on the last byte but on the second-to-last one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last byte. It might happen
	 * when the i2c slave device sends data too quickly after receiving the
	 * ack from the master. The byte has almost been received before the
	 * order to send the stop has been given. In this case, sending the
	 * stop command could cause a RXRDY interrupt together with a TXCOMP
	 * one. It is better to handle the RXRDY interrupt first so as not to
	 * keep garbage data in the Receive Holding Register for the next
	 * transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY)
		at91_twi_read_next_byte(dev);

	/*
	 * When a NACK condition is detected, the I2C controller sets the NACK,
	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfer.
	 *
	 * In such a case, we should not write the next byte into the Transmit
	 * Holding Register (THR), otherwise the I2C controller would start a
	 * new transfer and the I2C slave is likely to reply with another NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfer.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
	 * the DMA controller to write the next data into the THR. Then the
	 * result depends on the hardware version of the I2C controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write the
	 * next data into the THR, hence starting a new transfer: the I2C slave
	 * is likely to reply with another NACK.
	 * Concurrently, this interrupt handler is likely to be called to manage
	 * the first NACK before the I2C controller detects the second NACK and
	 * sets the NACK bit in the SR once again.
	 * When handling the first NACK, this interrupt handler disables the I2C
	 * controller interrupts, especially the NACK interrupt.
	 * Hence, the NACK bit is left pending in the SR. This is why we should
	 * read the SR to clear all pending interrupts at the beginning of
	 * at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks the
	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
	 * is triggered by the TXRDY bit to write the next data into the THR,
	 * this data actually won't go out on the I2C bus, hence a second NACK
	 * is not generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer's datasheet,
	 * TXCOMP is set when both the holding register and the internal
	 * shifter are empty and the STOP condition has been sent.
	 * Consequently, we should enable the NACK interrupt rather than the
	 * TXCOMP one to detect transmission failure.
	 * Indeed, let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together in the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but they will remain
	 * in the Transmit Holding Register, so the TXCOMP bit is cleared.
	 * Then when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but the NACK bit is still set. The
	 * driver manages the error properly, without waiting for timeout.
	 * This case can be reproduced easily when writing into an at24 eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable the
	 * TXCOMP interrupt. If the TXCOMP interrupt were enabled before
	 * writing into THR, the interrupt handler would be called immediately
	 * and the i2c command would be reported as completed.
	 * Also when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is
	 * enabled before or after the DMA has started to write into THR. So
	 * the TXCOMP interrupt is enabled later by
	 * at91_twi_write_data_dma_callback().
	 * Immediately after, in that DMA callback, if the alternative command
	 * mode is not used, we still need to send the STOP condition manually
	 * by writing the corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!has_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without alternative command mode, the last
		 * byte has to be read manually so that the stop command is
		 * not sent too late, which would cause extra data to be
		 * received.
		 * In practice, there are some issues if you use the dma to
		 * read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the last two manually seems
		 * to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_TXRDY);
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
					      dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}
	return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read, use_alt_cmd = false;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0) {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature.
 */
static struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
	.max_comb_1st_msg_len = 3,
};
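
/*
 * Illustrative sketch (not part of the driver): a combined transfer
 * accepted by these quirks, e.g. reading one byte from register 0x10 of
 * a hypothetical device at address 0x50:
 *
 *	u8 reg = 0x10, val;
 *	struct i2c_msg msgs[2] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *	};
 *	i2c_transfer(adapter, msgs, 2);
 *
 * at91_twi_xfer() then loads the first message (at most 3 bytes) into
 * the IADR register and performs the second as the actual transfer.
 */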

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer	= at91_twi_xfer,
	.functionality	= at91_twi_func,
};

static struct at91_twi_pdata at91rm9200_config = {
	.clk_max_div = 5,
	.clk_offset = 3,
	.has_unre_flag = true,
	.has_alt_cmd = false,
};

static struct at91_twi_pdata at91sam9261_config = {
	.clk_max_div = 5,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};

static struct at91_twi_pdata at91sam9260_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};

static struct at91_twi_pdata at91sam9g20_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};

static struct at91_twi_pdata at91sam9g10_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};

static const struct platform_device_id at91_twi_devtypes[] = {
	{
		.name = "i2c-at91rm9200",
		.driver_data = (unsigned long) &at91rm9200_config,
	}, {
		.name = "i2c-at91sam9261",
		.driver_data = (unsigned long) &at91sam9261_config,
	}, {
		.name = "i2c-at91sam9260",
		.driver_data = (unsigned long) &at91sam9260_config,
	}, {
		.name = "i2c-at91sam9g20",
		.driver_data = (unsigned long) &at91sam9g20_config,
	}, {
		.name = "i2c-at91sam9g10",
		.driver_data = (unsigned long) &at91sam9g10_config,
	}, {
		/* sentinel */
	}
};

#if defined(CONFIG_OF)
static struct at91_twi_pdata at91sam9x5_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};

static struct at91_twi_pdata sama5d2_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = true,
	.has_alt_cmd = true,
};

static const struct of_device_id atmel_twi_dt_ids[] = {
	{
		.compatible = "atmel,at91rm9200-i2c",
		.data = &at91rm9200_config,
	} , {
		.compatible = "atmel,at91sam9260-i2c",
		.data = &at91sam9260_config,
	} , {
		.compatible = "atmel,at91sam9261-i2c",
		.data = &at91sam9261_config,
	} , {
		.compatible = "atmel,at91sam9g20-i2c",
		.data = &at91sam9g20_config,
	} , {
		.compatible = "atmel,at91sam9g10-i2c",
		.data = &at91sam9g10_config,
	}, {
		.compatible = "atmel,at91sam9x5-i2c",
		.data = &at91sam9x5_config,
	}, {
		.compatible = "atmel,sama5d2-i2c",
		.data = &sama5d2_config,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#endif

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 data can be written into the TX
	 * FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
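
	/*
	 * Example (illustrative): a 6-byte DMA read is not 4-byte aligned,
	 * so at91_twi_read_data_dma() leaves RXRDYM at single-data accesses,
	 * whereas an 8-byte read switches it to 4-data accesses.
	 */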
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't use DMA, error %d\n", ret);
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}

static struct at91_twi_pdata *at91_twi_get_driver_data(
					struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
		if (!match)
			return NULL;
		return (struct at91_twi_pdata *)match->data;
	}
	return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
}

static int at91_twi_probe(struct platform_device *pdev)
{
	struct at91_twi_dev *dev;
	struct resource *mem;
	int rc;
	u32 phy_addr;
	u32 bus_clk_rate;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	init_completion(&dev->cmd_complete);
	dev->dev = &pdev->dev;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;
	phy_addr = mem->start;

	dev->pdata = at91_twi_get_driver_data(pdev);
	if (!dev->pdata)
		return -ENODEV;

	dev->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dev->base))
		return PTR_ERR(dev->base);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0)
		return dev->irq;

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			 dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

	platform_set_drvdata(pdev, dev);

	dev->clk = devm_clk_get(dev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(dev->dev, "no clock defined\n");
		return -ENODEV;
	}
	clk_prepare_enable(dev->clk);

	if (dev->dev->of_node) {
		rc = at91_twi_configure_dma(dev, phy_addr);
		if (rc == -EPROBE_DEFER)
			return rc;
	}

	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &dev->fifo_size)) {
		dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
	}

	rc = of_property_read_u32(dev->dev->of_node, "clock-frequency",
			&bus_clk_rate);
	if (rc)
		bus_clk_rate = DEFAULT_TWI_CLK_HZ;

	at91_calc_twi_clock(dev, bus_clk_rate);
	at91_init_twi_bus(dev);

	snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
	i2c_set_adapdata(&dev->adapter, dev);
	dev->adapter.owner = THIS_MODULE;
	dev->adapter.class = I2C_CLASS_DEPRECATED;
	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.quirks = &at91_twi_quirks;
	dev->adapter.dev.parent = dev->dev;
	dev->adapter.nr = pdev->id;
	dev->adapter.timeout = AT91_I2C_TIMEOUT;
	dev->adapter.dev.of_node = pdev->dev.of_node;

	pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev->dev);
	pm_runtime_set_active(dev->dev);
	pm_runtime_enable(dev->dev);

	rc = i2c_add_numbered_adapter(&dev->adapter);
	if (rc) {
		dev_err(dev->dev, "Adapter %s registration failed\n",
			dev->adapter.name);
		clk_disable_unprepare(dev->clk);

		pm_runtime_disable(dev->dev);
		pm_runtime_set_suspended(dev->dev);

		return rc;
	}

	dev_info(dev->dev, "AT91 i2c bus driver (hw version: %#x).\n",
		 at91_twi_read(dev, AT91_TWI_VER));
	return 0;
}

static int at91_twi_remove(struct platform_device *pdev)
{
	struct at91_twi_dev *dev = platform_get_drvdata(pdev);

	i2c_del_adapter(&dev->adapter);
	clk_disable_unprepare(dev->clk);

	pm_runtime_disable(dev->dev);
	pm_runtime_set_suspended(dev->dev);

	return 0;
}

#ifdef CONFIG_PM

static int at91_twi_runtime_suspend(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	clk_disable_unprepare(twi_dev->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int at91_twi_runtime_resume(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);

	return clk_prepare_enable(twi_dev->clk);
}

static int at91_twi_suspend_noirq(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev))
		at91_twi_runtime_suspend(dev);

	return 0;
}

static int at91_twi_resume_noirq(struct device *dev)
{
	int ret;

	if (!pm_runtime_status_suspended(dev)) {
		ret = at91_twi_runtime_resume(dev);
		if (ret)
			return ret;
	}

	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}

static const struct dev_pm_ops at91_twi_pm = {
	.suspend_noirq	= at91_twi_suspend_noirq,
	.resume_noirq	= at91_twi_resume_noirq,
	.runtime_suspend	= at91_twi_runtime_suspend,
	.runtime_resume		= at91_twi_runtime_resume,
};

#define at91_twi_pm_ops (&at91_twi_pm)
#else
#define at91_twi_pm_ops NULL
#endif

static struct platform_driver at91_twi_driver = {
	.probe		= at91_twi_probe,
	.remove		= at91_twi_remove,
	.id_table	= at91_twi_devtypes,
	.driver		= {
		.name	= "at91_i2c",
		.of_match_table = of_match_ptr(atmel_twi_dt_ids),
		.pm	= at91_twi_pm_ops,
	},
};

static int __init at91_twi_init(void)
{
	return platform_driver_register(&at91_twi_driver);
}

static void __exit at91_twi_exit(void)
{
	platform_driver_unregister(&at91_twi_driver);
}

subsys_initcall(at91_twi_init);
module_exit(at91_twi_exit);

MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_i2c");