This source file includes the following definitions.
- at91_init_twi_bus_master
- at91_calc_twi_clock
- at91_twi_dma_cleanup
- at91_twi_write_next_byte
- at91_twi_write_data_dma_callback
- at91_twi_write_data_dma
- at91_twi_read_next_byte
- at91_twi_read_data_dma_callback
- at91_twi_read_data_dma
- atmel_twi_interrupt
- at91_do_twi_transfer
- at91_twi_xfer
- at91_twi_func
- at91_twi_configure_dma
- at91_twi_probe_master
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16 #include <linux/clk.h>
17 #include <linux/completion.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/dmaengine.h>
20 #include <linux/err.h>
21 #include <linux/i2c.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/of.h>
25 #include <linux/of_device.h>
26 #include <linux/platform_device.h>
27 #include <linux/platform_data/dma-atmel.h>
28 #include <linux/pm_runtime.h>
29
30 #include "i2c-at91.h"
31
/*
 * Initialize the TWI controller for master mode: enable the FIFO when
 * this IP instance has one, enable master mode, disable slave mode and
 * program the clock waveform generator value computed earlier by
 * at91_calc_twi_clock().
 */
void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
	/* FIFO is optional; fifo_size == 0 means this IP has none */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
}
41
42
43
44
45
46 static void at91_calc_twi_clock(struct at91_twi_dev *dev)
47 {
48 int ckdiv, cdiv, div, hold = 0;
49 struct at91_twi_pdata *pdata = dev->pdata;
50 int offset = pdata->clk_offset;
51 int max_ckdiv = pdata->clk_max_div;
52 struct i2c_timings timings, *t = &timings;
53
54 i2c_parse_fw_timings(dev->dev, t, true);
55
56 div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
57 2 * t->bus_freq_hz) - offset);
58 ckdiv = fls(div >> 8);
59 cdiv = div >> ckdiv;
60
61 if (ckdiv > max_ckdiv) {
62 dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
63 ckdiv, max_ckdiv);
64 ckdiv = max_ckdiv;
65 cdiv = 255;
66 }
67
68 if (pdata->has_hold_field) {
69
70
71
72
73
74 hold = DIV_ROUND_UP(t->sda_hold_ns
75 * (clk_get_rate(dev->clk) / 1000), 1000000);
76 hold -= 3;
77 if (hold < 0)
78 hold = 0;
79 if (hold > AT91_TWI_CWGR_HOLD_MAX) {
80 dev_warn(dev->dev,
81 "HOLD field set to its maximum value (%d instead of %d)\n",
82 AT91_TWI_CWGR_HOLD_MAX, hold);
83 hold = AT91_TWI_CWGR_HOLD_MAX;
84 }
85 }
86
87 dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
88 | AT91_TWI_CWGR_HOLD(hold);
89
90 dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns)\n",
91 cdiv, ckdiv, hold, t->sda_hold_ns);
92 }
93
/*
 * Abort any in-flight DMA transfer and undo the buffer mapping.  TWI
 * interrupts are masked for the duration so the interrupt handler
 * cannot race with the teardown.
 */
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		/* dma->direction records which channel was in use */
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}
115
/*
 * PIO transmit path: push the next byte of dev->buf into the Transmit
 * Holding Register.  Called once to prime a write transfer and then
 * from the TXRDY interrupt for each subsequent byte.
 */
static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8-bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* after the last byte: request STOP (unless the alternative
	 * command mode generates it) and stop TXRDY interrupts */
	if (--dev->buf_len == 0) {
		if (!dev->use_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
	}

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}
135
/*
 * DMA TX completion callback: the buffer has been handed to the
 * controller, so unmap it and arm the TXCOMP interrupt that marks the
 * end of the transfer on the wire.
 */
static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * At this point the DMA engine has only filled THR/the TX FIFO;
	 * bytes may still be shifting out on the bus, so completion is
	 * signalled via TXCOMP from the interrupt handler.  Without the
	 * alternative command mode the STOP condition must be requested
	 * by software.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->use_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}
154
/*
 * Start a DMA transmit of dev->buf (dev->buf_len bytes).  On FIFO
 * equipped IPs the buffer is split into a 4-byte-aligned part and a
 * remainder so the bulk of the data can be moved with 32-bit accesses.
 * Completion is handled by at91_twi_write_data_dma_callback().
 */
static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	/* mask TWI interrupts while the mapping state is published */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		/*
		 * NOTE(review): returns with interrupts still masked (no
		 * at91_twi_irq_restore()) — confirm this is intended; the
		 * transfer will time out and go through the error path.
		 */
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

		/* bulk of the buffer: multiple of 4 bytes */
		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		/* trailing 1-3 bytes, if any */
		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}

		/*
		 * Program the TX FIFO so TXRDY is raised only when there is
		 * room for four data, matching the wider DMA accesses.
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}
232
/*
 * PIO receive path: pull one byte from the Receive Holding Register.
 * Also implements the SMBus block-read (I2C_M_RECV_LEN) length fixup
 * and requests the STOP condition one byte early, which the controller
 * needs to terminate the transfer correctly.
 */
static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * Nothing left to receive: the byte in RHR is unwanted (e.g. an
	 * anticipated read), so read it only to discard it and clear
	 * RXRDY.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8-bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* while aborting we only drain RHR; keep no data */
	if (dev->recv_len_abort)
		return;

	/* first byte of an SMBus block read carries the block length */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure the length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort: read one more byte so a STOP is generated */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* request STOP when the second-to-last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}
276
/*
 * DMA RX completion callback.  Unmap the buffer and, unless the
 * alternative command mode generates the STOP automatically, switch to
 * PIO for the last two bytes so the STOP request can reach the
 * controller before the final byte is received.
 */
static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	/*
	 * NOTE(review): when !use_alt_cmd the mapping was created with
	 * length dev->buf_len - 2 (see at91_twi_read_data_dma()) but is
	 * unmapped here with dev->buf_len — confirm the size mismatch is
	 * harmless on this platform.
	 */
	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->use_alt_cmd) {
		/* the last two bytes are read without DMA, via RXRDY */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}
293
/*
 * Start a DMA receive into dev->buf.  Without the alternative command
 * mode the last two bytes are excluded from the DMA transfer and read
 * by PIO instead (see at91_twi_read_data_dma_callback()).
 */
static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	/* keep the last two bytes for PIO unless alt command handles STOP */
	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* mask TWI interrupts while the mapping state is published */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		/*
		 * NOTE(review): returns without at91_twi_irq_restore() —
		 * same pattern as the TX path; confirm it is intended.
		 */
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * The whole DMA portion is 4-byte aligned: raise RXRDY only
		 * when four data are available so wider accesses can be
		 * used.
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}
350
/*
 * TWI interrupt handler.  Only status bits that are both set in SR and
 * currently enabled in IMR are acted upon; the raw status is
 * accumulated into dev->transfer_status so that at91_do_twi_transfer()
 * can inspect error flags after completion.
 */
static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;

	/*
	 * Receive path: drain RHR for as long as RXRDY stays set.  With a
	 * FIFO, or when the controller anticipates the next byte, more
	 * than one data can be pending per interrupt, so a single read
	 * would not be enough.
	 */
	if (irqstatus & AT91_TWI_RXRDY) {
		do {
			at91_twi_read_next_byte(dev);
		} while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
	}

	/*
	 * TXCOMP or NACK ends the transfer: mask all interrupts and wake
	 * the waiter in at91_do_twi_transfer().  This check comes before
	 * TXRDY because on a NACK the controller can raise NACK, TXCOMP
	 * and TXRDY together and no further byte must be written.
	 * Otherwise TXRDY simply feeds the next byte of a PIO write.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags for the waiter to evaluate */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}
436
/*
 * Run the transfer described by dev->msg / dev->buf / dev->buf_len and
 * wait for completion.
 *
 * Returns 0 on success, -ETIMEDOUT when no completion interrupt
 * arrives in time, -EREMOTEIO on NACK, -EIO on receive overrun /
 * transmit underrun / locked transmitter, or -EPROTO on an invalid
 * SMBus block length.
 */
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;

	/*
	 * Completion is signalled from atmel_twi_interrupt() once TXCOMP
	 * or NACK is seen; all status bits observed meanwhile accumulate
	 * in dev->transfer_status and are checked below.
	 */
	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* discard stale status bits (e.g. NACK) from a previous transfer */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* reset the ready thresholds to one data per access ... */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* ... and flush both FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

	if (!dev->buf_len) {
		/* zero-length message: issue the QUICK command */
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);

		/*
		 * Long receives go through DMA; the DMA callback re-enables
		 * RXRDY so the last two bytes are read by PIO, letting the
		 * STOP request reach the controller early enough.  Shorter
		 * receives are pure PIO driven by RXRDY interrupts.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			/* prime the first byte, then let TXRDY drive the rest */
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		/* re-initialize the controller for the next transfer */
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop any DMA transfer still in progress */
	at91_twi_dma_cleanup(dev);
	/* then clear the locked transmitter, if applicable */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}
	return ret;
}
604
/*
 * i2c_algorithm.master_xfer implementation.  The adapter quirks
 * guarantee that when two messages are passed, the first is a write of
 * at most 3 bytes to the same slave address; it is programmed as the
 * controller's internal address and only the second message is
 * actually transferred.  Returns the number of processed messages or a
 * negative errno.
 */
static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st message becomes the internal address; transfer the 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			/* internal address is sent most-significant byte first */
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	dev->use_alt_cmd = false;
	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0 &&
		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
			/* alternative command mode: hardware handles START/STOP */
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			dev->use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}
668
669
670
671
672
/*
 * The controller can combine at most two messages with a repeated
 * start: the first must be a write of up to 3 bytes (used as the
 * internal address) to the same slave address as the second.
 */
static const struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
	.max_comb_1st_msg_len = 3,
};
677
678 static u32 at91_twi_func(struct i2c_adapter *adapter)
679 {
680 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
681 | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
682 }
683
/* transfer operations exposed to the I2C core */
static const struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer = at91_twi_xfer,
	.functionality = at91_twi_func,
};
688
/*
 * Request and configure the "tx" and "rx" DMA channels for the TWI at
 * physical address phy_addr.  On failure the driver keeps working in
 * PIO mode; -EPROBE_DEFER is propagated to the caller so probing can
 * be retried once the DMA controller is available.
 */
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * With a FIFO, THR/RHR are accessed four data at a time, so the
	 * DMA controller uses 32-bit beats; otherwise single bytes.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		/* clear so the error path does not release an ERR_PTR */
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	/* two entries: the TX path may split the buffer (see write_data_dma) */
	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}
770
/*
 * Master-mode part of the probe: set up the completion, install the
 * interrupt handler, optionally configure DMA and the FIFO, compute
 * the clock settings and hook up the adapter algorithm and quirks.
 * Returns 0 on success or a negative errno.
 */
int at91_twi_probe_master(struct platform_device *pdev,
			  u32 phy_addr, struct at91_twi_dev *dev)
{
	int rc;

	init_completion(&dev->cmd_complete);

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			      dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

	/*
	 * DMA channels come from the device tree.  Only -EPROBE_DEFER is
	 * fatal here; any other failure falls back to PIO inside
	 * at91_twi_configure_dma().
	 */
	if (dev->dev->of_node) {
		rc = at91_twi_configure_dma(dev, phy_addr);
		if (rc == -EPROBE_DEFER)
			return rc;
	}

	/* optional FIFO: fifo_size stays 0 when the property is absent */
	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &dev->fifo_size)) {
		dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
	}

	at91_calc_twi_clock(dev);

	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.quirks = &at91_twi_quirks;

	return 0;
}