This source file includes the following definitions:
- get_mb_rx_first
- get_mb_rx_last
- get_mb_rx_split
- get_mb_rx_num
- get_mb_rx_low_last
- get_mb_rx_low_mask
- get_mb_tx_shift
- get_mb_tx_num
- get_mb_tx_first
- get_mb_tx_last
- get_next_prio_shift
- get_next_prio_mask
- get_next_mb_mask
- get_next_mask
- get_irq_mb_rx
- get_irq_mb_tx
- get_tx_next_mb
- get_tx_next_prio
- get_tx_echo_mb
- at91_read
- at91_write
- set_mb_mode_prio
- set_mb_mode
- at91_can_id_to_reg_mid
- at91_setup_mailboxes
- at91_set_bittiming
- at91_get_berr_counter
- at91_chip_start
- at91_chip_stop
- at91_start_xmit
- at91_activate_rx_low
- at91_activate_rx_mb
- at91_rx_overflow_err
- at91_read_mb
- at91_read_msg
- at91_poll_rx
- at91_poll_err_frame
- at91_poll_err
- at91_poll
- at91_irq_tx
- at91_irq_err_state
- at91_get_state_by_bec
- at91_irq_err
- at91_irq
- at91_open
- at91_close
- at91_set_mode
- at91_sysfs_show_mb0_id
- at91_sysfs_set_mb0_id
- at91_can_get_driver_data
- at91_can_probe
- at91_can_remove
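
/*
 * at91_can.c - CAN network driver for the Atmel AT91 SoC CAN controller
 * (AT91SAM9263 and AT91SAM9X5 family devices, see the device tables below)
 */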
9 #include <linux/clk.h>
10 #include <linux/errno.h>
11 #include <linux/if_arp.h>
12 #include <linux/interrupt.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/netdevice.h>
16 #include <linux/of.h>
17 #include <linux/platform_device.h>
18 #include <linux/rtnetlink.h>
19 #include <linux/skbuff.h>
20 #include <linux/spinlock.h>
21 #include <linux/string.h>
22 #include <linux/types.h>
23
24 #include <linux/can/dev.h>
25 #include <linux/can/error.h>
26 #include <linux/can/led.h>
27
28 #define AT91_MB_MASK(i) ((1 << (i)) - 1)
29
30
31 enum at91_reg {
32 AT91_MR = 0x000,
33 AT91_IER = 0x004,
34 AT91_IDR = 0x008,
35 AT91_IMR = 0x00C,
36 AT91_SR = 0x010,
37 AT91_BR = 0x014,
38 AT91_TIM = 0x018,
39 AT91_TIMESTP = 0x01C,
40 AT91_ECR = 0x020,
41 AT91_TCR = 0x024,
42 AT91_ACR = 0x028,
43 };
44
45
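/* Per-mailbox registers: each mailbox owns a 0x20-byte register block
 * starting at offset 0x200 (mode, acceptance mask, ID, family ID, status,
 * data low/high, control).
 */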
46 #define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20))
47 #define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20))
48 #define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20))
49 #define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20))
50 #define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20))
51 #define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20))
52 #define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20))
53 #define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20))
54
55
56 #define AT91_MR_CANEN BIT(0)
57 #define AT91_MR_LPM BIT(1)
58 #define AT91_MR_ABM BIT(2)
59 #define AT91_MR_OVL BIT(3)
60 #define AT91_MR_TEOF BIT(4)
61 #define AT91_MR_TTM BIT(5)
62 #define AT91_MR_TIMFRZ BIT(6)
63 #define AT91_MR_DRPT BIT(7)
64
65 #define AT91_SR_RBSY BIT(29)
66
67 #define AT91_MMR_PRIO_SHIFT (16)
68
69 #define AT91_MID_MIDE BIT(29)
70
71 #define AT91_MSR_MRTR BIT(20)
72 #define AT91_MSR_MABT BIT(22)
73 #define AT91_MSR_MRDY BIT(23)
74 #define AT91_MSR_MMI BIT(24)
75
76 #define AT91_MCR_MRTR BIT(20)
77 #define AT91_MCR_MTCR BIT(23)
78
79
80 enum at91_mb_mode {
81 AT91_MB_MODE_DISABLED = 0,
82 AT91_MB_MODE_RX = 1,
83 AT91_MB_MODE_RX_OVRWR = 2,
84 AT91_MB_MODE_TX = 3,
85 AT91_MB_MODE_CONSUMER = 4,
86 AT91_MB_MODE_PRODUCER = 5,
87 };
88
89
90 #define AT91_IRQ_ERRA (1 << 16)
91 #define AT91_IRQ_WARN (1 << 17)
92 #define AT91_IRQ_ERRP (1 << 18)
93 #define AT91_IRQ_BOFF (1 << 19)
94 #define AT91_IRQ_SLEEP (1 << 20)
95 #define AT91_IRQ_WAKEUP (1 << 21)
96 #define AT91_IRQ_TOVF (1 << 22)
97 #define AT91_IRQ_TSTP (1 << 23)
98 #define AT91_IRQ_CERR (1 << 24)
99 #define AT91_IRQ_SERR (1 << 25)
100 #define AT91_IRQ_AERR (1 << 26)
101 #define AT91_IRQ_FERR (1 << 27)
102 #define AT91_IRQ_BERR (1 << 28)
103
104 #define AT91_IRQ_ERR_ALL (0x1fff0000)
105 #define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
106 AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
107 #define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
108 AT91_IRQ_ERRP | AT91_IRQ_BOFF)
109
110 #define AT91_IRQ_ALL (0x1fffffff)
111
112 enum at91_devtype {
113 AT91_DEVTYPE_SAM9263,
114 AT91_DEVTYPE_SAM9X5,
115 };
116
117 struct at91_devtype_data {
118 unsigned int rx_first;
119 unsigned int rx_split;
120 unsigned int rx_last;
121 unsigned int tx_shift;
122 enum at91_devtype type;
123 };
124
125 struct at91_priv {
126 struct can_priv can;
127 struct napi_struct napi;
128
129 void __iomem *reg_base;
130
131 u32 reg_sr;
132 unsigned int tx_next;
133 unsigned int tx_echo;
134 unsigned int rx_next;
135 struct at91_devtype_data devtype_data;
136
137 struct clk *clk;
138 struct at91_can_data *pdata;
139
140 canid_t mb0_id;
141 };
142
143 static const struct at91_devtype_data at91_at91sam9263_data = {
144 .rx_first = 1,
145 .rx_split = 8,
146 .rx_last = 11,
147 .tx_shift = 2,
148 .type = AT91_DEVTYPE_SAM9263,
149 };
150
151 static const struct at91_devtype_data at91_at91sam9x5_data = {
152 .rx_first = 0,
153 .rx_split = 4,
154 .rx_last = 5,
155 .tx_shift = 1,
156 .type = AT91_DEVTYPE_SAM9X5,
157 };
158
159 static const struct can_bittiming_const at91_bittiming_const = {
160 .name = KBUILD_MODNAME,
161 .tseg1_min = 4,
162 .tseg1_max = 16,
163 .tseg2_min = 2,
164 .tseg2_max = 8,
165 .sjw_max = 4,
166 .brp_min = 2,
167 .brp_max = 128,
168 .brp_inc = 1,
169 };
170
171 #define AT91_IS(_model) \
172 static inline int at91_is_sam##_model(const struct at91_priv *priv) \
173 { \
174 return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
175 }
176
177 AT91_IS(9263);
178 AT91_IS(9X5);
179
180 static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
181 {
182 return priv->devtype_data.rx_first;
183 }
184
185 static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
186 {
187 return priv->devtype_data.rx_last;
188 }
189
190 static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
191 {
192 return priv->devtype_data.rx_split;
193 }
194
195 static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
196 {
197 return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
198 }
199
200 static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
201 {
202 return get_mb_rx_split(priv) - 1;
203 }
204
205 static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
206 {
207 return AT91_MB_MASK(get_mb_rx_split(priv)) &
208 ~AT91_MB_MASK(get_mb_rx_first(priv));
209 }
210
211 static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
212 {
213 return priv->devtype_data.tx_shift;
214 }
215
216 static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
217 {
218 return 1 << get_mb_tx_shift(priv);
219 }
220
221 static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
222 {
223 return get_mb_rx_last(priv) + 1;
224 }
225
226 static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
227 {
228 return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
229 }
230
231 static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
232 {
233 return get_mb_tx_shift(priv);
234 }
235
236 static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
237 {
238 return 0xf << get_mb_tx_shift(priv);
239 }
240
241 static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
242 {
243 return AT91_MB_MASK(get_mb_tx_shift(priv));
244 }
245
246 static inline unsigned int get_next_mask(const struct at91_priv *priv)
247 {
248 return get_next_mb_mask(priv) | get_next_prio_mask(priv);
249 }
250
251 static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
252 {
253 return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
254 ~AT91_MB_MASK(get_mb_rx_first(priv));
255 }
256
257 static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
258 {
259 return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
260 ~AT91_MB_MASK(get_mb_tx_first(priv));
261 }
262
263 static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
264 {
265 return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
266 }
267
268 static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
269 {
270 return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
271 }
272
273 static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
274 {
275 return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
276 }
277
278 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
279 {
280 return readl_relaxed(priv->reg_base + reg);
281 }
282
283 static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
284 u32 value)
285 {
286 writel_relaxed(value, priv->reg_base + reg);
287 }
288
289 static inline void set_mb_mode_prio(const struct at91_priv *priv,
290 unsigned int mb, enum at91_mb_mode mode, int prio)
291 {
292 at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
293 }
294
295 static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
296 enum at91_mb_mode mode)
297 {
298 set_mb_mode_prio(priv, mb, mode, 0);
299 }
300
301 static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
302 {
303 u32 reg_mid;
304
305 if (can_id & CAN_EFF_FLAG)
306 reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
307 else
308 reg_mid = (can_id & CAN_SFF_MASK) << 18;
309
310 return reg_mid;
311 }
312
313 static void at91_setup_mailboxes(struct net_device *dev)
314 {
315 struct at91_priv *priv = netdev_priv(dev);
316 unsigned int i;
317 u32 reg_mid;
318
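/* The mailboxes below rx_first are unused for reception but apparently
 * cannot be fully switched off on all chips, which is presumably why they
 * are parked in disabled mode with a dedicated ID (mb0_id, adjustable via
 * sysfs on the SAM9263) instead of being left to match bus traffic.
 */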
326 reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
327 for (i = 0; i < get_mb_rx_first(priv); i++) {
328 set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
329 at91_write(priv, AT91_MID(i), reg_mid);
330 at91_write(priv, AT91_MCR(i), 0x0);
331 }
332
333 for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
334 set_mb_mode(priv, i, AT91_MB_MODE_RX);
335 set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);
336
337
338 for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
339 at91_write(priv, AT91_MAM(i), 0x0);
340 at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
341 }
342
343
344 for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
345 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
346
347
348 priv->tx_next = priv->tx_echo = 0;
349 priv->rx_next = get_mb_rx_first(priv);
350 }
351
352 static int at91_set_bittiming(struct net_device *dev)
353 {
354 const struct at91_priv *priv = netdev_priv(dev);
355 const struct can_bittiming *bt = &priv->can.bittiming;
356 u32 reg_br;
357
358 reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
359 ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
360 ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
361 ((bt->phase_seg2 - 1) << 0);
362
363 netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
364
365 at91_write(priv, AT91_BR, reg_br);
366
367 return 0;
368 }
369
370 static int at91_get_berr_counter(const struct net_device *dev,
371 struct can_berr_counter *bec)
372 {
373 const struct at91_priv *priv = netdev_priv(dev);
374 u32 reg_ecr = at91_read(priv, AT91_ECR);
375
376 bec->rxerr = reg_ecr & 0xff;
377 bec->txerr = reg_ecr >> 16;
378
379 return 0;
380 }
381
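/* Bring the controller into operation: mask all interrupts, disable the
 * CAN core, program bit timing and mailboxes, then re-enable it (in
 * listen-only/ABM mode if requested) and unmask the RX, error-passive and
 * error-frame interrupts.
 */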
382 static void at91_chip_start(struct net_device *dev)
383 {
384 struct at91_priv *priv = netdev_priv(dev);
385 u32 reg_mr, reg_ier;
386
387
388 at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
389
390
391 reg_mr = at91_read(priv, AT91_MR);
392 at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
393
394 at91_set_bittiming(dev);
395 at91_setup_mailboxes(dev);
396
397
398 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
399 reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
400 else
401 reg_mr = AT91_MR_CANEN;
402 at91_write(priv, AT91_MR, reg_mr);
403
404 priv->can.state = CAN_STATE_ERROR_ACTIVE;
405
406
407 reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
408 at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
409 at91_write(priv, AT91_IER, reg_ier);
410 }
411
412 static void at91_chip_stop(struct net_device *dev, enum can_state state)
413 {
414 struct at91_priv *priv = netdev_priv(dev);
415 u32 reg_mr;
416
417
418 at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
419
420 reg_mr = at91_read(priv, AT91_MR);
421 at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
422
423 priv->can.state = state;
424 }
425
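/* TX path: priv->tx_next is a free-running counter. Its lowest tx_shift
 * bits select the next TX mailbox (as an offset from the first TX mailbox),
 * the following four bits are used as the mailbox priority. Since the
 * controller favours the mailbox with the numerically lowest priority,
 * handing out increasing priority values as the counter advances preserves
 * the queueing order across mailboxes. priv->tx_echo trails tx_next and
 * marks the next frame to be echoed back in at91_irq_tx().
 */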
449 static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
450 {
451 struct at91_priv *priv = netdev_priv(dev);
452 struct net_device_stats *stats = &dev->stats;
453 struct can_frame *cf = (struct can_frame *)skb->data;
454 unsigned int mb, prio;
455 u32 reg_mid, reg_mcr;
456
457 if (can_dropped_invalid_skb(dev, skb))
458 return NETDEV_TX_OK;
459
460 mb = get_tx_next_mb(priv);
461 prio = get_tx_next_prio(priv);
462
463 if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
464 netif_stop_queue(dev);
465
466 netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
467 return NETDEV_TX_BUSY;
468 }
469 reg_mid = at91_can_id_to_reg_mid(cf->can_id);
470 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
471 (cf->can_dlc << 16) | AT91_MCR_MTCR;
472
473
474 set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
475 at91_write(priv, AT91_MID(mb), reg_mid);
476 set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);
477
478 at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
479 at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));
480
481
482 at91_write(priv, AT91_MCR(mb), reg_mcr);
483
484 stats->tx_bytes += cf->can_dlc;
485
486
487 can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
488
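	/* Stop the queue if the next TX mailbox is still busy or if the
	 * mailbox/priority counter just wrapped around; at91_irq_tx() wakes
	 * the queue again once it is safe to continue.
	 */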
497 priv->tx_next++;
498 if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
499 AT91_MSR_MRDY) ||
500 (priv->tx_next & get_next_mask(priv)) == 0)
501 netif_stop_queue(dev);
502
503
504 at91_write(priv, AT91_IER, 1 << mb);
505
506 return NETDEV_TX_OK;
507 }
508
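/* Re-arm the whole "low" group of RX mailboxes in one go by writing their
 * mask to the transfer command register; this is done only after all of
 * them have been read, so frame ordering within the group is preserved.
 */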
515 static inline void at91_activate_rx_low(const struct at91_priv *priv)
516 {
517 u32 mask = get_mb_rx_low_mask(priv);
518 at91_write(priv, AT91_TCR, mask);
519 }
520
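/* Re-arm a single RX mailbox (used for the "high" group, whose mailboxes
 * are released immediately after being read).
 */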
528 static inline void at91_activate_rx_mb(const struct at91_priv *priv,
529 unsigned int mb)
530 {
531 u32 mask = 1 << mb;
532 at91_write(priv, AT91_TCR, mask);
533 }
534
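/* Account an RX overflow (the overwrite-mode mailbox signalled that a
 * message was lost) and push a controller error frame up the stack.
 */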
539 static void at91_rx_overflow_err(struct net_device *dev)
540 {
541 struct net_device_stats *stats = &dev->stats;
542 struct sk_buff *skb;
543 struct can_frame *cf;
544
545 netdev_dbg(dev, "RX buffer overflow\n");
546 stats->rx_over_errors++;
547 stats->rx_errors++;
548
549 skb = alloc_can_err_skb(dev, &cf);
550 if (unlikely(!skb))
551 return;
552
553 cf->can_id |= CAN_ERR_CRTL;
554 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
555
556 stats->rx_packets++;
557 stats->rx_bytes += cf->can_dlc;
558 netif_receive_skb(skb);
559 }
560
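/* Decode one mailbox into a CAN frame: extended vs. standard ID based on
 * the MIDE bit, DLC and RTR flag from the mailbox status register, payload
 * from the data registers. The mailbox ID register is then reset to the
 * bare MIDE bit, and an overflow is reported if the last (overwrite-mode)
 * mailbox flagged MMI.
 */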
570 static void at91_read_mb(struct net_device *dev, unsigned int mb,
571 struct can_frame *cf)
572 {
573 const struct at91_priv *priv = netdev_priv(dev);
574 u32 reg_msr, reg_mid;
575
576 reg_mid = at91_read(priv, AT91_MID(mb));
577 if (reg_mid & AT91_MID_MIDE)
578 cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
579 else
580 cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
581
582 reg_msr = at91_read(priv, AT91_MSR(mb));
583 cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);
584
585 if (reg_msr & AT91_MSR_MRTR)
586 cf->can_id |= CAN_RTR_FLAG;
587 else {
588 *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
589 *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
590 }
591
592
593 at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
594
595 if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
596 at91_rx_overflow_err(dev);
597 }
598
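/* Read one received frame out of mailbox 'mb' into a freshly allocated skb
 * and hand it to the stack, updating the RX statistics.
 */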
607 static void at91_read_msg(struct net_device *dev, unsigned int mb)
608 {
609 struct net_device_stats *stats = &dev->stats;
610 struct can_frame *cf;
611 struct sk_buff *skb;
612
613 skb = alloc_can_skb(dev, &cf);
614 if (unlikely(!skb)) {
615 stats->rx_dropped++;
616 return;
617 }
618
619 at91_read_mb(dev, mb, cf);
620
621 stats->rx_packets++;
622 stats->rx_bytes += cf->can_dlc;
623 netif_receive_skb(skb);
624
625 can_led_event(dev, CAN_LED_EVENT_RX);
626 }
627
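/* NAPI RX handler. The RX mailboxes are split at rx_split into a "low" and
 * a "high" group: low mailboxes are only re-armed once the whole group has
 * been processed (see at91_activate_rx_low()), which keeps the order of
 * back-to-back frames, while high mailboxes are re-armed one by one. The
 * last RX mailbox runs in overwrite mode and acts as an overflow catch-all.
 * priv->rx_next remembers where the previous poll stopped; if new frames
 * show up in the low group while we are already past it, the original
 * ordering can no longer be guaranteed, hence the netdev_info() below.
 */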
677 static int at91_poll_rx(struct net_device *dev, int quota)
678 {
679 struct at91_priv *priv = netdev_priv(dev);
680 u32 reg_sr = at91_read(priv, AT91_SR);
681 const unsigned long *addr = (unsigned long *)&reg_sr;
682 unsigned int mb;
683 int received = 0;
684
685 if (priv->rx_next > get_mb_rx_low_last(priv) &&
686 reg_sr & get_mb_rx_low_mask(priv))
687 netdev_info(dev,
688 "order of incoming frames cannot be guaranteed\n");
689
690 again:
691 for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
692 mb < get_mb_tx_first(priv) && quota > 0;
693 reg_sr = at91_read(priv, AT91_SR),
694 mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
695 at91_read_msg(dev, mb);
696
697
698 if (mb == get_mb_rx_low_last(priv))
699
700 at91_activate_rx_low(priv);
701 else if (mb > get_mb_rx_low_last(priv))
702
703 at91_activate_rx_mb(priv, mb);
704
705 received++;
706 quota--;
707 }
708
709
710 if (priv->rx_next > get_mb_rx_low_last(priv) &&
711 mb > get_mb_rx_last(priv)) {
712 priv->rx_next = get_mb_rx_first(priv);
713 if (quota > 0)
714 goto again;
715 }
716
717 return received;
718 }
719
720 static void at91_poll_err_frame(struct net_device *dev,
721 struct can_frame *cf, u32 reg_sr)
722 {
723 struct at91_priv *priv = netdev_priv(dev);
724
725
726 if (reg_sr & AT91_IRQ_CERR) {
727 netdev_dbg(dev, "CERR irq\n");
728 dev->stats.rx_errors++;
729 priv->can.can_stats.bus_error++;
730 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
731 }
732
733
734 if (reg_sr & AT91_IRQ_SERR) {
735 netdev_dbg(dev, "SERR irq\n");
736 dev->stats.rx_errors++;
737 priv->can.can_stats.bus_error++;
738 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
739 cf->data[2] |= CAN_ERR_PROT_STUFF;
740 }
741
742
743 if (reg_sr & AT91_IRQ_AERR) {
744 netdev_dbg(dev, "AERR irq\n");
745 dev->stats.tx_errors++;
746 cf->can_id |= CAN_ERR_ACK;
747 }
748
749
750 if (reg_sr & AT91_IRQ_FERR) {
751 netdev_dbg(dev, "FERR irq\n");
752 dev->stats.rx_errors++;
753 priv->can.can_stats.bus_error++;
754 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
755 cf->data[2] |= CAN_ERR_PROT_FORM;
756 }
757
758
759 if (reg_sr & AT91_IRQ_BERR) {
760 netdev_dbg(dev, "BERR irq\n");
761 dev->stats.tx_errors++;
762 priv->can.can_stats.bus_error++;
763 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
764 cf->data[2] |= CAN_ERR_PROT_BIT;
765 }
766 }
767
768 static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
769 {
770 struct sk_buff *skb;
771 struct can_frame *cf;
772
773 if (quota == 0)
774 return 0;
775
776 skb = alloc_can_err_skb(dev, &cf);
777 if (unlikely(!skb))
778 return 0;
779
780 at91_poll_err_frame(dev, cf, reg_sr);
781
782 dev->stats.rx_packets++;
783 dev->stats.rx_bytes += cf->can_dlc;
784 netif_receive_skb(skb);
785
786 return 1;
787 }
788
789 static int at91_poll(struct napi_struct *napi, int quota)
790 {
791 struct net_device *dev = napi->dev;
792 const struct at91_priv *priv = netdev_priv(dev);
793 u32 reg_sr = at91_read(priv, AT91_SR);
794 int work_done = 0;
795
796 if (reg_sr & get_irq_mb_rx(priv))
797 work_done += at91_poll_rx(dev, quota - work_done);
798
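	/* The error-frame bits in SR appear to be cleared on read, so merge
	 * in the value the interrupt handler latched in priv->reg_sr before
	 * scheduling this poll.
	 */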
803 reg_sr |= priv->reg_sr;
804 if (reg_sr & AT91_IRQ_ERR_FRAME)
805 work_done += at91_poll_err(dev, quota - work_done, reg_sr);
806
807 if (work_done < quota) {
808
809 u32 reg_ier = AT91_IRQ_ERR_FRAME;
810 reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
811
812 napi_complete_done(napi, work_done);
813 at91_write(priv, AT91_IER, reg_ier);
814 }
815
816 return work_done;
817 }
818
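/* TX interrupt handler: walk the echo ring from tx_echo towards tx_next,
 * completing every mailbox whose bit is set in the status register,
 * disabling its interrupt and echoing the frame back to the stack. Stop at
 * the first mailbox that has not finished yet.
 */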
831 static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
832 {
833 struct at91_priv *priv = netdev_priv(dev);
834 u32 reg_msr;
835 unsigned int mb;
836
837
838
839 for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
840 mb = get_tx_echo_mb(priv);
841
842
843 if (!(reg_sr & (1 << mb)))
844 break;
845
846
847 at91_write(priv, AT91_IDR, 1 << mb);
848
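		/* Echo the frame only if the mailbox reports a completed,
		 * non-aborted transfer (MRDY set, MABT clear).
		 */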
855 reg_msr = at91_read(priv, AT91_MSR(mb));
856 if (likely(reg_msr & AT91_MSR_MRDY &&
857 ~reg_msr & AT91_MSR_MABT)) {
858
859 can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
860 dev->stats.tx_packets++;
861 can_led_event(dev, CAN_LED_EVENT_TX);
862 }
863 }
864
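	/* Restart the queue unless tx_next has just completed a full wrap of
	 * mailboxes and priorities while tx_echo has not yet caught up to the
	 * same point.
	 */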
870 if ((priv->tx_next & get_next_mask(priv)) != 0 ||
871 (priv->tx_echo & get_next_mask(priv)) == 0)
872 netif_wake_queue(dev);
873 }
874
875 static void at91_irq_err_state(struct net_device *dev,
876 struct can_frame *cf, enum can_state new_state)
877 {
878 struct at91_priv *priv = netdev_priv(dev);
879 u32 reg_idr = 0, reg_ier = 0;
880 struct can_berr_counter bec;
881
882 at91_get_berr_counter(dev, &bec);
883
884 switch (priv->can.state) {
885 case CAN_STATE_ERROR_ACTIVE:
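		/* Coming from ERROR_ACTIVE: any transition towards WARNING,
		 * PASSIVE or BUS_OFF means the error-warning limit was hit.
		 */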
891 if (new_state >= CAN_STATE_ERROR_WARNING &&
892 new_state <= CAN_STATE_BUS_OFF) {
893 netdev_dbg(dev, "Error Warning IRQ\n");
894 priv->can.can_stats.error_warning++;
895
896 cf->can_id |= CAN_ERR_CRTL;
897 cf->data[1] = (bec.txerr > bec.rxerr) ?
898 CAN_ERR_CRTL_TX_WARNING :
899 CAN_ERR_CRTL_RX_WARNING;
900 }
901
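		/* fall through */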
902 case CAN_STATE_ERROR_WARNING:
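		/* Coming from ERROR_ACTIVE or ERROR_WARNING: a transition to
		 * PASSIVE or BUS_OFF means the error-passive limit was hit.
		 */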
908 if (new_state >= CAN_STATE_ERROR_PASSIVE &&
909 new_state <= CAN_STATE_BUS_OFF) {
910 netdev_dbg(dev, "Error Passive IRQ\n");
911 priv->can.can_stats.error_passive++;
912
913 cf->can_id |= CAN_ERR_CRTL;
914 cf->data[1] = (bec.txerr > bec.rxerr) ?
915 CAN_ERR_CRTL_TX_PASSIVE :
916 CAN_ERR_CRTL_RX_PASSIVE;
917 }
918 break;
919 case CAN_STATE_BUS_OFF:
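		/* Coming from BUS_OFF: dropping to a lower state means the
		 * controller was restarted.
		 */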
924 if (new_state <= CAN_STATE_ERROR_PASSIVE) {
925 cf->can_id |= CAN_ERR_RESTARTED;
926
927 netdev_dbg(dev, "restarted\n");
928 priv->can.can_stats.restarts++;
929
930 netif_carrier_on(dev);
931 netif_wake_queue(dev);
932 }
933 break;
934 default:
935 break;
936 }
937
938
939
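	/* Now arm the state-change interrupts that match the new state. */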
940 switch (new_state) {
941 case CAN_STATE_ERROR_ACTIVE:
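		/* Back to ERROR_ACTIVE (e.g. after an automatic restart):
		 * report the protocol-active condition, then fall through to
		 * the ERROR_WARNING case to set up the interrupt masks.
		 */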
948 netdev_dbg(dev, "Error Active\n");
949 cf->can_id |= CAN_ERR_PROT;
950 cf->data[2] = CAN_ERR_PROT_ACTIVE;
951
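		/* fall through */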
952 case CAN_STATE_ERROR_WARNING:
953 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
954 reg_ier = AT91_IRQ_ERRP;
955 break;
956 case CAN_STATE_ERROR_PASSIVE:
957 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
958 reg_ier = AT91_IRQ_BOFF;
959 break;
960 case CAN_STATE_BUS_OFF:
961 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
962 AT91_IRQ_WARN | AT91_IRQ_BOFF;
963 reg_ier = 0;
964
965 cf->can_id |= CAN_ERR_BUSOFF;
966
967 netdev_dbg(dev, "bus-off\n");
968 netif_carrier_off(dev);
969 priv->can.can_stats.bus_off++;
970
971
972 if (!priv->can.restart_ms) {
973 at91_chip_stop(dev, CAN_STATE_BUS_OFF);
974 return;
975 }
976 break;
977 default:
978 break;
979 }
980
981 at91_write(priv, AT91_IDR, reg_idr);
982 at91_write(priv, AT91_IER, reg_ier);
983 }
984
985 static int at91_get_state_by_bec(const struct net_device *dev,
986 enum can_state *state)
987 {
988 struct can_berr_counter bec;
989 int err;
990
991 err = at91_get_berr_counter(dev, &bec);
992 if (err)
993 return err;
994
995 if (bec.txerr < 96 && bec.rxerr < 96)
996 *state = CAN_STATE_ERROR_ACTIVE;
997 else if (bec.txerr < 128 && bec.rxerr < 128)
998 *state = CAN_STATE_ERROR_WARNING;
999 else if (bec.txerr < 256 && bec.rxerr < 256)
1000 *state = CAN_STATE_ERROR_PASSIVE;
1001 else
1002 *state = CAN_STATE_BUS_OFF;
1003
1004 return 0;
1005 }
1006
1007
1008 static void at91_irq_err(struct net_device *dev)
1009 {
1010 struct at91_priv *priv = netdev_priv(dev);
1011 struct sk_buff *skb;
1012 struct can_frame *cf;
1013 enum can_state new_state;
1014 u32 reg_sr;
1015 int err;
1016
1017 if (at91_is_sam9263(priv)) {
1018 reg_sr = at91_read(priv, AT91_SR);
1019
1020
1021 if (unlikely(reg_sr & AT91_IRQ_BOFF))
1022 new_state = CAN_STATE_BUS_OFF;
1023 else if (unlikely(reg_sr & AT91_IRQ_ERRP))
1024 new_state = CAN_STATE_ERROR_PASSIVE;
1025 else if (unlikely(reg_sr & AT91_IRQ_WARN))
1026 new_state = CAN_STATE_ERROR_WARNING;
1027 else if (likely(reg_sr & AT91_IRQ_ERRA))
1028 new_state = CAN_STATE_ERROR_ACTIVE;
1029 else {
1030 netdev_err(dev, "BUG! hardware in undefined state\n");
1031 return;
1032 }
1033 } else {
1034 err = at91_get_state_by_bec(dev, &new_state);
1035 if (err)
1036 return;
1037 }
1038
1039
1040 if (likely(new_state == priv->can.state))
1041 return;
1042
1043 skb = alloc_can_err_skb(dev, &cf);
1044 if (unlikely(!skb))
1045 return;
1046
1047 at91_irq_err_state(dev, cf, new_state);
1048
1049 dev->stats.rx_packets++;
1050 dev->stats.rx_bytes += cf->can_dlc;
1051 netif_rx(skb);
1052
1053 priv->can.state = new_state;
1054 }
1055
1056
1057
1058
1059 static irqreturn_t at91_irq(int irq, void *dev_id)
1060 {
1061 struct net_device *dev = dev_id;
1062 struct at91_priv *priv = netdev_priv(dev);
1063 irqreturn_t handled = IRQ_NONE;
1064 u32 reg_sr, reg_imr;
1065
1066 reg_sr = at91_read(priv, AT91_SR);
1067 reg_imr = at91_read(priv, AT91_IMR);
1068
1069
1070 reg_sr &= reg_imr;
1071 if (!reg_sr)
1072 goto exit;
1073
1074 handled = IRQ_HANDLED;
1075
1076
1077 if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
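		/* Latch the status bits for the poll routine, mask the RX and
		 * error-frame interrupts and let NAPI do the work; at91_poll()
		 * re-enables them once it is done.
		 */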
1082 priv->reg_sr = reg_sr;
1083 at91_write(priv, AT91_IDR,
1084 get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
1085 napi_schedule(&priv->napi);
1086 }
1087
1088
1089 if (reg_sr & get_irq_mb_tx(priv))
1090 at91_irq_tx(dev, reg_sr);
1091
1092 at91_irq_err(dev);
1093
1094 exit:
1095 return handled;
1096 }
1097
1098 static int at91_open(struct net_device *dev)
1099 {
1100 struct at91_priv *priv = netdev_priv(dev);
1101 int err;
1102
1103 err = clk_prepare_enable(priv->clk);
1104 if (err)
1105 return err;
1106
1107
1108 err = open_candev(dev);
1109 if (err)
1110 goto out;
1111
1112
1113 if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
1114 dev->name, dev)) {
1115 err = -EAGAIN;
1116 goto out_close;
1117 }
1118
1119 can_led_event(dev, CAN_LED_EVENT_OPEN);
1120
1121
1122 at91_chip_start(dev);
1123 napi_enable(&priv->napi);
1124 netif_start_queue(dev);
1125
1126 return 0;
1127
1128 out_close:
1129 close_candev(dev);
1130 out:
1131 clk_disable_unprepare(priv->clk);
1132
1133 return err;
1134 }
1135
1136
1137
1138
1139 static int at91_close(struct net_device *dev)
1140 {
1141 struct at91_priv *priv = netdev_priv(dev);
1142
1143 netif_stop_queue(dev);
1144 napi_disable(&priv->napi);
1145 at91_chip_stop(dev, CAN_STATE_STOPPED);
1146
1147 free_irq(dev->irq, dev);
1148 clk_disable_unprepare(priv->clk);
1149
1150 close_candev(dev);
1151
1152 can_led_event(dev, CAN_LED_EVENT_STOP);
1153
1154 return 0;
1155 }
1156
1157 static int at91_set_mode(struct net_device *dev, enum can_mode mode)
1158 {
1159 switch (mode) {
1160 case CAN_MODE_START:
1161 at91_chip_start(dev);
1162 netif_wake_queue(dev);
1163 break;
1164
1165 default:
1166 return -EOPNOTSUPP;
1167 }
1168
1169 return 0;
1170 }
1171
1172 static const struct net_device_ops at91_netdev_ops = {
1173 .ndo_open = at91_open,
1174 .ndo_stop = at91_close,
1175 .ndo_start_xmit = at91_start_xmit,
1176 .ndo_change_mtu = can_change_mtu,
1177 };
1178
1179 static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
1180 struct device_attribute *attr, char *buf)
1181 {
1182 struct at91_priv *priv = netdev_priv(to_net_dev(dev));
1183
1184 if (priv->mb0_id & CAN_EFF_FLAG)
1185 return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
1186 else
1187 return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
1188 }
1189
1190 static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
1191 struct device_attribute *attr, const char *buf, size_t count)
1192 {
1193 struct net_device *ndev = to_net_dev(dev);
1194 struct at91_priv *priv = netdev_priv(ndev);
1195 unsigned long can_id;
1196 ssize_t ret;
1197 int err;
1198
1199 rtnl_lock();
1200
1201 if (ndev->flags & IFF_UP) {
1202 ret = -EBUSY;
1203 goto out;
1204 }
1205
1206 err = kstrtoul(buf, 0, &can_id);
1207 if (err) {
1208 ret = err;
1209 goto out;
1210 }
1211
1212 if (can_id & CAN_EFF_FLAG)
1213 can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
1214 else
1215 can_id &= CAN_SFF_MASK;
1216
1217 priv->mb0_id = can_id;
1218 ret = count;
1219
1220 out:
1221 rtnl_unlock();
1222 return ret;
1223 }
1224
1225 static DEVICE_ATTR(mb0_id, 0644, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
1226
1227 static struct attribute *at91_sysfs_attrs[] = {
1228 &dev_attr_mb0_id.attr,
1229 NULL,
1230 };
1231
1232 static const struct attribute_group at91_sysfs_attr_group = {
1233 .attrs = at91_sysfs_attrs,
1234 };
1235
1236 #if defined(CONFIG_OF)
1237 static const struct of_device_id at91_can_dt_ids[] = {
1238 {
1239 .compatible = "atmel,at91sam9x5-can",
1240 .data = &at91_at91sam9x5_data,
1241 }, {
1242 .compatible = "atmel,at91sam9263-can",
1243 .data = &at91_at91sam9263_data,
1244 }, {
1245
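		/* sentinel */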
1246 }
1247 };
1248 MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
1249 #endif
1250
1251 static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
1252 {
1253 if (pdev->dev.of_node) {
1254 const struct of_device_id *match;
1255
1256 match = of_match_node(at91_can_dt_ids, pdev->dev.of_node);
1257 if (!match) {
1258 dev_err(&pdev->dev, "no matching node found in dtb\n");
1259 return NULL;
1260 }
1261 return (const struct at91_devtype_data *)match->data;
1262 }
1263 return (const struct at91_devtype_data *)
1264 platform_get_device_id(pdev)->driver_data;
1265 }
1266
1267 static int at91_can_probe(struct platform_device *pdev)
1268 {
1269 const struct at91_devtype_data *devtype_data;
1270 struct net_device *dev;
1271 struct at91_priv *priv;
1272 struct resource *res;
1273 struct clk *clk;
1274 void __iomem *addr;
1275 int err, irq;
1276
1277 devtype_data = at91_can_get_driver_data(pdev);
1278 if (!devtype_data) {
1279 dev_err(&pdev->dev, "no driver data\n");
1280 err = -ENODEV;
1281 goto exit;
1282 }
1283
1284 clk = clk_get(&pdev->dev, "can_clk");
1285 if (IS_ERR(clk)) {
1286 dev_err(&pdev->dev, "no clock defined\n");
1287 err = -ENODEV;
1288 goto exit;
1289 }
1290
1291 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1292 irq = platform_get_irq(pdev, 0);
1293 if (!res || irq <= 0) {
1294 err = -ENODEV;
1295 goto exit_put;
1296 }
1297
1298 if (!request_mem_region(res->start,
1299 resource_size(res),
1300 pdev->name)) {
1301 err = -EBUSY;
1302 goto exit_put;
1303 }
1304
1305 addr = ioremap_nocache(res->start, resource_size(res));
1306 if (!addr) {
1307 err = -ENOMEM;
1308 goto exit_release;
1309 }
1310
1311 dev = alloc_candev(sizeof(struct at91_priv),
1312 1 << devtype_data->tx_shift);
1313 if (!dev) {
1314 err = -ENOMEM;
1315 goto exit_iounmap;
1316 }
1317
1318 dev->netdev_ops = &at91_netdev_ops;
1319 dev->irq = irq;
1320 dev->flags |= IFF_ECHO;
1321
1322 priv = netdev_priv(dev);
1323 priv->can.clock.freq = clk_get_rate(clk);
1324 priv->can.bittiming_const = &at91_bittiming_const;
1325 priv->can.do_set_mode = at91_set_mode;
1326 priv->can.do_get_berr_counter = at91_get_berr_counter;
1327 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
1328 CAN_CTRLMODE_LISTENONLY;
1329 priv->reg_base = addr;
1330 priv->devtype_data = *devtype_data;
1331 priv->clk = clk;
1332 priv->pdata = dev_get_platdata(&pdev->dev);
1333 priv->mb0_id = 0x7ff;
1334
1335 netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
1336
1337 if (at91_is_sam9263(priv))
1338 dev->sysfs_groups[0] = &at91_sysfs_attr_group;
1339
1340 platform_set_drvdata(pdev, dev);
1341 SET_NETDEV_DEV(dev, &pdev->dev);
1342
1343 err = register_candev(dev);
1344 if (err) {
1345 dev_err(&pdev->dev, "registering netdev failed\n");
1346 goto exit_free;
1347 }
1348
1349 devm_can_led_init(dev);
1350
1351 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
1352 priv->reg_base, dev->irq);
1353
1354 return 0;
1355
1356 exit_free:
1357 free_candev(dev);
1358 exit_iounmap:
1359 iounmap(addr);
1360 exit_release:
1361 release_mem_region(res->start, resource_size(res));
1362 exit_put:
1363 clk_put(clk);
1364 exit:
1365 return err;
1366 }
1367
1368 static int at91_can_remove(struct platform_device *pdev)
1369 {
1370 struct net_device *dev = platform_get_drvdata(pdev);
1371 struct at91_priv *priv = netdev_priv(dev);
1372 struct resource *res;
1373
1374 unregister_netdev(dev);
1375
1376 iounmap(priv->reg_base);
1377
1378 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1379 release_mem_region(res->start, resource_size(res));
1380
1381 clk_put(priv->clk);
1382
1383 free_candev(dev);
1384
1385 return 0;
1386 }
1387
1388 static const struct platform_device_id at91_can_id_table[] = {
1389 {
1390 .name = "at91sam9x5_can",
1391 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
1392 }, {
1393 .name = "at91_can",
1394 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
1395 }, {
1396
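		/* sentinel */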
1397 }
1398 };
1399 MODULE_DEVICE_TABLE(platform, at91_can_id_table);
1400
1401 static struct platform_driver at91_can_driver = {
1402 .probe = at91_can_probe,
1403 .remove = at91_can_remove,
1404 .driver = {
1405 .name = KBUILD_MODNAME,
1406 .of_match_table = of_match_ptr(at91_can_dt_ids),
1407 },
1408 .id_table = at91_can_id_table,
1409 };
1410
1411 module_platform_driver(at91_can_driver);
1412
1413 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
1414 MODULE_LICENSE("GPL v2");
1415 MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");