This source file includes the following definitions:
- sysfs_write
- sysfs_read
- isr_store
- isr_show
- ier_store
- ier_show
- tdfr_store
- tdfv_show
- tdfd_store
- tlr_store
- rdfr_store
- rdfo_show
- rdfd_show
- rlr_show
- srr_store
- tdr_store
- rdr_show
- reset_ip_core
- axis_fifo_read
- axis_fifo_write
- axis_fifo_irq
- axis_fifo_open
- axis_fifo_close
- get_dts_property
- axis_fifo_probe
- axis_fifo_remove
- axis_fifo_init
- axis_fifo_exit
// SPDX-License-Identifier: GPL-2.0
/*
 * axis-fifo.c
 *
 * Xilinx AXI-Stream FIFO v4.1 IP core driver
 *
 * Provides a character-device read/write interface to the AXI-Stream
 * FIFO IP core and sysfs access to the core's registers.
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/spinlock_types.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/param.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>

#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
/* ----------------------------
 *       driver parameters
 * ----------------------------
 */

#define DRIVER_NAME "axis_fifo"

#define READ_BUF_SIZE 128U /* read buffer length in words */
#define WRITE_BUF_SIZE 128U /* write buffer length in words */
/* ----------------------------
 *     IP register offsets
 * ----------------------------
 */

#define XLLF_ISR_OFFSET  0x00000000 /* Interrupt Status */
#define XLLF_IER_OFFSET  0x00000004 /* Interrupt Enable */

#define XLLF_TDFR_OFFSET 0x00000008 /* Transmit Reset */
#define XLLF_TDFV_OFFSET 0x0000000c /* Transmit Vacancy */
#define XLLF_TDFD_OFFSET 0x00000010 /* Transmit Data */
#define XLLF_TLR_OFFSET  0x00000014 /* Transmit Length */

#define XLLF_RDFR_OFFSET 0x00000018 /* Receive Reset */
#define XLLF_RDFO_OFFSET 0x0000001c /* Receive Occupancy */
#define XLLF_RDFD_OFFSET 0x00000020 /* Receive Data */
#define XLLF_RLR_OFFSET  0x00000024 /* Receive Length */
#define XLLF_SRR_OFFSET  0x00000028 /* Local Link Reset */
#define XLLF_TDR_OFFSET  0x0000002C /* Transmit Destination */
#define XLLF_RDR_OFFSET  0x00000030 /* Receive Destination */
/* ----------------------------
 *     reset register masks
 * ----------------------------
 */

#define XLLF_RDFR_RESET_MASK 0x000000a5 /* receive reset value */
#define XLLF_TDFR_RESET_MASK 0x000000a5 /* transmit reset value */
#define XLLF_SRR_RESET_MASK  0x000000a5 /* global reset value */
/* ----------------------------
 *       interrupt masks
 * ----------------------------
 */

#define XLLF_INT_RPURE_MASK   0x80000000 /* receive under-read */
#define XLLF_INT_RPORE_MASK   0x40000000 /* receive over-read */
#define XLLF_INT_RPUE_MASK    0x20000000 /* receive underrun (empty) */
#define XLLF_INT_TPOE_MASK    0x10000000 /* transmit overrun */
#define XLLF_INT_TC_MASK      0x08000000 /* transmit complete */
#define XLLF_INT_RC_MASK      0x04000000 /* receive complete */
#define XLLF_INT_TSE_MASK     0x02000000 /* transmit length mismatch */
#define XLLF_INT_TRC_MASK     0x01000000 /* transmit reset complete */
#define XLLF_INT_RRC_MASK     0x00800000 /* receive reset complete */
#define XLLF_INT_TFPF_MASK    0x00400000 /* tx fifo programmable full */
#define XLLF_INT_TFPE_MASK    0x00200000 /* tx fifo programmable empty */
#define XLLF_INT_RFPF_MASK    0x00100000 /* rx fifo programmable full */
#define XLLF_INT_RFPE_MASK    0x00080000 /* rx fifo programmable empty */
#define XLLF_INT_ALL_MASK     0xfff80000 /* all the ints */
#define XLLF_INT_ERROR_MASK   0xf2000000 /* all the errors */
#define XLLF_INT_RXERROR_MASK 0xe0000000 /* all the receive errors */
#define XLLF_INT_TXERROR_MASK 0x12000000 /* all the transmit errors */
/* ----------------------------
 *           globals
 * ----------------------------
 */

static struct class *axis_fifo_driver_class; /* char device class */

static int read_timeout = 1000; /* ms to wait before read() times out */
static int write_timeout = 1000; /* ms to wait before write() times out */
/* ----------------------------
 * module command-line arguments
 * ----------------------------
 */

module_param(read_timeout, int, 0444);
MODULE_PARM_DESC(read_timeout, "ms to wait before blocking read() timing out; set to -1 for no timeout");
module_param(write_timeout, int, 0444);
MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out; set to -1 for no timeout");
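/*
 * Both timeouts can be set when the module is loaded; a hedged usage
 * sketch (the module file name depends on how the driver is built and
 * is assumed here to be axis_fifo.ko):
 *
 *        insmod axis_fifo.ko read_timeout=2000 write_timeout=-1
 *
 * A negative value makes the corresponding blocking call wait forever.
 */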

/* ----------------------------
 *            types
 * ----------------------------
 */
struct axis_fifo {
        int irq; /* interrupt */
        struct resource *mem; /* physical memory */
        void __iomem *base_addr; /* kernel space memory */

        unsigned int rx_fifo_depth; /* max words in the receive fifo */
        unsigned int tx_fifo_depth; /* max words in the transmit fifo */
        int has_rx_fifo; /* whether the IP has the rx fifo enabled */
        int has_tx_fifo; /* whether the IP has the tx fifo enabled */

        wait_queue_head_t read_queue; /* wait queue for asynchronous read */
        spinlock_t read_queue_lock; /* lock for reading waitqueue */
        wait_queue_head_t write_queue; /* wait queue for asynchronous write */
        spinlock_t write_queue_lock; /* lock for writing waitqueue */
        unsigned int write_flags; /* write file flags */
        unsigned int read_flags; /* read file flags */

        struct device *dt_device; /* device created from the device tree */
        struct device *device; /* device associated with char_device */
        dev_t devt; /* our char device number */
        struct cdev char_device; /* our char device */
};
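/*
 * Locking/wakeup overview: the interrupt handler wakes read_queue on a
 * "receive complete" interrupt and write_queue on a "transmit complete"
 * interrupt; read()/write() block on those queues via
 * wait_event_interruptible_lock_irq_timeout(), with read_queue_lock or
 * write_queue_lock held while the wait condition is evaluated.
 */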

/* ----------------------------
 *         sysfs entries
 * ----------------------------
 */
static ssize_t sysfs_write(struct device *dev, const char *buf,
                           size_t count, unsigned int addr_offset)
{
        struct axis_fifo *fifo = dev_get_drvdata(dev);
        unsigned long tmp;
        int rc;

        rc = kstrtoul(buf, 0, &tmp);
        if (rc < 0)
                return rc;

        iowrite32(tmp, fifo->base_addr + addr_offset);

        return count;
}

static ssize_t sysfs_read(struct device *dev, char *buf,
                          unsigned int addr_offset)
{
        struct axis_fifo *fifo = dev_get_drvdata(dev);
        unsigned int read_val;
        unsigned int len;
        char tmp[32];

        read_val = ioread32(fifo->base_addr + addr_offset);
        len = snprintf(tmp, sizeof(tmp), "0x%x\n", read_val);
        memcpy(buf, tmp, len);

        return len;
}

static ssize_t isr_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        return sysfs_write(dev, buf, count, XLLF_ISR_OFFSET);
}

static ssize_t isr_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        return sysfs_read(dev, buf, XLLF_ISR_OFFSET);
}

static DEVICE_ATTR_RW(isr);

static ssize_t ier_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        return sysfs_write(dev, buf, count, XLLF_IER_OFFSET);
}

static ssize_t ier_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        return sysfs_read(dev, buf, XLLF_IER_OFFSET);
}

static DEVICE_ATTR_RW(ier);

static ssize_t tdfr_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        return sysfs_write(dev, buf, count, XLLF_TDFR_OFFSET);
}

static DEVICE_ATTR_WO(tdfr);

static ssize_t tdfv_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return sysfs_read(dev, buf, XLLF_TDFV_OFFSET);
}

static DEVICE_ATTR_RO(tdfv);

static ssize_t tdfd_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        return sysfs_write(dev, buf, count, XLLF_TDFD_OFFSET);
}

static DEVICE_ATTR_WO(tdfd);

static ssize_t tlr_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        return sysfs_write(dev, buf, count, XLLF_TLR_OFFSET);
}

static DEVICE_ATTR_WO(tlr);

static ssize_t rdfr_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        return sysfs_write(dev, buf, count, XLLF_RDFR_OFFSET);
}

static DEVICE_ATTR_WO(rdfr);

static ssize_t rdfo_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return sysfs_read(dev, buf, XLLF_RDFO_OFFSET);
}

static DEVICE_ATTR_RO(rdfo);

static ssize_t rdfd_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return sysfs_read(dev, buf, XLLF_RDFD_OFFSET);
}

static DEVICE_ATTR_RO(rdfd);

static ssize_t rlr_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        return sysfs_read(dev, buf, XLLF_RLR_OFFSET);
}

static DEVICE_ATTR_RO(rlr);

static ssize_t srr_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        return sysfs_write(dev, buf, count, XLLF_SRR_OFFSET);
}

static DEVICE_ATTR_WO(srr);

static ssize_t tdr_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        return sysfs_write(dev, buf, count, XLLF_TDR_OFFSET);
}

static DEVICE_ATTR_WO(tdr);

static ssize_t rdr_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        return sysfs_read(dev, buf, XLLF_RDR_OFFSET);
}

static DEVICE_ATTR_RO(rdr);

static struct attribute *axis_fifo_attrs[] = {
        &dev_attr_isr.attr,
        &dev_attr_ier.attr,
        &dev_attr_tdfr.attr,
        &dev_attr_tdfv.attr,
        &dev_attr_tdfd.attr,
        &dev_attr_tlr.attr,
        &dev_attr_rdfr.attr,
        &dev_attr_rdfo.attr,
        &dev_attr_rdfd.attr,
        &dev_attr_rlr.attr,
        &dev_attr_srr.attr,
        &dev_attr_tdr.attr,
        &dev_attr_rdr.attr,
        NULL,
};

static const struct attribute_group axis_fifo_attrs_group = {
        .name = "ip_registers",
        .attrs = axis_fifo_attrs,
};
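/*
 * The group above is attached to the class device in axis_fifo_probe(),
 * so each register appears under
 * /sys/class/axis_fifo/<device>/ip_registers/.  A hedged usage sketch
 * (the device name, including the 0x43c00000 base address, is an
 * assumption and depends on the hardware design):
 *
 *        cat /sys/class/axis_fifo/axis_fifo_43c00000/ip_registers/rdfo
 *        echo 0xffffffff > /sys/class/axis_fifo/axis_fifo_43c00000/ip_registers/isr
 *
 * Reads return the register value in hex; writes accept any value that
 * kstrtoul() parses and store it to the register unmodified.
 */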

/* ----------------------------
 *        implementation
 * ----------------------------
 */
static void reset_ip_core(struct axis_fifo *fifo)
{
        iowrite32(XLLF_SRR_RESET_MASK, fifo->base_addr + XLLF_SRR_OFFSET);
        iowrite32(XLLF_TDFR_RESET_MASK, fifo->base_addr + XLLF_TDFR_OFFSET);
        iowrite32(XLLF_RDFR_RESET_MASK, fifo->base_addr + XLLF_RDFR_OFFSET);
        iowrite32(XLLF_INT_TC_MASK | XLLF_INT_RC_MASK | XLLF_INT_RPURE_MASK |
                  XLLF_INT_RPORE_MASK | XLLF_INT_RPUE_MASK |
                  XLLF_INT_TPOE_MASK | XLLF_INT_TSE_MASK,
                  fifo->base_addr + XLLF_IER_OFFSET);
        iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
}

/* reads a single packet from the fifo as dictated by the tlast signal */
static ssize_t axis_fifo_read(struct file *f, char __user *buf,
                              size_t len, loff_t *off)
{
        struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
        size_t bytes_available;
        unsigned int words_available;
        unsigned int copied;
        unsigned int copy;
        unsigned int i;
        int ret;
        u32 tmp_buf[READ_BUF_SIZE];

        if (fifo->read_flags & O_NONBLOCK) {
                /* opened in non-blocking mode
                 * return if there are no packets available
                 */
                if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET))
                        return -EAGAIN;
        } else {
                /* opened in blocking mode
                 * wait for a packet available interrupt (or timeout)
                 * if nothing is currently available
                 */
                spin_lock_irq(&fifo->read_queue_lock);
                ret = wait_event_interruptible_lock_irq_timeout
                        (fifo->read_queue,
                         ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
                         fifo->read_queue_lock,
                         (read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
                                MAX_SCHEDULE_TIMEOUT);
                spin_unlock_irq(&fifo->read_queue_lock);

                if (ret == 0) {
                        /* timeout occurred */
                        dev_dbg(fifo->dt_device, "read timeout");
                        return -EAGAIN;
                } else if (ret == -ERESTARTSYS) {
                        /* signal received */
                        return -ERESTARTSYS;
                } else if (ret < 0) {
                        dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
                                ret);
                        return ret;
                }
        }

        bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
        if (!bytes_available) {
                dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
                reset_ip_core(fifo);
                return -EIO;
        }

        if (bytes_available > len) {
                dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
                        bytes_available, len);
                reset_ip_core(fifo);
                return -EINVAL;
        }

        if (bytes_available % sizeof(u32)) {
                /* this probably can't happen unless IP
                 * registers were previously mishandled
                 */
                dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
                reset_ip_core(fifo);
                return -EIO;
        }

        words_available = bytes_available / sizeof(u32);

        /* read data into an intermediate buffer, copying the contents
         * to userspace when the buffer is full
         */
        copied = 0;
        while (words_available > 0) {
                copy = min(words_available, READ_BUF_SIZE);

                for (i = 0; i < copy; i++) {
                        tmp_buf[i] = ioread32(fifo->base_addr +
                                              XLLF_RDFD_OFFSET);
                }

                if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
                                 copy * sizeof(u32))) {
                        reset_ip_core(fifo);
                        return -EFAULT;
                }

                copied += copy;
                words_available -= copy;
        }

        return bytes_available;
}
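/*
 * Userspace read sketch (hedged): each read() returns exactly one
 * packet, so the buffer must be a multiple of 4 bytes and at least as
 * large as the biggest expected packet.  The device node name is an
 * assumption; it depends on the core's mapped base address.
 *
 *        int fd = open("/dev/axis_fifo_43c00000", O_RDONLY);
 *        uint32_t pkt[256];
 *        ssize_t n = read(fd, pkt, sizeof(pkt)); // n = packet size in bytes
 */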

static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
                               size_t len, loff_t *off)
{
        struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
        unsigned int words_to_write;
        unsigned int copied;
        unsigned int copy;
        unsigned int i;
        int ret;
        u32 tmp_buf[WRITE_BUF_SIZE];

        if (len % sizeof(u32)) {
                dev_err(fifo->dt_device,
                        "tried to send a packet that isn't word-aligned\n");
                return -EINVAL;
        }

        words_to_write = len / sizeof(u32);

        if (!words_to_write) {
                dev_err(fifo->dt_device,
                        "tried to send a packet of length 0\n");
                return -EINVAL;
        }

        if (words_to_write > fifo->tx_fifo_depth) {
                dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n",
                        words_to_write, fifo->tx_fifo_depth);
                return -EINVAL;
        }

        if (fifo->write_flags & O_NONBLOCK) {
                /* opened in non-blocking mode
                 * return if there is not enough room available in the fifo
                 */
                if (words_to_write > ioread32(fifo->base_addr +
                                              XLLF_TDFV_OFFSET)) {
                        return -EAGAIN;
                }
        } else {
                /* opened in blocking mode
                 * wait for a transmit complete interrupt (or timeout)
                 * if there isn't currently enough room in the fifo
                 */
                spin_lock_irq(&fifo->write_queue_lock);
                ret = wait_event_interruptible_lock_irq_timeout
                        (fifo->write_queue,
                         ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
                                >= words_to_write,
                         fifo->write_queue_lock,
                         (write_timeout >= 0) ?
                                msecs_to_jiffies(write_timeout) :
                                MAX_SCHEDULE_TIMEOUT);
                spin_unlock_irq(&fifo->write_queue_lock);

                if (ret == 0) {
                        /* timeout occurred */
                        dev_dbg(fifo->dt_device, "write timeout\n");
                        return -EAGAIN;
                } else if (ret == -ERESTARTSYS) {
                        /* signal received */
                        return -ERESTARTSYS;
                } else if (ret < 0) {
                        /* unknown error */
                        dev_err(fifo->dt_device,
                                "wait_event_interruptible_timeout() error in write (ret=%i)\n",
                                ret);
                        return ret;
                }
        }

        /* write data from an intermediate buffer into the fifo IP, refilling
         * the buffer with userspace data as needed
         */
        copied = 0;
        while (words_to_write > 0) {
                copy = min(words_to_write, WRITE_BUF_SIZE);

                if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
                                   copy * sizeof(u32))) {
                        reset_ip_core(fifo);
                        return -EFAULT;
                }

                for (i = 0; i < copy; i++)
                        iowrite32(tmp_buf[i], fifo->base_addr +
                                  XLLF_TDFD_OFFSET);

                copied += copy;
                words_to_write -= copy;
        }

        /* write packet size to fifo */
        iowrite32(copied * sizeof(u32), fifo->base_addr + XLLF_TLR_OFFSET);

        return (ssize_t)copied * sizeof(u32);
}
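/*
 * Userspace write sketch (hedged): each write() is queued as a single
 * packet and must be a non-zero multiple of 4 bytes that fits in the
 * transmit fifo.  The device node name is an assumption; it depends on
 * the core's mapped base address.
 *
 *        int fd = open("/dev/axis_fifo_43c00000", O_WRONLY);
 *        uint32_t pkt[4] = { 1, 2, 3, 4 };
 *        ssize_t n = write(fd, pkt, sizeof(pkt)); // sends one 16-byte packet
 */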

static irqreturn_t axis_fifo_irq(int irq, void *dw)
{
        struct axis_fifo *fifo = (struct axis_fifo *)dw;
        unsigned int pending_interrupts;

        do {
                pending_interrupts = ioread32(fifo->base_addr +
                                              XLLF_IER_OFFSET) &
                                     ioread32(fifo->base_addr
                                              + XLLF_ISR_OFFSET);
                if (pending_interrupts & XLLF_INT_RC_MASK) {
                        /* packet received */

                        /* wake the reader process if it is waiting */
                        wake_up(&fifo->read_queue);

                        /* clear interrupt */
                        iowrite32(XLLF_INT_RC_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_TC_MASK) {
                        /* packet sent */

                        /* wake the writer process if it is waiting */
                        wake_up(&fifo->write_queue);

                        iowrite32(XLLF_INT_TC_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_TFPF_MASK) {
                        /* transmit fifo programmable full */

                        iowrite32(XLLF_INT_TFPF_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_TFPE_MASK) {
                        /* transmit fifo programmable empty */

                        iowrite32(XLLF_INT_TFPE_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_RFPF_MASK) {
                        /* receive fifo programmable full */

                        iowrite32(XLLF_INT_RFPF_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_RFPE_MASK) {
                        /* receive fifo programmable empty */

                        iowrite32(XLLF_INT_RFPE_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_TRC_MASK) {
                        /* transmit reset complete */

                        iowrite32(XLLF_INT_TRC_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_RRC_MASK) {
                        /* receive reset complete */

                        iowrite32(XLLF_INT_RRC_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_RPURE_MASK) {
                        /* receive fifo under-read error */
                        dev_err(fifo->dt_device,
                                "receive under-read interrupt\n");

                        iowrite32(XLLF_INT_RPURE_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_RPORE_MASK) {
                        /* receive fifo over-read error */
                        dev_err(fifo->dt_device,
                                "receive over-read interrupt\n");

                        iowrite32(XLLF_INT_RPORE_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_RPUE_MASK) {
                        /* receive fifo underrun error */
                        dev_err(fifo->dt_device,
                                "receive underrun error interrupt\n");

                        iowrite32(XLLF_INT_RPUE_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_TPOE_MASK) {
                        /* transmit fifo overrun error */
                        dev_err(fifo->dt_device,
                                "transmit overrun error interrupt\n");

                        iowrite32(XLLF_INT_TPOE_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts & XLLF_INT_TSE_MASK) {
                        /* transmit length mismatch error */
                        dev_err(fifo->dt_device,
                                "transmit length mismatch error interrupt\n");

                        iowrite32(XLLF_INT_TSE_MASK & XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                } else if (pending_interrupts) {
                        /* unknown interrupt type */
                        dev_err(fifo->dt_device,
                                "unknown interrupt(s) 0x%x\n",
                                pending_interrupts);

                        iowrite32(XLLF_INT_ALL_MASK,
                                  fifo->base_addr + XLLF_ISR_OFFSET);
                }
        } while (pending_interrupts);

        return IRQ_HANDLED;
}

static int axis_fifo_open(struct inode *inod, struct file *f)
{
        struct axis_fifo *fifo = (struct axis_fifo *)container_of(inod->i_cdev,
                                        struct axis_fifo, char_device);
        f->private_data = fifo;

        if (((f->f_flags & O_ACCMODE) == O_WRONLY) ||
            ((f->f_flags & O_ACCMODE) == O_RDWR)) {
                if (fifo->has_tx_fifo) {
                        fifo->write_flags = f->f_flags;
                } else {
                        dev_err(fifo->dt_device, "tried to open device for write but the transmit fifo is disabled\n");
                        return -EPERM;
                }
        }

        if (((f->f_flags & O_ACCMODE) == O_RDONLY) ||
            ((f->f_flags & O_ACCMODE) == O_RDWR)) {
                if (fifo->has_rx_fifo) {
                        fifo->read_flags = f->f_flags;
                } else {
                        dev_err(fifo->dt_device, "tried to open device for read but the receive fifo is disabled\n");
                        return -EPERM;
                }
        }

        return 0;
}

static int axis_fifo_close(struct inode *inod, struct file *f)
{
        f->private_data = NULL;

        return 0;
}

static const struct file_operations fops = {
        .owner = THIS_MODULE,
        .open = axis_fifo_open,
        .release = axis_fifo_close,
        .read = axis_fifo_read,
        .write = axis_fifo_write
};

/* read named property from the device tree */
static int get_dts_property(struct axis_fifo *fifo,
                            char *name, unsigned int *var)
{
        int rc;

        rc = of_property_read_u32(fifo->dt_device->of_node, name, var);
        if (rc) {
                dev_err(fifo->dt_device, "couldn't read IP dts property '%s'",
                        name);
                return rc;
        }
        dev_dbg(fifo->dt_device, "dts property '%s' = %u\n",
                name, *var);

        return 0;
}

static int axis_fifo_probe(struct platform_device *pdev)
{
        struct resource *r_irq; /* interrupt resources */
        struct resource *r_mem; /* IO mem resources */
        struct device *dev = &pdev->dev; /* OS device (from device tree) */
        struct axis_fifo *fifo = NULL;

        char device_name[32];

        int rc = 0; /* error return value */

        /* IP properties retrieved from the device tree */
        unsigned int rxd_tdata_width;
        unsigned int txc_tdata_width;
        unsigned int txd_tdata_width;
        unsigned int tdest_width;
        unsigned int tid_width;
        unsigned int tuser_width;
        unsigned int data_interface_type;
        unsigned int has_tdest;
        unsigned int has_tid;
        unsigned int has_tkeep;
        unsigned int has_tstrb;
        unsigned int has_tuser;
        unsigned int rx_fifo_depth;
        unsigned int rx_programmable_empty_threshold;
        unsigned int rx_programmable_full_threshold;
        unsigned int axi_id_width;
        unsigned int axi4_data_width;
        unsigned int select_xpm;
        unsigned int tx_fifo_depth;
        unsigned int tx_programmable_empty_threshold;
        unsigned int tx_programmable_full_threshold;
        unsigned int use_rx_cut_through;
        unsigned int use_rx_data;
        unsigned int use_tx_control;
        unsigned int use_tx_cut_through;
        unsigned int use_tx_data;

        /* ----------------------------
         *     init wrapper container
         * ----------------------------
         */

        fifo = devm_kmalloc(dev, sizeof(*fifo), GFP_KERNEL);
        if (!fifo)
                return -ENOMEM;

        dev_set_drvdata(dev, fifo);
        fifo->dt_device = dev;

        init_waitqueue_head(&fifo->read_queue);
        init_waitqueue_head(&fifo->write_queue);

        spin_lock_init(&fifo->read_queue_lock);
        spin_lock_init(&fifo->write_queue_lock);

        /* ----------------------------
         *   init device memory space
         * ----------------------------
         */

        /* get iospace for the device */
        r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r_mem) {
                dev_err(fifo->dt_device, "invalid address\n");
                rc = -ENODEV;
                goto err_initial;
        }

        fifo->mem = r_mem;

        /* request physical memory */
        if (!request_mem_region(fifo->mem->start, resource_size(fifo->mem),
                                DRIVER_NAME)) {
                dev_err(fifo->dt_device,
                        "couldn't lock memory region at 0x%pa\n",
                        &fifo->mem->start);
                rc = -EBUSY;
                goto err_initial;
        }
        dev_dbg(fifo->dt_device, "got memory location [0x%pa - 0x%pa]\n",
                &fifo->mem->start, &fifo->mem->end);

        /* map physical memory to kernel virtual address space */
        fifo->base_addr = ioremap(fifo->mem->start, resource_size(fifo->mem));
        if (!fifo->base_addr) {
                dev_err(fifo->dt_device, "couldn't map physical memory\n");
                rc = -ENOMEM;
                goto err_mem;
        }
        dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);

        /* create unique device name */
        snprintf(device_name, sizeof(device_name), "%s_%pa",
                 DRIVER_NAME, &fifo->mem->start);

        dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);

        /* ----------------------------
         *          init IP
         * ----------------------------
         */

        /* retrieve device tree properties */
        rc = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width",
                              &rxd_tdata_width);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,axi-str-txc-tdata-width",
                              &txc_tdata_width);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width",
                              &txd_tdata_width);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,axis-tdest-width", &tdest_width);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,axis-tid-width", &tid_width);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,axis-tuser-width", &tuser_width);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,data-interface-type",
                              &data_interface_type);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,has-axis-tdest", &has_tdest);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,has-axis-tid", &has_tid);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,has-axis-tkeep", &has_tkeep);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,has-axis-tstrb", &has_tstrb);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,has-axis-tuser", &has_tuser);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,rx-fifo-depth", &rx_fifo_depth);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,rx-fifo-pe-threshold",
                              &rx_programmable_empty_threshold);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,rx-fifo-pf-threshold",
                              &rx_programmable_full_threshold);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,s-axi-id-width", &axi_id_width);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,s-axi4-data-width", &axi4_data_width);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,select-xpm", &select_xpm);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,tx-fifo-depth", &tx_fifo_depth);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,tx-fifo-pe-threshold",
                              &tx_programmable_empty_threshold);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,tx-fifo-pf-threshold",
                              &tx_programmable_full_threshold);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,use-rx-cut-through",
                              &use_rx_cut_through);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,use-rx-data", &use_rx_data);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,use-tx-ctrl", &use_tx_control);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,use-tx-cut-through",
                              &use_tx_cut_through);
        if (rc)
                goto err_unmap;
        rc = get_dts_property(fifo, "xlnx,use-tx-data", &use_tx_data);
        if (rc)
                goto err_unmap;

        /* check validity of device tree properties */
        if (rxd_tdata_width != 32) {
                dev_err(fifo->dt_device,
                        "rxd_tdata_width width [%u] unsupported\n",
                        rxd_tdata_width);
                rc = -EIO;
                goto err_unmap;
        }
        if (txd_tdata_width != 32) {
                dev_err(fifo->dt_device,
                        "txd_tdata_width width [%u] unsupported\n",
                        txd_tdata_width);
                rc = -EIO;
                goto err_unmap;
        }
        if (has_tdest) {
                dev_err(fifo->dt_device, "tdest not supported\n");
                rc = -EIO;
                goto err_unmap;
        }
        if (has_tid) {
                dev_err(fifo->dt_device, "tid not supported\n");
                rc = -EIO;
                goto err_unmap;
        }
        if (has_tkeep) {
                dev_err(fifo->dt_device, "tkeep not supported\n");
                rc = -EIO;
                goto err_unmap;
        }
        if (has_tstrb) {
                dev_err(fifo->dt_device, "tstrb not supported\n");
                rc = -EIO;
                goto err_unmap;
        }
        if (has_tuser) {
                dev_err(fifo->dt_device, "tuser not supported\n");
                rc = -EIO;
                goto err_unmap;
        }
        if (use_rx_cut_through) {
                dev_err(fifo->dt_device, "rx cut-through not supported\n");
                rc = -EIO;
                goto err_unmap;
        }
        if (use_tx_cut_through) {
                dev_err(fifo->dt_device, "tx cut-through not supported\n");
                rc = -EIO;
                goto err_unmap;
        }
        if (use_tx_control) {
                dev_err(fifo->dt_device, "tx control not supported\n");
                rc = -EIO;
                goto err_unmap;
        }

        /* set device wrapper properties based on the IP configuration */
        fifo->rx_fifo_depth = rx_fifo_depth;
        /* IP sets TDFV to fifo depth - 4 so we will do the same */
        fifo->tx_fifo_depth = tx_fifo_depth - 4;
        fifo->has_rx_fifo = use_rx_data;
        fifo->has_tx_fifo = use_tx_data;

        reset_ip_core(fifo);

        /* ----------------------------
         *    init device interrupts
         * ----------------------------
         */

        /* get IRQ resource */
        r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!r_irq) {
                dev_err(fifo->dt_device, "no IRQ found for 0x%pa\n",
                        &fifo->mem->start);
                rc = -EIO;
                goto err_unmap;
        }

        /* request IRQ */
        fifo->irq = r_irq->start;
        rc = request_irq(fifo->irq, &axis_fifo_irq, 0, DRIVER_NAME, fifo);
        if (rc) {
                dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
                        fifo->irq);
                goto err_unmap;
        }

        /* ----------------------------
         *      init char device
         * ----------------------------
         */

        /* allocate device number */
        rc = alloc_chrdev_region(&fifo->devt, 0, 1, DRIVER_NAME);
        if (rc < 0)
                goto err_irq;
        dev_dbg(fifo->dt_device, "allocated device number major %i minor %i\n",
                MAJOR(fifo->devt), MINOR(fifo->devt));

        /* create driver file */
        fifo->device = device_create(axis_fifo_driver_class, NULL, fifo->devt,
                                     NULL, device_name);
        if (IS_ERR(fifo->device)) {
                dev_err(fifo->dt_device,
                        "couldn't create driver file\n");
                rc = PTR_ERR(fifo->device);
                goto err_chrdev_region;
        }
        dev_set_drvdata(fifo->device, fifo);

        /* create character device */
        cdev_init(&fifo->char_device, &fops);
        rc = cdev_add(&fifo->char_device, fifo->devt, 1);
        if (rc < 0) {
                dev_err(fifo->dt_device, "couldn't create character device\n");
                goto err_dev;
        }

        /* create sysfs entries */
        rc = sysfs_create_group(&fifo->device->kobj, &axis_fifo_attrs_group);
        if (rc < 0) {
                dev_err(fifo->dt_device, "couldn't register sysfs group\n");
                goto err_cdev;
        }

        dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i, major=%i, minor=%i\n",
                 &fifo->mem->start, &fifo->base_addr, fifo->irq,
                 MAJOR(fifo->devt), MINOR(fifo->devt));

        return 0;

err_cdev:
        cdev_del(&fifo->char_device);
err_dev:
        device_destroy(axis_fifo_driver_class, fifo->devt);
err_chrdev_region:
        unregister_chrdev_region(fifo->devt, 1);
err_irq:
        free_irq(fifo->irq, fifo);
err_unmap:
        iounmap(fifo->base_addr);
err_mem:
        release_mem_region(fifo->mem->start, resource_size(fifo->mem));
err_initial:
        dev_set_drvdata(dev, NULL);
        return rc;
}

static int axis_fifo_remove(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct axis_fifo *fifo = dev_get_drvdata(dev);

        sysfs_remove_group(&fifo->device->kobj, &axis_fifo_attrs_group);
        cdev_del(&fifo->char_device);
        dev_set_drvdata(fifo->device, NULL);
        device_destroy(axis_fifo_driver_class, fifo->devt);
        unregister_chrdev_region(fifo->devt, 1);
        free_irq(fifo->irq, fifo);
        iounmap(fifo->base_addr);
        release_mem_region(fifo->mem->start, resource_size(fifo->mem));
        dev_set_drvdata(dev, NULL);
        return 0;
}

static const struct of_device_id axis_fifo_of_match[] = {
        { .compatible = "xlnx,axi-fifo-mm-s-4.1", },
        {},
};
MODULE_DEVICE_TABLE(of, axis_fifo_of_match);
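/*
 * A hedged device-tree node sketch matching the properties read in
 * axis_fifo_probe(); the node name, address, interrupt specifier and
 * property values are placeholders that depend on the hardware design:
 *
 *        axi_fifo_mm_s@43c00000 {
 *                compatible = "xlnx,axi-fifo-mm-s-4.1";
 *                reg = <0x43c00000 0x10000>;
 *                interrupts = <0 29 4>;
 *                xlnx,axi-str-rxd-tdata-width = <0x20>;
 *                xlnx,axi-str-txd-tdata-width = <0x20>;
 *                xlnx,rx-fifo-depth = <0x200>;
 *                xlnx,tx-fifo-depth = <0x200>;
 *                xlnx,use-rx-data = <0x1>;
 *                xlnx,use-tx-data = <0x1>;
 *                (plus the remaining xlnx,* properties queried above)
 *        };
 */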

static struct platform_driver axis_fifo_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = axis_fifo_of_match,
        },
        .probe = axis_fifo_probe,
        .remove = axis_fifo_remove,
};

static int __init axis_fifo_init(void)
{
        pr_info("axis-fifo driver loaded with parameters read_timeout = %i, write_timeout = %i\n",
                read_timeout, write_timeout);
        axis_fifo_driver_class = class_create(THIS_MODULE, DRIVER_NAME);
        if (IS_ERR(axis_fifo_driver_class))
                return PTR_ERR(axis_fifo_driver_class);
        return platform_driver_register(&axis_fifo_driver);
}

module_init(axis_fifo_init);

static void __exit axis_fifo_exit(void)
{
        platform_driver_unregister(&axis_fifo_driver);
        class_destroy(axis_fifo_driver_class);
}

module_exit(axis_fifo_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jacob Feder <jacobsfeder@gmail.com>");
MODULE_DESCRIPTION("Xilinx AXI-Stream FIFO v4.1 IP core driver");