/*
 * CARMA DATA-FPGA Access Driver
 *
 * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

/*
 * FPGA Memory Dump Format
 *
 * FPGA #0 control registers (32 x 32-bit words)
 * FPGA #1 control registers (32 x 32-bit words)
 * FPGA #2 control registers (32 x 32-bit words)
 * FPGA #3 control registers (32 x 32-bit words)
 * SYSFPGA control registers (32 x 32-bit words)
 * FPGA #0 correlation array (NUM_CORL0 correlation blocks)
 * FPGA #1 correlation array (NUM_CORL1 correlation blocks)
 * FPGA #2 correlation array (NUM_CORL2 correlation blocks)
 * FPGA #3 correlation array (NUM_CORL3 correlation blocks)
 *
 * Each correlation array consists of:
 *
 * Correlation Data      (2 x NUM_LAGSn x 32-bit words)
 * Pipeline Metadata     (2 x NUM_METAn x 32-bit words)
 * Quantization Counters (2 x NUM_QCNTn x 32-bit words)
 *
 * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from
 * the FPGA configuration registers. They do not change once the FPGAs
 * have been programmed; they only change on re-programming.
 */

/*
 * Basic Description:
 *
 * This driver is used to capture correlation spectra off of the four data
 * processing FPGAs. The FPGAs are often reprogrammed at runtime, so this
 * driver supports dynamic enable/disable of capture while the device
 * remains open.
 *
 * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast
 * capture rate, all buffers are pre-allocated to avoid any potentially long
 * running memory allocations while capturing.
 *
 * There are two lists and one pointer which are used to keep track of the
 * different states of data buffers.
 *
 * 1) free list
 * This list holds all empty data buffers which are ready to receive data.
 *
 * 2) inflight pointer
 * This pointer holds the currently inflight data buffer. This buffer is having
 * data copied into it by the DMA engine.
 *
 * 3) used list
 * This list holds data buffers which have been filled, and are waiting to be
 * read by userspace.
 *
 * All buffers start life on the free list, then move successively to the
 * inflight pointer, and then to the used list. After they have been read by
 * userspace, they are moved back to the free list. The cycle repeats as long
 * as necessary.
 *
 * It should be noted that all buffers are mapped and ready for DMA when they
 * are on any of the three lists. They are only unmapped when they are in the
 * process of being read by userspace.
 */
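
/*
 * For illustration only, a rough sketch of how a userspace consumer might
 * drive the lifecycle described above. The sysfs and device node paths are
 * assumptions derived from the "enable" attribute and miscdevice name
 * registered later in this file; adjust them for the actual system:
 *
 *   // shell: echo 1 > /sys/class/misc/carma-fpga/enable
 *   int fd = open("/dev/carma-fpga", O_RDONLY);
 *   char buf[4096];
 *   ssize_t n;
 *   while ((n = read(fd, buf, sizeof(buf))) > 0) {
 *       // consume data; complete dumps follow the
 *       // "FPGA Memory Dump Format" described above
 *   }
 *   close(fd);
 */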

/*
 * Notes on the IRQ masking scheme:
 *
 * The IRQ masking scheme here is different from most other hardware. The only
 * way for the DATA-FPGAs to detect if the kernel has taken too long to copy
 * the data is if the status registers are not cleared before the next
 * correlation data dump is ready.
 *
 * The interrupt line is connected to the status registers, such that when they
 * are cleared, the interrupt is de-asserted. Therein lies our problem. We need
 * to schedule a long-running DMA operation and return from the interrupt
 * handler quickly, but we cannot clear the status registers.
 *
 * To handle this, the system controller FPGA has the capability to connect the
 * interrupt line to a user-controlled GPIO pin. This pin is driven high
 * (unasserted) and left that way. To mask the interrupt, we change the
 * interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
 */
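
/*
 * In code terms (see data_disable_interrupts() and data_enable_interrupts()
 * below): masking is a single iowrite32be(0x2F) to SYS_IRQ_SOURCE_CTL, which
 * selects the always-deasserted GPIO pin as the interrupt source; unmasking
 * clears each FPGA's MMAP_REG_STATUS register and then writes 0x3F to
 * SYS_IRQ_SOURCE_CTL to switch back to the external (FPGA) interrupt source.
 */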

#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/io.h>

/* system controller registers */
#define SYS_IRQ_SOURCE_CTL	0x24
#define SYS_IRQ_OUTPUT_EN	0x28
#define SYS_IRQ_OUTPUT_DATA	0x2C
#define SYS_IRQ_INPUT_DATA	0x30
#define SYS_FPGA_CONFIG_STATUS	0x44

/* GPIO IRQ line assignment */
#define IRQ_CORL_DONE		0x10

/* FPGA registers */
#define MMAP_REG_VERSION	0x00
#define MMAP_REG_CORL_CONF1	0x08
#define MMAP_REG_CORL_CONF2	0x0C
#define MMAP_REG_STATUS		0x48

#define SYS_FPGA_BLOCK		0xF0000000

#define DATA_FPGA_START		0x400000
#define DATA_FPGA_SIZE		0x80000

static const char drv_name[] = "carma-fpga";

#define NUM_FPGA	4

#define MIN_DATA_BUFS	8
#define MAX_DATA_BUFS	64

struct fpga_info {
	unsigned int num_lag_ram;
	unsigned int blk_size;
};

struct data_buf {
	struct list_head entry;
	void *vaddr;
	struct scatterlist *sglist;
	int sglen;
	int nr_pages;
	size_t size;
};

struct fpga_device {
	/* character device */
	struct miscdevice miscdev;
	struct device *dev;
	struct mutex mutex;

	/* reference count */
	struct kref ref;

	/* FPGA registers and information */
	struct fpga_info info[NUM_FPGA];
	void __iomem *regs;
	int irq;

	/* FPGA Physical Address/Size Information */
	resource_size_t phys_addr;
	size_t phys_size;

	/* DMA structures */
	struct sg_table corl_table;
	unsigned int corl_nents;
	struct dma_chan *chan;

	/* Protection for all members below */
	spinlock_t lock;

	/* Device enable/disable flag */
	bool enabled;

	/* Correlation data buffers */
	wait_queue_head_t wait;
	struct list_head free;
	struct list_head used;
	struct data_buf *inflight;

	/* Information about data buffers */
	unsigned int num_dropped;
	unsigned int num_buffers;
	size_t bufsize;
	struct dentry *dbg_entry;
};

struct fpga_reader {
	struct fpga_device *priv;
	struct data_buf *buf;
	off_t buf_start;
};

static void fpga_device_release(struct kref *ref)
{
	struct fpga_device *priv = container_of(ref, struct fpga_device, ref);

	/* the last reader has exited, cleanup the last bits */
	mutex_destroy(&priv->mutex);
	kfree(priv);
}

/*
 * Data Buffer Allocation Helpers
 */

static int carma_dma_init(struct data_buf *buf, int nr_pages)
{
	struct page *pg;
	int i;

	buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
	if (NULL == buf->vaddr) {
		pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
		return -ENOMEM;
	}

	pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
				(unsigned long)buf->vaddr,
				nr_pages << PAGE_SHIFT);

	memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
	buf->nr_pages = nr_pages;

	buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
	if (NULL == buf->sglist)
		goto vzalloc_err;

	sg_init_table(buf->sglist, buf->nr_pages);
	for (i = 0; i < buf->nr_pages; i++) {
		pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
		if (NULL == pg)
			goto vmalloc_to_page_err;
		sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
	}
	return 0;

vmalloc_to_page_err:
	vfree(buf->sglist);
	buf->sglist = NULL;
vzalloc_err:
	vfree(buf->vaddr);
	buf->vaddr = NULL;
	return -ENOMEM;
}

static int carma_dma_map(struct device *dev, struct data_buf *buf)
{
	buf->sglen = dma_map_sg(dev, buf->sglist,
			buf->nr_pages, DMA_FROM_DEVICE);

	if (0 == buf->sglen) {
		pr_warn("%s: dma_map_sg failed\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

static int carma_dma_unmap(struct device *dev, struct data_buf *buf)
{
	if (!buf->sglen)
		return 0;

	/* per the DMA-API, unmap with the same nents passed to dma_map_sg() */
	dma_unmap_sg(dev, buf->sglist, buf->nr_pages, DMA_FROM_DEVICE);
	buf->sglen = 0;
	return 0;
}

/**
 * data_free_buffer() - free a single data buffer and all allocated memory
 * @buf: the buffer to free
 *
 * This will free all of the pages allocated to the given data buffer, and
 * then free the structure itself
 */
static void data_free_buffer(struct data_buf *buf)
{
	/* It is ok to free a NULL buffer */
	if (!buf)
		return;

	/* free all memory */
	vfree(buf->sglist);
	vfree(buf->vaddr);
	kfree(buf);
}

/**
 * data_alloc_buffer() - allocate and fill a data buffer with pages
 * @bytes: the number of bytes required
 *
 * This allocates all space needed for a data buffer. It must be mapped before
 * use in a DMA transaction using carma_dma_map().
 *
 * Returns NULL on failure
 */
static struct data_buf *data_alloc_buffer(const size_t bytes)
{
	unsigned int nr_pages;
	struct data_buf *buf;
	int ret;

	/* calculate the number of pages necessary */
	nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);

	/* allocate the buffer structure */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		goto out_return;

	/* initialize internal fields */
	INIT_LIST_HEAD(&buf->entry);
	buf->size = bytes;

	/* allocate the buffer */
	ret = carma_dma_init(buf, nr_pages);
	if (ret)
		goto out_free_buf;

	return buf;

out_free_buf:
	kfree(buf);
out_return:
	return NULL;
}

/**
 * data_free_buffers() - free all allocated buffers
 * @priv: the driver's private data structure
 *
 * Free all buffers allocated by the driver (except those currently in the
 * process of being read by userspace).
 *
 * LOCKING: must hold dev->mutex
 * CONTEXT: user
 */
static void data_free_buffers(struct fpga_device *priv)
{
	struct data_buf *buf, *tmp;

	/* the device should be stopped, no DMA in progress */
	BUG_ON(priv->inflight != NULL);

	list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
		list_del_init(&buf->entry);
		carma_dma_unmap(priv->dev, buf);
		data_free_buffer(buf);
	}

	list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
		list_del_init(&buf->entry);
		carma_dma_unmap(priv->dev, buf);
		data_free_buffer(buf);
	}

	priv->num_buffers = 0;
	priv->bufsize = 0;
}

/**
 * data_alloc_buffers() - allocate 1 second's worth of data buffers
 * @priv: the driver's private data structure
 *
 * Allocate enough buffers for a whole second's worth of data
 *
 * This routine will attempt to degrade nicely by succeeding even if a full
 * second's worth of data buffers could not be allocated, as long as a minimum
 * number were allocated. In this case, it will print a message to the kernel
 * log.
 *
 * The device must not be modifying any lists when this is called.
 *
 * CONTEXT: user
 * LOCKING: must hold dev->mutex
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_alloc_buffers(struct fpga_device *priv)
{
	struct data_buf *buf;
	int i, ret;

	for (i = 0; i < MAX_DATA_BUFS; i++) {

		/* allocate a buffer */
		buf = data_alloc_buffer(priv->bufsize);
		if (!buf)
			break;

		/* map it for DMA */
		ret = carma_dma_map(priv->dev, buf);
		if (ret) {
			data_free_buffer(buf);
			break;
		}

		/* add it to the list of free buffers */
		list_add_tail(&buf->entry, &priv->free);
		priv->num_buffers++;
	}

	/* Make sure we allocated the minimum required number of buffers */
	if (priv->num_buffers < MIN_DATA_BUFS) {
		dev_err(priv->dev, "Unable to allocate enough data buffers\n");
		data_free_buffers(priv);
		return -ENOMEM;
	}

	/* Warn if we are running in a degraded state, but do not fail */
	if (priv->num_buffers < MAX_DATA_BUFS) {
		dev_warn(priv->dev,
			 "Unable to allocate %d buffers, using %d buffers instead\n",
			 MAX_DATA_BUFS, i);
	}

	return 0;
}

/*
 * DMA Operations Helpers
 */

/**
 * fpga_start_addr() - get the physical address of a DATA-FPGA
 * @priv: the driver's private data structure
 * @fpga: the DATA-FPGA number (zero based)
 */
static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga)
{
	return priv->phys_addr + 0x400000 + (0x80000 * fpga);
}

/**
 * fpga_block_addr() - get the physical address of a correlation data block
 * @priv: the driver's private data structure
 * @fpga: the DATA-FPGA number (zero based)
 * @blknum: the correlation block number (zero based)
 */
static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga,
				  unsigned int blknum)
{
	return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum));
}
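
/*
 * Worked example of the address math above, using a hypothetical phys_addr
 * of 0xE0000000 (the real base comes from the device tree): FPGA #2's
 * register block starts at 0xE0000000 + 0x400000 + (2 * 0x80000) =
 * 0xE0500000, and its correlation block #3 starts 0x10000 * (1 + 3) =
 * 0x40000 bytes beyond that, at 0xE0540000.
 */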

#define REG_BLOCK_SIZE	(32 * 4)

/**
 * data_setup_corl_table() - create the scatterlist for correlation dumps
 * @priv: the driver's private data structure
 *
 * Create the scatterlist for transferring a correlation dump from the
 * DATA FPGAs. This structure will be reused for each buffer that needs
 * to be filled with correlation data.
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_setup_corl_table(struct fpga_device *priv)
{
	struct sg_table *table = &priv->corl_table;
	struct scatterlist *sg;
	struct fpga_info *info;
	int i, j, ret;

	/* Calculate the number of entries needed */
	priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
	for (i = 0; i < NUM_FPGA; i++)
		priv->corl_nents += priv->info[i].num_lag_ram;

	/* Allocate the scatterlist table */
	ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
	if (ret) {
		dev_err(priv->dev, "unable to allocate DMA table\n");
		return ret;
	}

	/* Add the DATA FPGA registers to the scatterlist */
	sg = table->sgl;
	for (i = 0; i < NUM_FPGA; i++) {
		sg_dma_address(sg) = fpga_start_addr(priv, i);
		sg_dma_len(sg) = REG_BLOCK_SIZE;
		sg = sg_next(sg);
	}

	/* Add the SYS-FPGA registers to the scatterlist */
	sg_dma_address(sg) = SYS_FPGA_BLOCK;
	sg_dma_len(sg) = REG_BLOCK_SIZE;
	sg = sg_next(sg);

	/* Add the FPGA correlation data blocks to the scatterlist */
	for (i = 0; i < NUM_FPGA; i++) {
		info = &priv->info[i];
		for (j = 0; j < info->num_lag_ram; j++) {
			sg_dma_address(sg) = fpga_block_addr(priv, i, j);
			sg_dma_len(sg) = info->blk_size;
			sg = sg_next(sg);
		}
	}

	/*
	 * All physical addresses and lengths are present in the structure
	 * now. It can be reused for every FPGA DATA interrupt
	 */
	return 0;
}

/*
 * FPGA Register Access Helpers
 */

static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga,
			   unsigned int reg, u32 val)
{
	const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
	iowrite32be(val, priv->regs + fpga_start + reg);
}

static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga,
			 unsigned int reg)
{
	const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
	return ioread32be(priv->regs + fpga_start + reg);
}

/**
 * data_calculate_bufsize() - calculate the data buffer size required
 * @priv: the driver's private data structure
 *
 * Calculate the total buffer size needed to hold a single correlation
 * data dump (all register blocks plus all correlation arrays)
 *
 * CONTEXT: user
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_calculate_bufsize(struct fpga_device *priv)
{
	u32 num_corl, num_lags, num_meta, num_qcnt, num_pack;
	u32 conf1, conf2, version;
	u32 num_lag_ram, blk_size;
	int i;

	/* Each buffer starts with the 5 FPGA register areas */
	priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE;

	/* Read and store the configuration data for each FPGA */
	for (i = 0; i < NUM_FPGA; i++) {
		version = fpga_read_reg(priv, i, MMAP_REG_VERSION);
		conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1);
		conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2);

		/* minor version 2 and later */
		if ((version & 0x000000FF) >= 2) {
			num_corl = (conf1 & 0x000000F0) >> 4;
			num_pack = (conf1 & 0x00000F00) >> 8;
			num_lags = (conf1 & 0x00FFF000) >> 12;
			num_meta = (conf1 & 0x7F000000) >> 24;
			num_qcnt = (conf2 & 0x00000FFF) >> 0;
		} else {
			num_corl = (conf1 & 0x000000F0) >> 4;
			num_pack = 1; /* implied */
			num_lags = (conf1 & 0x000FFF00) >> 8;
			num_meta = (conf1 & 0x7FF00000) >> 20;
			num_qcnt = (conf2 & 0x00000FFF) >> 0;
		}

		num_lag_ram = (num_corl + num_pack - 1) / num_pack;
		blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8;

		priv->info[i].num_lag_ram = num_lag_ram;
		priv->info[i].blk_size = blk_size;
		priv->bufsize += num_lag_ram * blk_size;

		dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl);
		dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack);
		dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags);
		dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta);
		dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt);
		dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size);
	}

	dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize);
	return 0;
}
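
/*
 * Worked example of the arithmetic above, using hypothetical register
 * values (the real ones are read back from each FPGA): if an FPGA reports
 * NUM_CORL=4, NUM_PACK=1, NUM_LAGS=32, NUM_META=16, NUM_QCNT=8, then
 * num_lag_ram = (4 + 1 - 1) / 1 = 4 and
 * blk_size = ((1 * 32) + 16 + 8) * 8 = 448 bytes, i.e. 1792 bytes per FPGA.
 * With all four FPGAs configured identically, the total dump size is
 * (1 + 4) * 128 + (4 * 1792) = 7808 bytes.
 */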

/*
 * Interrupt Handling
 */

/**
 * data_disable_interrupts() - stop the device from generating interrupts
 * @priv: the driver's private data structure
 *
 * Hide interrupts by switching to GPIO interrupt source
 *
 * LOCKING: must hold dev->lock
 */
static void data_disable_interrupts(struct fpga_device *priv)
{
	/* hide the interrupt by switching the IRQ driver to GPIO */
	iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);
}

/**
 * data_enable_interrupts() - allow the device to generate interrupts
 * @priv: the driver's private data structure
 *
 * Unhide interrupts by switching to the FPGA interrupt source. At the
 * same time, clear the DATA-FPGA status registers.
 *
 * LOCKING: must hold dev->lock
 */
static void data_enable_interrupts(struct fpga_device *priv)
{
	/* clear the actual FPGA corl_done interrupt */
	fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0);
	fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0);
	fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0);
	fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0);

	/* flush the writes */
	fpga_read_reg(priv, 0, MMAP_REG_STATUS);
	fpga_read_reg(priv, 1, MMAP_REG_STATUS);
	fpga_read_reg(priv, 2, MMAP_REG_STATUS);
	fpga_read_reg(priv, 3, MMAP_REG_STATUS);

	/* switch back to the external interrupt source */
	iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
}

/**
 * data_dma_cb() - DMAEngine callback for DMA completion
 * @data: the driver's private data structure
 *
 * Complete a DMA transfer from the DATA-FPGAs
 *
 * This is called via the DMA callback mechanism, and will handle moving the
 * completed DMA transaction to the used list, and then wake any processes
 * waiting for new data
 *
 * CONTEXT: any, softirq expected
 */
static void data_dma_cb(void *data)
{
	struct fpga_device *priv = data;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* If there is no inflight buffer, we've got a bug */
	BUG_ON(priv->inflight == NULL);

	/* Move the inflight buffer onto the used list */
	list_move_tail(&priv->inflight->entry, &priv->used);
	priv->inflight = NULL;

	/*
	 * If data dumping is still enabled, then clear the FPGA
	 * status registers and re-enable FPGA interrupts
	 */
	if (priv->enabled)
		data_enable_interrupts(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * We've changed both the inflight and used lists, so we need
	 * to wake up any processes that are blocking for those events
	 */
	wake_up(&priv->wait);
}

/**
 * data_submit_dma() - prepare and submit the required DMA to fill a buffer
 * @priv: the driver's private data structure
 * @buf: the data buffer
 *
 * Prepare and submit the necessary DMA transactions to fill a correlation
 * data buffer.
 *
 * LOCKING: must hold dev->lock
 * CONTEXT: hardirq only
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
{
	struct scatterlist *dst_sg, *src_sg;
	unsigned int dst_nents, src_nents;
	struct dma_chan *chan = priv->chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dst, src;
	unsigned long dma_flags = 0;

	dst_sg = buf->sglist;
	dst_nents = buf->sglen;

	src_sg = priv->corl_table.sgl;
	src_nents = priv->corl_nents;

	/*
	 * All buffers passed to this function should be ready and mapped
	 * for DMA already. Therefore, we don't need to do anything except
	 * submit it to the Freescale DMA Engine for processing
	 */

	/* setup the scatterlist to scatterlist transfer */
	tx = chan->device->device_prep_dma_sg(chan,
					      dst_sg, dst_nents,
					      src_sg, src_nents,
					      0);
	if (!tx) {
		dev_err(priv->dev, "unable to prep scatterlist DMA\n");
		return -ENOMEM;
	}

	/* submit the transaction to the DMA controller */
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(priv->dev, "unable to submit scatterlist DMA\n");
		return -ENOMEM;
	}

	/* Prepare the re-read of the SYS-FPGA block */
	dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE);
	src = SYS_FPGA_BLOCK;
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
						  REG_BLOCK_SIZE,
						  dma_flags);
	if (!tx) {
		dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
		return -ENOMEM;
	}

	/* Setup the callback */
	tx->callback = data_dma_cb;
	tx->callback_param = priv;

	/* submit the transaction to the DMA controller */
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n");
		return -ENOMEM;
	}

	return 0;
}

#define CORL_DONE	0x1
#define CORL_ERR	0x2

static irqreturn_t data_irq(int irq, void *dev_id)
{
	struct fpga_device *priv = dev_id;
	bool submitted = false;
	struct data_buf *buf;
	u32 status;
	int i;

	/* detect spurious interrupts via FPGA status */
	for (i = 0; i < 4; i++) {
		status = fpga_read_reg(priv, i, MMAP_REG_STATUS);
		if (!(status & (CORL_DONE | CORL_ERR))) {
			dev_err(priv->dev, "spurious irq detected (FPGA)\n");
			return IRQ_NONE;
		}
	}

	/* detect spurious interrupts via raw IRQ pin readback */
	status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA);
	if (status & IRQ_CORL_DONE) {
		dev_err(priv->dev, "spurious irq detected (IRQ)\n");
		return IRQ_NONE;
	}

	spin_lock(&priv->lock);

	/*
	 * This is an error case that should never happen.
	 *
	 * If this driver has a bug and manages to re-enable interrupts while
	 * a DMA is in progress, then we will hit this statement and should
	 * start paying attention immediately.
	 */
	BUG_ON(priv->inflight != NULL);

	/* hide the interrupt by switching the IRQ driver to GPIO */
	data_disable_interrupts(priv);

	/* If there are no free buffers, drop this data */
	if (list_empty(&priv->free)) {
		priv->num_dropped++;
		goto out;
	}

	buf = list_first_entry(&priv->free, struct data_buf, entry);
	list_del_init(&buf->entry);
	BUG_ON(buf->size != priv->bufsize);

	/* Submit a DMA transfer to get the correlation data */
	if (data_submit_dma(priv, buf)) {
		dev_err(priv->dev, "Unable to setup DMA transfer\n");
		list_move_tail(&buf->entry, &priv->free);
		goto out;
	}

	/* Save the buffer for the DMA callback */
	priv->inflight = buf;
	submitted = true;

	/* Start the DMA Engine */
	dma_async_issue_pending(priv->chan);

out:
	/* If no DMA was submitted, re-enable interrupts */
	if (!submitted)
		data_enable_interrupts(priv);

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}

/*
 * Realtime Device Enable Helpers
 */

/**
 * data_device_enable() - enable the device for buffered dumping
 * @priv: the driver's private data structure
 *
 * Enable the device for buffered dumping. Allocates buffers and hooks up
 * the interrupt handler. When this finishes, data will come pouring in.
 *
 * LOCKING: must hold dev->mutex
 * CONTEXT: user context only
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_device_enable(struct fpga_device *priv)
{
	bool enabled;
	u32 val;
	int ret;

	/* multiple enables are safe: they do nothing */
	spin_lock_irq(&priv->lock);
	enabled = priv->enabled;
	spin_unlock_irq(&priv->lock);
	if (enabled)
		return 0;

	/* check that the FPGAs are programmed */
	val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS);
	if (!(val & (1 << 18))) {
		dev_err(priv->dev, "DATA-FPGAs are not enabled\n");
		return -ENODATA;
	}

	/* read the FPGAs to calculate the buffer size */
	ret = data_calculate_bufsize(priv);
	if (ret) {
		dev_err(priv->dev, "unable to calculate buffer size\n");
		goto out_error;
	}

	/* allocate the correlation data buffers */
	ret = data_alloc_buffers(priv);
	if (ret) {
		dev_err(priv->dev, "unable to allocate buffers\n");
		goto out_error;
	}

	/* setup the source scatterlist for dumping correlation data */
	ret = data_setup_corl_table(priv);
	if (ret) {
		dev_err(priv->dev, "unable to setup correlation DMA table\n");
		goto out_error;
	}

	/* prevent the FPGAs from generating interrupts */
	data_disable_interrupts(priv);

	/* hookup the irq handler */
	ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
	if (ret) {
		dev_err(priv->dev, "unable to request IRQ handler\n");
		goto out_error;
	}

	/* allow the DMA callback to re-enable FPGA interrupts */
	spin_lock_irq(&priv->lock);
	priv->enabled = true;
	spin_unlock_irq(&priv->lock);

	/* allow the FPGAs to generate interrupts */
	data_enable_interrupts(priv);
	return 0;

out_error:
	sg_free_table(&priv->corl_table);
	priv->corl_nents = 0;

	data_free_buffers(priv);
	return ret;
}

/**
 * data_device_disable() - disable the device for buffered dumping
 * @priv: the driver's private data structure
 *
 * Disable the device for buffered dumping. Stops new DMA transactions from
 * being generated, waits for all outstanding DMA to complete, and then frees
 * all buffers.
 *
 * LOCKING: must hold dev->mutex
 * CONTEXT: user only
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_device_disable(struct fpga_device *priv)
{
	spin_lock_irq(&priv->lock);

	/* allow multiple disable */
	if (!priv->enabled) {
		spin_unlock_irq(&priv->lock);
		return 0;
	}

	/*
	 * Mark the device disabled
	 *
	 * This stops DMA callbacks from re-enabling interrupts
	 */
	priv->enabled = false;

	/* prevent the FPGAs from generating interrupts */
	data_disable_interrupts(priv);

	/* wait until all ongoing DMA has finished */
	while (priv->inflight != NULL) {
		spin_unlock_irq(&priv->lock);
		wait_event(priv->wait, priv->inflight == NULL);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	/* unhook the irq handler */
	free_irq(priv->irq, priv);

	/* free the correlation table */
	sg_free_table(&priv->corl_table);
	priv->corl_nents = 0;

	/* free all buffers: the free and used lists are not being changed */
	data_free_buffers(priv);
	return 0;
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

/*
 * Count the number of entries in the given list
 */
static unsigned int list_num_entries(struct list_head *list)
{
	struct list_head *entry;
	unsigned int ret = 0;

	list_for_each(entry, list)
		ret++;

	return ret;
}

static int data_debug_show(struct seq_file *f, void *offset)
{
	struct fpga_device *priv = f->private;

	spin_lock_irq(&priv->lock);

	seq_printf(f, "enabled: %d\n", priv->enabled);
	seq_printf(f, "bufsize: %zu\n", priv->bufsize);
	seq_printf(f, "num_buffers: %d\n", priv->num_buffers);
	seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free));
	seq_printf(f, "inflight: %d\n", priv->inflight != NULL);
	seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used));
	seq_printf(f, "num_dropped: %d\n", priv->num_dropped);

	spin_unlock_irq(&priv->lock);
	return 0;
}

static int data_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, data_debug_show, inode->i_private);
}

static const struct file_operations data_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= data_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int data_debugfs_init(struct fpga_device *priv)
{
	priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
					      &data_debug_fops);
	return PTR_ERR_OR_ZERO(priv->dbg_entry);
}
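
/*
 * With debugfs mounted in the usual location, the counters printed by
 * data_debug_show() can be read with e.g. "cat /sys/kernel/debug/carma-fpga"
 * (the mount point is an assumption; the file name comes from drv_name).
 */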

static void data_debugfs_exit(struct fpga_device *priv)
{
	debugfs_remove(priv->dbg_entry);
}

#else

static inline int data_debugfs_init(struct fpga_device *priv)
{
	return 0;
}

static inline void data_debugfs_exit(struct fpga_device *priv)
{
}

#endif	/* CONFIG_DEBUG_FS */

/*
 * SYSFS Attributes
 */

static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct fpga_device *priv = dev_get_drvdata(dev);
	int ret;

	spin_lock_irq(&priv->lock);
	ret = snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
	spin_unlock_irq(&priv->lock);

	return ret;
}

static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct fpga_device *priv = dev_get_drvdata(dev);
	unsigned long enable;
	int ret;

	ret = kstrtoul(buf, 0, &enable);
	if (ret) {
		dev_err(priv->dev, "unable to parse enable input\n");
		return ret;
	}

	/* protect against concurrent enable/disable */
	ret = mutex_lock_interruptible(&priv->mutex);
	if (ret)
		return ret;

	if (enable)
		ret = data_device_enable(priv);
	else
		ret = data_device_disable(priv);

	if (ret) {
		dev_err(priv->dev, "device %s failed\n",
			enable ? "enable" : "disable");
		count = ret;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&priv->mutex);
	return count;
}

static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set);

static struct attribute *data_sysfs_attrs[] = {
	&dev_attr_enable.attr,
	NULL,
};

static const struct attribute_group rt_sysfs_attr_group = {
	.attrs = data_sysfs_attrs,
};

/*
 * FPGA Realtime Data Character Device
 */

static int data_open(struct inode *inode, struct file *filp)
{
	/*
	 * The miscdevice layer puts our struct miscdevice into the
	 * filp->private_data field. We use this to find our private
	 * data and then overwrite it with our own private structure.
	 */
	struct fpga_device *priv = container_of(filp->private_data,
						struct fpga_device, miscdev);
	struct fpga_reader *reader;
	int ret;

	/* allocate private data */
	reader = kzalloc(sizeof(*reader), GFP_KERNEL);
	if (!reader)
		return -ENOMEM;

	reader->priv = priv;
	reader->buf = NULL;

	filp->private_data = reader;
	ret = nonseekable_open(inode, filp);
	if (ret) {
		dev_err(priv->dev, "nonseekable-open failed\n");
		kfree(reader);
		return ret;
	}

	/*
	 * success, increase the reference count of the private data structure
	 * so that it doesn't disappear if the device is unbound
	 */
	kref_get(&priv->ref);
	return 0;
}

static int data_release(struct inode *inode, struct file *filp)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;

	/* free the per-reader structure */
	data_free_buffer(reader->buf);
	kfree(reader);
	filp->private_data = NULL;

	/* decrement our reference count to the private data */
	kref_put(&priv->ref, fpga_device_release);
	return 0;
}

static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
			 loff_t *f_pos)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;
	struct list_head *used = &priv->used;
	bool drop_buffer = false;
	struct data_buf *dbuf;
	size_t avail;
	void *data;
	int ret;

	/* check if we already have a partial buffer */
	if (reader->buf) {
		dbuf = reader->buf;
		goto have_buffer;
	}

	spin_lock_irq(&priv->lock);

	/* Block until there is at least one buffer on the used list */
	while (list_empty(used)) {
		spin_unlock_irq(&priv->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(priv->wait, !list_empty(used));
		if (ret)
			return ret;

		spin_lock_irq(&priv->lock);
	}

	/* Grab the first buffer off of the used list */
	dbuf = list_first_entry(used, struct data_buf, entry);
	list_del_init(&dbuf->entry);

	spin_unlock_irq(&priv->lock);

	/* Buffers are always mapped: unmap it */
	carma_dma_unmap(priv->dev, dbuf);

	/* save the buffer for later */
	reader->buf = dbuf;
	reader->buf_start = 0;

have_buffer:
	/* Get the number of bytes available */
	avail = dbuf->size - reader->buf_start;
	data = dbuf->vaddr + reader->buf_start;

	/* Get the number of bytes we can transfer */
	count = min(count, avail);

	/* Copy the data to the userspace buffer */
	if (copy_to_user(ubuf, data, count))
		return -EFAULT;

	/* Update the amount of available space */
	avail -= count;

	/*
	 * If there is still some data available, save the buffer for the
	 * next userspace call to read() and return
	 */
	if (avail > 0) {
		reader->buf_start += count;
		reader->buf = dbuf;
		return count;
	}

	/*
	 * Get the buffer ready to be reused for DMA
	 *
	 * If it fails, we pretend that the read never happened and return
	 * -EFAULT to userspace. The read will be retried.
	 */
	ret = carma_dma_map(priv->dev, dbuf);
	if (ret) {
		dev_err(priv->dev, "unable to remap buffer for DMA\n");
		return -EFAULT;
	}

	/* Lock against concurrent enable/disable */
	spin_lock_irq(&priv->lock);

	/* the reader is finished with this buffer */
	reader->buf = NULL;

	/*
	 * One of two things has happened: either the device is disabled, or
	 * the device has been reconfigured underneath us. In either case, we
	 * should just throw away the buffer.
	 *
	 * Lockdep complains if this is done under the spinlock, so we
	 * handle it during the unlock path.
	 */
	if (!priv->enabled || dbuf->size != priv->bufsize) {
		drop_buffer = true;
		goto out_unlock;
	}

	/* The buffer is safe to reuse, so add it back to the free list */
	list_add_tail(&dbuf->entry, &priv->free);

out_unlock:
	spin_unlock_irq(&priv->lock);

	if (drop_buffer) {
		carma_dma_unmap(priv->dev, dbuf);
		data_free_buffer(dbuf);
	}

	return count;
}

static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;
	unsigned int mask = 0;

	poll_wait(filp, &priv->wait, tbl);

	if (!list_empty(&priv->used))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

static int data_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;
	unsigned long offset, vsize, psize, addr;

	/* VMA properties */
	offset = vma->vm_pgoff << PAGE_SHIFT;
	vsize = vma->vm_end - vma->vm_start;
	psize = priv->phys_size - offset;
	addr = (priv->phys_addr + offset) >> PAGE_SHIFT;

	/* Check against the FPGA region's physical memory size */
	if (vsize > psize) {
		dev_err(priv->dev, "requested mmap mapping too large\n");
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
				  vma->vm_page_prot);
}
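
/*
 * Illustrative sketch only: userspace can map the FPGA register space
 * non-cached through this handler. The device node path is an assumption
 * based on the miscdevice name, and offsets are relative to the FPGA
 * region's physical base, so offset 0x400000 lands on DATA-FPGA #0:
 *
 *   int fd = open("/dev/carma-fpga", O_RDWR);
 *   void *regs = mmap(NULL, 0x80000, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0x400000);
 */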

static const struct file_operations data_fops = {
	.owner		= THIS_MODULE,
	.open		= data_open,
	.release	= data_release,
	.read		= data_read,
	.poll		= data_poll,
	.mmap		= data_mmap,
	.llseek		= no_llseek,
};

/*
 * OpenFirmware Device Subsystem
 */

static bool dma_filter(struct dma_chan *chan, void *data)
{
	/*
	 * DMA Channel #0 is used for the FPGA Programmer, so ignore it
	 *
	 * This probably won't survive an unload/load cycle of the Freescale
	 * DMAEngine driver, but that won't be a problem
	 */
	if (chan->chan_id == 0 && chan->device->dev_id == 0)
		return false;

	return true;
}

static int data_of_probe(struct platform_device *op)
{
	struct device_node *of_node = op->dev.of_node;
	struct device *this_device;
	struct fpga_device *priv;
	struct resource res;
	dma_cap_mask_t mask;
	int ret;

	/* Allocate private data */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&op->dev, "Unable to allocate device private data\n");
		ret = -ENOMEM;
		goto out_return;
	}

	platform_set_drvdata(op, priv);
	priv->dev = &op->dev;
	kref_init(&priv->ref);
	mutex_init(&priv->mutex);

	dev_set_drvdata(priv->dev, priv);
	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->free);
	INIT_LIST_HEAD(&priv->used);
	init_waitqueue_head(&priv->wait);

	/* Setup the misc device */
	priv->miscdev.minor = MISC_DYNAMIC_MINOR;
	priv->miscdev.name = drv_name;
	priv->miscdev.fops = &data_fops;

	/* Get the physical address of the FPGA registers */
	ret = of_address_to_resource(of_node, 0, &res);
	if (ret) {
		dev_err(&op->dev, "Unable to find FPGA physical address\n");
		ret = -ENODEV;
		goto out_free_priv;
	}

	priv->phys_addr = res.start;
	priv->phys_size = resource_size(&res);

	/* ioremap the registers for use */
	priv->regs = of_iomap(of_node, 0);
	if (!priv->regs) {
		dev_err(&op->dev, "Unable to ioremap registers\n");
		ret = -ENOMEM;
		goto out_free_priv;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	dma_cap_set(DMA_INTERRUPT, mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_SG, mask);

	/* Request a DMA channel */
	priv->chan = dma_request_channel(mask, dma_filter, NULL);
	if (!priv->chan) {
		dev_err(&op->dev, "Unable to request DMA channel\n");
		ret = -ENODEV;
		goto out_unmap_regs;
	}

	/* Find the correct IRQ number */
	priv->irq = irq_of_parse_and_map(of_node, 0);
	if (priv->irq == NO_IRQ) {
		dev_err(&op->dev, "Unable to find IRQ line\n");
		ret = -ENODEV;
		goto out_release_dma;
	}

	/* Drive the GPIO for FPGA IRQ high (no interrupt) */
	iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA);

	/* Register the miscdevice */
	ret = misc_register(&priv->miscdev);
	if (ret) {
		dev_err(&op->dev, "Unable to register miscdevice\n");
		goto out_irq_dispose_mapping;
	}

	/* Create the debugfs files */
	ret = data_debugfs_init(priv);
	if (ret) {
		dev_err(&op->dev, "Unable to create debugfs files\n");
		goto out_misc_deregister;
	}

	/* Create the sysfs files */
	this_device = priv->miscdev.this_device;
	dev_set_drvdata(this_device, priv);
	ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group);
	if (ret) {
		dev_err(&op->dev, "Unable to create sysfs files\n");
		goto out_data_debugfs_exit;
	}

	dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n");
	return 0;

out_data_debugfs_exit:
	data_debugfs_exit(priv);
out_misc_deregister:
	misc_deregister(&priv->miscdev);
out_irq_dispose_mapping:
	irq_dispose_mapping(priv->irq);
out_release_dma:
	dma_release_channel(priv->chan);
out_unmap_regs:
	iounmap(priv->regs);
out_free_priv:
	kref_put(&priv->ref, fpga_device_release);
out_return:
	return ret;
}

static int data_of_remove(struct platform_device *op)
{
	struct fpga_device *priv = platform_get_drvdata(op);
	struct device *this_device = priv->miscdev.this_device;

	/* remove all sysfs files, now the device cannot be re-enabled */
	sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group);

	/* remove all debugfs files */
	data_debugfs_exit(priv);

	/* disable the device from generating data */
	data_device_disable(priv);

	/* remove the character device to stop new readers from appearing */
	misc_deregister(&priv->miscdev);

	/* cleanup everything not needed by readers */
	irq_dispose_mapping(priv->irq);
	dma_release_channel(priv->chan);
	iounmap(priv->regs);

	/* release our reference */
	kref_put(&priv->ref, fpga_device_release);
	return 0;
}

static const struct of_device_id data_of_match[] = {
	{ .compatible = "carma,carma-fpga", },
	{},
};

static struct platform_driver data_of_driver = {
	.probe		= data_of_probe,
	.remove		= data_of_remove,
	.driver		= {
		.name		= drv_name,
		.of_match_table	= data_of_match,
	},
};

module_platform_driver(data_of_driver);

MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver");
MODULE_LICENSE("GPL");