/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>

#include "coresight-priv.h"

#define TMC_RSZ			0x004
#define TMC_STS			0x00c
#define TMC_RRD			0x010
#define TMC_RRP			0x014
#define TMC_RWP			0x018
#define TMC_TRG			0x01c
#define TMC_CTL			0x020
#define TMC_RWD			0x024
#define TMC_MODE		0x028
#define TMC_LBUFLEVEL		0x02c
#define TMC_CBUFLEVEL		0x030
#define TMC_BUFWM		0x034
#define TMC_RRPHI		0x038
#define TMC_RWPHI		0x03c
#define TMC_AXICTL		0x110
#define TMC_DBALO		0x118
#define TMC_DBAHI		0x11c
#define TMC_FFSR		0x300
#define TMC_FFCR		0x304
#define TMC_PSCR		0x308
#define TMC_ITMISCOP0		0xee0
#define TMC_ITTRFLIN		0xee8
#define TMC_ITATBDATA0		0xeec
#define TMC_ITATBCTR2		0xef0
#define TMC_ITATBCTR1		0xef4
#define TMC_ITATBCTR0		0xef8

/* register description */
/* TMC_CTL - 0x020 */
#define TMC_CTL_CAPT_EN		BIT(0)
/* TMC_STS - 0x00C */
#define TMC_STS_TRIGGERED	BIT(1)
/* TMC_AXICTL - 0x110 */
#define TMC_AXICTL_PROT_CTL_B0	BIT(0)
#define TMC_AXICTL_PROT_CTL_B1	BIT(1)
#define TMC_AXICTL_SCT_GAT_MODE	BIT(7)
#define TMC_AXICTL_WR_BURST_LEN 0xF00
/* TMC_FFCR - 0x304 */
#define TMC_FFCR_EN_FMT		BIT(0)
#define TMC_FFCR_EN_TI		BIT(1)
#define TMC_FFCR_FON_FLIN	BIT(4)
#define TMC_FFCR_FON_TRIG_EVT	BIT(5)
#define TMC_FFCR_FLUSHMAN	BIT(6)
#define TMC_FFCR_TRIGON_TRIGIN	BIT(8)
#define TMC_FFCR_STOP_ON_FLUSH	BIT(12)

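/*
 * Bit positions (rather than the BIT() masks above) handed to
 * coresight_timeout() when polling TMC_STS in tmc_wait_for_ready()
 * and TMC_FFCR in tmc_flush_and_stop().
 */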
#define TMC_STS_TRIGGERED_BIT	2
#define TMC_FFCR_FLUSHMAN_BIT	6

enum tmc_config_type {
	TMC_CONFIG_TYPE_ETB,
	TMC_CONFIG_TYPE_ETR,
	TMC_CONFIG_TYPE_ETF,
};

enum tmc_mode {
	TMC_MODE_CIRCULAR_BUFFER,
	TMC_MODE_SOFTWARE_FIFO,
	TMC_MODE_HARDWARE_FIFO,
};

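/*
 * Memory interface width as encoded in DEVID[10:8]; the value is the log2
 * of the interface width in bytes (see tmc_etb_dump_hw()).
 */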
enum tmc_mem_intf_width {
	TMC_MEM_INTF_WIDTH_32BITS	= 0x2,
	TMC_MEM_INTF_WIDTH_64BITS	= 0x3,
	TMC_MEM_INTF_WIDTH_128BITS	= 0x4,
	TMC_MEM_INTF_WIDTH_256BITS	= 0x5,
};

/**
 * struct tmc_drvdata - specifics associated to a TMC component
 * @base:	memory mapped base address for this component.
 * @dev:	the device entity associated to this component.
 * @csdev:	component vitals needed by the framework.
 * @miscdev:	specifics to handle "/dev/xyz.tmc" entry.
 * @spinlock:	serializes accesses to this structure and to the hardware.
 * @read_count:	number of times the device node has been opened for reading.
 * @reading:	whether the buffer is currently being read by user space.
 * @buf:	area of memory where trace data get sent.
 * @paddr:	DMA start location in RAM.
 * @vaddr:	virtual representation of @paddr.
 * @size:	@buf size.
 * @enable:	this TMC is being used.
 * @config_type: TMC variant, must be of type @tmc_config_type.
 * @trigger_cntr: number of 32-bit words to capture after a trigger event.
 */
struct tmc_drvdata {
	void __iomem		*base;
	struct device		*dev;
	struct coresight_device	*csdev;
	struct miscdevice	miscdev;
	spinlock_t		spinlock;
	int			read_count;
	bool			reading;
	char			*buf;
	dma_addr_t		paddr;
	void __iomem		*vaddr;
	u32			size;
	bool			enable;
	enum tmc_config_type	config_type;
	u32			trigger_cntr;
};

static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
{
	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(drvdata->base,
			      TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TMC_STS);
	}
}

static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_FLUSHMAN;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(drvdata->base,
			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TMC_FFCR);
	}

	tmc_wait_for_ready(drvdata);
}

static void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

static void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

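/*
 * Configure the TMC as a circular buffer (ETB configuration): enable
 * formatting and trigger insertion, flush on FLUSHIN and on the trigger
 * event, use TRIGIN as the trigger source, and program the trigger
 * counter before capture is enabled.
 */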
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	/* Zero out the memory to help with debug */
	memset(drvdata->buf, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

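/*
 * Configure the TMC as an ETR: program the trace RAM size in 32-bit words,
 * set the AXI write burst length, disable scatter-gather mode, adjust the
 * AXI protection control bits, and point DBALO/DBAHI at the DMA buffer
 * allocated at probe time.
 */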
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl |= TMC_AXICTL_WR_BURST_LEN;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl = (axictl &
		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
		  TMC_AXICTL_PROT_CTL_B1;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

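/*
 * Take a runtime PM reference and program the hardware according to the
 * configuration type.  Enabling is refused with -EBUSY while user space
 * holds the buffer for reading.
 */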
static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
	unsigned long flags;

	pm_runtime_get_sync(drvdata->dev);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		pm_runtime_put(drvdata->dev);
		return -EBUSY;
	}

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_enable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_enable_hw(drvdata);
	} else {
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_enable_hw(drvdata);
		else
			tmc_etf_enable_hw(drvdata);
	}
	drvdata->enable = true;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC enabled\n");
	return 0;
}

static int tmc_enable_sink(struct coresight_device *csdev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
}

static int tmc_enable_link(struct coresight_device *csdev, int inport,
			   int outport)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
}

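/*
 * Drain the trace RAM into drvdata->buf through the RAM read data register.
 * DEVID[10:8] gives the memory interface width, hence the number of 32-bit
 * reads per memory access; the driver treats a read of 0xFFFFFFFF from
 * TMC_RRD as the end of the captured data.
 */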
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	enum tmc_mem_intf_width memwidth;
	u8 memwords;
	char *bufp;
	u32 read_data;
	int i;

	memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
	if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
		memwords = 1;
	else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
		memwords = 2;
	else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
		memwords = 4;
	else
		memwords = 8;

	bufp = drvdata->buf;
	while (1) {
		for (i = 0; i < memwords; i++) {
			read_data = readl_relaxed(drvdata->base + TMC_RRD);
			if (read_data == 0xFFFFFFFF)
				return;
			memcpy(bufp, &read_data, 4);
			bufp += 4;
		}
	}
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

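/*
 * Work out where valid trace data starts in the ETR DMA buffer once capture
 * has stopped.  If the RAM Full flag (TMC_STS bit 0) is set the buffer has
 * wrapped, so the oldest data sits at the current write pointer; otherwise
 * data starts at the base of the buffer.
 */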
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	u32 rwp, val;

	rwp = readl_relaxed(drvdata->base + TMC_RWP);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/* Has the buffer wrapped around? */
	if (val & BIT(0))
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
	else
		drvdata->buf = drvdata->vaddr;
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_disable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_disable_hw(drvdata);
	} else {
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_disable_hw(drvdata);
		else
			tmc_etf_disable_hw(drvdata);
	}
out:
	drvdata->enable = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	pm_runtime_put(drvdata->dev);

	dev_info(drvdata->dev, "TMC disabled\n");
}

static void tmc_disable_sink(struct coresight_device *csdev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
}

static void tmc_disable_link(struct coresight_device *csdev, int inport,
			     int outport)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
}

static const struct coresight_ops_sink tmc_sink_ops = {
	.enable		= tmc_enable_sink,
	.disable	= tmc_disable_sink,
};

static const struct coresight_ops_link tmc_link_ops = {
	.enable		= tmc_enable_link,
	.disable	= tmc_disable_link,
};

static const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
};

static const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
};

static const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
	.link_ops	= &tmc_link_ops,
};

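/*
 * Stop the TMC and dump its contents so that user space reads a stable
 * snapshot through the misc device.  The hardware is re-enabled by
 * tmc_read_unprepare() when the last reader goes away.
 */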
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
	int ret;
	unsigned long flags;
	enum tmc_mode mode;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->enable)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_disable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_disable_hw(drvdata);
	} else {
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode == TMC_MODE_CIRCULAR_BUFFER) {
			tmc_etb_disable_hw(drvdata);
		} else {
			ret = -ENODEV;
			goto err;
		}
	}
out:
	drvdata->reading = true;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC read start\n");
	return 0;
err:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	enum tmc_mode mode;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->enable)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_enable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_enable_hw(drvdata);
	} else {
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_enable_hw(drvdata);
	}
out:
	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC read end\n");
}

static int tmc_open(struct inode *inode, struct file *file)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	int ret = 0;

	if (drvdata->read_count++)
		goto out;

	ret = tmc_read_prepare(drvdata);
	if (ret)
		return ret;
out:
	nonseekable_open(inode, file);

	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
	return 0;
}

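/*
 * For an ETR the snapshot can start in the middle of the DMA buffer (see
 * tmc_etr_dump_hw()), so the read pointer wraps at the end of the buffer
 * and a single read never crosses the wrap point.
 */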
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	char *bufp = drvdata->buf + *ppos;

	if (*ppos + len > drvdata->size)
		len = drvdata->size - *ppos;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		if (bufp == (char *)(drvdata->vaddr + drvdata->size))
			bufp = drvdata->vaddr;
		else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
			bufp -= drvdata->size;
		if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
			len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
	}

	if (copy_to_user(data, bufp, len)) {
		dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += len;

	dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
		__func__, len, (int)(drvdata->size - *ppos));
	return len;
}

static int tmc_release(struct inode *inode, struct file *file)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	if (--drvdata->read_count) {
		if (drvdata->read_count < 0) {
			dev_err(drvdata->dev, "mismatched close\n");
			drvdata->read_count = 0;
		}
		goto out;
	}

	tmc_read_unprepare(drvdata);
out:
	dev_dbg(drvdata->dev, "%s: released\n", __func__);
	return 0;
}

static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	unsigned long flags;
	u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
	u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
	u32 devid;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
	tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
	tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
	tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
	tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
	tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
	tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
	tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
	tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return sprintf(buf,
		       "Depth:\t\t0x%x\n"
		       "Status:\t\t0x%x\n"
		       "RAM read ptr:\t0x%x\n"
		       "RAM wrt ptr:\t0x%x\n"
		       "Trigger cnt:\t0x%x\n"
		       "Control:\t0x%x\n"
		       "Flush status:\t0x%x\n"
		       "Flush ctrl:\t0x%x\n"
		       "Mode:\t\t0x%x\n"
		       "PSCR:\t\t0x%x\n"
		       "DEVID:\t\t0x%x\n",
		       tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
		       tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
}
static DEVICE_ATTR_RO(status);

static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etb);

static struct attribute *coresight_etr_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etr);

static struct attribute *coresight_etf_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etf);

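/*
 * The TMC variant (ETB, ETR or ETF) is read from DEVID[7:6].  For an ETR
 * the buffer size comes from the optional "arm,buffer-size" DT property
 * (default 1MB) and backs a DMA coherent allocation; for ETB/ETF it is the
 * internal RAM size reported by TMC_RSZ, in bytes.
 */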
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		if (np)
			ret = of_property_read_u32(np,
						   "arm,buffer-size",
						   &drvdata->size);
		if (ret)
			drvdata->size = SZ_1M;
	} else {
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	pm_runtime_put(&adev->dev);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
						    &drvdata->paddr, GFP_KERNEL);
		if (!drvdata->vaddr)
			return -ENOMEM;

		memset(drvdata->vaddr, 0, drvdata->size);
		drvdata->buf = drvdata->vaddr;
	} else {
		drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
		if (!drvdata->buf)
			return -ENOMEM;
	}

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto err_devm_kzalloc;
	}

	desc->pdata = pdata;
	desc->dev = dev;
	desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etb_cs_ops;
		desc->groups = coresight_etb_groups;
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etr_cs_ops;
		desc->groups = coresight_etr_groups;
	} else {
		desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc->ops = &tmc_etf_cs_ops;
		desc->groups = coresight_etf_groups;
	}

	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_devm_kzalloc;
	}

	drvdata->miscdev.name = pdata->name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		goto err_misc_register;

	dev_info(dev, "TMC initialized\n");
	return 0;

err_misc_register:
	coresight_unregister(drvdata->csdev);
err_devm_kzalloc:
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		dma_free_coherent(dev, drvdata->size,
				  drvdata->vaddr, drvdata->paddr);
	return ret;
}

static int tmc_remove(struct amba_device *adev)
{
	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);

	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		dma_free_coherent(drvdata->dev, drvdata->size,
				  drvdata->vaddr, drvdata->paddr);

	return 0;
}

static struct amba_id tmc_ids[] = {
	{
		.id     = 0x0003b961,
		.mask   = 0x0003ffff,
	},
	{ 0, 0},
};

static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
	},
	.probe		= tmc_probe,
	.remove		= tmc_remove,
	.id_table	= tmc_ids,
};

module_amba_driver(tmc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");