1/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/module.h>
19#include <linux/io.h>
20#include <linux/err.h>
21#include <linux/fs.h>
22#include <linux/slab.h>
23#include <linux/delay.h>
24#include <linux/smp.h>
25#include <linux/sysfs.h>
26#include <linux/stat.h>
27#include <linux/clk.h>
28#include <linux/cpu.h>
29#include <linux/coresight.h>
30#include <linux/pm_wakeup.h>
31#include <linux/amba/bus.h>
32#include <linux/seq_file.h>
33#include <linux/uaccess.h>
34#include <linux/pm_runtime.h>
35#include <asm/sections.h>
36
37#include "coresight-etm4x.h"
38
39static int boot_enable;
40module_param_named(boot_enable, boot_enable, int, S_IRUGO);
41
42/* The number of ETMv4 currently registered */
43static int etm4_count;
44static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
45
46static void etm4_os_unlock(void *info)
47{
48	struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
49
50	/* Writing any value to ETMOSLAR unlocks the trace registers */
51	writel_relaxed(0x0, drvdata->base + TRCOSLAR);
52	isb();
53}
54
55static bool etm4_arch_supported(u8 arch)
56{
57	switch (arch) {
58	case ETM_ARCH_V4:
59		break;
60	default:
61		return false;
62	}
63	return true;
64}
65
66static int etm4_trace_id(struct coresight_device *csdev)
67{
68	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
69	unsigned long flags;
70	int trace_id = -1;
71
72	if (!drvdata->enable)
73		return drvdata->trcid;
74
75	pm_runtime_get_sync(drvdata->dev);
76	spin_lock_irqsave(&drvdata->spinlock, flags);
77
78	CS_UNLOCK(drvdata->base);
79	trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
80	trace_id &= ETM_TRACEID_MASK;
81	CS_LOCK(drvdata->base);
82
83	spin_unlock_irqrestore(&drvdata->spinlock, flags);
84	pm_runtime_put(drvdata->dev);
85
86	return trace_id;
87}
88
89static void etm4_enable_hw(void *info)
90{
91	int i;
92	struct etmv4_drvdata *drvdata = info;
93
94	CS_UNLOCK(drvdata->base);
95
96	etm4_os_unlock(drvdata);
97
98	/* Disable the trace unit before programming trace registers */
99	writel_relaxed(0, drvdata->base + TRCPRGCTLR);
100
101	/* wait for TRCSTATR.IDLE to go up */
102	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
103		dev_err(drvdata->dev,
104			"timeout observed when probing at offset %#x\n",
105			TRCSTATR);
106
107	writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
108	writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
109	/* nothing specific implemented */
110	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
111	writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
112	writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
113	writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
114	writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
115	writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
116	writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
117	writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
118	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
119	writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
120	writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
121	writel_relaxed(drvdata->vissctlr,
122		       drvdata->base + TRCVISSCTLR);
123	writel_relaxed(drvdata->vipcssctlr,
124		       drvdata->base + TRCVIPCSSCTLR);
125	for (i = 0; i < drvdata->nrseqstate - 1; i++)
126		writel_relaxed(drvdata->seq_ctrl[i],
127			       drvdata->base + TRCSEQEVRn(i));
128	writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
129	writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
130	writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
131	for (i = 0; i < drvdata->nr_cntr; i++) {
132		writel_relaxed(drvdata->cntrldvr[i],
133			       drvdata->base + TRCCNTRLDVRn(i));
134		writel_relaxed(drvdata->cntr_ctrl[i],
135			       drvdata->base + TRCCNTCTLRn(i));
136		writel_relaxed(drvdata->cntr_val[i],
137			       drvdata->base + TRCCNTVRn(i));
138	}
139
140	/* Resource selector pair 0 is always implemented and reserved */
141	for (i = 2; i < drvdata->nr_resource * 2; i++)
142		writel_relaxed(drvdata->res_ctrl[i],
143			       drvdata->base + TRCRSCTLRn(i));
144
145	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
146		writel_relaxed(drvdata->ss_ctrl[i],
147			       drvdata->base + TRCSSCCRn(i));
148		writel_relaxed(drvdata->ss_status[i],
149			       drvdata->base + TRCSSCSRn(i));
150		writel_relaxed(drvdata->ss_pe_cmp[i],
151			       drvdata->base + TRCSSPCICRn(i));
152	}
153	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
154		writeq_relaxed(drvdata->addr_val[i],
155			       drvdata->base + TRCACVRn(i));
156		writeq_relaxed(drvdata->addr_acc[i],
157			       drvdata->base + TRCACATRn(i));
158	}
159	for (i = 0; i < drvdata->numcidc; i++)
160		writeq_relaxed(drvdata->ctxid_pid[i],
161			       drvdata->base + TRCCIDCVRn(i));
162	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
163	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
164
165	for (i = 0; i < drvdata->numvmidc; i++)
166		writeq_relaxed(drvdata->vmid_val[i],
167			       drvdata->base + TRCVMIDCVRn(i));
168	writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
169	writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
170
171	/* Enable the trace unit */
172	writel_relaxed(1, drvdata->base + TRCPRGCTLR);
173
174	/* wait for TRCSTATR.IDLE to go back down to '0' */
175	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
176		dev_err(drvdata->dev,
177			"timeout observed when probing at offset %#x\n",
178			TRCSTATR);
179
180	CS_LOCK(drvdata->base);
181
182	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
183}
184
185static int etm4_enable(struct coresight_device *csdev)
186{
187	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
188	int ret;
189
190	pm_runtime_get_sync(drvdata->dev);
191	spin_lock(&drvdata->spinlock);
192
193	/*
194	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
195	 * ensures that register writes occur when cpu is powered.
196	 */
197	ret = smp_call_function_single(drvdata->cpu,
198				       etm4_enable_hw, drvdata, 1);
199	if (ret)
200		goto err;
201	drvdata->enable = true;
202	drvdata->sticky_enable = true;
203
204	spin_unlock(&drvdata->spinlock);
205
206	dev_info(drvdata->dev, "ETM tracing enabled\n");
207	return 0;
208err:
209	spin_unlock(&drvdata->spinlock);
210	pm_runtime_put(drvdata->dev);
211	return ret;
212}
213
214static void etm4_disable_hw(void *info)
215{
216	u32 control;
217	struct etmv4_drvdata *drvdata = info;
218
219	CS_UNLOCK(drvdata->base);
220
221	control = readl_relaxed(drvdata->base + TRCPRGCTLR);
222
223	/* EN, bit[0] Trace unit enable bit */
224	control &= ~0x1;
225
226	/* make sure everything completes before disabling */
227	mb();
228	isb();
229	writel_relaxed(control, drvdata->base + TRCPRGCTLR);
230
231	CS_LOCK(drvdata->base);
232
233	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
234}
235
236static void etm4_disable(struct coresight_device *csdev)
237{
238	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
239
240	/*
241	 * Taking hotplug lock here protects from clocks getting disabled
242	 * with tracing being left on (crash scenario) if user disable occurs
243	 * after cpu online mask indicates the cpu is offline but before the
244	 * DYING hotplug callback is serviced by the ETM driver.
245	 */
246	get_online_cpus();
247	spin_lock(&drvdata->spinlock);
248
249	/*
250	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
251	 * ensures that register writes occur when cpu is powered.
252	 */
253	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
254	drvdata->enable = false;
255
256	spin_unlock(&drvdata->spinlock);
257	put_online_cpus();
258
259	pm_runtime_put(drvdata->dev);
260
261	dev_info(drvdata->dev, "ETM tracing disabled\n");
262}
263
264static const struct coresight_ops_source etm4_source_ops = {
265	.trace_id	= etm4_trace_id,
266	.enable		= etm4_enable,
267	.disable	= etm4_disable,
268};
269
270static const struct coresight_ops etm4_cs_ops = {
271	.source_ops	= &etm4_source_ops,
272};
273
274static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
275{
276	u8 idx = drvdata->addr_idx;
277
278	/*
279	 * TRCACATRn.TYPE bit[1:0]: type of comparison
280	 * the trace unit performs
281	 */
282	if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
283		if (idx % 2 != 0)
284			return -EINVAL;
285
286		/*
287		 * We are performing instruction address comparison. Set the
288		 * relevant bit of ViewInst Include/Exclude Control register
289		 * for corresponding address comparator pair.
290		 */
291		if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
292		    drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
293			return -EINVAL;
294
295		if (exclude == true) {
296			/*
297			 * Set exclude bit and unset the include bit
298			 * corresponding to comparator pair
299			 */
300			drvdata->viiectlr |= BIT(idx / 2 + 16);
301			drvdata->viiectlr &= ~BIT(idx / 2);
302		} else {
303			/*
304			 * Set include bit and unset exclude bit
305			 * corresponding to comparator pair
306			 */
307			drvdata->viiectlr |= BIT(idx / 2);
308			drvdata->viiectlr &= ~BIT(idx / 2 + 16);
309		}
310	}
311	return 0;
312}
313
314static ssize_t nr_pe_cmp_show(struct device *dev,
315			      struct device_attribute *attr,
316			      char *buf)
317{
318	unsigned long val;
319	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
320
321	val = drvdata->nr_pe_cmp;
322	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
323}
324static DEVICE_ATTR_RO(nr_pe_cmp);
325
326static ssize_t nr_addr_cmp_show(struct device *dev,
327				struct device_attribute *attr,
328				char *buf)
329{
330	unsigned long val;
331	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
332
333	val = drvdata->nr_addr_cmp;
334	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
335}
336static DEVICE_ATTR_RO(nr_addr_cmp);
337
338static ssize_t nr_cntr_show(struct device *dev,
339			    struct device_attribute *attr,
340			    char *buf)
341{
342	unsigned long val;
343	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
344
345	val = drvdata->nr_cntr;
346	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
347}
348static DEVICE_ATTR_RO(nr_cntr);
349
350static ssize_t nr_ext_inp_show(struct device *dev,
351			       struct device_attribute *attr,
352			       char *buf)
353{
354	unsigned long val;
355	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
356
357	val = drvdata->nr_ext_inp;
358	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
359}
360static DEVICE_ATTR_RO(nr_ext_inp);
361
362static ssize_t numcidc_show(struct device *dev,
363			    struct device_attribute *attr,
364			    char *buf)
365{
366	unsigned long val;
367	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
368
369	val = drvdata->numcidc;
370	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
371}
372static DEVICE_ATTR_RO(numcidc);
373
374static ssize_t numvmidc_show(struct device *dev,
375			     struct device_attribute *attr,
376			     char *buf)
377{
378	unsigned long val;
379	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
380
381	val = drvdata->numvmidc;
382	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
383}
384static DEVICE_ATTR_RO(numvmidc);
385
386static ssize_t nrseqstate_show(struct device *dev,
387			       struct device_attribute *attr,
388			       char *buf)
389{
390	unsigned long val;
391	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
392
393	val = drvdata->nrseqstate;
394	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
395}
396static DEVICE_ATTR_RO(nrseqstate);
397
398static ssize_t nr_resource_show(struct device *dev,
399				struct device_attribute *attr,
400				char *buf)
401{
402	unsigned long val;
403	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
404
405	val = drvdata->nr_resource;
406	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
407}
408static DEVICE_ATTR_RO(nr_resource);
409
410static ssize_t nr_ss_cmp_show(struct device *dev,
411			      struct device_attribute *attr,
412			      char *buf)
413{
414	unsigned long val;
415	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
416
417	val = drvdata->nr_ss_cmp;
418	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
419}
420static DEVICE_ATTR_RO(nr_ss_cmp);
421
422static ssize_t reset_store(struct device *dev,
423			   struct device_attribute *attr,
424			   const char *buf, size_t size)
425{
426	int i;
427	unsigned long val;
428	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
429
430	if (kstrtoul(buf, 16, &val))
431		return -EINVAL;
432
433	spin_lock(&drvdata->spinlock);
434	if (val)
435		drvdata->mode = 0x0;
436
437	/* Disable data tracing: do not trace load and store data transfers */
438	drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
439	drvdata->cfg &= ~(BIT(1) | BIT(2));
440
441	/* Disable data value and data address tracing */
442	drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
443			   ETM_MODE_DATA_TRACE_VAL);
444	drvdata->cfg &= ~(BIT(16) | BIT(17));
445
446	/* Disable all events tracing */
447	drvdata->eventctrl0 = 0x0;
448	drvdata->eventctrl1 = 0x0;
449
450	/* Disable timestamp event */
451	drvdata->ts_ctrl = 0x0;
452
453	/* Disable stalling */
454	drvdata->stall_ctrl = 0x0;
455
456	/* Reset trace synchronization period  to 2^8 = 256 bytes*/
457	if (drvdata->syncpr == false)
458		drvdata->syncfreq = 0x8;
459
460	/*
461	 * Enable ViewInst to trace everything with start-stop logic in
462	 * started state. ARM recommends start-stop logic is set before
463	 * each trace run.
464	 */
465	drvdata->vinst_ctrl |= BIT(0);
466	if (drvdata->nr_addr_cmp == true) {
467		drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
468		/* SSSTATUS, bit[9] */
469		drvdata->vinst_ctrl |= BIT(9);
470	}
471
472	/* No address range filtering for ViewInst */
473	drvdata->viiectlr = 0x0;
474
475	/* No start-stop filtering for ViewInst */
476	drvdata->vissctlr = 0x0;
477
478	/* Disable seq events */
479	for (i = 0; i < drvdata->nrseqstate-1; i++)
480		drvdata->seq_ctrl[i] = 0x0;
481	drvdata->seq_rst = 0x0;
482	drvdata->seq_state = 0x0;
483
484	/* Disable external input events */
485	drvdata->ext_inp = 0x0;
486
487	drvdata->cntr_idx = 0x0;
488	for (i = 0; i < drvdata->nr_cntr; i++) {
489		drvdata->cntrldvr[i] = 0x0;
490		drvdata->cntr_ctrl[i] = 0x0;
491		drvdata->cntr_val[i] = 0x0;
492	}
493
494	/* Resource selector pair 0 is always implemented and reserved */
495	drvdata->res_idx = 0x2;
496	for (i = 2; i < drvdata->nr_resource * 2; i++)
497		drvdata->res_ctrl[i] = 0x0;
498
499	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
500		drvdata->ss_ctrl[i] = 0x0;
501		drvdata->ss_pe_cmp[i] = 0x0;
502	}
503
504	drvdata->addr_idx = 0x0;
505	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
506		drvdata->addr_val[i] = 0x0;
507		drvdata->addr_acc[i] = 0x0;
508		drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
509	}
510
511	drvdata->ctxid_idx = 0x0;
512	for (i = 0; i < drvdata->numcidc; i++) {
513		drvdata->ctxid_pid[i] = 0x0;
514		drvdata->ctxid_vpid[i] = 0x0;
515	}
516
517	drvdata->ctxid_mask0 = 0x0;
518	drvdata->ctxid_mask1 = 0x0;
519
520	drvdata->vmid_idx = 0x0;
521	for (i = 0; i < drvdata->numvmidc; i++)
522		drvdata->vmid_val[i] = 0x0;
523	drvdata->vmid_mask0 = 0x0;
524	drvdata->vmid_mask1 = 0x0;
525
526	drvdata->trcid = drvdata->cpu + 1;
527	spin_unlock(&drvdata->spinlock);
528	return size;
529}
530static DEVICE_ATTR_WO(reset);
531
532static ssize_t mode_show(struct device *dev,
533			 struct device_attribute *attr,
534			 char *buf)
535{
536	unsigned long val;
537	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
538
539	val = drvdata->mode;
540	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
541}
542
543static ssize_t mode_store(struct device *dev,
544			  struct device_attribute *attr,
545			  const char *buf, size_t size)
546{
547	unsigned long val, mode;
548	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
549
550	if (kstrtoul(buf, 16, &val))
551		return -EINVAL;
552
553	spin_lock(&drvdata->spinlock);
554	drvdata->mode = val & ETMv4_MODE_ALL;
555
556	if (drvdata->mode & ETM_MODE_EXCLUDE)
557		etm4_set_mode_exclude(drvdata, true);
558	else
559		etm4_set_mode_exclude(drvdata, false);
560
561	if (drvdata->instrp0 == true) {
562		/* start by clearing instruction P0 field */
563		drvdata->cfg  &= ~(BIT(1) | BIT(2));
564		if (drvdata->mode & ETM_MODE_LOAD)
565			/* 0b01 Trace load instructions as P0 instructions */
566			drvdata->cfg  |= BIT(1);
567		if (drvdata->mode & ETM_MODE_STORE)
568			/* 0b10 Trace store instructions as P0 instructions */
569			drvdata->cfg  |= BIT(2);
570		if (drvdata->mode & ETM_MODE_LOAD_STORE)
571			/*
572			 * 0b11 Trace load and store instructions
573			 * as P0 instructions
574			 */
575			drvdata->cfg  |= BIT(1) | BIT(2);
576	}
577
578	/* bit[3], Branch broadcast mode */
579	if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
580		drvdata->cfg |= BIT(3);
581	else
582		drvdata->cfg &= ~BIT(3);
583
584	/* bit[4], Cycle counting instruction trace bit */
585	if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
586		(drvdata->trccci == true))
587		drvdata->cfg |= BIT(4);
588	else
589		drvdata->cfg &= ~BIT(4);
590
591	/* bit[6], Context ID tracing bit */
592	if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
593		drvdata->cfg |= BIT(6);
594	else
595		drvdata->cfg &= ~BIT(6);
596
597	if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
598		drvdata->cfg |= BIT(7);
599	else
600		drvdata->cfg &= ~BIT(7);
601
602	/* bits[10:8], Conditional instruction tracing bit */
603	mode = ETM_MODE_COND(drvdata->mode);
604	if (drvdata->trccond == true) {
605		drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
606		drvdata->cfg |= mode << 8;
607	}
608
609	/* bit[11], Global timestamp tracing bit */
610	if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
611		drvdata->cfg |= BIT(11);
612	else
613		drvdata->cfg &= ~BIT(11);
614
615	/* bit[12], Return stack enable bit */
616	if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
617		(drvdata->retstack == true))
618		drvdata->cfg |= BIT(12);
619	else
620		drvdata->cfg &= ~BIT(12);
621
622	/* bits[14:13], Q element enable field */
623	mode = ETM_MODE_QELEM(drvdata->mode);
624	/* start by clearing QE bits */
625	drvdata->cfg &= ~(BIT(13) | BIT(14));
626	/* if supported, Q elements with instruction counts are enabled */
627	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
628		drvdata->cfg |= BIT(13);
629	/*
630	 * if supported, Q elements with and without instruction
631	 * counts are enabled
632	 */
633	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
634		drvdata->cfg |= BIT(14);
635
636	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
637	if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
638	    (drvdata->atbtrig == true))
639		drvdata->eventctrl1 |= BIT(11);
640	else
641		drvdata->eventctrl1 &= ~BIT(11);
642
643	/* bit[12], Low-power state behavior override bit */
644	if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
645	    (drvdata->lpoverride == true))
646		drvdata->eventctrl1 |= BIT(12);
647	else
648		drvdata->eventctrl1 &= ~BIT(12);
649
650	/* bit[8], Instruction stall bit */
651	if (drvdata->mode & ETM_MODE_ISTALL_EN)
652		drvdata->stall_ctrl |= BIT(8);
653	else
654		drvdata->stall_ctrl &= ~BIT(8);
655
656	/* bit[10], Prioritize instruction trace bit */
657	if (drvdata->mode & ETM_MODE_INSTPRIO)
658		drvdata->stall_ctrl |= BIT(10);
659	else
660		drvdata->stall_ctrl &= ~BIT(10);
661
662	/* bit[13], Trace overflow prevention bit */
663	if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
664		(drvdata->nooverflow == true))
665		drvdata->stall_ctrl |= BIT(13);
666	else
667		drvdata->stall_ctrl &= ~BIT(13);
668
669	/* bit[9] Start/stop logic control bit */
670	if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
671		drvdata->vinst_ctrl |= BIT(9);
672	else
673		drvdata->vinst_ctrl &= ~BIT(9);
674
675	/* bit[10], Whether a trace unit must trace a Reset exception */
676	if (drvdata->mode & ETM_MODE_TRACE_RESET)
677		drvdata->vinst_ctrl |= BIT(10);
678	else
679		drvdata->vinst_ctrl &= ~BIT(10);
680
681	/* bit[11], Whether a trace unit must trace a system error exception */
682	if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
683		(drvdata->trc_error == true))
684		drvdata->vinst_ctrl |= BIT(11);
685	else
686		drvdata->vinst_ctrl &= ~BIT(11);
687
688	spin_unlock(&drvdata->spinlock);
689	return size;
690}
691static DEVICE_ATTR_RW(mode);
692
693static ssize_t pe_show(struct device *dev,
694		       struct device_attribute *attr,
695		       char *buf)
696{
697	unsigned long val;
698	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
699
700	val = drvdata->pe_sel;
701	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
702}
703
704static ssize_t pe_store(struct device *dev,
705			struct device_attribute *attr,
706			const char *buf, size_t size)
707{
708	unsigned long val;
709	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
710
711	if (kstrtoul(buf, 16, &val))
712		return -EINVAL;
713
714	spin_lock(&drvdata->spinlock);
715	if (val > drvdata->nr_pe) {
716		spin_unlock(&drvdata->spinlock);
717		return -EINVAL;
718	}
719
720	drvdata->pe_sel = val;
721	spin_unlock(&drvdata->spinlock);
722	return size;
723}
724static DEVICE_ATTR_RW(pe);
725
726static ssize_t event_show(struct device *dev,
727			  struct device_attribute *attr,
728			  char *buf)
729{
730	unsigned long val;
731	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
732
733	val = drvdata->eventctrl0;
734	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
735}
736
737static ssize_t event_store(struct device *dev,
738			   struct device_attribute *attr,
739			   const char *buf, size_t size)
740{
741	unsigned long val;
742	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
743
744	if (kstrtoul(buf, 16, &val))
745		return -EINVAL;
746
747	spin_lock(&drvdata->spinlock);
748	switch (drvdata->nr_event) {
749	case 0x0:
750		/* EVENT0, bits[7:0] */
751		drvdata->eventctrl0 = val & 0xFF;
752		break;
753	case 0x1:
754		 /* EVENT1, bits[15:8] */
755		drvdata->eventctrl0 = val & 0xFFFF;
756		break;
757	case 0x2:
758		/* EVENT2, bits[23:16] */
759		drvdata->eventctrl0 = val & 0xFFFFFF;
760		break;
761	case 0x3:
762		/* EVENT3, bits[31:24] */
763		drvdata->eventctrl0 = val;
764		break;
765	default:
766		break;
767	}
768	spin_unlock(&drvdata->spinlock);
769	return size;
770}
771static DEVICE_ATTR_RW(event);
772
773static ssize_t event_instren_show(struct device *dev,
774				  struct device_attribute *attr,
775				  char *buf)
776{
777	unsigned long val;
778	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
779
780	val = BMVAL(drvdata->eventctrl1, 0, 3);
781	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
782}
783
784static ssize_t event_instren_store(struct device *dev,
785				   struct device_attribute *attr,
786				   const char *buf, size_t size)
787{
788	unsigned long val;
789	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
790
791	if (kstrtoul(buf, 16, &val))
792		return -EINVAL;
793
794	spin_lock(&drvdata->spinlock);
795	/* start by clearing all instruction event enable bits */
796	drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
797	switch (drvdata->nr_event) {
798	case 0x0:
799		/* generate Event element for event 1 */
800		drvdata->eventctrl1 |= val & BIT(1);
801		break;
802	case 0x1:
803		/* generate Event element for event 1 and 2 */
804		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
805		break;
806	case 0x2:
807		/* generate Event element for event 1, 2 and 3 */
808		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
809		break;
810	case 0x3:
811		/* generate Event element for all 4 events */
812		drvdata->eventctrl1 |= val & 0xF;
813		break;
814	default:
815		break;
816	}
817	spin_unlock(&drvdata->spinlock);
818	return size;
819}
820static DEVICE_ATTR_RW(event_instren);
821
822static ssize_t event_ts_show(struct device *dev,
823			     struct device_attribute *attr,
824			     char *buf)
825{
826	unsigned long val;
827	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
828
829	val = drvdata->ts_ctrl;
830	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
831}
832
833static ssize_t event_ts_store(struct device *dev,
834			      struct device_attribute *attr,
835			      const char *buf, size_t size)
836{
837	unsigned long val;
838	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
839
840	if (kstrtoul(buf, 16, &val))
841		return -EINVAL;
842	if (!drvdata->ts_size)
843		return -EINVAL;
844
845	drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
846	return size;
847}
848static DEVICE_ATTR_RW(event_ts);
849
850static ssize_t syncfreq_show(struct device *dev,
851			     struct device_attribute *attr,
852			     char *buf)
853{
854	unsigned long val;
855	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
856
857	val = drvdata->syncfreq;
858	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
859}
860
861static ssize_t syncfreq_store(struct device *dev,
862			      struct device_attribute *attr,
863			      const char *buf, size_t size)
864{
865	unsigned long val;
866	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
867
868	if (kstrtoul(buf, 16, &val))
869		return -EINVAL;
870	if (drvdata->syncpr == true)
871		return -EINVAL;
872
873	drvdata->syncfreq = val & ETMv4_SYNC_MASK;
874	return size;
875}
876static DEVICE_ATTR_RW(syncfreq);
877
878static ssize_t cyc_threshold_show(struct device *dev,
879				  struct device_attribute *attr,
880				  char *buf)
881{
882	unsigned long val;
883	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
884
885	val = drvdata->ccctlr;
886	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
887}
888
889static ssize_t cyc_threshold_store(struct device *dev,
890				   struct device_attribute *attr,
891				   const char *buf, size_t size)
892{
893	unsigned long val;
894	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
895
896	if (kstrtoul(buf, 16, &val))
897		return -EINVAL;
898	if (val < drvdata->ccitmin)
899		return -EINVAL;
900
901	drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
902	return size;
903}
904static DEVICE_ATTR_RW(cyc_threshold);
905
906static ssize_t bb_ctrl_show(struct device *dev,
907			    struct device_attribute *attr,
908			    char *buf)
909{
910	unsigned long val;
911	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
912
913	val = drvdata->bb_ctrl;
914	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
915}
916
917static ssize_t bb_ctrl_store(struct device *dev,
918			     struct device_attribute *attr,
919			     const char *buf, size_t size)
920{
921	unsigned long val;
922	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
923
924	if (kstrtoul(buf, 16, &val))
925		return -EINVAL;
926	if (drvdata->trcbb == false)
927		return -EINVAL;
928	if (!drvdata->nr_addr_cmp)
929		return -EINVAL;
930	/*
931	 * Bit[7:0] selects which address range comparator is used for
932	 * branch broadcast control.
933	 */
934	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
935		return -EINVAL;
936
937	drvdata->bb_ctrl = val;
938	return size;
939}
940static DEVICE_ATTR_RW(bb_ctrl);
941
942static ssize_t event_vinst_show(struct device *dev,
943				struct device_attribute *attr,
944				char *buf)
945{
946	unsigned long val;
947	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
948
949	val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
950	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
951}
952
953static ssize_t event_vinst_store(struct device *dev,
954				 struct device_attribute *attr,
955				 const char *buf, size_t size)
956{
957	unsigned long val;
958	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
959
960	if (kstrtoul(buf, 16, &val))
961		return -EINVAL;
962
963	spin_lock(&drvdata->spinlock);
964	val &= ETMv4_EVENT_MASK;
965	drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
966	drvdata->vinst_ctrl |= val;
967	spin_unlock(&drvdata->spinlock);
968	return size;
969}
970static DEVICE_ATTR_RW(event_vinst);
971
972static ssize_t s_exlevel_vinst_show(struct device *dev,
973				    struct device_attribute *attr,
974				    char *buf)
975{
976	unsigned long val;
977	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
978
979	val = BMVAL(drvdata->vinst_ctrl, 16, 19);
980	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
981}
982
983static ssize_t s_exlevel_vinst_store(struct device *dev,
984				     struct device_attribute *attr,
985				     const char *buf, size_t size)
986{
987	unsigned long val;
988	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
989
990	if (kstrtoul(buf, 16, &val))
991		return -EINVAL;
992
993	spin_lock(&drvdata->spinlock);
994	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
995	drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
996	/* enable instruction tracing for corresponding exception level */
997	val &= drvdata->s_ex_level;
998	drvdata->vinst_ctrl |= (val << 16);
999	spin_unlock(&drvdata->spinlock);
1000	return size;
1001}
1002static DEVICE_ATTR_RW(s_exlevel_vinst);
1003
1004static ssize_t ns_exlevel_vinst_show(struct device *dev,
1005				     struct device_attribute *attr,
1006				     char *buf)
1007{
1008	unsigned long val;
1009	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1010
1011	/* EXLEVEL_NS, bits[23:20] */
1012	val = BMVAL(drvdata->vinst_ctrl, 20, 23);
1013	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1014}
1015
1016static ssize_t ns_exlevel_vinst_store(struct device *dev,
1017				      struct device_attribute *attr,
1018				      const char *buf, size_t size)
1019{
1020	unsigned long val;
1021	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1022
1023	if (kstrtoul(buf, 16, &val))
1024		return -EINVAL;
1025
1026	spin_lock(&drvdata->spinlock);
1027	/* clear EXLEVEL_NS bits (bit[23] is never implemented */
1028	drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
1029	/* enable instruction tracing for corresponding exception level */
1030	val &= drvdata->ns_ex_level;
1031	drvdata->vinst_ctrl |= (val << 20);
1032	spin_unlock(&drvdata->spinlock);
1033	return size;
1034}
1035static DEVICE_ATTR_RW(ns_exlevel_vinst);
1036
1037static ssize_t addr_idx_show(struct device *dev,
1038			     struct device_attribute *attr,
1039			     char *buf)
1040{
1041	unsigned long val;
1042	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1043
1044	val = drvdata->addr_idx;
1045	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1046}
1047
1048static ssize_t addr_idx_store(struct device *dev,
1049			      struct device_attribute *attr,
1050			      const char *buf, size_t size)
1051{
1052	unsigned long val;
1053	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1054
1055	if (kstrtoul(buf, 16, &val))
1056		return -EINVAL;
1057	if (val >= drvdata->nr_addr_cmp * 2)
1058		return -EINVAL;
1059
1060	/*
1061	 * Use spinlock to ensure index doesn't change while it gets
1062	 * dereferenced multiple times within a spinlock block elsewhere.
1063	 */
1064	spin_lock(&drvdata->spinlock);
1065	drvdata->addr_idx = val;
1066	spin_unlock(&drvdata->spinlock);
1067	return size;
1068}
1069static DEVICE_ATTR_RW(addr_idx);
1070
1071static ssize_t addr_instdatatype_show(struct device *dev,
1072				      struct device_attribute *attr,
1073				      char *buf)
1074{
1075	ssize_t len;
1076	u8 val, idx;
1077	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1078
1079	spin_lock(&drvdata->spinlock);
1080	idx = drvdata->addr_idx;
1081	val = BMVAL(drvdata->addr_acc[idx], 0, 1);
1082	len = scnprintf(buf, PAGE_SIZE, "%s\n",
1083			val == ETM_INSTR_ADDR ? "instr" :
1084			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
1085			(val == ETM_DATA_STORE_ADDR ? "data_store" :
1086			"data_load_store")));
1087	spin_unlock(&drvdata->spinlock);
1088	return len;
1089}
1090
1091static ssize_t addr_instdatatype_store(struct device *dev,
1092				       struct device_attribute *attr,
1093				       const char *buf, size_t size)
1094{
1095	u8 idx;
1096	char str[20] = "";
1097	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1098
1099	if (strlen(buf) >= 20)
1100		return -EINVAL;
1101	if (sscanf(buf, "%s", str) != 1)
1102		return -EINVAL;
1103
1104	spin_lock(&drvdata->spinlock);
1105	idx = drvdata->addr_idx;
1106	if (!strcmp(str, "instr"))
1107		/* TYPE, bits[1:0] */
1108		drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
1109
1110	spin_unlock(&drvdata->spinlock);
1111	return size;
1112}
1113static DEVICE_ATTR_RW(addr_instdatatype);
1114
1115static ssize_t addr_single_show(struct device *dev,
1116				struct device_attribute *attr,
1117				char *buf)
1118{
1119	u8 idx;
1120	unsigned long val;
1121	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1122
1123	idx = drvdata->addr_idx;
1124	spin_lock(&drvdata->spinlock);
1125	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1126	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1127		spin_unlock(&drvdata->spinlock);
1128		return -EPERM;
1129	}
1130	val = (unsigned long)drvdata->addr_val[idx];
1131	spin_unlock(&drvdata->spinlock);
1132	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1133}
1134
1135static ssize_t addr_single_store(struct device *dev,
1136				 struct device_attribute *attr,
1137				 const char *buf, size_t size)
1138{
1139	u8 idx;
1140	unsigned long val;
1141	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1142
1143	if (kstrtoul(buf, 16, &val))
1144		return -EINVAL;
1145
1146	spin_lock(&drvdata->spinlock);
1147	idx = drvdata->addr_idx;
1148	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1149	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1150		spin_unlock(&drvdata->spinlock);
1151		return -EPERM;
1152	}
1153
1154	drvdata->addr_val[idx] = (u64)val;
1155	drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
1156	spin_unlock(&drvdata->spinlock);
1157	return size;
1158}
1159static DEVICE_ATTR_RW(addr_single);
1160
1161static ssize_t addr_range_show(struct device *dev,
1162			       struct device_attribute *attr,
1163			       char *buf)
1164{
1165	u8 idx;
1166	unsigned long val1, val2;
1167	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1168
1169	spin_lock(&drvdata->spinlock);
1170	idx = drvdata->addr_idx;
1171	if (idx % 2 != 0) {
1172		spin_unlock(&drvdata->spinlock);
1173		return -EPERM;
1174	}
1175	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1176	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1177	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1178	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1179		spin_unlock(&drvdata->spinlock);
1180		return -EPERM;
1181	}
1182
1183	val1 = (unsigned long)drvdata->addr_val[idx];
1184	val2 = (unsigned long)drvdata->addr_val[idx + 1];
1185	spin_unlock(&drvdata->spinlock);
1186	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1187}
1188
1189static ssize_t addr_range_store(struct device *dev,
1190				struct device_attribute *attr,
1191				const char *buf, size_t size)
1192{
1193	u8 idx;
1194	unsigned long val1, val2;
1195	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1196
1197	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1198		return -EINVAL;
1199	/* lower address comparator cannot have a higher address value */
1200	if (val1 > val2)
1201		return -EINVAL;
1202
1203	spin_lock(&drvdata->spinlock);
1204	idx = drvdata->addr_idx;
1205	if (idx % 2 != 0) {
1206		spin_unlock(&drvdata->spinlock);
1207		return -EPERM;
1208	}
1209
1210	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1211	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1212	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1213	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1214		spin_unlock(&drvdata->spinlock);
1215		return -EPERM;
1216	}
1217
1218	drvdata->addr_val[idx] = (u64)val1;
1219	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1220	drvdata->addr_val[idx + 1] = (u64)val2;
1221	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1222	/*
1223	 * Program include or exclude control bits for vinst or vdata
1224	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1225	 */
1226	if (drvdata->mode & ETM_MODE_EXCLUDE)
1227		etm4_set_mode_exclude(drvdata, true);
1228	else
1229		etm4_set_mode_exclude(drvdata, false);
1230
1231	spin_unlock(&drvdata->spinlock);
1232	return size;
1233}
1234static DEVICE_ATTR_RW(addr_range);
1235
1236static ssize_t addr_start_show(struct device *dev,
1237			       struct device_attribute *attr,
1238			       char *buf)
1239{
1240	u8 idx;
1241	unsigned long val;
1242	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1243
1244	spin_lock(&drvdata->spinlock);
1245	idx = drvdata->addr_idx;
1246
1247	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1248	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1249		spin_unlock(&drvdata->spinlock);
1250		return -EPERM;
1251	}
1252
1253	val = (unsigned long)drvdata->addr_val[idx];
1254	spin_unlock(&drvdata->spinlock);
1255	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1256}
1257
1258static ssize_t addr_start_store(struct device *dev,
1259				struct device_attribute *attr,
1260				const char *buf, size_t size)
1261{
1262	u8 idx;
1263	unsigned long val;
1264	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1265
1266	if (kstrtoul(buf, 16, &val))
1267		return -EINVAL;
1268
1269	spin_lock(&drvdata->spinlock);
1270	idx = drvdata->addr_idx;
1271	if (!drvdata->nr_addr_cmp) {
1272		spin_unlock(&drvdata->spinlock);
1273		return -EINVAL;
1274	}
1275	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1276	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1277		spin_unlock(&drvdata->spinlock);
1278		return -EPERM;
1279	}
1280
1281	drvdata->addr_val[idx] = (u64)val;
1282	drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
1283	drvdata->vissctlr |= BIT(idx);
1284	/* SSSTATUS, bit[9] - turn on start/stop logic */
1285	drvdata->vinst_ctrl |= BIT(9);
1286	spin_unlock(&drvdata->spinlock);
1287	return size;
1288}
1289static DEVICE_ATTR_RW(addr_start);
1290
1291static ssize_t addr_stop_show(struct device *dev,
1292			      struct device_attribute *attr,
1293			      char *buf)
1294{
1295	u8 idx;
1296	unsigned long val;
1297	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1298
1299	spin_lock(&drvdata->spinlock);
1300	idx = drvdata->addr_idx;
1301
1302	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1303	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1304		spin_unlock(&drvdata->spinlock);
1305		return -EPERM;
1306	}
1307
1308	val = (unsigned long)drvdata->addr_val[idx];
1309	spin_unlock(&drvdata->spinlock);
1310	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1311}
1312
1313static ssize_t addr_stop_store(struct device *dev,
1314			       struct device_attribute *attr,
1315			       const char *buf, size_t size)
1316{
1317	u8 idx;
1318	unsigned long val;
1319	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1320
1321	if (kstrtoul(buf, 16, &val))
1322		return -EINVAL;
1323
1324	spin_lock(&drvdata->spinlock);
1325	idx = drvdata->addr_idx;
1326	if (!drvdata->nr_addr_cmp) {
1327		spin_unlock(&drvdata->spinlock);
1328		return -EINVAL;
1329	}
1330	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1331	       drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1332		spin_unlock(&drvdata->spinlock);
1333		return -EPERM;
1334	}
1335
1336	drvdata->addr_val[idx] = (u64)val;
1337	drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1338	drvdata->vissctlr |= BIT(idx + 16);
1339	/* SSSTATUS, bit[9] - turn on start/stop logic */
1340	drvdata->vinst_ctrl |= BIT(9);
1341	spin_unlock(&drvdata->spinlock);
1342	return size;
1343}
1344static DEVICE_ATTR_RW(addr_stop);
1345
1346static ssize_t addr_ctxtype_show(struct device *dev,
1347				 struct device_attribute *attr,
1348				 char *buf)
1349{
1350	ssize_t len;
1351	u8 idx, val;
1352	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1353
1354	spin_lock(&drvdata->spinlock);
1355	idx = drvdata->addr_idx;
1356	/* CONTEXTTYPE, bits[3:2] */
1357	val = BMVAL(drvdata->addr_acc[idx], 2, 3);
1358	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1359			(val == ETM_CTX_CTXID ? "ctxid" :
1360			(val == ETM_CTX_VMID ? "vmid" : "all")));
1361	spin_unlock(&drvdata->spinlock);
1362	return len;
1363}
1364
1365static ssize_t addr_ctxtype_store(struct device *dev,
1366				  struct device_attribute *attr,
1367				  const char *buf, size_t size)
1368{
1369	u8 idx;
1370	char str[10] = "";
1371	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1372
1373	if (strlen(buf) >= 10)
1374		return -EINVAL;
1375	if (sscanf(buf, "%s", str) != 1)
1376		return -EINVAL;
1377
1378	spin_lock(&drvdata->spinlock);
1379	idx = drvdata->addr_idx;
1380	if (!strcmp(str, "none"))
1381		/* start by clearing context type bits */
1382		drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1383	else if (!strcmp(str, "ctxid")) {
1384		/* 0b01 The trace unit performs a Context ID */
1385		if (drvdata->numcidc) {
1386			drvdata->addr_acc[idx] |= BIT(2);
1387			drvdata->addr_acc[idx] &= ~BIT(3);
1388		}
1389	} else if (!strcmp(str, "vmid")) {
1390		/* 0b10 The trace unit performs a VMID */
1391		if (drvdata->numvmidc) {
1392			drvdata->addr_acc[idx] &= ~BIT(2);
1393			drvdata->addr_acc[idx] |= BIT(3);
1394		}
1395	} else if (!strcmp(str, "all")) {
1396		/*
1397		 * 0b11 The trace unit performs a Context ID
1398		 * comparison and a VMID
1399		 */
1400		if (drvdata->numcidc)
1401			drvdata->addr_acc[idx] |= BIT(2);
1402		if (drvdata->numvmidc)
1403			drvdata->addr_acc[idx] |= BIT(3);
1404	}
1405	spin_unlock(&drvdata->spinlock);
1406	return size;
1407}
1408static DEVICE_ATTR_RW(addr_ctxtype);
1409
1410static ssize_t addr_context_show(struct device *dev,
1411				 struct device_attribute *attr,
1412				 char *buf)
1413{
1414	u8 idx;
1415	unsigned long val;
1416	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1417
1418	spin_lock(&drvdata->spinlock);
1419	idx = drvdata->addr_idx;
1420	/* context ID comparator bits[6:4] */
1421	val = BMVAL(drvdata->addr_acc[idx], 4, 6);
1422	spin_unlock(&drvdata->spinlock);
1423	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1424}
1425
1426static ssize_t addr_context_store(struct device *dev,
1427				  struct device_attribute *attr,
1428				  const char *buf, size_t size)
1429{
1430	u8 idx;
1431	unsigned long val;
1432	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1433
1434	if (kstrtoul(buf, 16, &val))
1435		return -EINVAL;
1436	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1437		return -EINVAL;
1438	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
1439		     drvdata->numcidc : drvdata->numvmidc))
1440		return -EINVAL;
1441
1442	spin_lock(&drvdata->spinlock);
1443	idx = drvdata->addr_idx;
1444	/* clear context ID comparator bits[6:4] */
1445	drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1446	drvdata->addr_acc[idx] |= (val << 4);
1447	spin_unlock(&drvdata->spinlock);
1448	return size;
1449}
1450static DEVICE_ATTR_RW(addr_context);
1451
1452static ssize_t seq_idx_show(struct device *dev,
1453			    struct device_attribute *attr,
1454			    char *buf)
1455{
1456	unsigned long val;
1457	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1458
1459	val = drvdata->seq_idx;
1460	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1461}
1462
1463static ssize_t seq_idx_store(struct device *dev,
1464			     struct device_attribute *attr,
1465			     const char *buf, size_t size)
1466{
1467	unsigned long val;
1468	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1469
1470	if (kstrtoul(buf, 16, &val))
1471		return -EINVAL;
1472	if (val >= drvdata->nrseqstate - 1)
1473		return -EINVAL;
1474
1475	/*
1476	 * Use spinlock to ensure index doesn't change while it gets
1477	 * dereferenced multiple times within a spinlock block elsewhere.
1478	 */
1479	spin_lock(&drvdata->spinlock);
1480	drvdata->seq_idx = val;
1481	spin_unlock(&drvdata->spinlock);
1482	return size;
1483}
1484static DEVICE_ATTR_RW(seq_idx);
1485
1486static ssize_t seq_state_show(struct device *dev,
1487			      struct device_attribute *attr,
1488			      char *buf)
1489{
1490	unsigned long val;
1491	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1492
1493	val = drvdata->seq_state;
1494	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1495}
1496
1497static ssize_t seq_state_store(struct device *dev,
1498			       struct device_attribute *attr,
1499			       const char *buf, size_t size)
1500{
1501	unsigned long val;
1502	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1503
1504	if (kstrtoul(buf, 16, &val))
1505		return -EINVAL;
1506	if (val >= drvdata->nrseqstate)
1507		return -EINVAL;
1508
1509	drvdata->seq_state = val;
1510	return size;
1511}
1512static DEVICE_ATTR_RW(seq_state);
1513
1514static ssize_t seq_event_show(struct device *dev,
1515			      struct device_attribute *attr,
1516			      char *buf)
1517{
1518	u8 idx;
1519	unsigned long val;
1520	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1521
1522	spin_lock(&drvdata->spinlock);
1523	idx = drvdata->seq_idx;
1524	val = drvdata->seq_ctrl[idx];
1525	spin_unlock(&drvdata->spinlock);
1526	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1527}
1528
1529static ssize_t seq_event_store(struct device *dev,
1530			       struct device_attribute *attr,
1531			       const char *buf, size_t size)
1532{
1533	u8 idx;
1534	unsigned long val;
1535	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1536
1537	if (kstrtoul(buf, 16, &val))
1538		return -EINVAL;
1539
1540	spin_lock(&drvdata->spinlock);
1541	idx = drvdata->seq_idx;
1542	/* RST, bits[7:0] */
1543	drvdata->seq_ctrl[idx] = val & 0xFF;
1544	spin_unlock(&drvdata->spinlock);
1545	return size;
1546}
1547static DEVICE_ATTR_RW(seq_event);
1548
1549static ssize_t seq_reset_event_show(struct device *dev,
1550				    struct device_attribute *attr,
1551				    char *buf)
1552{
1553	unsigned long val;
1554	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1555
1556	val = drvdata->seq_rst;
1557	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1558}
1559
1560static ssize_t seq_reset_event_store(struct device *dev,
1561				     struct device_attribute *attr,
1562				     const char *buf, size_t size)
1563{
1564	unsigned long val;
1565	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1566
1567	if (kstrtoul(buf, 16, &val))
1568		return -EINVAL;
1569	if (!(drvdata->nrseqstate))
1570		return -EINVAL;
1571
1572	drvdata->seq_rst = val & ETMv4_EVENT_MASK;
1573	return size;
1574}
1575static DEVICE_ATTR_RW(seq_reset_event);
1576
1577static ssize_t cntr_idx_show(struct device *dev,
1578			     struct device_attribute *attr,
1579			     char *buf)
1580{
1581	unsigned long val;
1582	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1583
1584	val = drvdata->cntr_idx;
1585	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1586}
1587
1588static ssize_t cntr_idx_store(struct device *dev,
1589			      struct device_attribute *attr,
1590			      const char *buf, size_t size)
1591{
1592	unsigned long val;
1593	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1594
1595	if (kstrtoul(buf, 16, &val))
1596		return -EINVAL;
1597	if (val >= drvdata->nr_cntr)
1598		return -EINVAL;
1599
1600	/*
1601	 * Use spinlock to ensure index doesn't change while it gets
1602	 * dereferenced multiple times within a spinlock block elsewhere.
1603	 */
1604	spin_lock(&drvdata->spinlock);
1605	drvdata->cntr_idx = val;
1606	spin_unlock(&drvdata->spinlock);
1607	return size;
1608}
1609static DEVICE_ATTR_RW(cntr_idx);
1610
1611static ssize_t cntrldvr_show(struct device *dev,
1612			     struct device_attribute *attr,
1613			     char *buf)
1614{
1615	u8 idx;
1616	unsigned long val;
1617	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1618
1619	spin_lock(&drvdata->spinlock);
1620	idx = drvdata->cntr_idx;
1621	val = drvdata->cntrldvr[idx];
1622	spin_unlock(&drvdata->spinlock);
1623	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1624}
1625
1626static ssize_t cntrldvr_store(struct device *dev,
1627			      struct device_attribute *attr,
1628			      const char *buf, size_t size)
1629{
1630	u8 idx;
1631	unsigned long val;
1632	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1633
1634	if (kstrtoul(buf, 16, &val))
1635		return -EINVAL;
1636	if (val > ETM_CNTR_MAX_VAL)
1637		return -EINVAL;
1638
1639	spin_lock(&drvdata->spinlock);
1640	idx = drvdata->cntr_idx;
1641	drvdata->cntrldvr[idx] = val;
1642	spin_unlock(&drvdata->spinlock);
1643	return size;
1644}
1645static DEVICE_ATTR_RW(cntrldvr);
1646
1647static ssize_t cntr_val_show(struct device *dev,
1648			     struct device_attribute *attr,
1649			     char *buf)
1650{
1651	u8 idx;
1652	unsigned long val;
1653	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1654
1655	spin_lock(&drvdata->spinlock);
1656	idx = drvdata->cntr_idx;
1657	val = drvdata->cntr_val[idx];
1658	spin_unlock(&drvdata->spinlock);
1659	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1660}
1661
1662static ssize_t cntr_val_store(struct device *dev,
1663			      struct device_attribute *attr,
1664			      const char *buf, size_t size)
1665{
1666	u8 idx;
1667	unsigned long val;
1668	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1669
1670	if (kstrtoul(buf, 16, &val))
1671		return -EINVAL;
1672	if (val > ETM_CNTR_MAX_VAL)
1673		return -EINVAL;
1674
1675	spin_lock(&drvdata->spinlock);
1676	idx = drvdata->cntr_idx;
1677	drvdata->cntr_val[idx] = val;
1678	spin_unlock(&drvdata->spinlock);
1679	return size;
1680}
1681static DEVICE_ATTR_RW(cntr_val);
1682
1683static ssize_t cntr_ctrl_show(struct device *dev,
1684			      struct device_attribute *attr,
1685			      char *buf)
1686{
1687	u8 idx;
1688	unsigned long val;
1689	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1690
1691	spin_lock(&drvdata->spinlock);
1692	idx = drvdata->cntr_idx;
1693	val = drvdata->cntr_ctrl[idx];
1694	spin_unlock(&drvdata->spinlock);
1695	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1696}
1697
1698static ssize_t cntr_ctrl_store(struct device *dev,
1699			       struct device_attribute *attr,
1700			       const char *buf, size_t size)
1701{
1702	u8 idx;
1703	unsigned long val;
1704	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1705
1706	if (kstrtoul(buf, 16, &val))
1707		return -EINVAL;
1708
1709	spin_lock(&drvdata->spinlock);
1710	idx = drvdata->cntr_idx;
1711	drvdata->cntr_ctrl[idx] = val;
1712	spin_unlock(&drvdata->spinlock);
1713	return size;
1714}
1715static DEVICE_ATTR_RW(cntr_ctrl);
1716
1717static ssize_t res_idx_show(struct device *dev,
1718			    struct device_attribute *attr,
1719			    char *buf)
1720{
1721	unsigned long val;
1722	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1723
1724	val = drvdata->res_idx;
1725	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1726}
1727
1728static ssize_t res_idx_store(struct device *dev,
1729			     struct device_attribute *attr,
1730			     const char *buf, size_t size)
1731{
1732	unsigned long val;
1733	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1734
1735	if (kstrtoul(buf, 16, &val))
1736		return -EINVAL;
1737	/* Resource selector pair 0 is always implemented and reserved */
1738	if (val < 2 || val >= drvdata->nr_resource * 2)
1739		return -EINVAL;
1740
1741	/*
1742	 * Use spinlock to ensure index doesn't change while it gets
1743	 * dereferenced multiple times within a spinlock block elsewhere.
1744	 */
1745	spin_lock(&drvdata->spinlock);
1746	drvdata->res_idx = val;
1747	spin_unlock(&drvdata->spinlock);
1748	return size;
1749}
1750static DEVICE_ATTR_RW(res_idx);
1751
1752static ssize_t res_ctrl_show(struct device *dev,
1753			     struct device_attribute *attr,
1754			     char *buf)
1755{
1756	u8 idx;
1757	unsigned long val;
1758	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1759
1760	spin_lock(&drvdata->spinlock);
1761	idx = drvdata->res_idx;
1762	val = drvdata->res_ctrl[idx];
1763	spin_unlock(&drvdata->spinlock);
1764	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1765}
1766
1767static ssize_t res_ctrl_store(struct device *dev,
1768			      struct device_attribute *attr,
1769			      const char *buf, size_t size)
1770{
1771	u8 idx;
1772	unsigned long val;
1773	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1774
1775	if (kstrtoul(buf, 16, &val))
1776		return -EINVAL;
1777
1778	spin_lock(&drvdata->spinlock);
1779	idx = drvdata->res_idx;
1780	/* For odd idx pair inversal bit is RES0 */
1781	if (idx % 2 != 0)
1782		/* PAIRINV, bit[21] */
1783		val &= ~BIT(21);
1784	drvdata->res_ctrl[idx] = val;
1785	spin_unlock(&drvdata->spinlock);
1786	return size;
1787}
1788static DEVICE_ATTR_RW(res_ctrl);
1789
1790static ssize_t ctxid_idx_show(struct device *dev,
1791			      struct device_attribute *attr,
1792			      char *buf)
1793{
1794	unsigned long val;
1795	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1796
1797	val = drvdata->ctxid_idx;
1798	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1799}
1800
1801static ssize_t ctxid_idx_store(struct device *dev,
1802			       struct device_attribute *attr,
1803			       const char *buf, size_t size)
1804{
1805	unsigned long val;
1806	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1807
1808	if (kstrtoul(buf, 16, &val))
1809		return -EINVAL;
1810	if (val >= drvdata->numcidc)
1811		return -EINVAL;
1812
1813	/*
1814	 * Use spinlock to ensure index doesn't change while it gets
1815	 * dereferenced multiple times within a spinlock block elsewhere.
1816	 */
1817	spin_lock(&drvdata->spinlock);
1818	drvdata->ctxid_idx = val;
1819	spin_unlock(&drvdata->spinlock);
1820	return size;
1821}
1822static DEVICE_ATTR_RW(ctxid_idx);
1823
1824static ssize_t ctxid_pid_show(struct device *dev,
1825			      struct device_attribute *attr,
1826			      char *buf)
1827{
1828	u8 idx;
1829	unsigned long val;
1830	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1831
1832	spin_lock(&drvdata->spinlock);
1833	idx = drvdata->ctxid_idx;
1834	val = (unsigned long)drvdata->ctxid_vpid[idx];
1835	spin_unlock(&drvdata->spinlock);
1836	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1837}
1838
1839static ssize_t ctxid_pid_store(struct device *dev,
1840			       struct device_attribute *attr,
1841			       const char *buf, size_t size)
1842{
1843	u8 idx;
1844	unsigned long vpid, pid;
1845	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1846
1847	/*
1848	 * only implemented when ctxid tracing is enabled, i.e. at least one
1849	 * ctxid comparator is implemented and ctxid is greater than 0 bits
1850	 * in length
1851	 */
1852	if (!drvdata->ctxid_size || !drvdata->numcidc)
1853		return -EINVAL;
1854	if (kstrtoul(buf, 16, &vpid))
1855		return -EINVAL;
1856
1857	pid = coresight_vpid_to_pid(vpid);
1858
1859	spin_lock(&drvdata->spinlock);
1860	idx = drvdata->ctxid_idx;
1861	drvdata->ctxid_pid[idx] = (u64)pid;
1862	drvdata->ctxid_vpid[idx] = (u64)vpid;
1863	spin_unlock(&drvdata->spinlock);
1864	return size;
1865}
1866static DEVICE_ATTR_RW(ctxid_pid);
1867
1868static ssize_t ctxid_masks_show(struct device *dev,
1869				struct device_attribute *attr,
1870				char *buf)
1871{
1872	unsigned long val1, val2;
1873	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1874
1875	spin_lock(&drvdata->spinlock);
1876	val1 = drvdata->ctxid_mask0;
1877	val2 = drvdata->ctxid_mask1;
1878	spin_unlock(&drvdata->spinlock);
1879	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1880}
1881
1882static ssize_t ctxid_masks_store(struct device *dev,
1883				struct device_attribute *attr,
1884				const char *buf, size_t size)
1885{
1886	u8 i, j, maskbyte;
1887	unsigned long val1, val2, mask;
1888	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1889
1890	/*
1891	 * only implemented when ctxid tracing is enabled, i.e. at least one
1892	 * ctxid comparator is implemented and ctxid is greater than 0 bits
1893	 * in length
1894	 */
1895	if (!drvdata->ctxid_size || !drvdata->numcidc)
1896		return -EINVAL;
1897	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1898		return -EINVAL;
1899
1900	spin_lock(&drvdata->spinlock);
	/*
	 * Each byte[0..3] of val1 controls the mask applied to ctxid
	 * comparator[0..3]; each byte[0..3] of val2 controls
	 * comparator[4..7].
	 */
1905	switch (drvdata->numcidc) {
1906	case 0x1:
1907		/* COMP0, bits[7:0] */
1908		drvdata->ctxid_mask0 = val1 & 0xFF;
1909		break;
1910	case 0x2:
1911		/* COMP1, bits[15:8] */
1912		drvdata->ctxid_mask0 = val1 & 0xFFFF;
1913		break;
1914	case 0x3:
1915		/* COMP2, bits[23:16] */
1916		drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
1917		break;
1918	case 0x4:
		/* COMP3, bits[31:24] */
1920		drvdata->ctxid_mask0 = val1;
1921		break;
1922	case 0x5:
1923		/* COMP4, bits[7:0] */
1924		drvdata->ctxid_mask0 = val1;
1925		drvdata->ctxid_mask1 = val2 & 0xFF;
1926		break;
1927	case 0x6:
1928		/* COMP5, bits[15:8] */
1929		drvdata->ctxid_mask0 = val1;
1930		drvdata->ctxid_mask1 = val2 & 0xFFFF;
1931		break;
1932	case 0x7:
1933		/* COMP6, bits[23:16] */
1934		drvdata->ctxid_mask0 = val1;
1935		drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
1936		break;
1937	case 0x8:
1938		/* COMP7, bits[31:24] */
1939		drvdata->ctxid_mask0 = val1;
1940		drvdata->ctxid_mask1 = val2;
1941		break;
1942	default:
1943		break;
1944	}
	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the ctxid comparator value to 0x0, otherwise the behavior is
	 * unpredictable.  For example, if bit[3] of ctxid_mask0 is 1, we must
	 * clear bits[31:24] (byte 3) of the ctxid comparator 0 value register.
	 */
1951	mask = drvdata->ctxid_mask0;
1952	for (i = 0; i < drvdata->numcidc; i++) {
1953		/* mask value of corresponding ctxid comparator */
1954		maskbyte = mask & ETMv4_EVENT_MASK;
1955		/*
1956		 * each bit corresponds to a byte of respective ctxid comparator
1957		 * value register
1958		 */
1959		for (j = 0; j < 8; j++) {
1960			if (maskbyte & 1)
				drvdata->ctxid_pid[i] &= ~(0xFFULL << (j * 8));
1962			maskbyte >>= 1;
1963		}
1964		/* Select the next ctxid comparator mask value */
1965		if (i == 3)
1966			/* ctxid comparators[4-7] */
1967			mask = drvdata->ctxid_mask1;
1968		else
1969			mask >>= 0x8;
1970	}
1971
1972	spin_unlock(&drvdata->spinlock);
1973	return size;
1974}
1975static DEVICE_ATTR_RW(ctxid_masks);
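
/*
 * Worked example of the mask layout handled above (illustrative values):
 * with two context ID comparators implemented, writing "0x303 0x0" to
 * ctxid_masks sets ctxid_mask0 to 0x303.  Byte 0 (0x03) applies to
 * comparator 0 and byte 1 (0x03) to comparator 1; in each byte, bits 0
 * and 1 are set, so bytes 0 and 1 (bits[15:0]) of the corresponding
 * comparator value are cleared by the loop above.
 */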
1976
1977static ssize_t vmid_idx_show(struct device *dev,
1978			     struct device_attribute *attr,
1979			     char *buf)
1980{
1981	unsigned long val;
1982	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1983
1984	val = drvdata->vmid_idx;
1985	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1986}
1987
1988static ssize_t vmid_idx_store(struct device *dev,
1989			      struct device_attribute *attr,
1990			      const char *buf, size_t size)
1991{
1992	unsigned long val;
1993	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1994
1995	if (kstrtoul(buf, 16, &val))
1996		return -EINVAL;
1997	if (val >= drvdata->numvmidc)
1998		return -EINVAL;
1999
	/*
	 * Use the spinlock to ensure the index doesn't change while it is
	 * used multiple times within a spinlock block elsewhere.
	 */
2004	spin_lock(&drvdata->spinlock);
2005	drvdata->vmid_idx = val;
2006	spin_unlock(&drvdata->spinlock);
2007	return size;
2008}
2009static DEVICE_ATTR_RW(vmid_idx);
2010
2011static ssize_t vmid_val_show(struct device *dev,
2012			     struct device_attribute *attr,
2013			     char *buf)
2014{
2015	unsigned long val;
2016	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2020}
2021
2022static ssize_t vmid_val_store(struct device *dev,
2023			      struct device_attribute *attr,
2024			      const char *buf, size_t size)
2025{
2026	unsigned long val;
2027	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2028
	/*
	 * Only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the VMID size is at least 8 bits.
	 */
2033	if (!drvdata->vmid_size || !drvdata->numvmidc)
2034		return -EINVAL;
2035	if (kstrtoul(buf, 16, &val))
2036		return -EINVAL;
2037
2038	spin_lock(&drvdata->spinlock);
2039	drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
2040	spin_unlock(&drvdata->spinlock);
2041	return size;
2042}
2043static DEVICE_ATTR_RW(vmid_val);
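
/*
 * vmid_idx and vmid_val mirror ctxid_idx and ctxid_pid above: select a
 * VMID comparator through vmid_idx, then write the (hex) match value for
 * that comparator through vmid_val.
 */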
2044
2045static ssize_t vmid_masks_show(struct device *dev,
2046			       struct device_attribute *attr, char *buf)
2047{
2048	unsigned long val1, val2;
2049	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2050
2051	spin_lock(&drvdata->spinlock);
2052	val1 = drvdata->vmid_mask0;
2053	val2 = drvdata->vmid_mask1;
2054	spin_unlock(&drvdata->spinlock);
2055	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2056}
2057
2058static ssize_t vmid_masks_store(struct device *dev,
2059				struct device_attribute *attr,
2060				const char *buf, size_t size)
2061{
2062	u8 i, j, maskbyte;
2063	unsigned long val1, val2, mask;
2064	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	/*
	 * Only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the VMID size is at least 8 bits.
	 */
2069	if (!drvdata->vmid_size || !drvdata->numvmidc)
2070		return -EINVAL;
2071	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
2072		return -EINVAL;
2073
2074	spin_lock(&drvdata->spinlock);
2075
	/*
	 * Each byte[0..3] of val1 controls the mask applied to vmid
	 * comparator[0..3]; each byte[0..3] of val2 controls
	 * comparator[4..7].
	 */
2080	switch (drvdata->numvmidc) {
2081	case 0x1:
2082		/* COMP0, bits[7:0] */
2083		drvdata->vmid_mask0 = val1 & 0xFF;
2084		break;
2085	case 0x2:
2086		/* COMP1, bits[15:8] */
2087		drvdata->vmid_mask0 = val1 & 0xFFFF;
2088		break;
2089	case 0x3:
2090		/* COMP2, bits[23:16] */
2091		drvdata->vmid_mask0 = val1 & 0xFFFFFF;
2092		break;
2093	case 0x4:
2094		/* COMP3, bits[31:24] */
2095		drvdata->vmid_mask0 = val1;
2096		break;
2097	case 0x5:
2098		/* COMP4, bits[7:0] */
2099		drvdata->vmid_mask0 = val1;
2100		drvdata->vmid_mask1 = val2 & 0xFF;
2101		break;
2102	case 0x6:
2103		/* COMP5, bits[15:8] */
2104		drvdata->vmid_mask0 = val1;
2105		drvdata->vmid_mask1 = val2 & 0xFFFF;
2106		break;
2107	case 0x7:
2108		/* COMP6, bits[23:16] */
2109		drvdata->vmid_mask0 = val1;
2110		drvdata->vmid_mask1 = val2 & 0xFFFFFF;
2111		break;
2112	case 0x8:
2113		/* COMP7, bits[31:24] */
2114		drvdata->vmid_mask0 = val1;
2115		drvdata->vmid_mask1 = val2;
2116		break;
2117	default:
2118		break;
2119	}
2120
	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the vmid comparator value to 0x0, otherwise the behavior is
	 * unpredictable.  For example, if bit[3] of vmid_mask0 is 1, we must
	 * clear bits[31:24] (byte 3) of the vmid comparator 0 value register.
	 */
2127	mask = drvdata->vmid_mask0;
2128	for (i = 0; i < drvdata->numvmidc; i++) {
2129		/* mask value of corresponding vmid comparator */
2130		maskbyte = mask & ETMv4_EVENT_MASK;
2131		/*
2132		 * each bit corresponds to a byte of respective vmid comparator
2133		 * value register
2134		 */
2135		for (j = 0; j < 8; j++) {
2136			if (maskbyte & 1)
				drvdata->vmid_val[i] &= ~(0xFFULL << (j * 8));
2138			maskbyte >>= 1;
2139		}
2140		/* Select the next vmid comparator mask value */
2141		if (i == 3)
2142			/* vmid comparators[4-7] */
2143			mask = drvdata->vmid_mask1;
2144		else
2145			mask >>= 0x8;
2146	}
2147	spin_unlock(&drvdata->spinlock);
2148	return size;
2149}
2150static DEVICE_ATTR_RW(vmid_masks);
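
/*
 * vmid_masks uses the same per-byte encoding as ctxid_masks above, applied
 * to the VMID comparators; the worked example given for ctxid_masks carries
 * over unchanged.
 */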
2151
2152static ssize_t cpu_show(struct device *dev,
2153			struct device_attribute *attr, char *buf)
2154{
2155	int val;
2156	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2157
2158	val = drvdata->cpu;
2159	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
2162static DEVICE_ATTR_RO(cpu);
2163
2164static struct attribute *coresight_etmv4_attrs[] = {
2165	&dev_attr_nr_pe_cmp.attr,
2166	&dev_attr_nr_addr_cmp.attr,
2167	&dev_attr_nr_cntr.attr,
2168	&dev_attr_nr_ext_inp.attr,
2169	&dev_attr_numcidc.attr,
2170	&dev_attr_numvmidc.attr,
2171	&dev_attr_nrseqstate.attr,
2172	&dev_attr_nr_resource.attr,
2173	&dev_attr_nr_ss_cmp.attr,
2174	&dev_attr_reset.attr,
2175	&dev_attr_mode.attr,
2176	&dev_attr_pe.attr,
2177	&dev_attr_event.attr,
2178	&dev_attr_event_instren.attr,
2179	&dev_attr_event_ts.attr,
2180	&dev_attr_syncfreq.attr,
2181	&dev_attr_cyc_threshold.attr,
2182	&dev_attr_bb_ctrl.attr,
2183	&dev_attr_event_vinst.attr,
2184	&dev_attr_s_exlevel_vinst.attr,
2185	&dev_attr_ns_exlevel_vinst.attr,
2186	&dev_attr_addr_idx.attr,
2187	&dev_attr_addr_instdatatype.attr,
2188	&dev_attr_addr_single.attr,
2189	&dev_attr_addr_range.attr,
2190	&dev_attr_addr_start.attr,
2191	&dev_attr_addr_stop.attr,
2192	&dev_attr_addr_ctxtype.attr,
2193	&dev_attr_addr_context.attr,
2194	&dev_attr_seq_idx.attr,
2195	&dev_attr_seq_state.attr,
2196	&dev_attr_seq_event.attr,
2197	&dev_attr_seq_reset_event.attr,
2198	&dev_attr_cntr_idx.attr,
2199	&dev_attr_cntrldvr.attr,
2200	&dev_attr_cntr_val.attr,
2201	&dev_attr_cntr_ctrl.attr,
2202	&dev_attr_res_idx.attr,
2203	&dev_attr_res_ctrl.attr,
2204	&dev_attr_ctxid_idx.attr,
2205	&dev_attr_ctxid_pid.attr,
2206	&dev_attr_ctxid_masks.attr,
2207	&dev_attr_vmid_idx.attr,
2208	&dev_attr_vmid_val.attr,
2209	&dev_attr_vmid_masks.attr,
2210	&dev_attr_cpu.attr,
2211	NULL,
2212};
2213
2214#define coresight_simple_func(name, offset)				\
2215static ssize_t name##_show(struct device *_dev,				\
2216			   struct device_attribute *attr, char *buf)	\
2217{									\
2218	struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);	\
2219	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
2220			 readl_relaxed(drvdata->base + offset));	\
2221}									\
2222DEVICE_ATTR_RO(name)
2223
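/*
 * Each coresight_simple_func() use below defines a read-only sysfs
 * attribute that dumps the raw value of the named management register;
 * for example, coresight_simple_func(trcoslsr, TRCOSLSR) expands to a
 * trcoslsr_show() handler plus the matching dev_attr_trcoslsr.
 */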
2224coresight_simple_func(trcoslsr, TRCOSLSR);
2225coresight_simple_func(trcpdcr, TRCPDCR);
2226coresight_simple_func(trcpdsr, TRCPDSR);
2227coresight_simple_func(trclsr, TRCLSR);
2228coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
2229coresight_simple_func(trcdevid, TRCDEVID);
2230coresight_simple_func(trcdevtype, TRCDEVTYPE);
2231coresight_simple_func(trcpidr0, TRCPIDR0);
2232coresight_simple_func(trcpidr1, TRCPIDR1);
2233coresight_simple_func(trcpidr2, TRCPIDR2);
2234coresight_simple_func(trcpidr3, TRCPIDR3);
2235
2236static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2237	&dev_attr_trcoslsr.attr,
2238	&dev_attr_trcpdcr.attr,
2239	&dev_attr_trcpdsr.attr,
2240	&dev_attr_trclsr.attr,
2241	&dev_attr_trcauthstatus.attr,
2242	&dev_attr_trcdevid.attr,
2243	&dev_attr_trcdevtype.attr,
2244	&dev_attr_trcpidr0.attr,
2245	&dev_attr_trcpidr1.attr,
2246	&dev_attr_trcpidr2.attr,
2247	&dev_attr_trcpidr3.attr,
2248	NULL,
2249};
2250
2251coresight_simple_func(trcidr0, TRCIDR0);
2252coresight_simple_func(trcidr1, TRCIDR1);
2253coresight_simple_func(trcidr2, TRCIDR2);
2254coresight_simple_func(trcidr3, TRCIDR3);
2255coresight_simple_func(trcidr4, TRCIDR4);
2256coresight_simple_func(trcidr5, TRCIDR5);
2257/* trcidr[6,7] are reserved */
2258coresight_simple_func(trcidr8, TRCIDR8);
2259coresight_simple_func(trcidr9, TRCIDR9);
2260coresight_simple_func(trcidr10, TRCIDR10);
2261coresight_simple_func(trcidr11, TRCIDR11);
2262coresight_simple_func(trcidr12, TRCIDR12);
2263coresight_simple_func(trcidr13, TRCIDR13);
2264
2265static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2266	&dev_attr_trcidr0.attr,
2267	&dev_attr_trcidr1.attr,
2268	&dev_attr_trcidr2.attr,
2269	&dev_attr_trcidr3.attr,
2270	&dev_attr_trcidr4.attr,
2271	&dev_attr_trcidr5.attr,
2272	/* trcidr[6,7] are reserved */
2273	&dev_attr_trcidr8.attr,
2274	&dev_attr_trcidr9.attr,
2275	&dev_attr_trcidr10.attr,
2276	&dev_attr_trcidr11.attr,
2277	&dev_attr_trcidr12.attr,
2278	&dev_attr_trcidr13.attr,
2279	NULL,
2280};
2281
2282static const struct attribute_group coresight_etmv4_group = {
2283	.attrs = coresight_etmv4_attrs,
2284};
2285
2286static const struct attribute_group coresight_etmv4_mgmt_group = {
2287	.attrs = coresight_etmv4_mgmt_attrs,
2288	.name = "mgmt",
2289};
2290
2291static const struct attribute_group coresight_etmv4_trcidr_group = {
2292	.attrs = coresight_etmv4_trcidr_attrs,
2293	.name = "trcidr",
2294};
2295
2296static const struct attribute_group *coresight_etmv4_groups[] = {
2297	&coresight_etmv4_group,
2298	&coresight_etmv4_mgmt_group,
2299	&coresight_etmv4_trcidr_group,
2300	NULL,
2301};
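
/*
 * Rough sketch of the resulting sysfs layout: the configuration attributes
 * in coresight_etmv4_attrs appear directly under the coresight device
 * directory, while the named "mgmt" and "trcidr" groups appear as mgmt/
 * and trcidr/ subdirectories holding the raw register dumps.
 */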
2302
2303static void etm4_init_arch_data(void *info)
2304{
2305	u32 etmidr0;
2306	u32 etmidr1;
2307	u32 etmidr2;
2308	u32 etmidr3;
2309	u32 etmidr4;
2310	u32 etmidr5;
2311	struct etmv4_drvdata *drvdata = info;
2312
2313	CS_UNLOCK(drvdata->base);
2314
2315	/* find all capabilities of the tracing unit */
2316	etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
2317
2318	/* INSTP0, bits[2:1] P0 tracing support field */
2319	if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
2320		drvdata->instrp0 = true;
2321	else
2322		drvdata->instrp0 = false;
2323
2324	/* TRCBB, bit[5] Branch broadcast tracing support bit */
2325	if (BMVAL(etmidr0, 5, 5))
2326		drvdata->trcbb = true;
2327	else
2328		drvdata->trcbb = false;
2329
2330	/* TRCCOND, bit[6] Conditional instruction tracing support bit */
2331	if (BMVAL(etmidr0, 6, 6))
2332		drvdata->trccond = true;
2333	else
2334		drvdata->trccond = false;
2335
2336	/* TRCCCI, bit[7] Cycle counting instruction bit */
2337	if (BMVAL(etmidr0, 7, 7))
2338		drvdata->trccci = true;
2339	else
2340		drvdata->trccci = false;
2341
2342	/* RETSTACK, bit[9] Return stack bit */
2343	if (BMVAL(etmidr0, 9, 9))
2344		drvdata->retstack = true;
2345	else
2346		drvdata->retstack = false;
2347
2348	/* NUMEVENT, bits[11:10] Number of events field */
2349	drvdata->nr_event = BMVAL(etmidr0, 10, 11);
2350	/* QSUPP, bits[16:15] Q element support field */
2351	drvdata->q_support = BMVAL(etmidr0, 15, 16);
2352	/* TSSIZE, bits[28:24] Global timestamp size field */
2353	drvdata->ts_size = BMVAL(etmidr0, 24, 28);
2354
2355	/* base architecture of trace unit */
2356	etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
2357	/*
	 * TRCARCHMIN, bits[7:4] architecture minor version number
	 * TRCARCHMAJ, bits[11:8] architecture major version number
2360	 */
2361	drvdata->arch = BMVAL(etmidr1, 4, 11);
2362
2363	/* maximum size of resources */
2364	etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
2365	/* CIDSIZE, bits[9:5] Indicates the Context ID size */
2366	drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
2367	/* VMIDSIZE, bits[14:10] Indicates the VMID size */
2368	drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
2369	/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
2370	drvdata->ccsize = BMVAL(etmidr2, 25, 28);
2371
2372	etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
2373	/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
2374	drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
2375	/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
2376	drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
2377	/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
2378	drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
2379
2380	/*
2381	 * TRCERR, bit[24] whether a trace unit can trace a
2382	 * system error exception.
2383	 */
2384	if (BMVAL(etmidr3, 24, 24))
2385		drvdata->trc_error = true;
2386	else
2387		drvdata->trc_error = false;
2388
2389	/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
2390	if (BMVAL(etmidr3, 25, 25))
2391		drvdata->syncpr = true;
2392	else
2393		drvdata->syncpr = false;
2394
2395	/* STALLCTL, bit[26] is stall control implemented? */
2396	if (BMVAL(etmidr3, 26, 26))
2397		drvdata->stallctl = true;
2398	else
2399		drvdata->stallctl = false;
2400
2401	/* SYSSTALL, bit[27] implementation can support stall control? */
2402	if (BMVAL(etmidr3, 27, 27))
2403		drvdata->sysstall = true;
2404	else
2405		drvdata->sysstall = false;
2406
2407	/* NUMPROC, bits[30:28] the number of PEs available for tracing */
2408	drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
2409
2410	/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
2411	if (BMVAL(etmidr3, 31, 31))
2412		drvdata->nooverflow = true;
2413	else
2414		drvdata->nooverflow = false;
2415
2416	/* number of resources trace unit supports */
2417	etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
	/* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
2419	drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
2420	/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
2421	drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
2422	/*
2423	 * NUMRSPAIR, bits[19:16]
	 * The number of resource pairs conveyed by the HW starts at 0, i.e. a
	 * value of 0x0 indicates 1 resource pair, 0x1 indicates two, and so on.
	 * As such, add 1 to the value of NUMRSPAIR for a better representation.
2427	 */
2428	drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
2429	/*
2430	 * NUMSSCC, bits[23:20] the number of single-shot
2431	 * comparator control for tracing
2432	 */
2433	drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
2434	/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
2435	drvdata->numcidc = BMVAL(etmidr4, 24, 27);
2436	/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
2437	drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
2438
2439	etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
2440	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
2441	drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
2442	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
2443	drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
2444	/* ATBTRIG, bit[22] implementation can support ATB triggers? */
2445	if (BMVAL(etmidr5, 22, 22))
2446		drvdata->atbtrig = true;
2447	else
2448		drvdata->atbtrig = false;
2449	/*
2450	 * LPOVERRIDE, bit[23] implementation supports
2451	 * low-power state override
2452	 */
2453	if (BMVAL(etmidr5, 23, 23))
2454		drvdata->lpoverride = true;
2455	else
2456		drvdata->lpoverride = false;
2457	/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
2458	drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
2459	/* NUMCNTR, bits[30:28] number of counters available for tracing */
2460	drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
2461	CS_LOCK(drvdata->base);
2462}
2463
2464static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
2465{
2466	int i;
2467
2468	drvdata->pe_sel = 0x0;
2469	drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
2470			ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);
2471
2472	/* disable all events tracing */
2473	drvdata->eventctrl0 = 0x0;
2474	drvdata->eventctrl1 = 0x0;
2475
2476	/* disable stalling */
2477	drvdata->stall_ctrl = 0x0;
2478
2479	/* disable timestamp event */
2480	drvdata->ts_ctrl = 0x0;
2481
	/* enable trace synchronization every 4096 bytes */
	if (!drvdata->syncpr)
2484		drvdata->syncfreq = 0xC;
2485
2486	/*
2487	 *  enable viewInst to trace everything with start-stop logic in
2488	 *  started state
2489	 */
2490	drvdata->vinst_ctrl |= BIT(0);
2491	/* set initial state of start-stop logic */
2492	if (drvdata->nr_addr_cmp)
2493		drvdata->vinst_ctrl |= BIT(9);
2494
2495	/* no address range filtering for ViewInst */
2496	drvdata->viiectlr = 0x0;
2497	/* no start-stop filtering for ViewInst */
2498	drvdata->vissctlr = 0x0;
2499
2500	/* disable seq events */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
2502		drvdata->seq_ctrl[i] = 0x0;
2503	drvdata->seq_rst = 0x0;
2504	drvdata->seq_state = 0x0;
2505
2506	/* disable external input events */
2507	drvdata->ext_inp = 0x0;
2508
2509	for (i = 0; i < drvdata->nr_cntr; i++) {
2510		drvdata->cntrldvr[i] = 0x0;
2511		drvdata->cntr_ctrl[i] = 0x0;
2512		drvdata->cntr_val[i] = 0x0;
2513	}
2514
	/*
	 * Resource selector pair 0 is always implemented and reserved,
	 * so the first configurable selector index is 2.
	 */
2516	drvdata->res_idx = 0x2;
2517	for (i = 2; i < drvdata->nr_resource * 2; i++)
2518		drvdata->res_ctrl[i] = 0x0;
2519
2520	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
2521		drvdata->ss_ctrl[i] = 0x0;
2522		drvdata->ss_pe_cmp[i] = 0x0;
2523	}
2524
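	/*
	 * By default, point address comparator pair 0 at the kernel text
	 * section (_stext to _etext).  It has no effect on ViewInst until
	 * user space includes the range, since viiectlr is left at 0x0
	 * above.
	 */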
2525	if (drvdata->nr_addr_cmp >= 1) {
2526		drvdata->addr_val[0] = (unsigned long)_stext;
2527		drvdata->addr_val[1] = (unsigned long)_etext;
2528		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
2529		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
2530	}
2531
2532	for (i = 0; i < drvdata->numcidc; i++) {
2533		drvdata->ctxid_pid[i] = 0x0;
2534		drvdata->ctxid_vpid[i] = 0x0;
2535	}
2536
2537	drvdata->ctxid_mask0 = 0x0;
2538	drvdata->ctxid_mask1 = 0x0;
2539
2540	for (i = 0; i < drvdata->numvmidc; i++)
2541		drvdata->vmid_val[i] = 0x0;
2542	drvdata->vmid_mask0 = 0x0;
2543	drvdata->vmid_mask1 = 0x0;
2544
2545	/*
2546	 * A trace ID value of 0 is invalid, so let's start at some
2547	 * random value that fits in 7 bits.  ETMv3.x has 0x10 so let's
2548	 * start at 0x20.
2549	 */
2550	drvdata->trcid = 0x20 + drvdata->cpu;
2551}
2552
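/*
 * CPU hotplug notifier: on CPU_STARTING clear the OS lock and, if tracing
 * was enabled on that CPU, reprogram the hardware so the session survives
 * the hotplug cycle; on CPU_ONLINE enable the device if boot_enable was
 * requested and it has not been enabled before; on CPU_DYING disable the
 * trace unit.
 */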
2553static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
2554			    void *hcpu)
2555{
2556	unsigned int cpu = (unsigned long)hcpu;
2557
2558	if (!etmdrvdata[cpu])
2559		goto out;
2560
2561	switch (action & (~CPU_TASKS_FROZEN)) {
2562	case CPU_STARTING:
2563		spin_lock(&etmdrvdata[cpu]->spinlock);
2564		if (!etmdrvdata[cpu]->os_unlock) {
2565			etm4_os_unlock(etmdrvdata[cpu]);
2566			etmdrvdata[cpu]->os_unlock = true;
2567		}
2568
2569		if (etmdrvdata[cpu]->enable)
2570			etm4_enable_hw(etmdrvdata[cpu]);
2571		spin_unlock(&etmdrvdata[cpu]->spinlock);
2572		break;
2573
2574	case CPU_ONLINE:
2575		if (etmdrvdata[cpu]->boot_enable &&
2576			!etmdrvdata[cpu]->sticky_enable)
2577			coresight_enable(etmdrvdata[cpu]->csdev);
2578		break;
2579
2580	case CPU_DYING:
2581		spin_lock(&etmdrvdata[cpu]->spinlock);
2582		if (etmdrvdata[cpu]->enable)
2583			etm4_disable_hw(etmdrvdata[cpu]);
2584		spin_unlock(&etmdrvdata[cpu]->spinlock);
2585		break;
2586	}
2587out:
2588	return NOTIFY_OK;
2589}
2590
2591static struct notifier_block etm4_cpu_notifier = {
2592	.notifier_call = etm4_cpu_callback,
2593};
2594
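/*
 * The probe can run on any CPU, so clearing the OS lock and discovering
 * the trace unit's capabilities are pushed to the CPU this ETM is affine
 * to via smp_call_function_single().
 */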
2595static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2596{
2597	int ret;
2598	void __iomem *base;
2599	struct device *dev = &adev->dev;
2600	struct coresight_platform_data *pdata = NULL;
2601	struct etmv4_drvdata *drvdata;
2602	struct resource *res = &adev->res;
2603	struct coresight_desc *desc;
2604	struct device_node *np = adev->dev.of_node;
2605
2606	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
2607	if (!desc)
2608		return -ENOMEM;
2609
2610	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
2611	if (!drvdata)
2612		return -ENOMEM;
2613
2614	if (np) {
2615		pdata = of_get_coresight_platform_data(dev, np);
2616		if (IS_ERR(pdata))
2617			return PTR_ERR(pdata);
2618		adev->dev.platform_data = pdata;
2619	}
2620
2621	drvdata->dev = &adev->dev;
2622	dev_set_drvdata(dev, drvdata);
2623
2624	/* Validity for the resource is already checked by the AMBA core */
2625	base = devm_ioremap_resource(dev, res);
2626	if (IS_ERR(base))
2627		return PTR_ERR(base);
2628
2629	drvdata->base = base;
2630
2631	spin_lock_init(&drvdata->spinlock);
2632
2633	drvdata->cpu = pdata ? pdata->cpu : 0;
2634
2635	get_online_cpus();
2636	etmdrvdata[drvdata->cpu] = drvdata;
2637
2638	if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
2639		drvdata->os_unlock = true;
2640
2641	if (smp_call_function_single(drvdata->cpu,
				etm4_init_arch_data, drvdata, 1))
2643		dev_err(dev, "ETM arch init failed\n");
2644
2645	if (!etm4_count++)
2646		register_hotcpu_notifier(&etm4_cpu_notifier);
2647
2648	put_online_cpus();
2649
	if (!etm4_arch_supported(drvdata->arch)) {
2651		ret = -EINVAL;
2652		goto err_arch_supported;
2653	}
2654	etm4_init_default_data(drvdata);
2655
2656	pm_runtime_put(&adev->dev);
2657
2658	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
2659	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
2660	desc->ops = &etm4_cs_ops;
2661	desc->pdata = pdata;
2662	desc->dev = dev;
2663	desc->groups = coresight_etmv4_groups;
2664	drvdata->csdev = coresight_register(desc);
2665	if (IS_ERR(drvdata->csdev)) {
2666		ret = PTR_ERR(drvdata->csdev);
2667		goto err_coresight_register;
2668	}
2669
2670	dev_info(dev, "%s initialized\n", (char *)id->data);
2671
2672	if (boot_enable) {
2673		coresight_enable(drvdata->csdev);
2674		drvdata->boot_enable = true;
2675	}
2676
2677	return 0;
2678
2679err_arch_supported:
2680	pm_runtime_put(&adev->dev);
2681err_coresight_register:
2682	if (--etm4_count == 0)
2683		unregister_hotcpu_notifier(&etm4_cpu_notifier);
2684	return ret;
2685}
2686
2687static int etm4_remove(struct amba_device *adev)
2688{
2689	struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
2690
2691	coresight_unregister(drvdata->csdev);
2692	if (--etm4_count == 0)
2693		unregister_hotcpu_notifier(&etm4_cpu_notifier);
2694
2695	return 0;
2696}
2697
2698static struct amba_id etm4_ids[] = {
2699	{       /* ETM 4.0 - Qualcomm */
2700		.id	= 0x0003b95d,
2701		.mask	= 0x0003ffff,
2702		.data	= "ETM 4.0",
2703	},
2704	{       /* ETM 4.0 - Juno board */
2705		.id	= 0x000bb95e,
2706		.mask	= 0x000fffff,
2707		.data	= "ETM 4.0",
2708	},
2709	{ 0, 0},
2710};
2711
2712static struct amba_driver etm4x_driver = {
2713	.drv = {
2714		.name   = "coresight-etm4x",
2715	},
2716	.probe		= etm4_probe,
2717	.remove		= etm4_remove,
2718	.id_table	= etm4_ids,
2719};
2720
2721module_amba_driver(etm4x_driver);
2722