Lines matching references to drvdata

45 static inline void etm_writel(struct etm_drvdata *drvdata,  in etm_writel()  argument
48 if (drvdata->use_cp14) { in etm_writel()
50 dev_err(drvdata->dev, in etm_writel()
54 writel_relaxed(val, drvdata->base + off); in etm_writel()
58 static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off) in etm_readl() argument
62 if (drvdata->use_cp14) { in etm_readl()
64 dev_err(drvdata->dev, in etm_readl()
68 val = readl_relaxed(drvdata->base + off); in etm_readl()
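
Taken together, the matches above suggest a pair of accessors that route register traffic either through the CP14 coprocessor interface or through memory-mapped I/O, depending on drvdata->use_cp14. A minimal sketch of that pattern; the CP14 helpers (etm_writel_cp14/etm_readl_cp14) and their signatures are assumed to be declared elsewhere in the driver:

static inline void etm_writel(struct etm_drvdata *drvdata, u32 val, u32 off)
{
	if (drvdata->use_cp14) {
		/* Coprocessor 14 access path; helper declared elsewhere. */
		if (etm_writel_cp14(off, val))
			dev_err(drvdata->dev,
				"invalid CP14 access to ETM reg: %#x", off);
	} else {
		/* Memory-mapped access relative to the ioremap'd base. */
		writel_relaxed(val, drvdata->base + off);
	}
}

static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
{
	u32 val = 0;

	if (drvdata->use_cp14) {
		if (etm_readl_cp14(off, &val))
			dev_err(drvdata->dev,
				"invalid CP14 access to ETM reg: %#x", off);
	} else {
		val = readl_relaxed(drvdata->base + off);
	}

	return val;
}
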
81 struct etm_drvdata *drvdata = (struct etm_drvdata *)info; in etm_os_unlock() local
83 etm_writel(drvdata, 0x0, ETMOSLAR); in etm_os_unlock()
87 static void etm_set_pwrdwn(struct etm_drvdata *drvdata) in etm_set_pwrdwn() argument
94 etmcr = etm_readl(drvdata, ETMCR); in etm_set_pwrdwn()
96 etm_writel(drvdata, etmcr, ETMCR); in etm_set_pwrdwn()
99 static void etm_clr_pwrdwn(struct etm_drvdata *drvdata) in etm_clr_pwrdwn() argument
103 etmcr = etm_readl(drvdata, ETMCR); in etm_clr_pwrdwn()
105 etm_writel(drvdata, etmcr, ETMCR); in etm_clr_pwrdwn()
111 static void etm_set_pwrup(struct etm_drvdata *drvdata) in etm_set_pwrup() argument
115 etmpdcr = readl_relaxed(drvdata->base + ETMPDCR); in etm_set_pwrup()
117 writel_relaxed(etmpdcr, drvdata->base + ETMPDCR); in etm_set_pwrup()
123 static void etm_clr_pwrup(struct etm_drvdata *drvdata) in etm_clr_pwrup() argument
130 etmpdcr = readl_relaxed(drvdata->base + ETMPDCR); in etm_clr_pwrup()
132 writel_relaxed(etmpdcr, drvdata->base + ETMPDCR); in etm_clr_pwrup()
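
The four helpers above share one read-modify-write shape on ETMCR and ETMPDCR. A sketch of one set pair; the bit masks (ETMCR_PWD_DWN, ETMPDCR_PWD_UP) are assumed driver-local defines, and the barriers follow the comments visible in the surrounding source:

static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	/* Ensure pending register accesses complete before power-down. */
	mb();
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
}

static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	/* ETMPDCR is always memory mapped, hence the raw accessors. */
	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr |= ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
	/* Let the power-up take effect before touching other registers. */
	mb();
	isb();
}
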
149 static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset, in coresight_timeout_etm() argument
156 val = etm_readl(drvdata, offset); in coresight_timeout_etm()
180 static void etm_set_prog(struct etm_drvdata *drvdata) in etm_set_prog() argument
184 etmcr = etm_readl(drvdata, ETMCR); in etm_set_prog()
186 etm_writel(drvdata, etmcr, ETMCR); in etm_set_prog()
192 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) { in etm_set_prog()
193 dev_err(drvdata->dev, in etm_set_prog()
199 static void etm_clr_prog(struct etm_drvdata *drvdata) in etm_clr_prog() argument
203 etmcr = etm_readl(drvdata, ETMCR); in etm_clr_prog()
205 etm_writel(drvdata, etmcr, ETMCR); in etm_clr_prog()
211 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) { in etm_clr_prog()
212 dev_err(drvdata->dev, in etm_clr_prog()
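
etm_set_prog()/etm_clr_prog() implement the programming-bit handshake: flip the bit in ETMCR, then poll ETMSR until it reflects the new state, bounded by a busy-wait. A sketch of the poll helper and one caller; TIMEOUT_US, ETMCR_ETM_PRG and ETMSR_PROG_BIT are assumed driver-local defines:

static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
				 int position, int value)
{
	int i;
	u32 val;

	for (i = TIMEOUT_US; i > 0; i--) {
		val = etm_readl(drvdata, offset);
		/* Waiting for the bit to set or to clear? */
		if (value) {
			if (val & BIT(position))
				return 0;
		} else {
			if (!(val & BIT(position)))
				return 0;
		}
		udelay(1);
	}

	return -EAGAIN;
}

static void etm_set_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/* Make the write visible before polling the status register. */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1))
		dev_err(drvdata->dev,
			"%s: timeout observed when probing at offset %#x\n",
			__func__, ETMSR);
}
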
218 static void etm_set_default(struct etm_drvdata *drvdata) in etm_set_default() argument
222 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
223 drvdata->enable_event = ETM_HARD_WIRE_RES_A; in etm_set_default()
225 drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
226 drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
227 drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
228 drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
229 drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
230 drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
231 drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
233 for (i = 0; i < drvdata->nr_cntr; i++) { in etm_set_default()
234 drvdata->cntr_rld_val[i] = 0x0; in etm_set_default()
235 drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
236 drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL; in etm_set_default()
237 drvdata->cntr_val[i] = 0x0; in etm_set_default()
240 drvdata->seq_curr_state = 0x0; in etm_set_default()
241 drvdata->ctxid_idx = 0x0; in etm_set_default()
242 for (i = 0; i < drvdata->nr_ctxid_cmp; i++) { in etm_set_default()
243 drvdata->ctxid_pid[i] = 0x0; in etm_set_default()
244 drvdata->ctxid_vpid[i] = 0x0; in etm_set_default()
247 drvdata->ctxid_mask = 0x0; in etm_set_default()
254 struct etm_drvdata *drvdata = info; in etm_enable_hw() local
256 CS_UNLOCK(drvdata->base); in etm_enable_hw()
259 etm_clr_pwrdwn(drvdata); in etm_enable_hw()
261 etm_set_pwrup(drvdata); in etm_enable_hw()
263 etm_os_unlock(drvdata); in etm_enable_hw()
265 etm_set_prog(drvdata); in etm_enable_hw()
267 etmcr = etm_readl(drvdata, ETMCR); in etm_enable_hw()
269 etmcr |= drvdata->port_size; in etm_enable_hw()
270 etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR); in etm_enable_hw()
271 etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER); in etm_enable_hw()
272 etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR); in etm_enable_hw()
273 etm_writel(drvdata, drvdata->enable_event, ETMTEEVR); in etm_enable_hw()
274 etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1); in etm_enable_hw()
275 etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR); in etm_enable_hw()
276 for (i = 0; i < drvdata->nr_addr_cmp; i++) { in etm_enable_hw()
277 etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i)); in etm_enable_hw()
278 etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i)); in etm_enable_hw()
280 for (i = 0; i < drvdata->nr_cntr; i++) { in etm_enable_hw()
281 etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i)); in etm_enable_hw()
282 etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i)); in etm_enable_hw()
283 etm_writel(drvdata, drvdata->cntr_rld_event[i], in etm_enable_hw()
285 etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i)); in etm_enable_hw()
287 etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR); in etm_enable_hw()
288 etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR); in etm_enable_hw()
289 etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR); in etm_enable_hw()
290 etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR); in etm_enable_hw()
291 etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR); in etm_enable_hw()
292 etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR); in etm_enable_hw()
293 etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR); in etm_enable_hw()
294 for (i = 0; i < drvdata->nr_ext_out; i++) in etm_enable_hw()
295 etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i)); in etm_enable_hw()
296 for (i = 0; i < drvdata->nr_ctxid_cmp; i++) in etm_enable_hw()
297 etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i)); in etm_enable_hw()
298 etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR); in etm_enable_hw()
299 etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR); in etm_enable_hw()
301 etm_writel(drvdata, 0x0, ETMEXTINSELR); in etm_enable_hw()
302 etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR); in etm_enable_hw()
304 etm_writel(drvdata, 0x0, ETMAUXCR); in etm_enable_hw()
305 etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR); in etm_enable_hw()
307 etm_writel(drvdata, 0x0, ETMVMIDCVR); in etm_enable_hw()
310 etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR); in etm_enable_hw()
312 etm_clr_prog(drvdata); in etm_enable_hw()
313 CS_LOCK(drvdata->base); in etm_enable_hw()
315 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); in etm_enable_hw()
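
Stripped of the individual register writes, the matches in etm_enable_hw() outline a fixed bracket: unlock the CoreSight registers, clear power-down, assert power-up, clear the OS lock, enter programming mode, write the configuration, then leave programming mode and re-lock. A condensed sketch of that ordering; the elided middle is the configuration block listed line by line above:

static void etm_enable_hw(void *info)
{
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);	/* open the software lock (LAR) */

	etm_clr_pwrdwn(drvdata);	/* take the ETM out of power-down */
	etm_set_pwrup(drvdata);		/* assert power-up in ETMPDCR */
	etm_os_unlock(drvdata);		/* clear the OS lock via ETMOSLAR */
	etm_set_prog(drvdata);		/* enter programming mode */

	/* ... program ETMCR, events, comparators, counters, sequencer ... */

	etm_clr_prog(drvdata);		/* leave programming mode: tracing starts */
	CS_LOCK(drvdata->base);		/* re-engage the software lock */
}
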
320 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in etm_trace_id() local
324 if (!drvdata->enable) in etm_trace_id()
325 return drvdata->traceid; in etm_trace_id()
328 spin_lock_irqsave(&drvdata->spinlock, flags); in etm_trace_id()
330 CS_UNLOCK(drvdata->base); in etm_trace_id()
331 trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK); in etm_trace_id()
332 CS_LOCK(drvdata->base); in etm_trace_id()
334 spin_unlock_irqrestore(&drvdata->spinlock, flags); in etm_trace_id()
342 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in etm_enable() local
346 spin_lock(&drvdata->spinlock); in etm_enable()
353 if (cpu_online(drvdata->cpu)) { in etm_enable()
354 ret = smp_call_function_single(drvdata->cpu, in etm_enable()
355 etm_enable_hw, drvdata, 1); in etm_enable()
360 drvdata->enable = true; in etm_enable()
361 drvdata->sticky_enable = true; in etm_enable()
363 spin_unlock(&drvdata->spinlock); in etm_enable()
365 dev_info(drvdata->dev, "ETM tracing enabled\n"); in etm_enable()
368 spin_unlock(&drvdata->spinlock); in etm_enable()
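
Because the ETM registers are banked per CPU, the enable path programs the hardware from the CPU the tracer is bound to, via smp_call_function_single() under the drvdata spinlock. A sketch of that control flow with the error handling reduced to the essentials (the -ENODEV fallback for an offline CPU is an assumption about the elided branch):

static int etm_enable(struct coresight_device *csdev)
{
	int ret = 0;
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock(&drvdata->spinlock);

	/* Configuration must be written on the CPU the ETM belongs to. */
	if (cpu_online(drvdata->cpu))
		ret = smp_call_function_single(drvdata->cpu,
					       etm_enable_hw, drvdata, 1);
	else
		ret = -ENODEV;

	if (!ret) {
		drvdata->enable = true;
		drvdata->sticky_enable = true;
	}

	spin_unlock(&drvdata->spinlock);

	if (!ret)
		dev_info(drvdata->dev, "ETM tracing enabled\n");
	return ret;
}
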
376 struct etm_drvdata *drvdata = info; in etm_disable_hw() local
378 CS_UNLOCK(drvdata->base); in etm_disable_hw()
379 etm_set_prog(drvdata); in etm_disable_hw()
382 etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR); in etm_disable_hw()
385 drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); in etm_disable_hw()
387 for (i = 0; i < drvdata->nr_cntr; i++) in etm_disable_hw()
388 drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); in etm_disable_hw()
390 etm_set_pwrdwn(drvdata); in etm_disable_hw()
391 CS_LOCK(drvdata->base); in etm_disable_hw()
393 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); in etm_disable_hw()
398 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in etm_disable() local
407 spin_lock(&drvdata->spinlock); in etm_disable()
413 smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1); in etm_disable()
414 drvdata->enable = false; in etm_disable()
416 spin_unlock(&drvdata->spinlock); in etm_disable()
420 dev_info(drvdata->dev, "ETM tracing disabled\n"); in etm_disable()
437 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in nr_addr_cmp_show() local
439 val = drvdata->nr_addr_cmp; in nr_addr_cmp_show()
447 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in nr_cntr_show() local
449 val = drvdata->nr_cntr; in nr_cntr_show()
458 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in nr_ctxid_cmp_show() local
460 val = drvdata->nr_ctxid_cmp; in nr_ctxid_cmp_show()
469 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in etmsr_show() local
471 pm_runtime_get_sync(drvdata->dev); in etmsr_show()
472 spin_lock_irqsave(&drvdata->spinlock, flags); in etmsr_show()
473 CS_UNLOCK(drvdata->base); in etmsr_show()
475 val = etm_readl(drvdata, ETMSR); in etmsr_show()
477 CS_LOCK(drvdata->base); in etmsr_show()
478 spin_unlock_irqrestore(&drvdata->spinlock, flags); in etmsr_show()
479 pm_runtime_put(drvdata->dev); in etmsr_show()
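
etmsr_show() is the template for sysfs reads that touch live hardware: take a runtime-PM reference so the trace unit is clocked, grab the spinlock with interrupts disabled, unlock the CoreSight registers for the read, and unwind in reverse order. A sketch, assuming the usual scnprintf tail for sysfs attributes:

static ssize_t etmsr_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	unsigned long flags, val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);	/* make sure the ETM is powered */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = etm_readl(drvdata, ETMSR);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
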
491 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in reset_store() local
498 spin_lock(&drvdata->spinlock); in reset_store()
499 drvdata->mode = ETM_MODE_EXCLUDE; in reset_store()
500 drvdata->ctrl = 0x0; in reset_store()
501 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL; in reset_store()
502 drvdata->startstop_ctrl = 0x0; in reset_store()
503 drvdata->addr_idx = 0x0; in reset_store()
504 for (i = 0; i < drvdata->nr_addr_cmp; i++) { in reset_store()
505 drvdata->addr_val[i] = 0x0; in reset_store()
506 drvdata->addr_acctype[i] = 0x0; in reset_store()
507 drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE; in reset_store()
509 drvdata->cntr_idx = 0x0; in reset_store()
511 etm_set_default(drvdata); in reset_store()
512 spin_unlock(&drvdata->spinlock); in reset_store()
523 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in mode_show() local
525 val = drvdata->mode; in mode_show()
535 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in mode_store() local
541 spin_lock(&drvdata->spinlock); in mode_store()
542 drvdata->mode = val & ETM_MODE_ALL; in mode_store()
544 if (drvdata->mode & ETM_MODE_EXCLUDE) in mode_store()
545 drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC; in mode_store()
547 drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC; in mode_store()
549 if (drvdata->mode & ETM_MODE_CYCACC) in mode_store()
550 drvdata->ctrl |= ETMCR_CYC_ACC; in mode_store()
552 drvdata->ctrl &= ~ETMCR_CYC_ACC; in mode_store()
554 if (drvdata->mode & ETM_MODE_STALL) { in mode_store()
555 if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) { in mode_store()
556 dev_warn(drvdata->dev, "stall mode not supported\n"); in mode_store()
560 drvdata->ctrl |= ETMCR_STALL_MODE; in mode_store()
562 drvdata->ctrl &= ~ETMCR_STALL_MODE; in mode_store()
564 if (drvdata->mode & ETM_MODE_TIMESTAMP) { in mode_store()
565 if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) { in mode_store()
566 dev_warn(drvdata->dev, "timestamp not supported\n"); in mode_store()
570 drvdata->ctrl |= ETMCR_TIMESTAMP_EN; in mode_store()
572 drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN; in mode_store()
574 if (drvdata->mode & ETM_MODE_CTXID) in mode_store()
575 drvdata->ctrl |= ETMCR_CTXID_SIZE; in mode_store()
577 drvdata->ctrl &= ~ETMCR_CTXID_SIZE; in mode_store()
578 spin_unlock(&drvdata->spinlock); in mode_store()
583 spin_unlock(&drvdata->spinlock); in mode_store()
592 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in trigger_event_show() local
594 val = drvdata->trigger_event; in trigger_event_show()
604 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in trigger_event_store() local
610 drvdata->trigger_event = val & ETM_EVENT_MASK; in trigger_event_store()
620 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in enable_event_show() local
622 val = drvdata->enable_event; in enable_event_show()
632 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in enable_event_store() local
638 drvdata->enable_event = val & ETM_EVENT_MASK; in enable_event_store()
648 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in fifofull_level_show() local
650 val = drvdata->fifofull_level; in fifofull_level_show()
660 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in fifofull_level_store() local
666 drvdata->fifofull_level = val; in fifofull_level_store()
676 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_idx_show() local
678 val = drvdata->addr_idx; in addr_idx_show()
688 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_idx_store() local
694 if (val >= drvdata->nr_addr_cmp) in addr_idx_store()
701 spin_lock(&drvdata->spinlock); in addr_idx_store()
702 drvdata->addr_idx = val; in addr_idx_store()
703 spin_unlock(&drvdata->spinlock); in addr_idx_store()
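
The store side of the attributes follows a matching pattern: parse with kstrtoul(), validate against the limits discovered at probe time (here nr_addr_cmp), and update the cached configuration under the spinlock; the value only reaches the hardware on the next etm_enable_hw(). A sketch:

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	if (val >= drvdata->nr_addr_cmp)
		return -EINVAL;

	/*
	 * Take the spinlock so the index cannot change while other
	 * attribute handlers dereference it under their own lock.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->addr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
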
714 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_single_show() local
716 spin_lock(&drvdata->spinlock); in addr_single_show()
717 idx = drvdata->addr_idx; in addr_single_show()
718 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || in addr_single_show()
719 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { in addr_single_show()
720 spin_unlock(&drvdata->spinlock); in addr_single_show()
724 val = drvdata->addr_val[idx]; in addr_single_show()
725 spin_unlock(&drvdata->spinlock); in addr_single_show()
737 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_single_store() local
743 spin_lock(&drvdata->spinlock); in addr_single_store()
744 idx = drvdata->addr_idx; in addr_single_store()
745 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || in addr_single_store()
746 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { in addr_single_store()
747 spin_unlock(&drvdata->spinlock); in addr_single_store()
751 drvdata->addr_val[idx] = val; in addr_single_store()
752 drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; in addr_single_store()
753 spin_unlock(&drvdata->spinlock); in addr_single_store()
764 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_range_show() local
766 spin_lock(&drvdata->spinlock); in addr_range_show()
767 idx = drvdata->addr_idx; in addr_range_show()
769 spin_unlock(&drvdata->spinlock); in addr_range_show()
772 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && in addr_range_show()
773 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || in addr_range_show()
774 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && in addr_range_show()
775 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { in addr_range_show()
776 spin_unlock(&drvdata->spinlock); in addr_range_show()
780 val1 = drvdata->addr_val[idx]; in addr_range_show()
781 val2 = drvdata->addr_val[idx + 1]; in addr_range_show()
782 spin_unlock(&drvdata->spinlock); in addr_range_show()
793 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_range_store() local
801 spin_lock(&drvdata->spinlock); in addr_range_store()
802 idx = drvdata->addr_idx; in addr_range_store()
804 spin_unlock(&drvdata->spinlock); in addr_range_store()
807 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && in addr_range_store()
808 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || in addr_range_store()
809 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && in addr_range_store()
810 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { in addr_range_store()
811 spin_unlock(&drvdata->spinlock); in addr_range_store()
815 drvdata->addr_val[idx] = val1; in addr_range_store()
816 drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE; in addr_range_store()
817 drvdata->addr_val[idx + 1] = val2; in addr_range_store()
818 drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; in addr_range_store()
819 drvdata->enable_ctrl1 |= (1 << (idx/2)); in addr_range_store()
820 spin_unlock(&drvdata->spinlock); in addr_range_store()
831 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_start_show() local
833 spin_lock(&drvdata->spinlock); in addr_start_show()
834 idx = drvdata->addr_idx; in addr_start_show()
835 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || in addr_start_show()
836 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { in addr_start_show()
837 spin_unlock(&drvdata->spinlock); in addr_start_show()
841 val = drvdata->addr_val[idx]; in addr_start_show()
842 spin_unlock(&drvdata->spinlock); in addr_start_show()
854 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_start_store() local
860 spin_lock(&drvdata->spinlock); in addr_start_store()
861 idx = drvdata->addr_idx; in addr_start_store()
862 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || in addr_start_store()
863 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { in addr_start_store()
864 spin_unlock(&drvdata->spinlock); in addr_start_store()
868 drvdata->addr_val[idx] = val; in addr_start_store()
869 drvdata->addr_type[idx] = ETM_ADDR_TYPE_START; in addr_start_store()
870 drvdata->startstop_ctrl |= (1 << idx); in addr_start_store()
871 drvdata->enable_ctrl1 |= BIT(25); in addr_start_store()
872 spin_unlock(&drvdata->spinlock); in addr_start_store()
883 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_stop_show() local
885 spin_lock(&drvdata->spinlock); in addr_stop_show()
886 idx = drvdata->addr_idx; in addr_stop_show()
887 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || in addr_stop_show()
888 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { in addr_stop_show()
889 spin_unlock(&drvdata->spinlock); in addr_stop_show()
893 val = drvdata->addr_val[idx]; in addr_stop_show()
894 spin_unlock(&drvdata->spinlock); in addr_stop_show()
906 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_stop_store() local
912 spin_lock(&drvdata->spinlock); in addr_stop_store()
913 idx = drvdata->addr_idx; in addr_stop_store()
914 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || in addr_stop_store()
915 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { in addr_stop_store()
916 spin_unlock(&drvdata->spinlock); in addr_stop_store()
920 drvdata->addr_val[idx] = val; in addr_stop_store()
921 drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP; in addr_stop_store()
922 drvdata->startstop_ctrl |= (1 << (idx + 16)); in addr_stop_store()
923 drvdata->enable_ctrl1 |= ETMTECR1_START_STOP; in addr_stop_store()
924 spin_unlock(&drvdata->spinlock); in addr_stop_store()
934 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_acctype_show() local
936 spin_lock(&drvdata->spinlock); in addr_acctype_show()
937 val = drvdata->addr_acctype[drvdata->addr_idx]; in addr_acctype_show()
938 spin_unlock(&drvdata->spinlock); in addr_acctype_show()
949 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in addr_acctype_store() local
955 spin_lock(&drvdata->spinlock); in addr_acctype_store()
956 drvdata->addr_acctype[drvdata->addr_idx] = val; in addr_acctype_store()
957 spin_unlock(&drvdata->spinlock); in addr_acctype_store()
967 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_idx_show() local
969 val = drvdata->cntr_idx; in cntr_idx_show()
979 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_idx_store() local
985 if (val >= drvdata->nr_cntr) in cntr_idx_store()
991 spin_lock(&drvdata->spinlock); in cntr_idx_store()
992 drvdata->cntr_idx = val; in cntr_idx_store()
993 spin_unlock(&drvdata->spinlock); in cntr_idx_store()
1003 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_rld_val_show() local
1005 spin_lock(&drvdata->spinlock); in cntr_rld_val_show()
1006 val = drvdata->cntr_rld_val[drvdata->cntr_idx]; in cntr_rld_val_show()
1007 spin_unlock(&drvdata->spinlock); in cntr_rld_val_show()
1018 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_rld_val_store() local
1024 spin_lock(&drvdata->spinlock); in cntr_rld_val_store()
1025 drvdata->cntr_rld_val[drvdata->cntr_idx] = val; in cntr_rld_val_store()
1026 spin_unlock(&drvdata->spinlock); in cntr_rld_val_store()
1036 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_event_show() local
1038 spin_lock(&drvdata->spinlock); in cntr_event_show()
1039 val = drvdata->cntr_event[drvdata->cntr_idx]; in cntr_event_show()
1040 spin_unlock(&drvdata->spinlock); in cntr_event_show()
1051 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_event_store() local
1057 spin_lock(&drvdata->spinlock); in cntr_event_store()
1058 drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; in cntr_event_store()
1059 spin_unlock(&drvdata->spinlock); in cntr_event_store()
1069 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_rld_event_show() local
1071 spin_lock(&drvdata->spinlock); in cntr_rld_event_show()
1072 val = drvdata->cntr_rld_event[drvdata->cntr_idx]; in cntr_rld_event_show()
1073 spin_unlock(&drvdata->spinlock); in cntr_rld_event_show()
1084 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_rld_event_store() local
1090 spin_lock(&drvdata->spinlock); in cntr_rld_event_store()
1091 drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; in cntr_rld_event_store()
1092 spin_unlock(&drvdata->spinlock); in cntr_rld_event_store()
1103 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_val_show() local
1105 if (!drvdata->enable) { in cntr_val_show()
1106 spin_lock(&drvdata->spinlock); in cntr_val_show()
1107 for (i = 0; i < drvdata->nr_cntr; i++) in cntr_val_show()
1109 i, drvdata->cntr_val[i]); in cntr_val_show()
1110 spin_unlock(&drvdata->spinlock); in cntr_val_show()
1114 for (i = 0; i < drvdata->nr_cntr; i++) { in cntr_val_show()
1115 val = etm_readl(drvdata, ETMCNTVRn(i)); in cntr_val_show()
1128 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cntr_val_store() local
1134 spin_lock(&drvdata->spinlock); in cntr_val_store()
1135 drvdata->cntr_val[drvdata->cntr_idx] = val; in cntr_val_store()
1136 spin_unlock(&drvdata->spinlock); in cntr_val_store()
1146 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_12_event_show() local
1148 val = drvdata->seq_12_event; in seq_12_event_show()
1158 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_12_event_store() local
1164 drvdata->seq_12_event = val & ETM_EVENT_MASK; in seq_12_event_store()
1173 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_21_event_show() local
1175 val = drvdata->seq_21_event; in seq_21_event_show()
1185 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_21_event_store() local
1191 drvdata->seq_21_event = val & ETM_EVENT_MASK; in seq_21_event_store()
1200 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_23_event_show() local
1202 val = drvdata->seq_23_event; in seq_23_event_show()
1212 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_23_event_store() local
1218 drvdata->seq_23_event = val & ETM_EVENT_MASK; in seq_23_event_store()
1227 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_31_event_show() local
1229 val = drvdata->seq_31_event; in seq_31_event_show()
1239 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_31_event_store() local
1245 drvdata->seq_31_event = val & ETM_EVENT_MASK; in seq_31_event_store()
1254 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_32_event_show() local
1256 val = drvdata->seq_32_event; in seq_32_event_show()
1266 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_32_event_store() local
1272 drvdata->seq_32_event = val & ETM_EVENT_MASK; in seq_32_event_store()
1281 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_13_event_show() local
1283 val = drvdata->seq_13_event; in seq_13_event_show()
1293 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_13_event_store() local
1299 drvdata->seq_13_event = val & ETM_EVENT_MASK; in seq_13_event_store()
1308 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_curr_state_show() local
1310 if (!drvdata->enable) { in seq_curr_state_show()
1311 val = drvdata->seq_curr_state; in seq_curr_state_show()
1315 pm_runtime_get_sync(drvdata->dev); in seq_curr_state_show()
1316 spin_lock_irqsave(&drvdata->spinlock, flags); in seq_curr_state_show()
1318 CS_UNLOCK(drvdata->base); in seq_curr_state_show()
1319 val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); in seq_curr_state_show()
1320 CS_LOCK(drvdata->base); in seq_curr_state_show()
1322 spin_unlock_irqrestore(&drvdata->spinlock, flags); in seq_curr_state_show()
1323 pm_runtime_put(drvdata->dev); in seq_curr_state_show()
1334 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in seq_curr_state_store() local
1343 drvdata->seq_curr_state = val; in seq_curr_state_store()
1353 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in ctxid_idx_show() local
1355 val = drvdata->ctxid_idx; in ctxid_idx_show()
1365 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in ctxid_idx_store() local
1371 if (val >= drvdata->nr_ctxid_cmp) in ctxid_idx_store()
1378 spin_lock(&drvdata->spinlock); in ctxid_idx_store()
1379 drvdata->ctxid_idx = val; in ctxid_idx_store()
1380 spin_unlock(&drvdata->spinlock); in ctxid_idx_store()
1390 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in ctxid_pid_show() local
1392 spin_lock(&drvdata->spinlock); in ctxid_pid_show()
1393 val = drvdata->ctxid_vpid[drvdata->ctxid_idx]; in ctxid_pid_show()
1394 spin_unlock(&drvdata->spinlock); in ctxid_pid_show()
1405 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in ctxid_pid_store() local
1413 spin_lock(&drvdata->spinlock); in ctxid_pid_store()
1414 drvdata->ctxid_pid[drvdata->ctxid_idx] = pid; in ctxid_pid_store()
1415 drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid; in ctxid_pid_store()
1416 spin_unlock(&drvdata->spinlock); in ctxid_pid_store()
1426 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in ctxid_mask_show() local
1428 val = drvdata->ctxid_mask; in ctxid_mask_show()
1438 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in ctxid_mask_store() local
1444 drvdata->ctxid_mask = val; in ctxid_mask_store()
1453 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in sync_freq_show() local
1455 val = drvdata->sync_freq; in sync_freq_show()
1465 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in sync_freq_store() local
1471 drvdata->sync_freq = val & ETM_SYNC_MASK; in sync_freq_store()
1480 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in timestamp_event_show() local
1482 val = drvdata->timestamp_event; in timestamp_event_show()
1492 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in timestamp_event_store() local
1498 drvdata->timestamp_event = val & ETM_EVENT_MASK; in timestamp_event_store()
1507 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in cpu_show() local
1509 val = drvdata->cpu; in cpu_show()
1519 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in traceid_show() local
1521 if (!drvdata->enable) { in traceid_show()
1522 val = drvdata->traceid; in traceid_show()
1526 pm_runtime_get_sync(drvdata->dev); in traceid_show()
1527 spin_lock_irqsave(&drvdata->spinlock, flags); in traceid_show()
1528 CS_UNLOCK(drvdata->base); in traceid_show()
1530 val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK); in traceid_show()
1532 CS_LOCK(drvdata->base); in traceid_show()
1533 spin_unlock_irqrestore(&drvdata->spinlock, flags); in traceid_show()
1534 pm_runtime_put(drvdata->dev); in traceid_show()
1545 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); in traceid_store() local
1551 drvdata->traceid = val & ETM_TRACEID_MASK; in traceid_store()
1598 struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
1600 readl_relaxed(drvdata->base + offset)); \
1708 struct etm_drvdata *drvdata = info; in etm_init_arch_data() local
1710 CS_UNLOCK(drvdata->base); in etm_init_arch_data()
1713 (void)etm_readl(drvdata, ETMPDSR); in etm_init_arch_data()
1715 etm_set_pwrup(drvdata); in etm_init_arch_data()
1720 etm_clr_pwrdwn(drvdata); in etm_init_arch_data()
1725 etm_set_prog(drvdata); in etm_init_arch_data()
1728 etmidr = etm_readl(drvdata, ETMIDR); in etm_init_arch_data()
1729 drvdata->arch = BMVAL(etmidr, 4, 11); in etm_init_arch_data()
1730 drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK; in etm_init_arch_data()
1732 drvdata->etmccer = etm_readl(drvdata, ETMCCER); in etm_init_arch_data()
1733 etmccr = etm_readl(drvdata, ETMCCR); in etm_init_arch_data()
1734 drvdata->etmccr = etmccr; in etm_init_arch_data()
1735 drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2; in etm_init_arch_data()
1736 drvdata->nr_cntr = BMVAL(etmccr, 13, 15); in etm_init_arch_data()
1737 drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19); in etm_init_arch_data()
1738 drvdata->nr_ext_out = BMVAL(etmccr, 20, 22); in etm_init_arch_data()
1739 drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25); in etm_init_arch_data()
1741 etm_set_pwrdwn(drvdata); in etm_init_arch_data()
1742 etm_clr_pwrup(drvdata); in etm_init_arch_data()
1743 CS_LOCK(drvdata->base); in etm_init_arch_data()
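
etm_init_arch_data() runs once per CPU at probe time and derives the resource counts from the ID registers; the BMVAL() extractions above decode ETMIDR and ETMCCR fields. A sketch of that discovery step, treating BMVAL() as the driver's local (value, lsb, msb) bit-field extractor:

static void etm_init_arch_data(void *info)
{
	u32 etmidr, etmccr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* Dummy read of ETMPDSR, kept from the matches above. */
	(void)etm_readl(drvdata, ETMPDSR);
	etm_set_pwrup(drvdata);
	etm_clr_pwrdwn(drvdata);
	etm_set_prog(drvdata);

	/* Architecture version and trace port size. */
	etmidr = etm_readl(drvdata, ETMIDR);
	drvdata->arch = BMVAL(etmidr, 4, 11);
	drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;

	/* Resource counts: ETMCCR[3:0] holds address comparator *pairs*. */
	drvdata->etmccer = etm_readl(drvdata, ETMCCER);
	etmccr = etm_readl(drvdata, ETMCCR);
	drvdata->etmccr = etmccr;
	drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
	drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

	etm_set_pwrdwn(drvdata);
	etm_clr_pwrup(drvdata);
	CS_LOCK(drvdata->base);
}
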
1746 static void etm_init_default_data(struct etm_drvdata *drvdata) in etm_init_default_data() argument
1767 drvdata->traceid = etm3x_traceid++; in etm_init_default_data()
1768 drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN); in etm_init_default_data()
1769 drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1; in etm_init_default_data()
1770 if (drvdata->nr_addr_cmp >= 2) { in etm_init_default_data()
1771 drvdata->addr_val[0] = (u32) _stext; in etm_init_default_data()
1772 drvdata->addr_val[1] = (u32) _etext; in etm_init_default_data()
1773 drvdata->addr_acctype[0] = flags; in etm_init_default_data()
1774 drvdata->addr_acctype[1] = flags; in etm_init_default_data()
1775 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE; in etm_init_default_data()
1776 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE; in etm_init_default_data()
1779 etm_set_default(drvdata); in etm_init_default_data()
1788 struct etm_drvdata *drvdata; in etm_probe() local
1797 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); in etm_probe()
1798 if (!drvdata) in etm_probe()
1807 drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14"); in etm_probe()
1810 drvdata->dev = &adev->dev; in etm_probe()
1811 dev_set_drvdata(dev, drvdata); in etm_probe()
1818 drvdata->base = base; in etm_probe()
1820 spin_lock_init(&drvdata->spinlock); in etm_probe()
1822 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ in etm_probe()
1823 if (!IS_ERR(drvdata->atclk)) { in etm_probe()
1824 ret = clk_prepare_enable(drvdata->atclk); in etm_probe()
1829 drvdata->cpu = pdata ? pdata->cpu : 0; in etm_probe()
1832 etmdrvdata[drvdata->cpu] = drvdata; in etm_probe()
1834 if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1)) in etm_probe()
1835 drvdata->os_unlock = true; in etm_probe()
1837 if (smp_call_function_single(drvdata->cpu, in etm_probe()
1838 etm_init_arch_data, drvdata, 1)) in etm_probe()
1846 if (etm_arch_supported(drvdata->arch) == false) { in etm_probe()
1850 etm_init_default_data(drvdata); in etm_probe()
1858 drvdata->csdev = coresight_register(desc); in etm_probe()
1859 if (IS_ERR(drvdata->csdev)) { in etm_probe()
1860 ret = PTR_ERR(drvdata->csdev); in etm_probe()
1868 coresight_enable(drvdata->csdev); in etm_probe()
1869 drvdata->boot_enable = true; in etm_probe()
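
The probe path ties the pieces together: allocate drvdata, map the registers, optionally grab the ATB clock, bind to a CPU, interrogate the hardware on that CPU, and finally register with the CoreSight core. A compressed sketch of that sequence; error unwinding, the CPU binding from platform data, and the coresight_desc/coresight_register() setup are elided:

static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct etm_drvdata *drvdata;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->use_cp14 = of_property_read_bool(dev->of_node, "arm,cp14");
	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	base = devm_ioremap_resource(dev, &adev->res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}

	drvdata->cpu = 0;	/* normally taken from the CoreSight platform data */

	/* Hardware discovery must run on the CPU this ETM is attached to. */
	if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;
	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	etm_init_default_data(drvdata);

	/* ... fill a coresight_desc and call coresight_register() ... */
	return 0;
}
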
1882 struct etm_drvdata *drvdata = amba_get_drvdata(adev); in etm_remove() local
1884 coresight_unregister(drvdata->csdev); in etm_remove()
1894 struct etm_drvdata *drvdata = dev_get_drvdata(dev); in etm_runtime_suspend() local
1896 if (drvdata && !IS_ERR(drvdata->atclk)) in etm_runtime_suspend()
1897 clk_disable_unprepare(drvdata->atclk); in etm_runtime_suspend()
1904 struct etm_drvdata *drvdata = dev_get_drvdata(dev); in etm_runtime_resume() local
1906 if (drvdata && !IS_ERR(drvdata->atclk)) in etm_runtime_resume()
1907 clk_prepare_enable(drvdata->atclk); in etm_runtime_resume()
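
Runtime PM only has to manage the optional ATB clock: gate it on suspend, ungate it on resume, and tolerate the clock being absent (devm_clk_get() having returned an error pointer). A sketch of the pair:

static int etm_runtime_suspend(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}

static int etm_runtime_resume(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_prepare_enable(drvdata->atclk);

	return 0;
}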