/*
 * Copyright 2014 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/interrupt.h>

#include <asm/processor.h>
#include <asm/pmc.h>

/* Handler installed by the client that currently owns the PMC hardware. */
perf_irq_t perf_irq;

/*
 * Called from the PERF_COUNT/AUX_PERF_COUNT interrupt vectors; the
 * registered handler runs in NMI context, since these interrupts can
 * fire while normal interrupts are disabled.
 */
int handle_perf_interrupt(struct pt_regs *regs, int fault)
{
	int retval;

	if (!perf_irq)
		panic("Unexpected PERF_COUNT interrupt %d\n", fault);

	nmi_enter();
	retval = perf_irq(regs, fault);
	nmi_exit();
	return retval;
}
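
/*
 * A sketch of a handler with the perf_irq_t signature that a client
 * might register (my_pmu_handler is hypothetical; returning zero asks
 * for the perf count interrupts to be unmasked again on return, as
 * described below):
 *
 *	static int my_pmu_handler(struct pt_regs *regs, int fault)
 *	{
 *		unsigned long status = pmc_get_overflow();
 *		...handle each overflowed counter...
 *		pmc_ack_overflow(status);
 *		return 0;
 *	}
 */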

/*
 * Reserve the PMC hardware and register a handler, if no one else
 * holds it.  Returns NULL on success, or the handler that already
 * owns the hardware on failure.
 */
perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq)
{
	return cmpxchg(&perf_irq, NULL, new_perf_irq);
}
EXPORT_SYMBOL(reserve_pmc_hardware);

/* Release PMC hardware. */
void release_pmc_hardware(void)
{
	perf_irq = NULL;
}
EXPORT_SYMBOL(release_pmc_hardware);
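
/*
 * A sketch of the expected calling pattern; my_pmu_handler is a
 * hypothetical client handler, not defined in this file:
 *
 *	if (reserve_pmc_hardware(my_pmu_handler) != NULL)
 *		return -EBUSY;
 *	...program and use the counters...
 *	release_pmc_hardware();
 */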

/*
 * Get the current overflow status of each performance counter,
 * including the auxiliary performance counters.
 */
unsigned long
pmc_get_overflow(void)
{
	unsigned long status;

	/*
	 * Merge base and aux counters into a single status vector:
	 * bits [0, TILE_BASE_COUNTERS) are the base counters, with the
	 * aux counters in the bits above them.
	 */
	status = __insn_mfspr(SPR_PERF_COUNT_STS);
	status |= __insn_mfspr(SPR_AUX_PERF_COUNT_STS) << TILE_BASE_COUNTERS;
	return status;
}

/*
 * Clear the status bit for each counter whose bit is set in the
 * given vector, by writing ones back to the status SPRs.
 */
void
pmc_ack_overflow(unsigned long status)
{
	/*
	 * Clear overflow status by writing ones; bits above
	 * TILE_BASE_COUNTERS go to the aux status register.
	 */
	__insn_mtspr(SPR_PERF_COUNT_STS, status);
	__insn_mtspr(SPR_AUX_PERF_COUNT_STS, status >> TILE_BASE_COUNTERS);
}
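
/*
 * A sketch of how a handler might pair the two helpers above;
 * handle_counter_overflow is a hypothetical per-counter routine:
 *
 *	unsigned long status = pmc_get_overflow();
 *	int bit;
 *
 *	for_each_set_bit(bit, &status, BITS_PER_LONG)
 *		handle_counter_overflow(bit);
 *	pmc_ack_overflow(status);
 */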

/*
 * The perf count interrupts are masked and unmasked explicitly,
 * and only here.  The normal irq_enable() does not enable them,
 * and irq_disable() does not disable them.  That lets these
 * routines drive the perf count interrupts orthogonally to the
 * normal interrupt machinery.
 *
 * We also mask the perf count interrupts on entry to the perf count
 * interrupt handler in assembly code, and by default unmask them
 * again (with interrupt critical section protection) just before
 * returning from the interrupt.  If the perf count handler returns
 * a non-zero error code, we don't re-enable them before returning.
 *
 * For Pro, we rely on both interrupts being in the same word to update
 * them atomically, so we never have one enabled and one disabled.
 */

#if CHIP_HAS_SPLIT_INTR_MASK()
# if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32
#  error Fix assumptions about which word PERF_COUNT interrupts are in
# endif
#endif

static inline unsigned long long pmc_mask(void)
{
	unsigned long long mask = 1ULL << INT_PERF_COUNT;
	mask |= 1ULL << INT_AUX_PERF_COUNT;
	return mask;
}

void unmask_pmc_interrupts(void)
{
	interrupt_mask_reset_mask(pmc_mask());
}

void mask_pmc_interrupts(void)
{
	interrupt_mask_set_mask(pmc_mask());
}
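
/*
 * A sketch of a caller quiescing the perf count interrupts around
 * counter reprogramming (which control SPRs to write is up to the
 * caller and outside the scope of this file):
 *
 *	mask_pmc_interrupts();
 *	...reprogram the counter control SPRs...
 *	unmask_pmc_interrupts();
 */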