#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

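/*
 * rb_alloc() flag: the buffer is mapped writable by userspace, so the
 * kernel honours the user-visible tail pointer instead of overwriting
 * unread data.
 */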
#define RING_BUFFER_WRITABLE		0x01

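/*
 * Kernel-side state of a mmap()ed perf ring buffer; one instance backs
 * the user-visible control page plus the data (and optional AUX) pages.
 */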
struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
	struct irq_work			irq_work;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll() support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);

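/* RCU callback: map the rcu_head back to its ring_buffer and free it. */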
static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

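/* True if an AUX area has been allocated for this buffer. */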
static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

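/*
 * Total data-area size in bytes: with vmalloc backing, each of the
 * nr_pages "pages" is really 2^page_order contiguous pages.
 */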
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

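/*
 * Generate a chunked copy routine into the ring buffer.  memcpy_func
 * must return the number of bytes it failed to copy; the generated
 * function advances the output handle page by page (wrapping on the
 * power-of-two nr_pages) and returns how many bytes were left uncopied.
 */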
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

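/* Plain kernel-to-kernel copy: cannot fail, so report zero bytes missed. */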
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

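/*
 * Touch no data but report full success; __output_skip() uses this to
 * advance the handle over len bytes without writing them.
 */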
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

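/*
 * Default user-space copy for architectures that don't provide their
 * own: faults must not be taken from this context, so disable page
 * faults and use the inatomic variant, which returns the number of
 * bytes not copied; exactly what DEFINE_OUTPUT_COPY() expects.
 */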
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

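/*
 * Per-CPU, per-context recursion guard.  Events can fire from four
 * nesting contexts (task, softirq, hardirq, NMI); keeping one counter
 * per context lets an NMI record an event while task-level code is
 * mid-record, yet still rejects true recursion within one context.
 *
 * Typical usage (sketch; the "recursion" array is whatever per-CPU
 * state the caller maintains, e.g. obtained via this_cpu_ptr()):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */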
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

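/*
 * Pairs with get_recursion_context(): the compiler barrier keeps the
 * event emission from being reordered past the counter release.
 */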
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

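/*
 * Whether the architecture can sample the user stack; if it can, it
 * also provides perf_user_stack_pointer() to locate the top of it.
 */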
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */