This source file includes the following definitions:
- perf_cgroup_from_task
- perf_sample_data_init
- is_default_overflow_handler
- event_has_any_exclude_flag
- is_sampling_event
- is_software_event
- in_software_context
- is_exclusive_pmu
- perf_arch_fetch_caller_regs
- perf_fetch_caller_regs
- perf_sw_event
- perf_sw_event_sched
- perf_sw_migrate_enabled
- perf_event_task_migrate
- perf_event_task_sched_in
- perf_event_task_sched_out
- perf_callchain_store_context
- perf_callchain_store
- perf_paranoid_tracepoint_raw
- perf_paranoid_cpu
- perf_paranoid_kernel
- has_branch_stack
- needs_branch_stack
- has_aux
- is_write_backward
- has_addr_filter
- perf_event_addr_filters
- perf_aux_output_begin
- perf_aux_output_end
- perf_aux_output_skip
- perf_get_aux
- perf_event_task_migrate
- perf_event_task_sched_in
- perf_event_task_sched_out
- perf_event_init_task
- perf_event_exit_task
- perf_event_free_task
- perf_event_delayed_put
- perf_event_get
- perf_get_event
- perf_event_attrs
- perf_event_read_local
- perf_event_print_debug
- perf_event_task_disable
- perf_event_task_enable
- perf_event_refresh
- perf_sw_event
- perf_sw_event_sched
- perf_bp_event
- perf_event_mmap
- perf_event_ksymbol
- perf_event_bpf_event
- perf_event_exec
- perf_event_comm
- perf_event_namespaces
- perf_event_fork
- perf_event_init
- perf_swevent_get_recursion_context
- perf_swevent_put_recursion_context
- perf_swevent_set_period
- perf_event_enable
- perf_event_disable
- __perf_event_disable
- perf_event_task_tick
- perf_event_release_kernel
- perf_restore_debug_store
- perf_raw_frag_last
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int (*is_in_guest)(void);
        int (*is_user_mode)(void);
        unsigned long (*get_guest_ip)(void);
        void (*handle_intel_pt_intr)(void);
};
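
/*
 * Illustrative sketch only (not part of the original header): how a
 * hypervisor module might wire up the guest callbacks so that PMIs
 * arriving while a guest runs are attributed to guest context. The
 * myhv_* accessors are assumed helpers, not a real API; registration
 * uses perf_register_guest_info_callbacks(), declared further below.
 */
extern bool myhv_vcpu_loaded(void);             /* assumed */
extern int myhv_guest_cpl(void);                /* assumed */
extern unsigned long myhv_guest_rip(void);      /* assumed */

static int myhv_is_in_guest(void)
{
        return myhv_vcpu_loaded();
}

static int myhv_is_user_mode(void)
{
        return myhv_guest_cpl() == 3;           /* ring 3 inside the guest */
}

static unsigned long myhv_get_guest_ip(void)
{
        return myhv_guest_rip();
}

static struct perf_guest_info_callbacks myhv_guest_cbs = {
        .is_in_guest  = myhv_is_in_guest,
        .is_user_mode = myhv_is_user_mode,
        .get_guest_ip = myhv_get_guest_ip,
        /* .handle_intel_pt_intr is optional; leave NULL if PT is not virtualized */
};

/* module init/exit would call:
 *      perf_register_guest_info_callbacks(&myhv_guest_cbs);
 *      perf_unregister_guest_info_callbacks(&myhv_guest_cbs);
 */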

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <asm/local.h>

struct perf_callchain_entry {
        __u64 nr;
        __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
        struct perf_callchain_entry *entry;
        u32 max_stack;
        u32 nr;
        short contexts;
        bool contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
                                     unsigned long off, unsigned long len);

struct perf_raw_frag {
        union {
                struct perf_raw_frag *next;
                unsigned long pad;
        };
        perf_copy_f copy;
        void *data;
        u32 size;
} __packed;

struct perf_raw_record {
        struct perf_raw_frag frag;
        u32 size;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in branch entries
 *
 * Note that nr can vary from sample to sample;
 * branches (to, from) are stored from most recent
 * to least recent, i.e., the last branch is the most recent one,
 * the first branch is the least recent one.
 */
struct perf_branch_stack {
        __u64 nr;
        struct perf_branch_entry entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
        u64 config;             /* register value */
        unsigned int reg;       /* register address or index */
        int alloc;              /* extra register already allocated */
        int idx;                /* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64 config;
                        u64 last_tag;
                        unsigned long config_base;
                        unsigned long event_base;
                        int event_base_rdpmc;
                        int idx;
                        int last_cpu;
                        int flags;

                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;
                };
                struct { /* software */
                        struct hrtimer hrtimer;
                };
                struct { /* tracepoint */
                        /* for tp_event->class */
                        struct list_head tp_list;
                };
                struct { /* amd_power */
                        u64 pwr_acc;
                        u64 ptsc;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem the struct block definition in
                         * linux/ptrace.h would create.
                         */
                        struct arch_hw_breakpoint info;
                        struct list_head bp_list;
                };
#endif
                struct { /* amd_iommu */
                        u8 iommu_bank;
                        u8 iommu_cntr;
                        u16 padding;
                        u64 conf;
                        u64 conf1;
                };
        };

        /*
         * If the event is a per task event, this will point to the task in
         * question. See the comment in perf_event_alloc().
         */
        struct task_struct *target;

        /*
         * PMU would store the hardware filter configuration here.
         */
        void *addr_filters;

        /* Last sync'ed generation of filters */
        unsigned long addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

        int state;

        /*
         * The last observed hardware counter value, updated with a
         * local64_cmpxchg() such that pmu::read() can be called nested.
         */
        local64_t prev_count;

        /*
         * The period to start the next sample with.
         */
        u64 sample_period;

        /*
         * The period we started this sample with.
         */
        u64 last_period;

        /*
         * However much is left of the current period; note that this is
         * a full 64bit value and allows for generation of periods longer
         * than hardware might allow.
         */
        local64_t period_left;

        /*
         * State for throttling the event, see __perf_event_overflow() and
         * perf_adjust_freq_unthr_context().
         */
        u64 interrupts_seq;
        u64 interrupts;

        /*
         * State for freq target events, see __perf_event_overflow() and
         * perf_adjust_freq_unthr_context().
         */
        u64 freq_time_stamp;
        u64 freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD        0x1     /* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ       0x2     /* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT       0x01
#define PERF_PMU_CAP_NO_NMI             0x02
#define PERF_PMU_CAP_AUX_NO_SG          0x04
#define PERF_PMU_CAP_EXTENDED_REGS      0x08
#define PERF_PMU_CAP_EXCLUSIVE          0x10
#define PERF_PMU_CAP_ITRACE             0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
#define PERF_PMU_CAP_NO_EXCLUDE         0x80
#define PERF_PMU_CAP_AUX_OUTPUT         0x100

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head entry;

        struct module *module;
        struct device *dev;
        const struct attribute_group **attr_groups;
        const struct attribute_group **attr_update;
        const char *name;
        int type;

        /*
         * various common per-pmu feature flags
         */
        int capabilities;

        int __percpu *pmu_disable_count;
        struct perf_cpu_context __percpu *pmu_cpu_context;
        atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
        int task_ctx_nr;
        int hrtimer_interval_ms;

        /* number of address filters this PMU can do */
        unsigned int nr_addr_filters;

        /*
         * Fully disable/enable this PMU, can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try and initialize the event for this PMU.
         *
         * Returns:
         *  -ENOENT     -- @event is not for this PMU
         *
         *  -ENODEV     -- @event is for this PMU but PMU not present
         *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
         *  -EINVAL     -- @event is for this PMU but @event is not valid
         *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
         *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
         *
         *  0           -- @event is for this PMU and valid
         *
         * Other error return values are allowed.
         */
        int (*event_init)               (struct perf_event *event);

        /*
         * Notification that the event was mapped or unmapped.  Called
         * in the context of the mapping task.
         */
        void (*event_mapped)            (struct perf_event *event, struct mm_struct *mm); /* optional */
        void (*event_unmapped)          (struct perf_event *event, struct mm_struct *mm); /* optional */

        /*
         * Flags for ->add()/->del()/->start()/->stop(). There are
         * matching hw/sw events on {add,del}/{start,stop} respectively.
         */
#define PERF_EF_START   0x01            /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU, can be done inside a
         * transaction, see the ->*_txn() methods.
         *
         * The add/del callbacks will reserve all hardware resources required
         * to service the event, this includes any counter constraint
         * scheduling etc.
         *
         * Called with IRQs disabled and the PMU disabled on the CPU the event
         * is on.
         *
         * ->add() called without PERF_EF_START should result in the same state
         *  as ->add() followed by ->stop().
         *
         * ->del() must always PERF_EF_UPDATE stop an event. If it calls
         *  ->stop() that must deal with already being stopped without
         *  PERF_EF_UPDATE.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU.
         *
         * The PMI handler should stop the counter when perf_event_overflow()
         * returns !0. ->start() will be used to continue.
         *
         * Also used to change the sample period.
         *
         * Called with IRQs disabled and the PMU disabled on the CPU the event
         * is on.
         *
         * ->stop() with PERF_EF_UPDATE will read the counter and update
         *  period/count values like ->read() would.
         *
         * ->start() with PERF_EF_RELOAD will reprogram the counter
         *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group events scheduling is treated as a transaction: add the group
         * as a whole and perform one schedulability test. If the test fails,
         * roll back the whole group.
         *
         * Start the transaction; after this, ->add() doesn't need to do
         * schedulability tests.
         *
         * Optional.
         */
        void (*start_txn)               (struct pmu *pmu, unsigned int txn_flags);
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         *
         * Optional.
         */
        int  (*commit_txn)              (struct pmu *pmu);
        /*
         * Will cancel the transaction; assumes ->del() is called
         * for each successful ->add() during the transaction.
         *
         * Optional.
         */
        void (*cancel_txn)              (struct pmu *pmu);

        /*
         * Will return the value for perf_event_mmap_page::index for this
         * event; if no implementation is provided it defaults to
         * event->hw.idx + 1.
         */
        int (*event_idx)                (struct perf_event *event); /* optional */

        /*
         * Context-switch callback.
         */
        void (*sched_task)              (struct perf_event_context *ctx,
                                         bool sched_in);
        /*
         * PMU-specific data size (i.e. ctx->task_ctx_data).
         */
        size_t task_ctx_size;

        /*
         * Set up PMU-private data structures for an AUX area.
         */
        void *(*setup_aux)              (struct perf_event *event, void **pages,
                                         int nr_pages, bool overwrite);
                                        /* optional */

        /*
         * Free PMU-private AUX data structures.
         */
        void (*free_aux)                (void *aux); /* optional */

        /*
         * Validate address range filters: make sure the HW supports the
         * requested configuration and number of filters; return 0 if the
         * supplied filters are valid, -errno otherwise.
         *
         * Runs in the context of the ioctl()ing process and is not serialized
         * with the rest of the PMU callbacks.
         */
        int (*addr_filters_validate)    (struct list_head *filters);
                                        /* optional */

        /*
         * Synchronize address range filter configuration: translate
         * hw-agnostic filters into hardware configuration in
         * event::hw::addr_filters.
         *
         * Runs as a part of the filter sync sequence that is done in
         * ->start() by calling perf_event_addr_filters_sync().
         *
         * May (and should) traverse event::addr_filters::list, for which its
         * caller provides necessary serialization.
         */
        void (*addr_filters_sync)       (struct perf_event *event);
                                        /* optional */

        /*
         * Check if event can be used for aux_output purposes for
         * events of this PMU.
         *
         * Runs from perf_event_open(). Should return 0 for "no match"
         * or non-zero for "match".
         */
        int (*aux_output_match)         (struct perf_event *event);
                                        /* optional */

        /*
         * Filter events for PMU-specific reasons.
         */
        int (*filter_match)             (struct perf_event *event); /* optional */

        /*
         * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
         */
        int (*check_period)             (struct perf_event *event, u64 value);
};

enum perf_addr_filter_action_t {
        PERF_ADDR_FILTER_ACTION_STOP = 0,
        PERF_ADDR_FILTER_ACTION_START,
        PERF_ADDR_FILTER_ACTION_FILTER,
};

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:      event's filter list linkage
 * @path:       object file's path for file-based filters
 * @offset:     filter range offset
 * @size:       filter range size (size==0 means single address trigger)
 * @action:     filter/start/stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
        struct list_head entry;
        struct path path;
        unsigned long offset;
        unsigned long size;
        enum perf_addr_filter_action_t action;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:       list of filters for this event
 * @lock:       spinlock that serializes accesses to the @list and event's
 *              (and its children's) filter generations.
 * @nr_file_filters:    number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
        struct list_head list;
        raw_spinlock_t lock;
        unsigned int nr_file_filters;
};

struct perf_addr_filter_range {
        unsigned long start;
        unsigned long size;
};

/**
 * enum perf_event_state - the states of an event:
 */
enum perf_event_state {
        PERF_EVENT_STATE_DEAD           = -4,
        PERF_EVENT_STATE_EXIT           = -3,
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and group caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 */
#define PERF_EV_CAP_SOFTWARE            BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG     BIT(1)

#define SWEVENT_HLIST_BITS      8
#define SWEVENT_HLIST_SIZE      (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head heads[SWEVENT_HLIST_SIZE];
        struct rcu_head rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04
#define PERF_ATTACH_TASK_DATA   0x08
#define PERF_ATTACH_ITRACE      0x10

struct perf_cgroup;
struct ring_buffer;

struct pmu_event_list {
        raw_spinlock_t lock;
        struct list_head list;
};

/*
 * Iterate the siblings of a group: a no-op unless @event is the group
 * leader, which keeps callers from iterating a non-leader's sibling list.
 */
#define for_each_sibling_event(sibling, event)                  \
        if ((event)->group_leader == (event))                   \
                list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        /*
         * entry onto perf_event_context::event_list;
         *   modifications require ctx->lock
         *   RCU safe iterations.
         */
        struct list_head event_entry;

        /*
         * Locked for modification by both ctx->mutex and ctx->lock; holding
         * either suffices for read.
         */
        struct list_head sibling_list;
        struct list_head active_list;
        /*
         * Node on the pinned or flexible tree located at the event context;
         */
        struct rb_node group_node;
        u64 group_index;
        /*
         * We need storage to track the entries in perf_pmu_migrate_context;
         * we cannot use the event_entry because of RCU and we want to keep
         * the group intact, which rules out the other two entries.
         */
        struct list_head migrate_entry;

        struct hlist_node hlist_entry;
        struct list_head active_entry;
        int nr_siblings;

        /* Not serialized. Only written during event initialization. */
        int event_caps;
        /* The cumulative AND of all event_caps for events in this group. */
        int group_caps;

        struct perf_event *group_leader;
        struct pmu *pmu;
        void *pmu_private;

        enum perf_event_state state;
        unsigned int attach_state;
        local64_t count;
        atomic64_t child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         */
        u64 total_time_enabled;
        u64 total_time_running;
        u64 tstamp;

        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64 shadow_ctx_time;

        struct perf_event_attr attr;
        u16 header_size;
        u16 id_header_size;
        u16 read_size;
        struct hw_perf_event hw;

        struct perf_event_context *ctx;
        atomic_long_t refcount;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t child_total_time_enabled;
        atomic64_t child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex child_mutex;
        struct list_head child_list;
        struct perf_event *parent;

        int oncpu;
        int cpu;

        struct list_head owner_entry;
        struct task_struct *owner;

        /* mmap bits */
        struct mutex mmap_mutex;
        atomic_t mmap_count;

        struct ring_buffer *rb;
        struct list_head rb_entry;
        unsigned long rcu_batches;
        int rcu_pending;

        /* poll related */
        wait_queue_head_t waitq;
        struct fasync_struct *fasync;

        /* delayed work for NMIs and such */
        int pending_wakeup;
        int pending_kill;
        int pending_disable;
        struct irq_work pending;

        atomic_t event_limit;

        /* address range filters */
        struct perf_addr_filters_head addr_filters;
        /* vma address array for file-based filters */
        struct perf_addr_filter_range *addr_filter_ranges;
        unsigned long addr_filters_gen;

        /* for aux_output events */
        struct perf_event *aux_event;

        void (*destroy)(struct perf_event *);
        struct rcu_head rcu_head;

        struct pid_namespace *ns;
        u64 id;

        u64 (*clock)(void);
        perf_overflow_handler_t overflow_handler;
        void *overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
        perf_overflow_handler_t orig_overflow_handler;
        struct bpf_prog *prog;
#endif

#ifdef CONFIG_EVENT_TRACING
        struct trace_event_call *tp_event;
        struct event_filter *filter;
#ifdef CONFIG_FUNCTION_TRACER
        struct ftrace_ops ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup *cgrp; /* cgroup the event is attached to */
#endif

        struct list_head sb_list;
#endif /* CONFIG_PERF_EVENTS */
};
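
/*
 * Illustrative sketch only (not part of the original header): summing a
 * group leader and its siblings with for_each_sibling_event(). The real
 * reader path (perf_read_group()) holds ctx->mutex and also folds in
 * child counts; locking is elided here.
 */
static u64 group_total(struct perf_event *leader)
{
        struct perf_event *sibling;
        u64 total = local64_read(&leader->count);

        for_each_sibling_event(sibling, leader)
                total += local64_read(&sibling->count);

        return total;
}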

struct perf_event_groups {
        struct rb_root tree;
        u64 index;
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        struct pmu *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex mutex;

        struct list_head active_ctx_list;
        struct perf_event_groups pinned_groups;
        struct perf_event_groups flexible_groups;
        struct list_head event_list;

        struct list_head pinned_active;
        struct list_head flexible_active;

        int nr_events;
        int nr_active;
        int is_active;
        int nr_stat;
        int nr_freq;
        int rotate_disable;
        /*
         * Set when nr_events != nr_active, except tolerant to events not
         * necessarily active due to scheduling constraints, such as cgroups.
         */
        int rotate_necessary;
        refcount_t refcount;
        struct task_struct *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64 time;
        u64 timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context *parent_ctx;
        u64 parent_gen;
        u64 generation;
        int pin_count;
#ifdef CONFIG_CGROUP_PERF
        int nr_cgroups;         /* cgroup evts */
#endif
        void *task_ctx_data;    /* pmu specific data */
        struct rcu_head rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context ctx;
        struct perf_event_context *task_ctx;
        int active_oncpu;
        int exclusive;

        raw_spinlock_t hrtimer_lock;
        struct hrtimer hrtimer;
        ktime_t hrtimer_interval;
        unsigned int hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup *cgrp;
        struct list_head cgrp_cpuctx_entry;
#endif

        struct list_head sched_cb_entry;
        int sched_cb_usage;

        int online;
};

struct perf_output_handle {
        struct perf_event *event;
        struct ring_buffer *rb;
        unsigned long wakeup;
        unsigned long size;
        u64 aux_flags;
        union {
                void *addr;
                unsigned long head;
        };
        int page;
};

struct bpf_perf_event_data_kern {
        bpf_user_pt_regs_t *regs;
        struct perf_sample_data *data;
        struct perf_event *event;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
        u64 time;
        u64 timestamp;
};

struct perf_cgroup {
        struct cgroup_subsys_state css;
        struct perf_cgroup_info __percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
        return container_of(task_css_check(task, perf_event_cgrp_id,
                                           ctx ? lockdep_is_held(&ctx->lock)
                                               : true),
                            struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
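
/*
 * Illustrative sketch only (not part of the original header): a minimal
 * counting-only PMU wired up against the pmu callbacks documented above.
 * my_read_counter() is an assumed free-running hardware accessor;
 * perf_invalid_context comes from linux/sched.h; registration would happen
 * from module init via perf_pmu_register(), declared below.
 */
extern u64 my_read_counter(void); /* assumed hardware accessor */

static void my_pmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, now;

        /* lockless update so ->read() may be called nested, cf. prev_count */
        do {
                prev = local64_read(&hwc->prev_count);
                now = my_read_counter();
        } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

        local64_add(now - prev, &event->count);
}

static void my_pmu_start(struct perf_event *event, int flags)
{
        local64_set(&event->hw.prev_count, my_read_counter());
        event->hw.state = 0;
}

static void my_pmu_stop(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_UPDATE)
                my_pmu_read(event);
        event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int my_pmu_add(struct perf_event *event, int flags)
{
        /* ->add() without PERF_EF_START must leave the event stopped */
        event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                my_pmu_start(event, PERF_EF_RELOAD);
        return 0;
}

static void my_pmu_del(struct perf_event *event, int flags)
{
        my_pmu_stop(event, PERF_EF_UPDATE);
}

static int my_pmu_event_init(struct perf_event *event)
{
        if (event->attr.type != event->pmu->type)
                return -ENOENT;         /* not ours: core tries the next PMU */
        if (event->attr.sample_period)
                return -EOPNOTSUPP;     /* PERF_PMU_CAP_NO_INTERRUPT: no sampling */
        return 0;
}

static struct pmu my_pmu = {
        .task_ctx_nr    = perf_invalid_context, /* system-wide only */
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
        .event_init     = my_pmu_event_init,
        .add            = my_pmu_add,
        .del            = my_pmu_del,
        .start          = my_pmu_start,
        .stop           = my_pmu_stop,
        .read           = my_pmu_read,
};

/* module init would call: perf_pmu_register(&my_pmu, "my_pmu", -1); */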

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
                                   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
                                unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
                                unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
                                       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
                                        struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);

extern void perf_pmu_resched(struct pmu *pmu);

extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                 int cpu,
                                 struct task_struct *task,
                                 perf_overflow_handler_t callback,
                                 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
                                     int src_cpu, int dst_cpu);
int perf_event_read_local(struct perf_event *event, u64 *value,
                          u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
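
/*
 * Illustrative sketch only (not part of the original header): an in-kernel
 * user creating a CPU-cycles counter on one CPU, then reading and releasing
 * it. PTR_ERR_OR_ZERO() is from linux/err.h; passing a NULL overflow
 * callback selects the default overflow handler.
 */
static struct perf_event *cycles_event;

static int start_cycles_counter(int cpu)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
                .size   = sizeof(attr),
                .pinned = 1,
        };

        cycles_event = perf_event_create_kernel_counter(&attr, cpu,
                                                        NULL, /* per-CPU, not per-task */
                                                        NULL, /* default overflow handler */
                                                        NULL);
        return PTR_ERR_OR_ZERO(cycles_event);
}

static u64 stop_cycles_counter(void)
{
        u64 enabled, running;
        u64 count = perf_event_read_value(cycles_event, &enabled, &running);

        perf_event_release_kernel(cycles_event);
        return count;
}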

struct perf_sample_data {
        /*
         * Fields set by perf_sample_data_init(), grouped so as to
         * minimize the cachelines touched.
         */
        u64 addr;
        struct perf_raw_record *raw;
        struct perf_branch_stack *br_stack;
        u64 period;
        u64 weight;
        u64 txn;
        union perf_mem_data_src data_src;

        /*
         * The other fields, optionally {set,used} by
         * perf_{prepare,output}_sample().
         */
        u64 type;
        u64 ip;
        struct {
                u32 pid;
                u32 tid;
        } tid_entry;
        u64 time;
        u64 id;
        u64 stream_id;
        struct {
                u32 cpu;
                u32 reserved;
        } cpu_entry;
        struct perf_callchain_entry *callchain;

        /*
         * regs_user may point to task_pt_regs or to regs_user_copy, depending
         * on arch details.
         */
        struct perf_regs regs_user;
        struct pt_regs regs_user_copy;

        struct perf_regs regs_intr;
        u64 stack_user_size;

        u64 phys_addr;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
                     PERF_MEM_S(LVL, NA)  |\
                     PERF_MEM_S(SNOOP, NA)|\
                     PERF_MEM_S(LOCK, NA) |\
                     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
                                         u64 addr, u64 period)
{
        /* remaining struct members initialized in perf_prepare_sample() */
        data->addr = addr;
        data->raw  = NULL;
        data->br_stack = NULL;
        data->period = period;
        data->weight = 0;
        data->data_src.val = PERF_MEM_NA;
        data->txn = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs);
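
/*
 * Illustrative sketch only (not part of the original header): the canonical
 * overflow-interrupt pattern in a sampling PMU driver. The on-stack sample
 * data is (re)initialized for every overflow; perf_event_overflow()
 * returning nonzero means the event hit its limit and should be stopped.
 */
static void my_pmu_handle_overflow(struct perf_event *event, struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct hw_perf_event *hwc = &event->hw;

        perf_sample_data_init(&data, 0 /* addr */, hwc->last_period);

        if (perf_event_overflow(event, &data, regs))
                event->pmu->stop(event, 0); /* throttled or over the limit */
}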

extern void perf_event_output_forward(struct perf_event *event,
                                      struct perf_sample_data *data,
                                      struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
                                       struct perf_sample_data *data,
                                       struct pt_regs *regs);
extern int perf_event_output(struct perf_event *event,
                             struct perf_sample_data *data,
                             struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
        if (likely(event->overflow_handler == perf_event_output_forward))
                return true;
        if (unlikely(event->overflow_handler == perf_event_output_backward))
                return true;
        return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
                           struct perf_sample_data *data,
                           struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
                             struct perf_output_handle *handle,
                             struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool event_has_any_exclude_flag(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;

        return attr->exclude_idle || attr->exclude_user ||
               attr->exclude_kernel || attr->exclude_hv ||
               attr->exclude_guest || attr->exclude_host;
}

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

/*
 * Return 1 for event in sw context, 0 for event in hw context
 */
static inline int in_software_context(struct perf_event *event)
{
        return event->ctx->pmu->task_ctx_nr == perf_sw_context;
}

static inline int is_exclusive_pmu(struct pmu *pmu)
{
        return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * In-line sample generation (e.g. software events and the tracepoint
 * tie-in) has no trap-time pt_regs. Synthesize one from the caller using
 * CALLER_ADDR0 so the sample IP points at the function that raised the
 * event.
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
        if (static_key_false(&perf_swevent_enabled[event_id]))
                __perf_sw_event(event_id, nr, regs, addr);
}
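
/*
 * Illustrative sketch only (not part of the original header): how generic
 * code emits a software event. This mirrors the page-fault hook in the
 * arch fault handlers; when the event is enabled nowhere, the static key
 * reduces the call to a patched-out branch.
 */
static void my_fault_hook(struct pt_regs *regs, unsigned long address)
{
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}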

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler; it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
        if (static_key_false(&perf_swevent_enabled[event_id])) {
                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

                perf_fetch_caller_regs(regs);
                ___perf_sw_event(event_id, nr, regs, addr);
        }
}

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
        if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
                return true;
        return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
        if (perf_sw_migrate_enabled())
                task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
{
        if (static_branch_unlikely(&perf_sched_events))
                __perf_event_task_sched_in(prev, task);

        if (perf_sw_migrate_enabled() && task->sched_migrated) {
                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

                perf_fetch_caller_regs(regs);
                ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
                task->sched_migrated = 0;
        }
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
{
        perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

        if (static_branch_unlikely(&perf_sched_events))
                __perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
                               bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
                                 enum perf_bpf_event_type type,
                                 u16 flags);

extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
        if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
                struct perf_callchain_entry *entry = ctx->entry;

                entry->ip[entry->nr++] = ip;
                ++ctx->contexts;
                return 0;
        } else {
                ctx->contexts_maxed = true;
                return -1; /* no more room, stop walking the stack */
        }
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
        if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
                struct perf_callchain_entry *entry = ctx->entry;

                entry->ip[entry->nr++] = ip;
                ++ctx->nr;
                return 0;
        } else {
                return -1; /* no more room, stop walking the stack */
        }
}
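
/*
 * Illustrative sketch only (not part of the original header), mirroring
 * the shape of an architecture's perf_callchain_kernel(): push a context
 * marker, then store frames until the entry fills up. unwind_next_ip() is
 * an assumed arch unwinder, not a real kernel API.
 */
extern unsigned long unwind_next_ip(unsigned long ip); /* assumed */

static void example_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                                     struct pt_regs *regs)
{
        unsigned long ip = instruction_pointer(regs);

        perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);

        while (ip) {
                if (perf_callchain_store(entry, ip))
                        break; /* max_stack reached */
                ip = unwind_next_ip(ip);
        }
}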

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
                                    void __user *buffer, size_t *lenp,
                                    loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                                             void __user *buffer, size_t *lenp,
                                             loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}
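
/*
 * Illustrative sketch only (not part of the original header): how an open
 * path can gate a privileged request, in the same way kernel/events/core.c
 * consults these helpers. capable() is from linux/capability.h.
 */
static int check_kernel_profiling_allowed(const struct perf_event_attr *attr)
{
        if (!attr->exclude_kernel && perf_paranoid_kernel() &&
            !capable(CAP_SYS_ADMIN))
                return -EACCES;
        return 0;
}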

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
                          struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
        return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
        return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
        return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
        return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
        struct perf_addr_filters_head *ifh = &event->addr_filters;

        if (event->parent)
                ifh = &event->parent->addr_filters;

        return ifh;
}
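
/*
 * Illustrative sketch only (not part of the original header): the shape of
 * a PMU's ->addr_filters_sync() callback. It walks the (possibly
 * inherited) filter list under its lock and programs the ranges the core
 * resolved into event->addr_filter_ranges. my_hw_program_range() is an
 * assumed device accessor.
 */
extern void my_hw_program_range(int idx, unsigned long start,
                                unsigned long size, int action); /* assumed */

static void my_pmu_addr_filters_sync(struct perf_event *event)
{
        struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
        struct perf_addr_filter *filter;
        unsigned long flags;
        int i = 0;

        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
                my_hw_program_range(i, event->addr_filter_ranges[i].start,
                                    event->addr_filter_ranges[i].size,
                                    filter->action);
                i++;
        }
        raw_spin_unlock_irqrestore(&ifh->lock, flags);
}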

extern void perf_event_addr_filters_sync(struct perf_event *event);

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
                                     struct perf_event *event,
                                     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
                                      struct perf_event *event,
                                      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
                                     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
                                     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event) { return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
                     unsigned long size) { return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle) { return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task) { }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline const struct perf_event *perf_get_event(struct file *file)
{
        return ERR_PTR(-EINVAL);
}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
        return ERR_PTR(-EINVAL);
}
static inline int perf_event_read_local(struct perf_event *event, u64 *value,
                                        u64 *enabled, u64 *running)
{
        return -EINVAL;
}
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma) { }

typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
                                      bool unregister, const char *sym) { }
static inline void perf_event_bpf_event(struct bpf_prog *prog,
                                        enum perf_bpf_event_type type,
                                        u16 flags) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif /* CONFIG_PERF_EVENTS */

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void) { }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
        return frag->pad < sizeof(u64);
}
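
/*
 * Illustrative sketch only (not part of the original header): computing
 * the payload size of a fragment chain. A terminating fragment stores its
 * alignment pad (< sizeof(u64)) where a chained fragment stores a pointer,
 * which is how perf_raw_frag_last() tells the two apart.
 */
static inline u32 perf_raw_payload_size(const struct perf_raw_frag *frag)
{
        u32 sz = 0;

        do {
                sz += frag->size;
                if (perf_raw_frag_last(frag))
                        break;
                frag = frag->next;
        } while (1);

        return sz;
}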

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
        const char *event_str;
};

struct perf_pmu_events_ht_attr {
        struct device_attribute attr;
        u64 id;
        const char *event_str_ht;
        const char *event_str_noht;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
                              char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
static struct perf_pmu_events_attr _var = {                             \
        .attr = __ATTR(_name, 0444, _show, NULL),                       \
        .id   = _id,                                                    \
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)                        \
static struct perf_pmu_events_attr _var = {                             \
        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
        .id             = 0,                                            \
        .event_str      = _str,                                         \
};

#define PMU_FORMAT_ATTR(_name, _format)                                 \
static ssize_t                                                          \
_name##_show(struct device *dev,                                        \
             struct device_attribute *attr,                             \
             char *page)                                                \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
                                                                        \
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
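
/*
 * Illustrative sketch only (not part of the original header): how a PMU
 * driver exports its config-field layout through sysfs (.../<pmu>/format),
 * following the pattern used by the x86 drivers. The group would be listed
 * in pmu::attr_groups.
 */
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *my_pmu_format_attrs[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        NULL,
};

static const struct attribute_group my_pmu_format_group = {
        .name  = "format",
        .attrs = my_pmu_format_attrs,
};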

/* CPU hotplug callbacks, wired into the cpuhp state machine */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu     NULL
#define perf_event_exit_cpu     NULL
#endif

#endif /* _LINUX_PERF_EVENT_H */