Searched refs:event (Results 1 - 200 of 3953) sorted by relevance


/linux-4.4.14/tools/testing/selftests/powerpc/pmu/ebb/
event_attributes_test.c
13 * Test various attributes of the EBB event are enforced.
17 struct event event, leader; event_attributes() local
21 event_init(&event, 0x1001e); event_attributes()
22 event_leader_ebb_init(&event); event_attributes()
24 FAIL_IF(event_open(&event)); event_attributes()
25 event_close(&event); event_attributes()
28 event_init(&event, 0x001e); /* CYCLES - no PMC specified */ event_attributes()
29 event_leader_ebb_init(&event); event_attributes()
31 FAIL_IF(event_open(&event) == 0); event_attributes()
34 event_init(&event, 0x2001e); event_attributes()
35 event_leader_ebb_init(&event); event_attributes()
36 event.attr.exclusive = 0; event_attributes()
38 FAIL_IF(event_open(&event) == 0); event_attributes()
41 event_init(&event, 0x3001e); event_attributes()
42 event_leader_ebb_init(&event); event_attributes()
43 event.attr.freq = 1; event_attributes()
45 FAIL_IF(event_open(&event) == 0); event_attributes()
48 event_init(&event, 0x4001e); event_attributes()
49 event_leader_ebb_init(&event); event_attributes()
50 event.attr.sample_period = 1; event_attributes()
52 FAIL_IF(event_open(&event) == 0); event_attributes()
55 event_init(&event, 0x1001e); event_attributes()
56 event_leader_ebb_init(&event); event_attributes()
57 event.attr.enable_on_exec = 1; event_attributes()
59 FAIL_IF(event_open(&event) == 0); event_attributes()
62 event_init(&event, 0x1001e); event_attributes()
63 event_leader_ebb_init(&event); event_attributes()
64 event.attr.inherit = 1; event_attributes()
66 FAIL_IF(event_open(&event) == 0); event_attributes()
73 event_init(&event, 0x20002); event_attributes()
74 event_ebb_init(&event); event_attributes()
77 FAIL_IF(event_open_with_group(&event, leader.fd)); event_attributes()
79 event_close(&event); event_attributes()
86 event_init(&event, 0x20002); event_attributes()
88 /* Expected to fail, event doesn't request EBB, leader does */ event_attributes()
89 FAIL_IF(event_open_with_group(&event, leader.fd) == 0); event_attributes()
100 event_init(&event, 0x20002); event_attributes()
101 event_ebb_init(&event); event_attributes()
104 FAIL_IF(event_open_with_group(&event, leader.fd) == 0); event_attributes()
121 event_init(&event, 0x1001e); event_attributes()
122 event_leader_ebb_init(&event); event_attributes()
123 /* Expected to fail, not a task event */ event_attributes()
125 FAIL_IF(event_open_with_cpu(&event, 0) == 0); event_attributes()
cpu_event_pinned_vs_ebb_test.c
18 * Tests a pinned cpu event vs an EBB - in that order. The pinned cpu event
19 * should remain and the EBB event should fail to enable.
22 static int setup_cpu_event(struct event *event, int cpu) setup_cpu_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_cpu_event()
26 event->attr.pinned = 1; setup_cpu_event()
28 event->attr.exclude_kernel = 1; setup_cpu_event()
29 event->attr.exclude_hv = 1; setup_cpu_event()
30 event->attr.exclude_idle = 1; setup_cpu_event()
33 FAIL_IF(event_open_with_cpu(event, cpu)); setup_cpu_event()
34 FAIL_IF(event_enable(event)); setup_cpu_event()
42 struct event event; cpu_event_pinned_vs_ebb() local
61 /* We setup the cpu event first */ cpu_event_pinned_vs_ebb()
62 rc = setup_cpu_event(&event, cpu); cpu_event_pinned_vs_ebb()
68 /* Signal the child to install its EBB event and wait */ cpu_event_pinned_vs_ebb()
77 /* We expect it to fail to read the event */ cpu_event_pinned_vs_ebb()
80 FAIL_IF(event_disable(&event)); cpu_event_pinned_vs_ebb()
81 FAIL_IF(event_read(&event)); cpu_event_pinned_vs_ebb()
83 event_report(&event); cpu_event_pinned_vs_ebb()
85 /* The cpu event should have run */ cpu_event_pinned_vs_ebb()
86 FAIL_IF(event.result.value == 0); cpu_event_pinned_vs_ebb()
87 FAIL_IF(event.result.enabled != event.result.running); cpu_event_pinned_vs_ebb()
task_event_pinned_vs_ebb_test.c
18 * Tests a pinned per-task event vs an EBB - in that order. The pinned per-task
19 * event should prevent the EBB event from being enabled.
22 static int setup_child_event(struct event *event, pid_t child_pid) setup_child_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_child_event()
26 event->attr.pinned = 1; setup_child_event()
28 event->attr.exclude_kernel = 1; setup_child_event()
29 event->attr.exclude_hv = 1; setup_child_event()
30 event->attr.exclude_idle = 1; setup_child_event()
32 FAIL_IF(event_open_with_pid(event, child_pid)); setup_child_event()
33 FAIL_IF(event_enable(event)); setup_child_event()
41 struct event event; task_event_pinned_vs_ebb() local
56 /* We setup the task event first */ task_event_pinned_vs_ebb()
57 rc = setup_child_event(&event, pid); task_event_pinned_vs_ebb()
63 /* Signal the child to install its EBB event and wait */ task_event_pinned_vs_ebb()
72 /* We expect it to fail to read the event */ task_event_pinned_vs_ebb()
74 FAIL_IF(event_disable(&event)); task_event_pinned_vs_ebb()
75 FAIL_IF(event_read(&event)); task_event_pinned_vs_ebb()
77 event_report(&event); task_event_pinned_vs_ebb()
79 FAIL_IF(event.result.value == 0); task_event_pinned_vs_ebb()
84 FAIL_IF(event.result.enabled == 0); task_event_pinned_vs_ebb()
85 FAIL_IF(event.result.running == 0); task_event_pinned_vs_ebb()
cycles_test.c
17 struct event event; cycles() local
21 event_init_named(&event, 0x1001e, "cycles"); cycles()
22 event_leader_ebb_init(&event); cycles()
24 event.attr.exclude_kernel = 1; cycles()
25 event.attr.exclude_hv = 1; cycles()
26 event.attr.exclude_idle = 1; cycles()
28 FAIL_IF(event_open(&event)); cycles()
33 FAIL_IF(ebb_event_enable(&event)); cycles()
49 event_close(&event); cycles()
ebb_vs_cpu_event_test.c
18 * Tests an EBB vs a cpu event - in that order. The EBB should force the cpu
19 * event off the PMU.
22 static int setup_cpu_event(struct event *event, int cpu) setup_cpu_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_cpu_event()
26 event->attr.exclude_kernel = 1; setup_cpu_event()
27 event->attr.exclude_hv = 1; setup_cpu_event()
28 event->attr.exclude_idle = 1; setup_cpu_event()
31 FAIL_IF(event_open_with_cpu(event, cpu)); setup_cpu_event()
32 FAIL_IF(event_enable(event)); setup_cpu_event()
40 struct event event; ebb_vs_cpu_event() local
59 /* Signal the child to install its EBB event and wait */ ebb_vs_cpu_event()
62 /* Now try to install our CPU event */ ebb_vs_cpu_event()
63 rc = setup_cpu_event(&event, cpu); ebb_vs_cpu_event()
74 FAIL_IF(event_disable(&event)); ebb_vs_cpu_event()
75 FAIL_IF(event_read(&event)); ebb_vs_cpu_event()
77 event_report(&event); ebb_vs_cpu_event()
79 /* The cpu event may have run, but we don't expect 100% */ ebb_vs_cpu_event()
80 FAIL_IF(event.result.enabled >= event.result.running); ebb_vs_cpu_event()
task_event_vs_ebb_test.c
18 * Tests a per-task event vs an EBB - in that order. The EBB should push the
19 * per-task event off the PMU.
22 static int setup_child_event(struct event *event, pid_t child_pid) setup_child_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_child_event()
26 event->attr.exclude_kernel = 1; setup_child_event()
27 event->attr.exclude_hv = 1; setup_child_event()
28 event->attr.exclude_idle = 1; setup_child_event()
30 FAIL_IF(event_open_with_pid(event, child_pid)); setup_child_event()
31 FAIL_IF(event_enable(event)); setup_child_event()
39 struct event event; task_event_vs_ebb() local
54 /* We setup the task event first */ task_event_vs_ebb()
55 rc = setup_child_event(&event, pid); task_event_vs_ebb()
61 /* Signal the child to install its EBB event and wait */ task_event_vs_ebb()
70 /* The EBB event should push the task event off so the child should succeed */ task_event_vs_ebb()
72 FAIL_IF(event_disable(&event)); task_event_vs_ebb()
73 FAIL_IF(event_read(&event)); task_event_vs_ebb()
75 event_report(&event); task_event_vs_ebb()
77 /* The task event may have run, or not so we can't assert anything about it */ task_event_vs_ebb()
cpu_event_vs_ebb_test.c
18 * Tests a cpu event vs an EBB - in that order. The EBB should force the cpu
19 * event off the PMU.
22 static int setup_cpu_event(struct event *event, int cpu) setup_cpu_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_cpu_event()
26 event->attr.exclude_kernel = 1; setup_cpu_event()
27 event->attr.exclude_hv = 1; setup_cpu_event()
28 event->attr.exclude_idle = 1; setup_cpu_event()
31 FAIL_IF(event_open_with_cpu(event, cpu)); setup_cpu_event()
32 FAIL_IF(event_enable(event)); setup_cpu_event()
40 struct event event; cpu_event_vs_ebb() local
59 /* We setup the cpu event first */ cpu_event_vs_ebb()
60 rc = setup_cpu_event(&event, cpu); cpu_event_vs_ebb()
66 /* Signal the child to install its EBB event and wait */ cpu_event_vs_ebb()
78 FAIL_IF(event_disable(&event)); cpu_event_vs_ebb()
79 FAIL_IF(event_read(&event)); cpu_event_vs_ebb()
81 event_report(&event); cpu_event_vs_ebb()
83 /* The cpu event may have run */ cpu_event_vs_ebb()
instruction_count_test.c
25 static int do_count_loop(struct event *event, uint64_t instructions, do_count_loop() argument
45 event->result.value = ebb_state.stats.pmc_count[4-1]; do_count_loop()
47 difference = event->result.value - expected; do_count_loop()
48 percentage = (double)difference / event->result.value * 100; do_count_loop()
53 printf("Actual %llu\n", event->result.value); do_count_loop()
63 if (difference / event->result.value) do_count_loop()
70 static uint64_t determine_overhead(struct event *event) determine_overhead() argument
75 do_count_loop(event, 0, 0, false); determine_overhead()
76 overhead = event->result.value; determine_overhead()
79 do_count_loop(event, 0, 0, false); determine_overhead()
80 current = event->result.value; determine_overhead()
111 struct event event; instruction_count() local
116 event_init_named(&event, 0x400FA, "PM_RUN_INST_CMPL"); instruction_count()
117 event_leader_ebb_init(&event); instruction_count()
118 event.attr.exclude_kernel = 1; instruction_count()
119 event.attr.exclude_hv = 1; instruction_count()
120 event.attr.exclude_idle = 1; instruction_count()
122 FAIL_IF(event_open(&event)); instruction_count()
123 FAIL_IF(ebb_event_enable(&event)); instruction_count()
131 overhead = determine_overhead(&event); instruction_count()
135 FAIL_IF(do_count_loop(&event, 0x100000, overhead, true)); instruction_count()
138 FAIL_IF(do_count_loop(&event, 0xa00000, overhead, true)); instruction_count()
141 FAIL_IF(do_count_loop(&event, 0x6400000, overhead, true)); instruction_count()
144 FAIL_IF(do_count_loop(&event, 0x40000000, overhead, true)); instruction_count()
147 FAIL_IF(do_count_loop(&event, 0x400000000, overhead, true)); instruction_count()
150 FAIL_IF(do_count_loop(&event, 0x1000000000, overhead, true)); instruction_count()
153 FAIL_IF(do_count_loop(&event, 0x2000000000, overhead, true)); instruction_count()
156 event_close(&event); instruction_count()
no_handler_test.c
18 struct event event; no_handler_test() local
24 event_init_named(&event, 0x1001e, "cycles"); no_handler_test()
25 event_leader_ebb_init(&event); no_handler_test()
27 event.attr.exclude_kernel = 1; no_handler_test()
28 event.attr.exclude_hv = 1; no_handler_test()
29 event.attr.exclude_idle = 1; no_handler_test()
31 FAIL_IF(event_open(&event)); no_handler_test()
32 FAIL_IF(ebb_event_enable(&event)); no_handler_test()
41 /* Spin to make sure the event has time to overflow */ no_handler_test()
51 event_close(&event); no_handler_test()
pmc56_overflow_test.c
50 struct event event; pmc56_overflow() local
55 event_init(&event, 0x2001e); pmc56_overflow()
56 event_leader_ebb_init(&event); pmc56_overflow()
58 event.attr.exclude_kernel = 1; pmc56_overflow()
59 event.attr.exclude_hv = 1; pmc56_overflow()
60 event.attr.exclude_idle = 1; pmc56_overflow()
62 FAIL_IF(event_open(&event)); pmc56_overflow()
67 FAIL_IF(ebb_event_enable(&event)); pmc56_overflow()
85 event_close(&event); pmc56_overflow()
close_clears_pmcc_test.c
15 * Test that closing the EBB event clears MMCR0_PMCC, preventing further access
21 struct event event; close_clears_pmcc() local
25 event_init_named(&event, 0x1001e, "cycles"); close_clears_pmcc()
26 event_leader_ebb_init(&event); close_clears_pmcc()
28 FAIL_IF(event_open(&event)); close_clears_pmcc()
33 FAIL_IF(ebb_event_enable(&event)); close_clears_pmcc()
41 event_close(&event); close_clears_pmcc()
46 * that we have closed the event. We expect that we will. */ close_clears_pmcc()
ebb_on_child_test.c
19 * even though the event is enabled and running the child hasn't enabled the
30 /* Parent creates EBB event */ victim_child()
38 /* EBB event is enabled here */ victim_child()
47 struct event event; ebb_on_child() local
65 event_init_named(&event, 0x1001e, "cycles"); ebb_on_child()
66 event_leader_ebb_init(&event); ebb_on_child()
68 event.attr.exclude_kernel = 1; ebb_on_child()
69 event.attr.exclude_hv = 1; ebb_on_child()
70 event.attr.exclude_idle = 1; ebb_on_child()
72 FAIL_IF(event_open_with_pid(&event, pid)); ebb_on_child()
73 FAIL_IF(ebb_event_enable(&event)); ebb_on_child()
80 event_close(&event); ebb_on_child()
ebb_on_willing_child_test.c
19 * EBBs, which are then delivered to the child, even though the event is
27 /* Setup our EBB handler, before the EBB event is created */ victim_child()
54 struct event event; ebb_on_willing_child() local
73 event_init_named(&event, 0x1001e, "cycles"); ebb_on_willing_child()
74 event_leader_ebb_init(&event); ebb_on_willing_child()
76 event.attr.exclude_kernel = 1; ebb_on_willing_child()
77 event.attr.exclude_hv = 1; ebb_on_willing_child()
78 event.attr.exclude_idle = 1; ebb_on_willing_child()
80 FAIL_IF(event_open_with_pid(&event, pid)); ebb_on_willing_child()
81 FAIL_IF(ebb_event_enable(&event)); ebb_on_willing_child()
86 event_close(&event); ebb_on_willing_child()
back_to_back_ebbs_test.c
64 struct event event; back_to_back_ebbs() local
68 event_init_named(&event, 0x1001e, "cycles"); back_to_back_ebbs()
69 event_leader_ebb_init(&event); back_to_back_ebbs()
71 event.attr.exclude_kernel = 1; back_to_back_ebbs()
72 event.attr.exclude_hv = 1; back_to_back_ebbs()
73 event.attr.exclude_idle = 1; back_to_back_ebbs()
75 FAIL_IF(event_open(&event)); back_to_back_ebbs()
79 FAIL_IF(ebb_event_enable(&event)); back_to_back_ebbs()
98 event_close(&event); back_to_back_ebbs()
cycles_with_mmcr2_test.c
24 struct event event; cycles_with_mmcr2() local
31 event_init_named(&event, 0x1001e, "cycles"); cycles_with_mmcr2()
32 event_leader_ebb_init(&event); cycles_with_mmcr2()
34 event.attr.exclude_kernel = 1; cycles_with_mmcr2()
35 event.attr.exclude_hv = 1; cycles_with_mmcr2()
36 event.attr.exclude_idle = 1; cycles_with_mmcr2()
38 FAIL_IF(event_open(&event)); cycles_with_mmcr2()
44 FAIL_IF(ebb_event_enable(&event)); cycles_with_mmcr2()
78 event_close(&event); cycles_with_mmcr2()
multi_ebb_procs_test.c
32 struct event event; cycles_child() local
39 event_init_named(&event, 0x1001e, "cycles"); cycles_child()
40 event_leader_ebb_init(&event); cycles_child()
42 event.attr.exclude_kernel = 1; cycles_child()
43 event.attr.exclude_hv = 1; cycles_child()
44 event.attr.exclude_idle = 1; cycles_child()
46 FAIL_IF(event_open(&event)); cycles_child()
52 FAIL_IF(ebb_event_enable(&event)); cycles_child()
68 event_close(&event); cycles_child()
pmae_handling_test.c
59 struct event event; test_body() local
63 event_init_named(&event, 0x1001e, "cycles"); test_body()
64 event_leader_ebb_init(&event); test_body()
66 event.attr.exclude_kernel = 1; test_body()
67 event.attr.exclude_hv = 1; test_body()
68 event.attr.exclude_idle = 1; test_body()
70 FAIL_IF(event_open(&event)); test_body()
75 FAIL_IF(ebb_event_enable(&event)); test_body()
92 event_close(&event); test_body()
fork_cleanup_test.c
25 static struct event event; variable in typeref:struct:event
36 /* We can still read from the event, though it is on our parent */ child()
37 FAIL_IF(event_read(&event)); child()
49 event_init_named(&event, 0x1001e, "cycles"); fork_cleanup()
50 event_leader_ebb_init(&event); fork_cleanup()
52 FAIL_IF(event_open(&event)); fork_cleanup()
58 FAIL_IF(ebb_event_enable(&event)); fork_cleanup()
73 event_close(&event); fork_cleanup()
cycles_with_freeze_test.c
16 * hardware when the event overflows. We may take the EBB after we have set FC,
55 struct event event; cycles_with_freeze() local
61 event_init_named(&event, 0x1001e, "cycles"); cycles_with_freeze()
62 event_leader_ebb_init(&event); cycles_with_freeze()
64 event.attr.exclude_kernel = 1; cycles_with_freeze()
65 event.attr.exclude_hv = 1; cycles_with_freeze()
66 event.attr.exclude_idle = 1; cycles_with_freeze()
68 FAIL_IF(event_open(&event)); cycles_with_freeze()
72 FAIL_IF(ebb_event_enable(&event)); cycles_with_freeze()
108 event_close(&event); cycles_with_freeze()
reg.h
29 #define SPRN_BESCR 806 /* Branch event status & control register */
30 #define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */
31 #define SPRN_BESCRSU 801 /* Branch event status & control set upper */
32 #define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */
33 #define SPRN_BESCRRU 803 /* Branch event status & control REset upper */
lost_exception_test.c
24 struct event event; test_body() local
29 event_init_named(&event, 0x40002, "instructions"); test_body()
30 event_leader_ebb_init(&event); test_body()
32 event.attr.exclude_kernel = 1; test_body()
33 event.attr.exclude_hv = 1; test_body()
34 event.attr.exclude_idle = 1; test_body()
36 FAIL_IF(event_open(&event)); test_body()
41 FAIL_IF(ebb_event_enable(&event)); test_body()
84 event_close(&event); test_body()
ebb.h
9 #include "../event.h"
47 void event_leader_ebb_init(struct event *e);
48 void event_ebb_init(struct event *e);
49 void event_bhrb_init(struct event *e, unsigned ifm);
52 int ebb_event_enable(struct event *e);
58 void event_ebb_init(struct event *e);
59 void event_leader_ebb_init(struct event *e);
/linux-4.4.14/tools/testing/selftests/powerpc/pmu/
l3_bank_test.c
9 #include "event.h"
19 struct event event; l3_bank_test() local
26 event_init(&event, 0x84918F); l3_bank_test()
28 FAIL_IF(event_open(&event)); l3_bank_test()
33 event_read(&event); l3_bank_test()
34 event_report(&event); l3_bank_test()
36 FAIL_IF(event.result.running == 0); l3_bank_test()
37 FAIL_IF(event.result.enabled == 0); l3_bank_test()
39 event_close(&event); l3_bank_test()
event.h
15 struct event { struct
27 void event_init(struct event *e, u64 config);
28 void event_init_named(struct event *e, u64 config, char *name);
29 void event_init_opts(struct event *e, u64 config, int type, char *name);
30 int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd);
31 int event_open_with_group(struct event *e, int group_fd);
32 int event_open_with_pid(struct event *e, pid_t pid);
33 int event_open_with_cpu(struct event *e, int cpu);
34 int event_open(struct event *e);
35 void event_close(struct event *e);
36 int event_enable(struct event *e);
37 int event_disable(struct event *e);
38 int event_reset(struct event *e);
39 int event_read(struct event *e);
40 void event_report_justified(struct event *e, int name_width, int result_width);
41 void event_report(struct event *e);
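The prototypes above are the selftest suite's thin wrappers around perf_event_open(). A minimal usage sketch, assuming it is built inside tools/testing/selftests/powerpc/pmu/ so that "event.h" and its helper objects are available (the same flow appears in the l3_bank_test.c lines above); count_raw_event() is a hypothetical caller, not a file in the tree:

#include "event.h"

/* Hypothetical helper: count one raw PMU event over a workload. */
static int count_raw_event(u64 config)
{
	struct event event;

	event_init(&event, config);	/* raw (PERF_TYPE_RAW) event */
	if (event_open(&event))		/* wraps perf_event_open()   */
		return -1;

	/* ... run the workload to be measured here ... */

	event_read(&event);		/* fetch count/enabled/running */
	event_report(&event);		/* print the result            */
	event_close(&event);

	return 0;
}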
event.c
13 #include "event.h"
23 void event_init_opts(struct event *e, u64 config, int type, char *name) event_init_opts()
37 void event_init_named(struct event *e, u64 config, char *name) event_init_named()
42 void event_init(struct event *e, u64 config) event_init()
44 event_init_opts(e, config, PERF_TYPE_RAW, "event"); event_init()
52 int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd) event_open_with_options()
63 int event_open_with_group(struct event *e, int group_fd) event_open_with_group()
68 int event_open_with_pid(struct event *e, pid_t pid) event_open_with_pid()
73 int event_open_with_cpu(struct event *e, int cpu) event_open_with_cpu()
78 int event_open(struct event *e) event_open()
83 void event_close(struct event *e) event_close()
88 int event_enable(struct event *e) event_enable()
93 int event_disable(struct event *e) event_disable()
98 int event_reset(struct event *e) event_reset()
103 int event_read(struct event *e) event_read()
109 fprintf(stderr, "read error on event %p!\n", e); event_read()
116 void event_report_justified(struct event *e, int name_width, int result_width) event_report_justified()
128 void event_report(struct event *e) event_report()
per_event_excludes.c
15 #include "event.h"
20 * Test that per-event excludes work.
25 struct event *e, events[4]; per_event_excludes()
66 * The open here will fail if we don't have per event exclude support, per_event_excludes()
67 * because the second event has an incompatible set of exclude settings per_event_excludes()
74 * Even though the above will fail without per-event excludes we keep per_event_excludes()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/
event.c
22 #include <core/event.h>
26 nvkm_event_put(struct nvkm_event *event, u32 types, int index) nvkm_event_put() argument
28 assert_spin_locked(&event->refs_lock); nvkm_event_put()
31 if (--event->refs[index * event->types_nr + type] == 0) { nvkm_event_put()
32 if (event->func->fini) nvkm_event_put()
33 event->func->fini(event, 1 << type, index); nvkm_event_put()
39 nvkm_event_get(struct nvkm_event *event, u32 types, int index) nvkm_event_get() argument
41 assert_spin_locked(&event->refs_lock); nvkm_event_get()
44 if (++event->refs[index * event->types_nr + type] == 1) { nvkm_event_get()
45 if (event->func->init) nvkm_event_get()
46 event->func->init(event, 1 << type, index); nvkm_event_get()
52 nvkm_event_send(struct nvkm_event *event, u32 types, int index, nvkm_event_send() argument
58 if (!event->refs || WARN_ON(index >= event->index_nr)) nvkm_event_send()
61 spin_lock_irqsave(&event->list_lock, flags); nvkm_event_send()
62 list_for_each_entry(notify, &event->list, head) { nvkm_event_send()
64 if (event->func->send) { nvkm_event_send()
65 event->func->send(data, size, notify); nvkm_event_send()
71 spin_unlock_irqrestore(&event->list_lock, flags); nvkm_event_send()
75 nvkm_event_fini(struct nvkm_event *event) nvkm_event_fini() argument
77 if (event->refs) { nvkm_event_fini()
78 kfree(event->refs); nvkm_event_fini()
79 event->refs = NULL; nvkm_event_fini()
85 struct nvkm_event *event) nvkm_event_init()
87 event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr, nvkm_event_init()
89 if (!event->refs) nvkm_event_init()
92 event->func = func; nvkm_event_init()
93 event->types_nr = types_nr; nvkm_event_init()
94 event->index_nr = index_nr; nvkm_event_init()
95 spin_lock_init(&event->refs_lock); nvkm_event_init()
96 spin_lock_init(&event->list_lock); nvkm_event_init()
97 INIT_LIST_HEAD(&event->list); nvkm_event_init()
84 nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr, struct nvkm_event *event) nvkm_event_init() argument
notify.c
25 #include <core/event.h>
31 nvkm_event_put(notify->event, notify->types, notify->index); nvkm_notify_put_locked()
37 struct nvkm_event *event = notify->event; nvkm_notify_put() local
39 if (likely(event) && nvkm_notify_put()
41 spin_lock_irqsave(&event->refs_lock, flags); nvkm_notify_put()
43 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_put()
53 nvkm_event_get(notify->event, notify->types, notify->index); nvkm_notify_get_locked()
59 struct nvkm_event *event = notify->event; nvkm_notify_get() local
61 if (likely(event) && nvkm_notify_get()
63 spin_lock_irqsave(&event->refs_lock, flags); nvkm_notify_get()
65 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_get()
72 struct nvkm_event *event = notify->event; nvkm_notify_func() local
77 spin_lock_irqsave(&event->refs_lock, flags); nvkm_notify_func()
79 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_func()
93 struct nvkm_event *event = notify->event; nvkm_notify_send() local
96 assert_spin_locked(&event->list_lock); nvkm_notify_send()
99 spin_lock_irqsave(&event->refs_lock, flags); nvkm_notify_send()
101 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_send()
105 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_send()
121 if (notify->event) { nvkm_notify_fini()
123 spin_lock_irqsave(&notify->event->list_lock, flags); nvkm_notify_fini()
125 spin_unlock_irqrestore(&notify->event->list_lock, flags); nvkm_notify_fini()
127 notify->event = NULL; nvkm_notify_fini()
132 nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event, nvkm_notify_init() argument
139 if ((notify->event = event), event->refs) { nvkm_notify_init()
140 ret = event->func->ctor(object, data, size, notify); nvkm_notify_init()
155 spin_lock_irqsave(&event->list_lock, flags); nvkm_notify_init()
156 list_add_tail(&notify->head, &event->list); nvkm_notify_init()
157 spin_unlock_irqrestore(&event->list_lock, flags); nvkm_notify_init()
161 notify->event = NULL; nvkm_notify_init()
/linux-4.4.14/drivers/misc/ibmasm/
Makefile
6 event.o \
event.c
31 * ASM service processor event handling routines.
52 * Store the event in the circular event buffer, wake up any sleeping
53 * event readers.
60 struct ibmasm_event *event; ibmasm_receive_event() local
66 /* copy the event into the next slot in the circular buffer */ ibmasm_receive_event()
67 event = &buffer->events[buffer->next_index]; ibmasm_receive_event()
68 memcpy_fromio(event->data, data, data_size); ibmasm_receive_event()
69 event->data_size = data_size; ibmasm_receive_event()
70 event->serial_number = buffer->next_serial_number; ibmasm_receive_event()
87 * Called by event readers (initiated from user space through the file
89 * Sleeps until a new event is available.
94 struct ibmasm_event *event; ibmasm_get_next_event() local
110 event = &buffer->events[index]; ibmasm_get_next_event()
111 while (event->serial_number < reader->next_serial_number) { ibmasm_get_next_event()
113 event = &buffer->events[index]; ibmasm_get_next_event()
115 memcpy(reader->data, event->data, event->data_size); ibmasm_get_next_event()
116 reader->data_size = event->data_size; ibmasm_get_next_event()
117 reader->next_serial_number = event->serial_number + 1; ibmasm_get_next_event()
121 return event->data_size; ibmasm_get_next_event()
153 struct ibmasm_event *event; ibmasm_event_buffer_init() local
163 event = buffer->events; ibmasm_event_buffer_init()
164 for (i=0; i<IBMASM_NUM_EVENTS; i++, event++) ibmasm_event_buffer_init()
165 event->serial_number = 0; ibmasm_event_buffer_init()
/linux-4.4.14/drivers/net/wireless/ti/wl12xx/
Makefile
1 wl12xx-objs = main.o cmd.o acx.o debugfs.o scan.o event.o
/linux-4.4.14/drivers/net/wireless/ti/wl18xx/
Makefile
1 wl18xx-objs = main.o acx.o tx.o io.o debugfs.o scan.o cmd.o event.o
/linux-4.4.14/net/irda/
iriap_event.c
34 static void state_s_disconnect (struct iriap_cb *self, IRIAP_EVENT event,
36 static void state_s_connecting (struct iriap_cb *self, IRIAP_EVENT event,
38 static void state_s_call (struct iriap_cb *self, IRIAP_EVENT event,
41 static void state_s_make_call (struct iriap_cb *self, IRIAP_EVENT event,
43 static void state_s_calling (struct iriap_cb *self, IRIAP_EVENT event,
45 static void state_s_outstanding (struct iriap_cb *self, IRIAP_EVENT event,
47 static void state_s_replying (struct iriap_cb *self, IRIAP_EVENT event,
49 static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
51 static void state_s_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
54 static void state_r_disconnect (struct iriap_cb *self, IRIAP_EVENT event,
56 static void state_r_call (struct iriap_cb *self, IRIAP_EVENT event,
58 static void state_r_waiting (struct iriap_cb *self, IRIAP_EVENT event,
60 static void state_r_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
62 static void state_r_receiving (struct iriap_cb *self, IRIAP_EVENT event,
64 static void state_r_execute (struct iriap_cb *self, IRIAP_EVENT event,
66 static void state_r_returning (struct iriap_cb *self, IRIAP_EVENT event,
69 static void (*iriap_state[])(struct iriap_cb *self, IRIAP_EVENT event,
128 void iriap_do_client_event(struct iriap_cb *self, IRIAP_EVENT event, iriap_do_client_event() argument
134 (*iriap_state[ self->client_state]) (self, event, skb); iriap_do_client_event()
137 void iriap_do_call_event(struct iriap_cb *self, IRIAP_EVENT event, iriap_do_call_event() argument
143 (*iriap_state[ self->call_state]) (self, event, skb); iriap_do_call_event()
146 void iriap_do_server_event(struct iriap_cb *self, IRIAP_EVENT event, iriap_do_server_event() argument
152 (*iriap_state[ self->server_state]) (self, event, skb); iriap_do_server_event()
155 void iriap_do_r_connect_event(struct iriap_cb *self, IRIAP_EVENT event, iriap_do_r_connect_event() argument
161 (*iriap_state[ self->r_connect_state]) (self, event, skb); iriap_do_r_connect_event()
166 * Function state_s_disconnect (event, skb)
171 static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event, state_s_disconnect() argument
177 switch (event) { state_s_disconnect()
190 pr_debug("%s(), Unknown event %d\n", __func__, event); state_s_disconnect()
196 * Function state_s_connecting (self, event, skb)
201 static void state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event, state_s_connecting() argument
207 switch (event) { state_s_connecting()
222 pr_debug("%s(), Unknown event %d\n", __func__, event); state_s_connecting()
228 * Function state_s_call (self, event, skb)
232 * catches that event and clears up
234 static void state_s_call(struct iriap_cb *self, IRIAP_EVENT event, state_s_call() argument
239 switch (event) { state_s_call()
246 pr_debug("state_s_call: Unknown event %d\n", event); state_s_call()
252 * Function state_s_make_call (event, skb)
257 static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event, state_s_make_call() argument
264 switch (event) { state_s_make_call()
274 pr_debug("%s(), Unknown event %d\n", __func__, event); state_s_make_call()
280 * Function state_s_calling (event, skb)
285 static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event, state_s_calling() argument
292 * Function state_s_outstanding (event, skb)
297 static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event, state_s_outstanding() argument
302 switch (event) { state_s_outstanding()
310 pr_debug("%s(), Unknown event %d\n", __func__, event); state_s_outstanding()
316 * Function state_s_replying (event, skb)
320 static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, state_s_replying() argument
327 * Function state_s_wait_for_call (event, skb)
332 static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, state_s_wait_for_call() argument
340 * Function state_s_wait_active (event, skb)
345 static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event, state_s_wait_active() argument
358 * Function state_r_disconnect (self, event, skb)
363 static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, state_r_disconnect() argument
368 switch (event) { state_r_disconnect()
389 pr_debug("%s(), unknown event %d\n", __func__, event); state_r_disconnect()
395 * Function state_r_call (self, event, skb)
397 static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, state_r_call() argument
400 switch (event) { state_r_call()
407 pr_debug("%s(), unknown event!\n", __func__); state_r_call()
417 * Function state_r_waiting (self, event, skb)
419 static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event, state_r_waiting() argument
425 static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, state_r_wait_active() argument
432 * Function state_r_receiving (self, event, skb)
437 static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, state_r_receiving() argument
440 switch (event) { state_r_receiving()
447 pr_debug("%s(), unknown event!\n", __func__); state_r_receiving()
453 * Function state_r_execute (self, event, skb)
458 static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, state_r_execute() argument
465 switch (event) { state_r_execute()
480 pr_debug("%s(), unknown event!\n", __func__); state_r_execute()
485 static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event, state_r_returning() argument
488 pr_debug("%s(), event=%d\n", __func__, event); state_r_returning()
490 switch (event) { state_r_returning()
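These IRIAP handlers follow a table-driven pattern: iriap_state[] holds one handler per state, and iriap_do_client_event() dispatches through it with the current state as the index. A self-contained illustration of that dispatch style follows; all names are hypothetical and only sketch the pattern, they are not the kernel's:

#include <stdio.h>

enum fsm_state { S_DISCONNECT, S_CONNECTING, S_CALL, S_MAX };
enum fsm_event { EV_CONNECT_REQ, EV_CONNECT_CONFIRM, EV_DISCONNECT_IND };

struct fsm {
	enum fsm_state state;
};

typedef void (*state_handler_t)(struct fsm *self, enum fsm_event event);

static void state_disconnect(struct fsm *self, enum fsm_event event)
{
	if (event == EV_CONNECT_REQ)
		self->state = S_CONNECTING;	/* start connecting */
	else
		printf("unknown event %d in DISCONNECT\n", event);
}

static void state_connecting(struct fsm *self, enum fsm_event event)
{
	if (event == EV_CONNECT_CONFIRM)
		self->state = S_CALL;		/* connection is up */
	else if (event == EV_DISCONNECT_IND)
		self->state = S_DISCONNECT;
}

static void state_call(struct fsm *self, enum fsm_event event)
{
	if (event == EV_DISCONNECT_IND)
		self->state = S_DISCONNECT;
}

/* One entry per state, mirroring the iriap_state[] table above. */
static state_handler_t fsm_table[S_MAX] = {
	[S_DISCONNECT] = state_disconnect,
	[S_CONNECTING] = state_connecting,
	[S_CALL]       = state_call,
};

static void fsm_do_event(struct fsm *self, enum fsm_event event)
{
	(*fsm_table[self->state])(self, event);
}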
/linux-4.4.14/arch/powerpc/perf/
core-fsl-emb.c
2 * Performance event support - Freescale Embedded Performance Monitor
27 struct perf_event *event[MAX_HWEVENTS]; member in struct:cpu_hw_events
179 static void fsl_emb_pmu_read(struct perf_event *event) fsl_emb_pmu_read() argument
183 if (event->hw.state & PERF_HES_STOPPED) fsl_emb_pmu_read()
192 prev = local64_read(&event->hw.prev_count); fsl_emb_pmu_read()
194 val = read_pmc(event->hw.idx); fsl_emb_pmu_read()
195 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); fsl_emb_pmu_read()
199 local64_add(delta, &event->count); fsl_emb_pmu_read()
200 local64_sub(delta, &event->hw.period_left); fsl_emb_pmu_read()
272 struct perf_event *event; collect_events() local
280 list_for_each_entry(event, &group->sibling_list, group_entry) { collect_events()
281 if (!is_software_event(event) && collect_events()
282 event->state != PERF_EVENT_STATE_OFF) { collect_events()
285 ctrs[n] = event; collect_events()
293 static int fsl_emb_pmu_add(struct perf_event *event, int flags) fsl_emb_pmu_add() argument
301 perf_pmu_disable(event->pmu); fsl_emb_pmu_add()
304 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) fsl_emb_pmu_add()
312 if (cpuhw->event[i]) fsl_emb_pmu_add()
321 event->hw.idx = i; fsl_emb_pmu_add()
322 cpuhw->event[i] = event; fsl_emb_pmu_add()
326 if (event->hw.sample_period) { fsl_emb_pmu_add()
327 s64 left = local64_read(&event->hw.period_left); fsl_emb_pmu_add()
331 local64_set(&event->hw.prev_count, val); fsl_emb_pmu_add()
334 event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; fsl_emb_pmu_add()
337 event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE); fsl_emb_pmu_add()
341 perf_event_update_userpage(event); fsl_emb_pmu_add()
343 write_pmlcb(i, event->hw.config >> 32); fsl_emb_pmu_add()
344 write_pmlca(i, event->hw.config_base); fsl_emb_pmu_add()
349 perf_pmu_enable(event->pmu); fsl_emb_pmu_add()
354 static void fsl_emb_pmu_del(struct perf_event *event, int flags) fsl_emb_pmu_del() argument
357 int i = event->hw.idx; fsl_emb_pmu_del()
359 perf_pmu_disable(event->pmu); fsl_emb_pmu_del()
363 fsl_emb_pmu_read(event); fsl_emb_pmu_del()
367 WARN_ON(event != cpuhw->event[event->hw.idx]); fsl_emb_pmu_del()
373 cpuhw->event[i] = NULL; fsl_emb_pmu_del()
374 event->hw.idx = -1; fsl_emb_pmu_del()
377 * TODO: if at least one restricted event exists, and we fsl_emb_pmu_del()
380 * a non-restricted event, migrate that event to the fsl_emb_pmu_del()
387 perf_pmu_enable(event->pmu); fsl_emb_pmu_del()
391 static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) fsl_emb_pmu_start() argument
397 if (event->hw.idx < 0 || !event->hw.sample_period) fsl_emb_pmu_start()
400 if (!(event->hw.state & PERF_HES_STOPPED)) fsl_emb_pmu_start()
404 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); fsl_emb_pmu_start()
407 perf_pmu_disable(event->pmu); fsl_emb_pmu_start()
409 event->hw.state = 0; fsl_emb_pmu_start()
410 left = local64_read(&event->hw.period_left); fsl_emb_pmu_start()
414 write_pmc(event->hw.idx, val); fsl_emb_pmu_start()
416 perf_event_update_userpage(event); fsl_emb_pmu_start()
417 perf_pmu_enable(event->pmu); fsl_emb_pmu_start()
421 static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) fsl_emb_pmu_stop() argument
425 if (event->hw.idx < 0 || !event->hw.sample_period) fsl_emb_pmu_stop()
428 if (event->hw.state & PERF_HES_STOPPED) fsl_emb_pmu_stop()
432 perf_pmu_disable(event->pmu); fsl_emb_pmu_stop()
434 fsl_emb_pmu_read(event); fsl_emb_pmu_stop()
435 event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; fsl_emb_pmu_stop()
436 write_pmc(event->hw.idx, 0); fsl_emb_pmu_stop()
438 perf_event_update_userpage(event); fsl_emb_pmu_stop()
439 perf_pmu_enable(event->pmu); fsl_emb_pmu_stop()
446 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
486 static int fsl_emb_pmu_event_init(struct perf_event *event) fsl_emb_pmu_event_init() argument
501 switch (event->attr.type) { fsl_emb_pmu_event_init()
503 ev = event->attr.config; fsl_emb_pmu_event_init()
510 err = hw_perf_cache_event(event->attr.config, &ev); fsl_emb_pmu_event_init()
516 ev = event->attr.config; fsl_emb_pmu_event_init()
523 event->hw.config = ppmu->xlate_event(ev); fsl_emb_pmu_event_init()
524 if (!(event->hw.config & FSL_EMB_EVENT_VALID)) fsl_emb_pmu_event_init()
529 * other hardware events in the group. We assume the event fsl_emb_pmu_event_init()
533 if (event->group_leader != event) { fsl_emb_pmu_event_init()
534 n = collect_events(event->group_leader, fsl_emb_pmu_event_init()
540 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { fsl_emb_pmu_event_init()
551 event->hw.idx = -1; fsl_emb_pmu_event_init()
553 event->hw.config_base = PMLCA_CE | PMLCA_FCM1 | fsl_emb_pmu_event_init()
556 if (event->attr.exclude_user) fsl_emb_pmu_event_init()
557 event->hw.config_base |= PMLCA_FCU; fsl_emb_pmu_event_init()
558 if (event->attr.exclude_kernel) fsl_emb_pmu_event_init()
559 event->hw.config_base |= PMLCA_FCS; fsl_emb_pmu_event_init()
560 if (event->attr.exclude_idle) fsl_emb_pmu_event_init()
563 event->hw.last_period = event->hw.sample_period; fsl_emb_pmu_event_init()
564 local64_set(&event->hw.period_left, event->hw.last_period); fsl_emb_pmu_event_init()
585 event->destroy = hw_perf_event_destroy; fsl_emb_pmu_event_init()
606 static void record_and_restart(struct perf_event *event, unsigned long val, record_and_restart() argument
609 u64 period = event->hw.sample_period; record_and_restart()
613 if (event->hw.state & PERF_HES_STOPPED) { record_and_restart()
614 write_pmc(event->hw.idx, 0); record_and_restart()
619 prev = local64_read(&event->hw.prev_count); record_and_restart()
621 local64_add(delta, &event->count); record_and_restart()
624 * See if the total period for this event has expired, record_and_restart()
628 left = local64_read(&event->hw.period_left) - delta; record_and_restart()
635 event->hw.last_period = event->hw.sample_period; record_and_restart()
641 write_pmc(event->hw.idx, val); record_and_restart()
642 local64_set(&event->hw.prev_count, val); record_and_restart()
643 local64_set(&event->hw.period_left, left); record_and_restart()
644 perf_event_update_userpage(event); record_and_restart()
652 perf_sample_data_init(&data, 0, event->hw.last_period); record_and_restart()
654 if (perf_event_overflow(event, &data, regs)) record_and_restart()
655 fsl_emb_pmu_stop(event, 0); record_and_restart()
663 struct perf_event *event; perf_event_interrupt() local
675 event = cpuhw->event[i]; perf_event_interrupt()
679 if (event) { perf_event_interrupt()
680 /* event has overflowed */ perf_event_interrupt()
682 record_and_restart(event, val, regs); perf_event_interrupt()
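fsl_emb_pmu_read() above accumulates a wrapping 32-bit hardware count into a 64-bit software count by re-reading the counter until the new snapshot can be swapped into prev_count atomically. A rough user-space analogue of that pattern, where C11 atomics stand in for local64_t and read_hw_counter() is a hypothetical stub rather than a kernel interface:

#include <stdatomic.h>
#include <stdint.h>

struct sw_event {
	_Atomic uint64_t prev_count;	/* last raw counter snapshot */
	_Atomic uint64_t count;		/* accumulated 64-bit total  */
};

/* Hypothetical stand-in for read_pmc(): pretend the hardware exposes
 * a free-running 32-bit counter. */
static uint32_t read_hw_counter(void)
{
	static uint32_t fake;
	return fake += 1000;
}

static void sw_event_read(struct sw_event *e)
{
	uint64_t prev, val, delta;

	do {
		prev = atomic_load(&e->prev_count);
		val = read_hw_counter();
		/* retry if another reader updated prev_count meanwhile */
	} while (!atomic_compare_exchange_weak(&e->prev_count, &prev, val));

	delta = (val - prev) & 0xfffffffful;	/* handle 32-bit wrap */
	atomic_fetch_add(&e->count, delta);
}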
hv-gpci.c
38 * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h
161 * we verify offset and length are within the zeroed buffer at event single_gpci_request()
172 static u64 h_gpci_get_value(struct perf_event *event) h_gpci_get_value() argument
175 unsigned long ret = single_gpci_request(event_get_request(event), h_gpci_get_value()
176 event_get_starting_index(event), h_gpci_get_value()
177 event_get_secondary_index(event), h_gpci_get_value()
178 event_get_counter_info_version(event), h_gpci_get_value()
179 event_get_offset(event), h_gpci_get_value()
180 event_get_length(event), h_gpci_get_value()
187 static void h_gpci_event_update(struct perf_event *event) h_gpci_event_update() argument
190 u64 now = h_gpci_get_value(event); h_gpci_event_update()
191 prev = local64_xchg(&event->hw.prev_count, now); h_gpci_event_update()
192 local64_add(now - prev, &event->count); h_gpci_event_update()
195 static void h_gpci_event_start(struct perf_event *event, int flags) h_gpci_event_start() argument
197 local64_set(&event->hw.prev_count, h_gpci_get_value(event)); h_gpci_event_start()
200 static void h_gpci_event_stop(struct perf_event *event, int flags) h_gpci_event_stop() argument
202 h_gpci_event_update(event); h_gpci_event_stop()
205 static int h_gpci_event_add(struct perf_event *event, int flags) h_gpci_event_add() argument
208 h_gpci_event_start(event, flags); h_gpci_event_add()
213 static int h_gpci_event_init(struct perf_event *event) h_gpci_event_init() argument
218 /* Not our event */ h_gpci_event_init()
219 if (event->attr.type != event->pmu->type) h_gpci_event_init()
223 if (event->attr.config2) { h_gpci_event_init()
229 if (event->attr.exclude_user || h_gpci_event_init()
230 event->attr.exclude_kernel || h_gpci_event_init()
231 event->attr.exclude_hv || h_gpci_event_init()
232 event->attr.exclude_idle || h_gpci_event_init()
233 event->attr.exclude_host || h_gpci_event_init()
234 event->attr.exclude_guest) h_gpci_event_init()
238 if (has_branch_stack(event)) h_gpci_event_init()
241 length = event_get_length(event); h_gpci_event_init()
248 if ((event_get_offset(event) + length) > GPCI_MAX_DATA_BYTES) { h_gpci_event_init()
250 (size_t)event_get_offset(event) + length, h_gpci_event_init()
256 if (single_gpci_request(event_get_request(event), h_gpci_event_init()
257 event_get_starting_index(event), h_gpci_event_init()
258 event_get_secondary_index(event), h_gpci_event_init()
259 event_get_counter_info_version(event), h_gpci_event_init()
260 event_get_offset(event), h_gpci_event_init()
power5+-pmu.c
18 * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
25 #define PM_BYTE_SH 12 /* Byte number of event bus to use */
29 #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */
106 * 24-27: Byte 0 event source 0x0f00_0000
107 * Encoding as for the event code
110 * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
136 static int power5p_get_constraint(u64 event, unsigned long *maskp, power5p_get_constraint() argument
143 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5p_get_constraint()
150 if (pmc >= 5 && !(event == 0x500009 || event == 0x600005)) power5p_get_constraint()
153 if (event & PM_BUSEVENT_MSK) { power5p_get_constraint()
154 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power5p_get_constraint()
161 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power5p_get_constraint()
170 bit = event & 7; power5p_get_constraint()
174 value |= (unsigned long)((event >> PM_GRS_SH) & fmask) power5p_get_constraint()
191 static int power5p_limited_pmc_event(u64 event) power5p_limited_pmc_event() argument
193 int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5p_limited_pmc_event()
198 #define MAX_ALT 3 /* at most 3 alternatives for any event */
218 static int find_alternative(unsigned int event) find_alternative() argument
223 if (event < event_alternatives[i][0]) find_alternative()
226 if (event == event_alternatives[i][j]) find_alternative()
240 * Some direct events for decodes of event bus byte 3 have alternative
242 * event code for those that do, or -1 otherwise. This also handles
245 static s64 find_alternative_bdecode(u64 event) find_alternative_bdecode() argument
249 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; find_alternative_bdecode()
253 pp = event & PM_PMCSEL_MSK; find_alternative_bdecode()
256 return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | find_alternative_bdecode()
264 return event + (2 << PM_PMC_SH) + (0x2e - 0x0d); find_alternative_bdecode()
266 return event - (2 << PM_PMC_SH) - (0x2e - 0x0d); find_alternative_bdecode()
268 /* alternative add event encodings */ find_alternative_bdecode()
270 return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) | find_alternative_bdecode()
276 static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[]) power5p_get_alternatives() argument
282 alt[0] = event; power5p_get_alternatives()
284 nlim = power5p_limited_pmc_event(event); power5p_get_alternatives()
285 i = find_alternative(event); power5p_get_alternatives()
289 if (ae && ae != event) power5p_get_alternatives()
294 ae = find_alternative_bdecode(event); power5p_get_alternatives()
308 * we never end up with more than 3 alternatives for any event. power5p_get_alternatives()
360 * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
402 * Returns 1 if event counts things relating to marked instructions
405 static int power5p_marked_instr_event(u64 event) power5p_marked_instr_event() argument
411 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5p_marked_instr_event()
412 psel = event & PM_PMCSEL_MSK; power5p_marked_instr_event()
436 if (!(event & PM_BUSEVENT_MSK) || bit == -1) power5p_marked_instr_event()
439 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power5p_marked_instr_event()
440 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power5p_marked_instr_event()
454 static int power5p_compute_mmcr(u64 event[], int n_ev, power5p_compute_mmcr() argument
474 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power5p_compute_mmcr()
482 if (event[i] & PM_BUSEVENT_MSK) { power5p_compute_mmcr()
483 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power5p_compute_mmcr()
484 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; power5p_compute_mmcr()
552 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power5p_compute_mmcr()
553 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power5p_compute_mmcr()
554 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; power5p_compute_mmcr()
555 psel = event[i] & PM_PMCSEL_MSK; power5p_compute_mmcr()
556 isbus = event[i] & PM_BUSEVENT_MSK; power5p_compute_mmcr()
558 /* Bus event or any-PMC direct event */ power5p_compute_mmcr()
567 /* Direct event */ power5p_compute_mmcr()
579 grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; power5p_compute_mmcr()
582 if (power5p_marked_instr_event(event[i])) power5p_compute_mmcr()
623 * are event codes.
hv-24x7.c
203 * Otherwise return the address of the byte just following the event.
284 /* chip event data always yeilds a single event, core yeilds multiple */
287 static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain) event_fmt() argument
303 be16_to_cpu(event->event_counter_offs) + event_fmt()
304 be16_to_cpu(event->event_group_record_offs), event_fmt()
384 struct hv_24x7_event_data *event, event_to_attr()
394 pr_warn("catalog event %u has invalid domain %u\n", event_to_attr()
399 val = event_fmt(event, domain); event_to_attr()
404 ev_name = event_name(event, &event_name_len); event_to_attr()
427 static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event, event_to_desc_attr() argument
431 char *name = event_name(event, &nl); event_to_desc_attr()
432 char *desc = event_desc(event, &dl); event_to_desc_attr()
442 event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce) event_to_long_desc_attr() argument
445 char *name = event_name(event, &nl); event_to_long_desc_attr()
446 char *desc = event_long_desc(event, &dl); event_to_long_desc_attr()
456 struct hv_24x7_event_data *event, int nonce) event_data_to_attrs()
460 switch (event->domain) { event_data_to_attrs()
462 *attrs = event_to_attr(ix, event, event->domain, nonce); event_data_to_attrs()
466 attrs[i] = event_to_attr(ix, event, core_domains[i], event_data_to_attrs()
469 pr_warn("catalog event %u: individual attr %u " event_data_to_attrs()
478 pr_warn("catalog event %u: domain %u is not allowed in the " event_data_to_attrs()
479 "catalog\n", ix, event->domain); event_data_to_attrs()
484 static size_t event_to_attr_ct(struct hv_24x7_event_data *event) event_to_attr_ct() argument
486 switch (event->domain) { event_to_attr_ct()
559 pr_info("found a duplicate event %.*s, ct=%u\n", nl, event_uniq_add()
598 * ensure the event structure's sizes are self consistent and don't cause us to
599 * read outside of the event
601 * On success, return the event length in bytes.
604 static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event, catalog_event_len_validate() argument
617 pr_devel("catalog event data has %zu bytes of padding after last event\n", catalog_event_len_validate()
622 if (!event_fixed_portion_is_within(event, end)) { catalog_event_len_validate()
623 pr_warn("event %zu fixed portion is not within range\n", catalog_event_len_validate()
628 ev_len = be16_to_cpu(event->length); catalog_event_len_validate()
631 pr_info("event %zu has length %zu not divisible by 16: event=%pK\n", catalog_event_len_validate()
632 event_idx, ev_len, event); catalog_event_len_validate()
634 ev_end = (__u8 *)event + ev_len; catalog_event_len_validate()
636 pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n", catalog_event_len_validate()
642 calc_ev_end = event_end(event, end); catalog_event_len_validate()
644 pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n", catalog_event_len_validate()
645 event_idx, event_data_bytes, event, end, catalog_event_len_validate()
651 pr_warn("event %zu exceeds it's own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n", catalog_event_len_validate()
652 event_idx, event, ev_end, offset, calc_ev_end); catalog_event_len_validate()
677 struct hv_24x7_event_data *event; create_events_from_catalog() local
714 pr_err("invalid event data offs %zu and/or len %zu\n", create_events_from_catalog()
721 pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n", create_events_from_catalog()
739 * event data can span several pages, events can cross between these create_events_from_catalog()
744 pr_err("could not allocate event data\n"); create_events_from_catalog()
763 pr_err("failed to get event data in page %zu\n", create_events_from_catalog()
774 for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0; create_events_from_catalog()
776 event_idx++, event = (void *)event + ev_len) { create_events_from_catalog()
777 size_t offset = (void *)event - (void *)event_data; create_events_from_catalog()
781 ev_len = catalog_event_len_validate(event, event_idx, create_events_from_catalog()
788 name = event_name(event, &nl); create_events_from_catalog()
790 if (event->event_group_record_len == 0) { create_events_from_catalog()
791 pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n", create_events_from_catalog()
797 if (!catalog_entry_domain_is_valid(event->domain)) { create_events_from_catalog()
798 pr_info("event %zu (%.*s) has invalid domain %d\n", create_events_from_catalog()
799 event_idx, nl, name, event->domain); create_events_from_catalog()
804 attr_max += event_to_attr_ct(event); create_events_from_catalog()
809 pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n", create_events_from_catalog()
834 event = event_data, event_idx = 0; create_events_from_catalog()
836 event_idx++, ev_len = be16_to_cpu(event->length), create_events_from_catalog()
837 event = (void *)event + ev_len) { create_events_from_catalog()
845 if (event->event_group_record_len == 0) create_events_from_catalog()
847 if (!catalog_entry_domain_is_valid(event->domain)) create_events_from_catalog()
850 name = event_name(event, &nl); create_events_from_catalog()
851 nonce = event_uniq_add(&ev_uniq, name, nl, event->domain); create_events_from_catalog()
853 event, nonce); create_events_from_catalog()
855 pr_warn("event %zu (%.*s) creation failure, skipping\n", create_events_from_catalog()
860 event_descs[desc_ct] = event_to_desc_attr(event, nonce); create_events_from_catalog()
864 event_to_long_desc_attr(event, nonce); create_events_from_catalog()
870 pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n", create_events_from_catalog()
1073 * Add the given @event to the next slot in the 24x7 request_buffer.
1079 static int add_event_to_24x7_request(struct perf_event *event, add_event_to_24x7_request() argument
1092 if (is_physical_domain(event_get_domain(event))) add_event_to_24x7_request()
1093 idx = event_get_core(event); add_event_to_24x7_request()
1095 idx = event_get_vcpu(event); add_event_to_24x7_request()
1100 req->performance_domain = event_get_domain(event); add_event_to_24x7_request()
1102 req->data_offset = cpu_to_be32(event_get_offset(event)); add_event_to_24x7_request()
1103 req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)), add_event_to_24x7_request()
1111 static unsigned long single_24x7_request(struct perf_event *event, u64 *count) single_24x7_request() argument
1125 ret = add_event_to_24x7_request(event, request_buffer); single_24x7_request()
1145 static int h_24x7_event_init(struct perf_event *event) h_24x7_event_init() argument
1152 /* Not our event */ h_24x7_event_init()
1153 if (event->attr.type != event->pmu->type) h_24x7_event_init()
1157 if (event_get_reserved1(event) || h_24x7_event_init()
1158 event_get_reserved2(event) || h_24x7_event_init()
1159 event_get_reserved3(event)) { h_24x7_event_init()
1161 event->attr.config, h_24x7_event_init()
1162 event_get_reserved1(event), h_24x7_event_init()
1163 event->attr.config1, h_24x7_event_init()
1164 event_get_reserved2(event), h_24x7_event_init()
1165 event->attr.config2, h_24x7_event_init()
1166 event_get_reserved3(event)); h_24x7_event_init()
1171 if (event->attr.exclude_user || h_24x7_event_init()
1172 event->attr.exclude_kernel || h_24x7_event_init()
1173 event->attr.exclude_hv || h_24x7_event_init()
1174 event->attr.exclude_idle || h_24x7_event_init()
1175 event->attr.exclude_host || h_24x7_event_init()
1176 event->attr.exclude_guest) h_24x7_event_init()
1180 if (has_branch_stack(event)) h_24x7_event_init()
1184 if (event_get_offset(event) % 8) { h_24x7_event_init()
1190 domain = event_get_domain(event); h_24x7_event_init()
1204 (event_get_lpar(event) != event_get_lpar_max()))) { h_24x7_event_init()
1207 event_get_lpar(event)); h_24x7_event_init()
1211 /* see if the event complains */ h_24x7_event_init()
1212 if (single_24x7_request(event, &ct)) { h_24x7_event_init()
1220 static u64 h_24x7_get_value(struct perf_event *event) h_24x7_get_value() argument
1224 ret = single_24x7_request(event, &ct); h_24x7_get_value()
1226 /* We checked this in event init, shouldn't fail here... */ h_24x7_get_value()
1232 static void update_event_count(struct perf_event *event, u64 now) update_event_count() argument
1236 prev = local64_xchg(&event->hw.prev_count, now); update_event_count()
1237 local64_add(now - prev, &event->count); update_event_count()
1240 static void h_24x7_event_read(struct perf_event *event) h_24x7_event_read() argument
1265 ret = add_event_to_24x7_request(event, request_buffer); h_24x7_event_read()
1270 * Assoicate the event with the HCALL request index, h_24x7_event_read()
1276 h24x7hw->events[i] = event; h_24x7_event_read()
1282 now = h_24x7_get_value(event); h_24x7_event_read()
1283 update_event_count(event, now); h_24x7_event_read()
1287 static void h_24x7_event_start(struct perf_event *event, int flags) h_24x7_event_start() argument
1290 local64_set(&event->hw.prev_count, h_24x7_get_value(event)); h_24x7_event_start()
1293 static void h_24x7_event_stop(struct perf_event *event, int flags) h_24x7_event_stop() argument
1295 h_24x7_event_read(event); h_24x7_event_stop()
1298 static int h_24x7_event_add(struct perf_event *event, int flags) h_24x7_event_add() argument
1301 h_24x7_event_start(event, flags); h_24x7_event_add()
1352 * the event counts.
1359 struct perf_event *event; h_24x7_event_commit_txn() local
1386 /* Update event counts from hcall */ h_24x7_event_commit_txn()
1390 event = h24x7hw->events[i]; h_24x7_event_commit_txn()
1392 update_event_count(event, count); h_24x7_event_commit_txn()
383 event_to_attr(unsigned ix, struct hv_24x7_event_data *event, unsigned domain, int nonce) event_to_attr() argument
455 event_data_to_attrs(unsigned ix, struct attribute **attrs, struct hv_24x7_event_data *event, int nonce) event_data_to_attrs() argument
core-book3s.c
2 * Performance event support - powerpc architecture code
36 struct perf_event *event[MAX_HWEVENTS]; member in struct:cpu_hw_events
116 static bool is_ebb_event(struct perf_event *event) { return false; } ebb_event_check() argument
117 static int ebb_event_check(struct perf_event *event) { return 0; } ebb_event_add() argument
118 static void ebb_event_add(struct perf_event *event) { } ebb_switch_out() argument
125 static inline void power_pmu_bhrb_enable(struct perf_event *event) {} power_pmu_bhrb_disable() argument
126 static inline void power_pmu_bhrb_disable(struct perf_event *event) {} power_pmu_sched_task() argument
280 * If this isn't a PMU exception (eg a software event) the SIAR is perf_read_regs()
283 * If it is a marked event use the SIAR. perf_read_regs()
349 static void power_pmu_bhrb_enable(struct perf_event *event) power_pmu_bhrb_enable() argument
357 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { power_pmu_bhrb_enable()
359 cpuhw->bhrb_context = event->ctx; power_pmu_bhrb_enable()
362 perf_sched_cb_inc(event->ctx->pmu); power_pmu_bhrb_enable()
365 static void power_pmu_bhrb_disable(struct perf_event *event) power_pmu_bhrb_disable() argument
374 perf_sched_cb_dec(event->ctx->pmu); power_pmu_bhrb_disable()
500 static bool is_ebb_event(struct perf_event *event) is_ebb_event() argument
505 * use bit 63 of the event code for something else if they wish. is_ebb_event()
508 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); is_ebb_event()
511 static int ebb_event_check(struct perf_event *event) ebb_event_check() argument
513 struct perf_event *leader = event->group_leader; ebb_event_check()
516 if (is_ebb_event(leader) != is_ebb_event(event)) ebb_event_check()
519 if (is_ebb_event(event)) { ebb_event_check()
520 if (!(event->attach_state & PERF_ATTACH_TASK)) ebb_event_check()
526 if (event->attr.freq || ebb_event_check()
527 event->attr.inherit || ebb_event_check()
528 event->attr.sample_type || ebb_event_check()
529 event->attr.sample_period || ebb_event_check()
530 event->attr.enable_on_exec) ebb_event_check()
537 static void ebb_event_add(struct perf_event *event) ebb_event_add() argument
539 if (!is_ebb_event(event) || current->thread.used_ebb) ebb_event_add()
543 * IFF this is the first time we've added an EBB event, set ebb_event_add()
945 struct perf_event *event; check_excludes() local
948 * If the PMU we're on supports per event exclude settings then we check_excludes()
950 * per event exclude and limited PMCs. check_excludes()
965 event = ctrs[i]; check_excludes()
967 eu = event->attr.exclude_user; check_excludes()
968 ek = event->attr.exclude_kernel; check_excludes()
969 eh = event->attr.exclude_hv; check_excludes()
971 } else if (event->attr.exclude_user != eu || check_excludes()
972 event->attr.exclude_kernel != ek || check_excludes()
973 event->attr.exclude_hv != eh) { check_excludes()
1005 static void power_pmu_read(struct perf_event *event) power_pmu_read() argument
1009 if (event->hw.state & PERF_HES_STOPPED) power_pmu_read()
1012 if (!event->hw.idx) power_pmu_read()
1015 if (is_ebb_event(event)) { power_pmu_read()
1016 val = read_pmc(event->hw.idx); power_pmu_read()
1017 local64_set(&event->hw.prev_count, val); power_pmu_read()
1027 prev = local64_read(&event->hw.prev_count); power_pmu_read()
1029 val = read_pmc(event->hw.idx); power_pmu_read()
1033 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); power_pmu_read()
1035 local64_add(delta, &event->count); power_pmu_read()
1047 prev = local64_read(&event->hw.period_left); power_pmu_read()
1051 } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); power_pmu_read()
1057 * us if `event' is using such a PMC.
1068 struct perf_event *event; freeze_limited_counters() local
1073 event = cpuhw->limited_counter[i]; freeze_limited_counters()
1074 if (!event->hw.idx) freeze_limited_counters()
1076 val = (event->hw.idx == 5) ? pmc5 : pmc6; freeze_limited_counters()
1077 prev = local64_read(&event->hw.prev_count); freeze_limited_counters()
1078 event->hw.idx = 0; freeze_limited_counters()
1081 local64_add(delta, &event->count); freeze_limited_counters()
1088 struct perf_event *event; thaw_limited_counters() local
1093 event = cpuhw->limited_counter[i]; thaw_limited_counters()
1094 event->hw.idx = cpuhw->limited_hwidx[i]; thaw_limited_counters()
1095 val = (event->hw.idx == 5) ? pmc5 : pmc6; thaw_limited_counters()
1096 prev = local64_read(&event->hw.prev_count); thaw_limited_counters()
1098 local64_set(&event->hw.prev_count, val); thaw_limited_counters()
1099 perf_event_update_userpage(event); thaw_limited_counters()
1127 * events, we first write MMCR0 with the event overflow write_mmcr0()
1142 * Write the full MMCR0 including the event overflow interrupt write_mmcr0()
1213 struct perf_event *event; power_pmu_enable() local
1241 * flag set, or not set, so we can just check a single event. Also we power_pmu_enable()
1242 * know we have at least one event. power_pmu_enable()
1244 ebb = is_ebb_event(cpuhw->event[0]); power_pmu_enable()
1264 cpuhw->mmcr, cpuhw->event)) { power_pmu_enable()
1273 * bits for the first event. We have already checked that all power_pmu_enable()
1274 * events have the same value for these bits as the first event. power_pmu_enable()
1276 event = cpuhw->event[0]; power_pmu_enable()
1277 if (event->attr.exclude_user) power_pmu_enable()
1279 if (event->attr.exclude_kernel) power_pmu_enable()
1281 if (event->attr.exclude_hv) power_pmu_enable()
1303 event = cpuhw->event[i]; power_pmu_enable()
1304 if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { power_pmu_enable()
1305 power_pmu_read(event); power_pmu_enable()
1306 write_pmc(event->hw.idx, 0); power_pmu_enable()
1307 event->hw.idx = 0; power_pmu_enable()
1316 event = cpuhw->event[i]; power_pmu_enable()
1317 if (event->hw.idx) power_pmu_enable()
1321 cpuhw->limited_counter[n_lim] = event; power_pmu_enable()
1328 val = local64_read(&event->hw.prev_count); power_pmu_enable()
1331 if (event->hw.sample_period) { power_pmu_enable()
1332 left = local64_read(&event->hw.period_left); power_pmu_enable()
1336 local64_set(&event->hw.prev_count, val); power_pmu_enable()
1339 event->hw.idx = idx; power_pmu_enable()
1340 if (event->hw.state & PERF_HES_STOPPED) power_pmu_enable()
1344 perf_event_update_userpage(event); power_pmu_enable()
1378 struct perf_event *event; collect_events() local
1387 list_for_each_entry(event, &group->sibling_list, group_entry) { collect_events()
1388 if (!is_software_event(event) && collect_events()
1389 event->state != PERF_EVENT_STATE_OFF) { collect_events()
1392 ctrs[n] = event; collect_events()
1393 flags[n] = event->hw.event_base; collect_events()
1394 events[n++] = event->hw.config; collect_events()
1401 * Add an event to the PMU.
1406 static int power_pmu_add(struct perf_event *event, int ef_flags) power_pmu_add() argument
1414 perf_pmu_disable(event->pmu); power_pmu_add()
1417 * Add the event to the list (if there is room) power_pmu_add()
1424 cpuhw->event[n0] = event; power_pmu_add()
1425 cpuhw->events[n0] = event->hw.config; power_pmu_add()
1426 cpuhw->flags[n0] = event->hw.event_base; power_pmu_add()
1429 * This event may have been disabled/stopped in record_and_restart() power_pmu_add()
1430 * because we exceeded the ->event_limit. If re-starting the event, power_pmu_add()
1435 event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; power_pmu_add()
1437 event->hw.state = 0; power_pmu_add()
1447 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) power_pmu_add()
1451 event->hw.config = cpuhw->events[n0]; power_pmu_add()
1454 ebb_event_add(event); power_pmu_add()
1461 if (has_branch_stack(event)) { power_pmu_add()
1462 power_pmu_bhrb_enable(event); power_pmu_add()
1464 event->attr.branch_sample_type); power_pmu_add()
1467 perf_pmu_enable(event->pmu); power_pmu_add()
1473 * Remove an event from the PMU.
1475 static void power_pmu_del(struct perf_event *event, int ef_flags) power_pmu_del() argument
1482 perf_pmu_disable(event->pmu); power_pmu_del()
1484 power_pmu_read(event); power_pmu_del()
1488 if (event == cpuhw->event[i]) { power_pmu_del()
1490 cpuhw->event[i-1] = cpuhw->event[i]; power_pmu_del()
1495 ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); power_pmu_del()
1496 if (event->hw.idx) { power_pmu_del()
1497 write_pmc(event->hw.idx, 0); power_pmu_del()
1498 event->hw.idx = 0; power_pmu_del()
1500 perf_event_update_userpage(event); power_pmu_del()
1505 if (event == cpuhw->limited_counter[i]) power_pmu_del()
1519 if (has_branch_stack(event)) power_pmu_del()
1520 power_pmu_bhrb_disable(event); power_pmu_del()
1522 perf_pmu_enable(event->pmu); power_pmu_del()
1531 static void power_pmu_start(struct perf_event *event, int ef_flags) power_pmu_start() argument
1537 if (!event->hw.idx || !event->hw.sample_period) power_pmu_start()
1540 if (!(event->hw.state & PERF_HES_STOPPED)) power_pmu_start()
1544 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); power_pmu_start()
1547 perf_pmu_disable(event->pmu); power_pmu_start()
1549 event->hw.state = 0; power_pmu_start()
1550 left = local64_read(&event->hw.period_left); power_pmu_start()
1556 write_pmc(event->hw.idx, val); power_pmu_start()
1558 perf_event_update_userpage(event); power_pmu_start()
1559 perf_pmu_enable(event->pmu); power_pmu_start()
1563 static void power_pmu_stop(struct perf_event *event, int ef_flags) power_pmu_stop() argument
1567 if (!event->hw.idx || !event->hw.sample_period) power_pmu_stop()
1570 if (event->hw.state & PERF_HES_STOPPED) power_pmu_stop()
1574 perf_pmu_disable(event->pmu); power_pmu_stop()
1576 power_pmu_read(event); power_pmu_stop()
1577 event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; power_pmu_stop()
1578 write_pmc(event->hw.idx, 0); power_pmu_stop()
1580 perf_event_update_userpage(event); power_pmu_stop()
1581 perf_pmu_enable(event->pmu); power_pmu_stop()
1650 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) power_pmu_commit_txn()
1657 cpuhw->event[i]->hw.config = cpuhw->events[i]; power_pmu_commit_txn()
1665 * Return 1 if we might be able to put the event on a limited PMC,
1667 * An event can only go on a limited PMC if it counts something
1671 static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, can_go_on_limited_pmc() argument
1677 if (event->attr.exclude_user can_go_on_limited_pmc()
1678 || event->attr.exclude_kernel can_go_on_limited_pmc()
1679 || event->attr.exclude_hv can_go_on_limited_pmc()
1680 || event->attr.sample_period) can_go_on_limited_pmc()
1724 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
1764 static int power_pmu_event_init(struct perf_event *event) power_pmu_event_init() argument
1778 if (has_branch_stack(event)) { power_pmu_event_init()
1784 switch (event->attr.type) { power_pmu_event_init()
1786 ev = event->attr.config; power_pmu_event_init()
1792 err = hw_perf_cache_event(event->attr.config, &ev); power_pmu_event_init()
1797 ev = event->attr.config; power_pmu_event_init()
1803 event->hw.config_base = ev; power_pmu_event_init()
1804 event->hw.idx = 0; power_pmu_event_init()
1812 event->attr.exclude_hv = 0; power_pmu_event_init()
1815 * If this is a per-task event, then we can use power_pmu_event_init()
1821 if (event->attach_state & PERF_ATTACH_TASK) power_pmu_event_init()
1826 * event_id could go on a limited PMC. power_pmu_event_init()
1829 if (can_go_on_limited_pmc(event, ev, flags)) { power_pmu_event_init()
1844 err = ebb_event_check(event); power_pmu_event_init()
1850 * other hardware events in the group. We assume the event power_pmu_event_init()
1854 if (event->group_leader != event) { power_pmu_event_init()
1855 n = collect_events(event->group_leader, ppmu->n_counter - 1, power_pmu_event_init()
1861 ctrs[n] = event; power_pmu_event_init()
1869 if (has_branch_stack(event)) { power_pmu_event_init()
1871 event->attr.branch_sample_type); power_pmu_event_init()
1883 event->hw.config = events[n]; power_pmu_event_init()
1884 event->hw.event_base = cflags[n]; power_pmu_event_init()
1885 event->hw.last_period = event->hw.sample_period; power_pmu_event_init()
1886 local64_set(&event->hw.period_left, event->hw.last_period); power_pmu_event_init()
1892 if (is_ebb_event(event)) power_pmu_event_init()
1893 local64_set(&event->hw.prev_count, 0); power_pmu_event_init()
1911 event->destroy = hw_perf_event_destroy; power_pmu_event_init()
1916 static int power_pmu_event_idx(struct perf_event *event) power_pmu_event_idx() argument
1918 return event->hw.idx; power_pmu_event_idx()
1928 return sprintf(page, "event=0x%02llx\n", pmu_attr->id); power_events_sysfs_show()
1952 static void record_and_restart(struct perf_event *event, unsigned long val, record_and_restart() argument
1955 u64 period = event->hw.sample_period; record_and_restart()
1959 if (event->hw.state & PERF_HES_STOPPED) { record_and_restart()
1960 write_pmc(event->hw.idx, 0); record_and_restart()
1965 prev = local64_read(&event->hw.prev_count); record_and_restart()
1967 local64_add(delta, &event->count); record_and_restart()
1970 * See if the total period for this event has expired, record_and_restart()
1974 left = local64_read(&event->hw.period_left) - delta; record_and_restart()
1983 event->hw.last_period = event->hw.sample_period; record_and_restart()
1989 write_pmc(event->hw.idx, val); record_and_restart()
1990 local64_set(&event->hw.prev_count, val); record_and_restart()
1991 local64_set(&event->hw.period_left, left); record_and_restart()
1992 perf_event_update_userpage(event); record_and_restart()
2000 perf_sample_data_init(&data, ~0ULL, event->hw.last_period); record_and_restart()
2002 if (event->attr.sample_type & PERF_SAMPLE_ADDR) record_and_restart()
2005 if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { record_and_restart()
2012 if (perf_event_overflow(event, &data, regs)) record_and_restart()
2013 power_pmu_stop(event, 0); record_and_restart()
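record_and_restart() above decrements the remaining period by the freshly counted delta and, once the period has expired, reloads both period_left and the PMC so the next interrupt lands one full sample period later. A compact sketch of that reload arithmetic; the 0x80000000 threshold (powerpc PMCs raise their exception when the counter goes negative) is stated here as an assumption, since the fragment elides the exact expression:

#include <stdio.h>

/*
 * Given how far we still are from the end of the period (left) and how many
 * counts the PMC accumulated since the last reload (delta), compute the new
 * period remainder and the value to preload into the PMC.
 */
static void reload(long long *left, long long delta, long long period,
		   unsigned int *pmc_val)
{
	*left -= delta;				/* consume what we just counted */
	if (*left <= 0) {
		*left += period;		/* period expired: start a new one */
		if (*left <= 0)
			*left = period;
	}

	/* assumed convention: the interrupt fires when the PMC reaches 0x80000000 */
	*pmc_val = 0;
	if (*left < 0x80000000LL)
		*pmc_val = 0x80000000LL - *left;
}

int main(void)
{
	long long left = 100000, period = 100000;
	unsigned int val;

	reload(&left, 123456, period, &val);	/* overran the period */
	printf("left=%lld preload=0x%x\n", left, val);
	return 0;
}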
2050 * Events on POWER7 can roll back if a speculative event doesn't pmc_overflow_power7()
2081 struct perf_event *event; perf_event_interrupt() local
2117 event = cpuhw->event[j]; perf_event_interrupt()
2118 if (event->hw.idx == (i + 1)) { perf_event_interrupt()
2120 record_and_restart(event, val[i], regs); perf_event_interrupt()
2131 event = cpuhw->event[i]; perf_event_interrupt()
2132 if (!event->hw.idx || is_limited_pmc(event->hw.idx)) perf_event_interrupt()
2134 if (pmc_overflow_power7(val[event->hw.idx - 1])) { perf_event_interrupt()
2135 /* event has overflowed in a buggy way */ perf_event_interrupt()
2137 record_and_restart(event, perf_event_interrupt()
2138 val[event->hw.idx - 1], perf_event_interrupt()
H A Dpower7-pmu.c18 * Bits in event code for POWER7
25 #define PM_COMBINE_SH 11 /* Combined event bit */
28 #define PM_L2SEL_SH 8 /* L2 event select */
54 * Power7 event codes.
85 static int power7_get_constraint(u64 event, unsigned long *maskp, power7_get_constraint() argument
91 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power7_get_constraint()
98 if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4)) power7_get_constraint()
107 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power7_get_constraint()
110 int l2sel = (event >> PM_L2SEL_SH) & PM_L2SEL_MSK; power7_get_constraint()
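The PM_*_SH/PM_*_MSK defines above describe how a raw POWER7 event code packs its selector fields. A small decoder sketch follows; only PM_COMBINE_SH (11) and PM_L2SEL_SH (8) are visible in this excerpt, so the remaining shift and mask values are assumptions used purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* COMBINE and L2SEL shifts come from the excerpt; the rest are assumed here */
#define PM_PMC_SH	16
#define PM_PMC_MSK	0xf
#define PM_UNIT_SH	12
#define PM_UNIT_MSK	0xf
#define PM_COMBINE_SH	11	/* Combined event bit (from the excerpt) */
#define PM_COMBINE_MSK	1
#define PM_L2SEL_SH	8	/* L2 event select (from the excerpt) */
#define PM_L2SEL_MSK	7
#define PM_PMCSEL_MSK	0xff

static void decode(uint64_t event)
{
	printf("event 0x%llx: pmc=%llu unit=%llu combine=%llu l2sel=%llu psel=0x%llx\n",
	       (unsigned long long)event,
	       (unsigned long long)((event >> PM_PMC_SH) & PM_PMC_MSK),
	       (unsigned long long)((event >> PM_UNIT_SH) & PM_UNIT_MSK),
	       (unsigned long long)((event >> PM_COMBINE_SH) & PM_COMBINE_MSK),
	       (unsigned long long)((event >> PM_L2SEL_SH) & PM_L2SEL_MSK),
	       (unsigned long long)(event & PM_PMCSEL_MSK));
}

int main(void)
{
	decode(0x1001e);	/* extracts pmc=1, psel=0x1e */
	decode(0x600f4);	/* one of the special no-PMC-restriction codes seen above */
	return 0;
}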
120 #define MAX_ALT 2 /* at most 2 alternatives for any event */
132 static int find_alternative(u64 event) find_alternative() argument
137 if (event < event_alternatives[i][0]) find_alternative()
140 if (event == event_alternatives[i][j]) find_alternative()
146 static s64 find_alternative_decode(u64 event) find_alternative_decode() argument
151 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; find_alternative_decode()
152 psel = event & PM_PMCSEL_MSK; find_alternative_decode()
154 return event - (1 << PM_PMC_SH) + 8; find_alternative_decode()
156 return event + (1 << PM_PMC_SH) - 8; find_alternative_decode()
160 static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[]) power7_get_alternatives() argument
165 alt[0] = event; power7_get_alternatives()
167 i = find_alternative(event); power7_get_alternatives()
171 if (ae && ae != event) power7_get_alternatives()
175 ae = find_alternative_decode(event); power7_get_alternatives()
212 * Returns 1 if event counts things relating to marked instructions
215 static int power7_marked_instr_event(u64 event) power7_marked_instr_event() argument
220 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power7_marked_instr_event()
221 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power7_marked_instr_event()
222 psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */ power7_marked_instr_event()
247 static int power7_compute_mmcr(u64 event[], int n_ev, power7_compute_mmcr() argument
258 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power7_compute_mmcr()
270 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power7_compute_mmcr()
271 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power7_compute_mmcr()
272 combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK; power7_compute_mmcr()
273 l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK; power7_compute_mmcr()
274 psel = event[i] & PM_PMCSEL_MSK; power7_compute_mmcr()
276 /* Bus event or any-PMC direct event */ power7_compute_mmcr()
285 /* Direct or decoded event */ power7_compute_mmcr()
298 if (power7_marked_instr_event(event[i])) power7_compute_mmcr()
336 * are event codes.
412 PMU_FORMAT_ATTR(event, "config:0-19");
H A Dmpc7450-pmu.c17 #define MAX_ALT 3 /* Maximum number of event alternative codes */
20 * Bits in event code for MPC7450 family
37 * -1: event code is invalid
41 static int mpc7450_classify_event(u32 event) mpc7450_classify_event() argument
45 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; mpc7450_classify_event()
51 event &= PM_PMCSEL_MSK; mpc7450_classify_event()
52 if (event <= 1) mpc7450_classify_event()
54 if (event <= 7) mpc7450_classify_event()
56 if (event <= 13) mpc7450_classify_event()
58 if (event <= 22) mpc7450_classify_event()
81 static int mpc7450_threshold_use(u32 event) mpc7450_threshold_use() argument
85 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; mpc7450_threshold_use()
86 sel = event & PM_PMCSEL_MSK; mpc7450_threshold_use()
154 static int mpc7450_get_constraint(u64 event, unsigned long *maskp, mpc7450_get_constraint() argument
161 class = mpc7450_classify_event(event); mpc7450_get_constraint()
165 pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK; mpc7450_get_constraint()
173 tuse = mpc7450_threshold_use(event); mpc7450_get_constraint()
175 thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK; mpc7450_get_constraint()
180 if ((unsigned int)event & PM_THRMULT_MSKS) mpc7450_get_constraint()
212 static int find_alternative(u32 event) find_alternative() argument
217 if (event < event_alternatives[i][0]) find_alternative()
220 if (event == event_alternatives[i][j]) find_alternative()
226 static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[]) mpc7450_get_alternatives() argument
231 alt[0] = event; mpc7450_get_alternatives()
233 i = find_alternative((u32)event); mpc7450_get_alternatives()
237 if (ae && ae != (u32)event) mpc7450_get_alternatives()
263 static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], mpc7450_compute_mmcr() argument
281 class = mpc7450_classify_event(event[i]); mpc7450_compute_mmcr()
288 /* Second pass: allocate PMCs from most specific event to least */ mpc7450_compute_mmcr()
291 ev = event[event_index[class][i]]; mpc7450_compute_mmcr()
359 * are event codes.
H A Dpower5-pmu.c18 * Bits in event code for POWER5 (not POWER5++)
25 #define PM_BYTE_SH 12 /* Byte number of event bus to use */
29 #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */
114 * 24-27: Byte 0 event source 0x0f00_0000
115 * Encoding as for the event code
118 * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
140 static int power5_get_constraint(u64 event, unsigned long *maskp, power5_get_constraint() argument
148 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5_get_constraint()
157 else if (event != 0x500009 && event != 0x600005) power5_get_constraint()
160 if (event & PM_BUSEVENT_MSK) { power5_get_constraint()
161 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power5_get_constraint()
168 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power5_get_constraint()
177 bit = event & 7; power5_get_constraint()
181 value |= (unsigned long)((event >> PM_GRS_SH) & fmask) power5_get_constraint()
213 #define MAX_ALT 3 /* at most 3 alternatives for any event */
227 static int find_alternative(u64 event) find_alternative() argument
232 if (event < event_alternatives[i][0]) find_alternative()
235 if (event == event_alternatives[i][j]) find_alternative()
249 * Some direct events for decodes of event bus byte 3 have alternative
251 * event code for those that do, or -1 otherwise.
253 static s64 find_alternative_bdecode(u64 event) find_alternative_bdecode() argument
257 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; find_alternative_bdecode()
261 pp = event & PM_PMCSEL_MSK; find_alternative_bdecode()
264 return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | find_alternative_bdecode()
272 static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[]) power5_get_alternatives() argument
277 alt[0] = event; power5_get_alternatives()
279 i = find_alternative(event); power5_get_alternatives()
283 if (ae && ae != event) power5_get_alternatives()
287 ae = find_alternative_bdecode(event); power5_get_alternatives()
296 * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
338 * Returns 1 if event counts things relating to marked instructions
341 static int power5_marked_instr_event(u64 event) power5_marked_instr_event() argument
347 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5_marked_instr_event()
348 psel = event & PM_PMCSEL_MSK; power5_marked_instr_event()
367 if (!(event & PM_BUSEVENT_MSK)) power5_marked_instr_event()
370 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power5_marked_instr_event()
371 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power5_marked_instr_event()
385 static int power5_compute_mmcr(u64 event[], int n_ev, power5_compute_mmcr() argument
407 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power5_compute_mmcr()
418 if (event[i] & PM_BUSEVENT_MSK) { power5_compute_mmcr()
419 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power5_compute_mmcr()
420 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; power5_compute_mmcr()
492 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power5_compute_mmcr()
493 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power5_compute_mmcr()
494 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; power5_compute_mmcr()
495 psel = event[i] & PM_PMCSEL_MSK; power5_compute_mmcr()
496 isbus = event[i] & PM_BUSEVENT_MSK; power5_compute_mmcr()
498 /* Bus event or any-PMC direct event */ power5_compute_mmcr()
513 /* Direct event */ power5_compute_mmcr()
524 grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; power5_compute_mmcr()
527 if (power5_marked_instr_event(event[i])) power5_compute_mmcr()
565 * are event codes.
H A Dpower6-pmu.c18 * Bits in event code for POWER6
23 #define PM_UNIT_SH 16 /* Unit event comes from (TTMxSEL encoding) */
28 #define PM_BYTE_SH 12 /* Byte of event bus to use */
30 #define PM_SUBUNIT_SH 8 /* Subunit event comes from (NEST_SEL enc.) */
57 * top 4 bits say what sort of event:
58 * 0 = direct marked event,
59 * 1 = byte decode event,
60 * 4 = add/and event (PMC1 -> bits 0 & 4),
61 * 5 = add/and event (PMC1 -> bits 1 & 5),
62 * 6 = add/and event (PMC1 -> bits 2 & 6),
63 * 7 = add/and event (PMC1 -> bits 3 & 7).
136 * Returns 1 if event counts things relating to marked instructions
139 static int power6_marked_instr_event(u64 event) power6_marked_instr_event() argument
145 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power6_marked_instr_event()
146 psel = (event & PM_PMCSEL_MSK) >> 1; /* drop edge/level bit */ power6_marked_instr_event()
165 if (!(event & PM_BUSEVENT_MSK) || bit == -1) power6_marked_instr_event()
168 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power6_marked_instr_event()
169 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power6_marked_instr_event()
177 static int p6_compute_mmcr(u64 event[], int n_ev, p6_compute_mmcr() argument
190 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p6_compute_mmcr()
198 ev = event[i]; p6_compute_mmcr()
214 /* this event uses the event bus */ p6_compute_mmcr()
217 /* check for conflict on this byte of event bus */ p6_compute_mmcr()
245 if (power6_marked_instr_event(event[i])) p6_compute_mmcr()
266 * 16-19 select field: unit on byte 0 of event bus
268 * 32-34 select field: nest (subunit) event selector
270 static int p6_get_constraint(u64 event, unsigned long *maskp, p6_get_constraint() argument
276 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p6_get_constraint()
278 if (pmc > 4 && !(event == 0x500009 || event == 0x600005)) p6_get_constraint()
284 if (event & PM_BUSEVENT_MSK) { p6_get_constraint()
285 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p6_get_constraint()
288 value |= (unsigned long)(event & PM_UNIT_MSKS) << sh; p6_get_constraint()
289 if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { p6_get_constraint()
290 subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; p6_get_constraint()
304 static int p6_limited_pmc_event(u64 event) p6_limited_pmc_event() argument
306 int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p6_limited_pmc_event()
311 #define MAX_ALT 4 /* at most 4 alternatives for any event */
343 static int find_alternatives_list(u64 event) find_alternatives_list() argument
349 if (event < event_alternatives[i][0]) find_alternatives_list()
353 if (!alt || event < alt) find_alternatives_list()
355 if (event == alt) find_alternatives_list()
362 static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[]) p6_get_alternatives() argument
369 alt[0] = event; p6_get_alternatives()
370 nlim = p6_limited_pmc_event(event); p6_get_alternatives()
373 i = find_alternatives_list(event); p6_get_alternatives()
380 if (aevent != event) p6_get_alternatives()
388 psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */ p6_get_alternatives()
389 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p6_get_alternatives()
391 alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) | p6_get_alternatives()
396 alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) | p6_get_alternatives()
409 * we never end up with more than 4 alternatives for any event. p6_get_alternatives()
485 * are event codes.
H A Dppc970-pmu.c17 * Bits in event code for PPC970
25 #define PM_BYTE_SH 4 /* Byte number of event bus to use */
117 * 28-31: Byte 0 event source 0xf000_0000
118 * Encoding as for the event code
121 * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
144 * Returns 1 if event counts things relating to marked instructions
147 static int p970_marked_instr_event(u64 event) p970_marked_instr_event() argument
152 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p970_marked_instr_event()
153 psel = event & PM_PMCSEL_MSK; p970_marked_instr_event()
166 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p970_marked_instr_event()
167 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; p970_marked_instr_event()
194 static int p970_get_constraint(u64 event, unsigned long *maskp, p970_get_constraint() argument
201 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p970_get_constraint()
210 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; p970_get_constraint()
216 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p970_get_constraint()
236 spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; p970_get_constraint()
246 static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[]) p970_get_alternatives() argument
248 alt[0] = event; p970_get_alternatives()
251 if (event == 0x2002 || event == 0x3002) { p970_get_alternatives()
252 alt[1] = event ^ 0x1000; p970_get_alternatives()
259 static int p970_compute_mmcr(u64 event[], int n_ev, p970_compute_mmcr() argument
283 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p970_compute_mmcr()
291 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; p970_compute_mmcr()
292 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; p970_compute_mmcr()
350 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p970_compute_mmcr()
351 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; p970_compute_mmcr()
352 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; p970_compute_mmcr()
353 psel = event[i] & PM_PMCSEL_MSK; p970_compute_mmcr()
355 /* Bus event or any-PMC direct event */ p970_compute_mmcr()
374 /* Direct event */ p970_compute_mmcr()
382 spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; p970_compute_mmcr()
384 if (p970_marked_instr_event(event[i])) p970_compute_mmcr()
437 * are event codes.
H A Dpower4-pmu.c18 * Bits in event code for POWER4
27 #define PM_BYTE_SH 4 /* Byte number of event bus to use */
156 * 28-31: Byte 0 event source 0xf000_0000
167 * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
215 * Returns 1 if event counts things relating to marked instructions
218 static int p4_marked_instr_event(u64 event) p4_marked_instr_event() argument
223 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p4_marked_instr_event()
224 psel = event & PM_PMCSEL_MSK; p4_marked_instr_event()
237 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p4_marked_instr_event()
238 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; p4_marked_instr_event()
242 if (event & PM_LOWER_MSKS) p4_marked_instr_event()
254 static int p4_get_constraint(u64 event, unsigned long *maskp, p4_get_constraint() argument
261 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p4_get_constraint()
270 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; p4_get_constraint()
271 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p4_get_constraint()
273 lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK; p4_get_constraint()
308 if (p4_marked_instr_event(event)) { p4_get_constraint()
314 if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2) p4_get_constraint()
326 static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[]) p4_get_alternatives() argument
330 alt[0] = event; p4_get_alternatives()
334 if (event == 0x8003 || event == 0x0224) { p4_get_alternatives()
335 alt[1] = event ^ (0x8003 ^ 0x0224); p4_get_alternatives()
340 if (event == 0x0c13 || event == 0x0c23) { p4_get_alternatives()
341 alt[1] = event ^ (0x0c13 ^ 0x0c23); p4_get_alternatives()
347 if (event == ppc_inst_cmpl[i]) { p4_get_alternatives()
358 static int p4_compute_mmcr(u64 event[], int n_ev, p4_compute_mmcr() argument
379 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p4_compute_mmcr()
387 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; p4_compute_mmcr()
388 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; p4_compute_mmcr()
389 lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK; p4_compute_mmcr()
471 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p4_compute_mmcr()
472 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; p4_compute_mmcr()
473 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; p4_compute_mmcr()
474 psel = event[i] & PM_PMCSEL_MSK; p4_compute_mmcr()
476 /* Bus event or 00xxx direct event (off or cycles) */ p4_compute_mmcr()
493 /* Direct event */ p4_compute_mmcr()
510 if (p4_marked_instr_event(event[i])) p4_compute_mmcr()
557 * are event codes.
H A Dpower8-pmu.c22 * Some power8 event codes.
64 * Raw event encoding for POWER8:
113 * else if cache_sel[1]: # L1 event
230 * the fifth event to overflow and set the 4th bit. To achieve that we bias the
276 static inline bool event_is_fab_match(u64 event) event_is_fab_match() argument
279 event &= 0xff0fe; event_is_fab_match()
282 return (event == 0x30056 || event == 0x4f052); event_is_fab_match()
285 static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) power8_get_constraint() argument
292 if (event & ~EVENT_VALID_MASK) power8_get_constraint()
295 pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; power8_get_constraint()
296 unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; power8_get_constraint()
297 cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; power8_get_constraint()
298 ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK; power8_get_constraint()
306 /* Ignore Linux defined bits when checking event below */ power8_get_constraint()
307 base_event = event & ~EVENT_LINUX_MASK; power8_get_constraint()
320 * Don't count events on PMC 5 & 6, there is only one valid event power8_get_constraint()
340 } else if (event & EVENT_IS_L1) { power8_get_constraint()
345 if (event & EVENT_IS_MARKED) { power8_get_constraint()
347 value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); power8_get_constraint()
354 if (event_is_fab_match(event)) { power8_get_constraint()
356 value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT); power8_get_constraint()
364 cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; power8_get_constraint()
371 value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); power8_get_constraint()
378 if (event & EVENT_WANTS_BHRB) { power8_get_constraint()
384 value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT); power8_get_constraint()
401 static int power8_compute_mmcr(u64 event[], int n_ev, power8_compute_mmcr() argument
413 pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; power8_compute_mmcr()
424 pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; power8_compute_mmcr()
425 unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; power8_compute_mmcr()
426 combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK; power8_compute_mmcr()
427 psel = event[i] & EVENT_PSEL_MASK; power8_compute_mmcr()
444 if (event[i] & EVENT_IS_L1) { power8_compute_mmcr()
445 cache = event[i] >> EVENT_CACHE_SEL_SHIFT; power8_compute_mmcr()
451 if (event[i] & EVENT_IS_MARKED) { power8_compute_mmcr()
454 val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; power8_compute_mmcr()
465 if (event_is_fab_match(event[i])) { power8_compute_mmcr()
466 mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) & power8_compute_mmcr()
469 val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; power8_compute_mmcr()
471 val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK; power8_compute_mmcr()
473 val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; power8_compute_mmcr()
477 if (event[i] & EVENT_WANTS_BHRB) { power8_compute_mmcr()
478 val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK; power8_compute_mmcr()
540 static int find_alternative(u64 event) find_alternative() argument
545 if (event < event_alternatives[i][0]) find_alternative()
549 if (event == event_alternatives[i][j]) find_alternative()
556 static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[]) power8_get_alternatives() argument
561 alt[num_alt++] = event; power8_get_alternatives()
563 i = find_alternative(event); power8_get_alternatives()
565 /* Filter out the original event, it's already in alt[0] */ power8_get_alternatives()
568 if (alt_event && alt_event != event) power8_get_alternatives()
607 PMU_FORMAT_ATTR(event, "config:0-49");
663 * regular PMU event. As the privilege state filter is handled power8_bhrb_filter_map()
665 * PMU event, we ignore any separate BHRB specific request. power8_bhrb_filter_map()
702 * are event codes.
/linux-4.4.14/drivers/md/
H A Ddm-uevent.c52 static void dm_uevent_free(struct dm_uevent *event) dm_uevent_free() argument
54 kmem_cache_free(_dm_event_cache, event); dm_uevent_free()
59 struct dm_uevent *event; dm_uevent_alloc() local
61 event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC); dm_uevent_alloc()
62 if (!event) dm_uevent_alloc()
65 INIT_LIST_HEAD(&event->elist); dm_uevent_alloc()
66 event->md = md; dm_uevent_alloc()
68 return event; dm_uevent_alloc()
78 struct dm_uevent *event; dm_build_path_uevent() local
80 event = dm_uevent_alloc(md); dm_build_path_uevent()
81 if (!event) { dm_build_path_uevent()
86 event->action = action; dm_build_path_uevent()
88 if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) { dm_build_path_uevent()
94 if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) { dm_build_path_uevent()
100 if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u", dm_build_path_uevent()
107 if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) { dm_build_path_uevent()
112 if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d", dm_build_path_uevent()
119 return event; dm_build_path_uevent()
122 dm_uevent_free(event); dm_build_path_uevent()
131 * @kobj: kobject generating event
137 struct dm_uevent *event, *next; dm_send_uevents() local
139 list_for_each_entry_safe(event, next, events, elist) { list_for_each_entry_safe()
140 list_del_init(&event->elist); list_for_each_entry_safe()
146 if (dm_copy_name_and_uuid(event->md, event->name, list_for_each_entry_safe()
147 event->uuid)) { list_for_each_entry_safe()
153 if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) { list_for_each_entry_safe()
159 if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) { list_for_each_entry_safe()
165 r = kobject_uevent_env(kobj, event->action, event->ku_env.envp); list_for_each_entry_safe()
169 dm_uevent_free(event); list_for_each_entry_safe()
175 * dm_path_uevent - called to create a new path event and queue it
177 * @event_type: path event type enum
187 struct dm_uevent *event; dm_path_uevent() local
194 event = dm_build_path_uevent(md, ti, dm_path_uevent()
198 if (IS_ERR(event)) dm_path_uevent()
201 dm_uevent_add(md, &event->elist); dm_path_uevent()
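Going only by the add_uevent_var() calls visible above, the environment delivered with a device-mapper path uevent has roughly the following shape; every concrete value below is invented for illustration:

DM_TARGET=multipath
DM_ACTION=PATH_FAILED
DM_SEQNUM=12
DM_PATH=8:32
DM_NR_VALID_PATHS=1
DM_NAME=mpatha
DM_UUID=mpath-3600508b400105e210000900000490000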
/linux-4.4.14/arch/x86/kernel/cpu/
H A Dperf_event_msr.c72 PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00");
73 PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01");
74 PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
75 PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
76 PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04");
95 PMU_FORMAT_ATTR(event, "config:0-63");
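The attribute strings above export these counters as events of the msr PMU (event=0x00 is tsc through event=0x04 for smi), selectable via the single event=config:0-63 format field. A minimal user-space sketch of opening one of them; the PMU's dynamic type number has to be read from sysfs (conventionally /sys/bus/event_source/devices/msr/type) and is simply taken as a parameter here:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

/* msr_pmu_type: contents of /sys/bus/event_source/devices/msr/type */
static int open_msr_event(int msr_pmu_type, unsigned long long config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = msr_pmu_type;
	attr.size = sizeof(attr);
	attr.config = config;	/* 0x00 = tsc ... 0x04 = smi, per the strings above */
	/* note: msr_event_init() above rejects sampling and the exclude_* bits */

	/* count on CPU 0, any task */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}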
111 static int msr_event_init(struct perf_event *event) msr_event_init() argument
113 u64 cfg = event->attr.config; msr_event_init()
115 if (event->attr.type != event->pmu->type) msr_event_init()
122 if (event->attr.exclude_user || msr_event_init()
123 event->attr.exclude_kernel || msr_event_init()
124 event->attr.exclude_hv || msr_event_init()
125 event->attr.exclude_idle || msr_event_init()
126 event->attr.exclude_host || msr_event_init()
127 event->attr.exclude_guest || msr_event_init()
128 event->attr.sample_period) /* no sampling */ msr_event_init()
134 event->hw.idx = -1; msr_event_init()
135 event->hw.event_base = msr[cfg].msr; msr_event_init()
136 event->hw.config = cfg; msr_event_init()
141 static inline u64 msr_read_counter(struct perf_event *event) msr_read_counter() argument
145 if (event->hw.event_base) msr_read_counter()
146 rdmsrl(event->hw.event_base, now); msr_read_counter()
152 static void msr_event_update(struct perf_event *event) msr_event_update() argument
157 /* Careful, an NMI might modify the previous event value. */ msr_event_update()
159 prev = local64_read(&event->hw.prev_count); msr_event_update()
160 now = msr_read_counter(event); msr_event_update()
162 if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev) msr_event_update()
166 if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) msr_event_update()
169 local64_add(now - prev, &event->count); msr_event_update()
172 static void msr_event_start(struct perf_event *event, int flags) msr_event_start() argument
176 now = msr_read_counter(event); msr_event_start()
177 local64_set(&event->hw.prev_count, now); msr_event_start()
180 static void msr_event_stop(struct perf_event *event, int flags) msr_event_stop() argument
182 msr_event_update(event); msr_event_stop()
185 static void msr_event_del(struct perf_event *event, int flags) msr_event_del() argument
187 msr_event_stop(event, PERF_EF_UPDATE); msr_event_del()
190 static int msr_event_add(struct perf_event *event, int flags) msr_event_add() argument
193 msr_event_start(event, flags); msr_event_add()
H A Dperf_event_amd_iommu.c84 const char *event; member in struct:amd_iommu_event_desc
90 struct amd_iommu_event_desc *event = _iommu_event_show() local
92 return sprintf(buf, "%s\n", event->event); _iommu_event_show()
98 .event = _event, \
197 static int perf_iommu_event_init(struct perf_event *event) perf_iommu_event_init() argument
199 struct hw_perf_event *hwc = &event->hw; perf_iommu_event_init()
203 /* test the event attr type check for PMU enumeration */ perf_iommu_event_init()
204 if (event->attr.type != event->pmu->type) perf_iommu_event_init()
210 * Also, it does not support event sampling mode. perf_iommu_event_init()
212 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) perf_iommu_event_init()
216 if (event->attr.exclude_user || event->attr.exclude_kernel || perf_iommu_event_init()
217 event->attr.exclude_host || event->attr.exclude_guest) perf_iommu_event_init()
220 if (event->cpu < 0) perf_iommu_event_init()
225 if (event->pmu != &perf_iommu->pmu) perf_iommu_event_init()
229 config = event->attr.config; perf_iommu_event_init()
230 config1 = event->attr.config1; perf_iommu_event_init()
283 static void perf_iommu_disable_event(struct perf_event *event) perf_iommu_disable_event() argument
287 amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), perf_iommu_disable_event()
288 _GET_BANK(event), _GET_CNTR(event), perf_iommu_disable_event()
292 static void perf_iommu_start(struct perf_event *event, int flags) perf_iommu_start() argument
294 struct hw_perf_event *hwc = &event->hw; perf_iommu_start()
305 amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), perf_iommu_start()
306 _GET_BANK(event), _GET_CNTR(event), perf_iommu_start()
310 perf_iommu_enable_event(event); perf_iommu_start()
311 perf_event_update_userpage(event); perf_iommu_start()
315 static void perf_iommu_read(struct perf_event *event) perf_iommu_read() argument
320 struct hw_perf_event *hwc = &event->hw; perf_iommu_read()
323 amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), perf_iommu_read()
324 _GET_BANK(event), _GET_CNTR(event), perf_iommu_read()
338 local64_add(delta, &event->count); perf_iommu_read()
342 static void perf_iommu_stop(struct perf_event *event, int flags) perf_iommu_stop() argument
344 struct hw_perf_event *hwc = &event->hw; perf_iommu_stop()
352 perf_iommu_disable_event(event); perf_iommu_stop()
360 perf_iommu_read(event); perf_iommu_stop()
364 static int perf_iommu_add(struct perf_event *event, int flags) perf_iommu_add() argument
368 container_of(event->pmu, struct perf_amd_iommu, pmu); perf_iommu_add()
371 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; perf_iommu_add()
376 event->hw.extra_reg.reg = (u16)retval; perf_iommu_add()
381 perf_iommu_start(event, PERF_EF_RELOAD); perf_iommu_add()
386 static void perf_iommu_del(struct perf_event *event, int flags) perf_iommu_del() argument
389 container_of(event->pmu, struct perf_amd_iommu, pmu); perf_iommu_del()
392 perf_iommu_stop(event, PERF_EF_UPDATE); perf_iommu_del()
396 _GET_BANK(event), perf_iommu_del()
397 _GET_CNTR(event)); perf_iommu_del()
399 perf_event_update_userpage(event); perf_iommu_del()
H A Dperf_event_intel_uncore_snb.c12 /* SNB event control */
66 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
74 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_msr_enable_event() argument
76 struct hw_perf_event *hwc = &event->hw; snb_uncore_msr_enable_event()
84 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_msr_disable_event() argument
86 wrmsrl(event->hw.config_base, 0); snb_uncore_msr_disable_event()
98 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
179 INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
183 INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
239 static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_imc_enable_event() argument
242 static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_imc_disable_event() argument
245 static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_imc_read_counter() argument
247 struct hw_perf_event *hwc = &event->hw; snb_uncore_imc_read_counter()
257 static int snb_uncore_imc_event_init(struct perf_event *event) snb_uncore_imc_event_init() argument
261 struct hw_perf_event *hwc = &event->hw; snb_uncore_imc_event_init()
262 u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK; snb_uncore_imc_event_init()
265 if (event->attr.type != event->pmu->type) snb_uncore_imc_event_init()
268 pmu = uncore_event_to_pmu(event); snb_uncore_imc_event_init()
278 if (event->attr.exclude_user || snb_uncore_imc_event_init()
279 event->attr.exclude_kernel || snb_uncore_imc_event_init()
280 event->attr.exclude_hv || snb_uncore_imc_event_init()
281 event->attr.exclude_idle || snb_uncore_imc_event_init()
282 event->attr.exclude_host || snb_uncore_imc_event_init()
283 event->attr.exclude_guest || snb_uncore_imc_event_init()
284 event->attr.sample_period) /* no sampling */ snb_uncore_imc_event_init()
291 if (event->cpu < 0) snb_uncore_imc_event_init()
295 if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK) snb_uncore_imc_event_init()
298 box = uncore_pmu_to_box(pmu, event->cpu); snb_uncore_imc_event_init()
302 event->cpu = box->cpu; snb_uncore_imc_event_init()
304 event->hw.idx = -1; snb_uncore_imc_event_init()
305 event->hw.last_tag = ~0ULL; snb_uncore_imc_event_init()
306 event->hw.extra_reg.idx = EXTRA_REG_NONE; snb_uncore_imc_event_init()
307 event->hw.branch_reg.idx = EXTRA_REG_NONE; snb_uncore_imc_event_init()
309 * check event is known (whitelist, determines counter) snb_uncore_imc_event_init()
325 event->hw.event_base = base; snb_uncore_imc_event_init()
326 event->hw.config = cfg; snb_uncore_imc_event_init()
327 event->hw.idx = idx; snb_uncore_imc_event_init()
334 static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_imc_hw_config() argument
339 static void snb_uncore_imc_event_start(struct perf_event *event, int flags) snb_uncore_imc_event_start() argument
341 struct intel_uncore_box *box = uncore_event_to_box(event); snb_uncore_imc_event_start()
344 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) snb_uncore_imc_event_start()
347 event->hw.state = 0; snb_uncore_imc_event_start()
350 list_add_tail(&event->active_entry, &box->active_list); snb_uncore_imc_event_start()
352 count = snb_uncore_imc_read_counter(box, event); snb_uncore_imc_event_start()
353 local64_set(&event->hw.prev_count, count); snb_uncore_imc_event_start()
359 static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) snb_uncore_imc_event_stop() argument
361 struct intel_uncore_box *box = uncore_event_to_box(event); snb_uncore_imc_event_stop()
362 struct hw_perf_event *hwc = &event->hw; snb_uncore_imc_event_stop()
370 list_del(&event->active_entry); snb_uncore_imc_event_stop()
378 * Drain the remaining delta count out of an event snb_uncore_imc_event_stop()
381 uncore_perf_event_update(box, event); snb_uncore_imc_event_stop()
386 static int snb_uncore_imc_event_add(struct perf_event *event, int flags) snb_uncore_imc_event_add() argument
388 struct intel_uncore_box *box = uncore_event_to_box(event); snb_uncore_imc_event_add()
389 struct hw_perf_event *hwc = &event->hw; snb_uncore_imc_event_add()
398 snb_uncore_imc_event_start(event, 0); snb_uncore_imc_event_add()
405 static void snb_uncore_imc_event_del(struct perf_event *event, int flags) snb_uncore_imc_event_del() argument
407 struct intel_uncore_box *box = uncore_event_to_box(event); snb_uncore_imc_event_del()
410 snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); snb_uncore_imc_event_del()
413 if (event == box->event_list[i]) { snb_uncore_imc_event_del()
626 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhm_uncore_msr_enable_event() argument
628 struct hw_perf_event *hwc = &event->hw; nhm_uncore_msr_enable_event()
651 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
652 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
653 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
654 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
655 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
656 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
657 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
658 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
659 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
H A Dperf_event_amd.c126 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
127 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
176 static int amd_core_hw_config(struct perf_event *event) amd_core_hw_config() argument
178 if (event->attr.exclude_host && event->attr.exclude_guest) amd_core_hw_config()
184 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | amd_core_hw_config()
186 else if (event->attr.exclude_host) amd_core_hw_config()
187 event->hw.config |= AMD64_EVENTSEL_GUESTONLY; amd_core_hw_config()
188 else if (event->attr.exclude_guest) amd_core_hw_config()
189 event->hw.config |= AMD64_EVENTSEL_HOSTONLY; amd_core_hw_config()
195 * AMD64 events are detected based on their event codes.
214 static int amd_pmu_hw_config(struct perf_event *event) amd_pmu_hw_config() argument
218 /* pass precise event sampling to ibs: */ amd_pmu_hw_config()
219 if (event->attr.precise_ip && get_ibs_caps()) amd_pmu_hw_config()
222 if (has_branch_stack(event)) amd_pmu_hw_config()
225 ret = x86_pmu_hw_config(event); amd_pmu_hw_config()
229 if (event->attr.type == PERF_TYPE_RAW) amd_pmu_hw_config()
230 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; amd_pmu_hw_config()
232 return amd_core_hw_config(event); amd_pmu_hw_config()
236 struct perf_event *event) __amd_put_nb_event_constraints()
242 * need to scan whole list because event may not have __amd_put_nb_event_constraints()
245 * no race condition possible because event can only __amd_put_nb_event_constraints()
250 if (cmpxchg(nb->owners + i, event, NULL) == event) __amd_put_nb_event_constraints()
261 * traffic. They are identified by an event code >= 0xe00.
264 * shared set of counters. When an NB event is programmed __amd_get_nb_event_constraints()
274 * We provide only one choice for each NB event based on
276 * if a counter is available, there is a guarantee the NB event
279 * for this event.
292 __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, __amd_get_nb_event_constraints() argument
295 struct hw_perf_event *hwc = &event->hw; __amd_get_nb_event_constraints()
312 * event can already be present yet not assigned (in hwc->idx) __amd_get_nb_event_constraints()
319 old = cmpxchg(nb->owners + idx, NULL, event); __amd_get_nb_event_constraints()
320 else if (nb->owners[idx] == event) __amd_get_nb_event_constraints()
321 /* event already present */ __amd_get_nb_event_constraints()
322 old = event; __amd_get_nb_event_constraints()
326 if (old && old != event) __amd_get_nb_event_constraints()
331 cmpxchg(nb->owners + new, event, NULL); __amd_get_nb_event_constraints()
335 if (old == event) __amd_get_nb_event_constraints()
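The north-bridge constraint code above claims one of the shared per-node counters by compare-and-swapping its event pointer into a free slot of nb->owners[], and releases it the same way. A stripped-down user-space sketch of that claim/release protocol, using C11 atomics in place of the kernel's cmpxchg():

#include <stdatomic.h>
#include <stddef.h>

#define NB_COUNTERS 4

/* one slot per shared counter; NULL means "free" */
static _Atomic(void *) owners[NB_COUNTERS];

/* try to claim any free slot (or find the one we already own); -1 if none */
static int nb_claim(void *event)
{
	for (int i = 0; i < NB_COUNTERS; i++) {
		void *expected = NULL;

		if (atomic_compare_exchange_strong(&owners[i], &expected, event))
			return i;		/* freshly claimed */
		if (expected == event)
			return i;		/* already ours */
	}
	return -1;
}

static void nb_release(void *event)
{
	for (int i = 0; i < NB_COUNTERS; i++) {
		void *expected = event;

		/* only clear slots that we actually own */
		atomic_compare_exchange_strong(&owners[i], &expected, NULL);
	}
}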
434 struct perf_event *event) amd_get_event_constraints()
437 * if not NB event or no NB, then no constraints amd_get_event_constraints()
439 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) amd_get_event_constraints()
442 return __amd_get_nb_event_constraints(cpuc, event, NULL); amd_get_event_constraints()
446 struct perf_event *event) amd_put_event_constraints()
448 if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)) amd_put_event_constraints()
449 __amd_put_nb_event_constraints(cpuc, event); amd_put_event_constraints()
452 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
481 * AMD family 15h event code/PMC mappings:
543 struct perf_event *event) amd_get_event_constraints_f15h()
545 struct hw_perf_event *hwc = &event->hw; amd_get_event_constraints_f15h()
616 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) | amd_event_sysfs_show() local
619 return x86_event_sysfs_show(page, config, event); amd_event_sysfs_show()
235 __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) __amd_put_nb_event_constraints() argument
433 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) amd_get_event_constraints() argument
445 amd_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) amd_put_event_constraints() argument
542 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) amd_get_event_constraints_f15h() argument
H A Dperf_event.c60 * Propagate event elapsed time into the generic event.
61 * Can only be executed on the CPU where the event is active.
64 u64 x86_perf_event_update(struct perf_event *event) x86_perf_event_update() argument
66 struct hw_perf_event *hwc = &event->hw; x86_perf_event_update()
76 * Careful: an NMI might modify the previous event value. x86_perf_event_update()
80 * count to the generic event atomically: x86_perf_event_update()
93 * (event-)time and add that to the generic event. x86_perf_event_update()
101 local64_add(delta, &event->count); x86_perf_event_update()
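The update path above is the usual lock-free prev_count idiom: read the hardware counter, publish the new raw value with a compare-and-swap so a racing NMI handler is handled cleanly, and only then add the delta to the generic count. A compact user-space model of that loop, with rdpmc() standing in for the real counter read and a 48-bit counter width assumed for the wrap handling:

#include <stdatomic.h>
#include <stdint.h>

#define CNTVAL_BITS 48			/* assumed counter width for the sketch */
#define CNTVAL_MASK ((1ULL << CNTVAL_BITS) - 1)

static uint64_t rdpmc(void)		/* stand-in for the hardware read */
{
	static uint64_t fake;
	return (fake += 12345) & CNTVAL_MASK;
}

static void event_update(_Atomic uint64_t *prev_count, _Atomic uint64_t *count)
{
	uint64_t prev, now, delta;

	do {
		prev = atomic_load(prev_count);
		now = rdpmc();
		/* retry if someone (e.g. an NMI handler) updated prev_count under us */
	} while (!atomic_compare_exchange_strong(prev_count, &prev, now));

	/* the counter may have wrapped; mask to its real width before accumulating */
	delta = (now - prev) & CNTVAL_MASK;
	atomic_fetch_add(count, delta);
}

int main(void)
{
	_Atomic uint64_t prev = 0, count = 0;

	event_update(&prev, &count);
	event_update(&prev, &count);
	return 0;
}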
110 static int x86_pmu_extra_regs(u64 config, struct perf_event *event) x86_pmu_extra_regs() argument
115 reg = &event->hw.extra_reg; x86_pmu_extra_regs()
121 if (er->event != (config & er->config_mask)) x86_pmu_extra_regs()
123 if (event->attr.config1 & ~er->valid_mask) x86_pmu_extra_regs()
130 reg->config = event->attr.config1; x86_pmu_extra_regs()
272 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
278 void hw_perf_lbr_event_destroy(struct perf_event *event) hw_perf_lbr_event_destroy() argument
280 hw_perf_event_destroy(event); hw_perf_lbr_event_destroy()
282 /* undo the lbr/bts event accounting */ hw_perf_lbr_event_destroy()
292 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) set_ext_hw_attr() argument
294 struct perf_event_attr *attr = &event->attr; set_ext_hw_attr()
322 return x86_pmu_extra_regs(val, event); set_ext_hw_attr()
355 * Check if we can create event of a certain type (that no conflicting events
386 int x86_setup_perfctr(struct perf_event *event) x86_setup_perfctr() argument
388 struct perf_event_attr *attr = &event->attr; x86_setup_perfctr()
389 struct hw_perf_event *hwc = &event->hw; x86_setup_perfctr()
392 if (!is_sampling_event(event)) { x86_setup_perfctr()
399 return x86_pmu_extra_regs(event->attr.config, event); x86_setup_perfctr()
402 return set_ext_hw_attr(hwc, event); x86_setup_perfctr()
435 event->destroy = hw_perf_lbr_event_destroy; x86_setup_perfctr()
449 static inline int precise_br_compat(struct perf_event *event) precise_br_compat() argument
451 u64 m = event->attr.branch_sample_type; precise_br_compat()
460 if (!event->attr.exclude_user) precise_br_compat()
463 if (!event->attr.exclude_kernel) precise_br_compat()
473 int x86_pmu_hw_config(struct perf_event *event) x86_pmu_hw_config() argument
475 if (event->attr.precise_ip) { x86_pmu_hw_config()
487 if (event->attr.precise_ip > precise) x86_pmu_hw_config()
494 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { x86_pmu_hw_config()
495 u64 *br_type = &event->attr.branch_sample_type; x86_pmu_hw_config()
497 if (has_branch_stack(event)) { x86_pmu_hw_config()
498 if (!precise_br_compat(event)) x86_pmu_hw_config()
509 * event. x86_pmu_hw_config()
513 if (!event->attr.exclude_user) x86_pmu_hw_config()
516 if (!event->attr.exclude_kernel) x86_pmu_hw_config()
521 if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) x86_pmu_hw_config()
522 event->attach_state |= PERF_ATTACH_TASK_DATA; x86_pmu_hw_config()
528 event->hw.config = ARCH_PERFMON_EVENTSEL_INT; x86_pmu_hw_config()
533 if (!event->attr.exclude_user) x86_pmu_hw_config()
534 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; x86_pmu_hw_config()
535 if (!event->attr.exclude_kernel) x86_pmu_hw_config()
536 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; x86_pmu_hw_config()
538 if (event->attr.type == PERF_TYPE_RAW) x86_pmu_hw_config()
539 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; x86_pmu_hw_config()
541 if (event->attr.sample_period && x86_pmu.limit_period) { x86_pmu_hw_config()
542 if (x86_pmu.limit_period(event, event->attr.sample_period) > x86_pmu_hw_config()
543 event->attr.sample_period) x86_pmu_hw_config()
547 return x86_setup_perfctr(event); x86_pmu_hw_config()
553 static int __x86_pmu_event_init(struct perf_event *event) __x86_pmu_event_init() argument
565 event->destroy = hw_perf_event_destroy; __x86_pmu_event_init()
567 event->hw.idx = -1; __x86_pmu_event_init()
568 event->hw.last_cpu = -1; __x86_pmu_event_init()
569 event->hw.last_tag = ~0ULL; __x86_pmu_event_init()
572 event->hw.extra_reg.idx = EXTRA_REG_NONE; __x86_pmu_event_init()
573 event->hw.branch_reg.idx = EXTRA_REG_NONE; __x86_pmu_event_init()
575 return x86_pmu.hw_config(event); __x86_pmu_event_init()
643 static inline int is_x86_event(struct perf_event *event) is_x86_event() argument
645 return event->pmu == &pmu; is_x86_event()
657 int event; /* event index */ member in struct:sched_state
696 sched->state.event = idx; /* start with min weight */ perf_sched_init()
725 * Select a counter for the current event to schedule. Return true on
736 if (sched->state.event >= sched->max_events) __perf_sched_find_counter()
739 c = sched->constraints[sched->state.event]; __perf_sched_find_counter()
793 /* next event */ perf_sched_next_event()
794 sched->state.event++; perf_sched_next_event()
795 if (sched->state.event >= sched->max_events) { perf_sched_next_event()
797 sched->state.event = 0; perf_sched_next_event()
802 c = sched->constraints[sched->state.event]; perf_sched_next_event()
811 * Assign a counter for each event.
824 assign[sched.state.event] = sched.state.counter; perf_assign_events()
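perf_assign_events() above walks the events from the most constrained upward ("start with min weight") and picks, for each one, a free counter permitted by its constraint mask. A small self-contained sketch of that greedy pass, leaving out the backtracking the real scheduler performs when it runs out of counters:

#include <stdio.h>

/*
 * idxmsk: for each event, a bitmask of counters it is allowed to use
 * (the analogue of a constraint's idxmsk).  Events are assumed to be
 * pre-sorted so that the most constrained (lowest weight) come first.
 */
static int assign_counters(const unsigned int *idxmsk, int n_events,
			   int n_counters, int *assign)
{
	unsigned int used = 0;		/* bitmask of counters already taken */

	for (int e = 0; e < n_events; e++) {
		int picked = -1;

		for (int c = 0; c < n_counters; c++) {
			if ((idxmsk[e] & (1u << c)) && !(used & (1u << c))) {
				picked = c;
				break;
			}
		}
		if (picked < 0)
			return -1;	/* no counter left; real code would backtrack */
		used |= 1u << picked;
		assign[e] = picked;
	}
	return 0;
}

int main(void)
{
	unsigned int idxmsk[3] = { 0x1, 0x3, 0xf };	/* most constrained first */
	int assign[3];

	if (!assign_counters(idxmsk, 3, 4, assign))
		printf("assigned: %d %d %d\n", assign[0], assign[1], assign[2]);
	return 0;
}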
907 * validate an event group (assign == NULL) x86_schedule_events()
946 struct perf_event *event; collect_events() local
963 list_for_each_entry(event, &leader->sibling_list, group_entry) { collect_events()
964 if (!is_x86_event(event) || collect_events()
965 event->state <= PERF_EVENT_STATE_OFF) collect_events()
971 cpuc->event_list[n] = event; collect_events()
977 static inline void x86_assign_hw_event(struct perf_event *event, x86_assign_hw_event() argument
980 struct hw_perf_event *hwc = &event->hw; x86_assign_hw_event()
1009 static void x86_pmu_start(struct perf_event *event, int flags);
1014 struct perf_event *event; x86_pmu_enable() local
1033 event = cpuc->event_list[i]; x86_pmu_enable()
1034 hwc = &event->hw; x86_pmu_enable()
1040 * - no other event has used the counter since x86_pmu_enable()
1053 x86_pmu_stop(event, PERF_EF_UPDATE); x86_pmu_enable()
1060 event = cpuc->event_list[i]; x86_pmu_enable()
1061 hwc = &event->hw; x86_pmu_enable()
1064 x86_assign_hw_event(event, cpuc, i); x86_pmu_enable()
1071 x86_pmu_start(event, PERF_EF_RELOAD); x86_pmu_enable()
1087 * To be called with the event disabled in hw:
1089 int x86_perf_event_set_period(struct perf_event *event) x86_perf_event_set_period() argument
1091 struct hw_perf_event *hwc = &event->hw; x86_perf_event_set_period()
1125 left = x86_pmu.limit_period(event, left); x86_perf_event_set_period()
1132 * The hw event starts counting from this event offset, x86_perf_event_set_period()
1150 perf_event_update_userpage(event); x86_perf_event_set_period()
1155 void x86_pmu_enable_event(struct perf_event *event) x86_pmu_enable_event() argument
1158 __x86_pmu_enable_event(&event->hw, x86_pmu_enable_event()
1163 * Add a single event to the PMU.
1165 * The event is added to the group of enabled events
1168 static int x86_pmu_add(struct perf_event *event, int flags) x86_pmu_add() argument
1175 hwc = &event->hw; x86_pmu_add()
1178 ret = n = collect_events(cpuc, event, false); x86_pmu_add()
1217 static void x86_pmu_start(struct perf_event *event, int flags) x86_pmu_start() argument
1220 int idx = event->hw.idx; x86_pmu_start()
1222 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) x86_pmu_start()
1229 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); x86_pmu_start()
1230 x86_perf_event_set_period(event); x86_pmu_start()
1233 event->hw.state = 0; x86_pmu_start()
1235 cpuc->events[idx] = event; x86_pmu_start()
1238 x86_pmu.enable(event); x86_pmu_start()
1239 perf_event_update_userpage(event); x86_pmu_start()
1302 void x86_pmu_stop(struct perf_event *event, int flags) x86_pmu_stop() argument
1305 struct hw_perf_event *hwc = &event->hw; x86_pmu_stop()
1308 x86_pmu.disable(event); x86_pmu_stop()
1316 * Drain the remaining delta count out of an event x86_pmu_stop()
1319 x86_perf_event_update(event); x86_pmu_stop()
1324 static void x86_pmu_del(struct perf_event *event, int flags) x86_pmu_del() argument
1330 * event is descheduled x86_pmu_del()
1332 event->hw.flags &= ~PERF_X86_EVENT_COMMITTED; x86_pmu_del()
1340 * an event added during that same TXN. x86_pmu_del()
1348 x86_pmu_stop(event, PERF_EF_UPDATE); x86_pmu_del()
1351 if (event == cpuc->event_list[i]) x86_pmu_del()
1358 /* If we have a newly added event, make sure to decrease n_added. */ x86_pmu_del()
1363 x86_pmu.put_event_constraints(cpuc, event); x86_pmu_del()
1372 perf_event_update_userpage(event); x86_pmu_del()
1379 struct perf_event *event; x86_pmu_handle_irq() local
1407 event = cpuc->events[idx]; x86_pmu_handle_irq()
1409 val = x86_perf_event_update(event); x86_pmu_handle_irq()
1414 * event overflow x86_pmu_handle_irq()
1417 perf_sample_data_init(&data, 0, event->hw.last_period); x86_pmu_handle_irq()
1419 if (!x86_perf_event_set_period(event)) x86_pmu_handle_irq()
1422 if (perf_event_overflow(event, &data, regs)) x86_pmu_handle_irq()
1423 x86_pmu_stop(event, 0); x86_pmu_handle_irq()
1528 * sample via a hrtimer based software event): pmu_check_apic()
1638 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) x86_event_sysfs_show() argument
1652 ret = sprintf(page, "event=0x%02llx", event); x86_event_sysfs_show()
1746 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); init_hw_perf_events()
1755 static inline void x86_pmu_read(struct perf_event *event) x86_pmu_read() argument
1757 x86_perf_event_update(event); x86_pmu_read()
1849 * a fake_cpuc is used to validate event groups. Due to
1853 * able to catch this when the last event gets added to
1885 * validate that we can schedule this event
1887 static int validate_event(struct perf_event *event) validate_event() argument
1897 c = x86_pmu.get_event_constraints(fake_cpuc, -1, event); validate_event()
1903 x86_pmu.put_event_constraints(fake_cpuc, event); validate_event()
1911 * validate a single event group
1921 static int validate_group(struct perf_event *event) validate_group() argument
1923 struct perf_event *leader = event->group_leader; validate_group()
1931 * the event is not yet connected with its validate_group()
1933 * existing siblings, then add the new event validate_group()
1941 n = collect_events(fake_cpuc, event, false); validate_group()
1954 static int x86_pmu_event_init(struct perf_event *event) x86_pmu_event_init() argument
1959 switch (event->attr.type) { x86_pmu_event_init()
1969 err = __x86_pmu_event_init(event); x86_pmu_event_init()
1972 * we temporarily connect event to its pmu x86_pmu_event_init()
1974 * it as an x86 event using is_x86_event() x86_pmu_event_init()
1976 tmp = event->pmu; x86_pmu_event_init()
1977 event->pmu = &pmu; x86_pmu_event_init()
1979 if (event->group_leader != event) x86_pmu_event_init()
1980 err = validate_group(event); x86_pmu_event_init()
1982 err = validate_event(event); x86_pmu_event_init()
1984 event->pmu = tmp; x86_pmu_event_init()
1987 if (event->destroy) x86_pmu_event_init()
1988 event->destroy(event); x86_pmu_event_init()
1992 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; x86_pmu_event_init()
2003 static void x86_pmu_event_mapped(struct perf_event *event) x86_pmu_event_mapped() argument
2005 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) x86_pmu_event_mapped()
2012 static void x86_pmu_event_unmapped(struct perf_event *event) x86_pmu_event_unmapped() argument
2017 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) x86_pmu_event_unmapped()
2024 static int x86_pmu_event_idx(struct perf_event *event) x86_pmu_event_idx() argument
2026 int idx = event->hw.idx; x86_pmu_event_idx()
2028 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) x86_pmu_event_idx()
2066 * perf-event-bypassing mode. This path is extremely slow, set_attr_rdpmc()
2138 void arch_perf_update_userpage(struct perf_event *event, arch_perf_update_userpage() argument
2146 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); arch_perf_update_userpage()
2167 if (event->clock == &local_clock) { arch_perf_update_userpage()
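The x86_pmu_hw_config() hits above show how the privilege filters and a raw event selector are folded into event->hw.config before the counter is programmed. A minimal sketch of that composition under the same constant names (an illustration, not the kernel function itself):

static u64 build_evtsel(struct perf_event_attr *attr)
{
	/* always request a PMI on counter overflow */
	u64 config = ARCH_PERFMON_EVENTSEL_INT;

	if (!attr->exclude_user)
		config |= ARCH_PERFMON_EVENTSEL_USR;	/* count in user mode */
	if (!attr->exclude_kernel)
		config |= ARCH_PERFMON_EVENTSEL_OS;	/* count in kernel mode */

	/* raw events contribute the event select, umask, inv, cmask, ... bits */
	if (attr->type == PERF_TYPE_RAW)
		config |= attr->config & X86_RAW_EVENT_MASK;

	return config;
}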
H A Dperf_event_intel_uncore.c80 struct uncore_event_desc *event = uncore_event_show() local
82 return sprintf(buf, "%s", event->config); uncore_event_show()
85 struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) uncore_event_to_pmu() argument
87 return container_of(event->pmu, struct intel_uncore_pmu, pmu); uncore_event_to_pmu()
115 struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) uncore_event_to_box() argument
118 * perf core schedules events on the basis of cpu, uncore events are uncore_event_to_box()
121 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); uncore_event_to_box()
124 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) uncore_msr_read_counter() argument
128 rdmsrl(event->hw.event_base, count); uncore_msr_read_counter()
137 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) uncore_get_constraint() argument
140 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; uncore_get_constraint()
141 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; uncore_get_constraint()
174 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) uncore_put_constraint() argument
177 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; uncore_put_constraint()
181 * takes care of events which do not use an extra shared reg. uncore_put_constraint()
183 * Also, if this is a fake box we shouldn't touch any event state uncore_put_constraint()
210 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx) uncore_assign_hw_event() argument
212 struct hw_perf_event *hwc = &event->hw; uncore_assign_hw_event()
227 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) uncore_perf_event_update() argument
232 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) uncore_perf_event_update()
237 /* the hrtimer might modify the previous event value */ uncore_perf_event_update()
239 prev_count = local64_read(&event->hw.prev_count); uncore_perf_event_update()
240 new_count = uncore_read_counter(box, event); uncore_perf_event_update()
241 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) uncore_perf_event_update()
247 local64_add(delta, &event->count); uncore_perf_event_update()
258 struct perf_event *event; uncore_pmu_hrtimer() local
272 * handle boxes with an active event list as opposed to active uncore_pmu_hrtimer()
275 list_for_each_entry(event, &box->active_list, active_entry) { uncore_pmu_hrtimer()
276 uncore_perf_event_update(box, event); uncore_pmu_hrtimer()
336 static int uncore_pmu_event_init(struct perf_event *event);
338 static bool is_uncore_event(struct perf_event *event) is_uncore_event() argument
340 return event->pmu->event_init == uncore_pmu_event_init; is_uncore_event()
346 struct perf_event *event; uncore_collect_events() local
366 list_for_each_entry(event, &leader->sibling_list, group_entry) { uncore_collect_events()
367 if (!is_uncore_event(event) || uncore_collect_events()
368 event->state <= PERF_EVENT_STATE_OFF) uncore_collect_events()
374 box->event_list[n] = event; uncore_collect_events()
381 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) uncore_get_event_constraint() argument
387 c = type->ops->get_constraint(box, event); uncore_get_event_constraint()
392 if (event->attr.config == UNCORE_FIXED_EVENT) uncore_get_event_constraint()
397 if ((event->hw.config & c->cmask) == c->code) uncore_get_event_constraint()
405 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event) uncore_put_event_constraint() argument
408 box->pmu->type->ops->put_constraint(box, event); uncore_put_event_constraint()
460 static void uncore_pmu_event_start(struct perf_event *event, int flags) uncore_pmu_event_start() argument
462 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_start()
463 int idx = event->hw.idx; uncore_pmu_event_start()
465 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) uncore_pmu_event_start()
471 event->hw.state = 0; uncore_pmu_event_start()
472 box->events[idx] = event; uncore_pmu_event_start()
476 local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); uncore_pmu_event_start()
477 uncore_enable_event(box, event); uncore_pmu_event_start()
485 static void uncore_pmu_event_stop(struct perf_event *event, int flags) uncore_pmu_event_stop() argument
487 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_stop()
488 struct hw_perf_event *hwc = &event->hw; uncore_pmu_event_stop()
491 uncore_disable_event(box, event); uncore_pmu_event_stop()
505 * Drain the remaining delta count out of an event uncore_pmu_event_stop()
508 uncore_perf_event_update(box, event); uncore_pmu_event_stop()
513 static int uncore_pmu_event_add(struct perf_event *event, int flags) uncore_pmu_event_add() argument
515 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_add()
516 struct hw_perf_event *hwc = &event->hw; uncore_pmu_event_add()
523 ret = n = uncore_collect_events(box, event, false); uncore_pmu_event_add()
537 event = box->event_list[i]; uncore_pmu_event_add()
538 hwc = &event->hw; uncore_pmu_event_add()
550 uncore_pmu_event_stop(event, PERF_EF_UPDATE); uncore_pmu_event_add()
555 event = box->event_list[i]; uncore_pmu_event_add()
556 hwc = &event->hw; uncore_pmu_event_add()
560 uncore_assign_hw_event(box, event, assign[i]); uncore_pmu_event_add()
567 uncore_pmu_event_start(event, 0); uncore_pmu_event_add()
574 static void uncore_pmu_event_del(struct perf_event *event, int flags) uncore_pmu_event_del() argument
576 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_del()
579 uncore_pmu_event_stop(event, PERF_EF_UPDATE); uncore_pmu_event_del()
582 if (event == box->event_list[i]) { uncore_pmu_event_del()
583 uncore_put_event_constraint(box, event); uncore_pmu_event_del()
593 event->hw.idx = -1; uncore_pmu_event_del()
594 event->hw.last_tag = ~0ULL; uncore_pmu_event_del()
597 void uncore_pmu_event_read(struct perf_event *event) uncore_pmu_event_read() argument
599 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_read()
600 uncore_perf_event_update(box, event); uncore_pmu_event_read()
608 struct perf_event *event) uncore_validate_group()
610 struct perf_event *leader = event->group_leader; uncore_validate_group()
620 * the event is not yet connected with its uncore_validate_group()
622 * existing siblings, then add the new event uncore_validate_group()
630 n = uncore_collect_events(fake_box, event, false); uncore_validate_group()
642 static int uncore_pmu_event_init(struct perf_event *event) uncore_pmu_event_init() argument
646 struct hw_perf_event *hwc = &event->hw; uncore_pmu_event_init()
649 if (event->attr.type != event->pmu->type) uncore_pmu_event_init()
652 pmu = uncore_event_to_pmu(event); uncore_pmu_event_init()
661 if (event->attr.exclude_user || event->attr.exclude_kernel || uncore_pmu_event_init()
662 event->attr.exclude_hv || event->attr.exclude_idle) uncore_pmu_event_init()
673 if (event->cpu < 0) uncore_pmu_event_init()
675 box = uncore_pmu_to_box(pmu, event->cpu); uncore_pmu_event_init()
678 event->cpu = box->cpu; uncore_pmu_event_init()
680 event->hw.idx = -1; uncore_pmu_event_init()
681 event->hw.last_tag = ~0ULL; uncore_pmu_event_init()
682 event->hw.extra_reg.idx = EXTRA_REG_NONE; uncore_pmu_event_init()
683 event->hw.branch_reg.idx = EXTRA_REG_NONE; uncore_pmu_event_init()
685 if (event->attr.config == UNCORE_FIXED_EVENT) { uncore_pmu_event_init()
696 /* fixed counters have event field hardcoded to zero */ uncore_pmu_event_init()
699 hwc->config = event->attr.config & pmu->type->event_mask; uncore_pmu_event_init()
701 ret = pmu->type->ops->hw_config(box, event); uncore_pmu_event_init()
707 if (event->group_leader != event) uncore_pmu_event_init()
708 ret = uncore_validate_group(pmu, event); uncore_pmu_event_init()
607 uncore_validate_group(struct intel_uncore_pmu *pmu, struct perf_event *event) uncore_validate_group() argument
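uncore_perf_event_update() above uses an xchg-based variant of the usual delta accumulation: read the raw counter, publish it as the new prev_count in one atomic step, and retry if a concurrent updater (the hrtimer handler runs the same path) raced in between. A condensed sketch of the pattern; the real function also derives the shift from the fixed vs. generic counter width:

static void uncore_fold_delta(struct intel_uncore_box *box,
			      struct perf_event *event, int shift)
{
	u64 prev, now, delta;

again:
	prev = local64_read(&event->hw.prev_count);
	now  = uncore_read_counter(box, event);
	/* publish the new snapshot; if someone raced us, start over */
	if (local64_xchg(&event->hw.prev_count, now) != prev)
		goto again;

	/* sign-extend through the hardware counter width before differencing */
	delta = (now << shift) - (prev << shift);
	delta >>= shift;
	local64_add(delta, &event->count);
}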
H A Dperf_event_amd_uncore.c54 static bool is_nb_event(struct perf_event *event) is_nb_event() argument
56 return event->pmu->type == amd_nb_pmu.type; is_nb_event()
59 static bool is_l2_event(struct perf_event *event) is_l2_event() argument
61 return event->pmu->type == amd_l2_pmu.type; is_l2_event()
64 static struct amd_uncore *event_to_amd_uncore(struct perf_event *event) event_to_amd_uncore() argument
66 if (is_nb_event(event) && amd_uncore_nb) event_to_amd_uncore()
67 return *per_cpu_ptr(amd_uncore_nb, event->cpu); event_to_amd_uncore()
68 else if (is_l2_event(event) && amd_uncore_l2) event_to_amd_uncore()
69 return *per_cpu_ptr(amd_uncore_l2, event->cpu); event_to_amd_uncore()
74 static void amd_uncore_read(struct perf_event *event) amd_uncore_read() argument
76 struct hw_perf_event *hwc = &event->hw; amd_uncore_read()
90 local64_add(delta, &event->count); amd_uncore_read()
93 static void amd_uncore_start(struct perf_event *event, int flags) amd_uncore_start() argument
95 struct hw_perf_event *hwc = &event->hw; amd_uncore_start()
102 perf_event_update_userpage(event); amd_uncore_start()
105 static void amd_uncore_stop(struct perf_event *event, int flags) amd_uncore_stop() argument
107 struct hw_perf_event *hwc = &event->hw; amd_uncore_stop()
113 amd_uncore_read(event); amd_uncore_stop()
118 static int amd_uncore_add(struct perf_event *event, int flags) amd_uncore_add() argument
121 struct amd_uncore *uncore = event_to_amd_uncore(event); amd_uncore_add()
122 struct hw_perf_event *hwc = &event->hw; amd_uncore_add()
125 if (hwc->idx != -1 && uncore->events[hwc->idx] == event) amd_uncore_add()
129 if (uncore->events[i] == event) { amd_uncore_add()
138 if (cmpxchg(&uncore->events[i], NULL, event) == NULL) { amd_uncore_add()
154 amd_uncore_start(event, PERF_EF_RELOAD); amd_uncore_add()
159 static void amd_uncore_del(struct perf_event *event, int flags) amd_uncore_del() argument
162 struct amd_uncore *uncore = event_to_amd_uncore(event); amd_uncore_del()
163 struct hw_perf_event *hwc = &event->hw; amd_uncore_del()
165 amd_uncore_stop(event, PERF_EF_UPDATE); amd_uncore_del()
168 if (cmpxchg(&uncore->events[i], event, NULL) == event) amd_uncore_del()
175 static int amd_uncore_event_init(struct perf_event *event) amd_uncore_event_init() argument
178 struct hw_perf_event *hwc = &event->hw; amd_uncore_event_init()
180 if (event->attr.type != event->pmu->type) amd_uncore_event_init()
186 * core, however, event counts generated by processes running on other amd_uncore_event_init()
190 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) amd_uncore_event_init()
194 if (event->attr.exclude_user || event->attr.exclude_kernel || amd_uncore_event_init()
195 event->attr.exclude_host || event->attr.exclude_guest) amd_uncore_event_init()
199 hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; amd_uncore_event_init()
202 if (event->cpu < 0) amd_uncore_event_init()
205 uncore = event_to_amd_uncore(event); amd_uncore_event_init()
213 event->cpu = uncore->cpu; amd_uncore_event_init()
245 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
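amd_uncore_add() above claims a counter slot with cmpxchg() instead of a lock, because several CPUs sharing the same northbridge or L2 uncore may install events into the same struct amd_uncore concurrently. A reduced sketch of the claim loop (num_counters is assumed here to be the per-uncore counter count):

static int claim_uncore_counter(struct amd_uncore *uncore, struct perf_event *event)
{
	int i;

	/* already installed? reuse the existing slot */
	for (i = 0; i < uncore->num_counters; i++)
		if (uncore->events[i] == event)
			return i;

	/* otherwise atomically grab the first free slot */
	for (i = 0; i < uncore->num_counters; i++)
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL)
			return i;

	return -EBUSY;	/* all counters are taken */
}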
H A Dperf_event_intel_cstate.c186 PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
187 PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
188 PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
189 PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");
207 DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
323 PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
324 PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
325 PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
326 PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
327 PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
328 PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
329 PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");
350 DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
385 static int cstate_pmu_event_init(struct perf_event *event) cstate_pmu_event_init() argument
387 u64 cfg = event->attr.config; cstate_pmu_event_init()
390 if (event->attr.type != event->pmu->type) cstate_pmu_event_init()
394 if (event->attr.exclude_user || cstate_pmu_event_init()
395 event->attr.exclude_kernel || cstate_pmu_event_init()
396 event->attr.exclude_hv || cstate_pmu_event_init()
397 event->attr.exclude_idle || cstate_pmu_event_init()
398 event->attr.exclude_host || cstate_pmu_event_init()
399 event->attr.exclude_guest || cstate_pmu_event_init()
400 event->attr.sample_period) /* no sampling */ cstate_pmu_event_init()
403 if (event->pmu == &cstate_core_pmu) { cstate_pmu_event_init()
408 event->hw.event_base = core_msr[cfg].msr; cstate_pmu_event_init()
409 } else if (event->pmu == &cstate_pkg_pmu) { cstate_pmu_event_init()
414 event->hw.event_base = pkg_msr[cfg].msr; cstate_pmu_event_init()
419 event->hw.config = cfg; cstate_pmu_event_init()
420 event->hw.idx = -1; cstate_pmu_event_init()
425 static inline u64 cstate_pmu_read_counter(struct perf_event *event) cstate_pmu_read_counter() argument
429 rdmsrl(event->hw.event_base, val); cstate_pmu_read_counter()
433 static void cstate_pmu_event_update(struct perf_event *event) cstate_pmu_event_update() argument
435 struct hw_perf_event *hwc = &event->hw; cstate_pmu_event_update()
440 new_raw_count = cstate_pmu_read_counter(event); cstate_pmu_event_update()
446 local64_add(new_raw_count - prev_raw_count, &event->count); cstate_pmu_event_update()
449 static void cstate_pmu_event_start(struct perf_event *event, int mode) cstate_pmu_event_start() argument
451 local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event)); cstate_pmu_event_start()
454 static void cstate_pmu_event_stop(struct perf_event *event, int mode) cstate_pmu_event_stop() argument
456 cstate_pmu_event_update(event); cstate_pmu_event_stop()
459 static void cstate_pmu_event_del(struct perf_event *event, int mode) cstate_pmu_event_del() argument
461 cstate_pmu_event_stop(event, PERF_EF_UPDATE); cstate_pmu_event_del()
464 static int cstate_pmu_event_add(struct perf_event *event, int mode) cstate_pmu_event_add() argument
467 cstate_pmu_event_start(event, mode); cstate_pmu_event_add()
H A Dperf_event_intel_rapl.c19 * event: rapl_energy_cores
23 * event: rapl_energy_pkg
27 * event: rapl_energy_dram
31 * event: rapl_energy_gpu
94 * event code: LSB 8 bits, passed in attr->config
137 static inline u64 rapl_read_counter(struct perf_event *event) rapl_read_counter() argument
140 rdmsrl(event->hw.event_base, raw); rapl_read_counter()
168 static u64 rapl_event_update(struct perf_event *event) rapl_event_update() argument
170 struct hw_perf_event *hwc = &event->hw; rapl_event_update()
177 rdmsrl(event->hw.event_base, new_raw_count); rapl_event_update()
188 * (event-)time and add that to the generic event. rapl_event_update()
196 sdelta = rapl_scale(delta, event->hw.config); rapl_event_update()
198 local64_add(sdelta, &event->count); rapl_event_update()
217 struct perf_event *event; rapl_hrtimer_handle() local
225 list_for_each_entry(event, &pmu->active_list, active_entry) { rapl_hrtimer_handle()
226 rapl_event_update(event); rapl_hrtimer_handle()
245 struct perf_event *event) __rapl_pmu_event_start()
247 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) __rapl_pmu_event_start()
250 event->hw.state = 0; __rapl_pmu_event_start()
252 list_add_tail(&event->active_entry, &pmu->active_list); __rapl_pmu_event_start()
254 local64_set(&event->hw.prev_count, rapl_read_counter(event)); __rapl_pmu_event_start()
261 static void rapl_pmu_event_start(struct perf_event *event, int mode) rapl_pmu_event_start() argument
267 __rapl_pmu_event_start(pmu, event); rapl_pmu_event_start()
271 static void rapl_pmu_event_stop(struct perf_event *event, int mode) rapl_pmu_event_stop() argument
274 struct hw_perf_event *hwc = &event->hw; rapl_pmu_event_stop()
279 /* mark event as deactivated and stopped */ rapl_pmu_event_stop()
286 list_del(&event->active_entry); rapl_pmu_event_stop()
295 * Drain the remaining delta count out of an event rapl_pmu_event_stop()
298 rapl_event_update(event); rapl_pmu_event_stop()
305 static int rapl_pmu_event_add(struct perf_event *event, int mode) rapl_pmu_event_add() argument
308 struct hw_perf_event *hwc = &event->hw; rapl_pmu_event_add()
316 __rapl_pmu_event_start(pmu, event); rapl_pmu_event_add()
323 static void rapl_pmu_event_del(struct perf_event *event, int flags) rapl_pmu_event_del() argument
325 rapl_pmu_event_stop(event, PERF_EF_UPDATE); rapl_pmu_event_del()
328 static int rapl_pmu_event_init(struct perf_event *event) rapl_pmu_event_init() argument
330 u64 cfg = event->attr.config & RAPL_EVENT_MASK; rapl_pmu_event_init()
334 if (event->attr.type != rapl_pmu_class.type) rapl_pmu_event_init()
338 if (event->attr.config & ~RAPL_EVENT_MASK) rapl_pmu_event_init()
342 * check event is known (determines counter) rapl_pmu_event_init()
364 /* check event supported */ rapl_pmu_event_init()
369 if (event->attr.exclude_user || rapl_pmu_event_init()
370 event->attr.exclude_kernel || rapl_pmu_event_init()
371 event->attr.exclude_hv || rapl_pmu_event_init()
372 event->attr.exclude_idle || rapl_pmu_event_init()
373 event->attr.exclude_host || rapl_pmu_event_init()
374 event->attr.exclude_guest || rapl_pmu_event_init()
375 event->attr.sample_period) /* no sampling */ rapl_pmu_event_init()
379 event->hw.event_base = msr; rapl_pmu_event_init()
380 event->hw.config = cfg; rapl_pmu_event_init()
381 event->hw.idx = bit; rapl_pmu_event_init()
386 static void rapl_pmu_event_read(struct perf_event *event) rapl_pmu_event_read() argument
388 rapl_event_update(event); rapl_pmu_event_read()
421 RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
422 RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
423 RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
424 RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
504 DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
244 __rapl_pmu_event_start(struct rapl_pmu *pmu, struct perf_event *event) __rapl_pmu_event_start() argument
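The event strings above (energy-cores is event=0x01, energy-pkg is event=0x02, and so on) together with the event format attribute (config:0-7) describe how user space selects a RAPL counter. A rough user-space sketch, assuming the driver is exposed under /sys/bus/event_source/devices/power/ as is typical for this PMU; RAPL counters are socket-wide, so the event is opened with pid -1 on a specific CPU:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int open_rapl_pkg(void)
{
	struct perf_event_attr attr;
	FILE *f = fopen("/sys/bus/event_source/devices/power/type", "r");
	int type;

	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;	/* dynamic type of the RAPL PMU */
	attr.config = 0x02;	/* event=0x02 -> energy-pkg, per the strings above */

	/* counting only: pid = -1 (system wide), cpu = 0, no group, no flags */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}

int main(void)
{
	uint64_t count;
	int fd = open_rapl_pkg();

	if (fd < 0)
		return 1;
	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("raw energy-pkg count: %llu\n", (unsigned long long)count);
	return 0;
}

The value read is in the driver's internal unit; the matching events/energy-pkg.scale and .unit files advertise the factor for converting it to Joules. Opening a system-wide event like this normally requires CAP_SYS_ADMIN or a permissive perf_event_paranoid setting.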
H A Dperf_event_intel_cqm.c48 * Also protects event->hw.cqm_rmid
278 /* If not task event, we're machine wide */ __match_event()
289 * Are we an inherited event? __match_event()
298 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event) event_to_cgroup() argument
300 if (event->attach_state & PERF_ATTACH_TASK) event_to_cgroup()
301 return perf_cgroup_from_task(event->hw.target, event->ctx); event_to_cgroup()
303 return event->cgrp; event_to_cgroup()
326 * event at a time. __conflict_event()
356 * Ensure neither event is part of the other's cgroup __conflict_event()
405 struct perf_event *event; intel_cqm_xchg_rmid() local
428 list_for_each_entry(event, head, hw.cqm_group_entry) intel_cqm_xchg_rmid()
429 event->hw.cqm_rmid = rmid; intel_cqm_xchg_rmid()
474 struct perf_event *leader, *event; intel_cqm_sched_in_event() local
480 event = leader; intel_cqm_sched_in_event()
482 list_for_each_entry_continue(event, &cache_groups, intel_cqm_sched_in_event()
484 if (__rmid_valid(event->hw.cqm_rmid)) intel_cqm_sched_in_event()
487 if (__conflict_event(event, leader)) intel_cqm_sched_in_event()
490 intel_cqm_xchg_rmid(event, rmid); intel_cqm_sched_in_event()
641 * Deallocate the RMIDs from any events that conflict with @event, and
644 static void intel_cqm_sched_out_conflicting_events(struct perf_event *event) intel_cqm_sched_out_conflicting_events() argument
652 if (group == event) intel_cqm_sched_out_conflicting_events()
664 * No conflict? No problem! Leave the event alone. intel_cqm_sched_out_conflicting_events()
666 if (!__conflict_event(group, event)) intel_cqm_sched_out_conflicting_events()
725 * We have some event groups, but they all have RMIDs assigned __intel_cqm_rmid_rotate()
735 * We have more event groups without RMIDs than available RMIDs, __intel_cqm_rmid_rotate()
736 * or we have event groups that conflict with the ones currently __intel_cqm_rmid_rotate()
740 * cache_groups. The first event group without an RMID then gets __intel_cqm_rmid_rotate()
783 * event needs an RMID anyway. __intel_cqm_rmid_rotate()
810 * threshold skews the event data (because we reuse __intel_cqm_rmid_rotate()
845 static void intel_cqm_setup_event(struct perf_event *event, intel_cqm_setup_event() argument
855 if (__match_event(iter, event)) { intel_cqm_setup_event()
857 event->hw.cqm_rmid = rmid; intel_cqm_setup_event()
866 if (__conflict_event(iter, event) && __rmid_valid(rmid)) intel_cqm_setup_event()
875 event->hw.cqm_rmid = rmid; intel_cqm_setup_event()
878 static void intel_cqm_event_read(struct perf_event *event) intel_cqm_event_read() argument
887 if (event->cpu == -1) intel_cqm_event_read()
891 rmid = event->hw.cqm_rmid; intel_cqm_event_read()
904 local64_set(&event->count, val); intel_cqm_event_read()
922 static inline bool cqm_group_leader(struct perf_event *event) cqm_group_leader() argument
924 return !list_empty(&event->hw.cqm_groups_entry); cqm_group_leader()
927 static u64 intel_cqm_event_count(struct perf_event *event) intel_cqm_event_count() argument
939 if (event->cpu != -1) intel_cqm_event_count()
940 return __perf_event_count(event); intel_cqm_event_count()
951 if (!cqm_group_leader(event)) intel_cqm_event_count()
967 * Speculatively perform the read, since @event might be intel_cqm_event_count()
970 * check @event's RMID afterwards, and if it has changed, intel_cqm_event_count()
973 rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid); intel_cqm_event_count()
981 if (event->hw.cqm_rmid == rr.rmid) intel_cqm_event_count()
982 local64_set(&event->count, atomic64_read(&rr.value)); intel_cqm_event_count()
985 return __perf_event_count(event); intel_cqm_event_count()
988 static void intel_cqm_event_start(struct perf_event *event, int mode) intel_cqm_event_start() argument
991 u32 rmid = event->hw.cqm_rmid; intel_cqm_event_start()
993 if (!(event->hw.cqm_state & PERF_HES_STOPPED)) intel_cqm_event_start()
996 event->hw.cqm_state &= ~PERF_HES_STOPPED; intel_cqm_event_start()
1009 static void intel_cqm_event_stop(struct perf_event *event, int mode) intel_cqm_event_stop() argument
1013 if (event->hw.cqm_state & PERF_HES_STOPPED) intel_cqm_event_stop()
1016 event->hw.cqm_state |= PERF_HES_STOPPED; intel_cqm_event_stop()
1018 intel_cqm_event_read(event); intel_cqm_event_stop()
1028 static int intel_cqm_event_add(struct perf_event *event, int mode) intel_cqm_event_add() argument
1035 event->hw.cqm_state = PERF_HES_STOPPED; intel_cqm_event_add()
1036 rmid = event->hw.cqm_rmid; intel_cqm_event_add()
1039 intel_cqm_event_start(event, mode); intel_cqm_event_add()
1046 static void intel_cqm_event_destroy(struct perf_event *event) intel_cqm_event_destroy() argument
1053 * If there's another event in this group... intel_cqm_event_destroy()
1055 if (!list_empty(&event->hw.cqm_group_entry)) { intel_cqm_event_destroy()
1056 group_other = list_first_entry(&event->hw.cqm_group_entry, intel_cqm_event_destroy()
1059 list_del(&event->hw.cqm_group_entry); intel_cqm_event_destroy()
1065 if (cqm_group_leader(event)) { intel_cqm_event_destroy()
1071 list_replace(&event->hw.cqm_groups_entry, intel_cqm_event_destroy()
1074 u32 rmid = event->hw.cqm_rmid; intel_cqm_event_destroy()
1078 list_del(&event->hw.cqm_groups_entry); intel_cqm_event_destroy()
1085 static int intel_cqm_event_init(struct perf_event *event) intel_cqm_event_init() argument
1090 if (event->attr.type != intel_cqm_pmu.type) intel_cqm_event_init()
1093 if (event->attr.config & ~QOS_EVENT_MASK) intel_cqm_event_init()
1097 if (event->attr.exclude_user || intel_cqm_event_init()
1098 event->attr.exclude_kernel || intel_cqm_event_init()
1099 event->attr.exclude_hv || intel_cqm_event_init()
1100 event->attr.exclude_idle || intel_cqm_event_init()
1101 event->attr.exclude_host || intel_cqm_event_init()
1102 event->attr.exclude_guest || intel_cqm_event_init()
1103 event->attr.sample_period) /* no sampling */ intel_cqm_event_init()
1106 INIT_LIST_HEAD(&event->hw.cqm_group_entry); intel_cqm_event_init()
1107 INIT_LIST_HEAD(&event->hw.cqm_groups_entry); intel_cqm_event_init()
1109 event->destroy = intel_cqm_event_destroy; intel_cqm_event_init()
1114 intel_cqm_setup_event(event, &group); intel_cqm_event_init()
1117 list_add_tail(&event->hw.cqm_group_entry, intel_cqm_event_init()
1120 list_add_tail(&event->hw.cqm_groups_entry, intel_cqm_event_init()
1128 * every event in a group to save on needless work. intel_cqm_event_init()
1130 if (!__rmid_valid(event->hw.cqm_rmid)) intel_cqm_event_init()
1142 EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
1162 PMU_FORMAT_ATTR(event, "config:0-7");
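intel_cqm_event_count() above reads occupancy speculatively: it snapshots the event's RMID, performs the read with that RMID, and only publishes the result if the RMID is still attached to the event, since rotation may have reassigned it in the meantime. A stripped-down sketch of that check, where read_llc_occupancy() is a placeholder for the driver's MSR-based read:

static void cqm_read_occupancy(struct perf_event *event)
{
	u32 rmid = ACCESS_ONCE(event->hw.cqm_rmid);	/* snapshot before reading */
	u64 val;

	if (!__rmid_valid(rmid))
		return;

	val = read_llc_occupancy(rmid);			/* placeholder helper */

	/* only publish if RMID rotation did not reassign it meanwhile */
	if (ACCESS_ONCE(event->hw.cqm_rmid) == rmid)
		local64_set(&event->count, val);
}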
H A Dperf_event_amd_ibs.c40 struct perf_event *event; member in struct:cpu_perf_ibs
116 perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width) perf_event_try_update() argument
118 struct hw_perf_event *hwc = &event->hw; perf_event_try_update()
124 * Careful: an NMI might modify the previous event value. perf_event_try_update()
128 * count to the generic event atomically: perf_event_try_update()
138 * (event-)time and add that to the generic event. perf_event_try_update()
146 local64_add(delta, &event->count); perf_event_try_update()
165 * Use IBS for precise event sampling:
181 static int perf_ibs_precise_event(struct perf_event *event, u64 *config) perf_ibs_precise_event() argument
183 switch (event->attr.precise_ip) { perf_ibs_precise_event()
193 switch (event->attr.type) { perf_ibs_precise_event()
195 switch (event->attr.config) { perf_ibs_precise_event()
202 switch (event->attr.config) { perf_ibs_precise_event()
227 static int perf_ibs_init(struct perf_event *event) perf_ibs_init() argument
229 struct hw_perf_event *hwc = &event->hw; perf_ibs_init()
234 perf_ibs = get_ibs_pmu(event->attr.type); perf_ibs_init()
236 config = event->attr.config; perf_ibs_init()
239 ret = perf_ibs_precise_event(event, &config); perf_ibs_init()
244 if (event->pmu != &perf_ibs->pmu) perf_ibs_init()
247 if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp)) perf_ibs_init()
257 if (!event->attr.sample_freq && hwc->sample_period & 0x0f) perf_ibs_init()
270 event->attr.sample_period = max_cnt << 4; perf_ibs_init()
271 hwc->sample_period = event->attr.sample_period; perf_ibs_init()
321 perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event, perf_ibs_event_update() argument
331 while (!perf_event_try_update(event, count, 64)) { perf_ibs_event_update()
332 rdmsrl(event->hw.config_base, *config); perf_ibs_event_update()
361 * the event while stopping it and then reset the state when starting
365 static void perf_ibs_start(struct perf_event *event, int flags) perf_ibs_start() argument
367 struct hw_perf_event *hwc = &event->hw; perf_ibs_start()
368 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); perf_ibs_start()
382 perf_event_update_userpage(event); perf_ibs_start()
385 static void perf_ibs_stop(struct perf_event *event, int flags) perf_ibs_stop() argument
387 struct hw_perf_event *hwc = &event->hw; perf_ibs_stop()
388 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); perf_ibs_stop()
416 perf_ibs_event_update(perf_ibs, event, &config); perf_ibs_stop()
420 static int perf_ibs_add(struct perf_event *event, int flags) perf_ibs_add() argument
422 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); perf_ibs_add()
428 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; perf_ibs_add()
430 pcpu->event = event; perf_ibs_add()
433 perf_ibs_start(event, PERF_EF_RELOAD); perf_ibs_add()
438 static void perf_ibs_del(struct perf_event *event, int flags) perf_ibs_del() argument
440 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); perf_ibs_del()
446 perf_ibs_stop(event, PERF_EF_UPDATE); perf_ibs_del()
448 pcpu->event = NULL; perf_ibs_del()
450 perf_event_update_userpage(event); perf_ibs_del()
453 static void perf_ibs_read(struct perf_event *event) { } perf_ibs_read() argument
519 struct perf_event *event = pcpu->event; perf_ibs_handle_irq() local
520 struct hw_perf_event *hwc = &event->hw; perf_ibs_handle_irq()
546 perf_ibs_event_update(perf_ibs, event, config); perf_ibs_handle_irq()
555 if (event->attr.sample_type & PERF_SAMPLE_RAW) perf_ibs_handle_irq()
568 if (event->attr.sample_type & PERF_SAMPLE_RAW) { perf_ibs_handle_irq()
593 if (event->attr.sample_type & PERF_SAMPLE_RAW) { perf_ibs_handle_irq()
599 throttle = perf_event_overflow(event, &data, &regs); perf_ibs_handle_irq()
606 perf_event_update_userpage(event); perf_ibs_handle_irq()
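perf_event_try_update() above is the lock-free fold-in used throughout these drivers: the new raw value is published with cmpxchg, so an NMI that updates the same event in the middle simply makes this attempt fail, and the caller re-reads and retries (as the while (!perf_event_try_update(...)) loop above does). A condensed sketch of the pattern:

static int try_fold_delta(struct perf_event *event, u64 new_raw, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw = local64_read(&hwc->prev_count);
	s64 delta;

	/* an NMI may have republished prev_count already; if so, report failure */
	if (local64_cmpxchg(&hwc->prev_count, prev_raw, new_raw) != prev_raw)
		return 0;

	/* sign-extend to the hardware counter width, then take the difference */
	delta = (new_raw << shift) - (prev_raw << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	return 1;
}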
H A Dperf_event_intel_uncore_nhmex.c4 /* NHM-EX event control */
127 * use bits 9~13 to select the event if the 7th bit is not set,
128 * otherwise use bits 19~21 to select the event.
189 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
190 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
234 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_uncore_msr_disable_event() argument
236 wrmsrl(event->hw.config_base, 0); nhmex_uncore_msr_disable_event()
239 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_uncore_msr_enable_event() argument
241 struct hw_perf_event *hwc = &event->hw; nhmex_uncore_msr_enable_event()
322 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
343 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) nhmex_bbox_hw_config() argument
345 struct hw_perf_event *hwc = &event->hw; nhmex_bbox_hw_config()
365 reg1->config = event->attr.config1; nhmex_bbox_hw_config()
366 reg2->config = event->attr.config2; nhmex_bbox_hw_config()
370 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_bbox_msr_enable_event() argument
372 struct hw_perf_event *hwc = &event->hw; nhmex_bbox_msr_enable_event()
386 * Use bits 6-7 in the event config to select counter.
434 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) nhmex_sbox_hw_config() argument
436 struct hw_perf_event *hwc = &event->hw; nhmex_sbox_hw_config()
440 /* only TO_R_PROG_EV event uses the match/mask register */ nhmex_sbox_hw_config()
450 reg1->config = event->attr.config1; nhmex_sbox_hw_config()
451 reg2->config = event->attr.config2; nhmex_sbox_hw_config()
455 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_sbox_msr_enable_event() argument
457 struct hw_perf_event *hwc = &event->hw; nhmex_sbox_msr_enable_event()
526 /* event 0xa uses two extra registers */
622 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) nhmex_mbox_alter_er() argument
624 struct hw_perf_event *hwc = &event->hw; nhmex_mbox_alter_er()
650 /* adjust the main event selector */ nhmex_mbox_alter_er()
662 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) nhmex_mbox_get_constraint() argument
664 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_mbox_get_constraint()
665 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; nhmex_mbox_get_constraint()
692 * If it's a fake box -- as per validate_{group,event}() we nhmex_mbox_get_constraint()
693 * shouldn't touch event state and we can avoid doing so nhmex_mbox_get_constraint()
695 * on each event, this avoids the need for reg->alloc. nhmex_mbox_get_constraint()
699 nhmex_mbox_alter_er(event, idx[0], true); nhmex_mbox_get_constraint()
719 config1 = nhmex_mbox_alter_er(event, idx[0], false); nhmex_mbox_get_constraint()
731 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) nhmex_mbox_put_constraint() argument
733 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_mbox_put_constraint()
734 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; nhmex_mbox_put_constraint()
755 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; nhmex_mbox_extra_reg_idx()
758 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) nhmex_mbox_hw_config() argument
761 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_mbox_hw_config()
762 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; nhmex_mbox_hw_config()
772 if (er->event != (event->hw.config & er->config_mask)) nhmex_mbox_hw_config()
774 if (event->attr.config1 & ~er->valid_mask) nhmex_mbox_hw_config()
791 reg1->config = event->attr.config1; nhmex_mbox_hw_config()
800 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) nhmex_mbox_hw_config()
801 reg2->config = event->attr.config2; nhmex_mbox_hw_config()
828 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_mbox_msr_enable_event() argument
830 struct hw_perf_event *hwc = &event->hw; nhmex_mbox_msr_enable_event()
937 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_alter_er() argument
939 struct hw_perf_event *hwc = &event->hw; nhmex_rbox_alter_er()
942 /* adjust the main event selector and extra register index */ nhmex_rbox_alter_er()
965 * Each rbox has 4 event sets which monitor PQI ports 0~3 or 4~7.
966 * An event set consists of 6 events, the 3rd and 4th events in
967 * an event set use the same extra register. So an event set uses
971 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_get_constraint() argument
973 struct hw_perf_event *hwc = &event->hw; nhmex_rbox_get_constraint()
1047 nhmex_rbox_alter_er(box, event); nhmex_rbox_get_constraint()
1055 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_put_constraint() argument
1058 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_rbox_put_constraint()
1079 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_hw_config() argument
1081 struct hw_perf_event *hwc = &event->hw; nhmex_rbox_hw_config()
1082 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_rbox_hw_config()
1083 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; nhmex_rbox_hw_config()
1086 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> nhmex_rbox_hw_config()
1092 reg1->config = event->attr.config1; nhmex_rbox_hw_config()
1097 hwc->config |= event->attr.config & (~0ULL << 32); nhmex_rbox_hw_config()
1098 reg2->config = event->attr.config2; nhmex_rbox_hw_config()
1104 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_msr_enable_event() argument
1106 struct hw_perf_event *hwc = &event->hw; nhmex_rbox_msr_enable_event()
1166 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
1167 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
1168 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
1169 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
1170 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
1171 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
H A Dperf_event_intel_bts.c216 static void __bts_event_start(struct perf_event *event) __bts_event_start() argument
225 event->hw.itrace_started = 1; __bts_event_start()
226 event->hw.state = 0; __bts_event_start()
230 if (!event->attr.exclude_kernel) __bts_event_start()
232 if (!event->attr.exclude_user) __bts_event_start()
246 static void bts_event_start(struct perf_event *event, int flags) bts_event_start() argument
250 __bts_event_start(event); bts_event_start()
256 static void __bts_event_stop(struct perf_event *event) __bts_event_stop() argument
264 if (event->hw.state & PERF_HES_STOPPED) __bts_event_stop()
267 ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED; __bts_event_stop()
270 static void bts_event_stop(struct perf_event *event, int flags) bts_event_stop() argument
277 __bts_event_stop(event); bts_event_stop()
287 if (bts->handle.event && bts->started) intel_bts_enable_local()
288 __bts_event_start(bts->handle.event); intel_bts_enable_local()
295 if (bts->handle.event) intel_bts_disable_local()
296 __bts_event_stop(bts->handle.event); intel_bts_disable_local()
380 struct perf_event *event = bts->handle.event; intel_bts_interrupt() local
385 if (!event || !bts->started) intel_bts_interrupt()
407 buf = perf_aux_output_begin(&bts->handle, event); intel_bts_interrupt()
418 static void bts_event_del(struct perf_event *event, int mode) bts_event_del() argument
424 bts_event_stop(event, PERF_EF_UPDATE); bts_event_del()
441 static int bts_event_add(struct perf_event *event, int mode) bts_event_add() argument
446 struct hw_perf_event *hwc = &event->hw; bts_event_add()
449 event->hw.state = PERF_HES_STOPPED; bts_event_add()
454 if (bts->handle.event) bts_event_add()
457 buf = perf_aux_output_begin(&bts->handle, event); bts_event_add()
472 bts_event_start(event, 0); bts_event_add()
474 bts_event_del(event, 0); bts_event_add()
482 static void bts_event_destroy(struct perf_event *event) bts_event_destroy() argument
488 static int bts_event_init(struct perf_event *event) bts_event_init() argument
492 if (event->attr.type != bts_pmu.type) bts_event_init()
507 if (event->attr.exclude_kernel && perf_paranoid_kernel() && bts_event_init()
517 event->destroy = bts_event_destroy; bts_event_init()
522 static void bts_event_read(struct perf_event *event) bts_event_read() argument
H A Dperf_event_intel.c216 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
217 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
218 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
1501 struct perf_event *event = __intel_pmu_enable_all() local
1504 if (WARN_ON_ONCE(!event)) __intel_pmu_enable_all()
1507 intel_pmu_enable_bts(event->hw.config); __intel_pmu_enable_all()
1540 struct perf_event *event; intel_pmu_nhm_workaround() local
1566 event = cpuc->events[i]; intel_pmu_nhm_workaround()
1567 if (event) intel_pmu_nhm_workaround()
1568 x86_perf_event_update(event); intel_pmu_nhm_workaround()
1580 event = cpuc->events[i]; intel_pmu_nhm_workaround()
1582 if (event) { intel_pmu_nhm_workaround()
1583 x86_perf_event_set_period(event); intel_pmu_nhm_workaround()
1584 __x86_pmu_enable_event(&event->hw, intel_pmu_nhm_workaround()
1624 static inline bool event_is_checkpointed(struct perf_event *event) event_is_checkpointed() argument
1626 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; event_is_checkpointed()
1629 static void intel_pmu_disable_event(struct perf_event *event) intel_pmu_disable_event() argument
1631 struct hw_perf_event *hwc = &event->hw; intel_pmu_disable_event()
1645 * must be disabled before any actual event intel_pmu_disable_event()
1646 * because any event may be combined with LBR intel_pmu_disable_event()
1648 if (needs_branch_stack(event)) intel_pmu_disable_event()
1649 intel_pmu_lbr_disable(event); intel_pmu_disable_event()
1656 x86_pmu_disable_event(event); intel_pmu_disable_event()
1658 if (unlikely(event->attr.precise_ip)) intel_pmu_disable_event()
1659 intel_pmu_pebs_disable(event); intel_pmu_disable_event()
1693 static void intel_pmu_enable_event(struct perf_event *event) intel_pmu_enable_event() argument
1695 struct hw_perf_event *hwc = &event->hw; intel_pmu_enable_event()
1706 * must be enabled before any actual event intel_pmu_enable_event()
1707 * because any event may be combined with LBR intel_pmu_enable_event()
1709 if (needs_branch_stack(event)) intel_pmu_enable_event()
1710 intel_pmu_lbr_enable(event); intel_pmu_enable_event()
1712 if (event->attr.exclude_host) intel_pmu_enable_event()
1714 if (event->attr.exclude_guest) intel_pmu_enable_event()
1717 if (unlikely(event_is_checkpointed(event))) intel_pmu_enable_event()
1725 if (unlikely(event->attr.precise_ip)) intel_pmu_enable_event()
1726 intel_pmu_pebs_enable(event); intel_pmu_enable_event()
1732 * Save and restart an expired event. Called by NMI contexts,
1733 * so it has to be careful about preempting normal event ops:
1735 int intel_pmu_save_and_restart(struct perf_event *event) intel_pmu_save_and_restart() argument
1737 x86_perf_event_update(event); intel_pmu_save_and_restart()
1744 if (unlikely(event_is_checkpointed(event))) { intel_pmu_save_and_restart()
1746 wrmsrl(event->hw.event_base, 0); intel_pmu_save_and_restart()
1747 local64_set(&event->hw.prev_count, 0); intel_pmu_save_and_restart()
1749 return x86_perf_event_set_period(event); intel_pmu_save_and_restart()
1879 struct perf_event *event = cpuc->events[bit]; intel_pmu_handle_irq() local
1886 if (!intel_pmu_save_and_restart(event)) intel_pmu_handle_irq()
1889 perf_sample_data_init(&data, 0, event->hw.last_period); intel_pmu_handle_irq()
1891 if (has_branch_stack(event)) intel_pmu_handle_irq()
1894 if (perf_event_overflow(event, &data, regs)) intel_pmu_handle_irq()
1895 x86_pmu_stop(event, 0); intel_pmu_handle_irq()
1921 intel_bts_constraints(struct perf_event *event) intel_bts_constraints() argument
1923 struct hw_perf_event *hwc = &event->hw; intel_bts_constraints()
1926 if (event->attr.freq) intel_bts_constraints()
1956 static void intel_fixup_er(struct perf_event *event, int idx) intel_fixup_er() argument
1958 event->hw.extra_reg.idx = idx; intel_fixup_er()
1961 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; intel_fixup_er()
1962 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event; intel_fixup_er()
1963 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; intel_fixup_er()
1965 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; intel_fixup_er()
1966 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event; intel_fixup_er()
1967 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; intel_fixup_er()
1980 struct perf_event *event, __intel_shared_reg_get_constraints()
2007 * If its a fake cpuc -- as per validate_{group,event}() we __intel_shared_reg_get_constraints()
2008 * shouldn't touch event state and we can avoid doing so __intel_shared_reg_get_constraints()
2010 * on each event, this avoids the need for reg->alloc. __intel_shared_reg_get_constraints()
2018 intel_fixup_er(event, idx); __intel_shared_reg_get_constraints()
2038 * to check if associated event has constraints __intel_shared_reg_get_constraints()
2061 * care of events which do not use an extra shared reg. __intel_shared_reg_put_constraints()
2063 * Also, if this is a fake cpuc we shouldn't touch any event state __intel_shared_reg_put_constraints()
2081 struct perf_event *event) intel_shared_regs_constraints()
2086 xreg = &event->hw.extra_reg; intel_shared_regs_constraints()
2088 c = __intel_shared_reg_get_constraints(cpuc, event, xreg); intel_shared_regs_constraints()
2092 breg = &event->hw.branch_reg; intel_shared_regs_constraints()
2094 d = __intel_shared_reg_get_constraints(cpuc, event, breg); intel_shared_regs_constraints()
2105 struct perf_event *event) x86_get_event_constraints()
2111 if ((event->hw.config & c->cmask) == c->code) { for_each_event_constraint()
2112 event->hw.flags |= c->flags; for_each_event_constraint()
2123 struct perf_event *event) __intel_get_event_constraints()
2127 c = intel_bts_constraints(event); __intel_get_event_constraints()
2131 c = intel_shared_regs_constraints(cpuc, event); __intel_get_event_constraints()
2135 c = intel_pebs_constraints(event); __intel_get_event_constraints()
2139 return x86_get_event_constraints(cpuc, idx, event); __intel_get_event_constraints()
2226 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, intel_get_excl_constraints() argument
2290 * event requires exclusive counter access intel_get_excl_constraints()
2294 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { intel_get_excl_constraints()
2295 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; intel_get_excl_constraints()
2304 * EXCLUSIVE: sibling counter measuring exclusive event intel_get_excl_constraints()
2305 * SHARED : sibling counter measuring non-exclusive event intel_get_excl_constraints()
2310 * exclusive event in sibling counter intel_get_excl_constraints()
2312 * regardless of our event intel_get_excl_constraints()
2317 * if measuring an exclusive event, sibling intel_get_excl_constraints()
2343 struct perf_event *event) intel_get_event_constraints()
2356 c2 = __intel_get_event_constraints(cpuc, idx, event); intel_get_event_constraints()
2364 return intel_get_excl_constraints(cpuc, event, idx, c2); intel_get_event_constraints()
2370 struct perf_event *event) intel_put_excl_constraints()
2372 struct hw_perf_event *hwc = &event->hw; intel_put_excl_constraints()
2393 * If event was actually assigned, then mark the counter state as intel_put_excl_constraints()
2416 struct perf_event *event) intel_put_shared_regs_event_constraints()
2420 reg = &event->hw.extra_reg; intel_put_shared_regs_event_constraints()
2424 reg = &event->hw.branch_reg; intel_put_shared_regs_event_constraints()
2430 struct perf_event *event) intel_put_event_constraints()
2432 intel_put_shared_regs_event_constraints(cpuc, event); intel_put_event_constraints()
2440 intel_put_excl_constraints(cpuc, event); intel_put_event_constraints()
2443 static void intel_pebs_aliases_core2(struct perf_event *event) intel_pebs_aliases_core2() argument
2445 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { intel_pebs_aliases_core2()
2450 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't intel_pebs_aliases_core2()
2452 * (0x00c0), which is a PEBS capable event, to get the same intel_pebs_aliases_core2()
2464 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); intel_pebs_aliases_core2()
2466 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); intel_pebs_aliases_core2()
2467 event->hw.config = alt_config; intel_pebs_aliases_core2()
2471 static void intel_pebs_aliases_snb(struct perf_event *event) intel_pebs_aliases_snb() argument
2473 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { intel_pebs_aliases_snb()
2478 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't intel_pebs_aliases_snb()
2480 * (0x01c2), which is a PEBS capable event, to get the same intel_pebs_aliases_snb()
2492 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); intel_pebs_aliases_snb()
2494 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); intel_pebs_aliases_snb()
2495 event->hw.config = alt_config; intel_pebs_aliases_snb()
2499 static unsigned long intel_pmu_free_running_flags(struct perf_event *event) intel_pmu_free_running_flags() argument
2503 if (event->attr.use_clockid) intel_pmu_free_running_flags()
2508 static int intel_pmu_hw_config(struct perf_event *event) intel_pmu_hw_config() argument
2510 int ret = x86_pmu_hw_config(event); intel_pmu_hw_config()
2515 if (event->attr.precise_ip) { intel_pmu_hw_config()
2516 if (!event->attr.freq) { intel_pmu_hw_config()
2517 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; intel_pmu_hw_config()
2518 if (!(event->attr.sample_type & intel_pmu_hw_config()
2519 ~intel_pmu_free_running_flags(event))) intel_pmu_hw_config()
2520 event->hw.flags |= PERF_X86_EVENT_FREERUNNING; intel_pmu_hw_config()
2523 x86_pmu.pebs_aliases(event); intel_pmu_hw_config()
2526 if (needs_branch_stack(event)) { intel_pmu_hw_config()
2527 ret = intel_pmu_setup_lbr_filter(event); intel_pmu_hw_config()
2534 if (!intel_pmu_has_bts(event)) { intel_pmu_hw_config()
2539 event->destroy = hw_perf_lbr_event_destroy; intel_pmu_hw_config()
2543 if (event->attr.type != PERF_TYPE_RAW) intel_pmu_hw_config()
2546 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) intel_pmu_hw_config()
2555 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY; intel_pmu_hw_config()
2597 struct perf_event *event = cpuc->events[idx]; core_guest_get_msrs() local
2606 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; core_guest_get_msrs()
2608 if (event->attr.exclude_host) core_guest_get_msrs()
2610 else if (event->attr.exclude_guest) core_guest_get_msrs()
2618 static void core_pmu_enable_event(struct perf_event *event) core_pmu_enable_event() argument
2620 if (!event->attr.exclude_host) core_pmu_enable_event()
2621 x86_pmu_enable_event(event); core_pmu_enable_event()
2640 static int hsw_hw_config(struct perf_event *event) hsw_hw_config() argument
2642 int ret = intel_pmu_hw_config(event); hsw_hw_config()
2648 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); hsw_hw_config()
2655 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && hsw_hw_config()
2656 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || hsw_hw_config()
2657 event->attr.precise_ip > 0)) hsw_hw_config()
2660 if (event_is_checkpointed(event)) { hsw_hw_config()
2670 if (event->attr.sample_period > 0 && hsw_hw_config()
2671 event->attr.sample_period < 0x7fffffff) hsw_hw_config()
2682 struct perf_event *event) hsw_get_event_constraints()
2686 c = intel_get_event_constraints(cpuc, idx, event); hsw_get_event_constraints()
2689 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { hsw_get_event_constraints()
2713 static unsigned bdw_limit_period(struct perf_event *event, unsigned left) bdw_limit_period() argument
2715 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == bdw_limit_period()
2716 X86_CONFIG(.event=0xc0, .umask=0x01)) { bdw_limit_period()
2724 PMU_FORMAT_ATTR(event, "config:0-7" );
2746 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); intel_event_sysfs_show() local
2748 return x86_event_sysfs_show(page, config, event); intel_event_sysfs_show()
2962 * the generic event period:
3001 * the generic event period:
3024 * AJ68 - PEBS PMI may be delayed by one event intel_clovertown_quirk()
3145 /* disable events that are reported as not present by cpuid */ for_each_set_bit()
3148 pr_warn("CPUID marked event: \'%s\' unavailable\n", for_each_set_bit()
3161 * the BR_MISP_EXEC.ANY event. This will over-count intel_nehalem_quirk()
3163 * architectural event which is often completely bogus: intel_nehalem_quirk()
3193 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
3194 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
3197 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
3198 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
3199 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
3200 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
3201 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
3202 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
3203 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
3204 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
3205 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
3206 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
3207 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
3208 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
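
Each EVENT_ATTR_STR() line above registers a named alias for a raw event encoding; on a running kernel these show up as read-only files under the core PMU's sysfs events directory, and tools resolve them by name (for example "perf stat -e cpu/tx-start/"). A small user-space check, assuming the usual sysfs layout:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/bus/event_source/devices/cpu/events/tx-start", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("tx-start alias: %s", buf);  /* expect "event=0xc9,umask=0x1" */
	if (f)
		fclose(f);
	return 0;
}
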
3334 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3337 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); intel_pmu_init()
3397 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3400 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); intel_pmu_init()
3434 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3437 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3470 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3595 * event on fixed counter2 (REF_CYCLES) only works on this intel_pmu_init()
3625 * E.g. KVM doesn't support offcore event
1979 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, struct hw_perf_event_extra *reg) __intel_shared_reg_get_constraints() argument
2080 intel_shared_regs_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) intel_shared_regs_constraints() argument
2104 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) x86_get_event_constraints() argument
2122 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) __intel_get_event_constraints() argument
2342 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) intel_get_event_constraints() argument
2369 intel_put_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) intel_put_excl_constraints() argument
2415 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) intel_put_shared_regs_event_constraints() argument
2429 intel_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) intel_put_event_constraints() argument
2681 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) hsw_get_event_constraints() argument
H A Dperf_event.h72 #define PERF_X86_EVENT_COMMITTED 0x0008 /* event passed commit_txn */
78 #define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
191 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
269 * The overlap flag marks event constraints with overlapping counter
270 * masks. This is the case if the counter mask of such an event is not
278 * The event scheduler may not select the correct counter in the first
337 /* Event constraint, but match on all event flags too. */
341 /* Check only flags, but allow all event/umask */
345 /* Check flags and event code, and set the HSW store flag */
351 /* Check flags and event code, and set the HSW load flag */
363 /* Check flags and event code/umask, and set the HSW store flag */
375 /* Check flags and event code/umask, and set the HSW load flag */
387 /* Check flags and event code/umask, and set the HSW N/A flag */
420 unsigned int event; member in struct:extra_reg
429 .event = (e), \
437 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
438 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
440 #define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
441 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
475 u64 event:8, member in struct:x86_pmu_config::__anon3153
517 int (*hw_config)(struct perf_event *event);
539 struct perf_event *event);
542 struct perf_event *event);
554 unsigned (*limit_period)(struct perf_event *event, unsigned l);
597 void (*pebs_aliases)(struct perf_event *event);
680 int x86_perf_event_set_period(struct perf_event *event);
701 u64 x86_perf_event_update(struct perf_event *event);
728 void hw_perf_lbr_event_destroy(struct perf_event *event);
730 int x86_setup_perfctr(struct perf_event *event);
732 int x86_pmu_hw_config(struct perf_event *event);
752 void x86_pmu_stop(struct perf_event *event, int flags);
754 static inline void x86_pmu_disable_event(struct perf_event *event) x86_pmu_disable_event() argument
756 struct hw_perf_event *hwc = &event->hw; x86_pmu_disable_event()
761 void x86_pmu_enable_event(struct perf_event *event);
798 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
818 static inline bool intel_pmu_has_bts(struct perf_event *event) intel_pmu_has_bts() argument
820 if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && intel_pmu_has_bts()
821 !event->attr.freq && event->hw.sample_period == 1) intel_pmu_has_bts()
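
The intel_pmu_has_bts() test above spells out when an event is routed to Branch Trace Store rather than a normal counter: a hardware branch-instructions event, not in frequency mode, with a sample period of exactly 1 (i.e. every branch). A hedged user-space sketch of such an attr:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_bts_style_event(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size          = sizeof(attr);
	attr.type          = PERF_TYPE_HARDWARE;
	attr.config        = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr.sample_period = 1;   /* every branch: the BTS signature above */
	attr.freq          = 0;
	attr.sample_type   = PERF_SAMPLE_IP;

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}
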
827 int intel_pmu_save_and_restart(struct perf_event *event);
831 struct perf_event *event);
871 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
873 void intel_pmu_pebs_enable(struct perf_event *event);
875 void intel_pmu_pebs_disable(struct perf_event *event);
889 void intel_pmu_lbr_enable(struct perf_event *event);
891 void intel_pmu_lbr_disable(struct perf_event *event);
913 int intel_pmu_setup_lbr_filter(struct perf_event *event);
/linux-4.4.14/drivers/oprofile/
H A Dnmi_timer_int.c28 static void nmi_timer_callback(struct perf_event *event, nmi_timer_callback() argument
32 event->hw.interrupts = 0; /* don't throttle interrupts */ nmi_timer_callback()
38 struct perf_event *event = per_cpu(nmi_timer_events, cpu); nmi_timer_start_cpu() local
40 if (!event) { nmi_timer_start_cpu()
41 event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL, nmi_timer_start_cpu()
43 if (IS_ERR(event)) nmi_timer_start_cpu()
44 return PTR_ERR(event); nmi_timer_start_cpu()
45 per_cpu(nmi_timer_events, cpu) = event; nmi_timer_start_cpu()
48 if (event && ctr_running) nmi_timer_start_cpu()
49 perf_event_enable(event); nmi_timer_start_cpu()
56 struct perf_event *event = per_cpu(nmi_timer_events, cpu); nmi_timer_stop_cpu() local
58 if (event && ctr_running) nmi_timer_stop_cpu()
59 perf_event_disable(event); nmi_timer_stop_cpu()
108 struct perf_event *event; nmi_timer_shutdown() local
114 event = per_cpu(nmi_timer_events, cpu); for_each_possible_cpu()
115 if (!event) for_each_possible_cpu()
117 perf_event_disable(event); for_each_possible_cpu()
119 perf_event_release_kernel(event); for_each_possible_cpu()
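
The nmi_timer_int.c lines above are a compact example of the in-kernel counter API: one counter is created per CPU with perf_event_create_kernel_counter(), enabled and disabled around profiling, and released with perf_event_release_kernel(). A minimal sketch of the same pattern for a single CPU (kernel/module context assumed; error handling trimmed):

#include <linux/err.h>
#include <linux/perf_event.h>

static struct perf_event *cycle_event;

/* runs in NMI/IRQ context: keep it cheap */
static void cycle_overflow(struct perf_event *event,
			   struct perf_sample_data *data, struct pt_regs *regs)
{
	event->hw.interrupts = 0;	/* as in nmi_timer_callback(): don't throttle */
}

static int start_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
		.pinned		= 1,
	};

	cycle_event = perf_event_create_kernel_counter(&attr, cpu, NULL,
						       cycle_overflow, NULL);
	if (IS_ERR(cycle_event))
		return PTR_ERR(cycle_event);

	perf_event_enable(cycle_event);
	return 0;
}

static void stop_cycle_counter(void)
{
	perf_event_disable(cycle_event);
	perf_event_release_kernel(cycle_event);
}
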
H A Doprofile_perf.c18 unsigned long event; member in struct:op_counter_config
35 static void op_overflow_handler(struct perf_event *event, op_overflow_handler() argument
42 if (per_cpu(perf_events, cpu)[id] == event) op_overflow_handler()
68 attr->config = counter_config[i].event; op_perf_setup()
74 static int op_create_counter(int cpu, int event) op_create_counter() argument
78 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) op_create_counter()
81 pevent = perf_event_create_kernel_counter(&counter_config[event].attr, op_create_counter()
90 pr_warning("oprofile: failed to enable event %d " op_create_counter()
91 "on CPU %d\n", event, cpu); op_create_counter()
95 per_cpu(perf_events, cpu)[event] = pevent; op_create_counter()
100 static void op_destroy_counter(int cpu, int event) op_destroy_counter() argument
102 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; op_destroy_counter()
106 per_cpu(perf_events, cpu)[event] = NULL; op_destroy_counter()
116 int cpu, event, ret = 0; op_perf_start() local
119 for (event = 0; event < num_counters; ++event) { for_each_online_cpu()
120 ret = op_create_counter(cpu, event); for_each_online_cpu()
134 int cpu, event; op_perf_stop() local
137 for (event = 0; event < num_counters; ++event) op_perf_stop()
138 op_destroy_counter(cpu, event); op_perf_stop()
152 oprofilefs_create_ulong(dir, "event", &counter_config[i].event); oprofile_perf_create_files()
257 struct perf_event *event; oprofile_perf_exit() local
261 event = per_cpu(perf_events, cpu)[id]; for_each_possible_cpu()
262 if (event) for_each_possible_cpu()
263 perf_event_release_kernel(event); for_each_possible_cpu()
H A Dcpu_buffer.c11 * Each CPU has a local buffer that stores PC value/event
14 * event buffer by sync_buffer().
144 entry->event = ring_buffer_lock_reserve op_cpu_buffer_write_reserve()
147 if (!entry->event) op_cpu_buffer_write_reserve()
149 entry->sample = ring_buffer_event_data(entry->event); op_cpu_buffer_write_reserve()
158 return ring_buffer_unlock_commit(op_ring_buffer, entry->event); op_cpu_buffer_write_commit()
168 entry->event = e; op_cpu_buffer_read_entry()
224 sample->event = flags; op_add_code()
236 unsigned long pc, unsigned long event) op_add_sample()
246 sample->event = event; op_add_sample()
261 unsigned long backtrace, int is_kernel, unsigned long event, log_sample()
275 if (op_add_sample(cpu_buf, pc, event)) log_sample()
297 unsigned long event, int is_kernel, __oprofile_add_ext_sample()
305 * source of this event __oprofile_add_ext_sample()
307 if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task)) __oprofile_add_ext_sample()
320 unsigned long event, int is_kernel, oprofile_add_ext_hw_sample()
323 __oprofile_add_ext_sample(pc, regs, event, is_kernel, task); oprofile_add_ext_hw_sample()
327 unsigned long event, int is_kernel) oprofile_add_ext_sample()
329 __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); oprofile_add_ext_sample()
332 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) oprofile_add_sample() argument
345 __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); oprofile_add_sample()
372 sample->event = 0; /* no flags */ oprofile_write_reserve()
380 entry->event = NULL; oprofile_write_reserve()
386 if (!entry->event) oprofile_add_data()
393 if (!entry->event) oprofile_add_data64()
408 if (!entry->event) oprofile_write_commit()
413 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) oprofile_add_pc() argument
416 log_sample(cpu_buf, pc, 0, is_kernel, event, NULL); oprofile_add_pc()
235 op_add_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, unsigned long event) op_add_sample() argument
260 log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, unsigned long backtrace, int is_kernel, unsigned long event, struct task_struct *task) log_sample() argument
296 __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel, struct task_struct *task) __oprofile_add_ext_sample() argument
319 oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel, struct task_struct *task) oprofile_add_ext_hw_sample() argument
326 oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel) oprofile_add_ext_sample() argument
H A Devent_buffer.h21 * Add data to the event buffer.
27 /* wake up the process sleeping on the event file */
/linux-4.4.14/arch/powerpc/platforms/pseries/
H A Dio_event_irq.c27 * IO event interrupt is a mechanism provided by RTAS to return
29 * drivers can register their event handlers to receive events.
32 * their event handlers. Since multiple IO event types and scopes
33 * share an IO event interrupt, the event handlers are called one
34 * by one until the IO event is claimed by one of the handlers.
35 * The event handlers are expected to return NOTIFY_OK if the
36 * event is handled by the event handler or NOTIFY_DONE if the
37 * event does not belong to the handler.
69 * Find the data portion of an IO Event section from event log.
70 * @elog: RTAS error/event log.
73 * pointer to a valid IO event section data. NULL if not found.
79 /* We should only ever get called for io-event interrupts, but if ioei_find_event()
82 * RTAS_TYPE_IO only exists in extended event log version 6 or later. ioei_find_event()
83 * No need to check event log version. ioei_find_event()
86 printk_once(KERN_WARNING"io_event_irq: Unexpected event type %d", ioei_find_event()
93 printk_once(KERN_WARNING "io_event_irq: RTAS extended event " ioei_find_event()
103 * - check-exception returns the first found error or event and clear that
104 * error or event so it is reported once.
105 * - Each interrupt returns one event. If a platform chooses to report ioei_find_event()
113 * - The owner of an event is determined by combinations of scope,
114 * event type, and sub-type. There is no easy way to pre-sort clients
115 * by scope or event type alone. For example, Torrent ISR route change
116 * event is reported with scope 0x00 (Not Applicable) rather than
118 * who owns the event.
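
The comment block above describes a standard notifier-chain contract: drivers register a handler on the pseries IO-event chain, and each handler returns NOTIFY_OK if it handles the event or NOTIFY_DONE to pass it on. A hedged sketch of a consumer (the chain symbol pseries_ioei_notifier_list and struct pseries_io_event are taken from asm/io_event_irq.h; the ownership check is a hypothetical placeholder):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/io_event_irq.h>

/* hypothetical: decide from scope/type/sub-type whether this event is ours */
static bool my_event_is_mine(struct pseries_io_event *event)
{
	return false;
}

static int my_io_event_handler(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct pseries_io_event *event = data;

	if (!my_event_is_mine(event))
		return NOTIFY_DONE;	/* event does not belong to this handler */

	/* handle the event here */
	return NOTIFY_OK;		/* event handled */
}

static struct notifier_block my_io_event_nb = {
	.notifier_call = my_io_event_handler,
};

static int __init my_io_event_init(void)
{
	atomic_notifier_chain_register(&pseries_ioei_notifier_list,
				       &my_io_event_nb);
	return 0;
}
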
123 struct pseries_io_event *event; ioei_interrupt() local
136 event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf); ioei_interrupt()
137 if (!event) ioei_interrupt()
141 0, event); ioei_interrupt()
154 np = of_find_node_by_path("/event-sources/ibm,io-events"); ioei_init()
157 pr_info("IBM I/O event interrupts enabled\n"); ioei_init()
/linux-4.4.14/kernel/events/
H A Dcore.c131 static bool is_kernel_event(struct perf_event *event) is_kernel_event() argument
133 return event->owner == EVENT_OWNER_KERNEL; is_kernel_event()
173 * perf event paranoia level:
185 * max perf event sample rate
321 static u64 perf_event_time(struct perf_event *event);
335 static inline u64 perf_event_clock(struct perf_event *event) perf_event_clock() argument
337 return event->clock(); perf_event_clock()
365 perf_cgroup_match(struct perf_event *event) perf_cgroup_match() argument
367 struct perf_event_context *ctx = event->ctx; perf_cgroup_match()
370 /* @event doesn't care about cgroup */ perf_cgroup_match()
371 if (!event->cgrp) perf_cgroup_match()
379 * Cgroup scoping is recursive. An event enabled for a cgroup is perf_cgroup_match()
381 * cgroup is a descendant of @event's (the test covers identity perf_cgroup_match()
385 event->cgrp->css.cgroup); perf_cgroup_match()
388 static inline void perf_detach_cgroup(struct perf_event *event) perf_detach_cgroup() argument
390 css_put(&event->cgrp->css); perf_detach_cgroup()
391 event->cgrp = NULL; perf_detach_cgroup()
394 static inline int is_cgroup_event(struct perf_event *event) is_cgroup_event() argument
396 return event->cgrp != NULL; is_cgroup_event()
399 static inline u64 perf_cgroup_event_time(struct perf_event *event) perf_cgroup_event_time() argument
403 t = per_cpu_ptr(event->cgrp->info, event->cpu); perf_cgroup_event_time()
427 static inline void update_cgrp_time_from_event(struct perf_event *event) update_cgrp_time_from_event() argument
435 if (!is_cgroup_event(event)) update_cgrp_time_from_event()
438 cgrp = perf_cgroup_from_task(current, event->ctx); update_cgrp_time_from_event()
442 if (cgrp == event->cgrp) update_cgrp_time_from_event()
443 __update_cgrp_time(event->cgrp); update_cgrp_time_from_event()
466 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
598 static inline int perf_cgroup_connect(int fd, struct perf_event *event, perf_cgroup_connect() argument
618 event->cgrp = cgrp; perf_cgroup_connect()
626 perf_detach_cgroup(event); perf_cgroup_connect()
635 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) perf_cgroup_set_shadow_time() argument
638 t = per_cpu_ptr(event->cgrp->info, event->cpu); perf_cgroup_set_shadow_time()
639 event->shadow_ctx_time = now - t->timestamp; perf_cgroup_set_shadow_time()
643 perf_cgroup_defer_enabled(struct perf_event *event) perf_cgroup_defer_enabled() argument
647 * the event's, we need to remember to call the perf_cgroup_defer_enabled()
651 if (is_cgroup_event(event) && !perf_cgroup_match(event)) perf_cgroup_defer_enabled()
652 event->cgrp_defer_enabled = 1; perf_cgroup_defer_enabled()
656 perf_cgroup_mark_enabled(struct perf_event *event, perf_cgroup_mark_enabled() argument
660 u64 tstamp = perf_event_time(event); perf_cgroup_mark_enabled()
662 if (!event->cgrp_defer_enabled) perf_cgroup_mark_enabled()
665 event->cgrp_defer_enabled = 0; perf_cgroup_mark_enabled()
667 event->tstamp_enabled = tstamp - event->total_time_enabled; perf_cgroup_mark_enabled()
668 list_for_each_entry(sub, &event->sibling_list, group_entry) { perf_cgroup_mark_enabled()
678 perf_cgroup_match(struct perf_event *event) perf_cgroup_match() argument
683 static inline void perf_detach_cgroup(struct perf_event *event) perf_detach_cgroup() argument
686 static inline int is_cgroup_event(struct perf_event *event) is_cgroup_event() argument
691 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) perf_cgroup_event_cgrp_time() argument
696 static inline void update_cgrp_time_from_event(struct perf_event *event) update_cgrp_time_from_event() argument
714 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, perf_cgroup_connect() argument
733 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) perf_cgroup_set_shadow_time() argument
737 static inline u64 perf_cgroup_event_time(struct perf_event *event) perf_cgroup_event_time() argument
743 perf_cgroup_defer_enabled(struct perf_event *event) perf_cgroup_defer_enabled() argument
748 perf_cgroup_mark_enabled(struct perf_event *event, perf_cgroup_mark_enabled() argument
928 * because the sys_perf_event_open() case will install a new event and break
939 * quiesce the event, after which we can install it in the new location. This
940 * means that only external vectors (perf_fops, prctl) can perturb the event
944 * However; because event->ctx can change while we're waiting to acquire
958 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) perf_event_ctx_lock_nested() argument
964 ctx = ACCESS_ONCE(event->ctx); perf_event_ctx_lock_nested()
972 if (event->ctx != ctx) { perf_event_ctx_lock_nested()
982 perf_event_ctx_lock(struct perf_event *event) perf_event_ctx_lock() argument
984 return perf_event_ctx_lock_nested(event, 0); perf_event_ctx_lock()
987 static void perf_event_ctx_unlock(struct perf_event *event, perf_event_ctx_unlock() argument
1013 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) perf_event_pid() argument
1018 if (event->parent) perf_event_pid()
1019 event = event->parent; perf_event_pid()
1021 return task_tgid_nr_ns(p, event->ns); perf_event_pid()
1024 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) perf_event_tid() argument
1029 if (event->parent) perf_event_tid()
1030 event = event->parent; perf_event_tid()
1032 return task_pid_nr_ns(p, event->ns); perf_event_tid()
1036 * If we inherit events we want to return the parent event id
1039 static u64 primary_event_id(struct perf_event *event) primary_event_id() argument
1041 u64 id = event->id; primary_event_id()
1043 if (event->parent) primary_event_id()
1044 id = event->parent->id; primary_event_id()
1141 static u64 perf_event_time(struct perf_event *event) perf_event_time() argument
1143 struct perf_event_context *ctx = event->ctx; perf_event_time()
1145 if (is_cgroup_event(event)) perf_event_time()
1146 return perf_cgroup_event_time(event); perf_event_time()
1152 * Update the total_time_enabled and total_time_running fields for a event.
1155 static void update_event_times(struct perf_event *event) update_event_times() argument
1157 struct perf_event_context *ctx = event->ctx; update_event_times()
1160 if (event->state < PERF_EVENT_STATE_INACTIVE || update_event_times()
1161 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) update_event_times()
1165 * the time the event was enabled AND active update_event_times()
1173 if (is_cgroup_event(event)) update_event_times()
1174 run_end = perf_cgroup_event_time(event); update_event_times()
1178 run_end = event->tstamp_stopped; update_event_times()
1180 event->total_time_enabled = run_end - event->tstamp_enabled; update_event_times()
1182 if (event->state == PERF_EVENT_STATE_INACTIVE) update_event_times()
1183 run_end = event->tstamp_stopped; update_event_times()
1185 run_end = perf_event_time(event); update_event_times()
1187 event->total_time_running = run_end - event->tstamp_running; update_event_times()
1196 struct perf_event *event; update_group_times() local
1199 list_for_each_entry(event, &leader->sibling_list, group_entry) update_group_times()
1200 update_event_times(event); update_group_times()
1204 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) ctx_group_list() argument
1206 if (event->attr.pinned) ctx_group_list()
1213 * Add an event to the lists for its context.
1217 list_add_event(struct perf_event *event, struct perf_event_context *ctx) list_add_event() argument
1219 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); list_add_event()
1220 event->attach_state |= PERF_ATTACH_CONTEXT; list_add_event()
1223 * If we're a stand alone event or group leader, we go to the context list_add_event()
1227 if (event->group_leader == event) { list_add_event()
1230 if (is_software_event(event)) list_add_event()
1231 event->group_flags |= PERF_GROUP_SOFTWARE; list_add_event()
1233 list = ctx_group_list(event, ctx); list_add_event()
1234 list_add_tail(&event->group_entry, list); list_add_event()
1237 if (is_cgroup_event(event)) list_add_event()
1240 list_add_rcu(&event->event_entry, &ctx->event_list); list_add_event()
1242 if (event->attr.inherit_stat) list_add_event()
1249 * Initialize event state based on the perf_event_attr::disabled.
1251 static inline void perf_event__state_init(struct perf_event *event) perf_event__state_init() argument
1253 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : perf_event__state_init()
1257 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) __perf_event_read_size() argument
1263 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) __perf_event_read_size()
1266 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) __perf_event_read_size()
1269 if (event->attr.read_format & PERF_FORMAT_ID) __perf_event_read_size()
1272 if (event->attr.read_format & PERF_FORMAT_GROUP) { __perf_event_read_size()
1278 event->read_size = size; __perf_event_read_size()
1281 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) __perf_event_header_size() argument
1299 size += event->read_size; __perf_event_header_size()
1307 event->header_size = size; __perf_event_header_size()
1314 static void perf_event__header_size(struct perf_event *event) perf_event__header_size() argument
1316 __perf_event_read_size(event, perf_event__header_size()
1317 event->group_leader->nr_siblings); perf_event__header_size()
1318 __perf_event_header_size(event, event->attr.sample_type); perf_event__header_size()
1321 static void perf_event__id_header_size(struct perf_event *event) perf_event__id_header_size() argument
1324 u64 sample_type = event->attr.sample_type; perf_event__id_header_size()
1345 event->id_header_size = size; perf_event__id_header_size()
1348 static bool perf_event_validate_size(struct perf_event *event) perf_event_validate_size() argument
1352 * attach the event. perf_event_validate_size()
1354 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); perf_event_validate_size()
1355 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); perf_event_validate_size()
1356 perf_event__id_header_size(event); perf_event_validate_size()
1362 if (event->read_size + event->header_size + perf_event_validate_size()
1363 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) perf_event_validate_size()
1369 static void perf_group_attach(struct perf_event *event) perf_group_attach() argument
1371 struct perf_event *group_leader = event->group_leader, *pos; perf_group_attach()
1376 if (event->attach_state & PERF_ATTACH_GROUP) perf_group_attach()
1379 event->attach_state |= PERF_ATTACH_GROUP; perf_group_attach()
1381 if (group_leader == event) perf_group_attach()
1384 WARN_ON_ONCE(group_leader->ctx != event->ctx); perf_group_attach()
1387 !is_software_event(event)) perf_group_attach()
1390 list_add_tail(&event->group_entry, &group_leader->sibling_list); perf_group_attach()
1400 * Remove an event from the lists for its context.
1404 list_del_event(struct perf_event *event, struct perf_event_context *ctx) list_del_event() argument
1408 WARN_ON_ONCE(event->ctx != ctx); list_del_event()
1414 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) list_del_event()
1417 event->attach_state &= ~PERF_ATTACH_CONTEXT; list_del_event()
1419 if (is_cgroup_event(event)) { list_del_event()
1432 if (event->attr.inherit_stat) list_del_event()
1435 list_del_rcu(&event->event_entry); list_del_event()
1437 if (event->group_leader == event) list_del_event()
1438 list_del_init(&event->group_entry); list_del_event()
1440 update_group_times(event); list_del_event()
1443 * If event was in error state, then keep it list_del_event()
1447 * of the event list_del_event()
1449 if (event->state > PERF_EVENT_STATE_OFF) list_del_event()
1450 event->state = PERF_EVENT_STATE_OFF; list_del_event()
1455 static void perf_group_detach(struct perf_event *event) perf_group_detach() argument
1463 if (!(event->attach_state & PERF_ATTACH_GROUP)) perf_group_detach()
1466 event->attach_state &= ~PERF_ATTACH_GROUP; perf_group_detach()
1471 if (event->group_leader != event) { perf_group_detach()
1472 list_del_init(&event->group_entry); perf_group_detach()
1473 event->group_leader->nr_siblings--; perf_group_detach()
1477 if (!list_empty(&event->group_entry)) perf_group_detach()
1478 list = &event->group_entry; perf_group_detach()
1481 * If this was a group event with sibling events then perf_group_detach()
1485 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { perf_group_detach()
1491 sibling->group_flags = event->group_flags; perf_group_detach()
1493 WARN_ON_ONCE(sibling->ctx != event->ctx); perf_group_detach()
1497 perf_event__header_size(event->group_leader); perf_group_detach()
1499 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) perf_group_detach()
1504 * User event without the task.
1506 static bool is_orphaned_event(struct perf_event *event) is_orphaned_event() argument
1508 return event && !is_kernel_event(event) && !event->owner; is_orphaned_event()
1515 static bool is_orphaned_child(struct perf_event *event) is_orphaned_child() argument
1517 return is_orphaned_event(event->parent); is_orphaned_child()
1542 static inline int pmu_filter_match(struct perf_event *event) pmu_filter_match() argument
1544 struct pmu *pmu = event->pmu; pmu_filter_match()
1545 return pmu->filter_match ? pmu->filter_match(event) : 1; pmu_filter_match()
1549 event_filter_match(struct perf_event *event) event_filter_match() argument
1551 return (event->cpu == -1 || event->cpu == smp_processor_id()) event_filter_match()
1552 && perf_cgroup_match(event) && pmu_filter_match(event); event_filter_match()
1556 event_sched_out(struct perf_event *event, event_sched_out() argument
1560 u64 tstamp = perf_event_time(event); event_sched_out()
1563 WARN_ON_ONCE(event->ctx != ctx); event_sched_out()
1567 * An event which could not be activated because of event_sched_out()
1572 if (event->state == PERF_EVENT_STATE_INACTIVE event_sched_out()
1573 && !event_filter_match(event)) { event_sched_out()
1574 delta = tstamp - event->tstamp_stopped; event_sched_out()
1575 event->tstamp_running += delta; event_sched_out()
1576 event->tstamp_stopped = tstamp; event_sched_out()
1579 if (event->state != PERF_EVENT_STATE_ACTIVE) event_sched_out()
1582 perf_pmu_disable(event->pmu); event_sched_out()
1584 event->tstamp_stopped = tstamp; event_sched_out()
1585 event->pmu->del(event, 0); event_sched_out()
1586 event->oncpu = -1; event_sched_out()
1587 event->state = PERF_EVENT_STATE_INACTIVE; event_sched_out()
1588 if (event->pending_disable) { event_sched_out()
1589 event->pending_disable = 0; event_sched_out()
1590 event->state = PERF_EVENT_STATE_OFF; event_sched_out()
1593 if (!is_software_event(event)) event_sched_out()
1597 if (event->attr.freq && event->attr.sample_freq) event_sched_out()
1599 if (event->attr.exclusive || !cpuctx->active_oncpu) event_sched_out()
1602 if (is_orphaned_child(event)) event_sched_out()
1605 perf_pmu_enable(event->pmu); event_sched_out()
1613 struct perf_event *event; group_sched_out() local
1621 list_for_each_entry(event, &group_event->sibling_list, group_entry) group_sched_out()
1622 event_sched_out(event, cpuctx, ctx); group_sched_out()
1629 struct perf_event *event; member in struct:remove_event
1634 * Cross CPU call to remove a performance event
1636 * We disable the event on the hardware level first. After that we
1642 struct perf_event *event = re->event; __perf_remove_from_context() local
1643 struct perf_event_context *ctx = event->ctx; __perf_remove_from_context()
1647 event_sched_out(event, cpuctx, ctx); __perf_remove_from_context()
1649 perf_group_detach(event); __perf_remove_from_context()
1650 list_del_event(event, ctx); __perf_remove_from_context()
1662 * Remove the event from a task's (or a CPU's) list of events.
1667 * If event->ctx is a cloned context, callers must make sure that
1668 * every task struct that event->ctx->task could possibly point to
1674 static void perf_remove_from_context(struct perf_event *event, bool detach_group) perf_remove_from_context() argument
1676 struct perf_event_context *ctx = event->ctx; perf_remove_from_context()
1679 .event = event, perf_remove_from_context()
1692 cpu_function_call(event->cpu, __perf_remove_from_context, &re); perf_remove_from_context()
1716 * Since the task isn't running, it's safe to remove the event, us perf_remove_from_context()
1720 perf_group_detach(event); perf_remove_from_context()
1721 list_del_event(event, ctx); perf_remove_from_context()
1726 * Cross CPU call to disable a performance event
1730 struct perf_event *event = info; __perf_event_disable() local
1731 struct perf_event_context *ctx = event->ctx; __perf_event_disable()
1735 * If this is a per-task event, need to check whether this __perf_event_disable()
1736 * event's task is the current task on this cpu. __perf_event_disable()
1747 * If the event is on, turn it off. __perf_event_disable()
1750 if (event->state >= PERF_EVENT_STATE_INACTIVE) { __perf_event_disable()
1752 update_cgrp_time_from_event(event); __perf_event_disable()
1753 update_group_times(event); __perf_event_disable()
1754 if (event == event->group_leader) __perf_event_disable()
1755 group_sched_out(event, cpuctx, ctx); __perf_event_disable()
1757 event_sched_out(event, cpuctx, ctx); __perf_event_disable()
1758 event->state = PERF_EVENT_STATE_OFF; __perf_event_disable()
1767 * Disable an event.
1769 * If event->ctx is a cloned context, callers must make sure that
1770 * every task struct that event->ctx->task could possibly point to
1773 * hold the top-level event's child_mutex, so any descendant that
1775 * When called from perf_pending_event it's OK because event->ctx
1779 static void _perf_event_disable(struct perf_event *event) _perf_event_disable() argument
1781 struct perf_event_context *ctx = event->ctx; _perf_event_disable()
1786 * Disable the event on the cpu that it's on _perf_event_disable()
1788 cpu_function_call(event->cpu, __perf_event_disable, event); _perf_event_disable()
1793 if (!task_function_call(task, __perf_event_disable, event)) _perf_event_disable()
1798 * If the event is still active, we need to retry the cross-call. _perf_event_disable()
1800 if (event->state == PERF_EVENT_STATE_ACTIVE) { _perf_event_disable()
1814 if (event->state == PERF_EVENT_STATE_INACTIVE) { _perf_event_disable()
1815 update_group_times(event); _perf_event_disable()
1816 event->state = PERF_EVENT_STATE_OFF; _perf_event_disable()
1825 void perf_event_disable(struct perf_event *event) perf_event_disable() argument
1829 ctx = perf_event_ctx_lock(event); perf_event_disable()
1830 _perf_event_disable(event); perf_event_disable()
1831 perf_event_ctx_unlock(event, ctx); perf_event_disable()
1835 static void perf_set_shadow_time(struct perf_event *event, perf_set_shadow_time() argument
1854 * - event is guaranteed scheduled in perf_set_shadow_time()
1864 if (is_cgroup_event(event)) perf_set_shadow_time()
1865 perf_cgroup_set_shadow_time(event, tstamp); perf_set_shadow_time()
1867 event->shadow_ctx_time = tstamp - ctx->timestamp; perf_set_shadow_time()
1872 static void perf_log_throttle(struct perf_event *event, int enable);
1873 static void perf_log_itrace_start(struct perf_event *event);
1876 event_sched_in(struct perf_event *event, event_sched_in() argument
1880 u64 tstamp = perf_event_time(event); event_sched_in()
1885 if (event->state <= PERF_EVENT_STATE_OFF) event_sched_in()
1888 event->state = PERF_EVENT_STATE_ACTIVE; event_sched_in()
1889 event->oncpu = smp_processor_id(); event_sched_in()
1896 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { event_sched_in()
1897 perf_log_throttle(event, 1); event_sched_in()
1898 event->hw.interrupts = 0; event_sched_in()
1906 perf_pmu_disable(event->pmu); event_sched_in()
1908 perf_set_shadow_time(event, ctx, tstamp); event_sched_in()
1910 perf_log_itrace_start(event); event_sched_in()
1912 if (event->pmu->add(event, PERF_EF_START)) { event_sched_in()
1913 event->state = PERF_EVENT_STATE_INACTIVE; event_sched_in()
1914 event->oncpu = -1; event_sched_in()
1919 event->tstamp_running += tstamp - event->tstamp_stopped; event_sched_in()
1921 if (!is_software_event(event)) event_sched_in()
1925 if (event->attr.freq && event->attr.sample_freq) event_sched_in()
1928 if (event->attr.exclusive) event_sched_in()
1931 if (is_orphaned_child(event)) event_sched_in()
1935 perf_pmu_enable(event->pmu); event_sched_in()
1945 struct perf_event *event, *partial_group = NULL; group_sched_in() local
1964 list_for_each_entry(event, &group_event->sibling_list, group_entry) { group_sched_in()
1965 if (event_sched_in(event, cpuctx, ctx)) { group_sched_in()
1966 partial_group = event; group_sched_in()
1978 * The events up to the failed event are scheduled out normally, group_sched_in()
1986 * the time the event was actually stopped, such that time delta group_sched_in()
1989 list_for_each_entry(event, &group_event->sibling_list, group_entry) { group_sched_in()
1990 if (event == partial_group) group_sched_in()
1994 event->tstamp_running += now - event->tstamp_stopped; group_sched_in()
1995 event->tstamp_stopped = now; group_sched_in()
1997 event_sched_out(event, cpuctx, ctx); group_sched_in()
2010 * Work out whether we can put this event group on the CPU now.
2012 static int group_can_go_on(struct perf_event *event, group_can_go_on() argument
2019 if (event->group_flags & PERF_GROUP_SOFTWARE) group_can_go_on()
2031 if (event->attr.exclusive && cpuctx->active_oncpu) group_can_go_on()
2040 static void add_event_to_ctx(struct perf_event *event, add_event_to_ctx() argument
2043 u64 tstamp = perf_event_time(event); add_event_to_ctx()
2045 list_add_event(event, ctx); add_event_to_ctx()
2046 perf_group_attach(event); add_event_to_ctx()
2047 event->tstamp_enabled = tstamp; add_event_to_ctx()
2048 event->tstamp_running = tstamp; add_event_to_ctx()
2049 event->tstamp_stopped = tstamp; add_event_to_ctx()
2072 * Cross CPU call to install and enable a performance event
2078 struct perf_event *event = info; __perf_install_in_context() local
2079 struct perf_event_context *ctx = event->ctx; __perf_install_in_context()
2114 * matches event->cgrp. Must be done before __perf_install_in_context()
2117 update_cgrp_time_from_event(event); __perf_install_in_context()
2119 add_event_to_ctx(event, ctx); __perf_install_in_context()
2133 * Attach a performance event to a context
2135 * First we add the event to the list with the hardware enable bit
2136 * in event->hw_config cleared.
2138 * If the event is attached to a task which is on a CPU we use a smp
2144 struct perf_event *event, perf_install_in_context()
2151 event->ctx = ctx; perf_install_in_context()
2152 if (event->cpu != -1) perf_install_in_context()
2153 event->cpu = cpu; perf_install_in_context()
2160 cpu_function_call(cpu, __perf_install_in_context, event); perf_install_in_context()
2165 if (!task_function_call(task, __perf_install_in_context, event)) perf_install_in_context()
2184 * Since the task isn't running, it's safe to add the event, us holding perf_install_in_context()
2187 add_event_to_ctx(event, ctx); perf_install_in_context()
2192 * Put an event into inactive state and update time fields.
2199 static void __perf_event_mark_enabled(struct perf_event *event) __perf_event_mark_enabled() argument
2202 u64 tstamp = perf_event_time(event); __perf_event_mark_enabled()
2204 event->state = PERF_EVENT_STATE_INACTIVE; __perf_event_mark_enabled()
2205 event->tstamp_enabled = tstamp - event->total_time_enabled; __perf_event_mark_enabled()
2206 list_for_each_entry(sub, &event->sibling_list, group_entry) { __perf_event_mark_enabled()
2213 * Cross CPU call to enable a performance event
2217 struct perf_event *event = info; __perf_event_enable() local
2218 struct perf_event_context *ctx = event->ctx; __perf_event_enable()
2219 struct perf_event *leader = event->group_leader; __perf_event_enable()
2238 if (event->state >= PERF_EVENT_STATE_INACTIVE) __perf_event_enable()
2246 __perf_event_mark_enabled(event); __perf_event_enable()
2248 if (!event_filter_match(event)) { __perf_event_enable()
2249 if (is_cgroup_event(event)) __perf_event_enable()
2250 perf_cgroup_defer_enabled(event); __perf_event_enable()
2255 * If the event is in a group and isn't the group leader, __perf_event_enable()
2258 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) __perf_event_enable()
2261 if (!group_can_go_on(event, cpuctx, 1)) { __perf_event_enable()
2264 if (event == leader) __perf_event_enable()
2265 err = group_sched_in(event, cpuctx, ctx); __perf_event_enable()
2267 err = event_sched_in(event, cpuctx, ctx); __perf_event_enable()
2272 * If this event can't go on and it's part of a __perf_event_enable()
2275 if (leader != event) { __perf_event_enable()
2292 * Enable an event.
2294 * If event->ctx is a cloned context, callers must make sure that
2295 * every task struct that event->ctx->task could possibly point to
2300 static void _perf_event_enable(struct perf_event *event) _perf_event_enable() argument
2302 struct perf_event_context *ctx = event->ctx; _perf_event_enable()
2307 * Enable the event on the cpu that it's on _perf_event_enable()
2309 cpu_function_call(event->cpu, __perf_event_enable, event); _perf_event_enable()
2314 if (event->state >= PERF_EVENT_STATE_INACTIVE) _perf_event_enable()
2318 * If the event is in error state, clear that first. _perf_event_enable()
2319 * That way, if we see the event in error state below, we _perf_event_enable()
2324 if (event->state == PERF_EVENT_STATE_ERROR) _perf_event_enable()
2325 event->state = PERF_EVENT_STATE_OFF; _perf_event_enable()
2329 __perf_event_mark_enabled(event); _perf_event_enable()
2335 if (!task_function_call(task, __perf_event_enable, event)) _perf_event_enable()
2341 * If the context is active and the event is still off, _perf_event_enable()
2344 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { _perf_event_enable()
2360 void perf_event_enable(struct perf_event *event) perf_event_enable() argument
2364 ctx = perf_event_ctx_lock(event); perf_event_enable()
2365 _perf_event_enable(event); perf_event_enable()
2366 perf_event_ctx_unlock(event, ctx); perf_event_enable()
2370 static int _perf_event_refresh(struct perf_event *event, int refresh) _perf_event_refresh() argument
2375 if (event->attr.inherit || !is_sampling_event(event)) _perf_event_refresh()
2378 atomic_add(refresh, &event->event_limit); _perf_event_refresh()
2379 _perf_event_enable(event); _perf_event_refresh()
2387 int perf_event_refresh(struct perf_event *event, int refresh) perf_event_refresh() argument
2392 ctx = perf_event_ctx_lock(event); perf_event_refresh()
2393 ret = _perf_event_refresh(event, refresh); perf_event_refresh()
2394 perf_event_ctx_unlock(event, ctx); perf_event_refresh()
2404 struct perf_event *event; ctx_sched_out() local
2418 list_for_each_entry(event, &ctx->pinned_groups, group_entry) ctx_sched_out()
2419 group_sched_out(event, cpuctx, ctx); ctx_sched_out()
2423 list_for_each_entry(event, &ctx->flexible_groups, group_entry) ctx_sched_out()
2424 group_sched_out(event, cpuctx, ctx); ctx_sched_out()
2467 static void __perf_event_sync_stat(struct perf_event *event, __perf_event_sync_stat() argument
2472 if (!event->attr.inherit_stat) __perf_event_sync_stat()
2476 * Update the event value, we cannot use perf_event_read() __perf_event_sync_stat()
2479 * we know the event must be on the current CPU, therefore we __perf_event_sync_stat()
2482 switch (event->state) { __perf_event_sync_stat()
2484 event->pmu->read(event); __perf_event_sync_stat()
2488 update_event_times(event); __perf_event_sync_stat()
2496 * In order to keep per-task stats reliable we need to flip the event __perf_event_sync_stat()
2500 value = local64_xchg(&event->count, value); __perf_event_sync_stat()
2503 swap(event->total_time_enabled, next_event->total_time_enabled); __perf_event_sync_stat()
2504 swap(event->total_time_running, next_event->total_time_running); __perf_event_sync_stat()
2509 perf_event_update_userpage(event); __perf_event_sync_stat()
2516 struct perf_event *event, *next_event; perf_event_sync_stat() local
2523 event = list_first_entry(&ctx->event_list, perf_event_sync_stat()
2529 while (&event->event_entry != &ctx->event_list && perf_event_sync_stat()
2532 __perf_event_sync_stat(event, next_event); perf_event_sync_stat()
2534 event = list_next_entry(event, event_entry); perf_event_sync_stat()
2669 * We stop each event and update the event value in event->count.
2672 * sets the disabled bit in the control field of event _before_
2673 * accessing the event control register. If a NMI hits, then it will
2674 * not restart the event.
2693 * cgroup events are system-wide mode only __perf_event_task_sched_out()
2726 struct perf_event *event; ctx_pinned_sched_in() local
2728 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { ctx_pinned_sched_in()
2729 if (event->state <= PERF_EVENT_STATE_OFF) ctx_pinned_sched_in()
2731 if (!event_filter_match(event)) ctx_pinned_sched_in()
2735 if (is_cgroup_event(event)) ctx_pinned_sched_in()
2736 perf_cgroup_mark_enabled(event, ctx); ctx_pinned_sched_in()
2738 if (group_can_go_on(event, cpuctx, 1)) ctx_pinned_sched_in()
2739 group_sched_in(event, cpuctx, ctx); ctx_pinned_sched_in()
2745 if (event->state == PERF_EVENT_STATE_INACTIVE) { ctx_pinned_sched_in()
2746 update_group_times(event); ctx_pinned_sched_in()
2747 event->state = PERF_EVENT_STATE_ERROR; ctx_pinned_sched_in()
2756 struct perf_event *event; ctx_flexible_sched_in() local
2759 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { ctx_flexible_sched_in()
2761 if (event->state <= PERF_EVENT_STATE_OFF) ctx_flexible_sched_in()
2767 if (!event_filter_match(event)) ctx_flexible_sched_in()
2771 if (is_cgroup_event(event)) ctx_flexible_sched_in()
2772 perf_cgroup_mark_enabled(event, ctx); ctx_flexible_sched_in()
2774 if (group_can_go_on(event, cpuctx, can_add_hw)) { ctx_flexible_sched_in()
2775 if (group_sched_in(event, cpuctx, ctx)) ctx_flexible_sched_in()
2849 * We restore the event value and then enable it.
2852 * sets the enabled bit in the control field of event _before_
2853 * accessing the event control register. If a NMI hits, then it will
2854 * keep the event running.
2872 * cgroup events are system-wide mode only
2884 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) perf_calculate_period() argument
2886 u64 frequency = event->attr.sample_freq; perf_calculate_period()
2960 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) perf_adjust_period() argument
2962 struct hw_perf_event *hwc = &event->hw; perf_adjust_period()
2966 period = perf_calculate_period(event, nsec, count); perf_adjust_period()
2980 event->pmu->stop(event, PERF_EF_UPDATE); perf_adjust_period()
2985 event->pmu->start(event, PERF_EF_RELOAD); perf_adjust_period()
2997 struct perf_event *event; perf_adjust_freq_unthr_context() local
3013 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { perf_adjust_freq_unthr_context()
3014 if (event->state != PERF_EVENT_STATE_ACTIVE) perf_adjust_freq_unthr_context()
3017 if (!event_filter_match(event)) perf_adjust_freq_unthr_context()
3020 perf_pmu_disable(event->pmu); perf_adjust_freq_unthr_context()
3022 hwc = &event->hw; perf_adjust_freq_unthr_context()
3026 perf_log_throttle(event, 1); perf_adjust_freq_unthr_context()
3027 event->pmu->start(event, 0); perf_adjust_freq_unthr_context()
3030 if (!event->attr.freq || !event->attr.sample_freq) perf_adjust_freq_unthr_context()
3034 * stop the event and update event->count perf_adjust_freq_unthr_context()
3036 event->pmu->stop(event, PERF_EF_UPDATE); perf_adjust_freq_unthr_context()
3038 now = local64_read(&event->count); perf_adjust_freq_unthr_context()
3043 * restart the event perf_adjust_freq_unthr_context()
3045 * we have stopped the event so tell that perf_adjust_freq_unthr_context()
3050 perf_adjust_period(event, period, delta, false); perf_adjust_freq_unthr_context()
3052 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); perf_adjust_freq_unthr_context()
3054 perf_pmu_enable(event->pmu); perf_adjust_freq_unthr_context()
3139 static int event_enable_on_exec(struct perf_event *event, event_enable_on_exec() argument
3142 if (!event->attr.enable_on_exec) event_enable_on_exec()
3145 event->attr.enable_on_exec = 0; event_enable_on_exec()
3146 if (event->state >= PERF_EVENT_STATE_INACTIVE) event_enable_on_exec()
3149 __perf_event_mark_enabled(event); event_enable_on_exec()
3161 struct perf_event *event; perf_event_enable_on_exec() local
3183 list_for_each_entry(event, &ctx->event_list, event_entry) { perf_event_enable_on_exec()
3184 ret = event_enable_on_exec(event, ctx); perf_event_enable_on_exec()
3190 * Unclone this context if we enabled any event. perf_event_enable_on_exec()
3219 struct perf_event *event; member in struct:perf_read_data
3225 * Cross CPU call to read the hardware event
3230 struct perf_event *sub, *event = data->event; __perf_event_read() local
3231 struct perf_event_context *ctx = event->ctx; __perf_event_read()
3233 struct pmu *pmu = event->pmu; __perf_event_read()
3239 * event->count would have been updated to a recent sample __perf_event_read()
3240 * when the event was scheduled out. __perf_event_read()
3248 update_cgrp_time_from_event(event); __perf_event_read()
3251 update_event_times(event); __perf_event_read()
3252 if (event->state != PERF_EVENT_STATE_ACTIVE) __perf_event_read()
3256 pmu->read(event); __perf_event_read()
3263 pmu->read(event); __perf_event_read()
3265 list_for_each_entry(sub, &event->sibling_list, group_entry) { __perf_event_read()
3269 * Use sibling's PMU rather than @event's since __perf_event_read()
3282 static inline u64 perf_event_count(struct perf_event *event) perf_event_count() argument
3284 if (event->pmu->count) perf_event_count()
3285 return event->pmu->count(event); perf_event_count()
3287 return __perf_event_count(event); perf_event_count()
3291 * NMI-safe method to read a local event, that is an event that
3298 u64 perf_event_read_local(struct perf_event *event) perf_event_read_local() argument
3309 /* If this is a per-task event, it must be for current */ perf_event_read_local()
3310 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && perf_event_read_local()
3311 event->hw.target != current); perf_event_read_local()
3313 /* If this is a per-CPU event, it must be for this CPU */ perf_event_read_local()
3314 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && perf_event_read_local()
3315 event->cpu != smp_processor_id()); perf_event_read_local()
3318 * It must not be an event with inherit set, we cannot read perf_event_read_local()
3321 WARN_ON_ONCE(event->attr.inherit); perf_event_read_local()
3327 WARN_ON_ONCE(event->pmu->count); perf_event_read_local()
3330 * If the event is currently on this CPU, its either a per-task event, perf_event_read_local()
3334 if (event->oncpu == smp_processor_id()) perf_event_read_local()
3335 event->pmu->read(event); perf_event_read_local()
3337 val = local64_read(&event->count); perf_event_read_local()
3343 static int perf_event_read(struct perf_event *event, bool group) perf_event_read() argument
3348 * If event is enabled and currently active on a CPU, update the perf_event_read()
3349 * value in the event structure: perf_event_read()
3351 if (event->state == PERF_EVENT_STATE_ACTIVE) { perf_event_read()
3353 .event = event, perf_event_read()
3357 smp_call_function_single(event->oncpu, perf_event_read()
3360 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { perf_event_read()
3361 struct perf_event_context *ctx = event->ctx; perf_event_read()
3372 update_cgrp_time_from_event(event); perf_event_read()
3375 update_group_times(event); perf_event_read()
3377 update_event_times(event); perf_event_read()
3443 struct perf_event *event) find_get_context()
3450 int cpu = event->cpu; find_get_context()
3453 /* Must be root to operate on a CPU event: */ find_get_context()
3458 * We could be clever and allow attaching an event to an find_get_context()
3478 if (event->attach_state & PERF_ATTACH_TASK_DATA) { find_get_context()
3545 static void perf_event_free_filter(struct perf_event *event);
3546 static void perf_event_free_bpf_prog(struct perf_event *event);
3550 struct perf_event *event; free_event_rcu() local
3552 event = container_of(head, struct perf_event, rcu_head); free_event_rcu()
3553 if (event->ns) free_event_rcu()
3554 put_pid_ns(event->ns); free_event_rcu()
3555 perf_event_free_filter(event); free_event_rcu()
3556 kfree(event); free_event_rcu()
3559 static void ring_buffer_attach(struct perf_event *event,
3562 static void unaccount_event_cpu(struct perf_event *event, int cpu) unaccount_event_cpu() argument
3564 if (event->parent) unaccount_event_cpu()
3567 if (is_cgroup_event(event)) unaccount_event_cpu()
3571 static void unaccount_event(struct perf_event *event) unaccount_event() argument
3573 if (event->parent) unaccount_event()
3576 if (event->attach_state & PERF_ATTACH_TASK) unaccount_event()
3578 if (event->attr.mmap || event->attr.mmap_data) unaccount_event()
3580 if (event->attr.comm) unaccount_event()
3582 if (event->attr.task) unaccount_event()
3584 if (event->attr.freq) unaccount_event()
3586 if (event->attr.context_switch) { unaccount_event()
3590 if (is_cgroup_event(event)) unaccount_event()
3592 if (has_branch_stack(event)) unaccount_event()
3595 unaccount_event_cpu(event, event->cpu); unaccount_event()
3600 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3610 static int exclusive_event_init(struct perf_event *event) exclusive_event_init() argument
3612 struct pmu *pmu = event->pmu; exclusive_event_init()
3625 * Since this is called in perf_event_alloc() path, event::ctx exclusive_event_init()
3627 * to mean "per-task event", because unlike other attach states it exclusive_event_init()
3630 if (event->attach_state & PERF_ATTACH_TASK) { exclusive_event_init()
3641 static void exclusive_event_destroy(struct perf_event *event) exclusive_event_destroy() argument
3643 struct pmu *pmu = event->pmu; exclusive_event_destroy()
3649 if (event->attach_state & PERF_ATTACH_TASK) exclusive_event_destroy()
3666 static bool exclusive_event_installable(struct perf_event *event, exclusive_event_installable() argument
3670 struct pmu *pmu = event->pmu; exclusive_event_installable()
3676 if (exclusive_event_match(iter_event, event)) exclusive_event_installable()
3683 static void __free_event(struct perf_event *event) __free_event() argument
3685 if (!event->parent) { __free_event()
3686 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) __free_event()
3690 perf_event_free_bpf_prog(event); __free_event()
3692 if (event->destroy) __free_event()
3693 event->destroy(event); __free_event()
3695 if (event->ctx) __free_event()
3696 put_ctx(event->ctx); __free_event()
3698 if (event->pmu) { __free_event()
3699 exclusive_event_destroy(event); __free_event()
3700 module_put(event->pmu->module); __free_event()
3703 call_rcu(&event->rcu_head, free_event_rcu); __free_event()
3706 static void _free_event(struct perf_event *event) _free_event() argument
3708 irq_work_sync(&event->pending); _free_event()
3710 unaccount_event(event); _free_event()
3712 if (event->rb) { _free_event()
3714 * Can happen when we close an event with re-directed output. _free_event()
3719 mutex_lock(&event->mmap_mutex); _free_event()
3720 ring_buffer_attach(event, NULL); _free_event()
3721 mutex_unlock(&event->mmap_mutex); _free_event()
3724 if (is_cgroup_event(event)) _free_event()
3725 perf_detach_cgroup(event); _free_event()
3727 __free_event(event); _free_event()
3732 * where the event isn't exposed yet and inherited events.
3734 static void free_event(struct perf_event *event) free_event() argument
3736 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, free_event()
3737 "unexpected event refcount: %ld; ptr=%p\n", free_event()
3738 atomic_long_read(&event->refcount), event)) { free_event()
3743 _free_event(event); free_event()
3747 * Remove user event from the owner task.
3749 static void perf_remove_from_owner(struct perf_event *event) perf_remove_from_owner() argument
3754 owner = ACCESS_ONCE(event->owner); perf_remove_from_owner()
3758 * free this event, otherwise we need to serialize on perf_remove_from_owner()
3784 * We have to re-check the event->owner field, if it is cleared perf_remove_from_owner()
3787 * event. perf_remove_from_owner()
3789 if (event->owner) perf_remove_from_owner()
3790 list_del_init(&event->owner_entry); perf_remove_from_owner()
3796 static void put_event(struct perf_event *event) put_event() argument
3800 if (!atomic_long_dec_and_test(&event->refcount)) put_event()
3803 if (!is_kernel_event(event)) put_event()
3804 perf_remove_from_owner(event); put_event()
3818 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); put_event()
3820 perf_remove_from_context(event, true); put_event()
3821 perf_event_ctx_unlock(event, ctx); put_event()
3823 _free_event(event); put_event()
3826 int perf_event_release_kernel(struct perf_event *event) perf_event_release_kernel() argument
3828 put_event(event); perf_event_release_kernel()
3848 struct perf_event *event, *tmp; orphans_remove_work() local
3854 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { orphans_remove_work()
3855 struct perf_event *parent_event = event->parent; orphans_remove_work()
3857 if (!is_orphaned_child(event)) orphans_remove_work()
3860 perf_remove_from_context(event, true); orphans_remove_work()
3863 list_del_init(&event->child_list); orphans_remove_work()
3866 free_event(event); orphans_remove_work()
3878 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) perf_event_read_value() argument
3886 mutex_lock(&event->child_mutex); perf_event_read_value()
3888 (void)perf_event_read(event, false); perf_event_read_value()
3889 total += perf_event_count(event); perf_event_read_value()
3891 *enabled += event->total_time_enabled + perf_event_read_value()
3892 atomic64_read(&event->child_total_time_enabled); perf_event_read_value()
3893 *running += event->total_time_running + perf_event_read_value()
3894 atomic64_read(&event->child_total_time_running); perf_event_read_value()
3896 list_for_each_entry(child, &event->child_list, child_list) { perf_event_read_value()
3902 mutex_unlock(&event->child_mutex); perf_event_read_value()
3950 static int perf_read_group(struct perf_event *event, perf_read_group() argument
3953 struct perf_event *leader = event->group_leader, *child; perf_read_group()
3960 values = kzalloc(event->read_size, GFP_KERNEL); perf_read_group()
3984 ret = event->read_size; perf_read_group()
3985 if (copy_to_user(buf, values, event->read_size)) perf_read_group()
3996 static int perf_read_one(struct perf_event *event, perf_read_one() argument
4003 values[n++] = perf_event_read_value(event, &enabled, &running); perf_read_one()
4009 values[n++] = primary_event_id(event); perf_read_one()
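
The perf_read_one() fragment above fixes the order of the values a read() on an event fd returns for a non-group event: the count from perf_event_read_value(), then the enabled and running times if requested, then the event id. From user space that maps onto a plain struct, assuming read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct single_read {
	uint64_t value;		/* perf_event_read_value() */
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	uint64_t id;		/* primary_event_id() */
};

static void report(int fd)
{
	struct single_read r;

	if (read(fd, &r, sizeof(r)) != sizeof(r))
		return;

	printf("count=%llu, multiplexed share=%.1f%%\n",
	       (unsigned long long)r.value,
	       r.time_enabled ? 100.0 * r.time_running / r.time_enabled : 0.0);
}
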
4017 static bool is_event_hup(struct perf_event *event) is_event_hup() argument
4021 if (event->state != PERF_EVENT_STATE_EXIT) is_event_hup()
4024 mutex_lock(&event->child_mutex); is_event_hup()
4025 no_children = list_empty(&event->child_list); is_event_hup()
4026 mutex_unlock(&event->child_mutex); is_event_hup()
4031 * Read the performance event - simple non-blocking version for now
4034 __perf_read(struct perf_event *event, char __user *buf, size_t count) __perf_read() argument
4036 u64 read_format = event->attr.read_format; __perf_read()
4040 * Return end-of-file for a read on an event that is in __perf_read()
4044 if (event->state == PERF_EVENT_STATE_ERROR) __perf_read()
4047 if (count < event->read_size) __perf_read()
4050 WARN_ON_ONCE(event->ctx->parent_ctx); __perf_read()
4052 ret = perf_read_group(event, read_format, buf); __perf_read()
4054 ret = perf_read_one(event, read_format, buf); __perf_read()
4062 struct perf_event *event = file->private_data; perf_read() local
4066 ctx = perf_event_ctx_lock(event); perf_read()
4067 ret = __perf_read(event, buf, count); perf_read()
4068 perf_event_ctx_unlock(event, ctx); perf_read()
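The read path above serializes counter values according to event->attr.read_format: perf_read_one() emits the value followed by the optional time_enabled/time_running/id words, and perf_read_group() does the same once per group member. A minimal user-space sketch of the matching parse, assuming the event was opened with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING and is read individually (not as a group):

#include <stdint.h>
#include <unistd.h>

/* layout written by perf_read_one(): value, time_enabled, time_running */
struct read_one {
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
};

static int read_scaled(int fd, double *out)
{
	struct read_one r;

	if (read(fd, &r, sizeof(r)) != sizeof(r))
		return -1;
	/* scale for multiplexing: value * enabled / running */
	*out = r.time_running ?
	       (double)r.value * r.time_enabled / r.time_running : (double)r.value;
	return 0;
}

The enabled/running pair is exactly what perf_event_read_value() accumulates across children, so the ratio above is the usual correction when the PMU was time-shared.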
4075 struct perf_event *event = file->private_data; perf_poll() local
4079 poll_wait(file, &event->waitq, wait); perf_poll()
4081 if (is_event_hup(event)) perf_poll()
4085 * Pin the event->rb by taking event->mmap_mutex; otherwise perf_poll()
4088 mutex_lock(&event->mmap_mutex); perf_poll()
4089 rb = event->rb; perf_poll()
4092 mutex_unlock(&event->mmap_mutex); perf_poll()
4096 static void _perf_event_reset(struct perf_event *event) _perf_event_reset() argument
4098 (void)perf_event_read(event, false); _perf_event_reset()
4099 local64_set(&event->count, 0); _perf_event_reset()
4100 perf_event_update_userpage(event); _perf_event_reset()
4104 * Holding the top-level event's child_mutex means that any
4105 * descendant process that has inherited this event will block
4109 static void perf_event_for_each_child(struct perf_event *event, perf_event_for_each_child() argument
4114 WARN_ON_ONCE(event->ctx->parent_ctx); perf_event_for_each_child()
4116 mutex_lock(&event->child_mutex); perf_event_for_each_child()
4117 func(event); perf_event_for_each_child()
4118 list_for_each_entry(child, &event->child_list, child_list) perf_event_for_each_child()
4120 mutex_unlock(&event->child_mutex); perf_event_for_each_child()
4123 static void perf_event_for_each(struct perf_event *event, perf_event_for_each() argument
4126 struct perf_event_context *ctx = event->ctx; perf_event_for_each()
4131 event = event->group_leader; perf_event_for_each()
4133 perf_event_for_each_child(event, func); perf_event_for_each()
4134 list_for_each_entry(sibling, &event->sibling_list, group_entry) perf_event_for_each()
4139 struct perf_event *event; member in struct:period_event
4146 struct perf_event *event = pe->event; __perf_event_period() local
4147 struct perf_event_context *ctx = event->ctx; __perf_event_period()
4152 if (event->attr.freq) { __perf_event_period()
4153 event->attr.sample_freq = value; __perf_event_period()
4155 event->attr.sample_period = value; __perf_event_period()
4156 event->hw.sample_period = value; __perf_event_period()
4159 active = (event->state == PERF_EVENT_STATE_ACTIVE); __perf_event_period()
4162 event->pmu->stop(event, PERF_EF_UPDATE); __perf_event_period()
4165 local64_set(&event->hw.period_left, 0); __perf_event_period()
4168 event->pmu->start(event, PERF_EF_RELOAD); __perf_event_period()
4176 static int perf_event_period(struct perf_event *event, u64 __user *arg) perf_event_period() argument
4178 struct period_event pe = { .event = event, }; perf_event_period()
4179 struct perf_event_context *ctx = event->ctx; perf_event_period()
4183 if (!is_sampling_event(event)) perf_event_period()
4192 if (event->attr.freq && value > sysctl_perf_event_sample_rate) perf_event_period()
4199 cpu_function_call(event->cpu, __perf_event_period, &pe); perf_event_period()
4214 if (event->attr.freq) { perf_event_period()
4215 event->attr.sample_freq = value; perf_event_period()
4217 event->attr.sample_period = value; perf_event_period()
4218 event->hw.sample_period = value; perf_event_period()
4221 local64_set(&event->hw.period_left, 0); perf_event_period()
4243 static int perf_event_set_output(struct perf_event *event,
4245 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
4246 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
4248 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) _perf_ioctl() argument
4265 return _perf_event_refresh(event, arg); _perf_ioctl()
4268 return perf_event_period(event, (u64 __user *)arg); _perf_ioctl()
4272 u64 id = primary_event_id(event); _perf_ioctl()
4289 ret = perf_event_set_output(event, output_event); _perf_ioctl()
4292 ret = perf_event_set_output(event, NULL); _perf_ioctl()
4298 return perf_event_set_filter(event, (void __user *)arg); _perf_ioctl()
4301 return perf_event_set_bpf_prog(event, arg); _perf_ioctl()
4308 perf_event_for_each(event, func); _perf_ioctl()
4310 perf_event_for_each_child(event, func); _perf_ioctl()
4317 struct perf_event *event = file->private_data; perf_ioctl() local
4321 ctx = perf_event_ctx_lock(event); perf_ioctl()
4322 ret = _perf_ioctl(event, cmd, arg); perf_ioctl()
4323 perf_event_ctx_unlock(event, ctx); perf_ioctl()
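_perf_ioctl() dispatches the perf fd ioctls under the ctx lock and, via perf_event_for_each()/perf_event_for_each_child(), optionally applies ENABLE/DISABLE/RESET to the whole group. A short user-space sketch; fd is assumed to come from perf_event_open(2) and to lead a group. Note that PERF_EVENT_IOC_PERIOD passes a pointer to a u64 (see perf_event_period() above), while the others pass flags or counts directly:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void restart_group(int fd, uint64_t new_period)
{
	ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
	ioctl(fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
	ioctl(fd, PERF_EVENT_IOC_PERIOD,  &new_period);
	ioctl(fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
}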
4351 struct perf_event *event; perf_event_task_enable() local
4354 list_for_each_entry(event, &current->perf_event_list, owner_entry) { perf_event_task_enable()
4355 ctx = perf_event_ctx_lock(event); perf_event_task_enable()
4356 perf_event_for_each_child(event, _perf_event_enable); perf_event_task_enable()
4357 perf_event_ctx_unlock(event, ctx); perf_event_task_enable()
4367 struct perf_event *event; perf_event_task_disable() local
4370 list_for_each_entry(event, &current->perf_event_list, owner_entry) { perf_event_task_disable()
4371 ctx = perf_event_ctx_lock(event); perf_event_task_disable()
4372 perf_event_for_each_child(event, _perf_event_disable); perf_event_task_disable()
4373 perf_event_ctx_unlock(event, ctx); perf_event_task_disable()
4380 static int perf_event_index(struct perf_event *event) perf_event_index() argument
4382 if (event->hw.state & PERF_HES_STOPPED) perf_event_index()
4385 if (event->state != PERF_EVENT_STATE_ACTIVE) perf_event_index()
4388 return event->pmu->event_idx(event); perf_event_index()
4391 static void calc_timer_values(struct perf_event *event, calc_timer_values() argument
4399 ctx_time = event->shadow_ctx_time + *now; calc_timer_values()
4400 *enabled = ctx_time - event->tstamp_enabled; calc_timer_values()
4401 *running = ctx_time - event->tstamp_running; calc_timer_values()
4404 static void perf_event_init_userpage(struct perf_event *event) perf_event_init_userpage() argument
4410 rb = rcu_dereference(event->rb); perf_event_init_userpage()
4427 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) arch_perf_update_userpage()
4436 void perf_event_update_userpage(struct perf_event *event) perf_event_update_userpage() argument
4443 rb = rcu_dereference(event->rb); perf_event_update_userpage()
4449 * based on snapshot values taken when the event perf_event_update_userpage()
4456 calc_timer_values(event, &now, &enabled, &running); perf_event_update_userpage()
4466 userpg->index = perf_event_index(event); perf_event_update_userpage()
4467 userpg->offset = perf_event_count(event); perf_event_update_userpage()
4469 userpg->offset -= local64_read(&event->hw.prev_count); perf_event_update_userpage()
4472 atomic64_read(&event->child_total_time_enabled); perf_event_update_userpage()
4475 atomic64_read(&event->child_total_time_running); perf_event_update_userpage()
4477 arch_perf_update_userpage(event, userpg, now); perf_event_update_userpage()
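perf_event_update_userpage() republishes index, offset and the enabled/running times into the mmap'd struct perf_event_mmap_page, bumping userpg->lock around the update. A hedged sketch of the matching lock-free read loop on the user side; field names are from uapi/linux/perf_event.h, and __sync_synchronize() stands in for the compiler/CPU barriers that pair with the kernel's barrier()/smp_wmb():

#include <stdint.h>
#include <linux/perf_event.h>

/* pg: the first mmap'd page of the event (see perf_mmap() below) */
static void read_userpage(volatile struct perf_event_mmap_page *pg,
			  int64_t *count, uint64_t *enabled, uint64_t *running)
{
	uint32_t seq;

	do {
		seq = pg->lock;
		__sync_synchronize();		/* pairs with the kernel's barriers */
		*count   = pg->offset;		/* plus rdpmc(index - 1) if index != 0 */
		*enabled = pg->time_enabled;
		*running = pg->time_running;
		__sync_synchronize();
	} while (pg->lock != seq);
}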
4488 struct perf_event *event = vma->vm_file->private_data; perf_mmap_fault() local
4499 rb = rcu_dereference(event->rb); perf_mmap_fault()
4521 static void ring_buffer_attach(struct perf_event *event, ring_buffer_attach() argument
4527 if (event->rb) { ring_buffer_attach()
4530 * event->rb_entry and wait/clear when adding event->rb_entry. ring_buffer_attach()
4532 WARN_ON_ONCE(event->rcu_pending); ring_buffer_attach()
4534 old_rb = event->rb; ring_buffer_attach()
4536 list_del_rcu(&event->rb_entry); ring_buffer_attach()
4539 event->rcu_batches = get_state_synchronize_rcu(); ring_buffer_attach()
4540 event->rcu_pending = 1; ring_buffer_attach()
4544 if (event->rcu_pending) { ring_buffer_attach()
4545 cond_synchronize_rcu(event->rcu_batches); ring_buffer_attach()
4546 event->rcu_pending = 0; ring_buffer_attach()
4550 list_add_rcu(&event->rb_entry, &rb->event_list); ring_buffer_attach()
4554 rcu_assign_pointer(event->rb, rb); ring_buffer_attach()
4563 wake_up_all(&event->waitq); ring_buffer_attach()
4567 static void ring_buffer_wakeup(struct perf_event *event) ring_buffer_wakeup() argument
4572 rb = rcu_dereference(event->rb); ring_buffer_wakeup()
4574 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) ring_buffer_wakeup()
4575 wake_up_all(&event->waitq); ring_buffer_wakeup()
4580 struct ring_buffer *ring_buffer_get(struct perf_event *event) ring_buffer_get() argument
4585 rb = rcu_dereference(event->rb); ring_buffer_get()
4607 struct perf_event *event = vma->vm_file->private_data; perf_mmap_open() local
4609 atomic_inc(&event->mmap_count); perf_mmap_open()
4610 atomic_inc(&event->rb->mmap_count); perf_mmap_open()
4613 atomic_inc(&event->rb->aux_mmap_count); perf_mmap_open()
4615 if (event->pmu->event_mapped) perf_mmap_open()
4616 event->pmu->event_mapped(event); perf_mmap_open()
4621 * event, or through other events by use of perf_event_set_output().
4629 struct perf_event *event = vma->vm_file->private_data; perf_mmap_close() local
4631 struct ring_buffer *rb = ring_buffer_get(event); perf_mmap_close()
4636 if (event->pmu->event_unmapped) perf_mmap_close()
4637 event->pmu->event_unmapped(event); perf_mmap_close()
4641 * event->mmap_count, so it is ok to use event->mmap_mutex to perf_mmap_close()
4645 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { perf_mmap_close()
4650 mutex_unlock(&event->mmap_mutex); perf_mmap_close()
4655 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) perf_mmap_close()
4658 ring_buffer_attach(event, NULL); perf_mmap_close()
4659 mutex_unlock(&event->mmap_mutex); perf_mmap_close()
4672 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { perf_mmap_close()
4673 if (!atomic_long_inc_not_zero(&event->refcount)) { perf_mmap_close()
4675 * This event is en-route to free_event() which will perf_mmap_close()
4682 mutex_lock(&event->mmap_mutex); perf_mmap_close()
4688 * If we find a different rb; ignore this event, a next perf_mmap_close()
4693 if (event->rb == rb) perf_mmap_close()
4694 ring_buffer_attach(event, NULL); perf_mmap_close()
4696 mutex_unlock(&event->mmap_mutex); perf_mmap_close()
4697 put_event(event); perf_mmap_close()
4733 struct perf_event *event = file->private_data; perf_mmap() local
4748 if (event->cpu == -1 && event->attr.inherit) perf_mmap()
4766 if (!event->rb) perf_mmap()
4771 mutex_lock(&event->mmap_mutex); perf_mmap()
4774 rb = event->rb; perf_mmap()
4826 WARN_ON_ONCE(event->ctx->parent_ctx); perf_mmap()
4828 mutex_lock(&event->mmap_mutex); perf_mmap()
4829 if (event->rb) { perf_mmap()
4830 if (event->rb->nr_pages != nr_pages) { perf_mmap()
4835 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { perf_mmap()
4841 mutex_unlock(&event->mmap_mutex); perf_mmap()
4873 WARN_ON(!rb && event->rb); perf_mmap()
4880 event->attr.watermark ? event->attr.wakeup_watermark : 0, perf_mmap()
4881 event->cpu, flags); perf_mmap()
4892 ring_buffer_attach(event, rb); perf_mmap()
4894 perf_event_init_userpage(event); perf_mmap()
4895 perf_event_update_userpage(event); perf_mmap()
4897 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, perf_mmap()
4898 event->attr.aux_watermark, flags); perf_mmap()
4908 atomic_inc(&event->mmap_count); perf_mmap()
4913 mutex_unlock(&event->mmap_mutex); perf_mmap()
4922 if (event->pmu->event_mapped) perf_mmap()
4923 event->pmu->event_mapped(event); perf_mmap()
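perf_mmap() only accepts a mapping of one metadata page plus a power-of-two number of data pages (the nr_pages checks above) and pins the buffer through mmap_count/rb->mmap_count. A minimal user-space sketch of creating such a mapping:

#include <sys/mman.h>
#include <unistd.h>

/* one metadata page + 2^n data pages, per the size checks in perf_mmap() */
static void *map_ring(int fd, unsigned int data_pages /* power of two */)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	void *base  = mmap(NULL, (1 + data_pages) * page,
			   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	return base == MAP_FAILED ? NULL : base;
}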
4931 struct perf_event *event = filp->private_data; perf_fasync() local
4935 retval = fasync_helper(fd, filp, on, &event->fasync); perf_fasync()
4956 * Perf event wakeup
4962 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) perf_event_fasync() argument
4965 if (event->parent) perf_event_fasync()
4966 event = event->parent; perf_event_fasync()
4967 return &event->fasync; perf_event_fasync()
4970 void perf_event_wakeup(struct perf_event *event) perf_event_wakeup() argument
4972 ring_buffer_wakeup(event); perf_event_wakeup()
4974 if (event->pending_kill) { perf_event_wakeup()
4975 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); perf_event_wakeup()
4976 event->pending_kill = 0; perf_event_wakeup()
4982 struct perf_event *event = container_of(entry, perf_pending_event() local
4992 if (event->pending_disable) { perf_pending_event()
4993 event->pending_disable = 0; perf_pending_event()
4994 __perf_event_disable(event); perf_pending_event()
4997 if (event->pending_wakeup) { perf_pending_event()
4998 event->pending_wakeup = 0; perf_pending_event()
4999 perf_event_wakeup(event); perf_pending_event()
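perf_event_wakeup() wakes poll() sleepers and, when pending_kill is set, signals the owner through the fasync hook that perf_fasync() installs. Requesting that SIGIO delivery from user space is plain fcntl async I/O, nothing perf-specific; a sketch:

#include <fcntl.h>
#include <unistd.h>

/* deliver SIGIO to this process when the event wakes its waiters */
static int request_sigio(int fd)
{
	if (fcntl(fd, F_SETOWN, getpid()) < 0)
		return -1;
	return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
}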
5162 struct perf_event *event) __perf_event_header__init_id()
5164 u64 sample_type = event->attr.sample_type; __perf_event_header__init_id()
5167 header->size += event->id_header_size; __perf_event_header__init_id()
5171 data->tid_entry.pid = perf_event_pid(event, current); __perf_event_header__init_id()
5172 data->tid_entry.tid = perf_event_tid(event, current); __perf_event_header__init_id()
5176 data->time = perf_event_clock(event); __perf_event_header__init_id()
5179 data->id = primary_event_id(event); __perf_event_header__init_id()
5182 data->stream_id = event->id; __perf_event_header__init_id()
5192 struct perf_event *event) perf_event_header__init_id()
5194 if (event->attr.sample_id_all) perf_event_header__init_id()
5195 __perf_event_header__init_id(header, data, event); perf_event_header__init_id()
5222 void perf_event__output_id_sample(struct perf_event *event, perf_event__output_id_sample() argument
5226 if (event->attr.sample_id_all) perf_event__output_id_sample()
5231 struct perf_event *event, perf_output_read_one()
5234 u64 read_format = event->attr.read_format; perf_output_read_one()
5238 values[n++] = perf_event_count(event); perf_output_read_one()
5241 atomic64_read(&event->child_total_time_enabled); perf_output_read_one()
5245 atomic64_read(&event->child_total_time_running); perf_output_read_one()
5248 values[n++] = primary_event_id(event); perf_output_read_one()
5257 struct perf_event *event, perf_output_read_group()
5260 struct perf_event *leader = event->group_leader, *sub; perf_output_read_group()
5261 u64 read_format = event->attr.read_format; perf_output_read_group()
5273 if (leader != event) perf_output_read_group()
5285 if ((sub != event) && perf_output_read_group()
5301 struct perf_event *event) perf_output_read()
5304 u64 read_format = event->attr.read_format; perf_output_read()
5308 * based on snapshot values taken when the event perf_output_read()
5316 calc_timer_values(event, &now, &enabled, &running); perf_output_read()
5318 if (event->attr.read_format & PERF_FORMAT_GROUP) perf_output_read()
5319 perf_output_read_group(handle, event, enabled, running); perf_output_read()
5321 perf_output_read_one(handle, event, enabled, running); perf_output_read()
5327 struct perf_event *event) perf_output_sample()
5361 perf_output_read(handle, event); perf_output_sample()
5430 u64 mask = event->attr.sample_regs_user; perf_output_sample()
5461 u64 mask = event->attr.sample_regs_intr; perf_output_sample()
5469 if (!event->attr.watermark) { perf_output_sample()
5470 int wakeup_events = event->attr.wakeup_events; perf_output_sample()
5486 struct perf_event *event, perf_prepare_sample()
5489 u64 sample_type = event->attr.sample_type; perf_prepare_sample()
5492 header->size = sizeof(*header) + event->header_size; perf_prepare_sample()
5497 __perf_event_header__init_id(header, data, event); perf_prepare_sample()
5505 data->callchain = perf_callchain(event, regs); perf_prepare_sample()
5542 u64 mask = event->attr.sample_regs_user; perf_prepare_sample()
5556 u16 stack_size = event->attr.sample_stack_user; perf_prepare_sample()
5581 u64 mask = event->attr.sample_regs_intr; perf_prepare_sample()
5590 void perf_event_output(struct perf_event *event, perf_event_output() argument
5600 perf_prepare_sample(&header, data, event, regs); perf_event_output()
5602 if (perf_output_begin(&handle, event, header.size)) perf_event_output()
5605 perf_output_sample(&handle, &header, data, event); perf_event_output()
5625 perf_event_read_event(struct perf_event *event, perf_event_read_event() argument
5634 .size = sizeof(read_event) + event->read_size, perf_event_read_event()
5636 .pid = perf_event_pid(event, task), perf_event_read_event()
5637 .tid = perf_event_tid(event, task), perf_event_read_event()
5641 perf_event_header__init_id(&read_event.header, &sample, event); perf_event_read_event()
5642 ret = perf_output_begin(&handle, event, read_event.header.size); perf_event_read_event()
5647 perf_output_read(&handle, event); perf_event_read_event()
5648 perf_event__output_id_sample(event, &handle, &sample); perf_event_read_event()
5653 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
5660 struct perf_event *event; perf_event_aux_ctx() local
5662 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { perf_event_aux_ctx()
5663 if (event->state < PERF_EVENT_STATE_INACTIVE) perf_event_aux_ctx()
5665 if (!event_filter_match(event)) perf_event_aux_ctx()
5667 output(event, data); perf_event_aux_ctx()
5741 static int perf_event_task_match(struct perf_event *event) perf_event_task_match() argument
5743 return event->attr.comm || event->attr.mmap || perf_event_task_match()
5744 event->attr.mmap2 || event->attr.mmap_data || perf_event_task_match()
5745 event->attr.task; perf_event_task_match()
5748 static void perf_event_task_output(struct perf_event *event, perf_event_task_output() argument
5757 if (!perf_event_task_match(event)) perf_event_task_output()
5760 perf_event_header__init_id(&task_event->event_id.header, &sample, event); perf_event_task_output()
5762 ret = perf_output_begin(&handle, event, perf_event_task_output()
5767 task_event->event_id.pid = perf_event_pid(event, task); perf_event_task_output()
5768 task_event->event_id.ppid = perf_event_pid(event, current); perf_event_task_output()
5770 task_event->event_id.tid = perf_event_tid(event, task); perf_event_task_output()
5771 task_event->event_id.ptid = perf_event_tid(event, current); perf_event_task_output()
5773 task_event->event_id.time = perf_event_clock(event); perf_event_task_output()
5777 perf_event__output_id_sample(event, &handle, &sample); perf_event_task_output()
5839 static int perf_event_comm_match(struct perf_event *event) perf_event_comm_match() argument
5841 return event->attr.comm; perf_event_comm_match()
5844 static void perf_event_comm_output(struct perf_event *event, perf_event_comm_output() argument
5853 if (!perf_event_comm_match(event)) perf_event_comm_output()
5856 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); perf_event_comm_output()
5857 ret = perf_output_begin(&handle, event, perf_event_comm_output()
5863 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); perf_event_comm_output()
5864 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); perf_event_comm_output()
5870 perf_event__output_id_sample(event, &handle, &sample); perf_event_comm_output()
5946 static int perf_event_mmap_match(struct perf_event *event, perf_event_mmap_match() argument
5953 return (!executable && event->attr.mmap_data) || perf_event_mmap_match()
5954 (executable && (event->attr.mmap || event->attr.mmap2)); perf_event_mmap_match()
5957 static void perf_event_mmap_output(struct perf_event *event, perf_event_mmap_output() argument
5966 if (!perf_event_mmap_match(event, data)) perf_event_mmap_output()
5969 if (event->attr.mmap2) { perf_event_mmap_output()
5979 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); perf_event_mmap_output()
5980 ret = perf_output_begin(&handle, event, perf_event_mmap_output()
5985 mmap_event->event_id.pid = perf_event_pid(event, current); perf_event_mmap_output()
5986 mmap_event->event_id.tid = perf_event_tid(event, current); perf_event_mmap_output()
5990 if (event->attr.mmap2) { perf_event_mmap_output()
6002 perf_event__output_id_sample(event, &handle, &sample); perf_event_mmap_output()
6163 void perf_event_aux_event(struct perf_event *event, unsigned long head, perf_event_aux_event() argument
6185 perf_event_header__init_id(&rec.header, &sample, event); perf_event_aux_event()
6186 ret = perf_output_begin(&handle, event, rec.header.size); perf_event_aux_event()
6192 perf_event__output_id_sample(event, &handle, &sample); perf_event_aux_event()
6200 void perf_log_lost_samples(struct perf_event *event, u64 lost) perf_log_lost_samples() argument
6218 perf_event_header__init_id(&lost_samples_event.header, &sample, event); perf_log_lost_samples()
6220 ret = perf_output_begin(&handle, event, perf_log_lost_samples()
6226 perf_event__output_id_sample(event, &handle, &sample); perf_log_lost_samples()
6245 static int perf_event_switch_match(struct perf_event *event) perf_event_switch_match() argument
6247 return event->attr.context_switch; perf_event_switch_match()
6250 static void perf_event_switch_output(struct perf_event *event, void *data) perf_event_switch_output() argument
6257 if (!perf_event_switch_match(event)) perf_event_switch_output()
6261 if (event->ctx->task) { perf_event_switch_output()
6268 perf_event_pid(event, se->next_prev); perf_event_switch_output()
6270 perf_event_tid(event, se->next_prev); perf_event_switch_output()
6273 perf_event_header__init_id(&se->event_id.header, &sample, event); perf_event_switch_output()
6275 ret = perf_output_begin(&handle, event, se->event_id.header.size); perf_event_switch_output()
6279 if (event->ctx->task) perf_event_switch_output()
6284 perf_event__output_id_sample(event, &handle, &sample); perf_event_switch_output()
6319 static void perf_log_throttle(struct perf_event *event, int enable) perf_log_throttle() argument
6336 .time = perf_event_clock(event), perf_log_throttle()
6337 .id = primary_event_id(event), perf_log_throttle()
6338 .stream_id = event->id, perf_log_throttle()
6344 perf_event_header__init_id(&throttle_event.header, &sample, event); perf_log_throttle()
6346 ret = perf_output_begin(&handle, event, perf_log_throttle()
6352 perf_event__output_id_sample(event, &handle, &sample); perf_log_throttle()
6356 static void perf_log_itrace_start(struct perf_event *event) perf_log_itrace_start() argument
6367 if (event->parent) perf_log_itrace_start()
6368 event = event->parent; perf_log_itrace_start()
6370 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || perf_log_itrace_start()
6371 event->hw.itrace_started) perf_log_itrace_start()
6377 rec.pid = perf_event_pid(event, current); perf_log_itrace_start()
6378 rec.tid = perf_event_tid(event, current); perf_log_itrace_start()
6380 perf_event_header__init_id(&rec.header, &sample, event); perf_log_itrace_start()
6381 ret = perf_output_begin(&handle, event, rec.header.size); perf_log_itrace_start()
6387 perf_event__output_id_sample(event, &handle, &sample); perf_log_itrace_start()
6393 * Generic event overflow handling, sampling.
6396 static int __perf_event_overflow(struct perf_event *event, __perf_event_overflow() argument
6400 int events = atomic_read(&event->event_limit); __perf_event_overflow()
6401 struct hw_perf_event *hwc = &event->hw; __perf_event_overflow()
6409 if (unlikely(!is_sampling_event(event))) __perf_event_overflow()
6422 perf_log_throttle(event, 0); __perf_event_overflow()
6428 if (event->attr.freq) { __perf_event_overflow()
6435 perf_adjust_period(event, delta, hwc->last_period, true); __perf_event_overflow()
6443 event->pending_kill = POLL_IN; __perf_event_overflow()
6444 if (events && atomic_dec_and_test(&event->event_limit)) { __perf_event_overflow()
6446 event->pending_kill = POLL_HUP; __perf_event_overflow()
6447 event->pending_disable = 1; __perf_event_overflow()
6448 irq_work_queue(&event->pending); __perf_event_overflow()
6451 if (event->overflow_handler) __perf_event_overflow()
6452 event->overflow_handler(event, data, regs); __perf_event_overflow()
6454 perf_event_output(event, data, regs); __perf_event_overflow()
6456 if (*perf_event_fasync(event) && event->pending_kill) { __perf_event_overflow()
6457 event->pending_wakeup = 1; __perf_event_overflow()
6458 irq_work_queue(&event->pending); __perf_event_overflow()
6464 int perf_event_overflow(struct perf_event *event, perf_event_overflow() argument
6468 return __perf_event_overflow(event, 1, data, regs); perf_event_overflow()
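__perf_event_overflow() decrements event->event_limit on each overflow; when it reaches zero the event is marked pending_disable and waiters are woken with POLL_HUP, so sampling stops until user space re-arms the limit via PERF_EVENT_IOC_REFRESH. A small sketch of the arm-and-wait pattern:

#include <poll.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* arm the limit for one more overflow, then block until it fires */
static int wait_one_overflow(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (ioctl(fd, PERF_EVENT_IOC_REFRESH, 1) < 0)
		return -1;
	return poll(&pfd, 1, -1);
}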
6472 * Generic software event infrastructure
6487 * We directly increment event->count and keep a second value in
6488 * event->hw.period_left to count intervals. This period event
6493 u64 perf_swevent_set_period(struct perf_event *event) perf_swevent_set_period() argument
6495 struct hw_perf_event *hwc = &event->hw; perf_swevent_set_period()
6516 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, perf_swevent_overflow() argument
6520 struct hw_perf_event *hwc = &event->hw; perf_swevent_overflow()
6524 overflow = perf_swevent_set_period(event); perf_swevent_overflow()
6530 if (__perf_event_overflow(event, throttle, perf_swevent_overflow()
6542 static void perf_swevent_event(struct perf_event *event, u64 nr, perf_swevent_event() argument
6546 struct hw_perf_event *hwc = &event->hw; perf_swevent_event()
6548 local64_add(nr, &event->count); perf_swevent_event()
6553 if (!is_sampling_event(event)) perf_swevent_event()
6556 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { perf_swevent_event()
6558 return perf_swevent_overflow(event, 1, data, regs); perf_swevent_event()
6560 data->period = event->hw.last_period; perf_swevent_event()
6562 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) perf_swevent_event()
6563 return perf_swevent_overflow(event, 1, data, regs); perf_swevent_event()
6568 perf_swevent_overflow(event, 0, data, regs); perf_swevent_event()
6571 static int perf_exclude_event(struct perf_event *event, perf_exclude_event() argument
6574 if (event->hw.state & PERF_HES_STOPPED) perf_exclude_event()
6578 if (event->attr.exclude_user && user_mode(regs)) perf_exclude_event()
6581 if (event->attr.exclude_kernel && !user_mode(regs)) perf_exclude_event()
6588 static int perf_swevent_match(struct perf_event *event, perf_swevent_match() argument
6594 if (event->attr.type != type) perf_swevent_match()
6597 if (event->attr.config != event_id) perf_swevent_match()
6600 if (perf_exclude_event(event, regs)) perf_swevent_match()
6634 /* For the event head insertion and removal in the hlist */
6636 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) find_swevent_head() argument
6639 u32 event_id = event->attr.config; find_swevent_head()
6640 u64 type = event->attr.type; find_swevent_head()
6648 lockdep_is_held(&event->ctx->lock)); find_swevent_head()
6661 struct perf_event *event; do_perf_sw_event() local
6669 hlist_for_each_entry_rcu(event, head, hlist_entry) { hlist_for_each_entry_rcu()
6670 if (perf_swevent_match(event, type, event_id, data, regs)) hlist_for_each_entry_rcu()
6671 perf_swevent_event(event, nr, data, regs); hlist_for_each_entry_rcu()
6721 static void perf_swevent_read(struct perf_event *event) perf_swevent_read() argument
6725 static int perf_swevent_add(struct perf_event *event, int flags) perf_swevent_add() argument
6728 struct hw_perf_event *hwc = &event->hw; perf_swevent_add()
6731 if (is_sampling_event(event)) { perf_swevent_add()
6733 perf_swevent_set_period(event); perf_swevent_add()
6738 head = find_swevent_head(swhash, event); perf_swevent_add()
6742 hlist_add_head_rcu(&event->hlist_entry, head); perf_swevent_add()
6743 perf_event_update_userpage(event); perf_swevent_add()
6748 static void perf_swevent_del(struct perf_event *event, int flags) perf_swevent_del() argument
6750 hlist_del_rcu(&event->hlist_entry); perf_swevent_del()
6753 static void perf_swevent_start(struct perf_event *event, int flags) perf_swevent_start() argument
6755 event->hw.state = 0; perf_swevent_start()
6758 static void perf_swevent_stop(struct perf_event *event, int flags) perf_swevent_stop() argument
6760 event->hw.state = PERF_HES_STOPPED; perf_swevent_stop()
6782 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) swevent_hlist_put_cpu() argument
6794 static void swevent_hlist_put(struct perf_event *event) swevent_hlist_put() argument
6799 swevent_hlist_put_cpu(event, cpu); swevent_hlist_put()
6802 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) swevent_hlist_get_cpu() argument
6825 static int swevent_hlist_get(struct perf_event *event) swevent_hlist_get() argument
6832 err = swevent_hlist_get_cpu(event, cpu); for_each_possible_cpu()
6845 swevent_hlist_put_cpu(event, cpu); for_each_possible_cpu()
6854 static void sw_perf_event_destroy(struct perf_event *event) sw_perf_event_destroy() argument
6856 u64 event_id = event->attr.config; sw_perf_event_destroy()
6858 WARN_ON(event->parent); sw_perf_event_destroy()
6861 swevent_hlist_put(event); sw_perf_event_destroy()
6864 static int perf_swevent_init(struct perf_event *event) perf_swevent_init() argument
6866 u64 event_id = event->attr.config; perf_swevent_init()
6868 if (event->attr.type != PERF_TYPE_SOFTWARE) perf_swevent_init()
6874 if (has_branch_stack(event)) perf_swevent_init()
6889 if (!event->parent) { perf_swevent_init()
6892 err = swevent_hlist_get(event); perf_swevent_init()
6897 event->destroy = sw_perf_event_destroy; perf_swevent_init()
6918 static int perf_tp_filter_match(struct perf_event *event, perf_tp_filter_match() argument
6924 if (event->parent) perf_tp_filter_match()
6925 event = event->parent; perf_tp_filter_match()
6927 if (likely(!event->filter) || filter_match_preds(event->filter, record)) perf_tp_filter_match()
6932 static int perf_tp_event_match(struct perf_event *event, perf_tp_event_match() argument
6936 if (event->hw.state & PERF_HES_STOPPED) perf_tp_event_match()
6941 if (event->attr.exclude_kernel) perf_tp_event_match()
6944 if (!perf_tp_filter_match(event, data)) perf_tp_event_match()
6955 struct perf_event *event; perf_tp_event() local
6965 hlist_for_each_entry_rcu(event, head, hlist_entry) { hlist_for_each_entry_rcu()
6966 if (perf_tp_event_match(event, &data, regs)) hlist_for_each_entry_rcu()
6967 perf_swevent_event(event, count, &data, regs); hlist_for_each_entry_rcu()
6972 * deliver this event there too.
6983 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
6984 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6986 if (event->attr.config != entry->type)
6988 if (perf_tp_event_match(event, &data, regs))
6989 perf_swevent_event(event, count, &data, regs);
6999 static void tp_perf_event_destroy(struct perf_event *event) tp_perf_event_destroy() argument
7001 perf_trace_destroy(event); tp_perf_event_destroy()
7004 static int perf_tp_event_init(struct perf_event *event) perf_tp_event_init() argument
7008 if (event->attr.type != PERF_TYPE_TRACEPOINT) perf_tp_event_init()
7014 if (has_branch_stack(event)) perf_tp_event_init()
7017 err = perf_trace_init(event); perf_tp_event_init()
7021 event->destroy = tp_perf_event_destroy; perf_tp_event_init()
7042 static int perf_event_set_filter(struct perf_event *event, void __user *arg) perf_event_set_filter() argument
7047 if (event->attr.type != PERF_TYPE_TRACEPOINT) perf_event_set_filter()
7054 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); perf_event_set_filter()
7060 static void perf_event_free_filter(struct perf_event *event) perf_event_free_filter() argument
7062 ftrace_profile_free_filter(event); perf_event_free_filter()
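perf_event_set_filter() only accepts PERF_TYPE_TRACEPOINT events and hands the user string, together with the event's attr.config, to ftrace_profile_set_filter(). From user space this is the PERF_EVENT_IOC_SET_FILTER ioctl; a sketch with an illustrative filter expression:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* fd must be a PERF_TYPE_TRACEPOINT event; the expression is just an example */
static int filter_out_pid0(int fd)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
}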
7065 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) perf_event_set_bpf_prog() argument
7069 if (event->attr.type != PERF_TYPE_TRACEPOINT) perf_event_set_bpf_prog()
7072 if (event->tp_event->prog) perf_event_set_bpf_prog()
7075 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE)) perf_event_set_bpf_prog()
7089 event->tp_event->prog = prog; perf_event_set_bpf_prog()
7094 static void perf_event_free_bpf_prog(struct perf_event *event) perf_event_free_bpf_prog() argument
7098 if (!event->tp_event) perf_event_free_bpf_prog()
7101 prog = event->tp_event->prog; perf_event_free_bpf_prog()
7103 event->tp_event->prog = NULL; perf_event_free_bpf_prog()
7114 static int perf_event_set_filter(struct perf_event *event, void __user *arg) perf_event_set_filter() argument
7119 static void perf_event_free_filter(struct perf_event *event) perf_event_free_filter() argument
7123 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) perf_event_set_bpf_prog() argument
7128 static void perf_event_free_bpf_prog(struct perf_event *event) perf_event_free_bpf_prog() argument
7155 struct perf_event *event; perf_swevent_hrtimer() local
7158 event = container_of(hrtimer, struct perf_event, hw.hrtimer); perf_swevent_hrtimer()
7160 if (event->state != PERF_EVENT_STATE_ACTIVE) perf_swevent_hrtimer()
7163 event->pmu->read(event); perf_swevent_hrtimer()
7165 perf_sample_data_init(&data, 0, event->hw.last_period); perf_swevent_hrtimer()
7168 if (regs && !perf_exclude_event(event, regs)) { perf_swevent_hrtimer()
7169 if (!(event->attr.exclude_idle && is_idle_task(current))) perf_swevent_hrtimer()
7170 if (__perf_event_overflow(event, 1, &data, regs)) perf_swevent_hrtimer()
7174 period = max_t(u64, 10000, event->hw.sample_period); perf_swevent_hrtimer()
7180 static void perf_swevent_start_hrtimer(struct perf_event *event) perf_swevent_start_hrtimer() argument
7182 struct hw_perf_event *hwc = &event->hw; perf_swevent_start_hrtimer()
7185 if (!is_sampling_event(event)) perf_swevent_start_hrtimer()
7201 static void perf_swevent_cancel_hrtimer(struct perf_event *event) perf_swevent_cancel_hrtimer() argument
7203 struct hw_perf_event *hwc = &event->hw; perf_swevent_cancel_hrtimer()
7205 if (is_sampling_event(event)) { perf_swevent_cancel_hrtimer()
7213 static void perf_swevent_init_hrtimer(struct perf_event *event) perf_swevent_init_hrtimer() argument
7215 struct hw_perf_event *hwc = &event->hw; perf_swevent_init_hrtimer()
7217 if (!is_sampling_event(event)) perf_swevent_init_hrtimer()
7227 if (event->attr.freq) { perf_swevent_init_hrtimer()
7228 long freq = event->attr.sample_freq; perf_swevent_init_hrtimer()
7230 event->attr.sample_period = NSEC_PER_SEC / freq; perf_swevent_init_hrtimer()
7231 hwc->sample_period = event->attr.sample_period; perf_swevent_init_hrtimer()
7234 event->attr.freq = 0; perf_swevent_init_hrtimer()
7239 * Software event: cpu wall time clock
7242 static void cpu_clock_event_update(struct perf_event *event) cpu_clock_event_update() argument
7248 prev = local64_xchg(&event->hw.prev_count, now); cpu_clock_event_update()
7249 local64_add(now - prev, &event->count); cpu_clock_event_update()
7252 static void cpu_clock_event_start(struct perf_event *event, int flags) cpu_clock_event_start() argument
7254 local64_set(&event->hw.prev_count, local_clock()); cpu_clock_event_start()
7255 perf_swevent_start_hrtimer(event); cpu_clock_event_start()
7258 static void cpu_clock_event_stop(struct perf_event *event, int flags) cpu_clock_event_stop() argument
7260 perf_swevent_cancel_hrtimer(event); cpu_clock_event_stop()
7261 cpu_clock_event_update(event); cpu_clock_event_stop()
7264 static int cpu_clock_event_add(struct perf_event *event, int flags) cpu_clock_event_add() argument
7267 cpu_clock_event_start(event, flags); cpu_clock_event_add()
7268 perf_event_update_userpage(event); cpu_clock_event_add()
7273 static void cpu_clock_event_del(struct perf_event *event, int flags) cpu_clock_event_del() argument
7275 cpu_clock_event_stop(event, flags); cpu_clock_event_del()
7278 static void cpu_clock_event_read(struct perf_event *event) cpu_clock_event_read() argument
7280 cpu_clock_event_update(event); cpu_clock_event_read()
7283 static int cpu_clock_event_init(struct perf_event *event) cpu_clock_event_init() argument
7285 if (event->attr.type != PERF_TYPE_SOFTWARE) cpu_clock_event_init()
7288 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) cpu_clock_event_init()
7294 if (has_branch_stack(event)) cpu_clock_event_init()
7297 perf_swevent_init_hrtimer(event); cpu_clock_event_init()
7316 * Software event: task time clock
7319 static void task_clock_event_update(struct perf_event *event, u64 now) task_clock_event_update() argument
7324 prev = local64_xchg(&event->hw.prev_count, now); task_clock_event_update()
7326 local64_add(delta, &event->count); task_clock_event_update()
7329 static void task_clock_event_start(struct perf_event *event, int flags) task_clock_event_start() argument
7331 local64_set(&event->hw.prev_count, event->ctx->time); task_clock_event_start()
7332 perf_swevent_start_hrtimer(event); task_clock_event_start()
7335 static void task_clock_event_stop(struct perf_event *event, int flags) task_clock_event_stop() argument
7337 perf_swevent_cancel_hrtimer(event); task_clock_event_stop()
7338 task_clock_event_update(event, event->ctx->time); task_clock_event_stop()
7341 static int task_clock_event_add(struct perf_event *event, int flags) task_clock_event_add() argument
7344 task_clock_event_start(event, flags); task_clock_event_add()
7345 perf_event_update_userpage(event); task_clock_event_add()
7350 static void task_clock_event_del(struct perf_event *event, int flags) task_clock_event_del() argument
7352 task_clock_event_stop(event, PERF_EF_UPDATE); task_clock_event_del()
7355 static void task_clock_event_read(struct perf_event *event) task_clock_event_read() argument
7358 u64 delta = now - event->ctx->timestamp; task_clock_event_read()
7359 u64 time = event->ctx->time + delta; task_clock_event_read()
7361 task_clock_event_update(event, time); task_clock_event_read()
7364 static int task_clock_event_init(struct perf_event *event) task_clock_event_init() argument
7366 if (event->attr.type != PERF_TYPE_SOFTWARE) task_clock_event_init()
7369 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) task_clock_event_init()
7375 if (has_branch_stack(event)) task_clock_event_init()
7378 perf_swevent_init_hrtimer(event); task_clock_event_init()
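cpu-clock and task-clock are hrtimer-driven software events; perf_swevent_init_hrtimer() converts a frequency request into a fixed period (sample_period = NSEC_PER_SEC / sample_freq, so 1000 Hz becomes 1,000,000 ns) because the timer cannot be re-tuned on the fly. A sketch of an attribute block that ends up on this path:

#include <string.h>
#include <linux/perf_event.h>

/* a 1 kHz task-clock sampler; the hrtimer path fixes the period at
 * NSEC_PER_SEC / 1000 = 1,000,000 ns when this attr is opened */
static void task_clock_1khz(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size        = sizeof(*attr);
	attr->type        = PERF_TYPE_SOFTWARE;
	attr->config      = PERF_COUNT_SW_TASK_CLOCK;
	attr->freq        = 1;
	attr->sample_freq = 1000;
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
}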
7446 static int perf_event_idx_default(struct perf_event *event) perf_event_idx_default() argument
7739 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) perf_try_init_event() argument
7747 if (event->group_leader != event) { perf_try_init_event()
7752 ctx = perf_event_ctx_lock_nested(event->group_leader, perf_try_init_event()
7757 event->pmu = pmu; perf_try_init_event()
7758 ret = pmu->event_init(event); perf_try_init_event()
7761 perf_event_ctx_unlock(event->group_leader, ctx); perf_try_init_event()
7769 static struct pmu *perf_init_event(struct perf_event *event) perf_init_event() argument
7778 pmu = idr_find(&pmu_idr, event->attr.type); perf_init_event()
7781 ret = perf_try_init_event(pmu, event); perf_init_event()
7788 ret = perf_try_init_event(pmu, event); perf_init_event()
7804 static void account_event_cpu(struct perf_event *event, int cpu) account_event_cpu() argument
7806 if (event->parent) account_event_cpu()
7809 if (is_cgroup_event(event)) account_event_cpu()
7813 static void account_event(struct perf_event *event) account_event() argument
7815 if (event->parent) account_event()
7818 if (event->attach_state & PERF_ATTACH_TASK) account_event()
7820 if (event->attr.mmap || event->attr.mmap_data) account_event()
7822 if (event->attr.comm) account_event()
7824 if (event->attr.task) account_event()
7826 if (event->attr.freq) { account_event()
7830 if (event->attr.context_switch) { account_event()
7834 if (has_branch_stack(event)) account_event()
7836 if (is_cgroup_event(event)) account_event()
7839 account_event_cpu(event, event->cpu); account_event()
7843 * Allocate and initialize an event structure
7854 struct perf_event *event; perf_event_alloc() local
7863 event = kzalloc(sizeof(*event), GFP_KERNEL); perf_event_alloc()
7864 if (!event) perf_event_alloc()
7872 group_leader = event; perf_event_alloc()
7874 mutex_init(&event->child_mutex); perf_event_alloc()
7875 INIT_LIST_HEAD(&event->child_list); perf_event_alloc()
7877 INIT_LIST_HEAD(&event->group_entry); perf_event_alloc()
7878 INIT_LIST_HEAD(&event->event_entry); perf_event_alloc()
7879 INIT_LIST_HEAD(&event->sibling_list); perf_event_alloc()
7880 INIT_LIST_HEAD(&event->rb_entry); perf_event_alloc()
7881 INIT_LIST_HEAD(&event->active_entry); perf_event_alloc()
7882 INIT_HLIST_NODE(&event->hlist_entry); perf_event_alloc()
7885 init_waitqueue_head(&event->waitq); perf_event_alloc()
7886 init_irq_work(&event->pending, perf_pending_event); perf_event_alloc()
7888 mutex_init(&event->mmap_mutex); perf_event_alloc()
7890 atomic_long_set(&event->refcount, 1); perf_event_alloc()
7891 event->cpu = cpu; perf_event_alloc()
7892 event->attr = *attr; perf_event_alloc()
7893 event->group_leader = group_leader; perf_event_alloc()
7894 event->pmu = NULL; perf_event_alloc()
7895 event->oncpu = -1; perf_event_alloc()
7897 event->parent = parent_event; perf_event_alloc()
7899 event->ns = get_pid_ns(task_active_pid_ns(current)); perf_event_alloc()
7900 event->id = atomic64_inc_return(&perf_event_id); perf_event_alloc()
7902 event->state = PERF_EVENT_STATE_INACTIVE; perf_event_alloc()
7905 event->attach_state = PERF_ATTACH_TASK; perf_event_alloc()
7911 event->hw.target = task; perf_event_alloc()
7914 event->clock = &local_clock; perf_event_alloc()
7916 event->clock = parent_event->clock; perf_event_alloc()
7923 event->overflow_handler = overflow_handler; perf_event_alloc()
7924 event->overflow_handler_context = context; perf_event_alloc()
7926 perf_event__state_init(event); perf_event_alloc()
7930 hwc = &event->hw; perf_event_alloc()
7944 if (!has_branch_stack(event)) perf_event_alloc()
7945 event->attr.branch_sample_type = 0; perf_event_alloc()
7948 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); perf_event_alloc()
7953 pmu = perf_init_event(event); perf_event_alloc()
7961 err = exclusive_event_init(event); perf_event_alloc()
7965 if (!event->parent) { perf_event_alloc()
7966 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { perf_event_alloc()
7974 account_event(event); perf_event_alloc()
7976 return event; perf_event_alloc()
7979 exclusive_event_destroy(event); perf_event_alloc()
7982 if (event->destroy) perf_event_alloc()
7983 event->destroy(event); perf_event_alloc()
7986 if (is_cgroup_event(event)) perf_event_alloc()
7987 perf_detach_cgroup(event); perf_event_alloc()
7988 if (event->ns) perf_event_alloc()
7989 put_pid_ns(event->ns); perf_event_alloc()
7990 kfree(event); perf_event_alloc()
8126 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) perf_event_set_output() argument
8135 if (event == output_event) perf_event_set_output()
8141 if (output_event->cpu != event->cpu) perf_event_set_output()
8147 if (output_event->cpu == -1 && output_event->ctx != event->ctx) perf_event_set_output()
8153 if (output_event->clock != event->clock) perf_event_set_output()
8159 if (has_aux(event) && has_aux(output_event) && perf_event_set_output()
8160 event->pmu != output_event->pmu) perf_event_set_output()
8164 mutex_lock(&event->mmap_mutex); perf_event_set_output()
8166 if (atomic_read(&event->mmap_count)) perf_event_set_output()
8176 ring_buffer_attach(event, rb); perf_event_set_output()
8180 mutex_unlock(&event->mmap_mutex); perf_event_set_output()
8195 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) perf_event_set_clock() argument
8201 event->clock = &ktime_get_mono_fast_ns; perf_event_set_clock()
8206 event->clock = &ktime_get_raw_fast_ns; perf_event_set_clock()
8211 event->clock = &ktime_get_real_ns; perf_event_set_clock()
8215 event->clock = &ktime_get_boot_ns; perf_event_set_clock()
8219 event->clock = &ktime_get_tai_ns; perf_event_set_clock()
8226 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) perf_event_set_clock()
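perf_event_set_clock() lets the opener pick which clock timestamps samples, mapping clockid values onto the kernel's fast clock accessors and rejecting non-NMI-safe clocks for NMI-capable PMUs. From user space this is the use_clockid/clockid pair in perf_event_attr; a sketch:

#include <time.h>
#include <linux/perf_event.h>

/* CLOCK_MONOTONIC_RAW maps onto ktime_get_raw_fast_ns in the switch above */
static void use_raw_clock(struct perf_event_attr *attr)
{
	attr->use_clockid = 1;
	attr->clockid     = CLOCK_MONOTONIC_RAW;
}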
8233 * sys_perf_event_open - open a performance event, associate it to a task/cpu
8238 * @group_fd: group leader event fd
8245 struct perf_event *event, *sibling; SYSCALL_DEFINE5() local
8331 * perf_install_in_context() call for this new event to SYSCALL_DEFINE5()
8343 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, SYSCALL_DEFINE5()
8345 if (IS_ERR(event)) { SYSCALL_DEFINE5()
8346 err = PTR_ERR(event); SYSCALL_DEFINE5()
8350 if (is_sampling_event(event)) { SYSCALL_DEFINE5()
8351 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { SYSCALL_DEFINE5()
8361 pmu = event->pmu; SYSCALL_DEFINE5()
8364 err = perf_event_set_clock(event, attr.clockid); SYSCALL_DEFINE5()
8370 (is_software_event(event) != is_software_event(group_leader))) { SYSCALL_DEFINE5()
8371 if (is_software_event(event)) { SYSCALL_DEFINE5()
8373 * If event and group_leader are not both a software SYSCALL_DEFINE5()
8374 * event, and event is, then group leader is not. SYSCALL_DEFINE5()
8385 * try to add a hardware event, move the whole group to SYSCALL_DEFINE5()
8395 ctx = find_get_context(pmu, task, event); SYSCALL_DEFINE5()
8407 * Look up the group leader (we will attach this event to it): SYSCALL_DEFINE5()
8420 if (group_leader->clock != event->clock) SYSCALL_DEFINE5()
8440 if (group_leader->cpu != event->cpu) SYSCALL_DEFINE5()
8455 err = perf_event_set_output(event, output_event); SYSCALL_DEFINE5()
8460 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, SYSCALL_DEFINE5()
8474 if (!perf_event_validate_size(event)) { SYSCALL_DEFINE5()
8481 * because we need to serialize with concurrent event creation. SYSCALL_DEFINE5()
8483 if (!exclusive_event_installable(event, ctx)) { SYSCALL_DEFINE5()
8536 * event. What we want here is event in the initial SYSCALL_DEFINE5()
8554 * perf_install_in_context() which is the point the event is active and SYSCALL_DEFINE5()
8557 perf_event__header_size(event); SYSCALL_DEFINE5()
8558 perf_event__id_header_size(event); SYSCALL_DEFINE5()
8560 perf_install_in_context(ctx, event, event->cpu); SYSCALL_DEFINE5()
8574 event->owner = current; SYSCALL_DEFINE5()
8577 list_add_tail(&event->owner_entry, &current->perf_event_list); SYSCALL_DEFINE5()
8582 * new event on the sibling_list. This ensures destruction SYSCALL_DEFINE5()
8602 * and that will take care of freeing the event. SYSCALL_DEFINE5()
8605 free_event(event); SYSCALL_DEFINE5()
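sys_perf_event_open() above spends much of its length validating the group leader: the new event must agree with the leader on cpu, clock and the software/hardware mix before it is installed in the same context. A user-space sketch of building such a group through the group_fd argument (raw syscall wrapper, since glibc provides none):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_counter(unsigned int type, unsigned long long config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = type;
	attr.config   = config;
	attr.disabled = (group_fd == -1);	/* only the leader starts disabled */

	return (int)syscall(__NR_perf_event_open, &attr, 0 /* this thread */,
			    -1 /* any cpu */, group_fd, 0);
}

/* cycles leads the group; instructions is scheduled with it or not at all */
static void open_group(int *leader, int *sibling)
{
	*leader  = open_counter(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, -1);
	*sibling = open_counter(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS, *leader);
}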
8635 struct perf_event *event; perf_event_create_kernel_counter() local
8642 event = perf_event_alloc(attr, cpu, task, NULL, NULL, perf_event_create_kernel_counter()
8644 if (IS_ERR(event)) { perf_event_create_kernel_counter()
8645 err = PTR_ERR(event); perf_event_create_kernel_counter()
8650 event->owner = EVENT_OWNER_KERNEL; perf_event_create_kernel_counter()
8652 ctx = find_get_context(event->pmu, task, event); perf_event_create_kernel_counter()
8660 if (!exclusive_event_installable(event, ctx)) { perf_event_create_kernel_counter()
8668 perf_install_in_context(ctx, event, cpu); perf_event_create_kernel_counter()
8672 return event; perf_event_create_kernel_counter()
8675 free_event(event); perf_event_create_kernel_counter()
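perf_event_create_kernel_counter() is the in-kernel counterpart of the syscall: it allocates the event, finds a context and installs it, returning the event or an ERR_PTR. A hedged kernel-side sketch modeled on callers such as the hardlockup watchdog; the 4.4-era signature with a perf_overflow_handler_t callback is assumed:

#include <linux/perf_event.h>

static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	/* called from PMU interrupt/NMI context on each overflow */
}

static struct perf_event *start_cycle_watch(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
		.pinned		= 1,
	};

	/* per-cpu counter, no task; check with IS_ERR(), release with
	 * perf_event_release_kernel() */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, my_overflow, NULL);
}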
8685 struct perf_event *event, *tmp; perf_pmu_migrate_context() local
8696 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, perf_pmu_migrate_context()
8698 perf_remove_from_context(event, false); perf_pmu_migrate_context()
8699 unaccount_event_cpu(event, src_cpu); perf_pmu_migrate_context()
8701 list_add(&event->migrate_entry, &events); perf_pmu_migrate_context()
8717 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { perf_pmu_migrate_context()
8718 if (event->group_leader == event) perf_pmu_migrate_context()
8721 list_del(&event->migrate_entry); perf_pmu_migrate_context()
8722 if (event->state >= PERF_EVENT_STATE_OFF) perf_pmu_migrate_context()
8723 event->state = PERF_EVENT_STATE_INACTIVE; perf_pmu_migrate_context()
8724 account_event_cpu(event, dst_cpu); perf_pmu_migrate_context()
8725 perf_install_in_context(dst_ctx, event, dst_cpu); perf_pmu_migrate_context()
8733 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { perf_pmu_migrate_context()
8734 list_del(&event->migrate_entry); perf_pmu_migrate_context()
8735 if (event->state >= PERF_EVENT_STATE_OFF) perf_pmu_migrate_context()
8736 event->state = PERF_EVENT_STATE_INACTIVE; perf_pmu_migrate_context()
8737 account_event_cpu(event, dst_cpu); perf_pmu_migrate_context()
8738 perf_install_in_context(dst_ctx, event, dst_cpu); perf_pmu_migrate_context()
8767 * Remove this event from the parent's list sync_child_event()
8776 * lost one event. sync_child_event()
8781 * Release the parent event, if this was the last sync_child_event()
8887 * When a child task exits, feed back event values to parent events.
8894 struct perf_event *event, *tmp; perf_event_exit_task() local
8898 list_for_each_entry_safe(event, tmp, &child->perf_event_list, perf_event_exit_task()
8900 list_del_init(&event->owner_entry); perf_event_exit_task()
8908 event->owner = NULL; perf_event_exit_task()
8924 static void perf_free_event(struct perf_event *event, perf_free_event() argument
8927 struct perf_event *parent = event->parent; perf_free_event()
8933 list_del_init(&event->child_list); perf_free_event()
8939 perf_group_detach(event); perf_free_event()
8940 list_del_event(event, ctx); perf_free_event()
8942 free_event(event); perf_free_event()
8955 struct perf_event *event, *tmp; perf_event_free_task() local
8965 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, for_each_task_context_nr()
8967 perf_free_event(event, ctx); for_each_task_context_nr()
8969 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, for_each_task_context_nr()
8971 perf_free_event(event, ctx); for_each_task_context_nr()
8995 struct perf_event *event; perf_event_get() local
9001 event = f.file->private_data; perf_event_get()
9002 atomic_long_inc(&event->refcount); perf_event_get()
9005 return event; perf_event_get()
9008 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) perf_event_attrs() argument
9010 if (!event) perf_event_attrs()
9013 return &event->attr; perf_event_attrs()
9017 * inherit an event from parent task to child task:
9057 * Make the child state follow the state of the parent event, inherit_event()
9095 * Link this into the parent event's child list inherit_event()
9129 inherit_task_group(struct perf_event *event, struct task_struct *parent, inherit_task_group() argument
9137 if (!event->attr.inherit) { inherit_task_group()
9158 ret = inherit_group(event, parent, parent_ctx, inherit_task_group()
9174 struct perf_event *event; perf_event_init_context() local
9208 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { perf_event_init_context()
9209 ret = inherit_task_group(event, parent, parent_ctx, perf_event_init_context()
9224 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { perf_event_init_context()
9225 ret = inherit_task_group(event, parent, parent_ctx, perf_event_init_context()
9320 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) __perf_event_exit_context()
2143 perf_install_in_context(struct perf_event_context *ctx, struct perf_event *event, int cpu) perf_install_in_context() argument
3442 find_get_context(struct pmu *pmu, struct task_struct *task, struct perf_event *event) find_get_context() argument
4426 arch_perf_update_userpage( struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) arch_perf_update_userpage() argument
5160 __perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) __perf_event_header__init_id() argument
5190 perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) perf_event_header__init_id() argument
5230 perf_output_read_one(struct perf_output_handle *handle, struct perf_event *event, u64 enabled, u64 running) perf_output_read_one() argument
5256 perf_output_read_group(struct perf_output_handle *handle, struct perf_event *event, u64 enabled, u64 running) perf_output_read_group() argument
5300 perf_output_read(struct perf_output_handle *handle, struct perf_event *event) perf_output_read() argument
5324 perf_output_sample(struct perf_output_handle *handle, struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) perf_output_sample() argument
5484 perf_prepare_sample(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs) perf_prepare_sample() argument
/linux-4.4.14/fs/notify/
H A Dnotification.c22 * the event happened. When inotify gets an event it will need to add that
23 * event to the group notify queue. Since a single event might need to be on
24 * multiple group's notification queues we can't add the event directly to each
26 * has a pointer back to the original event. Since the majority of events are
28 * event_holder into each event. This means we have a single allocation instead
71 struct fsnotify_event *event) fsnotify_destroy_event()
74 if (!event || event->mask == FS_Q_OVERFLOW) fsnotify_destroy_event()
76 /* If the event is still queued, we have a problem... */ fsnotify_destroy_event()
77 WARN_ON(!list_empty(&event->list)); fsnotify_destroy_event()
78 group->ops->free_event(event); fsnotify_destroy_event()
82 * Add an event to the group notification queue. The group can later pull this
83 * event off the queue to deal with. The function returns 0 if the event was
84 * added to the queue, 1 if the event was merged with some other queued event,
88 struct fsnotify_event *event, fsnotify_add_event()
95 pr_debug("%s: group=%p event=%p\n", __func__, group, event); fsnotify_add_event()
101 /* Queue overflow event only if it isn't already queued */ fsnotify_add_event()
106 event = group->overflow_event; fsnotify_add_event()
111 ret = merge(list, event); fsnotify_add_event()
120 list_add_tail(&event->list, list); fsnotify_add_event()
129 * Remove @event from group's notification queue. It is the responsibility of
130 * the caller to destroy the event.
133 struct fsnotify_event *event) fsnotify_remove_event()
136 if (!list_empty(&event->list)) { fsnotify_remove_event()
137 list_del_init(&event->list); fsnotify_remove_event()
144 * Remove and return the first event from the notification list. It is the
145 * responsibility of the caller to destroy the obtained event
149 struct fsnotify_event *event; fsnotify_remove_first_event() local
155 event = list_first_entry(&group->notification_list, fsnotify_remove_first_event()
158 * We need to init list head for the case of overflow event so that fsnotify_remove_first_event()
161 list_del_init(&event->list); fsnotify_remove_first_event()
164 return event; fsnotify_remove_first_event()
168 * This will not remove the event, that must be done with
181 * event notifications.
185 struct fsnotify_event *event; fsnotify_flush_notify() local
189 event = fsnotify_remove_first_event(group); fsnotify_flush_notify()
190 fsnotify_destroy_event(group, event); fsnotify_flush_notify()
196 * fsnotify_create_event - Allocate a new event which will be sent to each
198 * particular event.
200 * @inode the inode which is supposed to receive the event (sometimes a
201 * parent of the inode to which the event happened.
207 void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode, fsnotify_init_event() argument
210 INIT_LIST_HEAD(&event->list); fsnotify_init_event()
211 event->inode = inode; fsnotify_init_event()
212 event->mask = mask; fsnotify_init_event()
70 fsnotify_destroy_event(struct fsnotify_group *group, struct fsnotify_event *event) fsnotify_destroy_event() argument
87 fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct list_head *, struct fsnotify_event *)) fsnotify_add_event() argument
132 fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event) fsnotify_remove_event() argument
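The notification.c helpers above implement the per-group event queue: fsnotify_init_event() fills in the generic part, fsnotify_add_event() queues it (returning non-zero when the event was merged or the queue overflowed, in which case the caller's copy is unused), and fsnotify_remove_first_event()/fsnotify_destroy_event() drain it. A hedged sketch of a hypothetical backend's producer side, following the pattern fanotify uses; struct my_event and its fields are invented for illustration:

#include <linux/slab.h>
#include <linux/fsnotify_backend.h>

/* hypothetical backend event embedding the generic fsnotify_event */
struct my_event {
	struct fsnotify_event fse;
	u32 cookie;
};

static int my_queue(struct fsnotify_group *group, struct inode *inode, u32 mask)
{
	struct my_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	int ret;

	if (!ev)
		return -ENOMEM;
	fsnotify_init_event(&ev->fse, inode, mask);

	/* NULL merge callback: never coalesce with an already-queued event */
	ret = fsnotify_add_event(group, &ev->fse, NULL);
	if (ret) {
		/* merged or overflowed: our copy was not queued, free it
		 * through the group's free_event op */
		fsnotify_destroy_event(group, &ev->fse);
	}
	return ret;
}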
/linux-4.4.14/include/linux/
H A Di2c-pxa.h12 void (*event)(void *ptr, i2c_slave_event_t event); member in struct:i2c_slave_client
H A Ddevfreq-event.h2 * devfreq-event: a framework to provide raw data and events of devfreq devices
18 * struct devfreq_event_dev - the devfreq-event device
20 * @node : Contain the devfreq-event device that have been registered.
21 * @dev : the device registered by devfreq-event class. dev.parent is
22 * the device using devfreq-event.
23 * @lock : a mutex to protect accessing devfreq-event.
25 * @desc : the description for devfreq-event device.
27 * This structure contains devfreq-event device information.
40 * struct devfreq_event_data - the devfreq-event data
42 * @load_count : load count of devfreq-event device for the given period.
43 * @total_count : total count of devfreq-event device for the given period.
48 * This structure contains the data of devfreq-event device for polling period.
56 * struct devfreq_event_ops - the operations of devfreq-event device
58 * @enable : Enable the devfreq-event device.
59 * @disable : Disable the devfreq-event device.
60 * @reset : Reset all setting of the devfreq-event device.
61 * @set_event : Set the specific event type for the devfreq-event device.
 62 * @get_event : Get the result of the devfreq-event device with specific
63 * event type.
65 * This structure contains devfreq-event device operations which can be
66 * implemented by devfreq-event device drivers.
81 * struct devfreq_event_desc - the descriptor of devfreq-event device
83 * @name : the name of devfreq-event device.
84 * @driver_data : the private data for devfreq-event driver.
85 * @ops : the operation to control devfreq-event device.
 87 * Each devfreq-event device is described by this structure.
88 * This structure contains the various data for devfreq-event device.
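
To show roughly how the structures above fit together, here is a hedged skeleton of a devfreq-event provider. The op prototypes (each taking a struct devfreq_event_dev *, with get_event additionally taking a struct devfreq_event_data *) are recalled from this header and should be checked against it; the example_* counter reads and the "example-ppmu" name are placeholders.

#include <linux/types.h>
#include <linux/devfreq-event.h>

/* Hypothetical stand-ins for real performance-counter reads. */
static u64 example_read_busy_cycles(struct devfreq_event_dev *edev)  { return 0; }
static u64 example_read_total_cycles(struct devfreq_event_dev *edev) { return 1; }

static int example_set_event(struct devfreq_event_dev *edev)
{
	/* Program the counters for the next polling period. */
	return 0;
}

static int example_get_event(struct devfreq_event_dev *edev,
			     struct devfreq_event_data *edata)
{
	/* Fill the per-period data described above. */
	edata->load_count = example_read_busy_cycles(edev);
	edata->total_count = example_read_total_cycles(edev);
	return 0;
}

static struct devfreq_event_ops example_event_ops = {
	.set_event = example_set_event,
	.get_event = example_get_event,
};

static struct devfreq_event_desc example_event_desc = {
	.name = "example-ppmu",
	.ops = &example_event_ops,
};

A consumer would then derive utilization each polling period from load_count relative to total_count.
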
H A Dtrace_events.h56 struct trace_event *event);
119 int flags, struct trace_event *event);
135 extern int register_trace_event(struct trace_event *event);
136 extern int unregister_trace_event(struct trace_event *event);
173 struct ring_buffer_event *event,
177 struct ring_buffer_event *event,
181 struct ring_buffer_event *event);
210 int (*reg)(struct trace_event_call *event,
218 extern int trace_event_reg(struct trace_event_call *event,
223 struct ring_buffer_event *event; member in struct:trace_event_buffer
250 * FILTERED - The event has a filter attached
254 * WAS_ENABLED - Set and stays set when an event was ever enabled
255 * (used for module unloading, if a module event is enabled,
284 struct trace_event event; member in struct:trace_event_call
293 * bit 3: trace internal event (do not enable)
336 * ENABLED - The event is enabled
338 * FILTERED - The event has a filter attached
340 * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
341 * SOFT_DISABLED - When set, do not trace the event (even though its
343 * TRIGGER_MODE - When set, invoke the triggers associated with the event
345 * PID_FILTER - When set, the event is filtered based on pid
426 struct ring_buffer_event *event);
429 struct ring_buffer_event *event);
439 * @file: The file pointer of the event to test
441 * If any triggers without filters are attached to this event, they
442 * will be called here. If the event is soft disabled and has no
464 * If there are event triggers attached to this event that requires
466 * entry already holds the field information of the current event.
468 * It also checks if the event should be discarded or not.
469 * It is to be discarded if the event is soft disabled and the
470 * event was only recorded to process triggers, or if the event
471 * filter is active and this event did not match the filters.
473 * Returns true if the event is discarded, false otherwise.
478 struct ring_buffer_event *event, __event_trigger_test_discard()
488 ring_buffer_discard_commit(buffer, event); __event_trigger_test_discard()
489 else if (!filter_check_discard(file, entry, buffer, event)) __event_trigger_test_discard()
496 * event_trigger_unlock_commit - handle triggers and finish event commit
 497 * @file: The file pointer associated with the event
498 * @buffer: The ring buffer that the event is being written to
499 * @event: The event meta data in the ring buffer
500 * @entry: The event itself
501 * @irq_flags: The state of the interrupts at the start of the event
502 * @pc: The state of the preempt count at the start of the event.
505 * from the event itself. It also tests the event against filters and
506 * if the event is soft disabled and should be discarded.
511 struct ring_buffer_event *event, event_trigger_unlock_commit()
516 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) event_trigger_unlock_commit()
517 trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); event_trigger_unlock_commit()
524 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 525 * @file: The file pointer associated with the event
526 * @buffer: The ring buffer that the event is being written to
527 * @event: The event meta data in the ring buffer
528 * @entry: The event itself
529 * @irq_flags: The state of the interrupts at the start of the event
530 * @pc: The state of the preempt count at the start of the event.
533 * from the event itself. It also tests the event against filters and
534 * if the event is soft disabled and should be discarded.
542 struct ring_buffer_event *event, event_trigger_unlock_commit_regs()
548 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) event_trigger_unlock_commit_regs()
549 trace_buffer_unlock_commit_regs(file->tr, buffer, event, event_trigger_unlock_commit_regs()
584 int trace_set_clr_event(const char *system, const char *event, int set);
610 extern int perf_trace_init(struct perf_event *event);
611 extern void perf_trace_destroy(struct perf_event *event);
612 extern int perf_trace_add(struct perf_event *event, int flags);
613 extern void perf_trace_del(struct perf_event *event, int flags);
614 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
616 extern void ftrace_profile_free_filter(struct perf_event *event);
476 __event_trigger_test_discard(struct trace_event_file *file, struct ring_buffer *buffer, struct ring_buffer_event *event, void *entry, enum event_trigger_type *tt) __event_trigger_test_discard() argument
509 event_trigger_unlock_commit(struct trace_event_file *file, struct ring_buffer *buffer, struct ring_buffer_event *event, void *entry, unsigned long irq_flags, int pc) event_trigger_unlock_commit() argument
540 event_trigger_unlock_commit_regs(struct trace_event_file *file, struct ring_buffer *buffer, struct ring_buffer_event *event, void *entry, unsigned long irq_flags, int pc, struct pt_regs *regs) event_trigger_unlock_commit_regs() argument
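
Most of the helpers above exist to support generated tracepoint code, but the header also declares a small control interface. As a hedged example, trace_set_clr_event() (declared above) can switch a trace event on or off from kernel code by system and event name; the "sched"/"sched_switch" names are only examples, and the return convention (0 on success) should be confirmed against the implementation.

#include <linux/trace_events.h>

/* Enable or disable sched:sched_switch from module code. */
static int example_enable_sched_switch(void)
{
	return trace_set_clr_event("sched", "sched_switch", 1);
}

static int example_disable_sched_switch(void)
{
	return trace_set_clr_event("sched", "sched_switch", 0);
}
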
H A Dperf_event.h86 * extra PMU register associated with an event
96 * struct hw_perf_event - performance event hardware details:
 136 * creation and event initialization.
144 * If the event is a per task event, this will point to the task in
153 #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
182 * State for throttling the event, see __perf_event_overflow() and
202 #define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */
203 #define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */
246 * Try and initialize the event for this PMU.
249 * -ENOENT -- @event is not for this PMU
251 * -ENODEV -- @event is for this PMU but PMU not present
252 * -EBUSY -- @event is for this PMU but PMU temporarily unavailable
253 * -EINVAL -- @event is for this PMU but @event is not valid
254 * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
 255 * -EACCES -- @event is for this PMU, @event is valid, but no privileges
257 * 0 -- @event is for this PMU and valid
261 int (*event_init) (struct perf_event *event);
264 * Notification that the event was mapped or unmapped. Called
267 void (*event_mapped) (struct perf_event *event); /*optional*/
268 void (*event_unmapped) (struct perf_event *event); /*optional*/
283 * to service the event, this includes any counter constraint
286 * Called with IRQs disabled and the PMU disabled on the CPU the event
292 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
296 int (*add) (struct perf_event *event, int flags);
297 void (*del) (struct perf_event *event, int flags);
307 * Called with IRQs disabled and the PMU disabled on the CPU the event
317 void (*start) (struct perf_event *event, int flags);
318 void (*stop) (struct perf_event *event, int flags);
321 * Updates the counter value of the event.
326 void (*read) (struct perf_event *event);
357 * Will return the value for perf_event_mmap_page::index for this event,
358 * if no implementation is provided it will default to: event->hw.idx + 1.
360 int (*event_idx) (struct perf_event *event); /*optional */
376 u64 (*count) (struct perf_event *event); /*optional*/
393 int (*filter_match) (struct perf_event *event); /* optional */
397 * enum perf_event_active_state - the states of a event
435 * struct perf_event - performance event kernel representation:
477 * These are the total time in nanoseconds that the event
479 * been scheduled in, if this is a per-task event)
483 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
490 * and total_time_running when the event is in INACTIVE or
493 * tstamp_enabled: the notional time when the event was enabled
494 * tstamp_running: the notional time when the event was scheduled on
496 * event was scheduled off.
505 * context time as it was when the event was last scheduled in.
 581 struct perf_cgroup *cgrp; /* cgroup event is attached to */
589 * struct perf_event_context - event context structure
643 * Number of contexts where an event can trigger:
649 * struct perf_event_cpu_context - per cpu event context structure
667 struct perf_event *event; member in struct:perf_output_handle
697 * if there is no cgroup event for the current CPU context.
712 struct perf_event *event);
733 extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
741 extern int perf_event_refresh(struct perf_event *event, int refresh);
742 extern void perf_event_update_userpage(struct perf_event *event);
743 extern int perf_event_release_kernel(struct perf_event *event);
752 extern u64 perf_event_read_local(struct perf_event *event);
753 extern u64 perf_event_read_value(struct perf_event *event,
823 struct perf_event *event);
826 struct perf_event *event,
829 extern int perf_event_overflow(struct perf_event *event,
833 extern void perf_event_output(struct perf_event *event,
840 struct perf_event *event);
842 perf_event__output_id_sample(struct perf_event *event,
847 perf_log_lost_samples(struct perf_event *event, u64 lost);
849 static inline bool is_sampling_event(struct perf_event *event) is_sampling_event() argument
851 return event->attr.sample_period != 0; is_sampling_event()
855 * Return 1 for a software event, 0 for a hardware event
857 static inline int is_software_event(struct perf_event *event) is_software_event() argument
859 return event->pmu->task_ctx_nr == perf_sw_context; is_software_event()
951 static inline u64 __perf_event_count(struct perf_event *event) __perf_event_count() argument
953 return local64_read(&event->count) + atomic64_read(&event->child_count); __perf_event_count()
1012 extern void perf_bp_event(struct perf_event *event, void *data);
1020 static inline bool has_branch_stack(struct perf_event *event) has_branch_stack() argument
1022 return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; has_branch_stack()
1025 static inline bool needs_branch_stack(struct perf_event *event) needs_branch_stack() argument
1027 return event->attr.branch_sample_type != 0; needs_branch_stack()
1030 static inline bool has_aux(struct perf_event *event) has_aux() argument
1032 return event->pmu->setup_aux; has_aux()
1036 struct perf_event *event, unsigned int size);
1044 extern u64 perf_swevent_set_period(struct perf_event *event);
1045 extern void perf_event_enable(struct perf_event *event);
1046 extern void perf_event_disable(struct perf_event *event);
1052 struct perf_event *event) { return NULL; }
1074 static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) perf_event_attrs() argument
1078 static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; } perf_event_print_debug() argument
1082 static inline int perf_event_refresh(struct perf_event *event, int refresh) perf_event_refresh() argument
1092 perf_bp_event(struct perf_event *event, void *data) { } perf_bp_event() argument
1106 static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } perf_event_enable() argument
1107 static inline void perf_event_enable(struct perf_event *event) { } perf_event_disable() argument
1108 static inline void perf_event_disable(struct perf_event *event) { } __perf_event_disable() argument
1111 static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } perf_event_release_kernel() argument
1051 perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event) perf_aux_output_begin() argument
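
To make the event_init contract above concrete, here is a hedged sketch of how a minimal PMU driver might triage events, using only names visible in this header (event->attr, struct pmu, is_sampling_event()). The -ENOENT and -EOPNOTSUPP choices follow the return-code table above; everything prefixed example_ is hypothetical, and a real driver would also fill in add/del/start/stop/read.

#include <linux/perf_event.h>

static struct pmu example_pmu;

/* Triage an event per the table above: not ours, ours but unsupported, or ok. */
static int example_pmu_event_init(struct perf_event *event)
{
	if (event->attr.type != example_pmu.type)
		return -ENOENT;		/* not for this PMU, core tries the next one */

	if (is_sampling_event(event))
		return -EOPNOTSUPP;	/* ours, but sampling is not supported here */

	return 0;			/* valid for this PMU */
}

static struct pmu example_pmu = {
	.event_init	= example_pmu_event_init,
	/* .add, .del, .start, .stop and .read would complete a real driver. */
};
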
H A Dtca6416_keypad.h20 int code; /* input event code (KEY_*, SW_*) */
22 int type; /* input event type (EV_KEY, EV_SW) */
/linux-4.4.14/tools/lib/traceevent/
H A Dplugin_hrtimer.c25 #include "event-parse.h"
29 struct event_format *event, void *context) timer_expire_handler()
33 if (pevent_print_num_field(s, "0x%llx", event, "timer", timer_expire_handler()
35 pevent_print_num_field(s, "0x%llx", event, "hrtimer", timer_expire_handler()
40 pevent_print_num_field(s, "%llu", event, "now", record, 1); timer_expire_handler()
42 pevent_print_func_field(s, " function=%s", event, "function", timer_expire_handler()
49 struct event_format *event, void *context) timer_start_handler()
53 if (pevent_print_num_field(s, "0x%llx", event, "timer", timer_start_handler()
55 pevent_print_num_field(s, "0x%llx", event, "hrtimer", timer_start_handler()
58 pevent_print_func_field(s, " function=%s", event, "function", timer_start_handler()
62 pevent_print_num_field(s, "%llu", event, "expires", record, 1); timer_start_handler()
65 pevent_print_num_field(s, "%llu", event, "softexpires", record, 1); timer_start_handler()
27 timer_expire_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) timer_expire_handler() argument
47 timer_start_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) timer_start_handler() argument
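
Handlers like the two above are attached through pevent_register_event_handler(), documented later in event-parse.c. A hedged sketch of registering a similar handler for a hypothetical my_subsys:my_event follows; the "count" field, the handler body, and passing a negative id so the lookup is done by system/event name are all assumptions, and the registration prototype should be checked against event-parse.h.

#include "event-parse.h"

/* Hypothetical handler: print one numeric field of my_subsys:my_event. */
static int my_event_handler(struct trace_seq *s, struct pevent_record *record,
			    struct event_format *event, void *context)
{
	unsigned long long val;

	if (pevent_get_field_val(s, event, "count", record, &val, 1) == 0)
		trace_seq_printf(s, " count=%llu", val);

	return 0;
}

/* Called with the plugin's struct pevent; id < 0 means look up by name. */
static void my_plugin_register(struct pevent *pevent)
{
	pevent_register_event_handler(pevent, -1, "my_subsys", "my_event",
				      my_event_handler, NULL);
}
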
H A Dplugin_sched_switch.c24 #include "event-parse.h"
64 pevent_register_comm(field->event->pevent, comm, pid); write_and_save_comm()
69 struct event_format *event, void *context) sched_wakeup_handler()
74 if (pevent_get_field_val(s, event, "pid", record, &val, 1)) sched_wakeup_handler()
77 field = pevent_find_any_field(event, "comm"); sched_wakeup_handler()
84 if (pevent_get_field_val(s, event, "prio", record, &val, 0) == 0) sched_wakeup_handler()
87 if (pevent_get_field_val(s, event, "success", record, &val, 1) == 0) sched_wakeup_handler()
90 if (pevent_get_field_val(s, event, "target_cpu", record, &val, 0) == 0) sched_wakeup_handler()
98 struct event_format *event, void *context) sched_switch_handler()
103 if (pevent_get_field_val(s, event, "prev_pid", record, &val, 1)) sched_switch_handler()
106 field = pevent_find_any_field(event, "prev_comm"); sched_switch_handler()
113 if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0) sched_switch_handler()
116 if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0) sched_switch_handler()
121 if (pevent_get_field_val(s, event, "next_pid", record, &val, 1)) sched_switch_handler()
124 field = pevent_find_any_field(event, "next_comm"); sched_switch_handler()
131 if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0) sched_switch_handler()
67 sched_wakeup_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) sched_wakeup_handler() argument
96 sched_switch_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) sched_switch_handler() argument
H A Devent-parse.c36 #include "event-parse.h"
37 #include "event-utils.h"
54 #define do_warning_event(event, fmt, ...) \
59 if (event) \
60 warning("[%s:%s] " fmt, event->system, \
61 event->name, ##__VA_ARGS__); \
108 struct event_format *event, struct print_arg *arg);
756 static int add_event(struct pevent *pevent, struct event_format *event) add_event() argument
759 struct event_format **events = realloc(pevent->events, sizeof(event) * add_event()
767 if (pevent->events[i]->id > event->id) add_event()
773 sizeof(event) * (pevent->nr_events - i)); add_event()
775 pevent->events[i] = event; add_event()
778 event->pevent = pevent; add_event()
1366 static int event_read_fields(struct event_format *event, struct format_field **fields) event_read_fields() argument
1394 if (event->flags & EVENT_FL_ISFTRACE && event_read_fields()
1413 field->event = event; event_read_fields()
1424 (event->flags & EVENT_FL_ISFTRACE && event_read_fields()
1453 do_warning_event(event, "%s: no type found", __func__); event_read_fields()
1500 do_warning_event(event, "failed to find token"); event_read_fields()
1629 field->elementsize = event->pevent ? event_read_fields()
1630 event->pevent->long_size : event_read_fields()
1653 static int event_read_format(struct event_format *event) event_read_format() argument
1668 ret = event_read_fields(event, &event->format.common_fields); event_read_format()
1671 event->format.nr_common = ret; event_read_format()
1673 ret = event_read_fields(event, &event->format.fields); event_read_format()
1676 event->format.nr_fields = ret; event_read_format()
1686 process_arg_token(struct event_format *event, struct print_arg *arg,
1690 process_arg(struct event_format *event, struct print_arg *arg, char **tok) process_arg() argument
1698 return process_arg_token(event, arg, tok, type); process_arg()
1702 process_op(struct event_format *event, struct print_arg *arg, char **tok);
1709 process_field_arg(struct event_format *event, struct print_arg *arg, char **tok) process_field_arg() argument
1713 type = process_arg(event, arg, tok); process_field_arg()
1716 type = process_op(event, arg, tok); process_field_arg()
1723 process_cond(struct event_format *event, struct print_arg *top, char **tok) process_cond() argument
1734 do_warning_event(event, "%s: not enough memory!", __func__); process_cond()
1746 type = process_arg(event, left, &token); process_cond()
1754 type = process_op(event, left, &token); process_cond()
1763 type = process_arg(event, right, &token); process_cond()
1779 process_array(struct event_format *event, struct print_arg *top, char **tok) process_array() argument
1787 do_warning_event(event, "%s: not enough memory!", __func__); process_array()
1794 type = process_arg(event, arg, &token); process_array()
1881 process_op(struct event_format *event, struct print_arg *arg, char **tok) process_op() argument
1893 do_warning_event(event, "bad op token %s", token); process_op()
1903 do_warning_event(event, "bad op token %s", token); process_op()
1924 type = process_arg(event, right, tok); process_op()
1941 type = process_cond(event, arg, tok); process_op()
1974 event->flags |= EVENT_FL_FAILED; process_op()
1989 do_warning_event(event, "bad pointer type"); process_op()
2010 type = process_arg_token(event, right, tok, type); process_op()
2049 type = process_array(event, arg, tok); process_op()
2052 do_warning_event(event, "unknown op '%s'", token); process_op()
2053 event->flags |= EVENT_FL_FAILED; process_op()
2065 return process_op(event, arg, tok); process_op()
2067 return process_op(event, right, tok); process_op()
2073 do_warning_event(event, "%s: not enough memory!", __func__); process_op()
2081 process_entry(struct event_format *event __maybe_unused, struct print_arg *arg, process_entry()
2099 arg->field.field = pevent_find_any_field(event, arg->field.name); process_entry()
2103 arg->field.field = pevent_find_any_field(event, arg->field.name); process_entry()
2120 static int alloc_and_process_delim(struct event_format *event, char *next_token, alloc_and_process_delim() argument
2130 do_warning_event(event, "%s: not enough memory!", __func__); alloc_and_process_delim()
2135 type = process_arg(event, field, &token); alloc_and_process_delim()
2449 process_fields(struct event_format *event, struct print_flag_sym **list, char **tok) process_fields() argument
2468 type = process_arg(event, arg, &token); process_fields()
2471 type = process_op(event, arg, &token); process_fields()
2496 type = process_arg(event, arg, &token); process_fields()
2530 process_flags(struct event_format *event, struct print_arg *arg, char **tok) process_flags() argument
2541 do_warning_event(event, "%s: not enough memory!", __func__); process_flags()
2545 type = process_field_arg(event, field, &token); process_flags()
2549 type = process_op(event, field, &token); process_flags()
2566 type = process_fields(event, &arg->flags.flags, &token); process_flags()
2583 process_symbols(struct event_format *event, struct print_arg *arg, char **tok) process_symbols() argument
2594 do_warning_event(event, "%s: not enough memory!", __func__); process_symbols()
2598 type = process_field_arg(event, field, &token); process_symbols()
2605 type = process_fields(event, &arg->symbol.symbols, &token); process_symbols()
2622 process_hex(struct event_format *event, struct print_arg *arg, char **tok) process_hex() argument
2627 if (alloc_and_process_delim(event, ",", &arg->hex.field)) process_hex()
2630 if (alloc_and_process_delim(event, ")", &arg->hex.size)) process_hex()
2643 process_int_array(struct event_format *event, struct print_arg *arg, char **tok) process_int_array() argument
2648 if (alloc_and_process_delim(event, ",", &arg->int_array.field)) process_int_array()
2651 if (alloc_and_process_delim(event, ",", &arg->int_array.count)) process_int_array()
2654 if (alloc_and_process_delim(event, ")", &arg->int_array.el_size)) process_int_array()
2669 process_dynamic_array(struct event_format *event, struct print_arg *arg, char **tok) process_dynamic_array() argument
2689 field = pevent_find_field(event, token); process_dynamic_array()
2708 do_warning_event(event, "%s: not enough memory!", __func__); process_dynamic_array()
2713 type = process_arg(event, arg, &token); process_dynamic_array()
2733 process_dynamic_array_len(struct event_format *event, struct print_arg *arg, process_dynamic_array_len() argument
2746 field = pevent_find_field(event, token); process_dynamic_array_len()
2769 process_paren(struct event_format *event, struct print_arg *arg, char **tok) process_paren() argument
2775 type = process_arg(event, arg, &token); process_paren()
2781 type = process_op(event, arg, &token); process_paren()
2803 do_warning_event(event, "previous needed to be PRINT_ATOM"); process_paren()
2809 do_warning_event(event, "%s: not enough memory!", process_paren()
2817 type = process_arg_token(event, item_arg, &token, type); process_paren()
2832 process_str(struct event_format *event __maybe_unused, struct print_arg *arg, process_str()
2861 process_bitmask(struct event_format *event __maybe_unused, struct print_arg *arg, process_bitmask()
2922 process_func_handler(struct event_format *event, struct pevent_function_handler *func, process_func_handler() argument
2940 do_warning_event(event, "%s: not enough memory!", process_func_handler()
2945 type = process_arg(event, farg, &token); process_func_handler()
2948 do_warning_event(event, process_func_handler()
2949 "Error: function '%s()' expects %d arguments but event %s only uses %d", process_func_handler()
2951 event->name, i + 1); process_func_handler()
2956 do_warning_event(event, process_func_handler()
2957 "Error: function '%s()' only expects %d arguments but event %s has more", process_func_handler()
2958 func->name, func->nr_args, event->name); process_func_handler()
2980 process_function(struct event_format *event, struct print_arg *arg, process_function() argument
2988 return process_flags(event, arg, tok); process_function()
2993 return process_symbols(event, arg, tok); process_function()
2997 return process_hex(event, arg, tok); process_function()
3001 return process_int_array(event, arg, tok); process_function()
3005 return process_str(event, arg, tok); process_function()
3009 return process_bitmask(event, arg, tok); process_function()
3013 return process_dynamic_array(event, arg, tok); process_function()
3017 return process_dynamic_array_len(event, arg, tok); process_function()
3020 func = find_func_handler(event->pevent, token); process_function()
3023 return process_func_handler(event, func, arg, tok); process_function()
3026 do_warning_event(event, "function %s not defined", token); process_function()
3032 process_arg_token(struct event_format *event, struct print_arg *arg, process_arg_token() argument
3044 type = process_entry(event, arg, &token); process_arg_token()
3059 type = process_function(event, arg, atom, &token); process_arg_token()
3093 type = process_paren(event, arg, &token); process_arg_token()
3101 type = process_op(event, arg, &token); process_arg_token()
3112 do_warning_event(event, "unexpected type %d", type); process_arg_token()
3120 static int event_read_print_args(struct event_format *event, struct print_arg **list) event_read_print_args() argument
3135 do_warning_event(event, "%s: not enough memory!", event_read_print_args()
3140 type = process_arg(event, arg, &token); event_read_print_args()
3152 type = process_op(event, arg, &token); event_read_print_args()
3178 static int event_read_print(struct event_format *event) event_read_print() argument
3197 event->print_fmt.format = token; event_read_print()
3198 event->print_fmt.args = NULL; event_read_print()
3210 if (asprintf(&cat, "%s%s", event->print_fmt.format, token) < 0) event_read_print()
3213 free_token(event->print_fmt.format); event_read_print()
3214 event->print_fmt.format = NULL; event_read_print()
3224 ret = event_read_print_args(event, &event->print_fmt.args); event_read_print()
3236 * pevent_find_common_field - return a common field by event
3237 * @event: handle for the event
3240 * Returns a common field from the event by the given @name.
3244 pevent_find_common_field(struct event_format *event, const char *name) pevent_find_common_field() argument
3248 for (format = event->format.common_fields; pevent_find_common_field()
3259 * @event: handle for the event
3266 pevent_find_field(struct event_format *event, const char *name) pevent_find_field() argument
3270 for (format = event->format.fields; pevent_find_field()
3281 * @event: handle for the event
3289 pevent_find_any_field(struct event_format *event, const char *name) pevent_find_any_field() argument
3293 format = pevent_find_common_field(event, name); pevent_find_any_field()
3296 return pevent_find_field(event, name); pevent_find_any_field()
3347 *value = pevent_read_number(field->event->pevent, pevent_read_number_field()
3358 struct event_format *event; get_common_info() local
3363 * Pick any event to find where the type is; get_common_info()
3370 event = pevent->events[0]; get_common_info()
3371 field = pevent_find_common_field(event, type); get_common_info()
3439 * pevent_find_event - find an event by given id
3441 * @id: the id of the event
3443 * Returns an event that has a given @id.
3469 * pevent_find_event_by_name - find an event by given name
3472 * @name: the name of the event to search for
3474 * This returns an event with a given @name and under the system
3475 * @sys. If @sys is NULL the first event with @name is returned.
3481 struct event_format *event; pevent_find_event_by_name() local
3490 event = pevent->events[i]; pevent_find_event_by_name()
3491 if (strcmp(event->name, name) == 0) { pevent_find_event_by_name()
3494 if (strcmp(event->system, sys) == 0) pevent_find_event_by_name()
3499 event = NULL; pevent_find_event_by_name()
3501 pevent->last_event = event; pevent_find_event_by_name()
3502 return event; pevent_find_event_by_name()
3506 eval_num_arg(void *data, int size, struct event_format *event, struct print_arg *arg) eval_num_arg() argument
3508 struct pevent *pevent = event->pevent; eval_num_arg()
3524 arg->field.field = pevent_find_any_field(event, arg->field.name); eval_num_arg()
3539 val = eval_num_arg(data, size, event, arg->typecast.item); eval_num_arg()
3548 val = process_defined_func(&s, data, size, event, arg); eval_num_arg()
3558 right = eval_num_arg(data, size, event, arg->op.right); eval_num_arg()
3589 pevent_find_any_field(event, larg->field.name); eval_num_arg()
3608 left = eval_num_arg(data, size, event, arg->op.left); eval_num_arg()
3611 val = eval_num_arg(data, size, event, arg->op.left); eval_num_arg()
3613 val = eval_num_arg(data, size, event, arg->op.right); eval_num_arg()
3617 left = eval_num_arg(data, size, event, arg->op.left); eval_num_arg()
3618 right = eval_num_arg(data, size, event, arg->op.right); eval_num_arg()
3729 do_warning_event(event, "%s: unknown op '%s'", __func__, arg->op.op); eval_num_arg()
3733 do_warning_event(event, "%s: field %s not found", eval_num_arg()
3842 struct event_format *event, const char *format, print_str_arg()
3845 struct pevent *pevent = event->pevent; print_str_arg()
3866 field = pevent_find_any_field(event, arg->field.name); print_str_arg()
3911 do_warning_event(event, "%s: not enough memory!", print_str_arg()
3921 val = eval_num_arg(data, size, event, arg->flags.field); print_str_arg()
3939 val = eval_num_arg(data, size, event, arg->symbol.field); print_str_arg()
3959 field = pevent_find_any_field(event, str); print_str_arg()
3966 len = eval_num_arg(data, size, event, arg->hex.size); print_str_arg()
3990 field = pevent_find_any_field(event, str); print_str_arg()
3997 len = eval_num_arg(data, size, event, arg->int_array.count); print_str_arg()
3998 el_size = eval_num_arg(data, size, event, print_str_arg()
4030 f = pevent_find_any_field(event, arg->string.string); print_str_arg()
4048 f = pevent_find_any_field(event, arg->bitmask.bitmask); print_str_arg()
4064 val = eval_num_arg(data, size, event, arg->op.left); print_str_arg()
4066 print_str_arg(s, data, size, event, print_str_arg()
4069 print_str_arg(s, data, size, event, print_str_arg()
4073 process_defined_func(s, data, size, event, arg); print_str_arg()
4083 do_warning_event(event, "%s: field %s not found", print_str_arg()
4089 struct event_format *event, struct print_arg *arg) process_defined_func()
4121 args[i] = eval_num_arg(data, size, event, farg); process_defined_func()
4125 print_str_arg(&str, data, size, event, "%s", -1, farg); process_defined_func()
4129 do_warning_event(event, "%s(%d): malloc str", process_defined_func()
4137 do_warning_event(event, "%s(%d): malloc str", process_defined_func()
4150 do_warning_event(event, "Unexpected end of arguments\n"); process_defined_func()
4184 static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event) make_bprint_args() argument
4186 struct pevent *pevent = event->pevent; make_bprint_args()
4198 field = pevent_find_field(event, "buf"); make_bprint_args()
4200 do_warning_event(event, "can't find buffer field for binary printk"); make_bprint_args()
4203 ip_field = pevent_find_field(event, "ip"); make_bprint_args()
4205 do_warning_event(event, "can't find ip field for binary printk"); make_bprint_args()
4219 do_warning_event(event, "%s(%d): not enough memory!", make_bprint_args()
4290 do_warning_event(event, "%s(%d): not enough memory!", make_bprint_args()
4313 do_warning_event(event, "%s(%d): not enough memory!", make_bprint_args()
4340 struct event_format *event) get_bprint_format()
4342 struct pevent *pevent = event->pevent; get_bprint_format()
4351 field = pevent_find_field(event, "fmt"); get_bprint_format()
4353 do_warning_event(event, "can't find format field for binary printk"); get_bprint_format()
4375 struct event_format *event, struct print_arg *arg) print_mac_arg()
4381 process_defined_func(s, data, size, event, arg); print_mac_arg()
4395 pevent_find_any_field(event, arg->field.name); print_mac_arg()
4397 do_warning_event(event, "%s: field %s not found", print_mac_arg()
4528 void *data, int size, struct event_format *event, print_ipv4_arg()
4534 process_defined_func(s, data, size, event, arg); print_ipv4_arg()
4545 pevent_find_any_field(event, arg->field.name); print_ipv4_arg()
4565 void *data, int size, struct event_format *event, print_ipv6_arg()
4580 process_defined_func(s, data, size, event, arg); print_ipv6_arg()
4591 pevent_find_any_field(event, arg->field.name); print_ipv6_arg()
4615 void *data, int size, struct event_format *event, print_ipsa_arg()
4638 process_defined_func(s, data, size, event, arg); print_ipsa_arg()
4649 pevent_find_any_field(event, arg->field.name); print_ipsa_arg()
4697 void *data, int size, struct event_format *event, print_ip_arg()
4713 rc += print_ipv4_arg(s, ptr, i, data, size, event, arg); print_ip_arg()
4716 rc += print_ipv6_arg(s, ptr, i, data, size, event, arg); print_ip_arg()
4719 rc += print_ipsa_arg(s, ptr, i, data, size, event, arg); print_ip_arg()
4740 struct event_format *event) print_event_fields()
4746 field = event->format.fields; print_event_fields()
4753 val = pevent_read_number(event->pevent, data + offset, len); print_event_fields()
4773 val = pevent_read_number(event->pevent, data + field->offset, print_event_fields()
4809 static void pretty_print(struct trace_seq *s, void *data, int size, struct event_format *event) pretty_print() argument
4811 struct pevent *pevent = event->pevent; pretty_print()
4812 struct print_fmt *print_fmt = &event->print_fmt; pretty_print()
4828 if (event->flags & EVENT_FL_FAILED) { pretty_print()
4830 print_event_fields(s, data, size, event); pretty_print()
4834 if (event->flags & EVENT_FL_ISBPRINT) { pretty_print()
4835 bprint_fmt = get_bprint_format(data, size, event); pretty_print()
4836 args = make_bprint_args(bprint_fmt, data, size, event); pretty_print()
4888 do_warning_event(event, "no argument match"); pretty_print()
4889 event->flags |= EVENT_FL_FAILED; pretty_print()
4892 len_arg = eval_num_arg(data, size, event, arg); pretty_print()
4913 print_mac_arg(s, *(ptr+1), data, size, event, arg); pretty_print()
4920 n = print_ip_arg(s, ptr+1, data, size, event, arg); pretty_print()
4935 do_warning_event(event, "no argument match"); pretty_print()
4936 event->flags |= EVENT_FL_FAILED; pretty_print()
4945 do_warning_event(event, "bad format!"); pretty_print()
4946 event->flags |= EVENT_FL_FAILED; pretty_print()
4953 val = eval_num_arg(data, size, event, arg); pretty_print()
5011 do_warning_event(event, "bad count (%d)", ls); pretty_print()
5012 event->flags |= EVENT_FL_FAILED; pretty_print()
5017 do_warning_event(event, "no matching argument"); pretty_print()
5018 event->flags |= EVENT_FL_FAILED; pretty_print()
5027 do_warning_event(event, "bad format!"); pretty_print()
5028 event->flags |= EVENT_FL_FAILED; pretty_print()
5038 print_str_arg(&p, data, size, event, pretty_print()
5053 if (event->flags & EVENT_FL_FAILED) { pretty_print()
5148 * pevent_data_type - parse out the given event type
5152 * This returns the event id from the @rec.
5160 * pevent_data_event_from_type - find the event by a given type
5162 * @type: the type of the event.
 5164 * This returns the event from a given @type;
5292 * @event: the handle to the event
5295 * This parses the raw @data using the given @event information and
5298 void pevent_event_info(struct trace_seq *s, struct event_format *event, pevent_event_info() argument
5303 if (event->pevent->print_raw || (event->flags & EVENT_FL_PRINTRAW)) pevent_event_info()
5304 print_event_fields(s, record->data, record->size, event); pevent_event_info()
5307 if (event->handler && !(event->flags & EVENT_FL_NOHANDLE)) pevent_event_info()
5308 print_pretty = event->handler(s, record, event, pevent_event_info()
5309 event->context); pevent_event_info()
5312 pretty_print(s, record->data, record->size, event); pevent_event_info()
5335 struct event_format *event; pevent_print_event() local
5361 event = pevent_find_event(pevent, type); pevent_print_event()
5362 if (!event) { pevent_print_event()
5363 do_warning("ug! no event found for type %d", type); pevent_print_event()
5387 secs, p, usecs, event->name); pevent_print_event()
5390 record->ts, event->name); pevent_print_event()
5392 /* Space out the event names evenly. */ pevent_print_event()
5393 len = strlen(event->name); pevent_print_event()
5397 pevent_event_info(s, event, record); pevent_print_event()
5510 do_warning("event %s has more %s fields than specified", get_event_fields()
5518 do_warning("event %s has less %s fields than specified", get_event_fields()
5527 * pevent_event_common_fields - return a list of common fields for an event
5528 * @event: the event to return the common fields of.
5533 struct format_field **pevent_event_common_fields(struct event_format *event) pevent_event_common_fields() argument
5535 return get_event_fields("common", event->name, pevent_event_common_fields()
5536 event->format.nr_common, pevent_event_common_fields()
5537 event->format.common_fields); pevent_event_common_fields()
5541 * pevent_event_fields - return a list of event specific fields for an event
5542 * @event: the event to return the fields of.
5547 struct format_field **pevent_event_fields(struct event_format *event) pevent_event_fields() argument
5549 return get_event_fields("event", event->name, pevent_event_fields()
5550 event->format.nr_fields, pevent_event_fields()
5551 event->format.fields); pevent_event_fields()
5782 static int event_matches(struct event_format *event, event_matches() argument
5786 if (id >= 0 && id != event->id) event_matches()
5789 if (event_name && (strcmp(event_name, event->name) != 0)) event_matches()
5792 if (sys_name && (strcmp(sys_name, event->system) != 0)) event_matches()
5805 static int find_event_handle(struct pevent *pevent, struct event_format *event) find_event_handle() argument
5812 if (event_matches(event, handle->id, find_event_handle()
5821 pr_stat("overriding event (%d) %s:%s with new print handler", find_event_handle()
5822 event->id, event->system, event->name); find_event_handle()
5824 event->handler = handle->func; find_event_handle()
5825 event->context = handle->context; find_event_handle()
5834 * __pevent_parse_format - parse the event format
5835 * @buf: the buffer storing the event format string
5837 * @sys: the system the event belongs to
5839 * This parses the event format and creates an event structure
5840 * to quickly parse raw data for a given event.
5850 struct event_format *event; __pevent_parse_format() local
5855 *eventp = event = alloc_event(); __pevent_parse_format()
5856 if (!event) __pevent_parse_format()
5859 event->name = event_read_name(); __pevent_parse_format()
5860 if (!event->name) { __pevent_parse_format()
5861 /* Bad event? */ __pevent_parse_format()
5867 event->flags |= EVENT_FL_ISFTRACE; __pevent_parse_format()
5869 if (strcmp(event->name, "bprint") == 0) __pevent_parse_format()
5870 event->flags |= EVENT_FL_ISBPRINT; __pevent_parse_format()
5873 event->id = event_read_id(); __pevent_parse_format()
5874 if (event->id < 0) { __pevent_parse_format()
5883 event->system = strdup(sys); __pevent_parse_format()
5884 if (!event->system) { __pevent_parse_format()
5889 /* Add pevent to event so that it can be referenced */ __pevent_parse_format()
5890 event->pevent = pevent; __pevent_parse_format()
5892 ret = event_read_format(event); __pevent_parse_format()
5899 * If the event has an override, don't print warnings if the event __pevent_parse_format()
5902 if (pevent && find_event_handle(pevent, event)) __pevent_parse_format()
5905 ret = event_read_print(event); __pevent_parse_format()
5913 if (!ret && (event->flags & EVENT_FL_ISFTRACE)) { __pevent_parse_format()
5918 list = &event->print_fmt.args; __pevent_parse_format()
5919 for (field = event->format.fields; field; field = field->next) { __pevent_parse_format()
5922 event->flags |= EVENT_FL_FAILED; __pevent_parse_format()
5928 event->flags |= EVENT_FL_FAILED; __pevent_parse_format()
5942 event->flags |= EVENT_FL_FAILED; __pevent_parse_format()
5946 free(event->system); __pevent_parse_format()
5947 free(event->name); __pevent_parse_format()
5948 free(event); __pevent_parse_format()
5960 struct event_format *event = *eventp; __pevent_parse_event() local
5962 if (event == NULL) __pevent_parse_event()
5965 if (pevent && add_event(pevent, event)) { __pevent_parse_event()
5971 if (PRINT_ARGS && event->print_fmt.args) __pevent_parse_event()
5972 print_args(event->print_fmt.args); __pevent_parse_event()
5977 pevent_free_format(event); __pevent_parse_event()
5982 * pevent_parse_format - parse the event format
5985 * @buf: the buffer storing the event format string
5987 * @sys: the system the event belongs to
5989 * This parses the event format and creates an event structure
5990 * to quickly parse raw data for a given event.
6005 * pevent_parse_event - parse the event format
6007 * @buf: the buffer storing the event format string
6009 * @sys: the system the event belongs to
6011 * This parses the event format and creates an event structure
6012 * to quickly parse raw data for a given event.
6021 struct event_format *event = NULL; pevent_parse_event() local
6022 return __pevent_parse_event(pevent, &event, buf, size, sys); pevent_parse_event()
6081 * @event: the event that the field is for
6092 void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event, pevent_get_field_raw() argument
6101 if (!event) pevent_get_field_raw()
6104 field = pevent_find_field(event, name); pevent_get_field_raw()
6118 offset = pevent_read_number(event->pevent, pevent_get_field_raw()
6131 * @event: the event that the field is for
6139 int pevent_get_field_val(struct trace_seq *s, struct event_format *event, pevent_get_field_val() argument
6145 if (!event) pevent_get_field_val()
6148 field = pevent_find_field(event, name); pevent_get_field_val()
6156 * @event: the event that the field is for
6164 int pevent_get_common_field_val(struct trace_seq *s, struct event_format *event, pevent_get_common_field_val() argument
6170 if (!event) pevent_get_common_field_val()
6173 field = pevent_find_common_field(event, name); pevent_get_common_field_val()
6181 * @event: the event that the field is for
6189 int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event, pevent_get_any_field_val() argument
6195 if (!event) pevent_get_any_field_val()
6198 field = pevent_find_any_field(event, name); pevent_get_any_field_val()
6207 * @event: the event that the field is for
6215 struct event_format *event, const char *name, pevent_print_num_field()
6218 struct format_field *field = pevent_find_field(event, name); pevent_print_num_field()
6239 * @event: the event that the field is for
6247 struct event_format *event, const char *name, pevent_print_func_field()
6250 struct format_field *field = pevent_find_field(event, name); pevent_print_func_field()
6251 struct pevent *pevent = event->pevent; pevent_print_func_field()
6411 struct event_format *event; pevent_search_event() local
6415 event = pevent_find_event(pevent, id); pevent_search_event()
6416 if (!event) pevent_search_event()
6418 if (event_name && (strcmp(event_name, event->name) != 0)) pevent_search_event()
6420 if (sys_name && (strcmp(sys_name, event->system) != 0)) pevent_search_event()
6423 event = pevent_find_event_by_name(pevent, sys_name, event_name); pevent_search_event()
6424 if (!event) pevent_search_event()
6427 return event; pevent_search_event()
6431 * pevent_register_event_handler - register a way to parse an event
6433 * @id: the id of the event to register
6434 * @sys_name: the system name the event belongs to
6435 * @event_name: the name of the event
6436 * @func: the function to call to parse the event information
6440 * a given event. If for some reason the default print format
6442 * for an event to be used to parse the data instead.
6444 * If @id is >= 0, then it is used to find the event.
6451 struct event_format *event; pevent_register_event_handler() local
6454 event = pevent_search_event(pevent, id, sys_name, event_name); pevent_register_event_handler()
6455 if (event == NULL) pevent_register_event_handler()
6458 pr_stat("overriding event (%d) %s:%s with new print handler", pevent_register_event_handler()
6459 event->id, event->system, event->name); pevent_register_event_handler()
6461 event->handler = func; pevent_register_event_handler()
6462 event->context = context; pevent_register_event_handler()
6469 do_warning("Failed to allocate event handler"); pevent_register_event_handler()
6481 do_warning("Failed to allocate event/sys name"); pevent_register_event_handler()
6516 * pevent_unregister_event_handler - unregister an existing event handler
6518 * @id: the id of the event to unregister
6520 * @event_name: the name of the event handler
6521 * @func: the function to call to parse the event information
6524 * This function removes existing event handler (parser).
6526 * If @id is >= 0, then it is used to find the event.
6529 * Returns 0 if handler was removed successfully, -1 if event was not found.
6535 struct event_format *event; pevent_unregister_event_handler() local
6539 event = pevent_search_event(pevent, id, sys_name, event_name); pevent_unregister_event_handler()
6540 if (event == NULL) pevent_unregister_event_handler()
6543 if (event->handler == func && event->context == context) { pevent_unregister_event_handler()
6544 pr_stat("removing override handler for event (%d) %s:%s. Going back to default handler.", pevent_unregister_event_handler()
6545 event->id, event->system, event->name); pevent_unregister_event_handler()
6547 event->handler = NULL; pevent_unregister_event_handler()
6548 event->context = NULL; pevent_unregister_event_handler()
6613 void pevent_free_format(struct event_format *event) pevent_free_format() argument
6615 free(event->name); pevent_free_format()
6616 free(event->system); pevent_free_format()
6618 free_formats(&event->format); pevent_free_format()
6620 free(event->print_fmt.format); pevent_free_format()
6621 free_args(event->print_fmt.args); pevent_free_format()
6623 free(event); pevent_free_format()
3841 print_str_arg(struct trace_seq *s, void *data, int size, struct event_format *event, const char *format, int len_arg, struct print_arg *arg) print_str_arg() argument
4088 process_defined_func(struct trace_seq *s, void *data, int size, struct event_format *event, struct print_arg *arg) process_defined_func() argument
4339 get_bprint_format(void *data, int size __maybe_unused, struct event_format *event) get_bprint_format() argument
4374 print_mac_arg(struct trace_seq *s, int mac, void *data, int size, struct event_format *event, struct print_arg *arg) print_mac_arg() argument
4527 print_ipv4_arg(struct trace_seq *s, const char *ptr, char i, void *data, int size, struct event_format *event, struct print_arg *arg) print_ipv4_arg() argument
4564 print_ipv6_arg(struct trace_seq *s, const char *ptr, char i, void *data, int size, struct event_format *event, struct print_arg *arg) print_ipv6_arg() argument
4614 print_ipsa_arg(struct trace_seq *s, const char *ptr, char i, void *data, int size, struct event_format *event, struct print_arg *arg) print_ipsa_arg() argument
4696 print_ip_arg(struct trace_seq *s, const char *ptr, void *data, int size, struct event_format *event, struct print_arg *arg) print_ip_arg() argument
4738 print_event_fields(struct trace_seq *s, void *data, int size __maybe_unused, struct event_format *event) print_event_fields() argument
6214 pevent_print_num_field(struct trace_seq *s, const char *fmt, struct event_format *event, const char *name, struct pevent_record *record, int err) pevent_print_num_field() argument
6246 pevent_print_func_field(struct trace_seq *s, const char *fmt, struct event_format *event, const char *name, struct pevent_record *record, int err) pevent_print_func_field() argument
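
Tying several of the documented entry points together, a hedged sketch of parsing one event's format text and pretty-printing a raw record with it could look like the following. The "sched"/"sched_switch" names and the idea of reading the format from tracefs are placeholders, and the exact error-return type of pevent_parse_event() should be checked against event-parse.h.

#include "event-parse.h"

/*
 * Parse one format description (for example the contents of
 * .../events/sched/sched_switch/format) and use it to render @record into @s.
 */
static int example_print_record(struct pevent *pevent, struct trace_seq *s,
				const char *fmt_buf, unsigned long fmt_size,
				struct pevent_record *record)
{
	struct event_format *event;

	if (pevent_parse_event(pevent, fmt_buf, fmt_size, "sched") != 0)
		return -1;

	event = pevent_find_event_by_name(pevent, "sched", "sched_switch");
	if (!event)
		return -1;

	pevent_event_info(s, event, record);
	return 0;
}
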
H A Dplugin_mac80211.c24 #include "event-parse.h"
28 static void print_string(struct trace_seq *s, struct event_format *event, print_string() argument
31 struct format_field *f = pevent_find_field(event, name); print_string()
56 #define SF(fn) pevent_print_num_field(s, fn ":%d", event, fn, record, 0)
57 #define SFX(fn) pevent_print_num_field(s, fn ":%#x", event, fn, record, 0)
62 struct event_format *event, void *context) drv_bss_info_changed()
66 print_string(s, event, "wiphy_name", data); drv_bss_info_changed()
68 print_string(s, event, "vif_name", data); drv_bss_info_changed()
69 pevent_print_num_field(s, "(%d)", event, "vif_type", record, 1); drv_bss_info_changed()
60 drv_bss_info_changed(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) drv_bss_info_changed() argument
H A Dplugin_kvm.c25 #include "event-parse.h"
251 struct event_format *event, const char *field) print_exit_reason()
257 if (pevent_get_field_val(s, event, field, record, &val, 1) < 0) print_exit_reason()
260 if (pevent_get_field_val(s, event, "isa", record, &isa, 0) < 0) print_exit_reason()
272 struct event_format *event, void *context) kvm_exit_handler()
276 if (print_exit_reason(s, record, event, "exit_reason") < 0) kvm_exit_handler()
279 pevent_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1); kvm_exit_handler()
281 if (pevent_get_field_val(s, event, "info1", record, &info1, 0) >= 0 kvm_exit_handler()
282 && pevent_get_field_val(s, event, "info2", record, &info2, 0) >= 0) kvm_exit_handler()
295 struct event_format *event, void *context) kvm_emulate_insn_handler()
302 if (pevent_get_field_val(s, event, "rip", record, &rip, 1) < 0) kvm_emulate_insn_handler()
305 if (pevent_get_field_val(s, event, "csbase", record, &csbase, 1) < 0) kvm_emulate_insn_handler()
308 if (pevent_get_field_val(s, event, "len", record, &len, 1) < 0) kvm_emulate_insn_handler()
311 if (pevent_get_field_val(s, event, "flags", record, &flags, 1) < 0) kvm_emulate_insn_handler()
314 if (pevent_get_field_val(s, event, "failed", record, &failed, 1) < 0) kvm_emulate_insn_handler()
317 insn = pevent_get_field_raw(s, event, "insn", record, &llen, 1); kvm_emulate_insn_handler()
334 struct event_format *event, void *context) kvm_nested_vmexit_inject_handler()
336 if (print_exit_reason(s, record, event, "exit_code") < 0) kvm_nested_vmexit_inject_handler()
339 pevent_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1); kvm_nested_vmexit_inject_handler()
340 pevent_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1); kvm_nested_vmexit_inject_handler()
341 pevent_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1); kvm_nested_vmexit_inject_handler()
342 pevent_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1); kvm_nested_vmexit_inject_handler()
348 struct event_format *event, void *context) kvm_nested_vmexit_handler()
350 pevent_print_num_field(s, "rip %llx ", event, "rip", record, 1); kvm_nested_vmexit_handler()
352 return kvm_nested_vmexit_inject_handler(s, record, event, context); kvm_nested_vmexit_handler()
374 struct event_format *event, void *context) kvm_mmu_print_role()
382 if (pevent_get_field_val(s, event, "role", record, &val, 1) < 0) kvm_mmu_print_role()
391 if (pevent_is_file_bigendian(event->pevent) == kvm_mmu_print_role()
392 pevent_is_host_bigendian(event->pevent)) { kvm_mmu_print_role()
409 pevent_print_num_field(s, " root %u ", event, kvm_mmu_print_role()
412 if (pevent_get_field_val(s, event, "unsync", record, &val, 1) < 0) kvm_mmu_print_role()
421 struct event_format *event, void *context) kvm_mmu_get_page_handler()
425 if (pevent_get_field_val(s, event, "created", record, &val, 1) < 0) kvm_mmu_get_page_handler()
430 if (pevent_get_field_val(s, event, "gfn", record, &val, 1) < 0) kvm_mmu_get_page_handler()
434 return kvm_mmu_print_role(s, record, event, context); kvm_mmu_get_page_handler()
250 print_exit_reason(struct trace_seq *s, struct pevent_record *record, struct event_format *event, const char *field) print_exit_reason() argument
271 kvm_exit_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_exit_handler() argument
293 kvm_emulate_insn_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_emulate_insn_handler() argument
333 kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_nested_vmexit_inject_handler() argument
347 kvm_nested_vmexit_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_nested_vmexit_handler() argument
373 kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_mmu_print_role() argument
419 kvm_mmu_get_page_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_mmu_get_page_handler() argument
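
kvm_mmu_print_role() above compares pevent_is_file_bigendian() against pevent_is_host_bigendian() before interpreting a bit-packed field. A hedged helper capturing just that check (the actual byte swap is left out, and example_needs_swap is a hypothetical name) might look like this:

#include "event-parse.h"

/*
 * Non-zero when a multi-byte value read from the trace file must be
 * byte-swapped before its bit fields are interpreted on this host,
 * mirroring the endianness test used by kvm_mmu_print_role() above.
 */
static int example_needs_swap(struct event_format *event)
{
	return pevent_is_file_bigendian(event->pevent) !=
	       pevent_is_host_bigendian(event->pevent);
}
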
/linux-4.4.14/net/irda/irlan/
H A Dirlan_client_event.c39 static int irlan_client_state_idle (struct irlan_cb *self, IRLAN_EVENT event,
41 static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
43 static int irlan_client_state_conn (struct irlan_cb *self, IRLAN_EVENT event,
45 static int irlan_client_state_info (struct irlan_cb *self, IRLAN_EVENT event,
47 static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
49 static int irlan_client_state_open (struct irlan_cb *self, IRLAN_EVENT event,
51 static int irlan_client_state_wait (struct irlan_cb *self, IRLAN_EVENT event,
53 static int irlan_client_state_arb (struct irlan_cb *self, IRLAN_EVENT event,
55 static int irlan_client_state_data (struct irlan_cb *self, IRLAN_EVENT event,
57 static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
59 static int irlan_client_state_sync (struct irlan_cb *self, IRLAN_EVENT event,
62 static int (*state[])(struct irlan_cb *, IRLAN_EVENT event, struct sk_buff *) =
77 void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, irlan_do_client_event() argument
83 (*state[ self->client.state]) (self, event, skb); irlan_do_client_event()
87 * Function irlan_client_state_idle (event, skb, info)
92 static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_idle() argument
98 switch (event) { irlan_client_state_idle()
118 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_idle()
128 * Function irlan_client_state_query (event, skb, info)
134 static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_query() argument
140 switch(event) { irlan_client_state_query()
169 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_query()
179 * Function irlan_client_state_conn (event, skb, info)
185 static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_conn() argument
190 switch (event) { irlan_client_state_conn()
204 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_conn()
214 * Function irlan_client_state_info (self, event, skb, info)
218 static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_info() argument
223 switch (event) { irlan_client_state_info()
242 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_info()
252 * Function irlan_client_state_media (self, event, skb, info)
258 static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_media() argument
263 switch(event) { irlan_client_state_media()
277 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_media()
287 * Function irlan_client_state_open (self, event, skb, info)
293 static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_open() argument
300 switch(event) { irlan_client_state_open()
347 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_open()
358 * Function irlan_client_state_wait (self, event, skb, info)
364 static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_wait() argument
369 switch(event) { irlan_client_state_wait()
382 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_wait()
391 static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_arb() argument
398 switch(event) { irlan_client_state_arb()
430 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_arb()
440 * Function irlan_client_state_data (self, event, skb, info)
446 static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_data() argument
452 switch(event) { irlan_client_state_data()
461 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_data()
471 * Function irlan_client_state_close (self, event, skb, info)
476 static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_close() argument
486 * Function irlan_client_state_sync (self, event, skb, info)
491 static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_sync() argument
H A Dirlan_provider_event.c33 static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
35 static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
37 static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
39 static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
42 static int (*state[])(struct irlan_cb *self, IRLAN_EVENT event,
58 void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event, irlan_do_provider_event() argument
63 (*state[self->provider.state]) (self, event, skb); irlan_do_provider_event()
67 * Function irlan_provider_state_idle (event, skb, info)
72 static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, irlan_provider_state_idle() argument
77 switch(event) { irlan_provider_state_idle()
83 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_provider_state_idle()
93 * Function irlan_provider_state_info (self, event, skb, info)
97 static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, irlan_provider_state_info() argument
104 switch(event) { irlan_provider_state_info()
146 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_provider_state_info()
156 * Function irlan_provider_state_open (self, event, skb, info)
162 static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_provider_state_open() argument
167 switch(event) { irlan_provider_state_open()
183 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_provider_state_open()
193 * Function irlan_provider_state_data (self, event, skb, info)
199 static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, irlan_provider_state_data() argument
205 switch(event) { irlan_provider_state_data()
216 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_provider_state_data()
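The IrLAN client and provider fragments above share one pattern: every connection state is a handler function, the handlers are collected in an array of function pointers indexed by the current state, and irlan_do_client_event()/irlan_do_provider_event() simply dispatch through that array, with each handler logging an "Unknown event" for anything it does not expect. The following is a minimal, self-contained sketch of that table-driven state machine; the enum values, struct conn and all function names here are illustrative stand-ins, not the kernel's types.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's IRLAN state/event enums. */
enum state { S_IDLE, S_QUERY, S_CONN, S_NSTATES };
enum event { EV_DISCOVERY, EV_CONN_INDICATION, EV_DATA };

struct conn { enum state state; };

static void next_state(struct conn *c, enum state s) { c->state = s; }

/* One handler per state; unknown events are only logged, as in the fragments. */
static int state_idle(struct conn *c, enum event ev)
{
    switch (ev) {
    case EV_DISCOVERY:
        next_state(c, S_QUERY);
        break;
    default:
        printf("%s: unknown event %d\n", __func__, ev);
    }
    return 0;
}

static int state_query(struct conn *c, enum event ev)
{
    switch (ev) {
    case EV_CONN_INDICATION:
        next_state(c, S_CONN);
        break;
    default:
        printf("%s: unknown event %d\n", __func__, ev);
    }
    return 0;
}

static int state_conn(struct conn *c, enum event ev)
{
    switch (ev) {
    case EV_DATA:
        printf("data received in connected state\n");
        break;
    default:
        printf("%s: unknown event %d\n", __func__, ev);
    }
    return 0;
}

/* The dispatch table, indexed by the current state. */
static int (*state_table[S_NSTATES])(struct conn *, enum event) = {
    [S_IDLE]  = state_idle,
    [S_QUERY] = state_query,
    [S_CONN]  = state_conn,
};

static int do_event(struct conn *c, enum event ev)
{
    return state_table[c->state](c, ev);
}

int main(void)
{
    struct conn c = { .state = S_IDLE };

    do_event(&c, EV_DISCOVERY);       /* IDLE  -> QUERY */
    do_event(&c, EV_CONN_INDICATION); /* QUERY -> CONN  */
    do_event(&c, EV_DATA);            /* handled in CONN */
    return 0;
}

The appeal of the table is that adding a state only means adding one handler and one array slot; the dispatcher itself never changes.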
/linux-4.4.14/tools/perf/util/
H A Devent.c3 #include "event.h"
134 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, perf_event__prepare_comm() argument
142 memset(&event->comm, 0, sizeof(event->comm)); perf_event__prepare_comm()
145 if (perf_event__get_comm_ids(pid, event->comm.comm, perf_event__prepare_comm()
146 sizeof(event->comm.comm), perf_event__prepare_comm()
157 event->comm.pid = *tgid; perf_event__prepare_comm()
158 event->comm.header.type = PERF_RECORD_COMM; perf_event__prepare_comm()
160 size = strlen(event->comm.comm) + 1; perf_event__prepare_comm()
162 memset(event->comm.comm + size, 0, machine->id_hdr_size); perf_event__prepare_comm()
163 event->comm.header.size = (sizeof(event->comm) - perf_event__prepare_comm()
164 (sizeof(event->comm.comm) - size) + perf_event__prepare_comm()
166 event->comm.tid = pid; perf_event__prepare_comm()
172 union perf_event *event, pid_t pid, perf_event__synthesize_comm()
178 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) perf_event__synthesize_comm()
181 if (process(tool, event, &synth_sample, machine) != 0) perf_event__synthesize_comm()
188 union perf_event *event, perf_event__synthesize_fork()
193 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); perf_event__synthesize_fork()
201 event->fork.ppid = ppid; perf_event__synthesize_fork()
202 event->fork.ptid = ppid; perf_event__synthesize_fork()
204 event->fork.ppid = tgid; perf_event__synthesize_fork()
205 event->fork.ptid = tgid; perf_event__synthesize_fork()
207 event->fork.pid = tgid; perf_event__synthesize_fork()
208 event->fork.tid = pid; perf_event__synthesize_fork()
209 event->fork.header.type = PERF_RECORD_FORK; perf_event__synthesize_fork()
211 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); perf_event__synthesize_fork()
213 if (process(tool, event, &synth_sample, machine) != 0) perf_event__synthesize_fork()
220 union perf_event *event, perf_event__synthesize_mmap_events()
249 event->header.type = PERF_RECORD_MMAP2; perf_event__synthesize_mmap_events()
278 &event->mmap2.start, &event->mmap2.len, prot, perf_event__synthesize_mmap_events()
279 &event->mmap2.pgoff, &event->mmap2.maj, perf_event__synthesize_mmap_events()
280 &event->mmap2.min, perf_event__synthesize_mmap_events()
289 event->mmap2.ino = (u64)ino; perf_event__synthesize_mmap_events()
295 event->header.misc = PERF_RECORD_MISC_USER; perf_event__synthesize_mmap_events()
297 event->header.misc = PERF_RECORD_MISC_GUEST_USER; perf_event__synthesize_mmap_events()
300 event->mmap2.prot = 0; perf_event__synthesize_mmap_events()
301 event->mmap2.flags = 0; perf_event__synthesize_mmap_events()
303 event->mmap2.prot |= PROT_READ; perf_event__synthesize_mmap_events()
305 event->mmap2.prot |= PROT_WRITE; perf_event__synthesize_mmap_events()
307 event->mmap2.prot |= PROT_EXEC; perf_event__synthesize_mmap_events()
310 event->mmap2.flags |= MAP_SHARED; perf_event__synthesize_mmap_events()
312 event->mmap2.flags |= MAP_PRIVATE; perf_event__synthesize_mmap_events()
318 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; perf_event__synthesize_mmap_events()
323 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; perf_event__synthesize_mmap_events()
329 memcpy(event->mmap2.filename, execname, size); perf_event__synthesize_mmap_events()
331 event->mmap2.len -= event->mmap.start; perf_event__synthesize_mmap_events()
332 event->mmap2.header.size = (sizeof(event->mmap2) - perf_event__synthesize_mmap_events()
333 (sizeof(event->mmap2.filename) - size)); perf_event__synthesize_mmap_events()
334 memset(event->mmap2.filename + size, 0, machine->id_hdr_size); perf_event__synthesize_mmap_events()
335 event->mmap2.header.size += machine->id_hdr_size; perf_event__synthesize_mmap_events()
336 event->mmap2.pid = tgid; perf_event__synthesize_mmap_events()
337 event->mmap2.tid = pid; perf_event__synthesize_mmap_events()
339 if (process(tool, event, &synth_sample, machine) != 0) { perf_event__synthesize_mmap_events()
360 union perf_event *event = zalloc((sizeof(event->mmap) + perf_event__synthesize_modules() local
362 if (event == NULL) { perf_event__synthesize_modules()
363 pr_debug("Not enough memory synthesizing mmap event " perf_event__synthesize_modules()
368 event->header.type = PERF_RECORD_MMAP; perf_event__synthesize_modules()
375 event->header.misc = PERF_RECORD_MISC_KERNEL; perf_event__synthesize_modules()
377 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; perf_event__synthesize_modules()
386 event->mmap.header.type = PERF_RECORD_MMAP; perf_event__synthesize_modules()
387 event->mmap.header.size = (sizeof(event->mmap) - perf_event__synthesize_modules()
388 (sizeof(event->mmap.filename) - size)); perf_event__synthesize_modules()
389 memset(event->mmap.filename + size, 0, machine->id_hdr_size); perf_event__synthesize_modules()
390 event->mmap.header.size += machine->id_hdr_size; perf_event__synthesize_modules()
391 event->mmap.start = pos->start; perf_event__synthesize_modules()
392 event->mmap.len = pos->end - pos->start; perf_event__synthesize_modules()
393 event->mmap.pid = machine->pid; perf_event__synthesize_modules()
395 memcpy(event->mmap.filename, pos->dso->long_name, perf_event__synthesize_modules()
397 if (process(tool, event, &synth_sample, machine) != 0) { perf_event__synthesize_modules()
403 free(event); perf_event__synthesize_modules()
423 /* special case: only send one comm event using passed in pid */ __event__synthesize_thread()
465 * Send the prepared comm event __event__synthesize_thread()
656 union perf_event *event; perf_event__synthesize_kernel_mmap() local
666 event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); perf_event__synthesize_kernel_mmap()
667 if (event == NULL) { perf_event__synthesize_kernel_mmap()
668 pr_debug("Not enough memory synthesizing mmap event " perf_event__synthesize_kernel_mmap()
679 event->header.misc = PERF_RECORD_MISC_KERNEL; perf_event__synthesize_kernel_mmap()
681 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; perf_event__synthesize_kernel_mmap()
685 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), perf_event__synthesize_kernel_mmap()
688 event->mmap.header.type = PERF_RECORD_MMAP; perf_event__synthesize_kernel_mmap()
689 event->mmap.header.size = (sizeof(event->mmap) - perf_event__synthesize_kernel_mmap()
690 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); perf_event__synthesize_kernel_mmap()
691 event->mmap.pgoff = kmap->ref_reloc_sym->addr; perf_event__synthesize_kernel_mmap()
692 event->mmap.start = map->start; perf_event__synthesize_kernel_mmap()
693 event->mmap.len = map->end - event->mmap.start; perf_event__synthesize_kernel_mmap()
694 event->mmap.pid = machine->pid; perf_event__synthesize_kernel_mmap()
696 err = process(tool, event, &synth_sample, machine); perf_event__synthesize_kernel_mmap()
697 free(event); perf_event__synthesize_kernel_mmap()
702 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) perf_event__fprintf_comm() argument
706 if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC) perf_event__fprintf_comm()
711 return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid); perf_event__fprintf_comm()
715 union perf_event *event, perf_event__process_comm()
719 return machine__process_comm_event(machine, event, sample); perf_event__process_comm()
723 union perf_event *event, perf_event__process_lost()
727 return machine__process_lost_event(machine, event, sample); perf_event__process_lost()
731 union perf_event *event, perf_event__process_aux()
735 return machine__process_aux_event(machine, event); perf_event__process_aux()
739 union perf_event *event, perf_event__process_itrace_start()
743 return machine__process_itrace_start_event(machine, event); perf_event__process_itrace_start()
747 union perf_event *event, perf_event__process_lost_samples()
751 return machine__process_lost_samples_event(machine, event, sample); perf_event__process_lost_samples()
755 union perf_event *event, perf_event__process_switch()
759 return machine__process_switch_event(machine, event); perf_event__process_switch()
762 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) perf_event__fprintf_mmap() argument
765 event->mmap.pid, event->mmap.tid, event->mmap.start, perf_event__fprintf_mmap()
766 event->mmap.len, event->mmap.pgoff, perf_event__fprintf_mmap()
767 (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x', perf_event__fprintf_mmap()
768 event->mmap.filename); perf_event__fprintf_mmap()
771 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) perf_event__fprintf_mmap2() argument
775 event->mmap2.pid, event->mmap2.tid, event->mmap2.start, perf_event__fprintf_mmap2()
776 event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, perf_event__fprintf_mmap2()
777 event->mmap2.min, event->mmap2.ino, perf_event__fprintf_mmap2()
778 event->mmap2.ino_generation, perf_event__fprintf_mmap2()
779 (event->mmap2.prot & PROT_READ) ? 'r' : '-', perf_event__fprintf_mmap2()
780 (event->mmap2.prot & PROT_WRITE) ? 'w' : '-', perf_event__fprintf_mmap2()
781 (event->mmap2.prot & PROT_EXEC) ? 'x' : '-', perf_event__fprintf_mmap2()
782 (event->mmap2.flags & MAP_SHARED) ? 's' : 'p', perf_event__fprintf_mmap2()
783 event->mmap2.filename); perf_event__fprintf_mmap2()
787 union perf_event *event, perf_event__process_mmap()
791 return machine__process_mmap_event(machine, event, sample); perf_event__process_mmap()
795 union perf_event *event, perf_event__process_mmap2()
799 return machine__process_mmap2_event(machine, event, sample); perf_event__process_mmap2()
802 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) perf_event__fprintf_task() argument
805 event->fork.pid, event->fork.tid, perf_event__fprintf_task()
806 event->fork.ppid, event->fork.ptid); perf_event__fprintf_task()
810 union perf_event *event, perf_event__process_fork()
814 return machine__process_fork_event(machine, event, sample); perf_event__process_fork()
818 union perf_event *event, perf_event__process_exit()
822 return machine__process_exit_event(machine, event, sample); perf_event__process_exit()
825 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp) perf_event__fprintf_aux() argument
828 event->aux.aux_offset, event->aux.aux_size, perf_event__fprintf_aux()
829 event->aux.flags, perf_event__fprintf_aux()
830 event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "", perf_event__fprintf_aux()
831 event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : ""); perf_event__fprintf_aux()
834 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp) perf_event__fprintf_itrace_start() argument
837 event->itrace_start.pid, event->itrace_start.tid); perf_event__fprintf_itrace_start()
840 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) perf_event__fprintf_switch() argument
842 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; perf_event__fprintf_switch()
845 if (event->header.type == PERF_RECORD_SWITCH) perf_event__fprintf_switch()
850 event->context_switch.next_prev_pid, perf_event__fprintf_switch()
851 event->context_switch.next_prev_tid); perf_event__fprintf_switch()
854 size_t perf_event__fprintf(union perf_event *event, FILE *fp) perf_event__fprintf() argument
857 perf_event__name(event->header.type)); perf_event__fprintf()
859 switch (event->header.type) { perf_event__fprintf()
861 ret += perf_event__fprintf_comm(event, fp); perf_event__fprintf()
865 ret += perf_event__fprintf_task(event, fp); perf_event__fprintf()
868 ret += perf_event__fprintf_mmap(event, fp); perf_event__fprintf()
871 ret += perf_event__fprintf_mmap2(event, fp); perf_event__fprintf()
874 ret += perf_event__fprintf_aux(event, fp); perf_event__fprintf()
877 ret += perf_event__fprintf_itrace_start(event, fp); perf_event__fprintf()
881 ret += perf_event__fprintf_switch(event, fp); perf_event__fprintf()
891 union perf_event *event, perf_event__process()
895 return machine__process_event(machine, event, sample); perf_event__process()
990 int perf_event__preprocess_sample(const union perf_event *event, perf_event__preprocess_sample() argument
995 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; perf_event__preprocess_sample()
1090 void perf_event__preprocess_sample_addr(union perf_event *event, perf_event__preprocess_sample_addr() argument
1095 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; perf_event__preprocess_sample_addr()
171 perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine) perf_event__synthesize_comm() argument
187 perf_event__synthesize_fork(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, pid_t ppid, perf_event__handler_t process, struct machine *machine) perf_event__synthesize_fork() argument
219 perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, bool mmap_data, unsigned int proc_map_timeout) perf_event__synthesize_mmap_events() argument
714 perf_event__process_comm(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_comm() argument
722 perf_event__process_lost(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_lost() argument
730 perf_event__process_aux(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine) perf_event__process_aux() argument
738 perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine) perf_event__process_itrace_start() argument
746 perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_lost_samples() argument
754 perf_event__process_switch(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine) perf_event__process_switch() argument
786 perf_event__process_mmap(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_mmap() argument
794 perf_event__process_mmap2(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_mmap2() argument
809 perf_event__process_fork(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_fork() argument
817 perf_event__process_exit(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_exit() argument
890 perf_event__process(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process() argument
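A recurring detail in the synthesis code above is how a record's header.size is computed: take the fixed part of the record, subtract the unused tail of the fixed-size comm/filename buffer, keep the used part rounded up for NUL termination and alignment, and append the per-machine id_hdr_size bytes. Below is a hedged, self-contained sketch of that sizing arithmetic; struct comm_rec, ALIGN_UP and the id_hdr_size value of 16 are toy stand-ins for perf's union perf_event, PERF_ALIGN and machine->id_hdr_size.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative record layout; perf's real layout lives in union perf_event. */
struct rec_header { uint32_t type; uint16_t misc; uint16_t size; };
struct comm_rec {
    struct rec_header header;
    uint32_t pid, tid;
    char comm[64];            /* fixed-size buffer, only partly used */
};

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/*
 * Compute the on-file size of a synthesized record: keep only the used,
 * 8-byte aligned part of the string buffer, then append id_hdr_size bytes
 * of zeroed sample-id space, mirroring the COMM/MMAP sizing above.
 */
static uint16_t comm_rec_size(struct comm_rec *r, const char *name,
                              size_t id_hdr_size)
{
    size_t used = ALIGN_UP(strlen(name) + 1, sizeof(uint64_t));

    assert(strlen(name) + 1 <= sizeof(r->comm));
    memset(r->comm, 0, sizeof(r->comm));
    memcpy(r->comm, name, strlen(name) + 1);

    return (uint16_t)(sizeof(*r) - (sizeof(r->comm) - used) + id_hdr_size);
}

int main(void)
{
    struct comm_rec r = { .pid = 1234, .tid = 1234 };

    r.header.size = comm_rec_size(&r, "sleep", 16 /* example id_hdr_size */);
    printf("record size on file: %u bytes\n", r.header.size);
    return 0;
}

Writing only header.size bytes keeps the stream compact even though the in-memory buffer reserves the full filename/comm capacity.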
session.c
2 #include <traceevent/event-parse.h>
22 union perf_event *event,
98 struct ordered_event *event) ordered_events__deliver_event()
103 int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample); ordered_events__deliver_event()
110 return perf_session__deliver_event(session, event->event, &sample, ordered_events__deliver_event()
111 session->tool, event->file_offset); ordered_events__deliver_event()
148 * kernel MMAP event, in perf_event__process_mmap(). perf_session__new()
190 union perf_event *event process_event_synth_tracing_data_stub()
200 union perf_event *event __maybe_unused, process_event_synth_attr_stub()
209 union perf_event *event __maybe_unused, process_event_sample_stub()
219 union perf_event *event __maybe_unused, process_event_stub()
228 union perf_event *event __maybe_unused, process_build_id_stub()
236 union perf_event *event __maybe_unused, process_finished_round_stub()
244 union perf_event *event,
248 union perf_event *event __maybe_unused, process_id_index_stub()
257 union perf_event *event __maybe_unused, process_event_auxtrace_info_stub()
280 union perf_event *event, process_event_auxtrace_stub()
286 skipn(perf_data_file__fd(session->file), event->auxtrace.size); process_event_auxtrace_stub()
287 return event->auxtrace.size; process_event_auxtrace_stub()
292 union perf_event *event __maybe_unused, process_event_auxtrace_error_stub()
351 static void swap_sample_id_all(union perf_event *event, void *data) swap_sample_id_all() argument
353 void *end = (void *) event + event->header.size; swap_sample_id_all()
360 static void perf_event__all64_swap(union perf_event *event, perf_event__all64_swap() argument
363 struct perf_event_header *hdr = &event->header; perf_event__all64_swap()
364 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); perf_event__all64_swap()
367 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) perf_event__comm_swap() argument
369 event->comm.pid = bswap_32(event->comm.pid); perf_event__comm_swap()
370 event->comm.tid = bswap_32(event->comm.tid); perf_event__comm_swap()
373 void *data = &event->comm.comm; perf_event__comm_swap()
376 swap_sample_id_all(event, data); perf_event__comm_swap()
380 static void perf_event__mmap_swap(union perf_event *event, perf_event__mmap_swap() argument
383 event->mmap.pid = bswap_32(event->mmap.pid); perf_event__mmap_swap()
384 event->mmap.tid = bswap_32(event->mmap.tid); perf_event__mmap_swap()
385 event->mmap.start = bswap_64(event->mmap.start); perf_event__mmap_swap()
386 event->mmap.len = bswap_64(event->mmap.len); perf_event__mmap_swap()
387 event->mmap.pgoff = bswap_64(event->mmap.pgoff); perf_event__mmap_swap()
390 void *data = &event->mmap.filename; perf_event__mmap_swap()
393 swap_sample_id_all(event, data); perf_event__mmap_swap()
397 static void perf_event__mmap2_swap(union perf_event *event, perf_event__mmap2_swap() argument
400 event->mmap2.pid = bswap_32(event->mmap2.pid); perf_event__mmap2_swap()
401 event->mmap2.tid = bswap_32(event->mmap2.tid); perf_event__mmap2_swap()
402 event->mmap2.start = bswap_64(event->mmap2.start); perf_event__mmap2_swap()
403 event->mmap2.len = bswap_64(event->mmap2.len); perf_event__mmap2_swap()
404 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); perf_event__mmap2_swap()
405 event->mmap2.maj = bswap_32(event->mmap2.maj); perf_event__mmap2_swap()
406 event->mmap2.min = bswap_32(event->mmap2.min); perf_event__mmap2_swap()
407 event->mmap2.ino = bswap_64(event->mmap2.ino); perf_event__mmap2_swap()
410 void *data = &event->mmap2.filename; perf_event__mmap2_swap()
413 swap_sample_id_all(event, data); perf_event__mmap2_swap()
416 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) perf_event__task_swap() argument
418 event->fork.pid = bswap_32(event->fork.pid); perf_event__task_swap()
419 event->fork.tid = bswap_32(event->fork.tid); perf_event__task_swap()
420 event->fork.ppid = bswap_32(event->fork.ppid); perf_event__task_swap()
421 event->fork.ptid = bswap_32(event->fork.ptid); perf_event__task_swap()
422 event->fork.time = bswap_64(event->fork.time); perf_event__task_swap()
425 swap_sample_id_all(event, &event->fork + 1); perf_event__task_swap()
428 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) perf_event__read_swap() argument
430 event->read.pid = bswap_32(event->read.pid); perf_event__read_swap()
431 event->read.tid = bswap_32(event->read.tid); perf_event__read_swap()
432 event->read.value = bswap_64(event->read.value); perf_event__read_swap()
433 event->read.time_enabled = bswap_64(event->read.time_enabled); perf_event__read_swap()
434 event->read.time_running = bswap_64(event->read.time_running); perf_event__read_swap()
435 event->read.id = bswap_64(event->read.id); perf_event__read_swap()
438 swap_sample_id_all(event, &event->read + 1); perf_event__read_swap()
441 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all) perf_event__aux_swap() argument
443 event->aux.aux_offset = bswap_64(event->aux.aux_offset); perf_event__aux_swap()
444 event->aux.aux_size = bswap_64(event->aux.aux_size); perf_event__aux_swap()
445 event->aux.flags = bswap_64(event->aux.flags); perf_event__aux_swap()
448 swap_sample_id_all(event, &event->aux + 1); perf_event__aux_swap()
451 static void perf_event__itrace_start_swap(union perf_event *event, perf_event__itrace_start_swap() argument
454 event->itrace_start.pid = bswap_32(event->itrace_start.pid); perf_event__itrace_start_swap()
455 event->itrace_start.tid = bswap_32(event->itrace_start.tid); perf_event__itrace_start_swap()
458 swap_sample_id_all(event, &event->itrace_start + 1); perf_event__itrace_start_swap()
461 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all) perf_event__switch_swap() argument
463 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { perf_event__switch_swap()
464 event->context_switch.next_prev_pid = perf_event__switch_swap()
465 bswap_32(event->context_switch.next_prev_pid); perf_event__switch_swap()
466 event->context_switch.next_prev_tid = perf_event__switch_swap()
467 bswap_32(event->context_switch.next_prev_tid); perf_event__switch_swap()
471 swap_sample_id_all(event, &event->context_switch + 1); perf_event__switch_swap()
474 static void perf_event__throttle_swap(union perf_event *event, perf_event__throttle_swap() argument
477 event->throttle.time = bswap_64(event->throttle.time); perf_event__throttle_swap()
478 event->throttle.id = bswap_64(event->throttle.id); perf_event__throttle_swap()
479 event->throttle.stream_id = bswap_64(event->throttle.stream_id); perf_event__throttle_swap()
482 swap_sample_id_all(event, &event->throttle + 1); perf_event__throttle_swap()
560 static void perf_event__hdr_attr_swap(union perf_event *event, perf_event__hdr_attr_swap() argument
565 perf_event__attr_swap(&event->attr.attr); perf_event__hdr_attr_swap()
567 size = event->header.size; perf_event__hdr_attr_swap()
568 size -= (void *)&event->attr.id - (void *)event; perf_event__hdr_attr_swap()
569 mem_bswap_64(event->attr.id, size); perf_event__hdr_attr_swap()
572 static void perf_event__event_type_swap(union perf_event *event, perf_event__event_type_swap() argument
575 event->event_type.event_type.event_id = perf_event__event_type_swap()
576 bswap_64(event->event_type.event_type.event_id); perf_event__event_type_swap()
579 static void perf_event__tracing_data_swap(union perf_event *event, perf_event__tracing_data_swap() argument
582 event->tracing_data.size = bswap_32(event->tracing_data.size); perf_event__tracing_data_swap()
585 static void perf_event__auxtrace_info_swap(union perf_event *event, perf_event__auxtrace_info_swap() argument
590 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); perf_event__auxtrace_info_swap()
592 size = event->header.size; perf_event__auxtrace_info_swap()
593 size -= (void *)&event->auxtrace_info.priv - (void *)event; perf_event__auxtrace_info_swap()
594 mem_bswap_64(event->auxtrace_info.priv, size); perf_event__auxtrace_info_swap()
597 static void perf_event__auxtrace_swap(union perf_event *event, perf_event__auxtrace_swap() argument
600 event->auxtrace.size = bswap_64(event->auxtrace.size); perf_event__auxtrace_swap()
601 event->auxtrace.offset = bswap_64(event->auxtrace.offset); perf_event__auxtrace_swap()
602 event->auxtrace.reference = bswap_64(event->auxtrace.reference); perf_event__auxtrace_swap()
603 event->auxtrace.idx = bswap_32(event->auxtrace.idx); perf_event__auxtrace_swap()
604 event->auxtrace.tid = bswap_32(event->auxtrace.tid); perf_event__auxtrace_swap()
605 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); perf_event__auxtrace_swap()
608 static void perf_event__auxtrace_error_swap(union perf_event *event, perf_event__auxtrace_error_swap() argument
611 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); perf_event__auxtrace_error_swap()
612 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); perf_event__auxtrace_error_swap()
613 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); perf_event__auxtrace_error_swap()
614 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); perf_event__auxtrace_error_swap()
615 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); perf_event__auxtrace_error_swap()
616 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); perf_event__auxtrace_error_swap()
619 typedef void (*perf_event__swap_op)(union perf_event *event,
651 * event.
689 union perf_event *event __maybe_unused, process_finished_round()
697 int perf_session__queue_event(struct perf_session *s, union perf_event *event, perf_session__queue_event() argument
700 return ordered_events__queue(&s->ordered_events, event, sample, file_offset); perf_session__queue_event()
847 union perf_event *event, perf_evlist__print_tstamp()
852 if (event->header.type != PERF_RECORD_SAMPLE && perf_evlist__print_tstamp()
895 static void dump_event(struct perf_evlist *evlist, union perf_event *event, dump_event() argument
901 printf("\n%#" PRIx64 " [%#x]: event: %d\n", dump_event()
902 file_offset, event->header.size, event->header.type); dump_event()
904 trace_event(event); dump_event()
907 perf_evlist__print_tstamp(evlist, event, sample); dump_event()
910 event->header.size, perf_event__name(event->header.type)); dump_event()
913 static void dump_sample(struct perf_evsel *evsel, union perf_event *event, dump_sample() argument
922 event->header.misc, sample->pid, sample->tid, sample->ip, dump_sample()
956 union perf_event *event, machines__find_for_cpumode()
959 const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; machines__find_for_cpumode()
967 if (event->header.type == PERF_RECORD_MMAP machines__find_for_cpumode()
968 || event->header.type == PERF_RECORD_MMAP2) machines__find_for_cpumode()
969 pid = event->mmap.pid; machines__find_for_cpumode()
984 union perf_event *event, deliver_sample_value()
1002 return tool->sample(tool, event, sample, sid->evsel, machine); deliver_sample_value()
1007 union perf_event *event, deliver_sample_group()
1015 ret = deliver_sample_value(evlist, tool, event, sample, deliver_sample_group()
1028 union perf_event *event, perf_evlist__deliver_sample()
1039 return tool->sample(tool, event, sample, evsel, machine); perf_evlist__deliver_sample()
1043 return deliver_sample_group(evlist, tool, event, sample, perf_evlist__deliver_sample()
1046 return deliver_sample_value(evlist, tool, event, sample, perf_evlist__deliver_sample()
1052 union perf_event *event, machines__deliver_event()
1059 dump_event(evlist, event, file_offset, sample); machines__deliver_event()
1063 machine = machines__find_for_cpumode(machines, event, sample); machines__deliver_event()
1065 switch (event->header.type) { machines__deliver_event()
1071 dump_sample(evsel, event, sample); machines__deliver_event()
1076 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); machines__deliver_event()
1078 return tool->mmap(tool, event, sample, machine); machines__deliver_event()
1080 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) machines__deliver_event()
1082 return tool->mmap2(tool, event, sample, machine); machines__deliver_event()
1084 return tool->comm(tool, event, sample, machine); machines__deliver_event()
1086 return tool->fork(tool, event, sample, machine); machines__deliver_event()
1088 return tool->exit(tool, event, sample, machine); machines__deliver_event()
1091 evlist->stats.total_lost += event->lost.lost; machines__deliver_event()
1092 return tool->lost(tool, event, sample, machine); machines__deliver_event()
1095 evlist->stats.total_lost_samples += event->lost_samples.lost; machines__deliver_event()
1096 return tool->lost_samples(tool, event, sample, machine); machines__deliver_event()
1098 return tool->read(tool, event, sample, evsel, machine); machines__deliver_event()
1100 return tool->throttle(tool, event, sample, machine); machines__deliver_event()
1102 return tool->unthrottle(tool, event, sample, machine); machines__deliver_event()
1105 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)) machines__deliver_event()
1107 return tool->aux(tool, event, sample, machine); machines__deliver_event()
1109 return tool->itrace_start(tool, event, sample, machine); machines__deliver_event()
1112 return tool->context_switch(tool, event, sample, machine); machines__deliver_event()
1120 union perf_event *event, perf_session__deliver_event()
1127 ret = auxtrace__process_event(session, event, sample, tool); perf_session__deliver_event()
1134 event, sample, tool, file_offset); perf_session__deliver_event()
1138 union perf_event *event, perf_session__process_user_event()
1146 dump_event(session->evlist, event, file_offset, NULL); perf_session__process_user_event()
1149 switch (event->header.type) { perf_session__process_user_event()
1151 err = tool->attr(tool, event, &session->evlist); perf_session__process_user_event()
1166 return tool->tracing_data(tool, event, session); perf_session__process_user_event()
1168 return tool->build_id(tool, event, session); perf_session__process_user_event()
1170 return tool->finished_round(tool, event, oe); perf_session__process_user_event()
1172 return tool->id_index(tool, event, session); perf_session__process_user_event()
1174 return tool->auxtrace_info(tool, event, session); perf_session__process_user_event()
1177 lseek(fd, file_offset + event->header.size, SEEK_SET); perf_session__process_user_event()
1178 return tool->auxtrace(tool, event, session); perf_session__process_user_event()
1180 perf_session__auxtrace_error_inc(session, event); perf_session__process_user_event()
1181 return tool->auxtrace_error(tool, event, session); perf_session__process_user_event()
1188 union perf_event *event, perf_session__deliver_synth_event()
1194 events_stats__inc(&evlist->stats, event->header.type); perf_session__deliver_synth_event()
1196 if (event->header.type >= PERF_RECORD_USER_TYPE_START) perf_session__deliver_synth_event()
1197 return perf_session__process_user_event(session, event, 0); perf_session__deliver_synth_event()
1199 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); perf_session__deliver_synth_event()
1202 static void event_swap(union perf_event *event, bool sample_id_all) event_swap() argument
1206 swap = perf_event__swap_ops[event->header.type]; event_swap()
1208 swap(event, sample_id_all); event_swap()
1216 union perf_event *event; perf_session__peek_event() local
1221 event = file_offset - session->one_mmap_offset + perf_session__peek_event()
1239 event = (union perf_event *)buf; perf_session__peek_event()
1242 perf_event_header__bswap(&event->header); perf_session__peek_event()
1244 if (event->header.size < hdr_sz || event->header.size > buf_sz) perf_session__peek_event()
1247 rest = event->header.size - hdr_sz; perf_session__peek_event()
1253 event_swap(event, perf_evlist__sample_id_all(session->evlist)); perf_session__peek_event()
1257 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && perf_session__peek_event()
1258 perf_evlist__parse_sample(session->evlist, event, sample)) perf_session__peek_event()
1261 *event_ptr = event; perf_session__peek_event()
1267 union perf_event *event, u64 file_offset) perf_session__process_event()
1275 event_swap(event, perf_evlist__sample_id_all(evlist)); perf_session__process_event()
1277 if (event->header.type >= PERF_RECORD_HEADER_MAX) perf_session__process_event()
1280 events_stats__inc(&evlist->stats, event->header.type); perf_session__process_event()
1282 if (event->header.type >= PERF_RECORD_USER_TYPE_START) perf_session__process_event()
1283 return perf_session__process_user_event(session, event, file_offset); perf_session__process_event()
1288 ret = perf_evlist__parse_sample(evlist, event, &sample); perf_session__process_event()
1293 ret = perf_session__queue_event(session, event, &sample, file_offset); perf_session__process_event()
1298 return perf_session__deliver_event(session, event, &sample, tool, perf_session__process_event()
1424 union perf_event *event; __perf_session__process_pipe_events() local
1441 event = buf; __perf_session__process_pipe_events()
1442 err = readn(fd, event, sizeof(struct perf_event_header)); __perf_session__process_pipe_events()
1447 pr_err("failed to read event header\n"); __perf_session__process_pipe_events()
1452 perf_event_header__bswap(&event->header); __perf_session__process_pipe_events()
1454 size = event->header.size; __perf_session__process_pipe_events()
1456 pr_err("bad event header size\n"); __perf_session__process_pipe_events()
1463 pr_err("failed to allocate memory to read event\n"); __perf_session__process_pipe_events()
1468 event = buf; __perf_session__process_pipe_events()
1470 p = event; __perf_session__process_pipe_events()
1477 pr_err("unexpected end of event stream\n"); __perf_session__process_pipe_events()
1481 pr_err("failed to read event data\n"); __perf_session__process_pipe_events()
1486 if ((skip = perf_session__process_event(session, event, head)) < 0) { __perf_session__process_pipe_events()
1488 head, event->header.size, event->header.type); __perf_session__process_pipe_events()
1521 union perf_event *event; fetch_mmaped_event() local
1525 * the size of the event in the headers. fetch_mmaped_event()
1527 if (head + sizeof(event->header) > mmap_size) fetch_mmaped_event()
1530 event = (union perf_event *)(buf + head); fetch_mmaped_event()
1533 perf_event_header__bswap(&event->header); fetch_mmaped_event()
1535 if (head + event->header.size > mmap_size) { fetch_mmaped_event()
1536 /* We're not fetching the event so swap back again */ fetch_mmaped_event()
1538 perf_event_header__bswap(&event->header); fetch_mmaped_event()
1542 return event; fetch_mmaped_event()
1568 union perf_event *event; __perf_session__process_events() local
1618 event = fetch_mmaped_event(session, head, mmap_size, buf); __perf_session__process_events()
1619 if (!event) { __perf_session__process_events()
1631 size = event->header.size; __perf_session__process_events()
1634 (skip = perf_session__process_event(session, event, file_pos)) < 0) { __perf_session__process_events()
1636 file_offset + head, event->header.size, __perf_session__process_events()
1637 event->header.type); __perf_session__process_events()
1961 * Adding a handler for an event not in the session, __perf_session__set_tracepoints_handlers()
1980 union perf_event *event, perf_event__process_id_index()
1984 struct id_index_event *ie = &event->id_index; perf_event__process_id_index()
97 ordered_events__deliver_event(struct ordered_events *oe, struct ordered_event *event) ordered_events__deliver_event() argument
279 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_session *session __maybe_unused) process_event_auxtrace_stub() argument
846 perf_evlist__print_tstamp(struct perf_evlist *evlist, union perf_event *event, struct perf_sample *sample) perf_evlist__print_tstamp() argument
955 machines__find_for_cpumode(struct machines *machines, union perf_event *event, struct perf_sample *sample) machines__find_for_cpumode() argument
982 deliver_sample_value(struct perf_evlist *evlist, struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct sample_read_value *v, struct machine *machine) deliver_sample_value() argument
1005 deliver_sample_group(struct perf_evlist *evlist, struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) deliver_sample_group() argument
1026 perf_evlist__deliver_sample(struct perf_evlist *evlist, struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_evlist__deliver_sample() argument
1050 machines__deliver_event(struct machines *machines, struct perf_evlist *evlist, union perf_event *event, struct perf_sample *sample, struct perf_tool *tool, u64 file_offset) machines__deliver_event() argument
1119 perf_session__deliver_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample, struct perf_tool *tool, u64 file_offset) perf_session__deliver_event() argument
1137 perf_session__process_user_event(struct perf_session *session, union perf_event *event, u64 file_offset) perf_session__process_user_event() argument
1187 perf_session__deliver_synth_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample) perf_session__deliver_synth_event() argument
1266 perf_session__process_event(struct perf_session *session, union perf_event *event, u64 file_offset) perf_session__process_event() argument
1979 perf_event__process_id_index(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_session *session) perf_event__process_id_index() argument
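session.c copes with data files recorded on the other endianness by looking up a per-record-type swap routine and applying it before anything else parses the record, as the perf_event__swap_op typedef and event_swap() above show. Here is a minimal sketch of that table-driven byte swapping; the record layouts, REC_* constants and bswap helpers are my own toy definitions (the builtins assume GCC/Clang), and the header is assumed to be in host order already, since perf swaps the header separately first.

#include <stdint.h>
#include <stdio.h>

static uint32_t bswap32(uint32_t v) { return __builtin_bswap32(v); }
static uint64_t bswap64(uint64_t v) { return __builtin_bswap64(v); }

/* Toy record types standing in for PERF_RECORD_COMM / PERF_RECORD_MMAP. */
enum { REC_COMM, REC_MMAP, REC_MAX };

struct header { uint32_t type; uint16_t misc, size; };
union rec {
    struct header header;
    struct { struct header h; uint32_t pid, tid; char comm[16]; } comm;
    struct { struct header h; uint32_t pid, tid; uint64_t start, len; } mmap;
};

typedef void (*swap_fn)(union rec *r);

static void comm_swap(union rec *r)
{
    r->comm.pid = bswap32(r->comm.pid);
    r->comm.tid = bswap32(r->comm.tid);
}

static void mmap_swap(union rec *r)
{
    r->mmap.pid   = bswap32(r->mmap.pid);
    r->mmap.tid   = bswap32(r->mmap.tid);
    r->mmap.start = bswap64(r->mmap.start);
    r->mmap.len   = bswap64(r->mmap.len);
}

/* One swap routine per record type, indexed by header.type. */
static swap_fn swap_ops[REC_MAX] = {
    [REC_COMM] = comm_swap,
    [REC_MMAP] = mmap_swap,
};

static void rec_swap(union rec *r)
{
    swap_fn swap = r->header.type < REC_MAX ? swap_ops[r->header.type] : NULL;

    if (swap)
        swap(r);
}

int main(void)
{
    union rec r = { .mmap = { .h = { .type = REC_MMAP },
                              .pid = 0x01020304,
                              .start = 0x1122334455667788ULL } };

    rec_swap(&r);
    printf("pid=%#x start=%#llx\n", r.mmap.pid,
           (unsigned long long)r.mmap.start);
    return 0;
}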
tool.h
17 typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
21 typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
25 union perf_event *event,
28 typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
31 typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event,
34 typedef s64 (*event_op3)(struct perf_tool *tool, union perf_event *event,
H A Dordered-events.c32 * last event might point to some random place in the list as it's queue_event()
33 * the last queued event. We expect that the new event is close to queue_event()
61 union perf_event *event) __dup_event()
66 new_event = memdup(event, event->header.size); __dup_event()
68 oe->cur_alloc_size += event->header.size; __dup_event()
75 union perf_event *event) dup_event()
77 return oe->copy_on_queue ? __dup_event(oe, event) : event; dup_event()
80 static void free_dup_event(struct ordered_events *oe, union perf_event *event) free_dup_event() argument
83 oe->cur_alloc_size -= event->header.size; free_dup_event()
84 free(event); free_dup_event()
90 union perf_event *event) alloc_event()
96 new_event = dup_event(oe, event); alloc_event()
129 new->event = new_event; alloc_event()
135 union perf_event *event) ordered_events__new_event()
139 new = alloc_event(oe, event); ordered_events__new_event()
148 void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event) ordered_events__delete() argument
150 list_move(&event->list, &oe->cache); ordered_events__delete()
152 free_dup_event(oe, event->event); ordered_events__delete()
155 int ordered_events__queue(struct ordered_events *oe, union perf_event *event, ordered_events__queue() argument
165 pr_oe_time(timestamp, "out of order event\n"); ordered_events__queue()
172 oevent = ordered_events__new_event(oe, timestamp, event); ordered_events__queue()
175 oevent = ordered_events__new_event(oe, timestamp, event); ordered_events__queue()
255 /* Warn if we are called before any event got allocated. */ ordered_events__flush()
303 struct ordered_event *event; ordered_events__free() local
305 event = list_entry(oe->to_free.next, struct ordered_event, list); ordered_events__free()
306 list_del(&event->list); ordered_events__free()
307 free_dup_event(oe, event->event); ordered_events__free()
308 free(event); ordered_events__free()
60 __dup_event(struct ordered_events *oe, union perf_event *event) __dup_event() argument
74 dup_event(struct ordered_events *oe, union perf_event *event) dup_event() argument
89 alloc_event(struct ordered_events *oe, union perf_event *event) alloc_event() argument
134 ordered_events__new_event(struct ordered_events *oe, u64 timestamp, union perf_event *event) ordered_events__new_event() argument
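The ordered-events fragments show the "copy on queue" idea: when the caller's buffer may be reused before the queue drains, the event is duplicated with memdup() and the queue keeps a running byte count of what it currently holds, which free_dup_event() decrements again. A small sketch of that accounting follows; struct queue, the locally defined memdup() and the helper names are illustrative, not perf's struct ordered_events API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct header { uint32_t type; uint16_t misc, size; };

struct queue {
    int copy_on_queue;       /* duplicate events instead of borrowing them */
    size_t cur_alloc_size;   /* bytes currently held by duplicated events  */
};

/* Allocate size bytes and copy src into them (perf has a memdup() helper). */
static void *memdup(const void *src, size_t size)
{
    void *p = malloc(size);

    if (p)
        memcpy(p, src, size);
    return p;
}

static struct header *dup_event(struct queue *q, struct header *ev)
{
    struct header *copy;

    if (!q->copy_on_queue)
        return ev;               /* borrow the caller's buffer */

    copy = memdup(ev, ev->size);
    if (copy)
        q->cur_alloc_size += ev->size;
    return copy;
}

static void free_dup_event(struct queue *q, struct header *ev)
{
    if (q->copy_on_queue && ev) {
        q->cur_alloc_size -= ev->size;
        free(ev);
    }
}

int main(void)
{
    struct queue q = { .copy_on_queue = 1 };
    struct header ev = { .type = 1, .size = sizeof(ev) };
    struct header *copy = dup_event(&q, &ev);

    printf("queued bytes: %zu\n", q.cur_alloc_size);
    free_dup_event(&q, copy);
    printf("queued bytes after delete: %zu\n", q.cur_alloc_size);
    return 0;
}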
H A Dordered-events.h11 union perf_event *event; member in struct:ordered_event
25 struct ordered_event *event);
46 int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
48 void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
trace-event-parse.c
29 #include "trace-event.h"
35 struct event_format *event; get_common_field() local
42 event = pevent->events[0]; get_common_field()
43 field = pevent_find_common_field(event, type); get_common_field()
96 raw_field_value(struct event_format *event, const char *name, void *data) raw_field_value() argument
101 field = pevent_find_any_field(event, name); raw_field_value()
110 unsigned long long read_size(struct event_format *event, void *ptr, int size) read_size() argument
112 return pevent_read_number(event->pevent, ptr, size); read_size()
115 void event_format__fprintf(struct event_format *event, event_format__fprintf() argument
127 pevent_event_info(&s, event, &record); event_format__fprintf()
132 void event_format__print(struct event_format *event, event_format__print() argument
135 return event_format__fprintf(event, cpu, data, size, stdout); event_format__print()
175 struct event_format *event) trace_find_next_event()
182 if (!event) { trace_find_next_event()
187 if (idx < pevent->nr_events && event == pevent->events[idx]) { trace_find_next_event()
195 if (event == pevent->events[idx - 1]) trace_find_next_event()
174 trace_find_next_event(struct pevent *pevent, struct event_format *event) trace_find_next_event() argument
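raw_field_value() and read_size() above boil down to: given a field's offset and byte size inside the raw tracepoint record, read it as an unsigned number. A hedged sketch of that read without libtraceevent's pevent helpers is shown here; struct field and read_num() are my own illustrative names, and the code assumes a little-endian host, whereas the real code also honours the trace file's declared byte order.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* What a field lookup would hand back: where the field lives in the record. */
struct field { const char *name; int offset; int size; };

/* Read a 1/2/4/8-byte little-endian field out of the raw record buffer. */
static unsigned long long read_num(const void *data, int offset, int size)
{
    unsigned long long val = 0;

    memcpy(&val, (const char *)data + offset, size);  /* assumes LE host */
    return val;
}

int main(void)
{
    unsigned char raw[16] = { 0 };
    struct field pid = { .name = "common_pid", .offset = 4, .size = 4 };
    uint32_t v = 4242;

    memcpy(raw + pid.offset, &v, sizeof(v));
    printf("%s = %llu\n", pid.name, read_num(raw, pid.offset, pid.size));
    return 0;
}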
unwind-libdw.h
5 #include "event.h"
trace-event.h
4 #include <traceevent/event-parse.h>
28 void event_format__fprintf(struct event_format *event,
31 void event_format__print(struct event_format *event,
39 raw_field_value(struct event_format *event, const char *name, void *data);
47 struct event_format *event);
48 unsigned long long read_size(struct event_format *event, void *ptr, int size);
74 void (*process_event) (union perf_event *event,
python.c
7 #include "event.h"
40 offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
51 union perf_event event; member in struct:pyrf_event
55 sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \
56 sample_member_def(sample_pid, pid, T_INT, "event pid"), \
57 sample_member_def(sample_tid, tid, T_INT, "event tid"), \
58 sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
59 sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
60 sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
61 sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
62 sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
63 sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
65 static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
69 member_def(perf_event_header, type, T_UINT, "event type"),
70 member_def(perf_event_header, misc, T_UINT, "event misc"),
71 member_def(mmap_event, pid, T_UINT, "event pid"),
72 member_def(mmap_event, tid, T_UINT, "event tid"),
88 pevent->event.mmap.pid, pevent->event.mmap.tid, pyrf_mmap_event__repr()
89 pevent->event.mmap.start, pevent->event.mmap.len, pyrf_mmap_event__repr()
90 pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) { pyrf_mmap_event__repr()
109 static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
113 member_def(perf_event_header, type, T_UINT, "event type"),
114 member_def(fork_event, pid, T_UINT, "event pid"),
115 member_def(fork_event, ppid, T_UINT, "event ppid"),
116 member_def(fork_event, tid, T_UINT, "event tid"),
117 member_def(fork_event, ptid, T_UINT, "event ptid"),
126 pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit", pyrf_task_event__repr()
127 pevent->event.fork.pid, pyrf_task_event__repr()
128 pevent->event.fork.ppid, pyrf_task_event__repr()
129 pevent->event.fork.tid, pyrf_task_event__repr()
130 pevent->event.fork.ptid, pyrf_task_event__repr()
131 pevent->event.fork.time); pyrf_task_event__repr()
144 static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
148 member_def(perf_event_header, type, T_UINT, "event type"),
149 member_def(comm_event, pid, T_UINT, "event pid"),
150 member_def(comm_event, tid, T_UINT, "event tid"),
158 pevent->event.comm.pid, pyrf_comm_event__repr()
159 pevent->event.comm.tid, pyrf_comm_event__repr()
160 pevent->event.comm.comm); pyrf_comm_event__repr()
173 static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
177 member_def(perf_event_header, type, T_UINT, "event type"),
179 member_def(throttle_event, id, T_ULONGLONG, "event id"),
180 member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"),
186 struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1); pyrf_throttle_event__repr()
190 pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un", pyrf_throttle_event__repr()
204 static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
208 member_def(lost_event, id, T_ULONGLONG, "event id"),
220 pevent->event.lost.id, pevent->event.lost.lost) < 0) { pyrf_lost_event__repr()
239 static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
243 member_def(read_event, pid, T_UINT, "event pid"),
244 member_def(read_event, tid, T_UINT, "event tid"),
251 pevent->event.read.pid, pyrf_read_event__repr()
252 pevent->event.read.tid); pyrf_read_event__repr()
269 static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
273 member_def(perf_event_header, type, T_UINT, "event type"),
301 static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
305 member_def(perf_event_header, type, T_UINT, "event type"),
317 pevent->event.context_switch.next_prev_pid, pyrf_context_switch_event__repr()
318 pevent->event.context_switch.next_prev_tid, pyrf_context_switch_event__repr()
319 !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) { pyrf_context_switch_event__repr()
391 static PyObject *pyrf_event__new(union perf_event *event) pyrf_event__new() argument
396 if ((event->header.type < PERF_RECORD_MMAP || pyrf_event__new()
397 event->header.type > PERF_RECORD_SAMPLE) && pyrf_event__new()
398 !(event->header.type == PERF_RECORD_SWITCH || pyrf_event__new()
399 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)) pyrf_event__new()
402 ptype = pyrf_event__type[event->header.type]; pyrf_event__new()
405 memcpy(&pevent->event, event, event->header.size); pyrf_event__new()
706 .ml_doc = PyDoc_STR("open the event selector file descriptor table.")
711 static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
853 union perf_event *event; pyrf_evlist__read_on_cpu() local
862 event = perf_evlist__mmap_read(evlist, cpu); pyrf_evlist__read_on_cpu()
863 if (event != NULL) { pyrf_evlist__read_on_cpu()
864 PyObject *pyevent = pyrf_event__new(event); pyrf_evlist__read_on_cpu()
872 err = perf_evlist__parse_sample(evlist, event, &pevent->sample); pyrf_evlist__read_on_cpu()
934 .ml_doc = PyDoc_STR("adds an event selector to the list.")
940 .ml_doc = PyDoc_STR("reads an event.")
973 static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
event.h
239 * when possible sends this number in a PERF_RECORD_LOST event. The number of
245 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
389 union perf_event *event,
411 union perf_event *event,
415 union perf_event *event,
419 union perf_event *event,
423 union perf_event *event,
427 union perf_event *event,
431 union perf_event *event,
435 union perf_event *event,
439 union perf_event *event,
443 union perf_event *event,
447 union perf_event *event,
451 union perf_event *event,
457 int perf_event__preprocess_sample(const union perf_event *event,
468 void perf_event__preprocess_sample_addr(union perf_event *event,
477 int perf_event__synthesize_sample(union perf_event *event, u64 type,
483 union perf_event *event, pid_t pid,
488 union perf_event *event,
495 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
496 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
497 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
498 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
499 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
500 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
501 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
502 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
trace-event.c
11 #include <traceevent/event-parse.h>
13 #include "trace-event.h"
77 struct event_format *event = NULL; tp_format() local
90 pevent_parse_format(pevent, &event, data, size, sys); tp_format()
93 return event; tp_format()
machine.c
3 #include "event.h"
464 int machine__process_comm_event(struct machine *machine, union perf_event *event, machine__process_comm_event() argument
468 event->comm.pid, machine__process_comm_event()
469 event->comm.tid); machine__process_comm_event()
470 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; machine__process_comm_event()
477 perf_event__fprintf_comm(event, stdout); machine__process_comm_event()
480 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { machine__process_comm_event()
481 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); machine__process_comm_event()
491 union perf_event *event, struct perf_sample *sample __maybe_unused) machine__process_lost_event()
494 event->lost.id, event->lost.lost); machine__process_lost_event()
499 union perf_event *event, struct perf_sample *sample) machine__process_lost_samples_event()
502 sample->id, event->lost_samples.lost); machine__process_lost_samples_event()
540 union perf_event *event) machine__process_aux_event()
543 perf_event__fprintf_aux(event, stdout); machine__process_aux_event()
548 union perf_event *event) machine__process_itrace_start_event()
551 perf_event__fprintf_itrace_start(event, stdout); machine__process_itrace_start_event()
556 union perf_event *event) machine__process_switch_event()
559 perf_event__fprintf_switch(event, stdout); machine__process_switch_event()
1118 union perf_event *event) machine__set_kernel_mmap_len()
1123 machine->vmlinux_maps[i]->start = event->mmap.start; machine__set_kernel_mmap_len()
1124 machine->vmlinux_maps[i]->end = (event->mmap.start + machine__set_kernel_mmap_len()
1125 event->mmap.len); machine__set_kernel_mmap_len()
1128 * a zero sized synthesized MMAP event for the kernel. machine__set_kernel_mmap_len()
1148 union perf_event *event) machine__process_kernel_mmap_event()
1165 is_kernel_mmap = memcmp(event->mmap.filename, machine__process_kernel_mmap_event()
1168 if (event->mmap.filename[0] == '/' || machine__process_kernel_mmap_event()
1169 (!is_kernel_mmap && event->mmap.filename[0] == '[')) { machine__process_kernel_mmap_event()
1170 map = machine__findnew_module_map(machine, event->mmap.start, machine__process_kernel_mmap_event()
1171 event->mmap.filename); machine__process_kernel_mmap_event()
1175 map->end = map->start + event->mmap.len; machine__process_kernel_mmap_event()
1177 const char *symbol_name = (event->mmap.filename + machine__process_kernel_mmap_event()
1192 * cpumode of *this* event. If we insist on passing machine__process_kernel_mmap_event()
1232 machine__set_kernel_mmap_len(machine, event); machine__process_kernel_mmap_event()
1239 if (event->mmap.pgoff != 0) { machine__process_kernel_mmap_event()
1242 event->mmap.pgoff); machine__process_kernel_mmap_event()
1258 union perf_event *event, machine__process_mmap2_event()
1261 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; machine__process_mmap2_event()
1268 perf_event__fprintf_mmap2(event, stdout); machine__process_mmap2_event()
1272 ret = machine__process_kernel_mmap_event(machine, event); machine__process_mmap2_event()
1278 thread = machine__findnew_thread(machine, event->mmap2.pid, machine__process_mmap2_event()
1279 event->mmap2.tid); machine__process_mmap2_event()
1283 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) machine__process_mmap2_event()
1288 map = map__new(machine, event->mmap2.start, machine__process_mmap2_event()
1289 event->mmap2.len, event->mmap2.pgoff, machine__process_mmap2_event()
1290 event->mmap2.pid, event->mmap2.maj, machine__process_mmap2_event()
1291 event->mmap2.min, event->mmap2.ino, machine__process_mmap2_event()
1292 event->mmap2.ino_generation, machine__process_mmap2_event()
1293 event->mmap2.prot, machine__process_mmap2_event()
1294 event->mmap2.flags, machine__process_mmap2_event()
1295 event->mmap2.filename, type, thread); machine__process_mmap2_event()
1308 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n"); machine__process_mmap2_event()
1312 int machine__process_mmap_event(struct machine *machine, union perf_event *event, machine__process_mmap_event() argument
1315 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; machine__process_mmap_event()
1322 perf_event__fprintf_mmap(event, stdout); machine__process_mmap_event()
1326 ret = machine__process_kernel_mmap_event(machine, event); machine__process_mmap_event()
1332 thread = machine__findnew_thread(machine, event->mmap.pid, machine__process_mmap_event()
1333 event->mmap.tid); machine__process_mmap_event()
1337 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) machine__process_mmap_event()
1342 map = map__new(machine, event->mmap.start, machine__process_mmap_event()
1343 event->mmap.len, event->mmap.pgoff, machine__process_mmap_event()
1344 event->mmap.pid, 0, 0, 0, 0, 0, 0, machine__process_mmap_event()
1345 event->mmap.filename, machine__process_mmap_event()
1359 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); machine__process_mmap_event()
1389 int machine__process_fork_event(struct machine *machine, union perf_event *event, machine__process_fork_event() argument
1393 event->fork.pid, machine__process_fork_event()
1394 event->fork.tid); machine__process_fork_event()
1396 event->fork.ppid, machine__process_fork_event()
1397 event->fork.ptid); machine__process_fork_event()
1401 perf_event__fprintf_task(event, stdout); machine__process_fork_event()
1406 * (fork) event that would have removed the thread was lost. Assume the machine__process_fork_event()
1409 if (parent->pid_ != (pid_t)event->fork.ppid) { machine__process_fork_event()
1414 parent = machine__findnew_thread(machine, event->fork.ppid, machine__process_fork_event()
1415 event->fork.ptid); machine__process_fork_event()
1424 thread = machine__findnew_thread(machine, event->fork.pid, machine__process_fork_event()
1425 event->fork.tid); machine__process_fork_event()
1429 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); machine__process_fork_event()
1438 int machine__process_exit_event(struct machine *machine, union perf_event *event, machine__process_exit_event() argument
1442 event->fork.pid, machine__process_exit_event()
1443 event->fork.tid); machine__process_exit_event()
1446 perf_event__fprintf_task(event, stdout); machine__process_exit_event()
1456 int machine__process_event(struct machine *machine, union perf_event *event, machine__process_event() argument
1461 switch (event->header.type) { machine__process_event()
1463 ret = machine__process_comm_event(machine, event, sample); break; machine__process_event()
1465 ret = machine__process_mmap_event(machine, event, sample); break; machine__process_event()
1467 ret = machine__process_mmap2_event(machine, event, sample); break; machine__process_event()
1469 ret = machine__process_fork_event(machine, event, sample); break; machine__process_event()
1471 ret = machine__process_exit_event(machine, event, sample); break; machine__process_event()
1473 ret = machine__process_lost_event(machine, event, sample); break; machine__process_event()
1475 ret = machine__process_aux_event(machine, event); break; machine__process_event()
1477 ret = machine__process_itrace_start_event(machine, event); break; machine__process_event()
1479 ret = machine__process_lost_samples_event(machine, event, sample); break; machine__process_event()
1482 ret = machine__process_switch_event(machine, event); break; machine__process_event()
490 machine__process_lost_event(struct machine *machine __maybe_unused, union perf_event *event, struct perf_sample *sample __maybe_unused) machine__process_lost_event() argument
498 machine__process_lost_samples_event(struct machine *machine __maybe_unused, union perf_event *event, struct perf_sample *sample) machine__process_lost_samples_event() argument
539 machine__process_aux_event(struct machine *machine __maybe_unused, union perf_event *event) machine__process_aux_event() argument
547 machine__process_itrace_start_event(struct machine *machine __maybe_unused, union perf_event *event) machine__process_itrace_start_event() argument
555 machine__process_switch_event(struct machine *machine __maybe_unused, union perf_event *event) machine__process_switch_event() argument
1117 machine__set_kernel_mmap_len(struct machine *machine, union perf_event *event) machine__set_kernel_mmap_len() argument
1147 machine__process_kernel_mmap_event(struct machine *machine, union perf_event *event) machine__process_kernel_mmap_event() argument
1257 machine__process_mmap2_event(struct machine *machine, union perf_event *event, struct perf_sample *sample __maybe_unused) machine__process_mmap2_event() argument
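
The machine__process_event() fragment above is a plain dispatch on event->header.type: each PERF_RECORD_* type is handed to its own handler (mmap, comm, fork, exit, lost, and so on). The same shape applies to any consumer of a perf ring buffer, since every record begins with a struct perf_event_header whose type field selects the layout of the rest. A minimal userspace sketch of that dispatch, using only the uapi header; the handler bodies are illustrative stubs, not the perf tool's code:

#include <stdio.h>
#include <linux/perf_event.h>

/* Illustrative dispatch over one raw perf record; handlers are stubs. */
static void handle_record(const struct perf_event_header *hdr)
{
    switch (hdr->type) {
    case PERF_RECORD_MMAP:
    case PERF_RECORD_MMAP2:
        printf("mmap record, %u bytes\n", hdr->size);
        break;
    case PERF_RECORD_COMM:
        printf("comm record\n");
        break;
    case PERF_RECORD_FORK:
    case PERF_RECORD_EXIT:
        printf("task record\n");
        break;
    case PERF_RECORD_SAMPLE:
        printf("sample record\n");
        break;
    default:
        printf("unhandled record type %u\n", hdr->type);
        break;
    }
}

int main(void)
{
    /* Fake record header, just to exercise the dispatch. */
    struct perf_event_header hdr = { .type = PERF_RECORD_COMM, .size = sizeof(hdr) };
    handle_record(&hdr);
    return 0;
}
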
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/
H A Dfweh.c60 * struct brcmf_event - contents of broadcom event packet.
64 * @msg: common part of the actual event message.
73 * struct brcmf_fweh_queue_item - event item on event queue.
76 * @code: event code.
77 * @ifidx: interface index related to this event.
79 * @emsg: common parameters of the firmware event message.
80 * @data: event specific data part of the firmware event.
103 /* array for mapping code to event name */
110 * brcmf_fweh_event_name() - returns name for given event code.
131 * brcmf_fweh_queue_event() - create and queue event.
133 * @fweh: firmware event handling info.
134 * @event: event queue entry.
137 struct brcmf_fweh_queue_item *event) brcmf_fweh_queue_event()
142 list_add_tail(&event->q, &fweh->event_q); brcmf_fweh_queue_event()
158 /* handle the event if valid interface and handler */ brcmf_fweh_call_event_handler()
162 brcmf_err("unhandled event %d ignored\n", code); brcmf_fweh_call_event_handler()
170 * brcmf_fweh_handle_if_event() - handle IF event.
189 /* The P2P Device interface event must not be ignored contrary to what brcmf_fweh_handle_if_event()
199 brcmf_dbg(EVENT, "event can be ignored\n"); brcmf_fweh_handle_if_event()
233 * brcmf_fweh_dequeue_event() - get event from the queue.
235 * @fweh: firmware event handling info.
240 struct brcmf_fweh_queue_item *event = NULL; brcmf_fweh_dequeue_event() local
245 event = list_first_entry(&fweh->event_q, brcmf_fweh_dequeue_event()
247 list_del(&event->q); brcmf_fweh_dequeue_event()
251 return event; brcmf_fweh_dequeue_event()
255 * brcmf_fweh_event_worker() - firmware event worker.
264 struct brcmf_fweh_queue_item *event; brcmf_fweh_event_worker() local
272 while ((event = brcmf_fweh_dequeue_event(fweh))) { brcmf_fweh_event_worker()
273 brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n", brcmf_fweh_event_worker()
274 brcmf_fweh_event_name(event->code), event->code, brcmf_fweh_event_worker()
275 event->emsg.ifidx, event->emsg.bsscfgidx, brcmf_fweh_event_worker()
276 event->emsg.addr); brcmf_fweh_event_worker()
278 /* convert event message */ brcmf_fweh_event_worker()
279 emsg_be = &event->emsg; brcmf_fweh_event_worker()
282 emsg.event_code = event->code; brcmf_fweh_event_worker()
294 brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data, brcmf_fweh_event_worker()
296 "event payload, len=%d\n", emsg.datalen); brcmf_fweh_event_worker()
298 /* special handling of interface event */ brcmf_fweh_event_worker()
299 if (event->code == BRCMF_E_IF) { brcmf_fweh_event_worker()
300 brcmf_fweh_handle_if_event(drvr, &emsg, event->data); brcmf_fweh_event_worker()
304 if (event->code == BRCMF_E_TDLS_PEER_EVENT) brcmf_fweh_event_worker()
308 err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg, brcmf_fweh_event_worker()
309 event->data); brcmf_fweh_event_worker()
311 brcmf_err("event handler failed (%d)\n", brcmf_fweh_event_worker()
312 event->code); brcmf_fweh_event_worker()
316 kfree(event); brcmf_fweh_event_worker()
332 * brcmf_fweh_attach() - initialize firmware event handling.
345 * brcmf_fweh_detach() - cleanup firmware event handling.
369 * brcmf_fweh_register() - register handler for given event code.
372 * @code: event code.
373 * @handler: handler for the given event code.
379 brcmf_err("event code %d already registered\n", code); brcmf_fweh_register()
383 brcmf_dbg(TRACE, "event handler registered for %s\n", brcmf_fweh_register()
392 * @code: event code.
397 brcmf_dbg(TRACE, "event handler cleared for %s\n", brcmf_fweh_unregister()
414 brcmf_dbg(EVENT, "enable event %s\n", brcmf_fweh_activate_events()
420 /* want to handle IF event as well */ brcmf_fweh_activate_events()
421 brcmf_dbg(EVENT, "enable event IF\n"); brcmf_fweh_activate_events()
433 * brcmf_fweh_process_event() - process skb as firmware event.
436 * @event_packet: event packet to process.
438 * If the packet buffer contains a firmware event message it will
439 * dispatch the event to a registered handler (using worker).
446 struct brcmf_fweh_queue_item *event; brcmf_fweh_process_event() local
451 /* get event info */ brcmf_fweh_process_event()
465 event = kzalloc(sizeof(*event) + datalen, alloc_flag); brcmf_fweh_process_event()
466 if (!event) brcmf_fweh_process_event()
469 event->code = code; brcmf_fweh_process_event()
470 event->ifidx = event_packet->msg.ifidx; brcmf_fweh_process_event()
472 /* use memcpy to get aligned event message */ brcmf_fweh_process_event()
473 memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg)); brcmf_fweh_process_event()
474 memcpy(event->data, data, datalen); brcmf_fweh_process_event()
475 memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN); brcmf_fweh_process_event()
477 brcmf_fweh_queue_event(fweh, event); brcmf_fweh_process_event()
136 brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh, struct brcmf_fweh_queue_item *event) brcmf_fweh_queue_event() argument
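
The fweh.c comments above describe a queue-then-worker model: brcmf_fweh_process_event() copies the firmware message into a queue item, brcmf_fweh_queue_event() appends it under a lock, and brcmf_fweh_event_worker() drains the queue and calls the handler registered for the event code. A hedged userspace analogue of that pattern with pthreads; the names and types below are invented for illustration and are not the brcmfmac API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ev_item { struct ev_item *next; int code; char data[32]; };

static struct ev_item *head, *tail;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Producer side: copy the message into a queue item and append it. */
static void queue_event(int code, const char *msg)
{
    struct ev_item *e = calloc(1, sizeof(*e));
    e->code = code;
    strncpy(e->data, msg, sizeof(e->data) - 1);
    pthread_mutex_lock(&lock);
    if (tail) tail->next = e; else head = e;
    tail = e;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
}

/* Worker side: dequeue one item at a time and dispatch it. */
static void *event_worker(void *arg)
{
    (void)arg;
    for (;;) {
        pthread_mutex_lock(&lock);
        while (!head)
            pthread_cond_wait(&cond, &lock);
        struct ev_item *e = head;
        head = e->next;
        if (!head) tail = NULL;
        pthread_mutex_unlock(&lock);
        printf("event %d: %s\n", e->code, e->data);  /* stand-in for the handler call */
        free(e);
    }
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, event_worker, NULL);
    queue_event(1, "link up");
    pthread_join(t, NULL);  /* never returns: the worker loops forever in this sketch */
    return 0;
}
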
/linux-4.4.14/fs/notify/fanotify/
H A Dfanotify.c31 static int fanotify_merge(struct list_head *list, struct fsnotify_event *event) fanotify_merge() argument
36 pr_debug("%s: list=%p event=%p\n", __func__, list, event); fanotify_merge()
40 * Don't merge a permission event with any other event so that we know fanotify_merge()
41 * the event structure we have created in fanotify_handle_event() is the fanotify_merge()
44 if (event->mask & FAN_ALL_PERM_EVENTS) fanotify_merge()
49 if (should_merge(test_event, event)) { list_for_each_entry_reverse()
58 test_event->mask |= event->mask;
64 struct fanotify_perm_event_info *event) fanotify_get_response()
68 pr_debug("%s: group=%p event=%p\n", __func__, group, event); fanotify_get_response()
70 wait_event(group->fanotify_data.access_waitq, event->response || fanotify_get_response()
73 if (!event->response) { /* bypass_perm set */ fanotify_get_response()
76 * it from group's event list because we are responsible for fanotify_get_response()
77 * freeing the permission event. fanotify_get_response()
79 fsnotify_remove_event(group, &event->fae.fse); fanotify_get_response()
84 switch (event->response) { fanotify_get_response()
92 event->response = 0; fanotify_get_response()
94 pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__, fanotify_get_response()
95 group, event, ret); fanotify_get_response()
113 /* if we don't have enough info to send an event to userspace say no */ fanotify_should_send_event()
127 * if the event is for a child and this inode doesn't care about fanotify_should_send_event()
156 struct fanotify_event_info *event; fanotify_alloc_event() local
166 event = &pevent->fae; fanotify_alloc_event()
171 event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL); fanotify_alloc_event()
172 if (!event) fanotify_alloc_event()
175 fsnotify_init_event(&event->fse, inode, mask); fanotify_alloc_event()
176 event->tgid = get_pid(task_tgid(current)); fanotify_alloc_event()
178 event->path = *path; fanotify_alloc_event()
179 path_get(&event->path); fanotify_alloc_event()
181 event->path.mnt = NULL; fanotify_alloc_event()
182 event->path.dentry = NULL; fanotify_alloc_event()
184 return event; fanotify_alloc_event()
195 struct fanotify_event_info *event; fanotify_handle_event() local
216 event = fanotify_alloc_event(inode, mask, data); fanotify_handle_event()
217 if (unlikely(!event)) fanotify_handle_event()
220 fsn_event = &event->fse; fanotify_handle_event()
225 /* Our event wasn't used in the end. Free it. */ fanotify_handle_event()
251 struct fanotify_event_info *event; fanotify_free_event() local
253 event = FANOTIFY_E(fsn_event); fanotify_free_event()
254 path_put(&event->path); fanotify_free_event()
255 put_pid(event->tgid); fanotify_free_event()
263 kmem_cache_free(fanotify_event_cachep, event); fanotify_free_event()
63 fanotify_get_response(struct fsnotify_group *group, struct fanotify_perm_event_info *event) fanotify_get_response() argument
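
fanotify_get_response() above parks a permission event until the listening process writes back a verdict, which is also why fanotify_merge() refuses to merge permission events: each one needs its own answer. Seen from userspace, that round trip uses the documented fanotify API roughly as sketched below (error handling trimmed; FAN_CLASS_CONTENT needs CAP_SYS_ADMIN, and marking the mount of "/" is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
    int fd = fanotify_init(FAN_CLASS_CONTENT, O_RDONLY);
    fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
                  FAN_OPEN_PERM, AT_FDCWD, "/");

    char buf[4096];
    ssize_t len = read(fd, buf, sizeof(buf));
    struct fanotify_event_metadata *md = (void *)buf;

    while (FAN_EVENT_OK(md, len)) {
        if (md->mask & FAN_OPEN_PERM) {
            /* Writing the response is what unblocks fanotify_get_response(). */
            struct fanotify_response resp = {
                .fd = md->fd,
                .response = FAN_ALLOW,
            };
            write(fd, &resp, sizeof(resp));
        }
        close(md->fd);
        md = FAN_EVENT_NEXT(md, len);
    }
    return 0;
}
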
/linux-4.4.14/arch/sh/kernel/
H A Dperf_event.c2 * Performance event support framework for SuperH hardware counters.
84 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
121 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
123 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
124 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
152 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
179 static void sh_perf_event_update(struct perf_event *event, sh_perf_event_update() argument
217 local64_add(delta, &event->count); sh_perf_event_update()
220 static void sh_pmu_stop(struct perf_event *event, int flags) sh_pmu_stop() argument
223 struct hw_perf_event *hwc = &event->hw; sh_pmu_stop()
226 if (!(event->hw.state & PERF_HES_STOPPED)) { sh_pmu_stop()
229 event->hw.state |= PERF_HES_STOPPED; sh_pmu_stop()
232 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { sh_pmu_stop()
233 sh_perf_event_update(event, &event->hw, idx); sh_pmu_stop()
234 event->hw.state |= PERF_HES_UPTODATE; sh_pmu_stop()
238 static void sh_pmu_start(struct perf_event *event, int flags) sh_pmu_start() argument
241 struct hw_perf_event *hwc = &event->hw; sh_pmu_start()
248 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); sh_pmu_start()
250 cpuc->events[idx] = event; sh_pmu_start()
251 event->hw.state = 0; sh_pmu_start()
255 static void sh_pmu_del(struct perf_event *event, int flags) sh_pmu_del() argument
259 sh_pmu_stop(event, PERF_EF_UPDATE); sh_pmu_del()
260 __clear_bit(event->hw.idx, cpuc->used_mask); sh_pmu_del()
262 perf_event_update_userpage(event); sh_pmu_del()
265 static int sh_pmu_add(struct perf_event *event, int flags) sh_pmu_add() argument
268 struct hw_perf_event *hwc = &event->hw; sh_pmu_add()
272 perf_pmu_disable(event->pmu); sh_pmu_add()
285 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; sh_pmu_add()
287 sh_pmu_start(event, PERF_EF_RELOAD); sh_pmu_add()
289 perf_event_update_userpage(event); sh_pmu_add()
292 perf_pmu_enable(event->pmu); sh_pmu_add()
296 static void sh_pmu_read(struct perf_event *event) sh_pmu_read() argument
298 sh_perf_event_update(event, &event->hw, event->hw.idx); sh_pmu_read()
301 static int sh_pmu_event_init(struct perf_event *event) sh_pmu_event_init() argument
306 if (has_branch_stack(event)) sh_pmu_event_init()
309 switch (event->attr.type) { sh_pmu_event_init()
313 err = __hw_perf_event_init(event); sh_pmu_event_init()
321 if (event->destroy) sh_pmu_event_init()
322 event->destroy(event); sh_pmu_event_init()
/linux-4.4.14/drivers/usb/usbip/
H A Dusbip_event.c33 usbip_dbg_eh("pending event %lx\n", ud->event); event_handler()
39 if (ud->event & USBIP_EH_SHUTDOWN) { event_handler()
41 ud->event &= ~USBIP_EH_SHUTDOWN; event_handler()
45 if (ud->event & USBIP_EH_RESET) { event_handler()
47 ud->event &= ~USBIP_EH_RESET; event_handler()
51 if (ud->event & USBIP_EH_UNUSABLE) { event_handler()
53 ud->event &= ~USBIP_EH_UNUSABLE; event_handler()
57 if (ud->event & USBIP_EH_BYE) event_handler()
84 ud->event = 0; usbip_start_eh()
106 void usbip_event_add(struct usbip_device *ud, unsigned long event) usbip_event_add() argument
111 ud->event |= event; usbip_event_add()
122 if (ud->event != 0) usbip_event_happened()
/linux-4.4.14/drivers/input/
H A Dinput-compat.c18 struct input_event *event) input_event_from_user()
27 event->time.tv_sec = compat_event.time.tv_sec; input_event_from_user()
28 event->time.tv_usec = compat_event.time.tv_usec; input_event_from_user()
29 event->type = compat_event.type; input_event_from_user()
30 event->code = compat_event.code; input_event_from_user()
31 event->value = compat_event.value; input_event_from_user()
34 if (copy_from_user(event, buffer, sizeof(struct input_event))) input_event_from_user()
42 const struct input_event *event) input_event_to_user()
47 compat_event.time.tv_sec = event->time.tv_sec; input_event_to_user()
48 compat_event.time.tv_usec = event->time.tv_usec; input_event_to_user()
49 compat_event.type = event->type; input_event_to_user()
50 compat_event.code = event->code; input_event_to_user()
51 compat_event.value = event->value; input_event_to_user()
58 if (copy_to_user(buffer, event, sizeof(struct input_event))) input_event_to_user()
103 struct input_event *event) input_event_from_user()
105 if (copy_from_user(event, buffer, sizeof(struct input_event))) input_event_from_user()
112 const struct input_event *event) input_event_to_user()
114 if (copy_to_user(buffer, event, sizeof(struct input_event))) input_event_to_user()
17 input_event_from_user(const char __user *buffer, struct input_event *event) input_event_from_user() argument
41 input_event_to_user(char __user *buffer, const struct input_event *event) input_event_to_user() argument
102 input_event_from_user(const char __user *buffer, struct input_event *event) input_event_from_user() argument
111 input_event_to_user(char __user *buffer, const struct input_event *event) input_event_to_user() argument
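
input-compat.c only converts between the 32-bit and native layouts of struct input_event when copying to or from userspace; the record itself is the ordinary evdev event (time, type, code, value). A minimal reader for an event node, assuming /dev/input/event0 exists and is readable by the caller:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
    int fd = open("/dev/input/event0", O_RDONLY);
    struct input_event ev;

    /* Each successful read() returns whole input_event records. */
    while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
        printf("type=%u code=%u value=%d\n", ev.type, ev.code, ev.value);
    return 0;
}
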
/linux-4.4.14/drivers/acpi/
H A Devent.c2 * event.c - exporting ACPI events via procfs
22 ACPI_MODULE_NAME("event");
29 struct acpi_bus_event event; acpi_notifier_call_chain() local
31 strcpy(event.device_class, dev->pnp.device_class); acpi_notifier_call_chain()
32 strcpy(event.bus_id, dev->pnp.bus_id); acpi_notifier_call_chain()
33 event.type = type; acpi_notifier_call_chain()
34 event.data = data; acpi_notifier_call_chain()
35 return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event) acpi_notifier_call_chain()
64 ACPI_GENL_ATTR_EVENT, /* ACPI event info needed by user space */
100 struct acpi_genl_event *event; acpi_bus_generate_netlink_event() local
130 event = nla_data(attr); acpi_bus_generate_netlink_event()
131 memset(event, 0, sizeof(struct acpi_genl_event)); acpi_bus_generate_netlink_event()
133 strcpy(event->device_class, device_class); acpi_bus_generate_netlink_event()
134 strcpy(event->bus_id, bus_id); acpi_bus_generate_netlink_event()
135 event->type = type; acpi_bus_generate_netlink_event()
136 event->data = data; acpi_bus_generate_netlink_event()
175 /* create genetlink for acpi event */ acpi_event_init()
179 "Failed to create genetlink family for ACPI event\n"); acpi_event_init()
/linux-4.4.14/tools/perf/tests/
H A Dswitch-tracking.c63 union perf_event *event, const char *comm, int nr) check_comm()
65 if (event->header.type == PERF_RECORD_COMM && check_comm()
66 (pid_t)event->comm.pid == getpid() && check_comm()
67 (pid_t)event->comm.tid == getpid() && check_comm()
68 strcmp(event->comm.comm, comm) == 0) { check_comm()
70 pr_debug("Duplicate comm event\n"); check_comm()
74 pr_debug3("comm event: %s nr: %d\n", event->comm.comm, nr); check_comm()
114 union perf_event *event, process_sample_event()
122 if (perf_evlist__parse_sample(evlist, event, &sample)) { process_sample_event()
150 pr_debug3("cycles event\n"); process_sample_event()
163 static int process_event(struct perf_evlist *evlist, union perf_event *event, process_event() argument
166 if (event->header.type == PERF_RECORD_SAMPLE) process_event()
167 return process_sample_event(evlist, event, switch_tracking); process_event()
169 if (event->header.type == PERF_RECORD_COMM) { process_event()
172 err = check_comm(switch_tracking, event, "Test COMM 1", 0); process_event()
176 err = check_comm(switch_tracking, event, "Test COMM 2", 1); process_event()
180 err = check_comm(switch_tracking, event, "Test COMM 3", 2); process_event()
184 err = check_comm(switch_tracking, event, "Test COMM 4", 3); process_event()
189 pr_debug("Unexpected comm event\n"); process_event()
199 union perf_event *event; member in struct:event_node
204 union perf_event *event) add_event()
214 node->event = event; add_event()
217 if (perf_evlist__parse_sample(evlist, event, &sample)) { add_event()
223 pr_debug("event with no time\n"); add_event()
255 union perf_event *event; process_events() local
262 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { process_events()
264 ret = add_event(evlist, &events, event); process_events()
285 ret = process_event(evlist, events_array[pos].event, process_events()
349 /* First event */ test__switch_tracking()
352 pr_debug("Failed to parse event dummy:u\n"); test__switch_tracking()
358 /* Second event */ test__switch_tracking()
361 pr_debug("Failed to parse event cycles:u\n"); test__switch_tracking()
367 /* Third event */ test__switch_tracking()
376 pr_debug("Failed to parse event %s\n", sched_switch); test__switch_tracking()
389 /* Test moving an event to the front */ test__switch_tracking()
391 pr_debug("cycles event already at front"); test__switch_tracking()
396 pr_debug("Failed to move cycles event to front"); test__switch_tracking()
403 /* Fourth event */ test__switch_tracking()
406 pr_debug("Failed to parse event dummy:u\n"); test__switch_tracking()
422 /* Check moved event is still at the front */ test__switch_tracking()
424 pr_debug("Front event no longer at front"); test__switch_tracking()
428 /* Check tracking event is tracking */ test__switch_tracking()
430 pr_debug("Tracking event not tracking\n"); test__switch_tracking()
438 pr_debug("Non-tracking event is tracking\n"); evlist__for_each()
541 /* Check cycles event got enabled */
547 /* Check cycles event got disabled */
549 pr_debug("cycles events even though event was disabled\n");
553 /* Check cycles event got enabled again */
62 check_comm(struct switch_tracking *switch_tracking, union perf_event *event, const char *comm, int nr) check_comm() argument
113 process_sample_event(struct perf_evlist *evlist, union perf_event *event, struct switch_tracking *switch_tracking) process_sample_event() argument
203 add_event(struct perf_evlist *evlist, struct list_head *events, union perf_event *event) add_event() argument
H A Dkeep-tracking.c28 union perf_event *event; find_comm() local
33 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { find_comm()
34 if (event->header.type == PERF_RECORD_COMM && find_comm()
35 (pid_t)event->comm.pid == getpid() && find_comm()
36 (pid_t)event->comm.tid == getpid() && find_comm()
37 strcmp(event->comm.comm, comm) == 0) find_comm()
46 * test__keep_tracking - test using a dummy software event to keep tracking.
49 * when an event is disabled but a dummy software event is not disabled. If the
93 pr_debug("Unable to open dummy and cycles event\n"); test__keep_tracking()
101 * First, test that a 'comm' event can be found when the event is test__keep_tracking()
114 pr_debug("First time, failed to find tracking event.\n"); test__keep_tracking()
119 * Secondly, test that a 'comm' event can be found when the event is test__keep_tracking()
120 * disabled with the dummy event still enabled. test__keep_tracking()
136 pr_debug("Seconf time, failed to find tracking event.\n"); test__keep_tracking()
H A Dparse-no-sample-id-all.c6 #include "event.h"
12 static int process_event(struct perf_evlist **pevlist, union perf_event *event) process_event() argument
16 if (event->header.type == PERF_RECORD_HEADER_ATTR) { process_event()
17 if (perf_event__process_attr(NULL, event, pevlist)) { process_event()
24 if (event->header.type >= PERF_RECORD_USER_TYPE_START) process_event()
30 if (perf_evlist__parse_sample(*pevlist, event, &sample)) { process_event()
65 * more than one selected event, so this test processes three events: 2
66 * attributes representing the selected events and one mmap event.
H A Dopenat-syscall-tp-fields.c68 * Generate the event: test__syscall_openat_tp_fields()
76 union perf_event *event; test__syscall_openat_tp_fields() local
78 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { test__syscall_openat_tp_fields()
79 const u32 type = event->header.type; test__syscall_openat_tp_fields()
90 err = perf_evsel__parse_sample(evsel, event, &sample); test__syscall_openat_tp_fields()
/linux-4.4.14/net/sctp/
H A Dulpevent.c9 * These functions manipulate an sctp event. The struct ulpevent is used
46 static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
48 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
49 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event);
 52 /* Initialize an ULP event from a given skb. */ sctp_ulpevent_init()
53 static void sctp_ulpevent_init(struct sctp_ulpevent *event, sctp_ulpevent_init() argument
57 memset(event, 0, sizeof(struct sctp_ulpevent)); sctp_ulpevent_init()
58 event->msg_flags = msg_flags; sctp_ulpevent_init()
59 event->rmem_len = len; sctp_ulpevent_init()
66 struct sctp_ulpevent *event; sctp_ulpevent_new() local
73 event = sctp_skb2event(skb); sctp_ulpevent_new()
74 sctp_ulpevent_init(event, msg_flags, skb->truesize); sctp_ulpevent_new()
76 return event; sctp_ulpevent_new()
83 int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event) sctp_ulpevent_is_notification() argument
85 return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION); sctp_ulpevent_is_notification()
91 static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event, sctp_ulpevent_set_owner() argument
100 skb = sctp_event2skb(event); sctp_ulpevent_set_owner()
101 event->asoc = (struct sctp_association *)asoc; sctp_ulpevent_set_owner()
102 atomic_add(event->rmem_len, &event->asoc->rmem_alloc); sctp_ulpevent_set_owner()
107 static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) sctp_ulpevent_release_owner() argument
109 struct sctp_association *asoc = event->asoc; sctp_ulpevent_release_owner()
111 atomic_sub(event->rmem_len, &asoc->rmem_alloc); sctp_ulpevent_release_owner()
115 /* Create and initialize an SCTP_ASSOC_CHANGE event.
131 struct sctp_ulpevent *event; sctp_ulpevent_make_assoc_change() local
148 /* Embed the event fields inside the cloned skb. */ sctp_ulpevent_make_assoc_change()
149 event = sctp_skb2event(skb); sctp_ulpevent_make_assoc_change()
150 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); sctp_ulpevent_make_assoc_change()
161 event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), sctp_ulpevent_make_assoc_change()
163 if (!event) sctp_ulpevent_make_assoc_change()
166 skb = sctp_event2skb(event); sctp_ulpevent_make_assoc_change()
184 * event that happened to the association. sctp_ulpevent_make_assoc_change()
238 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_assoc_change()
241 return event; sctp_ulpevent_make_assoc_change()
247 /* Create and initialize an SCTP_PEER_ADDR_CHANGE event.
253 * an interface details event is sent.
260 struct sctp_ulpevent *event; sctp_ulpevent_make_peer_addr_change() local
264 event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change), sctp_ulpevent_make_peer_addr_change()
266 if (!event) sctp_ulpevent_make_peer_addr_change()
269 skb = sctp_event2skb(event); sctp_ulpevent_make_peer_addr_change()
306 * event that happened to the address. sctp_ulpevent_make_peer_addr_change()
330 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_peer_addr_change()
348 return event; sctp_ulpevent_make_peer_addr_change()
365 * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP
374 struct sctp_ulpevent *event; sctp_ulpevent_make_remote_error() local
398 /* Embed the event fields inside the cloned skb. */ sctp_ulpevent_make_remote_error()
399 event = sctp_skb2event(skb); sctp_ulpevent_make_remote_error()
400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); sctp_ulpevent_make_remote_error()
413 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_remote_error()
416 return event; sctp_ulpevent_make_remote_error()
430 struct sctp_ulpevent *event; sctp_ulpevent_make_send_failed() local
449 /* Embed the event fields inside the cloned skb. */ sctp_ulpevent_make_send_failed()
450 event = sctp_skb2event(skb); sctp_ulpevent_make_send_failed()
451 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); sctp_ulpevent_make_send_failed()
522 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_send_failed()
524 return event; sctp_ulpevent_make_send_failed()
539 struct sctp_ulpevent *event; sctp_ulpevent_make_shutdown_event() local
543 event = sctp_ulpevent_new(sizeof(struct sctp_shutdown_event), sctp_ulpevent_make_shutdown_event()
545 if (!event) sctp_ulpevent_make_shutdown_event()
548 skb = sctp_event2skb(event); sctp_ulpevent_make_shutdown_event()
585 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_shutdown_event()
588 return event; sctp_ulpevent_make_shutdown_event()
602 struct sctp_ulpevent *event; sctp_ulpevent_make_adaptation_indication() local
606 event = sctp_ulpevent_new(sizeof(struct sctp_adaptation_event), sctp_ulpevent_make_adaptation_indication()
608 if (!event) sctp_ulpevent_make_adaptation_indication()
611 skb = sctp_event2skb(event); sctp_ulpevent_make_adaptation_indication()
619 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_adaptation_indication()
622 return event; sctp_ulpevent_make_adaptation_indication()
639 struct sctp_ulpevent *event = NULL; sctp_ulpevent_make_rcvmsg() local
693 /* Embed the event fields inside the cloned skb. */ sctp_ulpevent_make_rcvmsg()
694 event = sctp_skb2event(skb); sctp_ulpevent_make_rcvmsg()
696 /* Initialize event with flags 0 and correct length sctp_ulpevent_make_rcvmsg()
700 sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); sctp_ulpevent_make_rcvmsg()
702 sctp_ulpevent_receive_data(event, asoc); sctp_ulpevent_make_rcvmsg()
704 event->stream = ntohs(chunk->subh.data_hdr->stream); sctp_ulpevent_make_rcvmsg()
705 event->ssn = ntohs(chunk->subh.data_hdr->ssn); sctp_ulpevent_make_rcvmsg()
706 event->ppid = chunk->subh.data_hdr->ppid; sctp_ulpevent_make_rcvmsg()
708 event->flags |= SCTP_UNORDERED; sctp_ulpevent_make_rcvmsg()
709 event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); sctp_ulpevent_make_rcvmsg()
711 event->tsn = ntohl(chunk->subh.data_hdr->tsn); sctp_ulpevent_make_rcvmsg()
712 event->msg_flags |= chunk->chunk_hdr->flags; sctp_ulpevent_make_rcvmsg()
713 event->iif = sctp_chunk_iif(chunk); sctp_ulpevent_make_rcvmsg()
715 return event; sctp_ulpevent_make_rcvmsg()
723 /* Create a partial delivery related event.
735 struct sctp_ulpevent *event; sctp_ulpevent_make_pdapi() local
739 event = sctp_ulpevent_new(sizeof(struct sctp_pdapi_event), sctp_ulpevent_make_pdapi()
741 if (!event) sctp_ulpevent_make_pdapi()
744 skb = sctp_event2skb(event); sctp_ulpevent_make_pdapi()
775 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_pdapi()
778 return event; sctp_ulpevent_make_pdapi()
787 struct sctp_ulpevent *event; sctp_ulpevent_make_authkey() local
791 event = sctp_ulpevent_new(sizeof(struct sctp_authkey_event), sctp_ulpevent_make_authkey()
793 if (!event) sctp_ulpevent_make_authkey()
796 skb = sctp_event2skb(event); sctp_ulpevent_make_authkey()
811 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_authkey()
814 return event; sctp_ulpevent_make_authkey()
826 struct sctp_ulpevent *event; sctp_ulpevent_make_sender_dry_event() local
830 event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event), sctp_ulpevent_make_sender_dry_event()
832 if (!event) sctp_ulpevent_make_sender_dry_event()
835 skb = sctp_event2skb(event); sctp_ulpevent_make_sender_dry_event()
842 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_sender_dry_event()
845 return event; sctp_ulpevent_make_sender_dry_event()
849 * event.
851 __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event) sctp_ulpevent_get_notification_type() argument
856 skb = sctp_event2skb(event); sctp_ulpevent_get_notification_type()
864 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, sctp_ulpevent_read_sndrcvinfo() argument
869 if (sctp_ulpevent_is_notification(event)) sctp_ulpevent_read_sndrcvinfo()
873 sinfo.sinfo_stream = event->stream; sctp_ulpevent_read_sndrcvinfo()
874 sinfo.sinfo_ssn = event->ssn; sctp_ulpevent_read_sndrcvinfo()
875 sinfo.sinfo_ppid = event->ppid; sctp_ulpevent_read_sndrcvinfo()
876 sinfo.sinfo_flags = event->flags; sctp_ulpevent_read_sndrcvinfo()
877 sinfo.sinfo_tsn = event->tsn; sctp_ulpevent_read_sndrcvinfo()
878 sinfo.sinfo_cumtsn = event->cumtsn; sctp_ulpevent_read_sndrcvinfo()
879 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); sctp_ulpevent_read_sndrcvinfo()
881 sinfo.sinfo_context = event->asoc->default_rcv_context; sctp_ulpevent_read_sndrcvinfo()
892 void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, sctp_ulpevent_read_rcvinfo() argument
897 if (sctp_ulpevent_is_notification(event)) sctp_ulpevent_read_rcvinfo()
901 rinfo.rcv_sid = event->stream; sctp_ulpevent_read_rcvinfo()
902 rinfo.rcv_ssn = event->ssn; sctp_ulpevent_read_rcvinfo()
903 rinfo.rcv_ppid = event->ppid; sctp_ulpevent_read_rcvinfo()
904 rinfo.rcv_flags = event->flags; sctp_ulpevent_read_rcvinfo()
905 rinfo.rcv_tsn = event->tsn; sctp_ulpevent_read_rcvinfo()
906 rinfo.rcv_cumtsn = event->cumtsn; sctp_ulpevent_read_rcvinfo()
907 rinfo.rcv_assoc_id = sctp_assoc2id(event->asoc); sctp_ulpevent_read_rcvinfo()
908 rinfo.rcv_context = event->asoc->default_rcv_context; sctp_ulpevent_read_rcvinfo()
917 static void __sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, __sctp_ulpevent_read_nxtinfo() argument
924 nxtinfo.nxt_sid = event->stream; __sctp_ulpevent_read_nxtinfo()
925 nxtinfo.nxt_ppid = event->ppid; __sctp_ulpevent_read_nxtinfo()
926 nxtinfo.nxt_flags = event->flags; __sctp_ulpevent_read_nxtinfo()
927 if (sctp_ulpevent_is_notification(event)) __sctp_ulpevent_read_nxtinfo()
930 nxtinfo.nxt_assoc_id = sctp_assoc2id(event->asoc); __sctp_ulpevent_read_nxtinfo()
936 void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, sctp_ulpevent_read_nxtinfo() argument
955 static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, sctp_ulpevent_receive_data() argument
960 skb = sctp_event2skb(event); sctp_ulpevent_receive_data()
962 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_receive_data()
968 /* Note: Not clearing the entire event struct as this is just a sctp_ulpevent_receive_data()
969 * fragment of the real event. However, we still need to do rwnd sctp_ulpevent_receive_data()
981 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) sctp_ulpevent_release_data() argument
993 skb = sctp_event2skb(event); sctp_ulpevent_release_data()
1009 sctp_assoc_rwnd_increase(event->asoc, len);
1010 sctp_ulpevent_release_owner(event);
1013 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) sctp_ulpevent_release_frag_data() argument
1017 skb = sctp_event2skb(event); sctp_ulpevent_release_frag_data()
1032 sctp_ulpevent_release_owner(event);
1036 * to the owner, updating the rwnd in case of a DATA event and freeing the
1039 void sctp_ulpevent_free(struct sctp_ulpevent *event) sctp_ulpevent_free() argument
1041 if (sctp_ulpevent_is_notification(event)) sctp_ulpevent_free()
1042 sctp_ulpevent_release_owner(event); sctp_ulpevent_free()
1044 sctp_ulpevent_release_data(event); sctp_ulpevent_free()
1046 kfree_skb(sctp_event2skb(event)); sctp_ulpevent_free()
1056 struct sctp_ulpevent *event = sctp_skb2event(skb); sctp_queue_purge_ulpevents() local
1058 if (!sctp_ulpevent_is_notification(event)) sctp_queue_purge_ulpevents()
1061 sctp_ulpevent_free(event); sctp_queue_purge_ulpevents()
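
The constructors above build the notifications (SCTP_ASSOC_CHANGE, SCTP_PEER_ADDR_CHANGE, shutdown, and so on) that the kernel only queues for sockets that subscribed to them. With the lksctp-tools headers, subscribing and telling a notification apart from data looks roughly like the sketch below; connection setup is omitted and only the association-change case is handled:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
    struct sctp_event_subscribe events;

    /* Ask the kernel to queue association-change notifications on this socket. */
    memset(&events, 0, sizeof(events));
    events.sctp_association_event = 1;
    setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &events, sizeof(events));

    char buf[1024];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

    /* Notifications arrive in-band, flagged with MSG_NOTIFICATION. */
    if (recvmsg(fd, &msg, 0) > 0 && (msg.msg_flags & MSG_NOTIFICATION)) {
        union sctp_notification *sn = (union sctp_notification *)buf;
        if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE)
            printf("assoc change, state %u\n", sn->sn_assoc_change.sac_state);
    }
    return 0;
}
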
H A Dulpqueue.c74 struct sctp_ulpevent *event; sctp_ulpq_flush() local
77 event = sctp_skb2event(skb); sctp_ulpq_flush()
78 sctp_ulpevent_free(event); sctp_ulpq_flush()
82 event = sctp_skb2event(skb); sctp_ulpq_flush()
83 sctp_ulpevent_free(event); sctp_ulpq_flush()
99 struct sctp_ulpevent *event; sctp_ulpq_tail_data() local
102 /* Create an event from the incoming chunk. */ sctp_ulpq_tail_data()
103 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); sctp_ulpq_tail_data()
104 if (!event) sctp_ulpq_tail_data()
108 event = sctp_ulpq_reasm(ulpq, event); sctp_ulpq_tail_data()
111 if ((event) && (event->msg_flags & MSG_EOR)) { sctp_ulpq_tail_data()
114 __skb_queue_tail(&temp, sctp_event2skb(event)); sctp_ulpq_tail_data()
116 event = sctp_ulpq_order(ulpq, event); sctp_ulpq_tail_data()
119 /* Send event to the ULP. 'event' is the sctp_ulpevent for sctp_ulpq_tail_data()
122 if (event) { sctp_ulpq_tail_data()
123 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0; sctp_ulpq_tail_data()
124 sctp_ulpq_tail_event(ulpq, event); sctp_ulpq_tail_data()
130 /* Add a new event for propagation to the ULP. */
157 struct sctp_ulpevent *event; sctp_clear_pd() local
160 event = sctp_skb2event(skb); sctp_clear_pd()
161 if (event->asoc == asoc) { sctp_clear_pd()
190 /* If the SKB of 'event' is on a list, it is the first such member
193 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_tail_event() argument
197 struct sk_buff *skb = sctp_event2skb(event); sctp_ulpq_tail_event()
208 if (!sctp_ulpevent_is_notification(event)) { sctp_ulpq_tail_event()
212 /* Check if the user wishes to receive this event. */ sctp_ulpq_tail_event()
213 if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) sctp_ulpq_tail_event()
230 if ((event->msg_flags & MSG_NOTIFICATION) || sctp_ulpq_tail_event()
232 (event->msg_flags & SCTP_DATA_FRAG_MASK))) sctp_ulpq_tail_event()
235 clear_pd = event->msg_flags & MSG_EOR; sctp_ulpq_tail_event()
274 sctp_ulpevent_free(event); sctp_ulpq_tail_event()
283 struct sctp_ulpevent *event) sctp_ulpq_store_reasm()
289 tsn = event->tsn; sctp_ulpq_store_reasm()
294 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); sctp_ulpq_store_reasm()
302 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); sctp_ulpq_store_reasm()
316 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event)); sctp_ulpq_store_reasm()
320 /* Helper function to return an event corresponding to the reassembled
333 struct sctp_ulpevent *event; sctp_make_reassembled_event() local
397 event = sctp_skb2event(f_frag); sctp_make_reassembled_event()
400 return event; sctp_make_reassembled_event()
405 * missing fragment in a SCTP datagram and return the corresponding event.
570 /* We have the reassembled event. There is no need to look sctp_ulpq_retrieve_partial()
587 struct sctp_ulpevent *event) sctp_ulpq_reasm()
592 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { sctp_ulpq_reasm()
593 event->msg_flags |= MSG_EOR; sctp_ulpq_reasm()
594 return event; sctp_ulpq_reasm()
597 sctp_ulpq_store_reasm(ulpq, event); sctp_ulpq_reasm()
606 ctsn = event->tsn; sctp_ulpq_reasm()
671 /* We have the reassembled event. There is no need to look sctp_ulpq_retrieve_first()
690 * In the event that the receiver has invoked the partial delivery API,
697 struct sctp_ulpevent *event; sctp_ulpq_reasm_flushtsn() local
704 event = sctp_skb2event(pos); sctp_ulpq_reasm_flushtsn()
705 tsn = event->tsn; sctp_ulpq_reasm_flushtsn()
714 sctp_ulpevent_free(event); sctp_ulpq_reasm_flushtsn()
727 struct sctp_ulpevent *event = NULL; sctp_ulpq_reasm_drain() local
733 while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) { sctp_ulpq_reasm_drain()
735 if ((event) && (event->msg_flags & MSG_EOR)) { sctp_ulpq_reasm_drain()
737 __skb_queue_tail(&temp, sctp_event2skb(event)); sctp_ulpq_reasm_drain()
739 event = sctp_ulpq_order(ulpq, event); sctp_ulpq_reasm_drain()
742 /* Send event to the ULP. 'event' is the sctp_ulpq_reasm_drain()
745 if (event) sctp_ulpq_reasm_drain()
746 sctp_ulpq_tail_event(ulpq, event); sctp_ulpq_reasm_drain()
755 struct sctp_ulpevent *event) sctp_ulpq_retrieve_ordered()
763 sid = event->stream; sctp_ulpq_retrieve_ordered()
766 event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev; sctp_ulpq_retrieve_ordered()
790 /* Attach all gathered skbs to the event. */ sctp_ulpq_retrieve_ordered()
797 struct sctp_ulpevent *event) sctp_ulpq_store_ordered()
806 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); sctp_ulpq_store_ordered()
810 sid = event->stream; sctp_ulpq_store_ordered()
811 ssn = event->ssn; sctp_ulpq_store_ordered()
817 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); sctp_ulpq_store_ordered()
822 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); sctp_ulpq_store_ordered()
842 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event)); sctp_ulpq_store_ordered()
846 struct sctp_ulpevent *event) sctp_ulpq_order()
852 if (SCTP_DATA_UNORDERED & event->msg_flags) sctp_ulpq_order()
853 return event; sctp_ulpq_order()
856 sid = event->stream; sctp_ulpq_order()
857 ssn = event->ssn; sctp_ulpq_order()
865 sctp_ulpq_store_ordered(ulpq, event); sctp_ulpq_order()
875 sctp_ulpq_retrieve_ordered(ulpq, event); sctp_ulpq_order()
877 return event; sctp_ulpq_order()
887 struct sctp_ulpevent *event; sctp_ulpq_reap_ordered() local
897 event = NULL; sctp_skb_for_each()
916 if (!event) sctp_skb_for_each()
918 event = sctp_skb2event(pos); sctp_skb_for_each()
920 /* Attach all gathered skbs to the event. */ sctp_skb_for_each()
927 if (event == NULL && pos != (struct sk_buff *)lobby) {
936 event = sctp_skb2event(pos);
940 /* Send event to the ULP. 'event' is the sctp_ulpevent for
943 if (event) {
945 sctp_ulpq_retrieve_ordered(ulpq, event);
946 sctp_ulpq_tail_event(ulpq, event);
979 struct sctp_ulpevent *event; sctp_ulpq_renege_list() local
985 event = sctp_skb2event(skb); sctp_ulpq_renege_list()
986 tsn = event->tsn; sctp_ulpq_renege_list()
1007 /* Unlink the event, then renege all applicable TSNs. */ sctp_ulpq_renege_list()
1009 sctp_ulpevent_free(event); sctp_ulpq_renege_list()
1037 struct sctp_ulpevent *event; sctp_ulpq_partial_delivery() local
1069 event = sctp_ulpq_retrieve_first(ulpq); sctp_ulpq_partial_delivery()
1070 /* Send event to the ULP. */ sctp_ulpq_partial_delivery()
1071 if (event) { sctp_ulpq_partial_delivery()
1072 sctp_ulpq_tail_event(ulpq, event); sctp_ulpq_partial_delivery()
282 sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_store_reasm() argument
586 sctp_ulpq_reasm(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_reasm() argument
754 sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_retrieve_ordered() argument
796 sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_store_ordered() argument
845 sctp_ulpq_order(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_order() argument
/linux-4.4.14/drivers/acpi/acpica/
H A Devxfevnt.c3 * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
 168 * PARAMETERS: event - The fixed event to be enabled ACPI_EXPORT_SYMBOL()
173 * DESCRIPTION: Enable an ACPI event (fixed) ACPI_EXPORT_SYMBOL()
176 acpi_status acpi_enable_event(u32 event, u32 flags) ACPI_EXPORT_SYMBOL()
185 if (event > ACPI_EVENT_MAX) { ACPI_EXPORT_SYMBOL()
190 * Enable the requested fixed event (by writing a one to the enable ACPI_EXPORT_SYMBOL()
194 acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
203 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
211 "Could not enable %s event", ACPI_EXPORT_SYMBOL()
212 acpi_ut_get_event_name(event))); ACPI_EXPORT_SYMBOL()
225 * PARAMETERS: event - The fixed event to be disabled ACPI_EXPORT_SYMBOL()
230 * DESCRIPTION: Disable an ACPI event (fixed) ACPI_EXPORT_SYMBOL()
233 acpi_status acpi_disable_event(u32 event, u32 flags) ACPI_EXPORT_SYMBOL()
242 if (event > ACPI_EVENT_MAX) { ACPI_EXPORT_SYMBOL()
247 * Disable the requested fixed event (by writing a zero to the enable ACPI_EXPORT_SYMBOL()
251 acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
258 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
267 acpi_ut_get_event_name(event))); ACPI_EXPORT_SYMBOL()
280 * PARAMETERS: event - The fixed event to be cleared ACPI_EXPORT_SYMBOL()
284 * DESCRIPTION: Clear an ACPI event (fixed) ACPI_EXPORT_SYMBOL()
287 acpi_status acpi_clear_event(u32 event) ACPI_EXPORT_SYMBOL()
295 if (event > ACPI_EVENT_MAX) { ACPI_EXPORT_SYMBOL()
300 * Clear the requested fixed event (By writing a one to the status ACPI_EXPORT_SYMBOL()
304 acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
316 * PARAMETERS: event - The fixed event ACPI_EXPORT_SYMBOL()
317 * event_status - Where the current status of the event will ACPI_EXPORT_SYMBOL()
322 * DESCRIPTION: Obtains and returns the current status of the event ACPI_EXPORT_SYMBOL()
325 acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) ACPI_EXPORT_SYMBOL()
339 if (event > ACPI_EVENT_MAX) { ACPI_EXPORT_SYMBOL()
343 /* Fixed event currently can be dispatched? */ ACPI_EXPORT_SYMBOL()
345 if (acpi_gbl_fixed_event_handlers[event].handler) { ACPI_EXPORT_SYMBOL()
349 /* Fixed event currently enabled? */ ACPI_EXPORT_SYMBOL()
352 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
363 /* Fixed event currently active? */ ACPI_EXPORT_SYMBOL()
366 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
H A Devevent.c54 static u32 acpi_ev_fixed_event_dispatch(u32 event);
156 * DESCRIPTION: Install the fixed event handlers and disable all fixed events.
166 * Initialize the structure that keeps track of fixed event handlers and acpi_ev_fixed_event_initialize()
173 /* Disable the fixed event */ acpi_ev_fixed_event_initialize()
226 /* Both the status and enable bits must be on for this event */ acpi_ev_fixed_event_detect()
233 * Found an active (signalled) event. Invoke global event acpi_ev_fixed_event_detect()
254 * PARAMETERS: event - Event type
258 * DESCRIPTION: Clears the status bit for the requested event, calls the
259 * handler that previously registered for the event.
260 * NOTE: If there is no handler for the event, the event is
265 static u32 acpi_ev_fixed_event_dispatch(u32 event) acpi_ev_fixed_event_dispatch() argument
272 (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. acpi_ev_fixed_event_dispatch()
277 * and disable the event to prevent further interrupts. acpi_ev_fixed_event_dispatch()
279 if (!acpi_gbl_fixed_event_handlers[event].handler) { acpi_ev_fixed_event_dispatch()
280 (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. acpi_ev_fixed_event_dispatch()
285 "No installed handler for fixed event - %s (%u), disabling", acpi_ev_fixed_event_dispatch()
286 acpi_ut_get_event_name(event), event)); acpi_ev_fixed_event_dispatch()
293 return ((acpi_gbl_fixed_event_handlers[event]. acpi_ev_fixed_event_dispatch()
294 handler) (acpi_gbl_fixed_event_handlers[event].context)); acpi_ev_fixed_event_dispatch()
/linux-4.4.14/arch/xtensa/kernel/
H A Dperf_event.c55 struct perf_event *event[XCHAL_NUM_PERF_COUNTERS]; member in struct:xtensa_pmu_events
139 static void xtensa_perf_event_update(struct perf_event *event, xtensa_perf_event_update() argument
147 new_raw_count = xtensa_pmu_read_counter(event->hw.idx); xtensa_perf_event_update()
153 local64_add(delta, &event->count); xtensa_perf_event_update()
157 static bool xtensa_perf_event_set_period(struct perf_event *event, xtensa_perf_event_set_period() argument
163 if (!is_sampling_event(event)) { xtensa_perf_event_set_period()
186 perf_event_update_userpage(event); xtensa_perf_event_set_period()
201 static int xtensa_pmu_event_init(struct perf_event *event) xtensa_pmu_event_init() argument
205 switch (event->attr.type) { xtensa_pmu_event_init()
207 if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) || xtensa_pmu_event_init()
208 xtensa_hw_ctl[event->attr.config] == 0) xtensa_pmu_event_init()
210 event->hw.config = xtensa_hw_ctl[event->attr.config]; xtensa_pmu_event_init()
214 ret = xtensa_pmu_cache_event(event->attr.config); xtensa_pmu_event_init()
217 event->hw.config = ret; xtensa_pmu_event_init()
222 if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) == xtensa_pmu_event_init()
225 event->hw.config = (event->attr.config & xtensa_pmu_event_init()
243 static void xtensa_pmu_start(struct perf_event *event, int flags) xtensa_pmu_start() argument
245 struct hw_perf_event *hwc = &event->hw; xtensa_pmu_start()
252 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); xtensa_pmu_start()
253 xtensa_perf_event_set_period(event, hwc, idx); xtensa_pmu_start()
261 static void xtensa_pmu_stop(struct perf_event *event, int flags) xtensa_pmu_stop() argument
263 struct hw_perf_event *hwc = &event->hw; xtensa_pmu_stop()
274 !(event->hw.state & PERF_HES_UPTODATE)) { xtensa_pmu_stop()
275 xtensa_perf_event_update(event, &event->hw, idx); xtensa_pmu_stop()
276 event->hw.state |= PERF_HES_UPTODATE; xtensa_pmu_stop()
284 static int xtensa_pmu_add(struct perf_event *event, int flags) xtensa_pmu_add() argument
287 struct hw_perf_event *hwc = &event->hw; xtensa_pmu_add()
299 ev->event[idx] = event; xtensa_pmu_add()
304 xtensa_pmu_start(event, PERF_EF_RELOAD); xtensa_pmu_add()
306 perf_event_update_userpage(event); xtensa_pmu_add()
310 static void xtensa_pmu_del(struct perf_event *event, int flags) xtensa_pmu_del() argument
314 xtensa_pmu_stop(event, PERF_EF_UPDATE); xtensa_pmu_del()
315 __clear_bit(event->hw.idx, ev->used_mask); xtensa_pmu_del()
316 perf_event_update_userpage(event); xtensa_pmu_del()
319 static void xtensa_pmu_read(struct perf_event *event) xtensa_pmu_read() argument
321 xtensa_perf_event_update(event, &event->hw, event->hw.idx); xtensa_pmu_read()
372 struct perf_event *event = ev->event[i]; xtensa_pmu_irq_handler() local
373 struct hw_perf_event *hwc = &event->hw; xtensa_pmu_irq_handler()
380 xtensa_perf_event_update(event, hwc, i); xtensa_pmu_irq_handler()
382 if (xtensa_perf_event_set_period(event, hwc, i)) { xtensa_pmu_irq_handler()
387 if (perf_event_overflow(event, &data, regs)) xtensa_pmu_irq_handler()
388 xtensa_pmu_stop(event, 0); xtensa_pmu_irq_handler()
/linux-4.4.14/sound/core/seq/
H A DMakefile13 snd-seq-midi-event-objs := seq_midi_event.o
19 obj-$(CONFIG_SND_SEQUENCER) += snd-seq-midi-event.o
25 obj-$(CONFIG_SND_VIRMIDI) += snd-seq-virmidi.o snd-seq-midi-event.o
26 obj-$(CONFIG_SND_RAWMIDI_SEQ) += snd-seq-midi.o snd-seq-midi-event.o
27 obj-$(CONFIG_SND_OPL3_LIB_SEQ) += snd-seq-midi-event.o snd-seq-midi-emul.o
28 obj-$(CONFIG_SND_OPL4_LIB_SEQ) += snd-seq-midi-event.o snd-seq-midi-emul.o
H A Dseq_memory.c46 * Variable length event:
 47 * Events such as sysex use the variable length type.
54 * When an event is generated via read(), the external data is
59 * When the variable length event is enqueued (in prioq or fifo),
71 static int get_var_len(const struct snd_seq_event *event) get_var_len() argument
73 if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) get_var_len()
76 return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK; get_var_len()
79 int snd_seq_dump_var_event(const struct snd_seq_event *event, snd_seq_dump_var_event() argument
85 if ((len = get_var_len(event)) <= 0) snd_seq_dump_var_event()
88 if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) { snd_seq_dump_var_event()
90 char __user *curptr = (char __force __user *)event->data.ext.ptr; snd_seq_dump_var_event()
105 if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) snd_seq_dump_var_event()
106 return func(private_data, event->data.ext.ptr, len); snd_seq_dump_var_event()
108 cell = (struct snd_seq_event_cell *)event->data.ext.ptr; snd_seq_dump_var_event()
113 err = func(private_data, &cell->event, size); snd_seq_dump_var_event()
126 * expand the variable length event to linear buffer space.
144 int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf, snd_seq_expand_var_event() argument
150 if ((len = get_var_len(event)) < 0) snd_seq_expand_var_event()
158 if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) { snd_seq_expand_var_event()
161 if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len)) snd_seq_expand_var_event()
165 err = snd_seq_dump_var_event(event, snd_seq_expand_var_event()
199 if (snd_seq_ev_is_variable(&cell->event)) { snd_seq_cell_free()
200 if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) { snd_seq_cell_free()
202 curp = cell->event.data.ext.ptr; snd_seq_cell_free()
220 * allocate an event cell.
285 * duplicate the event to a cell.
286 * if the event has external data, the data is decomposed to additional
289 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, snd_seq_event_dup() argument
301 if (snd_seq_ev_is_variable(event)) { snd_seq_event_dup()
302 extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK; snd_seq_event_dup()
312 /* copy the event */ snd_seq_event_dup()
313 cell->event = *event; snd_seq_event_dup()
316 if (snd_seq_ev_is_variable(event)) { snd_seq_event_dup()
318 int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED; snd_seq_event_dup()
319 int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR; snd_seq_event_dup()
323 cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED; snd_seq_event_dup()
324 cell->event.data.ext.ptr = NULL; snd_seq_event_dup()
326 src = (struct snd_seq_event_cell *)event->data.ext.ptr; snd_seq_event_dup()
327 buf = (char *)event->data.ext.ptr; snd_seq_event_dup()
337 if (cell->event.data.ext.ptr == NULL) snd_seq_event_dup()
338 cell->event.data.ext.ptr = tmp; snd_seq_event_dup()
344 tmp->event = src->event; snd_seq_event_dup()
347 if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) { snd_seq_event_dup()
352 memcpy(&tmp->event, buf, size); snd_seq_event_dup()
H A Dseq_clientmgr.c82 struct snd_seq_event *event,
85 struct snd_seq_event *event,
 394 * -EINVAL not enough user-space buffer to write the whole event
438 if (snd_seq_ev_is_variable(&cell->event)) { snd_seq_read()
440 tmpev = cell->event; snd_seq_read()
448 err = snd_seq_expand_var_event(&cell->event, count, snd_seq_read()
457 if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) { snd_seq_read()
495 static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event, get_event_dest_client() argument
500 dest = snd_seq_client_use_ptr(event->dest.client); get_event_dest_client()
506 ! test_bit(event->type, dest->event_filter)) get_event_dest_client()
519 * Return the error event.
521 * If the receiver client is a user client, the original event is
522 * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event. If
523 * the original event is also variable length, the external data is
524 * copied after the event record.
525 * If the receiver client is a kernel client, the original event is
530 struct snd_seq_event *event, bounce_error_event()
549 bounce_ev.dest.port = event->source.port; bounce_error_event()
550 bounce_ev.data.quote.origin = event->dest; bounce_error_event()
551 bounce_ev.data.quote.event = event; bounce_error_event()
 564 * rewrite the time-stamp of the event record with the current time update_timestamp_of_queue()
568 static int update_timestamp_of_queue(struct snd_seq_event *event, update_timestamp_of_queue() argument
576 event->queue = queue; update_timestamp_of_queue()
577 event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK; update_timestamp_of_queue()
579 event->time.time = snd_seq_timer_get_cur_time(q->timer); update_timestamp_of_queue()
580 event->flags |= SNDRV_SEQ_TIME_STAMP_REAL; update_timestamp_of_queue()
582 event->time.tick = snd_seq_timer_get_cur_tick(q->timer); update_timestamp_of_queue()
583 event->flags |= SNDRV_SEQ_TIME_STAMP_TICK; update_timestamp_of_queue()
591 * deliver an event to the specified destination.
598 struct snd_seq_event *event, snd_seq_deliver_single_event()
606 direct = snd_seq_ev_is_direct(event); snd_seq_deliver_single_event()
608 dest = get_event_dest_client(event, filter); snd_seq_deliver_single_event()
611 dest_port = snd_seq_port_use_ptr(dest, event->dest.port); snd_seq_deliver_single_event()
622 update_timestamp_of_queue(event, dest_port->time_queue, snd_seq_deliver_single_event()
628 result = snd_seq_fifo_event_in(dest->data.user.fifo, event); snd_seq_deliver_single_event()
634 result = dest_port->event_input(event, direct, snd_seq_deliver_single_event()
649 result = bounce_error_event(client, event, result, atomic, hop); snd_seq_deliver_single_event()
656 * send the event to all subscribers:
659 struct snd_seq_event *event, deliver_to_subscribers()
668 src_port = snd_seq_port_use_ptr(client, event->source.port); deliver_to_subscribers()
671 /* save original event record */ deliver_to_subscribers()
672 event_saved = *event; deliver_to_subscribers()
684 event->dest = subs->info.dest; deliver_to_subscribers()
687 update_timestamp_of_queue(event, subs->info.queue, deliver_to_subscribers()
689 err = snd_seq_deliver_single_event(client, event, deliver_to_subscribers()
698 /* restore original event record */ deliver_to_subscribers()
699 *event = event_saved; deliver_to_subscribers()
705 *event = event_saved; /* restore */ deliver_to_subscribers()
716 struct snd_seq_event *event, port_broadcast_event()
723 dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST); port_broadcast_event()
729 event->dest.port = port->addr.port; port_broadcast_event()
731 err = snd_seq_deliver_single_event(NULL, event, port_broadcast_event()
744 event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */ port_broadcast_event()
749 * send the event to all clients:
753 struct snd_seq_event *event, int atomic, int hop) broadcast_event()
759 addr = event->dest; /* save */ broadcast_event()
765 event->dest.client = dest; broadcast_event()
766 event->dest.port = addr.port; broadcast_event()
768 err = port_broadcast_event(client, event, atomic, hop); broadcast_event()
771 err = snd_seq_deliver_single_event(NULL, event, broadcast_event()
782 event->dest = addr; /* restore */ broadcast_event()
788 static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event, multicast_event() argument
797 /* deliver an event to the destination port(s).
798 * if the event is to subscribers or broadcast, the event is dispatched
802 * n == 0 : the event was not passed to any client.
803 * n < 0 : error - event was not processed.
805 static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event, snd_seq_deliver_event() argument
813 event->source.client, event->source.port, snd_seq_deliver_event()
814 event->dest.client, event->dest.port); snd_seq_deliver_event()
818 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS || snd_seq_deliver_event()
819 event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) snd_seq_deliver_event()
820 result = deliver_to_subscribers(client, event, atomic, hop); snd_seq_deliver_event()
822 else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST || snd_seq_deliver_event()
823 event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST) snd_seq_deliver_event()
824 result = broadcast_event(client, event, atomic, hop); snd_seq_deliver_event()
825 else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS) snd_seq_deliver_event()
826 result = multicast_event(client, event, atomic, hop); snd_seq_deliver_event()
827 else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST) snd_seq_deliver_event()
828 result = port_broadcast_event(client, event, atomic, hop); snd_seq_deliver_event()
831 result = snd_seq_deliver_single_event(client, event, 0, atomic, hop); snd_seq_deliver_event()
837 * dispatch an event cell:
840 * The event cell shall be released or re-queued in this function.
843 * n == 0 : the event was not passed to any client.
844 * n < 0 : error - event was not processed.
854 client = snd_seq_client_use_ptr(cell->event.source.client); snd_seq_dispatch_event()
860 if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) { snd_seq_dispatch_event()
861 /* NOTE event: snd_seq_dispatch_event()
862 * the event cell is re-used as a NOTE-OFF event and snd_seq_dispatch_event()
867 /* reserve this event to enqueue note-off later */ snd_seq_dispatch_event()
868 tmpev = cell->event; snd_seq_dispatch_event()
873 * This was originally a note event. We now re-use the snd_seq_dispatch_event()
874 * cell for the note-off event. snd_seq_dispatch_event()
877 ev = &cell->event; snd_seq_dispatch_event()
896 /* Now queue this cell as the note off event */ snd_seq_dispatch_event()
902 * event cell is freed after processing the event snd_seq_dispatch_event()
905 result = snd_seq_deliver_event(client, &cell->event, atomic, hop); snd_seq_dispatch_event()
919 struct snd_seq_event *event, snd_seq_client_enqueue_event()
927 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { snd_seq_client_enqueue_event()
928 event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; snd_seq_client_enqueue_event()
929 event->queue = SNDRV_SEQ_QUEUE_DIRECT; snd_seq_client_enqueue_event()
932 if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) { snd_seq_client_enqueue_event()
933 event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST; snd_seq_client_enqueue_event()
934 event->queue = SNDRV_SEQ_QUEUE_DIRECT; snd_seq_client_enqueue_event()
937 if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { snd_seq_client_enqueue_event()
939 struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port); snd_seq_client_enqueue_event()
945 /* direct event processing without enqueueing */ snd_seq_client_enqueue_event()
946 if (snd_seq_ev_is_direct(event)) { snd_seq_client_enqueue_event()
947 if (event->type == SNDRV_SEQ_EVENT_NOTE) snd_seq_client_enqueue_event()
948 return -EINVAL; /* this event must be enqueued! */ snd_seq_client_enqueue_event()
949 return snd_seq_deliver_event(client, event, atomic, hop); snd_seq_client_enqueue_event()
953 if (snd_seq_queue_is_used(event->queue, client->number) <= 0) snd_seq_client_enqueue_event()
958 /* allocate an event cell */ snd_seq_client_enqueue_event()
959 err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file); snd_seq_client_enqueue_event()
974 * check validity of event type and data length.
1003 * -EINVAL invalid event
1015 struct snd_seq_event event; snd_seq_write() local
1035 /* Read in the event header from the user */ snd_seq_write()
1036 len = sizeof(event); snd_seq_write()
1037 if (copy_from_user(&event, buf, len)) { snd_seq_write()
1041 event.source.client = client->number; /* fill in client number */ snd_seq_write()
1043 if (check_event_type_and_length(&event)) { snd_seq_write()
1049 if (event.type == SNDRV_SEQ_EVENT_NONE) snd_seq_write()
1051 else if (snd_seq_ev_is_reserved(&event)) { snd_seq_write()
1056 if (snd_seq_ev_is_variable(&event)) { snd_seq_write()
1057 int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK; snd_seq_write()
1064 event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR; snd_seq_write()
1065 event.data.ext.ptr = (char __force *)buf snd_seq_write()
1070 if (client->convert32 && snd_seq_ev_is_varusr(&event)) { snd_seq_write()
1071 void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]); snd_seq_write()
1072 event.data.ext.ptr = ptr; snd_seq_write()
1078 err = snd_seq_client_enqueue_event(client, &event, file, snd_seq_write()
1421 * send a subscription notify event to a user client: snd_seq_client_notify_subscription()
1428 struct snd_seq_event event; snd_seq_client_notify_subscription() local
1430 memset(&event, 0, sizeof(event)); snd_seq_client_notify_subscription()
1431 event.type = evtype; snd_seq_client_notify_subscription()
1432 event.data.connect.dest = info->dest; snd_seq_client_notify_subscription()
1433 event.data.connect.sender = info->sender; snd_seq_client_notify_subscription()
1435 return snd_seq_system_notify(client, port, &event); /* non-atomic */ snd_seq_client_notify_subscription()
2310 /* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
529 bounce_error_event(struct snd_seq_client *client, struct snd_seq_event *event, int err, int atomic, int hop) bounce_error_event() argument
597 snd_seq_deliver_single_event(struct snd_seq_client *client, struct snd_seq_event *event, int filter, int atomic, int hop) snd_seq_deliver_single_event() argument
658 deliver_to_subscribers(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) deliver_to_subscribers() argument
715 port_broadcast_event(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) port_broadcast_event() argument
752 broadcast_event(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) broadcast_event() argument
918 snd_seq_client_enqueue_event(struct snd_seq_client *client, struct snd_seq_event *event, struct file *file, int blocking, int atomic, int hop) snd_seq_client_enqueue_event() argument
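The snd_seq_deliver_event() hits above scatter the dispatch logic across several result lines. Pulled together, the routine picks a delivery path from the event's queue and destination fields in a fixed order. The sketch below restates that order using only the helpers and constants quoted above; it assumes the surrounding kernel definitions and is not standalone-compilable.

/* Dispatch order of snd_seq_deliver_event(), condensed from the hits above. */
static int deliver_event_sketch(struct snd_seq_client *client,
				struct snd_seq_event *event, int atomic, int hop)
{
	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
	    event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
		return deliver_to_subscribers(client, event, atomic, hop);

	if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
	    event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST)
		return broadcast_event(client, event, atomic, hop);

	if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS)
		return multicast_event(client, event, atomic, hop);

	if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST)
		return port_broadcast_event(client, event, atomic, hop);

	/* plain single-destination delivery */
	return snd_seq_deliver_single_event(client, event, 0, atomic, hop);
}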
/linux-4.4.14/arch/powerpc/kernel/
H A Deeh_event.c33 * This pair of routines creates an event and queues it onto a
54 struct eeh_event *event; eeh_event_handler() local
61 /* Fetch EEH event from the queue */ eeh_event_handler()
63 event = NULL; eeh_event_handler()
65 event = list_entry(eeh_eventlist.next, eeh_event_handler()
67 list_del(&event->list); eeh_event_handler()
70 if (!event) eeh_event_handler()
73 /* We might have an event without a binding PE */ eeh_event_handler()
74 pe = event->pe; eeh_event_handler()
90 kfree(event); eeh_event_handler()
100 * EEH event.
122 * eeh_send_failure_event - Generate a PCI error event
126 * the actual event will be delivered in a normal context
132 struct eeh_event *event; eeh_send_failure_event() local
134 event = kzalloc(sizeof(*event), GFP_ATOMIC); eeh_send_failure_event()
135 if (!event) { eeh_send_failure_event()
136 pr_err("EEH: out of memory, event not handled\n"); eeh_send_failure_event()
139 event->pe = pe; eeh_send_failure_event()
143 list_add(&event->list, &eeh_eventlist); eeh_send_failure_event()
153 * eeh_remove_event - Remove EEH event from the queue
165 struct eeh_event *event, *tmp; eeh_remove_event() local
172 * Unless "force" is specified, an event whose associated eeh_remove_event()
173 * PE has been isolated won't be removed, so that the eeh_remove_event()
174 * event isn't lost. eeh_remove_event()
177 list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) { eeh_remove_event()
178 if (!force && event->pe && eeh_remove_event()
179 (event->pe->state & EEH_PE_ISOLATED)) eeh_remove_event()
183 list_del(&event->list); eeh_remove_event()
184 kfree(event); eeh_remove_event()
186 if (event->pe && event->pe->phb == pe->phb) { eeh_remove_event()
187 list_del(&event->list); eeh_remove_event()
188 kfree(event); eeh_remove_event()
190 } else if (event->pe == pe) { eeh_remove_event()
191 list_del(&event->list); eeh_remove_event()
192 kfree(event); eeh_remove_event()
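Read together, the eeh_event.c hits describe a small producer/consumer queue: eeh_send_failure_event() allocates an event in atomic context and links it onto eeh_eventlist, and eeh_event_handler() later pops one entry, recovers its PE, and frees it. A condensed sketch of both sides, assuming the kernel list helpers; the lock the real code takes around the list is omitted here.

/* Producer side, per the eeh_send_failure_event() hits. */
static int send_failure_event_sketch(struct eeh_pe *pe)
{
	struct eeh_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;
	list_add(&event->list, &eeh_eventlist);	/* real code holds the list lock */
	return 0;
}

/* Consumer side, per the eeh_event_handler() hits. */
static void handle_one_event_sketch(void)
{
	struct eeh_event *event = NULL;

	if (!list_empty(&eeh_eventlist)) {	/* real code holds the list lock */
		event = list_entry(eeh_eventlist.next, struct eeh_event, list);
		list_del(&event->list);
	}
	if (!event)
		return;
	/* ... recover event->pe ... */
	kfree(event);
}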
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/
H A Dlib-eq.c45 * Create an event queue that has room for \a count number of events.
47 * The event queue is circular and older events will be overwritten by new
50 * determine the appropriate size of the event queue to prevent this loss
52 * event loss can happen, since the handler is run for each event deposited
55 * \param count The number of events to be stored in the event queue. It
57 * \param callback A handler function that runs when an event is deposited
59 * indicate that no event handler is desired.
85 CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count); LNetEQAlloc()
88 * overhead of enqueue event */ LNetEQAlloc()
100 /* NB allocator has set all event sequence numbers to 0, LNetEQAlloc()
117 * both EQ lookup and poll event with only lnet_eq_wait_lock */ LNetEQAlloc()
142 * Release the resources associated with an event queue if it's idle;
145 * \param eqh A handle for the event queue to be released.
167 * both EQ lookup and poll event with only lnet_eq_wait_lock */ LNetEQFree()
248 /* We've got a new event... */ lnet_eq_dequeue_event()
251 CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n", lnet_eq_dequeue_event()
254 /* ...but did it overwrite an event we've not seen yet? */ lnet_eq_dequeue_event()
270 * A nonblocking function that can be used to get the next event in an EQ.
271 * If an event handler is associated with the EQ, the handler will run before
272 * this function returns successfully. The event is removed from the queue.
274 * \param eventq A handle for the event queue.
275 * \param event On successful return (1 or -EOVERFLOW), this location will
276 * hold the next event in the EQ.
278 * \retval 0 No pending event in the EQ.
281 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
282 * at least one event between this event and the last event obtained from the
286 LNetEQGet(lnet_handle_eq_t eventq, lnet_event_t *event) LNetEQGet() argument
291 event, &which); LNetEQGet()
296 * Block the calling process until there is an event in the EQ.
297 * If an event handler is associated with the EQ, the handler will run before
298 * this function returns successfully. This function returns the next event
301 * \param eventq A handle for the event queue.
302 * \param event On successful return (1 or -EOVERFLOW), this location will
303 * hold the next event in the EQ.
307 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
308 * at least one event between this event and the last event obtained from the
312 LNetEQWait(lnet_handle_eq_t eventq, lnet_event_t *event) LNetEQWait() argument
317 event, &which); LNetEQWait()
331 return -1; /* don't want to wait and no new event */
346 if (tms < 0) /* no more wait but may have new event */
360 * Block the calling process until there's an event from a set of EQs or
363 * If an event handler is associated with the EQ, the handler will run before
364 * this function returns successfully, in which case the corresponding event
371 * \param timeout_ms Time in milliseconds to wait for an event to occur on
374 * \param event,which On successful return (1 or -EOVERFLOW), \a event will
375 * hold the next event in the EQs, and \a which will contain the index of the
376 * EQ from which the event was taken.
378 * \retval 0 No pending event in the EQs after timeout.
380 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
381 * at least one event between this event and the last event obtained from the
387 lnet_event_t *event, int *which) LNetEQPoll()
410 rc = lnet_eq_dequeue_event(eq, event); LNetEQPoll()
423 * -1 : did nothing and it is certain there is no new event LNetEQPoll()
424 * 1 : slept inside and waited until a new event arrived LNetEQPoll()
425 * 0 : didn't want to wait any longer, but a new event may have arrived LNetEQPoll()
429 if (wait < 0) /* no new event */ LNetEQPoll()
386 LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms, lnet_event_t *event, int *which) LNetEQPoll() argument
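The LNetEQGet/LNetEQWait/LNetEQPoll hits document a consistent return convention: 1 means an event was dequeued, 0 means none was pending (or the timeout expired), and -EOVERFLOW still returns an event but signals that at least one earlier event was overwritten. A consumption sketch built only from the prototypes shown above; handle_event() is a hypothetical consumer.

/* Drain an event queue non-blockingly, tolerating overflow. */
static void drain_eq_sketch(lnet_handle_eq_t eq)
{
	lnet_event_t event;
	int rc;

	for (;;) {
		rc = LNetEQGet(eq, &event);
		if (rc == 0)
			break;			/* no pending event */
		if (rc == -EOVERFLOW)
			CWARN("at least one earlier event was lost\n");
		/* rc == 1 or -EOVERFLOW: 'event' now holds the next event */
		handle_event(&event);		/* hypothetical consumer */
	}
}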
/linux-4.4.14/drivers/devfreq/
H A Ddevfreq-event.c2 * devfreq-event: a framework to provide raw data and events of devfreq devices
14 #include <linux/devfreq-event.h>
25 /* The list of all devfreq event list */
32 * devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
33 * the enable_count of devfreq-event dev.
34 * @edev : the devfreq-event device
37 * devfreq-event device. The devfreq-event device should be enabled before
63 * devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
64 * the enable_count of the devfreq-event dev.
65 * @edev : the devfreq-event device
68 * devfreq-event device. After the devfreq-event device is disabled,
69 * devfreq device can't use the devfreq-event device for get/set/reset
101 * devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
103 * @edev : the devfreq-event device
105 * Note that this function check whether devfreq-event dev is enabled or not.
106 * If return true, the devfreq-event dev is enabeld. If return false, the
107 * devfreq-event dev is disabled.
128 * devfreq_event_set_event() - Set event to devfreq-event dev to start.
129 * @edev : the devfreq-event device
131 * Note that this function sets the event on the devfreq-event device to start
132 * gathering the event data, which may be of various event types.
156 * devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
157 * @edev : the devfreq-event device
158 * @edata : the calculated data of devfreq-event device
160 * Note that this function gets the calculated event data from the devfreq-event dev
161 * after stopping the whole measurement sequence of the devfreq-event dev.
190 * devfreq_event_reset_event() - Reset all operations of devfreq-event dev.
191 * @edev : the devfreq-event device
193 * Note that this function stops all operations of the devfreq-event dev and resets
194 * the current event data to return the devfreq-event device to its initial state.
216 * devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
219 * @index : the index into list of devfreq-event device
221 * Note that this function returns a pointer to the devfreq-event device.
251 dev_err(dev, "unable to get devfreq-event device : %s\n", devfreq_event_get_edev_by_phandle()
264 * devfreq_event_get_edev_count() - Get the count of devfreq-event dev
267 * Note that this function returns the count of devfreq-event devices.
282 "failed to get the count of devfreq-event in %s node\n", devfreq_event_get_edev_count()
299 * devfreq_event_add_edev() - Add new devfreq-event device.
300 * @dev : the device owning the devfreq-event device being created
301 * @desc : the devfreq-event device's descriptor, which includes essential
302 * data for devfreq-event device.
304 * Note that this function adds a new devfreq-event device to the devfreq-event class
305 * list and registers the device of the devfreq-event device.
334 dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1); devfreq_event_add_edev()
353 * devfreq_event_remove_edev() - Remove the devfreq-event device registered.
354 * @dev : the devfreq-event device
356 * Note that this function removes the registered devfreq-event device.
392 * @dev : the device owning the devfreq-event device being created
393 * @desc : the devfreq-event device's descriptor, which includes essential
394 * data for devfreq-event device.
396 * Note that this function automatically manages the memory of the devfreq-event
398 * for memory of devfreq-event device.
424 * @dev : the device owning the devfreq-event device being created
425 * @edev : the devfreq-event device
427 * Note that this function automatically manages the memory of the devfreq-event
439 * Device attributes for devfreq-event class.
474 devfreq_event_class = class_create(THIS_MODULE, "devfreq-event"); devfreq_event_init()
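The devfreq-event kernel-doc above implies a simple life cycle for a user of the framework: enable the event device, start counting with devfreq_event_set_event(), read the accumulated counts with devfreq_event_get_event(), then disable it. The sketch below follows that order; the struct type names and the exact shape of the returned data are assumptions, only the function names and the {load|total}_count wording come from the hits above.

/* Rough usage sketch of the devfreq-event consumer API. */
static int sample_counters_sketch(struct devfreq_event_dev *edev)
{
	struct devfreq_event_data edata;	/* type name assumed */
	int ret;

	ret = devfreq_event_enable_edev(edev);
	if (ret)
		return ret;

	ret = devfreq_event_set_event(edev);	/* start counting */
	if (ret)
		goto out;

	/* ... let the workload run ... */

	ret = devfreq_event_get_event(edev, &edata);	/* stop and read */
	if (!ret)
		pr_info("load %lu / total %lu\n",
			(unsigned long)edata.load_count,
			(unsigned long)edata.total_count);
out:
	devfreq_event_disable_edev(edev);
	return ret;
}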
/linux-4.4.14/drivers/perf/
H A Darm_pmu.c9 * This code is based on the sparc64 perf event code, which is in turn based
77 armpmu_map_event(struct perf_event *event, armpmu_map_event() argument
85 u64 config = event->attr.config; armpmu_map_event()
86 int type = event->attr.type; armpmu_map_event()
88 if (type == event->pmu->type) armpmu_map_event()
103 int armpmu_event_set_period(struct perf_event *event) armpmu_event_set_period() argument
105 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_event_set_period()
106 struct hw_perf_event *hwc = &event->hw; armpmu_event_set_period()
136 armpmu->write_counter(event, (u64)(-left) & 0xffffffff); armpmu_event_set_period()
138 perf_event_update_userpage(event); armpmu_event_set_period()
143 u64 armpmu_event_update(struct perf_event *event) armpmu_event_update() argument
145 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_event_update()
146 struct hw_perf_event *hwc = &event->hw; armpmu_event_update()
151 new_raw_count = armpmu->read_counter(event); armpmu_event_update()
159 local64_add(delta, &event->count); armpmu_event_update()
166 armpmu_read(struct perf_event *event) armpmu_read() argument
168 armpmu_event_update(event); armpmu_read()
172 armpmu_stop(struct perf_event *event, int flags) armpmu_stop() argument
174 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_stop()
175 struct hw_perf_event *hwc = &event->hw; armpmu_stop()
182 armpmu->disable(event); armpmu_stop()
183 armpmu_event_update(event); armpmu_stop()
188 static void armpmu_start(struct perf_event *event, int flags) armpmu_start() argument
190 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_start()
191 struct hw_perf_event *hwc = &event->hw; armpmu_start()
208 armpmu_event_set_period(event); armpmu_start()
209 armpmu->enable(event); armpmu_start()
213 armpmu_del(struct perf_event *event, int flags) armpmu_del() argument
215 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_del()
217 struct hw_perf_event *hwc = &event->hw; armpmu_del()
220 armpmu_stop(event, PERF_EF_UPDATE); armpmu_del()
224 armpmu->clear_event_idx(hw_events, event); armpmu_del()
226 perf_event_update_userpage(event); armpmu_del()
230 armpmu_add(struct perf_event *event, int flags) armpmu_add() argument
232 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_add()
234 struct hw_perf_event *hwc = &event->hw; armpmu_add()
238 /* An event following a process won't be stopped earlier */ armpmu_add()
242 perf_pmu_disable(event->pmu); armpmu_add()
245 idx = armpmu->get_event_idx(hw_events, event); armpmu_add()
252 * If there is an event in the counter we are going to use then make armpmu_add()
255 event->hw.idx = idx; armpmu_add()
256 armpmu->disable(event); armpmu_add()
257 hw_events->events[idx] = event; armpmu_add()
261 armpmu_start(event, PERF_EF_RELOAD); armpmu_add()
264 perf_event_update_userpage(event); armpmu_add()
267 perf_pmu_enable(event->pmu); armpmu_add()
273 struct perf_event *event) validate_event()
277 if (is_software_event(event)) validate_event()
283 * until after pmu->event_init(event). validate_event()
285 if (event->pmu != pmu) validate_event()
288 if (event->state < PERF_EVENT_STATE_OFF) validate_event()
291 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) validate_event()
294 armpmu = to_arm_pmu(event->pmu); validate_event()
295 return armpmu->get_event_idx(hw_events, event) >= 0; validate_event()
299 validate_group(struct perf_event *event) validate_group() argument
301 struct perf_event *sibling, *leader = event->group_leader; validate_group()
310 if (!validate_event(event->pmu, &fake_pmu, leader)) validate_group()
314 if (!validate_event(event->pmu, &fake_pmu, sibling)) validate_group()
318 if (!validate_event(event->pmu, &fake_pmu, event)) validate_group()
372 hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
374 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); hw_perf_event_destroy()
392 __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
394 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); __hw_perf_event_init()
395 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
398 mapping = armpmu->map_event(event); __hw_perf_event_init()
401 pr_debug("event %x:%llx not supported\n", event->attr.type, __hw_perf_event_init()
402 event->attr.config); __hw_perf_event_init()
407 * We don't assign an index until we actually place the event onto __hw_perf_event_init()
421 armpmu->set_event_filter(hwc, &event->attr)) && __hw_perf_event_init()
422 event_requires_mode_exclusion(&event->attr)) { __hw_perf_event_init()
429 * Store the event encoding into the config_base field. __hw_perf_event_init()
433 if (!is_sampling_event(event)) { __hw_perf_event_init()
445 if (event->group_leader != event) { __hw_perf_event_init()
446 if (validate_group(event) != 0) __hw_perf_event_init()
453 static int armpmu_event_init(struct perf_event *event) armpmu_event_init() argument
455 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_event_init()
462 * event->cpu == -1) can be migrated between CPUs, and thus we have to armpmu_event_init()
466 if (event->cpu != -1 && armpmu_event_init()
467 !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus)) armpmu_event_init()
471 if (has_branch_stack(event)) armpmu_event_init()
474 if (armpmu->map_event(event) == -ENOENT) armpmu_event_init()
477 event->destroy = hw_perf_event_destroy; armpmu_event_init()
492 err = __hw_perf_event_init(event); armpmu_event_init()
494 hw_perf_event_destroy(event); armpmu_event_init()
529 static int armpmu_filter_match(struct perf_event *event) armpmu_filter_match() argument
531 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_filter_match()
272 validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, struct perf_event *event) validate_event() argument
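armpmu_event_set_period() above programs the hardware counter with (u64)(-left) & 0xffffffff so that it overflows after exactly 'left' increments, and armpmu_event_update() later folds the observed delta back into event->count. The standalone user-space snippet below (plain C, not kernel code) just demonstrates that wrap-around arithmetic for a 32-bit counter.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t left = 1000;
	uint32_t start = (uint64_t)(-left) & 0xffffffff;	/* 0xfffffc18 */
	uint32_t counter = start;
	uint64_t ticks = 0;

	while (counter != 0) {		/* the counter wraps to 0 at overflow */
		counter++;
		ticks++;
	}
	printf("programmed 0x%08x, overflowed after %llu events\n",
	       start, (unsigned long long)ticks);
	return 0;
}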
/linux-4.4.14/drivers/net/wireless/mwifiex/
H A Duap_event.c2 * Marvell Wireless LAN device driver: AP event handling
27 struct sk_buff *event) mwifiex_check_uap_capabilties()
37 skb_pull(event, MWIFIEX_BSS_START_EVT_FIX_SIZE); mwifiex_check_uap_capabilties()
38 evt_len = event->len; mwifiex_check_uap_capabilties()
39 curr = event->data; mwifiex_check_uap_capabilties()
42 event->data, event->len); mwifiex_check_uap_capabilties()
44 skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE); mwifiex_check_uap_capabilties()
95 * upon the generated event cause.
112 struct mwifiex_assoc_event *event; mwifiex_process_uap_event() local
121 event = (struct mwifiex_assoc_event *) mwifiex_process_uap_event()
123 if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { mwifiex_process_uap_event()
126 if (ieee80211_is_assoc_req(event->frame_control)) mwifiex_process_uap_event()
128 else if (ieee80211_is_reassoc_req(event->frame_control)) mwifiex_process_uap_event()
135 sinfo.assoc_req_ies = &event->data[len]; mwifiex_process_uap_event()
137 (u8 *)&event->frame_control; mwifiex_process_uap_event()
139 le16_to_cpu(event->len) - (u16)len; mwifiex_process_uap_event()
142 cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo, mwifiex_process_uap_event()
145 node = mwifiex_add_sta_entry(priv, event->sta_addr); mwifiex_process_uap_event()
191 "AP EVENT: event id: %#x\n", eventcause); mwifiex_process_uap_event()
202 "AP EVENT: event id: %#x\n", eventcause); mwifiex_process_uap_event()
207 "event: AMSDU_AGGR_CTRL %d\n", ctrl); mwifiex_process_uap_event()
213 "event: tx_buf_size %d\n", mwifiex_process_uap_event()
218 mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n"); mwifiex_process_uap_event()
225 mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n"); mwifiex_process_uap_event()
230 mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n"); mwifiex_process_uap_event()
237 mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n"); mwifiex_process_uap_event()
243 mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n"); mwifiex_process_uap_event()
260 "event: PPS/UAPSD mode activated\n"); mwifiex_process_uap_event()
288 mwifiex_dbg(adapter, EVENT, "event: Channel Report\n"); mwifiex_process_uap_event()
292 mwifiex_dbg(adapter, EVENT, "event: Radar detected\n"); mwifiex_process_uap_event()
301 mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n"); mwifiex_process_uap_event()
306 mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n"); mwifiex_process_uap_event()
312 "event: unknown event id: %#x\n", eventcause); mwifiex_process_uap_event()
26 mwifiex_check_uap_capabilties(struct mwifiex_private *priv, struct sk_buff *event) mwifiex_check_uap_capabilties() argument
H A Dsta_event.c2 * Marvell Wireless LAN device driver: station event handling
31 * The function is invoked after receiving a disconnect event from firmware,
41 * - Sends a disconnect event to upper layers/applications.
52 "info: handles disconnect event\n"); mwifiex_reset_connect_state()
159 /* reserved 2 bytes are not mandatory in tdls event */ mwifiex_parse_tdls_event()
162 mwifiex_dbg(adapter, ERROR, "Invalid event length!\n"); mwifiex_parse_tdls_event()
329 "unknown TLV in chan_info event\n"); mwifiex_process_multi_chan_event()
365 "in multi channel event\n"); mwifiex_process_multi_chan_event()
400 "tx_pause event while disconnected; bss_role=%d\n", mwifiex_process_tx_pause_event()
496 * upon the generated event cause.
554 mwifiex_dbg(adapter, EVENT, "event: LINK_SENSED\n"); mwifiex_process_sta_event()
561 mwifiex_dbg(adapter, EVENT, "event: Deauthenticated\n"); mwifiex_process_sta_event()
564 "info: receive deauth event in wps session\n"); mwifiex_process_sta_event()
576 mwifiex_dbg(adapter, EVENT, "event: Disassociated\n"); mwifiex_process_sta_event()
579 "info: receive disassoc event in wps session\n"); mwifiex_process_sta_event()
591 mwifiex_dbg(adapter, EVENT, "event: Link lost\n"); mwifiex_process_sta_event()
614 "event: PPS/UAPSD mode activated\n"); mwifiex_process_sta_event()
646 mwifiex_dbg(adapter, EVENT, "event: DS_AWAKE\n"); mwifiex_process_sta_event()
652 mwifiex_dbg(adapter, EVENT, "event: HS_ACT_REQ\n"); mwifiex_process_sta_event()
658 mwifiex_dbg(adapter, EVENT, "event: UNICAST MIC ERROR\n"); mwifiex_process_sta_event()
665 mwifiex_dbg(adapter, EVENT, "event: MULTICAST MIC ERROR\n"); mwifiex_process_sta_event()
675 mwifiex_dbg(adapter, EVENT, "event: ADHOC_BCN_LOST\n"); mwifiex_process_sta_event()
684 mwifiex_dbg(adapter, EVENT, "event: BGS_REPORT\n"); mwifiex_process_sta_event()
690 mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n"); mwifiex_process_sta_event()
695 mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n"); mwifiex_process_sta_event()
703 mwifiex_dbg(adapter, EVENT, "event: WMM status changed\n"); mwifiex_process_sta_event()
715 mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_LOW\n"); mwifiex_process_sta_event()
718 mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_LOW\n"); mwifiex_process_sta_event()
721 mwifiex_dbg(adapter, EVENT, "event: MAX_FAIL\n"); mwifiex_process_sta_event()
730 mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_HIGH\n"); mwifiex_process_sta_event()
733 mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_HIGH\n"); mwifiex_process_sta_event()
736 mwifiex_dbg(adapter, EVENT, "event: Data RSSI_LOW\n"); mwifiex_process_sta_event()
739 mwifiex_dbg(adapter, EVENT, "event: Data SNR_LOW\n"); mwifiex_process_sta_event()
742 mwifiex_dbg(adapter, EVENT, "event: Data RSSI_HIGH\n"); mwifiex_process_sta_event()
745 mwifiex_dbg(adapter, EVENT, "event: Data SNR_HIGH\n"); mwifiex_process_sta_event()
748 mwifiex_dbg(adapter, EVENT, "event: Link Quality\n"); mwifiex_process_sta_event()
751 mwifiex_dbg(adapter, EVENT, "event: Pre-Beacon Lost\n"); mwifiex_process_sta_event()
754 mwifiex_dbg(adapter, EVENT, "event: IBSS_COALESCED\n"); mwifiex_process_sta_event()
760 mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n"); mwifiex_process_sta_event()
766 mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n"); mwifiex_process_sta_event()
770 mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n"); mwifiex_process_sta_event()
779 "event: AMSDU_AGGR_CTRL %d\n", ctrl); mwifiex_process_sta_event()
783 mwifiex_dbg(adapter, EVENT, "event: tx_buf_size %d\n", mwifiex_process_sta_event()
788 mwifiex_dbg(adapter, EVENT, "event: WEP ICV error\n"); mwifiex_process_sta_event()
792 mwifiex_dbg(adapter, EVENT, "event: BW Change\n"); mwifiex_process_sta_event()
797 "event: HOSTWAKE_STAIE %d\n", eventcause); mwifiex_process_sta_event()
802 "event: Remain on channel expired\n"); mwifiex_process_sta_event()
813 mwifiex_dbg(adapter, EVENT, "event: Channel Switch Announcement\n"); mwifiex_process_sta_event()
828 mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n"); mwifiex_process_sta_event()
833 mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n"); mwifiex_process_sta_event()
838 mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n"); mwifiex_process_sta_event()
843 mwifiex_dbg(adapter, EVENT, "event: Channel Report\n"); mwifiex_process_sta_event()
848 mwifiex_dbg(adapter, EVENT, "event: Radar detected\n"); mwifiex_process_sta_event()
858 mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n", mwifiex_process_sta_event()
/linux-4.4.14/arch/arc/kernel/
H A Dperf_event.c33 * an event. A 0 means that the counter can be used.
84 /* read counter #idx; note that counter# != event# on ARC! */ arc_pmu_read_counter()
103 static void arc_perf_event_update(struct perf_event *event, arc_perf_event_update() argument
115 local64_add(delta, &event->count); arc_perf_event_update()
119 static void arc_pmu_read(struct perf_event *event) arc_pmu_read() argument
121 arc_perf_event_update(event, &event->hw, event->hw.idx); arc_pmu_read()
144 pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n", arc_pmu_cache_event()
151 /* initializes hw_perf_event structure if event is supported */ arc_pmu_event_init()
152 static int arc_pmu_event_init(struct perf_event *event) arc_pmu_event_init() argument
154 struct hw_perf_event *hwc = &event->hw; arc_pmu_event_init()
157 if (!is_sampling_event(event)) { arc_pmu_event_init()
167 if (event->attr.exclude_user) arc_pmu_event_init()
171 if (event->attr.exclude_kernel) arc_pmu_event_init()
175 switch (event->attr.type) { arc_pmu_event_init()
177 if (event->attr.config >= PERF_COUNT_HW_MAX) arc_pmu_event_init()
179 if (arc_pmu->ev_hw_idx[event->attr.config] < 0) arc_pmu_event_init()
181 hwc->config |= arc_pmu->ev_hw_idx[event->attr.config]; arc_pmu_event_init()
182 pr_debug("init event %d with h/w %d \'%s\'\n", arc_pmu_event_init()
183 (int) event->attr.config, (int) hwc->config, arc_pmu_event_init()
184 arc_pmu_ev_hw_map[event->attr.config]); arc_pmu_event_init()
188 ret = arc_pmu_cache_event(event->attr.config); arc_pmu_event_init()
214 static int arc_pmu_event_set_period(struct perf_event *event) arc_pmu_event_set_period() argument
216 struct hw_perf_event *hwc = &event->hw; arc_pmu_event_set_period()
250 perf_event_update_userpage(event); arc_pmu_event_set_period()
260 static void arc_pmu_start(struct perf_event *event, int flags) arc_pmu_start() argument
262 struct hw_perf_event *hwc = &event->hw; arc_pmu_start()
273 arc_pmu_event_set_period(event); arc_pmu_start()
276 if (is_sampling_event(event)) arc_pmu_start()
285 static void arc_pmu_stop(struct perf_event *event, int flags) arc_pmu_stop() argument
287 struct hw_perf_event *hwc = &event->hw; arc_pmu_stop()
291 if (is_sampling_event(event)) { arc_pmu_stop()
301 if (!(event->hw.state & PERF_HES_STOPPED)) { arc_pmu_stop()
308 event->hw.state |= PERF_HES_STOPPED; arc_pmu_stop()
312 !(event->hw.state & PERF_HES_UPTODATE)) { arc_pmu_stop()
313 arc_perf_event_update(event, &event->hw, idx); arc_pmu_stop()
314 event->hw.state |= PERF_HES_UPTODATE; arc_pmu_stop()
318 static void arc_pmu_del(struct perf_event *event, int flags) arc_pmu_del() argument
322 arc_pmu_stop(event, PERF_EF_UPDATE); arc_pmu_del()
323 __clear_bit(event->hw.idx, pmu_cpu->used_mask); arc_pmu_del()
325 pmu_cpu->act_counter[event->hw.idx] = 0; arc_pmu_del()
327 perf_event_update_userpage(event); arc_pmu_del()
331 static int arc_pmu_add(struct perf_event *event, int flags) arc_pmu_add() argument
334 struct hw_perf_event *hwc = &event->hw; arc_pmu_add()
349 pmu_cpu->act_counter[idx] = event; arc_pmu_add()
351 if (is_sampling_event(event)) { arc_pmu_add()
365 arc_pmu_start(event, PERF_EF_RELOAD); arc_pmu_add()
367 perf_event_update_userpage(event); arc_pmu_add()
388 struct perf_event *event = pmu_cpu->act_counter[idx]; arc_pmu_intr() local
405 hwc = &event->hw; arc_pmu_intr()
409 arc_perf_event_update(event, &event->hw, event->hw.idx); arc_pmu_intr()
411 if (!arc_pmu_event_set_period(event)) arc_pmu_intr()
414 if (perf_event_overflow(event, &data, regs)) arc_pmu_intr()
415 arc_pmu_stop(event, 0); arc_pmu_intr()
497 pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n", arc_pmu_device_probe()
/linux-4.4.14/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/
H A DEventClass.py7 # PerfEvent is the base class for all perf event sample, PebsEvent
8 # is a HW base Intel x86 PEBS event, and user could add more SW/HW
9 # event classes based on requirements.
15 EVTYPE_PEBS = 1 # Basic PEBS event
16 EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
20 # Currently we don't have a good way to tell the event type, but by
21 # the size of raw buffer, raw PEBS event with load latency data's
22 # size is 176 bytes, while the pure PEBS event's size is 144 bytes.
26 event = PebsEvent(name, comm, dso, symbol, raw_buf)
28 event = PebsNHM(name, comm, dso, symbol, raw_buf)
30 event = PerfEvent(name, comm, dso, symbol, raw_buf)
32 return event
46 print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
49 # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
50 # contains the context info when that event happened: the EFLAGS and
/linux-4.4.14/drivers/net/wireless/rsi/
H A Drsi_common.h35 static inline int rsi_wait_event(struct rsi_event *event, u32 timeout) rsi_wait_event() argument
40 status = wait_event_interruptible(event->event_queue, rsi_wait_event()
41 (atomic_read(&event->event_condition) == 0)); rsi_wait_event()
43 status = wait_event_interruptible_timeout(event->event_queue, rsi_wait_event()
44 (atomic_read(&event->event_condition) == 0), rsi_wait_event()
49 static inline void rsi_set_event(struct rsi_event *event) rsi_set_event() argument
51 atomic_set(&event->event_condition, 0); rsi_set_event()
52 wake_up_interruptible(&event->event_queue); rsi_set_event()
55 static inline void rsi_reset_event(struct rsi_event *event) rsi_reset_event() argument
57 atomic_set(&event->event_condition, 1); rsi_reset_event()
76 rsi_set_event(&handle->event); rsi_kill_thread()
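rsi_common.h above wraps a wait queue and an atomic flag into a tiny event primitive: rsi_reset_event() arms the condition, rsi_wait_event() sleeps until it clears, and rsi_set_event() clears it and wakes the sleeper. A minimal usage sketch; the assumption that a timeout of 0 selects the untimed wait path is ours, as that branch is not visible in the hits above.

/* Waiting side, e.g. a driver thread. */
static void waiter_sketch(struct rsi_event *event)
{
	rsi_reset_event(event);		/* arm: event_condition = 1 */
	rsi_wait_event(event, 0);	/* 0 assumed to mean "no timeout" */
	/* ... the event has fired ... */
}

/* Signalling side, e.g. a completion or interrupt path. */
static void signaller_sketch(struct rsi_event *event)
{
	rsi_set_event(event);		/* event_condition = 0, wake the waiter */
}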
/linux-4.4.14/include/linux/iio/
H A Devents.h1 /* The industrial I/O - event passing to userspace
16 * IIO_EVENT_CODE() - create event identifier
18 * @diff: Whether the event is for a differential channel or not.
20 * @direction: Direction of the event. One of enum iio_event_direction.
21 * @type: Type of the event. Should be one of enum iio_event_type.
36 * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
40 * @type: Type of the event. Should be one of enum iio_event_type.
41 * @direction: Direction of the event. One of enum iio_event_direction.
49 * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
52 * @type: Type of the event. Should be one of enum iio_event_type.
53 * @direction: Direction of the event. One of enum iio_event_direction.
/linux-4.4.14/arch/tile/kernel/
H A Dperf_event.c17 * This code is based upon the x86 perf event
125 /* TILEPro hardware cache event map */
345 * Check whether perf event is enabled.
407 * Enable performance event by setting
410 static inline void tile_pmu_enable_event(struct perf_event *event) tile_pmu_enable_event() argument
412 struct hw_perf_event *hwc = &event->hw; tile_pmu_enable_event()
445 /* Clear mask bits to enable the event. */ tile_pmu_enable_event()
456 * Disable performance event by clearing
459 static inline void tile_pmu_disable_event(struct perf_event *event) tile_pmu_disable_event() argument
461 struct hw_perf_event *hwc = &event->hw; tile_pmu_disable_event()
488 /* Set mask bits to disable the event. */ tile_pmu_disable_event()
498 * Propagate event elapsed time into the generic event.
499 * Can only be executed on the CPU where the event is active.
502 static u64 tile_perf_event_update(struct perf_event *event) tile_perf_event_update() argument
504 struct hw_perf_event *hwc = &event->hw; tile_perf_event_update()
512 * Careful: an NMI might modify the previous event value. tile_perf_event_update()
516 * count to the generic event atomically: tile_perf_event_update()
530 * (event-)time and add that to the generic event. tile_perf_event_update()
538 local64_add(delta, &event->count); tile_perf_event_update()
546 * To be called with the event disabled in hw:
548 static int tile_event_set_period(struct perf_event *event) tile_event_set_period() argument
550 struct hw_perf_event *hwc = &event->hw; tile_event_set_period()
576 * The hw event starts counting from this event offset, tile_event_set_period()
583 perf_event_update_userpage(event); tile_event_set_period()
589 * Stop the event but do not release the PMU counter
591 static void tile_pmu_stop(struct perf_event *event, int flags) tile_pmu_stop() argument
594 struct hw_perf_event *hwc = &event->hw; tile_pmu_stop()
598 tile_pmu_disable_event(event); tile_pmu_stop()
606 * Drain the remaining delta count out of an event tile_pmu_stop()
609 tile_perf_event_update(event); tile_pmu_stop()
615 * Start an event (without re-assigning counter)
617 static void tile_pmu_start(struct perf_event *event, int flags) tile_pmu_start() argument
620 int idx = event->hw.idx; tile_pmu_start()
622 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) tile_pmu_start()
629 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); tile_pmu_start()
630 tile_event_set_period(event); tile_pmu_start()
633 event->hw.state = 0; tile_pmu_start()
635 cpuc->events[idx] = event; tile_pmu_start()
640 tile_pmu_enable_event(event); tile_pmu_start()
642 perf_event_update_userpage(event); tile_pmu_start()
646 * Add a single event to the PMU.
648 * The event is added to the group of enabled events
651 static int tile_pmu_add(struct perf_event *event, int flags) tile_pmu_add() argument
658 hwc = &event->hw; tile_pmu_add()
666 cpuc->event_list[cpuc->n_events] = event; tile_pmu_add()
687 * Assign counter to event. tile_pmu_add()
689 event->hw.idx = b; tile_pmu_add()
696 tile_pmu_start(event, PERF_EF_RELOAD); tile_pmu_add()
702 * Delete a single event from the PMU.
704 * The event is deleted from the group of enabled events.
705 * If it is the last event, disable PMU interrupt.
707 static void tile_pmu_del(struct perf_event *event, int flags) tile_pmu_del() argument
713 * Remove event from list, compact list if necessary. tile_pmu_del()
716 if (cpuc->event_list[i] == event) { tile_pmu_del()
720 cpuc->events[event->hw.idx] = NULL; tile_pmu_del()
721 __clear_bit(event->hw.idx, &cpuc->used_mask); tile_pmu_del()
722 tile_pmu_stop(event, PERF_EF_UPDATE); tile_pmu_del()
731 perf_event_update_userpage(event); tile_pmu_del()
735 * Propagate event elapsed time into the event.
737 static inline void tile_pmu_read(struct perf_event *event) tile_pmu_read() argument
739 tile_perf_event_update(event); tile_pmu_read()
782 static void tile_event_destroy(struct perf_event *event) tile_event_destroy() argument
788 static int __tile_event_init(struct perf_event *event) __tile_event_init() argument
790 struct perf_event_attr *attr = &event->attr; __tile_event_init()
791 struct hw_perf_event *hwc = &event->hw; __tile_event_init()
829 event->destroy = tile_event_destroy; __tile_event_init()
833 static int tile_event_init(struct perf_event *event) tile_event_init() argument
848 switch (event->attr.type) { tile_event_init()
858 err = __tile_event_init(event); tile_event_init()
860 if (event->destroy) tile_event_init()
861 event->destroy(event); tile_event_init()
884 struct perf_event *event; tile_pmu_handle_irq() local
900 event = cpuc->events[bit]; tile_pmu_handle_irq()
902 if (!event) tile_pmu_handle_irq()
908 hwc = &event->hw; tile_pmu_handle_irq()
910 val = tile_perf_event_update(event); tile_pmu_handle_irq()
914 perf_sample_data_init(&data, 0, event->hw.last_period); tile_pmu_handle_irq()
915 if (!tile_event_set_period(event)) tile_pmu_handle_irq()
918 if (perf_event_overflow(event, &data, regs)) tile_pmu_handle_irq()
919 tile_pmu_stop(event, 0); tile_pmu_handle_irq()
/linux-4.4.14/arch/alpha/oprofile/
H A Dop_model_ev5.c32 /* Select desired events. The event numbers are selected such common_reg_setup()
33 that they map directly into the event selection fields: common_reg_setup()
43 These event numbers are canonicalized to their first appearance. */ common_reg_setup()
47 unsigned long event = ctr[i].event; common_reg_setup() local
53 if (event == 0) common_reg_setup()
54 event = 12+48; common_reg_setup()
55 else if (event == 2+41) common_reg_setup()
56 event = 4+65; common_reg_setup()
59 /* Convert the event numbers onto mux_select bit mask. */ common_reg_setup()
60 if (event < 2) common_reg_setup()
61 ctl |= event << 31; common_reg_setup()
62 else if (event < 24) common_reg_setup()
64 else if (event < 40) common_reg_setup()
65 ctl |= (event - 24) << 4; common_reg_setup()
66 else if (event < 48) common_reg_setup()
67 ctl |= (event - 40) << cbox1_ofs | 15 << 4; common_reg_setup()
68 else if (event < 64) common_reg_setup()
69 ctl |= event - 48; common_reg_setup()
70 else if (event < 72) common_reg_setup()
71 ctl |= (event - 64) << cbox2_ofs | 15; common_reg_setup()
/linux-4.4.14/include/uapi/linux/iio/
H A Devents.h1 /* The industrial I/O - event passing to userspace
16 * struct iio_event_data - The actual event being pushed to userspace
17 * @id: event identifier
18 * @timestamp: best estimate of time of event occurrence (often from
34 /* Event code number extraction depends on which type of event we have.
/linux-4.4.14/drivers/char/
H A Dsnsc_event.c12 * System controller event handler
60 * Break an event (as read from SAL) into useful pieces so we can decide
64 scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) scdrv_parse_event() argument
68 /* record event source address */ scdrv_parse_event()
69 *src = get_unaligned_be32(event); scdrv_parse_event()
70 event += 4; /* move on to event code */ scdrv_parse_event()
72 /* record the system controller's event code */ scdrv_parse_event()
73 *code = get_unaligned_be32(event); scdrv_parse_event()
74 event += 4; /* move on to event arguments */ scdrv_parse_event()
77 if (*event++ != 2) { scdrv_parse_event()
83 if (*event++ != IR_ARG_INT) { scdrv_parse_event()
87 *esp_code = get_unaligned_be32(event); scdrv_parse_event()
88 event += 4; scdrv_parse_event()
90 /* parse out the event description */ scdrv_parse_event()
91 if (*event++ != IR_ARG_ASCII) { scdrv_parse_event()
95 event[CHUNKSIZE-1] = '\0'; /* ensure this string ends! */ scdrv_parse_event()
96 event += 2; /* skip leading CR/LF */ scdrv_parse_event()
97 desc_end = desc + sprintf(desc, "%s", event); scdrv_parse_event()
180 * Do the right thing with an incoming event. That's often nothing
185 scdrv_dispatch_event(char *event, int len) scdrv_dispatch_event() argument
192 if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) { scdrv_dispatch_event()
193 /* ignore uninterpretable event */ scdrv_dispatch_event()
208 /* give a message for each type of event */ scdrv_dispatch_event()
229 * Called as a tasklet when an event arrives from the L1. Read the event
263 * Sets up a system controller subchannel to begin receiving event
275 " for event monitoring\n", __func__); scdrv_event_init()
288 printk(KERN_WARNING "%s: couldn't open event subchannel\n", scdrv_event_init()
293 /* hook event subchannel up to the system controller interrupt */ scdrv_event_init()
/linux-4.4.14/net/irda/ircomm/
H A Dircomm_event.c41 static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
43 static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
45 static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
47 static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
76 static int (*state[])(struct ircomm_cb *self, IRCOMM_EVENT event,
86 * Function ircomm_state_idle (self, event, skb)
91 static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_state_idle() argument
96 switch (event) { ircomm_state_idle()
107 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_state_idle()
108 ircomm_event[event]); ircomm_state_idle()
115 * Function ircomm_state_waiti (self, event, skb)
120 static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_state_waiti() argument
125 switch (event) { ircomm_state_waiti()
137 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_state_waiti()
138 ircomm_event[event]); ircomm_state_waiti()
145 * Function ircomm_state_waitr (self, event, skb)
150 static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_state_waitr() argument
155 switch (event) { ircomm_state_waitr()
170 pr_debug("%s(), unknown event = %s\n", __func__ , ircomm_state_waitr()
171 ircomm_event[event]); ircomm_state_waitr()
178 * Function ircomm_state_conn (self, event, skb)
183 static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_state_conn() argument
188 switch (event) { ircomm_state_conn()
212 pr_debug("%s(), unknown event = %s\n", __func__ , ircomm_state_conn()
213 ircomm_event[event]); ircomm_state_conn()
220 * Function ircomm_do_event (self, event, skb)
222 * Process event
225 int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_do_event() argument
228 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_do_event()
229 ircomm_state[self->state], ircomm_event[event]); ircomm_do_event()
231 return (*state[self->state])(self, event, skb, info); ircomm_do_event()
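ircomm_event.c above is a table-driven state machine: one handler per state, indexed by the current state and handed the incoming event, exactly as in the final line, return (*state[self->state])(self, event, skb, info). The standalone snippet below (plain C, hypothetical states and events, not the IrCOMM code) shows the same dispatch pattern in miniature.

#include <stdio.h>

enum state { S_IDLE, S_CONN, S_MAX };
enum event { EV_CONNECT, EV_DATA, EV_DISCONNECT };

struct ctx { enum state state; };

static int idle_handler(struct ctx *c, enum event ev)
{
	if (ev == EV_CONNECT) {
		c->state = S_CONN;
		return 0;
	}
	printf("idle: unknown event %d\n", ev);
	return -1;
}

static int conn_handler(struct ctx *c, enum event ev)
{
	switch (ev) {
	case EV_DATA:
		return 0;		/* deliver data */
	case EV_DISCONNECT:
		c->state = S_IDLE;
		return 0;
	default:
		printf("conn: unknown event %d\n", ev);
		return -1;
	}
}

/* One handler per state, dispatched the same way ircomm_do_event() does. */
static int (*const state_table[S_MAX])(struct ctx *, enum event) = {
	[S_IDLE] = idle_handler,
	[S_CONN] = conn_handler,
};

static int do_event(struct ctx *c, enum event ev)
{
	return (*state_table[c->state])(c, ev);
}

int main(void)
{
	struct ctx c = { .state = S_IDLE };

	do_event(&c, EV_CONNECT);
	do_event(&c, EV_DATA);
	do_event(&c, EV_DISCONNECT);
	return 0;
}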
H A Dircomm_tty_attach.c58 IRCOMM_TTY_EVENT event,
62 IRCOMM_TTY_EVENT event,
66 IRCOMM_TTY_EVENT event,
70 IRCOMM_TTY_EVENT event,
74 IRCOMM_TTY_EVENT event,
78 IRCOMM_TTY_EVENT event,
109 static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
612 * Function ircomm_tty_do_event (self, event, skb)
614 * Process event
617 int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, ircomm_tty_do_event() argument
623 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_do_event()
624 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_do_event()
626 return (*state[self->state])(self, event, skb, info); ircomm_tty_do_event()
648 * Function ircomm_tty_state_idle (self, event, skb, info)
654 IRCOMM_TTY_EVENT event, ircomm_tty_state_idle()
660 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_idle()
661 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_idle()
662 switch (event) { ircomm_tty_state_idle()
704 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_idle()
705 ircomm_tty_event[event]); ircomm_tty_state_idle()
712 * Function ircomm_tty_state_search (self, event, skb, info)
718 IRCOMM_TTY_EVENT event, ircomm_tty_state_search()
724 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_search()
725 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_search()
727 switch (event) { ircomm_tty_state_search()
777 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_search()
778 ircomm_tty_event[event]); ircomm_tty_state_search()
785 * Function ircomm_tty_state_query (self, event, skb, info)
791 IRCOMM_TTY_EVENT event, ircomm_tty_state_query_parameters()
797 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_query_parameters()
798 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_query_parameters()
800 switch (event) { ircomm_tty_state_query_parameters()
835 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_query_parameters()
836 ircomm_tty_event[event]); ircomm_tty_state_query_parameters()
843 * Function ircomm_tty_state_query_lsap_sel (self, event, skb, info)
849 IRCOMM_TTY_EVENT event, ircomm_tty_state_query_lsap_sel()
855 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_query_lsap_sel()
856 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_query_lsap_sel()
858 switch (event) { ircomm_tty_state_query_lsap_sel()
884 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_query_lsap_sel()
885 ircomm_tty_event[event]); ircomm_tty_state_query_lsap_sel()
892 * Function ircomm_tty_state_setup (self, event, skb, info)
898 IRCOMM_TTY_EVENT event, ircomm_tty_state_setup()
904 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_setup()
905 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_setup()
907 switch (event) { ircomm_tty_state_setup()
938 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_setup()
939 ircomm_tty_event[event]); ircomm_tty_state_setup()
946 * Function ircomm_tty_state_ready (self, event, skb, info)
952 IRCOMM_TTY_EVENT event, ircomm_tty_state_ready()
958 switch (event) { ircomm_tty_state_ready()
981 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_ready()
982 ircomm_tty_event[event]); ircomm_tty_state_ready()
653 ircomm_tty_state_idle(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_idle() argument
717 ircomm_tty_state_search(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_search() argument
790 ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_query_parameters() argument
848 ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_query_lsap_sel() argument
897 ircomm_tty_state_setup(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_setup() argument
951 ircomm_tty_state_ready(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_ready() argument
/linux-4.4.14/tools/perf/arch/x86/tests/
H A Dintel-cqm.c26 * Create an event group that contains both a sampled hardware
27 * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
29 * which triggers an event read for both of the events in the group.
31 * Since reading Intel CQM event counters requires sending SMP IPIs, the
43 void *event; test__intel_cqm_count_nmi_context() local
82 pr_debug("failed to open event\n"); test__intel_cqm_count_nmi_context()
94 pr_debug("failed to open event\n"); test__intel_cqm_count_nmi_context()
104 event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0); test__intel_cqm_count_nmi_context()
105 if (event == (void *)(-1)) { test__intel_cqm_count_nmi_context()
114 munmap(event, mmap_len); test__intel_cqm_count_nmi_context()
H A Dperf-time-to-tsc.c58 union perf_event *event; test__perf_time_to_tsc() local
110 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { test__perf_time_to_tsc()
113 if (event->header.type != PERF_RECORD_COMM || test__perf_time_to_tsc()
114 (pid_t)event->comm.pid != getpid() || test__perf_time_to_tsc()
115 (pid_t)event->comm.tid != getpid()) test__perf_time_to_tsc()
118 if (strcmp(event->comm.comm, comm1) == 0) { test__perf_time_to_tsc()
119 CHECK__(perf_evsel__parse_sample(evsel, event, test__perf_time_to_tsc()
123 if (strcmp(event->comm.comm, comm2) == 0) { test__perf_time_to_tsc()
124 CHECK__(perf_evsel__parse_sample(evsel, event, test__perf_time_to_tsc()
140 pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n", test__perf_time_to_tsc()
144 pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n", test__perf_time_to_tsc()
/linux-4.4.14/tools/perf/util/scripting-engines/
H A Dtrace-event-perl.c2 * trace-event-perl. Feed perf script events to an embedded Perl interpreter.
35 #include "../event.h"
36 #include "../trace-event.h"
186 static void define_event_symbols(struct event_format *event, define_event_symbols() argument
203 define_event_symbols(event, ev_name, args->flags.field); define_event_symbols()
208 define_event_symbols(event, ev_name, args->symbol.field); define_event_symbols()
214 define_event_symbols(event, ev_name, args->hex.field); define_event_symbols()
215 define_event_symbols(event, ev_name, args->hex.size); define_event_symbols()
218 define_event_symbols(event, ev_name, args->int_array.field); define_event_symbols()
219 define_event_symbols(event, ev_name, args->int_array.count); define_event_symbols()
220 define_event_symbols(event, ev_name, args->int_array.el_size); define_event_symbols()
229 define_event_symbols(event, ev_name, args->typecast.item); define_event_symbols()
234 define_event_symbols(event, ev_name, args->op.left); define_event_symbols()
235 define_event_symbols(event, ev_name, args->op.right); define_event_symbols()
245 define_event_symbols(event, ev_name, args->next); define_event_symbols()
252 struct event_format *event = evsel->tp_format; perl_process_tracepoint() local
268 if (!event) perl_process_tracepoint()
269 die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config); perl_process_tracepoint()
271 pid = raw_field_value(event, "common_pid", data); perl_process_tracepoint()
273 sprintf(handler, "%s::%s", event->system, event->name); perl_process_tracepoint()
275 if (!test_and_set_bit(event->id, events_defined)) perl_process_tracepoint()
276 define_event_symbols(event, handler, event->print_fmt.args); perl_process_tracepoint()
298 for (field = event->format.fields; field; field = field->next) { perl_process_tracepoint()
308 val = read_size(event, data + field->offset, perl_process_tracepoint()
337 static void perl_process_event_generic(union perf_event *event, perl_process_event_generic() argument
349 XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size))); perl_process_event_generic()
361 static void perl_process_event(union perf_event *event, perl_process_event() argument
367 perl_process_event_generic(event, sample, evsel); perl_process_event()
447 struct event_format *event = NULL; perl_generate_script() local
460 fprintf(ofp, "# perf script event handlers, " perl_generate_script()
466 fprintf(ofp, "# The common_* event handler fields are the most useful " perl_generate_script()
492 while ((event = trace_find_next_event(pevent, event))) { perl_generate_script()
493 fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); perl_generate_script()
507 for (f = event->format.fields; f; f = f->next) { perl_generate_script()
526 for (f = event->format.fields; f; f = f->next) { perl_generate_script()
550 for (f = event->format.fields; f; f = f->next) { perl_generate_script()
563 fprintf(ofp, "%s::%s\", ", event->system, perl_generate_script()
564 event->name); perl_generate_script()
573 fprintf(ofp, "%s::%s\", ", event->system, perl_generate_script()
574 event->name); perl_generate_script()
601 "# $event:\tunion perf_event\tutil/event.h\n" perl_generate_script()
603 "# $sample:\tstruct perf_sample\tutil/event.h\n" perl_generate_script()
604 "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n" perl_generate_script()
608 "\tmy ($event, $attr, $sample, $raw_data) = @_;\n" perl_generate_script()
610 "\tmy @event\t= unpack(\"LSS\", $event);\n" perl_generate_script()
616 "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n" perl_generate_script()
/linux-4.4.14/drivers/misc/vmw_vmci/
H A Dvmci_event.c31 u32 event; member in struct:vmci_subscription
98 subscriber_list = &subscriber_array[event_msg->event_data.event]; list_for_each_entry_rcu()
108 * subscribers for given event.
118 if (!VMCI_EVENT_VALID(event_msg->event_data.event)) vmci_event_dispatch()
126 * vmci_event_subscribe() - Subscribe to a given event.
127 * @event: The event to subscribe to.
128 * @callback: The callback to invoke upon the event.
133 * Subscribes to the provided event. The callback specified will be
136 int vmci_event_subscribe(u32 event, vmci_event_subscribe() argument
151 if (!VMCI_EVENT_VALID(event) || !callback) { vmci_event_subscribe()
152 pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n", vmci_event_subscribe()
153 __func__, event, callback, callback_data); vmci_event_subscribe()
162 sub->event = event; vmci_event_subscribe()
169 /* Creation of a new event is always allowed. */ vmci_event_subscribe()
186 list_add_rcu(&sub->node, &subscriber_array[event]); vmci_event_subscribe()
200 * vmci_event_unsubscribe() - unsubscribe from an event.
203 * Unsubscribe from the given event. Removes it from the list and frees it.
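vmci_event.c above keeps one RCU-protected subscriber list per event type (subscriber_array[event]): subscribing allocates a node and list_add_rcu()s it onto the list for that event, and vmci_event_dispatch() walks the same list with list_for_each_entry_rcu(). A condensed sketch of the registration path; the allocation flags, error codes and the callback's exact type are placeholders, and the locking and subscriber-id handling of the real code are omitted.

/* Condensed from the vmci_event_subscribe() hits above. */
static int subscribe_sketch(u32 event, void (*callback)(void *), void *data)
{
	struct vmci_subscription *sub;

	if (!VMCI_EVENT_VALID(event) || !callback)
		return -EINVAL;		/* placeholder error code */

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (!sub)
		return -ENOMEM;

	sub->event = event;
	/* real code also records the callback, its data and a subscriber id */
	list_add_rcu(&sub->node, &subscriber_array[event]);
	return 0;
}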
/linux-4.4.14/drivers/net/fddi/skfp/
H A Dqueue.c32 * init event queue management
40 * add event to queue
42 void queue_event(struct s_smc *smc, int class, int event) queue_event() argument
44 PRINTF("queue class %d event %d\n",class,event) ; queue_event()
46 smc->q.ev_put->event = event ; queue_event()
60 PRINTF("timer event class %d token %d\n", timer_event()
67 * event dispatcher
68 * while event queue is not empty
69 * get event from queue
81 PRINTF("dispatch class %d event %d\n",ev->class,ev->event) ; ev_dispatcher()
84 ecm(smc,(int)ev->event) ; ev_dispatcher()
87 cfm(smc,(int)ev->event) ; ev_dispatcher()
90 rmt(smc,(int)ev->event) ; ev_dispatcher()
93 smt_event(smc,(int)ev->event) ; ev_dispatcher()
97 timer_test_event(smc,(int)ev->event) ; ev_dispatcher()
105 pcm(smc,class - EVENT_PCMA,(int)ev->event) ; ev_dispatcher()
/linux-4.4.14/kernel/trace/
H A Dtrace_output.c228 struct trace_event_call *event; trace_raw_output_prep() local
233 event = container_of(trace_event, struct trace_event_call, event); trace_raw_output_prep()
236 if (entry->type != event->event.type) { trace_raw_output_prep()
242 trace_seq_printf(s, "%s: ", trace_event_name(event)); trace_raw_output_prep()
595 * ftrace_find_event - find a registered event
596 * @type: the type of event to look for
598 * Returns an event of type @type otherwise NULL
603 struct trace_event *event; ftrace_find_event() local
608 hlist_for_each_entry(event, &event_hash[key], node) { ftrace_find_event()
609 if (event->type == type) ftrace_find_event()
610 return event; ftrace_find_event()
657 * register_trace_event - register output for an event type
658 * @event: the event type to register
661 * find a way to print an event. If the @event->type is set
669 * Returns the event type number or zero on error.
671 int register_trace_event(struct trace_event *event) register_trace_event() argument
678 if (WARN_ON(!event)) register_trace_event()
681 if (WARN_ON(!event->funcs)) register_trace_event()
684 INIT_LIST_HEAD(&event->list); register_trace_event()
686 if (!event->type) { register_trace_event()
691 event->type = trace_search_list(&list); register_trace_event()
692 if (!event->type) register_trace_event()
697 event->type = next_event_type++; register_trace_event()
701 if (WARN_ON(ftrace_find_event(event->type))) register_trace_event()
704 list_add_tail(&event->list, list); register_trace_event()
706 } else if (event->type > __TRACE_LAST_TYPE) { register_trace_event()
711 /* Is this event already used */ register_trace_event()
712 if (ftrace_find_event(event->type)) register_trace_event()
716 if (event->funcs->trace == NULL) register_trace_event()
717 event->funcs->trace = trace_nop_print; register_trace_event()
718 if (event->funcs->raw == NULL) register_trace_event()
719 event->funcs->raw = trace_nop_print; register_trace_event()
720 if (event->funcs->hex == NULL) register_trace_event()
721 event->funcs->hex = trace_nop_print; register_trace_event()
722 if (event->funcs->binary == NULL) register_trace_event()
723 event->funcs->binary = trace_nop_print; register_trace_event()
725 key = event->type & (EVENT_HASHSIZE - 1); register_trace_event()
727 hlist_add_head(&event->node, &event_hash[key]); register_trace_event()
729 ret = event->type; register_trace_event()
740 int __unregister_trace_event(struct trace_event *event) __unregister_trace_event() argument
742 hlist_del(&event->node); __unregister_trace_event()
743 list_del(&event->list); __unregister_trace_event()
748 * unregister_trace_event - remove a no longer used event
749 * @event: the event to remove
751 int unregister_trace_event(struct trace_event *event) unregister_trace_event() argument
754 __unregister_trace_event(event); unregister_trace_event()
766 struct trace_event *event) trace_nop_print()
775 struct trace_event *event) trace_fn_trace()
795 struct trace_event *event) trace_fn_raw()
809 struct trace_event *event) trace_fn_hex()
823 struct trace_event *event) trace_fn_bin()
876 struct trace_event *event) trace_ctx_print()
882 int flags, struct trace_event *event) trace_wake_print()
910 struct trace_event *event) trace_ctx_raw()
916 struct trace_event *event) trace_wake_raw()
946 struct trace_event *event) trace_ctx_hex()
952 struct trace_event *event) trace_wake_hex()
958 int flags, struct trace_event *event) trace_ctxwake_bin()
1003 int flags, struct trace_event *event) trace_stack_print()
1039 int flags, struct trace_event *event) trace_user_stack_print()
1100 struct trace_event *event) trace_bputs_print()
1118 struct trace_event *event) trace_bputs_raw()
1144 struct trace_event *event) trace_bprint_print()
1162 struct trace_event *event) trace_bprint_raw()
1187 int flags, struct trace_event *event) trace_print_print()
1201 struct trace_event *event) trace_print_raw()
1237 struct trace_event *event; init_events() local
1241 event = events[i]; init_events()
1243 ret = register_trace_event(event); init_events()
1245 printk(KERN_WARNING "event %d failed to register\n", init_events()
1246 event->type); init_events()
765 trace_nop_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_nop_print() argument
774 trace_fn_trace(struct trace_iterator *iter, int flags, struct trace_event *event) trace_fn_trace() argument
794 trace_fn_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_fn_raw() argument
808 trace_fn_hex(struct trace_iterator *iter, int flags, struct trace_event *event) trace_fn_hex() argument
822 trace_fn_bin(struct trace_iterator *iter, int flags, struct trace_event *event) trace_fn_bin() argument
875 trace_ctx_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_ctx_print() argument
881 trace_wake_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_wake_print() argument
909 trace_ctx_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_ctx_raw() argument
915 trace_wake_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_wake_raw() argument
945 trace_ctx_hex(struct trace_iterator *iter, int flags, struct trace_event *event) trace_ctx_hex() argument
951 trace_wake_hex(struct trace_iterator *iter, int flags, struct trace_event *event) trace_wake_hex() argument
957 trace_ctxwake_bin(struct trace_iterator *iter, int flags, struct trace_event *event) trace_ctxwake_bin() argument
1002 trace_stack_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_stack_print() argument
1038 trace_user_stack_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_user_stack_print() argument
1099 trace_bputs_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_bputs_print() argument
1117 trace_bputs_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_bputs_raw() argument
1143 trace_bprint_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_bprint_print() argument
1161 trace_bprint_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_bprint_raw() argument
1186 trace_print_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_print_print() argument
1200 trace_print_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_print_raw() argument
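register_trace_event() above assigns a free type number when the caller left event->type zero, fills any missing output callbacks with trace_nop_print, and hashes the event into event_hash with key = type & (EVENT_HASHSIZE - 1) so ftrace_find_event() can find it later. A reduced user-space sketch of that register-and-lookup pattern; the bucket count, struct fields and names are assumptions for illustration:

#include <stdio.h>

#define EVENT_HASHSIZE 128		/* assumed power-of-two bucket count */

struct trace_event {
	int type;
	const char *(*print)(void);	/* stand-in for the funcs table */
	struct trace_event *next;	/* singly linked bucket chain */
};

static struct trace_event *event_hash[EVENT_HASHSIZE];
static int next_event_type = 1;

static const char *nop_print(void) { return "<nop>"; }

static struct trace_event *find_event(int type)
{
	struct trace_event *e;

	for (e = event_hash[type & (EVENT_HASHSIZE - 1)]; e; e = e->next)
		if (e->type == type)
			return e;
	return NULL;
}

/* returns the event type number, or 0 if the requested type is already used */
static int register_event(struct trace_event *ev)
{
	if (!ev->type)
		ev->type = next_event_type++;	/* auto-assign a free type */
	else if (find_event(ev->type))
		return 0;			/* type already taken */

	if (!ev->print)
		ev->print = nop_print;		/* default output callback */

	ev->next = event_hash[ev->type & (EVENT_HASHSIZE - 1)];
	event_hash[ev->type & (EVENT_HASHSIZE - 1)] = ev;
	return ev->type;
}

int main(void)
{
	struct trace_event ev = { 0 };
	int type = register_event(&ev);

	printf("registered type %d, lookup %s\n", type,
	       find_event(type) ? "ok" : "missing");
	return 0;
}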
H A Dtrace_syscalls.c16 static int syscall_enter_register(struct trace_event_call *event,
18 static int syscall_exit_register(struct trace_event_call *event,
111 struct trace_event *event) print_syscall_enter()
127 if (entry->enter_event->event.type != ent->type) { print_syscall_enter()
158 struct trace_event *event) print_syscall_exit()
175 if (entry->exit_event->event.type != ent->type) { print_syscall_exit()
300 struct ring_buffer_event *event; ftrace_syscall_enter() local
329 event = trace_buffer_lock_reserve(buffer, ftrace_syscall_enter()
330 sys_data->enter_event->event.type, size, irq_flags, pc); ftrace_syscall_enter()
331 if (!event) ftrace_syscall_enter()
334 entry = ring_buffer_event_data(event); ftrace_syscall_enter()
338 event_trigger_unlock_commit(trace_file, buffer, event, entry, ftrace_syscall_enter()
348 struct ring_buffer_event *event; ftrace_syscall_exit() local
374 event = trace_buffer_lock_reserve(buffer, ftrace_syscall_exit()
375 sys_data->exit_event->event.type, sizeof(*entry), ftrace_syscall_exit()
377 if (!event) ftrace_syscall_exit()
380 entry = ring_buffer_event_data(event); ftrace_syscall_exit()
384 event_trigger_unlock_commit(trace_file, buffer, event, entry, ftrace_syscall_exit()
471 pr_debug("syscall %s metadata not mapped, disabling ftrace event\n", init_syscall_trace()
578 sys_data->enter_event->event.type, NULL, &rctx); perf_syscall_enter()
599 pr_info("event trace: Could not activate" perf_sysenter_enable()
651 sys_data->exit_event->event.type, NULL, &rctx); perf_syscall_exit()
671 pr_info("event trace: Could not activate" perf_sysexit_enable()
697 static int syscall_enter_register(struct trace_event_call *event, syscall_enter_register() argument
704 return reg_event_syscall_enter(file, event); syscall_enter_register()
706 unreg_event_syscall_enter(file, event); syscall_enter_register()
711 return perf_sysenter_enable(event); syscall_enter_register()
713 perf_sysenter_disable(event); syscall_enter_register()
725 static int syscall_exit_register(struct trace_event_call *event, syscall_exit_register() argument
732 return reg_event_syscall_exit(file, event); syscall_exit_register()
734 unreg_event_syscall_exit(file, event); syscall_exit_register()
739 return perf_sysexit_enable(event); syscall_exit_register()
741 perf_sysexit_disable(event); syscall_exit_register()
110 print_syscall_enter(struct trace_iterator *iter, int flags, struct trace_event *event) print_syscall_enter() argument
157 print_syscall_exit(struct trace_iterator *iter, int flags, struct trace_event *event) print_syscall_exit() argument
H A Dtrace_output.h26 int flags, struct trace_event *event);
31 extern int __unregister_trace_event(struct trace_event *event);
/linux-4.4.14/arch/powerpc/oprofile/
H A Dop_model_7450.c39 #define mmcr0_event1(event) \
40 ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
41 #define mmcr0_event2(event) \
42 ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)
44 #define mmcr1_event3(event) \
45 ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
46 #define mmcr1_event4(event) \
47 ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
48 #define mmcr1_event5(event) \
49 ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
50 #define mmcr1_event6(event) \
51 ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)
112 mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event) fsl7450_reg_setup()
113 | mmcr0_event2(ctr[1].event); fsl7450_reg_setup()
123 mmcr1_val = mmcr1_event3(ctr[2].event) fsl7450_reg_setup()
124 | mmcr1_event4(ctr[3].event); fsl7450_reg_setup()
126 mmcr1_val |= mmcr1_event5(ctr[4].event) fsl7450_reg_setup()
127 | mmcr1_event6(ctr[5].event); fsl7450_reg_setup()
171 * event that triggered the interrupt */ fsl7450_handle_interrupt()
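The mmcr0_event1()/mmcr1_event3() macros above all follow one pattern: shift the raw event selector into its per-counter field and mask it to that field's width before OR-ing the selectors together in fsl7450_reg_setup(). A generic sketch of that shift-and-mask packing; the shift and mask values here are invented for illustration and are not the 7450's real MMCR layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only; the real MMCR bit positions differ. */
#define PMC1_SHIFT 6
#define PMC1_MASK  (0x3fu << PMC1_SHIFT)
#define PMC2_SHIFT 0
#define PMC2_MASK  (0x3fu << PMC2_SHIFT)

#define mmcr_event1(event) (((uint32_t)(event) << PMC1_SHIFT) & PMC1_MASK)
#define mmcr_event2(event) (((uint32_t)(event) << PMC2_SHIFT) & PMC2_MASK)

int main(void)
{
	/* pack two event selectors into one control-register value */
	uint32_t mmcr = mmcr_event1(0x23) | mmcr_event2(0x05);

	printf("mmcr = 0x%08x (pmc1=0x%x pmc2=0x%x)\n", mmcr,
	       (mmcr & PMC1_MASK) >> PMC1_SHIFT,
	       (mmcr & PMC2_MASK) >> PMC2_SHIFT);
	return 0;
}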
/linux-4.4.14/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/
H A DCore.pm104 for my $event (keys %flag_fields) {
105 print "event $event:\n";
106 for my $field (keys %{$flag_fields{$event}}) {
108 print " delim: $flag_fields{$event}{$field}{'delim'}\n";
109 foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event}{$field}{"values"}}) {
110 print " value $idx: $flag_fields{$event}{$field}{'values'}{$idx}\n";
151 for my $event (keys %symbolic_fields) {
152 print "event $event:\n";
153 for my $field (keys %{$symbolic_fields{$event}}) {
155 foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event}{$field}{"values"}}) {
156 print " value $idx: $symbolic_fields{$event}{$field}{'values'}{$idx}\n";
/linux-4.4.14/arch/blackfin/kernel/
H A Dperf_event.c33 * We have two counters, and each counter can support an event type.
260 static void bfin_perf_event_update(struct perf_event *event, bfin_perf_event_update() argument
298 local64_add(delta, &event->count); bfin_perf_event_update()
301 static void bfin_pmu_stop(struct perf_event *event, int flags) bfin_pmu_stop() argument
304 struct hw_perf_event *hwc = &event->hw; bfin_pmu_stop()
307 if (!(event->hw.state & PERF_HES_STOPPED)) { bfin_pmu_stop()
310 event->hw.state |= PERF_HES_STOPPED; bfin_pmu_stop()
313 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { bfin_pmu_stop()
314 bfin_perf_event_update(event, &event->hw, idx); bfin_pmu_stop()
315 event->hw.state |= PERF_HES_UPTODATE; bfin_pmu_stop()
319 static void bfin_pmu_start(struct perf_event *event, int flags) bfin_pmu_start() argument
322 struct hw_perf_event *hwc = &event->hw; bfin_pmu_start()
329 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); bfin_pmu_start()
331 cpuc->events[idx] = event; bfin_pmu_start()
332 event->hw.state = 0; bfin_pmu_start()
336 static void bfin_pmu_del(struct perf_event *event, int flags) bfin_pmu_del() argument
340 bfin_pmu_stop(event, PERF_EF_UPDATE); bfin_pmu_del()
341 __clear_bit(event->hw.idx, cpuc->used_mask); bfin_pmu_del()
343 perf_event_update_userpage(event); bfin_pmu_del()
346 static int bfin_pmu_add(struct perf_event *event, int flags) bfin_pmu_add() argument
349 struct hw_perf_event *hwc = &event->hw; bfin_pmu_add()
353 perf_pmu_disable(event->pmu); bfin_pmu_add()
366 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; bfin_pmu_add()
368 bfin_pmu_start(event, PERF_EF_RELOAD); bfin_pmu_add()
370 perf_event_update_userpage(event); bfin_pmu_add()
373 perf_pmu_enable(event->pmu); bfin_pmu_add()
377 static void bfin_pmu_read(struct perf_event *event) bfin_pmu_read() argument
379 bfin_perf_event_update(event, &event->hw, event->hw.idx); bfin_pmu_read()
382 static int bfin_pmu_event_init(struct perf_event *event) bfin_pmu_event_init() argument
384 struct perf_event_attr *attr = &event->attr; bfin_pmu_event_init()
385 struct hw_perf_event *hwc = &event->hw; bfin_pmu_event_init()
425 struct perf_event *event; bfin_pmu_enable() local
430 event = cpuc->events[i]; bfin_pmu_enable()
431 if (!event) bfin_pmu_enable()
433 hwc = &event->hw; bfin_pmu_enable()
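bfin_perf_event_update() above uses the standard perf counter-update idiom: read the hardware counter, swap the new value into prev_count with a compare-and-exchange so concurrent updates are not double-counted, and add only the delta to event->count. A user-space sketch of the same idiom with C11 atomics; the counter source is faked and the names are assumptions:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t prev_count;	/* last value already accounted for */
static _Atomic uint64_t total_count;	/* accumulated event count */

/* fake "hardware" counter for the sketch */
static uint64_t read_hw_counter(void) { static uint64_t c; return c += 1000; }

static void event_update(void)
{
	uint64_t prev, new_raw;

	do {
		prev = atomic_load(&prev_count);
		new_raw = read_hw_counter();
		/* retry if another updater changed prev_count meanwhile */
	} while (!atomic_compare_exchange_weak(&prev_count, &prev, new_raw));

	atomic_fetch_add(&total_count, new_raw - prev);	/* add only the delta */
}

int main(void)
{
	event_update();
	event_update();
	printf("total = %llu\n", (unsigned long long)atomic_load(&total_count));
	return 0;
}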
/linux-4.4.14/drivers/char/tpm/
H A Dtpm_eventlog.c78 struct tcpa_event *event; tpm_bios_measurements_start() local
85 event = addr; tpm_bios_measurements_start()
88 do_endian_conversion(event->event_size); tpm_bios_measurements_start()
90 do_endian_conversion(event->event_type); tpm_bios_measurements_start()
105 event = addr; tpm_bios_measurements_start()
107 converted_event_size = do_endian_conversion(event->event_size); tpm_bios_measurements_start()
108 converted_event_type = do_endian_conversion(event->event_type); tpm_bios_measurements_start()
121 struct tcpa_event *event = v; tpm_bios_measurements_next() local
127 converted_event_size = do_endian_conversion(event->event_size); tpm_bios_measurements_next()
135 event = v; tpm_bios_measurements_next()
137 converted_event_size = do_endian_conversion(event->event_size); tpm_bios_measurements_next()
138 converted_event_type = do_endian_conversion(event->event_type); tpm_bios_measurements_next()
152 static int get_event_name(char *dest, struct tcpa_event *event, get_event_name() argument
161 switch (do_endian_conversion(event->event_type)) { get_event_name()
178 (event->event_type)]; get_event_name()
184 do_endian_conversion(event->event_size)) { get_event_name()
186 n_len = do_endian_conversion(event->event_size); get_event_name()
233 struct tcpa_event *event = v; tpm_binary_bios_measurements_show() local
238 memcpy(&temp_event, event, sizeof(struct tcpa_event)); tpm_binary_bios_measurements_show()
241 temp_event.pcr_index = do_endian_conversion(event->pcr_index); tpm_binary_bios_measurements_show()
242 temp_event.event_type = do_endian_conversion(event->event_type); tpm_binary_bios_measurements_show()
243 temp_event.event_size = do_endian_conversion(event->event_size); tpm_binary_bios_measurements_show()
278 struct tcpa_event *event = v; tpm_ascii_bios_measurements_show() local
284 printk(KERN_ERR "%s: ERROR - No Memory for event name\n ", tpm_ascii_bios_measurements_show()
290 seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index)); tpm_ascii_bios_measurements_show()
293 seq_printf(m, "%20phN", event->pcr_value); tpm_ascii_bios_measurements_show()
295 /* 3rd: event type identifier */ tpm_ascii_bios_measurements_show()
296 seq_printf(m, " %02x", do_endian_conversion(event->event_type)); tpm_ascii_bios_measurements_show()
298 len += get_event_name(eventname, event, event_entry); tpm_ascii_bios_measurements_show()
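The tpm_eventlog.c hits convert each log entry's pcr_index, event_type and event_size with do_endian_conversion() before using them, then step to the next entry past event_size bytes of data. A hedged user-space sketch of walking such a log; the struct follows the common TCG 1.2 tcpa_event shape but should be treated as an assumption, and the byte-order helper is a no-op stand-in:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed TCG 1.2-style log entry layout; treat as illustrative only. */
struct tcpa_event {
	uint32_t pcr_index;
	uint32_t event_type;
	uint8_t  pcr_value[20];
	uint32_t event_size;
	/* event_size bytes of event data follow */
};

/* stand-in for do_endian_conversion(); here the log is assumed host-order */
static uint32_t log_to_cpu32(uint32_t v) { return v; }

static void walk_log(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct tcpa_event) <= len) {
		struct tcpa_event ev;
		uint32_t size;

		memcpy(&ev, buf + off, sizeof(ev));	/* avoid unaligned access */
		size = log_to_cpu32(ev.event_size);

		printf("pcr %u type 0x%x size %u\n",
		       log_to_cpu32(ev.pcr_index),
		       log_to_cpu32(ev.event_type), size);

		off += sizeof(ev) + size;	/* next entry follows the data */
	}
}

int main(void)
{
	uint8_t log[sizeof(struct tcpa_event) + 4] = { 0 };
	struct tcpa_event ev = { .pcr_index = 7, .event_type = 1, .event_size = 4 };

	memcpy(log, &ev, sizeof(ev));
	walk_log(log, sizeof(log));
	return 0;
}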
/linux-4.4.14/tools/perf/
H A Dbuiltin-kvm.c14 #include "util/trace-event.h"
130 struct kvm_event *event; clear_events_cache_stats() local
136 list_for_each_entry(event, head, hash_entry) { list_for_each_entry()
137 /* reset stats for event */ list_for_each_entry()
138 event->total.time = 0; list_for_each_entry()
139 init_stats(&event->total.stats); list_for_each_entry()
141 for (j = 0; j < event->max_vcpu; ++j) { list_for_each_entry()
142 event->vcpu[j].time = 0; list_for_each_entry()
143 init_stats(&event->vcpu[j].stats); list_for_each_entry()
155 static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) kvm_event_expand() argument
157 int old_max_vcpu = event->max_vcpu; kvm_event_expand()
160 if (vcpu_id < event->max_vcpu) kvm_event_expand()
163 while (event->max_vcpu <= vcpu_id) kvm_event_expand()
164 event->max_vcpu += DEFAULT_VCPU_NUM; kvm_event_expand()
166 prev = event->vcpu; kvm_event_expand()
167 event->vcpu = realloc(event->vcpu, kvm_event_expand()
168 event->max_vcpu * sizeof(*event->vcpu)); kvm_event_expand()
169 if (!event->vcpu) { kvm_event_expand()
175 memset(event->vcpu + old_max_vcpu, 0, kvm_event_expand()
176 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); kvm_event_expand()
182 struct kvm_event *event; kvm_alloc_init_event() local
184 event = zalloc(sizeof(*event)); kvm_alloc_init_event()
185 if (!event) { kvm_alloc_init_event()
190 event->key = *key; kvm_alloc_init_event()
191 init_stats(&event->total.stats); kvm_alloc_init_event()
192 return event; kvm_alloc_init_event()
198 struct kvm_event *event; find_create_kvm_event() local
204 list_for_each_entry(event, head, hash_entry) { list_for_each_entry()
205 if (event->key.key == key->key && event->key.info == key->info) list_for_each_entry()
206 return event; list_for_each_entry()
209 event = kvm_alloc_init_event(key);
210 if (!event)
213 list_add(&event->hash_entry, head);
214 return event;
221 struct kvm_event *event = NULL; handle_begin_event() local
224 event = find_create_kvm_event(kvm, key); handle_begin_event()
226 vcpu_record->last_event = event; handle_begin_event()
238 static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event) kvm_event_rel_stddev() argument
240 struct kvm_event_stats *kvm_stats = &event->total; kvm_event_rel_stddev()
243 kvm_stats = &event->vcpu[vcpu_id]; kvm_event_rel_stddev()
249 static bool update_kvm_event(struct kvm_event *event, int vcpu_id, update_kvm_event() argument
253 kvm_update_event_stats(&event->total, time_diff); update_kvm_event()
257 if (!kvm_event_expand(event, vcpu_id)) update_kvm_event()
260 kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); update_kvm_event()
291 struct kvm_event *event = NULL; handle_child_event() local
294 event = find_create_kvm_event(kvm, key); handle_child_event()
296 vcpu_record->last_event = event; handle_child_event()
301 static bool skip_event(const char *event) skip_event() argument
306 if (!strcmp(event, *skip_events)) skip_event()
317 struct kvm_event *event; handle_end_event() local
326 event = vcpu_record->last_event; handle_end_event()
329 /* The begin event is not caught. */ handle_end_event()
334 * In some cases, the 'begin event' only records the start timestamp, handle_end_event()
335 * the actual event is recognized in the 'end event' (e.g. mmio-event). handle_end_event()
339 if (!event && key->key == INVALID_KEY) handle_end_event()
342 if (!event) handle_end_event()
343 event = find_create_kvm_event(kvm, key); handle_end_event()
345 if (!event) handle_end_event()
353 pr_debug("End time before begin time; skipping event.\n"); handle_end_event()
362 kvm->events_ops->decode_key(kvm, &event->key, decode); handle_end_event()
364 pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n", handle_end_event()
370 return update_kvm_event(event, vcpu, time_diff); handle_end_event()
426 static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
429 return event->total.field; \
431 if (vcpu >= event->max_vcpu) \
434 return event->vcpu[vcpu].field; \
476 static void insert_to_result(struct rb_root *result, struct kvm_event *event, insert_to_result() argument
487 if (bigger(event, p, vcpu)) insert_to_result()
493 rb_link_node(&event->rb, parent, rb); insert_to_result()
494 rb_insert_color(&event->rb, result); insert_to_result()
498 update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event) update_total_count() argument
502 kvm->total_count += get_event_count(event, vcpu); update_total_count()
503 kvm->total_time += get_event_time(event, vcpu); update_total_count()
506 static bool event_is_valid(struct kvm_event *event, int vcpu) event_is_valid() argument
508 return !!get_event_count(event, vcpu); event_is_valid()
515 struct kvm_event *event; sort_result() local
518 list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) { sort_result()
519 if (event_is_valid(event, vcpu)) { sort_result()
520 update_total_count(kvm, event); sort_result()
521 insert_to_result(&kvm->result, event, sort_result()
578 struct kvm_event *event; print_result() local
598 while ((event = pop_from_result(&kvm->result))) { print_result()
601 ecount = get_event_count(event, vcpu); print_result()
602 etime = get_event_time(event, vcpu); print_result()
603 max = get_event_max(event, vcpu); print_result()
604 min = get_event_min(event, vcpu); print_result()
606 kvm->events_ops->decode_key(kvm, &event->key, decode); print_result()
614 kvm_event_rel_stddev(vcpu, event)); print_result()
627 union perf_event *event __maybe_unused, process_lost_event()
648 union perf_event *event, process_sample_event()
663 pr_debug("problem processing %d event, skipping it.\n", process_sample_event()
664 event->header.type); process_sample_event()
721 union perf_event *event; perf_kvm__mmap_read_idx() local
727 while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) { perf_kvm__mmap_read_idx()
728 err = perf_evlist__parse_sample(kvm->evlist, event, &sample); perf_kvm__mmap_read_idx()
735 err = perf_session__queue_event(kvm->session, event, &sample, 0); perf_kvm__mmap_read_idx()
737 * FIXME: Here we can't consume the event, as perf_session__queue_event will perf_kvm__mmap_read_idx()
1177 set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN); kvm_events_record()
1205 OPT_STRING(0, "event", &kvm->report_event, "report event", kvm_events_report()
1206 "event for reporting: vmexit, " kvm_events_report()
1307 OPT_STRING(0, "event", &kvm->report_event, "report event", kvm_events_live()
1308 "event for reporting: " kvm_events_live()
1332 /* event handling */ kvm_events_live()
1379 * generate the event list kvm_events_live()
647 process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument
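kvm_event_expand() above grows the per-vcpu statistics array on demand: round max_vcpu up in DEFAULT_VCPU_NUM steps, realloc the array, and zero only the newly added tail so existing counts survive. A standalone sketch of that grow-and-zero pattern; the chunk size and field names are assumptions:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_VCPU_NUM 8	/* assumed growth step */

struct vcpu_stat { unsigned long count; unsigned long time; };

struct kvm_event {
	int max_vcpu;
	struct vcpu_stat *vcpu;
};

/* make sure event->vcpu[vcpu_id] exists; returns 0 on allocation failure */
static int event_expand(struct kvm_event *event, int vcpu_id)
{
	int old_max = event->max_vcpu;
	struct vcpu_stat *prev;

	if (vcpu_id < event->max_vcpu)
		return 1;

	while (event->max_vcpu <= vcpu_id)
		event->max_vcpu += DEFAULT_VCPU_NUM;

	prev = event->vcpu;
	event->vcpu = realloc(event->vcpu,
			      event->max_vcpu * sizeof(*event->vcpu));
	if (!event->vcpu) {
		free(prev);		/* realloc failed: old block is still valid, free it */
		event->max_vcpu = 0;
		return 0;
	}

	/* zero only the newly added tail; existing stats are kept */
	memset(event->vcpu + old_max, 0,
	       (event->max_vcpu - old_max) * sizeof(*event->vcpu));
	return 1;
}

int main(void)
{
	struct kvm_event ev = { 0, NULL };

	if (event_expand(&ev, 10))
		printf("max_vcpu now %d\n", ev.max_vcpu);
	free(ev.vcpu);
	return 0;
}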
H A Dbuiltin-inject.c4 * Builtin inject command: Examine the live mode (stdin) event stream
43 union perf_event event[0]; member in struct:event_entry
59 union perf_event *event) perf_event__repipe_synth()
64 return output_bytes(inject, event, event->header.size); perf_event__repipe_synth()
68 union perf_event *event, perf_event__repipe_oe_synth()
71 return perf_event__repipe_synth(tool, event); perf_event__repipe_oe_synth()
75 union perf_event *event, perf_event__repipe_op2_synth()
79 return perf_event__repipe_synth(tool, event); perf_event__repipe_op2_synth()
83 union perf_event *event, perf_event__repipe_attr()
90 ret = perf_event__process_attr(tool, event, pevlist); perf_event__repipe_attr()
97 return perf_event__repipe_synth(tool, event); perf_event__repipe_attr()
122 union perf_event *event, perf_event__repipe_auxtrace()
139 event, offset); perf_event__repipe_auxtrace()
145 ret = output_bytes(inject, event, event->header.size); perf_event__repipe_auxtrace()
149 event->auxtrace.size); perf_event__repipe_auxtrace()
151 ret = output_bytes(inject, event, perf_event__repipe_auxtrace()
152 event->header.size + event->auxtrace.size); perf_event__repipe_auxtrace()
157 return event->auxtrace.size; perf_event__repipe_auxtrace()
164 union perf_event *event __maybe_unused, perf_event__repipe_auxtrace()
174 union perf_event *event, perf_event__repipe()
178 return perf_event__repipe_synth(tool, event); perf_event__repipe()
182 union perf_event *event __maybe_unused, perf_event__drop()
190 union perf_event *event __maybe_unused, perf_event__drop_aux()
203 union perf_event *event,
209 union perf_event *event, perf_event__repipe_sample()
216 return f(tool, event, sample, evsel, machine); perf_event__repipe_sample()
219 build_id__mark_dso_hit(tool, event, sample, evsel, machine); perf_event__repipe_sample()
221 return perf_event__repipe_synth(tool, event); perf_event__repipe_sample()
225 union perf_event *event, perf_event__repipe_mmap()
231 err = perf_event__process_mmap(tool, event, sample, machine); perf_event__repipe_mmap()
232 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_mmap()
238 union perf_event *event, perf_event__repipe_mmap2()
244 err = perf_event__process_mmap2(tool, event, sample, machine); perf_event__repipe_mmap2()
245 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_mmap2()
251 union perf_event *event, perf_event__repipe_fork()
257 err = perf_event__process_fork(tool, event, sample, machine); perf_event__repipe_fork()
258 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_fork()
264 union perf_event *event, perf_event__repipe_comm()
270 err = perf_event__process_comm(tool, event, sample, machine); perf_event__repipe_comm()
271 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_comm()
277 union perf_event *event, perf_event__repipe_exit()
283 err = perf_event__process_exit(tool, event, sample, machine); perf_event__repipe_exit()
284 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_exit()
290 union perf_event *event, perf_event__repipe_tracing_data()
295 perf_event__repipe_synth(tool, event); perf_event__repipe_tracing_data()
296 err = perf_event__process_tracing_data(tool, event, session); perf_event__repipe_tracing_data()
302 union perf_event *event, perf_event__repipe_id_index()
307 perf_event__repipe_synth(tool, event); perf_event__repipe_id_index()
308 err = perf_event__process_id_index(tool, event, session); perf_event__repipe_id_index()
344 pr_err("Can't synthesize build_id event for %s\n", dso->long_name); dso__inject_build_id()
352 union perf_event *event, perf_event__inject_buildid()
361 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; perf_event__inject_buildid()
365 pr_err("problem processing %d event, skipping it.\n", perf_event__inject_buildid()
366 event->header.type); perf_event__inject_buildid()
393 perf_event__repipe(tool, event, sample, machine); perf_event__inject_buildid()
398 union perf_event *event __maybe_unused, perf_inject__sched_process_exit()
418 union perf_event *event, perf_inject__sched_switch()
426 perf_inject__sched_process_exit(tool, event, sample, evsel, machine); perf_inject__sched_switch()
428 ent = malloc(event->header.size + sizeof(struct event_entry)); perf_inject__sched_switch()
431 "Not enough memory to process sched switch event!"); perf_inject__sched_switch()
436 memcpy(&ent->event, event, event->header.size); perf_inject__sched_switch()
442 union perf_event *event __maybe_unused, perf_inject__sched_stat()
460 event_sw = &ent->event[0]; perf_inject__sched_stat()
484 pr_err("Samples for %s event do not have %s attribute set.", perf_evsel__check_stype()
493 union perf_event *event __maybe_unused, drop_sample()
523 * their selected event to exist, except if there is only 1 selected event left
58 perf_event__repipe_synth(struct perf_tool *tool, union perf_event *event) perf_event__repipe_synth() argument
67 perf_event__repipe_oe_synth(struct perf_tool *tool, union perf_event *event, struct ordered_events *oe __maybe_unused) perf_event__repipe_oe_synth() argument
74 perf_event__repipe_op2_synth(struct perf_tool *tool, union perf_event *event, struct perf_session *session __maybe_unused) perf_event__repipe_op2_synth() argument
82 perf_event__repipe_attr(struct perf_tool *tool, union perf_event *event, struct perf_evlist **pevlist) perf_event__repipe_attr() argument
121 perf_event__repipe_auxtrace(struct perf_tool *tool, union perf_event *event, struct perf_session *session __maybe_unused) perf_event__repipe_auxtrace() argument
173 perf_event__repipe(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) perf_event__repipe() argument
208 perf_event__repipe_sample(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_event__repipe_sample() argument
224 perf_event__repipe_mmap(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_mmap() argument
237 perf_event__repipe_mmap2(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_mmap2() argument
250 perf_event__repipe_fork(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_fork() argument
263 perf_event__repipe_comm(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_comm() argument
276 perf_event__repipe_exit(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_exit() argument
289 perf_event__repipe_tracing_data(struct perf_tool *tool, union perf_event *event, struct perf_session *session) perf_event__repipe_tracing_data() argument
301 perf_event__repipe_id_index(struct perf_tool *tool, union perf_event *event, struct perf_session *session) perf_event__repipe_id_index() argument
351 perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __maybe_unused, struct machine *machine) perf_event__inject_buildid() argument
417 perf_inject__sched_switch(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_inject__sched_switch() argument
/linux-4.4.14/drivers/bus/
H A Darm-ccn.c160 struct perf_event *event; member in struct:arm_ccn_dt::__anon3685
240 static CCN_FORMAT_ATTR(event, "config:16-23");
271 u32 event; member in struct:arm_ccn_pmu_event
290 .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
296 .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
301 .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
305 .type = CCN_TYPE_HNF, .event = _event, }
308 .type = CCN_TYPE_XP, .event = _event, \
314 * as they all share the same event types.
317 .type = CCN_TYPE_RNI_3P, .event = _event, }
320 .type = CCN_TYPE_SBAS, .event = _event, }
329 struct arm_ccn_pmu_event *event = container_of(attr, arm_ccn_pmu_event_show() local
333 res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type); arm_ccn_pmu_event_show()
334 if (event->event) arm_ccn_pmu_event_show()
335 res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x", arm_ccn_pmu_event_show()
336 event->event); arm_ccn_pmu_event_show()
337 if (event->def) arm_ccn_pmu_event_show()
339 event->def); arm_ccn_pmu_event_show()
340 if (event->mask) arm_ccn_pmu_event_show()
342 event->mask); arm_ccn_pmu_event_show()
344 /* Arguments required by an event */ arm_ccn_pmu_event_show()
345 switch (event->type) { arm_ccn_pmu_event_show()
351 if (event->event == CCN_EVENT_WATCHPOINT) arm_ccn_pmu_event_show()
372 struct arm_ccn_pmu_event *event = container_of(dev_attr, arm_ccn_pmu_events_is_visible() local
375 if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present) arm_ccn_pmu_events_is_visible()
377 if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present) arm_ccn_pmu_events_is_visible()
567 * as in the worst case scenario (an event every cycle), with 1GHz
631 static int arm_ccn_pmu_event_alloc(struct perf_event *event) arm_ccn_pmu_event_alloc() argument
633 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_alloc()
634 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_alloc()
639 node_xp = CCN_CONFIG_NODE(event->attr.config); arm_ccn_pmu_event_alloc()
640 type = CCN_CONFIG_TYPE(event->attr.config); arm_ccn_pmu_event_alloc()
641 event_id = CCN_CONFIG_EVENT(event->attr.config); arm_ccn_pmu_event_alloc()
650 ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; arm_ccn_pmu_event_alloc()
655 /* Allocate an event counter */ arm_ccn_pmu_event_alloc()
669 /* Allocate an event source or a watchpoint */ arm_ccn_pmu_event_alloc()
677 dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", arm_ccn_pmu_event_alloc()
684 ccn->dt.pmu_counters[hw->idx].event = event; arm_ccn_pmu_event_alloc()
689 static void arm_ccn_pmu_event_release(struct perf_event *event) arm_ccn_pmu_event_release() argument
691 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_release()
692 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_release()
700 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && arm_ccn_pmu_event_release()
701 CCN_CONFIG_EVENT(event->attr.config) == arm_ccn_pmu_event_release()
710 ccn->dt.pmu_counters[hw->idx].event = NULL; arm_ccn_pmu_event_release()
713 static int arm_ccn_pmu_event_init(struct perf_event *event) arm_ccn_pmu_event_init() argument
716 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_init()
722 if (event->attr.type != event->pmu->type) arm_ccn_pmu_event_init()
725 ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_init()
732 if (has_branch_stack(event) || event->attr.exclude_user || arm_ccn_pmu_event_init()
733 event->attr.exclude_kernel || event->attr.exclude_hv || arm_ccn_pmu_event_init()
734 event->attr.exclude_idle) { arm_ccn_pmu_event_init()
739 if (event->cpu < 0) { arm_ccn_pmu_event_init()
748 * event could be theoretically assigned to a different CPU. To arm_ccn_pmu_event_init()
752 event->cpu = cpumask_first(&ccn->dt.cpu); arm_ccn_pmu_event_init()
754 node_xp = CCN_CONFIG_NODE(event->attr.config); arm_ccn_pmu_event_init()
755 type = CCN_CONFIG_TYPE(event->attr.config); arm_ccn_pmu_event_init()
756 event_id = CCN_CONFIG_EVENT(event->attr.config); arm_ccn_pmu_event_init()
781 /* Validate event ID vs available for the type */ arm_ccn_pmu_event_init()
785 u32 port = CCN_CONFIG_PORT(event->attr.config); arm_ccn_pmu_event_init()
786 u32 vc = CCN_CONFIG_VC(event->attr.config); arm_ccn_pmu_event_init()
790 if (event_id != e->event) arm_ccn_pmu_event_init()
805 dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", arm_ccn_pmu_event_init()
810 /* Watchpoint-based event for a node is actually set on XP */ arm_ccn_pmu_event_init()
818 arm_ccn_pmu_config_set(&event->attr.config, arm_ccn_pmu_event_init()
827 if (event->group_leader->pmu != event->pmu && arm_ccn_pmu_event_init()
828 !is_software_event(event->group_leader)) arm_ccn_pmu_event_init()
831 list_for_each_entry(sibling, &event->group_leader->sibling_list, arm_ccn_pmu_event_init()
833 if (sibling->pmu != event->pmu && arm_ccn_pmu_event_init()
864 static void arm_ccn_pmu_event_update(struct perf_event *event) arm_ccn_pmu_event_update() argument
866 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_update()
867 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_update()
877 local64_add((new_count - prev_count) & mask, &event->count); arm_ccn_pmu_event_update()
880 static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) arm_ccn_pmu_xp_dt_config() argument
882 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_xp_dt_config()
883 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_xp_dt_config()
887 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) arm_ccn_pmu_xp_dt_config()
888 xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; arm_ccn_pmu_xp_dt_config()
891 CCN_CONFIG_NODE(event->attr.config))]; arm_ccn_pmu_xp_dt_config()
909 static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) arm_ccn_pmu_event_start() argument
911 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_start()
912 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_start()
914 local64_set(&event->hw.prev_count, arm_ccn_pmu_event_start()
920 * event->cpu (this is the same one as presented in "cpumask" arm_ccn_pmu_event_start()
928 arm_ccn_pmu_xp_dt_config(event, 1); arm_ccn_pmu_event_start()
931 static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) arm_ccn_pmu_event_stop() argument
933 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_stop()
934 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_stop()
938 arm_ccn_pmu_xp_dt_config(event, 0); arm_ccn_pmu_event_stop()
951 arm_ccn_pmu_event_update(event); arm_ccn_pmu_event_stop()
956 static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) arm_ccn_pmu_xp_watchpoint_config() argument
958 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_xp_watchpoint_config()
959 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_xp_watchpoint_config()
964 u64 cmp_l = event->attr.config1; arm_ccn_pmu_xp_watchpoint_config()
965 u64 cmp_h = event->attr.config2; arm_ccn_pmu_xp_watchpoint_config()
966 u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l; arm_ccn_pmu_xp_watchpoint_config()
967 u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h; arm_ccn_pmu_xp_watchpoint_config()
975 val |= CCN_CONFIG_DIR(event->attr.config) << arm_ccn_pmu_xp_watchpoint_config()
979 val |= CCN_CONFIG_PORT(event->attr.config) << arm_ccn_pmu_xp_watchpoint_config()
983 val |= CCN_CONFIG_VC(event->attr.config) << arm_ccn_pmu_xp_watchpoint_config()
1004 static void arm_ccn_pmu_xp_event_config(struct perf_event *event) arm_ccn_pmu_xp_event_config() argument
1006 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_xp_event_config()
1007 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_xp_event_config()
1014 id = (CCN_CONFIG_VC(event->attr.config) << 4) | arm_ccn_pmu_xp_event_config()
1015 (CCN_CONFIG_PORT(event->attr.config) << 3) | arm_ccn_pmu_xp_event_config()
1016 (CCN_CONFIG_EVENT(event->attr.config) << 0); arm_ccn_pmu_xp_event_config()
1025 static void arm_ccn_pmu_node_event_config(struct perf_event *event) arm_ccn_pmu_node_event_config() argument
1027 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_node_event_config()
1028 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_node_event_config()
1031 u32 type = CCN_CONFIG_TYPE(event->attr.config); arm_ccn_pmu_node_event_config()
1034 port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config)); arm_ccn_pmu_node_event_config()
1053 /* Set the event id for the pre-allocated counter */ arm_ccn_pmu_node_event_config()
1057 val |= CCN_CONFIG_EVENT(event->attr.config) << arm_ccn_pmu_node_event_config()
1062 static void arm_ccn_pmu_event_config(struct perf_event *event) arm_ccn_pmu_event_config() argument
1064 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_config()
1065 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_config()
1072 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) arm_ccn_pmu_event_config()
1073 xp = CCN_CONFIG_XP(event->attr.config); arm_ccn_pmu_event_config()
1075 xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config)); arm_ccn_pmu_event_config()
1087 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) { arm_ccn_pmu_event_config()
1088 if (CCN_CONFIG_EVENT(event->attr.config) == arm_ccn_pmu_event_config()
1090 arm_ccn_pmu_xp_watchpoint_config(event); arm_ccn_pmu_event_config()
1092 arm_ccn_pmu_xp_event_config(event); arm_ccn_pmu_event_config()
1094 arm_ccn_pmu_node_event_config(event); arm_ccn_pmu_event_config()
1100 static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) arm_ccn_pmu_event_add() argument
1103 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_add()
1105 err = arm_ccn_pmu_event_alloc(event); arm_ccn_pmu_event_add()
1109 arm_ccn_pmu_event_config(event); arm_ccn_pmu_event_add()
1114 arm_ccn_pmu_event_start(event, PERF_EF_UPDATE); arm_ccn_pmu_event_add()
1119 static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) arm_ccn_pmu_event_del() argument
1121 arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); arm_ccn_pmu_event_del()
1123 arm_ccn_pmu_event_release(event); arm_ccn_pmu_event_del()
1126 static void arm_ccn_pmu_event_read(struct perf_event *event) arm_ccn_pmu_event_read() argument
1128 arm_ccn_pmu_event_update(event); arm_ccn_pmu_event_read()
1144 struct perf_event *event = dt->pmu_counters[idx].event; arm_ccn_pmu_overflow_handler() local
1147 WARN_ON_ONCE(overflowed && !event && arm_ccn_pmu_overflow_handler()
1150 if (!event || !overflowed) arm_ccn_pmu_overflow_handler()
1153 arm_ccn_pmu_event_update(event); arm_ccn_pmu_overflow_handler()
H A Darm-cci.c115 * @fixed_hw_cntrs - Number of fixed event counters
116 * @num_hw_cntrs - Maximum number of programmable event counters
117 * @cntr_size - Size of an event counter mapping
194 * Instead of an event id to monitor CCI cycles, a dedicated counter is
196 * make use of this event in hardware.
206 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
207 * ports and bits 4:0 are event codes. There are different event codes
214 * the different revisions and are used to validate the event to be monitored.
222 #define CCI400_PMU_EVENT_SOURCE(event) \
223 ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
225 #define CCI400_PMU_EVENT_CODE(event) \
226 ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
246 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
280 /* Special event for cycles counter */
326 /* Special event for cycles counter */
344 /* cycles event idx is fixed */ cci400_get_event_idx()
426 * CCI500 provides 8 independent event counters that can count
429 * CCI500 PMU event id is an 9-bit value made of two parts.
430 * bits [8:5] - Source for the event
463 #define CCI500_PMU_EVENT_SOURCE(event) \
464 ((event >> CCI500_PMU_EVENT_SOURCE_SHIFT) & CCI500_PMU_EVENT_SOURCE_MASK)
465 #define CCI500_PMU_EVENT_CODE(event) \
466 ((event >> CCI500_PMU_EVENT_CODE_SHIFT) & CCI500_PMU_EVENT_CODE_MASK)
484 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
557 return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n", cci500_pmu_global_event_show()
618 return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n", cci_pmu_event_show()
650 static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) pmu_set_event() argument
652 pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); pmu_set_event()
665 static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) pmu_get_event_idx() argument
667 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); pmu_get_event_idx()
668 unsigned long cci_event = event->hw.config_base; pmu_get_event_idx()
683 static int pmu_map_event(struct perf_event *event) pmu_map_event() argument
685 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); pmu_map_event()
687 if (event->attr.type < PERF_TYPE_MAX || pmu_map_event()
691 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); pmu_map_event()
741 static u32 pmu_read_counter(struct perf_event *event) pmu_read_counter() argument
743 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); pmu_read_counter()
744 struct hw_perf_event *hw_counter = &event->hw; pmu_read_counter()
757 static void pmu_write_counter(struct perf_event *event, u32 value) pmu_write_counter() argument
759 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); pmu_write_counter()
760 struct hw_perf_event *hw_counter = &event->hw; pmu_write_counter()
769 static u64 pmu_event_update(struct perf_event *event) pmu_event_update() argument
771 struct hw_perf_event *hwc = &event->hw; pmu_event_update()
776 new_raw_count = pmu_read_counter(event); pmu_event_update()
782 local64_add(delta, &event->count); pmu_event_update()
787 static void pmu_read(struct perf_event *event) pmu_read() argument
789 pmu_event_update(event); pmu_read()
792 void pmu_event_set_period(struct perf_event *event) pmu_event_set_period() argument
794 struct hw_perf_event *hwc = &event->hw; pmu_event_set_period()
803 pmu_write_counter(event, val); pmu_event_set_period()
820 struct perf_event *event = events->events[idx]; pmu_handle_irq() local
823 if (!event) pmu_handle_irq()
826 hw_counter = &event->hw; pmu_handle_irq()
836 pmu_event_update(event); pmu_handle_irq()
837 pmu_event_set_period(event); pmu_handle_irq()
860 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
862 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); hw_perf_event_destroy()
909 * All the fixed event counters are mapped before the programmable
917 static void cci_pmu_start(struct perf_event *event, int pmu_flags) cci_pmu_start() argument
919 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_start()
921 struct hw_perf_event *hwc = &event->hw; cci_pmu_start()
941 /* Configure the counter unless you are counting a fixed event */ cci_pmu_start()
945 pmu_event_set_period(event); cci_pmu_start()
951 static void cci_pmu_stop(struct perf_event *event, int pmu_flags) cci_pmu_stop() argument
953 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_stop()
954 struct hw_perf_event *hwc = &event->hw; cci_pmu_stop()
970 pmu_event_update(event); cci_pmu_stop()
974 static int cci_pmu_add(struct perf_event *event, int flags) cci_pmu_add() argument
976 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_add()
978 struct hw_perf_event *hwc = &event->hw; cci_pmu_add()
982 perf_pmu_disable(event->pmu); cci_pmu_add()
985 idx = pmu_get_event_idx(hw_events, event); cci_pmu_add()
991 event->hw.idx = idx; cci_pmu_add()
992 hw_events->events[idx] = event; cci_pmu_add()
996 cci_pmu_start(event, PERF_EF_RELOAD); cci_pmu_add()
999 perf_event_update_userpage(event); cci_pmu_add()
1002 perf_pmu_enable(event->pmu); cci_pmu_add()
1006 static void cci_pmu_del(struct perf_event *event, int flags) cci_pmu_del() argument
1008 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_del()
1010 struct hw_perf_event *hwc = &event->hw; cci_pmu_del()
1013 cci_pmu_stop(event, PERF_EF_UPDATE); cci_pmu_del()
1017 perf_event_update_userpage(event); cci_pmu_del()
1023 struct perf_event *event) validate_event()
1025 if (is_software_event(event)) validate_event()
1031 * until after pmu->event_init(event). validate_event()
1033 if (event->pmu != cci_pmu) validate_event()
1036 if (event->state < PERF_EVENT_STATE_OFF) validate_event()
1039 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) validate_event()
1042 return pmu_get_event_idx(hw_events, event) >= 0; validate_event()
1046 validate_group(struct perf_event *event) validate_group() argument
1048 struct perf_event *sibling, *leader = event->group_leader; validate_group()
1049 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); validate_group()
1060 if (!validate_event(event->pmu, &fake_pmu, leader)) validate_group()
1064 if (!validate_event(event->pmu, &fake_pmu, sibling)) validate_group()
1068 if (!validate_event(event->pmu, &fake_pmu, event)) validate_group()
1075 __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
1077 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
1080 mapping = pmu_map_event(event); __hw_perf_event_init()
1083 pr_debug("event %x:%llx not supported\n", event->attr.type, __hw_perf_event_init()
1084 event->attr.config); __hw_perf_event_init()
1089 * We don't assign an index until we actually place the event onto __hw_perf_event_init()
1099 * Store the event encoding into the config_base field. __hw_perf_event_init()
1112 if (event->group_leader != event) { __hw_perf_event_init()
1113 if (validate_group(event) != 0) __hw_perf_event_init()
1120 static int cci_pmu_event_init(struct perf_event *event) cci_pmu_event_init() argument
1122 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_event_init()
1127 if (event->attr.type != event->pmu->type) cci_pmu_event_init()
1131 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) cci_pmu_event_init()
1135 if (event->attr.exclude_user || cci_pmu_event_init()
1136 event->attr.exclude_kernel || cci_pmu_event_init()
1137 event->attr.exclude_hv || cci_pmu_event_init()
1138 event->attr.exclude_idle || cci_pmu_event_init()
1139 event->attr.exclude_host || cci_pmu_event_init()
1140 event->attr.exclude_guest) cci_pmu_event_init()
1149 * the event being installed into its context, so the PMU's CPU can't cci_pmu_event_init()
1153 if (event->cpu < 0 || cpu < 0) cci_pmu_event_init()
1155 event->cpu = cpu; cci_pmu_event_init()
1157 event->destroy = hw_perf_event_destroy; cci_pmu_event_init()
1169 err = __hw_perf_event_init(event); cci_pmu_event_init()
1171 hw_perf_event_destroy(event); cci_pmu_event_init()
1320 * TODO: migrate context once core races on event->ctx have cci_pmu_cpu_notifier()
1021 validate_event(struct pmu *cci_pmu, struct cci_pmu_hw_events *hw_events, struct perf_event *event) validate_event() argument
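The CCI-400 comments above spell the encoding out: the PMU event id is an 8-bit value whose bits 7:5 select one of eight source ports and whose bits 4:0 carry the event code, which is what the CCI400_PMU_EVENT_SOURCE/CODE macros extract. A small sketch of encoding and decoding that id; the shift and mask values come from that description, while the helper names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define EVENT_SOURCE_SHIFT 5
#define EVENT_SOURCE_MASK  0x7		/* bits 7:5 - one of 8 ports */
#define EVENT_CODE_SHIFT   0
#define EVENT_CODE_MASK    0x1f		/* bits 4:0 - event code */

static uint8_t cci_event_encode(unsigned int source, unsigned int code)
{
	return ((source & EVENT_SOURCE_MASK) << EVENT_SOURCE_SHIFT) |
	       ((code & EVENT_CODE_MASK) << EVENT_CODE_SHIFT);
}

static unsigned int cci_event_source(uint8_t event)
{
	return (event >> EVENT_SOURCE_SHIFT) & EVENT_SOURCE_MASK;
}

static unsigned int cci_event_code(uint8_t event)
{
	return (event >> EVENT_CODE_SHIFT) & EVENT_CODE_MASK;
}

int main(void)
{
	uint8_t ev = cci_event_encode(3, 0x0a);	/* port 3, event code 0xa */

	printf("event=0x%02x source=%u code=0x%x\n",
	       ev, cci_event_source(ev), cci_event_code(ev));
	return 0;
}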
/linux-4.4.14/tools/iio/
H A Diio_event_monitor.c1 /* Industrialio event test code.
107 static bool event_is_known(struct iio_event_data *event) event_is_known() argument
109 enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id); event_is_known()
110 enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id); event_is_known()
111 enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id); event_is_known()
112 enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id); event_is_known()
207 static void print_event(struct iio_event_data *event) print_event() argument
209 enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id); print_event()
210 enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id); print_event()
211 enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id); print_event()
212 enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id); print_event()
213 int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event->id); print_event()
214 int chan2 = IIO_EVENT_CODE_EXTRACT_CHAN2(event->id); print_event()
215 bool diff = IIO_EVENT_CODE_EXTRACT_DIFF(event->id); print_event()
217 if (!event_is_known(event)) { print_event()
218 fprintf(stderr, "Unknown event: time: %lld, id: %llx\n", print_event()
219 event->timestamp, event->id); print_event()
224 printf("Event: time: %lld, type: %s", event->timestamp, print_event()
246 struct iio_event_data event; main() local
291 fprintf(stderr, "Failed to retrieve event fd\n"); main()
304 ret = read(event_fd, &event, sizeof(event)); main()
311 perror("Failed to read event from device"); main()
316 if (ret != sizeof(event)) { main()
317 fprintf(stderr, "Reading event failed!\n"); main()
322 print_event(&event); main()
326 perror("Failed to close event file"); main()
/linux-4.4.14/tools/perf/scripts/python/
H A Devent_analyzing_sample.py1 # event_analyzing_sample.py: general event handler in python
14 # for a x86 HW PMU event: PEBS with load latency data.
43 # load latency info, while gen_events is for general event.
67 # Create and insert event object to a database so that user could
88 # Create the event object and insert it to the right table in database
89 event = create_event(name, comm, dso, symbol, raw_buf)
90 insert_db(event)
92 def insert_db(event):
93 if event.ev_type == EVTYPE_GENERIC:
95 (event.name, event.symbol, event.comm, event.dso))
96 elif event.ev_type == EVTYPE_PEBS_LL:
97 event.ip &= 0x7fffffffffffffff
98 event.dla &= 0x7fffffffffffffff
100 (event.name, event.symbol, event.comm, event.dso, event.flags,
101 event.ip, event.status, event.dse, event.dla, event.lat))
105 # We show the basic info for the 2 type of event classes
111 # As the event number may be very big, we can't use a linear way
/linux-4.4.14/include/linux/perf/
H A Darm_pmu.h36 * The ARMv7 CPU PMU supports up to 32 event counters.
63 * an event. A 0 means that the counter can be used.
87 void (*enable)(struct perf_event *event);
88 void (*disable)(struct perf_event *event);
90 struct perf_event *event);
92 struct perf_event *event);
95 u32 (*read_counter)(struct perf_event *event);
96 void (*write_counter)(struct perf_event *event, u32 val);
102 int (*map_event)(struct perf_event *event);
116 u64 armpmu_event_update(struct perf_event *event);
118 int armpmu_event_set_period(struct perf_event *event);
120 int armpmu_map_event(struct perf_event *event,
/linux-4.4.14/samples/bpf/
H A Dbpf_load.c35 static int populate_prog_array(const char *event, int prog_fd) populate_prog_array() argument
37 int ind = atoi(event), err; populate_prog_array()
47 static int load_and_attach(const char *event, struct bpf_insn *prog, int size) load_and_attach() argument
49 bool is_socket = strncmp(event, "socket", 6) == 0; load_and_attach()
50 bool is_kprobe = strncmp(event, "kprobe/", 7) == 0; load_and_attach()
51 bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0; load_and_attach()
67 printf("Unknown event '%s'\n", event); load_and_attach()
80 event += 6; load_and_attach()
81 if (*event != '/') load_and_attach()
83 event++; load_and_attach()
84 if (!isdigit(*event)) { load_and_attach()
88 return populate_prog_array(event, fd); load_and_attach()
93 event += 7; load_and_attach()
95 event += 10; load_and_attach()
97 if (*event == 0) { load_and_attach()
98 printf("event name cannot be empty\n"); load_and_attach()
102 if (isdigit(*event)) load_and_attach()
103 return populate_prog_array(event, fd); load_and_attach()
107 is_kprobe ? 'p' : 'r', event, event); load_and_attach()
111 event, strerror(errno)); load_and_attach()
118 strcat(buf, event); load_and_attach()
123 printf("failed to open event %s\n", event); load_and_attach()
129 printf("read from '%s' failed '%s'\n", event, strerror(errno)); load_and_attach()
141 printf("event %d fd %d err %s\n", id, efd, strerror(errno)); load_and_attach()
/linux-4.4.14/drivers/scsi/lpfc/
H A Dlpfc_nl.h49 * All net link event payloads will begin with and event type
50 * and subcategory. The event type must come first.
57 /* RSCN event header */
64 /* els event header */
79 /* special els lsrjt event */
87 /* special els logo event */
93 /* fabric event header */
106 /* special case fabric fcprdchkerr event */
115 /* scsi event header */
133 /* special case scsi varqueuedepth event */
140 /* special case scsi check condition event */
149 /* event codes for FC_REG_BOARD_EVENT */
152 /* board event header */
159 /* event codes for FC_REG_ADAPTER_EVENT */
162 /* adapter event header */
169 /* event codes for temp_event */
/linux-4.4.14/drivers/isdn/hisax/
H A Dfsm.c35 if ((fnlist[i].state >= fsm->state_count) || (fnlist[i].event >= fsm->event_count)) { FsmNew()
38 (long)fnlist[i].event, (long)fsm->event_count); FsmNew()
40 fsm->jumpmatrix[fsm->state_count * fnlist[i].event + FsmNew()
52 FsmEvent(struct FsmInst *fi, int event, void *arg) FsmEvent() argument
56 if ((fi->state >= fi->fsm->state_count) || (event >= fi->fsm->event_count)) { FsmEvent()
58 (long)fi->state, (long)fi->fsm->state_count, event, (long)fi->fsm->event_count); FsmEvent()
61 r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state]; FsmEvent()
66 fi->fsm->strEvent[event]); FsmEvent()
67 r(fi, event, arg); FsmEvent()
73 fi->fsm->strEvent[event]); FsmEvent()
94 FsmEvent(ft->fi, ft->event, ft->arg); FsmExpireTimer()
122 int millisec, int event, void *arg, int where) FsmAddTimer()
137 ft->event = event; FsmAddTimer()
146 int millisec, int event, void *arg, int where) FsmRestartTimer()
158 ft->event = event; FsmRestartTimer()
121 FsmAddTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) FsmAddTimer() argument
145 FsmRestartTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) FsmRestartTimer() argument
H A Dfsm.h39 int state, event; member in struct:FsmNode
46 int event; member in struct:FsmTimer
52 int FsmEvent(struct FsmInst *fi, int event, void *arg);
55 int FsmAddTimer(struct FsmTimer *ft, int millisec, int event,
57 void FsmRestartTimer(struct FsmTimer *ft, int millisec, int event,
/linux-4.4.14/drivers/isdn/mISDN/
H A Dfsm.c40 (fnlist[i].event >= fsm->event_count)) { mISDN_FsmNew()
44 (long)fnlist[i].event, (long)fsm->event_count); mISDN_FsmNew()
46 fsm->jumpmatrix[fsm->state_count * fnlist[i].event + mISDN_FsmNew()
59 mISDN_FsmEvent(struct FsmInst *fi, int event, void *arg) mISDN_FsmEvent() argument
64 (event >= fi->fsm->event_count)) { mISDN_FsmEvent()
67 (long)fi->state, (long)fi->fsm->state_count, event, mISDN_FsmEvent()
71 r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state]; mISDN_FsmEvent()
76 fi->fsm->strEvent[event]); mISDN_FsmEvent()
77 r(fi, event, arg); mISDN_FsmEvent()
83 fi->fsm->strEvent[event]); mISDN_FsmEvent()
106 mISDN_FsmEvent(ft->fi, ft->event, ft->arg); FsmExpireTimer()
137 int millisec, int event, void *arg, int where) mISDN_FsmAddTimer()
156 ft->event = event; mISDN_FsmAddTimer()
166 int millisec, int event, void *arg, int where) mISDN_FsmRestartTimer()
178 ft->event = event; mISDN_FsmRestartTimer()
136 mISDN_FsmAddTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) mISDN_FsmAddTimer() argument
165 mISDN_FsmRestartTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) mISDN_FsmRestartTimer() argument
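Both fsm.c copies above drive their state machines from a flat jump matrix: mISDN_FsmNew()/FsmNew() store each handler at jumpmatrix[state_count * event + state], and the event functions index the same slot to find the routine for the current (state, event) pair, logging undefined combinations instead of crashing. A compact sketch of that table-driven dispatch; the states, events and handlers are invented for the example:

#include <stdio.h>

#define STATE_COUNT 3
#define EVENT_COUNT 2

struct fsm_inst;
typedef void (*fsm_fn)(struct fsm_inst *fi, int event, void *arg);

struct fsm_inst {
	int state;
	fsm_fn jumpmatrix[STATE_COUNT * EVENT_COUNT];	/* one slot per (state, event) */
};

static void go_connected(struct fsm_inst *fi, int event, void *arg)
{
	(void)arg;
	printf("state %d, event %d -> connected\n", fi->state, event);
	fi->state = 1;
}

/* install a handler for one (state, event) pair */
static void fsm_add(struct fsm_inst *fi, int state, int event, fsm_fn fn)
{
	fi->jumpmatrix[STATE_COUNT * event + state] = fn;
}

/* look up and run the handler, or report an undefined transition */
static int fsm_event(struct fsm_inst *fi, int event, void *arg)
{
	fsm_fn fn = fi->jumpmatrix[STATE_COUNT * event + fi->state];

	if (!fn) {
		printf("state %d, event %d: no handler\n", fi->state, event);
		return 1;
	}
	fn(fi, event, arg);
	return 0;
}

int main(void)
{
	struct fsm_inst fi = { .state = 0 };

	fsm_add(&fi, 0, 1, go_connected);
	fsm_event(&fi, 1, NULL);	/* handled */
	fsm_event(&fi, 1, NULL);	/* now in state 1: no handler defined */
	return 0;
}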
/linux-4.4.14/arch/sh/boards/mach-dreamcast/
H A Dirq.c24 * set in the Event Mask Registers (EMRs). When a hardware event is
27 * event.
46 #define ESR_BASE 0x005f6900 /* Base event status register */
47 #define EMR_BASE 0x005f6910 /* Base event mask register */
50 * Helps us determine the EMR group that this event belongs to: 0 = 0x6910,
51 * 1 = 0x6920, 2 = 0x6930; also determine the event offset.
53 #define LEVEL(event) (((event) - HW_EVENT_IRQ_BASE) / 32)
55 /* Return the hardware event's bit position within the EMR/ESR */
56 #define EVENT_BIT(event) (((event) - HW_EVENT_IRQ_BASE) & 31)
60 * (logically mapped to the corresponding bit for the hardware event).
63 /* Disable the hardware event by masking its bit in its EMR */ disable_systemasic_irq()
75 /* Enable the hardware event by setting its bit in its EMR */ enable_systemasic_irq()
87 /* Acknowledge a hardware event by writing its bit back to its ESR */ mask_ack_systemasic_irq()
104 * Map the hardware event indicated by the processor IRQ to a virtual IRQ.
131 /* Now scan and find the first set bit as the event to map */ systemasic_irq_demux()
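
The LEVEL() and EVENT_BIT() macros above split a hardware event number into one of three 32-bit Event Mask Registers and a bit within it; enabling or disabling an event is then a read-modify-write of that register. A hedged sketch of that arithmetic, with HW_EVENT_IRQ_BASE and the register storage as stand-ins rather than the real MMIO:

#include <stdint.h>
#include <stdio.h>

/* illustrative sketch of the System ASIC event masking arithmetic */
#define HW_EVENT_IRQ_BASE	48	/* assumed base, for illustration only */

#define LEVEL(event)	 (((event) - HW_EVENT_IRQ_BASE) / 32)	/* which EMR: 0, 1 or 2 */
#define EVENT_BIT(event) (((event) - HW_EVENT_IRQ_BASE) & 31)	/* bit inside that EMR */

/* stand-ins for the memory-mapped Event Mask Registers at 0x5f6910/20/30 */
static uint32_t emr[3];

static void disable_event(unsigned int event)
{
	emr[LEVEL(event)] &= ~(1u << EVENT_BIT(event));	/* mask the event */
}

static void enable_event(unsigned int event)
{
	emr[LEVEL(event)] |= 1u << EVENT_BIT(event);	/* unmask the event */
}

int main(void)
{
	enable_event(HW_EVENT_IRQ_BASE + 40);	/* lands in group 1, bit 8 */
	printf("EMR1 = 0x%08x\n", (unsigned)emr[1]);
	disable_event(HW_EVENT_IRQ_BASE + 40);
	printf("EMR1 = 0x%08x\n", (unsigned)emr[1]);
	return 0;
}
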
/linux-4.4.14/drivers/staging/iio/
H A Diio_simple_dummy_events.c24 * iio_simple_dummy_read_event_config() - is event enabled?
26 * @chan: channel for the event whose state is being queried
27 * @type: type of the event whose state is being queried
31 * discover if the event generation is enabled on the device.
44 * iio_simple_dummy_write_event_config() - set whether event is enabled
46 * @chan: channel for the event whose state is being set
47 * @type: type of the event whose state is being set
52 * so that it generates the specified event. Here it just sets up a cached
105 * iio_simple_dummy_read_event_value() - get value associated with event
107 * @chan: channel for the event whose value is being read
108 * @type: type of the event whose value is being read
110 * @info: info type of the event whose value is being read
111 * @val: value for the event code.
115 * on the event enabled. This often means that the driver must cache the values
117 * the enabled event is changed.
134 * iio_simple_dummy_write_event_value() - set value associated with event
136 * @chan: channel for the event whose value is being set
137 * @type: type of the event whose value is being set
139 * @info: info type of the event whose value is being set
166 * iio_simple_dummy_event_handler() - identify and pass on event
167 * @irq: irq of event line
171 * event occurred and for then pushing that event towards userspace.
172 * Here only one event occurs so we push that directly on with locally
180 dev_dbg(&indio_dev->dev, "id %x event %x\n", iio_simple_dummy_event_handler()
241 /* Fire up event source - normally not present */ iio_simple_dummy_events_register()
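
The dummy IIO driver above illustrates the usual pattern described in its comments: the write_event_config/write_event_value callbacks only cache the requested enable state and threshold, and the handler later pushes an event when the (here simulated) source fires. A stripped-down sketch of that cached state, without the IIO structures:

#include <stdbool.h>
#include <stdio.h>

/* reduced per-device state, as cached by the dummy driver's callbacks */
struct dummy_event_state {
	bool enabled;	/* is event generation switched on? */
	int threshold;	/* value associated with the event */
};

static int read_event_config(const struct dummy_event_state *st)
{
	return st->enabled;
}

static int write_event_config(struct dummy_event_state *st, bool on)
{
	st->enabled = on;	/* only cache the state; no hardware here */
	return 0;
}

static int write_event_value(struct dummy_event_state *st, int val)
{
	st->threshold = val;
	return 0;
}

/* what the event handler would do when the simulated event line fires */
static void event_handler(const struct dummy_event_state *st, int value)
{
	if (st->enabled && value > st->threshold)
		printf("pushing threshold event: %d > %d\n", value, st->threshold);
}

int main(void)
{
	struct dummy_event_state st = { 0 };

	write_event_config(&st, true);
	write_event_value(&st, 100);
	event_handler(&st, 150);
	printf("enabled=%d\n", read_event_config(&st));
	return 0;
}
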
/linux-4.4.14/arch/s390/kernel/
H A Dperf_cpum_sf.c2 * Performance event support for the System z CPU-measurement Sampling Facility
87 struct perf_event *event; /* Scheduled perf event */ member in struct:cpu_hw_sf
385 * 3. Store the raw sample buffer pointer in the perf event allocate_buffers()
431 * that the sampling facility is enabled too. If the event to be allocate_buffers()
436 * before the event is started. allocate_buffers()
511 * @hwc: Perf event hardware structure
514 * and postponed allocation extents stored in the specified Perf event hardware.
625 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
628 if (RAWSAMPLE_REG(&event->hw)) hw_perf_event_destroy()
629 kfree((void *) RAWSAMPLE_REG(&event->hw)); hw_perf_event_destroy()
631 /* Release PMC if this is the last perf event */ hw_perf_event_destroy()
668 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
672 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
673 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
687 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
694 * The event->cpu value can be -1 to count on every CPU, for example, __hw_perf_event_init()
696 * sampling info from the current CPU, otherwise use event->cpu to __hw_perf_event_init()
703 if (event->cpu == -1) __hw_perf_event_init()
709 cpuhw = &per_cpu(cpu_hw_sf, event->cpu); __hw_perf_event_init()
779 * from the event. If the event is not pinned to a particular __hw_perf_event_init()
780 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling __hw_perf_event_init()
801 static int cpumsf_pmu_event_init(struct perf_event *event) cpumsf_pmu_event_init() argument
806 if (has_branch_stack(event)) cpumsf_pmu_event_init()
809 switch (event->attr.type) { cpumsf_pmu_event_init()
811 if ((event->attr.config != PERF_EVENT_CPUM_SF) && cpumsf_pmu_event_init()
812 (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) cpumsf_pmu_event_init()
821 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) cpumsf_pmu_event_init()
823 if (!is_sampling_event(event)) cpumsf_pmu_event_init()
830 /* Check online status of the CPU to which the event is pinned */ cpumsf_pmu_event_init()
831 if (event->cpu >= nr_cpumask_bits || cpumsf_pmu_event_init()
832 (event->cpu >= 0 && !cpu_online(event->cpu))) cpumsf_pmu_event_init()
838 if (event->attr.exclude_hv) cpumsf_pmu_event_init()
839 event->attr.exclude_hv = 0; cpumsf_pmu_event_init()
840 if (event->attr.exclude_idle) cpumsf_pmu_event_init()
841 event->attr.exclude_idle = 0; cpumsf_pmu_event_init()
843 err = __hw_perf_event_init(event); cpumsf_pmu_event_init()
845 if (event->destroy) cpumsf_pmu_event_init()
846 event->destroy(event); cpumsf_pmu_event_init()
865 * perf event: cpumsf_pmu_enable()
866 * 1. Postponed buffer allocations from the event initialization. cpumsf_pmu_enable()
873 if (cpuhw->event) { cpumsf_pmu_enable()
874 hwc = &cpuhw->event->hw; cpumsf_pmu_enable()
942 /* perf_exclude_event() - Filter event
943 * @event: The perf event
949 * Return non-zero if the event shall be excluded.
951 static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, perf_exclude_event() argument
954 if (event->attr.exclude_user && user_mode(regs)) perf_exclude_event()
956 if (event->attr.exclude_kernel && !user_mode(regs)) perf_exclude_event()
958 if (event->attr.exclude_guest && sde_regs->in_guest) perf_exclude_event()
960 if (event->attr.exclude_host && !sde_regs->in_guest) perf_exclude_event()
966 * @event: The perf event
969 * Use the hardware sample data to create perf event sample. The sample
970 * is then pushed to the event subsystem and the function checks for perf_push_sample()
971 * possible event overflows. If an event overflow occurs, the PMU is
974 * Return non-zero if an event overflow occurred.
976 static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) perf_push_sample() argument
985 perf_sample_data_init(&data, 0, event->hw.last_period); perf_push_sample()
1032 if (perf_exclude_event(event, &regs, sde_regs)) perf_push_sample()
1034 if (perf_event_overflow(event, &data, &regs)) { perf_push_sample()
1036 event->pmu->stop(event, 0); perf_push_sample()
1038 perf_event_update_userpage(event); perf_push_sample()
1043 static void perf_event_count_update(struct perf_event *event, u64 count) perf_event_count_update() argument
1045 local64_add(count, &event->count); perf_event_count_update()
1120 * @event: The perf event
1125 * then pushed to the perf event subsystem. Depending on the sampling function,
1129 * event hardware structure. The function always works with a combined-sampling
1137 * due to a perf event overflow.
1139 static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, hw_collect_samples() argument
1142 unsigned long flags = SAMPL_FLAGS(&event->hw); hw_collect_samples()
1149 sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw); hw_collect_samples()
1152 sample_size = event_sample_size(&event->hw); hw_collect_samples()
1160 /* Update perf event period */ hw_collect_samples()
1161 perf_event_count_update(event, SAMPL_RATE(&event->hw)); hw_collect_samples()
1165 /* If an event overflow occurred, the PMU is stopped to hw_collect_samples()
1166 * throttle event delivery. Remaining sample data is hw_collect_samples()
1173 *overflow = perf_push_sample(event, sfr); hw_collect_samples()
1201 * @event: The perf event
1204 * Processes the sampling buffer and create perf event samples.
1206 * register of the specified perf event.
1214 static void hw_perf_event_update(struct perf_event *event, int flush_all) hw_perf_event_update() argument
1216 struct hw_perf_event *hwc = &event->hw; hw_perf_event_update()
1253 * flag if an (perf) event overflow happened. If so, the PMU hw_perf_event_update()
1256 hw_collect_samples(event, sdbt, &event_overflow); hw_perf_event_update()
1272 /* Update event hardware registers */ hw_perf_event_update()
1281 /* If an event overflow happened, discard samples by hw_perf_event_update()
1288 /* Account sample overflows in the event hardware structure */ hw_perf_event_update()
1294 "overflow stats: sample=%llu event=%llu\n", hw_perf_event_update()
1298 static void cpumsf_pmu_read(struct perf_event *event) cpumsf_pmu_read() argument
1306 static void cpumsf_pmu_start(struct perf_event *event, int flags) cpumsf_pmu_start() argument
1310 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) cpumsf_pmu_start()
1314 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); cpumsf_pmu_start()
1316 perf_pmu_disable(event->pmu); cpumsf_pmu_start()
1317 event->hw.state = 0; cpumsf_pmu_start()
1319 if (SAMPL_DIAG_MODE(&event->hw)) cpumsf_pmu_start()
1321 perf_pmu_enable(event->pmu); cpumsf_pmu_start()
1327 static void cpumsf_pmu_stop(struct perf_event *event, int flags) cpumsf_pmu_stop() argument
1331 if (event->hw.state & PERF_HES_STOPPED) cpumsf_pmu_stop()
1334 perf_pmu_disable(event->pmu); cpumsf_pmu_stop()
1337 event->hw.state |= PERF_HES_STOPPED; cpumsf_pmu_stop()
1339 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { cpumsf_pmu_stop()
1340 hw_perf_event_update(event, 1); cpumsf_pmu_stop()
1341 event->hw.state |= PERF_HES_UPTODATE; cpumsf_pmu_stop()
1343 perf_pmu_enable(event->pmu); cpumsf_pmu_stop()
1346 static int cpumsf_pmu_add(struct perf_event *event, int flags) cpumsf_pmu_add() argument
1358 perf_pmu_disable(event->pmu); cpumsf_pmu_add()
1360 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; cpumsf_pmu_add()
1363 * using the SDB-table start. Reset TEAR_REG event hardware register cpumsf_pmu_add()
1371 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); cpumsf_pmu_add()
1372 hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); cpumsf_pmu_add()
1381 if (SAMPL_DIAG_MODE(&event->hw)) cpumsf_pmu_add()
1384 /* Set in_use flag and store event */ cpumsf_pmu_add()
1385 cpuhw->event = event; cpumsf_pmu_add()
1389 cpumsf_pmu_start(event, PERF_EF_RELOAD); cpumsf_pmu_add()
1391 perf_event_update_userpage(event); cpumsf_pmu_add()
1392 perf_pmu_enable(event->pmu); cpumsf_pmu_add()
1396 static void cpumsf_pmu_del(struct perf_event *event, int flags) cpumsf_pmu_del() argument
1400 perf_pmu_disable(event->pmu); cpumsf_pmu_del()
1401 cpumsf_pmu_stop(event, PERF_EF_UPDATE); cpumsf_pmu_del()
1406 cpuhw->event = NULL; cpumsf_pmu_del()
1408 perf_event_update_userpage(event); cpumsf_pmu_del()
1409 perf_pmu_enable(event->pmu); cpumsf_pmu_del()
1421 PMU_FORMAT_ATTR(event, "config:0-63");
1478 hw_perf_event_update(cpuhw->event, 0); cpumf_measurement_alert()
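
perf_exclude_event() above is a plain attribute filter: a sample is dropped when it was taken in a context the event asked to exclude (user, kernel, guest or host). A minimal sketch of the same check, with the perf structures reduced to plain flags for illustration:

#include <stdbool.h>
#include <stdio.h>

/* reduced stand-ins for perf_event_attr and the sample context */
struct attr_sketch {
	bool exclude_user, exclude_kernel, exclude_guest, exclude_host;
};

struct sample_ctx {
	bool user_mode;	/* sample taken in user mode? */
	bool in_guest;	/* sample taken while running a guest? */
};

/* return nonzero if the sample must be discarded, as in perf_exclude_event() */
static int exclude_sample(const struct attr_sketch *attr, const struct sample_ctx *ctx)
{
	if (attr->exclude_user && ctx->user_mode)
		return 1;
	if (attr->exclude_kernel && !ctx->user_mode)
		return 1;
	if (attr->exclude_guest && ctx->in_guest)
		return 1;
	if (attr->exclude_host && !ctx->in_guest)
		return 1;
	return 0;
}

int main(void)
{
	struct attr_sketch attr = { .exclude_kernel = true };
	struct sample_ctx ctx = { .user_mode = false, .in_guest = false };

	printf("excluded=%d\n", exclude_sample(&attr, &ctx));
	return 0;
}
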
H A Dperf_cpum_cf.c2 * Performance event support for s390x - CPU-measurement Counter Facility
69 /* Local CPUMF event structure */
89 static int get_counter_set(u64 event) get_counter_set() argument
93 if (event < 32) get_counter_set()
95 else if (event < 64) get_counter_set()
97 else if (event < 128) get_counter_set()
99 else if (event < 256) get_counter_set()
165 * PMUs that might suffice the event request. validate_ctr_auth()
297 /* Release the PMU if event is the last perf event */ hw_perf_event_destroy()
298 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
308 /* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
318 /* CPUMF <-> perf event mappings for userspace (problem-state set) */
329 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
331 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
332 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
376 /* Use the hardware perf event structure to store the counter number __hw_perf_event_init()
384 /* Validate the counter that is assigned to this event. __hw_perf_event_init()
387 * validate event groups (event->group_leader != event). __hw_perf_event_init()
402 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
412 static int cpumf_pmu_event_init(struct perf_event *event) cpumf_pmu_event_init() argument
416 switch (event->attr.type) { cpumf_pmu_event_init()
420 err = __hw_perf_event_init(event); cpumf_pmu_event_init()
426 if (unlikely(err) && event->destroy) cpumf_pmu_event_init()
427 event->destroy(event); cpumf_pmu_event_init()
432 static int hw_perf_event_reset(struct perf_event *event) hw_perf_event_reset() argument
438 prev = local64_read(&event->hw.prev_count); hw_perf_event_reset()
439 err = ecctr(event->hw.config, &new); hw_perf_event_reset()
450 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); hw_perf_event_reset()
455 static int hw_perf_event_update(struct perf_event *event) hw_perf_event_update() argument
461 prev = local64_read(&event->hw.prev_count); hw_perf_event_update()
462 err = ecctr(event->hw.config, &new); hw_perf_event_update()
465 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); hw_perf_event_update()
469 local64_add(delta, &event->count); hw_perf_event_update()
474 static void cpumf_pmu_read(struct perf_event *event) cpumf_pmu_read() argument
476 if (event->hw.state & PERF_HES_STOPPED) cpumf_pmu_read()
479 hw_perf_event_update(event); cpumf_pmu_read()
482 static void cpumf_pmu_start(struct perf_event *event, int flags) cpumf_pmu_start() argument
485 struct hw_perf_event *hwc = &event->hw; cpumf_pmu_start()
503 * Because all counters in a set are active, the event->hw.prev_count cpumf_pmu_start()
507 hw_perf_event_reset(event); cpumf_pmu_start()
513 static void cpumf_pmu_stop(struct perf_event *event, int flags) cpumf_pmu_stop() argument
516 struct hw_perf_event *hwc = &event->hw; cpumf_pmu_stop()
525 event->hw.state |= PERF_HES_STOPPED; cpumf_pmu_stop()
529 hw_perf_event_update(event); cpumf_pmu_stop()
530 event->hw.state |= PERF_HES_UPTODATE; cpumf_pmu_stop()
534 static int cpumf_pmu_add(struct perf_event *event, int flags) cpumf_pmu_add() argument
544 if (validate_ctr_auth(&event->hw)) cpumf_pmu_add()
547 ctr_set_enable(&cpuhw->state, event->hw.config_base); cpumf_pmu_add()
548 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; cpumf_pmu_add()
551 cpumf_pmu_start(event, PERF_EF_RELOAD); cpumf_pmu_add()
553 perf_event_update_userpage(event); cpumf_pmu_add()
558 static void cpumf_pmu_del(struct perf_event *event, int flags) cpumf_pmu_del() argument
562 cpumf_pmu_stop(event, PERF_EF_UPDATE); cpumf_pmu_del()
568 * When a new perf event has been added but not yet started, this can cpumf_pmu_del()
572 if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base])) cpumf_pmu_del()
573 ctr_set_disable(&cpuhw->state, event->hw.config_base); cpumf_pmu_del()
575 perf_event_update_userpage(event); cpumf_pmu_del()
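
hw_perf_event_update() above follows the usual perf pattern for a free-running counter: snapshot the previous hardware value, read the current one, publish the new snapshot with a compare-and-exchange, and add the delta to the accumulated count. A user-space sketch of that loop with C11 atomics standing in for local64_*; the hardware read is simulated and counter wrap is ignored:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* simulated free-running hardware counter (stands in for ecctr()) */
static uint64_t hw_counter = 1000;
static uint64_t read_hw_counter(void) { return hw_counter += 37; }

struct counter_event {
	_Atomic uint64_t prev_count;	/* last published snapshot of the counter */
	_Atomic uint64_t count;		/* accumulated event count */
};

/* same shape as hw_perf_event_update(): snapshot, cmpxchg, accumulate delta */
static void event_update(struct counter_event *ev)
{
	uint64_t prev, new;

	do {
		prev = atomic_load(&ev->prev_count);
		new = read_hw_counter();
	} while (!atomic_compare_exchange_weak(&ev->prev_count, &prev, new));

	atomic_fetch_add(&ev->count, new - prev);
}

int main(void)
{
	struct counter_event ev = { .prev_count = 1000, .count = 0 };

	event_update(&ev);
	event_update(&ev);
	printf("count=%llu\n", (unsigned long long)atomic_load(&ev.count));
	return 0;
}
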
/linux-4.4.14/drivers/s390/net/
H A Dfsm.h56 int event; member in struct:__anon8994
78 * Description of a state-event combination
126 fsm_record_history(fsm_instance *fi, int state, int event);
130 * Emits an event to a FSM.
131 * If an action function is defined for the current state/event combination,
134 * @param fi Pointer to FSM which should receive the event.
135 * @param event The event do be delivered.
139 * 1 if current state or event is out of range
140 * !0 if state and event in range, but no action defined.
143 fsm_event(fsm_instance *fi, int event, void *arg) fsm_event() argument
149 (event >= fi->f->nr_events) ) { fsm_event()
151 fi->name, (long)state,(long)fi->f->nr_states, event, fsm_event()
158 r = fi->f->jumpmatrix[fi->f->nr_states * event + state]; fsm_event()
161 printk(KERN_DEBUG "fsm(%s): state %s event %s\n", fsm_event()
163 fi->f->event_names[event]); fsm_event()
166 fsm_record_history(fi, state, event); fsm_event()
168 r(fi, event, arg); fsm_event()
172 printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n", fsm_event()
173 fi->name, fi->f->event_names[event], fsm_event()
185 * This does <em>not</em> trigger an event or call an action function.
248 * @param event Event, to trigger if timer expires.
253 extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg);
260 * @param event Event, to trigger if timer expires.
263 extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg);
H A Dctcm_fsms.c131 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
137 static void chx_txdone(fsm_instance *fi, int event, void *arg);
138 static void chx_rx(fsm_instance *fi, int event, void *arg);
139 static void chx_rxidle(fsm_instance *fi, int event, void *arg);
140 static void chx_firstio(fsm_instance *fi, int event, void *arg);
141 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
142 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
143 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
144 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
145 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
146 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
147 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
148 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
149 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
150 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
151 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
152 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
153 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
154 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
160 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
161 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
162 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
164 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
165 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
166 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
167 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
168 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
169 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
170 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
171 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
172 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
173 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
174 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
175 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
176 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
177 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
179 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
182 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
228 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg) ctcm_action_nop() argument
242 * event The event, just happened.
245 static void chx_txdone(fsm_instance *fi, int event, void *arg) chx_txdone() argument
331 * event The event, just happened.
334 void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg) ctcm_chx_txidle() argument
352 * event The event, just happened.
355 static void chx_rx(fsm_instance *fi, int event, void *arg) chx_rx() argument
429 * event The event, just happened.
432 static void chx_firstio(fsm_instance *fi, int event, void *arg) chx_firstio() argument
457 chx_rxidle(fi, event, arg); chx_firstio()
508 * event The event, just happened.
511 static void chx_rxidle(fsm_instance *fi, int event, void *arg) chx_rxidle() argument
540 chx_firstio(fi, event, arg); chx_rxidle()
548 * event The event, just happened.
551 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) ctcm_chx_setmode() argument
568 if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ ctcm_chx_setmode()
575 if (event == CTC_EVENT_TIMER) /* see above comments */ ctcm_chx_setmode()
589 * event The event, just happened.
592 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) ctcm_chx_start() argument
652 * event The event, just happened.
655 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) ctcm_chx_haltio() argument
668 if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */ ctcm_chx_haltio()
676 if (event == CTC_EVENT_STOP) ctcm_chx_haltio()
682 if (event != CTC_EVENT_STOP) { ctcm_chx_haltio()
740 * event The event, just happened.
743 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg) ctcm_chx_stopped() argument
753 * event The event, just happened.
756 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg) ctcm_chx_stop() argument
767 * event The event, just happened.
770 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg) ctcm_chx_fail() argument
779 * event The event, just happened.
782 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) ctcm_chx_setuperr() argument
794 ((event == CTC_EVENT_UC_RCRESET) || ctcm_chx_setuperr()
795 (event == CTC_EVENT_UC_RSRESET))) { ctcm_chx_setuperr()
811 CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], ctcm_chx_setuperr()
828 * event The event, just happened.
831 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg) ctcm_chx_restart() argument
841 CTCM_FUNTAIL, ch->id, event, dev->name); ctcm_chx_restart()
848 if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ ctcm_chx_restart()
854 if (event == CTC_EVENT_TIMER) ctcm_chx_restart()
870 * event The event, just happened.
873 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) ctcm_chx_rxiniterr() argument
879 if (event == CTC_EVENT_TIMER) { ctcm_chx_rxiniterr()
884 ctcm_chx_restart(fi, event, arg); ctcm_chx_rxiniterr()
892 ctc_ch_event_names[event], fsm_getstate_str(fi)); ctcm_chx_rxiniterr()
896 "error %s\n", ctc_ch_event_names[event]); ctcm_chx_rxiniterr()
905 * event The event, just happened.
908 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg) ctcm_chx_rxinitfail() argument
925 * event The event, just happened.
928 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) ctcm_chx_rxdisc() argument
957 * event The event, just happened.
960 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) ctcm_chx_txiniterr() argument
966 if (event == CTC_EVENT_TIMER) { ctcm_chx_txiniterr()
969 ctcm_chx_restart(fi, event, arg); ctcm_chx_txiniterr()
977 ctc_ch_event_names[event], fsm_getstate_str(fi)); ctcm_chx_txiniterr()
981 "error %s\n", ctc_ch_event_names[event]); ctcm_chx_txiniterr()
989 * event The event, just happened.
992 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) ctcm_chx_txretry() argument
1012 ctcm_chx_restart(fi, event, arg); ctcm_chx_txretry()
1030 ctcm_chx_restart(fi, event, arg); ctcm_chx_txretry()
1034 if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */ ctcm_chx_txretry()
1045 if (event == CTC_EVENT_TIMER) ctcm_chx_txretry()
1062 * event The event, just happened.
1065 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg) ctcm_chx_iofatal() argument
1212 * event The event, just happened.
1215 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) ctcmpc_chx_txdone() argument
1382 * event The event, just happened.
1385 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) ctcmpc_chx_rx() argument
1485 * event The event, just happened.
1488 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) ctcmpc_chx_firstio() argument
1514 ctcmpc_chx_rxidle(fi, event, arg); ctcmpc_chx_firstio()
1539 * event The event, just happened.
1542 void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) ctcmpc_chx_rxidle() argument
1569 if (event == CTC_EVENT_START) ctcmpc_chx_rxidle()
1574 if (event == CTC_EVENT_START) ctcmpc_chx_rxidle()
1597 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg) ctcmpc_chx_attn() argument
1656 static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) ctcmpc_chx_attnbusy() argument
1741 static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg) ctcmpc_chx_resend() argument
1757 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) ctcmpc_chx_send_sweep() argument
2065 * event The event, just happened.
2068 static void dev_action_start(fsm_instance *fi, int event, void *arg) dev_action_start() argument
2090 * event The event, just happened.
2093 static void dev_action_stop(fsm_instance *fi, int event, void *arg) dev_action_stop() argument
2113 static void dev_action_restart(fsm_instance *fi, int event, void *arg) dev_action_restart() argument
2128 dev_action_stop(fi, event, arg); dev_action_restart()
2145 * event The event, just happened.
2148 static void dev_action_chup(fsm_instance *fi, int event, void *arg) dev_action_chup() argument
2156 dev->name, dev->ml_priv, dev_stat, event); dev_action_chup()
2160 if (event == DEV_EVENT_RXUP) dev_action_chup()
2166 if (event == DEV_EVENT_RXUP) { dev_action_chup()
2174 if (event == DEV_EVENT_TXUP) { dev_action_chup()
2182 if (event == DEV_EVENT_RXUP) dev_action_chup()
2186 if (event == DEV_EVENT_TXUP) dev_action_chup()
2192 if (event == DEV_EVENT_RXUP) dev_action_chup()
2206 * event The event, just happened.
2209 static void dev_action_chdown(fsm_instance *fi, int event, void *arg) dev_action_chdown() argument
2219 if (event == DEV_EVENT_TXDOWN) dev_action_chdown()
2225 if (event == DEV_EVENT_TXDOWN) dev_action_chdown()
2229 if (event == DEV_EVENT_RXDOWN) dev_action_chdown()
2233 if (event == DEV_EVENT_TXDOWN) dev_action_chdown()
2239 if (event == DEV_EVENT_RXDOWN) dev_action_chdown()
2243 if (event == DEV_EVENT_TXDOWN) dev_action_chdown()
2248 if (event == DEV_EVENT_RXDOWN) dev_action_chdown()
/linux-4.4.14/drivers/isdn/sc/
H A DMakefile9 sc-y := shmem.o init.o packet.o command.o event.o \
H A Devent.c1 /* $Id: event.c,v 1.4.8.1 2001/09/23 22:24:59 kai Exp $
41 int indicate_status(int card, int event, ulong Channel, char *Data) indicate_status() argument
46 pr_debug("%s: Indicating event %s on Channel %d\n", indicate_status()
47 sc_adapter[card]->devicename, events[event - 256], Channel); indicate_status()
52 switch (event) { indicate_status()
64 cmd.command = event; indicate_status()
/linux-4.4.14/drivers/net/wireless/ti/wl1251/
H A DMakefile1 wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \
/linux-4.4.14/drivers/net/wireless/ti/wlcore/
H A DMakefile1 wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
/linux-4.4.14/drivers/cpufreq/
H A Dcpufreq_performance.c20 unsigned int event) cpufreq_governor_performance()
22 switch (event) { cpufreq_governor_performance()
25 pr_debug("setting to %u kHz because of event %u\n", cpufreq_governor_performance()
26 policy->max, event); cpufreq_governor_performance()
19 cpufreq_governor_performance(struct cpufreq_policy *policy, unsigned int event) cpufreq_governor_performance() argument
H A Dcpufreq_powersave.c20 unsigned int event) cpufreq_governor_powersave()
22 switch (event) { cpufreq_governor_powersave()
25 pr_debug("setting to %u kHz because of event %u\n", cpufreq_governor_powersave()
26 policy->min, event); cpufreq_governor_powersave()
19 cpufreq_governor_powersave(struct cpufreq_policy *policy, unsigned int event) cpufreq_governor_powersave() argument
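
Both governors above reduce to the same shape: on the start/limits events they call the frequency-setting helper with either policy->max (performance) or policy->min (powersave). A stand-alone sketch of that dispatch, with the event constants, policy struct and target helper mocked up for illustration:

#include <stdio.h>

/* mocked-up governor events and policy, for illustration only */
enum { GOV_START, GOV_STOP, GOV_LIMITS };

struct policy_sketch {
	unsigned int min, max;	/* kHz */
};

static void set_target(struct policy_sketch *p, unsigned int freq)
{
	(void)p;
	printf("setting to %u kHz\n", freq);
}

/* performance governor: always pin to the policy maximum
 * (powersave is identical except it uses policy->min) */
static int governor_performance(struct policy_sketch *policy, unsigned int event)
{
	switch (event) {
	case GOV_START:
	case GOV_LIMITS:
		set_target(policy, policy->max);
		break;
	default:
		break;
	}
	return 0;
}

int main(void)
{
	struct policy_sketch p = { .min = 800000, .max = 2400000 };

	governor_performance(&p, GOV_LIMITS);
	return 0;
}
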
/linux-4.4.14/include/trace/events/
H A Dcontext_tracking.h28 * @dummy: dummy arg to make trace event macro happy
30 * This event occurs when the kernel resumes to userspace after
42 * @dummy: dummy arg to make trace event macro happy
44 * This event occurs when userspace enters the kernel through
/linux-4.4.14/arch/powerpc/include/asm/
H A Dperf_event_fsl_emb.h2 * Performance event support - Freescale embedded specific definitions.
18 /* event flags */
22 /* upper half of event flags is PMLCb */
40 /* Returns event flags and PMLCb (FSL_EMB_EVENT_*) */
/linux-4.4.14/drivers/iio/
H A DMakefile6 industrialio-y := industrialio-core.o industrialio-event.o inkern.o
10 obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/sw/
H A Dchan.h5 #include <core/event.h>
14 struct nvkm_event event; member in struct:nvkm_sw_chan
/linux-4.4.14/drivers/net/ethernet/ti/
H A Dcpts.c39 static int event_expired(struct cpts_event *event) event_expired() argument
41 return time_after(jiffies, event->tmo); event_expired()
44 static int event_type(struct cpts_event *event) event_type() argument
46 return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; event_type()
63 * Returns zero if matching event type was found.
69 struct cpts_event *event; cpts_fifo_read() local
75 pr_err("cpts: event pool is empty\n"); cpts_fifo_read()
78 event = list_first_entry(&cpts->pool, struct cpts_event, list); cpts_fifo_read()
79 event->tmo = jiffies + 2; cpts_fifo_read()
80 event->high = hi; cpts_fifo_read()
81 event->low = lo; cpts_fifo_read()
82 type = event_type(event); cpts_fifo_read()
87 list_del_init(&event->list); cpts_fifo_read()
88 list_add_tail(&event->list, &cpts->events); cpts_fifo_read()
95 pr_err("cpts: unknown event type\n"); cpts_fifo_read()
107 struct cpts_event *event; cpts_systim_read() local
116 event = list_entry(this, struct cpts_event, list); cpts_systim_read()
117 if (event_type(event) == CPTS_EV_PUSH) { cpts_systim_read()
118 list_del_init(&event->list); cpts_systim_read()
119 list_add(&event->list, &cpts->pool); cpts_systim_read()
120 val = event->low; cpts_systim_read()
289 struct cpts_event *event; cpts_find_ts() local
302 event = list_entry(this, struct cpts_event, list); cpts_find_ts()
303 if (event_expired(event)) { cpts_find_ts()
304 list_del_init(&event->list); cpts_find_ts()
305 list_add(&event->list, &cpts->pool); cpts_find_ts()
308 mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; cpts_find_ts()
309 seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; cpts_find_ts()
310 if (ev_type == event_type(event) && cpts_find_ts()
312 ns = timecounter_cyc2time(&cpts->tc, event->low); cpts_find_ts()
313 list_del_init(&event->list); cpts_find_ts()
314 list_add(&event->list, &cpts->pool); cpts_find_ts()
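
cpts_find_ts() and cpts_systim_read() above recover the event type, message type and sequence id by shifting and masking the packed high word of each hardware event. A small sketch of that unpacking; the shift and mask values below are illustrative placeholders, not the CPTS register layout:

#include <stdint.h>
#include <stdio.h>

/* placeholder field layout, for illustration only */
#define EVENT_TYPE_SHIFT	20
#define EVENT_TYPE_MASK		0xf
#define MESSAGE_TYPE_SHIFT	16
#define MESSAGE_TYPE_MASK	0xf
#define SEQUENCE_ID_SHIFT	0
#define SEQUENCE_ID_MASK	0xffff

struct cpts_event_sketch {
	uint32_t high;	/* packed type/message/sequence fields */
	uint32_t low;	/* timestamp */
};

static unsigned int event_type(const struct cpts_event_sketch *ev)
{
	return (ev->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}

int main(void)
{
	struct cpts_event_sketch ev = {
		.high = (4u << EVENT_TYPE_SHIFT) | (2u << MESSAGE_TYPE_SHIFT) | 0x1234,
		.low = 0x1000,
	};

	printf("type=%u mtype=%u seqid=0x%x ts=0x%x\n",
	       event_type(&ev),
	       (ev.high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK,
	       (ev.high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK,
	       (unsigned)ev.low);
	return 0;
}
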
/linux-4.4.14/include/linux/platform_data/
H A Dkeyboard-pxa930_rotary.h6 * rotary can be either interpreted as a relative input event (e.g.
7 * REL_WHEEL or REL_HWHEEL) or a specific key event (e.g. UP/DOWN
/linux-4.4.14/arch/arm/kernel/
H A Dperf_event_xscale.c9 * - xscale1pmu: 2 event counters and a cycle counter
10 * - xscale2pmu: 4 event counters and a cycle counter
11 * The two variants share event definitions, but have different
174 struct perf_event *event = cpuc->events[idx]; xscale1pmu_handle_irq() local
177 if (!event) xscale1pmu_handle_irq()
183 hwc = &event->hw; xscale1pmu_handle_irq()
184 armpmu_event_update(event); xscale1pmu_handle_irq()
186 if (!armpmu_event_set_period(event)) xscale1pmu_handle_irq()
189 if (perf_event_overflow(event, &data, regs)) xscale1pmu_handle_irq()
190 cpu_pmu->disable(event); xscale1pmu_handle_irq()
204 static void xscale1pmu_enable_event(struct perf_event *event) xscale1pmu_enable_event() argument
207 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); xscale1pmu_enable_event()
208 struct hw_perf_event *hwc = &event->hw; xscale1pmu_enable_event()
240 static void xscale1pmu_disable_event(struct perf_event *event) xscale1pmu_disable_event() argument
243 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); xscale1pmu_disable_event()
244 struct hw_perf_event *hwc = &event->hw; xscale1pmu_disable_event()
276 struct perf_event *event) xscale1pmu_get_event_idx()
278 struct hw_perf_event *hwc = &event->hw; xscale1pmu_get_event_idx()
319 static inline u32 xscale1pmu_read_counter(struct perf_event *event) xscale1pmu_read_counter() argument
321 struct hw_perf_event *hwc = &event->hw; xscale1pmu_read_counter()
340 static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val) xscale1pmu_write_counter() argument
342 struct hw_perf_event *hwc = &event->hw; xscale1pmu_write_counter()
358 static int xscale_map_event(struct perf_event *event) xscale_map_event() argument
360 return armpmu_map_event(event, &xscale_perf_map, xscale_map_event()
515 struct perf_event *event = cpuc->events[idx]; xscale2pmu_handle_irq() local
518 if (!event) xscale2pmu_handle_irq()
524 hwc = &event->hw; xscale2pmu_handle_irq()
525 armpmu_event_update(event); xscale2pmu_handle_irq()
527 if (!armpmu_event_set_period(event)) xscale2pmu_handle_irq()
530 if (perf_event_overflow(event, &data, regs)) xscale2pmu_handle_irq()
531 cpu_pmu->disable(event); xscale2pmu_handle_irq()
545 static void xscale2pmu_enable_event(struct perf_event *event) xscale2pmu_enable_event() argument
548 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); xscale2pmu_enable_event()
549 struct hw_perf_event *hwc = &event->hw; xscale2pmu_enable_event()
591 static void xscale2pmu_disable_event(struct perf_event *event) xscale2pmu_disable_event() argument
594 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); xscale2pmu_disable_event()
595 struct hw_perf_event *hwc = &event->hw; xscale2pmu_disable_event()
645 struct perf_event *event) xscale2pmu_get_event_idx()
647 int idx = xscale1pmu_get_event_idx(cpuc, event); xscale2pmu_get_event_idx()
683 static inline u32 xscale2pmu_read_counter(struct perf_event *event) xscale2pmu_read_counter() argument
685 struct hw_perf_event *hwc = &event->hw; xscale2pmu_read_counter()
710 static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val) xscale2pmu_write_counter() argument
712 struct hw_perf_event *hwc = &event->hw; xscale2pmu_write_counter()
275 xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) xscale1pmu_get_event_idx() argument
644 xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) xscale2pmu_get_event_idx() argument
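
The xscale IRQ handlers above show the common Arm PMU overflow loop: for every counter that has an event and whose overflow bit is set, fold the hardware counter into the event, re-arm the sampling period, and deliver the sample, disabling the event if the overflow path asks for throttling. A stripped-down sketch of that control flow, with everything hardware-specific mocked out for illustration:

#include <stdio.h>

#define NUM_COUNTERS	4

/* heavily reduced stand-in for a per-counter perf event */
struct pmu_event_sketch {
	int active;
	unsigned long count;
};

static struct pmu_event_sketch *events[NUM_COUNTERS];

static int counter_overflowed(int idx, unsigned int pmnc) { return pmnc & (1u << idx); }
static void event_update(struct pmu_event_sketch *ev)	   { ev->count++; }
static int event_set_period(struct pmu_event_sketch *ev)  { (void)ev; return 1; }
static int event_overflow(struct pmu_event_sketch *ev)	   { (void)ev; return 0; }
static void event_disable(struct pmu_event_sketch *ev)	   { ev->active = 0; }

/* same shape as xscale1pmu_handle_irq(): walk the counters that overflowed */
static void handle_pmu_irq(unsigned int pmnc)
{
	int idx;

	for (idx = 0; idx < NUM_COUNTERS; idx++) {
		struct pmu_event_sketch *ev = events[idx];

		if (!ev || !counter_overflowed(idx, pmnc))
			continue;

		event_update(ev);		/* fold hw counter into event count */
		if (!event_set_period(ev))	/* arm the next sampling period */
			continue;
		if (event_overflow(ev))		/* deliver sample; throttle if asked */
			event_disable(ev);
	}
}

int main(void)
{
	struct pmu_event_sketch ev = { .active = 1 };

	events[2] = &ev;
	handle_pmu_irq(1u << 2);
	printf("count=%lu active=%d\n", ev.count, ev.active);
	return 0;
}
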
H A Dperf_event_v7.c32 * Common ARMv7 event types
83 /* ARMv7 Cortex-A8 specific event types */
91 /* ARMv7 Cortex-A9 specific event types */
98 /* ARMv7 Cortex-A5 specific event types */
104 /* ARMv7 Cortex-A15 specific event types */
122 /* ARMv7 Cortex-A12 specific event types */
135 /* ARMv7 Krait specific event types */
149 /* ARMv7 Scorpion specific event types */
640 static inline u32 armv7pmu_read_counter(struct perf_event *event) armv7pmu_read_counter() argument
642 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_read_counter()
643 struct hw_perf_event *hwc = &event->hw; armv7pmu_read_counter()
660 static inline void armv7pmu_write_counter(struct perf_event *event, u32 value) armv7pmu_write_counter() argument
662 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_write_counter()
663 struct hw_perf_event *hwc = &event->hw; armv7pmu_write_counter()
765 static void armv7pmu_enable_event(struct perf_event *event) armv7pmu_enable_event() argument
768 struct hw_perf_event *hwc = &event->hw; armv7pmu_enable_event()
769 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_enable_event()
781 * the event that we're interested in. armv7pmu_enable_event()
791 * Set event (if destined for PMNx counters) armv7pmu_enable_event()
792 * We only need to set the event for the cycle counter if we armv7pmu_enable_event()
793 * have the ability to perform event filtering. armv7pmu_enable_event()
811 static void armv7pmu_disable_event(struct perf_event *event) armv7pmu_disable_event() argument
814 struct hw_perf_event *hwc = &event->hw; armv7pmu_disable_event()
815 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_disable_event()
869 struct perf_event *event = cpuc->events[idx]; armv7pmu_handle_irq() local
872 /* Ignore if we don't have an event. */ armv7pmu_handle_irq()
873 if (!event) armv7pmu_handle_irq()
883 hwc = &event->hw; armv7pmu_handle_irq()
884 armpmu_event_update(event); armv7pmu_handle_irq()
886 if (!armpmu_event_set_period(event)) armv7pmu_handle_irq()
889 if (perf_event_overflow(event, &data, regs)) armv7pmu_handle_irq()
890 cpu_pmu->disable(event); armv7pmu_handle_irq()
928 struct perf_event *event) armv7pmu_get_event_idx()
931 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_get_event_idx()
932 struct hw_perf_event *hwc = &event->hw; armv7pmu_get_event_idx()
957 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
959 static int armv7pmu_set_event_filter(struct hw_perf_event *event, armv7pmu_set_event_filter() argument
975 * construct the event type. armv7pmu_set_event_filter()
977 event->config_base = config_base; armv7pmu_set_event_filter()
997 static int armv7_a8_map_event(struct perf_event *event) armv7_a8_map_event() argument
999 return armpmu_map_event(event, &armv7_a8_perf_map, armv7_a8_map_event()
1003 static int armv7_a9_map_event(struct perf_event *event) armv7_a9_map_event() argument
1005 return armpmu_map_event(event, &armv7_a9_perf_map, armv7_a9_map_event()
1009 static int armv7_a5_map_event(struct perf_event *event) armv7_a5_map_event() argument
1011 return armpmu_map_event(event, &armv7_a5_perf_map, armv7_a5_map_event()
1015 static int armv7_a15_map_event(struct perf_event *event) armv7_a15_map_event() argument
1017 return armpmu_map_event(event, &armv7_a15_perf_map, armv7_a15_map_event()
1021 static int armv7_a7_map_event(struct perf_event *event) armv7_a7_map_event() argument
1023 return armpmu_map_event(event, &armv7_a7_perf_map, armv7_a7_map_event()
1027 static int armv7_a12_map_event(struct perf_event *event) armv7_a12_map_event() argument
1029 return armpmu_map_event(event, &armv7_a12_perf_map, armv7_a12_map_event()
1033 static int krait_map_event(struct perf_event *event) krait_map_event() argument
1035 return armpmu_map_event(event, &krait_perf_map, krait_map_event()
1039 static int krait_map_event_no_branch(struct perf_event *event) krait_map_event_no_branch() argument
1041 return armpmu_map_event(event, &krait_perf_map_no_branch, krait_map_event_no_branch()
1045 static int scorpion_map_event(struct perf_event *event) scorpion_map_event() argument
1047 return armpmu_map_event(event, &scorpion_perf_map, scorpion_map_event()
1163 * G = group or particular event
1165 * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1168 * unit, etc.) while the event code (CC) corresponds to a particular class of
1169 * events (interrupts for example). An event code is broken down into
1179 #define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */
1180 #define EVENT_GROUP(event) ((event) & 0xf) /* G */
1181 #define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */
1182 #define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */
1183 #define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */
1348 static void krait_pmu_disable_event(struct perf_event *event) krait_pmu_disable_event() argument
1351 struct hw_perf_event *hwc = &event->hw; krait_pmu_disable_event()
1353 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); krait_pmu_disable_event()
1374 static void krait_pmu_enable_event(struct perf_event *event) krait_pmu_enable_event() argument
1377 struct hw_perf_event *hwc = &event->hw; krait_pmu_enable_event()
1379 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); krait_pmu_enable_event()
1384 * the event that we're interested in. krait_pmu_enable_event()
1392 * Set event (if destined for PMNx counters) krait_pmu_enable_event()
1393 * We set the event for the cycle counter because we krait_pmu_enable_event()
1394 * have the ability to perform event filtering. krait_pmu_enable_event()
1435 static int krait_event_to_bit(struct perf_event *event, unsigned int region, krait_event_to_bit() argument
1439 struct hw_perf_event *hwc = &event->hw; krait_event_to_bit()
1440 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); krait_event_to_bit()
1462 struct perf_event *event) krait_pmu_get_event_idx()
1466 struct hw_perf_event *hwc = &event->hw; krait_pmu_get_event_idx()
1480 bit = krait_event_to_bit(event, region, group); krait_pmu_get_event_idx()
1485 idx = armv7pmu_get_event_idx(cpuc, event); krait_pmu_get_event_idx()
1493 struct perf_event *event) krait_pmu_clear_event_idx()
1496 struct hw_perf_event *hwc = &event->hw; krait_pmu_clear_event_idx()
1503 bit = krait_event_to_bit(event, region, group); krait_pmu_clear_event_idx()
1552 * G = group or particular event
1554 * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1557 * unit, etc.) while the event code (CC) corresponds to a particular class of
1558 * events (interrupts for example). An event code is broken down into
1680 static void scorpion_pmu_disable_event(struct perf_event *event) scorpion_pmu_disable_event() argument
1683 struct hw_perf_event *hwc = &event->hw; scorpion_pmu_disable_event()
1685 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); scorpion_pmu_disable_event()
1706 static void scorpion_pmu_enable_event(struct perf_event *event) scorpion_pmu_enable_event() argument
1709 struct hw_perf_event *hwc = &event->hw; scorpion_pmu_enable_event()
1711 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); scorpion_pmu_enable_event()
1716 * the event that we're interested in. scorpion_pmu_enable_event()
1724 * Set event (if destined for PMNx counters) scorpion_pmu_enable_event()
1725 * We don't set the event for the cycle counter because we scorpion_pmu_enable_event()
1726 * don't have the ability to perform event filtering. scorpion_pmu_enable_event()
1767 static int scorpion_event_to_bit(struct perf_event *event, unsigned int region, scorpion_event_to_bit() argument
1771 struct hw_perf_event *hwc = &event->hw; scorpion_event_to_bit()
1772 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); scorpion_event_to_bit()
1794 struct perf_event *event) scorpion_pmu_get_event_idx()
1798 struct hw_perf_event *hwc = &event->hw; scorpion_pmu_get_event_idx()
1809 bit = scorpion_event_to_bit(event, region, group); scorpion_pmu_get_event_idx()
1814 idx = armv7pmu_get_event_idx(cpuc, event); scorpion_pmu_get_event_idx()
1822 struct perf_event *event) scorpion_pmu_clear_event_idx()
1825 struct hw_perf_event *hwc = &event->hw; scorpion_pmu_clear_event_idx()
1832 bit = scorpion_event_to_bit(event, region, group); scorpion_pmu_clear_event_idx()
927 armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) armv7pmu_get_event_idx() argument
1461 krait_pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) krait_pmu_get_event_idx() argument
1492 krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) krait_pmu_clear_event_idx() argument
1793 scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) scorpion_pmu_get_event_idx() argument
1821 scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) scorpion_pmu_clear_event_idx() argument
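
The Krait EVENT_* macros above unpack the event config into a PMRESR region (R), a group within it (G) and an event code (CC); the 0x12021 example from the comment decodes to region 2, group 1, code 2, with the N nibble marking it a CPU rather than VeNum event. A quick sketch of that decoding; only the R/G/CC masks come from the listing, the KRAIT_EVENT/VENUM_EVENT bit values here are assumptions:

#include <stdio.h>

/* assumed flag bits for the N nibble, for illustration */
#define KRAIT_EVENT	0x10000
#define VENUM_EVENT	0x20000

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R: which PMRESR */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G: group in that register */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC: event class */
#define EVENT_VENUM(event)	(!!((event) & VENUM_EVENT))
#define EVENT_CPU(event)	(!!((event) & KRAIT_EVENT))

int main(void)
{
	unsigned int config = 0x12021;	/* the example used in the source comment */

	printf("region=%u group=%u code=0x%02x cpu=%d venum=%d\n",
	       EVENT_REGION(config), EVENT_GROUP(config), EVENT_CODE(config),
	       EVENT_CPU(config), EVENT_VENUM(config));
	return 0;
}
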
H A Dperf_event_v6.c11 * one event and replace it with another we could get spurious counts from the
12 * wrong event. However, we can take advantage of the fact that the
13 * performance counters can export events to the event bus, and the event bus
15 * the event bus. The procedure for disabling a configurable counter is:
24 * - set the new event type.
104 * misses and main TLB misses. There isn't an event for TLB misses, so
167 * misses and main TLB misses. There isn't an event for TLB misses, so
235 static inline u32 armv6pmu_read_counter(struct perf_event *event) armv6pmu_read_counter() argument
237 struct hw_perf_event *hwc = &event->hw; armv6pmu_read_counter()
253 static inline void armv6pmu_write_counter(struct perf_event *event, u32 value) armv6pmu_write_counter() argument
255 struct hw_perf_event *hwc = &event->hw; armv6pmu_write_counter()
268 static void armv6pmu_enable_event(struct perf_event *event) armv6pmu_enable_event() argument
271 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv6pmu_enable_event()
272 struct hw_perf_event *hwc = &event->hw; armv6pmu_enable_event()
293 * Mask out the current event and set the counter to count the event armv6pmu_enable_event()
328 struct perf_event *event = cpuc->events[idx]; armv6pmu_handle_irq() local
331 /* Ignore if we don't have an event. */ armv6pmu_handle_irq()
332 if (!event) armv6pmu_handle_irq()
342 hwc = &event->hw; armv6pmu_handle_irq()
343 armpmu_event_update(event); armv6pmu_handle_irq()
345 if (!armpmu_event_set_period(event)) armv6pmu_handle_irq()
348 if (perf_event_overflow(event, &data, regs)) armv6pmu_handle_irq()
349 cpu_pmu->disable(event); armv6pmu_handle_irq()
390 struct perf_event *event) armv6pmu_get_event_idx()
392 struct hw_perf_event *hwc = &event->hw; armv6pmu_get_event_idx()
415 static void armv6pmu_disable_event(struct perf_event *event) armv6pmu_disable_event() argument
418 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv6pmu_disable_event()
419 struct hw_perf_event *hwc = &event->hw; armv6pmu_disable_event()
438 * Mask out the current event and set the counter to count the number armv6pmu_disable_event()
450 static void armv6mpcore_pmu_disable_event(struct perf_event *event) armv6mpcore_pmu_disable_event() argument
453 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv6mpcore_pmu_disable_event()
454 struct hw_perf_event *hwc = &event->hw; armv6mpcore_pmu_disable_event()
481 static int armv6_map_event(struct perf_event *event) armv6_map_event() argument
483 return armpmu_map_event(event, &armv6_perf_map, armv6_map_event()
527 * disable the interrupt reporting and update the event. When unthrottling we
531 static int armv6mpcore_map_event(struct perf_event *event) armv6mpcore_map_event() argument
533 return armpmu_map_event(event, &armv6mpcore_perf_map, armv6mpcore_map_event()
389 armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) armv6pmu_get_event_idx() argument
/linux-4.4.14/drivers/usb/dwc3/
H A Ddebug.h128 * dwc3_gadget_event_string - returns event name
129 * @event: the event code
131 static inline const char *dwc3_gadget_event_string(u8 event) dwc3_gadget_event_string() argument
133 switch (event) { dwc3_gadget_event_string()
160 * dwc3_ep_event_string - returns event name
161 * @event: the event code
163 static inline const char *dwc3_ep_event_string(u8 event) dwc3_ep_event_string() argument
165 switch (event) { dwc3_ep_event_string()
184 * dwc3_gadget_event_type_string - return event name
185 * @event: the event code
187 static inline const char *dwc3_gadget_event_type_string(u8 event) dwc3_gadget_event_type_string() argument
189 switch (event) { dwc3_gadget_event_type_string()
/linux-4.4.14/net/llc/
H A Dllc_c_st.c4 * Description of event functions and actions there is in 802.2 LLC standard,
35 /* State transitions for LLC_CONN_EV_DISC_REQ event */
52 /* State transitions for LLC_CONN_EV_RESET_REQ event */
69 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
89 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
105 /* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */
124 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */
139 /* State transitions for LLC_CONN_EV_RX_ZZZ_CMD_Pbit_SET_X_INVAL_Nr event */
155 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_INVAL_Ns event */
171 /* State transitions for LLC_CONN_EV_RX_ZZZ_RSP_Fbit_SET_X_INVAL_Nr event */
187 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_INVAL_Ns event */
203 /* State transitions for LLC_CONN_EV_RX_BAD_PDU event */
219 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */
235 /* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_1 event */
258 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
280 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
302 /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
324 /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
353 /* State transitions for LLC_CONN_EV_CONN_REQ event */
369 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
388 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
402 /* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_1 event */
416 /* State transitions for LLC_CONN_EV_RX_XXX_YYY event */
447 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
463 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */
487 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
508 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
529 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */
549 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
570 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
611 /* State transitions for LLC_CONN_EV_DATA_REQ event */
632 /* State transitions for LLC_CONN_EV_DATA_REQ event */
653 /* State transitions for LLC_CONN_EV_DATA_REQ event */
670 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
690 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
710 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
733 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
756 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
779 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
800 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
821 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
837 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
860 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
873 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
894 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
915 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
932 /* State transitions for * LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
947 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
962 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
983 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
1000 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
1015 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
1030 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
1050 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
1067 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
1090 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
1113 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
1135 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
1157 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
1174 /* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
1193 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
1215 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
1238 /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
1261 /* State transitions for LLC_CONN_EV_TX_BUFF_FULL event */
1344 /* State transitions for LLC_CONN_EV_DATA_REQ event */
1364 /* State transitions for LLC_CONN_EV_DATA_REQ event */
1384 /* State transitions for LLC_CONN_EV_DATA_REQ event */
1401 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1421 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1441 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1460 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1479 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1498 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1517 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_UNEXPD_Ns event */
1539 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
1561 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
1581 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
1601 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
1616 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
1634 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
1659 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
1684 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
1707 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
1730 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
1745 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
1760 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
1780 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
1795 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
1810 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
1825 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
1845 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
1860 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
1882 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
1904 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
1925 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
1946 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
1963 /* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
1982 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
2003 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
2025 /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
2047 /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
2070 /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
2157 /* State transitions for LLC_CONN_EV_DATA_REQ event */
2176 /* State transitions for LLC_CONN_EV_DATA_REQ event */
2195 /* State transitions for LLC_CONN_EV_DATA_REQ event */
2213 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
2232 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
2251 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
2266 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
2281 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
2301 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
2315 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
2340 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
2364 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
2386 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
2408 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
2425 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
2440 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
2455 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
2475 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
2490 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
2505 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
2520 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
2540 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
2555 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
2577 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
2599 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
2620 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
2641 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
2657 /* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
2676 /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
2698 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
2720 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
2743 /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
2828 /* State transitions for LLC_CONN_EV_DATA_REQ event */
2844 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
2858 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
2877 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
2893 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
2909 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
2926 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
2945 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
2962 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
2979 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
2996 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
3013 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
3030 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
3045 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
3060 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
3075 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
3090 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
3106 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
3122 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
3138 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
3153 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
3168 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
3184 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
3257 /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
3273 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
3292 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
3310 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
3328 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
3347 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
3363 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
3379 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
3395 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
3416 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
3434 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
3452 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
3470 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
3487 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
3504 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
3519 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
3534 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
3549 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
3564 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
3580 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
3596 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
3612 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
3627 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
3642 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
3658 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
3733 /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
3749 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
3763 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
3777 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
3791 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
3806 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
3826 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
3844 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
3862 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
3880 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
3897 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
3914 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
3931 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
3946 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
3961 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
3976 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
3991 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
4007 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
4023 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
4039 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
4054 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
4069 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
4085 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
4158 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event,
4181 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event,
4204 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
4228 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
4251 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
4264 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
4287 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
4311 * LLC_CONN_EV_DATA_CONN_REQ event
4328 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4348 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 1 */
4369 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 0 */
4413 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
4429 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
4457 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
4485 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4505 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event,
4528 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event,
4551 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
4574 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
4596 /* State transitions for DATA_CONN_REQ event */
4612 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4633 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4654 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4700 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
4720 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
4736 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */
4751 /* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */
4767 /* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_X event */
4780 /* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X event */
4788 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4808 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4830 /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
4868 /* State transitions for LLC_CONN_EV_DISC_REQ event */
H A Dllc_s_st.c30 * LLC_SAP_EV_ACTIVATION_REQ event
49 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_UI event */
61 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_UNITDATA_REQ event */
73 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_XID_REQ event */
85 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_C event */
97 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_R event */
109 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_TEST_REQ event */
121 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_C event */
133 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_R event */
146 * LLC_SAP_EV_DEACTIVATION_REQ event
H A Dllc_s_ac.c7 * All functions have one sap and one event as input argument. All of
32 * @skb: the event to forward
35 * UNITDATA INDICATION; verify our event is the kind we expect
46 * @skb: the event to send
49 * primitive from the network layer. Verifies event is a primitive type of
50 * event. Verify the primitive is a UNITDATA REQUEST.
69 * @skb: the event to send
72 * primitive from the network layer. Verify event is a primitive type
73 * event. Verify the primitive is a XID REQUEST.
92 * @skb: the event to send
95 * command PDU. Verify event is a PDU type event
123 * @skb: the event to send
126 * primitive from the network layer. Verify event is a primitive type
127 * event; verify the primitive is a TEST REQUEST.
172 * @skb: the event to send
174 * Report data link status to layer management. Verify our event is the
185 * @skb: the event to send
199 * @skb: the event to send
202 * primitive. Verify our event is a PDU type event.
/linux-4.4.14/net/rds/
H A Drdma_transport.c42 struct rdma_cm_event *event) rds_rdma_cm_event_handler()
49 rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id, rds_rdma_cm_event_handler()
50 event->event, rdma_event_msg(event->event)); rds_rdma_cm_event_handler()
68 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) rds_rdma_cm_event_handler()
74 switch (event->event) { rds_rdma_cm_event_handler()
76 ret = trans->cm_handle_connect(cm_id, event); rds_rdma_cm_event_handler()
101 trans->cm_connect_complete(conn, event); rds_rdma_cm_event_handler()
116 rdsdebug("DISCONNECT event - dropping connection " rds_rdma_cm_event_handler()
124 printk(KERN_ERR "RDS: unknown event %u (%s)!\n", rds_rdma_cm_event_handler()
125 event->event, rdma_event_msg(event->event)); rds_rdma_cm_event_handler()
133 rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event, rds_rdma_cm_event_handler()
134 rdma_event_msg(event->event), ret); rds_rdma_cm_event_handler()
41 rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) rds_rdma_cm_event_handler() argument
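The rds_rdma_cm_event_handler() hits above show the usual RDMA connection-manager pattern: switch on event->event and hand each event type to the transport. A userspace analogue using librdmacm's event channel makes that flow easier to follow; this is only a minimal sketch, not RDS code, and it assumes the standard librdmacm calls (rdma_create_event_channel(), rdma_get_cm_event(), rdma_ack_cm_event(), rdma_event_str()).

	#include <stdio.h>
	#include <stdlib.h>
	#include <rdma/rdma_cma.h>

	int main(void)
	{
		struct rdma_event_channel *ch;
		struct rdma_cm_id *id;
		struct rdma_cm_event *event;

		ch = rdma_create_event_channel();
		if (!ch || rdma_create_id(ch, &id, NULL, RDMA_PS_TCP))
			return EXIT_FAILURE;

		/* A real program would rdma_bind_addr()/rdma_listen() here;
		 * this sketch only shows the event dispatch itself. */
		if (rdma_get_cm_event(ch, &event) == 0) {
			switch (event->event) {
			case RDMA_CM_EVENT_CONNECT_REQUEST:
				printf("connect request\n");
				break;
			case RDMA_CM_EVENT_ESTABLISHED:
				printf("connection established\n");
				break;
			case RDMA_CM_EVENT_DISCONNECTED:
				printf("disconnect - dropping connection\n");
				break;
			default:
				printf("unhandled event %s\n",
				       rdma_event_str(event->event));
				break;
			}
			rdma_ack_cm_event(event);	/* every event must be acked */
		}

		rdma_destroy_id(id);
		rdma_destroy_event_channel(ch);
		return 0;
	}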
/linux-4.4.14/arch/arm64/kernel/
H A Dperf_event.c7 * This code is based heavily on the ARMv7 perf event code.
30 * Common event types.
72 /* ARMv8 Cortex-A53 specific event types. */
77 /* ARMv8 Cortex-A57 specific event types. */
274 static inline u32 armv8pmu_read_counter(struct perf_event *event) armv8pmu_read_counter() argument
276 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv8pmu_read_counter()
277 struct hw_perf_event *hwc = &event->hw; armv8pmu_read_counter()
292 static inline void armv8pmu_write_counter(struct perf_event *event, u32 value) armv8pmu_write_counter() argument
294 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv8pmu_write_counter()
295 struct hw_perf_event *hwc = &event->hw; armv8pmu_write_counter()
362 static void armv8pmu_enable_event(struct perf_event *event) armv8pmu_enable_event() argument
365 struct hw_perf_event *hwc = &event->hw; armv8pmu_enable_event()
366 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv8pmu_enable_event()
372 * the event that we're interested in. armv8pmu_enable_event()
382 * Set event (if destined for PMNx counters). armv8pmu_enable_event()
399 static void armv8pmu_disable_event(struct perf_event *event) armv8pmu_disable_event() argument
402 struct hw_perf_event *hwc = &event->hw; armv8pmu_disable_event()
403 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv8pmu_disable_event()
451 struct perf_event *event = cpuc->events[idx]; armv8pmu_handle_irq() local
454 /* Ignore if we don't have an event. */ armv8pmu_handle_irq()
455 if (!event) armv8pmu_handle_irq()
465 hwc = &event->hw; armv8pmu_handle_irq()
466 armpmu_event_update(event); armv8pmu_handle_irq()
468 if (!armpmu_event_set_period(event)) armv8pmu_handle_irq()
471 if (perf_event_overflow(event, &data, regs)) armv8pmu_handle_irq()
472 cpu_pmu->disable(event); armv8pmu_handle_irq()
510 struct perf_event *event) armv8pmu_get_event_idx()
513 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv8pmu_get_event_idx()
514 struct hw_perf_event *hwc = &event->hw; armv8pmu_get_event_idx()
539 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
541 static int armv8pmu_set_event_filter(struct hw_perf_event *event, armv8pmu_set_event_filter() argument
557 * construct the event type. armv8pmu_set_event_filter()
559 event->config_base = config_base; armv8pmu_set_event_filter()
579 static int armv8_pmuv3_map_event(struct perf_event *event) armv8_pmuv3_map_event() argument
581 return armpmu_map_event(event, &armv8_pmuv3_perf_map, armv8_pmuv3_map_event()
586 static int armv8_a53_map_event(struct perf_event *event) armv8_a53_map_event() argument
588 return armpmu_map_event(event, &armv8_a53_perf_map, armv8_a53_map_event()
593 static int armv8_a57_map_event(struct perf_event *event) armv8_a57_map_event() argument
595 return armpmu_map_event(event, &armv8_a57_perf_map, armv8_a57_map_event()
509 armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) armv8pmu_get_event_idx() argument
/linux-4.4.14/sound/firewire/dice/
H A Ddice-hwdep.c17 union snd_firewire_event event; hwdep_read() local
31 memset(&event, 0, sizeof(event)); hwdep_read()
33 event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; hwdep_read()
34 event.lock_status.status = dice->dev_lock_count > 0; hwdep_read()
37 count = min_t(long, count, sizeof(event.lock_status)); hwdep_read()
39 event.dice_notification.type = hwdep_read()
41 event.dice_notification.notification = dice->notification_bits; hwdep_read()
44 count = min_t(long, count, sizeof(event.dice_notification)); hwdep_read()
49 if (copy_to_user(buf, &event, count)) hwdep_read()
/linux-4.4.14/include/media/
H A Dv4l2-event.h2 * v4l2-event.h
35 * Events are subscribed per-filehandle. An event specification consists of a
37 * 'id' field. So an event is uniquely identified by the (type, id) tuple.
40 * struct is added to that list, one for every subscribed event.
45 * v4l2_fh struct so VIDIOC_DQEVENT will know which event to dequeue first.
47 * Finally, if the event subscription is associated with a particular object
49 * so that an event can be raised by that object. So the 'node' field can
63 * it knows who subscribed an event to that object.
72 * struct v4l2_kevent - Internal kernel event struct.
75 * @event: The event itself.
80 struct v4l2_event event; member in struct:v4l2_kevent
83 /** struct v4l2_subscribed_event_ops - Subscribed event operations.
87 * @replace: Optional callback that can replace event 'old' with event 'new'.
88 * @merge: Optional callback that can merge event 'old' into event 'new'.
98 * struct v4l2_subscribed_event - Internal struct representing a subscribed event.
103 * @fh: Filehandle that subscribed to this event.
104 * @node: List node that hooks into the object's event list (if there is one).
107 * @first: The index of the events containing the oldest available event.
125 int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
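The v4l2-event.h comments above describe the (type, id) tuple and the per-filehandle queue that VIDIOC_DQEVENT drains. From userspace the same machinery is driven by two ioctls; the sketch below is a minimal illustration against a hypothetical /dev/video0 node and assumes the driver actually raises control events for the chosen id.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_event_subscription sub;
		struct v4l2_event ev;
		int fd = open("/dev/video0", O_RDWR);

		if (fd < 0)
			return 1;

		/* Subscribe: the (type, id) tuple uniquely identifies the event. */
		memset(&sub, 0, sizeof(sub));
		sub.type = V4L2_EVENT_CTRL;
		sub.id   = V4L2_CID_BRIGHTNESS;
		if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub))
			return 1;

		/* Dequeue the oldest available event; 'pending' reports how many
		 * more are still queued on this filehandle. */
		if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
			printf("event type %u, seq %u, %u still pending\n",
			       ev.type, ev.sequence, ev.pending);

		close(fd);
		return 0;
	}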
/linux-4.4.14/drivers/scsi/libsas/
H A Dsas_event.c43 static void sas_queue_event(int event, unsigned long *pending, sas_queue_event() argument
47 if (!test_and_set_bit(event, pending)) { sas_queue_event()
119 static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) notify_ha_event() argument
121 BUG_ON(event >= HA_NUM_EVENTS); notify_ha_event()
123 sas_queue_event(event, &sas_ha->pending, notify_ha_event()
124 &sas_ha->ha_events[event].work, sas_ha); notify_ha_event()
127 static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) notify_port_event() argument
131 BUG_ON(event >= PORT_NUM_EVENTS); notify_port_event()
133 sas_queue_event(event, &phy->port_events_pending, notify_port_event()
134 &phy->port_events[event].work, ha); notify_port_event()
137 void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) sas_notify_phy_event() argument
141 BUG_ON(event >= PHY_NUM_EVENTS); sas_notify_phy_event()
143 sas_queue_event(event, &phy->phy_events_pending, sas_notify_phy_event()
144 &phy->phy_events[event].work, ha); sas_notify_phy_event()
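sas_queue_event() above only schedules work when test_and_set_bit() reports the event bit was not already pending, so a burst of identical phy or port events collapses into a single work item. Below is a userspace sketch of the same coalescing trick using C11 atomics instead of the kernel bitops; the event names are only illustrative.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum { PHYE_LOSS_OF_SIGNAL, PHYE_OOB_DONE, PHY_NUM_EVENTS };

	static atomic_ulong pending = 0;	/* one bit per event type */

	/* Returns true if the event was newly queued; false means an identical
	 * event is already pending and this one is coalesced into it. */
	static bool queue_event(int event)
	{
		unsigned long bit = 1UL << event;

		return !(atomic_fetch_or(&pending, bit) & bit);
	}

	/* The worker clears its bit before handling, so a later event re-queues. */
	static void handle_event(int event)
	{
		atomic_fetch_and(&pending, ~(1UL << event));
		printf("handling event %d\n", event);
	}

	int main(void)
	{
		printf("%d\n", queue_event(PHYE_OOB_DONE));	/* 1: queued */
		printf("%d\n", queue_event(PHYE_OOB_DONE));	/* 0: coalesced */
		handle_event(PHYE_OOB_DONE);
		printf("%d\n", queue_event(PHYE_OOB_DONE));	/* 1: queued again */
		return 0;
	}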
/linux-4.4.14/sound/core/seq/oss/
H A Dseq_oss_rw.c160 * insert event record to write queue
167 struct snd_seq_event event; insert_queue() local
169 /* if this is a timing event, process the current time */ insert_queue()
173 /* parse this event */ insert_queue()
174 memset(&event, 0, sizeof(event)); insert_queue()
176 event.type = SNDRV_SEQ_EVENT_NOTEOFF; insert_queue()
177 snd_seq_oss_fill_addr(dp, &event, dp->addr.port, dp->addr.client); insert_queue()
179 if (snd_seq_oss_process_event(dp, rec, &event)) insert_queue()
180 return 0; /* invalid event - no need to insert queue */ insert_queue()
182 event.time.tick = snd_seq_oss_timer_cur_tick(dp->timer); insert_queue()
184 snd_seq_oss_dispatch(dp, &event, 0, 0); insert_queue()
187 rc = snd_seq_kernel_client_enqueue(dp->cseq, &event, 0, 0); insert_queue()
189 rc = snd_seq_kernel_client_enqueue_blocking(dp->cseq, &event, opt, 0, 0); insert_queue()
/linux-4.4.14/arch/alpha/kernel/
H A Dperf_event.c40 struct perf_event *event[MAX_HWEVENTS]; member in struct:cpu_hw_events
41 /* Event type of each scheduled event. */
43 /* Current index of each scheduled event; if not yet determined
61 /* Mapping of the perf system hw event types to indigenous event types */
86 /* Subroutine for checking validity of a raw event for this PMU. */
106 * EV67 PMC event types
108 * There is no one-to-one mapping of the possible hw event types to the
110 * own hw event type identifiers.
122 /* Mapping of the hw event types to the perf tool interface */
136 * The mapping used for one event only - these must be in same order as enum
151 static int ev67_check_constraints(struct perf_event **event, ev67_check_constraints() argument
198 event[0]->hw.idx = idx0; ev67_check_constraints()
199 event[0]->hw.config_base = config; ev67_check_constraints()
201 event[1]->hw.idx = idx0 ^ 1; ev67_check_constraints()
202 event[1]->hw.config_base = config; ev67_check_constraints()
251 static int alpha_perf_event_set_period(struct perf_event *event, alpha_perf_event_set_period() argument
286 perf_event_update_userpage(event); alpha_perf_event_set_period()
306 static unsigned long alpha_perf_event_update(struct perf_event *event, alpha_perf_event_update() argument
329 local64_add(delta, &event->count); alpha_perf_event_update()
337 * Collect all HW events into the array event[].
340 struct perf_event *event[], unsigned long *evtype, collect_events()
349 event[n] = group; collect_events()
357 event[n] = pe; collect_events()
399 struct perf_event *pe = cpuc->event[j]; maybe_change_configuration()
411 struct perf_event *pe = cpuc->event[j]; maybe_change_configuration()
423 cpuc->config = cpuc->event[0]->hw.config_base; maybe_change_configuration()
428 /* Schedule perf HW event on to PMU.
430 * returned from perf event initialisation.
432 static int alpha_pmu_add(struct perf_event *event, int flags) alpha_pmu_add() argument
435 struct hw_perf_event *hwc = &event->hw; alpha_pmu_add()
448 perf_pmu_disable(event->pmu); alpha_pmu_add()
454 /* Insert event on to PMU and if successful modify ret to valid return */ alpha_pmu_add()
457 cpuc->event[n0] = event; alpha_pmu_add()
458 cpuc->evtype[n0] = event->hw.event_base; alpha_pmu_add()
461 if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) { alpha_pmu_add()
473 perf_pmu_enable(event->pmu); alpha_pmu_add()
482 * returned from perf event initialisation.
484 static void alpha_pmu_del(struct perf_event *event, int flags) alpha_pmu_del() argument
487 struct hw_perf_event *hwc = &event->hw; alpha_pmu_del()
491 perf_pmu_disable(event->pmu); alpha_pmu_del()
495 if (event == cpuc->event[j]) { alpha_pmu_del()
502 cpuc->event[j - 1] = cpuc->event[j]; alpha_pmu_del()
508 /* Absorb the final count and turn off the event. */ alpha_pmu_del()
509 alpha_perf_event_update(event, hwc, idx, 0); alpha_pmu_del()
510 perf_event_update_userpage(event); alpha_pmu_del()
519 perf_pmu_enable(event->pmu); alpha_pmu_del()
523 static void alpha_pmu_read(struct perf_event *event) alpha_pmu_read() argument
525 struct hw_perf_event *hwc = &event->hw; alpha_pmu_read()
527 alpha_perf_event_update(event, hwc, hwc->idx, 0); alpha_pmu_read()
531 static void alpha_pmu_stop(struct perf_event *event, int flags) alpha_pmu_stop() argument
533 struct hw_perf_event *hwc = &event->hw; alpha_pmu_stop()
542 alpha_perf_event_update(event, hwc, hwc->idx, 0); alpha_pmu_stop()
551 static void alpha_pmu_start(struct perf_event *event, int flags) alpha_pmu_start() argument
553 struct hw_perf_event *hwc = &event->hw; alpha_pmu_start()
561 alpha_perf_event_set_period(event, hwc, hwc->idx); alpha_pmu_start()
593 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
601 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
603 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
604 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
611 /* We only support a limited range of HARDWARE event types with one __hw_perf_event_init()
612 * only programmable via a RAW event type. __hw_perf_event_init()
639 * We place the event type in event_base here and leave calculation __hw_perf_event_init()
654 if (event->group_leader != event) { __hw_perf_event_init()
655 n = collect_events(event->group_leader, __hw_perf_event_init()
662 evts[n] = event; __hw_perf_event_init()
671 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
693 * Main entry point to initialise a HW performance event.
695 static int alpha_pmu_event_init(struct perf_event *event) alpha_pmu_event_init() argument
700 if (has_branch_stack(event)) alpha_pmu_event_init()
703 switch (event->attr.type) { alpha_pmu_event_init()
717 err = __hw_perf_event_init(event); alpha_pmu_event_init()
813 struct perf_event *event; alpha_perf_event_irq_handler() local
844 /* This can occur if the event is disabled right on a PMC overflow. */ alpha_perf_event_irq_handler()
849 event = cpuc->event[j]; alpha_perf_event_irq_handler()
851 if (unlikely(!event)) { alpha_perf_event_irq_handler()
854 pr_warning("PMI: No event at index %d!\n", idx); alpha_perf_event_irq_handler()
859 hwc = &event->hw; alpha_perf_event_irq_handler()
860 alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1); alpha_perf_event_irq_handler()
863 if (alpha_perf_event_set_period(event, hwc, idx)) { alpha_perf_event_irq_handler()
864 if (perf_event_overflow(event, &data, regs)) { alpha_perf_event_irq_handler()
868 alpha_pmu_stop(event, 0); alpha_perf_event_irq_handler()
339 collect_events(struct perf_event *group, int max_count, struct perf_event *event[], unsigned long *evtype, int *current_idx) collect_events() argument
/linux-4.4.14/drivers/uwb/
H A Duwbd.c29 * communicate with this daemon through an event queue. Daemon wakes
31 * function is extracted from a table based on the event's type and
34 * . Lock protecting the event list has to be an spinlock and locked
40 * uwbd_event_queue(). They just get the event, chew it to make it
47 * data blob, which depends on the event. The header is 'struct
52 * To find a handling function for an event, the type is used to index
54 * with the subtype to get the function that handles the event. Start
81 * Return !0 if the event needs not to be freed (ie the handler
83 * event.
91 * Properties of a UWBD event
93 * @handler: the function that will handle this event
94 * @name: text name of event
162 * Handle an URC event passed to the UWB Daemon
164 * @evt: the event to handle
165 * @returns: 0 if the event can be kfreed, !0 on the contrary
173 * The event structure passed to the event handler has the radio
185 u16 event; uwbd_event_handle_urc() local
188 event = le16_to_cpu(evt->notif.rceb->wEvent); uwbd_event_handle_urc()
196 if (event >= type_table->size) uwbd_event_handle_urc()
198 handler = type_table->uwbd_events[event].handler; uwbd_event_handle_urc()
206 "UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n", uwbd_event_handle_urc()
207 type, event, context, result); uwbd_event_handle_urc()
247 dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type); uwbd_event_handle()
322 * Queue an event for the management daemon
324 * When some lower layer receives an event, it uses this function to
327 * Once you pass the event, you don't own it any more, but the daemon
331 * If the daemon is not running, we just free the event.
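The uwbd.c comments describe the daemon finding a handler by indexing a per-type table with the event subtype and rejecting out-of-range events. A compact, self-contained sketch of that table-driven dispatch is below; the event names and handlers are invented for illustration, not the real UWB notification set.

	#include <stdio.h>

	struct uwb_evt {
		unsigned type;
		unsigned subtype;
	};

	typedef int (*evt_handler)(struct uwb_evt *evt);

	static int handle_beacon(struct uwb_evt *evt) { (void)evt; puts("beacon"); return 0; }
	static int handle_drp(struct uwb_evt *evt)    { (void)evt; puts("drp");    return 0; }

	/* One table per event type; the subtype indexes into it, which is
	 * exactly the lookup described in the comments above. */
	static const struct {
		evt_handler handler;
		const char *name;
	} rc_events[] = {
		{ handle_beacon, "BEACON_RECEIVED" },
		{ handle_drp,    "DRP_AVAILABILITY" },
	};

	static int uwbd_event_handle(struct uwb_evt *evt)
	{
		if (evt->subtype >= sizeof(rc_events) / sizeof(rc_events[0]) ||
		    !rc_events[evt->subtype].handler) {
			fprintf(stderr, "invalid event %u/%u\n", evt->type, evt->subtype);
			return -1;	/* unknown event: caller frees it */
		}
		return rc_events[evt->subtype].handler(evt);
	}

	int main(void)
	{
		struct uwb_evt evt = { .type = 0, .subtype = 1 };

		return uwbd_event_handle(&evt);
	}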
/linux-4.4.14/tools/perf/python/
H A Dtwatch.py42 event = evlist.read_on_cpu(cpu)
43 if not event:
45 print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
46 event.sample_pid,
47 event.sample_tid),
48 print event
62 It is possible as well to use event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT
/linux-4.4.14/drivers/scsi/bfa/
H A Dbfa_fcs.c245 enum bfa_fcs_fabric_event event);
247 enum bfa_fcs_fabric_event event);
249 enum bfa_fcs_fabric_event event);
251 enum bfa_fcs_fabric_event event);
253 enum bfa_fcs_fabric_event event);
255 enum bfa_fcs_fabric_event event);
257 enum bfa_fcs_fabric_event event);
259 enum bfa_fcs_fabric_event event);
261 enum bfa_fcs_fabric_event event);
263 enum bfa_fcs_fabric_event event);
265 enum bfa_fcs_fabric_event event);
267 enum bfa_fcs_fabric_event event);
269 enum bfa_fcs_fabric_event event);
275 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_uninit()
278 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_uninit()
280 switch (event) { bfa_fcs_fabric_sm_uninit()
292 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_uninit()
301 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_created()
306 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_created()
308 switch (event) { bfa_fcs_fabric_sm_created()
339 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_created()
344 * Link is down, awaiting LINK UP event from port. This is also the
349 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_linkdown()
354 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_linkdown()
356 switch (event) { bfa_fcs_fabric_sm_linkdown()
386 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_linkdown()
395 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_flogi()
398 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_flogi()
400 switch (event) { bfa_fcs_fabric_sm_flogi()
409 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_flogi()
449 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_flogi()
456 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_flogi_retry()
459 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_flogi_retry()
461 switch (event) { bfa_fcs_fabric_sm_flogi_retry()
479 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_flogi_retry()
488 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_auth()
491 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_auth()
493 switch (event) { bfa_fcs_fabric_sm_auth()
519 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_auth()
528 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_auth_failed()
531 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_auth_failed()
533 switch (event) { bfa_fcs_fabric_sm_auth_failed()
545 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_auth_failed()
554 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_loopback()
557 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_loopback()
559 switch (event) { bfa_fcs_fabric_sm_loopback()
571 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_loopback()
580 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_nofabric()
583 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_nofabric()
585 switch (event) { bfa_fcs_fabric_sm_nofabric()
607 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_nofabric()
616 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_online()
621 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_online()
623 switch (event) { bfa_fcs_fabric_sm_online()
653 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_online()
662 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_evfp()
665 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_evfp()
667 switch (event) { bfa_fcs_fabric_sm_evfp()
677 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_evfp()
686 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_evfp_done()
689 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_evfp_done()
697 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_isolated()
703 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_isolated()
718 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_deleting()
721 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_deleting()
723 switch (event) { bfa_fcs_fabric_sm_deleting()
737 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_deleting()
746 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_stopping()
751 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_stopping()
753 switch (event) { bfa_fcs_fabric_sm_stopping()
774 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_stopping()
783 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_cleanup()
786 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_cleanup()
788 switch (event) { bfa_fcs_fabric_sm_cleanup()
797 * Ignore - can get this event if we get notified about IOC down bfa_fcs_fabric_sm_cleanup()
803 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_cleanup()
1028 * notify online event to base and then virtual ports bfa_fcs_fabric_notify_online()
1048 * notify offline event first to vports and then base port. bfa_fcs_fabric_notify_offline()
1494 enum bfa_port_aen_event event) bfa_fcs_fabric_aen_post()
1508 BFA_AEN_CAT_PORT, event); bfa_fcs_fabric_aen_post()
1531 * Don't generate a fabric name change event in this case. bfa_fcs_fabric_set_fabric_name()
1615 bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) bfa_fcs_port_event_handler() argument
1619 bfa_trc(fcs, event); bfa_fcs_port_event_handler()
1621 switch (event) { bfa_fcs_port_event_handler()
274 bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_uninit() argument
300 bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_created() argument
348 bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_linkdown() argument
394 bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_flogi() argument
455 bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_flogi_retry() argument
487 bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_auth() argument
527 bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_auth_failed() argument
553 bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_loopback() argument
579 bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_nofabric() argument
615 bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_online() argument
661 bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_evfp() argument
685 bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_evfp_done() argument
696 bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_isolated() argument
717 bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_deleting() argument
745 bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_stopping() argument
782 bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_cleanup() argument
1493 bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port, enum bfa_port_aen_event event) bfa_fcs_fabric_aen_post() argument
H A Dbfa_fcs_fcpim.c41 enum bfa_itnim_aen_event event);
44 enum bfa_fcs_itnim_event event);
46 enum bfa_fcs_itnim_event event);
48 enum bfa_fcs_itnim_event event);
50 enum bfa_fcs_itnim_event event);
52 enum bfa_fcs_itnim_event event);
54 enum bfa_fcs_itnim_event event);
56 enum bfa_fcs_itnim_event event);
58 enum bfa_fcs_itnim_event event);
60 enum bfa_fcs_itnim_event event);
79 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_offline()
82 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_offline()
84 switch (event) { bfa_fcs_itnim_sm_offline()
104 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_offline()
111 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli_send()
114 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_prli_send()
116 switch (event) { bfa_fcs_itnim_sm_prli_send()
140 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_prli_send()
146 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli()
149 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_prli()
151 switch (event) { bfa_fcs_itnim_sm_prli()
192 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_prli()
198 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hal_rport_online()
201 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_hal_rport_online()
203 switch (event) { bfa_fcs_itnim_sm_hal_rport_online()
230 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_hal_rport_online()
236 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli_retry()
239 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_prli_retry()
241 switch (event) { bfa_fcs_itnim_sm_prli_retry()
275 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_prli_retry()
281 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hcb_online()
288 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_hcb_online()
290 switch (event) { bfa_fcs_itnim_sm_hcb_online()
313 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_hcb_online()
319 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_online()
326 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_online()
328 switch (event) { bfa_fcs_itnim_sm_online()
354 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_online()
360 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hcb_offline()
363 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_hcb_offline()
365 switch (event) { bfa_fcs_itnim_sm_hcb_offline()
377 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_hcb_offline()
388 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_initiator()
391 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_initiator()
393 switch (event) { bfa_fcs_itnim_sm_initiator()
416 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_initiator()
422 enum bfa_itnim_aen_event event) bfa_fcs_itnim_aen_post()
444 BFA_AEN_CAT_ITNIM, event); bfa_fcs_itnim_aen_post()
78 bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_offline() argument
110 bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli_send() argument
145 bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli() argument
197 bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hal_rport_online() argument
235 bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli_retry() argument
280 bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hcb_online() argument
318 bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_online() argument
359 bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hcb_offline() argument
387 bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_initiator() argument
421 bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, enum bfa_itnim_aen_event event) bfa_fcs_itnim_aen_post() argument
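The bfa_fcs state-machine hits above all share one shape: the current state is a function pointer, each state function takes the object plus an event, handles the events legal in that state, and faults on anything else. A stripped-down sketch of that convention, with invented states and events rather than the driver's own, is shown below.

	#include <stdio.h>

	enum fab_event { EV_CREATE, EV_LINK_UP, EV_LINK_DOWN };

	struct fabric;
	typedef void (*fab_state)(struct fabric *fab, enum fab_event event);

	struct fabric {
		fab_state sm;		/* current state == current handler */
	};

	static void sm_uninit(struct fabric *fab, enum fab_event event);
	static void sm_linkdown(struct fabric *fab, enum fab_event event);
	static void sm_online(struct fabric *fab, enum fab_event event);

	/* Each state handles only its legal events and transitions by swapping
	 * the function pointer, much like bfa_sm_set_state() in the driver. */
	static void sm_uninit(struct fabric *fab, enum fab_event event)
	{
		if (event == EV_CREATE)
			fab->sm = sm_linkdown;
		else
			fprintf(stderr, "sm fault: event %d in uninit\n", event);
	}

	static void sm_linkdown(struct fabric *fab, enum fab_event event)
	{
		if (event == EV_LINK_UP)
			fab->sm = sm_online;
		else
			fprintf(stderr, "sm fault: event %d in linkdown\n", event);
	}

	static void sm_online(struct fabric *fab, enum fab_event event)
	{
		if (event == EV_LINK_DOWN)
			fab->sm = sm_linkdown;
		else
			fprintf(stderr, "sm fault: event %d in online\n", event);
	}

	int main(void)
	{
		struct fabric fab = { .sm = sm_uninit };

		fab.sm(&fab, EV_CREATE);	/* uninit   -> linkdown */
		fab.sm(&fab, EV_LINK_UP);	/* linkdown -> online   */
		fab.sm(&fab, EV_LINK_DOWN);	/* online   -> linkdown */
		return 0;
	}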
/linux-4.4.14/arch/metag/kernel/perf/
H A Dperf_event.c144 static int metag_pmu_event_init(struct perf_event *event) metag_pmu_event_init() argument
154 if (has_branch_stack(event)) metag_pmu_event_init()
157 event->destroy = _hw_perf_event_destroy; metag_pmu_event_init()
171 switch (event->attr.type) { metag_pmu_event_init()
175 err = _hw_perf_event_init(event); metag_pmu_event_init()
183 event->destroy(event); metag_pmu_event_init()
189 void metag_pmu_event_update(struct perf_event *event, metag_pmu_event_update() argument
217 local64_add(delta, &event->count); metag_pmu_event_update()
221 int metag_pmu_event_set_period(struct perf_event *event, metag_pmu_event_set_period() argument
254 perf_event_update_userpage(event); metag_pmu_event_set_period()
259 static void metag_pmu_start(struct perf_event *event, int flags) metag_pmu_start() argument
262 struct hw_perf_event *hwc = &event->hw; metag_pmu_start()
287 metag_pmu_event_set_period(event, hwc, hwc->idx); metag_pmu_start()
288 cpuc->events[idx] = event; metag_pmu_start()
292 static void metag_pmu_stop(struct perf_event *event, int flags) metag_pmu_stop() argument
294 struct hw_perf_event *hwc = &event->hw; metag_pmu_stop()
301 metag_pmu_event_update(event, hwc, hwc->idx); metag_pmu_stop()
307 static int metag_pmu_add(struct perf_event *event, int flags) metag_pmu_add() argument
310 struct hw_perf_event *hwc = &event->hw; metag_pmu_add()
313 perf_pmu_disable(event->pmu); metag_pmu_add()
341 metag_pmu_start(event, PERF_EF_RELOAD); metag_pmu_add()
343 perf_event_update_userpage(event); metag_pmu_add()
345 perf_pmu_enable(event->pmu); metag_pmu_add()
349 static void metag_pmu_del(struct perf_event *event, int flags) metag_pmu_del() argument
352 struct hw_perf_event *hwc = &event->hw; metag_pmu_del()
356 metag_pmu_stop(event, PERF_EF_UPDATE); metag_pmu_del()
360 perf_event_update_userpage(event); metag_pmu_del()
363 static void metag_pmu_read(struct perf_event *event) metag_pmu_read() argument
365 struct hw_perf_event *hwc = &event->hw; metag_pmu_read()
371 metag_pmu_event_update(event, hwc, hwc->idx); metag_pmu_read()
503 static void _hw_perf_event_destroy(struct perf_event *event) _hw_perf_event_destroy() argument
541 static int _hw_perf_event_init(struct perf_event *event) _hw_perf_event_init() argument
543 struct perf_event_attr *attr = &event->attr; _hw_perf_event_init()
544 struct hw_perf_event *hwc = &event->hw; _hw_perf_event_init()
566 /* Return early if the event is unsupported */ _hw_perf_event_init()
571 * Don't assign an index until the event is placed into the hardware. _hw_perf_event_init()
578 /* Store the event encoding */ _hw_perf_event_init()
598 static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx) metag_pmu_enable_counter() argument
601 unsigned int config = event->config; metag_pmu_enable_counter()
615 local64_set(&event->prev_count, __core_reg_get(TXTACTCYC)); metag_pmu_enable_counter()
619 /* Check for a core internal or performance channel event. */ metag_pmu_enable_counter()
645 * Now we use the high nibble as the performance event to metag_pmu_enable_counter()
664 local64_set(&event->prev_count, 0); metag_pmu_enable_counter()
671 static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx) metag_pmu_disable_counter() argument
691 * Here we remove the thread id AND the event nibble (there are at metag_pmu_disable_counter()
694 * performance counts, and event 0x00 requires a thread id mask! metag_pmu_disable_counter()
733 * We'll keep the thread mask and event id, and just update the metag_pmu_write_counter()
755 struct perf_event *event = cpuhw->events[idx]; metag_pmu_counter_overflow() local
756 struct hw_perf_event *hwc = &event->hw; metag_pmu_counter_overflow()
773 metag_pmu_event_update(event, hwc, idx); metag_pmu_counter_overflow()
775 metag_pmu_event_set_period(event, hwc, idx); metag_pmu_counter_overflow()
782 if (!perf_event_overflow(event, &sampledata, regs)) { metag_pmu_counter_overflow()
/linux-4.4.14/drivers/platform/x86/
H A Ddell-wmi-aio.c38 /* 0x000: A hot key pressed or an event occurred
41 u16 event[]; member in struct:dell_wmi_event
69 * The new WMI event data format will follow the dell_wmi_event structure
74 struct dell_wmi_event *event = (struct dell_wmi_event *)buffer; dell_wmi_aio_event_check() local
76 if (event == NULL || length < 6) dell_wmi_aio_event_check()
79 if ((event->type == 0 || event->type == 0xf) && dell_wmi_aio_event_check()
80 event->length >= 2) dell_wmi_aio_event_check()
90 struct dell_wmi_event *event; dell_wmi_aio_notify() local
95 pr_info("bad event status 0x%x\n", status); dell_wmi_aio_notify()
113 event = (struct dell_wmi_event *) dell_wmi_aio_notify()
115 scancode = event->event[0]; dell_wmi_aio_notify()
/linux-4.4.14/sound/soc/codecs/
H A Dwm_adsp.h77 .reg = SND_SOC_NOPM, .shift = num, .event = event_fn, \
80 .reg = SND_SOC_NOPM, .shift = num, .event = wm_adsp2_event, \
93 struct snd_kcontrol *kcontrol, int event);
95 struct snd_kcontrol *kcontrol, int event);
97 struct snd_kcontrol *kcontrol, int event);
/linux-4.4.14/arch/sparc/kernel/
H A Dperf_event.c1 /* Performance event support for sparc64.
5 * This code is based almost entirely upon the x86 perf event
48 * event fields, one for each of the two counters. It's thus nearly
63 * implemented. The event selections on SPARC-T4 lack any
84 * This works because the perf event layer always adds new
90 struct perf_event *event[MAX_HWEVENTS]; member in struct:cpu_hw_events
98 /* The current counter index assigned to an event. When the
99 * event hasn't been programmed into the cpu yet, this will
100 * hold PIC_NO_INDEX. The event->hw.idx value tells us where
101 * we ought to schedule the event.
115 /* An event map describes the characteristics of a performance
116 * counter event. In particular it gives the encoding as well as
117 * a mask telling which counters the event can be measured on.
762 * generates the overflow event for precise events via a trap
764 * we happen to be in the hypervisor when the event triggers.
765 * Essentially, the overflow event reporting is completely
861 static u64 sparc_perf_event_update(struct perf_event *event, sparc_perf_event_update() argument
879 local64_add(delta, &event->count); sparc_perf_event_update()
885 static int sparc_perf_event_set_period(struct perf_event *event, sparc_perf_event_set_period() argument
912 perf_event_update_userpage(event); sparc_perf_event_set_period()
922 struct perf_event *cp = cpuc->event[i]; read_in_all_counters()
948 struct perf_event *cp = cpuc->event[i]; calculate_single_pcr()
967 cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; calculate_single_pcr()
970 static void sparc_pmu_start(struct perf_event *event, int flags);
981 struct perf_event *cp = cpuc->event[i]; calculate_multiple_pcrs()
994 struct perf_event *cp = cpuc->event[i]; calculate_multiple_pcrs()
1001 /* If performance event entries have been added, move existing events
1056 struct perf_event *event) active_event_index()
1061 if (cpuc->event[i] == event) active_event_index()
1068 static void sparc_pmu_start(struct perf_event *event, int flags) sparc_pmu_start() argument
1071 int idx = active_event_index(cpuc, event); sparc_pmu_start()
1074 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); sparc_pmu_start()
1075 sparc_perf_event_set_period(event, &event->hw, idx); sparc_pmu_start()
1078 event->hw.state = 0; sparc_pmu_start()
1080 sparc_pmu_enable_event(cpuc, &event->hw, idx); sparc_pmu_start()
1083 static void sparc_pmu_stop(struct perf_event *event, int flags) sparc_pmu_stop() argument
1086 int idx = active_event_index(cpuc, event); sparc_pmu_stop()
1088 if (!(event->hw.state & PERF_HES_STOPPED)) { sparc_pmu_stop()
1089 sparc_pmu_disable_event(cpuc, &event->hw, idx); sparc_pmu_stop()
1090 event->hw.state |= PERF_HES_STOPPED; sparc_pmu_stop()
1093 if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) { sparc_pmu_stop()
1094 sparc_perf_event_update(event, &event->hw, idx); sparc_pmu_stop()
1095 event->hw.state |= PERF_HES_UPTODATE; sparc_pmu_stop()
1099 static void sparc_pmu_del(struct perf_event *event, int _flags) sparc_pmu_del() argument
1108 if (event == cpuc->event[i]) { sparc_pmu_del()
1110 * event. sparc_pmu_del()
1112 sparc_pmu_stop(event, PERF_EF_UPDATE); sparc_pmu_del()
1118 cpuc->event[i - 1] = cpuc->event[i]; sparc_pmu_del()
1124 perf_event_update_userpage(event); sparc_pmu_del()
1134 static void sparc_pmu_read(struct perf_event *event) sparc_pmu_read() argument
1137 int idx = active_event_index(cpuc, event); sparc_pmu_read()
1138 struct hw_perf_event *hwc = &event->hw; sparc_pmu_read()
1140 sparc_perf_event_update(event, hwc, idx); sparc_pmu_read()
1212 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
1264 /* If one event is limited to a specific counter, sparc_check_constraints()
1302 struct perf_event *event; check_excludes() local
1314 event = evts[i]; check_excludes()
1316 eu = event->attr.exclude_user; check_excludes()
1317 ek = event->attr.exclude_kernel; check_excludes()
1318 eh = event->attr.exclude_hv; check_excludes()
1320 } else if (event->attr.exclude_user != eu || check_excludes()
1321 event->attr.exclude_kernel != ek || check_excludes()
1322 event->attr.exclude_hv != eh) { check_excludes()
1334 struct perf_event *event; collect_events() local
1344 list_for_each_entry(event, &group->sibling_list, group_entry) { collect_events()
1345 if (!is_software_event(event) && collect_events()
1346 event->state != PERF_EVENT_STATE_OFF) { collect_events()
1349 evts[n] = event; collect_events()
1350 events[n] = event->hw.event_base; collect_events()
1357 static int sparc_pmu_add(struct perf_event *event, int ef_flags) sparc_pmu_add() argument
1369 cpuc->event[n0] = event; sparc_pmu_add()
1370 cpuc->events[n0] = event->hw.event_base; sparc_pmu_add()
1373 event->hw.state = PERF_HES_UPTODATE; sparc_pmu_add()
1375 event->hw.state |= PERF_HES_STOPPED; sparc_pmu_add()
1385 if (check_excludes(cpuc->event, n0, 1)) sparc_pmu_add()
1387 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) sparc_pmu_add()
1400 static int sparc_pmu_event_init(struct perf_event *event) sparc_pmu_event_init() argument
1402 struct perf_event_attr *attr = &event->attr; sparc_pmu_event_init()
1404 struct hw_perf_event *hwc = &event->hw; sparc_pmu_event_init()
1414 if (has_branch_stack(event)) sparc_pmu_event_init()
1459 if (event->group_leader != event) { sparc_pmu_event_init()
1460 n = collect_events(event->group_leader, sparc_pmu_event_init()
1467 evts[n] = event; sparc_pmu_event_init()
1481 event->destroy = hw_perf_event_destroy; sparc_pmu_event_init()
1551 if (check_excludes(cpuc->event, 0, n)) sparc_pmu_commit_txn()
1553 if (sparc_check_constraints(cpuc->event, cpuc->events, n)) sparc_pmu_commit_txn()
1634 struct perf_event *event = cpuc->event[i]; perf_event_nmi_handler() local
1643 hwc = &event->hw; perf_event_nmi_handler()
1644 val = sparc_perf_event_update(event, hwc, idx); perf_event_nmi_handler()
1649 if (!sparc_perf_event_set_period(event, hwc, idx)) perf_event_nmi_handler()
1652 if (perf_event_overflow(event, &data, regs)) perf_event_nmi_handler()
1653 sparc_pmu_stop(event, 0); perf_event_nmi_handler()
1055 active_event_index(struct cpu_hw_events *cpuc, struct perf_event *event) active_event_index() argument
/linux-4.4.14/drivers/media/v4l2-core/
H A Dv4l2-event.c2 * v4l2-event.c
27 #include <media/v4l2-event.h>
39 static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) __v4l2_event_dequeue() argument
57 kev->event.pending = fh->navailable; __v4l2_event_dequeue()
58 *event = kev->event; __v4l2_event_dequeue()
67 int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, v4l2_event_dequeue() argument
73 return __v4l2_event_dequeue(fh, event); v4l2_event_dequeue()
85 ret = __v4l2_event_dequeue(fh, event); v4l2_event_dequeue()
123 * If the event has been added to the fh->subscribed list, but its __v4l2_event_queue_fh()
130 /* Increase event sequence number on fh. */ __v4l2_event_queue_fh()
143 sev->ops->replace(&kev->event, ev); __v4l2_event_queue_fh()
149 sev->ops->merge(&kev->event, &second_oldest->event); __v4l2_event_queue_fh()
155 kev->event.type = ev->type; __v4l2_event_queue_fh()
157 kev->event.u = ev->u; __v4l2_event_queue_fh()
158 kev->event.id = ev->id; __v4l2_event_queue_fh()
159 kev->event.timestamp = *ts; __v4l2_event_queue_fh()
160 kev->event.sequence = fh->sequence; __v4l2_event_queue_fh()
/linux-4.4.14/drivers/gpu/drm/atmel-hlcdc/
H A Datmel_hlcdc_crtc.c39 * @event: pointer to the current page flip event
46 struct drm_pending_vblank_event *event; member in struct:atmel_hlcdc_crtc
247 if (c->state->event) { atmel_hlcdc_crtc_atomic_begin()
248 c->state->event->pipe = drm_crtc_index(c); atmel_hlcdc_crtc_atomic_begin()
252 crtc->event = c->state->event; atmel_hlcdc_crtc_atomic_begin()
253 c->state->event = NULL; atmel_hlcdc_crtc_atomic_begin()
287 struct drm_pending_vblank_event *event; atmel_hlcdc_crtc_cancel_page_flip() local
292 event = crtc->event; atmel_hlcdc_crtc_cancel_page_flip()
293 if (event && event->base.file_priv == file) { atmel_hlcdc_crtc_cancel_page_flip()
294 event->base.destroy(&event->base); atmel_hlcdc_crtc_cancel_page_flip()
296 crtc->event = NULL; atmel_hlcdc_crtc_cancel_page_flip()
307 if (crtc->event) { atmel_hlcdc_crtc_finish_page_flip()
308 drm_send_vblank_event(dev, crtc->id, crtc->event); atmel_hlcdc_crtc_finish_page_flip()
310 crtc->event = NULL; atmel_hlcdc_crtc_finish_page_flip()
/linux-4.4.14/include/net/netfilter/
H A Dnf_conntrack_ecache.h2 * connection tracking event cache.
59 /* This structure is passed to event handler */
78 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) nf_conntrack_event_cache() argument
90 set_bit(event, &e->cache); nf_conntrack_event_cache()
119 /* This is a resent of a destroy event? If so, skip missed */ nf_conntrack_eventmask_report()
129 /* This is a destroy event that has been nf_conntrack_eventmask_report()
148 nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, nf_conntrack_event_report() argument
151 return nf_conntrack_eventmask_report(1 << event, ct, portid, report); nf_conntrack_event_report()
155 nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) nf_conntrack_event() argument
157 return nf_conntrack_eventmask_report(1 << event, ct, 0, 0); nf_conntrack_event()
176 nf_ct_expect_event_report(enum ip_conntrack_expect_events event, nf_ct_expect_event_report() argument
194 if (e->expmask & (1 << event)) { nf_ct_expect_event_report()
200 notify->fcn(1 << event, &item); nf_ct_expect_event_report()
207 nf_ct_expect_event(enum ip_conntrack_expect_events event, nf_ct_expect_event() argument
210 nf_ct_expect_event_report(event, exp, 0, 0); nf_ct_expect_event()
235 static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, nf_conntrack_event_cache() argument
241 static inline int nf_conntrack_event(enum ip_conntrack_events event, nf_conntrack_event() argument
243 static inline int nf_conntrack_event_report(enum ip_conntrack_events event, nf_conntrack_event_report() argument
248 static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, nf_ct_expect_event() argument
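nf_conntrack_event_cache() above only sets a bit in a per-connection cache; the notification is sent later with the accumulated mask, so several state changes cost one message. The sketch below shows that accumulate-then-flush pattern in plain C; the event names and the "notify" step are placeholders, not the ctnetlink code.

	#include <stdio.h>

	enum { IPCT_NEW, IPCT_ASSURED, IPCT_DESTROY };

	struct conn {
		unsigned long cache;	/* pending event bits for this connection */
	};

	static void event_cache(struct conn *ct, int event)
	{
		ct->cache |= 1UL << event;	/* cheap: nothing is sent yet */
	}

	/* Deliver everything cached for this connection in one notification. */
	static void event_flush(struct conn *ct)
	{
		if (!ct->cache)
			return;
		printf("notify mask 0x%lx\n", ct->cache);
		ct->cache = 0;
	}

	int main(void)
	{
		struct conn ct = { 0 };

		event_cache(&ct, IPCT_NEW);
		event_cache(&ct, IPCT_ASSURED);
		event_flush(&ct);		/* one message for both events */
		return 0;
	}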
/linux-4.4.14/arch/c6x/include/asm/
H A Dsoc.h16 /* Return active exception event or -1 if none */
19 /* Assert an event */
26 extern void soc_assert_event(unsigned int event);
/linux-4.4.14/include/uapi/sound/
H A Dasequencer.h31 * definition of sequencer event types
35 * event data type = #snd_seq_result
41 * event data type = #snd_seq_ev_note
49 * event data type = #snd_seq_ev_ctrl
60 * event data type = #snd_seq_ev_ctrl
65 #define SNDRV_SEQ_EVENT_TIMESIGN 23 /* SMF Time Signature event */
66 #define SNDRV_SEQ_EVENT_KEYSIGN 24 /* SMF Key Signature event */
69 * event data type = snd_seq_ev_queue_control
76 #define SNDRV_SEQ_EVENT_TEMPO 35 /* (SMF) Tempo event */
82 * event data type = none
86 #define SNDRV_SEQ_EVENT_SENSING 42 /* "active sensing" event */
89 * event data type = any type
91 #define SNDRV_SEQ_EVENT_ECHO 50 /* echo event */
92 #define SNDRV_SEQ_EVENT_OSS 51 /* OSS raw event */
95 * event data type = snd_seq_addr
105 * event data type = snd_seq_connect
113 * event data type = any
130 * event data type = snd_seq_ev_ext
134 #define SNDRV_SEQ_EVENT_BOUNCE 131 /* error event */
150 /* 255: special event */
156 /** event address */
170 #define SNDRV_SEQ_ADDRESS_SUBSCRIBERS 254 /* send event to all subscribed ports */
171 #define SNDRV_SEQ_ADDRESS_BROADCAST 255 /* send event to all queues/clients/ports/channels */
174 /* event mode flag - NOTE: only 8 bits available! */
183 #define SNDRV_SEQ_EVENT_LENGTH_FIXED (0<<2) /* fixed event size */
184 #define SNDRV_SEQ_EVENT_LENGTH_VARIABLE (1<<2) /* variable event size */
185 #define SNDRV_SEQ_EVENT_LENGTH_VARUSR (2<<2) /* variable event size - user memory space */
189 #define SNDRV_SEQ_PRIORITY_HIGH (1<<4) /* event should be processed before others */
193 /* note event */
202 /* controller event */
227 int event; /* processed event type */ member in struct:snd_seq_result
263 /* quoted event - inside the kernel only */
267 struct snd_seq_event *event; /* quoted event */ member in struct:snd_seq_ev_quote
271 /* sequencer event */
273 snd_seq_event_type_t type; /* event type */
274 unsigned char flags; /* event flags */
284 union { /* event data... */
301 * bounce event - stored as variable size data
305 struct snd_seq_event event; member in struct:snd_seq_event_bounce
345 /* event filter flags */
348 #define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */
349 #define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */
357 unsigned char event_filter[32]; /* event filter bitmap */
385 #define SNDRV_SEQ_REMOVE_EVENT_TYPE (1<<7) /* Restrict to event type */
512 #define SNDRV_SEQ_TIMER_MIDI_CLOCK 1 /* Midi Clock (CLOCK event) */
513 #define SNDRV_SEQ_TIMER_MIDI_TICK 2 /* Midi Timer Tick (TICK event) */
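The asequencer.h hits enumerate the sequencer event types and the snd_seq_event layout they travel in. From userspace, alsa-lib wraps the same structure; the sketch below sends one direct note-on and is only illustrative - it assumes the usual alsa-lib helpers (snd_seq_open(), snd_seq_ev_set_noteon(), snd_seq_event_output()) and a subscriber connected to the created port.

	#include <alsa/asoundlib.h>

	int main(void)
	{
		snd_seq_t *seq;
		snd_seq_event_t ev;
		int port;

		if (snd_seq_open(&seq, "default", SND_SEQ_OPEN_OUTPUT, 0) < 0)
			return 1;
		snd_seq_set_client_name(seq, "event-demo");
		port = snd_seq_create_simple_port(seq, "out",
						  SND_SEQ_PORT_CAP_READ | SND_SEQ_PORT_CAP_SUBS_READ,
						  SND_SEQ_PORT_TYPE_MIDI_GENERIC);
		if (port < 0)
			return 1;

		snd_seq_ev_clear(&ev);			/* zero the event, as the kernel does */
		snd_seq_ev_set_source(&ev, port);
		snd_seq_ev_set_subs(&ev);		/* deliver to subscribers */
		snd_seq_ev_set_direct(&ev);		/* no queue: dispatch immediately */
		snd_seq_ev_set_noteon(&ev, 0, 60, 100);	/* becomes SNDRV_SEQ_EVENT_NOTEON */

		snd_seq_event_output(seq, &ev);
		snd_seq_drain_output(seq);
		snd_seq_close(seq);
		return 0;
	}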
/linux-4.4.14/include/net/sctp/
H A Dulpevent.h62 /* Retrieve the skb this event sits inside of. */ sctp_event2skb()
68 /* Retrieve & cast the event sitting inside the skb. */ sctp_skb2event()
131 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
133 void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
135 void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
138 __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
140 /* Is this event type enabled? */ sctp_ulpevent_type_enabled()
148 /* Given an event subscription, is this event enabled? */ sctp_ulpevent_is_enabled()
149 static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event, sctp_ulpevent_is_enabled() argument
155 if (sctp_ulpevent_is_notification(event)) { sctp_ulpevent_is_enabled()
156 sn_type = sctp_ulpevent_get_notification_type(event); sctp_ulpevent_is_enabled()
/linux-4.4.14/include/trace/
H A Dsyscall.h20 * @enter_fields: list of fields for syscall_enter trace event
21 * @enter_event: associated syscall_enter trace event
22 * @exit_event: associated syscall_exit trace event
/linux-4.4.14/arch/powerpc/platforms/powernv/
H A Dopal-power.c25 /* Detect EPOW event */ detect_epow()
34 * Check for EPOW event. Kernel sends supported EPOW classes info detect_epow()
40 pr_err("Failed to get EPOW event information\n"); detect_epow()
65 /* Check for DPO event */ poweroff_pending()
68 pr_info("Existing DPO event detected.\n"); poweroff_pending()
72 /* Check for EPOW event */ poweroff_pending()
74 pr_info("Existing EPOW event detected.\n"); poweroff_pending()
120 /* OPAL EPOW event notifier block */
127 /* OPAL DPO event notifier block */
134 /* OPAL power-control event notifier block */
163 /* Register EPOW event notifier */ opal_power_control_init()
168 /* Register DPO event notifier */ opal_power_control_init()
/linux-4.4.14/drivers/input/serio/
H A Dserio.c145 * Serio event processing.
168 struct serio_event *event = NULL; serio_get_event() local
174 event = list_first_entry(&serio_event_list, serio_get_event()
176 list_del_init(&event->node); serio_get_event()
180 return event; serio_get_event()
183 static void serio_free_event(struct serio_event *event) serio_free_event() argument
185 module_put(event->owner); serio_free_event()
186 kfree(event); serio_free_event()
200 * If this event is of different type we should not serio_remove_duplicate_events()
217 struct serio_event *event; serio_handle_event() local
221 while ((event = serio_get_event())) { serio_handle_event()
223 switch (event->type) { serio_handle_event()
226 serio_add_port(event->object); serio_handle_event()
230 serio_reconnect_port(event->object); serio_handle_event()
234 serio_disconnect_port(event->object); serio_handle_event()
235 serio_find_driver(event->object); serio_handle_event()
239 serio_reconnect_subtree(event->object); serio_handle_event()
243 serio_attach_driver(event->object); serio_handle_event()
247 serio_remove_duplicate_events(event->object, event->type); serio_handle_event()
248 serio_free_event(event); serio_handle_event()
260 struct serio_event *event; serio_queue_event() local
266 * Scan event list for the other events for the same serio port, serio_queue_event()
267 * starting with the most recent one. If event is the same we serio_queue_event()
268 * do not need add new one. If event is of different type we serio_queue_event()
269 * need to add this event and should not look further because serio_queue_event()
272 list_for_each_entry_reverse(event, &serio_event_list, node) { serio_queue_event()
273 if (event->object == object) { serio_queue_event()
274 if (event->type == event_type) serio_queue_event()
280 event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC); serio_queue_event()
281 if (!event) { serio_queue_event()
282 pr_err("Not enough memory to queue event %d\n", event_type); serio_queue_event()
288 pr_warning("Can't get module reference, dropping event %d\n", serio_queue_event()
290 kfree(event); serio_queue_event()
295 event->type = event_type; serio_queue_event()
296 event->object = object; serio_queue_event()
297 event->owner = owner; serio_queue_event()
299 list_add_tail(&event->node, &serio_event_list); serio_queue_event()
313 struct serio_event *event, *next; serio_remove_pending_events() local
318 list_for_each_entry_safe(event, next, &serio_event_list, node) { serio_remove_pending_events()
319 if (event->object == object) { serio_remove_pending_events()
320 list_del_init(&event->node); serio_remove_pending_events()
321 serio_free_event(event); serio_remove_pending_events()
336 struct serio_event *event; serio_get_pending_child() local
342 list_for_each_entry(event, &serio_event_list, node) { serio_get_pending_child()
343 if (event->type == SERIO_REGISTER_PORT) { serio_get_pending_child()
344 serio = event->object; serio_get_pending_child()
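
serio.c keeps a global list of pending port events and, when queueing, scans that list from the newest entry backwards: a new event is dropped only if the most recent pending event for the same object already has the same type; a different type stops the scan so ordering between event kinds is preserved. A simplified sketch of that duplicate-suppression logic (hypothetical names; the real code also serialises with a lock and takes a module reference):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_event {
	void *object;			/* the port the event refers to */
	int type;			/* register / reconnect / ... */
	struct list_head node;
};

static LIST_HEAD(demo_event_list);

static int demo_queue_event(void *object, int type)
{
	struct demo_event *ev;

	/* Walk from the most recent entry: if the newest pending event for
	 * this object already has the same type, there is nothing to add;
	 * if it has a different type, stop looking so ordering between
	 * event kinds is preserved. */
	list_for_each_entry_reverse(ev, &demo_event_list, node) {
		if (ev->object == object) {
			if (ev->type == type)
				return 0;	/* duplicate, drop it */
			break;
		}
	}

	ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
	if (!ev)
		return -ENOMEM;

	ev->object = object;
	ev->type = type;
	list_add_tail(&ev->node, &demo_event_list);
	return 0;
}
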
/linux-4.4.14/arch/mips/kernel/
H A Dperf_event_mipsxx.c9 * based on the sparc64 perf event code and the x86 code. Performance
40 * is used for an event.
111 #define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
327 * when the former kind of event takes the counter the mipsxx_pmu_alloc_counter()
328 * latter kind of event wants to use, then the "counter mipsxx_pmu_alloc_counter()
329 * allocation" for the latter event will fail. In fact if mipsxx_pmu_alloc_counter()
375 static int mipspmu_event_set_period(struct perf_event *event, mipspmu_event_set_period() argument
406 perf_event_update_userpage(event); mipspmu_event_set_period()
411 static void mipspmu_event_update(struct perf_event *event, mipspmu_event_update() argument
428 local64_add(delta, &event->count); mipspmu_event_update()
432 static void mipspmu_start(struct perf_event *event, int flags) mipspmu_start() argument
434 struct hw_perf_event *hwc = &event->hw; mipspmu_start()
441 /* Set the period for the event. */ mipspmu_start()
442 mipspmu_event_set_period(event, hwc, hwc->idx); mipspmu_start()
444 /* Enable the event. */ mipspmu_start()
448 static void mipspmu_stop(struct perf_event *event, int flags) mipspmu_stop() argument
450 struct hw_perf_event *hwc = &event->hw; mipspmu_stop()
453 /* We are working on a local event. */ mipspmu_stop()
456 mipspmu_event_update(event, hwc, hwc->idx); mipspmu_stop()
461 static int mipspmu_add(struct perf_event *event, int flags) mipspmu_add() argument
464 struct hw_perf_event *hwc = &event->hw; mipspmu_add()
468 perf_pmu_disable(event->pmu); mipspmu_add()
470 /* To look for a free counter for this event. */ mipspmu_add()
478 * If there is an event in the counter we are going to use then mipspmu_add()
481 event->hw.idx = idx; mipspmu_add()
483 cpuc->events[idx] = event; mipspmu_add()
487 mipspmu_start(event, PERF_EF_RELOAD); mipspmu_add()
490 perf_event_update_userpage(event); mipspmu_add()
493 perf_pmu_enable(event->pmu); mipspmu_add()
497 static void mipspmu_del(struct perf_event *event, int flags) mipspmu_del() argument
500 struct hw_perf_event *hwc = &event->hw; mipspmu_del()
505 mipspmu_stop(event, PERF_EF_UPDATE); mipspmu_del()
509 perf_event_update_userpage(event); mipspmu_del()
512 static void mipspmu_read(struct perf_event *event) mipspmu_read() argument
514 struct hw_perf_event *hwc = &event->hw; mipspmu_read()
520 mipspmu_event_update(event, hwc, hwc->idx); mipspmu_read()
597 static int __hw_perf_event_init(struct perf_event *event);
599 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
614 static int mipspmu_event_init(struct perf_event *event) mipspmu_event_init() argument
619 if (has_branch_stack(event)) mipspmu_event_init()
622 switch (event->attr.type) { mipspmu_event_init()
632 if (event->cpu >= nr_cpumask_bits || mipspmu_event_init()
633 (event->cpu >= 0 && !cpu_online(event->cpu))) mipspmu_event_init()
649 return __hw_perf_event_init(event); mipspmu_event_init()
716 static int validate_group(struct perf_event *event) validate_group() argument
718 struct perf_event *sibling, *leader = event->group_leader; validate_group()
731 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) validate_group()
742 struct perf_event *event = cpuc->events[idx]; handle_associated_event() local
743 struct hw_perf_event *hwc = &event->hw; handle_associated_event()
745 mipspmu_event_update(event, hwc, idx); handle_associated_event()
746 data->period = event->hw.last_period; handle_associated_event()
747 if (!mipspmu_event_set_period(event, hwc, idx)) handle_associated_event()
750 if (perf_event_overflow(event, data, regs)) handle_associated_event()
810 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
819 /* 74K/proAptiv core has different branch event code. */
861 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
942 /* 74K/proAptiv core has completely different cache event map. */
1156 * Only general DTLB misses are counted use the same event for
1205 * Only general DTLB misses are counted use the same event for
1231 static void check_and_calc_range(struct perf_event *event, check_and_calc_range() argument
1234 struct hw_perf_event *hwc = &event->hw; check_and_calc_range()
1236 if (event->cpu >= 0) { check_and_calc_range()
1239 * The user selected an event that is processor check_and_calc_range()
1245 * FIXME: cpu_data[event->cpu].vpe_id reports 0 check_and_calc_range()
1248 hwc->config_base |= M_PERFCTL_VPEID(event->cpu); check_and_calc_range()
1255 static void check_and_calc_range(struct perf_event *event, check_and_calc_range() argument
1261 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
1263 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
1264 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
1268 /* Returning MIPS event descriptor for generic perf event. */ __hw_perf_event_init()
1269 if (PERF_TYPE_HARDWARE == event->attr.type) { __hw_perf_event_init()
1270 if (event->attr.config >= PERF_COUNT_HW_MAX) __hw_perf_event_init()
1272 pev = mipspmu_map_general_event(event->attr.config); __hw_perf_event_init()
1273 } else if (PERF_TYPE_HW_CACHE == event->attr.type) { __hw_perf_event_init()
1274 pev = mipspmu_map_cache_event(event->attr.config); __hw_perf_event_init()
1275 } else if (PERF_TYPE_RAW == event->attr.type) { __hw_perf_event_init()
1276 /* We are working on the global raw event. */ __hw_perf_event_init()
1278 pev = mipspmu.map_raw_event(event->attr.config); __hw_perf_event_init()
1280 /* The event type is not (yet) supported. */ __hw_perf_event_init()
1285 if (PERF_TYPE_RAW == event->attr.type) __hw_perf_event_init()
1298 check_and_calc_range(event, pev); __hw_perf_event_init()
1301 if (PERF_TYPE_RAW == event->attr.type) __hw_perf_event_init()
1316 * The event can belong to another cpu. We do not assign a local __hw_perf_event_init()
1329 if (event->group_leader != event) __hw_perf_event_init()
1330 err = validate_group(event); __hw_perf_event_init()
1332 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
1335 event->destroy(event); __hw_perf_event_init()
1494 * 128 needs to be added to 15 as the input for the event config, i.e., 143 (0x8F)
1503 /* currently most cores have 7-bit event numbers */ mipsxx_pmu_map_raw_event()
1560 /* 8-bit event numbers */ mipsxx_pmu_map_raw_event()
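
The MIPS PMU code above accumulates a 64-bit event count from a narrower, free-running hardware counter: mipspmu_event_update() computes the wrapped delta since the previous read and adds it to event->count, while mipspmu_event_set_period() re-arms the counter for sampling. A standalone simplification of the delta-accumulation part (the kernel version uses local64_t and a cmpxchg retry loop):

#include <stdint.h>

struct sw_event {
	uint64_t prev_raw;	/* last raw value read from the hardware counter */
	uint64_t count;		/* accumulated 64-bit event count */
};

/* counter_mask is e.g. 0xffffffff for a 32-bit hardware counter */
static void event_update(struct sw_event *ev, uint64_t new_raw, uint64_t counter_mask)
{
	uint64_t delta = (new_raw - ev->prev_raw) & counter_mask;	/* wrap-safe */

	ev->prev_raw = new_raw;
	ev->count += delta;
}
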
