Searched refs: event (results 1 - 200 of 3753), sorted by relevance


/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/
event_attributes_test.c
13 * Test various attributes of the EBB event are enforced.
17 struct event event, leader; event_attributes() local
19 event_init(&event, 0x1001e); event_attributes()
20 event_leader_ebb_init(&event); event_attributes()
22 FAIL_IF(event_open(&event)); event_attributes()
23 event_close(&event); event_attributes()
26 event_init(&event, 0x001e); /* CYCLES - no PMC specified */ event_attributes()
27 event_leader_ebb_init(&event); event_attributes()
29 FAIL_IF(event_open(&event) == 0); event_attributes()
32 event_init(&event, 0x2001e); event_attributes()
33 event_leader_ebb_init(&event); event_attributes()
34 event.attr.exclusive = 0; event_attributes()
36 FAIL_IF(event_open(&event) == 0); event_attributes()
39 event_init(&event, 0x3001e); event_attributes()
40 event_leader_ebb_init(&event); event_attributes()
41 event.attr.freq = 1; event_attributes()
43 FAIL_IF(event_open(&event) == 0); event_attributes()
46 event_init(&event, 0x4001e); event_attributes()
47 event_leader_ebb_init(&event); event_attributes()
48 event.attr.sample_period = 1; event_attributes()
50 FAIL_IF(event_open(&event) == 0); event_attributes()
53 event_init(&event, 0x1001e); event_attributes()
54 event_leader_ebb_init(&event); event_attributes()
55 event.attr.enable_on_exec = 1; event_attributes()
57 FAIL_IF(event_open(&event) == 0); event_attributes()
60 event_init(&event, 0x1001e); event_attributes()
61 event_leader_ebb_init(&event); event_attributes()
62 event.attr.inherit = 1; event_attributes()
64 FAIL_IF(event_open(&event) == 0); event_attributes()
71 event_init(&event, 0x20002); event_attributes()
72 event_ebb_init(&event); event_attributes()
75 FAIL_IF(event_open_with_group(&event, leader.fd)); event_attributes()
77 event_close(&event); event_attributes()
84 event_init(&event, 0x20002); event_attributes()
86 /* Expected to fail, event doesn't request EBB, leader does */ event_attributes()
87 FAIL_IF(event_open_with_group(&event, leader.fd) == 0); event_attributes()
98 event_init(&event, 0x20002); event_attributes()
99 event_ebb_init(&event); event_attributes()
102 FAIL_IF(event_open_with_group(&event, leader.fd) == 0); event_attributes()
119 event_init(&event, 0x1001e); event_attributes()
120 event_leader_ebb_init(&event); event_attributes()
121 /* Expected to fail, not a task event */ event_attributes()
123 FAIL_IF(event_open_with_cpu(&event, 0) == 0); event_attributes()
cpu_event_pinned_vs_ebb_test.c
18 * Tests a pinned cpu event vs an EBB - in that order. The pinned cpu event
19 * should remain and the EBB event should fail to enable.
22 static int setup_cpu_event(struct event *event, int cpu) setup_cpu_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_cpu_event()
26 event->attr.pinned = 1; setup_cpu_event()
28 event->attr.exclude_kernel = 1; setup_cpu_event()
29 event->attr.exclude_hv = 1; setup_cpu_event()
30 event->attr.exclude_idle = 1; setup_cpu_event()
33 FAIL_IF(event_open_with_cpu(event, cpu)); setup_cpu_event()
34 FAIL_IF(event_enable(event)); setup_cpu_event()
42 struct event event; cpu_event_pinned_vs_ebb() local
59 /* We setup the cpu event first */ cpu_event_pinned_vs_ebb()
60 rc = setup_cpu_event(&event, cpu); cpu_event_pinned_vs_ebb()
66 /* Signal the child to install its EBB event and wait */ cpu_event_pinned_vs_ebb()
75 /* We expect it to fail to read the event */ cpu_event_pinned_vs_ebb()
78 FAIL_IF(event_disable(&event)); cpu_event_pinned_vs_ebb()
79 FAIL_IF(event_read(&event)); cpu_event_pinned_vs_ebb()
81 event_report(&event); cpu_event_pinned_vs_ebb()
83 /* The cpu event should have run */ cpu_event_pinned_vs_ebb()
84 FAIL_IF(event.result.value == 0); cpu_event_pinned_vs_ebb()
85 FAIL_IF(event.result.enabled != event.result.running); cpu_event_pinned_vs_ebb()
task_event_pinned_vs_ebb_test.c
18 * Tests a pinned per-task event vs an EBB - in that order. The pinned per-task
19 * event should prevent the EBB event from being enabled.
22 static int setup_child_event(struct event *event, pid_t child_pid) setup_child_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_child_event()
26 event->attr.pinned = 1; setup_child_event()
28 event->attr.exclude_kernel = 1; setup_child_event()
29 event->attr.exclude_hv = 1; setup_child_event()
30 event->attr.exclude_idle = 1; setup_child_event()
32 FAIL_IF(event_open_with_pid(event, child_pid)); setup_child_event()
33 FAIL_IF(event_enable(event)); setup_child_event()
41 struct event event; task_event_pinned_vs_ebb() local
54 /* We setup the task event first */ task_event_pinned_vs_ebb()
55 rc = setup_child_event(&event, pid); task_event_pinned_vs_ebb()
61 /* Signal the child to install its EBB event and wait */ task_event_pinned_vs_ebb()
70 /* We expect it to fail to read the event */ task_event_pinned_vs_ebb()
72 FAIL_IF(event_disable(&event)); task_event_pinned_vs_ebb()
73 FAIL_IF(event_read(&event)); task_event_pinned_vs_ebb()
75 event_report(&event); task_event_pinned_vs_ebb()
77 FAIL_IF(event.result.value == 0); task_event_pinned_vs_ebb()
82 FAIL_IF(event.result.enabled == 0); task_event_pinned_vs_ebb()
83 FAIL_IF(event.result.running == 0); task_event_pinned_vs_ebb()
cycles_test.c
17 struct event event; cycles() local
19 event_init_named(&event, 0x1001e, "cycles"); cycles()
20 event_leader_ebb_init(&event); cycles()
22 event.attr.exclude_kernel = 1; cycles()
23 event.attr.exclude_hv = 1; cycles()
24 event.attr.exclude_idle = 1; cycles()
26 FAIL_IF(event_open(&event)); cycles()
31 FAIL_IF(ebb_event_enable(&event)); cycles()
47 event_close(&event); cycles()
ebb_vs_cpu_event_test.c
18 * Tests an EBB vs a cpu event - in that order. The EBB should force the cpu
19 * event off the PMU.
22 static int setup_cpu_event(struct event *event, int cpu) setup_cpu_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_cpu_event()
26 event->attr.exclude_kernel = 1; setup_cpu_event()
27 event->attr.exclude_hv = 1; setup_cpu_event()
28 event->attr.exclude_idle = 1; setup_cpu_event()
31 FAIL_IF(event_open_with_cpu(event, cpu)); setup_cpu_event()
32 FAIL_IF(event_enable(event)); setup_cpu_event()
40 struct event event; ebb_vs_cpu_event() local
57 /* Signal the child to install its EBB event and wait */ ebb_vs_cpu_event()
60 /* Now try to install our CPU event */ ebb_vs_cpu_event()
61 rc = setup_cpu_event(&event, cpu); ebb_vs_cpu_event()
72 FAIL_IF(event_disable(&event)); ebb_vs_cpu_event()
73 FAIL_IF(event_read(&event)); ebb_vs_cpu_event()
75 event_report(&event); ebb_vs_cpu_event()
77 /* The cpu event may have run, but we don't expect 100% */ ebb_vs_cpu_event()
78 FAIL_IF(event.result.enabled >= event.result.running); ebb_vs_cpu_event()
task_event_vs_ebb_test.c
18 * Tests a per-task event vs an EBB - in that order. The EBB should push the
19 * per-task event off the PMU.
22 static int setup_child_event(struct event *event, pid_t child_pid) setup_child_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_child_event()
26 event->attr.exclude_kernel = 1; setup_child_event()
27 event->attr.exclude_hv = 1; setup_child_event()
28 event->attr.exclude_idle = 1; setup_child_event()
30 FAIL_IF(event_open_with_pid(event, child_pid)); setup_child_event()
31 FAIL_IF(event_enable(event)); setup_child_event()
39 struct event event; task_event_vs_ebb() local
52 /* We setup the task event first */ task_event_vs_ebb()
53 rc = setup_child_event(&event, pid); task_event_vs_ebb()
59 /* Signal the child to install its EBB event and wait */ task_event_vs_ebb()
68 /* The EBB event should push the task event off so the child should succeed */ task_event_vs_ebb()
70 FAIL_IF(event_disable(&event)); task_event_vs_ebb()
71 FAIL_IF(event_read(&event)); task_event_vs_ebb()
73 event_report(&event); task_event_vs_ebb()
75 /* The task event may have run, or not so we can't assert anything about it */ task_event_vs_ebb()
cpu_event_vs_ebb_test.c
18 * Tests a cpu event vs an EBB - in that order. The EBB should force the cpu
19 * event off the PMU.
22 static int setup_cpu_event(struct event *event, int cpu) setup_cpu_event() argument
24 event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); setup_cpu_event()
26 event->attr.exclude_kernel = 1; setup_cpu_event()
27 event->attr.exclude_hv = 1; setup_cpu_event()
28 event->attr.exclude_idle = 1; setup_cpu_event()
31 FAIL_IF(event_open_with_cpu(event, cpu)); setup_cpu_event()
32 FAIL_IF(event_enable(event)); setup_cpu_event()
40 struct event event; cpu_event_vs_ebb() local
57 /* We setup the cpu event first */ cpu_event_vs_ebb()
58 rc = setup_cpu_event(&event, cpu); cpu_event_vs_ebb()
64 /* Signal the child to install its EBB event and wait */ cpu_event_vs_ebb()
76 FAIL_IF(event_disable(&event)); cpu_event_vs_ebb()
77 FAIL_IF(event_read(&event)); cpu_event_vs_ebb()
79 event_report(&event); cpu_event_vs_ebb()
81 /* The cpu event may have run */ cpu_event_vs_ebb()
instruction_count_test.c
25 static int do_count_loop(struct event *event, uint64_t instructions, do_count_loop() argument
45 event->result.value = ebb_state.stats.pmc_count[4-1]; do_count_loop()
47 difference = event->result.value - expected; do_count_loop()
48 percentage = (double)difference / event->result.value * 100; do_count_loop()
53 printf("Actual %llu\n", event->result.value); do_count_loop()
63 if (difference / event->result.value) do_count_loop()
70 static uint64_t determine_overhead(struct event *event) determine_overhead() argument
75 do_count_loop(event, 0, 0, false); determine_overhead()
76 overhead = event->result.value; determine_overhead()
79 do_count_loop(event, 0, 0, false); determine_overhead()
80 current = event->result.value; determine_overhead()
111 struct event event; instruction_count() local
114 event_init_named(&event, 0x400FA, "PM_RUN_INST_CMPL"); instruction_count()
115 event_leader_ebb_init(&event); instruction_count()
116 event.attr.exclude_kernel = 1; instruction_count()
117 event.attr.exclude_hv = 1; instruction_count()
118 event.attr.exclude_idle = 1; instruction_count()
120 FAIL_IF(event_open(&event)); instruction_count()
121 FAIL_IF(ebb_event_enable(&event)); instruction_count()
129 overhead = determine_overhead(&event); instruction_count()
133 FAIL_IF(do_count_loop(&event, 0x100000, overhead, true)); instruction_count()
136 FAIL_IF(do_count_loop(&event, 0xa00000, overhead, true)); instruction_count()
139 FAIL_IF(do_count_loop(&event, 0x6400000, overhead, true)); instruction_count()
142 FAIL_IF(do_count_loop(&event, 0x40000000, overhead, true)); instruction_count()
145 FAIL_IF(do_count_loop(&event, 0x400000000, overhead, true)); instruction_count()
148 FAIL_IF(do_count_loop(&event, 0x1000000000, overhead, true)); instruction_count()
151 FAIL_IF(do_count_loop(&event, 0x2000000000, overhead, true)); instruction_count()
154 event_close(&event); instruction_count()
no_handler_test.c
18 struct event event; no_handler_test() local
22 event_init_named(&event, 0x1001e, "cycles"); no_handler_test()
23 event_leader_ebb_init(&event); no_handler_test()
25 event.attr.exclude_kernel = 1; no_handler_test()
26 event.attr.exclude_hv = 1; no_handler_test()
27 event.attr.exclude_idle = 1; no_handler_test()
29 FAIL_IF(event_open(&event)); no_handler_test()
30 FAIL_IF(ebb_event_enable(&event)); no_handler_test()
39 /* Spin to make sure the event has time to overflow */ no_handler_test()
49 event_close(&event); no_handler_test()
pmc56_overflow_test.c
50 struct event event; pmc56_overflow() local
53 event_init(&event, 0x2001e); pmc56_overflow()
54 event_leader_ebb_init(&event); pmc56_overflow()
56 event.attr.exclude_kernel = 1; pmc56_overflow()
57 event.attr.exclude_hv = 1; pmc56_overflow()
58 event.attr.exclude_idle = 1; pmc56_overflow()
60 FAIL_IF(event_open(&event)); pmc56_overflow()
65 FAIL_IF(ebb_event_enable(&event)); pmc56_overflow()
83 event_close(&event); pmc56_overflow()
close_clears_pmcc_test.c
15 * Test that closing the EBB event clears MMCR0_PMCC, preventing further access
21 struct event event; close_clears_pmcc() local
23 event_init_named(&event, 0x1001e, "cycles"); close_clears_pmcc()
24 event_leader_ebb_init(&event); close_clears_pmcc()
26 FAIL_IF(event_open(&event)); close_clears_pmcc()
31 FAIL_IF(ebb_event_enable(&event)); close_clears_pmcc()
39 event_close(&event); close_clears_pmcc()
44 * that we have closed the event. We expect that we will. */ close_clears_pmcc()
ebb_on_child_test.c
19 * even though the event is enabled and running the child hasn't enabled the
30 /* Parent creates EBB event */ victim_child()
38 /* EBB event is enabled here */ victim_child()
47 struct event event; ebb_on_child() local
63 event_init_named(&event, 0x1001e, "cycles"); ebb_on_child()
64 event_leader_ebb_init(&event); ebb_on_child()
66 event.attr.exclude_kernel = 1; ebb_on_child()
67 event.attr.exclude_hv = 1; ebb_on_child()
68 event.attr.exclude_idle = 1; ebb_on_child()
70 FAIL_IF(event_open_with_pid(&event, pid)); ebb_on_child()
71 FAIL_IF(ebb_event_enable(&event)); ebb_on_child()
78 event_close(&event); ebb_on_child()
ebb_on_willing_child_test.c
19 * EBBs, which are then delivered to the child, even though the event is
27 /* Setup our EBB handler, before the EBB event is created */ victim_child()
54 struct event event; ebb_on_willing_child() local
71 event_init_named(&event, 0x1001e, "cycles"); ebb_on_willing_child()
72 event_leader_ebb_init(&event); ebb_on_willing_child()
74 event.attr.exclude_kernel = 1; ebb_on_willing_child()
75 event.attr.exclude_hv = 1; ebb_on_willing_child()
76 event.attr.exclude_idle = 1; ebb_on_willing_child()
78 FAIL_IF(event_open_with_pid(&event, pid)); ebb_on_willing_child()
79 FAIL_IF(ebb_event_enable(&event)); ebb_on_willing_child()
84 event_close(&event); ebb_on_willing_child()
back_to_back_ebbs_test.c
64 struct event event; back_to_back_ebbs() local
66 event_init_named(&event, 0x1001e, "cycles"); back_to_back_ebbs()
67 event_leader_ebb_init(&event); back_to_back_ebbs()
69 event.attr.exclude_kernel = 1; back_to_back_ebbs()
70 event.attr.exclude_hv = 1; back_to_back_ebbs()
71 event.attr.exclude_idle = 1; back_to_back_ebbs()
73 FAIL_IF(event_open(&event)); back_to_back_ebbs()
77 FAIL_IF(ebb_event_enable(&event)); back_to_back_ebbs()
96 event_close(&event); back_to_back_ebbs()
cycles_with_mmcr2_test.c
24 struct event event; cycles_with_mmcr2() local
29 event_init_named(&event, 0x1001e, "cycles"); cycles_with_mmcr2()
30 event_leader_ebb_init(&event); cycles_with_mmcr2()
32 event.attr.exclude_kernel = 1; cycles_with_mmcr2()
33 event.attr.exclude_hv = 1; cycles_with_mmcr2()
34 event.attr.exclude_idle = 1; cycles_with_mmcr2()
36 FAIL_IF(event_open(&event)); cycles_with_mmcr2()
42 FAIL_IF(ebb_event_enable(&event)); cycles_with_mmcr2()
76 event_close(&event); cycles_with_mmcr2()
multi_ebb_procs_test.c
32 struct event event; cycles_child() local
39 event_init_named(&event, 0x1001e, "cycles"); cycles_child()
40 event_leader_ebb_init(&event); cycles_child()
42 event.attr.exclude_kernel = 1; cycles_child()
43 event.attr.exclude_hv = 1; cycles_child()
44 event.attr.exclude_idle = 1; cycles_child()
46 FAIL_IF(event_open(&event)); cycles_child()
52 FAIL_IF(ebb_event_enable(&event)); cycles_child()
68 event_close(&event); cycles_child()
pmae_handling_test.c
59 struct event event; test_body() local
61 event_init_named(&event, 0x1001e, "cycles"); test_body()
62 event_leader_ebb_init(&event); test_body()
64 event.attr.exclude_kernel = 1; test_body()
65 event.attr.exclude_hv = 1; test_body()
66 event.attr.exclude_idle = 1; test_body()
68 FAIL_IF(event_open(&event)); test_body()
73 FAIL_IF(ebb_event_enable(&event)); test_body()
90 event_close(&event); test_body()
fork_cleanup_test.c
25 static struct event event; variable in typeref:struct:event
36 /* We can still read from the event, though it is on our parent */ child()
37 FAIL_IF(event_read(&event)); child()
47 event_init_named(&event, 0x1001e, "cycles"); fork_cleanup()
48 event_leader_ebb_init(&event); fork_cleanup()
50 FAIL_IF(event_open(&event)); fork_cleanup()
56 FAIL_IF(ebb_event_enable(&event)); fork_cleanup()
71 event_close(&event); fork_cleanup()
cycles_with_freeze_test.c
16 * hardware when the event overflows. We may take the EBB after we have set FC,
55 struct event event; cycles_with_freeze() local
59 event_init_named(&event, 0x1001e, "cycles"); cycles_with_freeze()
60 event_leader_ebb_init(&event); cycles_with_freeze()
62 event.attr.exclude_kernel = 1; cycles_with_freeze()
63 event.attr.exclude_hv = 1; cycles_with_freeze()
64 event.attr.exclude_idle = 1; cycles_with_freeze()
66 FAIL_IF(event_open(&event)); cycles_with_freeze()
70 FAIL_IF(ebb_event_enable(&event)); cycles_with_freeze()
106 event_close(&event); cycles_with_freeze()
reg.h
29 #define SPRN_BESCR 806 /* Branch event status & control register */
30 #define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */
31 #define SPRN_BESCRSU 801 /* Branch event status & control set upper */
32 #define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */
33 #define SPRN_BESCRRU 803 /* Branch event status & control REset upper */
lost_exception_test.c
24 struct event event; test_body() local
27 event_init_named(&event, 0x40002, "instructions"); test_body()
28 event_leader_ebb_init(&event); test_body()
30 event.attr.exclude_kernel = 1; test_body()
31 event.attr.exclude_hv = 1; test_body()
32 event.attr.exclude_idle = 1; test_body()
34 FAIL_IF(event_open(&event)); test_body()
39 FAIL_IF(ebb_event_enable(&event)); test_body()
82 event_close(&event); test_body()
ebb.h
9 #include "../event.h"
47 void event_leader_ebb_init(struct event *e);
48 void event_ebb_init(struct event *e);
49 void event_bhrb_init(struct event *e, unsigned ifm);
52 int ebb_event_enable(struct event *e);
57 void event_ebb_init(struct event *e);
58 void event_leader_ebb_init(struct event *e);
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/
l3_bank_test.c
9 #include "event.h"
19 struct event event; l3_bank_test() local
26 event_init(&event, 0x84918F); l3_bank_test()
28 FAIL_IF(event_open(&event)); l3_bank_test()
33 event_read(&event); l3_bank_test()
34 event_report(&event); l3_bank_test()
36 FAIL_IF(event.result.running == 0); l3_bank_test()
37 FAIL_IF(event.result.enabled == 0); l3_bank_test()
39 event_close(&event); l3_bank_test()
event.h
15 struct event { struct
27 void event_init(struct event *e, u64 config);
28 void event_init_named(struct event *e, u64 config, char *name);
29 void event_init_opts(struct event *e, u64 config, int type, char *name);
30 int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd);
31 int event_open_with_group(struct event *e, int group_fd);
32 int event_open_with_pid(struct event *e, pid_t pid);
33 int event_open_with_cpu(struct event *e, int cpu);
34 int event_open(struct event *e);
35 void event_close(struct event *e);
36 int event_enable(struct event *e);
37 int event_disable(struct event *e);
38 int event_reset(struct event *e);
39 int event_read(struct event *e);
40 void event_report_justified(struct event *e, int name_width, int result_width);
41 void event_report(struct event *e);
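For orientation, here is a minimal sketch of how the helpers declared in event.h above fit together, following the init/open/enable/read/report/close pattern the listed tests (cycles_test.c, l3_bank_test.c, the EBB tests) repeat. The workload placeholder and the FAIL_IF() macro are assumptions about the surrounding selftest harness, not part of event.h itself.

#include "event.h"              /* the selftest helper API listed above */

/* FAIL_IF() is assumed to come from the selftest harness headers. */
static int count_cycles_in_user_mode(void)
{
	struct event event;

	event_init_named(&event, 0x1001e, "cycles"); /* raw event code used by the EBB tests */
	event.attr.exclude_kernel = 1;               /* count user mode only */
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;

	FAIL_IF(event_open(&event));   /* open the perf event on the current task */
	FAIL_IF(event_enable(&event));

	/* ... run the workload being measured ... */

	FAIL_IF(event_disable(&event));
	FAIL_IF(event_read(&event));   /* pull value/enabled/running into event.result */
	event_report(&event);          /* print the result */

	event_close(&event);
	return 0;
}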
event.c
13 #include "event.h"
23 void event_init_opts(struct event *e, u64 config, int type, char *name) event_init_opts()
37 void event_init_named(struct event *e, u64 config, char *name) event_init_named()
42 void event_init(struct event *e, u64 config) event_init()
44 event_init_opts(e, config, PERF_TYPE_RAW, "event"); event_init()
52 int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd) event_open_with_options()
63 int event_open_with_group(struct event *e, int group_fd) event_open_with_group()
68 int event_open_with_pid(struct event *e, pid_t pid) event_open_with_pid()
73 int event_open_with_cpu(struct event *e, int cpu) event_open_with_cpu()
78 int event_open(struct event *e) event_open()
83 void event_close(struct event *e) event_close()
88 int event_enable(struct event *e) event_enable()
93 int event_disable(struct event *e) event_disable()
98 int event_reset(struct event *e) event_reset()
103 int event_read(struct event *e) event_read()
109 fprintf(stderr, "read error on event %p!\n", e); event_read()
116 void event_report_justified(struct event *e, int name_width, int result_width) event_report_justified()
128 void event_report(struct event *e) event_report()
per_event_excludes.c
15 #include "event.h"
20 * Test that per-event excludes work.
25 struct event *e, events[4]; per_event_excludes()
66 * The open here will fail if we don't have per event exclude support, per_event_excludes()
67 * because the second event has an incompatible set of exclude settings per_event_excludes()
74 * Even though the above will fail without per-event excludes we keep per_event_excludes()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/core/
event.c
22 #include <core/event.h>
26 nvkm_event_put(struct nvkm_event *event, u32 types, int index) nvkm_event_put() argument
28 assert_spin_locked(&event->refs_lock); nvkm_event_put()
31 if (--event->refs[index * event->types_nr + type] == 0) { nvkm_event_put()
32 if (event->func->fini) nvkm_event_put()
33 event->func->fini(event, 1 << type, index); nvkm_event_put()
39 nvkm_event_get(struct nvkm_event *event, u32 types, int index) nvkm_event_get() argument
41 assert_spin_locked(&event->refs_lock); nvkm_event_get()
44 if (++event->refs[index * event->types_nr + type] == 1) { nvkm_event_get()
45 if (event->func->init) nvkm_event_get()
46 event->func->init(event, 1 << type, index); nvkm_event_get()
52 nvkm_event_send(struct nvkm_event *event, u32 types, int index, nvkm_event_send() argument
58 if (!event->refs || WARN_ON(index >= event->index_nr)) nvkm_event_send()
61 spin_lock_irqsave(&event->list_lock, flags); nvkm_event_send()
62 list_for_each_entry(notify, &event->list, head) { nvkm_event_send()
64 if (event->func->send) { nvkm_event_send()
65 event->func->send(data, size, notify); nvkm_event_send()
71 spin_unlock_irqrestore(&event->list_lock, flags); nvkm_event_send()
75 nvkm_event_fini(struct nvkm_event *event) nvkm_event_fini() argument
77 if (event->refs) { nvkm_event_fini()
78 kfree(event->refs); nvkm_event_fini()
79 event->refs = NULL; nvkm_event_fini()
85 struct nvkm_event *event) nvkm_event_init()
87 event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr, nvkm_event_init()
89 if (!event->refs) nvkm_event_init()
92 event->func = func; nvkm_event_init()
93 event->types_nr = types_nr; nvkm_event_init()
94 event->index_nr = index_nr; nvkm_event_init()
95 spin_lock_init(&event->refs_lock); nvkm_event_init()
96 spin_lock_init(&event->list_lock); nvkm_event_init()
97 INIT_LIST_HEAD(&event->list); nvkm_event_init()
84 nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr, struct nvkm_event *event) nvkm_event_init() argument
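The nvkm core code above follows an enable-on-first-user / disable-on-last-user pattern: nvkm_event_get() and nvkm_event_put() keep a reference count per (index, type) pair and call the backend's init/fini hooks only on the 0 -> 1 and 1 -> 0 transitions (the refs_lock serialization is omitted here). A standalone sketch of that idea; all names below are illustrative, not the driver's:

struct demo_event {
	int types_nr;                          /* number of event types per index */
	int refs[64];                          /* index_nr * types_nr counters */
	void (*init)(int type, int index);     /* enable the hardware source */
	void (*fini)(int type, int index);     /* disable the hardware source */
};

static void demo_event_get(struct demo_event *ev, unsigned int types, int index)
{
	int type;

	for (type = 0; types; type++, types >>= 1) {
		if (!(types & 1))
			continue;
		/* first user of this (index, type): turn the source on */
		if (++ev->refs[index * ev->types_nr + type] == 1 && ev->init)
			ev->init(type, index);
	}
}

static void demo_event_put(struct demo_event *ev, unsigned int types, int index)
{
	int type;

	for (type = 0; types; type++, types >>= 1) {
		if (!(types & 1))
			continue;
		/* last user of this (index, type): turn the source off */
		if (--ev->refs[index * ev->types_nr + type] == 0 && ev->fini)
			ev->fini(type, index);
	}
}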
notify.c
25 #include <core/event.h>
31 nvkm_event_put(notify->event, notify->types, notify->index); nvkm_notify_put_locked()
37 struct nvkm_event *event = notify->event; nvkm_notify_put() local
39 if (likely(event) && nvkm_notify_put()
41 spin_lock_irqsave(&event->refs_lock, flags); nvkm_notify_put()
43 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_put()
53 nvkm_event_get(notify->event, notify->types, notify->index); nvkm_notify_get_locked()
59 struct nvkm_event *event = notify->event; nvkm_notify_get() local
61 if (likely(event) && nvkm_notify_get()
63 spin_lock_irqsave(&event->refs_lock, flags); nvkm_notify_get()
65 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_get()
72 struct nvkm_event *event = notify->event; nvkm_notify_func() local
77 spin_lock_irqsave(&event->refs_lock, flags); nvkm_notify_func()
79 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_func()
93 struct nvkm_event *event = notify->event; nvkm_notify_send() local
96 assert_spin_locked(&event->list_lock); nvkm_notify_send()
99 spin_lock_irqsave(&event->refs_lock, flags); nvkm_notify_send()
101 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_send()
105 spin_unlock_irqrestore(&event->refs_lock, flags); nvkm_notify_send()
121 if (notify->event) { nvkm_notify_fini()
123 spin_lock_irqsave(&notify->event->list_lock, flags); nvkm_notify_fini()
125 spin_unlock_irqrestore(&notify->event->list_lock, flags); nvkm_notify_fini()
127 notify->event = NULL; nvkm_notify_fini()
132 nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event, nvkm_notify_init() argument
139 if ((notify->event = event), event->refs) { nvkm_notify_init()
140 ret = event->func->ctor(object, data, size, notify); nvkm_notify_init()
155 spin_lock_irqsave(&event->list_lock, flags); nvkm_notify_init()
156 list_add_tail(&notify->head, &event->list); nvkm_notify_init()
157 spin_unlock_irqrestore(&event->list_lock, flags); nvkm_notify_init()
161 notify->event = NULL; nvkm_notify_init()
/linux-4.1.27/drivers/misc/ibmasm/
Makefile
6 event.o \
event.c
31 * ASM service processor event handling routines.
52 * Store the event in the circular event buffer, wake up any sleeping
53 * event readers.
60 struct ibmasm_event *event; ibmasm_receive_event() local
66 /* copy the event into the next slot in the circular buffer */ ibmasm_receive_event()
67 event = &buffer->events[buffer->next_index]; ibmasm_receive_event()
68 memcpy_fromio(event->data, data, data_size); ibmasm_receive_event()
69 event->data_size = data_size; ibmasm_receive_event()
70 event->serial_number = buffer->next_serial_number; ibmasm_receive_event()
87 * Called by event readers (initiated from user space through the file
89 * Sleeps until a new event is available.
94 struct ibmasm_event *event; ibmasm_get_next_event() local
110 event = &buffer->events[index]; ibmasm_get_next_event()
111 while (event->serial_number < reader->next_serial_number) { ibmasm_get_next_event()
113 event = &buffer->events[index]; ibmasm_get_next_event()
115 memcpy(reader->data, event->data, event->data_size); ibmasm_get_next_event()
116 reader->data_size = event->data_size; ibmasm_get_next_event()
117 reader->next_serial_number = event->serial_number + 1; ibmasm_get_next_event()
121 return event->data_size; ibmasm_get_next_event()
153 struct ibmasm_event *event; ibmasm_event_buffer_init() local
163 event = buffer->events; ibmasm_event_buffer_init()
164 for (i=0; i<IBMASM_NUM_EVENTS; i++, event++) ibmasm_event_buffer_init()
165 event->serial_number = 0; ibmasm_event_buffer_init()
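ibmasm_receive_event() and ibmasm_get_next_event() above implement a fixed-size circular event buffer in which every event carries a monotonically increasing serial number, so a reader that falls behind skips forward to the oldest event it has not yet consumed. A simplified, self-contained sketch of the producer side; sizes and names are illustrative, and the driver additionally copies from MMIO with memcpy_fromio() and wakes sleeping readers:

#include <string.h>

#define DEMO_NUM_EVENTS 32
#define DEMO_EVENT_SIZE 256

struct demo_event {
	unsigned long serial_number;   /* monotonically increasing stamp */
	unsigned int data_size;
	char data[DEMO_EVENT_SIZE];
};

struct demo_event_buffer {
	struct demo_event events[DEMO_NUM_EVENTS];
	unsigned long next_serial_number;
	unsigned int next_index;
};

/* Store an event in the next slot of the circular buffer, overwriting the
 * oldest entry once the buffer has wrapped around. */
static void demo_receive_event(struct demo_event_buffer *buffer,
			       const void *data, unsigned int data_size)
{
	struct demo_event *event = &buffer->events[buffer->next_index];

	if (data_size > DEMO_EVENT_SIZE)
		data_size = DEMO_EVENT_SIZE;
	memcpy(event->data, data, data_size);
	event->data_size = data_size;
	event->serial_number = buffer->next_serial_number++;
	buffer->next_index = (buffer->next_index + 1) % DEMO_NUM_EVENTS;
}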
/linux-4.1.27/drivers/net/wireless/ti/wl12xx/
Makefile
1 wl12xx-objs = main.o cmd.o acx.o debugfs.o scan.o event.o
/linux-4.1.27/drivers/net/wireless/ti/wl18xx/
Makefile
1 wl18xx-objs = main.o acx.o tx.o io.o debugfs.o scan.o cmd.o event.o
/linux-4.1.27/net/irda/
iriap_event.c
34 static void state_s_disconnect (struct iriap_cb *self, IRIAP_EVENT event,
36 static void state_s_connecting (struct iriap_cb *self, IRIAP_EVENT event,
38 static void state_s_call (struct iriap_cb *self, IRIAP_EVENT event,
41 static void state_s_make_call (struct iriap_cb *self, IRIAP_EVENT event,
43 static void state_s_calling (struct iriap_cb *self, IRIAP_EVENT event,
45 static void state_s_outstanding (struct iriap_cb *self, IRIAP_EVENT event,
47 static void state_s_replying (struct iriap_cb *self, IRIAP_EVENT event,
49 static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
51 static void state_s_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
54 static void state_r_disconnect (struct iriap_cb *self, IRIAP_EVENT event,
56 static void state_r_call (struct iriap_cb *self, IRIAP_EVENT event,
58 static void state_r_waiting (struct iriap_cb *self, IRIAP_EVENT event,
60 static void state_r_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
62 static void state_r_receiving (struct iriap_cb *self, IRIAP_EVENT event,
64 static void state_r_execute (struct iriap_cb *self, IRIAP_EVENT event,
66 static void state_r_returning (struct iriap_cb *self, IRIAP_EVENT event,
69 static void (*iriap_state[])(struct iriap_cb *self, IRIAP_EVENT event,
128 void iriap_do_client_event(struct iriap_cb *self, IRIAP_EVENT event, iriap_do_client_event() argument
134 (*iriap_state[ self->client_state]) (self, event, skb); iriap_do_client_event()
137 void iriap_do_call_event(struct iriap_cb *self, IRIAP_EVENT event, iriap_do_call_event() argument
143 (*iriap_state[ self->call_state]) (self, event, skb); iriap_do_call_event()
146 void iriap_do_server_event(struct iriap_cb *self, IRIAP_EVENT event, iriap_do_server_event() argument
152 (*iriap_state[ self->server_state]) (self, event, skb); iriap_do_server_event()
155 void iriap_do_r_connect_event(struct iriap_cb *self, IRIAP_EVENT event, iriap_do_r_connect_event() argument
161 (*iriap_state[ self->r_connect_state]) (self, event, skb); iriap_do_r_connect_event()
166 * Function state_s_disconnect (event, skb)
171 static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event, state_s_disconnect() argument
177 switch (event) { state_s_disconnect()
190 pr_debug("%s(), Unknown event %d\n", __func__, event); state_s_disconnect()
196 * Function state_s_connecting (self, event, skb)
201 static void state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event, state_s_connecting() argument
207 switch (event) { state_s_connecting()
222 pr_debug("%s(), Unknown event %d\n", __func__, event); state_s_connecting()
228 * Function state_s_call (self, event, skb)
232 * catches that event and clears up
234 static void state_s_call(struct iriap_cb *self, IRIAP_EVENT event, state_s_call() argument
239 switch (event) { state_s_call()
246 pr_debug("state_s_call: Unknown event %d\n", event); state_s_call()
252 * Function state_s_make_call (event, skb)
257 static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event, state_s_make_call() argument
264 switch (event) { state_s_make_call()
274 pr_debug("%s(), Unknown event %d\n", __func__, event); state_s_make_call()
280 * Function state_s_calling (event, skb)
285 static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event, state_s_calling() argument
292 * Function state_s_outstanding (event, skb)
297 static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event, state_s_outstanding() argument
302 switch (event) { state_s_outstanding()
310 pr_debug("%s(), Unknown event %d\n", __func__, event); state_s_outstanding()
316 * Function state_s_replying (event, skb)
320 static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, state_s_replying() argument
327 * Function state_s_wait_for_call (event, skb)
332 static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, state_s_wait_for_call() argument
340 * Function state_s_wait_active (event, skb)
345 static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event, state_s_wait_active() argument
358 * Function state_r_disconnect (self, event, skb)
363 static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, state_r_disconnect() argument
368 switch (event) { state_r_disconnect()
389 pr_debug("%s(), unknown event %d\n", __func__, event); state_r_disconnect()
395 * Function state_r_call (self, event, skb)
397 static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, state_r_call() argument
400 switch (event) { state_r_call()
407 pr_debug("%s(), unknown event!\n", __func__); state_r_call()
417 * Function state_r_waiting (self, event, skb)
419 static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event, state_r_waiting() argument
425 static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, state_r_wait_active() argument
432 * Function state_r_receiving (self, event, skb)
437 static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, state_r_receiving() argument
440 switch (event) { state_r_receiving()
447 pr_debug("%s(), unknown event!\n", __func__); state_r_receiving()
453 * Function state_r_execute (self, event, skb)
458 static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, state_r_execute() argument
465 switch (event) { state_r_execute()
480 pr_debug("%s(), unknown event!\n", __func__); state_r_execute()
485 static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event, state_r_returning() argument
488 pr_debug("%s(), event=%d\n", __func__, event); state_r_returning()
490 switch (event) { state_r_returning()
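The IrDA code above is a table-driven state machine: iriap_do_client_event() and the other dispatchers index an array of handler functions by the current state, and each handler switches on the incoming event, logging unknown events rather than failing. A compact sketch of that dispatch shape; the states, events, and types here are made up for illustration:

#include <stdio.h>

enum demo_state { DEMO_S_DISCONNECT, DEMO_S_CONNECTING, DEMO_NR_STATES };
enum demo_ev    { DEMO_EV_CONNECT_REQUEST, DEMO_EV_DISCONNECT_INDICATION };

struct demo_cb { enum demo_state state; };

static void demo_state_disconnect(struct demo_cb *self, enum demo_ev event);
static void demo_state_connecting(struct demo_cb *self, enum demo_ev event);

/* one handler per state, indexed by the current state */
static void (*demo_handlers[DEMO_NR_STATES])(struct demo_cb *, enum demo_ev) = {
	[DEMO_S_DISCONNECT] = demo_state_disconnect,
	[DEMO_S_CONNECTING] = demo_state_connecting,
};

static void demo_do_event(struct demo_cb *self, enum demo_ev event)
{
	(*demo_handlers[self->state])(self, event);   /* dispatch on current state */
}

static void demo_state_disconnect(struct demo_cb *self, enum demo_ev event)
{
	switch (event) {
	case DEMO_EV_CONNECT_REQUEST:
		self->state = DEMO_S_CONNECTING;      /* advance the machine */
		break;
	default:
		printf("unknown event %d\n", event);  /* log and stay put */
		break;
	}
}

static void demo_state_connecting(struct demo_cb *self, enum demo_ev event)
{
	(void)self;                                   /* handlers omitted in this sketch */
	(void)event;
}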
irlap_event.c
50 static int irlap_state_ndm (struct irlap_cb *self, IRLAP_EVENT event,
52 static int irlap_state_query (struct irlap_cb *self, IRLAP_EVENT event,
54 static int irlap_state_reply (struct irlap_cb *self, IRLAP_EVENT event,
56 static int irlap_state_conn (struct irlap_cb *self, IRLAP_EVENT event,
58 static int irlap_state_setup (struct irlap_cb *self, IRLAP_EVENT event,
60 static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event,
62 static int irlap_state_xmit_p (struct irlap_cb *self, IRLAP_EVENT event,
64 static int irlap_state_pclose (struct irlap_cb *self, IRLAP_EVENT event,
66 static int irlap_state_nrm_p (struct irlap_cb *self, IRLAP_EVENT event,
68 static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
70 static int irlap_state_reset (struct irlap_cb *self, IRLAP_EVENT event,
72 static int irlap_state_nrm_s (struct irlap_cb *self, IRLAP_EVENT event,
74 static int irlap_state_xmit_s (struct irlap_cb *self, IRLAP_EVENT event,
76 static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event,
78 static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event,
140 static int (*state[])(struct irlap_cb *self, IRLAP_EVENT event,
230 * Function irlap_do_event (event, skb, info)
235 void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, irlap_do_event() argument
243 pr_debug("%s(), event = %s, state = %s\n", __func__, irlap_do_event()
244 irlap_event[event], irlap_state[self->state]); irlap_do_event()
246 ret = (*state[self->state])(self, event, skb, info); irlap_do_event()
319 * Function irlap_state_ndm (event, skb, frame)
324 static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_ndm() argument
333 switch (event) { irlap_state_ndm()
340 * postpone the event... - Jean II */ irlap_state_ndm()
437 * log (and post an event). irlap_state_ndm()
456 * This event is not mentioned in the state machines in the irlap_state_ndm()
463 /* We don't send the frame, just post an event. irlap_state_ndm()
542 pr_debug("%s(), Unknown event %s\n", __func__, irlap_state_ndm()
543 irlap_event[event]); irlap_state_ndm()
552 * Function irlap_state_query (event, skb, info)
557 static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_query() argument
565 switch (event) { irlap_state_query()
594 * IrLMP who will put it in the log (and post an event). irlap_state_query()
652 pr_debug("%s(), Unknown event %s\n", __func__, irlap_state_query()
653 irlap_event[event]); irlap_state_query()
662 * Function irlap_state_reply (self, event, skb, info)
668 static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_reply() argument
677 switch (event) { irlap_state_reply()
718 pr_debug("%s(), Unknown event %d, %s\n", __func__, irlap_state_reply()
719 event, irlap_event[event]); irlap_state_reply()
728 * Function irlap_state_conn (event, skb, info)
734 static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_conn() argument
739 pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); irlap_state_conn()
744 switch (event) { irlap_state_conn()
799 pr_debug("%s(), event RECV_DISCOVER_XID_CMD!\n", irlap_state_conn()
811 pr_debug("%s(), Unknown event %d, %s\n", __func__, irlap_state_conn()
812 event, irlap_event[event]); irlap_state_conn()
822 * Function irlap_state_setup (event, skb, frame)
828 static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_setup() argument
836 switch (event) { irlap_state_setup()
947 pr_debug("%s(), Unknown event %d, %s\n", __func__, irlap_state_setup()
948 event, irlap_event[event]); irlap_state_setup()
957 * Function irlap_state_offline (self, event, skb, info)
962 static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_offline() argument
965 pr_debug("%s(), Unknown event\n", __func__); irlap_state_offline()
971 * Function irlap_state_xmit_p (self, event, skb, info)
978 static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_xmit_p() argument
983 switch (event) { irlap_state_xmit_p()
1118 pr_debug("%s(), Unknown event %s\n", irlap_state_xmit_p()
1119 __func__, irlap_event[event]); irlap_state_xmit_p()
1128 * Function irlap_state_pclose (event, skb, info)
1132 static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_pclose() argument
1140 switch (event) { irlap_state_pclose()
1170 pr_debug("%s(), Unknown event %d\n", __func__, event); irlap_state_pclose()
1179 * Function irlap_state_nrm_p (self, event, skb, info)
1187 static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_nrm_p() argument
1194 switch (event) { irlap_state_nrm_p()
1404 pr_debug("%s(), event=%s, ns_status=%d, nr_status=%d\n", irlap_state_nrm_p()
1405 __func__, irlap_event[event], ns_status, nr_status); irlap_state_nrm_p()
1537 /* Early warning event. I'm using a pretty liberal irlap_state_nrm_p()
1538 * interpretation of the spec and generate an event irlap_state_nrm_p()
1583 pr_debug("%s(), Unknown event %s\n", irlap_state_nrm_p()
1584 __func__, irlap_event[event]); irlap_state_nrm_p()
1593 * Function irlap_state_reset_wait (event, skb, info)
1599 static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_reset_wait() argument
1604 pr_debug("%s(), event = %s\n", __func__, irlap_event[event]); irlap_state_reset_wait()
1609 switch (event) { irlap_state_reset_wait()
1630 pr_debug("%s(), Unknown event %s\n", __func__, irlap_state_reset_wait()
1631 irlap_event[event]); irlap_state_reset_wait()
1640 * Function irlap_state_reset (self, event, skb, info)
1646 static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_reset() argument
1651 pr_debug("%s(), event = %s\n", __func__, irlap_event[event]); irlap_state_reset()
1656 switch (event) { irlap_state_reset()
1722 pr_debug("%s(), Unknown event %s\n", irlap_state_reset()
1723 __func__, irlap_event[event]); irlap_state_reset()
1732 * Function irlap_state_xmit_s (event, skb, info)
1738 static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_xmit_s() argument
1743 pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); irlap_state_xmit_s()
1748 switch (event) { irlap_state_xmit_s()
1842 pr_debug("%s(), Unknown event %s\n", __func__, irlap_state_xmit_s()
1843 irlap_event[event]); irlap_state_xmit_s()
1852 * Function irlap_state_nrm_s (event, skb, info)
1858 static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_nrm_s() argument
1865 pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); irlap_state_nrm_s()
1870 switch (event) { irlap_state_nrm_s()
1873 pr_debug("%s(), event=%s nr=%d, vs=%d, ns=%d, vr=%d, pf=%d\n", irlap_state_nrm_s()
1874 __func__, irlap_event[event], info->nr, irlap_state_nrm_s()
2203 pr_debug("%s(), Unknown event %d, (%s)\n", __func__, irlap_state_nrm_s()
2204 event, irlap_event[event]); irlap_state_nrm_s()
2213 * Function irlap_state_sclose (self, event, skb, info)
2215 static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, irlap_state_sclose() argument
2221 switch (event) { irlap_state_sclose()
2273 pr_debug("%s(), Unknown event %d, (%s)\n", __func__, irlap_state_sclose()
2274 event, irlap_event[event]); irlap_state_sclose()
2282 static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event, irlap_state_reset_check() argument
2288 pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); irlap_state_reset_check()
2293 switch (event) { irlap_state_reset_check()
2309 pr_debug("%s(), Unknown event %d, (%s)\n", __func__, irlap_state_reset_check()
2310 event, irlap_event[event]); irlap_state_reset_check()
/linux-4.1.27/arch/powerpc/perf/
core-fsl-emb.c
2 * Performance event support - Freescale Embedded Performance Monitor
27 struct perf_event *event[MAX_HWEVENTS]; member in struct:cpu_hw_events
179 static void fsl_emb_pmu_read(struct perf_event *event) fsl_emb_pmu_read() argument
183 if (event->hw.state & PERF_HES_STOPPED) fsl_emb_pmu_read()
192 prev = local64_read(&event->hw.prev_count); fsl_emb_pmu_read()
194 val = read_pmc(event->hw.idx); fsl_emb_pmu_read()
195 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); fsl_emb_pmu_read()
199 local64_add(delta, &event->count); fsl_emb_pmu_read()
200 local64_sub(delta, &event->hw.period_left); fsl_emb_pmu_read()
272 struct perf_event *event; collect_events() local
280 list_for_each_entry(event, &group->sibling_list, group_entry) { collect_events()
281 if (!is_software_event(event) && collect_events()
282 event->state != PERF_EVENT_STATE_OFF) { collect_events()
285 ctrs[n] = event; collect_events()
293 static int fsl_emb_pmu_add(struct perf_event *event, int flags) fsl_emb_pmu_add() argument
301 perf_pmu_disable(event->pmu); fsl_emb_pmu_add()
304 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) fsl_emb_pmu_add()
312 if (cpuhw->event[i]) fsl_emb_pmu_add()
321 event->hw.idx = i; fsl_emb_pmu_add()
322 cpuhw->event[i] = event; fsl_emb_pmu_add()
326 if (event->hw.sample_period) { fsl_emb_pmu_add()
327 s64 left = local64_read(&event->hw.period_left); fsl_emb_pmu_add()
331 local64_set(&event->hw.prev_count, val); fsl_emb_pmu_add()
334 event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; fsl_emb_pmu_add()
337 event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE); fsl_emb_pmu_add()
341 perf_event_update_userpage(event); fsl_emb_pmu_add()
343 write_pmlcb(i, event->hw.config >> 32); fsl_emb_pmu_add()
344 write_pmlca(i, event->hw.config_base); fsl_emb_pmu_add()
349 perf_pmu_enable(event->pmu); fsl_emb_pmu_add()
354 static void fsl_emb_pmu_del(struct perf_event *event, int flags) fsl_emb_pmu_del() argument
357 int i = event->hw.idx; fsl_emb_pmu_del()
359 perf_pmu_disable(event->pmu); fsl_emb_pmu_del()
363 fsl_emb_pmu_read(event); fsl_emb_pmu_del()
367 WARN_ON(event != cpuhw->event[event->hw.idx]); fsl_emb_pmu_del()
373 cpuhw->event[i] = NULL; fsl_emb_pmu_del()
374 event->hw.idx = -1; fsl_emb_pmu_del()
377 * TODO: if at least one restricted event exists, and we fsl_emb_pmu_del()
380 * a non-restricted event, migrate that event to the fsl_emb_pmu_del()
387 perf_pmu_enable(event->pmu); fsl_emb_pmu_del()
391 static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) fsl_emb_pmu_start() argument
397 if (event->hw.idx < 0 || !event->hw.sample_period) fsl_emb_pmu_start()
400 if (!(event->hw.state & PERF_HES_STOPPED)) fsl_emb_pmu_start()
404 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); fsl_emb_pmu_start()
407 perf_pmu_disable(event->pmu); fsl_emb_pmu_start()
409 event->hw.state = 0; fsl_emb_pmu_start()
410 left = local64_read(&event->hw.period_left); fsl_emb_pmu_start()
414 write_pmc(event->hw.idx, val); fsl_emb_pmu_start()
416 perf_event_update_userpage(event); fsl_emb_pmu_start()
417 perf_pmu_enable(event->pmu); fsl_emb_pmu_start()
421 static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) fsl_emb_pmu_stop() argument
425 if (event->hw.idx < 0 || !event->hw.sample_period) fsl_emb_pmu_stop()
428 if (event->hw.state & PERF_HES_STOPPED) fsl_emb_pmu_stop()
432 perf_pmu_disable(event->pmu); fsl_emb_pmu_stop()
434 fsl_emb_pmu_read(event); fsl_emb_pmu_stop()
435 event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; fsl_emb_pmu_stop()
436 write_pmc(event->hw.idx, 0); fsl_emb_pmu_stop()
438 perf_event_update_userpage(event); fsl_emb_pmu_stop()
439 perf_pmu_enable(event->pmu); fsl_emb_pmu_stop()
446 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
486 static int fsl_emb_pmu_event_init(struct perf_event *event) fsl_emb_pmu_event_init() argument
501 switch (event->attr.type) { fsl_emb_pmu_event_init()
503 ev = event->attr.config; fsl_emb_pmu_event_init()
510 err = hw_perf_cache_event(event->attr.config, &ev); fsl_emb_pmu_event_init()
516 ev = event->attr.config; fsl_emb_pmu_event_init()
523 event->hw.config = ppmu->xlate_event(ev); fsl_emb_pmu_event_init()
524 if (!(event->hw.config & FSL_EMB_EVENT_VALID)) fsl_emb_pmu_event_init()
529 * other hardware events in the group. We assume the event fsl_emb_pmu_event_init()
533 if (event->group_leader != event) { fsl_emb_pmu_event_init()
534 n = collect_events(event->group_leader, fsl_emb_pmu_event_init()
540 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { fsl_emb_pmu_event_init()
551 event->hw.idx = -1; fsl_emb_pmu_event_init()
553 event->hw.config_base = PMLCA_CE | PMLCA_FCM1 | fsl_emb_pmu_event_init()
556 if (event->attr.exclude_user) fsl_emb_pmu_event_init()
557 event->hw.config_base |= PMLCA_FCU; fsl_emb_pmu_event_init()
558 if (event->attr.exclude_kernel) fsl_emb_pmu_event_init()
559 event->hw.config_base |= PMLCA_FCS; fsl_emb_pmu_event_init()
560 if (event->attr.exclude_idle) fsl_emb_pmu_event_init()
563 event->hw.last_period = event->hw.sample_period; fsl_emb_pmu_event_init()
564 local64_set(&event->hw.period_left, event->hw.last_period); fsl_emb_pmu_event_init()
585 event->destroy = hw_perf_event_destroy; fsl_emb_pmu_event_init()
606 static void record_and_restart(struct perf_event *event, unsigned long val, record_and_restart() argument
609 u64 period = event->hw.sample_period; record_and_restart()
613 if (event->hw.state & PERF_HES_STOPPED) { record_and_restart()
614 write_pmc(event->hw.idx, 0); record_and_restart()
619 prev = local64_read(&event->hw.prev_count); record_and_restart()
621 local64_add(delta, &event->count); record_and_restart()
624 * See if the total period for this event has expired, record_and_restart()
628 left = local64_read(&event->hw.period_left) - delta; record_and_restart()
635 event->hw.last_period = event->hw.sample_period; record_and_restart()
641 write_pmc(event->hw.idx, val); record_and_restart()
642 local64_set(&event->hw.prev_count, val); record_and_restart()
643 local64_set(&event->hw.period_left, left); record_and_restart()
644 perf_event_update_userpage(event); record_and_restart()
652 perf_sample_data_init(&data, 0, event->hw.last_period); record_and_restart()
654 if (perf_event_overflow(event, &data, regs)) record_and_restart()
655 fsl_emb_pmu_stop(event, 0); record_and_restart()
663 struct perf_event *event; perf_event_interrupt() local
675 event = cpuhw->event[i]; perf_event_interrupt()
679 if (event) { perf_event_interrupt()
680 /* event has overflowed */ perf_event_interrupt()
682 record_and_restart(event, val, regs); perf_event_interrupt()
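record_and_restart() above shows the sampling side of the PMU driver: the delta accumulated since the last read is charged against the remaining sample period, and once a full period has elapsed the counter is re-armed and a sample is recorded. A simplified sketch of just the period bookkeeping; plain integers stand in for the kernel's local64_t, and the exact value written back to the PMC is omitted:

#include <stdint.h>

/* Return the number of events left until the next sample should be taken,
 * after "delta" more events have been counted since the last update. */
static int64_t demo_remaining_period(int64_t period_left, uint64_t period,
				     uint64_t delta)
{
	int64_t left = period_left - delta;

	if (period && left <= 0) {
		left += period;          /* one full sample period has elapsed */
		if (left <= 0)
			left = period;   /* fell more than a period behind: restart */
	}
	/* the driver then re-programs the PMC so it overflows after "left"
	 * more events, and records a sample via perf_event_overflow() */
	return left;
}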
hv-gpci.c
38 * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h
161 * we verify offset and length are within the zeroed buffer at event single_gpci_request()
172 static u64 h_gpci_get_value(struct perf_event *event) h_gpci_get_value() argument
175 unsigned long ret = single_gpci_request(event_get_request(event), h_gpci_get_value()
176 event_get_starting_index(event), h_gpci_get_value()
177 event_get_secondary_index(event), h_gpci_get_value()
178 event_get_counter_info_version(event), h_gpci_get_value()
179 event_get_offset(event), h_gpci_get_value()
180 event_get_length(event), h_gpci_get_value()
187 static void h_gpci_event_update(struct perf_event *event) h_gpci_event_update() argument
190 u64 now = h_gpci_get_value(event); h_gpci_event_update()
191 prev = local64_xchg(&event->hw.prev_count, now); h_gpci_event_update()
192 local64_add(now - prev, &event->count); h_gpci_event_update()
195 static void h_gpci_event_start(struct perf_event *event, int flags) h_gpci_event_start() argument
197 local64_set(&event->hw.prev_count, h_gpci_get_value(event)); h_gpci_event_start()
200 static void h_gpci_event_stop(struct perf_event *event, int flags) h_gpci_event_stop() argument
202 h_gpci_event_update(event); h_gpci_event_stop()
205 static int h_gpci_event_add(struct perf_event *event, int flags) h_gpci_event_add() argument
208 h_gpci_event_start(event, flags); h_gpci_event_add()
213 static int h_gpci_event_init(struct perf_event *event) h_gpci_event_init() argument
218 /* Not our event */ h_gpci_event_init()
219 if (event->attr.type != event->pmu->type) h_gpci_event_init()
223 if (event->attr.config2) { h_gpci_event_init()
229 if (event->attr.exclude_user || h_gpci_event_init()
230 event->attr.exclude_kernel || h_gpci_event_init()
231 event->attr.exclude_hv || h_gpci_event_init()
232 event->attr.exclude_idle || h_gpci_event_init()
233 event->attr.exclude_host || h_gpci_event_init()
234 event->attr.exclude_guest) h_gpci_event_init()
238 if (has_branch_stack(event)) h_gpci_event_init()
241 length = event_get_length(event); h_gpci_event_init()
248 if ((event_get_offset(event) + length) > GPCI_MAX_DATA_BYTES) { h_gpci_event_init()
250 (size_t)event_get_offset(event) + length, h_gpci_event_init()
256 if (single_gpci_request(event_get_request(event), h_gpci_event_init()
257 event_get_starting_index(event), h_gpci_event_init()
258 event_get_secondary_index(event), h_gpci_event_init()
259 event_get_counter_info_version(event), h_gpci_event_init()
260 event_get_offset(event), h_gpci_event_init()
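h_gpci_event_update() above (like power_pmu_read() and fsl_emb_pmu_read() elsewhere in this listing) uses the standard free-running-counter update: read the current hardware value, swap it into prev_count, and add the difference to the accumulated count. A minimal sketch, with plain integers standing in for the kernel's atomic local64_t:

#include <stdint.h>

struct demo_counter {
	uint64_t prev_count;   /* hardware value seen at the last update */
	uint64_t count;        /* total accumulated for the event */
};

static void demo_counter_update(struct demo_counter *c, uint64_t now)
{
	uint64_t prev = c->prev_count;   /* local64_xchg() in the kernel code */

	c->prev_count = now;
	c->count += now - prev;          /* charge the delta since the last read */
}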
power5+-pmu.c
18 * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
25 #define PM_BYTE_SH 12 /* Byte number of event bus to use */
29 #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */
106 * 24-27: Byte 0 event source 0x0f00_0000
107 * Encoding as for the event code
110 * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
136 static int power5p_get_constraint(u64 event, unsigned long *maskp, power5p_get_constraint() argument
143 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5p_get_constraint()
150 if (pmc >= 5 && !(event == 0x500009 || event == 0x600005)) power5p_get_constraint()
153 if (event & PM_BUSEVENT_MSK) { power5p_get_constraint()
154 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power5p_get_constraint()
161 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power5p_get_constraint()
170 bit = event & 7; power5p_get_constraint()
174 value |= (unsigned long)((event >> PM_GRS_SH) & fmask) power5p_get_constraint()
191 static int power5p_limited_pmc_event(u64 event) power5p_limited_pmc_event() argument
193 int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5p_limited_pmc_event()
198 #define MAX_ALT 3 /* at most 3 alternatives for any event */
218 static int find_alternative(unsigned int event) find_alternative() argument
223 if (event < event_alternatives[i][0]) find_alternative()
226 if (event == event_alternatives[i][j]) find_alternative()
240 * Some direct events for decodes of event bus byte 3 have alternative
242 * event code for those that do, or -1 otherwise. This also handles
245 static s64 find_alternative_bdecode(u64 event) find_alternative_bdecode() argument
249 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; find_alternative_bdecode()
253 pp = event & PM_PMCSEL_MSK; find_alternative_bdecode()
256 return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | find_alternative_bdecode()
264 return event + (2 << PM_PMC_SH) + (0x2e - 0x0d); find_alternative_bdecode()
266 return event - (2 << PM_PMC_SH) - (0x2e - 0x0d); find_alternative_bdecode()
268 /* alternative add event encodings */ find_alternative_bdecode()
270 return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) | find_alternative_bdecode()
276 static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[]) power5p_get_alternatives() argument
282 alt[0] = event; power5p_get_alternatives()
284 nlim = power5p_limited_pmc_event(event); power5p_get_alternatives()
285 i = find_alternative(event); power5p_get_alternatives()
289 if (ae && ae != event) power5p_get_alternatives()
294 ae = find_alternative_bdecode(event); power5p_get_alternatives()
308 * we never end up with more than 3 alternatives for any event. power5p_get_alternatives()
360 * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
402 * Returns 1 if event counts things relating to marked instructions
405 static int power5p_marked_instr_event(u64 event) power5p_marked_instr_event() argument
411 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5p_marked_instr_event()
412 psel = event & PM_PMCSEL_MSK; power5p_marked_instr_event()
436 if (!(event & PM_BUSEVENT_MSK) || bit == -1) power5p_marked_instr_event()
439 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power5p_marked_instr_event()
440 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power5p_marked_instr_event()
454 static int power5p_compute_mmcr(u64 event[], int n_ev, power5p_compute_mmcr() argument
474 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power5p_compute_mmcr()
482 if (event[i] & PM_BUSEVENT_MSK) { power5p_compute_mmcr()
483 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power5p_compute_mmcr()
484 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; power5p_compute_mmcr()
552 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power5p_compute_mmcr()
553 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power5p_compute_mmcr()
554 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; power5p_compute_mmcr()
555 psel = event[i] & PM_PMCSEL_MSK; power5p_compute_mmcr()
556 isbus = event[i] & PM_BUSEVENT_MSK; power5p_compute_mmcr()
558 /* Bus event or any-PMC direct event */ power5p_compute_mmcr()
567 /* Direct event */ power5p_compute_mmcr()
579 grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; power5p_compute_mmcr()
582 if (power5p_marked_instr_event(event[i])) power5p_compute_mmcr()
623 * are event codes.
core-book3s.c
2 * Performance event support - powerpc architecture code
36 struct perf_event *event[MAX_HWEVENTS]; member in struct:cpu_hw_events
116 static bool is_ebb_event(struct perf_event *event) { return false; } ebb_event_check() argument
117 static int ebb_event_check(struct perf_event *event) { return 0; } ebb_event_add() argument
118 static void ebb_event_add(struct perf_event *event) { } ebb_switch_out() argument
125 static inline void power_pmu_bhrb_enable(struct perf_event *event) {} power_pmu_bhrb_disable() argument
126 static inline void power_pmu_bhrb_disable(struct perf_event *event) {} power_pmu_sched_task() argument
280 * If this isn't a PMU exception (eg a software event) the SIAR is perf_read_regs()
283 * If it is a marked event use the SIAR. perf_read_regs()
349 static void power_pmu_bhrb_enable(struct perf_event *event) power_pmu_bhrb_enable() argument
357 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { power_pmu_bhrb_enable()
359 cpuhw->bhrb_context = event->ctx; power_pmu_bhrb_enable()
362 perf_sched_cb_inc(event->ctx->pmu); power_pmu_bhrb_enable()
365 static void power_pmu_bhrb_disable(struct perf_event *event) power_pmu_bhrb_disable() argument
374 perf_sched_cb_dec(event->ctx->pmu); power_pmu_bhrb_disable()
500 static bool is_ebb_event(struct perf_event *event) is_ebb_event() argument
505 * use bit 63 of the event code for something else if they wish. is_ebb_event()
508 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); is_ebb_event()
511 static int ebb_event_check(struct perf_event *event) ebb_event_check() argument
513 struct perf_event *leader = event->group_leader; ebb_event_check()
516 if (is_ebb_event(leader) != is_ebb_event(event)) ebb_event_check()
519 if (is_ebb_event(event)) { ebb_event_check()
520 if (!(event->attach_state & PERF_ATTACH_TASK)) ebb_event_check()
526 if (event->attr.freq || ebb_event_check()
527 event->attr.inherit || ebb_event_check()
528 event->attr.sample_type || ebb_event_check()
529 event->attr.sample_period || ebb_event_check()
530 event->attr.enable_on_exec) ebb_event_check()
537 static void ebb_event_add(struct perf_event *event) ebb_event_add() argument
539 if (!is_ebb_event(event) || current->thread.used_ebb) ebb_event_add()
543 * IFF this is the first time we've added an EBB event, set ebb_event_add()
945 struct perf_event *event; check_excludes() local
948 * If the PMU we're on supports per event exclude settings then we check_excludes()
950 * per event exclude and limited PMCs. check_excludes()
965 event = ctrs[i]; check_excludes()
967 eu = event->attr.exclude_user; check_excludes()
968 ek = event->attr.exclude_kernel; check_excludes()
969 eh = event->attr.exclude_hv; check_excludes()
971 } else if (event->attr.exclude_user != eu || check_excludes()
972 event->attr.exclude_kernel != ek || check_excludes()
973 event->attr.exclude_hv != eh) { check_excludes()
1005 static void power_pmu_read(struct perf_event *event) power_pmu_read() argument
1009 if (event->hw.state & PERF_HES_STOPPED) power_pmu_read()
1012 if (!event->hw.idx) power_pmu_read()
1015 if (is_ebb_event(event)) { power_pmu_read()
1016 val = read_pmc(event->hw.idx); power_pmu_read()
1017 local64_set(&event->hw.prev_count, val); power_pmu_read()
1027 prev = local64_read(&event->hw.prev_count); power_pmu_read()
1029 val = read_pmc(event->hw.idx); power_pmu_read()
1033 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); power_pmu_read()
1035 local64_add(delta, &event->count); power_pmu_read()
1047 prev = local64_read(&event->hw.period_left); power_pmu_read()
1051 } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); power_pmu_read()
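power_pmu_read() publishes new counts without a lock: it snapshots prev_count, re-reads the hardware counter, and only commits the delta if prev_count is still the value it saw. A minimal sketch of that pattern with hypothetical names (not the kernel's helpers):

	#include <linux/atomic.h>
	#include <linux/types.h>

	struct sw_counter {
		atomic64_t prev;	/* last hardware value already accounted for */
		atomic64_t total;	/* accumulated event count */
	};

	static void sw_counter_update(struct sw_counter *c, u64 (*read_hw)(void))
	{
		u64 prev, now;

		do {
			prev = (u64)atomic64_read(&c->prev);
			now = read_hw();		/* re-read inside the loop */
			/* retry if a concurrent reader published a newer value */
		} while ((u64)atomic64_cmpxchg(&c->prev, prev, now) != prev);

		atomic64_add(now - prev, &c->total);	/* counter only counts up */
	}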
1057 * us if `event' is using such a PMC.
1068 struct perf_event *event; freeze_limited_counters() local
1073 event = cpuhw->limited_counter[i]; freeze_limited_counters()
1074 if (!event->hw.idx) freeze_limited_counters()
1076 val = (event->hw.idx == 5) ? pmc5 : pmc6; freeze_limited_counters()
1077 prev = local64_read(&event->hw.prev_count); freeze_limited_counters()
1078 event->hw.idx = 0; freeze_limited_counters()
1081 local64_add(delta, &event->count); freeze_limited_counters()
1088 struct perf_event *event; thaw_limited_counters() local
1093 event = cpuhw->limited_counter[i]; thaw_limited_counters()
1094 event->hw.idx = cpuhw->limited_hwidx[i]; thaw_limited_counters()
1095 val = (event->hw.idx == 5) ? pmc5 : pmc6; thaw_limited_counters()
1096 prev = local64_read(&event->hw.prev_count); thaw_limited_counters()
1098 local64_set(&event->hw.prev_count, val); thaw_limited_counters()
1099 perf_event_update_userpage(event); thaw_limited_counters()
1127 * events, we first write MMCR0 with the event overflow write_mmcr0()
1142 * Write the full MMCR0 including the event overflow interrupt write_mmcr0()
1213 struct perf_event *event; power_pmu_enable() local
1241 * flag set, or not set, so we can just check a single event. Also we power_pmu_enable()
1242 * know we have at least one event. power_pmu_enable()
1244 ebb = is_ebb_event(cpuhw->event[0]); power_pmu_enable()
1264 cpuhw->mmcr, cpuhw->event)) { power_pmu_enable()
1273 * bits for the first event. We have already checked that all power_pmu_enable()
1274 * events have the same value for these bits as the first event. power_pmu_enable()
1276 event = cpuhw->event[0]; power_pmu_enable()
1277 if (event->attr.exclude_user) power_pmu_enable()
1279 if (event->attr.exclude_kernel) power_pmu_enable()
1281 if (event->attr.exclude_hv) power_pmu_enable()
1303 event = cpuhw->event[i]; power_pmu_enable()
1304 if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { power_pmu_enable()
1305 power_pmu_read(event); power_pmu_enable()
1306 write_pmc(event->hw.idx, 0); power_pmu_enable()
1307 event->hw.idx = 0; power_pmu_enable()
1316 event = cpuhw->event[i]; power_pmu_enable()
1317 if (event->hw.idx) power_pmu_enable()
1321 cpuhw->limited_counter[n_lim] = event; power_pmu_enable()
1328 val = local64_read(&event->hw.prev_count); power_pmu_enable()
1331 if (event->hw.sample_period) { power_pmu_enable()
1332 left = local64_read(&event->hw.period_left); power_pmu_enable()
1336 local64_set(&event->hw.prev_count, val); power_pmu_enable()
1339 event->hw.idx = idx; power_pmu_enable()
1340 if (event->hw.state & PERF_HES_STOPPED) power_pmu_enable()
1344 perf_event_update_userpage(event); power_pmu_enable()
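Near the top of this function the first event's exclude_user/exclude_kernel/exclude_hv flags are folded into MMCR0 freeze bits (all grouped events were already checked to agree on them). A sketch of that mapping, assuming the conventional MMCR0 bit names; the real code may choose a different kernel-freeze bit when running under a hypervisor:

	#include <asm/reg.h>		/* MMCR0_FCP, MMCR0_FCS, MMCR0_FCHV */
	#include <linux/perf_event.h>

	/* sketch: translate perf exclude flags into MMCR0 freeze-counters bits */
	static unsigned long mmcr0_freeze_bits(struct perf_event *event)
	{
		unsigned long mmcr0 = 0;

		if (event->attr.exclude_user)
			mmcr0 |= MMCR0_FCP;	/* freeze in problem (user) state */
		if (event->attr.exclude_kernel)
			mmcr0 |= MMCR0_FCS;	/* freeze in supervisor state */
		if (event->attr.exclude_hv)
			mmcr0 |= MMCR0_FCHV;	/* freeze in hypervisor state */

		return mmcr0;
	}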
1378 struct perf_event *event; collect_events() local
1387 list_for_each_entry(event, &group->sibling_list, group_entry) { collect_events()
1388 if (!is_software_event(event) && collect_events()
1389 event->state != PERF_EVENT_STATE_OFF) { collect_events()
1392 ctrs[n] = event; collect_events()
1393 flags[n] = event->hw.event_base; collect_events()
1394 events[n++] = event->hw.config; collect_events()
1401 * Add an event to the PMU.
1406 static int power_pmu_add(struct perf_event *event, int ef_flags) power_pmu_add() argument
1414 perf_pmu_disable(event->pmu); power_pmu_add()
1417 * Add the event to the list (if there is room) power_pmu_add()
1424 cpuhw->event[n0] = event; power_pmu_add()
1425 cpuhw->events[n0] = event->hw.config; power_pmu_add()
1426 cpuhw->flags[n0] = event->hw.event_base; power_pmu_add()
1429 * This event may have been disabled/stopped in record_and_restart() power_pmu_add()
1430 * because we exceeded the ->event_limit. If re-starting the event, power_pmu_add()
1435 event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; power_pmu_add()
1437 event->hw.state = 0; power_pmu_add()
1447 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) power_pmu_add()
1451 event->hw.config = cpuhw->events[n0]; power_pmu_add()
1454 ebb_event_add(event); power_pmu_add()
1461 if (has_branch_stack(event)) { power_pmu_add()
1462 power_pmu_bhrb_enable(event); power_pmu_add()
1464 event->attr.branch_sample_type); power_pmu_add()
1467 perf_pmu_enable(event->pmu); power_pmu_add()
1473 * Remove an event from the PMU.
1475 static void power_pmu_del(struct perf_event *event, int ef_flags) power_pmu_del() argument
1482 perf_pmu_disable(event->pmu); power_pmu_del()
1484 power_pmu_read(event); power_pmu_del()
1488 if (event == cpuhw->event[i]) { power_pmu_del()
1490 cpuhw->event[i-1] = cpuhw->event[i]; power_pmu_del()
1495 ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); power_pmu_del()
1496 if (event->hw.idx) { power_pmu_del()
1497 write_pmc(event->hw.idx, 0); power_pmu_del()
1498 event->hw.idx = 0; power_pmu_del()
1500 perf_event_update_userpage(event); power_pmu_del()
1505 if (event == cpuhw->limited_counter[i]) power_pmu_del()
1519 if (has_branch_stack(event)) power_pmu_del()
1520 power_pmu_bhrb_disable(event); power_pmu_del()
1522 perf_pmu_enable(event->pmu); power_pmu_del()
1531 static void power_pmu_start(struct perf_event *event, int ef_flags) power_pmu_start() argument
1537 if (!event->hw.idx || !event->hw.sample_period) power_pmu_start()
1540 if (!(event->hw.state & PERF_HES_STOPPED)) power_pmu_start()
1544 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); power_pmu_start()
1547 perf_pmu_disable(event->pmu); power_pmu_start()
1549 event->hw.state = 0; power_pmu_start()
1550 left = local64_read(&event->hw.period_left); power_pmu_start()
1556 write_pmc(event->hw.idx, val); power_pmu_start()
1558 perf_event_update_userpage(event); power_pmu_start()
1559 perf_pmu_enable(event->pmu); power_pmu_start()
1563 static void power_pmu_stop(struct perf_event *event, int ef_flags) power_pmu_stop() argument
1567 if (!event->hw.idx || !event->hw.sample_period) power_pmu_stop()
1570 if (event->hw.state & PERF_HES_STOPPED) power_pmu_stop()
1574 perf_pmu_disable(event->pmu); power_pmu_stop()
1576 power_pmu_read(event); power_pmu_stop()
1577 event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; power_pmu_stop()
1578 write_pmc(event->hw.idx, 0); power_pmu_stop()
1580 perf_event_update_userpage(event); power_pmu_stop()
1581 perf_pmu_enable(event->pmu); power_pmu_stop()
1626 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) power_pmu_commit_txn()
1633 cpuhw->event[i]->hw.config = cpuhw->events[i]; power_pmu_commit_txn()
1641 * Return 1 if we might be able to put the event on a limited PMC,
1643 * An event can only go on a limited PMC if it counts something
1647 static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, can_go_on_limited_pmc() argument
1653 if (event->attr.exclude_user can_go_on_limited_pmc()
1654 || event->attr.exclude_kernel can_go_on_limited_pmc()
1655 || event->attr.exclude_hv can_go_on_limited_pmc()
1656 || event->attr.sample_period) can_go_on_limited_pmc()
1700 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
1740 static int power_pmu_event_init(struct perf_event *event) power_pmu_event_init() argument
1754 if (has_branch_stack(event)) { power_pmu_event_init()
1760 switch (event->attr.type) { power_pmu_event_init()
1762 ev = event->attr.config; power_pmu_event_init()
1768 err = hw_perf_cache_event(event->attr.config, &ev); power_pmu_event_init()
1773 ev = event->attr.config; power_pmu_event_init()
1779 event->hw.config_base = ev; power_pmu_event_init()
1780 event->hw.idx = 0; power_pmu_event_init()
1788 event->attr.exclude_hv = 0; power_pmu_event_init()
1791 * If this is a per-task event, then we can use power_pmu_event_init()
1797 if (event->attach_state & PERF_ATTACH_TASK) power_pmu_event_init()
1802 * event_id could go on a limited PMC. power_pmu_event_init()
1805 if (can_go_on_limited_pmc(event, ev, flags)) { power_pmu_event_init()
1820 err = ebb_event_check(event); power_pmu_event_init()
1826 * other hardware events in the group. We assume the event power_pmu_event_init()
1830 if (event->group_leader != event) { power_pmu_event_init()
1831 n = collect_events(event->group_leader, ppmu->n_counter - 1, power_pmu_event_init()
1837 ctrs[n] = event; power_pmu_event_init()
1845 if (has_branch_stack(event)) { power_pmu_event_init()
1847 event->attr.branch_sample_type); power_pmu_event_init()
1859 event->hw.config = events[n]; power_pmu_event_init()
1860 event->hw.event_base = cflags[n]; power_pmu_event_init()
1861 event->hw.last_period = event->hw.sample_period; power_pmu_event_init()
1862 local64_set(&event->hw.period_left, event->hw.last_period); power_pmu_event_init()
1868 if (is_ebb_event(event)) power_pmu_event_init()
1869 local64_set(&event->hw.prev_count, 0); power_pmu_event_init()
1887 event->destroy = hw_perf_event_destroy; power_pmu_event_init()
1892 static int power_pmu_event_idx(struct perf_event *event) power_pmu_event_idx() argument
1894 return event->hw.idx; power_pmu_event_idx()
1904 return sprintf(page, "event=0x%02llx\n", pmu_attr->id); power_events_sysfs_show()
1928 static void record_and_restart(struct perf_event *event, unsigned long val, record_and_restart() argument
1931 u64 period = event->hw.sample_period; record_and_restart()
1935 if (event->hw.state & PERF_HES_STOPPED) { record_and_restart()
1936 write_pmc(event->hw.idx, 0); record_and_restart()
1941 prev = local64_read(&event->hw.prev_count); record_and_restart()
1943 local64_add(delta, &event->count); record_and_restart()
1946 * See if the total period for this event has expired, record_and_restart()
1950 left = local64_read(&event->hw.period_left) - delta; record_and_restart()
1959 event->hw.last_period = event->hw.sample_period; record_and_restart()
1965 write_pmc(event->hw.idx, val); record_and_restart()
1966 local64_set(&event->hw.prev_count, val); record_and_restart()
1967 local64_set(&event->hw.period_left, left); record_and_restart()
1968 perf_event_update_userpage(event); record_and_restart()
1976 perf_sample_data_init(&data, ~0ULL, event->hw.last_period); record_and_restart()
1978 if (event->attr.sample_type & PERF_SAMPLE_ADDR) record_and_restart()
1981 if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { record_and_restart()
1988 if (perf_event_overflow(event, &data, regs)) record_and_restart()
1989 power_pmu_stop(event, 0); record_and_restart()
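The value written back by write_pmc() above is chosen so the next performance monitor exception arrives after `left' more events: the PMCs are 32 bits wide and the exception fires when bit 31 becomes set. A sketch of that arithmetic (not the exact kernel code, which also folds the sample period back in once it has expired):

	#include <linux/types.h>

	/* sketch: reload value for a 32-bit PMC that interrupts when bit 31 sets */
	static u64 pmc_reload_value(s64 left)
	{
		if (left > 0 && left < 0x80000000LL)
			return 0x80000000LL - left;	/* overflows after 'left' counts */
		return 0;				/* otherwise count a full 2^31 first */
	}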
2026 * Events on POWER7 can roll back if a speculative event doesn't pmc_overflow_power7()
2057 struct perf_event *event; perf_event_interrupt() local
2093 event = cpuhw->event[j]; perf_event_interrupt()
2094 if (event->hw.idx == (i + 1)) { perf_event_interrupt()
2096 record_and_restart(event, val[i], regs); perf_event_interrupt()
2107 event = cpuhw->event[i]; perf_event_interrupt()
2108 if (!event->hw.idx || is_limited_pmc(event->hw.idx)) perf_event_interrupt()
2110 if (pmc_overflow_power7(val[event->hw.idx - 1])) { perf_event_interrupt()
2111 /* event has overflowed in a buggy way */ perf_event_interrupt()
2113 record_and_restart(event, perf_event_interrupt()
2114 val[event->hw.idx - 1], perf_event_interrupt()
H A Dhv-24x7.c194 * Otherwise return the address of the byte just following the event.
275 /* chip event data always yields a single event, core yields multiple */
278 static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain) event_fmt() argument
294 be16_to_cpu(event->event_counter_offs) + event_fmt()
295 be16_to_cpu(event->event_group_record_offs), event_fmt()
373 struct hv_24x7_event_data *event, event_to_attr()
383 pr_warn("catalog event %u has invalid domain %u\n", event_to_attr()
388 val = event_fmt(event, domain); event_to_attr()
393 ev_name = event_name(event, &event_name_len); event_to_attr()
416 static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event, event_to_desc_attr() argument
420 char *name = event_name(event, &nl); event_to_desc_attr()
421 char *desc = event_desc(event, &dl); event_to_desc_attr()
431 event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce) event_to_long_desc_attr() argument
434 char *name = event_name(event, &nl); event_to_long_desc_attr()
435 char *desc = event_long_desc(event, &dl); event_to_long_desc_attr()
445 struct hv_24x7_event_data *event, int nonce) event_data_to_attrs()
449 switch (event->domain) { event_data_to_attrs()
451 *attrs = event_to_attr(ix, event, event->domain, nonce); event_data_to_attrs()
455 attrs[i] = event_to_attr(ix, event, core_domains[i], event_data_to_attrs()
458 pr_warn("catalog event %u: individual attr %u " event_data_to_attrs()
467 pr_warn("catalog event %u: domain %u is not allowed in the " event_data_to_attrs()
468 "catalog\n", ix, event->domain); event_data_to_attrs()
473 static size_t event_to_attr_ct(struct hv_24x7_event_data *event) event_to_attr_ct() argument
475 switch (event->domain) { event_to_attr_ct()
548 pr_info("found a duplicate event %.*s, ct=%u\n", nl, event_uniq_add()
587 * ensure the event structure's sizes are self consistent and don't cause us to
588 * read outside of the event
590 * On success, return the event length in bytes.
593 static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event, catalog_event_len_validate() argument
606 pr_devel("catalog event data has %zu bytes of padding after last event\n", catalog_event_len_validate()
611 if (!event_fixed_portion_is_within(event, end)) { catalog_event_len_validate()
612 pr_warn("event %zu fixed portion is not within range\n", catalog_event_len_validate()
617 ev_len = be16_to_cpu(event->length); catalog_event_len_validate()
620 pr_info("event %zu has length %zu not divisible by 16: event=%pK\n", catalog_event_len_validate()
621 event_idx, ev_len, event); catalog_event_len_validate()
623 ev_end = (__u8 *)event + ev_len; catalog_event_len_validate()
625 pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n", catalog_event_len_validate()
631 calc_ev_end = event_end(event, end); catalog_event_len_validate()
633 pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n", catalog_event_len_validate()
634 event_idx, event_data_bytes, event, end, catalog_event_len_validate()
640 pr_warn("event %zu exceeds it's own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n", catalog_event_len_validate()
641 event_idx, event, ev_end, offset, calc_ev_end); catalog_event_len_validate()
666 struct hv_24x7_event_data *event; create_events_from_catalog() local
703 pr_err("invalid event data offs %zu and/or len %zu\n", create_events_from_catalog()
710 pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n", create_events_from_catalog()
728 * event data can span several pages, events can cross between these create_events_from_catalog()
733 pr_err("could not allocate event data\n"); create_events_from_catalog()
752 pr_err("failed to get event data in page %zu\n", create_events_from_catalog()
763 for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0; create_events_from_catalog()
765 event_idx++, event = (void *)event + ev_len) { create_events_from_catalog()
766 size_t offset = (void *)event - (void *)event_data; create_events_from_catalog()
770 ev_len = catalog_event_len_validate(event, event_idx, create_events_from_catalog()
777 name = event_name(event, &nl); create_events_from_catalog()
779 if (event->event_group_record_len == 0) { create_events_from_catalog()
780 pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n", create_events_from_catalog()
786 if (!catalog_entry_domain_is_valid(event->domain)) { create_events_from_catalog()
787 pr_info("event %zu (%.*s) has invalid domain %d\n", create_events_from_catalog()
788 event_idx, nl, name, event->domain); create_events_from_catalog()
793 attr_max += event_to_attr_ct(event); create_events_from_catalog()
798 pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n", create_events_from_catalog()
823 event = event_data, event_idx = 0; create_events_from_catalog()
825 event_idx++, ev_len = be16_to_cpu(event->length), create_events_from_catalog()
826 event = (void *)event + ev_len) { create_events_from_catalog()
834 if (event->event_group_record_len == 0) create_events_from_catalog()
836 if (!catalog_entry_domain_is_valid(event->domain)) create_events_from_catalog()
839 name = event_name(event, &nl); create_events_from_catalog()
840 nonce = event_uniq_add(&ev_uniq, name, nl, event->domain); create_events_from_catalog()
842 event, nonce); create_events_from_catalog()
844 pr_warn("event %zu (%.*s) creation failure, skipping\n", create_events_from_catalog()
849 event_descs[desc_ct] = event_to_desc_attr(event, nonce); create_events_from_catalog()
853 event_to_long_desc_attr(event, nonce); create_events_from_catalog()
859 pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n", create_events_from_catalog()
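create_events_from_catalog() treats the catalog as a stream of variable-length records, advancing by each record's own length field only after catalog_event_len_validate() has vetted it. A minimal, self-contained sketch of that walk with a hypothetical record type (the real event layout has many more fields):

	#include <asm/byteorder.h>
	#include <linux/types.h>

	struct rec {
		__be16 length;		/* total record size, a multiple of 16 */
		/* ... payload ... */
	};

	static void walk_records(void *buf, size_t buf_len)
	{
		void *end = buf + buf_len;
		struct rec *r = buf;
		size_t len;

		while ((void *)r + sizeof(*r) <= end) {
			len = be16_to_cpu(r->length);
			if (len < sizeof(*r) || len % 16 || (void *)r + len > end)
				break;			/* malformed record: stop */
			/* ... consume the record ... */
			r = (void *)r + len;
		}
	}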
1062 * Add the given @event to the next slot in the 24x7 request_buffer.
1068 static int add_event_to_24x7_request(struct perf_event *event, add_event_to_24x7_request() argument
1081 if (is_physical_domain(event_get_domain(event))) add_event_to_24x7_request()
1082 idx = event_get_core(event); add_event_to_24x7_request()
1084 idx = event_get_vcpu(event); add_event_to_24x7_request()
1089 req->performance_domain = event_get_domain(event); add_event_to_24x7_request()
1091 req->data_offset = cpu_to_be32(event_get_offset(event)); add_event_to_24x7_request()
1092 req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)), add_event_to_24x7_request()
1100 static unsigned long single_24x7_request(struct perf_event *event, u64 *count) single_24x7_request() argument
1115 ret = add_event_to_24x7_request(event, request_buffer); single_24x7_request()
1136 static int h_24x7_event_init(struct perf_event *event) h_24x7_event_init() argument
1143 /* Not our event */ h_24x7_event_init()
1144 if (event->attr.type != event->pmu->type) h_24x7_event_init()
1148 if (event_get_reserved1(event) || h_24x7_event_init()
1149 event_get_reserved2(event) || h_24x7_event_init()
1150 event_get_reserved3(event)) { h_24x7_event_init()
1152 event->attr.config, h_24x7_event_init()
1153 event_get_reserved1(event), h_24x7_event_init()
1154 event->attr.config1, h_24x7_event_init()
1155 event_get_reserved2(event), h_24x7_event_init()
1156 event->attr.config2, h_24x7_event_init()
1157 event_get_reserved3(event)); h_24x7_event_init()
1162 if (event->attr.exclude_user || h_24x7_event_init()
1163 event->attr.exclude_kernel || h_24x7_event_init()
1164 event->attr.exclude_hv || h_24x7_event_init()
1165 event->attr.exclude_idle || h_24x7_event_init()
1166 event->attr.exclude_host || h_24x7_event_init()
1167 event->attr.exclude_guest) h_24x7_event_init()
1171 if (has_branch_stack(event)) h_24x7_event_init()
1175 if (event_get_offset(event) % 8) { h_24x7_event_init()
1181 domain = event_get_domain(event); h_24x7_event_init()
1195 (event_get_lpar(event) != event_get_lpar_max()))) { h_24x7_event_init()
1198 event_get_lpar(event)); h_24x7_event_init()
1202 /* see if the event complains */ h_24x7_event_init()
1203 if (single_24x7_request(event, &ct)) { h_24x7_event_init()
1211 static u64 h_24x7_get_value(struct perf_event *event) h_24x7_get_value() argument
1215 ret = single_24x7_request(event, &ct); h_24x7_get_value()
1217 /* We checked this in event init, shouldn't fail here... */ h_24x7_get_value()
1223 static void update_event_count(struct perf_event *event, u64 now) update_event_count() argument
1227 prev = local64_xchg(&event->hw.prev_count, now); update_event_count()
1228 local64_add(now - prev, &event->count); update_event_count()
1231 static void h_24x7_event_read(struct perf_event *event) h_24x7_event_read() argument
1235 now = h_24x7_get_value(event); h_24x7_event_read()
1236 update_event_count(event, now); h_24x7_event_read()
1239 static void h_24x7_event_start(struct perf_event *event, int flags) h_24x7_event_start() argument
1242 local64_set(&event->hw.prev_count, h_24x7_get_value(event)); h_24x7_event_start()
1245 static void h_24x7_event_stop(struct perf_event *event, int flags) h_24x7_event_stop() argument
1247 h_24x7_event_read(event); h_24x7_event_stop()
1250 static int h_24x7_event_add(struct perf_event *event, int flags) h_24x7_event_add() argument
1253 h_24x7_event_start(event, flags); h_24x7_event_add()
372 event_to_attr(unsigned ix, struct hv_24x7_event_data *event, unsigned domain, int nonce) event_to_attr() argument
444 event_data_to_attrs(unsigned ix, struct attribute **attrs, struct hv_24x7_event_data *event, int nonce) event_data_to_attrs() argument
H A Dpower7-pmu.c18 * Bits in event code for POWER7
25 #define PM_COMBINE_SH 11 /* Combined event bit */
28 #define PM_L2SEL_SH 8 /* L2 event select */
54 * Power7 event codes.
85 static int power7_get_constraint(u64 event, unsigned long *maskp, power7_get_constraint() argument
91 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power7_get_constraint()
98 if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4)) power7_get_constraint()
107 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power7_get_constraint()
110 int l2sel = (event >> PM_L2SEL_SH) & PM_L2SEL_MSK; power7_get_constraint()
120 #define MAX_ALT 2 /* at most 2 alternatives for any event */
132 static int find_alternative(u64 event) find_alternative() argument
137 if (event < event_alternatives[i][0]) find_alternative()
140 if (event == event_alternatives[i][j]) find_alternative()
146 static s64 find_alternative_decode(u64 event) find_alternative_decode() argument
151 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; find_alternative_decode()
152 psel = event & PM_PMCSEL_MSK; find_alternative_decode()
154 return event - (1 << PM_PMC_SH) + 8; find_alternative_decode()
156 return event + (1 << PM_PMC_SH) - 8; find_alternative_decode()
160 static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[]) power7_get_alternatives() argument
165 alt[0] = event; power7_get_alternatives()
167 i = find_alternative(event); power7_get_alternatives()
171 if (ae && ae != event) power7_get_alternatives()
175 ae = find_alternative_decode(event); power7_get_alternatives()
212 * Returns 1 if event counts things relating to marked instructions
215 static int power7_marked_instr_event(u64 event) power7_marked_instr_event() argument
220 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power7_marked_instr_event()
221 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power7_marked_instr_event()
222 psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */ power7_marked_instr_event()
247 static int power7_compute_mmcr(u64 event[], int n_ev, power7_compute_mmcr() argument
258 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power7_compute_mmcr()
270 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power7_compute_mmcr()
271 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power7_compute_mmcr()
272 combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK; power7_compute_mmcr()
273 l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK; power7_compute_mmcr()
274 psel = event[i] & PM_PMCSEL_MSK; power7_compute_mmcr()
276 /* Bus event or any-PMC direct event */ power7_compute_mmcr()
285 /* Direct or decoded event */ power7_compute_mmcr()
298 if (power7_marked_instr_event(event[i])) power7_compute_mmcr()
336 * are event codes.
412 PMU_FORMAT_ATTR(event, "config:0-19");
H A Dmpc7450-pmu.c17 #define MAX_ALT 3 /* Maximum number of event alternative codes */
20 * Bits in event code for MPC7450 family
37 * -1: event code is invalid
41 static int mpc7450_classify_event(u32 event) mpc7450_classify_event() argument
45 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; mpc7450_classify_event()
51 event &= PM_PMCSEL_MSK; mpc7450_classify_event()
52 if (event <= 1) mpc7450_classify_event()
54 if (event <= 7) mpc7450_classify_event()
56 if (event <= 13) mpc7450_classify_event()
58 if (event <= 22) mpc7450_classify_event()
81 static int mpc7450_threshold_use(u32 event) mpc7450_threshold_use() argument
85 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; mpc7450_threshold_use()
86 sel = event & PM_PMCSEL_MSK; mpc7450_threshold_use()
154 static int mpc7450_get_constraint(u64 event, unsigned long *maskp, mpc7450_get_constraint() argument
161 class = mpc7450_classify_event(event); mpc7450_get_constraint()
165 pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK; mpc7450_get_constraint()
173 tuse = mpc7450_threshold_use(event); mpc7450_get_constraint()
175 thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK; mpc7450_get_constraint()
180 if ((unsigned int)event & PM_THRMULT_MSKS) mpc7450_get_constraint()
212 static int find_alternative(u32 event) find_alternative() argument
217 if (event < event_alternatives[i][0]) find_alternative()
220 if (event == event_alternatives[i][j]) find_alternative()
226 static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[]) mpc7450_get_alternatives() argument
231 alt[0] = event; mpc7450_get_alternatives()
233 i = find_alternative((u32)event); mpc7450_get_alternatives()
237 if (ae && ae != (u32)event) mpc7450_get_alternatives()
263 static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], mpc7450_compute_mmcr() argument
281 class = mpc7450_classify_event(event[i]); mpc7450_compute_mmcr()
288 /* Second pass: allocate PMCs from most specific event to least */ mpc7450_compute_mmcr()
291 ev = event[event_index[class][i]]; mpc7450_compute_mmcr()
359 * are event codes.
H A Dpower5-pmu.c18 * Bits in event code for POWER5 (not POWER5++)
25 #define PM_BYTE_SH 12 /* Byte number of event bus to use */
29 #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */
114 * 24-27: Byte 0 event source 0x0f00_0000
115 * Encoding as for the event code
118 * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
140 static int power5_get_constraint(u64 event, unsigned long *maskp, power5_get_constraint() argument
148 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5_get_constraint()
157 else if (event != 0x500009 && event != 0x600005) power5_get_constraint()
160 if (event & PM_BUSEVENT_MSK) { power5_get_constraint()
161 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power5_get_constraint()
168 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power5_get_constraint()
177 bit = event & 7; power5_get_constraint()
181 value |= (unsigned long)((event >> PM_GRS_SH) & fmask) power5_get_constraint()
213 #define MAX_ALT 3 /* at most 3 alternatives for any event */
227 static int find_alternative(u64 event) find_alternative() argument
232 if (event < event_alternatives[i][0]) find_alternative()
235 if (event == event_alternatives[i][j]) find_alternative()
249 * Some direct events for decodes of event bus byte 3 have alternative
251 * event code for those that do, or -1 otherwise.
253 static s64 find_alternative_bdecode(u64 event) find_alternative_bdecode() argument
257 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; find_alternative_bdecode()
261 pp = event & PM_PMCSEL_MSK; find_alternative_bdecode()
264 return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | find_alternative_bdecode()
272 static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[]) power5_get_alternatives() argument
277 alt[0] = event; power5_get_alternatives()
279 i = find_alternative(event); power5_get_alternatives()
283 if (ae && ae != event) power5_get_alternatives()
287 ae = find_alternative_bdecode(event); power5_get_alternatives()
296 * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
338 * Returns 1 if event counts things relating to marked instructions
341 static int power5_marked_instr_event(u64 event) power5_marked_instr_event() argument
347 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power5_marked_instr_event()
348 psel = event & PM_PMCSEL_MSK; power5_marked_instr_event()
367 if (!(event & PM_BUSEVENT_MSK)) power5_marked_instr_event()
370 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power5_marked_instr_event()
371 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power5_marked_instr_event()
385 static int power5_compute_mmcr(u64 event[], int n_ev, power5_compute_mmcr() argument
407 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power5_compute_mmcr()
418 if (event[i] & PM_BUSEVENT_MSK) { power5_compute_mmcr()
419 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power5_compute_mmcr()
420 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; power5_compute_mmcr()
492 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; power5_compute_mmcr()
493 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; power5_compute_mmcr()
494 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; power5_compute_mmcr()
495 psel = event[i] & PM_PMCSEL_MSK; power5_compute_mmcr()
496 isbus = event[i] & PM_BUSEVENT_MSK; power5_compute_mmcr()
498 /* Bus event or any-PMC direct event */ power5_compute_mmcr()
513 /* Direct event */ power5_compute_mmcr()
524 grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; power5_compute_mmcr()
527 if (power5_marked_instr_event(event[i])) power5_compute_mmcr()
565 * are event codes.
H A Dpower6-pmu.c18 * Bits in event code for POWER6
23 #define PM_UNIT_SH 16 /* Unit event comes from (TTMxSEL encoding) */
28 #define PM_BYTE_SH 12 /* Byte of event bus to use */
30 #define PM_SUBUNIT_SH 8 /* Subunit event comes from (NEST_SEL enc.) */
57 * top 4 bits say what sort of event:
58 * 0 = direct marked event,
59 * 1 = byte decode event,
60 * 4 = add/and event (PMC1 -> bits 0 & 4),
61 * 5 = add/and event (PMC1 -> bits 1 & 5),
62 * 6 = add/and event (PMC1 -> bits 2 & 6),
63 * 7 = add/and event (PMC1 -> bits 3 & 7).
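The encoding above can be read back mechanically from a table entry; a brief sketch, where `entry' and the table it comes from are hypothetical names:

	/* sketch: classify a marked-event table entry by its top nibble */
	static const char *classify_entry(unsigned char entry)
	{
		switch (entry >> 4) {
		case 0:
			return "direct marked event";
		case 1:
			return "byte decode event";
		case 4 ... 7:
			/* add/and: PMC1 maps to bits (n - 4) and n, per the list above */
			return "add/and event";
		default:
			return "unrecognised";
		}
	}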
136 * Returns 1 if event counts things relating to marked instructions
139 static int power6_marked_instr_event(u64 event) power6_marked_instr_event() argument
145 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; power6_marked_instr_event()
146 psel = (event & PM_PMCSEL_MSK) >> 1; /* drop edge/level bit */ power6_marked_instr_event()
165 if (!(event & PM_BUSEVENT_MSK) || bit == -1) power6_marked_instr_event()
168 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; power6_marked_instr_event()
169 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; power6_marked_instr_event()
177 static int p6_compute_mmcr(u64 event[], int n_ev, p6_compute_mmcr() argument
190 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p6_compute_mmcr()
198 ev = event[i]; p6_compute_mmcr()
214 /* this event uses the event bus */ p6_compute_mmcr()
217 /* check for conflict on this byte of event bus */ p6_compute_mmcr()
245 if (power6_marked_instr_event(event[i])) p6_compute_mmcr()
266 * 16-19 select field: unit on byte 0 of event bus
268 * 32-34 select field: nest (subunit) event selector
270 static int p6_get_constraint(u64 event, unsigned long *maskp, p6_get_constraint() argument
276 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p6_get_constraint()
278 if (pmc > 4 && !(event == 0x500009 || event == 0x600005)) p6_get_constraint()
284 if (event & PM_BUSEVENT_MSK) { p6_get_constraint()
285 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p6_get_constraint()
288 value |= (unsigned long)(event & PM_UNIT_MSKS) << sh; p6_get_constraint()
289 if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { p6_get_constraint()
290 subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; p6_get_constraint()
304 static int p6_limited_pmc_event(u64 event) p6_limited_pmc_event() argument
306 int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p6_limited_pmc_event()
311 #define MAX_ALT 4 /* at most 4 alternatives for any event */
343 static int find_alternatives_list(u64 event) find_alternatives_list() argument
349 if (event < event_alternatives[i][0]) find_alternatives_list()
353 if (!alt || event < alt) find_alternatives_list()
355 if (event == alt) find_alternatives_list()
362 static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[]) p6_get_alternatives() argument
369 alt[0] = event; p6_get_alternatives()
370 nlim = p6_limited_pmc_event(event); p6_get_alternatives()
373 i = find_alternatives_list(event); p6_get_alternatives()
380 if (aevent != event) p6_get_alternatives()
388 psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */ p6_get_alternatives()
389 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p6_get_alternatives()
391 alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) | p6_get_alternatives()
396 alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) | p6_get_alternatives()
409 * we never end up with more than 4 alternatives for any event. p6_get_alternatives()
485 * are event codes.
H A Dppc970-pmu.c17 * Bits in event code for PPC970
25 #define PM_BYTE_SH 4 /* Byte number of event bus to use */
117 * 28-31: Byte 0 event source 0xf000_0000
118 * Encoding as for the event code
121 * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
144 * Returns 1 if event counts things relating to marked instructions
147 static int p970_marked_instr_event(u64 event) p970_marked_instr_event() argument
152 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p970_marked_instr_event()
153 psel = event & PM_PMCSEL_MSK; p970_marked_instr_event()
166 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p970_marked_instr_event()
167 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; p970_marked_instr_event()
194 static int p970_get_constraint(u64 event, unsigned long *maskp, p970_get_constraint() argument
201 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p970_get_constraint()
210 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; p970_get_constraint()
216 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p970_get_constraint()
236 spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; p970_get_constraint()
246 static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[]) p970_get_alternatives() argument
248 alt[0] = event; p970_get_alternatives()
251 if (event == 0x2002 || event == 0x3002) { p970_get_alternatives()
252 alt[1] = event ^ 0x1000; p970_get_alternatives()
259 static int p970_compute_mmcr(u64 event[], int n_ev, p970_compute_mmcr() argument
283 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p970_compute_mmcr()
291 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; p970_compute_mmcr()
292 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; p970_compute_mmcr()
350 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p970_compute_mmcr()
351 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; p970_compute_mmcr()
352 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; p970_compute_mmcr()
353 psel = event[i] & PM_PMCSEL_MSK; p970_compute_mmcr()
355 /* Bus event or any-PMC direct event */ p970_compute_mmcr()
374 /* Direct event */ p970_compute_mmcr()
382 spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; p970_compute_mmcr()
384 if (p970_marked_instr_event(event[i])) p970_compute_mmcr()
437 * are event codes.
H A Dpower4-pmu.c18 * Bits in event code for POWER4
27 #define PM_BYTE_SH 4 /* Byte number of event bus to use */
156 * 28-31: Byte 0 event source 0xf000_0000
167 * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
215 * Returns 1 if event counts things relating to marked instructions
218 static int p4_marked_instr_event(u64 event) p4_marked_instr_event() argument
223 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p4_marked_instr_event()
224 psel = event & PM_PMCSEL_MSK; p4_marked_instr_event()
237 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p4_marked_instr_event()
238 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; p4_marked_instr_event()
242 if (event & PM_LOWER_MSKS) p4_marked_instr_event()
254 static int p4_get_constraint(u64 event, unsigned long *maskp, p4_get_constraint() argument
261 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; p4_get_constraint()
270 unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; p4_get_constraint()
271 byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; p4_get_constraint()
273 lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK; p4_get_constraint()
308 if (p4_marked_instr_event(event)) { p4_get_constraint()
314 if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2) p4_get_constraint()
326 static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[]) p4_get_alternatives() argument
330 alt[0] = event; p4_get_alternatives()
334 if (event == 0x8003 || event == 0x0224) { p4_get_alternatives()
335 alt[1] = event ^ (0x8003 ^ 0x0224); p4_get_alternatives()
340 if (event == 0x0c13 || event == 0x0c23) { p4_get_alternatives()
341 alt[1] = event ^ (0x0c13 ^ 0x0c23); p4_get_alternatives()
347 if (event == ppc_inst_cmpl[i]) { p4_get_alternatives()
358 static int p4_compute_mmcr(u64 event[], int n_ev, p4_compute_mmcr() argument
379 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p4_compute_mmcr()
387 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; p4_compute_mmcr()
388 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; p4_compute_mmcr()
389 lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK; p4_compute_mmcr()
471 pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; p4_compute_mmcr()
472 unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; p4_compute_mmcr()
473 byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; p4_compute_mmcr()
474 psel = event[i] & PM_PMCSEL_MSK; p4_compute_mmcr()
476 /* Bus event or 00xxx direct event (off or cycles) */ p4_compute_mmcr()
493 /* Direct event */ p4_compute_mmcr()
510 if (p4_marked_instr_event(event[i])) p4_compute_mmcr()
557 * are event codes.
H A Dpower8-pmu.c22 * Some power8 event codes.
64 * Raw event encoding for POWER8:
113 * else if cache_sel[1]: # L1 event
230 * the fifth event to overflow and set the 4th bit. To achieve that we bias the
276 static inline bool event_is_fab_match(u64 event) event_is_fab_match() argument
279 event &= 0xff0fe; event_is_fab_match()
282 return (event == 0x30056 || event == 0x4f052); event_is_fab_match()
285 static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) power8_get_constraint() argument
292 if (event & ~EVENT_VALID_MASK) power8_get_constraint()
295 pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; power8_get_constraint()
296 unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; power8_get_constraint()
297 cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; power8_get_constraint()
298 ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK; power8_get_constraint()
306 /* Ignore Linux defined bits when checking event below */ power8_get_constraint()
307 base_event = event & ~EVENT_LINUX_MASK; power8_get_constraint()
320 * Don't count events on PMC 5 & 6, there is only one valid event power8_get_constraint()
340 } else if (event & EVENT_IS_L1) { power8_get_constraint()
345 if (event & EVENT_IS_MARKED) { power8_get_constraint()
347 value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); power8_get_constraint()
354 if (event_is_fab_match(event)) { power8_get_constraint()
356 value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT); power8_get_constraint()
364 cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; power8_get_constraint()
371 value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); power8_get_constraint()
378 if (event & EVENT_WANTS_BHRB) { power8_get_constraint()
384 value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT); power8_get_constraint()
401 static int power8_compute_mmcr(u64 event[], int n_ev, power8_compute_mmcr() argument
413 pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; power8_compute_mmcr()
424 pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; power8_compute_mmcr()
425 unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; power8_compute_mmcr()
426 combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK; power8_compute_mmcr()
427 psel = event[i] & EVENT_PSEL_MASK; power8_compute_mmcr()
444 if (event[i] & EVENT_IS_L1) { power8_compute_mmcr()
445 cache = event[i] >> EVENT_CACHE_SEL_SHIFT; power8_compute_mmcr()
451 if (event[i] & EVENT_IS_MARKED) { power8_compute_mmcr()
454 val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; power8_compute_mmcr()
465 if (event_is_fab_match(event[i])) { power8_compute_mmcr()
466 mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) & power8_compute_mmcr()
469 val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; power8_compute_mmcr()
471 val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK; power8_compute_mmcr()
473 val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; power8_compute_mmcr()
477 if (event[i] & EVENT_WANTS_BHRB) { power8_compute_mmcr()
478 val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK; power8_compute_mmcr()
540 static int find_alternative(u64 event) find_alternative() argument
545 if (event < event_alternatives[i][0]) find_alternative()
549 if (event == event_alternatives[i][j]) find_alternative()
556 static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[]) power8_get_alternatives() argument
561 alt[num_alt++] = event; power8_get_alternatives()
563 i = find_alternative(event); power8_get_alternatives()
565 /* Filter out the original event, it's already in alt[0] */ power8_get_alternatives()
568 if (alt_event && alt_event != event) power8_get_alternatives()
607 PMU_FORMAT_ATTR(event, "config:0-49");
663 * regular PMU event. As the privilege state filter is handled power8_bhrb_filter_map()
665 * PMU event, we ignore any separate BHRB specific request. power8_bhrb_filter_map()
699 * are event codes.
/linux-4.1.27/tools/perf/util/
H A Devent.c3 #include "event.h"
125 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, perf_event__prepare_comm() argument
133 memset(&event->comm, 0, sizeof(event->comm)); perf_event__prepare_comm()
136 if (perf_event__get_comm_ids(pid, event->comm.comm, perf_event__prepare_comm()
137 sizeof(event->comm.comm), perf_event__prepare_comm()
148 event->comm.pid = *tgid; perf_event__prepare_comm()
149 event->comm.header.type = PERF_RECORD_COMM; perf_event__prepare_comm()
151 size = strlen(event->comm.comm) + 1; perf_event__prepare_comm()
153 memset(event->comm.comm + size, 0, machine->id_hdr_size); perf_event__prepare_comm()
154 event->comm.header.size = (sizeof(event->comm) - perf_event__prepare_comm()
155 (sizeof(event->comm.comm) - size) + perf_event__prepare_comm()
157 event->comm.tid = pid; perf_event__prepare_comm()
163 union perf_event *event, pid_t pid, perf_event__synthesize_comm()
169 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) perf_event__synthesize_comm()
172 if (process(tool, event, &synth_sample, machine) != 0) perf_event__synthesize_comm()
179 union perf_event *event, perf_event__synthesize_fork()
184 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); perf_event__synthesize_fork()
192 event->fork.ppid = ppid; perf_event__synthesize_fork()
193 event->fork.ptid = ppid; perf_event__synthesize_fork()
195 event->fork.ppid = tgid; perf_event__synthesize_fork()
196 event->fork.ptid = tgid; perf_event__synthesize_fork()
198 event->fork.pid = tgid; perf_event__synthesize_fork()
199 event->fork.tid = pid; perf_event__synthesize_fork()
200 event->fork.header.type = PERF_RECORD_FORK; perf_event__synthesize_fork()
202 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); perf_event__synthesize_fork()
204 if (process(tool, event, &synth_sample, machine) != 0) perf_event__synthesize_fork()
211 union perf_event *event, perf_event__synthesize_mmap_events()
236 event->header.type = PERF_RECORD_MMAP2; perf_event__synthesize_mmap_events()
255 &event->mmap2.start, &event->mmap2.len, prot, perf_event__synthesize_mmap_events()
256 &event->mmap2.pgoff, &event->mmap2.maj, perf_event__synthesize_mmap_events()
257 &event->mmap2.min, perf_event__synthesize_mmap_events()
266 event->mmap2.ino = (u64)ino; perf_event__synthesize_mmap_events()
272 event->header.misc = PERF_RECORD_MISC_USER; perf_event__synthesize_mmap_events()
274 event->header.misc = PERF_RECORD_MISC_GUEST_USER; perf_event__synthesize_mmap_events()
277 event->mmap2.prot = 0; perf_event__synthesize_mmap_events()
278 event->mmap2.flags = 0; perf_event__synthesize_mmap_events()
280 event->mmap2.prot |= PROT_READ; perf_event__synthesize_mmap_events()
282 event->mmap2.prot |= PROT_WRITE; perf_event__synthesize_mmap_events()
284 event->mmap2.prot |= PROT_EXEC; perf_event__synthesize_mmap_events()
287 event->mmap2.flags |= MAP_SHARED; perf_event__synthesize_mmap_events()
289 event->mmap2.flags |= MAP_PRIVATE; perf_event__synthesize_mmap_events()
295 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; perf_event__synthesize_mmap_events()
302 memcpy(event->mmap2.filename, execname, size); perf_event__synthesize_mmap_events()
304 event->mmap2.len -= event->mmap.start; perf_event__synthesize_mmap_events()
305 event->mmap2.header.size = (sizeof(event->mmap2) - perf_event__synthesize_mmap_events()
306 (sizeof(event->mmap2.filename) - size)); perf_event__synthesize_mmap_events()
307 memset(event->mmap2.filename + size, 0, machine->id_hdr_size); perf_event__synthesize_mmap_events()
308 event->mmap2.header.size += machine->id_hdr_size; perf_event__synthesize_mmap_events()
309 event->mmap2.pid = tgid; perf_event__synthesize_mmap_events()
310 event->mmap2.tid = pid; perf_event__synthesize_mmap_events()
312 if (process(tool, event, &synth_sample, machine) != 0) { perf_event__synthesize_mmap_events()
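The header.size arithmetic above trims the fixed-size filename array down to the bytes actually used and then reserves room for the sample_id words the kernel would have appended (machine->id_hdr_size). A self-contained sketch of the same computation with a hypothetical record type; the used length is rounded up so the record size stays a multiple of 8, as perf records require:

	#include <stddef.h>
	#include <string.h>

	struct synth_rec {
		unsigned short size;
		char filename[4096];
	};

	static void set_synth_size(struct synth_rec *rec, const char *name,
				   unsigned short id_hdr_size)
	{
		size_t used = strlen(name) + 1;
		size_t aligned = (used + 7) & ~(size_t)7;	/* round up to 8 */

		memcpy(rec->filename, name, used);
		rec->size = sizeof(*rec) - (sizeof(rec->filename) - aligned)
			    + id_hdr_size;
	}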
329 union perf_event *event = zalloc((sizeof(event->mmap) + perf_event__synthesize_modules() local
331 if (event == NULL) { perf_event__synthesize_modules()
332 pr_debug("Not enough memory synthesizing mmap event " perf_event__synthesize_modules()
337 event->header.type = PERF_RECORD_MMAP; perf_event__synthesize_modules()
344 event->header.misc = PERF_RECORD_MISC_KERNEL; perf_event__synthesize_modules()
346 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; perf_event__synthesize_modules()
357 event->mmap.header.type = PERF_RECORD_MMAP; perf_event__synthesize_modules()
358 event->mmap.header.size = (sizeof(event->mmap) - perf_event__synthesize_modules()
359 (sizeof(event->mmap.filename) - size)); perf_event__synthesize_modules()
360 memset(event->mmap.filename + size, 0, machine->id_hdr_size); perf_event__synthesize_modules()
361 event->mmap.header.size += machine->id_hdr_size; perf_event__synthesize_modules()
362 event->mmap.start = pos->start; perf_event__synthesize_modules()
363 event->mmap.len = pos->end - pos->start; perf_event__synthesize_modules()
364 event->mmap.pid = machine->pid; perf_event__synthesize_modules()
366 memcpy(event->mmap.filename, pos->dso->long_name, perf_event__synthesize_modules()
368 if (process(tool, event, &synth_sample, machine) != 0) { perf_event__synthesize_modules()
374 free(event); perf_event__synthesize_modules()
392 /* special case: only send one comm event using passed in pid */ __event__synthesize_thread()
433 * Send the prepared comm event __event__synthesize_thread()
620 union perf_event *event; perf_event__synthesize_kernel_mmap() local
630 event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); perf_event__synthesize_kernel_mmap()
631 if (event == NULL) { perf_event__synthesize_kernel_mmap()
632 pr_debug("Not enough memory synthesizing mmap event " perf_event__synthesize_kernel_mmap()
643 event->header.misc = PERF_RECORD_MISC_KERNEL; perf_event__synthesize_kernel_mmap()
645 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; perf_event__synthesize_kernel_mmap()
650 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), perf_event__synthesize_kernel_mmap()
653 event->mmap.header.type = PERF_RECORD_MMAP; perf_event__synthesize_kernel_mmap()
654 event->mmap.header.size = (sizeof(event->mmap) - perf_event__synthesize_kernel_mmap()
655 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); perf_event__synthesize_kernel_mmap()
656 event->mmap.pgoff = kmap->ref_reloc_sym->addr; perf_event__synthesize_kernel_mmap()
657 event->mmap.start = map->start; perf_event__synthesize_kernel_mmap()
658 event->mmap.len = map->end - event->mmap.start; perf_event__synthesize_kernel_mmap()
659 event->mmap.pid = machine->pid; perf_event__synthesize_kernel_mmap()
661 err = process(tool, event, &synth_sample, machine); perf_event__synthesize_kernel_mmap()
662 free(event); perf_event__synthesize_kernel_mmap()
667 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) perf_event__fprintf_comm() argument
671 if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC) perf_event__fprintf_comm()
676 return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid); perf_event__fprintf_comm()
680 union perf_event *event, perf_event__process_comm()
684 return machine__process_comm_event(machine, event, sample); perf_event__process_comm()
688 union perf_event *event, perf_event__process_lost()
692 return machine__process_lost_event(machine, event, sample); perf_event__process_lost()
695 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) perf_event__fprintf_mmap() argument
698 event->mmap.pid, event->mmap.tid, event->mmap.start, perf_event__fprintf_mmap()
699 event->mmap.len, event->mmap.pgoff, perf_event__fprintf_mmap()
700 (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x', perf_event__fprintf_mmap()
701 event->mmap.filename); perf_event__fprintf_mmap()
704 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) perf_event__fprintf_mmap2() argument
708 event->mmap2.pid, event->mmap2.tid, event->mmap2.start, perf_event__fprintf_mmap2()
709 event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, perf_event__fprintf_mmap2()
710 event->mmap2.min, event->mmap2.ino, perf_event__fprintf_mmap2()
711 event->mmap2.ino_generation, perf_event__fprintf_mmap2()
712 (event->mmap2.prot & PROT_READ) ? 'r' : '-', perf_event__fprintf_mmap2()
713 (event->mmap2.prot & PROT_WRITE) ? 'w' : '-', perf_event__fprintf_mmap2()
714 (event->mmap2.prot & PROT_EXEC) ? 'x' : '-', perf_event__fprintf_mmap2()
715 (event->mmap2.flags & MAP_SHARED) ? 's' : 'p', perf_event__fprintf_mmap2()
716 event->mmap2.filename); perf_event__fprintf_mmap2()
720 union perf_event *event, perf_event__process_mmap()
724 return machine__process_mmap_event(machine, event, sample); perf_event__process_mmap()
728 union perf_event *event, perf_event__process_mmap2()
732 return machine__process_mmap2_event(machine, event, sample); perf_event__process_mmap2()
735 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) perf_event__fprintf_task() argument
738 event->fork.pid, event->fork.tid, perf_event__fprintf_task()
739 event->fork.ppid, event->fork.ptid); perf_event__fprintf_task()
743 union perf_event *event, perf_event__process_fork()
747 return machine__process_fork_event(machine, event, sample); perf_event__process_fork()
751 union perf_event *event, perf_event__process_exit()
755 return machine__process_exit_event(machine, event, sample); perf_event__process_exit()
758 size_t perf_event__fprintf(union perf_event *event, FILE *fp) perf_event__fprintf() argument
761 perf_event__name(event->header.type)); perf_event__fprintf()
763 switch (event->header.type) { perf_event__fprintf()
765 ret += perf_event__fprintf_comm(event, fp); perf_event__fprintf()
769 ret += perf_event__fprintf_task(event, fp); perf_event__fprintf()
772 ret += perf_event__fprintf_mmap(event, fp); perf_event__fprintf()
775 ret += perf_event__fprintf_mmap2(event, fp); perf_event__fprintf()
785 union perf_event *event, perf_event__process()
789 return machine__process_event(machine, event, sample); perf_event__process()
880 int perf_event__preprocess_sample(const union perf_event *event, perf_event__preprocess_sample() argument
885 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; perf_event__preprocess_sample()
961 void perf_event__preprocess_sample_addr(union perf_event *event, perf_event__preprocess_sample_addr() argument
966 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; perf_event__preprocess_sample_addr()
162 perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine) perf_event__synthesize_comm() argument
178 perf_event__synthesize_fork(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, pid_t ppid, perf_event__handler_t process, struct machine *machine) perf_event__synthesize_fork() argument
210 perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, bool mmap_data) perf_event__synthesize_mmap_events() argument
679 perf_event__process_comm(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_comm() argument
687 perf_event__process_lost(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_lost() argument
719 perf_event__process_mmap(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_mmap() argument
727 perf_event__process_mmap2(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_mmap2() argument
742 perf_event__process_fork(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_fork() argument
750 perf_event__process_exit(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_exit() argument
784 perf_event__process(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process() argument
H A Dsession.c2 #include <traceevent/event-parse.h>
21 union perf_event *event,
96 struct ordered_event *event) ordered_events__deliver_event()
101 int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample); ordered_events__deliver_event()
108 return machines__deliver_event(&session->machines, session->evlist, event->event, ordered_events__deliver_event()
109 &sample, session->tool, event->file_offset); ordered_events__deliver_event()
143 * kernel MMAP event, in perf_event__process_mmap(). perf_session__new()
199 union perf_event *event process_event_synth_tracing_data_stub()
209 union perf_event *event __maybe_unused, process_event_synth_attr_stub()
218 union perf_event *event __maybe_unused, process_event_sample_stub()
228 union perf_event *event __maybe_unused, process_event_stub()
237 union perf_event *event __maybe_unused, process_build_id_stub()
245 union perf_event *event __maybe_unused, process_finished_round_stub()
253 union perf_event *event,
257 union perf_event *event __maybe_unused, process_id_index_stub()
303 static void swap_sample_id_all(union perf_event *event, void *data) swap_sample_id_all() argument
305 void *end = (void *) event + event->header.size; swap_sample_id_all()
312 static void perf_event__all64_swap(union perf_event *event, perf_event__all64_swap() argument
315 struct perf_event_header *hdr = &event->header; perf_event__all64_swap()
316 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); perf_event__all64_swap()
319 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) perf_event__comm_swap() argument
321 event->comm.pid = bswap_32(event->comm.pid); perf_event__comm_swap()
322 event->comm.tid = bswap_32(event->comm.tid); perf_event__comm_swap()
325 void *data = &event->comm.comm; perf_event__comm_swap()
328 swap_sample_id_all(event, data); perf_event__comm_swap()
332 static void perf_event__mmap_swap(union perf_event *event, perf_event__mmap_swap() argument
335 event->mmap.pid = bswap_32(event->mmap.pid); perf_event__mmap_swap()
336 event->mmap.tid = bswap_32(event->mmap.tid); perf_event__mmap_swap()
337 event->mmap.start = bswap_64(event->mmap.start); perf_event__mmap_swap()
338 event->mmap.len = bswap_64(event->mmap.len); perf_event__mmap_swap()
339 event->mmap.pgoff = bswap_64(event->mmap.pgoff); perf_event__mmap_swap()
342 void *data = &event->mmap.filename; perf_event__mmap_swap()
345 swap_sample_id_all(event, data); perf_event__mmap_swap()
349 static void perf_event__mmap2_swap(union perf_event *event, perf_event__mmap2_swap() argument
352 event->mmap2.pid = bswap_32(event->mmap2.pid); perf_event__mmap2_swap()
353 event->mmap2.tid = bswap_32(event->mmap2.tid); perf_event__mmap2_swap()
354 event->mmap2.start = bswap_64(event->mmap2.start); perf_event__mmap2_swap()
355 event->mmap2.len = bswap_64(event->mmap2.len); perf_event__mmap2_swap()
356 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); perf_event__mmap2_swap()
357 event->mmap2.maj = bswap_32(event->mmap2.maj); perf_event__mmap2_swap()
358 event->mmap2.min = bswap_32(event->mmap2.min); perf_event__mmap2_swap()
359 event->mmap2.ino = bswap_64(event->mmap2.ino); perf_event__mmap2_swap()
362 void *data = &event->mmap2.filename; perf_event__mmap2_swap()
365 swap_sample_id_all(event, data); perf_event__mmap2_swap()
368 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) perf_event__task_swap() argument
370 event->fork.pid = bswap_32(event->fork.pid); perf_event__task_swap()
371 event->fork.tid = bswap_32(event->fork.tid); perf_event__task_swap()
372 event->fork.ppid = bswap_32(event->fork.ppid); perf_event__task_swap()
373 event->fork.ptid = bswap_32(event->fork.ptid); perf_event__task_swap()
374 event->fork.time = bswap_64(event->fork.time); perf_event__task_swap()
377 swap_sample_id_all(event, &event->fork + 1); perf_event__task_swap()
380 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) perf_event__read_swap() argument
382 event->read.pid = bswap_32(event->read.pid); perf_event__read_swap()
383 event->read.tid = bswap_32(event->read.tid); perf_event__read_swap()
384 event->read.value = bswap_64(event->read.value); perf_event__read_swap()
385 event->read.time_enabled = bswap_64(event->read.time_enabled); perf_event__read_swap()
386 event->read.time_running = bswap_64(event->read.time_running); perf_event__read_swap()
387 event->read.id = bswap_64(event->read.id); perf_event__read_swap()
390 swap_sample_id_all(event, &event->read + 1); perf_event__read_swap()
393 static void perf_event__throttle_swap(union perf_event *event, perf_event__throttle_swap() argument
396 event->throttle.time = bswap_64(event->throttle.time); perf_event__throttle_swap()
397 event->throttle.id = bswap_64(event->throttle.id); perf_event__throttle_swap()
398 event->throttle.stream_id = bswap_64(event->throttle.stream_id); perf_event__throttle_swap()
401 swap_sample_id_all(event, &event->throttle + 1); perf_event__throttle_swap()
456 static void perf_event__hdr_attr_swap(union perf_event *event, perf_event__hdr_attr_swap() argument
461 perf_event__attr_swap(&event->attr.attr); perf_event__hdr_attr_swap()
463 size = event->header.size; perf_event__hdr_attr_swap()
464 size -= (void *)&event->attr.id - (void *)event; perf_event__hdr_attr_swap()
465 mem_bswap_64(event->attr.id, size); perf_event__hdr_attr_swap()
468 static void perf_event__event_type_swap(union perf_event *event, perf_event__event_type_swap() argument
471 event->event_type.event_type.event_id = perf_event__event_type_swap()
472 bswap_64(event->event_type.event_type.event_id); perf_event__event_type_swap()
475 static void perf_event__tracing_data_swap(union perf_event *event, perf_event__tracing_data_swap() argument
478 event->tracing_data.size = bswap_32(event->tracing_data.size); perf_event__tracing_data_swap()
481 typedef void (*perf_event__swap_op)(union perf_event *event,
505 * event.
543 union perf_event *event __maybe_unused, process_finished_round()
549 int perf_session__queue_event(struct perf_session *s, union perf_event *event, perf_session__queue_event() argument
552 return ordered_events__queue(&s->ordered_events, event, sample, file_offset); perf_session__queue_event()
691 union perf_event *event, perf_evlist__print_tstamp()
696 if (event->header.type != PERF_RECORD_SAMPLE && perf_evlist__print_tstamp()
739 static void dump_event(struct perf_evlist *evlist, union perf_event *event, dump_event() argument
745 printf("\n%#" PRIx64 " [%#x]: event: %d\n", dump_event()
746 file_offset, event->header.size, event->header.type); dump_event()
748 trace_event(event); dump_event()
751 perf_evlist__print_tstamp(evlist, event, sample); dump_event()
754 event->header.size, perf_event__name(event->header.type)); dump_event()
757 static void dump_sample(struct perf_evsel *evsel, union perf_event *event, dump_sample() argument
766 event->header.misc, sample->pid, sample->tid, sample->ip, dump_sample()
800 union perf_event *event, machines__find_for_cpumode()
803 const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; machines__find_for_cpumode()
811 if (event->header.type == PERF_RECORD_MMAP machines__find_for_cpumode()
812 || event->header.type == PERF_RECORD_MMAP2) machines__find_for_cpumode()
813 pid = event->mmap.pid; machines__find_for_cpumode()
828 union perf_event *event, deliver_sample_value()
846 return tool->sample(tool, event, sample, sid->evsel, machine); deliver_sample_value()
851 union perf_event *event, deliver_sample_group()
859 ret = deliver_sample_value(evlist, tool, event, sample, deliver_sample_group()
872 union perf_event *event, perf_evlist__deliver_sample()
883 return tool->sample(tool, event, sample, evsel, machine); perf_evlist__deliver_sample()
887 return deliver_sample_group(evlist, tool, event, sample, perf_evlist__deliver_sample()
890 return deliver_sample_value(evlist, tool, event, sample, perf_evlist__deliver_sample()
896 union perf_event *event, machines__deliver_event()
903 dump_event(evlist, event, file_offset, sample); machines__deliver_event()
907 machine = machines__find_for_cpumode(machines, event, sample); machines__deliver_event()
909 switch (event->header.type) { machines__deliver_event()
911 dump_sample(evsel, event, sample); machines__deliver_event()
920 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); machines__deliver_event()
922 return tool->mmap(tool, event, sample, machine); machines__deliver_event()
924 return tool->mmap2(tool, event, sample, machine); machines__deliver_event()
926 return tool->comm(tool, event, sample, machine); machines__deliver_event()
928 return tool->fork(tool, event, sample, machine); machines__deliver_event()
930 return tool->exit(tool, event, sample, machine); machines__deliver_event()
933 evlist->stats.total_lost += event->lost.lost; machines__deliver_event()
934 return tool->lost(tool, event, sample, machine); machines__deliver_event()
936 return tool->read(tool, event, sample, evsel, machine); machines__deliver_event()
938 return tool->throttle(tool, event, sample, machine); machines__deliver_event()
940 return tool->unthrottle(tool, event, sample, machine); machines__deliver_event()
948 union perf_event *event, perf_session__process_user_event()
956 dump_event(session->evlist, event, file_offset, NULL); perf_session__process_user_event()
959 switch (event->header.type) { perf_session__process_user_event()
961 err = tool->attr(tool, event, &session->evlist); perf_session__process_user_event()
976 return tool->tracing_data(tool, event, session); perf_session__process_user_event()
978 return tool->build_id(tool, event, session); perf_session__process_user_event()
980 return tool->finished_round(tool, event, oe); perf_session__process_user_event()
982 return tool->id_index(tool, event, session); perf_session__process_user_event()
989 union perf_event *event, perf_session__deliver_synth_event()
995 events_stats__inc(&evlist->stats, event->header.type); perf_session__deliver_synth_event()
997 if (event->header.type >= PERF_RECORD_USER_TYPE_START) perf_session__deliver_synth_event()
998 return perf_session__process_user_event(session, event, 0); perf_session__deliver_synth_event()
1000 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); perf_session__deliver_synth_event()
1003 static void event_swap(union perf_event *event, bool sample_id_all) event_swap() argument
1007 swap = perf_event__swap_ops[event->header.type]; event_swap()
1009 swap(event, sample_id_all); event_swap()
1017 union perf_event *event; perf_session__peek_event() local
1022 event = file_offset - session->one_mmap_offset + perf_session__peek_event()
1040 event = (union perf_event *)buf; perf_session__peek_event()
1043 perf_event_header__bswap(&event->header); perf_session__peek_event()
1045 if (event->header.size < hdr_sz) perf_session__peek_event()
1048 rest = event->header.size - hdr_sz; perf_session__peek_event()
1054 event_swap(event, perf_evlist__sample_id_all(session->evlist)); perf_session__peek_event()
1058 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && perf_session__peek_event()
1059 perf_evlist__parse_sample(session->evlist, event, sample)) perf_session__peek_event()
1062 *event_ptr = event; perf_session__peek_event()
1068 union perf_event *event, u64 file_offset) perf_session__process_event()
1076 event_swap(event, perf_evlist__sample_id_all(evlist)); perf_session__process_event()
1078 if (event->header.type >= PERF_RECORD_HEADER_MAX) perf_session__process_event()
1081 events_stats__inc(&evlist->stats, event->header.type); perf_session__process_event()
1083 if (event->header.type >= PERF_RECORD_USER_TYPE_START) perf_session__process_event()
1084 return perf_session__process_user_event(session, event, file_offset); perf_session__process_event()
1089 ret = perf_evlist__parse_sample(evlist, event, &sample); perf_session__process_event()
1094 ret = perf_session__queue_event(session, event, &sample, file_offset); perf_session__process_event()
1099 return machines__deliver_event(&session->machines, evlist, event, perf_session__process_event()
1180 union perf_event *event; __perf_session__process_pipe_events() local
1197 event = buf; __perf_session__process_pipe_events()
1198 err = readn(fd, event, sizeof(struct perf_event_header)); __perf_session__process_pipe_events()
1203 pr_err("failed to read event header\n"); __perf_session__process_pipe_events()
1208 perf_event_header__bswap(&event->header); __perf_session__process_pipe_events()
1210 size = event->header.size; __perf_session__process_pipe_events()
1212 pr_err("bad event header size\n"); __perf_session__process_pipe_events()
1219 pr_err("failed to allocate memory to read event\n"); __perf_session__process_pipe_events()
1224 event = buf; __perf_session__process_pipe_events()
1226 p = event; __perf_session__process_pipe_events()
1233 pr_err("unexpected end of event stream\n"); __perf_session__process_pipe_events()
1237 pr_err("failed to read event data\n"); __perf_session__process_pipe_events()
1242 if ((skip = perf_session__process_event(session, event, head)) < 0) { __perf_session__process_pipe_events()
1244 head, event->header.size, event->header.type); __perf_session__process_pipe_events()
1270 union perf_event *event; fetch_mmaped_event() local
1274 * the size of the event in the headers. fetch_mmaped_event()
1276 if (head + sizeof(event->header) > mmap_size) fetch_mmaped_event()
1279 event = (union perf_event *)(buf + head); fetch_mmaped_event()
1282 perf_event_header__bswap(&event->header); fetch_mmaped_event()
1284 if (head + event->header.size > mmap_size) { fetch_mmaped_event()
1285 /* We're not fetching the event so swap back again */ fetch_mmaped_event()
1287 perf_event_header__bswap(&event->header); fetch_mmaped_event()
1291 return event; fetch_mmaped_event()
1317 union perf_event *event; __perf_session__process_events() local
1364 event = fetch_mmaped_event(session, head, mmap_size, buf); __perf_session__process_events()
1365 if (!event) { __perf_session__process_events()
1377 size = event->header.size; __perf_session__process_events()
1380 (skip = perf_session__process_event(session, event, file_pos)) < 0) { __perf_session__process_events()
1382 file_offset + head, event->header.size, __perf_session__process_events()
1383 event->header.type); __perf_session__process_events()
1694 * Adding a handler for an event not in the session, __perf_session__set_tracepoints_handlers()
1713 union perf_event *event, perf_event__process_id_index()
1717 struct id_index_event *ie = &event->id_index; perf_event__process_id_index()
95 ordered_events__deliver_event(struct ordered_events *oe, struct ordered_event *event) ordered_events__deliver_event() argument
690 perf_evlist__print_tstamp(struct perf_evlist *evlist, union perf_event *event, struct perf_sample *sample) perf_evlist__print_tstamp() argument
799 machines__find_for_cpumode(struct machines *machines, union perf_event *event, struct perf_sample *sample) machines__find_for_cpumode() argument
826 deliver_sample_value(struct perf_evlist *evlist, struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct sample_read_value *v, struct machine *machine) deliver_sample_value() argument
849 deliver_sample_group(struct perf_evlist *evlist, struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) deliver_sample_group() argument
870 perf_evlist__deliver_sample(struct perf_evlist *evlist, struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_evlist__deliver_sample() argument
894 machines__deliver_event(struct machines *machines, struct perf_evlist *evlist, union perf_event *event, struct perf_sample *sample, struct perf_tool *tool, u64 file_offset) machines__deliver_event() argument
947 perf_session__process_user_event(struct perf_session *session, union perf_event *event, u64 file_offset) perf_session__process_user_event() argument
988 perf_session__deliver_synth_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample) perf_session__deliver_synth_event() argument
1067 perf_session__process_event(struct perf_session *session, union perf_event *event, u64 file_offset) perf_session__process_event() argument
1712 perf_event__process_id_index(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_session *session) perf_event__process_id_index() argument
H A Dordered-events.c32 * last event might point to some random place in the list as it's queue_event()
33 * the last queued event. We expect that the new event is close to queue_event()
61 union perf_event *event) __dup_event()
66 new_event = memdup(event, event->header.size); __dup_event()
68 oe->cur_alloc_size += event->header.size; __dup_event()
75 union perf_event *event) dup_event()
77 return oe->copy_on_queue ? __dup_event(oe, event) : event; dup_event()
80 static void free_dup_event(struct ordered_events *oe, union perf_event *event) free_dup_event() argument
83 oe->cur_alloc_size -= event->header.size; free_dup_event()
84 free(event); free_dup_event()
90 union perf_event *event) alloc_event()
96 new_event = dup_event(oe, event); alloc_event()
129 new->event = new_event; alloc_event()
135 union perf_event *event) ordered_events__new_event()
139 new = alloc_event(oe, event); ordered_events__new_event()
148 void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event) ordered_events__delete() argument
150 list_move(&event->list, &oe->cache); ordered_events__delete()
152 free_dup_event(oe, event->event); ordered_events__delete()
155 int ordered_events__queue(struct ordered_events *oe, union perf_event *event, ordered_events__queue() argument
165 pr_oe_time(timestamp, "out of order event\n"); ordered_events__queue()
172 oevent = ordered_events__new_event(oe, timestamp, event); ordered_events__queue()
175 oevent = ordered_events__new_event(oe, timestamp, event); ordered_events__queue()
252 /* Warn if we are called before any event got allocated. */ ordered_events__flush()
300 struct ordered_event *event; ordered_events__free() local
302 event = list_entry(oe->to_free.next, struct ordered_event, list); ordered_events__free()
303 list_del(&event->list); ordered_events__free()
304 free_dup_event(oe, event->event); ordered_events__free()
305 free(event); ordered_events__free()
60 __dup_event(struct ordered_events *oe, union perf_event *event) __dup_event() argument
74 dup_event(struct ordered_events *oe, union perf_event *event) dup_event() argument
89 alloc_event(struct ordered_events *oe, union perf_event *event) alloc_event() argument
134 ordered_events__new_event(struct ordered_events *oe, u64 timestamp, union perf_event *event) ordered_events__new_event() argument
H A Dtool.h15 typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
19 typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
23 union perf_event *event,
26 typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
29 typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event,
H A Dpython.c7 #include "event.h"
40 offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
51 union perf_event event; member in struct:pyrf_event
55 sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \
56 sample_member_def(sample_pid, pid, T_INT, "event pid"), \
57 sample_member_def(sample_tid, tid, T_INT, "event tid"), \
58 sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
59 sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
60 sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
61 sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
62 sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
63 sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
65 static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
69 member_def(perf_event_header, type, T_UINT, "event type"),
70 member_def(mmap_event, pid, T_UINT, "event pid"),
71 member_def(mmap_event, tid, T_UINT, "event tid"),
87 pevent->event.mmap.pid, pevent->event.mmap.tid, pyrf_mmap_event__repr()
88 pevent->event.mmap.start, pevent->event.mmap.len, pyrf_mmap_event__repr()
89 pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) { pyrf_mmap_event__repr()
108 static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
112 member_def(perf_event_header, type, T_UINT, "event type"),
113 member_def(fork_event, pid, T_UINT, "event pid"),
114 member_def(fork_event, ppid, T_UINT, "event ppid"),
115 member_def(fork_event, tid, T_UINT, "event tid"),
116 member_def(fork_event, ptid, T_UINT, "event ptid"),
125 pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit", pyrf_task_event__repr()
126 pevent->event.fork.pid, pyrf_task_event__repr()
127 pevent->event.fork.ppid, pyrf_task_event__repr()
128 pevent->event.fork.tid, pyrf_task_event__repr()
129 pevent->event.fork.ptid, pyrf_task_event__repr()
130 pevent->event.fork.time); pyrf_task_event__repr()
143 static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
147 member_def(perf_event_header, type, T_UINT, "event type"),
148 member_def(comm_event, pid, T_UINT, "event pid"),
149 member_def(comm_event, tid, T_UINT, "event tid"),
157 pevent->event.comm.pid, pyrf_comm_event__repr()
158 pevent->event.comm.tid, pyrf_comm_event__repr()
159 pevent->event.comm.comm); pyrf_comm_event__repr()
172 static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
176 member_def(perf_event_header, type, T_UINT, "event type"),
178 member_def(throttle_event, id, T_ULONGLONG, "event id"),
179 member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"),
185 struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1); pyrf_throttle_event__repr()
189 pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un", pyrf_throttle_event__repr()
203 static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
207 member_def(lost_event, id, T_ULONGLONG, "event id"),
219 pevent->event.lost.id, pevent->event.lost.lost) < 0) { pyrf_lost_event__repr()
238 static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
242 member_def(read_event, pid, T_UINT, "event pid"),
243 member_def(read_event, tid, T_UINT, "event tid"),
250 pevent->event.read.pid, pyrf_read_event__repr()
251 pevent->event.read.tid); pyrf_read_event__repr()
268 static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
272 member_def(perf_event_header, type, T_UINT, "event type"),
347 static PyObject *pyrf_event__new(union perf_event *event) pyrf_event__new() argument
352 if (event->header.type < PERF_RECORD_MMAP || pyrf_event__new()
353 event->header.type > PERF_RECORD_SAMPLE) pyrf_event__new()
356 ptype = pyrf_event__type[event->header.type]; pyrf_event__new()
359 memcpy(&pevent->event, event, event->header.size); pyrf_event__new()
657 .ml_doc = PyDoc_STR("open the event selector file descriptor table.")
662 static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
804 union perf_event *event; pyrf_evlist__read_on_cpu() local
813 event = perf_evlist__mmap_read(evlist, cpu); pyrf_evlist__read_on_cpu()
814 if (event != NULL) { pyrf_evlist__read_on_cpu()
815 PyObject *pyevent = pyrf_event__new(event); pyrf_evlist__read_on_cpu()
823 err = perf_evlist__parse_sample(evlist, event, &pevent->sample); pyrf_evlist__read_on_cpu()
885 .ml_doc = PyDoc_STR("adds an event selector to the list.")
891 .ml_doc = PyDoc_STR("reads an event.")
924 static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
H A Dordered-events.h11 union perf_event *event; member in struct:ordered_event
25 struct ordered_event *event);
46 int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
48 void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
H A Dunwind-libdw.h5 #include "event.h"
H A Dtrace-event.h4 #include <traceevent/event-parse.h>
26 void event_format__fprintf(struct event_format *event,
29 void event_format__print(struct event_format *event,
37 raw_field_value(struct event_format *event, const char *name, void *data);
45 struct event_format *event);
46 unsigned long long read_size(struct event_format *event, void *ptr, int size);
72 void (*process_event) (union perf_event *event,
H A Dtrace-event.c10 #include <traceevent/event-parse.h>
11 #include "trace-event.h"
45 struct event_format *event = NULL; tp_format() local
56 pevent_parse_format(pevent, &event, data, size, sys); tp_format()
59 return event; tp_format()
H A Devent.h223 * when possible sends this number in a PERF_RECORD_LOST event. The number of
306 union perf_event *event,
326 union perf_event *event,
330 union perf_event *event,
334 union perf_event *event,
338 union perf_event *event,
342 union perf_event *event,
346 union perf_event *event,
350 union perf_event *event,
356 int perf_event__preprocess_sample(const union perf_event *event,
365 void perf_event__preprocess_sample_addr(union perf_event *event,
374 int perf_event__synthesize_sample(union perf_event *event, u64 type,
380 union perf_event *event,
386 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
387 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
388 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
389 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
390 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/
H A Dfweh.c60 * struct brcmf_event - contents of broadcom event packet.
64 * @msg: common part of the actual event message.
73 * struct brcmf_fweh_queue_item - event item on event queue.
76 * @code: event code.
77 * @ifidx: interface index related to this event.
79 * @emsg: common parameters of the firmware event message.
80 * @data: event specific data part of the firmware event.
103 /* array for mapping code to event name */
110 * brcmf_fweh_event_name() - returns name for given event code.
131 * brcmf_fweh_queue_event() - create and queue event.
133 * @fweh: firmware event handling info.
134 * @event: event queue entry.
137 struct brcmf_fweh_queue_item *event) brcmf_fweh_queue_event()
142 list_add_tail(&event->q, &fweh->event_q); brcmf_fweh_queue_event()
158 /* handle the event if valid interface and handler */ brcmf_fweh_call_event_handler()
162 brcmf_err("unhandled event %d ignored\n", code); brcmf_fweh_call_event_handler()
170 * brcmf_fweh_handle_if_event() - handle IF event.
188 /* The P2P Device interface event must not be ignored brcmf_fweh_handle_if_event()
195 brcmf_dbg(EVENT, "event can be ignored\n"); brcmf_fweh_handle_if_event()
229 * brcmf_fweh_dequeue_event() - get event from the queue.
231 * @fweh: firmware event handling info.
236 struct brcmf_fweh_queue_item *event = NULL; brcmf_fweh_dequeue_event() local
241 event = list_first_entry(&fweh->event_q, brcmf_fweh_dequeue_event()
243 list_del(&event->q); brcmf_fweh_dequeue_event()
247 return event; brcmf_fweh_dequeue_event()
251 * brcmf_fweh_event_worker() - firmware event worker.
260 struct brcmf_fweh_queue_item *event; brcmf_fweh_event_worker() local
268 while ((event = brcmf_fweh_dequeue_event(fweh))) { brcmf_fweh_event_worker()
269 brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n", brcmf_fweh_event_worker()
270 brcmf_fweh_event_name(event->code), event->code, brcmf_fweh_event_worker()
271 event->emsg.ifidx, event->emsg.bsscfgidx, brcmf_fweh_event_worker()
272 event->emsg.addr); brcmf_fweh_event_worker()
274 /* convert event message */ brcmf_fweh_event_worker()
275 emsg_be = &event->emsg; brcmf_fweh_event_worker()
278 emsg.event_code = event->code; brcmf_fweh_event_worker()
290 brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data, brcmf_fweh_event_worker()
292 "event payload, len=%d\n", emsg.datalen); brcmf_fweh_event_worker()
294 /* special handling of interface event */ brcmf_fweh_event_worker()
295 if (event->code == BRCMF_E_IF) { brcmf_fweh_event_worker()
296 brcmf_fweh_handle_if_event(drvr, &emsg, event->data); brcmf_fweh_event_worker()
300 if ((event->code == BRCMF_E_TDLS_PEER_EVENT) && brcmf_fweh_event_worker()
305 err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg, brcmf_fweh_event_worker()
306 event->data); brcmf_fweh_event_worker()
308 brcmf_err("event handler failed (%d)\n", brcmf_fweh_event_worker()
309 event->code); brcmf_fweh_event_worker()
313 kfree(event); brcmf_fweh_event_worker()
318 * brcmf_fweh_attach() - initialize firmware event handling.
331 * brcmf_fweh_detach() - cleanup firmware event handling.
355 * brcmf_fweh_register() - register handler for given event code.
358 * @code: event code.
359 * @handler: handler for the given event code.
365 brcmf_err("event code %d already registered\n", code); brcmf_fweh_register()
369 brcmf_dbg(TRACE, "event handler registered for %s\n", brcmf_fweh_register()
378 * @code: event code.
383 brcmf_dbg(TRACE, "event handler cleared for %s\n", brcmf_fweh_unregister()
400 brcmf_dbg(EVENT, "enable event %s\n", brcmf_fweh_activate_events()
406 /* want to handle IF event as well */ brcmf_fweh_activate_events()
407 brcmf_dbg(EVENT, "enable event IF\n"); brcmf_fweh_activate_events()
419 * brcmf_fweh_process_event() - process skb as firmware event.
422 * @event_packet: event packet to process.
424 * If the packet buffer contains a firmware event message it will
425 * dispatch the event to a registered handler (using worker).
432 struct brcmf_fweh_queue_item *event; brcmf_fweh_process_event() local
437 /* get event info */ brcmf_fweh_process_event()
451 event = kzalloc(sizeof(*event) + datalen, alloc_flag); brcmf_fweh_process_event()
452 if (!event) brcmf_fweh_process_event()
455 event->code = code; brcmf_fweh_process_event()
456 event->ifidx = event_packet->msg.ifidx; brcmf_fweh_process_event()
458 /* use memcpy to get aligned event message */ brcmf_fweh_process_event()
459 memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg)); brcmf_fweh_process_event()
460 memcpy(event->data, data, datalen); brcmf_fweh_process_event()
461 memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN); brcmf_fweh_process_event()
463 brcmf_fweh_queue_event(fweh, event); brcmf_fweh_process_event()
136 brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh, struct brcmf_fweh_queue_item *event) brcmf_fweh_queue_event() argument
/linux-4.1.27/drivers/md/
H A Ddm-uevent.c52 static void dm_uevent_free(struct dm_uevent *event) dm_uevent_free() argument
54 kmem_cache_free(_dm_event_cache, event); dm_uevent_free()
59 struct dm_uevent *event; dm_uevent_alloc() local
61 event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC); dm_uevent_alloc()
62 if (!event) dm_uevent_alloc()
65 INIT_LIST_HEAD(&event->elist); dm_uevent_alloc()
66 event->md = md; dm_uevent_alloc()
68 return event; dm_uevent_alloc()
78 struct dm_uevent *event; dm_build_path_uevent() local
80 event = dm_uevent_alloc(md); dm_build_path_uevent()
81 if (!event) { dm_build_path_uevent()
86 event->action = action; dm_build_path_uevent()
88 if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) { dm_build_path_uevent()
94 if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) { dm_build_path_uevent()
100 if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u", dm_build_path_uevent()
107 if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) { dm_build_path_uevent()
112 if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d", dm_build_path_uevent()
119 return event; dm_build_path_uevent()
122 dm_uevent_free(event); dm_build_path_uevent()
131 * @kobj: kobject generating event
137 struct dm_uevent *event, *next; dm_send_uevents() local
139 list_for_each_entry_safe(event, next, events, elist) { list_for_each_entry_safe()
140 list_del_init(&event->elist); list_for_each_entry_safe()
146 if (dm_copy_name_and_uuid(event->md, event->name, list_for_each_entry_safe()
147 event->uuid)) { list_for_each_entry_safe()
153 if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) { list_for_each_entry_safe()
159 if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) { list_for_each_entry_safe()
165 r = kobject_uevent_env(kobj, event->action, event->ku_env.envp); list_for_each_entry_safe()
169 dm_uevent_free(event); list_for_each_entry_safe()
175 * dm_path_uevent - called to create a new path event and queue it
177 * @event_type: path event type enum
187 struct dm_uevent *event; dm_path_uevent() local
194 event = dm_build_path_uevent(md, ti, dm_path_uevent()
198 if (IS_ERR(event)) dm_path_uevent()
201 dm_uevent_add(md, &event->elist); dm_path_uevent()
/linux-4.1.27/drivers/oprofile/
H A Dnmi_timer_int.c28 static void nmi_timer_callback(struct perf_event *event, nmi_timer_callback() argument
32 event->hw.interrupts = 0; /* don't throttle interrupts */ nmi_timer_callback()
38 struct perf_event *event = per_cpu(nmi_timer_events, cpu); nmi_timer_start_cpu() local
40 if (!event) { nmi_timer_start_cpu()
41 event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL, nmi_timer_start_cpu()
43 if (IS_ERR(event)) nmi_timer_start_cpu()
44 return PTR_ERR(event); nmi_timer_start_cpu()
45 per_cpu(nmi_timer_events, cpu) = event; nmi_timer_start_cpu()
48 if (event && ctr_running) nmi_timer_start_cpu()
49 perf_event_enable(event); nmi_timer_start_cpu()
56 struct perf_event *event = per_cpu(nmi_timer_events, cpu); nmi_timer_stop_cpu() local
58 if (event && ctr_running) nmi_timer_stop_cpu()
59 perf_event_disable(event); nmi_timer_stop_cpu()
108 struct perf_event *event; nmi_timer_shutdown() local
114 event = per_cpu(nmi_timer_events, cpu); for_each_possible_cpu()
115 if (!event) for_each_possible_cpu()
117 perf_event_disable(event); for_each_possible_cpu()
119 perf_event_release_kernel(event); for_each_possible_cpu()
H A Doprofile_perf.c18 unsigned long event; member in struct:op_counter_config
35 static void op_overflow_handler(struct perf_event *event, op_overflow_handler() argument
42 if (per_cpu(perf_events, cpu)[id] == event) op_overflow_handler()
68 attr->config = counter_config[i].event; op_perf_setup()
74 static int op_create_counter(int cpu, int event) op_create_counter() argument
78 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) op_create_counter()
81 pevent = perf_event_create_kernel_counter(&counter_config[event].attr, op_create_counter()
90 pr_warning("oprofile: failed to enable event %d " op_create_counter()
91 "on CPU %d\n", event, cpu); op_create_counter()
95 per_cpu(perf_events, cpu)[event] = pevent; op_create_counter()
100 static void op_destroy_counter(int cpu, int event) op_destroy_counter() argument
102 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; op_destroy_counter()
106 per_cpu(perf_events, cpu)[event] = NULL; op_destroy_counter()
116 int cpu, event, ret = 0; op_perf_start() local
119 for (event = 0; event < num_counters; ++event) { for_each_online_cpu()
120 ret = op_create_counter(cpu, event); for_each_online_cpu()
134 int cpu, event; op_perf_stop() local
137 for (event = 0; event < num_counters; ++event) op_perf_stop()
138 op_destroy_counter(cpu, event); op_perf_stop()
152 oprofilefs_create_ulong(dir, "event", &counter_config[i].event); oprofile_perf_create_files()
257 struct perf_event *event; oprofile_perf_exit() local
261 event = per_cpu(perf_events, cpu)[id]; for_each_possible_cpu()
262 if (event) for_each_possible_cpu()
263 perf_event_release_kernel(event); for_each_possible_cpu()
H A Dcpu_buffer.c11 * Each CPU has a local buffer that stores PC value/event
14 * event buffer by sync_buffer().
144 entry->event = ring_buffer_lock_reserve op_cpu_buffer_write_reserve()
147 if (!entry->event) op_cpu_buffer_write_reserve()
149 entry->sample = ring_buffer_event_data(entry->event); op_cpu_buffer_write_reserve()
158 return ring_buffer_unlock_commit(op_ring_buffer, entry->event); op_cpu_buffer_write_commit()
168 entry->event = e; op_cpu_buffer_read_entry()
224 sample->event = flags; op_add_code()
236 unsigned long pc, unsigned long event) op_add_sample()
246 sample->event = event; op_add_sample()
261 unsigned long backtrace, int is_kernel, unsigned long event, log_sample()
275 if (op_add_sample(cpu_buf, pc, event)) log_sample()
297 unsigned long event, int is_kernel, __oprofile_add_ext_sample()
305 * source of this event __oprofile_add_ext_sample()
307 if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task)) __oprofile_add_ext_sample()
320 unsigned long event, int is_kernel, oprofile_add_ext_hw_sample()
323 __oprofile_add_ext_sample(pc, regs, event, is_kernel, task); oprofile_add_ext_hw_sample()
327 unsigned long event, int is_kernel) oprofile_add_ext_sample()
329 __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); oprofile_add_ext_sample()
332 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) oprofile_add_sample() argument
345 __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); oprofile_add_sample()
372 sample->event = 0; /* no flags */ oprofile_write_reserve()
380 entry->event = NULL; oprofile_write_reserve()
386 if (!entry->event) oprofile_add_data()
393 if (!entry->event) oprofile_add_data64()
408 if (!entry->event) oprofile_write_commit()
413 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) oprofile_add_pc() argument
416 log_sample(cpu_buf, pc, 0, is_kernel, event, NULL); oprofile_add_pc()
235 op_add_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, unsigned long event) op_add_sample() argument
260 log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, unsigned long backtrace, int is_kernel, unsigned long event, struct task_struct *task) log_sample() argument
296 __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel, struct task_struct *task) __oprofile_add_ext_sample() argument
319 oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel, struct task_struct *task) oprofile_add_ext_hw_sample() argument
326 oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel) oprofile_add_ext_sample() argument
H A Devent_buffer.h21 * Add data to the event buffer.
27 /* wake up the process sleeping on the event file */
/linux-4.1.27/arch/powerpc/platforms/pseries/
H A Dio_event_irq.c27 * IO event interrupt is a mechanism provided by RTAS to return
29 * drivers can register their event handlers to receive events.
32 * their event handlers. Since multiple IO event types and scopes
33 * share an IO event interrupt, the event handlers are called one
34 * by one until the IO event is claimed by one of the handlers.
35 * The event handlers are expected to return NOTIFY_OK if the
36 * event is handled by the event handler or NOTIFY_DONE if the
37 * event does not belong to the handler.
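For illustration, the claim-or-pass convention described above maps onto the usual notifier_block pattern. The sketch below is an assumption based on this comment, not code from this file: the chain symbol pseries_ioei_notifier_list, the pseries_io_event field names, and MY_EXPECTED_SCOPE are assumed/hypothetical.

#include <linux/notifier.h>
#include <asm/io_event_irq.h>

#define MY_EXPECTED_SCOPE 0x01	/* placeholder scope value, hypothetical */

/* Claim events we own with NOTIFY_OK, pass everything else on with NOTIFY_DONE. */
static int my_io_event_handler(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct pseries_io_event *event = data;

	if (!event || event->scope != MY_EXPECTED_SCOPE)
		return NOTIFY_DONE;	/* not ours, let the next handler look */

	/* ... handle the event here ... */
	return NOTIFY_OK;
}

static struct notifier_block my_io_event_nb = {
	.notifier_call = my_io_event_handler,
};

/* Registration (chain name assumed from this platform code): */
/* atomic_notifier_chain_register(&pseries_ioei_notifier_list, &my_io_event_nb); */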
69 * Find the data portion of an IO Event section from event log.
70 * @elog: RTAS error/event log.
73 * pointer to a valid IO event section data. NULL if not found.
79 /* We should only ever get called for io-event interrupts, but if ioei_find_event()
82 * RTAS_TYPE_IO only exists in extended event log version 6 or later. ioei_find_event()
83 * No need to check event log version. ioei_find_event()
86 printk_once(KERN_WARNING"io_event_irq: Unexpected event type %d", ioei_find_event()
93 printk_once(KERN_WARNING "io_event_irq: RTAS extended event " ioei_find_event()
103 * - check-exception returns the first found error or event and clear that
104 * error or event so it is reported once.
105 * - Each interrupt returns one event. If a platform chooses to report
113 * - The owner of an event is determined by combinations of scope,
114 * event type, and sub-type. There is no easy way to pre-sort clients
115 * by scope or event type alone. For example, Torrent ISR route change
116 * event is reported with scope 0x00 (Not Applicable) rather than
118 * who owns the event.
123 struct pseries_io_event *event; ioei_interrupt() local
136 event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf); ioei_interrupt()
137 if (!event) ioei_interrupt()
141 0, event); ioei_interrupt()
154 np = of_find_node_by_path("/event-sources/ibm,io-events"); ioei_init()
157 pr_info("IBM I/O event interrupts enabled\n"); ioei_init()
/linux-4.1.27/fs/notify/
H A Dnotification.c22 * the event happened. When inotify gets an event it will need to add that
23 * event to the group notify queue. Since a single event might need to be on
24 * multiple group's notification queues we can't add the event directly to each
26 * has a pointer back to the original event. Since the majority of events are
28 * event_holder into each event. This means we have a single allocation instead
71 struct fsnotify_event *event) fsnotify_destroy_event()
74 if (!event || event->mask == FS_Q_OVERFLOW) fsnotify_destroy_event()
76 /* If the event is still queued, we have a problem... */ fsnotify_destroy_event()
77 WARN_ON(!list_empty(&event->list)); fsnotify_destroy_event()
78 group->ops->free_event(event); fsnotify_destroy_event()
82 * Add an event to the group notification queue. The group can later pull this
83 * event off the queue to deal with. The function returns 0 if the event was
84 * added to the queue, 1 if the event was merged with some other queued event,
88 struct fsnotify_event *event, fsnotify_add_event()
95 pr_debug("%s: group=%p event=%p\n", __func__, group, event); fsnotify_add_event()
101 /* Queue overflow event only if it isn't already queued */ fsnotify_add_event()
106 event = group->overflow_event; fsnotify_add_event()
111 ret = merge(list, event); fsnotify_add_event()
120 list_add_tail(&event->list, list); fsnotify_add_event()
129 * Remove @event from group's notification queue. It is the responsibility of
130 * the caller to destroy the event.
133 struct fsnotify_event *event) fsnotify_remove_event()
136 if (!list_empty(&event->list)) { fsnotify_remove_event()
137 list_del_init(&event->list); fsnotify_remove_event()
144 * Remove and return the first event from the notification list. It is the
145 * responsibility of the caller to destroy the obtained event
149 struct fsnotify_event *event; fsnotify_remove_first_event() local
155 event = list_first_entry(&group->notification_list, fsnotify_remove_first_event()
158 * We need to init list head for the case of overflow event so that fsnotify_remove_first_event()
161 list_del_init(&event->list); fsnotify_remove_first_event()
164 return event; fsnotify_remove_first_event()
168 * This will not remove the event, that must be done with
181 * event notifications.
185 struct fsnotify_event *event; fsnotify_flush_notify() local
189 event = fsnotify_remove_first_event(group); fsnotify_flush_notify()
190 fsnotify_destroy_event(group, event); fsnotify_flush_notify()
196 * fsnotify_create_event - Allocate a new event which will be sent to each
198 * particular event.
200 * @inode the inode which is supposed to receive the event (sometimes a
201 * parent of the inode to which the event happened.
207 void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode, fsnotify_init_event() argument
210 INIT_LIST_HEAD(&event->list); fsnotify_init_event()
211 event->inode = inode; fsnotify_init_event()
212 event->mask = mask; fsnotify_init_event()
70 fsnotify_destroy_event(struct fsnotify_group *group, struct fsnotify_event *event) fsnotify_destroy_event() argument
87 fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct list_head *, struct fsnotify_event *)) fsnotify_add_event() argument
132 fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event) fsnotify_remove_event() argument
/linux-4.1.27/include/linux/
H A Di2c-pxa.h12 void (*event)(void *ptr, i2c_slave_event_t event); member in struct:i2c_slave_client
H A Ddevfreq-event.h2 * devfreq-event: a framework to provide raw data and events of devfreq devices
18 * struct devfreq_event_dev - the devfreq-event device
20 * @node : Contain the devfreq-event device that have been registered.
21 * @dev : the device registered by devfreq-event class. dev.parent is
22 * the device using devfreq-event.
23 * @lock : a mutex to protect accessing devfreq-event.
25 * @desc : the description for devfreq-event device.
27 * This structure contains devfreq-event device information.
40 * struct devfreq_event_data - the devfreq-event data
42 * @load_count : load count of devfreq-event device for the given period.
43 * @total_count : total count of devfreq-event device for the given period.
48 * This structure contains the data of devfreq-event device for polling period.
56 * struct devfreq_event_ops - the operations of devfreq-event device
58 * @enable : Enable the devfreq-event device.
59 * @disable : Disable the devfreq-event device.
60 * reset : Reset all settings of the devfreq-event device.
61 * @set_event : Set the specific event type for the devfreq-event device.
62 * get_event : Get the result of the devfreq-event device with specific
63 * event type.
65 * This structure contains devfreq-event device operations which can be
66 * implemented by devfreq-event device drivers.
81 * struct devfreq_event_desc - the descriptor of devfreq-event device
83 * @name : the name of devfreq-event device.
84 * @driver_data : the private data for devfreq-event driver.
85 * @ops : the operation to control devfreq-event device.
87 * Each devfreq-event device is described with this structure.
88 * This structure contains the various data for devfreq-event device.
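As a rough sketch of how the pieces above fit together (assuming the registration helper devm_devfreq_event_add_edev() and the callback signatures declared in this header; the counter access is a placeholder, not a real driver):

#include <linux/devfreq-event.h>

static int my_counter_enable(struct devfreq_event_dev *edev)
{
	/* start the hardware counters here (placeholder) */
	return 0;
}

static int my_counter_get_event(struct devfreq_event_dev *edev,
				struct devfreq_event_data *edata)
{
	/* report busy vs. total cycles for the last polling period (placeholders) */
	edata->load_count = 0;
	edata->total_count = 0;
	return 0;
}

static struct devfreq_event_ops my_counter_ops = {
	.enable    = my_counter_enable,
	.get_event = my_counter_get_event,
};

static struct devfreq_event_desc my_counter_desc = {
	.name = "my-counter",		/* hypothetical name */
	.ops  = &my_counter_ops,
};

/* In probe(), assuming the devm_ helper from this header: */
/* edev = devm_devfreq_event_add_edev(&pdev->dev, &my_counter_desc); */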
H A Dftrace_event.h56 struct trace_event *event);
119 int flags, struct trace_event *event);
135 extern int register_ftrace_event(struct trace_event *event);
136 extern int unregister_ftrace_event(struct trace_event *event);
172 struct ring_buffer_event *event,
175 struct ring_buffer_event *event,
178 struct ring_buffer_event *event,
182 struct ring_buffer_event *event);
211 int (*reg)(struct ftrace_event_call *event,
219 extern int ftrace_event_reg(struct ftrace_event_call *event,
222 int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
231 struct ring_buffer_event *event; member in struct:ftrace_event_buffer
261 * FILTERED - The event has a filter attached
265 * WAS_ENABLED - Set and stays set when an event was ever enabled
266 * (used for module unloading, if a module event is enabled,
291 struct trace_event event; member in struct:ftrace_event_call
300 * bit 3: ftrace internal event (do not enable)
341 * Ftrace event file flags:
342 * ENABLED - The event is enabled
344 * FILTERED - The event has a filter attached
346 * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
347 * SOFT_DISABLED - When set, do not trace the event (even though its
349 * TRIGGER_MODE - When set, invoke the triggers associated with the event
430 struct ring_buffer_event *event);
433 struct ring_buffer_event *event);
441 * @file: The file pointer of the event to test
443 * If any triggers without filters are attached to this event, they
444 * will be called here. If the event is soft disabled and has no
464 * If there are event triggers attached to this event that requires
466 * entry already holds the field information of the current event.
468 * It also checks if the event should be discarded or not.
469 * It is to be discarded if the event is soft disabled and the
470 * event was only recorded to process triggers, or if the event
471 * filter is active and this event did not match the filters.
473 * Returns true if the event is discarded, false otherwise.
478 struct ring_buffer_event *event, __event_trigger_test_discard()
488 ring_buffer_discard_commit(buffer, event); __event_trigger_test_discard()
489 else if (!filter_check_discard(file, entry, buffer, event)) __event_trigger_test_discard()
496 * event_trigger_unlock_commit - handle triggers and finish event commit
497 * @file: The file pointer associated with the event
498 * @buffer: The ring buffer that the event is being written to
499 * @event: The event meta data in the ring buffer
500 * @entry: The event itself
501 * @irq_flags: The state of the interrupts at the start of the event
502 * @pc: The state of the preempt count at the start of the event.
505 * from the event itself. It also tests the event against filters and
506 * if the event is soft disabled and should be discarded.
511 struct ring_buffer_event *event, event_trigger_unlock_commit()
516 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) event_trigger_unlock_commit()
517 trace_buffer_unlock_commit(buffer, event, irq_flags, pc); event_trigger_unlock_commit()
524 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
525 * @file: The file pointer associated with the event
526 * @buffer: The ring buffer that the event is being written to
527 * @event: The event meta data in the ring buffer
528 * @entry: The event itself
529 * @irq_flags: The state of the interrupts at the start of the event
530 * @pc: The state of the preempt count at the start of the event.
533 * from the event itself. It also tests the event against filters and
534 * if the event is soft disabled and should be discarded.
542 struct ring_buffer_event *event, event_trigger_unlock_commit_regs()
548 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) event_trigger_unlock_commit_regs()
549 trace_buffer_unlock_commit_regs(buffer, event, event_trigger_unlock_commit_regs()
582 int trace_set_clr_event(const char *system, const char *event, int set);
608 extern int perf_trace_init(struct perf_event *event);
609 extern void perf_trace_destroy(struct perf_event *event);
610 extern int perf_trace_add(struct perf_event *event, int flags);
611 extern void perf_trace_del(struct perf_event *event, int flags);
612 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
614 extern void ftrace_profile_free_filter(struct perf_event *event);
476 __event_trigger_test_discard(struct ftrace_event_file *file, struct ring_buffer *buffer, struct ring_buffer_event *event, void *entry, enum event_trigger_type *tt) __event_trigger_test_discard() argument
509 event_trigger_unlock_commit(struct ftrace_event_file *file, struct ring_buffer *buffer, struct ring_buffer_event *event, void *entry, unsigned long irq_flags, int pc) event_trigger_unlock_commit() argument
540 event_trigger_unlock_commit_regs(struct ftrace_event_file *file, struct ring_buffer *buffer, struct ring_buffer_event *event, void *entry, unsigned long irq_flags, int pc, struct pt_regs *regs) event_trigger_unlock_commit_regs() argument
H A Dpps_kernel.h43 int event, void *data); /* PPS echo function */
62 __u32 assert_sequence; /* PPS' assert event seq # */
63 __u32 clear_sequence; /* PPS' clear event seq # */
66 int current_mode; /* PPS mode at event time */
68 unsigned int last_ev; /* last PPS event id */
69 wait_queue_head_t queue; /* PPS event queue */
103 struct pps_event_time *ts, int event, void *data);
130 /* Subtract known time delay from PPS event time(s) */ pps_sub_ts()
H A Dtca6416_keypad.h20 int code; /* input event code (KEY_*, SW_*) */
22 int type; /* input event type (EV_KEY, EV_SW) */
H A Dperf_event.h86 * extra PMU register associated with an event
96 * struct hw_perf_event - performance event hardware details:
136 * creation and event initialization.
161 #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
212 * Try and initialize the event for this PMU.
213 * Should return -ENOENT when the @event doesn't match this PMU.
215 int (*event_init) (struct perf_event *event);
218 * Notification that the event was mapped or unmapped. Called
221 void (*event_mapped) (struct perf_event *event); /*optional*/
222 void (*event_unmapped) (struct perf_event *event); /*optional*/
232 int (*add) (struct perf_event *event, int flags);
233 void (*del) (struct perf_event *event, int flags);
240 void (*start) (struct perf_event *event, int flags);
241 void (*stop) (struct perf_event *event, int flags);
244 * Updates the counter value of the event.
246 void (*read) (struct perf_event *event);
271 * Will return the value for perf_event_mmap_page::index for this event,
272 * if no implementation is provided it will default to: event->hw.idx + 1.
274 int (*event_idx) (struct perf_event *event); /*optional */
290 u64 (*count) (struct perf_event *event); /*optional*/
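A minimal sketch of these callbacks, assuming the conventions visible above (event_init() returning -ENOENT for events that belong to another PMU, the PERF_EF_START/RELOAD/UPDATE flags, the PERF_HES_* state bits); the counter programming itself is left as placeholders:

#include <linux/perf_event.h>

static void my_pmu_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
	/* program the hardware counter here (placeholder) */
}

static void my_pmu_stop(struct perf_event *event, int flags)
{
	/* stop the counter; fold the final count into event->count if asked */
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static void my_pmu_read(struct perf_event *event)
{
	/* read the hardware counter and local64_add() the delta to event->count */
}

static int my_pmu_add(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		my_pmu_start(event, PERF_EF_RELOAD);
	return 0;
}

static void my_pmu_del(struct perf_event *event, int flags)
{
	my_pmu_stop(event, PERF_EF_UPDATE);
}

static int my_pmu_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;		/* not ours: the core will try other PMUs */
	return 0;
}

static struct pmu my_pmu = {
	.event_init = my_pmu_event_init,
	.add        = my_pmu_add,
	.del        = my_pmu_del,
	.start      = my_pmu_start,
	.stop       = my_pmu_stop,
	.read       = my_pmu_read,
};

/* perf_pmu_register(&my_pmu, "my_pmu", -1); */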
306 * enum perf_event_active_state - the states of an event
344 * struct perf_event - performance event kernel representation:
386 * These are the total time in nanoseconds that the event
388 * been scheduled in, if this is a per-task event)
392 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
399 * and total_time_running when the event is in INACTIVE or
402 * tstamp_enabled: the notional time when the event was enabled
403 * tstamp_running: the notional time when the event was scheduled on
405 * event was scheduled off.
414 * context time as it was when the event was last scheduled in.
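From userspace the same enabled/running accounting is visible through the read_format fields; below is a small sketch (standard perf_event_open usage, not code from this header) that scales the raw count when the event was multiplexed:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	struct { uint64_t value, enabled, running; } buf;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &buf, sizeof(buf)) == sizeof(buf) && buf.running)
		printf("raw=%llu scaled=%llu\n",
		       (unsigned long long)buf.value,
		       (unsigned long long)(buf.value * buf.enabled / buf.running));

	close(fd);
	return 0;
}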
490 struct perf_cgroup *cgrp; /* cgroup event is attach to */
498 * struct perf_event_context - event context structure
552 * Number of contexts where an event can trigger:
558 * struct perf_event_cpu_context - per cpu event context structure
572 struct perf_event *event; member in struct:perf_output_handle
602 * if there is no cgroup event for the current CPU context.
615 struct perf_event *event);
642 extern int perf_event_refresh(struct perf_event *event, int refresh);
643 extern void perf_event_update_userpage(struct perf_event *event);
644 extern int perf_event_release_kernel(struct perf_event *event);
653 extern u64 perf_event_read_value(struct perf_event *event,
723 struct perf_event *event);
726 struct perf_event *event,
729 extern int perf_event_overflow(struct perf_event *event,
733 static inline bool is_sampling_event(struct perf_event *event) is_sampling_event() argument
735 return event->attr.sample_period != 0; is_sampling_event()
739 * Return 1 for a software event, 0 for a hardware event
741 static inline int is_software_event(struct perf_event *event) is_software_event() argument
743 return event->pmu->task_ctx_nr == perf_sw_context; is_software_event()
813 static inline u64 __perf_event_count(struct perf_event *event) __perf_event_count() argument
815 return local64_read(&event->count) + atomic64_read(&event->child_count); __perf_event_count()
874 extern void perf_bp_event(struct perf_event *event, void *data);
882 static inline bool has_branch_stack(struct perf_event *event) has_branch_stack() argument
884 return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; has_branch_stack()
887 static inline bool needs_branch_stack(struct perf_event *event) needs_branch_stack() argument
889 return event->attr.branch_sample_type != 0; needs_branch_stack()
892 static inline bool has_aux(struct perf_event *event) has_aux() argument
894 return event->pmu->setup_aux; has_aux()
898 struct perf_event *event, unsigned int size);
906 extern u64 perf_swevent_set_period(struct perf_event *event);
907 extern void perf_event_enable(struct perf_event *event);
908 extern void perf_event_disable(struct perf_event *event);
914 struct perf_event *event) { return NULL; }
936 static inline int perf_event_refresh(struct perf_event *event, int refresh) perf_event_refresh() argument
946 perf_bp_event(struct perf_event *event, void *data) { } perf_bp_event() argument
960 static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } perf_event_enable() argument
961 static inline void perf_event_enable(struct perf_event *event) { } perf_event_disable() argument
962 static inline void perf_event_disable(struct perf_event *event) { } __perf_event_disable() argument
913 perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event) perf_aux_output_begin() argument
/linux-4.1.27/tools/perf/tests/
H A Dkeep-tracking.c28 union perf_event *event; find_comm() local
33 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { find_comm()
34 if (event->header.type == PERF_RECORD_COMM && find_comm()
35 (pid_t)event->comm.pid == getpid() && find_comm()
36 (pid_t)event->comm.tid == getpid() && find_comm()
37 strcmp(event->comm.comm, comm) == 0) find_comm()
46 * test__keep_tracking - test using a dummy software event to keep tracking.
49 * when an event is disabled but a dummy software event is not disabled. If the
101 * First, test that a 'comm' event can be found when the event is test__keep_tracking()
114 pr_debug("First time, failed to find tracking event.\n"); test__keep_tracking()
119 * Secondly, test that a 'comm' event can be found when the event is test__keep_tracking()
120 * disabled with the dummy event still enabled. test__keep_tracking()
136 pr_debug("Seconf time, failed to find tracking event.\n"); test__keep_tracking()
H A Dswitch-tracking.c63 union perf_event *event, const char *comm, int nr) check_comm()
65 if (event->header.type == PERF_RECORD_COMM && check_comm()
66 (pid_t)event->comm.pid == getpid() && check_comm()
67 (pid_t)event->comm.tid == getpid() && check_comm()
68 strcmp(event->comm.comm, comm) == 0) { check_comm()
70 pr_debug("Duplicate comm event\n"); check_comm()
74 pr_debug3("comm event: %s nr: %d\n", event->comm.comm, nr); check_comm()
114 union perf_event *event, process_sample_event()
122 if (perf_evlist__parse_sample(evlist, event, &sample)) { process_sample_event()
150 pr_debug3("cycles event\n"); process_sample_event()
163 static int process_event(struct perf_evlist *evlist, union perf_event *event, process_event() argument
166 if (event->header.type == PERF_RECORD_SAMPLE) process_event()
167 return process_sample_event(evlist, event, switch_tracking); process_event()
169 if (event->header.type == PERF_RECORD_COMM) { process_event()
172 err = check_comm(switch_tracking, event, "Test COMM 1", 0); process_event()
176 err = check_comm(switch_tracking, event, "Test COMM 2", 1); process_event()
180 err = check_comm(switch_tracking, event, "Test COMM 3", 2); process_event()
184 err = check_comm(switch_tracking, event, "Test COMM 4", 3); process_event()
189 pr_debug("Unexpected comm event\n"); process_event()
199 union perf_event *event; member in struct:event_node
204 union perf_event *event) add_event()
214 node->event = event; add_event()
217 if (perf_evlist__parse_sample(evlist, event, &sample)) { add_event()
223 pr_debug("event with no time\n"); add_event()
255 union perf_event *event; process_events() local
262 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { process_events()
264 ret = add_event(evlist, &events, event); process_events()
285 ret = process_event(evlist, events_array[pos].event, process_events()
349 /* First event */ test__switch_tracking()
352 pr_debug("Failed to parse event dummy:u\n"); test__switch_tracking()
358 /* Second event */ test__switch_tracking()
361 pr_debug("Failed to parse event cycles:u\n"); test__switch_tracking()
367 /* Third event */ test__switch_tracking()
376 pr_debug("Failed to parse event %s\n", sched_switch); test__switch_tracking()
389 /* Test moving an event to the front */ test__switch_tracking()
391 pr_debug("cycles event already at front"); test__switch_tracking()
396 pr_debug("Failed to move cycles event to front"); test__switch_tracking()
403 /* Fourth event */ test__switch_tracking()
406 pr_debug("Failed to parse event dummy:u\n"); test__switch_tracking()
422 /* Check moved event is still at the front */ test__switch_tracking()
424 pr_debug("Front event no longer at front"); test__switch_tracking()
428 /* Check tracking event is tracking */ test__switch_tracking()
430 pr_debug("Tracking event not tracking\n"); test__switch_tracking()
438 pr_debug("Non-tracking event is tracking\n"); evlist__for_each()
541 /* Check cycles event got enabled */
547 /* Check cycles event got disabled */
549 pr_debug("cycles events even though event was disabled\n");
553 /* Check cycles event got enabled again */
62 check_comm(struct switch_tracking *switch_tracking, union perf_event *event, const char *comm, int nr) check_comm() argument
113 process_sample_event(struct perf_evlist *evlist, union perf_event *event, struct switch_tracking *switch_tracking) process_sample_event() argument
203 add_event(struct perf_evlist *evlist, struct list_head *events, union perf_event *event) add_event() argument
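For context, the process_events()/add_event() hits above buffer records together with their parsed timestamps, sort them, and only then dispatch them in time order. A minimal standalone sketch of that pattern follows; the types, names and comparator here are illustrative, not taken from the tree:

#include <stdint.h>
#include <stdlib.h>

struct rec {
	uint64_t time;	/* sample timestamp */
	int nr;		/* insertion order, used as a stable tie-break */
	void *data;	/* the raw record */
};

static int cmp_rec(const void *a, const void *b)
{
	const struct rec *ra = a, *rb = b;

	if (ra->time != rb->time)
		return ra->time < rb->time ? -1 : 1;
	return ra->nr - rb->nr;
}

static void process_in_time_order(struct rec *recs, size_t n,
				  void (*handle)(struct rec *))
{
	size_t i;

	qsort(recs, n, sizeof(*recs), cmp_rec);
	for (i = 0; i < n; i++)
		handle(&recs[i]);
}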
H A Dparse-no-sample-id-all.c6 #include "event.h"
12 static int process_event(struct perf_evlist **pevlist, union perf_event *event) process_event() argument
16 if (event->header.type == PERF_RECORD_HEADER_ATTR) { process_event()
17 if (perf_event__process_attr(NULL, event, pevlist)) { process_event()
24 if (event->header.type >= PERF_RECORD_USER_TYPE_START) process_event()
30 if (perf_evlist__parse_sample(*pevlist, event, &sample)) { process_event()
65 * more than one selected event, so this test processes three events: 2
66 * attributes representing the selected events and one mmap event.
H A Dperf-time-to-tsc.c56 union perf_event *event; test__perf_time_to_tsc() local
108 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { test__perf_time_to_tsc()
111 if (event->header.type != PERF_RECORD_COMM || test__perf_time_to_tsc()
112 (pid_t)event->comm.pid != getpid() || test__perf_time_to_tsc()
113 (pid_t)event->comm.tid != getpid()) test__perf_time_to_tsc()
116 if (strcmp(event->comm.comm, comm1) == 0) { test__perf_time_to_tsc()
117 CHECK__(perf_evsel__parse_sample(evsel, event, test__perf_time_to_tsc()
121 if (strcmp(event->comm.comm, comm2) == 0) { test__perf_time_to_tsc()
122 CHECK__(perf_evsel__parse_sample(evsel, event, test__perf_time_to_tsc()
138 pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n", test__perf_time_to_tsc()
142 pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n", test__perf_time_to_tsc()
H A Dopen-syscall-tp-fields.c67 * Generate the event: test__syscall_open_tp_fields()
75 union perf_event *event; test__syscall_open_tp_fields() local
77 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { test__syscall_open_tp_fields()
78 const u32 type = event->header.type; test__syscall_open_tp_fields()
89 err = perf_evsel__parse_sample(evsel, event, &sample); test__syscall_open_tp_fields()
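The test loops above pull records out of the mmap'd ring buffer via perf_evlist__mmap_read(). Roughly what that helper does under the hood, as a simplified sketch (wrap-around of a record across the buffer end and record dispatch are omitted; `base` is the start of the data pages and `size` their power-of-two length):

#include <stdint.h>
#include <linux/perf_event.h>

static void drain_ring(struct perf_event_mmap_page *pc, char *base,
		       uint64_t size,
		       void (*handle)(struct perf_event_header *))
{
	uint64_t head = pc->data_head;
	uint64_t tail = pc->data_tail;

	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* read data before trusting head */

	while (tail < head) {
		struct perf_event_header *hdr =
			(void *)(base + (tail & (size - 1)));

		handle(hdr);
		tail += hdr->size;
	}

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* finish reads before freeing space */
	pc->data_tail = tail;				/* tell the kernel the space is free */
}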
/linux-4.1.27/kernel/events/
H A Dcore.c129 static bool is_kernel_event(struct perf_event *event) is_kernel_event() argument
131 return event->owner == EVENT_OWNER_KERNEL; is_kernel_event()
170 * perf event paranoia level:
182 * max perf event sample rate
318 static u64 perf_event_time(struct perf_event *event);
332 static inline u64 perf_event_clock(struct perf_event *event) perf_event_clock() argument
334 return event->clock(); perf_event_clock()
362 perf_cgroup_match(struct perf_event *event) perf_cgroup_match() argument
364 struct perf_event_context *ctx = event->ctx; perf_cgroup_match()
367 /* @event doesn't care about cgroup */ perf_cgroup_match()
368 if (!event->cgrp) perf_cgroup_match()
376 * Cgroup scoping is recursive. An event enabled for a cgroup is perf_cgroup_match()
378 * cgroup is a descendant of @event's (the test covers identity perf_cgroup_match()
382 event->cgrp->css.cgroup); perf_cgroup_match()
385 static inline void perf_detach_cgroup(struct perf_event *event) perf_detach_cgroup() argument
387 css_put(&event->cgrp->css); perf_detach_cgroup()
388 event->cgrp = NULL; perf_detach_cgroup()
391 static inline int is_cgroup_event(struct perf_event *event) is_cgroup_event() argument
393 return event->cgrp != NULL; is_cgroup_event()
396 static inline u64 perf_cgroup_event_time(struct perf_event *event) perf_cgroup_event_time() argument
400 t = per_cpu_ptr(event->cgrp->info, event->cpu); perf_cgroup_event_time()
424 static inline void update_cgrp_time_from_event(struct perf_event *event) update_cgrp_time_from_event() argument
432 if (!is_cgroup_event(event)) update_cgrp_time_from_event()
439 if (cgrp == event->cgrp) update_cgrp_time_from_event()
440 __update_cgrp_time(event->cgrp); update_cgrp_time_from_event()
463 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
586 static inline int perf_cgroup_connect(int fd, struct perf_event *event, perf_cgroup_connect() argument
606 event->cgrp = cgrp; perf_cgroup_connect()
614 perf_detach_cgroup(event); perf_cgroup_connect()
623 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) perf_cgroup_set_shadow_time() argument
626 t = per_cpu_ptr(event->cgrp->info, event->cpu); perf_cgroup_set_shadow_time()
627 event->shadow_ctx_time = now - t->timestamp; perf_cgroup_set_shadow_time()
631 perf_cgroup_defer_enabled(struct perf_event *event) perf_cgroup_defer_enabled() argument
635 * the event's, we need to remember to call the perf_cgroup_defer_enabled()
639 if (is_cgroup_event(event) && !perf_cgroup_match(event)) perf_cgroup_defer_enabled()
640 event->cgrp_defer_enabled = 1; perf_cgroup_defer_enabled()
644 perf_cgroup_mark_enabled(struct perf_event *event, perf_cgroup_mark_enabled() argument
648 u64 tstamp = perf_event_time(event); perf_cgroup_mark_enabled()
650 if (!event->cgrp_defer_enabled) perf_cgroup_mark_enabled()
653 event->cgrp_defer_enabled = 0; perf_cgroup_mark_enabled()
655 event->tstamp_enabled = tstamp - event->total_time_enabled; perf_cgroup_mark_enabled()
656 list_for_each_entry(sub, &event->sibling_list, group_entry) { perf_cgroup_mark_enabled()
666 perf_cgroup_match(struct perf_event *event) perf_cgroup_match() argument
671 static inline void perf_detach_cgroup(struct perf_event *event) perf_detach_cgroup() argument
674 static inline int is_cgroup_event(struct perf_event *event) is_cgroup_event() argument
679 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) perf_cgroup_event_cgrp_time() argument
684 static inline void update_cgrp_time_from_event(struct perf_event *event) update_cgrp_time_from_event() argument
702 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, perf_cgroup_connect() argument
721 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) perf_cgroup_set_shadow_time() argument
725 static inline u64 perf_cgroup_event_time(struct perf_event *event) perf_cgroup_event_time() argument
731 perf_cgroup_defer_enabled(struct perf_event *event) perf_cgroup_defer_enabled() argument
736 perf_cgroup_mark_enabled(struct perf_event *event, perf_cgroup_mark_enabled() argument
942 * because the sys_perf_event_open() case will install a new event and break
953 * quiesce the event, after which we can install it in the new location. This
954 * means that only external vectors (perf_fops, prctl) can perturb the event
958 * However; because event->ctx can change while we're waiting to acquire
971 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) perf_event_ctx_lock_nested() argument
977 ctx = ACCESS_ONCE(event->ctx); perf_event_ctx_lock_nested()
985 if (event->ctx != ctx) { perf_event_ctx_lock_nested()
995 perf_event_ctx_lock(struct perf_event *event) perf_event_ctx_lock() argument
997 return perf_event_ctx_lock_nested(event, 0); perf_event_ctx_lock()
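The perf_event_ctx_lock_nested() comments above describe why taking the context mutex has to re-check event->ctx and retry: the pointer can change while the caller sleeps on the mutex. A stripped-down user-space illustration of that lock-and-revalidate pattern (it deliberately omits the reference counting the real code also needs, and the names are illustrative):

#include <pthread.h>
#include <stdatomic.h>

struct ctx { pthread_mutex_t mutex; };
struct event { _Atomic(struct ctx *) ctx; };

static struct ctx *event_ctx_lock(struct event *e)
{
	struct ctx *c;

again:
	c = atomic_load(&e->ctx);
	pthread_mutex_lock(&c->mutex);
	if (atomic_load(&e->ctx) != c) {
		/* the context moved while we slept; drop the stale lock and retry */
		pthread_mutex_unlock(&c->mutex);
		goto again;
	}
	return c;
}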
1000 static void perf_event_ctx_unlock(struct perf_event *event, perf_event_ctx_unlock() argument
1026 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) perf_event_pid() argument
1031 if (event->parent) perf_event_pid()
1032 event = event->parent; perf_event_pid()
1034 return task_tgid_nr_ns(p, event->ns); perf_event_pid()
1037 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) perf_event_tid() argument
1042 if (event->parent) perf_event_tid()
1043 event = event->parent; perf_event_tid()
1045 return task_pid_nr_ns(p, event->ns); perf_event_tid()
1049 * If we inherit events we want to return the parent event id
1052 static u64 primary_event_id(struct perf_event *event) primary_event_id() argument
1054 u64 id = event->id; primary_event_id()
1056 if (event->parent) primary_event_id()
1057 id = event->parent->id; primary_event_id()
1153 static u64 perf_event_time(struct perf_event *event) perf_event_time() argument
1155 struct perf_event_context *ctx = event->ctx; perf_event_time()
1157 if (is_cgroup_event(event)) perf_event_time()
1158 return perf_cgroup_event_time(event); perf_event_time()
1164 * Update the total_time_enabled and total_time_running fields for an event.
1167 static void update_event_times(struct perf_event *event) update_event_times() argument
1169 struct perf_event_context *ctx = event->ctx; update_event_times()
1172 if (event->state < PERF_EVENT_STATE_INACTIVE || update_event_times()
1173 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) update_event_times()
1177 * the time the event was enabled AND active update_event_times()
1185 if (is_cgroup_event(event)) update_event_times()
1186 run_end = perf_cgroup_event_time(event); update_event_times()
1190 run_end = event->tstamp_stopped; update_event_times()
1192 event->total_time_enabled = run_end - event->tstamp_enabled; update_event_times()
1194 if (event->state == PERF_EVENT_STATE_INACTIVE) update_event_times()
1195 run_end = event->tstamp_stopped; update_event_times()
1197 run_end = perf_event_time(event); update_event_times()
1199 event->total_time_running = run_end - event->tstamp_running; update_event_times()
1208 struct perf_event *event; update_group_times() local
1211 list_for_each_entry(event, &leader->sibling_list, group_entry) update_group_times()
1212 update_event_times(event); update_group_times()
1216 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) ctx_group_list() argument
1218 if (event->attr.pinned) ctx_group_list()
1225 * Add an event to the lists for its context.
1229 list_add_event(struct perf_event *event, struct perf_event_context *ctx) list_add_event() argument
1231 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); list_add_event()
1232 event->attach_state |= PERF_ATTACH_CONTEXT; list_add_event()
1235 * If we're a stand alone event or group leader, we go to the context list_add_event()
1239 if (event->group_leader == event) { list_add_event()
1242 if (is_software_event(event)) list_add_event()
1243 event->group_flags |= PERF_GROUP_SOFTWARE; list_add_event()
1245 list = ctx_group_list(event, ctx); list_add_event()
1246 list_add_tail(&event->group_entry, list); list_add_event()
1249 if (is_cgroup_event(event)) list_add_event()
1252 list_add_rcu(&event->event_entry, &ctx->event_list); list_add_event()
1254 if (event->attr.inherit_stat) list_add_event()
1261 * Initialize event state based on the perf_event_attr::disabled.
1263 static inline void perf_event__state_init(struct perf_event *event) perf_event__state_init() argument
1265 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : perf_event__state_init()
1273 static void perf_event__read_size(struct perf_event *event) perf_event__read_size() argument
1279 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) perf_event__read_size()
1282 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) perf_event__read_size()
1285 if (event->attr.read_format & PERF_FORMAT_ID) perf_event__read_size()
1288 if (event->attr.read_format & PERF_FORMAT_GROUP) { perf_event__read_size()
1289 nr += event->group_leader->nr_siblings; perf_event__read_size()
1294 event->read_size = size; perf_event__read_size()
1297 static void perf_event__header_size(struct perf_event *event) perf_event__header_size() argument
1300 u64 sample_type = event->attr.sample_type; perf_event__header_size()
1303 perf_event__read_size(event); perf_event__header_size()
1318 size += event->read_size; perf_event__header_size()
1326 event->header_size = size; perf_event__header_size()
1329 static void perf_event__id_header_size(struct perf_event *event) perf_event__id_header_size() argument
1332 u64 sample_type = event->attr.sample_type; perf_event__id_header_size()
1353 event->id_header_size = size; perf_event__id_header_size()
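perf_event__read_size() above determines how many bytes a read() on the event fd returns for a given read_format. The same calculation restated as a standalone user-space helper, which can be handy for sizing the buffer on the consumer side (nr_siblings only matters when PERF_FORMAT_GROUP is set):

#include <stdint.h>
#include <stddef.h>
#include <linux/perf_event.h>

static size_t perf_read_size(uint64_t read_format, int nr_siblings)
{
	size_t entry = sizeof(uint64_t);	/* the counter value itself */
	size_t size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);	/* the leading "nr" field */
	}

	return size + entry * nr;
}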
1356 static void perf_group_attach(struct perf_event *event) perf_group_attach() argument
1358 struct perf_event *group_leader = event->group_leader, *pos; perf_group_attach()
1363 if (event->attach_state & PERF_ATTACH_GROUP) perf_group_attach()
1366 event->attach_state |= PERF_ATTACH_GROUP; perf_group_attach()
1368 if (group_leader == event) perf_group_attach()
1371 WARN_ON_ONCE(group_leader->ctx != event->ctx); perf_group_attach()
1374 !is_software_event(event)) perf_group_attach()
1377 list_add_tail(&event->group_entry, &group_leader->sibling_list); perf_group_attach()
1387 * Remove an event from the lists for its context.
1391 list_del_event(struct perf_event *event, struct perf_event_context *ctx) list_del_event() argument
1395 WARN_ON_ONCE(event->ctx != ctx); list_del_event()
1401 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) list_del_event()
1404 event->attach_state &= ~PERF_ATTACH_CONTEXT; list_del_event()
1406 if (is_cgroup_event(event)) { list_del_event()
1419 if (event->attr.inherit_stat) list_del_event()
1422 list_del_rcu(&event->event_entry); list_del_event()
1424 if (event->group_leader == event) list_del_event()
1425 list_del_init(&event->group_entry); list_del_event()
1427 update_group_times(event); list_del_event()
1430 * If event was in error state, then keep it list_del_event()
1434 * of the event list_del_event()
1436 if (event->state > PERF_EVENT_STATE_OFF) list_del_event()
1437 event->state = PERF_EVENT_STATE_OFF; list_del_event()
1442 static void perf_group_detach(struct perf_event *event) perf_group_detach() argument
1450 if (!(event->attach_state & PERF_ATTACH_GROUP)) perf_group_detach()
1453 event->attach_state &= ~PERF_ATTACH_GROUP; perf_group_detach()
1458 if (event->group_leader != event) { perf_group_detach()
1459 list_del_init(&event->group_entry); perf_group_detach()
1460 event->group_leader->nr_siblings--; perf_group_detach()
1464 if (!list_empty(&event->group_entry)) perf_group_detach()
1465 list = &event->group_entry; perf_group_detach()
1468 * If this was a group event with sibling events then perf_group_detach()
1472 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { perf_group_detach()
1478 sibling->group_flags = event->group_flags; perf_group_detach()
1480 WARN_ON_ONCE(sibling->ctx != event->ctx); perf_group_detach()
1484 perf_event__header_size(event->group_leader); perf_group_detach()
1486 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) perf_group_detach()
1491 * User event without the task.
1493 static bool is_orphaned_event(struct perf_event *event) is_orphaned_event() argument
1495 return event && !is_kernel_event(event) && !event->owner; is_orphaned_event()
1502 static bool is_orphaned_child(struct perf_event *event) is_orphaned_child() argument
1504 return is_orphaned_event(event->parent); is_orphaned_child()
1530 event_filter_match(struct perf_event *event) event_filter_match() argument
1532 return (event->cpu == -1 || event->cpu == smp_processor_id()) event_filter_match()
1533 && perf_cgroup_match(event); event_filter_match()
1537 event_sched_out(struct perf_event *event, event_sched_out() argument
1541 u64 tstamp = perf_event_time(event); event_sched_out()
1544 WARN_ON_ONCE(event->ctx != ctx); event_sched_out()
1548 * An event which could not be activated because of event_sched_out()
1553 if (event->state == PERF_EVENT_STATE_INACTIVE event_sched_out()
1554 && !event_filter_match(event)) { event_sched_out()
1555 delta = tstamp - event->tstamp_stopped; event_sched_out()
1556 event->tstamp_running += delta; event_sched_out()
1557 event->tstamp_stopped = tstamp; event_sched_out()
1560 if (event->state != PERF_EVENT_STATE_ACTIVE) event_sched_out()
1563 perf_pmu_disable(event->pmu); event_sched_out()
1565 event->tstamp_stopped = tstamp; event_sched_out()
1566 event->pmu->del(event, 0); event_sched_out()
1567 event->oncpu = -1; event_sched_out()
1568 event->state = PERF_EVENT_STATE_INACTIVE; event_sched_out()
1569 if (event->pending_disable) { event_sched_out()
1570 event->pending_disable = 0; event_sched_out()
1571 event->state = PERF_EVENT_STATE_OFF; event_sched_out()
1574 if (!is_software_event(event)) event_sched_out()
1578 if (event->attr.freq && event->attr.sample_freq) event_sched_out()
1580 if (event->attr.exclusive || !cpuctx->active_oncpu) event_sched_out()
1583 if (is_orphaned_child(event)) event_sched_out()
1586 perf_pmu_enable(event->pmu); event_sched_out()
1594 struct perf_event *event; group_sched_out() local
1602 list_for_each_entry(event, &group_event->sibling_list, group_entry) group_sched_out()
1603 event_sched_out(event, cpuctx, ctx); group_sched_out()
1610 struct perf_event *event; member in struct:remove_event
1615 * Cross CPU call to remove a performance event
1617 * We disable the event on the hardware level first. After that we
1623 struct perf_event *event = re->event; __perf_remove_from_context() local
1624 struct perf_event_context *ctx = event->ctx; __perf_remove_from_context()
1628 event_sched_out(event, cpuctx, ctx); __perf_remove_from_context()
1630 perf_group_detach(event); __perf_remove_from_context()
1631 list_del_event(event, ctx); __perf_remove_from_context()
1643 * Remove the event from a task's (or a CPU's) list of events.
1648 * If event->ctx is a cloned context, callers must make sure that
1649 * every task struct that event->ctx->task could possibly point to
1655 static void perf_remove_from_context(struct perf_event *event, bool detach_group) perf_remove_from_context() argument
1657 struct perf_event_context *ctx = event->ctx; perf_remove_from_context()
1660 .event = event, perf_remove_from_context()
1673 cpu_function_call(event->cpu, __perf_remove_from_context, &re); perf_remove_from_context()
1697 * Since the task isn't running, it's safe to remove the event, us perf_remove_from_context()
1701 perf_group_detach(event); perf_remove_from_context()
1702 list_del_event(event, ctx); perf_remove_from_context()
1707 * Cross CPU call to disable a performance event
1711 struct perf_event *event = info; __perf_event_disable() local
1712 struct perf_event_context *ctx = event->ctx; __perf_event_disable()
1716 * If this is a per-task event, need to check whether this __perf_event_disable()
1717 * event's task is the current task on this cpu. __perf_event_disable()
1728 * If the event is on, turn it off. __perf_event_disable()
1731 if (event->state >= PERF_EVENT_STATE_INACTIVE) { __perf_event_disable()
1733 update_cgrp_time_from_event(event); __perf_event_disable()
1734 update_group_times(event); __perf_event_disable()
1735 if (event == event->group_leader) __perf_event_disable()
1736 group_sched_out(event, cpuctx, ctx); __perf_event_disable()
1738 event_sched_out(event, cpuctx, ctx); __perf_event_disable()
1739 event->state = PERF_EVENT_STATE_OFF; __perf_event_disable()
1748 * Disable an event.
1750 * If event->ctx is a cloned context, callers must make sure that
1751 * every task struct that event->ctx->task could possibly point to
1754 * hold the top-level event's child_mutex, so any descendant that
1756 * When called from perf_pending_event it's OK because event->ctx
1760 static void _perf_event_disable(struct perf_event *event) _perf_event_disable() argument
1762 struct perf_event_context *ctx = event->ctx; _perf_event_disable()
1767 * Disable the event on the cpu that it's on _perf_event_disable()
1769 cpu_function_call(event->cpu, __perf_event_disable, event); _perf_event_disable()
1774 if (!task_function_call(task, __perf_event_disable, event)) _perf_event_disable()
1779 * If the event is still active, we need to retry the cross-call. _perf_event_disable()
1781 if (event->state == PERF_EVENT_STATE_ACTIVE) { _perf_event_disable()
1795 if (event->state == PERF_EVENT_STATE_INACTIVE) { _perf_event_disable()
1796 update_group_times(event); _perf_event_disable()
1797 event->state = PERF_EVENT_STATE_OFF; _perf_event_disable()
1806 void perf_event_disable(struct perf_event *event) perf_event_disable() argument
1810 ctx = perf_event_ctx_lock(event); perf_event_disable()
1811 _perf_event_disable(event); perf_event_disable()
1812 perf_event_ctx_unlock(event, ctx); perf_event_disable()
1816 static void perf_set_shadow_time(struct perf_event *event, perf_set_shadow_time() argument
1835 * - event is guaranteed scheduled in perf_set_shadow_time()
1845 if (is_cgroup_event(event)) perf_set_shadow_time()
1846 perf_cgroup_set_shadow_time(event, tstamp); perf_set_shadow_time()
1848 event->shadow_ctx_time = tstamp - ctx->timestamp; perf_set_shadow_time()
1853 static void perf_log_throttle(struct perf_event *event, int enable);
1854 static void perf_log_itrace_start(struct perf_event *event);
1857 event_sched_in(struct perf_event *event, event_sched_in() argument
1861 u64 tstamp = perf_event_time(event); event_sched_in()
1866 if (event->state <= PERF_EVENT_STATE_OFF) event_sched_in()
1869 event->state = PERF_EVENT_STATE_ACTIVE; event_sched_in()
1870 event->oncpu = smp_processor_id(); event_sched_in()
1877 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { event_sched_in()
1878 perf_log_throttle(event, 1); event_sched_in()
1879 event->hw.interrupts = 0; event_sched_in()
1887 perf_pmu_disable(event->pmu); event_sched_in()
1889 perf_set_shadow_time(event, ctx, tstamp); event_sched_in()
1891 perf_log_itrace_start(event); event_sched_in()
1893 if (event->pmu->add(event, PERF_EF_START)) { event_sched_in()
1894 event->state = PERF_EVENT_STATE_INACTIVE; event_sched_in()
1895 event->oncpu = -1; event_sched_in()
1900 event->tstamp_running += tstamp - event->tstamp_stopped; event_sched_in()
1902 if (!is_software_event(event)) event_sched_in()
1906 if (event->attr.freq && event->attr.sample_freq) event_sched_in()
1909 if (event->attr.exclusive) event_sched_in()
1912 if (is_orphaned_child(event)) event_sched_in()
1916 perf_pmu_enable(event->pmu); event_sched_in()
1926 struct perf_event *event, *partial_group = NULL; group_sched_in() local
1945 list_for_each_entry(event, &group_event->sibling_list, group_entry) { group_sched_in()
1946 if (event_sched_in(event, cpuctx, ctx)) { group_sched_in()
1947 partial_group = event; group_sched_in()
1959 * The events up to the failed event are scheduled out normally, group_sched_in()
1967 * the time the event was actually stopped, such that time delta group_sched_in()
1970 list_for_each_entry(event, &group_event->sibling_list, group_entry) { group_sched_in()
1971 if (event == partial_group) group_sched_in()
1975 event->tstamp_running += now - event->tstamp_stopped; group_sched_in()
1976 event->tstamp_stopped = now; group_sched_in()
1978 event_sched_out(event, cpuctx, ctx); group_sched_in()
1991 * Work out whether we can put this event group on the CPU now.
1993 static int group_can_go_on(struct perf_event *event, group_can_go_on() argument
2000 if (event->group_flags & PERF_GROUP_SOFTWARE) group_can_go_on()
2012 if (event->attr.exclusive && cpuctx->active_oncpu) group_can_go_on()
2021 static void add_event_to_ctx(struct perf_event *event, add_event_to_ctx() argument
2024 u64 tstamp = perf_event_time(event); add_event_to_ctx()
2026 list_add_event(event, ctx); add_event_to_ctx()
2027 perf_group_attach(event); add_event_to_ctx()
2028 event->tstamp_enabled = tstamp; add_event_to_ctx()
2029 event->tstamp_running = tstamp; add_event_to_ctx()
2030 event->tstamp_stopped = tstamp; add_event_to_ctx()
2053 * Cross CPU call to install and enable a performance event
2059 struct perf_event *event = info; __perf_install_in_context() local
2060 struct perf_event_context *ctx = event->ctx; __perf_install_in_context()
2095 * matches event->cgrp. Must be done before __perf_install_in_context()
2098 update_cgrp_time_from_event(event); __perf_install_in_context()
2100 add_event_to_ctx(event, ctx); __perf_install_in_context()
2114 * Attach a performance event to a context
2116 * First we add the event to the list with the hardware enable bit
2117 * in event->hw_config cleared.
2119 * If the event is attached to a task which is on a CPU we use a smp
2125 struct perf_event *event, perf_install_in_context()
2132 event->ctx = ctx; perf_install_in_context()
2133 if (event->cpu != -1) perf_install_in_context()
2134 event->cpu = cpu; perf_install_in_context()
2141 cpu_function_call(cpu, __perf_install_in_context, event); perf_install_in_context()
2146 if (!task_function_call(task, __perf_install_in_context, event)) perf_install_in_context()
2165 * Since the task isn't running, it's safe to add the event, us holding perf_install_in_context()
2168 add_event_to_ctx(event, ctx); perf_install_in_context()
2173 * Put an event into inactive state and update time fields.
2180 static void __perf_event_mark_enabled(struct perf_event *event) __perf_event_mark_enabled() argument
2183 u64 tstamp = perf_event_time(event); __perf_event_mark_enabled()
2185 event->state = PERF_EVENT_STATE_INACTIVE; __perf_event_mark_enabled()
2186 event->tstamp_enabled = tstamp - event->total_time_enabled; __perf_event_mark_enabled()
2187 list_for_each_entry(sub, &event->sibling_list, group_entry) { __perf_event_mark_enabled()
2194 * Cross CPU call to enable a performance event
2198 struct perf_event *event = info; __perf_event_enable() local
2199 struct perf_event_context *ctx = event->ctx; __perf_event_enable()
2200 struct perf_event *leader = event->group_leader; __perf_event_enable()
2219 if (event->state >= PERF_EVENT_STATE_INACTIVE) __perf_event_enable()
2227 __perf_event_mark_enabled(event); __perf_event_enable()
2229 if (!event_filter_match(event)) { __perf_event_enable()
2230 if (is_cgroup_event(event)) __perf_event_enable()
2231 perf_cgroup_defer_enabled(event); __perf_event_enable()
2236 * If the event is in a group and isn't the group leader, __perf_event_enable()
2239 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) __perf_event_enable()
2242 if (!group_can_go_on(event, cpuctx, 1)) { __perf_event_enable()
2245 if (event == leader) __perf_event_enable()
2246 err = group_sched_in(event, cpuctx, ctx); __perf_event_enable()
2248 err = event_sched_in(event, cpuctx, ctx); __perf_event_enable()
2253 * If this event can't go on and it's part of a __perf_event_enable()
2256 if (leader != event) { __perf_event_enable()
2273 * Enable an event.
2275 * If event->ctx is a cloned context, callers must make sure that
2276 * every task struct that event->ctx->task could possibly point to
2281 static void _perf_event_enable(struct perf_event *event) _perf_event_enable() argument
2283 struct perf_event_context *ctx = event->ctx; _perf_event_enable()
2288 * Enable the event on the cpu that it's on _perf_event_enable()
2290 cpu_function_call(event->cpu, __perf_event_enable, event); _perf_event_enable()
2295 if (event->state >= PERF_EVENT_STATE_INACTIVE) _perf_event_enable()
2299 * If the event is in error state, clear that first. _perf_event_enable()
2300 * That way, if we see the event in error state below, we _perf_event_enable()
2305 if (event->state == PERF_EVENT_STATE_ERROR) _perf_event_enable()
2306 event->state = PERF_EVENT_STATE_OFF; _perf_event_enable()
2310 __perf_event_mark_enabled(event); _perf_event_enable()
2316 if (!task_function_call(task, __perf_event_enable, event)) _perf_event_enable()
2322 * If the context is active and the event is still off, _perf_event_enable()
2325 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { _perf_event_enable()
2341 void perf_event_enable(struct perf_event *event) perf_event_enable() argument
2345 ctx = perf_event_ctx_lock(event); perf_event_enable()
2346 _perf_event_enable(event); perf_event_enable()
2347 perf_event_ctx_unlock(event, ctx); perf_event_enable()
2351 static int _perf_event_refresh(struct perf_event *event, int refresh) _perf_event_refresh() argument
2356 if (event->attr.inherit || !is_sampling_event(event)) _perf_event_refresh()
2359 atomic_add(refresh, &event->event_limit); _perf_event_refresh()
2360 _perf_event_enable(event); _perf_event_refresh()
2368 int perf_event_refresh(struct perf_event *event, int refresh) perf_event_refresh() argument
2373 ctx = perf_event_ctx_lock(event); perf_event_refresh()
2374 ret = _perf_event_refresh(event, refresh); perf_event_refresh()
2375 perf_event_ctx_unlock(event, ctx); perf_event_refresh()
2385 struct perf_event *event; ctx_sched_out() local
2399 list_for_each_entry(event, &ctx->pinned_groups, group_entry) ctx_sched_out()
2400 group_sched_out(event, cpuctx, ctx); ctx_sched_out()
2404 list_for_each_entry(event, &ctx->flexible_groups, group_entry) ctx_sched_out()
2405 group_sched_out(event, cpuctx, ctx); ctx_sched_out()
2448 static void __perf_event_sync_stat(struct perf_event *event, __perf_event_sync_stat() argument
2453 if (!event->attr.inherit_stat) __perf_event_sync_stat()
2457 * Update the event value, we cannot use perf_event_read() __perf_event_sync_stat()
2460 * we know the event must be on the current CPU, therefore we __perf_event_sync_stat()
2463 switch (event->state) { __perf_event_sync_stat()
2465 event->pmu->read(event); __perf_event_sync_stat()
2469 update_event_times(event); __perf_event_sync_stat()
2477 * In order to keep per-task stats reliable we need to flip the event __perf_event_sync_stat()
2481 value = local64_xchg(&event->count, value); __perf_event_sync_stat()
2484 swap(event->total_time_enabled, next_event->total_time_enabled); __perf_event_sync_stat()
2485 swap(event->total_time_running, next_event->total_time_running); __perf_event_sync_stat()
2490 perf_event_update_userpage(event); __perf_event_sync_stat()
2497 struct perf_event *event, *next_event; perf_event_sync_stat() local
2504 event = list_first_entry(&ctx->event_list, perf_event_sync_stat()
2510 while (&event->event_entry != &ctx->event_list && perf_event_sync_stat()
2513 __perf_event_sync_stat(event, next_event); perf_event_sync_stat()
2515 event = list_next_entry(event, event_entry); perf_event_sync_stat()
2647 * We stop each event and update the event value in event->count.
2650 * sets the disabled bit in the control field of event _before_
2651 * accessing the event control register. If a NMI hits, then it will
2652 * not restart the event.
2668 * cgroup events are system-wide mode only __perf_event_task_sched_out()
2701 struct perf_event *event; ctx_pinned_sched_in() local
2703 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { ctx_pinned_sched_in()
2704 if (event->state <= PERF_EVENT_STATE_OFF) ctx_pinned_sched_in()
2706 if (!event_filter_match(event)) ctx_pinned_sched_in()
2710 if (is_cgroup_event(event)) ctx_pinned_sched_in()
2711 perf_cgroup_mark_enabled(event, ctx); ctx_pinned_sched_in()
2713 if (group_can_go_on(event, cpuctx, 1)) ctx_pinned_sched_in()
2714 group_sched_in(event, cpuctx, ctx); ctx_pinned_sched_in()
2720 if (event->state == PERF_EVENT_STATE_INACTIVE) { ctx_pinned_sched_in()
2721 update_group_times(event); ctx_pinned_sched_in()
2722 event->state = PERF_EVENT_STATE_ERROR; ctx_pinned_sched_in()
2731 struct perf_event *event; ctx_flexible_sched_in() local
2734 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { ctx_flexible_sched_in()
2736 if (event->state <= PERF_EVENT_STATE_OFF) ctx_flexible_sched_in()
2742 if (!event_filter_match(event)) ctx_flexible_sched_in()
2746 if (is_cgroup_event(event)) ctx_flexible_sched_in()
2747 perf_cgroup_mark_enabled(event, ctx); ctx_flexible_sched_in()
2749 if (group_can_go_on(event, cpuctx, can_add_hw)) { ctx_flexible_sched_in()
2750 if (group_sched_in(event, cpuctx, ctx)) ctx_flexible_sched_in()
2824 * We restore the event value and then enable it.
2827 * sets the enabled bit in the control field of event _before_
2828 * accessing the event control register. If a NMI hits, then it will
2829 * keep the event running.
2847 * cgroup events are system-wide mode only
2856 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) perf_calculate_period() argument
2858 u64 frequency = event->attr.sample_freq; perf_calculate_period()
2932 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) perf_adjust_period() argument
2934 struct hw_perf_event *hwc = &event->hw; perf_adjust_period()
2938 period = perf_calculate_period(event, nsec, count); perf_adjust_period()
2952 event->pmu->stop(event, PERF_EF_UPDATE); perf_adjust_period()
2957 event->pmu->start(event, PERF_EF_RELOAD); perf_adjust_period()
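perf_calculate_period()/perf_adjust_period() above turn a requested sample frequency into a hardware sample period from the observed event rate. A back-of-the-envelope restatement of that relationship (the kernel uses an overflow-safe fixed-point computation instead of this naive 64-bit arithmetic):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Given that `count` events occurred in `nsec` nanoseconds, pick a
 * sample period that yields roughly `freq` samples per second. */
static uint64_t rough_period(uint64_t freq, uint64_t count, uint64_t nsec)
{
	uint64_t events_per_sec, period;

	if (!freq || !nsec)
		return 1;

	events_per_sec = count * NSEC_PER_SEC / nsec;
	period = events_per_sec / freq;

	return period ? period : 1;
}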
2969 struct perf_event *event; perf_adjust_freq_unthr_context() local
2985 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { perf_adjust_freq_unthr_context()
2986 if (event->state != PERF_EVENT_STATE_ACTIVE) perf_adjust_freq_unthr_context()
2989 if (!event_filter_match(event)) perf_adjust_freq_unthr_context()
2992 perf_pmu_disable(event->pmu); perf_adjust_freq_unthr_context()
2994 hwc = &event->hw; perf_adjust_freq_unthr_context()
2998 perf_log_throttle(event, 1); perf_adjust_freq_unthr_context()
2999 event->pmu->start(event, 0); perf_adjust_freq_unthr_context()
3002 if (!event->attr.freq || !event->attr.sample_freq) perf_adjust_freq_unthr_context()
3006 * stop the event and update event->count perf_adjust_freq_unthr_context()
3008 event->pmu->stop(event, PERF_EF_UPDATE); perf_adjust_freq_unthr_context()
3010 now = local64_read(&event->count); perf_adjust_freq_unthr_context()
3015 * restart the event perf_adjust_freq_unthr_context()
3017 * we have stopped the event so tell that perf_adjust_freq_unthr_context()
3022 perf_adjust_period(event, period, delta, false); perf_adjust_freq_unthr_context()
3024 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); perf_adjust_freq_unthr_context()
3026 perf_pmu_enable(event->pmu); perf_adjust_freq_unthr_context()
3111 static int event_enable_on_exec(struct perf_event *event, event_enable_on_exec() argument
3114 if (!event->attr.enable_on_exec) event_enable_on_exec()
3117 event->attr.enable_on_exec = 0; event_enable_on_exec()
3118 if (event->state >= PERF_EVENT_STATE_INACTIVE) event_enable_on_exec()
3121 __perf_event_mark_enabled(event); event_enable_on_exec()
3133 struct perf_event *event; perf_event_enable_on_exec() local
3154 list_for_each_entry(event, &ctx->event_list, event_entry) { perf_event_enable_on_exec()
3155 ret = event_enable_on_exec(event, ctx); perf_event_enable_on_exec()
3161 * Unclone this context if we enabled any event. perf_event_enable_on_exec()
3196 * Cross CPU call to read the hardware event
3200 struct perf_event *event = info; __perf_event_read() local
3201 struct perf_event_context *ctx = event->ctx; __perf_event_read()
3208 * event->count would have been updated to a recent sample __perf_event_read()
3209 * when the event was scheduled out. __perf_event_read()
3217 update_cgrp_time_from_event(event); __perf_event_read()
3219 update_event_times(event); __perf_event_read()
3220 if (event->state == PERF_EVENT_STATE_ACTIVE) __perf_event_read()
3221 event->pmu->read(event); __perf_event_read()
3225 static inline u64 perf_event_count(struct perf_event *event) perf_event_count() argument
3227 if (event->pmu->count) perf_event_count()
3228 return event->pmu->count(event); perf_event_count()
3230 return __perf_event_count(event); perf_event_count()
3233 static u64 perf_event_read(struct perf_event *event) perf_event_read() argument
3236 * If event is enabled and currently active on a CPU, update the perf_event_read()
3237 * value in the event structure: perf_event_read()
3239 if (event->state == PERF_EVENT_STATE_ACTIVE) { perf_event_read()
3240 smp_call_function_single(event->oncpu, perf_event_read()
3241 __perf_event_read, event, 1); perf_event_read()
3242 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { perf_event_read()
3243 struct perf_event_context *ctx = event->ctx; perf_event_read()
3254 update_cgrp_time_from_event(event); perf_event_read()
3256 update_event_times(event); perf_event_read()
3260 return perf_event_count(event); perf_event_read()
3332 struct perf_event *event) find_get_context()
3339 int cpu = event->cpu; find_get_context()
3342 /* Must be root to operate on a CPU event: */ find_get_context()
3347 * We could be clever and allow attaching an event to an find_get_context()
3367 if (event->attach_state & PERF_ATTACH_TASK_DATA) { find_get_context()
3434 static void perf_event_free_filter(struct perf_event *event);
3435 static void perf_event_free_bpf_prog(struct perf_event *event);
3439 struct perf_event *event; free_event_rcu() local
3441 event = container_of(head, struct perf_event, rcu_head); free_event_rcu()
3442 if (event->ns) free_event_rcu()
3443 put_pid_ns(event->ns); free_event_rcu()
3444 perf_event_free_filter(event); free_event_rcu()
3445 kfree(event); free_event_rcu()
3448 static void ring_buffer_attach(struct perf_event *event,
3451 static void unaccount_event_cpu(struct perf_event *event, int cpu) unaccount_event_cpu() argument
3453 if (event->parent) unaccount_event_cpu()
3456 if (is_cgroup_event(event)) unaccount_event_cpu()
3460 static void unaccount_event(struct perf_event *event) unaccount_event() argument
3462 if (event->parent) unaccount_event()
3465 if (event->attach_state & PERF_ATTACH_TASK) unaccount_event()
3467 if (event->attr.mmap || event->attr.mmap_data) unaccount_event()
3469 if (event->attr.comm) unaccount_event()
3471 if (event->attr.task) unaccount_event()
3473 if (event->attr.freq) unaccount_event()
3475 if (is_cgroup_event(event)) unaccount_event()
3477 if (has_branch_stack(event)) unaccount_event()
3480 unaccount_event_cpu(event, event->cpu); unaccount_event()
3485 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3495 static int exclusive_event_init(struct perf_event *event) exclusive_event_init() argument
3497 struct pmu *pmu = event->pmu; exclusive_event_init()
3510 * Since this is called in perf_event_alloc() path, event::ctx exclusive_event_init()
3512 * to mean "per-task event", because unlike other attach states it exclusive_event_init()
3515 if (event->attach_state & PERF_ATTACH_TASK) { exclusive_event_init()
3526 static void exclusive_event_destroy(struct perf_event *event) exclusive_event_destroy() argument
3528 struct pmu *pmu = event->pmu; exclusive_event_destroy()
3534 if (event->attach_state & PERF_ATTACH_TASK) exclusive_event_destroy()
3551 static bool exclusive_event_installable(struct perf_event *event, exclusive_event_installable() argument
3555 struct pmu *pmu = event->pmu; exclusive_event_installable()
3561 if (exclusive_event_match(iter_event, event)) exclusive_event_installable()
3568 static void __free_event(struct perf_event *event) __free_event() argument
3570 if (!event->parent) { __free_event()
3571 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) __free_event()
3575 perf_event_free_bpf_prog(event); __free_event()
3577 if (event->destroy) __free_event()
3578 event->destroy(event); __free_event()
3580 if (event->ctx) __free_event()
3581 put_ctx(event->ctx); __free_event()
3583 if (event->pmu) { __free_event()
3584 exclusive_event_destroy(event); __free_event()
3585 module_put(event->pmu->module); __free_event()
3588 call_rcu(&event->rcu_head, free_event_rcu); __free_event()
3591 static void _free_event(struct perf_event *event) _free_event() argument
3593 irq_work_sync(&event->pending); _free_event()
3595 unaccount_event(event); _free_event()
3597 if (event->rb) { _free_event()
3599 * Can happen when we close an event with re-directed output. _free_event()
3604 mutex_lock(&event->mmap_mutex); _free_event()
3605 ring_buffer_attach(event, NULL); _free_event()
3606 mutex_unlock(&event->mmap_mutex); _free_event()
3609 if (is_cgroup_event(event)) _free_event()
3610 perf_detach_cgroup(event); _free_event()
3612 __free_event(event); _free_event()
3617 * where the event isn't exposed yet and inherited events.
3619 static void free_event(struct perf_event *event) free_event() argument
3621 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, free_event()
3622 "unexpected event refcount: %ld; ptr=%p\n", free_event()
3623 atomic_long_read(&event->refcount), event)) { free_event()
3628 _free_event(event); free_event()
3632 * Remove user event from the owner task.
3634 static void perf_remove_from_owner(struct perf_event *event) perf_remove_from_owner() argument
3639 owner = ACCESS_ONCE(event->owner); perf_remove_from_owner()
3643 * free this event, otherwise we need to serialize on perf_remove_from_owner()
3669 * We have to re-check the event->owner field, if it is cleared perf_remove_from_owner()
3672 * event. perf_remove_from_owner()
3674 if (event->owner) perf_remove_from_owner()
3675 list_del_init(&event->owner_entry); perf_remove_from_owner()
3681 static void put_event(struct perf_event *event) put_event() argument
3685 if (!atomic_long_dec_and_test(&event->refcount)) put_event()
3688 if (!is_kernel_event(event)) put_event()
3689 perf_remove_from_owner(event); put_event()
3703 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); put_event()
3705 perf_remove_from_context(event, true); put_event()
3706 perf_event_ctx_unlock(event, ctx); put_event()
3708 _free_event(event); put_event()
3711 int perf_event_release_kernel(struct perf_event *event) perf_event_release_kernel() argument
3713 put_event(event); perf_event_release_kernel()
3733 struct perf_event *event, *tmp; orphans_remove_work() local
3739 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { orphans_remove_work()
3740 struct perf_event *parent_event = event->parent; orphans_remove_work()
3742 if (!is_orphaned_child(event)) orphans_remove_work()
3745 perf_remove_from_context(event, true); orphans_remove_work()
3748 list_del_init(&event->child_list); orphans_remove_work()
3751 free_event(event); orphans_remove_work()
3763 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) perf_event_read_value() argument
3771 mutex_lock(&event->child_mutex); perf_event_read_value()
3772 total += perf_event_read(event); perf_event_read_value()
3773 *enabled += event->total_time_enabled + perf_event_read_value()
3774 atomic64_read(&event->child_total_time_enabled); perf_event_read_value()
3775 *running += event->total_time_running + perf_event_read_value()
3776 atomic64_read(&event->child_total_time_running); perf_event_read_value()
3778 list_for_each_entry(child, &event->child_list, child_list) { perf_event_read_value()
3783 mutex_unlock(&event->child_mutex); perf_event_read_value()
3789 static int perf_event_read_group(struct perf_event *event, perf_event_read_group() argument
3792 struct perf_event *leader = event->group_leader, *sub; perf_event_read_group()
3837 static int perf_event_read_one(struct perf_event *event, perf_event_read_one() argument
3844 values[n++] = perf_event_read_value(event, &enabled, &running); perf_event_read_one()
3850 values[n++] = primary_event_id(event); perf_event_read_one()
3858 static bool is_event_hup(struct perf_event *event) is_event_hup() argument
3862 if (event->state != PERF_EVENT_STATE_EXIT) is_event_hup()
3865 mutex_lock(&event->child_mutex); is_event_hup()
3866 no_children = list_empty(&event->child_list); is_event_hup()
3867 mutex_unlock(&event->child_mutex); is_event_hup()
3872 * Read the performance event - simple non blocking version for now
3875 perf_read_hw(struct perf_event *event, char __user *buf, size_t count) perf_read_hw() argument
3877 u64 read_format = event->attr.read_format; perf_read_hw()
3881 * Return end-of-file for a read on an event that is in perf_read_hw()
3885 if (event->state == PERF_EVENT_STATE_ERROR) perf_read_hw()
3888 if (count < event->read_size) perf_read_hw()
3891 WARN_ON_ONCE(event->ctx->parent_ctx); perf_read_hw()
3893 ret = perf_event_read_group(event, read_format, buf); perf_read_hw()
3895 ret = perf_event_read_one(event, read_format, buf); perf_read_hw()
3903 struct perf_event *event = file->private_data; perf_read() local
3907 ctx = perf_event_ctx_lock(event); perf_read()
3908 ret = perf_read_hw(event, buf, count); perf_read()
3909 perf_event_ctx_unlock(event, ctx); perf_read()
3916 struct perf_event *event = file->private_data; perf_poll() local
3920 poll_wait(file, &event->waitq, wait); perf_poll()
3922 if (is_event_hup(event)) perf_poll()
3926 * Pin the event->rb by taking event->mmap_mutex; otherwise perf_poll()
3929 mutex_lock(&event->mmap_mutex); perf_poll()
3930 rb = event->rb; perf_poll()
3933 mutex_unlock(&event->mmap_mutex); perf_poll()
3937 static void _perf_event_reset(struct perf_event *event) _perf_event_reset() argument
3939 (void)perf_event_read(event); _perf_event_reset()
3940 local64_set(&event->count, 0); _perf_event_reset()
3941 perf_event_update_userpage(event); _perf_event_reset()
3945 * Holding the top-level event's child_mutex means that any
3946 * descendant process that has inherited this event will block
3950 static void perf_event_for_each_child(struct perf_event *event, perf_event_for_each_child() argument
3955 WARN_ON_ONCE(event->ctx->parent_ctx); perf_event_for_each_child()
3957 mutex_lock(&event->child_mutex); perf_event_for_each_child()
3958 func(event); perf_event_for_each_child()
3959 list_for_each_entry(child, &event->child_list, child_list) perf_event_for_each_child()
3961 mutex_unlock(&event->child_mutex); perf_event_for_each_child()
3964 static void perf_event_for_each(struct perf_event *event, perf_event_for_each() argument
3967 struct perf_event_context *ctx = event->ctx; perf_event_for_each()
3972 event = event->group_leader; perf_event_for_each()
3974 perf_event_for_each_child(event, func); perf_event_for_each()
3975 list_for_each_entry(sibling, &event->sibling_list, group_entry) perf_event_for_each()
3980 struct perf_event *event; member in struct:period_event
3987 struct perf_event *event = pe->event; __perf_event_period() local
3988 struct perf_event_context *ctx = event->ctx; __perf_event_period()
3993 if (event->attr.freq) { __perf_event_period()
3994 event->attr.sample_freq = value; __perf_event_period()
3996 event->attr.sample_period = value; __perf_event_period()
3997 event->hw.sample_period = value; __perf_event_period()
4000 active = (event->state == PERF_EVENT_STATE_ACTIVE); __perf_event_period()
4003 event->pmu->stop(event, PERF_EF_UPDATE); __perf_event_period()
4006 local64_set(&event->hw.period_left, 0); __perf_event_period()
4009 event->pmu->start(event, PERF_EF_RELOAD); __perf_event_period()
4017 static int perf_event_period(struct perf_event *event, u64 __user *arg) perf_event_period() argument
4019 struct period_event pe = { .event = event, }; perf_event_period()
4020 struct perf_event_context *ctx = event->ctx; perf_event_period()
4024 if (!is_sampling_event(event)) perf_event_period()
4033 if (event->attr.freq && value > sysctl_perf_event_sample_rate) perf_event_period()
4040 cpu_function_call(event->cpu, __perf_event_period, &pe); perf_event_period()
4077 static int perf_event_set_output(struct perf_event *event,
4079 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
4080 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
4082 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) _perf_ioctl() argument
4099 return _perf_event_refresh(event, arg); _perf_ioctl()
4102 return perf_event_period(event, (u64 __user *)arg); _perf_ioctl()
4106 u64 id = primary_event_id(event); _perf_ioctl()
4123 ret = perf_event_set_output(event, output_event); _perf_ioctl()
4126 ret = perf_event_set_output(event, NULL); _perf_ioctl()
4132 return perf_event_set_filter(event, (void __user *)arg); _perf_ioctl()
4135 return perf_event_set_bpf_prog(event, arg); _perf_ioctl()
4142 perf_event_for_each(event, func); _perf_ioctl()
4144 perf_event_for_each_child(event, func); _perf_ioctl()
4151 struct perf_event *event = file->private_data; perf_ioctl() local
4155 ctx = perf_event_ctx_lock(event); perf_ioctl()
4156 ret = _perf_ioctl(event, cmd, arg); perf_ioctl()
4157 perf_event_ctx_unlock(event, ctx); perf_ioctl()
4185 struct perf_event *event; perf_event_task_enable() local
4188 list_for_each_entry(event, &current->perf_event_list, owner_entry) { perf_event_task_enable()
4189 ctx = perf_event_ctx_lock(event); perf_event_task_enable()
4190 perf_event_for_each_child(event, _perf_event_enable); perf_event_task_enable()
4191 perf_event_ctx_unlock(event, ctx); perf_event_task_enable()
4201 struct perf_event *event; perf_event_task_disable() local
4204 list_for_each_entry(event, &current->perf_event_list, owner_entry) { perf_event_task_disable()
4205 ctx = perf_event_ctx_lock(event); perf_event_task_disable()
4206 perf_event_for_each_child(event, _perf_event_disable); perf_event_task_disable()
4207 perf_event_ctx_unlock(event, ctx); perf_event_task_disable()
4214 static int perf_event_index(struct perf_event *event) perf_event_index() argument
4216 if (event->hw.state & PERF_HES_STOPPED) perf_event_index()
4219 if (event->state != PERF_EVENT_STATE_ACTIVE) perf_event_index()
4222 return event->pmu->event_idx(event); perf_event_index()
4225 static void calc_timer_values(struct perf_event *event, calc_timer_values() argument
4233 ctx_time = event->shadow_ctx_time + *now; calc_timer_values()
4234 *enabled = ctx_time - event->tstamp_enabled; calc_timer_values()
4235 *running = ctx_time - event->tstamp_running; calc_timer_values()
4238 static void perf_event_init_userpage(struct perf_event *event) perf_event_init_userpage() argument
4244 rb = rcu_dereference(event->rb); perf_event_init_userpage()
4261 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) arch_perf_update_userpage()
4270 void perf_event_update_userpage(struct perf_event *event) perf_event_update_userpage() argument
4277 rb = rcu_dereference(event->rb); perf_event_update_userpage()
4283 * based on snapshot values taken when the event perf_event_update_userpage()
4290 calc_timer_values(event, &now, &enabled, &running); perf_event_update_userpage()
4300 userpg->index = perf_event_index(event); perf_event_update_userpage()
4301 userpg->offset = perf_event_count(event); perf_event_update_userpage()
4303 userpg->offset -= local64_read(&event->hw.prev_count); perf_event_update_userpage()
4306 atomic64_read(&event->child_total_time_enabled); perf_event_update_userpage()
4309 atomic64_read(&event->child_total_time_running); perf_event_update_userpage()
4311 arch_perf_update_userpage(event, userpg, now); perf_event_update_userpage()
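The userpage fields maintained above (lock, index, offset and the time_enabled/time_running deltas) exist so a thread can read its own counter without a syscall. An x86-only sketch of that self-monitoring read, following the seqlock protocol documented in include/uapi/linux/perf_event.h; the sign extension for counters narrower than 64 bits is omitted here:

#include <stdint.h>
#include <linux/perf_event.h>

#define barrier() __asm__ volatile("" ::: "memory")

static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	__asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
	return ((uint64_t)hi << 32) | lo;
}

static uint64_t read_self(struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	do {
		seq = pc->lock;		/* retry if the kernel updates the page meanwhile */
		barrier();

		idx = pc->index;	/* hardware counter index + 1, 0 if not available */
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx)
			count += rdpmc(idx - 1);

		barrier();
	} while (pc->lock != seq);

	return count;
}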
4322 struct perf_event *event = vma->vm_file->private_data; perf_mmap_fault() local
4333 rb = rcu_dereference(event->rb); perf_mmap_fault()
4355 static void ring_buffer_attach(struct perf_event *event, ring_buffer_attach() argument
4361 if (event->rb) { ring_buffer_attach()
4364 * event->rb_entry and wait/clear when adding event->rb_entry. ring_buffer_attach()
4366 WARN_ON_ONCE(event->rcu_pending); ring_buffer_attach()
4368 old_rb = event->rb; ring_buffer_attach()
4370 list_del_rcu(&event->rb_entry); ring_buffer_attach()
4373 event->rcu_batches = get_state_synchronize_rcu(); ring_buffer_attach()
4374 event->rcu_pending = 1; ring_buffer_attach()
4378 if (event->rcu_pending) { ring_buffer_attach()
4379 cond_synchronize_rcu(event->rcu_batches); ring_buffer_attach()
4380 event->rcu_pending = 0; ring_buffer_attach()
4384 list_add_rcu(&event->rb_entry, &rb->event_list); ring_buffer_attach()
4388 rcu_assign_pointer(event->rb, rb); ring_buffer_attach()
4397 wake_up_all(&event->waitq); ring_buffer_attach()
4401 static void ring_buffer_wakeup(struct perf_event *event) ring_buffer_wakeup() argument
4406 rb = rcu_dereference(event->rb); ring_buffer_wakeup()
4408 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) ring_buffer_wakeup()
4409 wake_up_all(&event->waitq); ring_buffer_wakeup()
4414 struct ring_buffer *ring_buffer_get(struct perf_event *event) ring_buffer_get() argument
4419 rb = rcu_dereference(event->rb); ring_buffer_get()
4441 struct perf_event *event = vma->vm_file->private_data; perf_mmap_open() local
4443 atomic_inc(&event->mmap_count); perf_mmap_open()
4444 atomic_inc(&event->rb->mmap_count); perf_mmap_open()
4447 atomic_inc(&event->rb->aux_mmap_count); perf_mmap_open()
4449 if (event->pmu->event_mapped) perf_mmap_open()
4450 event->pmu->event_mapped(event); perf_mmap_open()
4455 * event, or through other events by use of perf_event_set_output().
4463 struct perf_event *event = vma->vm_file->private_data; perf_mmap_close() local
4465 struct ring_buffer *rb = ring_buffer_get(event); perf_mmap_close()
4470 if (event->pmu->event_unmapped) perf_mmap_close()
4471 event->pmu->event_unmapped(event); perf_mmap_close()
4475 * event->mmap_count, so it is ok to use event->mmap_mutex to perf_mmap_close()
4479 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { perf_mmap_close()
4484 mutex_unlock(&event->mmap_mutex); perf_mmap_close()
4489 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) perf_mmap_close()
4492 ring_buffer_attach(event, NULL); perf_mmap_close()
4493 mutex_unlock(&event->mmap_mutex); perf_mmap_close()
4506 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { perf_mmap_close()
4507 if (!atomic_long_inc_not_zero(&event->refcount)) { perf_mmap_close()
4509 * This event is en-route to free_event() which will perf_mmap_close()
4516 mutex_lock(&event->mmap_mutex); perf_mmap_close()
4522 * If we find a different rb; ignore this event, a next perf_mmap_close()
4527 if (event->rb == rb) perf_mmap_close()
4528 ring_buffer_attach(event, NULL); perf_mmap_close()
4530 mutex_unlock(&event->mmap_mutex); perf_mmap_close()
4531 put_event(event); perf_mmap_close()
4567 struct perf_event *event = file->private_data; perf_mmap() local
4582 if (event->cpu == -1 && event->attr.inherit) perf_mmap()
4600 if (!event->rb) perf_mmap()
4605 mutex_lock(&event->mmap_mutex); perf_mmap()
4608 rb = event->rb; perf_mmap()
4660 WARN_ON_ONCE(event->ctx->parent_ctx); perf_mmap()
4662 mutex_lock(&event->mmap_mutex); perf_mmap()
4663 if (event->rb) { perf_mmap()
4664 if (event->rb->nr_pages != nr_pages) { perf_mmap()
4669 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { perf_mmap()
4675 mutex_unlock(&event->mmap_mutex); perf_mmap()
4707 WARN_ON(!rb && event->rb); perf_mmap()
4714 event->attr.watermark ? event->attr.wakeup_watermark : 0, perf_mmap()
4715 event->cpu, flags); perf_mmap()
4726 ring_buffer_attach(event, rb); perf_mmap()
4728 perf_event_init_userpage(event); perf_mmap()
4729 perf_event_update_userpage(event); perf_mmap()
4731 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, perf_mmap()
4732 event->attr.aux_watermark, flags); perf_mmap()
4742 atomic_inc(&event->mmap_count); perf_mmap()
4747 mutex_unlock(&event->mmap_mutex); perf_mmap()
4756 if (event->pmu->event_mapped) perf_mmap()
4757 event->pmu->event_mapped(event); perf_mmap()
4765 struct perf_event *event = filp->private_data; perf_fasync() local
4769 retval = fasync_helper(fd, filp, on, &event->fasync); perf_fasync()
4790 * Perf event wakeup
4796 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) perf_event_fasync() argument
4799 if (event->parent) perf_event_fasync()
4800 event = event->parent; perf_event_fasync()
4801 return &event->fasync; perf_event_fasync()
4804 void perf_event_wakeup(struct perf_event *event) perf_event_wakeup() argument
4806 ring_buffer_wakeup(event); perf_event_wakeup()
4808 if (event->pending_kill) { perf_event_wakeup()
4809 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); perf_event_wakeup()
4810 event->pending_kill = 0; perf_event_wakeup()
4816 struct perf_event *event = container_of(entry, perf_pending_event() local
4826 if (event->pending_disable) { perf_pending_event()
4827 event->pending_disable = 0; perf_pending_event()
4828 __perf_event_disable(event); perf_pending_event()
4831 if (event->pending_wakeup) { perf_pending_event()
4832 event->pending_wakeup = 0; perf_pending_event()
4833 perf_event_wakeup(event); perf_pending_event()
4996 struct perf_event *event) __perf_event_header__init_id()
4998 u64 sample_type = event->attr.sample_type; __perf_event_header__init_id()
5001 header->size += event->id_header_size; __perf_event_header__init_id()
5005 data->tid_entry.pid = perf_event_pid(event, current); __perf_event_header__init_id()
5006 data->tid_entry.tid = perf_event_tid(event, current); __perf_event_header__init_id()
5010 data->time = perf_event_clock(event); __perf_event_header__init_id()
5013 data->id = primary_event_id(event); __perf_event_header__init_id()
5016 data->stream_id = event->id; __perf_event_header__init_id()
5026 struct perf_event *event) perf_event_header__init_id()
5028 if (event->attr.sample_id_all) perf_event_header__init_id()
5029 __perf_event_header__init_id(header, data, event); perf_event_header__init_id()
5056 void perf_event__output_id_sample(struct perf_event *event, perf_event__output_id_sample() argument
5060 if (event->attr.sample_id_all) perf_event__output_id_sample()
5065 struct perf_event *event, perf_output_read_one()
5068 u64 read_format = event->attr.read_format; perf_output_read_one()
5072 values[n++] = perf_event_count(event); perf_output_read_one()
5075 atomic64_read(&event->child_total_time_enabled); perf_output_read_one()
5079 atomic64_read(&event->child_total_time_running); perf_output_read_one()
5082 values[n++] = primary_event_id(event); perf_output_read_one()
5091 struct perf_event *event, perf_output_read_group()
5094 struct perf_event *leader = event->group_leader, *sub; perf_output_read_group()
5095 u64 read_format = event->attr.read_format; perf_output_read_group()
5107 if (leader != event) perf_output_read_group()
5119 if ((sub != event) && perf_output_read_group()
5135 struct perf_event *event) perf_output_read()
5138 u64 read_format = event->attr.read_format; perf_output_read()
5142 * based on snapshot values taken when the event perf_output_read()
5150 calc_timer_values(event, &now, &enabled, &running); perf_output_read()
5152 if (event->attr.read_format & PERF_FORMAT_GROUP) perf_output_read()
5153 perf_output_read_group(handle, event, enabled, running); perf_output_read()
5155 perf_output_read_one(handle, event, enabled, running); perf_output_read()
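perf_output_read_one() above emits the same values[] layout that a read() on the event fd returns. A sketch of that layout, assuming read_format sets TOTAL_TIME_ENABLED, TOTAL_TIME_RUNNING and ID but not GROUP (the struct name is illustrative):

    #include <stdint.h>

    struct read_one_layout {
            uint64_t value;          /* perf_event_count()                 */
            uint64_t time_enabled;   /* enabled + child_total_time_enabled */
            uint64_t time_running;   /* running + child_total_time_running */
            uint64_t id;             /* primary_event_id()                 */
    };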
5161 struct perf_event *event) perf_output_sample()
5195 perf_output_read(handle, event); perf_output_sample()
5258 u64 mask = event->attr.sample_regs_user; perf_output_sample()
5289 u64 mask = event->attr.sample_regs_intr; perf_output_sample()
5297 if (!event->attr.watermark) { perf_output_sample()
5298 int wakeup_events = event->attr.wakeup_events; perf_output_sample()
5314 struct perf_event *event, perf_prepare_sample()
5317 u64 sample_type = event->attr.sample_type; perf_prepare_sample()
5320 header->size = sizeof(*header) + event->header_size; perf_prepare_sample()
5325 __perf_event_header__init_id(header, data, event); perf_prepare_sample()
5333 data->callchain = perf_callchain(event, regs); perf_prepare_sample()
5371 u64 mask = event->attr.sample_regs_user; perf_prepare_sample()
5385 u16 stack_size = event->attr.sample_stack_user; perf_prepare_sample()
5410 u64 mask = event->attr.sample_regs_intr; perf_prepare_sample()
5419 static void perf_event_output(struct perf_event *event, perf_event_output() argument
5429 perf_prepare_sample(&header, data, event, regs); perf_event_output()
5431 if (perf_output_begin(&handle, event, header.size)) perf_event_output()
5434 perf_output_sample(&handle, &header, data, event); perf_event_output()
5454 perf_event_read_event(struct perf_event *event, perf_event_read_event() argument
5463 .size = sizeof(read_event) + event->read_size, perf_event_read_event()
5465 .pid = perf_event_pid(event, task), perf_event_read_event()
5466 .tid = perf_event_tid(event, task), perf_event_read_event()
5470 perf_event_header__init_id(&read_event.header, &sample, event); perf_event_read_event()
5471 ret = perf_output_begin(&handle, event, read_event.header.size); perf_event_read_event()
5476 perf_output_read(&handle, event); perf_event_read_event()
5477 perf_event__output_id_sample(event, &handle, &sample); perf_event_read_event()
5482 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
5489 struct perf_event *event; perf_event_aux_ctx() local
5491 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { perf_event_aux_ctx()
5492 if (event->state < PERF_EVENT_STATE_INACTIVE) perf_event_aux_ctx()
5494 if (!event_filter_match(event)) perf_event_aux_ctx()
5496 output(event, data); perf_event_aux_ctx()
5556 static int perf_event_task_match(struct perf_event *event) perf_event_task_match() argument
5558 return event->attr.comm || event->attr.mmap || perf_event_task_match()
5559 event->attr.mmap2 || event->attr.mmap_data || perf_event_task_match()
5560 event->attr.task; perf_event_task_match()
5563 static void perf_event_task_output(struct perf_event *event, perf_event_task_output() argument
5572 if (!perf_event_task_match(event)) perf_event_task_output()
5575 perf_event_header__init_id(&task_event->event_id.header, &sample, event); perf_event_task_output()
5577 ret = perf_output_begin(&handle, event, perf_event_task_output()
5582 task_event->event_id.pid = perf_event_pid(event, task); perf_event_task_output()
5583 task_event->event_id.ppid = perf_event_pid(event, current); perf_event_task_output()
5585 task_event->event_id.tid = perf_event_tid(event, task); perf_event_task_output()
5586 task_event->event_id.ptid = perf_event_tid(event, current); perf_event_task_output()
5588 task_event->event_id.time = perf_event_clock(event); perf_event_task_output()
5592 perf_event__output_id_sample(event, &handle, &sample); perf_event_task_output()
5654 static int perf_event_comm_match(struct perf_event *event) perf_event_comm_match() argument
5656 return event->attr.comm; perf_event_comm_match()
5659 static void perf_event_comm_output(struct perf_event *event, perf_event_comm_output() argument
5668 if (!perf_event_comm_match(event)) perf_event_comm_output()
5671 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); perf_event_comm_output()
5672 ret = perf_output_begin(&handle, event, perf_event_comm_output()
5678 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); perf_event_comm_output()
5679 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); perf_event_comm_output()
5685 perf_event__output_id_sample(event, &handle, &sample); perf_event_comm_output()
5761 static int perf_event_mmap_match(struct perf_event *event, perf_event_mmap_match() argument
5768 return (!executable && event->attr.mmap_data) || perf_event_mmap_match()
5769 (executable && (event->attr.mmap || event->attr.mmap2)); perf_event_mmap_match()
5772 static void perf_event_mmap_output(struct perf_event *event, perf_event_mmap_output() argument
5781 if (!perf_event_mmap_match(event, data)) perf_event_mmap_output()
5784 if (event->attr.mmap2) { perf_event_mmap_output()
5794 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); perf_event_mmap_output()
5795 ret = perf_output_begin(&handle, event, perf_event_mmap_output()
5800 mmap_event->event_id.pid = perf_event_pid(event, current); perf_event_mmap_output()
5801 mmap_event->event_id.tid = perf_event_tid(event, current); perf_event_mmap_output()
5805 if (event->attr.mmap2) { perf_event_mmap_output()
5817 perf_event__output_id_sample(event, &handle, &sample); perf_event_mmap_output()
5978 void perf_event_aux_event(struct perf_event *event, unsigned long head, perf_event_aux_event() argument
6000 perf_event_header__init_id(&rec.header, &sample, event); perf_event_aux_event()
6001 ret = perf_output_begin(&handle, event, rec.header.size); perf_event_aux_event()
6007 perf_event__output_id_sample(event, &handle, &sample); perf_event_aux_event()
6016 static void perf_log_throttle(struct perf_event *event, int enable) perf_log_throttle() argument
6033 .time = perf_event_clock(event), perf_log_throttle()
6034 .id = primary_event_id(event), perf_log_throttle()
6035 .stream_id = event->id, perf_log_throttle()
6041 perf_event_header__init_id(&throttle_event.header, &sample, event); perf_log_throttle()
6043 ret = perf_output_begin(&handle, event, perf_log_throttle()
6049 perf_event__output_id_sample(event, &handle, &sample); perf_log_throttle()
6053 static void perf_log_itrace_start(struct perf_event *event) perf_log_itrace_start() argument
6064 if (event->parent) perf_log_itrace_start()
6065 event = event->parent; perf_log_itrace_start()
6067 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || perf_log_itrace_start()
6068 event->hw.itrace_started) perf_log_itrace_start()
6071 event->hw.itrace_started = 1; perf_log_itrace_start()
6076 rec.pid = perf_event_pid(event, current); perf_log_itrace_start()
6077 rec.tid = perf_event_tid(event, current); perf_log_itrace_start()
6079 perf_event_header__init_id(&rec.header, &sample, event); perf_log_itrace_start()
6080 ret = perf_output_begin(&handle, event, rec.header.size); perf_log_itrace_start()
6086 perf_event__output_id_sample(event, &handle, &sample); perf_log_itrace_start()
6092 * Generic event overflow handling, sampling.
6095 static int __perf_event_overflow(struct perf_event *event, __perf_event_overflow() argument
6099 int events = atomic_read(&event->event_limit); __perf_event_overflow()
6100 struct hw_perf_event *hwc = &event->hw; __perf_event_overflow()
6108 if (unlikely(!is_sampling_event(event))) __perf_event_overflow()
6121 perf_log_throttle(event, 0); __perf_event_overflow()
6127 if (event->attr.freq) { __perf_event_overflow()
6134 perf_adjust_period(event, delta, hwc->last_period, true); __perf_event_overflow()
6142 event->pending_kill = POLL_IN; __perf_event_overflow()
6143 if (events && atomic_dec_and_test(&event->event_limit)) { __perf_event_overflow()
6145 event->pending_kill = POLL_HUP; __perf_event_overflow()
6146 event->pending_disable = 1; __perf_event_overflow()
6147 irq_work_queue(&event->pending); __perf_event_overflow()
6150 if (event->overflow_handler) __perf_event_overflow()
6151 event->overflow_handler(event, data, regs); __perf_event_overflow()
6153 perf_event_output(event, data, regs); __perf_event_overflow()
6155 if (*perf_event_fasync(event) && event->pending_kill) { __perf_event_overflow()
6156 event->pending_wakeup = 1; __perf_event_overflow()
6157 irq_work_queue(&event->pending); __perf_event_overflow()
6163 int perf_event_overflow(struct perf_event *event, perf_event_overflow() argument
6167 return __perf_event_overflow(event, 1, data, regs); perf_event_overflow()
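__perf_event_overflow() above decrements event->event_limit and, when it reaches zero, raises POLL_HUP and queues a disable. Userspace typically replenishes that budget with PERF_EVENT_IOC_REFRESH; a hedged sketch (perf_fd is illustrative):

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    /* allow n more overflows before the POLL_HUP / pending_disable path fires */
    static int refresh_overflow_budget(int perf_fd, int n)
    {
            return ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, n);
    }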
6171 * Generic software event infrastructure
6189 * We directly increment event->count and keep a second value in
6190 * event->hw.period_left to count intervals. This period event
6195 u64 perf_swevent_set_period(struct perf_event *event) perf_swevent_set_period() argument
6197 struct hw_perf_event *hwc = &event->hw; perf_swevent_set_period()
6218 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, perf_swevent_overflow() argument
6222 struct hw_perf_event *hwc = &event->hw; perf_swevent_overflow()
6226 overflow = perf_swevent_set_period(event); perf_swevent_overflow()
6232 if (__perf_event_overflow(event, throttle, perf_swevent_overflow()
6244 static void perf_swevent_event(struct perf_event *event, u64 nr, perf_swevent_event() argument
6248 struct hw_perf_event *hwc = &event->hw; perf_swevent_event()
6250 local64_add(nr, &event->count); perf_swevent_event()
6255 if (!is_sampling_event(event)) perf_swevent_event()
6258 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { perf_swevent_event()
6260 return perf_swevent_overflow(event, 1, data, regs); perf_swevent_event()
6262 data->period = event->hw.last_period; perf_swevent_event()
6264 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) perf_swevent_event()
6265 return perf_swevent_overflow(event, 1, data, regs); perf_swevent_event()
6270 perf_swevent_overflow(event, 0, data, regs); perf_swevent_event()
6273 static int perf_exclude_event(struct perf_event *event, perf_exclude_event() argument
6276 if (event->hw.state & PERF_HES_STOPPED) perf_exclude_event()
6280 if (event->attr.exclude_user && user_mode(regs)) perf_exclude_event()
6283 if (event->attr.exclude_kernel && !user_mode(regs)) perf_exclude_event()
6290 static int perf_swevent_match(struct perf_event *event, perf_swevent_match() argument
6296 if (event->attr.type != type) perf_swevent_match()
6299 if (event->attr.config != event_id) perf_swevent_match()
6302 if (perf_exclude_event(event, regs)) perf_swevent_match()
6336 /* For the event head insertion and removal in the hlist */
6338 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) find_swevent_head() argument
6341 u32 event_id = event->attr.config; find_swevent_head()
6342 u64 type = event->attr.type; find_swevent_head()
6350 lockdep_is_held(&event->ctx->lock)); find_swevent_head()
6363 struct perf_event *event; do_perf_sw_event() local
6371 hlist_for_each_entry_rcu(event, head, hlist_entry) { hlist_for_each_entry_rcu()
6372 if (perf_swevent_match(event, type, event_id, data, regs)) hlist_for_each_entry_rcu()
6373 perf_swevent_event(event, nr, data, regs); hlist_for_each_entry_rcu()
6423 static void perf_swevent_read(struct perf_event *event) perf_swevent_read() argument
6427 static int perf_swevent_add(struct perf_event *event, int flags) perf_swevent_add() argument
6430 struct hw_perf_event *hwc = &event->hw; perf_swevent_add()
6433 if (is_sampling_event(event)) { perf_swevent_add()
6435 perf_swevent_set_period(event); perf_swevent_add()
6440 head = find_swevent_head(swhash, event); perf_swevent_add()
6450 hlist_add_head_rcu(&event->hlist_entry, head); perf_swevent_add()
6451 perf_event_update_userpage(event); perf_swevent_add()
6456 static void perf_swevent_del(struct perf_event *event, int flags) perf_swevent_del() argument
6458 hlist_del_rcu(&event->hlist_entry); perf_swevent_del()
6461 static void perf_swevent_start(struct perf_event *event, int flags) perf_swevent_start() argument
6463 event->hw.state = 0; perf_swevent_start()
6466 static void perf_swevent_stop(struct perf_event *event, int flags) perf_swevent_stop() argument
6468 event->hw.state = PERF_HES_STOPPED; perf_swevent_stop()
6490 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) swevent_hlist_put_cpu() argument
6502 static void swevent_hlist_put(struct perf_event *event) swevent_hlist_put() argument
6507 swevent_hlist_put_cpu(event, cpu); swevent_hlist_put()
6510 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) swevent_hlist_get_cpu() argument
6534 static int swevent_hlist_get(struct perf_event *event) swevent_hlist_get() argument
6541 err = swevent_hlist_get_cpu(event, cpu); for_each_possible_cpu()
6554 swevent_hlist_put_cpu(event, cpu); for_each_possible_cpu()
6563 static void sw_perf_event_destroy(struct perf_event *event) sw_perf_event_destroy() argument
6565 u64 event_id = event->attr.config; sw_perf_event_destroy()
6567 WARN_ON(event->parent); sw_perf_event_destroy()
6570 swevent_hlist_put(event); sw_perf_event_destroy()
6573 static int perf_swevent_init(struct perf_event *event) perf_swevent_init() argument
6575 u64 event_id = event->attr.config; perf_swevent_init()
6577 if (event->attr.type != PERF_TYPE_SOFTWARE) perf_swevent_init()
6583 if (has_branch_stack(event)) perf_swevent_init()
6598 if (!event->parent) { perf_swevent_init()
6601 err = swevent_hlist_get(event); perf_swevent_init()
6606 event->destroy = sw_perf_event_destroy; perf_swevent_init()
6627 static int perf_tp_filter_match(struct perf_event *event, perf_tp_filter_match() argument
6632 if (likely(!event->filter) || filter_match_preds(event->filter, record)) perf_tp_filter_match()
6637 static int perf_tp_event_match(struct perf_event *event, perf_tp_event_match() argument
6641 if (event->hw.state & PERF_HES_STOPPED) perf_tp_event_match()
6646 if (event->attr.exclude_kernel) perf_tp_event_match()
6649 if (!perf_tp_filter_match(event, data)) perf_tp_event_match()
6660 struct perf_event *event; perf_tp_event() local
6670 hlist_for_each_entry_rcu(event, head, hlist_entry) { hlist_for_each_entry_rcu()
6671 if (perf_tp_event_match(event, &data, regs)) hlist_for_each_entry_rcu()
6672 perf_swevent_event(event, count, &data, regs); hlist_for_each_entry_rcu()
6677 * deliver this event there too.
6688 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
6689 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6691 if (event->attr.config != entry->type)
6693 if (perf_tp_event_match(event, &data, regs))
6694 perf_swevent_event(event, count, &data, regs);
6704 static void tp_perf_event_destroy(struct perf_event *event) tp_perf_event_destroy() argument
6706 perf_trace_destroy(event); tp_perf_event_destroy()
6709 static int perf_tp_event_init(struct perf_event *event) perf_tp_event_init() argument
6713 if (event->attr.type != PERF_TYPE_TRACEPOINT) perf_tp_event_init()
6719 if (has_branch_stack(event)) perf_tp_event_init()
6722 err = perf_trace_init(event); perf_tp_event_init()
6726 event->destroy = tp_perf_event_destroy; perf_tp_event_init()
6747 static int perf_event_set_filter(struct perf_event *event, void __user *arg) perf_event_set_filter() argument
6752 if (event->attr.type != PERF_TYPE_TRACEPOINT) perf_event_set_filter()
6759 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); perf_event_set_filter()
6765 static void perf_event_free_filter(struct perf_event *event) perf_event_free_filter() argument
6767 ftrace_profile_free_filter(event); perf_event_free_filter()
6770 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) perf_event_set_bpf_prog() argument
6774 if (event->attr.type != PERF_TYPE_TRACEPOINT) perf_event_set_bpf_prog()
6777 if (event->tp_event->prog) perf_event_set_bpf_prog()
6780 if (!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) perf_event_set_bpf_prog()
6794 event->tp_event->prog = prog; perf_event_set_bpf_prog()
6799 static void perf_event_free_bpf_prog(struct perf_event *event) perf_event_free_bpf_prog() argument
6803 if (!event->tp_event) perf_event_free_bpf_prog()
6806 prog = event->tp_event->prog; perf_event_free_bpf_prog()
6808 event->tp_event->prog = NULL; perf_event_free_bpf_prog()
6819 static int perf_event_set_filter(struct perf_event *event, void __user *arg) perf_event_set_filter() argument
6824 static void perf_event_free_filter(struct perf_event *event) perf_event_free_filter() argument
6828 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) perf_event_set_bpf_prog() argument
6833 static void perf_event_free_bpf_prog(struct perf_event *event) perf_event_free_bpf_prog() argument
6860 struct perf_event *event; perf_swevent_hrtimer() local
6863 event = container_of(hrtimer, struct perf_event, hw.hrtimer); perf_swevent_hrtimer()
6865 if (event->state != PERF_EVENT_STATE_ACTIVE) perf_swevent_hrtimer()
6868 event->pmu->read(event); perf_swevent_hrtimer()
6870 perf_sample_data_init(&data, 0, event->hw.last_period); perf_swevent_hrtimer()
6873 if (regs && !perf_exclude_event(event, regs)) { perf_swevent_hrtimer()
6874 if (!(event->attr.exclude_idle && is_idle_task(current))) perf_swevent_hrtimer()
6875 if (__perf_event_overflow(event, 1, &data, regs)) perf_swevent_hrtimer()
6879 period = max_t(u64, 10000, event->hw.sample_period); perf_swevent_hrtimer()
6885 static void perf_swevent_start_hrtimer(struct perf_event *event) perf_swevent_start_hrtimer() argument
6887 struct hw_perf_event *hwc = &event->hw; perf_swevent_start_hrtimer()
6890 if (!is_sampling_event(event)) perf_swevent_start_hrtimer()
6907 static void perf_swevent_cancel_hrtimer(struct perf_event *event) perf_swevent_cancel_hrtimer() argument
6909 struct hw_perf_event *hwc = &event->hw; perf_swevent_cancel_hrtimer()
6911 if (is_sampling_event(event)) { perf_swevent_cancel_hrtimer()
6919 static void perf_swevent_init_hrtimer(struct perf_event *event) perf_swevent_init_hrtimer() argument
6921 struct hw_perf_event *hwc = &event->hw; perf_swevent_init_hrtimer()
6923 if (!is_sampling_event(event)) perf_swevent_init_hrtimer()
6933 if (event->attr.freq) { perf_swevent_init_hrtimer()
6934 long freq = event->attr.sample_freq; perf_swevent_init_hrtimer()
6936 event->attr.sample_period = NSEC_PER_SEC / freq; perf_swevent_init_hrtimer()
6937 hwc->sample_period = event->attr.sample_period; perf_swevent_init_hrtimer()
6940 event->attr.freq = 0; perf_swevent_init_hrtimer()
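perf_swevent_init_hrtimer() above converts a frequency request into a fixed timer period before clearing attr.freq. The same conversion as a stand-alone sketch (NSEC_PER_SEC taken as 10^9):

    #include <stdint.h>

    /* mirrors event->attr.sample_period = NSEC_PER_SEC / freq above */
    static inline uint64_t freq_to_period_ns(uint64_t sample_freq)
    {
            return 1000000000ULL / sample_freq;   /* e.g. 1000 Hz -> 1000000 ns */
    }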
6945 * Software event: cpu wall time clock
6948 static void cpu_clock_event_update(struct perf_event *event) cpu_clock_event_update() argument
6954 prev = local64_xchg(&event->hw.prev_count, now); cpu_clock_event_update()
6955 local64_add(now - prev, &event->count); cpu_clock_event_update()
6958 static void cpu_clock_event_start(struct perf_event *event, int flags) cpu_clock_event_start() argument
6960 local64_set(&event->hw.prev_count, local_clock()); cpu_clock_event_start()
6961 perf_swevent_start_hrtimer(event); cpu_clock_event_start()
6964 static void cpu_clock_event_stop(struct perf_event *event, int flags) cpu_clock_event_stop() argument
6966 perf_swevent_cancel_hrtimer(event); cpu_clock_event_stop()
6967 cpu_clock_event_update(event); cpu_clock_event_stop()
6970 static int cpu_clock_event_add(struct perf_event *event, int flags) cpu_clock_event_add() argument
6973 cpu_clock_event_start(event, flags); cpu_clock_event_add()
6974 perf_event_update_userpage(event); cpu_clock_event_add()
6979 static void cpu_clock_event_del(struct perf_event *event, int flags) cpu_clock_event_del() argument
6981 cpu_clock_event_stop(event, flags); cpu_clock_event_del()
6984 static void cpu_clock_event_read(struct perf_event *event) cpu_clock_event_read() argument
6986 cpu_clock_event_update(event); cpu_clock_event_read()
6989 static int cpu_clock_event_init(struct perf_event *event) cpu_clock_event_init() argument
6991 if (event->attr.type != PERF_TYPE_SOFTWARE) cpu_clock_event_init()
6994 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) cpu_clock_event_init()
7000 if (has_branch_stack(event)) cpu_clock_event_init()
7003 perf_swevent_init_hrtimer(event); cpu_clock_event_init()
7022 * Software event: task time clock
7025 static void task_clock_event_update(struct perf_event *event, u64 now) task_clock_event_update() argument
7030 prev = local64_xchg(&event->hw.prev_count, now); task_clock_event_update()
7032 local64_add(delta, &event->count); task_clock_event_update()
7035 static void task_clock_event_start(struct perf_event *event, int flags) task_clock_event_start() argument
7037 local64_set(&event->hw.prev_count, event->ctx->time); task_clock_event_start()
7038 perf_swevent_start_hrtimer(event); task_clock_event_start()
7041 static void task_clock_event_stop(struct perf_event *event, int flags) task_clock_event_stop() argument
7043 perf_swevent_cancel_hrtimer(event); task_clock_event_stop()
7044 task_clock_event_update(event, event->ctx->time); task_clock_event_stop()
7047 static int task_clock_event_add(struct perf_event *event, int flags) task_clock_event_add() argument
7050 task_clock_event_start(event, flags); task_clock_event_add()
7051 perf_event_update_userpage(event); task_clock_event_add()
7056 static void task_clock_event_del(struct perf_event *event, int flags) task_clock_event_del() argument
7058 task_clock_event_stop(event, PERF_EF_UPDATE); task_clock_event_del()
7061 static void task_clock_event_read(struct perf_event *event) task_clock_event_read() argument
7064 u64 delta = now - event->ctx->timestamp; task_clock_event_read()
7065 u64 time = event->ctx->time + delta; task_clock_event_read()
7067 task_clock_event_update(event, time); task_clock_event_read()
7070 static int task_clock_event_init(struct perf_event *event) task_clock_event_init() argument
7072 if (event->attr.type != PERF_TYPE_SOFTWARE) task_clock_event_init()
7075 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) task_clock_event_init()
7081 if (has_branch_stack(event)) task_clock_event_init()
7084 perf_swevent_init_hrtimer(event); task_clock_event_init()
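Both software clock PMUs above accept only their own config under PERF_TYPE_SOFTWARE. A hedged userspace sketch of opening the task clock event they implement (raw syscall, since libc provides no wrapper):

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>

    static int open_task_clock(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_SOFTWARE;       /* checked in task_clock_event_init() */
            attr.config = PERF_COUNT_SW_TASK_CLOCK;

            /* current thread, any cpu, no group leader, no flags */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }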
7127 static int perf_event_idx_default(struct perf_event *event) perf_event_idx_default() argument
7414 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) perf_try_init_event() argument
7422 if (event->group_leader != event) { perf_try_init_event()
7427 ctx = perf_event_ctx_lock_nested(event->group_leader, perf_try_init_event()
7432 event->pmu = pmu; perf_try_init_event()
7433 ret = pmu->event_init(event); perf_try_init_event()
7436 perf_event_ctx_unlock(event->group_leader, ctx); perf_try_init_event()
7444 struct pmu *perf_init_event(struct perf_event *event) perf_init_event() argument
7453 pmu = idr_find(&pmu_idr, event->attr.type); perf_init_event()
7456 ret = perf_try_init_event(pmu, event); perf_init_event()
7463 ret = perf_try_init_event(pmu, event); perf_init_event()
7479 static void account_event_cpu(struct perf_event *event, int cpu) account_event_cpu() argument
7481 if (event->parent) account_event_cpu()
7484 if (is_cgroup_event(event)) account_event_cpu()
7488 static void account_event(struct perf_event *event) account_event() argument
7490 if (event->parent) account_event()
7493 if (event->attach_state & PERF_ATTACH_TASK) account_event()
7495 if (event->attr.mmap || event->attr.mmap_data) account_event()
7497 if (event->attr.comm) account_event()
7499 if (event->attr.task) account_event()
7501 if (event->attr.freq) { account_event()
7505 if (has_branch_stack(event)) account_event()
7507 if (is_cgroup_event(event)) account_event()
7510 account_event_cpu(event, event->cpu); account_event()
7514 * Allocate and initialize an event structure
7525 struct perf_event *event; perf_event_alloc() local
7534 event = kzalloc(sizeof(*event), GFP_KERNEL); perf_event_alloc()
7535 if (!event) perf_event_alloc()
7543 group_leader = event; perf_event_alloc()
7545 mutex_init(&event->child_mutex); perf_event_alloc()
7546 INIT_LIST_HEAD(&event->child_list); perf_event_alloc()
7548 INIT_LIST_HEAD(&event->group_entry); perf_event_alloc()
7549 INIT_LIST_HEAD(&event->event_entry); perf_event_alloc()
7550 INIT_LIST_HEAD(&event->sibling_list); perf_event_alloc()
7551 INIT_LIST_HEAD(&event->rb_entry); perf_event_alloc()
7552 INIT_LIST_HEAD(&event->active_entry); perf_event_alloc()
7553 INIT_HLIST_NODE(&event->hlist_entry); perf_event_alloc()
7556 init_waitqueue_head(&event->waitq); perf_event_alloc()
7557 init_irq_work(&event->pending, perf_pending_event); perf_event_alloc()
7559 mutex_init(&event->mmap_mutex); perf_event_alloc()
7561 atomic_long_set(&event->refcount, 1); perf_event_alloc()
7562 event->cpu = cpu; perf_event_alloc()
7563 event->attr = *attr; perf_event_alloc()
7564 event->group_leader = group_leader; perf_event_alloc()
7565 event->pmu = NULL; perf_event_alloc()
7566 event->oncpu = -1; perf_event_alloc()
7568 event->parent = parent_event; perf_event_alloc()
7570 event->ns = get_pid_ns(task_active_pid_ns(current)); perf_event_alloc()
7571 event->id = atomic64_inc_return(&perf_event_id); perf_event_alloc()
7573 event->state = PERF_EVENT_STATE_INACTIVE; perf_event_alloc()
7576 event->attach_state = PERF_ATTACH_TASK; perf_event_alloc()
7582 event->hw.target = task; perf_event_alloc()
7585 event->clock = &local_clock; perf_event_alloc()
7587 event->clock = parent_event->clock; perf_event_alloc()
7594 event->overflow_handler = overflow_handler; perf_event_alloc()
7595 event->overflow_handler_context = context; perf_event_alloc()
7597 perf_event__state_init(event); perf_event_alloc()
7601 hwc = &event->hw; perf_event_alloc()
7615 if (!has_branch_stack(event)) perf_event_alloc()
7616 event->attr.branch_sample_type = 0; perf_event_alloc()
7619 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); perf_event_alloc()
7624 pmu = perf_init_event(event); perf_event_alloc()
7632 err = exclusive_event_init(event); perf_event_alloc()
7636 if (!event->parent) { perf_event_alloc()
7637 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { perf_event_alloc()
7645 account_event(event); perf_event_alloc()
7647 return event; perf_event_alloc()
7650 exclusive_event_destroy(event); perf_event_alloc()
7653 if (event->destroy) perf_event_alloc()
7654 event->destroy(event); perf_event_alloc()
7657 if (is_cgroup_event(event)) perf_event_alloc()
7658 perf_detach_cgroup(event); perf_event_alloc()
7659 if (event->ns) perf_event_alloc()
7660 put_pid_ns(event->ns); perf_event_alloc()
7661 kfree(event); perf_event_alloc()
7797 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) perf_event_set_output() argument
7806 if (event == output_event) perf_event_set_output()
7812 if (output_event->cpu != event->cpu) perf_event_set_output()
7818 if (output_event->cpu == -1 && output_event->ctx != event->ctx) perf_event_set_output()
7824 if (output_event->clock != event->clock) perf_event_set_output()
7830 if (has_aux(event) && has_aux(output_event) && perf_event_set_output()
7831 event->pmu != output_event->pmu) perf_event_set_output()
7835 mutex_lock(&event->mmap_mutex); perf_event_set_output()
7837 if (atomic_read(&event->mmap_count)) perf_event_set_output()
7847 ring_buffer_attach(event, rb); perf_event_set_output()
7851 mutex_unlock(&event->mmap_mutex); perf_event_set_output()
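perf_event_set_output() above backs redirecting one event's samples into another event's ring buffer. A hedged sketch of triggering it via the ioctl interface (fd names are illustrative):

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    /* send samples from 'fd' into the buffer already mmapped on 'target_fd' */
    static int share_ring_buffer(int fd, int target_fd)
    {
            return ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, target_fd);
    }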
7866 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) perf_event_set_clock() argument
7872 event->clock = &ktime_get_mono_fast_ns; perf_event_set_clock()
7877 event->clock = &ktime_get_raw_fast_ns; perf_event_set_clock()
7882 event->clock = &ktime_get_real_ns; perf_event_set_clock()
7886 event->clock = &ktime_get_boot_ns; perf_event_set_clock()
7890 event->clock = &ktime_get_tai_ns; perf_event_set_clock()
7897 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) perf_event_set_clock()
7904 * sys_perf_event_open - open a performance event, associate it to a task/cpu
7909 * @group_fd: group leader event fd
7916 struct perf_event *event, *sibling; SYSCALL_DEFINE5() local
7996 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, SYSCALL_DEFINE5()
7998 if (IS_ERR(event)) { SYSCALL_DEFINE5()
7999 err = PTR_ERR(event); SYSCALL_DEFINE5()
8003 if (is_sampling_event(event)) { SYSCALL_DEFINE5()
8004 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { SYSCALL_DEFINE5()
8014 pmu = event->pmu; SYSCALL_DEFINE5()
8017 err = perf_event_set_clock(event, attr.clockid); SYSCALL_DEFINE5()
8023 (is_software_event(event) != is_software_event(group_leader))) { SYSCALL_DEFINE5()
8024 if (is_software_event(event)) { SYSCALL_DEFINE5()
8026 * If event and group_leader are not both a software SYSCALL_DEFINE5()
8027 * event, and event is, then group leader is not. SYSCALL_DEFINE5()
8038 * try to add a hardware event, move the whole group to SYSCALL_DEFINE5()
8048 ctx = find_get_context(pmu, task, event); SYSCALL_DEFINE5()
8065 * Look up the group leader (we will attach this event to it): SYSCALL_DEFINE5()
8078 if (group_leader->clock != event->clock) SYSCALL_DEFINE5()
8098 if (group_leader->cpu != event->cpu) SYSCALL_DEFINE5()
8113 err = perf_event_set_output(event, output_event); SYSCALL_DEFINE5()
8118 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, SYSCALL_DEFINE5()
8173 * event. What we want here is event in the initial SYSCALL_DEFINE5()
8181 if (!exclusive_event_installable(event, ctx)) { SYSCALL_DEFINE5()
8188 perf_install_in_context(ctx, event, event->cpu); SYSCALL_DEFINE5()
8199 event->owner = current; SYSCALL_DEFINE5()
8202 list_add_tail(&event->owner_entry, &current->perf_event_list); SYSCALL_DEFINE5()
8208 perf_event__header_size(event); SYSCALL_DEFINE5()
8209 perf_event__id_header_size(event); SYSCALL_DEFINE5()
8213 * new event on the sibling_list. This ensures destruction SYSCALL_DEFINE5()
8227 * and that will take care of freeing the event. SYSCALL_DEFINE5()
8230 free_event(event); SYSCALL_DEFINE5()
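The SYSCALL_DEFINE5 hits above show the group-leader validation (matching clock, matching cpu, software vs. hardware context) applied when an event joins an existing group. A hedged sketch of opening such a sibling from userspace (leader_fd is illustrative):

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* attr describes the sibling; it must pass the leader checks listed above */
    static int open_in_group(struct perf_event_attr *attr, int leader_fd)
    {
            return syscall(__NR_perf_event_open, attr, 0, -1, leader_fd, 0);
    }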
8257 struct perf_event *event; perf_event_create_kernel_counter() local
8264 event = perf_event_alloc(attr, cpu, task, NULL, NULL, perf_event_create_kernel_counter()
8266 if (IS_ERR(event)) { perf_event_create_kernel_counter()
8267 err = PTR_ERR(event); perf_event_create_kernel_counter()
8272 event->owner = EVENT_OWNER_KERNEL; perf_event_create_kernel_counter()
8274 ctx = find_get_context(event->pmu, task, event); perf_event_create_kernel_counter()
8282 if (!exclusive_event_installable(event, ctx)) { perf_event_create_kernel_counter()
8290 perf_install_in_context(ctx, event, cpu); perf_event_create_kernel_counter()
8294 return event; perf_event_create_kernel_counter()
8297 free_event(event); perf_event_create_kernel_counter()
8307 struct perf_event *event, *tmp; perf_pmu_migrate_context() local
8318 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, perf_pmu_migrate_context()
8320 perf_remove_from_context(event, false); perf_pmu_migrate_context()
8321 unaccount_event_cpu(event, src_cpu); perf_pmu_migrate_context()
8323 list_add(&event->migrate_entry, &events); perf_pmu_migrate_context()
8339 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { perf_pmu_migrate_context()
8340 if (event->group_leader == event) perf_pmu_migrate_context()
8343 list_del(&event->migrate_entry); perf_pmu_migrate_context()
8344 if (event->state >= PERF_EVENT_STATE_OFF) perf_pmu_migrate_context()
8345 event->state = PERF_EVENT_STATE_INACTIVE; perf_pmu_migrate_context()
8346 account_event_cpu(event, dst_cpu); perf_pmu_migrate_context()
8347 perf_install_in_context(dst_ctx, event, dst_cpu); perf_pmu_migrate_context()
8355 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { perf_pmu_migrate_context()
8356 list_del(&event->migrate_entry); perf_pmu_migrate_context()
8357 if (event->state >= PERF_EVENT_STATE_OFF) perf_pmu_migrate_context()
8358 event->state = PERF_EVENT_STATE_INACTIVE; perf_pmu_migrate_context()
8359 account_event_cpu(event, dst_cpu); perf_pmu_migrate_context()
8360 perf_install_in_context(dst_ctx, event, dst_cpu); perf_pmu_migrate_context()
8389 * Remove this event from the parent's list sync_child_event()
8398 * lost one event. sync_child_event()
8403 * Release the parent event, if this was the last sync_child_event()
8511 * When a child task exits, feed back event values to parent events.
8515 struct perf_event *event, *tmp; perf_event_exit_task() local
8519 list_for_each_entry_safe(event, tmp, &child->perf_event_list, perf_event_exit_task()
8521 list_del_init(&event->owner_entry); perf_event_exit_task()
8529 event->owner = NULL; perf_event_exit_task()
8537 static void perf_free_event(struct perf_event *event, perf_free_event() argument
8540 struct perf_event *parent = event->parent; perf_free_event()
8546 list_del_init(&event->child_list); perf_free_event()
8552 perf_group_detach(event); perf_free_event()
8553 list_del_event(event, ctx); perf_free_event()
8555 free_event(event); perf_free_event()
8568 struct perf_event *event, *tmp; perf_event_free_task() local
8578 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, for_each_task_context_nr()
8580 perf_free_event(event, ctx); for_each_task_context_nr()
8582 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, for_each_task_context_nr()
8584 perf_free_event(event, ctx); for_each_task_context_nr()
8605 * inherit an event from parent task to child task:
8645 * Make the child state follow the state of the parent event, inherit_event()
8683 * Link this into the parent event's child list inherit_event()
8717 inherit_task_group(struct perf_event *event, struct task_struct *parent, inherit_task_group() argument
8725 if (!event->attr.inherit) { inherit_task_group()
8746 ret = inherit_group(event, parent, parent_ctx, inherit_task_group()
8762 struct perf_event *event; perf_event_init_context() local
8796 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { perf_event_init_context()
8797 ret = inherit_task_group(event, parent, parent_ctx, perf_event_init_context()
8812 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { perf_event_init_context()
8813 ret = inherit_task_group(event, parent, parent_ctx, perf_event_init_context()
8909 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) __perf_event_exit_context()
2124 perf_install_in_context(struct perf_event_context *ctx, struct perf_event *event, int cpu) perf_install_in_context() argument
3331 find_get_context(struct pmu *pmu, struct task_struct *task, struct perf_event *event) find_get_context() argument
4260 arch_perf_update_userpage( struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) arch_perf_update_userpage() argument
4994 __perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) __perf_event_header__init_id() argument
5024 perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) perf_event_header__init_id() argument
5064 perf_output_read_one(struct perf_output_handle *handle, struct perf_event *event, u64 enabled, u64 running) perf_output_read_one() argument
5090 perf_output_read_group(struct perf_output_handle *handle, struct perf_event *event, u64 enabled, u64 running) perf_output_read_group() argument
5134 perf_output_read(struct perf_output_handle *handle, struct perf_event *event) perf_output_read() argument
5158 perf_output_sample(struct perf_output_handle *handle, struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) perf_output_sample() argument
5312 perf_prepare_sample(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs) perf_prepare_sample() argument
/linux-4.1.27/tools/lib/traceevent/
H A Dplugin_hrtimer.c25 #include "event-parse.h"
29 struct event_format *event, void *context) timer_expire_handler()
33 if (pevent_print_num_field(s, "0x%llx", event, "timer", timer_expire_handler()
35 pevent_print_num_field(s, "0x%llx", event, "hrtimer", timer_expire_handler()
40 pevent_print_num_field(s, "%llu", event, "now", record, 1); timer_expire_handler()
42 pevent_print_func_field(s, " function=%s", event, "function", timer_expire_handler()
49 struct event_format *event, void *context) timer_start_handler()
53 if (pevent_print_num_field(s, "0x%llx", event, "timer", timer_start_handler()
55 pevent_print_num_field(s, "0x%llx", event, "hrtimer", timer_start_handler()
58 pevent_print_func_field(s, " function=%s", event, "function", timer_start_handler()
62 pevent_print_num_field(s, "%llu", event, "expires", record, 1); timer_start_handler()
65 pevent_print_num_field(s, "%llu", event, "softexpires", record, 1); timer_start_handler()
27 timer_expire_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) timer_expire_handler() argument
47 timer_start_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) timer_start_handler() argument
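The two handlers above are wired up by the plugin's loader through pevent_register_event_handler(); a hedged sketch of that registration, with the tracepoint names matching the timer events these handlers print:

    #include "event-parse.h"

    int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
    {
            pevent_register_event_handler(pevent, -1, "timer", "hrtimer_expire_entry",
                                          timer_expire_handler, NULL);
            pevent_register_event_handler(pevent, -1, "timer", "hrtimer_start",
                                          timer_start_handler, NULL);
            return 0;
    }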
H A Dplugin_sched_switch.c24 #include "event-parse.h"
64 pevent_register_comm(field->event->pevent, comm, pid); write_and_save_comm()
69 struct event_format *event, void *context) sched_wakeup_handler()
74 if (pevent_get_field_val(s, event, "pid", record, &val, 1)) sched_wakeup_handler()
77 field = pevent_find_any_field(event, "comm"); sched_wakeup_handler()
84 if (pevent_get_field_val(s, event, "prio", record, &val, 0) == 0) sched_wakeup_handler()
87 if (pevent_get_field_val(s, event, "success", record, &val, 1) == 0) sched_wakeup_handler()
90 if (pevent_get_field_val(s, event, "target_cpu", record, &val, 0) == 0) sched_wakeup_handler()
98 struct event_format *event, void *context) sched_switch_handler()
103 if (pevent_get_field_val(s, event, "prev_pid", record, &val, 1)) sched_switch_handler()
106 field = pevent_find_any_field(event, "prev_comm"); sched_switch_handler()
113 if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0) sched_switch_handler()
116 if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0) sched_switch_handler()
121 if (pevent_get_field_val(s, event, "next_pid", record, &val, 1)) sched_switch_handler()
124 field = pevent_find_any_field(event, "next_comm"); sched_switch_handler()
131 if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0) sched_switch_handler()
67 sched_wakeup_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) sched_wakeup_handler() argument
96 sched_switch_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) sched_switch_handler() argument
H A Devent-parse.c36 #include "event-parse.h"
37 #include "event-utils.h"
54 #define do_warning_event(event, fmt, ...) \
59 if (event) \
60 warning("[%s:%s] " fmt, event->system, \
61 event->name, ##__VA_ARGS__); \
108 struct event_format *event, struct print_arg *arg);
691 static int add_event(struct pevent *pevent, struct event_format *event) add_event() argument
694 struct event_format **events = realloc(pevent->events, sizeof(event) * add_event()
702 if (pevent->events[i]->id > event->id) add_event()
708 sizeof(event) * (pevent->nr_events - i)); add_event()
710 pevent->events[i] = event; add_event()
713 event->pevent = pevent; add_event()
1300 static int event_read_fields(struct event_format *event, struct format_field **fields) event_read_fields() argument
1328 if (event->flags & EVENT_FL_ISFTRACE && event_read_fields()
1347 field->event = event; event_read_fields()
1358 (event->flags & EVENT_FL_ISFTRACE && event_read_fields()
1387 do_warning_event(event, "%s: no type found", __func__); event_read_fields()
1434 do_warning_event(event, "failed to find token"); event_read_fields()
1563 field->elementsize = event->pevent ? event_read_fields()
1564 event->pevent->long_size : event_read_fields()
1587 static int event_read_format(struct event_format *event) event_read_format() argument
1602 ret = event_read_fields(event, &event->format.common_fields); event_read_format()
1605 event->format.nr_common = ret; event_read_format()
1607 ret = event_read_fields(event, &event->format.fields); event_read_format()
1610 event->format.nr_fields = ret; event_read_format()
1620 process_arg_token(struct event_format *event, struct print_arg *arg,
1624 process_arg(struct event_format *event, struct print_arg *arg, char **tok) process_arg() argument
1632 return process_arg_token(event, arg, tok, type); process_arg()
1636 process_op(struct event_format *event, struct print_arg *arg, char **tok);
1643 process_field_arg(struct event_format *event, struct print_arg *arg, char **tok) process_field_arg() argument
1647 type = process_arg(event, arg, tok); process_field_arg()
1650 type = process_op(event, arg, tok); process_field_arg()
1657 process_cond(struct event_format *event, struct print_arg *top, char **tok) process_cond() argument
1668 do_warning_event(event, "%s: not enough memory!", __func__); process_cond()
1680 type = process_arg(event, left, &token); process_cond()
1685 type = process_op(event, left, &token); process_cond()
1694 type = process_arg(event, right, &token); process_cond()
1710 process_array(struct event_format *event, struct print_arg *top, char **tok) process_array() argument
1718 do_warning_event(event, "%s: not enough memory!", __func__); process_array()
1725 type = process_arg(event, arg, &token); process_array()
1812 process_op(struct event_format *event, struct print_arg *arg, char **tok) process_op() argument
1824 do_warning_event(event, "bad op token %s", token); process_op()
1834 do_warning_event(event, "bad op token %s", token); process_op()
1855 type = process_arg(event, right, tok); process_op()
1872 type = process_cond(event, arg, tok); process_op()
1905 event->flags |= EVENT_FL_FAILED; process_op()
1920 do_warning_event(event, "bad pointer type"); process_op()
1941 type = process_arg_token(event, right, tok, type); process_op()
1974 type = process_array(event, arg, tok); process_op()
1977 do_warning_event(event, "unknown op '%s'", token); process_op()
1978 event->flags |= EVENT_FL_FAILED; process_op()
1990 return process_op(event, arg, tok); process_op()
1992 return process_op(event, right, tok); process_op()
1998 do_warning_event(event, "%s: not enough memory!", __func__); process_op()
2006 process_entry(struct event_format *event __maybe_unused, struct print_arg *arg, process_entry()
2024 arg->field.field = pevent_find_any_field(event, arg->field.name); process_entry()
2028 arg->field.field = pevent_find_any_field(event, arg->field.name); process_entry()
2045 static int alloc_and_process_delim(struct event_format *event, char *next_token, alloc_and_process_delim() argument
2055 do_warning_event(event, "%s: not enough memory!", __func__); alloc_and_process_delim()
2060 type = process_arg(event, field, &token); alloc_and_process_delim()
2374 process_fields(struct event_format *event, struct print_flag_sym **list, char **tok) process_fields() argument
2393 type = process_arg(event, arg, &token); process_fields()
2396 type = process_op(event, arg, &token); process_fields()
2421 type = process_arg(event, arg, &token); process_fields()
2455 process_flags(struct event_format *event, struct print_arg *arg, char **tok) process_flags() argument
2466 do_warning_event(event, "%s: not enough memory!", __func__); process_flags()
2470 type = process_field_arg(event, field, &token); process_flags()
2474 type = process_op(event, field, &token); process_flags()
2491 type = process_fields(event, &arg->flags.flags, &token); process_flags()
2508 process_symbols(struct event_format *event, struct print_arg *arg, char **tok) process_symbols() argument
2519 do_warning_event(event, "%s: not enough memory!", __func__); process_symbols()
2523 type = process_field_arg(event, field, &token); process_symbols()
2530 type = process_fields(event, &arg->symbol.symbols, &token); process_symbols()
2547 process_hex(struct event_format *event, struct print_arg *arg, char **tok) process_hex() argument
2552 if (alloc_and_process_delim(event, ",", &arg->hex.field)) process_hex()
2555 if (alloc_and_process_delim(event, ")", &arg->hex.size)) process_hex()
2568 process_int_array(struct event_format *event, struct print_arg *arg, char **tok) process_int_array() argument
2573 if (alloc_and_process_delim(event, ",", &arg->int_array.field)) process_int_array()
2576 if (alloc_and_process_delim(event, ",", &arg->int_array.count)) process_int_array()
2579 if (alloc_and_process_delim(event, ")", &arg->int_array.el_size)) process_int_array()
2594 process_dynamic_array(struct event_format *event, struct print_arg *arg, char **tok) process_dynamic_array() argument
2614 field = pevent_find_field(event, token); process_dynamic_array()
2633 do_warning_event(event, "%s: not enough memory!", __func__); process_dynamic_array()
2638 type = process_arg(event, arg, &token); process_dynamic_array()
2658 process_paren(struct event_format *event, struct print_arg *arg, char **tok) process_paren() argument
2664 type = process_arg(event, arg, &token); process_paren()
2670 type = process_op(event, arg, &token); process_paren()
2692 do_warning_event(event, "previous needed to be PRINT_ATOM"); process_paren()
2698 do_warning_event(event, "%s: not enough memory!", process_paren()
2706 type = process_arg_token(event, item_arg, &token, type); process_paren()
2721 process_str(struct event_format *event __maybe_unused, struct print_arg *arg, process_str()
2750 process_bitmask(struct event_format *event __maybe_unused, struct print_arg *arg, process_bitmask()
2811 process_func_handler(struct event_format *event, struct pevent_function_handler *func, process_func_handler() argument
2829 do_warning_event(event, "%s: not enough memory!", process_func_handler()
2834 type = process_arg(event, farg, &token); process_func_handler()
2837 do_warning_event(event, process_func_handler()
2838 "Error: function '%s()' expects %d arguments but event %s only uses %d", process_func_handler()
2840 event->name, i + 1); process_func_handler()
2845 do_warning_event(event, process_func_handler()
2846 "Error: function '%s()' only expects %d arguments but event %s has more", process_func_handler()
2847 func->name, func->nr_args, event->name); process_func_handler()
2869 process_function(struct event_format *event, struct print_arg *arg, process_function() argument
2877 return process_flags(event, arg, tok); process_function()
2882 return process_symbols(event, arg, tok); process_function()
2886 return process_hex(event, arg, tok); process_function()
2890 return process_int_array(event, arg, tok); process_function()
2894 return process_str(event, arg, tok); process_function()
2898 return process_bitmask(event, arg, tok); process_function()
2902 return process_dynamic_array(event, arg, tok); process_function()
2905 func = find_func_handler(event->pevent, token); process_function()
2908 return process_func_handler(event, func, arg, tok); process_function()
2911 do_warning_event(event, "function %s not defined", token); process_function()
2917 process_arg_token(struct event_format *event, struct print_arg *arg, process_arg_token() argument
2929 type = process_entry(event, arg, &token); process_arg_token()
2944 type = process_function(event, arg, atom, &token); process_arg_token()
2978 type = process_paren(event, arg, &token); process_arg_token()
2986 type = process_op(event, arg, &token); process_arg_token()
2997 do_warning_event(event, "unexpected type %d", type); process_arg_token()
3005 static int event_read_print_args(struct event_format *event, struct print_arg **list) event_read_print_args() argument
3020 do_warning_event(event, "%s: not enough memory!", event_read_print_args()
3025 type = process_arg(event, arg, &token); event_read_print_args()
3037 type = process_op(event, arg, &token); event_read_print_args()
3063 static int event_read_print(struct event_format *event) event_read_print() argument
3082 event->print_fmt.format = token; event_read_print()
3083 event->print_fmt.args = NULL; event_read_print()
3095 if (asprintf(&cat, "%s%s", event->print_fmt.format, token) < 0) event_read_print()
3098 free_token(event->print_fmt.format); event_read_print()
3099 event->print_fmt.format = NULL; event_read_print()
3109 ret = event_read_print_args(event, &event->print_fmt.args); event_read_print()
3121 * pevent_find_common_field - return a common field by event
3122 * @event: handle for the event
3125 * Returns a common field from the event by the given @name.
3129 pevent_find_common_field(struct event_format *event, const char *name) pevent_find_common_field() argument
3133 for (format = event->format.common_fields; pevent_find_common_field()
3144 * @event: handle for the event
3151 pevent_find_field(struct event_format *event, const char *name) pevent_find_field() argument
3155 for (format = event->format.fields; pevent_find_field()
3166 * @event: handle for the event
3174 pevent_find_any_field(struct event_format *event, const char *name) pevent_find_any_field() argument
3178 format = pevent_find_common_field(event, name); pevent_find_any_field()
3181 return pevent_find_field(event, name); pevent_find_any_field()
3232 *value = pevent_read_number(field->event->pevent, pevent_read_number_field()
3243 struct event_format *event; get_common_info() local
3248 * Pick any event to find where the type is; get_common_info()
3255 event = pevent->events[0]; get_common_info()
3256 field = pevent_find_common_field(event, type); get_common_info()
3324 * pevent_find_event - find an event by given id
3326 * @id: the id of the event
3328 * Returns an event that has a given @id.
3354 * pevent_find_event_by_name - find an event by given name
3357 * @name: the name of the event to search for
3359 * This returns an event with a given @name and under the system
3360 * @sys. If @sys is NULL the first event with @name is returned.
3366 struct event_format *event; pevent_find_event_by_name() local
3375 event = pevent->events[i]; pevent_find_event_by_name()
3376 if (strcmp(event->name, name) == 0) { pevent_find_event_by_name()
3379 if (strcmp(event->system, sys) == 0) pevent_find_event_by_name()
3384 event = NULL; pevent_find_event_by_name()
3386 pevent->last_event = event; pevent_find_event_by_name()
3387 return event; pevent_find_event_by_name()
3391 eval_num_arg(void *data, int size, struct event_format *event, struct print_arg *arg) eval_num_arg() argument
3393 struct pevent *pevent = event->pevent; eval_num_arg()
3409 arg->field.field = pevent_find_any_field(event, arg->field.name); eval_num_arg()
3424 val = eval_num_arg(data, size, event, arg->typecast.item); eval_num_arg()
3433 val = process_defined_func(&s, data, size, event, arg); eval_num_arg()
3443 right = eval_num_arg(data, size, event, arg->op.right); eval_num_arg()
3474 pevent_find_any_field(event, larg->field.name); eval_num_arg()
3493 left = eval_num_arg(data, size, event, arg->op.left); eval_num_arg()
3496 val = eval_num_arg(data, size, event, arg->op.left); eval_num_arg()
3498 val = eval_num_arg(data, size, event, arg->op.right); eval_num_arg()
3502 left = eval_num_arg(data, size, event, arg->op.left); eval_num_arg()
3503 right = eval_num_arg(data, size, event, arg->op.right); eval_num_arg()
3603 do_warning_event(event, "%s: unknown op '%s'", __func__, arg->op.op); eval_num_arg()
3607 do_warning_event(event, "%s: field %s not found", eval_num_arg()
3716 struct event_format *event, const char *format, print_str_arg()
3719 struct pevent *pevent = event->pevent; print_str_arg()
3740 field = pevent_find_any_field(event, arg->field.name); print_str_arg()
3785 do_warning_event(event, "%s: not enough memory!", print_str_arg()
3795 val = eval_num_arg(data, size, event, arg->flags.field); print_str_arg()
3813 val = eval_num_arg(data, size, event, arg->symbol.field); print_str_arg()
3833 field = pevent_find_any_field(event, str); print_str_arg()
3840 len = eval_num_arg(data, size, event, arg->hex.size); print_str_arg()
3864 field = pevent_find_any_field(event, str); print_str_arg()
3871 len = eval_num_arg(data, size, event, arg->int_array.count); print_str_arg()
3872 el_size = eval_num_arg(data, size, event, print_str_arg()
3904 f = pevent_find_any_field(event, arg->string.string); print_str_arg()
3922 f = pevent_find_any_field(event, arg->bitmask.bitmask); print_str_arg()
3938 val = eval_num_arg(data, size, event, arg->op.left); print_str_arg()
3940 print_str_arg(s, data, size, event, print_str_arg()
3943 print_str_arg(s, data, size, event, print_str_arg()
3947 process_defined_func(s, data, size, event, arg); print_str_arg()
3957 do_warning_event(event, "%s: field %s not found", print_str_arg()
3963 struct event_format *event, struct print_arg *arg) process_defined_func()
3995 args[i] = eval_num_arg(data, size, event, farg); process_defined_func()
3999 print_str_arg(&str, data, size, event, "%s", -1, farg); process_defined_func()
4003 do_warning_event(event, "%s(%d): malloc str", process_defined_func()
4011 do_warning_event(event, "%s(%d): malloc str", process_defined_func()
4024 do_warning_event(event, "Unexpected end of arguments\n"); process_defined_func()
4058 static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event) make_bprint_args() argument
4060 struct pevent *pevent = event->pevent; make_bprint_args()
4072 field = pevent_find_field(event, "buf"); make_bprint_args()
4074 do_warning_event(event, "can't find buffer field for binary printk"); make_bprint_args()
4077 ip_field = pevent_find_field(event, "ip"); make_bprint_args()
4079 do_warning_event(event, "can't find ip field for binary printk"); make_bprint_args()
4093 do_warning_event(event, "%s(%d): not enough memory!", make_bprint_args()
4164 do_warning_event(event, "%s(%d): not enough memory!", make_bprint_args()
4187 do_warning_event(event, "%s(%d): not enough memory!", make_bprint_args()
4214 struct event_format *event) get_bprint_format()
4216 struct pevent *pevent = event->pevent; get_bprint_format()
4225 field = pevent_find_field(event, "fmt"); get_bprint_format()
4227 do_warning_event(event, "can't find format field for binary printk"); get_bprint_format()
4249 struct event_format *event, struct print_arg *arg) print_mac_arg()
4255 process_defined_func(s, data, size, event, arg); print_mac_arg()
4269 pevent_find_any_field(event, arg->field.name); print_mac_arg()
4271 do_warning_event(event, "%s: field %s not found", print_mac_arg()
4402 void *data, int size, struct event_format *event, print_ipv4_arg()
4408 process_defined_func(s, data, size, event, arg); print_ipv4_arg()
4419 pevent_find_any_field(event, arg->field.name); print_ipv4_arg()
4439 void *data, int size, struct event_format *event, print_ipv6_arg()
4454 process_defined_func(s, data, size, event, arg); print_ipv6_arg()
4465 pevent_find_any_field(event, arg->field.name); print_ipv6_arg()
4489 void *data, int size, struct event_format *event, print_ipsa_arg()
4512 process_defined_func(s, data, size, event, arg); print_ipsa_arg()
4523 pevent_find_any_field(event, arg->field.name); print_ipsa_arg()
4571 void *data, int size, struct event_format *event, print_ip_arg()
4587 rc += print_ipv4_arg(s, ptr, i, data, size, event, arg); print_ip_arg()
4590 rc += print_ipv6_arg(s, ptr, i, data, size, event, arg); print_ip_arg()
4593 rc += print_ipsa_arg(s, ptr, i, data, size, event, arg); print_ip_arg()
4614 struct event_format *event) print_event_fields()
4620 field = event->format.fields; print_event_fields()
4627 val = pevent_read_number(event->pevent, data + offset, len); print_event_fields()
4647 val = pevent_read_number(event->pevent, data + field->offset, print_event_fields()
4683 static void pretty_print(struct trace_seq *s, void *data, int size, struct event_format *event) pretty_print() argument
4685 struct pevent *pevent = event->pevent; pretty_print()
4686 struct print_fmt *print_fmt = &event->print_fmt; pretty_print()
4702 if (event->flags & EVENT_FL_FAILED) { pretty_print()
4704 print_event_fields(s, data, size, event); pretty_print()
4708 if (event->flags & EVENT_FL_ISBPRINT) { pretty_print()
4709 bprint_fmt = get_bprint_format(data, size, event); pretty_print()
4710 args = make_bprint_args(bprint_fmt, data, size, event); pretty_print()
4762 do_warning_event(event, "no argument match"); pretty_print()
4763 event->flags |= EVENT_FL_FAILED; pretty_print()
4766 len_arg = eval_num_arg(data, size, event, arg); pretty_print()
4786 print_mac_arg(s, *(ptr+1), data, size, event, arg); pretty_print()
4793 n = print_ip_arg(s, ptr+1, data, size, event, arg); pretty_print()
4808 do_warning_event(event, "no argument match"); pretty_print()
4809 event->flags |= EVENT_FL_FAILED; pretty_print()
4818 do_warning_event(event, "bad format!"); pretty_print()
4819 event->flags |= EVENT_FL_FAILED; pretty_print()
4826 val = eval_num_arg(data, size, event, arg); pretty_print()
4884 do_warning_event(event, "bad count (%d)", ls); pretty_print()
4885 event->flags |= EVENT_FL_FAILED; pretty_print()
4890 do_warning_event(event, "no matching argument"); pretty_print()
4891 event->flags |= EVENT_FL_FAILED; pretty_print()
4900 do_warning_event(event, "bad format!"); pretty_print()
4901 event->flags |= EVENT_FL_FAILED; pretty_print()
4911 print_str_arg(&p, data, size, event, pretty_print()
4926 if (event->flags & EVENT_FL_FAILED) { pretty_print()
5021 * pevent_data_type - parse out the given event type
5025 * This returns the event id from the @rec.
5033 * pevent_data_event_from_type - find the event by a given type
5035 * @type: the type of the event.
5037 * This returns the event from a given @type.
5165 * @event: the handle to the event
5168 * This parses the raw @data using the given @event information and
5171 void pevent_event_info(struct trace_seq *s, struct event_format *event, pevent_event_info() argument
5176 if (event->pevent->print_raw || (event->flags & EVENT_FL_PRINTRAW)) pevent_event_info()
5177 print_event_fields(s, record->data, record->size, event); pevent_event_info()
5180 if (event->handler && !(event->flags & EVENT_FL_NOHANDLE)) pevent_event_info()
5181 print_pretty = event->handler(s, record, event, pevent_event_info()
5182 event->context); pevent_event_info()
5185 pretty_print(s, record->data, record->size, event); pevent_event_info()
5208 struct event_format *event; pevent_print_event() local
5234 event = pevent_find_event(pevent, type); pevent_print_event()
5235 if (!event) { pevent_print_event()
5236 do_warning("ug! no event found for type %d", type); pevent_print_event()
5260 secs, p, usecs, event->name); pevent_print_event()
5263 record->ts, event->name); pevent_print_event()
5265 /* Space out the event names evenly. */ pevent_print_event()
5266 len = strlen(event->name); pevent_print_event()
5270 pevent_event_info(s, event, record); pevent_print_event()
5383 do_warning("event %s has more %s fields than specified", get_event_fields()
5391 do_warning("event %s has less %s fields than specified", get_event_fields()
5400 * pevent_event_common_fields - return a list of common fields for an event
5401 * @event: the event to return the common fields of.
5406 struct format_field **pevent_event_common_fields(struct event_format *event) pevent_event_common_fields() argument
5408 return get_event_fields("common", event->name, pevent_event_common_fields()
5409 event->format.nr_common, pevent_event_common_fields()
5410 event->format.common_fields); pevent_event_common_fields()
5414 * pevent_event_fields - return a list of event specific fields for an event
5415 * @event: the event to return the fields of.
5420 struct format_field **pevent_event_fields(struct event_format *event) pevent_event_fields() argument
5422 return get_event_fields("event", event->name, pevent_event_fields()
5423 event->format.nr_fields, pevent_event_fields()
5424 event->format.fields); pevent_event_fields()
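
A short sketch of walking the arrays returned above; it assumes, as get_event_fields() suggests, that the array is NULL terminated and that the caller frees it while the format_field entries themselves stay owned by the event:

        #include <stdio.h>
        #include <stdlib.h>
        #include "event-parse.h"

        /* Print the names of an event's own (non-common) fields. */
        static void dump_event_fields(struct event_format *event)
        {
                struct format_field **fields = pevent_event_fields(event);
                int i;

                if (!fields)
                        return;
                for (i = 0; fields[i]; i++)
                        printf("  %s\n", fields[i]->name);
                free(fields);   /* frees the array only; fields belong to the event */
        }
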
5655 static int event_matches(struct event_format *event, event_matches() argument
5659 if (id >= 0 && id != event->id) event_matches()
5662 if (event_name && (strcmp(event_name, event->name) != 0)) event_matches()
5665 if (sys_name && (strcmp(sys_name, event->system) != 0)) event_matches()
5678 static int find_event_handle(struct pevent *pevent, struct event_format *event) find_event_handle() argument
5685 if (event_matches(event, handle->id, find_event_handle()
5694 pr_stat("overriding event (%d) %s:%s with new print handler", find_event_handle()
5695 event->id, event->system, event->name); find_event_handle()
5697 event->handler = handle->func; find_event_handle()
5698 event->context = handle->context; find_event_handle()
5707 * __pevent_parse_format - parse the event format
5708 * @buf: the buffer storing the event format string
5710 * @sys: the system the event belongs to
5712 * This parses the event format and creates an event structure
5713 * to quickly parse raw data for a given event.
5723 struct event_format *event; __pevent_parse_format() local
5728 *eventp = event = alloc_event(); __pevent_parse_format()
5729 if (!event) __pevent_parse_format()
5732 event->name = event_read_name(); __pevent_parse_format()
5733 if (!event->name) { __pevent_parse_format()
5734 /* Bad event? */ __pevent_parse_format()
5740 event->flags |= EVENT_FL_ISFTRACE; __pevent_parse_format()
5742 if (strcmp(event->name, "bprint") == 0) __pevent_parse_format()
5743 event->flags |= EVENT_FL_ISBPRINT; __pevent_parse_format()
5746 event->id = event_read_id(); __pevent_parse_format()
5747 if (event->id < 0) { __pevent_parse_format()
5756 event->system = strdup(sys); __pevent_parse_format()
5757 if (!event->system) { __pevent_parse_format()
5762 /* Add pevent to event so that it can be referenced */ __pevent_parse_format()
5763 event->pevent = pevent; __pevent_parse_format()
5765 ret = event_read_format(event); __pevent_parse_format()
5772 * If the event has an override, don't print warnings if the event __pevent_parse_format()
5775 if (pevent && find_event_handle(pevent, event)) __pevent_parse_format()
5778 ret = event_read_print(event); __pevent_parse_format()
5786 if (!ret && (event->flags & EVENT_FL_ISFTRACE)) { __pevent_parse_format()
5791 list = &event->print_fmt.args; __pevent_parse_format()
5792 for (field = event->format.fields; field; field = field->next) { __pevent_parse_format()
5795 event->flags |= EVENT_FL_FAILED; __pevent_parse_format()
5801 event->flags |= EVENT_FL_FAILED; __pevent_parse_format()
5815 event->flags |= EVENT_FL_FAILED; __pevent_parse_format()
5819 free(event->system); __pevent_parse_format()
5820 free(event->name); __pevent_parse_format()
5821 free(event); __pevent_parse_format()
5833 struct event_format *event = *eventp; __pevent_parse_event() local
5835 if (event == NULL) __pevent_parse_event()
5838 if (pevent && add_event(pevent, event)) { __pevent_parse_event()
5844 if (PRINT_ARGS && event->print_fmt.args) __pevent_parse_event()
5845 print_args(event->print_fmt.args); __pevent_parse_event()
5850 pevent_free_format(event); __pevent_parse_event()
5855 * pevent_parse_format - parse the event format
5858 * @buf: the buffer storing the event format string
5860 * @sys: the system the event belongs to
5862 * This parses the event format and creates an event structure
5863 * to quickly parse raw data for a given event.
5878 * pevent_parse_event - parse the event format
5880 * @buf: the buffer storing the event format string
5882 * @sys: the system the event belongs to
5884 * This parses the event format and creates an event structure
5885 * to quickly parse raw data for a given event.
5894 struct event_format *event = NULL; pevent_parse_event() local
5895 return __pevent_parse_event(pevent, &event, buf, size, sys); pevent_parse_event()
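
A hedged sketch of feeding one events/<sys>/<event>/format file to pevent_parse_event(); the fixed-size buffer and the 0-on-success return are assumptions about the API as used elsewhere in tools/lib/traceevent, not code from the tree. A caller would typically allocate the handle once with pevent_alloc() and run this over each format file of interest:

        #include <stdio.h>
        #include "event-parse.h"

        /* Parse a single format file for the given subsystem; returns 0 on
         * success, non-zero on open or parse failure. */
        static int parse_format_file(struct pevent *pevent, const char *path,
                                     const char *sys)
        {
                static char buf[64 * 1024];     /* format files are small */
                size_t size;
                FILE *fp;

                fp = fopen(path, "r");
                if (!fp)
                        return -1;
                size = fread(buf, 1, sizeof(buf), fp);
                fclose(fp);

                return pevent_parse_event(pevent, buf, size, sys);
        }
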
5954 * @event: the event that the field is for
5965 void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event, pevent_get_field_raw() argument
5974 if (!event) pevent_get_field_raw()
5977 field = pevent_find_field(event, name); pevent_get_field_raw()
5991 offset = pevent_read_number(event->pevent, pevent_get_field_raw()
6004 * @event: the event that the field is for
6012 int pevent_get_field_val(struct trace_seq *s, struct event_format *event, pevent_get_field_val() argument
6018 if (!event) pevent_get_field_val()
6021 field = pevent_find_field(event, name); pevent_get_field_val()
6029 * @event: the event that the field is for
6037 int pevent_get_common_field_val(struct trace_seq *s, struct event_format *event, pevent_get_common_field_val() argument
6043 if (!event) pevent_get_common_field_val()
6046 field = pevent_find_common_field(event, name); pevent_get_common_field_val()
6054 * @event: the event that the field is for
6062 int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event, pevent_get_any_field_val() argument
6068 if (!event) pevent_get_any_field_val()
6071 field = pevent_find_any_field(event, name); pevent_get_any_field_val()
6080 * @event: the event that the field is for
6088 struct event_format *event, const char *name, pevent_print_num_field()
6091 struct format_field *field = pevent_find_field(event, name); pevent_print_num_field()
6112 * @event: the event that the field is for
6120 struct event_format *event, const char *name, pevent_print_func_field()
6123 struct format_field *field = pevent_find_field(event, name); pevent_print_func_field()
6124 struct pevent *pevent = event->pevent; pevent_print_func_field()
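
The accessors above are what the plugins further down in this listing build on. A small sketch follows; "id" is a made-up field name, and the trailing err flag is used the way the kvm plugin below uses it, to report lookup failures into the trace_seq:

        #include "event-parse.h"

        /* Pull one numeric field out of a record and print it two ways. */
        static int show_id_field(struct trace_seq *s, struct event_format *event,
                                 struct pevent_record *record)
        {
                unsigned long long id;

                if (pevent_get_field_val(s, event, "id", record, &id, 1) < 0)
                        return -1;
                trace_seq_printf(s, "id=%llu ", id);

                /* Equivalent one-liner via the printing helper documented above. */
                pevent_print_num_field(s, "id=%llu ", event, "id", record, 0);
                return 0;
        }
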
6284 struct event_format *event; pevent_search_event() local
6288 event = pevent_find_event(pevent, id); pevent_search_event()
6289 if (!event) pevent_search_event()
6291 if (event_name && (strcmp(event_name, event->name) != 0)) pevent_search_event()
6293 if (sys_name && (strcmp(sys_name, event->system) != 0)) pevent_search_event()
6296 event = pevent_find_event_by_name(pevent, sys_name, event_name); pevent_search_event()
6297 if (!event) pevent_search_event()
6300 return event; pevent_search_event()
6304 * pevent_register_event_handler - register a way to parse an event
6306 * @id: the id of the event to register
6307 * @sys_name: the system name the event belongs to
6308 * @event_name: the name of the event
6309 * @func: the function to call to parse the event information
6313 * a given event. If for some reason the default print format
6315 * for an event to be used to parse the data instead.
6317 * If @id is >= 0, then it is used to find the event.
6324 struct event_format *event; pevent_register_event_handler() local
6327 event = pevent_search_event(pevent, id, sys_name, event_name); pevent_register_event_handler()
6328 if (event == NULL) pevent_register_event_handler()
6331 pr_stat("overriding event (%d) %s:%s with new print handler", pevent_register_event_handler()
6332 event->id, event->system, event->name); pevent_register_event_handler()
6334 event->handler = func; pevent_register_event_handler()
6335 event->context = context; pevent_register_event_handler()
6342 do_warning("Failed to allocate event handler"); pevent_register_event_handler()
6354 do_warning("Failed to allocate event/sys name"); pevent_register_event_handler()
6389 * pevent_unregister_event_handler - unregister an existing event handler
6391 * @id: the id of the event to unregister
6393 * @event_name: the name of the event handler
6394 * @func: the function to call to parse the event information
6397 * This function removes existing event handler (parser).
6399 * If @id is >= 0, then it is used to find the event.
6402 * Returns 0 if handler was removed successfully, -1 if event was not found.
6408 struct event_format *event; pevent_unregister_event_handler() local
6412 event = pevent_search_event(pevent, id, sys_name, event_name); pevent_unregister_event_handler()
6413 if (event == NULL) pevent_unregister_event_handler()
6416 if (event->handler == func && event->context == context) { pevent_unregister_event_handler()
6417 pr_stat("removing override handler for event (%d) %s:%s. Going back to default handler.", pevent_unregister_event_handler()
6418 event->id, event->system, event->name); pevent_unregister_event_handler()
6420 event->handler = NULL; pevent_unregister_event_handler()
6421 event->context = NULL; pevent_unregister_event_handler()
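
A hedged sketch of overriding and later restoring an event's printing, using the parameter order documented above and the handler shape the plugins below use; "mysys" and "myevent" are invented names. Per pevent_event_info() earlier in this listing, returning 0 from the handler suppresses the default pretty printer, while a non-zero return falls back to it:

        #include "event-parse.h"

        static int my_handler(struct trace_seq *s, struct pevent_record *record,
                              struct event_format *event, void *context)
        {
                trace_seq_printf(s, "%s: custom output", event->name);
                return 0;               /* handled; skip the default printer */
        }

        static void install_handler(struct pevent *pevent)
        {
                /* id < 0: match by system and event name instead of id. */
                pevent_register_event_handler(pevent, -1, "mysys", "myevent",
                                              my_handler, NULL);
        }

        static void remove_handler(struct pevent *pevent)
        {
                pevent_unregister_event_handler(pevent, -1, "mysys", "myevent",
                                                my_handler, NULL);
        }
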
6484 void pevent_free_format(struct event_format *event) pevent_free_format() argument
6486 free(event->name); pevent_free_format()
6487 free(event->system); pevent_free_format()
6489 free_formats(&event->format); pevent_free_format()
6491 free(event->print_fmt.format); pevent_free_format()
6492 free_args(event->print_fmt.args); pevent_free_format()
6494 free(event); pevent_free_format()
3715 print_str_arg(struct trace_seq *s, void *data, int size, struct event_format *event, const char *format, int len_arg, struct print_arg *arg) print_str_arg() argument
3962 process_defined_func(struct trace_seq *s, void *data, int size, struct event_format *event, struct print_arg *arg) process_defined_func() argument
4213 get_bprint_format(void *data, int size __maybe_unused, struct event_format *event) get_bprint_format() argument
4248 print_mac_arg(struct trace_seq *s, int mac, void *data, int size, struct event_format *event, struct print_arg *arg) print_mac_arg() argument
4401 print_ipv4_arg(struct trace_seq *s, const char *ptr, char i, void *data, int size, struct event_format *event, struct print_arg *arg) print_ipv4_arg() argument
4438 print_ipv6_arg(struct trace_seq *s, const char *ptr, char i, void *data, int size, struct event_format *event, struct print_arg *arg) print_ipv6_arg() argument
4488 print_ipsa_arg(struct trace_seq *s, const char *ptr, char i, void *data, int size, struct event_format *event, struct print_arg *arg) print_ipsa_arg() argument
4570 print_ip_arg(struct trace_seq *s, const char *ptr, void *data, int size, struct event_format *event, struct print_arg *arg) print_ip_arg() argument
4612 print_event_fields(struct trace_seq *s, void *data, int size __maybe_unused, struct event_format *event) print_event_fields() argument
6087 pevent_print_num_field(struct trace_seq *s, const char *fmt, struct event_format *event, const char *name, struct pevent_record *record, int err) pevent_print_num_field() argument
6119 pevent_print_func_field(struct trace_seq *s, const char *fmt, struct event_format *event, const char *name, struct pevent_record *record, int err) pevent_print_func_field() argument
H A Dplugin_mac80211.c24 #include "event-parse.h"
28 static void print_string(struct trace_seq *s, struct event_format *event, print_string() argument
31 struct format_field *f = pevent_find_field(event, name); print_string()
56 #define SF(fn) pevent_print_num_field(s, fn ":%d", event, fn, record, 0)
57 #define SFX(fn) pevent_print_num_field(s, fn ":%#x", event, fn, record, 0)
62 struct event_format *event, void *context) drv_bss_info_changed()
66 print_string(s, event, "wiphy_name", data); drv_bss_info_changed()
68 print_string(s, event, "vif_name", data); drv_bss_info_changed()
69 pevent_print_num_field(s, "(%d)", event, "vif_type", record, 1); drv_bss_info_changed()
60 drv_bss_info_changed(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) drv_bss_info_changed() argument
H A Dplugin_kvm.c25 #include "event-parse.h"
248 struct event_format *event, const char *field) print_exit_reason()
254 if (pevent_get_field_val(s, event, field, record, &val, 1) < 0) print_exit_reason()
257 if (pevent_get_field_val(s, event, "isa", record, &isa, 0) < 0) print_exit_reason()
269 struct event_format *event, void *context) kvm_exit_handler()
273 if (print_exit_reason(s, record, event, "exit_reason") < 0) kvm_exit_handler()
276 pevent_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1); kvm_exit_handler()
278 if (pevent_get_field_val(s, event, "info1", record, &info1, 0) >= 0 kvm_exit_handler()
279 && pevent_get_field_val(s, event, "info2", record, &info2, 0) >= 0) kvm_exit_handler()
292 struct event_format *event, void *context) kvm_emulate_insn_handler()
299 if (pevent_get_field_val(s, event, "rip", record, &rip, 1) < 0) kvm_emulate_insn_handler()
302 if (pevent_get_field_val(s, event, "csbase", record, &csbase, 1) < 0) kvm_emulate_insn_handler()
305 if (pevent_get_field_val(s, event, "len", record, &len, 1) < 0) kvm_emulate_insn_handler()
308 if (pevent_get_field_val(s, event, "flags", record, &flags, 1) < 0) kvm_emulate_insn_handler()
311 if (pevent_get_field_val(s, event, "failed", record, &failed, 1) < 0) kvm_emulate_insn_handler()
314 insn = pevent_get_field_raw(s, event, "insn", record, &llen, 1); kvm_emulate_insn_handler()
331 struct event_format *event, void *context) kvm_nested_vmexit_inject_handler()
333 if (print_exit_reason(s, record, event, "exit_code") < 0) kvm_nested_vmexit_inject_handler()
336 pevent_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1); kvm_nested_vmexit_inject_handler()
337 pevent_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1); kvm_nested_vmexit_inject_handler()
338 pevent_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1); kvm_nested_vmexit_inject_handler()
339 pevent_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1); kvm_nested_vmexit_inject_handler()
345 struct event_format *event, void *context) kvm_nested_vmexit_handler()
347 pevent_print_num_field(s, "rip %llx ", event, "rip", record, 1); kvm_nested_vmexit_handler()
349 return kvm_nested_vmexit_inject_handler(s, record, event, context); kvm_nested_vmexit_handler()
368 struct event_format *event, void *context) kvm_mmu_print_role()
376 if (pevent_get_field_val(s, event, "role", record, &val, 1) < 0) kvm_mmu_print_role()
385 if (pevent_is_file_bigendian(event->pevent) == kvm_mmu_print_role()
386 pevent_is_host_bigendian(event->pevent)) { kvm_mmu_print_role()
400 pevent_print_num_field(s, " root %u ", event, kvm_mmu_print_role()
403 if (pevent_get_field_val(s, event, "unsync", record, &val, 1) < 0) kvm_mmu_print_role()
412 struct event_format *event, void *context) kvm_mmu_get_page_handler()
416 if (pevent_get_field_val(s, event, "created", record, &val, 1) < 0) kvm_mmu_get_page_handler()
421 if (pevent_get_field_val(s, event, "gfn", record, &val, 1) < 0) kvm_mmu_get_page_handler()
425 return kvm_mmu_print_role(s, record, event, context); kvm_mmu_get_page_handler()
247 print_exit_reason(struct trace_seq *s, struct pevent_record *record, struct event_format *event, const char *field) print_exit_reason() argument
268 kvm_exit_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_exit_handler() argument
290 kvm_emulate_insn_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_emulate_insn_handler() argument
330 kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_nested_vmexit_inject_handler() argument
344 kvm_nested_vmexit_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_nested_vmexit_handler() argument
367 kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_mmu_print_role() argument
410 kvm_mmu_get_page_handler(struct trace_seq *s, struct pevent_record *record, struct event_format *event, void *context) kvm_mmu_get_page_handler() argument
/linux-4.1.27/net/irda/irlan/
H A Dirlan_client_event.c39 static int irlan_client_state_idle (struct irlan_cb *self, IRLAN_EVENT event,
41 static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
43 static int irlan_client_state_conn (struct irlan_cb *self, IRLAN_EVENT event,
45 static int irlan_client_state_info (struct irlan_cb *self, IRLAN_EVENT event,
47 static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
49 static int irlan_client_state_open (struct irlan_cb *self, IRLAN_EVENT event,
51 static int irlan_client_state_wait (struct irlan_cb *self, IRLAN_EVENT event,
53 static int irlan_client_state_arb (struct irlan_cb *self, IRLAN_EVENT event,
55 static int irlan_client_state_data (struct irlan_cb *self, IRLAN_EVENT event,
57 static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
59 static int irlan_client_state_sync (struct irlan_cb *self, IRLAN_EVENT event,
62 static int (*state[])(struct irlan_cb *, IRLAN_EVENT event, struct sk_buff *) =
77 void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, irlan_do_client_event() argument
83 (*state[ self->client.state]) (self, event, skb); irlan_do_client_event()
87 * Function irlan_client_state_idle (event, skb, info)
92 static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_idle() argument
98 switch (event) { irlan_client_state_idle()
118 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_idle()
128 * Function irlan_client_state_query (event, skb, info)
134 static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_query() argument
140 switch(event) { irlan_client_state_query()
169 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_query()
179 * Function irlan_client_state_conn (event, skb, info)
185 static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_conn() argument
190 switch (event) { irlan_client_state_conn()
204 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_conn()
214 * Function irlan_client_state_info (self, event, skb, info)
218 static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_info() argument
223 switch (event) { irlan_client_state_info()
242 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_info()
252 * Function irlan_client_state_media (self, event, skb, info)
258 static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_media() argument
263 switch(event) { irlan_client_state_media()
277 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_media()
287 * Function irlan_client_state_open (self, event, skb, info)
293 static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_open() argument
300 switch(event) { irlan_client_state_open()
347 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_open()
358 * Function irlan_client_state_wait (self, event, skb, info)
364 static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_wait() argument
369 switch(event) { irlan_client_state_wait()
382 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_wait()
391 static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_arb() argument
398 switch(event) { irlan_client_state_arb()
430 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_arb()
440 * Function irlan_client_state_data (self, event, skb, info)
446 static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_data() argument
452 switch(event) { irlan_client_state_data()
461 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_client_state_data()
471 * Function irlan_client_state_close (self, event, skb, info)
476 static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_close() argument
486 * Function irlan_client_state_sync (self, event, skb, info)
491 static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, irlan_client_state_sync() argument
H A Dirlan_provider_event.c33 static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
35 static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
37 static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
39 static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
42 static int (*state[])(struct irlan_cb *self, IRLAN_EVENT event,
58 void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event, irlan_do_provider_event() argument
63 (*state[self->provider.state]) (self, event, skb); irlan_do_provider_event()
67 * Function irlan_provider_state_idle (event, skb, info)
72 static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, irlan_provider_state_idle() argument
77 switch(event) { irlan_provider_state_idle()
83 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_provider_state_idle()
93 * Function irlan_provider_state_info (self, event, skb, info)
97 static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, irlan_provider_state_info() argument
104 switch(event) { irlan_provider_state_info()
146 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_provider_state_info()
156 * Function irlan_provider_state_open (self, event, skb, info)
162 static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_provider_state_open() argument
167 switch(event) { irlan_provider_state_open()
183 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_provider_state_open()
193 * Function irlan_provider_state_data (self, event, skb, info)
199 static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, irlan_provider_state_data() argument
205 switch(event) { irlan_provider_state_data()
216 pr_debug("%s(), Unknown event %d\n", __func__ , event); irlan_provider_state_data()
/linux-4.1.27/arch/arm/kernel/
H A Dperf_event.c9 * This code is based on the sparc64 perf event code, which is in turn based
71 armpmu_map_event(struct perf_event *event, armpmu_map_event() argument
79 u64 config = event->attr.config; armpmu_map_event()
80 int type = event->attr.type; armpmu_map_event()
82 if (type == event->pmu->type) armpmu_map_event()
97 int armpmu_event_set_period(struct perf_event *event) armpmu_event_set_period() argument
99 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_event_set_period()
100 struct hw_perf_event *hwc = &event->hw; armpmu_event_set_period()
130 armpmu->write_counter(event, (u64)(-left) & 0xffffffff); armpmu_event_set_period()
132 perf_event_update_userpage(event); armpmu_event_set_period()
137 u64 armpmu_event_update(struct perf_event *event) armpmu_event_update() argument
139 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_event_update()
140 struct hw_perf_event *hwc = &event->hw; armpmu_event_update()
145 new_raw_count = armpmu->read_counter(event); armpmu_event_update()
153 local64_add(delta, &event->count); armpmu_event_update()
160 armpmu_read(struct perf_event *event) armpmu_read() argument
162 armpmu_event_update(event); armpmu_read()
166 armpmu_stop(struct perf_event *event, int flags) armpmu_stop() argument
168 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_stop()
169 struct hw_perf_event *hwc = &event->hw; armpmu_stop()
176 armpmu->disable(event); armpmu_stop()
177 armpmu_event_update(event); armpmu_stop()
182 static void armpmu_start(struct perf_event *event, int flags) armpmu_start() argument
184 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_start()
185 struct hw_perf_event *hwc = &event->hw; armpmu_start()
202 armpmu_event_set_period(event); armpmu_start()
203 armpmu->enable(event); armpmu_start()
207 armpmu_del(struct perf_event *event, int flags) armpmu_del() argument
209 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_del()
211 struct hw_perf_event *hwc = &event->hw; armpmu_del()
214 armpmu_stop(event, PERF_EF_UPDATE); armpmu_del()
218 armpmu->clear_event_idx(hw_events, event); armpmu_del()
220 perf_event_update_userpage(event); armpmu_del()
224 armpmu_add(struct perf_event *event, int flags) armpmu_add() argument
226 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_add()
228 struct hw_perf_event *hwc = &event->hw; armpmu_add()
232 perf_pmu_disable(event->pmu); armpmu_add()
235 idx = armpmu->get_event_idx(hw_events, event); armpmu_add()
242 * If there is an event in the counter we are going to use then make armpmu_add()
245 event->hw.idx = idx; armpmu_add()
246 armpmu->disable(event); armpmu_add()
247 hw_events->events[idx] = event; armpmu_add()
251 armpmu_start(event, PERF_EF_RELOAD); armpmu_add()
254 perf_event_update_userpage(event); armpmu_add()
257 perf_pmu_enable(event->pmu); armpmu_add()
263 struct perf_event *event) validate_event()
267 if (is_software_event(event)) validate_event()
273 * until after pmu->event_init(event). validate_event()
275 if (event->pmu != pmu) validate_event()
278 if (event->state < PERF_EVENT_STATE_OFF) validate_event()
281 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) validate_event()
284 armpmu = to_arm_pmu(event->pmu); validate_event()
285 return armpmu->get_event_idx(hw_events, event) >= 0; validate_event()
289 validate_group(struct perf_event *event) validate_group() argument
291 struct perf_event *sibling, *leader = event->group_leader; validate_group()
300 if (!validate_event(event->pmu, &fake_pmu, leader)) validate_group()
304 if (!validate_event(event->pmu, &fake_pmu, sibling)) validate_group()
308 if (!validate_event(event->pmu, &fake_pmu, event)) validate_group()
370 hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
372 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); hw_perf_event_destroy()
390 __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
392 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); __hw_perf_event_init()
393 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
396 mapping = armpmu->map_event(event); __hw_perf_event_init()
399 pr_debug("event %x:%llx not supported\n", event->attr.type, __hw_perf_event_init()
400 event->attr.config); __hw_perf_event_init()
405 * We don't assign an index until we actually place the event onto __hw_perf_event_init()
419 armpmu->set_event_filter(hwc, &event->attr)) && __hw_perf_event_init()
420 event_requires_mode_exclusion(&event->attr)) { __hw_perf_event_init()
427 * Store the event encoding into the config_base field. __hw_perf_event_init()
431 if (!is_sampling_event(event)) { __hw_perf_event_init()
443 if (event->group_leader != event) { __hw_perf_event_init()
444 if (validate_group(event) != 0) __hw_perf_event_init()
451 static int armpmu_event_init(struct perf_event *event) armpmu_event_init() argument
453 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_event_init()
458 if (has_branch_stack(event)) armpmu_event_init()
461 if (armpmu->map_event(event) == -ENOENT) armpmu_event_init()
464 event->destroy = hw_perf_event_destroy; armpmu_event_init()
479 err = __hw_perf_event_init(event); armpmu_event_init()
481 hw_perf_event_destroy(event); armpmu_event_init()
262 validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, struct perf_event *event) validate_event() argument
H A Dperf_event_xscale.c9 * - xscale1pmu: 2 event counters and a cycle counter
10 * - xscale2pmu: 4 event counters and a cycle counter
11 * The two variants share event definitions, but have different
166 struct perf_event *event = cpuc->events[idx]; xscale1pmu_handle_irq() local
169 if (!event) xscale1pmu_handle_irq()
175 hwc = &event->hw; xscale1pmu_handle_irq()
176 armpmu_event_update(event); xscale1pmu_handle_irq()
178 if (!armpmu_event_set_period(event)) xscale1pmu_handle_irq()
181 if (perf_event_overflow(event, &data, regs)) xscale1pmu_handle_irq()
182 cpu_pmu->disable(event); xscale1pmu_handle_irq()
196 static void xscale1pmu_enable_event(struct perf_event *event) xscale1pmu_enable_event() argument
199 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); xscale1pmu_enable_event()
200 struct hw_perf_event *hwc = &event->hw; xscale1pmu_enable_event()
232 static void xscale1pmu_disable_event(struct perf_event *event) xscale1pmu_disable_event() argument
235 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); xscale1pmu_disable_event()
236 struct hw_perf_event *hwc = &event->hw; xscale1pmu_disable_event()
268 struct perf_event *event) xscale1pmu_get_event_idx()
270 struct hw_perf_event *hwc = &event->hw; xscale1pmu_get_event_idx()
311 static inline u32 xscale1pmu_read_counter(struct perf_event *event) xscale1pmu_read_counter() argument
313 struct hw_perf_event *hwc = &event->hw; xscale1pmu_read_counter()
332 static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val) xscale1pmu_write_counter() argument
334 struct hw_perf_event *hwc = &event->hw; xscale1pmu_write_counter()
350 static int xscale_map_event(struct perf_event *event) xscale_map_event() argument
352 return armpmu_map_event(event, &xscale_perf_map, xscale_map_event()
507 struct perf_event *event = cpuc->events[idx]; xscale2pmu_handle_irq() local
510 if (!event) xscale2pmu_handle_irq()
516 hwc = &event->hw; xscale2pmu_handle_irq()
517 armpmu_event_update(event); xscale2pmu_handle_irq()
519 if (!armpmu_event_set_period(event)) xscale2pmu_handle_irq()
522 if (perf_event_overflow(event, &data, regs)) xscale2pmu_handle_irq()
523 cpu_pmu->disable(event); xscale2pmu_handle_irq()
537 static void xscale2pmu_enable_event(struct perf_event *event) xscale2pmu_enable_event() argument
540 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); xscale2pmu_enable_event()
541 struct hw_perf_event *hwc = &event->hw; xscale2pmu_enable_event()
583 static void xscale2pmu_disable_event(struct perf_event *event) xscale2pmu_disable_event() argument
586 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); xscale2pmu_disable_event()
587 struct hw_perf_event *hwc = &event->hw; xscale2pmu_disable_event()
637 struct perf_event *event) xscale2pmu_get_event_idx()
639 int idx = xscale1pmu_get_event_idx(cpuc, event); xscale2pmu_get_event_idx()
675 static inline u32 xscale2pmu_read_counter(struct perf_event *event) xscale2pmu_read_counter() argument
677 struct hw_perf_event *hwc = &event->hw; xscale2pmu_read_counter()
702 static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val) xscale2pmu_write_counter() argument
704 struct hw_perf_event *hwc = &event->hw; xscale2pmu_write_counter()
267 xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) xscale1pmu_get_event_idx() argument
636 xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) xscale2pmu_get_event_idx() argument
H A Dperf_event_v7.c26 * Common ARMv7 event types
77 /* ARMv7 Cortex-A8 specific event types */
85 /* ARMv7 Cortex-A9 specific event types */
92 /* ARMv7 Cortex-A5 specific event types */
98 /* ARMv7 Cortex-A15 specific event types */
116 /* ARMv7 Cortex-A12 specific event types */
129 /* ARMv7 Krait specific event types */
143 /* ARMv7 Scorpion specific event types */
634 static inline u32 armv7pmu_read_counter(struct perf_event *event) armv7pmu_read_counter() argument
636 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_read_counter()
637 struct hw_perf_event *hwc = &event->hw; armv7pmu_read_counter()
654 static inline void armv7pmu_write_counter(struct perf_event *event, u32 value) armv7pmu_write_counter() argument
656 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_write_counter()
657 struct hw_perf_event *hwc = &event->hw; armv7pmu_write_counter()
759 static void armv7pmu_enable_event(struct perf_event *event) armv7pmu_enable_event() argument
762 struct hw_perf_event *hwc = &event->hw; armv7pmu_enable_event()
763 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_enable_event()
775 * the event that we're interested in. armv7pmu_enable_event()
785 * Set event (if destined for PMNx counters) armv7pmu_enable_event()
786 * We only need to set the event for the cycle counter if we armv7pmu_enable_event()
787 * have the ability to perform event filtering. armv7pmu_enable_event()
805 static void armv7pmu_disable_event(struct perf_event *event) armv7pmu_disable_event() argument
808 struct hw_perf_event *hwc = &event->hw; armv7pmu_disable_event()
809 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_disable_event()
863 struct perf_event *event = cpuc->events[idx]; armv7pmu_handle_irq() local
866 /* Ignore if we don't have an event. */ armv7pmu_handle_irq()
867 if (!event) armv7pmu_handle_irq()
877 hwc = &event->hw; armv7pmu_handle_irq()
878 armpmu_event_update(event); armv7pmu_handle_irq()
880 if (!armpmu_event_set_period(event)) armv7pmu_handle_irq()
883 if (perf_event_overflow(event, &data, regs)) armv7pmu_handle_irq()
884 cpu_pmu->disable(event); armv7pmu_handle_irq()
922 struct perf_event *event) armv7pmu_get_event_idx()
925 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv7pmu_get_event_idx()
926 struct hw_perf_event *hwc = &event->hw; armv7pmu_get_event_idx()
951 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
953 static int armv7pmu_set_event_filter(struct hw_perf_event *event, armv7pmu_set_event_filter() argument
969 * construct the event type. armv7pmu_set_event_filter()
971 event->config_base = config_base; armv7pmu_set_event_filter()
991 static int armv7_a8_map_event(struct perf_event *event) armv7_a8_map_event() argument
993 return armpmu_map_event(event, &armv7_a8_perf_map, armv7_a8_map_event()
997 static int armv7_a9_map_event(struct perf_event *event) armv7_a9_map_event() argument
999 return armpmu_map_event(event, &armv7_a9_perf_map, armv7_a9_map_event()
1003 static int armv7_a5_map_event(struct perf_event *event) armv7_a5_map_event() argument
1005 return armpmu_map_event(event, &armv7_a5_perf_map, armv7_a5_map_event()
1009 static int armv7_a15_map_event(struct perf_event *event) armv7_a15_map_event() argument
1011 return armpmu_map_event(event, &armv7_a15_perf_map, armv7_a15_map_event()
1015 static int armv7_a7_map_event(struct perf_event *event) armv7_a7_map_event() argument
1017 return armpmu_map_event(event, &armv7_a7_perf_map, armv7_a7_map_event()
1021 static int armv7_a12_map_event(struct perf_event *event) armv7_a12_map_event() argument
1023 return armpmu_map_event(event, &armv7_a12_perf_map, armv7_a12_map_event()
1027 static int krait_map_event(struct perf_event *event) krait_map_event() argument
1029 return armpmu_map_event(event, &krait_perf_map, krait_map_event()
1033 static int krait_map_event_no_branch(struct perf_event *event) krait_map_event_no_branch() argument
1035 return armpmu_map_event(event, &krait_perf_map_no_branch, krait_map_event_no_branch()
1039 static int scorpion_map_event(struct perf_event *event) scorpion_map_event() argument
1041 return armpmu_map_event(event, &scorpion_perf_map, scorpion_map_event()
1156 * G = group or particular event
1158 * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1161 * unit, etc.) while the event code (CC) corresponds to a particular class of
1162 * events (interrupts for example). An event code is broken down into
1172 #define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */
1173 #define EVENT_GROUP(event) ((event) & 0xf) /* G */
1174 #define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */
1175 #define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */
1176 #define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */
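
Working the example through the macros above confirms the comment's reading of 0x12021; this is just arithmetic on the listed definitions, not new code:

        /* EVENT_REGION(0x12021) = (0x12021 >> 12) & 0xf  = 0x12   & 0xf  = 2 -> PMRESR2 */
        /* EVENT_GROUP(0x12021)  =  0x12021        & 0xf                  = 1 -> group 1 */
        /* EVENT_CODE(0x12021)   = (0x12021 >> 4)  & 0xff = 0x1202 & 0xff = 2 -> code 2  */
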
1341 static void krait_pmu_disable_event(struct perf_event *event) krait_pmu_disable_event() argument
1344 struct hw_perf_event *hwc = &event->hw; krait_pmu_disable_event()
1346 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); krait_pmu_disable_event()
1367 static void krait_pmu_enable_event(struct perf_event *event) krait_pmu_enable_event() argument
1370 struct hw_perf_event *hwc = &event->hw; krait_pmu_enable_event()
1372 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); krait_pmu_enable_event()
1377 * the event that we're interested in. krait_pmu_enable_event()
1385 * Set event (if destined for PMNx counters) krait_pmu_enable_event()
1386 * We set the event for the cycle counter because we krait_pmu_enable_event()
1387 * have the ability to perform event filtering. krait_pmu_enable_event()
1428 static int krait_event_to_bit(struct perf_event *event, unsigned int region, krait_event_to_bit() argument
1432 struct hw_perf_event *hwc = &event->hw; krait_event_to_bit()
1433 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); krait_event_to_bit()
1455 struct perf_event *event) krait_pmu_get_event_idx()
1459 struct hw_perf_event *hwc = &event->hw; krait_pmu_get_event_idx()
1473 bit = krait_event_to_bit(event, region, group); krait_pmu_get_event_idx()
1478 idx = armv7pmu_get_event_idx(cpuc, event); krait_pmu_get_event_idx()
1486 struct perf_event *event) krait_pmu_clear_event_idx()
1489 struct hw_perf_event *hwc = &event->hw; krait_pmu_clear_event_idx()
1496 bit = krait_event_to_bit(event, region, group); krait_pmu_clear_event_idx()
1546 * G = group or particular event
1548 * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1551 * unit, etc.) while the event code (CC) corresponds to a particular class of
1552 * events (interrupts for example). An event code is broken down into
1674 static void scorpion_pmu_disable_event(struct perf_event *event) scorpion_pmu_disable_event() argument
1677 struct hw_perf_event *hwc = &event->hw; scorpion_pmu_disable_event()
1679 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); scorpion_pmu_disable_event()
1700 static void scorpion_pmu_enable_event(struct perf_event *event) scorpion_pmu_enable_event() argument
1703 struct hw_perf_event *hwc = &event->hw; scorpion_pmu_enable_event()
1705 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); scorpion_pmu_enable_event()
1710 * the event that we're interested in. scorpion_pmu_enable_event()
1718 * Set event (if destined for PMNx counters) scorpion_pmu_enable_event()
1719 * We don't set the event for the cycle counter because we scorpion_pmu_enable_event()
1720 * don't have the ability to perform event filtering. scorpion_pmu_enable_event()
1761 static int scorpion_event_to_bit(struct perf_event *event, unsigned int region, scorpion_event_to_bit() argument
1765 struct hw_perf_event *hwc = &event->hw; scorpion_event_to_bit()
1766 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); scorpion_event_to_bit()
1788 struct perf_event *event) scorpion_pmu_get_event_idx()
1792 struct hw_perf_event *hwc = &event->hw; scorpion_pmu_get_event_idx()
1803 bit = scorpion_event_to_bit(event, region, group); scorpion_pmu_get_event_idx()
1808 idx = armv7pmu_get_event_idx(cpuc, event); scorpion_pmu_get_event_idx()
1816 struct perf_event *event) scorpion_pmu_clear_event_idx()
1819 struct hw_perf_event *hwc = &event->hw; scorpion_pmu_clear_event_idx()
1826 bit = scorpion_event_to_bit(event, region, group); scorpion_pmu_clear_event_idx()
921 armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) armv7pmu_get_event_idx() argument
1454 krait_pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) krait_pmu_get_event_idx() argument
1485 krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) krait_pmu_clear_event_idx() argument
1787 scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) scorpion_pmu_get_event_idx() argument
1815 scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) scorpion_pmu_clear_event_idx() argument
H A Dperf_event_v6.c11 * one event and replace it with another we could get spurious counts from the
12 * wrong event. However, we can take advantage of the fact that the
13 * performance counters can export events to the event bus, and the event bus
15 * the event bus. The procedure for disabling a configurable counter is:
24 * - set the new event type.
96 * misses and main TLB misses. There isn't an event for TLB misses, so
159 * misses and main TLB misses. There isn't an event for TLB misses, so
227 static inline u32 armv6pmu_read_counter(struct perf_event *event) armv6pmu_read_counter() argument
229 struct hw_perf_event *hwc = &event->hw; armv6pmu_read_counter()
245 static inline void armv6pmu_write_counter(struct perf_event *event, u32 value) armv6pmu_write_counter() argument
247 struct hw_perf_event *hwc = &event->hw; armv6pmu_write_counter()
260 static void armv6pmu_enable_event(struct perf_event *event) armv6pmu_enable_event() argument
263 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv6pmu_enable_event()
264 struct hw_perf_event *hwc = &event->hw; armv6pmu_enable_event()
285 * Mask out the current event and set the counter to count the event armv6pmu_enable_event()
320 struct perf_event *event = cpuc->events[idx]; armv6pmu_handle_irq() local
323 /* Ignore if we don't have an event. */ armv6pmu_handle_irq()
324 if (!event) armv6pmu_handle_irq()
334 hwc = &event->hw; armv6pmu_handle_irq()
335 armpmu_event_update(event); armv6pmu_handle_irq()
337 if (!armpmu_event_set_period(event)) armv6pmu_handle_irq()
340 if (perf_event_overflow(event, &data, regs)) armv6pmu_handle_irq()
341 cpu_pmu->disable(event); armv6pmu_handle_irq()
382 struct perf_event *event) armv6pmu_get_event_idx()
384 struct hw_perf_event *hwc = &event->hw; armv6pmu_get_event_idx()
407 static void armv6pmu_disable_event(struct perf_event *event) armv6pmu_disable_event() argument
410 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv6pmu_disable_event()
411 struct hw_perf_event *hwc = &event->hw; armv6pmu_disable_event()
430 * Mask out the current event and set the counter to count the number armv6pmu_disable_event()
442 static void armv6mpcore_pmu_disable_event(struct perf_event *event) armv6mpcore_pmu_disable_event() argument
445 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); armv6mpcore_pmu_disable_event()
446 struct hw_perf_event *hwc = &event->hw; armv6mpcore_pmu_disable_event()
473 static int armv6_map_event(struct perf_event *event) armv6_map_event() argument
475 return armpmu_map_event(event, &armv6_perf_map, armv6_map_event()
519 * disable the interrupt reporting and update the event. When unthrottling we
523 static int armv6mpcore_map_event(struct perf_event *event) armv6mpcore_map_event() argument
525 return armpmu_map_event(event, &armv6mpcore_perf_map, armv6mpcore_map_event()
381 armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) armv6pmu_get_event_idx() argument
/linux-4.1.27/fs/notify/fanotify/
H A Dfanotify.c31 static int fanotify_merge(struct list_head *list, struct fsnotify_event *event) fanotify_merge() argument
36 pr_debug("%s: list=%p event=%p\n", __func__, list, event); fanotify_merge()
40 * Don't merge a permission event with any other event so that we know fanotify_merge()
41 * the event structure we have created in fanotify_handle_event() is the fanotify_merge()
44 if (event->mask & FAN_ALL_PERM_EVENTS) fanotify_merge()
49 if (should_merge(test_event, event)) { list_for_each_entry_reverse()
58 test_event->mask |= event->mask;
64 struct fanotify_perm_event_info *event) fanotify_get_response()
68 pr_debug("%s: group=%p event=%p\n", __func__, group, event); fanotify_get_response()
70 wait_event(group->fanotify_data.access_waitq, event->response || fanotify_get_response()
73 if (!event->response) { /* bypass_perm set */ fanotify_get_response()
76 * it from group's event list because we are responsible for fanotify_get_response()
77 * freeing the permission event. fanotify_get_response()
79 fsnotify_remove_event(group, &event->fae.fse); fanotify_get_response()
84 switch (event->response) { fanotify_get_response()
92 event->response = 0; fanotify_get_response()
94 pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__, fanotify_get_response()
95 group, event, ret); fanotify_get_response()
113 /* if we don't have enough info to send an event to userspace say no */ fanotify_should_send_event()
127 * if the event is for a child and this inode doesn't care about fanotify_should_send_event()
156 struct fanotify_event_info *event; fanotify_alloc_event() local
166 event = &pevent->fae; fanotify_alloc_event()
171 event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL); fanotify_alloc_event()
172 if (!event) fanotify_alloc_event()
175 fsnotify_init_event(&event->fse, inode, mask); fanotify_alloc_event()
176 event->tgid = get_pid(task_tgid(current)); fanotify_alloc_event()
178 event->path = *path; fanotify_alloc_event()
179 path_get(&event->path); fanotify_alloc_event()
181 event->path.mnt = NULL; fanotify_alloc_event()
182 event->path.dentry = NULL; fanotify_alloc_event()
184 return event; fanotify_alloc_event()
195 struct fanotify_event_info *event; fanotify_handle_event() local
216 event = fanotify_alloc_event(inode, mask, data); fanotify_handle_event()
217 if (unlikely(!event)) fanotify_handle_event()
220 fsn_event = &event->fse; fanotify_handle_event()
225 /* Our event wasn't used in the end. Free it. */ fanotify_handle_event()
251 struct fanotify_event_info *event; fanotify_free_event() local
253 event = FANOTIFY_E(fsn_event); fanotify_free_event()
254 path_put(&event->path); fanotify_free_event()
255 put_pid(event->tgid); fanotify_free_event()
263 kmem_cache_free(fanotify_event_cachep, event); fanotify_free_event()
63 fanotify_get_response(struct fsnotify_group *group, struct fanotify_perm_event_info *event) fanotify_get_response() argument
/linux-4.1.27/arch/sh/kernel/
H A Dperf_event.c2 * Performance event support framework for SuperH hardware counters.
84 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
121 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
123 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
124 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
152 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
179 static void sh_perf_event_update(struct perf_event *event, sh_perf_event_update() argument
217 local64_add(delta, &event->count); sh_perf_event_update()
220 static void sh_pmu_stop(struct perf_event *event, int flags) sh_pmu_stop() argument
223 struct hw_perf_event *hwc = &event->hw; sh_pmu_stop()
226 if (!(event->hw.state & PERF_HES_STOPPED)) { sh_pmu_stop()
229 event->hw.state |= PERF_HES_STOPPED; sh_pmu_stop()
232 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { sh_pmu_stop()
233 sh_perf_event_update(event, &event->hw, idx); sh_pmu_stop()
234 event->hw.state |= PERF_HES_UPTODATE; sh_pmu_stop()
238 static void sh_pmu_start(struct perf_event *event, int flags) sh_pmu_start() argument
241 struct hw_perf_event *hwc = &event->hw; sh_pmu_start()
248 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); sh_pmu_start()
250 cpuc->events[idx] = event; sh_pmu_start()
251 event->hw.state = 0; sh_pmu_start()
255 static void sh_pmu_del(struct perf_event *event, int flags) sh_pmu_del() argument
259 sh_pmu_stop(event, PERF_EF_UPDATE); sh_pmu_del()
260 __clear_bit(event->hw.idx, cpuc->used_mask); sh_pmu_del()
262 perf_event_update_userpage(event); sh_pmu_del()
265 static int sh_pmu_add(struct perf_event *event, int flags) sh_pmu_add() argument
268 struct hw_perf_event *hwc = &event->hw; sh_pmu_add()
272 perf_pmu_disable(event->pmu); sh_pmu_add()
285 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; sh_pmu_add()
287 sh_pmu_start(event, PERF_EF_RELOAD); sh_pmu_add()
289 perf_event_update_userpage(event); sh_pmu_add()
292 perf_pmu_enable(event->pmu); sh_pmu_add()
296 static void sh_pmu_read(struct perf_event *event) sh_pmu_read() argument
298 sh_perf_event_update(event, &event->hw, event->hw.idx); sh_pmu_read()
301 static int sh_pmu_event_init(struct perf_event *event) sh_pmu_event_init() argument
306 if (has_branch_stack(event)) sh_pmu_event_init()
309 switch (event->attr.type) { sh_pmu_event_init()
313 err = __hw_perf_event_init(event); sh_pmu_event_init()
321 if (event->destroy) sh_pmu_event_init()
322 event->destroy(event); sh_pmu_event_init()
/linux-4.1.27/drivers/usb/usbip/
H A Dusbip_event.c33 usbip_dbg_eh("pending event %lx\n", ud->event); event_handler()
39 if (ud->event & USBIP_EH_SHUTDOWN) { event_handler()
41 ud->event &= ~USBIP_EH_SHUTDOWN; event_handler()
45 if (ud->event & USBIP_EH_RESET) { event_handler()
47 ud->event &= ~USBIP_EH_RESET; event_handler()
51 if (ud->event & USBIP_EH_UNUSABLE) { event_handler()
53 ud->event &= ~USBIP_EH_UNUSABLE; event_handler()
57 if (ud->event & USBIP_EH_BYE) event_handler()
84 ud->event = 0; usbip_start_eh()
106 void usbip_event_add(struct usbip_device *ud, unsigned long event) usbip_event_add() argument
111 ud->event |= event; usbip_event_add()
122 if (ud->event != 0) usbip_event_happened()
/linux-4.1.27/arch/arc/kernel/
H A Dperf_event.c69 /* read counter #idx; note that counter# != event# on ARC! */ arc_pmu_read_counter()
88 static void arc_perf_event_update(struct perf_event *event, arc_perf_event_update() argument
103 local64_add(delta, &event->count); arc_perf_event_update()
107 static void arc_pmu_read(struct perf_event *event) arc_pmu_read() argument
109 arc_perf_event_update(event, &event->hw, event->hw.idx); arc_pmu_read()
132 pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n", arc_pmu_cache_event()
139 /* initializes hw_perf_event structure if event is supported */ arc_pmu_event_init()
140 static int arc_pmu_event_init(struct perf_event *event) arc_pmu_event_init() argument
142 struct hw_perf_event *hwc = &event->hw; arc_pmu_event_init()
145 switch (event->attr.type) { arc_pmu_event_init()
147 if (event->attr.config >= PERF_COUNT_HW_MAX) arc_pmu_event_init()
149 if (arc_pmu->ev_hw_idx[event->attr.config] < 0) arc_pmu_event_init()
151 hwc->config = arc_pmu->ev_hw_idx[event->attr.config]; arc_pmu_event_init()
152 pr_debug("init event %d with h/w %d \'%s\'\n", arc_pmu_event_init()
153 (int) event->attr.config, (int) hwc->config, arc_pmu_event_init()
154 arc_pmu_ev_hw_map[event->attr.config]); arc_pmu_event_init()
157 ret = arc_pmu_cache_event(event->attr.config); arc_pmu_event_init()
188 static void arc_pmu_start(struct perf_event *event, int flags) arc_pmu_start() argument
190 struct hw_perf_event *hwc = &event->hw; arc_pmu_start()
197 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); arc_pmu_start()
199 event->hw.state = 0; arc_pmu_start()
206 static void arc_pmu_stop(struct perf_event *event, int flags) arc_pmu_stop() argument
208 struct hw_perf_event *hwc = &event->hw; arc_pmu_stop()
211 if (!(event->hw.state & PERF_HES_STOPPED)) { arc_pmu_stop()
218 event->hw.state |= PERF_HES_STOPPED; arc_pmu_stop()
222 !(event->hw.state & PERF_HES_UPTODATE)) { arc_pmu_stop()
223 arc_perf_event_update(event, &event->hw, idx); arc_pmu_stop()
224 event->hw.state |= PERF_HES_UPTODATE; arc_pmu_stop()
228 static void arc_pmu_del(struct perf_event *event, int flags) arc_pmu_del() argument
230 arc_pmu_stop(event, PERF_EF_UPDATE); arc_pmu_del()
231 __clear_bit(event->hw.idx, arc_pmu->used_mask); arc_pmu_del()
233 perf_event_update_userpage(event); arc_pmu_del()
237 static int arc_pmu_add(struct perf_event *event, int flags) arc_pmu_add() argument
239 struct hw_perf_event *hwc = &event->hw; arc_pmu_add()
260 arc_pmu_start(event, PERF_EF_RELOAD); arc_pmu_add()
262 perf_event_update_userpage(event); arc_pmu_add()
317 pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n", arc_pmu_device_probe()
/linux-4.1.27/arch/x86/kernel/cpu/
H A Dperf_event_amd_iommu.c84 const char *event; member in struct:amd_iommu_event_desc
90 struct amd_iommu_event_desc *event = _iommu_event_show() local
92 return sprintf(buf, "%s\n", event->event); _iommu_event_show()
98 .event = _event, \
197 static int perf_iommu_event_init(struct perf_event *event) perf_iommu_event_init() argument
199 struct hw_perf_event *hwc = &event->hw; perf_iommu_event_init()
203 /* test the event attr type check for PMU enumeration */ perf_iommu_event_init()
204 if (event->attr.type != event->pmu->type) perf_iommu_event_init()
210 * Also, it does not support event sampling mode. perf_iommu_event_init()
212 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) perf_iommu_event_init()
216 if (event->attr.exclude_user || event->attr.exclude_kernel || perf_iommu_event_init()
217 event->attr.exclude_host || event->attr.exclude_guest) perf_iommu_event_init()
220 if (event->cpu < 0) perf_iommu_event_init()
225 if (event->pmu != &perf_iommu->pmu) perf_iommu_event_init()
229 config = event->attr.config; perf_iommu_event_init()
230 config1 = event->attr.config1; perf_iommu_event_init()
283 static void perf_iommu_disable_event(struct perf_event *event) perf_iommu_disable_event() argument
287 amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), perf_iommu_disable_event()
288 _GET_BANK(event), _GET_CNTR(event), perf_iommu_disable_event()
292 static void perf_iommu_start(struct perf_event *event, int flags) perf_iommu_start() argument
294 struct hw_perf_event *hwc = &event->hw; perf_iommu_start()
305 amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), perf_iommu_start()
306 _GET_BANK(event), _GET_CNTR(event), perf_iommu_start()
310 perf_iommu_enable_event(event); perf_iommu_start()
311 perf_event_update_userpage(event); perf_iommu_start()
315 static void perf_iommu_read(struct perf_event *event) perf_iommu_read() argument
320 struct hw_perf_event *hwc = &event->hw; perf_iommu_read()
323 amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), perf_iommu_read()
324 _GET_BANK(event), _GET_CNTR(event), perf_iommu_read()
338 local64_add(delta, &event->count); perf_iommu_read()
342 static void perf_iommu_stop(struct perf_event *event, int flags) perf_iommu_stop() argument
344 struct hw_perf_event *hwc = &event->hw; perf_iommu_stop()
352 perf_iommu_disable_event(event); perf_iommu_stop()
360 perf_iommu_read(event); perf_iommu_stop()
364 static int perf_iommu_add(struct perf_event *event, int flags) perf_iommu_add() argument
368 container_of(event->pmu, struct perf_amd_iommu, pmu); perf_iommu_add()
371 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; perf_iommu_add()
376 event->hw.extra_reg.reg = (u16)retval; perf_iommu_add()
381 perf_iommu_start(event, PERF_EF_RELOAD); perf_iommu_add()
386 static void perf_iommu_del(struct perf_event *event, int flags) perf_iommu_del() argument
389 container_of(event->pmu, struct perf_amd_iommu, pmu); perf_iommu_del()
392 perf_iommu_stop(event, PERF_EF_UPDATE); perf_iommu_del()
396 _GET_BANK(event), perf_iommu_del()
397 _GET_CNTR(event)); perf_iommu_del()
399 perf_event_update_userpage(event); perf_iommu_del()
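perf_iommu_event_init() above is one instance of a validation checklist that recurs throughout this listing for system-wide, counting-only PMUs (see also snb_uncore_imc_event_init, amd_uncore_event_init, rapl_pmu_event_init and intel_cqm_event_init further down): accept only events of the PMU's own type, refuse sampling and per-task attachment, refuse privilege filters the hardware cannot honour, and require a concrete CPU. The sketch below restates that checklist on a simplified attribute struct whose field names merely mirror perf_event_attr; it is an illustration, not the kernel code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_attr {
    int  type;            /* must equal the PMU's dynamically assigned type */
    bool sampling;        /* a sample_period/sample_freq was requested */
    bool per_task;        /* attached to a task rather than a CPU */
    bool exclude_user, exclude_kernel, exclude_hv, exclude_idle;
    int  cpu;             /* target CPU, -1 means "any" */
};

/* 0 if acceptable for a counting-only, system-wide PMU; negative errno otherwise. */
static int sketch_event_init(const struct fake_attr *a, int pmu_type)
{
    if (a->type != pmu_type)
        return -ENOENT;   /* not our event type: let another PMU claim it */
    if (a->sampling || a->per_task)
        return -EINVAL;   /* these counters cannot sample or follow a task */
    if (a->exclude_user || a->exclude_kernel || a->exclude_hv || a->exclude_idle)
        return -EINVAL;   /* no privilege-level filtering in this hardware */
    if (a->cpu < 0)
        return -EINVAL;   /* must be bound to a concrete CPU */
    return 0;
}

int main(void)
{
    struct fake_attr a = { .type = 42, .sampling = true, .cpu = 0 };
    printf("sampling event -> %d\n", sketch_event_init(&a, 42));  /* rejected: -EINVAL */
    a.sampling = false;
    printf("counting event -> %d\n", sketch_event_init(&a, 42));  /* accepted: 0 */
    return 0;
}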
H A Dperf_event_intel_uncore_snb.c11 /* SNB event control */
60 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
68 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_msr_enable_event() argument
70 struct hw_perf_event *hwc = &event->hw; snb_uncore_msr_enable_event()
78 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_msr_disable_event() argument
80 wrmsrl(event->hw.config_base, 0); snb_uncore_msr_disable_event()
92 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
159 INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
163 INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
219 static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_imc_enable_event() argument
222 static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_imc_disable_event() argument
225 static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_imc_read_counter() argument
227 struct hw_perf_event *hwc = &event->hw; snb_uncore_imc_read_counter()
237 static int snb_uncore_imc_event_init(struct perf_event *event) snb_uncore_imc_event_init() argument
241 struct hw_perf_event *hwc = &event->hw; snb_uncore_imc_event_init()
242 u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK; snb_uncore_imc_event_init()
245 if (event->attr.type != event->pmu->type) snb_uncore_imc_event_init()
248 pmu = uncore_event_to_pmu(event); snb_uncore_imc_event_init()
258 if (event->attr.exclude_user || snb_uncore_imc_event_init()
259 event->attr.exclude_kernel || snb_uncore_imc_event_init()
260 event->attr.exclude_hv || snb_uncore_imc_event_init()
261 event->attr.exclude_idle || snb_uncore_imc_event_init()
262 event->attr.exclude_host || snb_uncore_imc_event_init()
263 event->attr.exclude_guest || snb_uncore_imc_event_init()
264 event->attr.sample_period) /* no sampling */ snb_uncore_imc_event_init()
271 if (event->cpu < 0) snb_uncore_imc_event_init()
275 if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK) snb_uncore_imc_event_init()
278 box = uncore_pmu_to_box(pmu, event->cpu); snb_uncore_imc_event_init()
282 event->cpu = box->cpu; snb_uncore_imc_event_init()
284 event->hw.idx = -1; snb_uncore_imc_event_init()
285 event->hw.last_tag = ~0ULL; snb_uncore_imc_event_init()
286 event->hw.extra_reg.idx = EXTRA_REG_NONE; snb_uncore_imc_event_init()
287 event->hw.branch_reg.idx = EXTRA_REG_NONE; snb_uncore_imc_event_init()
289 * check event is known (whitelist, determines counter) snb_uncore_imc_event_init()
305 event->hw.event_base = base; snb_uncore_imc_event_init()
306 event->hw.config = cfg; snb_uncore_imc_event_init()
307 event->hw.idx = idx; snb_uncore_imc_event_init()
314 static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event) snb_uncore_imc_hw_config() argument
319 static void snb_uncore_imc_event_start(struct perf_event *event, int flags) snb_uncore_imc_event_start() argument
321 struct intel_uncore_box *box = uncore_event_to_box(event); snb_uncore_imc_event_start()
324 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) snb_uncore_imc_event_start()
327 event->hw.state = 0; snb_uncore_imc_event_start()
330 list_add_tail(&event->active_entry, &box->active_list); snb_uncore_imc_event_start()
332 count = snb_uncore_imc_read_counter(box, event); snb_uncore_imc_event_start()
333 local64_set(&event->hw.prev_count, count); snb_uncore_imc_event_start()
339 static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) snb_uncore_imc_event_stop() argument
341 struct intel_uncore_box *box = uncore_event_to_box(event); snb_uncore_imc_event_stop()
342 struct hw_perf_event *hwc = &event->hw; snb_uncore_imc_event_stop()
350 list_del(&event->active_entry); snb_uncore_imc_event_stop()
358 * Drain the remaining delta count out of an event snb_uncore_imc_event_stop()
361 uncore_perf_event_update(box, event); snb_uncore_imc_event_stop()
366 static int snb_uncore_imc_event_add(struct perf_event *event, int flags) snb_uncore_imc_event_add() argument
368 struct intel_uncore_box *box = uncore_event_to_box(event); snb_uncore_imc_event_add()
369 struct hw_perf_event *hwc = &event->hw; snb_uncore_imc_event_add()
378 snb_uncore_imc_event_start(event, 0); snb_uncore_imc_event_add()
385 static void snb_uncore_imc_event_del(struct perf_event *event, int flags) snb_uncore_imc_event_del() argument
387 struct intel_uncore_box *box = uncore_event_to_box(event); snb_uncore_imc_event_del()
390 snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); snb_uncore_imc_event_del()
393 if (event == box->event_list[i]) { snb_uncore_imc_event_del()
577 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhm_uncore_msr_enable_event() argument
579 struct hw_perf_event *hwc = &event->hw; nhm_uncore_msr_enable_event()
602 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
603 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
604 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
605 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
606 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
607 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
608 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
609 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
610 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
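The DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7") line and the alias strings such as "event=0x2f,umask=0x0f" above work together: the format attribute tells userspace which bits of perf_event_attr.config a named field occupies, and an alias is resolved by packing each named value into its advertised bit range. A small sketch of that packing follows; the umask range (bits 8-15) is assumed here for illustration rather than taken from this file.

#include <stdint.h>
#include <stdio.h>

/* Pack a field value into config at bits [lo, hi], the way a "config:lo-hi"
 * format attribute describes. Illustrative only: drivers define their own ranges. */
static uint64_t pack_field(uint64_t config, unsigned lo, unsigned hi, uint64_t val)
{
    uint64_t width = hi - lo + 1;
    uint64_t mask  = (width == 64) ? ~0ULL : ((1ULL << width) - 1);
    return (config & ~(mask << lo)) | ((val & mask) << lo);
}

int main(void)
{
    /* e.g. an alias "event=0x2f,umask=0x0f" with formats event -> config:0-7
     * and umask -> config:8-15 (the umask range is an assumption of the sketch) */
    uint64_t config = 0;
    config = pack_field(config, 0, 7, 0x2f);
    config = pack_field(config, 8, 15, 0x0f);
    printf("attr.config = 0x%llx\n", (unsigned long long)config);  /* 0xf2f */
    return 0;
}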
H A Dperf_event_amd.c126 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
127 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
176 static int amd_core_hw_config(struct perf_event *event) amd_core_hw_config() argument
178 if (event->attr.exclude_host && event->attr.exclude_guest) amd_core_hw_config()
184 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | amd_core_hw_config()
186 else if (event->attr.exclude_host) amd_core_hw_config()
187 event->hw.config |= AMD64_EVENTSEL_GUESTONLY; amd_core_hw_config()
188 else if (event->attr.exclude_guest) amd_core_hw_config()
189 event->hw.config |= AMD64_EVENTSEL_HOSTONLY; amd_core_hw_config()
195 * AMD64 events are detected based on their event codes.
214 static int amd_pmu_hw_config(struct perf_event *event) amd_pmu_hw_config() argument
218 /* pass precise event sampling to ibs: */ amd_pmu_hw_config()
219 if (event->attr.precise_ip && get_ibs_caps()) amd_pmu_hw_config()
222 if (has_branch_stack(event)) amd_pmu_hw_config()
225 ret = x86_pmu_hw_config(event); amd_pmu_hw_config()
229 if (event->attr.type == PERF_TYPE_RAW) amd_pmu_hw_config()
230 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; amd_pmu_hw_config()
232 return amd_core_hw_config(event); amd_pmu_hw_config()
236 struct perf_event *event) __amd_put_nb_event_constraints()
242 * need to scan whole list because event may not have __amd_put_nb_event_constraints()
245 * no race condition possible because event can only __amd_put_nb_event_constraints()
250 if (cmpxchg(nb->owners + i, event, NULL) == event) __amd_put_nb_event_constraints()
261 * traffic. They are identified by an event code >= 0xe00.
264 * shared set of counters. When a NB event is programmed
274 * We provide only one choice for each NB event based on
276 * if a counter is available, there is a guarantee the NB event
279 * for this event.
292 __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, __amd_get_nb_event_constraints() argument
295 struct hw_perf_event *hwc = &event->hw; __amd_get_nb_event_constraints()
312 * event can already be present yet not assigned (in hwc->idx) __amd_get_nb_event_constraints()
319 old = cmpxchg(nb->owners + idx, NULL, event); __amd_get_nb_event_constraints()
320 else if (nb->owners[idx] == event) __amd_get_nb_event_constraints()
321 /* event already present */ __amd_get_nb_event_constraints()
322 old = event; __amd_get_nb_event_constraints()
326 if (old && old != event) __amd_get_nb_event_constraints()
331 cmpxchg(nb->owners + new, event, NULL); __amd_get_nb_event_constraints()
335 if (old == event) __amd_get_nb_event_constraints()
434 struct perf_event *event) amd_get_event_constraints()
437 * if not NB event or no NB, then no constraints amd_get_event_constraints()
439 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) amd_get_event_constraints()
442 return __amd_get_nb_event_constraints(cpuc, event, NULL); amd_get_event_constraints()
446 struct perf_event *event) amd_put_event_constraints()
448 if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)) amd_put_event_constraints()
449 __amd_put_nb_event_constraints(cpuc, event); amd_put_event_constraints()
452 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
481 * AMD family 15h event code/PMC mappings:
543 struct perf_event *event) amd_get_event_constraints_f15h()
545 struct hw_perf_event *hwc = &event->hw; amd_get_event_constraints_f15h()
616 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) | amd_event_sysfs_show() local
619 return x86_event_sysfs_show(page, config, event); amd_event_sysfs_show()
235 __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) __amd_put_nb_event_constraints() argument
433 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) amd_get_event_constraints() argument
445 amd_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) amd_put_event_constraints() argument
542 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) amd_get_event_constraints_f15h() argument
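The comments above (lines 261-279) describe northbridge counters as a per-node resource shared by all cores, with ownership tracked lock-free in nb->owners[] via cmpxchg: claim a free slot by swapping NULL for the event, recognise a slot already owned, and release only a slot that really holds this event. A userspace analogue using C11 atomics is sketched below; the table size, struct and function names are invented for the sketch.

#include <stdatomic.h>
#include <stdio.h>

#define NUM_COUNTERS 4

struct sw_event { int id; };

/* Shared per-node ownership table; one slot per hardware counter. */
static _Atomic(struct sw_event *) owners[NUM_COUNTERS];

/* Claim the first free slot, or the slot this event already owns. -1 if none. */
static int claim_counter(struct sw_event *ev)
{
    for (int i = 0; i < NUM_COUNTERS; i++) {
        struct sw_event *expected = NULL;
        if (atomic_compare_exchange_strong(&owners[i], &expected, ev))
            return i;               /* freshly claimed */
        if (expected == ev)
            return i;               /* already ours */
    }
    return -1;                      /* all counters busy */
}

/* Release only the slot that really belongs to this event. */
static void release_counter(struct sw_event *ev)
{
    for (int i = 0; i < NUM_COUNTERS; i++) {
        struct sw_event *expected = ev;
        if (atomic_compare_exchange_strong(&owners[i], &expected, NULL))
            return;
    }
}

int main(void)
{
    struct sw_event a = { 1 }, b = { 2 };
    printf("a -> %d\n", claim_counter(&a));   /* 0 */
    printf("b -> %d\n", claim_counter(&b));   /* 1 */
    printf("b -> %d\n", claim_counter(&b));   /* 1 again: slot already owned */
    release_counter(&a);                      /* frees counter 0 for someone else */
    return 0;
}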
H A Dperf_event.c60 * Propagate event elapsed time into the generic event.
61 * Can only be executed on the CPU where the event is active.
64 u64 x86_perf_event_update(struct perf_event *event) x86_perf_event_update() argument
66 struct hw_perf_event *hwc = &event->hw; x86_perf_event_update()
76 * Careful: an NMI might modify the previous event value. x86_perf_event_update()
80 * count to the generic event atomically: x86_perf_event_update()
93 * (event-)time and add that to the generic event. x86_perf_event_update()
101 local64_add(delta, &event->count); x86_perf_event_update()
110 static int x86_pmu_extra_regs(u64 config, struct perf_event *event) x86_pmu_extra_regs() argument
115 reg = &event->hw.extra_reg; x86_pmu_extra_regs()
121 if (er->event != (config & er->config_mask)) x86_pmu_extra_regs()
123 if (event->attr.config1 & ~er->valid_mask) x86_pmu_extra_regs()
130 reg->config = event->attr.config1; x86_pmu_extra_regs()
271 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
276 void hw_perf_lbr_event_destroy(struct perf_event *event) hw_perf_lbr_event_destroy() argument
278 hw_perf_event_destroy(event); hw_perf_lbr_event_destroy()
280 /* undo the lbr/bts event accounting */ hw_perf_lbr_event_destroy()
290 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) set_ext_hw_attr() argument
292 struct perf_event_attr *attr = &event->attr; set_ext_hw_attr()
320 return x86_pmu_extra_regs(val, event); set_ext_hw_attr()
353 * Check if we can create event of a certain type (that no conflicting events
382 int x86_setup_perfctr(struct perf_event *event) x86_setup_perfctr() argument
384 struct perf_event_attr *attr = &event->attr; x86_setup_perfctr()
385 struct hw_perf_event *hwc = &event->hw; x86_setup_perfctr()
388 if (!is_sampling_event(event)) { x86_setup_perfctr()
395 return x86_pmu_extra_regs(event->attr.config, event); x86_setup_perfctr()
398 return set_ext_hw_attr(hwc, event); x86_setup_perfctr()
431 event->destroy = hw_perf_lbr_event_destroy; x86_setup_perfctr()
445 static inline int precise_br_compat(struct perf_event *event) precise_br_compat() argument
447 u64 m = event->attr.branch_sample_type; precise_br_compat()
456 if (!event->attr.exclude_user) precise_br_compat()
459 if (!event->attr.exclude_kernel) precise_br_compat()
469 int x86_pmu_hw_config(struct perf_event *event) x86_pmu_hw_config() argument
471 if (event->attr.precise_ip) { x86_pmu_hw_config()
483 if (event->attr.precise_ip > precise) x86_pmu_hw_config()
490 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { x86_pmu_hw_config()
491 u64 *br_type = &event->attr.branch_sample_type; x86_pmu_hw_config()
493 if (has_branch_stack(event)) { x86_pmu_hw_config()
494 if (!precise_br_compat(event)) x86_pmu_hw_config()
505 * event. x86_pmu_hw_config()
509 if (!event->attr.exclude_user) x86_pmu_hw_config()
512 if (!event->attr.exclude_kernel) x86_pmu_hw_config()
517 if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) x86_pmu_hw_config()
518 event->attach_state |= PERF_ATTACH_TASK_DATA; x86_pmu_hw_config()
524 event->hw.config = ARCH_PERFMON_EVENTSEL_INT; x86_pmu_hw_config()
529 if (!event->attr.exclude_user) x86_pmu_hw_config()
530 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; x86_pmu_hw_config()
531 if (!event->attr.exclude_kernel) x86_pmu_hw_config()
532 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; x86_pmu_hw_config()
534 if (event->attr.type == PERF_TYPE_RAW) x86_pmu_hw_config()
535 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; x86_pmu_hw_config()
537 if (event->attr.sample_period && x86_pmu.limit_period) { x86_pmu_hw_config()
538 if (x86_pmu.limit_period(event, event->attr.sample_period) > x86_pmu_hw_config()
539 event->attr.sample_period) x86_pmu_hw_config()
543 return x86_setup_perfctr(event); x86_pmu_hw_config()
549 static int __x86_pmu_event_init(struct perf_event *event) __x86_pmu_event_init() argument
560 event->destroy = hw_perf_event_destroy; __x86_pmu_event_init()
562 event->hw.idx = -1; __x86_pmu_event_init()
563 event->hw.last_cpu = -1; __x86_pmu_event_init()
564 event->hw.last_tag = ~0ULL; __x86_pmu_event_init()
567 event->hw.extra_reg.idx = EXTRA_REG_NONE; __x86_pmu_event_init()
568 event->hw.branch_reg.idx = EXTRA_REG_NONE; __x86_pmu_event_init()
570 return x86_pmu.hw_config(event); __x86_pmu_event_init()
625 static inline int is_x86_event(struct perf_event *event) is_x86_event() argument
627 return event->pmu == &pmu; is_x86_event()
639 int event; /* event index */ member in struct:sched_state
678 sched->state.event = idx; /* start with min weight */ perf_sched_init()
707 * Select a counter for the current event to schedule. Return true on
718 if (sched->state.event >= sched->max_events) __perf_sched_find_counter()
721 c = sched->constraints[sched->state.event]; __perf_sched_find_counter()
775 /* next event */ perf_sched_next_event()
776 sched->state.event++; perf_sched_next_event()
777 if (sched->state.event >= sched->max_events) { perf_sched_next_event()
779 sched->state.event = 0; perf_sched_next_event()
784 c = sched->constraints[sched->state.event]; perf_sched_next_event()
793 * Assign a counter for each event.
806 assign[sched.state.event] = sched.state.counter; perf_assign_events()
889 * validate an event group (assign == NULL) x86_schedule_events()
931 struct perf_event *event; collect_events() local
948 list_for_each_entry(event, &leader->sibling_list, group_entry) { collect_events()
949 if (!is_x86_event(event) || collect_events()
950 event->state <= PERF_EVENT_STATE_OFF) collect_events()
956 cpuc->event_list[n] = event; collect_events()
962 static inline void x86_assign_hw_event(struct perf_event *event, x86_assign_hw_event() argument
965 struct hw_perf_event *hwc = &event->hw; x86_assign_hw_event()
994 static void x86_pmu_start(struct perf_event *event, int flags);
999 struct perf_event *event; x86_pmu_enable() local
1018 event = cpuc->event_list[i]; x86_pmu_enable()
1019 hwc = &event->hw; x86_pmu_enable()
1025 * - no other event has used the counter since x86_pmu_enable()
1038 x86_pmu_stop(event, PERF_EF_UPDATE); x86_pmu_enable()
1045 event = cpuc->event_list[i]; x86_pmu_enable()
1046 hwc = &event->hw; x86_pmu_enable()
1049 x86_assign_hw_event(event, cpuc, i); x86_pmu_enable()
1056 x86_pmu_start(event, PERF_EF_RELOAD); x86_pmu_enable()
1072 * To be called with the event disabled in hw:
1074 int x86_perf_event_set_period(struct perf_event *event) x86_perf_event_set_period() argument
1076 struct hw_perf_event *hwc = &event->hw; x86_perf_event_set_period()
1110 left = x86_pmu.limit_period(event, left); x86_perf_event_set_period()
1115 * The hw event starts counting from this event offset, x86_perf_event_set_period()
1132 perf_event_update_userpage(event); x86_perf_event_set_period()
1137 void x86_pmu_enable_event(struct perf_event *event) x86_pmu_enable_event() argument
1140 __x86_pmu_enable_event(&event->hw, x86_pmu_enable_event()
1145 * Add a single event to the PMU.
1147 * The event is added to the group of enabled events
1150 static int x86_pmu_add(struct perf_event *event, int flags) x86_pmu_add() argument
1157 hwc = &event->hw; x86_pmu_add()
1160 ret = n = collect_events(cpuc, event, false); x86_pmu_add()
1199 static void x86_pmu_start(struct perf_event *event, int flags) x86_pmu_start() argument
1202 int idx = event->hw.idx; x86_pmu_start()
1204 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) x86_pmu_start()
1211 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); x86_pmu_start()
1212 x86_perf_event_set_period(event); x86_pmu_start()
1215 event->hw.state = 0; x86_pmu_start()
1217 cpuc->events[idx] = event; x86_pmu_start()
1220 x86_pmu.enable(event); x86_pmu_start()
1221 perf_event_update_userpage(event); x86_pmu_start()
1284 void x86_pmu_stop(struct perf_event *event, int flags) x86_pmu_stop() argument
1287 struct hw_perf_event *hwc = &event->hw; x86_pmu_stop()
1290 x86_pmu.disable(event); x86_pmu_stop()
1298 * Drain the remaining delta count out of an event x86_pmu_stop()
1301 x86_perf_event_update(event); x86_pmu_stop()
1306 static void x86_pmu_del(struct perf_event *event, int flags) x86_pmu_del() argument
1312 * event is descheduled x86_pmu_del()
1314 event->hw.flags &= ~PERF_X86_EVENT_COMMITTED; x86_pmu_del()
1322 * an event added during that same TXN. x86_pmu_del()
1330 x86_pmu_stop(event, PERF_EF_UPDATE); x86_pmu_del()
1333 if (event == cpuc->event_list[i]) x86_pmu_del()
1340 /* If we have a newly added event; make sure to decrease n_added. */ x86_pmu_del()
1345 x86_pmu.put_event_constraints(cpuc, event); x86_pmu_del()
1354 perf_event_update_userpage(event); x86_pmu_del()
1361 struct perf_event *event; x86_pmu_handle_irq() local
1389 event = cpuc->events[idx]; x86_pmu_handle_irq()
1391 val = x86_perf_event_update(event); x86_pmu_handle_irq()
1396 * event overflow x86_pmu_handle_irq()
1399 perf_sample_data_init(&data, 0, event->hw.last_period); x86_pmu_handle_irq()
1401 if (!x86_perf_event_set_period(event)) x86_pmu_handle_irq()
1404 if (perf_event_overflow(event, &data, regs)) x86_pmu_handle_irq()
1405 x86_pmu_stop(event, 0); x86_pmu_handle_irq()
1506 * sample via a hrtimer based software event): pmu_check_apic()
1616 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) x86_event_sysfs_show() argument
1630 ret = sprintf(page, "event=0x%02llx", event); x86_event_sysfs_show()
1724 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); init_hw_perf_events()
1733 static inline void x86_pmu_read(struct perf_event *event) x86_pmu_read() argument
1735 x86_perf_event_update(event); x86_pmu_read()
1800 * a fake_cpuc is used to validate event groups. Due to
1804 * able to catch this when the last event gets added to
1836 * validate that we can schedule this event
1838 static int validate_event(struct perf_event *event) validate_event() argument
1848 c = x86_pmu.get_event_constraints(fake_cpuc, -1, event); validate_event()
1854 x86_pmu.put_event_constraints(fake_cpuc, event); validate_event()
1862 * validate a single event group
1872 static int validate_group(struct perf_event *event) validate_group() argument
1874 struct perf_event *leader = event->group_leader; validate_group()
1882 * the event is not yet connected with its validate_group()
1884 * existing siblings, then add the new event validate_group()
1892 n = collect_events(fake_cpuc, event, false); validate_group()
1905 static int x86_pmu_event_init(struct perf_event *event) x86_pmu_event_init() argument
1910 switch (event->attr.type) { x86_pmu_event_init()
1920 err = __x86_pmu_event_init(event); x86_pmu_event_init()
1923 * we temporarily connect event to its pmu x86_pmu_event_init()
1925 * it as an x86 event using is_x86_event() x86_pmu_event_init()
1927 tmp = event->pmu; x86_pmu_event_init()
1928 event->pmu = &pmu; x86_pmu_event_init()
1930 if (event->group_leader != event) x86_pmu_event_init()
1931 err = validate_group(event); x86_pmu_event_init()
1933 err = validate_event(event); x86_pmu_event_init()
1935 event->pmu = tmp; x86_pmu_event_init()
1938 if (event->destroy) x86_pmu_event_init()
1939 event->destroy(event); x86_pmu_event_init()
1943 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; x86_pmu_event_init()
1954 static void x86_pmu_event_mapped(struct perf_event *event) x86_pmu_event_mapped() argument
1956 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) x86_pmu_event_mapped()
1963 static void x86_pmu_event_unmapped(struct perf_event *event) x86_pmu_event_unmapped() argument
1968 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) x86_pmu_event_unmapped()
1975 static int x86_pmu_event_idx(struct perf_event *event) x86_pmu_event_idx() argument
1977 int idx = event->hw.idx; x86_pmu_event_idx()
1979 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) x86_pmu_event_idx()
2017 * perf-event-bypassing mode. This path is extremely slow, set_attr_rdpmc()
2089 void arch_perf_update_userpage(struct perf_event *event, arch_perf_update_userpage() argument
2097 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); arch_perf_update_userpage()
2118 if (event->clock == &local_clock) { arch_perf_update_userpage()
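The comment block at the top of x86_perf_event_update() (lines 60-101 above) explains the driver's tactic for folding hardware counts into event->count while an NMI handler may be doing the same thing: read prev, read the raw counter, and only commit the pair with an atomic exchange on prev, retrying on a race. A standalone sketch of that retry loop is below; it treats counters as full 64-bit values for simplicity (the counter-width handling is a separate trick, sketched after the amd_ibs listing further down).

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct sw_event {
    _Atomic uint64_t prev_raw;   /* last raw value already folded into count */
    _Atomic uint64_t count;      /* accumulated event count */
    uint64_t         hw_raw;     /* stand-in for the hardware counter register */
};

/* Fold (hw - prev) into count. A concurrent update (the NMI handler in the
 * kernel) may also read the counter and move prev_raw; the compare-exchange
 * loop retries until our (prev, new) pair wins. */
static void sketch_event_update(struct sw_event *e)
{
    uint64_t prev, new_raw;

    do {
        prev    = atomic_load(&e->prev_raw);
        new_raw = e->hw_raw;                  /* rdpmcl()/rdmsrl() in the kernel */
    } while (!atomic_compare_exchange_strong(&e->prev_raw, &prev, new_raw));

    atomic_fetch_add(&e->count, new_raw - prev);
}

int main(void)
{
    struct sw_event e = { 0, 0, 1000 };
    sketch_event_update(&e);
    e.hw_raw = 1500;
    sketch_event_update(&e);
    printf("count = %llu\n", (unsigned long long)atomic_load(&e.count)); /* 1500 */
    return 0;
}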
H A Dperf_event_intel_uncore.c26 struct uncore_event_desc *event = uncore_event_show() local
28 return sprintf(buf, "%s", event->config); uncore_event_show()
31 struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) uncore_event_to_pmu() argument
33 return container_of(event->pmu, struct intel_uncore_pmu, pmu); uncore_event_to_pmu()
61 struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) uncore_event_to_box() argument
64 * perf core schedules events on the basis of cpu, uncore events are uncore_event_to_box()
67 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); uncore_event_to_box()
70 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) uncore_msr_read_counter() argument
74 rdmsrl(event->hw.event_base, count); uncore_msr_read_counter()
83 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) uncore_get_constraint() argument
86 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; uncore_get_constraint()
87 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; uncore_get_constraint()
120 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) uncore_put_constraint() argument
123 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; uncore_put_constraint()
127 * takes care of events which do not use an extra shared reg. uncore_put_constraint()
129 * Also, if this is a fake box we shouldn't touch any event state uncore_put_constraint()
156 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx) uncore_assign_hw_event() argument
158 struct hw_perf_event *hwc = &event->hw; uncore_assign_hw_event()
173 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) uncore_perf_event_update() argument
178 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) uncore_perf_event_update()
183 /* the hrtimer might modify the previous event value */ uncore_perf_event_update()
185 prev_count = local64_read(&event->hw.prev_count); uncore_perf_event_update()
186 new_count = uncore_read_counter(box, event); uncore_perf_event_update()
187 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) uncore_perf_event_update()
193 local64_add(delta, &event->count); uncore_perf_event_update()
204 struct perf_event *event; uncore_pmu_hrtimer() local
218 * handle boxes with an active event list as opposed to active uncore_pmu_hrtimer()
221 list_for_each_entry(event, &box->active_list, active_entry) { uncore_pmu_hrtimer()
222 uncore_perf_event_update(box, event); uncore_pmu_hrtimer()
283 static int uncore_pmu_event_init(struct perf_event *event);
285 static bool is_uncore_event(struct perf_event *event) is_uncore_event() argument
287 return event->pmu->event_init == uncore_pmu_event_init; is_uncore_event()
293 struct perf_event *event; uncore_collect_events() local
313 list_for_each_entry(event, &leader->sibling_list, group_entry) { uncore_collect_events()
314 if (!is_uncore_event(event) || uncore_collect_events()
315 event->state <= PERF_EVENT_STATE_OFF) uncore_collect_events()
321 box->event_list[n] = event; uncore_collect_events()
328 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) uncore_get_event_constraint() argument
334 c = type->ops->get_constraint(box, event); uncore_get_event_constraint()
339 if (event->attr.config == UNCORE_FIXED_EVENT) uncore_get_event_constraint()
344 if ((event->hw.config & c->cmask) == c->code) uncore_get_event_constraint()
352 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event) uncore_put_event_constraint() argument
355 box->pmu->type->ops->put_constraint(box, event); uncore_put_event_constraint()
407 static void uncore_pmu_event_start(struct perf_event *event, int flags) uncore_pmu_event_start() argument
409 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_start()
410 int idx = event->hw.idx; uncore_pmu_event_start()
412 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) uncore_pmu_event_start()
418 event->hw.state = 0; uncore_pmu_event_start()
419 box->events[idx] = event; uncore_pmu_event_start()
423 local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); uncore_pmu_event_start()
424 uncore_enable_event(box, event); uncore_pmu_event_start()
432 static void uncore_pmu_event_stop(struct perf_event *event, int flags) uncore_pmu_event_stop() argument
434 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_stop()
435 struct hw_perf_event *hwc = &event->hw; uncore_pmu_event_stop()
438 uncore_disable_event(box, event); uncore_pmu_event_stop()
452 * Drain the remaining delta count out of an event uncore_pmu_event_stop()
455 uncore_perf_event_update(box, event); uncore_pmu_event_stop()
460 static int uncore_pmu_event_add(struct perf_event *event, int flags) uncore_pmu_event_add() argument
462 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_add()
463 struct hw_perf_event *hwc = &event->hw; uncore_pmu_event_add()
470 ret = n = uncore_collect_events(box, event, false); uncore_pmu_event_add()
484 event = box->event_list[i]; uncore_pmu_event_add()
485 hwc = &event->hw; uncore_pmu_event_add()
497 uncore_pmu_event_stop(event, PERF_EF_UPDATE); uncore_pmu_event_add()
502 event = box->event_list[i]; uncore_pmu_event_add()
503 hwc = &event->hw; uncore_pmu_event_add()
507 uncore_assign_hw_event(box, event, assign[i]); uncore_pmu_event_add()
514 uncore_pmu_event_start(event, 0); uncore_pmu_event_add()
521 static void uncore_pmu_event_del(struct perf_event *event, int flags) uncore_pmu_event_del() argument
523 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_del()
526 uncore_pmu_event_stop(event, PERF_EF_UPDATE); uncore_pmu_event_del()
529 if (event == box->event_list[i]) { uncore_pmu_event_del()
530 uncore_put_event_constraint(box, event); uncore_pmu_event_del()
540 event->hw.idx = -1; uncore_pmu_event_del()
541 event->hw.last_tag = ~0ULL; uncore_pmu_event_del()
544 void uncore_pmu_event_read(struct perf_event *event) uncore_pmu_event_read() argument
546 struct intel_uncore_box *box = uncore_event_to_box(event); uncore_pmu_event_read()
547 uncore_perf_event_update(box, event); uncore_pmu_event_read()
555 struct perf_event *event) uncore_validate_group()
557 struct perf_event *leader = event->group_leader; uncore_validate_group()
567 * the event is not yet connected with its uncore_validate_group()
569 * existing siblings, then add the new event uncore_validate_group()
577 n = uncore_collect_events(fake_box, event, false); uncore_validate_group()
589 static int uncore_pmu_event_init(struct perf_event *event) uncore_pmu_event_init() argument
593 struct hw_perf_event *hwc = &event->hw; uncore_pmu_event_init()
596 if (event->attr.type != event->pmu->type) uncore_pmu_event_init()
599 pmu = uncore_event_to_pmu(event); uncore_pmu_event_init()
608 if (event->attr.exclude_user || event->attr.exclude_kernel || uncore_pmu_event_init()
609 event->attr.exclude_hv || event->attr.exclude_idle) uncore_pmu_event_init()
620 if (event->cpu < 0) uncore_pmu_event_init()
622 box = uncore_pmu_to_box(pmu, event->cpu); uncore_pmu_event_init()
625 event->cpu = box->cpu; uncore_pmu_event_init()
627 event->hw.idx = -1; uncore_pmu_event_init()
628 event->hw.last_tag = ~0ULL; uncore_pmu_event_init()
629 event->hw.extra_reg.idx = EXTRA_REG_NONE; uncore_pmu_event_init()
630 event->hw.branch_reg.idx = EXTRA_REG_NONE; uncore_pmu_event_init()
632 if (event->attr.config == UNCORE_FIXED_EVENT) { uncore_pmu_event_init()
643 /* fixed counters have event field hardcoded to zero */ uncore_pmu_event_init()
646 hwc->config = event->attr.config & pmu->type->event_mask; uncore_pmu_event_init()
648 ret = pmu->type->ops->hw_config(box, event); uncore_pmu_event_init()
654 if (event->group_leader != event) uncore_pmu_event_init()
655 ret = uncore_validate_group(pmu, event); uncore_pmu_event_init()
554 uncore_validate_group(struct intel_uncore_pmu *pmu, struct perf_event *event) uncore_validate_group() argument
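Uncore boxes have no per-event overflow interrupt, which is why uncore_pmu_hrtimer() above walks box->active_list and calls uncore_perf_event_update() on a timer: each counter must be read often enough that it cannot wrap (ideally cannot even cover half its range) between two reads. The sketch below only illustrates that sizing argument; the counter width and event rate are example numbers, not values taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Longest polling period for which a counter of 'bits' width, incremented at
 * most 'max_rate' times per second, stays within half of its range between
 * two reads. Both inputs are illustrative, not values from the driver. */
static double safe_poll_seconds(unsigned bits, double max_rate)
{
    double half_range = (double)(1ULL << (bits - 1));
    return half_range / max_rate;
}

int main(void)
{
    /* e.g. a 48-bit counter incremented at most once per cycle on a 3 GHz part */
    printf("read at least once every %.0f s\n", safe_poll_seconds(48, 3e9));
    /* a 32-bit counter at the same rate needs far more frequent reads */
    printf("read at least once every %.3f s\n", safe_poll_seconds(32, 3e9));
    return 0;
}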
H A Dperf_event_amd_uncore.c54 static bool is_nb_event(struct perf_event *event) is_nb_event() argument
56 return event->pmu->type == amd_nb_pmu.type; is_nb_event()
59 static bool is_l2_event(struct perf_event *event) is_l2_event() argument
61 return event->pmu->type == amd_l2_pmu.type; is_l2_event()
64 static struct amd_uncore *event_to_amd_uncore(struct perf_event *event) event_to_amd_uncore() argument
66 if (is_nb_event(event) && amd_uncore_nb) event_to_amd_uncore()
67 return *per_cpu_ptr(amd_uncore_nb, event->cpu); event_to_amd_uncore()
68 else if (is_l2_event(event) && amd_uncore_l2) event_to_amd_uncore()
69 return *per_cpu_ptr(amd_uncore_l2, event->cpu); event_to_amd_uncore()
74 static void amd_uncore_read(struct perf_event *event) amd_uncore_read() argument
76 struct hw_perf_event *hwc = &event->hw; amd_uncore_read()
90 local64_add(delta, &event->count); amd_uncore_read()
93 static void amd_uncore_start(struct perf_event *event, int flags) amd_uncore_start() argument
95 struct hw_perf_event *hwc = &event->hw; amd_uncore_start()
102 perf_event_update_userpage(event); amd_uncore_start()
105 static void amd_uncore_stop(struct perf_event *event, int flags) amd_uncore_stop() argument
107 struct hw_perf_event *hwc = &event->hw; amd_uncore_stop()
113 amd_uncore_read(event); amd_uncore_stop()
118 static int amd_uncore_add(struct perf_event *event, int flags) amd_uncore_add() argument
121 struct amd_uncore *uncore = event_to_amd_uncore(event); amd_uncore_add()
122 struct hw_perf_event *hwc = &event->hw; amd_uncore_add()
125 if (hwc->idx != -1 && uncore->events[hwc->idx] == event) amd_uncore_add()
129 if (uncore->events[i] == event) { amd_uncore_add()
138 if (cmpxchg(&uncore->events[i], NULL, event) == NULL) { amd_uncore_add()
154 amd_uncore_start(event, PERF_EF_RELOAD); amd_uncore_add()
159 static void amd_uncore_del(struct perf_event *event, int flags) amd_uncore_del() argument
162 struct amd_uncore *uncore = event_to_amd_uncore(event); amd_uncore_del()
163 struct hw_perf_event *hwc = &event->hw; amd_uncore_del()
165 amd_uncore_stop(event, PERF_EF_UPDATE); amd_uncore_del()
168 if (cmpxchg(&uncore->events[i], event, NULL) == event) amd_uncore_del()
175 static int amd_uncore_event_init(struct perf_event *event) amd_uncore_event_init() argument
178 struct hw_perf_event *hwc = &event->hw; amd_uncore_event_init()
180 if (event->attr.type != event->pmu->type) amd_uncore_event_init()
186 * core, however, event counts generated by processes running on other amd_uncore_event_init()
190 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) amd_uncore_event_init()
194 if (event->attr.exclude_user || event->attr.exclude_kernel || amd_uncore_event_init()
195 event->attr.exclude_host || event->attr.exclude_guest) amd_uncore_event_init()
199 hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; amd_uncore_event_init()
202 if (event->cpu < 0) amd_uncore_event_init()
205 uncore = event_to_amd_uncore(event); amd_uncore_event_init()
213 event->cpu = uncore->cpu; amd_uncore_event_init()
245 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
H A Dperf_event_intel_rapl.c19 * event: rapl_energy_cores
23 * event: rapl_energy_pkg
27 * event: rapl_energy_dram
31 * event: rapl_energy_gpu
90 * event code: LSB 8 bits, passed in attr->config
139 static inline u64 rapl_read_counter(struct perf_event *event) rapl_read_counter() argument
142 rdmsrl(event->hw.event_base, raw); rapl_read_counter()
170 static u64 rapl_event_update(struct perf_event *event) rapl_event_update() argument
172 struct hw_perf_event *hwc = &event->hw; rapl_event_update()
179 rdmsrl(event->hw.event_base, new_raw_count); rapl_event_update()
190 * (event-)time and add that to the generic event. rapl_event_update()
198 sdelta = rapl_scale(delta, event->hw.config); rapl_event_update()
200 local64_add(sdelta, &event->count); rapl_event_update()
220 struct perf_event *event; rapl_hrtimer_handle() local
228 list_for_each_entry(event, &pmu->active_list, active_entry) { rapl_hrtimer_handle()
229 rapl_event_update(event); rapl_hrtimer_handle()
248 struct perf_event *event) __rapl_pmu_event_start()
250 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) __rapl_pmu_event_start()
253 event->hw.state = 0; __rapl_pmu_event_start()
255 list_add_tail(&event->active_entry, &pmu->active_list); __rapl_pmu_event_start()
257 local64_set(&event->hw.prev_count, rapl_read_counter(event)); __rapl_pmu_event_start()
264 static void rapl_pmu_event_start(struct perf_event *event, int mode) rapl_pmu_event_start() argument
270 __rapl_pmu_event_start(pmu, event); rapl_pmu_event_start()
274 static void rapl_pmu_event_stop(struct perf_event *event, int mode) rapl_pmu_event_stop() argument
277 struct hw_perf_event *hwc = &event->hw; rapl_pmu_event_stop()
282 /* mark event as deactivated and stopped */ rapl_pmu_event_stop()
289 list_del(&event->active_entry); rapl_pmu_event_stop()
298 * Drain the remaining delta count out of an event rapl_pmu_event_stop()
301 rapl_event_update(event); rapl_pmu_event_stop()
308 static int rapl_pmu_event_add(struct perf_event *event, int mode) rapl_pmu_event_add() argument
311 struct hw_perf_event *hwc = &event->hw; rapl_pmu_event_add()
319 __rapl_pmu_event_start(pmu, event); rapl_pmu_event_add()
326 static void rapl_pmu_event_del(struct perf_event *event, int flags) rapl_pmu_event_del() argument
328 rapl_pmu_event_stop(event, PERF_EF_UPDATE); rapl_pmu_event_del()
331 static int rapl_pmu_event_init(struct perf_event *event) rapl_pmu_event_init() argument
333 u64 cfg = event->attr.config & RAPL_EVENT_MASK; rapl_pmu_event_init()
337 if (event->attr.type != rapl_pmu_class.type) rapl_pmu_event_init()
341 if (event->attr.config & ~RAPL_EVENT_MASK) rapl_pmu_event_init()
345 * check event is known (determines counter) rapl_pmu_event_init()
367 /* check event supported */ rapl_pmu_event_init()
372 if (event->attr.exclude_user || rapl_pmu_event_init()
373 event->attr.exclude_kernel || rapl_pmu_event_init()
374 event->attr.exclude_hv || rapl_pmu_event_init()
375 event->attr.exclude_idle || rapl_pmu_event_init()
376 event->attr.exclude_host || rapl_pmu_event_init()
377 event->attr.exclude_guest || rapl_pmu_event_init()
378 event->attr.sample_period) /* no sampling */ rapl_pmu_event_init()
382 event->hw.event_base = msr; rapl_pmu_event_init()
383 event->hw.config = cfg; rapl_pmu_event_init()
384 event->hw.idx = bit; rapl_pmu_event_init()
389 static void rapl_pmu_event_read(struct perf_event *event) rapl_pmu_event_read() argument
391 rapl_event_update(event); rapl_pmu_event_read()
424 RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
425 RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
426 RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
427 RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
495 DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
247 __rapl_pmu_event_start(struct rapl_pmu *pmu, struct perf_event *event) __rapl_pmu_event_start() argument
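rapl_event_update() above converts the raw energy counter delta with rapl_scale() before adding it to event->count, because RAPL counters tick in fixed hardware energy units rather than in joules. The sketch below shows the conversion under the common description of those units as 2^-esu joules per tick, with the exponent read from a platform unit register; both the exponent value used and the exact unit encoding should be treated as assumptions of the sketch.

#include <stdint.h>
#include <stdio.h>

/* Convert raw RAPL energy-counter ticks to joules, assuming the platform's
 * unit register supplies an exponent 'esu' such that one tick = 2^-esu J.
 * The value 14 below is only an example; real hardware reports its own. */
static double ticks_to_joules(uint64_t delta_ticks, unsigned esu)
{
    return (double)delta_ticks / (double)(1ULL << esu);
}

int main(void)
{
    uint64_t before = 1000000, after = 1123456;   /* two raw counter reads */
    printf("%.6f J\n", ticks_to_joules(after - before, 14));
    return 0;
}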
H A Dperf_event_intel_cqm.c29 * Also protects event->hw.cqm_rmid
259 /* If not task event, we're machine wide */ __match_event()
270 * Are we an inherited event? __match_event()
279 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event) event_to_cgroup() argument
281 if (event->attach_state & PERF_ATTACH_TASK) event_to_cgroup()
282 return perf_cgroup_from_task(event->hw.target); event_to_cgroup()
284 return event->cgrp; event_to_cgroup()
307 * event at a time. __conflict_event()
337 * Ensure neither event is part of the other's cgroup __conflict_event()
387 struct perf_event *event; intel_cqm_xchg_rmid() local
410 list_for_each_entry(event, head, hw.cqm_group_entry) intel_cqm_xchg_rmid()
411 event->hw.cqm_rmid = rmid; intel_cqm_xchg_rmid()
456 struct perf_event *leader, *event; intel_cqm_sched_in_event() local
462 event = leader; intel_cqm_sched_in_event()
464 list_for_each_entry_continue(event, &cache_groups, intel_cqm_sched_in_event()
466 if (__rmid_valid(event->hw.cqm_rmid)) intel_cqm_sched_in_event()
469 if (__conflict_event(event, leader)) intel_cqm_sched_in_event()
472 intel_cqm_xchg_rmid(event, rmid); intel_cqm_sched_in_event()
623 * Deallocate the RMIDs from any events that conflict with @event, and
626 static void intel_cqm_sched_out_conflicting_events(struct perf_event *event) intel_cqm_sched_out_conflicting_events() argument
634 if (group == event) intel_cqm_sched_out_conflicting_events()
646 * No conflict? No problem! Leave the event alone. intel_cqm_sched_out_conflicting_events()
648 if (!__conflict_event(group, event)) intel_cqm_sched_out_conflicting_events()
707 * We have some event groups, but they all have RMIDs assigned __intel_cqm_rmid_rotate()
717 * We have more event groups without RMIDs than available RMIDs, __intel_cqm_rmid_rotate()
718 * or we have event groups that conflict with the ones currently __intel_cqm_rmid_rotate()
722 * cache_groups. The first event group without an RMID then gets __intel_cqm_rmid_rotate()
765 * event needs an RMID anyway. __intel_cqm_rmid_rotate()
792 * threshold skews the event data (because we reuse __intel_cqm_rmid_rotate()
827 static void intel_cqm_setup_event(struct perf_event *event, intel_cqm_setup_event() argument
837 if (__match_event(iter, event)) { intel_cqm_setup_event()
839 event->hw.cqm_rmid = rmid; intel_cqm_setup_event()
848 if (__conflict_event(iter, event) && __rmid_valid(rmid)) intel_cqm_setup_event()
857 event->hw.cqm_rmid = rmid; intel_cqm_setup_event()
860 static void intel_cqm_event_read(struct perf_event *event) intel_cqm_event_read() argument
869 if (event->cpu == -1) intel_cqm_event_read()
873 rmid = event->hw.cqm_rmid; intel_cqm_event_read()
886 local64_set(&event->count, val); intel_cqm_event_read()
904 static inline bool cqm_group_leader(struct perf_event *event) cqm_group_leader() argument
906 return !list_empty(&event->hw.cqm_groups_entry); cqm_group_leader()
909 static u64 intel_cqm_event_count(struct perf_event *event) intel_cqm_event_count() argument
921 if (event->cpu != -1) intel_cqm_event_count()
922 return __perf_event_count(event); intel_cqm_event_count()
933 if (!cqm_group_leader(event)) intel_cqm_event_count()
949 * Speculatively perform the read, since @event might be intel_cqm_event_count()
952 * check @event's RMID afterwards, and if it has changed, intel_cqm_event_count()
955 rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid); intel_cqm_event_count()
963 if (event->hw.cqm_rmid == rr.rmid) intel_cqm_event_count()
964 local64_set(&event->count, atomic64_read(&rr.value)); intel_cqm_event_count()
967 return __perf_event_count(event); intel_cqm_event_count()
970 static void intel_cqm_event_start(struct perf_event *event, int mode) intel_cqm_event_start() argument
973 unsigned int rmid = event->hw.cqm_rmid; intel_cqm_event_start()
976 if (!(event->hw.cqm_state & PERF_HES_STOPPED)) intel_cqm_event_start()
979 event->hw.cqm_state &= ~PERF_HES_STOPPED; intel_cqm_event_start()
994 static void intel_cqm_event_stop(struct perf_event *event, int mode) intel_cqm_event_stop() argument
999 if (event->hw.cqm_state & PERF_HES_STOPPED) intel_cqm_event_stop()
1002 event->hw.cqm_state |= PERF_HES_STOPPED; intel_cqm_event_stop()
1005 intel_cqm_event_read(event); intel_cqm_event_stop()
1017 static int intel_cqm_event_add(struct perf_event *event, int mode) intel_cqm_event_add() argument
1024 event->hw.cqm_state = PERF_HES_STOPPED; intel_cqm_event_add()
1025 rmid = event->hw.cqm_rmid; intel_cqm_event_add()
1028 intel_cqm_event_start(event, mode); intel_cqm_event_add()
1035 static void intel_cqm_event_del(struct perf_event *event, int mode) intel_cqm_event_del() argument
1037 intel_cqm_event_stop(event, mode); intel_cqm_event_del()
1040 static void intel_cqm_event_destroy(struct perf_event *event) intel_cqm_event_destroy() argument
1047 * If there's another event in this group... intel_cqm_event_destroy()
1049 if (!list_empty(&event->hw.cqm_group_entry)) { intel_cqm_event_destroy()
1050 group_other = list_first_entry(&event->hw.cqm_group_entry, intel_cqm_event_destroy()
1053 list_del(&event->hw.cqm_group_entry); intel_cqm_event_destroy()
1059 if (cqm_group_leader(event)) { intel_cqm_event_destroy()
1065 list_replace(&event->hw.cqm_groups_entry, intel_cqm_event_destroy()
1068 unsigned int rmid = event->hw.cqm_rmid; intel_cqm_event_destroy()
1072 list_del(&event->hw.cqm_groups_entry); intel_cqm_event_destroy()
1079 static int intel_cqm_event_init(struct perf_event *event) intel_cqm_event_init() argument
1084 if (event->attr.type != intel_cqm_pmu.type) intel_cqm_event_init()
1087 if (event->attr.config & ~QOS_EVENT_MASK) intel_cqm_event_init()
1091 if (event->attr.exclude_user || intel_cqm_event_init()
1092 event->attr.exclude_kernel || intel_cqm_event_init()
1093 event->attr.exclude_hv || intel_cqm_event_init()
1094 event->attr.exclude_idle || intel_cqm_event_init()
1095 event->attr.exclude_host || intel_cqm_event_init()
1096 event->attr.exclude_guest || intel_cqm_event_init()
1097 event->attr.sample_period) /* no sampling */ intel_cqm_event_init()
1100 INIT_LIST_HEAD(&event->hw.cqm_group_entry); intel_cqm_event_init()
1101 INIT_LIST_HEAD(&event->hw.cqm_groups_entry); intel_cqm_event_init()
1103 event->destroy = intel_cqm_event_destroy; intel_cqm_event_init()
1108 intel_cqm_setup_event(event, &group); intel_cqm_event_init()
1111 list_add_tail(&event->hw.cqm_group_entry, intel_cqm_event_init()
1114 list_add_tail(&event->hw.cqm_groups_entry, intel_cqm_event_init()
1122 * every event in a group to save on needless work. intel_cqm_event_init()
1124 if (!__rmid_valid(event->hw.cqm_rmid)) intel_cqm_event_init()
1136 EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
1156 PMU_FORMAT_ATTR(event, "config:0-7");
H A Dperf_event_amd_ibs.c40 struct perf_event *event; member in struct:cpu_perf_ibs
116 perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width) perf_event_try_update() argument
118 struct hw_perf_event *hwc = &event->hw; perf_event_try_update()
124 * Careful: an NMI might modify the previous event value. perf_event_try_update()
128 * count to the generic event atomically: perf_event_try_update()
138 * (event-)time and add that to the generic event. perf_event_try_update()
146 local64_add(delta, &event->count); perf_event_try_update()
165 * Use IBS for precise event sampling:
181 static int perf_ibs_precise_event(struct perf_event *event, u64 *config) perf_ibs_precise_event() argument
183 switch (event->attr.precise_ip) { perf_ibs_precise_event()
193 switch (event->attr.type) { perf_ibs_precise_event()
195 switch (event->attr.config) { perf_ibs_precise_event()
202 switch (event->attr.config) { perf_ibs_precise_event()
227 static int perf_ibs_init(struct perf_event *event) perf_ibs_init() argument
229 struct hw_perf_event *hwc = &event->hw; perf_ibs_init()
234 perf_ibs = get_ibs_pmu(event->attr.type); perf_ibs_init()
236 config = event->attr.config; perf_ibs_init()
239 ret = perf_ibs_precise_event(event, &config); perf_ibs_init()
244 if (event->pmu != &perf_ibs->pmu) perf_ibs_init()
247 if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp)) perf_ibs_init()
257 if (!event->attr.sample_freq && hwc->sample_period & 0x0f) perf_ibs_init()
270 event->attr.sample_period = max_cnt << 4; perf_ibs_init()
271 hwc->sample_period = event->attr.sample_period; perf_ibs_init()
321 perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event, perf_ibs_event_update() argument
331 while (!perf_event_try_update(event, count, 64)) { perf_ibs_event_update()
332 rdmsrl(event->hw.config_base, *config); perf_ibs_event_update()
361 * the event while stopping it and then reset the state when starting
365 static void perf_ibs_start(struct perf_event *event, int flags) perf_ibs_start() argument
367 struct hw_perf_event *hwc = &event->hw; perf_ibs_start()
368 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); perf_ibs_start()
382 perf_event_update_userpage(event); perf_ibs_start()
385 static void perf_ibs_stop(struct perf_event *event, int flags) perf_ibs_stop() argument
387 struct hw_perf_event *hwc = &event->hw; perf_ibs_stop()
388 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); perf_ibs_stop()
416 perf_ibs_event_update(perf_ibs, event, &config); perf_ibs_stop()
420 static int perf_ibs_add(struct perf_event *event, int flags) perf_ibs_add() argument
422 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); perf_ibs_add()
428 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; perf_ibs_add()
430 pcpu->event = event; perf_ibs_add()
433 perf_ibs_start(event, PERF_EF_RELOAD); perf_ibs_add()
438 static void perf_ibs_del(struct perf_event *event, int flags) perf_ibs_del() argument
440 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); perf_ibs_del()
446 perf_ibs_stop(event, PERF_EF_UPDATE); perf_ibs_del()
448 pcpu->event = NULL; perf_ibs_del()
450 perf_event_update_userpage(event); perf_ibs_del()
453 static void perf_ibs_read(struct perf_event *event) { } perf_ibs_read() argument
519 struct perf_event *event = pcpu->event; perf_ibs_handle_irq() local
520 struct hw_perf_event *hwc = &event->hw; perf_ibs_handle_irq()
546 perf_ibs_event_update(perf_ibs, event, config); perf_ibs_handle_irq()
555 if (event->attr.sample_type & PERF_SAMPLE_RAW) perf_ibs_handle_irq()
568 if (event->attr.sample_type & PERF_SAMPLE_RAW) { perf_ibs_handle_irq()
593 if (event->attr.sample_type & PERF_SAMPLE_RAW) { perf_ibs_handle_irq()
599 throttle = perf_event_overflow(event, &data, &regs); perf_ibs_handle_irq()
606 perf_event_update_userpage(event); perf_ibs_handle_irq()
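perf_event_try_update() above takes an explicit counter width, and the same trick appears in x86_perf_event_update(): shift both raw values up so the counter's top bit becomes bit 63, subtract, then arithmetic-shift back down, so a single wraparound between two reads still produces the correct positive delta. The standalone function below illustrates just that arithmetic (mirroring the kernel's s64 usage, so it relies on the usual two's-complement shift behaviour); it is not the kernel function itself.

#include <stdint.h>
#include <stdio.h>

/* Increment between two raw reads of a 'width'-bit counter. Shifting both
 * values up to bit 63 and arithmetic-shifting the difference back down
 * sign-extends within the counter width, so one wraparound between the two
 * reads still yields the right delta. */
static int64_t counter_delta(uint64_t prev, uint64_t now, unsigned width)
{
    unsigned shift = 64 - width;
    return ((int64_t)(now << shift) - (int64_t)(prev << shift)) >> shift;
}

int main(void)
{
    /* a 48-bit counter that wrapped from near its top back to a small value */
    uint64_t prev = 0xFFFFFFFFFFF0ULL;    /* 2^48 - 16 */
    uint64_t now  = 0x10ULL;              /* 16, after the wrap */
    printf("delta = %lld\n", (long long)counter_delta(prev, now, 48)); /* 32 */
    return 0;
}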
H A Dperf_event_intel_uncore_nhmex.c4 /* NHM-EX event control */
127 * use the 9~13 bits to select the event if the 7th bit is not set,
128 * otherwise use the 19~21 bits to select the event.
189 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
190 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
234 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_uncore_msr_disable_event() argument
236 wrmsrl(event->hw.config_base, 0); nhmex_uncore_msr_disable_event()
239 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_uncore_msr_enable_event() argument
241 struct hw_perf_event *hwc = &event->hw; nhmex_uncore_msr_enable_event()
322 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
343 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) nhmex_bbox_hw_config() argument
345 struct hw_perf_event *hwc = &event->hw; nhmex_bbox_hw_config()
365 reg1->config = event->attr.config1; nhmex_bbox_hw_config()
366 reg2->config = event->attr.config2; nhmex_bbox_hw_config()
370 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_bbox_msr_enable_event() argument
372 struct hw_perf_event *hwc = &event->hw; nhmex_bbox_msr_enable_event()
386 * Use bits 6-7 in the event config to select counter.
434 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) nhmex_sbox_hw_config() argument
436 struct hw_perf_event *hwc = &event->hw; nhmex_sbox_hw_config()
440 /* only TO_R_PROG_EV event uses the match/mask register */ nhmex_sbox_hw_config()
450 reg1->config = event->attr.config1; nhmex_sbox_hw_config()
451 reg2->config = event->attr.config2; nhmex_sbox_hw_config()
455 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_sbox_msr_enable_event() argument
457 struct hw_perf_event *hwc = &event->hw; nhmex_sbox_msr_enable_event()
526 /* event 0xa uses two extra registers */
622 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) nhmex_mbox_alter_er() argument
624 struct hw_perf_event *hwc = &event->hw; nhmex_mbox_alter_er()
650 /* adjust the main event selector */ nhmex_mbox_alter_er()
662 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) nhmex_mbox_get_constraint() argument
664 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_mbox_get_constraint()
665 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; nhmex_mbox_get_constraint()
692 * If it's a fake box -- as per validate_{group,event}() we nhmex_mbox_get_constraint()
693 * shouldn't touch event state and we can avoid doing so nhmex_mbox_get_constraint()
695 * on each event, this avoids the need for reg->alloc. nhmex_mbox_get_constraint()
699 nhmex_mbox_alter_er(event, idx[0], true); nhmex_mbox_get_constraint()
719 config1 = nhmex_mbox_alter_er(event, idx[0], false); nhmex_mbox_get_constraint()
731 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) nhmex_mbox_put_constraint() argument
733 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_mbox_put_constraint()
734 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; nhmex_mbox_put_constraint()
755 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; nhmex_mbox_extra_reg_idx()
758 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) nhmex_mbox_hw_config() argument
761 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_mbox_hw_config()
762 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; nhmex_mbox_hw_config()
772 if (er->event != (event->hw.config & er->config_mask)) nhmex_mbox_hw_config()
774 if (event->attr.config1 & ~er->valid_mask) nhmex_mbox_hw_config()
791 reg1->config = event->attr.config1; nhmex_mbox_hw_config()
800 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) nhmex_mbox_hw_config()
801 reg2->config = event->attr.config2; nhmex_mbox_hw_config()
828 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_mbox_msr_enable_event() argument
830 struct hw_perf_event *hwc = &event->hw; nhmex_mbox_msr_enable_event()
937 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_alter_er() argument
939 struct hw_perf_event *hwc = &event->hw; nhmex_rbox_alter_er()
942 /* adjust the main event selector and extra register index */ nhmex_rbox_alter_er()
965 * Each rbox has 4 event set which monitor PQI port 0~3 or 4~7.
966 * An event set consists of 6 events, the 3rd and 4th events in
967 * an event set use the same extra register. So an event set uses
971 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_get_constraint() argument
973 struct hw_perf_event *hwc = &event->hw; nhmex_rbox_get_constraint()
1047 nhmex_rbox_alter_er(box, event); nhmex_rbox_get_constraint()
1055 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_put_constraint() argument
1058 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_rbox_put_constraint()
1079 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_hw_config() argument
1081 struct hw_perf_event *hwc = &event->hw; nhmex_rbox_hw_config()
1082 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; nhmex_rbox_hw_config()
1083 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; nhmex_rbox_hw_config()
1086 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> nhmex_rbox_hw_config()
1092 reg1->config = event->attr.config1; nhmex_rbox_hw_config()
1097 hwc->config |= event->attr.config & (~0ULL << 32); nhmex_rbox_hw_config()
1098 reg2->config = event->attr.config2; nhmex_rbox_hw_config()
1104 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) nhmex_rbox_msr_enable_event() argument
1106 struct hw_perf_event *hwc = &event->hw; nhmex_rbox_msr_enable_event()
1166 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
1167 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
1168 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
1169 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
1170 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
1171 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
H A Dperf_event.h71 #define PERF_X86_EVENT_COMMITTED 0x0008 /* event passed commit_txn */
77 #define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
177 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
255 * The overlap flag marks event constraints with overlapping counter
256 * masks. This is the case if the counter mask of such an event is not
264 * The event scheduler may not select the correct counter in the first
323 /* Event constraint, but match on all event flags too. */
327 /* Check only flags, but allow all event/umask */
331 /* Check flags and event code, and set the HSW store flag */
337 /* Check flags and event code, and set the HSW load flag */
349 /* Check flags and event code/umask, and set the HSW store flag */
361 /* Check flags and event code/umask, and set the HSW load flag */
373 /* Check flags and event code/umask, and set the HSW N/A flag */
406 unsigned int event; member in struct:extra_reg
415 .event = (e), \
423 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
424 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
426 #define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
427 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
461 u64 event:8, member in struct:x86_pmu_config::__anon3132
503 int (*hw_config)(struct perf_event *event);
525 struct perf_event *event);
528 struct perf_event *event);
540 unsigned (*limit_period)(struct perf_event *event, unsigned l);
582 void (*pebs_aliases)(struct perf_event *event);
662 int x86_perf_event_set_period(struct perf_event *event);
683 u64 x86_perf_event_update(struct perf_event *event);
710 void hw_perf_lbr_event_destroy(struct perf_event *event);
712 int x86_setup_perfctr(struct perf_event *event);
714 int x86_pmu_hw_config(struct perf_event *event);
734 void x86_pmu_stop(struct perf_event *event, int flags);
736 static inline void x86_pmu_disable_event(struct perf_event *event) x86_pmu_disable_event() argument
738 struct hw_perf_event *hwc = &event->hw; x86_pmu_disable_event()
743 void x86_pmu_enable_event(struct perf_event *event);
780 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
798 static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) intel_pmu_needs_lbr_smpl() argument
801 if (has_branch_stack(event)) intel_pmu_needs_lbr_smpl()
805 if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 && intel_pmu_needs_lbr_smpl()
812 static inline bool intel_pmu_has_bts(struct perf_event *event) intel_pmu_has_bts() argument
814 if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && intel_pmu_has_bts()
815 !event->attr.freq && event->hw.sample_period == 1) intel_pmu_has_bts()
821 int intel_pmu_save_and_restart(struct perf_event *event);
825 struct perf_event *event);
863 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
865 void intel_pmu_pebs_enable(struct perf_event *event);
867 void intel_pmu_pebs_disable(struct perf_event *event);
879 void intel_pmu_lbr_enable(struct perf_event *event);
881 void intel_pmu_lbr_disable(struct perf_event *event);
899 int intel_pmu_setup_lbr_filter(struct perf_event *event);
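intel_pmu_has_bts() above only treats an event as a BTS candidate when it counts PERF_COUNT_HW_BRANCH_INSTRUCTIONS with a fixed sample_period of 1 and frequency mode off. A small sketch of a perf_event_attr that satisfies that check (whether BTS is actually used still depends on the other constraints in these files):

#include <linux/perf_event.h>
#include <string.h>

/* Fill @attr so it matches the intel_pmu_has_bts() test shown above:
 * hardware branch-instruction counting, period 1, freq mode off. */
static void bts_candidate_attr(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_HARDWARE;
        attr->config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
        attr->sample_period = 1;   /* hw.sample_period == 1 */
        attr->freq = 0;            /* !attr.freq */
        attr->exclude_kernel = 1;  /* optional: trace user branches only */
}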
H A Dperf_event_intel_bts.c219 static void __bts_event_start(struct perf_event *event) __bts_event_start() argument
228 event->hw.state = 0; __bts_event_start()
232 if (!event->attr.exclude_kernel) __bts_event_start()
234 if (!event->attr.exclude_user) __bts_event_start()
248 static void bts_event_start(struct perf_event *event, int flags) bts_event_start() argument
252 __bts_event_start(event); bts_event_start()
258 static void __bts_event_stop(struct perf_event *event) __bts_event_stop() argument
266 if (event->hw.state & PERF_HES_STOPPED) __bts_event_stop()
269 ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED; __bts_event_stop()
272 static void bts_event_stop(struct perf_event *event, int flags) bts_event_stop() argument
279 __bts_event_stop(event); bts_event_stop()
289 if (bts->handle.event && bts->started) intel_bts_enable_local()
290 __bts_event_start(bts->handle.event); intel_bts_enable_local()
297 if (bts->handle.event) intel_bts_disable_local()
298 __bts_event_stop(bts->handle.event); intel_bts_disable_local()
382 struct perf_event *event = bts->handle.event; intel_bts_interrupt() local
387 if (!event || !bts->started) intel_bts_interrupt()
409 buf = perf_aux_output_begin(&bts->handle, event); intel_bts_interrupt()
420 static void bts_event_del(struct perf_event *event, int mode) bts_event_del() argument
426 bts_event_stop(event, PERF_EF_UPDATE); bts_event_del()
443 static int bts_event_add(struct perf_event *event, int mode) bts_event_add() argument
448 struct hw_perf_event *hwc = &event->hw; bts_event_add()
451 event->hw.state = PERF_HES_STOPPED; bts_event_add()
456 if (bts->handle.event) bts_event_add()
459 buf = perf_aux_output_begin(&bts->handle, event); bts_event_add()
474 bts_event_start(event, 0); bts_event_add()
476 bts_event_del(event, 0); bts_event_add()
484 static void bts_event_destroy(struct perf_event *event) bts_event_destroy() argument
490 static int bts_event_init(struct perf_event *event) bts_event_init() argument
494 if (event->attr.type != bts_pmu.type) bts_event_init()
506 event->destroy = bts_event_destroy; bts_event_init()
511 static void bts_event_read(struct perf_event *event) bts_event_read() argument
H A Dperf_event_intel.c196 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
197 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
198 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
1279 struct perf_event *event = __intel_pmu_enable_all() local
1282 if (WARN_ON_ONCE(!event)) __intel_pmu_enable_all()
1285 intel_pmu_enable_bts(event->hw.config); __intel_pmu_enable_all()
1318 struct perf_event *event; intel_pmu_nhm_workaround() local
1344 event = cpuc->events[i]; intel_pmu_nhm_workaround()
1345 if (event) intel_pmu_nhm_workaround()
1346 x86_perf_event_update(event); intel_pmu_nhm_workaround()
1358 event = cpuc->events[i]; intel_pmu_nhm_workaround()
1360 if (event) { intel_pmu_nhm_workaround()
1361 x86_perf_event_set_period(event); intel_pmu_nhm_workaround()
1362 __x86_pmu_enable_event(&event->hw, intel_pmu_nhm_workaround()
1402 static inline bool event_is_checkpointed(struct perf_event *event) event_is_checkpointed() argument
1404 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; event_is_checkpointed()
1407 static void intel_pmu_disable_event(struct perf_event *event) intel_pmu_disable_event() argument
1409 struct hw_perf_event *hwc = &event->hw; intel_pmu_disable_event()
1423 * must be disabled before any actual event intel_pmu_disable_event()
1424 * because any event may be combined with LBR intel_pmu_disable_event()
1426 if (needs_branch_stack(event)) intel_pmu_disable_event()
1427 intel_pmu_lbr_disable(event); intel_pmu_disable_event()
1434 x86_pmu_disable_event(event); intel_pmu_disable_event()
1436 if (unlikely(event->attr.precise_ip)) intel_pmu_disable_event()
1437 intel_pmu_pebs_disable(event); intel_pmu_disable_event()
1471 static void intel_pmu_enable_event(struct perf_event *event) intel_pmu_enable_event() argument
1473 struct hw_perf_event *hwc = &event->hw; intel_pmu_enable_event()
1484 * must be enabled before any actual event intel_pmu_enable_event()
1485 * because any event may be combined with LBR intel_pmu_enable_event()
1487 if (needs_branch_stack(event)) intel_pmu_enable_event()
1488 intel_pmu_lbr_enable(event); intel_pmu_enable_event()
1490 if (event->attr.exclude_host) intel_pmu_enable_event()
1492 if (event->attr.exclude_guest) intel_pmu_enable_event()
1495 if (unlikely(event_is_checkpointed(event))) intel_pmu_enable_event()
1503 if (unlikely(event->attr.precise_ip)) intel_pmu_enable_event()
1504 intel_pmu_pebs_enable(event); intel_pmu_enable_event()
1510 * Save and restart an expired event. Called by NMI contexts,
1511 * so it has to be careful about preempting normal event ops:
1513 int intel_pmu_save_and_restart(struct perf_event *event) intel_pmu_save_and_restart() argument
1515 x86_perf_event_update(event); intel_pmu_save_and_restart()
1522 if (unlikely(event_is_checkpointed(event))) { intel_pmu_save_and_restart()
1524 wrmsrl(event->hw.event_base, 0); intel_pmu_save_and_restart()
1525 local64_set(&event->hw.prev_count, 0); intel_pmu_save_and_restart()
1527 return x86_perf_event_set_period(event); intel_pmu_save_and_restart()
1646 struct perf_event *event = cpuc->events[bit]; intel_pmu_handle_irq() local
1653 if (!intel_pmu_save_and_restart(event)) intel_pmu_handle_irq()
1656 perf_sample_data_init(&data, 0, event->hw.last_period); intel_pmu_handle_irq()
1658 if (has_branch_stack(event)) intel_pmu_handle_irq()
1661 if (perf_event_overflow(event, &data, regs)) intel_pmu_handle_irq()
1662 x86_pmu_stop(event, 0); intel_pmu_handle_irq()
1685 intel_bts_constraints(struct perf_event *event) intel_bts_constraints() argument
1687 struct hw_perf_event *hwc = &event->hw; intel_bts_constraints()
1690 if (event->attr.freq) intel_bts_constraints()
1716 static void intel_fixup_er(struct perf_event *event, int idx) intel_fixup_er() argument
1718 event->hw.extra_reg.idx = idx; intel_fixup_er()
1721 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; intel_fixup_er()
1722 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event; intel_fixup_er()
1723 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; intel_fixup_er()
1725 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; intel_fixup_er()
1726 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event; intel_fixup_er()
1727 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; intel_fixup_er()
1740 struct perf_event *event, __intel_shared_reg_get_constraints()
1767 * If it's a fake cpuc -- as per validate_{group,event}() we __intel_shared_reg_get_constraints()
1768 * shouldn't touch event state and we can avoid doing so __intel_shared_reg_get_constraints()
1770 * on each event, this avoids the need for reg->alloc. __intel_shared_reg_get_constraints()
1778 intel_fixup_er(event, idx); __intel_shared_reg_get_constraints()
1798 * to check if the associated event has constraints __intel_shared_reg_get_constraints()
1821 * care of events which do not use an extra shared reg. __intel_shared_reg_put_constraints()
1823 * Also, if this is a fake cpuc we shouldn't touch any event state __intel_shared_reg_put_constraints()
1841 struct perf_event *event) intel_shared_regs_constraints()
1846 xreg = &event->hw.extra_reg; intel_shared_regs_constraints()
1848 c = __intel_shared_reg_get_constraints(cpuc, event, xreg); intel_shared_regs_constraints()
1852 breg = &event->hw.branch_reg; intel_shared_regs_constraints()
1854 d = __intel_shared_reg_get_constraints(cpuc, event, breg); intel_shared_regs_constraints()
1865 struct perf_event *event) x86_get_event_constraints()
1871 if ((event->hw.config & c->cmask) == c->code) { for_each_event_constraint()
1872 event->hw.flags |= c->flags; for_each_event_constraint()
1883 struct perf_event *event) __intel_get_event_constraints()
1887 c = intel_bts_constraints(event); __intel_get_event_constraints()
1891 c = intel_shared_regs_constraints(cpuc, event); __intel_get_event_constraints()
1895 c = intel_pebs_constraints(event); __intel_get_event_constraints()
1899 return x86_get_event_constraints(cpuc, idx, event); __intel_get_event_constraints()
1975 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, intel_get_excl_constraints() argument
1998 * event requires exclusive counter access intel_get_excl_constraints()
2002 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { intel_get_excl_constraints()
2003 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; intel_get_excl_constraints()
2060 * EXCLUSIVE: sibling counter measuring exclusive event intel_get_excl_constraints()
2061 * SHARED : sibling counter measuring non-exclusive event intel_get_excl_constraints()
2066 * exclusive event in sibling counter intel_get_excl_constraints()
2068 * regardless of our event intel_get_excl_constraints()
2073 * if measuring an exclusive event, sibling intel_get_excl_constraints()
2099 struct perf_event *event) intel_get_event_constraints()
2112 c2 = __intel_get_event_constraints(cpuc, idx, event); intel_get_event_constraints()
2120 return intel_get_excl_constraints(cpuc, event, idx, c2); intel_get_event_constraints()
2126 struct perf_event *event) intel_put_excl_constraints()
2128 struct hw_perf_event *hwc = &event->hw; intel_put_excl_constraints()
2163 * if event was actually assigned, then mark the intel_put_excl_constraints()
2175 struct perf_event *event) intel_put_shared_regs_event_constraints()
2179 reg = &event->hw.extra_reg; intel_put_shared_regs_event_constraints()
2183 reg = &event->hw.branch_reg; intel_put_shared_regs_event_constraints()
2189 struct perf_event *event) intel_put_event_constraints()
2191 intel_put_shared_regs_event_constraints(cpuc, event); intel_put_event_constraints()
2199 intel_put_excl_constraints(cpuc, event); intel_put_event_constraints()
2237 static void intel_pebs_aliases_core2(struct perf_event *event) intel_pebs_aliases_core2() argument
2239 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { intel_pebs_aliases_core2()
2244 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't intel_pebs_aliases_core2()
2246 * (0x00c0), which is a PEBS capable event, to get the same intel_pebs_aliases_core2()
2258 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); intel_pebs_aliases_core2()
2260 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); intel_pebs_aliases_core2()
2261 event->hw.config = alt_config; intel_pebs_aliases_core2()
2265 static void intel_pebs_aliases_snb(struct perf_event *event) intel_pebs_aliases_snb() argument
2267 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { intel_pebs_aliases_snb()
2272 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't intel_pebs_aliases_snb()
2274 * (0x01c2), which is a PEBS capable event, to get the same intel_pebs_aliases_snb()
2286 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); intel_pebs_aliases_snb()
2288 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); intel_pebs_aliases_snb()
2289 event->hw.config = alt_config; intel_pebs_aliases_snb()
2293 static int intel_pmu_hw_config(struct perf_event *event) intel_pmu_hw_config() argument
2295 int ret = x86_pmu_hw_config(event); intel_pmu_hw_config()
2300 if (event->attr.precise_ip && x86_pmu.pebs_aliases) intel_pmu_hw_config()
2301 x86_pmu.pebs_aliases(event); intel_pmu_hw_config()
2303 if (needs_branch_stack(event)) { intel_pmu_hw_config()
2304 ret = intel_pmu_setup_lbr_filter(event); intel_pmu_hw_config()
2311 if (!intel_pmu_has_bts(event)) { intel_pmu_hw_config()
2316 event->destroy = hw_perf_lbr_event_destroy; intel_pmu_hw_config()
2320 if (event->attr.type != PERF_TYPE_RAW) intel_pmu_hw_config()
2323 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) intel_pmu_hw_config()
2332 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY; intel_pmu_hw_config()
2374 struct perf_event *event = cpuc->events[idx]; core_guest_get_msrs() local
2383 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; core_guest_get_msrs()
2385 if (event->attr.exclude_host) core_guest_get_msrs()
2387 else if (event->attr.exclude_guest) core_guest_get_msrs()
2395 static void core_pmu_enable_event(struct perf_event *event) core_pmu_enable_event() argument
2397 if (!event->attr.exclude_host) core_pmu_enable_event()
2398 x86_pmu_enable_event(event); core_pmu_enable_event()
2417 static int hsw_hw_config(struct perf_event *event) hsw_hw_config() argument
2419 int ret = intel_pmu_hw_config(event); hsw_hw_config()
2425 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); hsw_hw_config()
2432 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && hsw_hw_config()
2433 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || hsw_hw_config()
2434 event->attr.precise_ip > 0)) hsw_hw_config()
2437 if (event_is_checkpointed(event)) { hsw_hw_config()
2447 if (event->attr.sample_period > 0 && hsw_hw_config()
2448 event->attr.sample_period < 0x7fffffff) hsw_hw_config()
2459 struct perf_event *event) hsw_get_event_constraints()
2463 c = intel_get_event_constraints(cpuc, idx, event); hsw_get_event_constraints()
2466 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { hsw_get_event_constraints()
2490 static unsigned bdw_limit_period(struct perf_event *event, unsigned left) bdw_limit_period() argument
2492 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == bdw_limit_period()
2493 X86_CONFIG(.event=0xc0, .umask=0x01)) { bdw_limit_period()
2501 PMU_FORMAT_ATTR(event, "config:0-7" );
2523 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); intel_event_sysfs_show() local
2525 return x86_event_sysfs_show(page, config, event); intel_event_sysfs_show()
2720 * the generic event period:
2758 * the generic event period:
2781 * AJ68 - PEBS PMI may be delayed by one event intel_clovertown_quirk()
2902 /* disable events that are reported as not present by cpuid */ for_each_set_bit()
2905 pr_warn("CPUID marked event: \'%s\' unavailable\n", for_each_set_bit()
2918 * the BR_MISP_EXEC.ANY event. This will over-count intel_nehalem_quirk()
2920 * architectural event which is often completely bogus: intel_nehalem_quirk()
2950 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
2951 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
2954 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
2955 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
2956 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
2957 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
2958 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
2959 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
2960 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
2961 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
2962 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
2963 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
2964 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
2965 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
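PMU_FORMAT_ATTR(event, "config:0-7") above places the event select in the low byte of attr.config, and the alias strings ("event=0x3c,in_tx=1", "event=0xc9,umask=0x1", ...) are named spellings of such encodings. A hedged sketch that opens the same 0x3c cycles event as a raw config instead of through a sysfs alias (umask in bits 8-15 follows the usual x86 raw encoding, which this listing does not show):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        /* event select in bits 0-7, umask in bits 8-15: 0x3c / 0x00 is the
         * CPU_CLK_UNHALTED.THREAD_P encoding used by the cycles-t alias
         * above, minus the in_tx bit. */
        attr.config = (0x00ULL << 8) | 0x3c;
        attr.exclude_kernel = 1;

        /* Count this process on any cpu. */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;
        for (volatile int i = 0; i < 1000000; i++)
                ;
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("unhalted cycles: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}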
3091 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3094 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); intel_pmu_init()
3153 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3156 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); intel_pmu_init()
3189 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3192 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3225 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); intel_pmu_init()
3326 * event on fixed counter2 (REF_CYCLES) only works on this intel_pmu_init()
3356 * E.g. KVM doesn't support offcore event
1739 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, struct hw_perf_event_extra *reg) __intel_shared_reg_get_constraints() argument
1840 intel_shared_regs_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) intel_shared_regs_constraints() argument
1864 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) x86_get_event_constraints() argument
1882 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) __intel_get_event_constraints() argument
2098 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) intel_get_event_constraints() argument
2125 intel_put_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) intel_put_excl_constraints() argument
2174 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) intel_put_shared_regs_event_constraints() argument
2188 intel_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) intel_put_event_constraints() argument
2458 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) hsw_get_event_constraints() argument
H A Dperf_event_knc.c27 /* On Xeon Phi event "0" is a valid DATA_READ */
176 knc_pmu_disable_event(struct perf_event *event) knc_pmu_disable_event() argument
178 struct hw_perf_event *hwc = &event->hw; knc_pmu_disable_event()
187 static void knc_pmu_enable_event(struct perf_event *event) knc_pmu_enable_event() argument
189 struct hw_perf_event *hwc = &event->hw; knc_pmu_enable_event()
242 struct perf_event *event = cpuc->events[bit]; knc_pmu_handle_irq() local
249 if (!intel_pmu_save_and_restart(event)) knc_pmu_handle_irq()
252 perf_sample_data_init(&data, 0, event->hw.last_period); knc_pmu_handle_irq()
254 if (perf_event_overflow(event, &data, regs)) knc_pmu_handle_irq()
255 x86_pmu_stop(event, 0); knc_pmu_handle_irq()
272 PMU_FORMAT_ATTR(event, "config:0-7" );
/linux-4.1.27/drivers/input/
H A Dinput-compat.c18 struct input_event *event) input_event_from_user()
27 event->time.tv_sec = compat_event.time.tv_sec; input_event_from_user()
28 event->time.tv_usec = compat_event.time.tv_usec; input_event_from_user()
29 event->type = compat_event.type; input_event_from_user()
30 event->code = compat_event.code; input_event_from_user()
31 event->value = compat_event.value; input_event_from_user()
34 if (copy_from_user(event, buffer, sizeof(struct input_event))) input_event_from_user()
42 const struct input_event *event) input_event_to_user()
47 compat_event.time.tv_sec = event->time.tv_sec; input_event_to_user()
48 compat_event.time.tv_usec = event->time.tv_usec; input_event_to_user()
49 compat_event.type = event->type; input_event_to_user()
50 compat_event.code = event->code; input_event_to_user()
51 compat_event.value = event->value; input_event_to_user()
58 if (copy_to_user(buffer, event, sizeof(struct input_event))) input_event_to_user()
103 struct input_event *event) input_event_from_user()
105 if (copy_from_user(event, buffer, sizeof(struct input_event))) input_event_from_user()
112 const struct input_event *event) input_event_to_user()
114 if (copy_to_user(buffer, event, sizeof(struct input_event))) input_event_to_user()
17 input_event_from_user(const char __user *buffer, struct input_event *event) input_event_from_user() argument
41 input_event_to_user(char __user *buffer, const struct input_event *event) input_event_to_user() argument
102 input_event_from_user(const char __user *buffer, struct input_event *event) input_event_from_user() argument
111 input_event_to_user(char __user *buffer, const struct input_event *event) input_event_to_user() argument
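input_event_from_user()/input_event_to_user() above copy whole struct input_event records (timestamp, type, code, value) between the evdev character device and its reader. A minimal sketch of the matching userspace side, assuming /dev/input/event0 is a readable device node (the path is illustrative):

#include <linux/input.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct input_event ev;
        int fd = open("/dev/input/event0", O_RDONLY);  /* hypothetical node */

        if (fd < 0)
                return 1;
        /* Each read() returns whole struct input_event records, exactly the
         * layout input_event_to_user() copies out above. */
        while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                if (ev.type == EV_KEY)
                        printf("key %u %s\n", ev.code,
                               ev.value ? "pressed" : "released");
        }
        close(fd);
        return 0;
}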
/linux-4.1.27/drivers/acpi/
H A Devent.c2 * event.c - exporting ACPI events via procfs
22 ACPI_MODULE_NAME("event");
29 struct acpi_bus_event event; acpi_notifier_call_chain() local
31 strcpy(event.device_class, dev->pnp.device_class); acpi_notifier_call_chain()
32 strcpy(event.bus_id, dev->pnp.bus_id); acpi_notifier_call_chain()
33 event.type = type; acpi_notifier_call_chain()
34 event.data = data; acpi_notifier_call_chain()
35 return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event) acpi_notifier_call_chain()
64 ACPI_GENL_ATTR_EVENT, /* ACPI event info needed by user space */
100 struct acpi_genl_event *event; acpi_bus_generate_netlink_event() local
130 event = nla_data(attr); acpi_bus_generate_netlink_event()
131 memset(event, 0, sizeof(struct acpi_genl_event)); acpi_bus_generate_netlink_event()
133 strcpy(event->device_class, device_class); acpi_bus_generate_netlink_event()
134 strcpy(event->bus_id, bus_id); acpi_bus_generate_netlink_event()
135 event->type = type; acpi_bus_generate_netlink_event()
136 event->data = data; acpi_bus_generate_netlink_event()
175 /* create genetlink for acpi event */ acpi_event_init()
179 "Failed to create genetlink family for ACPI event\n"); acpi_event_init()
/linux-4.1.27/net/sctp/
H A Dulpevent.c9 * These functions manipulate an sctp event. The struct ulpevent is used
46 static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
48 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
49 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event);
52 /* Initialize an ULP event from a given skb. */ sctp_ulpevent_init()
53 static void sctp_ulpevent_init(struct sctp_ulpevent *event, sctp_ulpevent_init() argument
57 memset(event, 0, sizeof(struct sctp_ulpevent)); sctp_ulpevent_init()
58 event->msg_flags = msg_flags; sctp_ulpevent_init()
59 event->rmem_len = len; sctp_ulpevent_init()
66 struct sctp_ulpevent *event; sctp_ulpevent_new() local
73 event = sctp_skb2event(skb); sctp_ulpevent_new()
74 sctp_ulpevent_init(event, msg_flags, skb->truesize); sctp_ulpevent_new()
76 return event; sctp_ulpevent_new()
83 int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event) sctp_ulpevent_is_notification() argument
85 return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION); sctp_ulpevent_is_notification()
91 static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event, sctp_ulpevent_set_owner() argument
100 skb = sctp_event2skb(event); sctp_ulpevent_set_owner()
101 event->asoc = (struct sctp_association *)asoc; sctp_ulpevent_set_owner()
102 atomic_add(event->rmem_len, &event->asoc->rmem_alloc); sctp_ulpevent_set_owner()
107 static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) sctp_ulpevent_release_owner() argument
109 struct sctp_association *asoc = event->asoc; sctp_ulpevent_release_owner()
111 atomic_sub(event->rmem_len, &asoc->rmem_alloc); sctp_ulpevent_release_owner()
115 /* Create and initialize an SCTP_ASSOC_CHANGE event.
131 struct sctp_ulpevent *event; sctp_ulpevent_make_assoc_change() local
148 /* Embed the event fields inside the cloned skb. */ sctp_ulpevent_make_assoc_change()
149 event = sctp_skb2event(skb); sctp_ulpevent_make_assoc_change()
150 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); sctp_ulpevent_make_assoc_change()
161 event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), sctp_ulpevent_make_assoc_change()
163 if (!event) sctp_ulpevent_make_assoc_change()
166 skb = sctp_event2skb(event); sctp_ulpevent_make_assoc_change()
184 * event that happened to the association. sctp_ulpevent_make_assoc_change()
238 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_assoc_change()
241 return event; sctp_ulpevent_make_assoc_change()
247 /* Create and initialize an SCTP_PEER_ADDR_CHANGE event.
253 * an interface details event is sent.
260 struct sctp_ulpevent *event; sctp_ulpevent_make_peer_addr_change() local
264 event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change), sctp_ulpevent_make_peer_addr_change()
266 if (!event) sctp_ulpevent_make_peer_addr_change()
269 skb = sctp_event2skb(event); sctp_ulpevent_make_peer_addr_change()
306 * event that happened to the address. sctp_ulpevent_make_peer_addr_change()
330 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_peer_addr_change()
348 return event; sctp_ulpevent_make_peer_addr_change()
365 * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP
374 struct sctp_ulpevent *event; sctp_ulpevent_make_remote_error() local
398 /* Embed the event fields inside the cloned skb. */ sctp_ulpevent_make_remote_error()
399 event = sctp_skb2event(skb); sctp_ulpevent_make_remote_error()
400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); sctp_ulpevent_make_remote_error()
413 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_remote_error()
416 return event; sctp_ulpevent_make_remote_error()
430 struct sctp_ulpevent *event; sctp_ulpevent_make_send_failed() local
449 /* Embed the event fields inside the cloned skb. */ sctp_ulpevent_make_send_failed()
450 event = sctp_skb2event(skb); sctp_ulpevent_make_send_failed()
451 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); sctp_ulpevent_make_send_failed()
522 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_send_failed()
524 return event; sctp_ulpevent_make_send_failed()
539 struct sctp_ulpevent *event; sctp_ulpevent_make_shutdown_event() local
543 event = sctp_ulpevent_new(sizeof(struct sctp_shutdown_event), sctp_ulpevent_make_shutdown_event()
545 if (!event) sctp_ulpevent_make_shutdown_event()
548 skb = sctp_event2skb(event); sctp_ulpevent_make_shutdown_event()
585 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_shutdown_event()
588 return event; sctp_ulpevent_make_shutdown_event()
602 struct sctp_ulpevent *event; sctp_ulpevent_make_adaptation_indication() local
606 event = sctp_ulpevent_new(sizeof(struct sctp_adaptation_event), sctp_ulpevent_make_adaptation_indication()
608 if (!event) sctp_ulpevent_make_adaptation_indication()
611 skb = sctp_event2skb(event); sctp_ulpevent_make_adaptation_indication()
619 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_adaptation_indication()
622 return event; sctp_ulpevent_make_adaptation_indication()
639 struct sctp_ulpevent *event = NULL; sctp_ulpevent_make_rcvmsg() local
693 /* Embed the event fields inside the cloned skb. */ sctp_ulpevent_make_rcvmsg()
694 event = sctp_skb2event(skb); sctp_ulpevent_make_rcvmsg()
696 /* Initialize event with flags 0 and correct length sctp_ulpevent_make_rcvmsg()
700 sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); sctp_ulpevent_make_rcvmsg()
702 sctp_ulpevent_receive_data(event, asoc); sctp_ulpevent_make_rcvmsg()
704 event->stream = ntohs(chunk->subh.data_hdr->stream); sctp_ulpevent_make_rcvmsg()
705 event->ssn = ntohs(chunk->subh.data_hdr->ssn); sctp_ulpevent_make_rcvmsg()
706 event->ppid = chunk->subh.data_hdr->ppid; sctp_ulpevent_make_rcvmsg()
708 event->flags |= SCTP_UNORDERED; sctp_ulpevent_make_rcvmsg()
709 event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); sctp_ulpevent_make_rcvmsg()
711 event->tsn = ntohl(chunk->subh.data_hdr->tsn); sctp_ulpevent_make_rcvmsg()
712 event->msg_flags |= chunk->chunk_hdr->flags; sctp_ulpevent_make_rcvmsg()
713 event->iif = sctp_chunk_iif(chunk); sctp_ulpevent_make_rcvmsg()
715 return event; sctp_ulpevent_make_rcvmsg()
723 /* Create a partial delivery related event.
735 struct sctp_ulpevent *event; sctp_ulpevent_make_pdapi() local
739 event = sctp_ulpevent_new(sizeof(struct sctp_pdapi_event), sctp_ulpevent_make_pdapi()
741 if (!event) sctp_ulpevent_make_pdapi()
744 skb = sctp_event2skb(event); sctp_ulpevent_make_pdapi()
775 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_pdapi()
778 return event; sctp_ulpevent_make_pdapi()
787 struct sctp_ulpevent *event; sctp_ulpevent_make_authkey() local
791 event = sctp_ulpevent_new(sizeof(struct sctp_authkey_event), sctp_ulpevent_make_authkey()
793 if (!event) sctp_ulpevent_make_authkey()
796 skb = sctp_event2skb(event); sctp_ulpevent_make_authkey()
811 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_authkey()
814 return event; sctp_ulpevent_make_authkey()
826 struct sctp_ulpevent *event; sctp_ulpevent_make_sender_dry_event() local
830 event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event), sctp_ulpevent_make_sender_dry_event()
832 if (!event) sctp_ulpevent_make_sender_dry_event()
835 skb = sctp_event2skb(event); sctp_ulpevent_make_sender_dry_event()
842 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_make_sender_dry_event()
845 return event; sctp_ulpevent_make_sender_dry_event()
849 * event.
851 __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event) sctp_ulpevent_get_notification_type() argument
856 skb = sctp_event2skb(event); sctp_ulpevent_get_notification_type()
864 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, sctp_ulpevent_read_sndrcvinfo() argument
869 if (sctp_ulpevent_is_notification(event)) sctp_ulpevent_read_sndrcvinfo()
873 sinfo.sinfo_stream = event->stream; sctp_ulpevent_read_sndrcvinfo()
874 sinfo.sinfo_ssn = event->ssn; sctp_ulpevent_read_sndrcvinfo()
875 sinfo.sinfo_ppid = event->ppid; sctp_ulpevent_read_sndrcvinfo()
876 sinfo.sinfo_flags = event->flags; sctp_ulpevent_read_sndrcvinfo()
877 sinfo.sinfo_tsn = event->tsn; sctp_ulpevent_read_sndrcvinfo()
878 sinfo.sinfo_cumtsn = event->cumtsn; sctp_ulpevent_read_sndrcvinfo()
879 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); sctp_ulpevent_read_sndrcvinfo()
881 sinfo.sinfo_context = event->asoc->default_rcv_context; sctp_ulpevent_read_sndrcvinfo()
892 void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, sctp_ulpevent_read_rcvinfo() argument
897 if (sctp_ulpevent_is_notification(event)) sctp_ulpevent_read_rcvinfo()
901 rinfo.rcv_sid = event->stream; sctp_ulpevent_read_rcvinfo()
902 rinfo.rcv_ssn = event->ssn; sctp_ulpevent_read_rcvinfo()
903 rinfo.rcv_ppid = event->ppid; sctp_ulpevent_read_rcvinfo()
904 rinfo.rcv_flags = event->flags; sctp_ulpevent_read_rcvinfo()
905 rinfo.rcv_tsn = event->tsn; sctp_ulpevent_read_rcvinfo()
906 rinfo.rcv_cumtsn = event->cumtsn; sctp_ulpevent_read_rcvinfo()
907 rinfo.rcv_assoc_id = sctp_assoc2id(event->asoc); sctp_ulpevent_read_rcvinfo()
908 rinfo.rcv_context = event->asoc->default_rcv_context; sctp_ulpevent_read_rcvinfo()
917 static void __sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, __sctp_ulpevent_read_nxtinfo() argument
924 nxtinfo.nxt_sid = event->stream; __sctp_ulpevent_read_nxtinfo()
925 nxtinfo.nxt_ppid = event->ppid; __sctp_ulpevent_read_nxtinfo()
926 nxtinfo.nxt_flags = event->flags; __sctp_ulpevent_read_nxtinfo()
927 if (sctp_ulpevent_is_notification(event)) __sctp_ulpevent_read_nxtinfo()
930 nxtinfo.nxt_assoc_id = sctp_assoc2id(event->asoc); __sctp_ulpevent_read_nxtinfo()
936 void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, sctp_ulpevent_read_nxtinfo() argument
955 static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, sctp_ulpevent_receive_data() argument
960 skb = sctp_event2skb(event); sctp_ulpevent_receive_data()
962 sctp_ulpevent_set_owner(event, asoc); sctp_ulpevent_receive_data()
968 /* Note: Not clearing the entire event struct as this is just a sctp_ulpevent_receive_data()
969 * fragment of the real event. However, we still need to do rwnd sctp_ulpevent_receive_data()
981 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) sctp_ulpevent_release_data() argument
993 skb = sctp_event2skb(event); sctp_ulpevent_release_data()
1009 sctp_assoc_rwnd_increase(event->asoc, len);
1010 sctp_ulpevent_release_owner(event);
1013 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) sctp_ulpevent_release_frag_data() argument
1017 skb = sctp_event2skb(event); sctp_ulpevent_release_frag_data()
1032 sctp_ulpevent_release_owner(event);
1036 * to the owner, updating the rwnd in case of a DATA event and freeing the
1039 void sctp_ulpevent_free(struct sctp_ulpevent *event) sctp_ulpevent_free() argument
1041 if (sctp_ulpevent_is_notification(event)) sctp_ulpevent_free()
1042 sctp_ulpevent_release_owner(event); sctp_ulpevent_free()
1044 sctp_ulpevent_release_data(event); sctp_ulpevent_free()
1046 kfree_skb(sctp_event2skb(event)); sctp_ulpevent_free()
1056 struct sctp_ulpevent *event = sctp_skb2event(skb); sctp_queue_purge_ulpevents() local
1058 if (!sctp_ulpevent_is_notification(event)) sctp_queue_purge_ulpevents()
1061 sctp_ulpevent_free(event); sctp_queue_purge_ulpevents()
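ulpevent.c marks notifications with MSG_NOTIFICATION and fills stream/ssn/ppid for data events; on the receiving socket that surfaces as the MSG_NOTIFICATION flag and as sctp_sndrcvinfo ancillary data. A hedged receive-loop sketch using the lksctp helpers (field and function names as commonly provided by netinet/sctp.h; link with -lsctp):

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <arpa/inet.h>
#include <string.h>
#include <stdio.h>
#include <sys/socket.h>

/* Receive on an already-connected one-to-one SCTP socket and tell
 * notifications apart from data, mirroring the MSG_NOTIFICATION flag
 * set by sctp_ulpevent_init() above. */
static void sctp_recv_loop(int fd)
{
        struct sctp_event_subscribe events;
        struct sctp_sndrcvinfo sinfo;
        char buf[2048];
        int flags, n;

        memset(&events, 0, sizeof(events));
        events.sctp_data_io_event = 1;      /* ask for sctp_sndrcvinfo */
        events.sctp_association_event = 1;  /* SCTP_ASSOC_CHANGE notifications */
        setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &events, sizeof(events));

        for (;;) {
                flags = 0;
                memset(&sinfo, 0, sizeof(sinfo));
                n = sctp_recvmsg(fd, buf, sizeof(buf), NULL, NULL,
                                 &sinfo, &flags);
                if (n <= 0)
                        break;
                if (flags & MSG_NOTIFICATION)
                        printf("notification, %d bytes\n", n);
                else
                        printf("data on stream %u, ppid %u\n",
                               (unsigned)sinfo.sinfo_stream,
                               ntohl(sinfo.sinfo_ppid));
        }
}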
H A Dulpqueue.c74 struct sctp_ulpevent *event; sctp_ulpq_flush() local
77 event = sctp_skb2event(skb); sctp_ulpq_flush()
78 sctp_ulpevent_free(event); sctp_ulpq_flush()
82 event = sctp_skb2event(skb); sctp_ulpq_flush()
83 sctp_ulpevent_free(event); sctp_ulpq_flush()
99 struct sctp_ulpevent *event; sctp_ulpq_tail_data() local
102 /* Create an event from the incoming chunk. */ sctp_ulpq_tail_data()
103 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); sctp_ulpq_tail_data()
104 if (!event) sctp_ulpq_tail_data()
108 event = sctp_ulpq_reasm(ulpq, event); sctp_ulpq_tail_data()
111 if ((event) && (event->msg_flags & MSG_EOR)) { sctp_ulpq_tail_data()
114 __skb_queue_tail(&temp, sctp_event2skb(event)); sctp_ulpq_tail_data()
116 event = sctp_ulpq_order(ulpq, event); sctp_ulpq_tail_data()
119 /* Send event to the ULP. 'event' is the sctp_ulpevent for sctp_ulpq_tail_data()
122 if (event) { sctp_ulpq_tail_data()
123 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0; sctp_ulpq_tail_data()
124 sctp_ulpq_tail_event(ulpq, event); sctp_ulpq_tail_data()
130 /* Add a new event for propagation to the ULP. */
157 struct sctp_ulpevent *event; sctp_clear_pd() local
160 event = sctp_skb2event(skb); sctp_clear_pd()
161 if (event->asoc == asoc) { sctp_clear_pd()
190 /* If the SKB of 'event' is on a list, it is the first such member
193 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_tail_event() argument
197 struct sk_buff *skb = sctp_event2skb(event); sctp_ulpq_tail_event()
208 if (!sctp_ulpevent_is_notification(event)) { sctp_ulpq_tail_event()
212 /* Check if the user wishes to receive this event. */ sctp_ulpq_tail_event()
213 if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) sctp_ulpq_tail_event()
230 if ((event->msg_flags & MSG_NOTIFICATION) || sctp_ulpq_tail_event()
232 (event->msg_flags & SCTP_DATA_FRAG_MASK))) sctp_ulpq_tail_event()
235 clear_pd = event->msg_flags & MSG_EOR; sctp_ulpq_tail_event()
274 sctp_ulpevent_free(event); sctp_ulpq_tail_event()
283 struct sctp_ulpevent *event) sctp_ulpq_store_reasm()
289 tsn = event->tsn; sctp_ulpq_store_reasm()
294 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); sctp_ulpq_store_reasm()
302 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); sctp_ulpq_store_reasm()
316 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event)); sctp_ulpq_store_reasm()
320 /* Helper function to return an event corresponding to the reassembled
333 struct sctp_ulpevent *event; sctp_make_reassembled_event() local
397 event = sctp_skb2event(f_frag); sctp_make_reassembled_event()
400 return event; sctp_make_reassembled_event()
405 * missing fragment in a SCTP datagram and return the corresponding event.
570 /* We have the reassembled event. There is no need to look sctp_ulpq_retrieve_partial()
587 struct sctp_ulpevent *event) sctp_ulpq_reasm()
592 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { sctp_ulpq_reasm()
593 event->msg_flags |= MSG_EOR; sctp_ulpq_reasm()
594 return event; sctp_ulpq_reasm()
597 sctp_ulpq_store_reasm(ulpq, event); sctp_ulpq_reasm()
606 ctsn = event->tsn; sctp_ulpq_reasm()
671 /* We have the reassembled event. There is no need to look sctp_ulpq_retrieve_first()
690 * In the event that the receiver has invoked the partial delivery API,
697 struct sctp_ulpevent *event; sctp_ulpq_reasm_flushtsn() local
704 event = sctp_skb2event(pos); sctp_ulpq_reasm_flushtsn()
705 tsn = event->tsn; sctp_ulpq_reasm_flushtsn()
714 sctp_ulpevent_free(event); sctp_ulpq_reasm_flushtsn()
727 struct sctp_ulpevent *event = NULL; sctp_ulpq_reasm_drain() local
733 while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) { sctp_ulpq_reasm_drain()
735 if ((event) && (event->msg_flags & MSG_EOR)) { sctp_ulpq_reasm_drain()
737 __skb_queue_tail(&temp, sctp_event2skb(event)); sctp_ulpq_reasm_drain()
739 event = sctp_ulpq_order(ulpq, event); sctp_ulpq_reasm_drain()
742 /* Send event to the ULP. 'event' is the sctp_ulpq_reasm_drain()
745 if (event) sctp_ulpq_reasm_drain()
746 sctp_ulpq_tail_event(ulpq, event); sctp_ulpq_reasm_drain()
755 struct sctp_ulpevent *event) sctp_ulpq_retrieve_ordered()
763 sid = event->stream; sctp_ulpq_retrieve_ordered()
766 event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev; sctp_ulpq_retrieve_ordered()
790 /* Attach all gathered skbs to the event. */ sctp_ulpq_retrieve_ordered()
797 struct sctp_ulpevent *event) sctp_ulpq_store_ordered()
806 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); sctp_ulpq_store_ordered()
810 sid = event->stream; sctp_ulpq_store_ordered()
811 ssn = event->ssn; sctp_ulpq_store_ordered()
817 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); sctp_ulpq_store_ordered()
822 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); sctp_ulpq_store_ordered()
842 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event)); sctp_ulpq_store_ordered()
846 struct sctp_ulpevent *event) sctp_ulpq_order()
852 if (SCTP_DATA_UNORDERED & event->msg_flags) sctp_ulpq_order()
853 return event; sctp_ulpq_order()
856 sid = event->stream; sctp_ulpq_order()
857 ssn = event->ssn; sctp_ulpq_order()
865 sctp_ulpq_store_ordered(ulpq, event); sctp_ulpq_order()
875 sctp_ulpq_retrieve_ordered(ulpq, event); sctp_ulpq_order()
877 return event; sctp_ulpq_order()
887 struct sctp_ulpevent *event; sctp_ulpq_reap_ordered() local
897 event = NULL; sctp_skb_for_each()
916 if (!event) sctp_skb_for_each()
918 event = sctp_skb2event(pos); sctp_skb_for_each()
920 /* Attach all gathered skbs to the event. */ sctp_skb_for_each()
927 if (event == NULL && pos != (struct sk_buff *)lobby) {
936 event = sctp_skb2event(pos);
940 /* Send event to the ULP. 'event' is the sctp_ulpevent for
943 if (event) {
945 sctp_ulpq_retrieve_ordered(ulpq, event);
946 sctp_ulpq_tail_event(ulpq, event);
979 struct sctp_ulpevent *event; sctp_ulpq_renege_list() local
985 event = sctp_skb2event(skb); sctp_ulpq_renege_list()
986 tsn = event->tsn; sctp_ulpq_renege_list()
1007 /* Unlink the event, then renege all applicable TSNs. */ sctp_ulpq_renege_list()
1009 sctp_ulpevent_free(event); sctp_ulpq_renege_list()
1037 struct sctp_ulpevent *event; sctp_ulpq_partial_delivery() local
1069 event = sctp_ulpq_retrieve_first(ulpq); sctp_ulpq_partial_delivery()
1070 /* Send event to the ULP. */ sctp_ulpq_partial_delivery()
1071 if (event) { sctp_ulpq_partial_delivery()
1072 sctp_ulpq_tail_event(ulpq, event); sctp_ulpq_partial_delivery()
282 sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_store_reasm() argument
586 sctp_ulpq_reasm(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_reasm() argument
754 sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_retrieve_ordered() argument
796 sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_store_ordered() argument
845 sctp_ulpq_order(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) sctp_ulpq_order() argument
/linux-4.1.27/drivers/acpi/acpica/
H A Devxfevnt.c3 * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
168 * PARAMETERS: event - The fixed event to be enabled ACPI_EXPORT_SYMBOL()
173 * DESCRIPTION: Enable an ACPI event (fixed) ACPI_EXPORT_SYMBOL()
176 acpi_status acpi_enable_event(u32 event, u32 flags) ACPI_EXPORT_SYMBOL()
185 if (event > ACPI_EVENT_MAX) { ACPI_EXPORT_SYMBOL()
190 * Enable the requested fixed event (by writing a one to the enable ACPI_EXPORT_SYMBOL()
194 acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
203 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
211 "Could not enable %s event", ACPI_EXPORT_SYMBOL()
212 acpi_ut_get_event_name(event))); ACPI_EXPORT_SYMBOL()
225 * PARAMETERS: event - The fixed event to be disabled ACPI_EXPORT_SYMBOL()
230 * DESCRIPTION: Disable an ACPI event (fixed) ACPI_EXPORT_SYMBOL()
233 acpi_status acpi_disable_event(u32 event, u32 flags) ACPI_EXPORT_SYMBOL()
242 if (event > ACPI_EVENT_MAX) { ACPI_EXPORT_SYMBOL()
247 * Disable the requested fixed event (by writing a zero to the enable ACPI_EXPORT_SYMBOL()
251 acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
258 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
267 acpi_ut_get_event_name(event))); ACPI_EXPORT_SYMBOL()
280 * PARAMETERS: event - The fixed event to be cleared ACPI_EXPORT_SYMBOL()
284 * DESCRIPTION: Clear an ACPI event (fixed) ACPI_EXPORT_SYMBOL()
287 acpi_status acpi_clear_event(u32 event) ACPI_EXPORT_SYMBOL()
295 if (event > ACPI_EVENT_MAX) { ACPI_EXPORT_SYMBOL()
300 * Clear the requested fixed event (By writing a one to the status ACPI_EXPORT_SYMBOL()
304 acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
316 * PARAMETERS: event - The fixed event ACPI_EXPORT_SYMBOL()
317 * event_status - Where the current status of the event will ACPI_EXPORT_SYMBOL()
322 * DESCRIPTION: Obtains and returns the current status of the event ACPI_EXPORT_SYMBOL()
325 acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) ACPI_EXPORT_SYMBOL()
339 if (event > ACPI_EVENT_MAX) { ACPI_EXPORT_SYMBOL()
343 /* Fixed event currently can be dispatched? */ ACPI_EXPORT_SYMBOL()
345 if (acpi_gbl_fixed_event_handlers[event].handler) { ACPI_EXPORT_SYMBOL()
349 /* Fixed event currently enabled? */ ACPI_EXPORT_SYMBOL()
352 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
363 /* Fixed event currently active? */ ACPI_EXPORT_SYMBOL()
366 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. ACPI_EXPORT_SYMBOL()
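acpi_enable_event(), acpi_clear_event() and acpi_get_event_status() above are the ACPICA entry points for fixed events. A hedged kernel-side sketch of a caller using them, with ACPI_EVENT_POWER_BUTTON chosen purely for illustration (this shows the call pattern only, not code from the tree):

#include <linux/acpi.h>
#include <linux/kernel.h>

/* Sketch: enable the power-button fixed event, then query its status. */
static int example_fixed_event_setup(void)
{
        acpi_event_status status;
        acpi_status ret;

        ret = acpi_enable_event(ACPI_EVENT_POWER_BUTTON, 0);
        if (ACPI_FAILURE(ret))
                return -EIO;

        ret = acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &status);
        if (ACPI_FAILURE(ret))
                return -EIO;

        pr_info("power button event %s, %s\n",
                (status & ACPI_EVENT_FLAG_ENABLED) ? "enabled" : "disabled",
                (status & ACPI_EVENT_FLAG_SET) ? "pending" : "idle");
        return 0;
}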
H A Devevent.c54 static u32 acpi_ev_fixed_event_dispatch(u32 event);
156 * DESCRIPTION: Install the fixed event handlers and disable all fixed events.
166 * Initialize the structure that keeps track of fixed event handlers and acpi_ev_fixed_event_initialize()
173 /* Disable the fixed event */ acpi_ev_fixed_event_initialize()
226 /* Both the status and enable bits must be on for this event */ acpi_ev_fixed_event_detect()
233 * Found an active (signalled) event. Invoke global event acpi_ev_fixed_event_detect()
254 * PARAMETERS: event - Event type
258 * DESCRIPTION: Clears the status bit for the requested event, calls the
259 * handler that previously registered for the event.
260 * NOTE: If there is no handler for the event, the event is
265 static u32 acpi_ev_fixed_event_dispatch(u32 event) acpi_ev_fixed_event_dispatch() argument
272 (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. acpi_ev_fixed_event_dispatch()
277 * and disable the event to prevent further interrupts. acpi_ev_fixed_event_dispatch()
279 if (!acpi_gbl_fixed_event_handlers[event].handler) { acpi_ev_fixed_event_dispatch()
280 (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. acpi_ev_fixed_event_dispatch()
285 "No installed handler for fixed event - %s (%u), disabling", acpi_ev_fixed_event_dispatch()
286 acpi_ut_get_event_name(event), event)); acpi_ev_fixed_event_dispatch()
293 return ((acpi_gbl_fixed_event_handlers[event]. acpi_ev_fixed_event_dispatch()
294 handler) (acpi_gbl_fixed_event_handlers[event].context)); acpi_ev_fixed_event_dispatch()
/linux-4.1.27/sound/core/seq/
H A DMakefile12 snd-seq-midi-event-objs := seq_midi_event.o
18 obj-$(CONFIG_SND_SEQUENCER) += snd-seq-midi-event.o
24 obj-$(CONFIG_SND_VIRMIDI) += snd-seq-virmidi.o snd-seq-midi-event.o
25 obj-$(CONFIG_SND_RAWMIDI_SEQ) += snd-seq-midi.o snd-seq-midi-event.o
26 obj-$(CONFIG_SND_OPL3_LIB_SEQ) += snd-seq-midi-event.o snd-seq-midi-emul.o
27 obj-$(CONFIG_SND_OPL4_LIB_SEQ) += snd-seq-midi-event.o snd-seq-midi-emul.o
H A Dseq_memory.c46 * Variable length event:
47 * An event like sysex uses the variable length type.
54 * When an event is generated via read(), the external data is
59 * When the variable length event is enqueued (in prioq or fifo),
71 static int get_var_len(const struct snd_seq_event *event) get_var_len() argument
73 if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) get_var_len()
76 return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK; get_var_len()
79 int snd_seq_dump_var_event(const struct snd_seq_event *event, snd_seq_dump_var_event() argument
85 if ((len = get_var_len(event)) <= 0) snd_seq_dump_var_event()
88 if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) { snd_seq_dump_var_event()
90 char __user *curptr = (char __force __user *)event->data.ext.ptr; snd_seq_dump_var_event()
105 if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) snd_seq_dump_var_event()
106 return func(private_data, event->data.ext.ptr, len); snd_seq_dump_var_event()
108 cell = (struct snd_seq_event_cell *)event->data.ext.ptr; snd_seq_dump_var_event()
113 err = func(private_data, &cell->event, size); snd_seq_dump_var_event()
126 * expand the variable length event to linear buffer space.
144 int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf, snd_seq_expand_var_event() argument
150 if ((len = get_var_len(event)) < 0) snd_seq_expand_var_event()
158 if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) { snd_seq_expand_var_event()
161 if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len)) snd_seq_expand_var_event()
165 err = snd_seq_dump_var_event(event, snd_seq_expand_var_event()
199 if (snd_seq_ev_is_variable(&cell->event)) { snd_seq_cell_free()
200 if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) { snd_seq_cell_free()
202 curp = cell->event.data.ext.ptr; snd_seq_cell_free()
220 * allocate an event cell.
285 * duplicate the event to a cell.
286 * if the event has external data, the data is decomposed to additional
289 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, snd_seq_event_dup() argument
301 if (snd_seq_ev_is_variable(event)) { snd_seq_event_dup()
302 extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK; snd_seq_event_dup()
312 /* copy the event */ snd_seq_event_dup()
313 cell->event = *event; snd_seq_event_dup()
316 if (snd_seq_ev_is_variable(event)) { snd_seq_event_dup()
318 int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED; snd_seq_event_dup()
319 int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR; snd_seq_event_dup()
323 cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED; snd_seq_event_dup()
324 cell->event.data.ext.ptr = NULL; snd_seq_event_dup()
326 src = (struct snd_seq_event_cell *)event->data.ext.ptr; snd_seq_event_dup()
327 buf = (char *)event->data.ext.ptr; snd_seq_event_dup()
337 if (cell->event.data.ext.ptr == NULL) snd_seq_event_dup()
338 cell->event.data.ext.ptr = tmp; snd_seq_event_dup()
344 tmp->event = src->event; snd_seq_event_dup()
347 if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) { snd_seq_event_dup()
352 memcpy(&tmp->event, buf, size); snd_seq_event_dup()
H A Dseq_clientmgr.c82 struct snd_seq_event *event,
85 struct snd_seq_event *event,
394 * -EINVAL: not enough user-space buffer to write the whole event snd_seq_read()
438 if (snd_seq_ev_is_variable(&cell->event)) { snd_seq_read()
440 tmpev = cell->event; snd_seq_read()
448 err = snd_seq_expand_var_event(&cell->event, count, snd_seq_read()
457 if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) { snd_seq_read()
495 static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event, get_event_dest_client() argument
500 dest = snd_seq_client_use_ptr(event->dest.client); get_event_dest_client()
506 ! test_bit(event->type, dest->event_filter)) get_event_dest_client()
519 * Return the error event.
521 * If the receiver client is a user client, the original event is
522 * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event. If
523 * the original event is also variable length, the external data is
524 * copied after the event record.
525 * If the receiver client is a kernel client, the original event is
530 struct snd_seq_event *event, bounce_error_event()
549 bounce_ev.dest.port = event->source.port; bounce_error_event()
550 bounce_ev.data.quote.origin = event->dest; bounce_error_event()
551 bounce_ev.data.quote.event = event; bounce_error_event()
564 * rewrite the time-stamp of the event record with the current time
568 static int update_timestamp_of_queue(struct snd_seq_event *event, update_timestamp_of_queue() argument
576 event->queue = queue; update_timestamp_of_queue()
577 event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK; update_timestamp_of_queue()
579 event->time.time = snd_seq_timer_get_cur_time(q->timer); update_timestamp_of_queue()
580 event->flags |= SNDRV_SEQ_TIME_STAMP_REAL; update_timestamp_of_queue()
582 event->time.tick = snd_seq_timer_get_cur_tick(q->timer); update_timestamp_of_queue()
583 event->flags |= SNDRV_SEQ_TIME_STAMP_TICK; update_timestamp_of_queue()
591 * deliver an event to the specified destination.
598 struct snd_seq_event *event, snd_seq_deliver_single_event()
606 direct = snd_seq_ev_is_direct(event); snd_seq_deliver_single_event()
608 dest = get_event_dest_client(event, filter); snd_seq_deliver_single_event()
611 dest_port = snd_seq_port_use_ptr(dest, event->dest.port); snd_seq_deliver_single_event()
622 update_timestamp_of_queue(event, dest_port->time_queue, snd_seq_deliver_single_event()
628 result = snd_seq_fifo_event_in(dest->data.user.fifo, event); snd_seq_deliver_single_event()
634 result = dest_port->event_input(event, direct, snd_seq_deliver_single_event()
649 result = bounce_error_event(client, event, result, atomic, hop); snd_seq_deliver_single_event()
656 * send the event to all subscribers:
659 struct snd_seq_event *event, deliver_to_subscribers()
668 src_port = snd_seq_port_use_ptr(client, event->source.port); deliver_to_subscribers()
671 /* save original event record */ deliver_to_subscribers()
672 event_saved = *event; deliver_to_subscribers()
684 event->dest = subs->info.dest; deliver_to_subscribers()
687 update_timestamp_of_queue(event, subs->info.queue, deliver_to_subscribers()
689 err = snd_seq_deliver_single_event(client, event, deliver_to_subscribers()
698 /* restore original event record */ deliver_to_subscribers()
699 *event = event_saved; deliver_to_subscribers()
705 *event = event_saved; /* restore */ deliver_to_subscribers()
716 struct snd_seq_event *event, port_broadcast_event()
723 dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST); port_broadcast_event()
729 event->dest.port = port->addr.port; port_broadcast_event()
731 err = snd_seq_deliver_single_event(NULL, event, port_broadcast_event()
744 event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */ port_broadcast_event()
749 * send the event to all clients:
753 struct snd_seq_event *event, int atomic, int hop) broadcast_event()
759 addr = event->dest; /* save */ broadcast_event()
765 event->dest.client = dest; broadcast_event()
766 event->dest.port = addr.port; broadcast_event()
768 err = port_broadcast_event(client, event, atomic, hop); broadcast_event()
771 err = snd_seq_deliver_single_event(NULL, event, broadcast_event()
782 event->dest = addr; /* restore */ broadcast_event()
788 static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event, multicast_event() argument
797 /* deliver an event to the destination port(s).
798 * if the event is to subscribers or broadcast, the event is dispatched
802 * n == 0 : the event was not passed to any client.
803 * n < 0 : error - event was not processed.
805 static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event, snd_seq_deliver_event() argument
813 event->source.client, event->source.port, snd_seq_deliver_event()
814 event->dest.client, event->dest.port); snd_seq_deliver_event()
818 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS || snd_seq_deliver_event()
819 event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) snd_seq_deliver_event()
820 result = deliver_to_subscribers(client, event, atomic, hop); snd_seq_deliver_event()
822 else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST || snd_seq_deliver_event()
823 event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST) snd_seq_deliver_event()
824 result = broadcast_event(client, event, atomic, hop); snd_seq_deliver_event()
825 else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS) snd_seq_deliver_event()
826 result = multicast_event(client, event, atomic, hop); snd_seq_deliver_event()
827 else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST) snd_seq_deliver_event()
828 result = port_broadcast_event(client, event, atomic, hop); snd_seq_deliver_event()
831 result = snd_seq_deliver_single_event(client, event, 0, atomic, hop); snd_seq_deliver_event()
837 * dispatch an event cell:
840 * The event cell shall be released or re-queued in this function.
843 * n == 0 : the event was not passed to any client.
844 * n < 0 : error - event was not processed.
854 client = snd_seq_client_use_ptr(cell->event.source.client); snd_seq_dispatch_event()
860 if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) { snd_seq_dispatch_event()
861 /* NOTE event: snd_seq_dispatch_event()
862 * the event cell is re-used as a NOTE-OFF event and snd_seq_dispatch_event()
867 /* reserve this event to enqueue note-off later */ snd_seq_dispatch_event()
868 tmpev = cell->event; snd_seq_dispatch_event()
873 * This was originally a note event. We now re-use the snd_seq_dispatch_event()
874 * cell for the note-off event. snd_seq_dispatch_event()
877 ev = &cell->event; snd_seq_dispatch_event()
896 /* Now queue this cell as the note off event */ snd_seq_dispatch_event()
902 * event cell is freed after processing the event snd_seq_dispatch_event()
905 result = snd_seq_deliver_event(client, &cell->event, atomic, hop); snd_seq_dispatch_event()
919 struct snd_seq_event *event, snd_seq_client_enqueue_event()
927 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { snd_seq_client_enqueue_event()
928 event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; snd_seq_client_enqueue_event()
929 event->queue = SNDRV_SEQ_QUEUE_DIRECT; snd_seq_client_enqueue_event()
932 if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) { snd_seq_client_enqueue_event()
933 event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST; snd_seq_client_enqueue_event()
934 event->queue = SNDRV_SEQ_QUEUE_DIRECT; snd_seq_client_enqueue_event()
937 if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { snd_seq_client_enqueue_event()
939 struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port); snd_seq_client_enqueue_event()
945 /* direct event processing without enqueueing */ snd_seq_client_enqueue_event()
946 if (snd_seq_ev_is_direct(event)) { snd_seq_client_enqueue_event()
947 if (event->type == SNDRV_SEQ_EVENT_NOTE) snd_seq_client_enqueue_event()
948 return -EINVAL; /* this event must be enqueued! */ snd_seq_client_enqueue_event()
949 return snd_seq_deliver_event(client, event, atomic, hop); snd_seq_client_enqueue_event()
953 if (snd_seq_queue_is_used(event->queue, client->number) <= 0) snd_seq_client_enqueue_event()
958 /* allocate an event cell */ snd_seq_client_enqueue_event()
959 err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file); snd_seq_client_enqueue_event()
974 * check validity of event type and data length.
1003 * -EINVAL invalid event
1015 struct snd_seq_event event; snd_seq_write() local
1035 /* Read in the event header from the user */ snd_seq_write()
1036 len = sizeof(event); snd_seq_write()
1037 if (copy_from_user(&event, buf, len)) { snd_seq_write()
1041 event.source.client = client->number; /* fill in client number */ snd_seq_write()
1043 if (check_event_type_and_length(&event)) { snd_seq_write()
1049 if (event.type == SNDRV_SEQ_EVENT_NONE) snd_seq_write()
1051 else if (snd_seq_ev_is_reserved(&event)) { snd_seq_write()
1056 if (snd_seq_ev_is_variable(&event)) { snd_seq_write()
1057 int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK; snd_seq_write()
1064 event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR; snd_seq_write()
1065 event.data.ext.ptr = (char __force *)buf snd_seq_write()
1070 if (client->convert32 && snd_seq_ev_is_varusr(&event)) { snd_seq_write()
1071 void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]); snd_seq_write()
1072 event.data.ext.ptr = ptr; snd_seq_write()
1078 err = snd_seq_client_enqueue_event(client, &event, file, snd_seq_write()
1421 * send a subscription notify event to user client:
1428 struct snd_seq_event event; snd_seq_client_notify_subscription() local
1430 memset(&event, 0, sizeof(event)); snd_seq_client_notify_subscription()
1431 event.type = evtype; snd_seq_client_notify_subscription()
1432 event.data.connect.dest = info->dest; snd_seq_client_notify_subscription()
1433 event.data.connect.sender = info->sender; snd_seq_client_notify_subscription()
1435 return snd_seq_system_notify(client, port, &event); /* non-atomic */ snd_seq_client_notify_subscription()
2310 /* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
529 bounce_error_event(struct snd_seq_client *client, struct snd_seq_event *event, int err, int atomic, int hop) bounce_error_event() argument
597 snd_seq_deliver_single_event(struct snd_seq_client *client, struct snd_seq_event *event, int filter, int atomic, int hop) snd_seq_deliver_single_event() argument
658 deliver_to_subscribers(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) deliver_to_subscribers() argument
715 port_broadcast_event(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) port_broadcast_event() argument
752 broadcast_event(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) broadcast_event() argument
918 snd_seq_client_enqueue_event(struct snd_seq_client *client, struct snd_seq_event *event, struct file *file, int blocking, int atomic, int hop) snd_seq_client_enqueue_event() argument
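The enqueue path above special-cases the subscriber and broadcast addresses and lets direct events skip the queue entirely via snd_seq_deliver_event(). A hedged sketch of a kernel sequencer client sending one direct event to its subscribers is shown below; the client id my_client is an assumption (e.g. a value previously returned by snd_seq_create_kernel_client()), and error handling is omitted.

/* Hedged sketch only: fan one note-on out to subscribers without enqueueing.
 * "my_client" is an assumed kernel client id; field names follow
 * include/sound/asequencer.h.
 */
struct snd_seq_event ev;

memset(&ev, 0, sizeof(ev));
ev.type = SNDRV_SEQ_EVENT_NOTEON;
ev.queue = SNDRV_SEQ_QUEUE_DIRECT;              /* direct: bypasses the priority queue */
ev.source.port = 0;
ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; /* deliver via port subscriptions */
ev.data.note.channel = 0;
ev.data.note.note = 60;
ev.data.note.velocity = 100;

snd_seq_kernel_client_dispatch(my_client, &ev, 0 /* atomic */, 0 /* hop */);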
/linux-4.1.27/arch/powerpc/kernel/
H A Deeh_event.c33 * This pair of routines creates an event and queues it onto a
54 struct eeh_event *event; eeh_event_handler() local
61 /* Fetch EEH event from the queue */ eeh_event_handler()
63 event = NULL; eeh_event_handler()
65 event = list_entry(eeh_eventlist.next, eeh_event_handler()
67 list_del(&event->list); eeh_event_handler()
70 if (!event) eeh_event_handler()
73 /* We might have an event without a bound PE */ eeh_event_handler()
74 pe = event->pe; eeh_event_handler()
90 kfree(event); eeh_event_handler()
100 * EEH event.
122 * eeh_send_failure_event - Generate a PCI error event
126 * the actual event will be delivered in a normal context
132 struct eeh_event *event; eeh_send_failure_event() local
134 event = kzalloc(sizeof(*event), GFP_ATOMIC); eeh_send_failure_event()
135 if (!event) { eeh_send_failure_event()
136 pr_err("EEH: out of memory, event not handled\n"); eeh_send_failure_event()
139 event->pe = pe; eeh_send_failure_event()
143 list_add(&event->list, &eeh_eventlist); eeh_send_failure_event()
153 * eeh_remove_event - Remove EEH event from the queue
165 struct eeh_event *event, *tmp; eeh_remove_event() local
172 * Unless "force" is given, an event whose associated eeh_remove_event()
173 * PE has been isolated is left on the queue, eeh_remove_event()
174 * so that the event is not lost. eeh_remove_event()
177 list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) { eeh_remove_event()
178 if (!force && event->pe && eeh_remove_event()
179 (event->pe->state & EEH_PE_ISOLATED)) eeh_remove_event()
183 list_del(&event->list); eeh_remove_event()
184 kfree(event); eeh_remove_event()
186 if (event->pe && event->pe->phb == pe->phb) { eeh_remove_event()
187 list_del(&event->list); eeh_remove_event()
188 kfree(event); eeh_remove_event()
190 } else if (event->pe == pe) { eeh_remove_event()
191 list_del(&event->list); eeh_remove_event()
192 kfree(event); eeh_remove_event()
/linux-4.1.27/tools/perf/
H A Dbuiltin-inject.c4 * Builtin inject command: Examine the live mode (stdin) event stream
38 union perf_event event[0]; member in struct:event_entry
42 union perf_event *event) perf_event__repipe_synth()
47 size = perf_data_file__write(&inject->output, event, perf_event__repipe_synth()
48 event->header.size); perf_event__repipe_synth()
57 union perf_event *event, perf_event__repipe_oe_synth()
60 return perf_event__repipe_synth(tool, event); perf_event__repipe_oe_synth()
64 union perf_event *event, perf_event__repipe_op2_synth()
68 return perf_event__repipe_synth(tool, event); perf_event__repipe_op2_synth()
72 union perf_event *event, perf_event__repipe_attr()
79 ret = perf_event__process_attr(tool, event, pevlist); perf_event__repipe_attr()
86 return perf_event__repipe_synth(tool, event); perf_event__repipe_attr()
90 union perf_event *event, perf_event__repipe()
94 return perf_event__repipe_synth(tool, event); perf_event__repipe()
98 union perf_event *event,
104 union perf_event *event, perf_event__repipe_sample()
111 return f(tool, event, sample, evsel, machine); perf_event__repipe_sample()
114 build_id__mark_dso_hit(tool, event, sample, evsel, machine); perf_event__repipe_sample()
116 return perf_event__repipe_synth(tool, event); perf_event__repipe_sample()
120 union perf_event *event, perf_event__repipe_mmap()
126 err = perf_event__process_mmap(tool, event, sample, machine); perf_event__repipe_mmap()
127 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_mmap()
133 union perf_event *event, perf_event__repipe_mmap2()
139 err = perf_event__process_mmap2(tool, event, sample, machine); perf_event__repipe_mmap2()
140 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_mmap2()
146 union perf_event *event, perf_event__repipe_fork()
152 err = perf_event__process_fork(tool, event, sample, machine); perf_event__repipe_fork()
153 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_fork()
159 union perf_event *event, perf_event__repipe_tracing_data()
164 perf_event__repipe_synth(tool, event); perf_event__repipe_tracing_data()
165 err = perf_event__process_tracing_data(tool, event, session); perf_event__repipe_tracing_data()
201 pr_err("Can't synthesize build_id event for %s\n", dso->long_name); dso__inject_build_id()
209 union perf_event *event, perf_event__inject_buildid()
218 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; perf_event__inject_buildid()
222 pr_err("problem processing %d event, skipping it.\n", perf_event__inject_buildid()
223 event->header.type); perf_event__inject_buildid()
249 perf_event__repipe(tool, event, sample, machine); perf_event__inject_buildid()
254 union perf_event *event __maybe_unused, perf_inject__sched_process_exit()
274 union perf_event *event, perf_inject__sched_switch()
282 perf_inject__sched_process_exit(tool, event, sample, evsel, machine); perf_inject__sched_switch()
284 ent = malloc(event->header.size + sizeof(struct event_entry)); perf_inject__sched_switch()
287 "Not enough memory to process sched switch event!"); perf_inject__sched_switch()
292 memcpy(&ent->event, event, event->header.size); perf_inject__sched_switch()
298 union perf_event *event __maybe_unused, perf_inject__sched_stat()
316 event_sw = &ent->event[0]; perf_inject__sched_stat()
340 pr_err("Samples for %s event do not have %s attribute set.", perf_evsel__check_stype()
41 perf_event__repipe_synth(struct perf_tool *tool, union perf_event *event) perf_event__repipe_synth() argument
56 perf_event__repipe_oe_synth(struct perf_tool *tool, union perf_event *event, struct ordered_events *oe __maybe_unused) perf_event__repipe_oe_synth() argument
63 perf_event__repipe_op2_synth(struct perf_tool *tool, union perf_event *event, struct perf_session *session __maybe_unused) perf_event__repipe_op2_synth() argument
71 perf_event__repipe_attr(struct perf_tool *tool, union perf_event *event, struct perf_evlist **pevlist) perf_event__repipe_attr() argument
89 perf_event__repipe(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) perf_event__repipe() argument
103 perf_event__repipe_sample(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_event__repipe_sample() argument
119 perf_event__repipe_mmap(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_mmap() argument
132 perf_event__repipe_mmap2(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_mmap2() argument
145 perf_event__repipe_fork(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_fork() argument
158 perf_event__repipe_tracing_data(struct perf_tool *tool, union perf_event *event, struct perf_session *session) perf_event__repipe_tracing_data() argument
208 perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __maybe_unused, struct machine *machine) perf_event__inject_buildid() argument
273 perf_inject__sched_switch(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_inject__sched_switch() argument
H A Dbuiltin-kvm.c14 #include "util/trace-event.h"
131 struct kvm_event *event; clear_events_cache_stats() local
137 list_for_each_entry(event, head, hash_entry) { list_for_each_entry()
138 /* reset stats for event */ list_for_each_entry()
139 event->total.time = 0; list_for_each_entry()
140 init_stats(&event->total.stats); list_for_each_entry()
142 for (j = 0; j < event->max_vcpu; ++j) { list_for_each_entry()
143 event->vcpu[j].time = 0; list_for_each_entry()
144 init_stats(&event->vcpu[j].stats); list_for_each_entry()
156 static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) kvm_event_expand() argument
158 int old_max_vcpu = event->max_vcpu; kvm_event_expand()
161 if (vcpu_id < event->max_vcpu) kvm_event_expand()
164 while (event->max_vcpu <= vcpu_id) kvm_event_expand()
165 event->max_vcpu += DEFAULT_VCPU_NUM; kvm_event_expand()
167 prev = event->vcpu; kvm_event_expand()
168 event->vcpu = realloc(event->vcpu, kvm_event_expand()
169 event->max_vcpu * sizeof(*event->vcpu)); kvm_event_expand()
170 if (!event->vcpu) { kvm_event_expand()
176 memset(event->vcpu + old_max_vcpu, 0, kvm_event_expand()
177 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); kvm_event_expand()
183 struct kvm_event *event; kvm_alloc_init_event() local
185 event = zalloc(sizeof(*event)); kvm_alloc_init_event()
186 if (!event) { kvm_alloc_init_event()
191 event->key = *key; kvm_alloc_init_event()
192 init_stats(&event->total.stats); kvm_alloc_init_event()
193 return event; kvm_alloc_init_event()
199 struct kvm_event *event; find_create_kvm_event() local
205 list_for_each_entry(event, head, hash_entry) { list_for_each_entry()
206 if (event->key.key == key->key && event->key.info == key->info) list_for_each_entry()
207 return event; list_for_each_entry()
210 event = kvm_alloc_init_event(key);
211 if (!event)
214 list_add(&event->hash_entry, head);
215 return event;
222 struct kvm_event *event = NULL; handle_begin_event() local
225 event = find_create_kvm_event(kvm, key); handle_begin_event()
227 vcpu_record->last_event = event; handle_begin_event()
239 static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event) kvm_event_rel_stddev() argument
241 struct kvm_event_stats *kvm_stats = &event->total; kvm_event_rel_stddev()
244 kvm_stats = &event->vcpu[vcpu_id]; kvm_event_rel_stddev()
250 static bool update_kvm_event(struct kvm_event *event, int vcpu_id, update_kvm_event() argument
254 kvm_update_event_stats(&event->total, time_diff); update_kvm_event()
258 if (!kvm_event_expand(event, vcpu_id)) update_kvm_event()
261 kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); update_kvm_event()
292 struct kvm_event *event = NULL; handle_child_event() local
295 event = find_create_kvm_event(kvm, key); handle_child_event()
297 vcpu_record->last_event = event; handle_child_event()
302 static bool skip_event(const char *event) skip_event() argument
307 if (!strcmp(event, *skip_events)) skip_event()
318 struct kvm_event *event; handle_end_event() local
327 event = vcpu_record->last_event; handle_end_event()
330 /* The begin event is not caught. */ handle_end_event()
335 * In some cases, the 'begin event' only records the start timestamp, handle_end_event()
336 * and the actual event is recognized in the 'end event' (e.g. mmio-event). handle_end_event()
340 if (!event && key->key == INVALID_KEY) handle_end_event()
343 if (!event) handle_end_event()
344 event = find_create_kvm_event(kvm, key); handle_end_event()
346 if (!event) handle_end_event()
354 pr_debug("End time before begin time; skipping event.\n"); handle_end_event()
363 kvm->events_ops->decode_key(kvm, &event->key, decode); handle_end_event()
365 pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n", handle_end_event()
371 return update_kvm_event(event, vcpu, time_diff); handle_end_event()
427 static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
430 return event->total.field; \
432 if (vcpu >= event->max_vcpu) \
435 return event->vcpu[vcpu].field; \
477 static void insert_to_result(struct rb_root *result, struct kvm_event *event, insert_to_result() argument
488 if (bigger(event, p, vcpu)) insert_to_result()
494 rb_link_node(&event->rb, parent, rb); insert_to_result()
495 rb_insert_color(&event->rb, result); insert_to_result()
499 update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event) update_total_count() argument
503 kvm->total_count += get_event_count(event, vcpu); update_total_count()
504 kvm->total_time += get_event_time(event, vcpu); update_total_count()
507 static bool event_is_valid(struct kvm_event *event, int vcpu) event_is_valid() argument
509 return !!get_event_count(event, vcpu); event_is_valid()
516 struct kvm_event *event; sort_result() local
519 list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) { sort_result()
520 if (event_is_valid(event, vcpu)) { sort_result()
521 update_total_count(kvm, event); sort_result()
522 insert_to_result(&kvm->result, event, sort_result()
579 struct kvm_event *event; print_result() local
599 while ((event = pop_from_result(&kvm->result))) { print_result()
602 ecount = get_event_count(event, vcpu); print_result()
603 etime = get_event_time(event, vcpu); print_result()
604 max = get_event_max(event, vcpu); print_result()
605 min = get_event_min(event, vcpu); print_result()
607 kvm->events_ops->decode_key(kvm, &event->key, decode); print_result()
615 kvm_event_rel_stddev(vcpu, event)); print_result()
628 union perf_event *event __maybe_unused, process_lost_event()
649 union perf_event *event, process_sample_event()
663 pr_debug("problem processing %d event, skipping it.\n", process_sample_event()
664 event->header.type); process_sample_event()
720 union perf_event *event; perf_kvm__mmap_read_idx() local
726 while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) { perf_kvm__mmap_read_idx()
727 err = perf_evlist__parse_sample(kvm->evlist, event, &sample); perf_kvm__mmap_read_idx()
734 err = perf_session__queue_event(kvm->session, event, &sample, 0); perf_kvm__mmap_read_idx()
736 * FIXME: Here we can't consume the event, as perf_session__queue_event will perf_kvm__mmap_read_idx()
1170 set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN); kvm_events_record()
1198 OPT_STRING(0, "event", &kvm->report_event, "report event", kvm_events_report()
1199 "event for reporting: vmexit, " kvm_events_report()
1300 OPT_STRING(0, "event", &kvm->report_event, "report event", kvm_events_live()
1301 "event for reporting: " kvm_events_live()
1323 /* event handling */ kvm_events_live()
1369 * generate the event list kvm_events_live()
648 process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
H A Dlib-eq.c45 * Create an event queue that has room for \a count number of events.
47 * The event queue is circular and older events will be overwritten by new
50 * determine the appropriate size of the event queue to prevent this loss
52 * event loss can happen, since the handler is run for each event deposited
55 * \param count The number of events to be stored in the event queue. It
57 * \param callback A handler function that runs when an event is deposited
59 * indicate that no event handler is desired.
85 CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling events, which adds locking overhead? Please contact the developers to confirm\n", count); LNetEQAlloc()
88 * overhead of enqueue event */ LNetEQAlloc()
100 /* NB allocator has set all event sequence numbers to 0, LNetEQAlloc()
117 * both EQ lookup and poll event with only lnet_eq_wait_lock */ LNetEQAlloc()
142 * Release the resources associated with an event queue if it's idle;
145 * \param eqh A handle for the event queue to be released.
167 * both EQ lookup and poll event with only lnet_eq_wait_lock */ LNetEQFree()
248 /* We've got a new event... */ lnet_eq_dequeue_event()
251 CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n", lnet_eq_dequeue_event()
254 /* ...but did it overwrite an event we've not seen yet? */ lnet_eq_dequeue_event()
270 * A nonblocking function that can be used to get the next event in an EQ.
271 * If an event handler is associated with the EQ, the handler will run before
272 * this function returns successfully. The event is removed from the queue.
274 * \param eventq A handle for the event queue.
275 * \param event On successful return (1 or -EOVERFLOW), this location will
276 * hold the next event in the EQ.
278 * \retval 0 No pending event in the EQ.
281 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
282 * at least one event between this event and the last event obtained from the
286 LNetEQGet(lnet_handle_eq_t eventq, lnet_event_t *event) LNetEQGet() argument
291 event, &which); LNetEQGet()
296 * Block the calling process until there is an event in the EQ.
297 * If an event handler is associated with the EQ, the handler will run before
298 * this function returns successfully. This function returns the next event
301 * \param eventq A handle for the event queue.
302 * \param event On successful return (1 or -EOVERFLOW), this location will
303 * hold the next event in the EQ.
307 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
308 * at least one event between this event and the last event obtained from the
312 LNetEQWait(lnet_handle_eq_t eventq, lnet_event_t *event) LNetEQWait() argument
317 event, &which); LNetEQWait()
332 return -1; /* don't want to wait and no new event */
350 if (tms < 0) /* no more wait but may have new event */
366 * Block the calling process until there's an event from a set of EQs or
369 * If an event handler is associated with the EQ, the handler will run before
370 * this function returns successfully, in which case the corresponding event
377 * \param timeout_ms Time in milliseconds to wait for an event to occur on
380 * \param event,which On successful return (1 or -EOVERFLOW), \a event will
381 * hold the next event in the EQs, and \a which will contain the index of the
382 * EQ from which the event was taken.
384 * \retval 0 No pending event in the EQs after timeout.
386 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
387 * at least one event between this event and the last event obtained from the
393 lnet_event_t *event, int *which) LNetEQPoll()
416 rc = lnet_eq_dequeue_event(eq, event); LNetEQPoll()
429 * -1 : did nothing, and it is certain there is no new event LNetEQPoll()
430 * 1 : slept and waited until a new event arrived LNetEQPoll()
431 * 0 : don't want to wait any more, but there might be a new event LNetEQPoll()
435 if (wait < 0) /* no new event */ LNetEQPoll()
392 LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms, lnet_event_t *event, int *which) LNetEQPoll() argument
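LNetEQGet(), LNetEQWait() and LNetEQPoll() above are three flavours of the same dequeue operation (non-blocking, blocking, and multi-EQ with timeout). A hedged consumer sketch follows; the handle eqh, the done flag and handle_event() are placeholders, and the EQ is assumed to have been created with LNetEQAlloc() without a callback.

/* Hedged sketch: drain one LNet EQ with a 100 ms poll timeout.
 * "eqh", "done" and handle_event() are illustrative names only.
 */
lnet_event_t ev;
int which, rc;

while (!done) {
	rc = LNetEQPoll(&eqh, 1, 100, &ev, &which);
	if (rc == 0)
		continue;               /* timeout, nothing pending */
	if (rc == -EOVERFLOW)
		CWARN("some events were overwritten before being read\n");
	else if (rc < 0)
		break;                  /* hard error */

	handle_event(&ev);              /* rc == 1 or -EOVERFLOW: ev is valid */
}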
/linux-4.1.27/drivers/devfreq/
H A Ddevfreq-event.c2 * devfreq-event: a framework to provide raw data and events of devfreq devices
14 #include <linux/devfreq-event.h>
25 /* The list of all devfreq event list */
32 * devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
33 * the enable_count of devfreq-event dev.
34 * @edev : the devfreq-event device
37 * devfreq-event device. The devfreq-event device should be enabled before
63 * devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
64 * the enable_count of the devfreq-event dev.
65 * @edev : the devfreq-event device
68 * devfreq-event device. After the devfreq-event device is disabled,
69 * the devfreq device can't use the devfreq-event device for get/set/reset
101 * devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
103 * @edev : the devfreq-event device
105 * Note that this function checks whether the devfreq-event dev is enabled or not.
106 * If it returns true, the devfreq-event dev is enabled. If it returns false, the
107 * devfreq-event dev is disabled.
128 * devfreq_event_set_event() - Set event to devfreq-event dev to start.
129 * @edev : the devfreq-event device
131 * Note that this function sets the event on the devfreq-event device to start
132 * collecting event data, which can be of various event types.
156 * devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
157 * @edev : the devfreq-event device
158 * @edata : the calculated data of devfreq-event device
160 * Note that this function gets the calculated event data from the devfreq-event dev
161 * after stopping the whole measurement sequence of the devfreq-event dev.
190 * devfreq_event_reset_event() - Reset all operations of devfreq-event dev.
191 * @edev : the devfreq-event device
193 * Note that this function stops all operations of the devfreq-event dev and resets
194 * the current event data to bring the devfreq-event device back to its initial state.
216 * devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
219 * @index : the index into list of devfreq-event device
221 * Note that this function returns a pointer to the devfreq-event device.
251 dev_err(dev, "unable to get devfreq-event device : %s\n", devfreq_event_get_edev_by_phandle()
264 * devfreq_event_get_edev_count() - Get the count of devfreq-event dev
267 * Note that this function returns the count of devfreq-event devices.
282 "failed to get the count of devfreq-event in %s node\n", devfreq_event_get_edev_count()
299 * devfreq_event_add_edev() - Add new devfreq-event device.
300 * @dev : the device owning the devfreq-event device being created
301 * @desc : the devfreq-event device's descriptor, which includes essential
302 * data for the devfreq-event device.
304 * Note that this function adds a new devfreq-event device to the devfreq-event class
305 * list and registers its struct device.
334 dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1); devfreq_event_add_edev()
353 * devfreq_event_remove_edev() - Remove the devfreq-event device registered.
354 * @dev : the devfreq-event device
356 * Note that this function removes the registered devfreq-event device.
392 * @dev : the device owning the devfreq-event device being created
393 * @desc : the devfreq-event device's descriptor, which includes essential
394 * data for the devfreq-event device.
396 * Note that this function automatically manages the memory of the devfreq-event
398 * for memory of devfreq-event device.
424 * @dev : the device owning the devfreq-event device being created
425 * @edev : the devfreq-event device
427 * Note that this function automatically manages the memory of the devfreq-event
439 * Device attributes for devfreq-event class.
474 devfreq_event_class = class_create(THIS_MODULE, "devfreq-event"); devfreq_event_init()
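Read together, the helpers above describe the consumer flow of the devfreq-event framework: resolve the event device from a phandle, enable it, start an event, then read back the load/total counts. A hedged sketch follows; the parent dev, the phandle index and the surrounding driver are assumptions, and error paths are trimmed.

/* Hedged sketch of a devfreq-event consumer in a driver's sampling path. */
struct devfreq_event_dev *edev;
struct devfreq_event_data edata;
int ret;

edev = devfreq_event_get_edev_by_phandle(dev, 0);    /* index 0 of the consumer's event phandles */
if (IS_ERR(edev))
	return PTR_ERR(edev);

ret = devfreq_event_enable_edev(edev);               /* bumps enable_count */
if (ret < 0)
	return ret;

devfreq_event_set_event(edev);                       /* start counting */
/* ... let the workload run for the sampling period ... */
devfreq_event_get_event(edev, &edata);               /* stop and fetch counters */

dev_info(dev, "load %lu / total %lu\n", edata.load_count, edata.total_count);

devfreq_event_disable_edev(edev);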
/linux-4.1.27/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/
H A DEventClass.py7 # PerfEvent is the base class for all perf event samples, PebsEvent
8 # is a hardware-based Intel x86 PEBS event, and users could add more SW/HW
9 # event classes based on requirements.
15 EVTYPE_PEBS = 1 # Basic PEBS event
16 EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
20 # Currently we don't have a good way to tell the event type, but we can
21 # infer it from the size of the raw buffer: a raw PEBS event with load
22 # latency data is 176 bytes, while a pure PEBS event is 144 bytes.
26 event = PebsEvent(name, comm, dso, symbol, raw_buf)
28 event = PebsNHM(name, comm, dso, symbol, raw_buf)
30 event = PerfEvent(name, comm, dso, symbol, raw_buf)
32 return event
46 print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
49 # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
50 # contains the context info when that event happened: the EFLAGS and
/linux-4.1.27/drivers/net/wireless/rsi/
H A Drsi_common.h35 static inline int rsi_wait_event(struct rsi_event *event, u32 timeout) rsi_wait_event() argument
40 status = wait_event_interruptible(event->event_queue, rsi_wait_event()
41 (atomic_read(&event->event_condition) == 0)); rsi_wait_event()
43 status = wait_event_interruptible_timeout(event->event_queue, rsi_wait_event()
44 (atomic_read(&event->event_condition) == 0), rsi_wait_event()
49 static inline void rsi_set_event(struct rsi_event *event) rsi_set_event() argument
51 atomic_set(&event->event_condition, 0); rsi_set_event()
52 wake_up_interruptible(&event->event_queue); rsi_set_event()
55 static inline void rsi_reset_event(struct rsi_event *event) rsi_reset_event() argument
57 atomic_set(&event->event_condition, 1); rsi_reset_event()
76 rsi_set_event(&handle->event); rsi_kill_thread()
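The rsi_event helpers above are a small wait-flag primitive: rsi_wait_event() sleeps until event_condition reaches 0 (passing 0 as the timeout selects the untimed wait), rsi_set_event() clears the condition and wakes the queue, and rsi_reset_event() re-arms it. A hedged producer/consumer sketch, in which the event instance and both functions are illustrative:

/* Hedged sketch of the rsi_event handshake; assumes "my_event" has had
 * its waitqueue and condition initialised elsewhere in the driver.
 */
static struct rsi_event my_event;

static int consumer_thread(void *data)
{
	while (!kthread_should_stop()) {
		rsi_wait_event(&my_event, 0);     /* 0: wait with no timeout */
		rsi_reset_event(&my_event);       /* re-arm before handling */
		/* ... handle whatever the producer signalled ... */
	}
	return 0;
}

static void producer_kick(void)
{
	/* ... queue the work to be handled ... */
	rsi_set_event(&my_event);                 /* wake the consumer */
}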
/linux-4.1.27/include/linux/iio/
H A Devents.h1 /* The industrial I/O - event passing to userspace
16 * IIO_EVENT_CODE() - create event identifier
18 * @diff: Whether the event is for a differential channel or not.
20 * @direction: Direction of the event. One of enum iio_event_direction.
21 * @type: Type of the event. Should be one of enum iio_event_type.
36 * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
40 * @type: Type of the event. Should be one of enum iio_event_type.
41 * @direction: Direction of the event. One of enum iio_event_direction.
49 * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
52 * @type: Type of the event. Should be one of enum iio_event_type.
53 * @direction: Direction of the event. One of enum iio_event_direction.
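The macros above pack channel type, channel number, modifier, event type and direction into the 64-bit identifier that travels to userspace in struct iio_event_data. A hedged driver-side sketch of reporting a rising threshold crossing on an unmodified voltage channel follows; indio_dev and the timestamp helper are the usual IIO core pieces, assumed to be in scope.

/* Hedged sketch: push one threshold event for voltage channel 0. */
iio_push_event(indio_dev,
	       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
				    IIO_EV_TYPE_THRESH,
				    IIO_EV_DIR_RISING),
	       iio_get_time_ns());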
/linux-4.1.27/arch/tile/kernel/
H A Dperf_event.c17 * This code is based upon the x86 perf event
125 /* TILEPro hardware cache event map */
345 * Check whether perf event is enabled.
407 * Enable performance event by setting
410 static inline void tile_pmu_enable_event(struct perf_event *event) tile_pmu_enable_event() argument
412 struct hw_perf_event *hwc = &event->hw; tile_pmu_enable_event()
445 /* Clear mask bits to enable the event. */ tile_pmu_enable_event()
456 * Disable performance event by clearing
459 static inline void tile_pmu_disable_event(struct perf_event *event) tile_pmu_disable_event() argument
461 struct hw_perf_event *hwc = &event->hw; tile_pmu_disable_event()
488 /* Set mask bits to disable the event. */ tile_pmu_disable_event()
498 * Propagate event elapsed time into the generic event.
499 * Can only be executed on the CPU where the event is active.
502 static u64 tile_perf_event_update(struct perf_event *event) tile_perf_event_update() argument
504 struct hw_perf_event *hwc = &event->hw; tile_perf_event_update()
512 * Careful: an NMI might modify the previous event value. tile_perf_event_update()
516 * count to the generic event atomically: tile_perf_event_update()
530 * (event-)time and add that to the generic event. tile_perf_event_update()
538 local64_add(delta, &event->count); tile_perf_event_update()
546 * To be called with the event disabled in hw:
548 static int tile_event_set_period(struct perf_event *event) tile_event_set_period() argument
550 struct hw_perf_event *hwc = &event->hw; tile_event_set_period()
576 * The hw event starts counting from this event offset, tile_event_set_period()
583 perf_event_update_userpage(event); tile_event_set_period()
589 * Stop the event but do not release the PMU counter
591 static void tile_pmu_stop(struct perf_event *event, int flags) tile_pmu_stop() argument
594 struct hw_perf_event *hwc = &event->hw; tile_pmu_stop()
598 tile_pmu_disable_event(event); tile_pmu_stop()
606 * Drain the remaining delta count out of an event tile_pmu_stop()
609 tile_perf_event_update(event); tile_pmu_stop()
615 * Start an event (without re-assigning counter)
617 static void tile_pmu_start(struct perf_event *event, int flags) tile_pmu_start() argument
620 int idx = event->hw.idx; tile_pmu_start()
622 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) tile_pmu_start()
629 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); tile_pmu_start()
630 tile_event_set_period(event); tile_pmu_start()
633 event->hw.state = 0; tile_pmu_start()
635 cpuc->events[idx] = event; tile_pmu_start()
640 tile_pmu_enable_event(event); tile_pmu_start()
642 perf_event_update_userpage(event); tile_pmu_start()
646 * Add a single event to the PMU.
648 * The event is added to the group of enabled events
651 static int tile_pmu_add(struct perf_event *event, int flags) tile_pmu_add() argument
658 hwc = &event->hw; tile_pmu_add()
666 cpuc->event_list[cpuc->n_events] = event; tile_pmu_add()
687 * Assign counter to event. tile_pmu_add()
689 event->hw.idx = b; tile_pmu_add()
696 tile_pmu_start(event, PERF_EF_RELOAD); tile_pmu_add()
702 * Delete a single event from the PMU.
704 * The event is deleted from the group of enabled events.
705 * If it is the last event, disable PMU interrupt.
707 static void tile_pmu_del(struct perf_event *event, int flags) tile_pmu_del() argument
713 * Remove event from list, compact list if necessary. tile_pmu_del()
716 if (cpuc->event_list[i] == event) { tile_pmu_del()
720 cpuc->events[event->hw.idx] = NULL; tile_pmu_del()
721 __clear_bit(event->hw.idx, &cpuc->used_mask); tile_pmu_del()
722 tile_pmu_stop(event, PERF_EF_UPDATE); tile_pmu_del()
731 perf_event_update_userpage(event); tile_pmu_del()
735 * Propagate event elapsed time into the event.
737 static inline void tile_pmu_read(struct perf_event *event) tile_pmu_read() argument
739 tile_perf_event_update(event); tile_pmu_read()
782 static void tile_event_destroy(struct perf_event *event) tile_event_destroy() argument
788 static int __tile_event_init(struct perf_event *event) __tile_event_init() argument
790 struct perf_event_attr *attr = &event->attr; __tile_event_init()
791 struct hw_perf_event *hwc = &event->hw; __tile_event_init()
829 event->destroy = tile_event_destroy; __tile_event_init()
833 static int tile_event_init(struct perf_event *event) tile_event_init() argument
848 switch (event->attr.type) { tile_event_init()
858 err = __tile_event_init(event); tile_event_init()
860 if (event->destroy) tile_event_init()
861 event->destroy(event); tile_event_init()
884 struct perf_event *event; tile_pmu_handle_irq() local
900 event = cpuc->events[bit]; tile_pmu_handle_irq()
902 if (!event) tile_pmu_handle_irq()
908 hwc = &event->hw; tile_pmu_handle_irq()
910 val = tile_perf_event_update(event); tile_pmu_handle_irq()
914 perf_sample_data_init(&data, 0, event->hw.last_period); tile_pmu_handle_irq()
915 if (!tile_event_set_period(event)) tile_pmu_handle_irq()
918 if (perf_event_overflow(event, &data, regs)) tile_pmu_handle_irq()
919 tile_pmu_stop(event, 0); tile_pmu_handle_irq()
/linux-4.1.27/arch/alpha/oprofile/
H A Dop_model_ev5.c32 /* Select desired events. The event numbers are selected such common_reg_setup()
33 that they map directly into the event selection fields: common_reg_setup()
43 These event numbers are canonicalized to their first appearance. */ common_reg_setup()
47 unsigned long event = ctr[i].event; common_reg_setup() local
53 if (event == 0) common_reg_setup()
54 event = 12+48; common_reg_setup()
55 else if (event == 2+41) common_reg_setup()
56 event = 4+65; common_reg_setup()
59 /* Convert the event numbers onto mux_select bit mask. */ common_reg_setup()
60 if (event < 2) common_reg_setup()
61 ctl |= event << 31; common_reg_setup()
62 else if (event < 24) common_reg_setup()
64 else if (event < 40) common_reg_setup()
65 ctl |= (event - 24) << 4; common_reg_setup()
66 else if (event < 48) common_reg_setup()
67 ctl |= (event - 40) << cbox1_ofs | 15 << 4; common_reg_setup()
68 else if (event < 64) common_reg_setup()
69 ctl |= event - 48; common_reg_setup()
70 else if (event < 72) common_reg_setup()
71 ctl |= (event - 64) << cbox2_ofs | 15; common_reg_setup()
/linux-4.1.27/drivers/bus/
H A Darm-ccn.c160 struct perf_event *event; member in struct:arm_ccn_dt::__anon3658
236 static CCN_FORMAT_ATTR(event, "config:16-23");
267 u32 event; member in struct:arm_ccn_pmu_event
286 .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
292 .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
297 .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
301 .type = CCN_TYPE_HNF, .event = _event, }
304 .type = CCN_TYPE_XP, .event = _event, \
310 * as they all share the same event types.
313 .type = CCN_TYPE_RNI_3P, .event = _event, }
316 .type = CCN_TYPE_SBAS, .event = _event, }
325 struct arm_ccn_pmu_event *event = container_of(attr, arm_ccn_pmu_event_show() local
329 res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type); arm_ccn_pmu_event_show()
330 if (event->event) arm_ccn_pmu_event_show()
331 res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x", arm_ccn_pmu_event_show()
332 event->event); arm_ccn_pmu_event_show()
333 if (event->def) arm_ccn_pmu_event_show()
335 event->def); arm_ccn_pmu_event_show()
336 if (event->mask) arm_ccn_pmu_event_show()
338 event->mask); arm_ccn_pmu_event_show()
351 struct arm_ccn_pmu_event *event = container_of(dev_attr, arm_ccn_pmu_events_is_visible() local
354 if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present) arm_ccn_pmu_events_is_visible()
356 if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present) arm_ccn_pmu_events_is_visible()
527 * as in the worst case scenario (an event every cycle), with 1GHz
590 static void arm_ccn_pmu_event_destroy(struct perf_event *event) arm_ccn_pmu_event_destroy() argument
592 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_destroy()
593 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_destroy()
601 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && arm_ccn_pmu_event_destroy()
602 CCN_CONFIG_EVENT(event->attr.config) == arm_ccn_pmu_event_destroy()
611 ccn->dt.pmu_counters[hw->idx].event = NULL; arm_ccn_pmu_event_destroy()
614 static int arm_ccn_pmu_event_init(struct perf_event *event) arm_ccn_pmu_event_init() argument
617 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_init()
623 if (event->attr.type != event->pmu->type) arm_ccn_pmu_event_init()
626 ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_init()
627 event->destroy = arm_ccn_pmu_event_destroy; arm_ccn_pmu_event_init()
634 if (has_branch_stack(event) || event->attr.exclude_user || arm_ccn_pmu_event_init()
635 event->attr.exclude_kernel || event->attr.exclude_hv || arm_ccn_pmu_event_init()
636 event->attr.exclude_idle) { arm_ccn_pmu_event_init()
641 if (event->cpu < 0) { arm_ccn_pmu_event_init()
646 node_xp = CCN_CONFIG_NODE(event->attr.config); arm_ccn_pmu_event_init()
647 type = CCN_CONFIG_TYPE(event->attr.config); arm_ccn_pmu_event_init()
648 event_id = CCN_CONFIG_EVENT(event->attr.config); arm_ccn_pmu_event_init()
673 /* Validate event ID vs available for the type */ arm_ccn_pmu_event_init()
677 u32 port = CCN_CONFIG_PORT(event->attr.config); arm_ccn_pmu_event_init()
678 u32 vc = CCN_CONFIG_VC(event->attr.config); arm_ccn_pmu_event_init()
682 if (event_id != e->event) arm_ccn_pmu_event_init()
697 dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", arm_ccn_pmu_event_init()
702 /* Watchpoint-based event for a node is actually set on XP */ arm_ccn_pmu_event_init()
710 arm_ccn_pmu_config_set(&event->attr.config, arm_ccn_pmu_event_init()
721 ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; arm_ccn_pmu_event_init()
726 /* Allocate an event counter */ arm_ccn_pmu_event_init()
740 /* Allocate an event source or a watchpoint */ arm_ccn_pmu_event_init()
748 dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", arm_ccn_pmu_event_init()
755 ccn->dt.pmu_counters[hw->idx].event = event; arm_ccn_pmu_event_init()
784 static void arm_ccn_pmu_event_update(struct perf_event *event) arm_ccn_pmu_event_update() argument
786 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_update()
787 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_update()
797 local64_add((new_count - prev_count) & mask, &event->count); arm_ccn_pmu_event_update()
800 static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) arm_ccn_pmu_xp_dt_config() argument
802 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_xp_dt_config()
803 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_xp_dt_config()
807 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) arm_ccn_pmu_xp_dt_config()
808 xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; arm_ccn_pmu_xp_dt_config()
811 CCN_CONFIG_NODE(event->attr.config))]; arm_ccn_pmu_xp_dt_config()
829 static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) arm_ccn_pmu_event_start() argument
831 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_start()
832 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_start()
834 local64_set(&event->hw.prev_count, arm_ccn_pmu_event_start()
843 arm_ccn_pmu_xp_dt_config(event, 1); arm_ccn_pmu_event_start()
846 static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) arm_ccn_pmu_event_stop() argument
848 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_stop()
849 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_stop()
853 arm_ccn_pmu_xp_dt_config(event, 0); arm_ccn_pmu_event_stop()
866 arm_ccn_pmu_event_update(event); arm_ccn_pmu_event_stop()
871 static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) arm_ccn_pmu_xp_watchpoint_config() argument
873 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_xp_watchpoint_config()
874 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_xp_watchpoint_config()
879 u64 cmp_l = event->attr.config1; arm_ccn_pmu_xp_watchpoint_config()
880 u64 cmp_h = event->attr.config2; arm_ccn_pmu_xp_watchpoint_config()
881 u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l; arm_ccn_pmu_xp_watchpoint_config()
882 u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h; arm_ccn_pmu_xp_watchpoint_config()
890 val |= CCN_CONFIG_DIR(event->attr.config) << arm_ccn_pmu_xp_watchpoint_config()
894 val |= CCN_CONFIG_PORT(event->attr.config) << arm_ccn_pmu_xp_watchpoint_config()
898 val |= CCN_CONFIG_VC(event->attr.config) << arm_ccn_pmu_xp_watchpoint_config()
919 static void arm_ccn_pmu_xp_event_config(struct perf_event *event) arm_ccn_pmu_xp_event_config() argument
921 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_xp_event_config()
922 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_xp_event_config()
929 id = (CCN_CONFIG_VC(event->attr.config) << 4) | arm_ccn_pmu_xp_event_config()
930 (CCN_CONFIG_PORT(event->attr.config) << 3) | arm_ccn_pmu_xp_event_config()
931 (CCN_CONFIG_EVENT(event->attr.config) << 0); arm_ccn_pmu_xp_event_config()
940 static void arm_ccn_pmu_node_event_config(struct perf_event *event) arm_ccn_pmu_node_event_config() argument
942 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_node_event_config()
943 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_node_event_config()
946 u32 type = CCN_CONFIG_TYPE(event->attr.config); arm_ccn_pmu_node_event_config()
949 port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config)); arm_ccn_pmu_node_event_config()
968 /* Set the event id for the pre-allocated counter */ arm_ccn_pmu_node_event_config()
972 val |= CCN_CONFIG_EVENT(event->attr.config) << arm_ccn_pmu_node_event_config()
977 static void arm_ccn_pmu_event_config(struct perf_event *event) arm_ccn_pmu_event_config() argument
979 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); arm_ccn_pmu_event_config()
980 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_config()
987 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) arm_ccn_pmu_event_config()
988 xp = CCN_CONFIG_XP(event->attr.config); arm_ccn_pmu_event_config()
990 xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config)); arm_ccn_pmu_event_config()
1002 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) { arm_ccn_pmu_event_config()
1003 if (CCN_CONFIG_EVENT(event->attr.config) == arm_ccn_pmu_event_config()
1005 arm_ccn_pmu_xp_watchpoint_config(event); arm_ccn_pmu_event_config()
1007 arm_ccn_pmu_xp_event_config(event); arm_ccn_pmu_event_config()
1009 arm_ccn_pmu_node_event_config(event); arm_ccn_pmu_event_config()
1015 static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) arm_ccn_pmu_event_add() argument
1017 struct hw_perf_event *hw = &event->hw; arm_ccn_pmu_event_add()
1019 arm_ccn_pmu_event_config(event); arm_ccn_pmu_event_add()
1024 arm_ccn_pmu_event_start(event, PERF_EF_UPDATE); arm_ccn_pmu_event_add()
1029 static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) arm_ccn_pmu_event_del() argument
1031 arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); arm_ccn_pmu_event_del()
1034 static void arm_ccn_pmu_event_read(struct perf_event *event) arm_ccn_pmu_event_read() argument
1036 arm_ccn_pmu_event_update(event); arm_ccn_pmu_event_read()
1052 struct perf_event *event = dt->pmu_counters[idx].event; arm_ccn_pmu_overflow_handler() local
1055 WARN_ON_ONCE(overflowed && !event && arm_ccn_pmu_overflow_handler()
1058 if (!event || !overflowed) arm_ccn_pmu_overflow_handler()
1061 arm_ccn_pmu_event_update(event); arm_ccn_pmu_overflow_handler()
H A Darm-cci.c85 #define CCI_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7)
86 #define CCI_PMU_EVENT_CODE(event) (event & 0x1f)
148 * Instead of an event id to monitor CCI cycles, a dedicated counter is
150 * make use of this event in hardware.
161 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
162 * ports and bits 4:0 are event codes. There are different event codes
169 * the different revisions and are used to validate the event to be monitored.
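As the comment above says, a CCI PMU event id is an 8-bit value with the source interface in bits 7:5 and the event code in bits 4:0; the two macros near the top of the file pull those fields apart. A hedged illustration with an arbitrary value (0x63 is not a documented event):

/* Hedged sketch: decompose an arbitrary CCI PMU event id. */
unsigned long ev = 0x63;

pr_info("source %lu, event code 0x%lx\n",
	CCI_PMU_EVENT_SOURCE(ev),   /* (0x63 >> 5) & 0x7 == 3 */
	CCI_PMU_EVENT_CODE(ev));    /* 0x63 & 0x1f == 0x03    */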
262 static void pmu_set_event(int idx, unsigned long event) pmu_set_event() argument
264 pmu_write_register(event, idx, CCI_PMU_EVT_SEL); pmu_set_event()
276 static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) pmu_get_event_idx() argument
278 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); pmu_get_event_idx()
279 struct hw_perf_event *hw_event = &event->hw; pmu_get_event_idx()
298 static int pmu_map_event(struct perf_event *event) pmu_map_event() argument
301 unsigned long config = event->attr.config; pmu_map_event()
303 if (event->attr.type < PERF_TYPE_MAX) pmu_map_event()
361 static u32 pmu_read_counter(struct perf_event *event) pmu_read_counter() argument
363 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); pmu_read_counter()
364 struct hw_perf_event *hw_counter = &event->hw; pmu_read_counter()
377 static void pmu_write_counter(struct perf_event *event, u32 value) pmu_write_counter() argument
379 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); pmu_write_counter()
380 struct hw_perf_event *hw_counter = &event->hw; pmu_write_counter()
389 static u64 pmu_event_update(struct perf_event *event) pmu_event_update() argument
391 struct hw_perf_event *hwc = &event->hw; pmu_event_update()
396 new_raw_count = pmu_read_counter(event); pmu_event_update()
402 local64_add(delta, &event->count); pmu_event_update()
407 static void pmu_read(struct perf_event *event) pmu_read() argument
409 pmu_event_update(event); pmu_read()
412 void pmu_event_set_period(struct perf_event *event) pmu_event_set_period() argument
414 struct hw_perf_event *hwc = &event->hw; pmu_event_set_period()
423 pmu_write_counter(event, val); pmu_event_set_period()
440 struct perf_event *event = events->events[idx]; pmu_handle_irq() local
443 if (!event) pmu_handle_irq()
446 hw_counter = &event->hw; pmu_handle_irq()
455 pmu_event_update(event); pmu_handle_irq()
456 pmu_event_set_period(event); pmu_handle_irq()
479 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
481 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); hw_perf_event_destroy()
526 static void cci_pmu_start(struct perf_event *event, int pmu_flags) cci_pmu_start() argument
528 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_start()
530 struct hw_perf_event *hwc = &event->hw; cci_pmu_start()
550 /* Configure the event to count, unless you are counting cycles */ cci_pmu_start()
554 pmu_event_set_period(event); cci_pmu_start()
560 static void cci_pmu_stop(struct perf_event *event, int pmu_flags) cci_pmu_stop() argument
562 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_stop()
563 struct hw_perf_event *hwc = &event->hw; cci_pmu_stop()
579 pmu_event_update(event); cci_pmu_stop()
583 static int cci_pmu_add(struct perf_event *event, int flags) cci_pmu_add() argument
585 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_add()
587 struct hw_perf_event *hwc = &event->hw; cci_pmu_add()
591 perf_pmu_disable(event->pmu); cci_pmu_add()
594 idx = pmu_get_event_idx(hw_events, event); cci_pmu_add()
600 event->hw.idx = idx; cci_pmu_add()
601 hw_events->events[idx] = event; cci_pmu_add()
605 cci_pmu_start(event, PERF_EF_RELOAD); cci_pmu_add()
608 perf_event_update_userpage(event); cci_pmu_add()
611 perf_pmu_enable(event->pmu); cci_pmu_add()
615 static void cci_pmu_del(struct perf_event *event, int flags) cci_pmu_del() argument
617 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_del()
619 struct hw_perf_event *hwc = &event->hw; cci_pmu_del()
622 cci_pmu_stop(event, PERF_EF_UPDATE); cci_pmu_del()
626 perf_event_update_userpage(event); cci_pmu_del()
632 struct perf_event *event) validate_event()
634 if (is_software_event(event)) validate_event()
640 * until after pmu->event_init(event). validate_event()
642 if (event->pmu != cci_pmu) validate_event()
645 if (event->state < PERF_EVENT_STATE_OFF) validate_event()
648 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) validate_event()
651 return pmu_get_event_idx(hw_events, event) >= 0; validate_event()
655 validate_group(struct perf_event *event) validate_group() argument
657 struct perf_event *sibling, *leader = event->group_leader; validate_group()
666 if (!validate_event(event->pmu, &fake_pmu, leader)) validate_group()
670 if (!validate_event(event->pmu, &fake_pmu, sibling)) validate_group()
674 if (!validate_event(event->pmu, &fake_pmu, event)) validate_group()
681 __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
683 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
686 mapping = pmu_map_event(event); __hw_perf_event_init()
689 pr_debug("event %x:%llx not supported\n", event->attr.type, __hw_perf_event_init()
690 event->attr.config); __hw_perf_event_init()
695 * We don't assign an index until we actually place the event onto __hw_perf_event_init()
705 * Store the event encoding into the config_base field. __hw_perf_event_init()
718 if (event->group_leader != event) { __hw_perf_event_init()
719 if (validate_group(event) != 0) __hw_perf_event_init()
726 static int cci_pmu_event_init(struct perf_event *event) cci_pmu_event_init() argument
728 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); cci_pmu_event_init()
733 if (event->attr.type != event->pmu->type) cci_pmu_event_init()
737 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) cci_pmu_event_init()
741 if (event->attr.exclude_user || cci_pmu_event_init()
742 event->attr.exclude_kernel || cci_pmu_event_init()
743 event->attr.exclude_hv || cci_pmu_event_init()
744 event->attr.exclude_idle || cci_pmu_event_init()
745 event->attr.exclude_host || cci_pmu_event_init()
746 event->attr.exclude_guest) cci_pmu_event_init()
755 * the event being installed into its context, so the PMU's CPU can't cci_pmu_event_init()
759 if (event->cpu < 0 || cpu < 0) cci_pmu_event_init()
761 event->cpu = cpu; cci_pmu_event_init()
763 event->destroy = hw_perf_event_destroy; cci_pmu_event_init()
775 err = __hw_perf_event_init(event); cci_pmu_event_init()
777 hw_perf_event_destroy(event); cci_pmu_event_init()
845 * TODO: migrate context once core races on event->ctx have cci_pmu_cpu_notifier()
630 validate_event(struct pmu *cci_pmu, struct cci_pmu_hw_events *hw_events, struct perf_event *event) validate_event() argument
/linux-4.1.27/include/uapi/linux/iio/
H A Devents.h1 /* The industrial I/O - event passing to userspace
16 * struct iio_event_data - The actual event being pushed to userspace
17 * @id: event identifier
18 * @timestamp: best estimate of time of event occurrence (often from
34 /* Event code number extraction depends on which type of event we have.
/linux-4.1.27/drivers/char/
H A Dsnsc_event.c12 * System controller event handler
60 * Break an event (as read from SAL) into useful pieces so we can decide
64 scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) scdrv_parse_event() argument
68 /* record event source address */ scdrv_parse_event()
69 *src = get_unaligned_be32(event); scdrv_parse_event()
70 event += 4; /* move on to event code */ scdrv_parse_event()
72 /* record the system controller's event code */ scdrv_parse_event()
73 *code = get_unaligned_be32(event); scdrv_parse_event()
74 event += 4; /* move on to event arguments */ scdrv_parse_event()
77 if (*event++ != 2) { scdrv_parse_event()
83 if (*event++ != IR_ARG_INT) { scdrv_parse_event()
87 *esp_code = get_unaligned_be32(event); scdrv_parse_event()
88 event += 4; scdrv_parse_event()
90 /* parse out the event description */ scdrv_parse_event()
91 if (*event++ != IR_ARG_ASCII) { scdrv_parse_event()
95 event[CHUNKSIZE-1] = '\0'; /* ensure this string ends! */ scdrv_parse_event()
96 event += 2; /* skip leading CR/LF */ scdrv_parse_event()
97 desc_end = desc + sprintf(desc, "%s", event); scdrv_parse_event()
180 * Do the right thing with an incoming event. That's often nothing
185 scdrv_dispatch_event(char *event, int len) scdrv_dispatch_event() argument
192 if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) { scdrv_dispatch_event()
193 /* ignore uninterpretable event */ scdrv_dispatch_event()
208 /* give a message for each type of event */ scdrv_dispatch_event()
229 * Called as a tasklet when an event arrives from the L1. Read the event
263 * Sets up a system controller subchannel to begin receiving event
275 " for event monitoring\n", __func__); scdrv_event_init()
288 printk(KERN_WARNING "%s: couldn't open event subchannel\n", scdrv_event_init()
293 /* hook event subchannel up to the system controller interrupt */ scdrv_event_init()
/linux-4.1.27/drivers/char/tpm/
H A Dtpm_eventlog.c78 struct tcpa_event *event; tpm_bios_measurements_start() local
82 event = addr; tpm_bios_measurements_start()
85 if (event->event_type == 0 && event->event_size == 0) tpm_bios_measurements_start()
87 addr += sizeof(struct tcpa_event) + event->event_size; tpm_bios_measurements_start()
95 event = addr; tpm_bios_measurements_start()
97 if ((event->event_type == 0 && event->event_size == 0) || tpm_bios_measurements_start()
98 ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit)) tpm_bios_measurements_start()
107 struct tcpa_event *event = v; tpm_bios_measurements_next() local
111 v += sizeof(struct tcpa_event) + event->event_size; tpm_bios_measurements_next()
117 event = v; tpm_bios_measurements_next()
119 if (event->event_type == 0 && event->event_size == 0) tpm_bios_measurements_next()
122 if ((event->event_type == 0 && event->event_size == 0) || tpm_bios_measurements_next()
123 ((v + sizeof(struct tcpa_event) + event->event_size) >= limit)) tpm_bios_measurements_next()
134 static int get_event_name(char *dest, struct tcpa_event *event, get_event_name() argument
143 switch(event->event_type) { get_event_name()
159 name = tcpa_event_type_strings[event->event_type]; get_event_name()
164 if (MAX_TEXT_EVENT > event->event_size) { get_event_name()
166 n_len = event->event_size; get_event_name()
211 struct tcpa_event *event = v; tpm_binary_bios_measurements_show() local
215 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) tpm_binary_bios_measurements_show()
239 struct tcpa_event *event = v; tpm_ascii_bios_measurements_show() local
245 printk(KERN_ERR "%s: ERROR - No Memory for event name\n ", tpm_ascii_bios_measurements_show()
250 seq_printf(m, "%2d ", event->pcr_index); tpm_ascii_bios_measurements_show()
253 seq_printf(m, "%20phN", event->pcr_value); tpm_ascii_bios_measurements_show()
255 /* 3rd: event type identifier */ tpm_ascii_bios_measurements_show()
256 seq_printf(m, " %02x", event->event_type); tpm_ascii_bios_measurements_show()
258 len += get_event_name(eventname, event, event_entry); tpm_ascii_bios_measurements_show()
/linux-4.1.27/net/irda/ircomm/
H A Dircomm_event.c41 static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
43 static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
45 static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
47 static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
76 static int (*state[])(struct ircomm_cb *self, IRCOMM_EVENT event,
86 * Function ircomm_state_idle (self, event, skb)
91 static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_state_idle() argument
96 switch (event) { ircomm_state_idle()
107 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_state_idle()
108 ircomm_event[event]); ircomm_state_idle()
115 * Function ircomm_state_waiti (self, event, skb)
120 static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_state_waiti() argument
125 switch (event) { ircomm_state_waiti()
137 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_state_waiti()
138 ircomm_event[event]); ircomm_state_waiti()
145 * Function ircomm_state_waitr (self, event, skb)
150 static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_state_waitr() argument
155 switch (event) { ircomm_state_waitr()
170 pr_debug("%s(), unknown event = %s\n", __func__ , ircomm_state_waitr()
171 ircomm_event[event]); ircomm_state_waitr()
178 * Function ircomm_state_conn (self, event, skb)
183 static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_state_conn() argument
188 switch (event) { ircomm_state_conn()
212 pr_debug("%s(), unknown event = %s\n", __func__ , ircomm_state_conn()
213 ircomm_event[event]); ircomm_state_conn()
220 * Function ircomm_do_event (self, event, skb)
222 * Process event
225 int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_do_event() argument
228 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_do_event()
229 ircomm_state[self->state], ircomm_event[event]); ircomm_do_event()
231 return (*state[self->state])(self, event, skb, info); ircomm_do_event()
H A Dircomm_tty_attach.c58 IRCOMM_TTY_EVENT event,
62 IRCOMM_TTY_EVENT event,
66 IRCOMM_TTY_EVENT event,
70 IRCOMM_TTY_EVENT event,
74 IRCOMM_TTY_EVENT event,
78 IRCOMM_TTY_EVENT event,
109 static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
612 * Function ircomm_tty_do_event (self, event, skb)
614 * Process event
617 int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, ircomm_tty_do_event() argument
623 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_do_event()
624 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_do_event()
626 return (*state[self->state])(self, event, skb, info); ircomm_tty_do_event()
648 * Function ircomm_tty_state_idle (self, event, skb, info)
654 IRCOMM_TTY_EVENT event, ircomm_tty_state_idle()
660 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_idle()
661 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_idle()
662 switch (event) { ircomm_tty_state_idle()
704 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_idle()
705 ircomm_tty_event[event]); ircomm_tty_state_idle()
712 * Function ircomm_tty_state_search (self, event, skb, info)
718 IRCOMM_TTY_EVENT event, ircomm_tty_state_search()
724 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_search()
725 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_search()
727 switch (event) { ircomm_tty_state_search()
777 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_search()
778 ircomm_tty_event[event]); ircomm_tty_state_search()
785 * Function ircomm_tty_state_query (self, event, skb, info)
791 IRCOMM_TTY_EVENT event, ircomm_tty_state_query_parameters()
797 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_query_parameters()
798 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_query_parameters()
800 switch (event) { ircomm_tty_state_query_parameters()
835 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_query_parameters()
836 ircomm_tty_event[event]); ircomm_tty_state_query_parameters()
843 * Function ircomm_tty_state_query_lsap_sel (self, event, skb, info)
849 IRCOMM_TTY_EVENT event, ircomm_tty_state_query_lsap_sel()
855 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_query_lsap_sel()
856 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_query_lsap_sel()
858 switch (event) { ircomm_tty_state_query_lsap_sel()
884 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_query_lsap_sel()
885 ircomm_tty_event[event]); ircomm_tty_state_query_lsap_sel()
892 * Function ircomm_tty_state_setup (self, event, skb, info)
898 IRCOMM_TTY_EVENT event, ircomm_tty_state_setup()
904 pr_debug("%s: state=%s, event=%s\n", __func__ , ircomm_tty_state_setup()
905 ircomm_tty_state[self->state], ircomm_tty_event[event]); ircomm_tty_state_setup()
907 switch (event) { ircomm_tty_state_setup()
938 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_setup()
939 ircomm_tty_event[event]); ircomm_tty_state_setup()
946 * Function ircomm_tty_state_ready (self, event, skb, info)
952 IRCOMM_TTY_EVENT event, ircomm_tty_state_ready()
958 switch (event) { ircomm_tty_state_ready()
981 pr_debug("%s(), unknown event: %s\n", __func__ , ircomm_tty_state_ready()
982 ircomm_tty_event[event]); ircomm_tty_state_ready()
653 ircomm_tty_state_idle(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_idle() argument
717 ircomm_tty_state_search(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_search() argument
790 ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_query_parameters() argument
848 ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_query_lsap_sel() argument
897 ircomm_tty_state_setup(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_setup() argument
951 ircomm_tty_state_ready(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) ircomm_tty_state_ready() argument
/linux-4.1.27/tools/perf/util/scripting-engines/
H A Dtrace-event-perl.c2 * trace-event-perl. Feed perf script events to an embedded Perl interpreter.
35 #include "../event.h"
36 #include "../trace-event.h"
186 static void define_event_symbols(struct event_format *event, define_event_symbols() argument
203 define_event_symbols(event, ev_name, args->flags.field); define_event_symbols()
208 define_event_symbols(event, ev_name, args->symbol.field); define_event_symbols()
214 define_event_symbols(event, ev_name, args->hex.field); define_event_symbols()
215 define_event_symbols(event, ev_name, args->hex.size); define_event_symbols()
218 define_event_symbols(event, ev_name, args->int_array.field); define_event_symbols()
219 define_event_symbols(event, ev_name, args->int_array.count); define_event_symbols()
220 define_event_symbols(event, ev_name, args->int_array.el_size); define_event_symbols()
228 define_event_symbols(event, ev_name, args->typecast.item); define_event_symbols()
233 define_event_symbols(event, ev_name, args->op.left); define_event_symbols()
234 define_event_symbols(event, ev_name, args->op.right); define_event_symbols()
244 define_event_symbols(event, ev_name, args->next); define_event_symbols()
251 struct event_format *event = evsel->tp_format; perl_process_tracepoint() local
267 if (!event) perl_process_tracepoint()
268 die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config); perl_process_tracepoint()
270 pid = raw_field_value(event, "common_pid", data); perl_process_tracepoint()
272 sprintf(handler, "%s::%s", event->system, event->name); perl_process_tracepoint()
274 if (!test_and_set_bit(event->id, events_defined)) perl_process_tracepoint()
275 define_event_symbols(event, handler, event->print_fmt.args); perl_process_tracepoint()
297 for (field = event->format.fields; field; field = field->next) { perl_process_tracepoint()
307 val = read_size(event, data + field->offset, perl_process_tracepoint()
336 static void perl_process_event_generic(union perf_event *event, perl_process_event_generic() argument
348 XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size))); perl_process_event_generic()
360 static void perl_process_event(union perf_event *event, perl_process_event() argument
366 perl_process_event_generic(event, sample, evsel); perl_process_event()
446 struct event_format *event = NULL; perl_generate_script() local
459 fprintf(ofp, "# perf script event handlers, " perl_generate_script()
465 fprintf(ofp, "# The common_* event handler fields are the most useful " perl_generate_script()
491 while ((event = trace_find_next_event(pevent, event))) { perl_generate_script()
492 fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); perl_generate_script()
506 for (f = event->format.fields; f; f = f->next) { perl_generate_script()
525 for (f = event->format.fields; f; f = f->next) { perl_generate_script()
549 for (f = event->format.fields; f; f = f->next) { perl_generate_script()
562 fprintf(ofp, "%s::%s\", ", event->system, perl_generate_script()
563 event->name); perl_generate_script()
572 fprintf(ofp, "%s::%s\", ", event->system, perl_generate_script()
573 event->name); perl_generate_script()
600 "# $event:\tunion perf_event\tutil/event.h\n" perl_generate_script()
602 "# $sample:\tstruct perf_sample\tutil/event.h\n" perl_generate_script()
603 "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n" perl_generate_script()
607 "\tmy ($event, $attr, $sample, $raw_data) = @_;\n" perl_generate_script()
609 "\tmy @event\t= unpack(\"LSS\", $event);\n" perl_generate_script()
615 "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n" perl_generate_script()
/linux-4.1.27/drivers/misc/vmw_vmci/
H A Dvmci_event.c31 u32 event; member in struct:vmci_subscription
98 subscriber_list = &subscriber_array[event_msg->event_data.event]; list_for_each_entry_rcu()
108 * subscribers for given event.
118 if (!VMCI_EVENT_VALID(event_msg->event_data.event)) vmci_event_dispatch()
126 * vmci_event_subscribe() - Subscribe to a given event.
127 * @event: The event to subscribe to.
128 * @callback: The callback to invoke upon the event.
133 * Subscribes to the provided event. The callback specified will be
136 int vmci_event_subscribe(u32 event, vmci_event_subscribe() argument
151 if (!VMCI_EVENT_VALID(event) || !callback) { vmci_event_subscribe()
152 pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n", vmci_event_subscribe()
153 __func__, event, callback, callback_data); vmci_event_subscribe()
162 sub->event = event; vmci_event_subscribe()
169 /* Creation of a new event is always allowed. */ vmci_event_subscribe()
186 list_add_rcu(&sub->node, &subscriber_array[event]); vmci_event_subscribe()
200 * vmci_event_unsubscribe() - unsubscribe from an event.
203 * Unsubscribe from given event. Removes it from list and frees it.
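The vmci_event.c hits describe a subscribe/dispatch/unsubscribe API: vmci_event_subscribe() records a callback for one event type and vmci_event_dispatch() walks the per-event subscriber list. A hedged sketch of a subscriber follows; only the event, callback and callback_data parameters are visible in the hits, so the callback prototype and the trailing subscription-id argument are assumptions taken from the exported vmw_vmci_api.h interface.

/* Hedged sketch: callback prototype and the subscription-id out-parameter are
 * assumed from <linux/vmw_vmci_api.h>; only event/callback/callback_data are
 * shown in the hits above. */
#include <linux/kernel.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

static u32 my_sub_id = VMCI_INVALID_ID;

static void my_event_cb(u32 sub_id, const struct vmci_event_data *ed,
			void *client_data)
{
	pr_info("VMCI event %u delivered to subscription %u\n",
		ed->event, sub_id);
}

static int my_subscribe(void)
{
	return vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
				    my_event_cb, NULL, &my_sub_id);
}

static void my_unsubscribe(void)
{
	vmci_event_unsubscribe(my_sub_id);
}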
/linux-4.1.27/drivers/net/fddi/skfp/
H A Dqueue.c32 * init event queue management
40 * add event to queue
42 void queue_event(struct s_smc *smc, int class, int event) queue_event() argument
44 PRINTF("queue class %d event %d\n",class,event) ; queue_event()
46 smc->q.ev_put->event = event ; queue_event()
60 PRINTF("timer event class %d token %d\n", timer_event()
67 * event dispatcher
68 * while event queue is not empty
69 * get event from queue
81 PRINTF("dispatch class %d event %d\n",ev->class,ev->event) ; ev_dispatcher()
84 ecm(smc,(int)ev->event) ; ev_dispatcher()
87 cfm(smc,(int)ev->event) ; ev_dispatcher()
90 rmt(smc,(int)ev->event) ; ev_dispatcher()
93 smt_event(smc,(int)ev->event) ; ev_dispatcher()
97 timer_test_event(smc,(int)ev->event) ; ev_dispatcher()
105 pcm(smc,class - EVENT_PCMA,(int)ev->event) ; ev_dispatcher()
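queue_event() and ev_dispatcher() above implement a small ring of {class, event} pairs: events are queued as they arrive and later fanned out by class to the matching SMT state machine (ecm, cfm, rmt, pcm, ...). A generic sketch of that put/dispatch pattern, with illustrative sizes and types:

/* Put/dispatch event queue in the style of queue_event()/ev_dispatcher(). */
#define EV_QUEUE_LEN 32

struct ev_entry { int class; int event; };

static struct ev_entry ev_queue[EV_QUEUE_LEN];
static unsigned int ev_put, ev_get;

static void my_queue_event(int class, int event)
{
	ev_queue[ev_put++ % EV_QUEUE_LEN] = (struct ev_entry){ class, event };
}

static void my_ev_dispatcher(void (*handler[])(int event))
{
	while (ev_get != ev_put) {
		struct ev_entry *ev = &ev_queue[ev_get++ % EV_QUEUE_LEN];

		handler[ev->class](ev->event);	/* e.g. ecm(), cfm(), rmt() above */
	}
}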
/linux-4.1.27/arch/powerpc/oprofile/
H A Dop_model_7450.c39 #define mmcr0_event1(event) \
40 ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
41 #define mmcr0_event2(event) \
42 ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)
44 #define mmcr1_event3(event) \
45 ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
46 #define mmcr1_event4(event) \
47 ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
48 #define mmcr1_event5(event) \
49 ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
50 #define mmcr1_event6(event) \
51 ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)
112 mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event) fsl7450_reg_setup()
113 | mmcr0_event2(ctr[1].event); fsl7450_reg_setup()
123 mmcr1_val = mmcr1_event3(ctr[2].event) fsl7450_reg_setup()
124 | mmcr1_event4(ctr[3].event); fsl7450_reg_setup()
126 mmcr1_val |= mmcr1_event5(ctr[4].event) fsl7450_reg_setup()
127 | mmcr1_event6(ctr[5].event); fsl7450_reg_setup()
171 * event that triggered the interrupt */ fsl7450_handle_interrupt()
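The mmcr0_event1()..mmcr1_event6() macros above simply shift a raw event code into the PMCn select field of the relevant MMCR and mask off anything that does not fit. A sketch of the same construction with made-up shift and mask values, since the real MMCR0_PMC1_SHIFT/MMCR0_PMC1SEL constants are not part of the hits:

/* Illustrative only: the real shift/mask constants live in the 7450 headers. */
#define MY_PMC1_SHIFT	6
#define MY_PMC1SEL	(0x3f << MY_PMC1_SHIFT)

#define my_mmcr0_event1(event)	(((event) << MY_PMC1_SHIFT) & MY_PMC1SEL)

/* e.g. selecting event 0x02 on PMC1 places it in the 6-bit field at bits 6..11 */
static unsigned int mmcr0_val = my_mmcr0_event1(0x02);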
/linux-4.1.27/kernel/trace/
H A Dtrace_output.c228 struct ftrace_event_call *event; ftrace_raw_output_prep() local
233 event = container_of(trace_event, struct ftrace_event_call, event); ftrace_raw_output_prep()
236 if (entry->type != event->event.type) { ftrace_raw_output_prep()
242 trace_seq_printf(s, "%s: ", ftrace_event_name(event)); ftrace_raw_output_prep()
634 * ftrace_find_event - find a registered event
635 * @type: the type of event to look for
637 * Returns an event of type @type otherwise NULL
642 struct trace_event *event; ftrace_find_event() local
647 hlist_for_each_entry(event, &event_hash[key], node) { ftrace_find_event()
648 if (event->type == type) ftrace_find_event()
649 return event; ftrace_find_event()
696 * register_ftrace_event - register output for an event type
697 * @event: the event type to register
700 * find a way to print an event. If the @event->type is set
708 * Returns the event type number or zero on error.
710 int register_ftrace_event(struct trace_event *event) register_ftrace_event() argument
717 if (WARN_ON(!event)) register_ftrace_event()
720 if (WARN_ON(!event->funcs)) register_ftrace_event()
723 INIT_LIST_HEAD(&event->list); register_ftrace_event()
725 if (!event->type) { register_ftrace_event()
730 event->type = trace_search_list(&list); register_ftrace_event()
731 if (!event->type) register_ftrace_event()
736 event->type = next_event_type++; register_ftrace_event()
740 if (WARN_ON(ftrace_find_event(event->type))) register_ftrace_event()
743 list_add_tail(&event->list, list); register_ftrace_event()
745 } else if (event->type > __TRACE_LAST_TYPE) { register_ftrace_event()
750 /* Is this event already used */ register_ftrace_event()
751 if (ftrace_find_event(event->type)) register_ftrace_event()
755 if (event->funcs->trace == NULL) register_ftrace_event()
756 event->funcs->trace = trace_nop_print; register_ftrace_event()
757 if (event->funcs->raw == NULL) register_ftrace_event()
758 event->funcs->raw = trace_nop_print; register_ftrace_event()
759 if (event->funcs->hex == NULL) register_ftrace_event()
760 event->funcs->hex = trace_nop_print; register_ftrace_event()
761 if (event->funcs->binary == NULL) register_ftrace_event()
762 event->funcs->binary = trace_nop_print; register_ftrace_event()
764 key = event->type & (EVENT_HASHSIZE - 1); register_ftrace_event()
766 hlist_add_head(&event->node, &event_hash[key]); register_ftrace_event()
768 ret = event->type; register_ftrace_event()
779 int __unregister_ftrace_event(struct trace_event *event) __unregister_ftrace_event() argument
781 hlist_del(&event->node); __unregister_ftrace_event()
782 list_del(&event->list); __unregister_ftrace_event()
787 * unregister_ftrace_event - remove a no longer used event
788 * @event: the event to remove
790 int unregister_ftrace_event(struct trace_event *event) unregister_ftrace_event() argument
793 __unregister_ftrace_event(event); unregister_ftrace_event()
805 struct trace_event *event) trace_nop_print()
814 struct trace_event *event) trace_fn_trace()
834 struct trace_event *event) trace_fn_raw()
848 struct trace_event *event) trace_fn_hex()
862 struct trace_event *event) trace_fn_bin()
915 struct trace_event *event) trace_ctx_print()
921 int flags, struct trace_event *event) trace_wake_print()
949 struct trace_event *event) trace_ctx_raw()
955 struct trace_event *event) trace_wake_raw()
985 struct trace_event *event) trace_ctx_hex()
991 struct trace_event *event) trace_wake_hex()
997 int flags, struct trace_event *event) trace_ctxwake_bin()
1042 int flags, struct trace_event *event) trace_stack_print()
1078 int flags, struct trace_event *event) trace_user_stack_print()
1103 struct trace_event *event) trace_bputs_print()
1121 struct trace_event *event) trace_bputs_raw()
1147 struct trace_event *event) trace_bprint_print()
1165 struct trace_event *event) trace_bprint_raw()
1190 int flags, struct trace_event *event) trace_print_print()
1204 struct trace_event *event) trace_print_raw()
1240 struct trace_event *event; init_events() local
1244 event = events[i]; init_events()
1246 ret = register_ftrace_event(event); init_events()
1248 printk(KERN_WARNING "event %d failed to register\n", init_events()
1249 event->type); init_events()
804 trace_nop_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_nop_print() argument
813 trace_fn_trace(struct trace_iterator *iter, int flags, struct trace_event *event) trace_fn_trace() argument
833 trace_fn_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_fn_raw() argument
847 trace_fn_hex(struct trace_iterator *iter, int flags, struct trace_event *event) trace_fn_hex() argument
861 trace_fn_bin(struct trace_iterator *iter, int flags, struct trace_event *event) trace_fn_bin() argument
914 trace_ctx_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_ctx_print() argument
920 trace_wake_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_wake_print() argument
948 trace_ctx_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_ctx_raw() argument
954 trace_wake_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_wake_raw() argument
984 trace_ctx_hex(struct trace_iterator *iter, int flags, struct trace_event *event) trace_ctx_hex() argument
990 trace_wake_hex(struct trace_iterator *iter, int flags, struct trace_event *event) trace_wake_hex() argument
996 trace_ctxwake_bin(struct trace_iterator *iter, int flags, struct trace_event *event) trace_ctxwake_bin() argument
1041 trace_stack_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_stack_print() argument
1077 trace_user_stack_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_user_stack_print() argument
1102 trace_bputs_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_bputs_print() argument
1120 trace_bputs_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_bputs_raw() argument
1146 trace_bprint_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_bprint_print() argument
1164 trace_bprint_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_bprint_raw() argument
1189 trace_print_print(struct trace_iterator *iter, int flags, struct trace_event *event) trace_print_print() argument
1203 trace_print_raw(struct trace_iterator *iter, int flags, struct trace_event *event) trace_print_raw() argument
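register_ftrace_event() above takes a struct trace_event whose ->funcs supply the trace/raw/hex/binary printers (each defaulted to trace_nop_print() when NULL) and whose ->type is allocated dynamically when left at zero; the function returns the assigned type number, or zero on error. A hedged sketch of a minimal registration against the 4.1-era ftrace_event.h interface; a real tracepoint would normally be generated through the TRACE_EVENT() macros instead:

/* Hedged sketch: struct layout and header name are the 4.1-era ones implied
 * by the handlers above (include/linux/ftrace_event.h). */
#include <linux/errno.h>
#include <linux/ftrace_event.h>

static enum print_line_t my_trace_print(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	trace_seq_puts(&iter->seq, "my event\n");
	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions my_funcs = {
	.trace	= my_trace_print,
	/* .raw/.hex/.binary left NULL: defaulted to trace_nop_print() above */
};

static struct trace_event my_trace_event = {
	.type	= 0,		/* 0 lets register_ftrace_event() pick a type */
	.funcs	= &my_funcs,
};

static int my_register(void)
{
	int type = register_ftrace_event(&my_trace_event);

	return type ? 0 : -EBUSY;	/* zero return means registration failed */
}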
H A Dring_buffer_benchmark.c80 struct ring_buffer_event *event; read_event() local
84 event = ring_buffer_consume(buffer, cpu, &ts, NULL); read_event()
85 if (!event) read_event()
88 entry = ring_buffer_event_data(event); read_event()
100 struct ring_buffer_event *event; read_page() local
116 /* The commit may have missed event flags set, clear them */ read_page()
126 event = (void *)&rpage->data[i]; read_page()
127 switch (event->type_len) { read_page()
130 if (!event->time_delta) read_page()
132 inc = event->array[0] + 4; read_page()
138 entry = ring_buffer_event_data(event); read_page()
144 if (!event->array[0]) { read_page()
148 inc = event->array[0] + 4; read_page()
151 entry = ring_buffer_event_data(event); read_page()
157 inc = ((event->type_len + 1) * 4); read_page()
232 struct ring_buffer_event *event; ring_buffer_producer() local
237 event = ring_buffer_lock_reserve(buffer, 10); ring_buffer_producer()
238 if (!event) { ring_buffer_producer()
242 entry = ring_buffer_event_data(event); ring_buffer_producer()
244 ring_buffer_unlock_commit(buffer, event); ring_buffer_producer()
H A Dtrace_syscalls.c16 static int syscall_enter_register(struct ftrace_event_call *event,
18 static int syscall_exit_register(struct ftrace_event_call *event,
111 struct trace_event *event) print_syscall_enter()
126 if (entry->enter_event->event.type != ent->type) { print_syscall_enter()
157 struct trace_event *event) print_syscall_exit()
174 if (entry->exit_event->event.type != ent->type) { print_syscall_exit()
299 struct ring_buffer_event *event; ftrace_syscall_enter() local
328 event = trace_buffer_lock_reserve(buffer, ftrace_syscall_enter()
329 sys_data->enter_event->event.type, size, irq_flags, pc); ftrace_syscall_enter()
330 if (!event) ftrace_syscall_enter()
333 entry = ring_buffer_event_data(event); ftrace_syscall_enter()
337 event_trigger_unlock_commit(ftrace_file, buffer, event, entry, ftrace_syscall_enter()
347 struct ring_buffer_event *event; ftrace_syscall_exit() local
373 event = trace_buffer_lock_reserve(buffer, ftrace_syscall_exit()
374 sys_data->exit_event->event.type, sizeof(*entry), ftrace_syscall_exit()
376 if (!event) ftrace_syscall_exit()
379 entry = ring_buffer_event_data(event); ftrace_syscall_exit()
383 event_trigger_unlock_commit(ftrace_file, buffer, event, entry, ftrace_syscall_exit()
470 pr_debug("syscall %s metadata not mapped, disabling ftrace event\n", init_syscall_trace()
577 sys_data->enter_event->event.type, NULL, &rctx); perf_syscall_enter()
598 pr_info("event trace: Could not activate" perf_sysenter_enable()
650 sys_data->exit_event->event.type, NULL, &rctx); perf_syscall_exit()
670 pr_info("event trace: Could not activate" perf_sysexit_enable()
696 static int syscall_enter_register(struct ftrace_event_call *event, syscall_enter_register() argument
703 return reg_event_syscall_enter(file, event); syscall_enter_register()
705 unreg_event_syscall_enter(file, event); syscall_enter_register()
710 return perf_sysenter_enable(event); syscall_enter_register()
712 perf_sysenter_disable(event); syscall_enter_register()
724 static int syscall_exit_register(struct ftrace_event_call *event, syscall_exit_register() argument
731 return reg_event_syscall_exit(file, event); syscall_exit_register()
733 unreg_event_syscall_exit(file, event); syscall_exit_register()
738 return perf_sysexit_enable(event); syscall_exit_register()
740 perf_sysexit_disable(event); syscall_exit_register()
110 print_syscall_enter(struct trace_iterator *iter, int flags, struct trace_event *event) print_syscall_enter() argument
156 print_syscall_exit(struct trace_iterator *iter, int flags, struct trace_event *event) print_syscall_exit() argument
H A Dring_buffer.c198 #define skip_time_extend(event) \
199 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
201 static inline int rb_null_event(struct ring_buffer_event *event) rb_null_event() argument
203 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; rb_null_event()
206 static void rb_event_set_padding(struct ring_buffer_event *event) rb_event_set_padding() argument
209 event->type_len = RINGBUF_TYPE_PADDING; rb_event_set_padding()
210 event->time_delta = 0; rb_event_set_padding()
214 rb_event_data_length(struct ring_buffer_event *event) rb_event_data_length() argument
218 if (event->type_len) rb_event_data_length()
219 length = event->type_len * RB_ALIGNMENT; rb_event_data_length()
221 length = event->array[0]; rb_event_data_length()
226 * Return the length of the given event. Will return
227 * the length of the time extend if the event is a
231 rb_event_length(struct ring_buffer_event *event) rb_event_length() argument
233 switch (event->type_len) { rb_event_length()
235 if (rb_null_event(event)) rb_event_length()
238 return event->array[0] + RB_EVNT_HDR_SIZE; rb_event_length()
247 return rb_event_data_length(event); rb_event_length()
257 * or just the event length for all other events.
260 rb_event_ts_length(struct ring_buffer_event *event) rb_event_ts_length() argument
264 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { rb_event_ts_length()
265 /* time extends include the data event after it */ rb_event_ts_length()
267 event = skip_time_extend(event); rb_event_ts_length()
269 return len + rb_event_length(event); rb_event_ts_length()
273 * ring_buffer_event_length - return the length of the event
274 * @event: the event to get the length of
276 * Returns the size of the data load of a data event.
277 * If the event is something other than a data event, it
278 * returns the size of the event itself. With the exception
280 * data load of the data event after it.
282 unsigned ring_buffer_event_length(struct ring_buffer_event *event) ring_buffer_event_length() argument
286 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) ring_buffer_event_length()
287 event = skip_time_extend(event); ring_buffer_event_length()
289 length = rb_event_length(event); ring_buffer_event_length()
290 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) ring_buffer_event_length()
293 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) ring_buffer_event_length()
294 length -= sizeof(event->array[0]); ring_buffer_event_length()
301 rb_event_data(struct ring_buffer_event *event) rb_event_data() argument
303 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) rb_event_data()
304 event = skip_time_extend(event); rb_event_data()
305 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); rb_event_data()
307 if (event->type_len) rb_event_data()
308 return (void *)&event->array[0]; rb_event_data()
310 return (void *)&event->array[1]; rb_event_data()
314 * ring_buffer_event_data - return the data of the event
315 * @event: the event to get the data from
317 void *ring_buffer_event_data(struct ring_buffer_event *event) ring_buffer_event_data() argument
319 return rb_event_data(event); ring_buffer_event_data()
588 * empty. The flag only causes the next event to run ring_buffer_wait()
591 * an event will cause an irq_work to try to wake up ring_buffer_wait()
675 * is set, the next event will wake the task up, but we can get stuck ring_buffer_poll_wait()
676 * if there's only a single event in. ring_buffer_poll_wait()
682 * extremely small, and it's not a problem if another event comes in, we ring_buffer_poll_wait()
1907 rb_event_index(struct ring_buffer_event *event) rb_event_index() argument
1909 unsigned long addr = (unsigned long)event; rb_event_index()
1916 struct ring_buffer_event *event) rb_event_is_commit()
1918 unsigned long addr = (unsigned long)event; rb_event_is_commit()
1921 index = rb_event_index(event); rb_event_is_commit()
1935 * If we own the commit event, then we can commit rb_set_commit_to_write()
2008 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) rb_add_time_stamp() argument
2010 event->type_len = RINGBUF_TYPE_TIME_EXTEND; rb_add_time_stamp()
2012 /* Not the first event on the page? */ rb_add_time_stamp()
2013 if (rb_event_index(event)) { rb_add_time_stamp()
2014 event->time_delta = delta & TS_MASK; rb_add_time_stamp()
2015 event->array[0] = delta >> TS_SHIFT; rb_add_time_stamp()
2018 event->time_delta = 0; rb_add_time_stamp()
2019 event->array[0] = 0; rb_add_time_stamp()
2022 return skip_time_extend(event); rb_add_time_stamp()
2026 * rb_update_event - update event type and data
2027 * @event: the event to update
2028 * @type: the type of event
2029 * @length: the size of the event field in the ring buffer
2031 * Update the type and data fields of the event. The length
2038 struct ring_buffer_event *event, unsigned length, rb_update_event()
2042 if (unlikely(!rb_event_is_commit(cpu_buffer, event))) rb_update_event()
2050 event = rb_add_time_stamp(event, delta); rb_update_event()
2055 event->time_delta = delta; rb_update_event()
2058 event->type_len = 0; rb_update_event()
2059 event->array[0] = length; rb_update_event()
2061 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); rb_update_event()
2224 struct ring_buffer_event event; /* Used only for sizeof array */ rb_calculate_event_length() local
2231 length += sizeof(event.array[0]); rb_calculate_event_length()
2244 struct ring_buffer_event *event; rb_reset_tail() local
2247 * Only the event that crossed the page boundary rb_reset_tail()
2263 event = __rb_page_index(tail_page, tail); rb_reset_tail()
2264 kmemcheck_annotate_bitfield(event, bitfield); rb_reset_tail()
2271 * This will be used by the reader to add lost event rb_reset_tail()
2277 * If this event is bigger than the minimum size, then rb_reset_tail()
2291 rb_event_set_padding(event); rb_reset_tail()
2298 /* Put in a discarded event */ rb_reset_tail()
2299 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; rb_reset_tail()
2300 event->type_len = RINGBUF_TYPE_PADDING; rb_reset_tail()
2302 event->time_delta = 1; rb_reset_tail()
2424 struct ring_buffer_event *event; __rb_reserve_next() local
2428 * If the time delta since the last event is too big to __rb_reserve_next()
2429 * hold in the time field of the event, then we append a __rb_reserve_next()
2430 * TIME EXTEND event ahead of the data event. __rb_reserve_next()
2456 event = __rb_page_index(tail_page, tail); __rb_reserve_next()
2457 kmemcheck_annotate_bitfield(event, bitfield); __rb_reserve_next()
2458 rb_update_event(cpu_buffer, event, length, add_timestamp, delta); __rb_reserve_next()
2472 return event; __rb_reserve_next()
2477 struct ring_buffer_event *event) rb_try_to_discard()
2484 new_index = rb_event_index(event); rb_try_to_discard()
2485 old_index = new_index + rb_event_ts_length(event); rb_try_to_discard()
2486 addr = (unsigned long)event; rb_try_to_discard()
2494 unsigned long event_length = rb_event_length(event); rb_try_to_discard()
2558 struct ring_buffer_event *event; rb_reserve_next_event() local
2625 event = __rb_reserve_next(cpu_buffer, length, ts, rb_reserve_next_event()
2627 if (unlikely(PTR_ERR(event) == -EAGAIN)) rb_reserve_next_event()
2630 if (!event) rb_reserve_next_event()
2633 return event; rb_reserve_next_event()
2721 * @length: the length of the data to reserve (excluding event header)
 2723 * Returns a reserved event on the ring buffer to copy directly to.
2727 * The length is the length of the data needed, not the event length
2728 * which also includes the event header.
2737 struct ring_buffer_event *event; ring_buffer_lock_reserve() local
2765 event = rb_reserve_next_event(buffer, cpu_buffer, length); ring_buffer_lock_reserve()
2766 if (!event) ring_buffer_lock_reserve()
2769 return event; ring_buffer_lock_reserve()
2781 struct ring_buffer_event *event) rb_update_write_stamp()
2786 * The event first in the commit queue updates the rb_update_write_stamp()
2789 if (rb_event_is_commit(cpu_buffer, event)) { rb_update_write_stamp()
2791 * A commit event that is first on a page rb_update_write_stamp()
2794 if (!rb_event_index(event)) rb_update_write_stamp()
2797 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { rb_update_write_stamp()
2798 delta = event->array[0]; rb_update_write_stamp()
2800 delta += event->time_delta; rb_update_write_stamp()
2803 cpu_buffer->write_stamp += event->time_delta; rb_update_write_stamp()
2808 struct ring_buffer_event *event) rb_commit()
2811 rb_update_write_stamp(cpu_buffer, event); rb_commit()
2845 * @event: The event pointer to commit.
2852 struct ring_buffer_event *event) ring_buffer_unlock_commit()
2859 rb_commit(cpu_buffer, event); ring_buffer_unlock_commit()
2871 static inline void rb_event_discard(struct ring_buffer_event *event) rb_event_discard() argument
2873 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) rb_event_discard()
2874 event = skip_time_extend(event); rb_event_discard()
2876 /* array[0] holds the actual length for the discarded event */ rb_event_discard()
2877 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; rb_event_discard()
2878 event->type_len = RINGBUF_TYPE_PADDING; rb_event_discard()
2880 if (!event->time_delta) rb_event_discard()
2881 event->time_delta = 1; rb_event_discard()
2885 * Decrement the entries to the page that an event is on.
2886 * The event does not even need to exist, only the pointer
2892 struct ring_buffer_event *event) rb_decrement_entry()
2894 unsigned long addr = (unsigned long)event; rb_decrement_entry()
2925 * ring_buffer_commit_discard - discard an event that has not been committed
2927 * @event: non committed event to discard
2929 * Sometimes an event that is in the ring buffer needs to be ignored.
2930 * This function lets the user discard an event in the ring buffer
2931 * and then that event will not be read later.
2934 * committed. It will try to free the event from the ring buffer
2935 * if another event has not been added behind it.
2937 * If another event has been added behind it, it will set the event
2941 * the event.
2944 struct ring_buffer_event *event) ring_buffer_discard_commit()
2949 /* The event is discarded regardless */ ring_buffer_discard_commit()
2950 rb_event_discard(event); ring_buffer_discard_commit()
2956 * This must only be called if the event has not been ring_buffer_discard_commit()
2962 rb_decrement_entry(cpu_buffer, event); ring_buffer_discard_commit()
2963 if (rb_try_to_discard(cpu_buffer, event)) ring_buffer_discard_commit()
2970 rb_update_write_stamp(cpu_buffer, event); ring_buffer_discard_commit()
2984 * @length: The length of the data being written (excluding the event header)
2992 * and not the length of the event which would hold the header.
2999 struct ring_buffer_event *event; ring_buffer_write() local
3025 event = rb_reserve_next_event(buffer, cpu_buffer, length); ring_buffer_write()
3026 if (!event) ring_buffer_write()
3029 body = rb_event_data(event); ring_buffer_write()
3033 rb_commit(cpu_buffer, event); ring_buffer_write()
3202 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3459 struct ring_buffer_event *event) rb_update_read_stamp()
3463 switch (event->type_len) { rb_update_read_stamp()
3468 delta = event->array[0]; rb_update_read_stamp()
3470 delta += event->time_delta; rb_update_read_stamp()
3479 cpu_buffer->read_stamp += event->time_delta; rb_update_read_stamp()
3490 struct ring_buffer_event *event) rb_update_iter_read_stamp()
3494 switch (event->type_len) { rb_update_iter_read_stamp()
3499 delta = event->array[0]; rb_update_iter_read_stamp()
3501 delta += event->time_delta; rb_update_iter_read_stamp()
3510 iter->read_stamp += event->time_delta; rb_update_iter_read_stamp()
3650 struct ring_buffer_event *event; rb_advance_reader() local
3660 event = rb_reader_event(cpu_buffer); rb_advance_reader()
3662 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) rb_advance_reader()
3665 rb_update_read_stamp(cpu_buffer, event); rb_advance_reader()
3667 length = rb_event_length(event); rb_advance_reader()
3674 struct ring_buffer_event *event; rb_advance_iter() local
3690 event = rb_iter_head_event(iter); rb_advance_iter()
3692 length = rb_event_length(event); rb_advance_iter()
3703 rb_update_iter_read_stamp(iter, event); rb_advance_iter()
3722 struct ring_buffer_event *event; rb_buffer_peek() local
3729 * Since the time extend is always attached to a data event, rb_buffer_peek()
3740 event = rb_reader_event(cpu_buffer); rb_buffer_peek()
3742 switch (event->type_len) { rb_buffer_peek()
3744 if (rb_null_event(event)) rb_buffer_peek()
3748 * event it creates (which would probably be bad) rb_buffer_peek()
3754 return event; rb_buffer_peek()
3768 *ts = cpu_buffer->read_stamp + event->time_delta; rb_buffer_peek()
3774 return event; rb_buffer_peek()
3789 struct ring_buffer_event *event; rb_iter_peek() local
3811 * to a data event, we should never loop more than three times. rb_iter_peek()
3813 * finally once to get the event. rb_iter_peek()
3827 event = rb_iter_head_event(iter); rb_iter_peek()
3829 switch (event->type_len) { rb_iter_peek()
3831 if (rb_null_event(event)) { rb_iter_peek()
3836 return event; rb_iter_peek()
3850 *ts = iter->read_stamp + event->time_delta; rb_iter_peek()
3854 return event; rb_iter_peek()
3880 * ring_buffer_peek - peek at the next event to be read
3883 * @ts: The timestamp counter of this event.
3886 * This will return the event that will be read next, but does
3894 struct ring_buffer_event *event; ring_buffer_peek() local
3906 event = rb_buffer_peek(cpu_buffer, ts, lost_events); ring_buffer_peek()
3907 if (event && event->type_len == RINGBUF_TYPE_PADDING) ring_buffer_peek()
3913 if (event && event->type_len == RINGBUF_TYPE_PADDING) ring_buffer_peek()
3916 return event; ring_buffer_peek()
3920 * ring_buffer_iter_peek - peek at the next event to be read
3922 * @ts: The timestamp counter of this event.
3924 * This will return the event that will be read next, but does
3931 struct ring_buffer_event *event; ring_buffer_iter_peek() local
3936 event = rb_iter_peek(iter, ts); ring_buffer_iter_peek()
3939 if (event && event->type_len == RINGBUF_TYPE_PADDING) ring_buffer_iter_peek()
3942 return event; ring_buffer_iter_peek()
3946 * ring_buffer_consume - return an event and consume it
3947 * @buffer: The ring buffer to get the next event from
3952 * Returns the next event in the ring buffer, and that event is consumed.
3953 * Meaning, that sequential reads will keep returning a different event,
3961 struct ring_buffer_event *event = NULL; ring_buffer_consume() local
3979 event = rb_buffer_peek(cpu_buffer, ts, lost_events); ring_buffer_consume()
3980 if (event) { ring_buffer_consume()
3992 if (event && event->type_len == RINGBUF_TYPE_PADDING) ring_buffer_consume()
3995 return event; ring_buffer_consume()
4119 * @ts: The time stamp of the event read.
4121 * This reads the next event in the ring buffer and increments the iterator.
4126 struct ring_buffer_event *event; ring_buffer_read() local
4132 event = rb_iter_peek(iter, ts); ring_buffer_read()
4133 if (!event) ring_buffer_read()
4136 if (event->type_len == RINGBUF_TYPE_PADDING) ring_buffer_read()
4143 return event; ring_buffer_read()
4484 struct ring_buffer_event *event; ring_buffer_read_page() local
4519 event = rb_reader_event(cpu_buffer); ring_buffer_read_page()
4548 size = rb_event_ts_length(event); ring_buffer_read_page()
4556 /* Need to copy one event at a time */ ring_buffer_read_page()
4558 /* We need the size of one event, because ring_buffer_read_page()
4559 * rb_advance_reader only advances by one event, ring_buffer_read_page()
4564 size = rb_event_length(event); ring_buffer_read_page()
4576 event = rb_reader_event(cpu_buffer); ring_buffer_read_page()
4578 size = rb_event_ts_length(event); ring_buffer_read_page()
4756 struct ring_buffer_event *event; rb_write_something() local
4776 event = ring_buffer_lock_reserve(data->buffer, len); rb_write_something()
4777 if (!event) { rb_write_something()
4788 event_len = ring_buffer_event_length(event); rb_write_something()
4793 item = ring_buffer_event_data(event); rb_write_something()
4816 ring_buffer_unlock_commit(data->buffer, event); rb_write_something()
4932 struct ring_buffer_event *event; for_each_online_cpu() local
4962 pr_info(" biggest event: %d\n", big_event_size); for_each_online_cpu()
4963 pr_info(" smallest event: %d\n", small_event_size); for_each_online_cpu()
4970 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { for_each_online_cpu()
4972 item = ring_buffer_event_data(event); for_each_online_cpu()
4973 total_len += ring_buffer_event_length(event); for_each_online_cpu()
1915 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) rb_event_is_commit() argument
2037 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event, unsigned length, int add_timestamp, u64 delta) rb_update_event() argument
2476 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) rb_try_to_discard() argument
2780 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) rb_update_write_stamp() argument
2807 rb_commit(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) rb_commit() argument
2851 ring_buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) ring_buffer_unlock_commit() argument
2891 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) rb_decrement_entry() argument
2943 ring_buffer_discard_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) ring_buffer_discard_commit() argument
3458 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) rb_update_read_stamp() argument
3489 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, struct ring_buffer_event *event) rb_update_iter_read_stamp() argument
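The ring_buffer.c documentation above spells out the two halves of the API: the write side reserves an event, fills the payload returned by ring_buffer_event_data() and commits it, while the read side peeks or consumes events one at a time. A minimal round trip modelled on the benchmark and self-test hits (the payload struct is illustrative):

/* Write one entry and drain one CPU, following the reserve/commit/consume
 * calls shown above; struct my_entry is illustrative. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ring_buffer.h>

struct my_entry { u32 value; };

static int my_write(struct ring_buffer *buffer, u32 value)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;

	/* length is the data size only; the event header is added internally */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	return ring_buffer_unlock_commit(buffer, event);
}

static void my_drain(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL))) {
		struct my_entry *entry = ring_buffer_event_data(event);

		pr_info("read %u at %llu\n", entry->value,
			(unsigned long long)ts);
	}
}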
/linux-4.1.27/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/
H A DCore.pm104 for my $event (keys %flag_fields) {
105 print "event $event:\n";
106 for my $field (keys %{$flag_fields{$event}}) {
108 print " delim: $flag_fields{$event}{$field}{'delim'}\n";
109 foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event}{$field}{"values"}}) {
110 print " value $idx: $flag_fields{$event}{$field}{'values'}{$idx}\n";
151 for my $event (keys %symbolic_fields) {
152 print "event $event:\n";
153 for my $field (keys %{$symbolic_fields{$event}}) {
155 foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event}{$field}{"values"}}) {
156 print " value $idx: $symbolic_fields{$event}{$field}{'values'}{$idx}\n";
/linux-4.1.27/arch/blackfin/kernel/
H A Dperf_event.c33 * We have two counters, and each counter can support an event type.
260 static void bfin_perf_event_update(struct perf_event *event, bfin_perf_event_update() argument
298 local64_add(delta, &event->count); bfin_perf_event_update()
301 static void bfin_pmu_stop(struct perf_event *event, int flags) bfin_pmu_stop() argument
304 struct hw_perf_event *hwc = &event->hw; bfin_pmu_stop()
307 if (!(event->hw.state & PERF_HES_STOPPED)) { bfin_pmu_stop()
310 event->hw.state |= PERF_HES_STOPPED; bfin_pmu_stop()
313 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { bfin_pmu_stop()
314 bfin_perf_event_update(event, &event->hw, idx); bfin_pmu_stop()
315 event->hw.state |= PERF_HES_UPTODATE; bfin_pmu_stop()
319 static void bfin_pmu_start(struct perf_event *event, int flags) bfin_pmu_start() argument
322 struct hw_perf_event *hwc = &event->hw; bfin_pmu_start()
329 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); bfin_pmu_start()
331 cpuc->events[idx] = event; bfin_pmu_start()
332 event->hw.state = 0; bfin_pmu_start()
336 static void bfin_pmu_del(struct perf_event *event, int flags) bfin_pmu_del() argument
340 bfin_pmu_stop(event, PERF_EF_UPDATE); bfin_pmu_del()
341 __clear_bit(event->hw.idx, cpuc->used_mask); bfin_pmu_del()
343 perf_event_update_userpage(event); bfin_pmu_del()
346 static int bfin_pmu_add(struct perf_event *event, int flags) bfin_pmu_add() argument
349 struct hw_perf_event *hwc = &event->hw; bfin_pmu_add()
353 perf_pmu_disable(event->pmu); bfin_pmu_add()
366 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; bfin_pmu_add()
368 bfin_pmu_start(event, PERF_EF_RELOAD); bfin_pmu_add()
370 perf_event_update_userpage(event); bfin_pmu_add()
373 perf_pmu_enable(event->pmu); bfin_pmu_add()
377 static void bfin_pmu_read(struct perf_event *event) bfin_pmu_read() argument
379 bfin_perf_event_update(event, &event->hw, event->hw.idx); bfin_pmu_read()
382 static int bfin_pmu_event_init(struct perf_event *event) bfin_pmu_event_init() argument
384 struct perf_event_attr *attr = &event->attr; bfin_pmu_event_init()
385 struct hw_perf_event *hwc = &event->hw; bfin_pmu_event_init()
425 struct perf_event *event; bfin_pmu_enable() local
430 event = cpuc->events[i]; bfin_pmu_enable()
431 if (!event) bfin_pmu_enable()
433 hwc = &event->hw; bfin_pmu_enable()
/linux-4.1.27/drivers/net/wireless/mwifiex/
H A Dsta_event.c2 * Marvell Wireless LAN device driver: station event handling
31 * The function is invoked after receiving a disconnect event from firmware,
41 * - Sends a disconnect event to upper layers/applications.
51 dev_dbg(adapter->dev, "info: handles disconnect event\n"); mwifiex_reset_connect_state()
154 /* reserved 2 bytes are not mandatory in tdls event */ mwifiex_parse_tdls_event()
157 dev_err(adapter->dev, "Invalid event length!\n"); mwifiex_parse_tdls_event()
188 * upon the generated event cause.
246 dev_dbg(adapter->dev, "event: LINK_SENSED\n"); mwifiex_process_sta_event()
253 dev_dbg(adapter->dev, "event: Deauthenticated\n"); mwifiex_process_sta_event()
256 "info: receive deauth event in wps session\n"); mwifiex_process_sta_event()
268 dev_dbg(adapter->dev, "event: Disassociated\n"); mwifiex_process_sta_event()
271 "info: receive disassoc event in wps session\n"); mwifiex_process_sta_event()
283 dev_dbg(adapter->dev, "event: Link lost\n"); mwifiex_process_sta_event()
306 "event: PPS/UAPSD mode activated\n"); mwifiex_process_sta_event()
336 dev_dbg(adapter->dev, "event: DS_AWAKE\n"); mwifiex_process_sta_event()
342 dev_dbg(adapter->dev, "event: HS_ACT_REQ\n"); mwifiex_process_sta_event()
348 dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n"); mwifiex_process_sta_event()
355 dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n"); mwifiex_process_sta_event()
365 dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n"); mwifiex_process_sta_event()
374 dev_dbg(adapter->dev, "event: BGS_REPORT\n"); mwifiex_process_sta_event()
380 dev_dbg(adapter->dev, "event: PORT RELEASE\n"); mwifiex_process_sta_event()
384 dev_dbg(adapter->dev, "event: EXT_SCAN Report\n"); mwifiex_process_sta_event()
392 dev_dbg(adapter->dev, "event: WMM status changed\n"); mwifiex_process_sta_event()
404 dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n"); mwifiex_process_sta_event()
407 dev_dbg(adapter->dev, "event: Beacon SNR_LOW\n"); mwifiex_process_sta_event()
410 dev_dbg(adapter->dev, "event: MAX_FAIL\n"); mwifiex_process_sta_event()
419 dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n"); mwifiex_process_sta_event()
422 dev_dbg(adapter->dev, "event: Beacon SNR_HIGH\n"); mwifiex_process_sta_event()
425 dev_dbg(adapter->dev, "event: Data RSSI_LOW\n"); mwifiex_process_sta_event()
428 dev_dbg(adapter->dev, "event: Data SNR_LOW\n"); mwifiex_process_sta_event()
431 dev_dbg(adapter->dev, "event: Data RSSI_HIGH\n"); mwifiex_process_sta_event()
434 dev_dbg(adapter->dev, "event: Data SNR_HIGH\n"); mwifiex_process_sta_event()
437 dev_dbg(adapter->dev, "event: Link Quality\n"); mwifiex_process_sta_event()
440 dev_dbg(adapter->dev, "event: Pre-Beacon Lost\n"); mwifiex_process_sta_event()
443 dev_dbg(adapter->dev, "event: IBSS_COALESCED\n"); mwifiex_process_sta_event()
449 dev_dbg(adapter->dev, "event: ADDBA Request\n"); mwifiex_process_sta_event()
455 dev_dbg(adapter->dev, "event: DELBA Request\n"); mwifiex_process_sta_event()
459 dev_dbg(adapter->dev, "event: BA Stream timeout\n"); mwifiex_process_sta_event()
467 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl); mwifiex_process_sta_event()
471 dev_dbg(adapter->dev, "event: tx_buf_size %d\n", mwifiex_process_sta_event()
476 dev_dbg(adapter->dev, "event: WEP ICV error\n"); mwifiex_process_sta_event()
480 dev_dbg(adapter->dev, "event: BW Change\n"); mwifiex_process_sta_event()
484 dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause); mwifiex_process_sta_event()
488 dev_dbg(adapter->dev, "event: Remain on channel expired\n"); mwifiex_process_sta_event()
499 dev_dbg(adapter->dev, "event: Channel Switch Announcement\n"); mwifiex_process_sta_event()
514 dev_dbg(adapter->dev, "event: TX_STATUS Report\n"); mwifiex_process_sta_event()
519 dev_dbg(adapter->dev, "event: Channel Report\n"); mwifiex_process_sta_event()
524 dev_dbg(adapter->dev, "event: Radar detected\n"); mwifiex_process_sta_event()
529 dev_dbg(adapter->dev, "event: unknown event id: %#x\n", mwifiex_process_sta_event()
H A Duap_event.c2 * Marvell Wireless LAN device driver: AP event handling
31 * upon the generated event cause.
48 struct mwifiex_assoc_event *event; mwifiex_process_uap_event() local
57 event = (struct mwifiex_assoc_event *) mwifiex_process_uap_event()
59 if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { mwifiex_process_uap_event()
62 if (ieee80211_is_assoc_req(event->frame_control)) mwifiex_process_uap_event()
64 else if (ieee80211_is_reassoc_req(event->frame_control)) mwifiex_process_uap_event()
71 sinfo.assoc_req_ies = &event->data[len]; mwifiex_process_uap_event()
73 (u8 *)&event->frame_control; mwifiex_process_uap_event()
75 le16_to_cpu(event->len) - (u16)len; mwifiex_process_uap_event()
78 cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo, mwifiex_process_uap_event()
81 node = mwifiex_add_sta_entry(priv, event->sta_addr); mwifiex_process_uap_event()
131 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); mwifiex_process_uap_event()
139 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); mwifiex_process_uap_event()
143 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl); mwifiex_process_uap_event()
148 dev_dbg(adapter->dev, "event: tx_buf_size %d\n", mwifiex_process_uap_event()
153 dev_dbg(adapter->dev, "event: ADDBA Request\n"); mwifiex_process_uap_event()
160 dev_dbg(adapter->dev, "event: DELBA Request\n"); mwifiex_process_uap_event()
165 dev_dbg(adapter->dev, "event: BA Stream timeout\n"); mwifiex_process_uap_event()
172 dev_dbg(adapter->dev, "event: EXT_SCAN Report\n"); mwifiex_process_uap_event()
178 dev_dbg(adapter->dev, "event: TX_STATUS Report\n"); mwifiex_process_uap_event()
195 "event: PPS/UAPSD mode activated\n"); mwifiex_process_uap_event()
221 dev_dbg(adapter->dev, "event: Channel Report\n"); mwifiex_process_uap_event()
225 dev_dbg(adapter->dev, "event: Radar detected\n"); mwifiex_process_uap_event()
229 dev_dbg(adapter->dev, "event: unknown event id: %#x\n", mwifiex_process_uap_event()
/linux-4.1.27/tools/perf/scripts/python/
H A Devent_analyzing_sample.py1 # event_analyzing_sample.py: general event handler in python
 14 # for an x86 HW PMU event: PEBS with load latency data.
43 # load latency info, while gen_events is for general event.
67 # Create and insert event object to a database so that user could
88 # Create the event object and insert it to the right table in database
89 event = create_event(name, comm, dso, symbol, raw_buf)
90 insert_db(event)
92 def insert_db(event):
93 if event.ev_type == EVTYPE_GENERIC:
95 (event.name, event.symbol, event.comm, event.dso))
96 elif event.ev_type == EVTYPE_PEBS_LL:
97 event.ip &= 0x7fffffffffffffff
98 event.dla &= 0x7fffffffffffffff
100 (event.name, event.symbol, event.comm, event.dso, event.flags,
101 event.ip, event.status, event.dse, event.dla, event.lat))
105 # We show the basic info for the 2 type of event classes
 111 # As the event number may be very big, we can't use a linear way
/linux-4.1.27/drivers/isdn/hisax/
H A Dfsm.c35 if ((fnlist[i].state >= fsm->state_count) || (fnlist[i].event >= fsm->event_count)) { FsmNew()
38 (long)fnlist[i].event, (long)fsm->event_count); FsmNew()
40 fsm->jumpmatrix[fsm->state_count * fnlist[i].event + FsmNew()
52 FsmEvent(struct FsmInst *fi, int event, void *arg) FsmEvent() argument
56 if ((fi->state >= fi->fsm->state_count) || (event >= fi->fsm->event_count)) { FsmEvent()
58 (long)fi->state, (long)fi->fsm->state_count, event, (long)fi->fsm->event_count); FsmEvent()
61 r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state]; FsmEvent()
66 fi->fsm->strEvent[event]); FsmEvent()
67 r(fi, event, arg); FsmEvent()
73 fi->fsm->strEvent[event]); FsmEvent()
94 FsmEvent(ft->fi, ft->event, ft->arg); FsmExpireTimer()
122 int millisec, int event, void *arg, int where) FsmAddTimer()
137 ft->event = event; FsmAddTimer()
146 int millisec, int event, void *arg, int where) FsmRestartTimer()
158 ft->event = event; FsmRestartTimer()
121 FsmAddTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) FsmAddTimer() argument
145 FsmRestartTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) FsmRestartTimer() argument
H A Dfsm.h39 int state, event; member in struct:FsmNode
46 int event; member in struct:FsmTimer
52 int FsmEvent(struct FsmInst *fi, int event, void *arg);
55 int FsmAddTimer(struct FsmTimer *ft, int millisec, int event,
57 void FsmRestartTimer(struct FsmTimer *ft, int millisec, int event,
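The hisax FSM helpers above compile a list of struct FsmNode entries into a flat state_count x event_count jump matrix (FsmNew) and dispatch by indexing it (FsmEvent); an empty slot means the transition is undefined and is only logged. A stripped-down sketch of the same jump-matrix idea, using the identical state_count * event + state indexing:

/* Jump-matrix dispatch in the style of FsmNew()/FsmEvent(); sizes and the
 * instance type are illustrative. */
#define N_STATES 3
#define N_EVENTS 4

struct my_inst { int state; };

typedef void (*fsm_fn)(struct my_inst *fi, int event, void *arg);

static fsm_fn jumpmatrix[N_STATES * N_EVENTS];

static void my_fsm_add(int state, int event, fsm_fn fn)
{
	jumpmatrix[N_STATES * event + state] = fn;	/* same indexing as FsmNew() */
}

static int my_fsm_event(struct my_inst *fi, int event, void *arg)
{
	fsm_fn r = jumpmatrix[N_STATES * event + fi->state];

	if (!r)
		return 1;	/* undefined transition, as in FsmEvent() above */
	r(fi, event, arg);
	return 0;
}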
/linux-4.1.27/drivers/isdn/mISDN/
H A Dfsm.c40 (fnlist[i].event >= fsm->event_count)) { mISDN_FsmNew()
44 (long)fnlist[i].event, (long)fsm->event_count); mISDN_FsmNew()
46 fsm->jumpmatrix[fsm->state_count * fnlist[i].event + mISDN_FsmNew()
59 mISDN_FsmEvent(struct FsmInst *fi, int event, void *arg) mISDN_FsmEvent() argument
64 (event >= fi->fsm->event_count)) { mISDN_FsmEvent()
67 (long)fi->state, (long)fi->fsm->state_count, event, mISDN_FsmEvent()
71 r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state]; mISDN_FsmEvent()
76 fi->fsm->strEvent[event]); mISDN_FsmEvent()
77 r(fi, event, arg); mISDN_FsmEvent()
83 fi->fsm->strEvent[event]); mISDN_FsmEvent()
106 mISDN_FsmEvent(ft->fi, ft->event, ft->arg); FsmExpireTimer()
137 int millisec, int event, void *arg, int where) mISDN_FsmAddTimer()
156 ft->event = event; mISDN_FsmAddTimer()
166 int millisec, int event, void *arg, int where) mISDN_FsmRestartTimer()
178 ft->event = event; mISDN_FsmRestartTimer()
136 mISDN_FsmAddTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) mISDN_FsmAddTimer() argument
165 mISDN_FsmRestartTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) mISDN_FsmRestartTimer() argument
/linux-4.1.27/drivers/scsi/lpfc/
H A Dlpfc_nl.h49 * All net link event payloads will begin with an event type
50 * and subcategory. The event type must come first.
57 /* RSCN event header */
64 /* els event header */
79 /* special els lsrjt event */
87 /* special els logo event */
93 /* fabric event header */
106 /* special case fabric fcprdchkerr event */
115 /* scsi event header */
133 /* special case scsi varqueuedepth event */
140 /* special case scsi check condition event */
149 /* event codes for FC_REG_BOARD_EVENT */
152 /* board event header */
159 /* event codes for FC_REG_ADAPTER_EVENT */
162 /* adapter event header */
169 /* event codes for temp_event */
/linux-4.1.27/arch/sh/boards/mach-dreamcast/
H A Dirq.c24 * set in the Event Mask Registers (EMRs). When a hardware event is
27 * event.
46 #define ESR_BASE 0x005f6900 /* Base event status register */
47 #define EMR_BASE 0x005f6910 /* Base event mask register */
50 * Helps us determine the EMR group that this event belongs to: 0 = 0x6910,
51 * 1 = 0x6920, 2 = 0x6930; also determine the event offset.
53 #define LEVEL(event) (((event) - HW_EVENT_IRQ_BASE) / 32)
55 /* Return the hardware event's bit position within the EMR/ESR */
56 #define EVENT_BIT(event) (((event) - HW_EVENT_IRQ_BASE) & 31)
60 * (logically mapped to the corresponding bit for the hardware event).
63 /* Disable the hardware event by masking its bit in its EMR */ disable_systemasic_irq()
75 /* Enable the hardware event by setting its bit in its EMR */ enable_systemasic_irq()
87 /* Acknowledge a hardware event by writing its bit back to its ESR */ mask_ack_systemasic_irq()
104 * Map the hardware event indicated by the processor IRQ to a virtual IRQ.
131 /* Now scan and find the first set bit as the event to map */ systemasic_irq_demux()
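LEVEL() and EVENT_BIT() above split a hardware event number into a register group (0x5f6910, 0x5f6920 or 0x5f6930) and a bit within that group's EMR/ESR. A quick worked example with an illustrative event number:

/* For an event 37 above HW_EVENT_IRQ_BASE:
 *   LEVEL     = 37 / 32 = 1  -> second register group (0x005f6920)
 *   EVENT_BIT = 37 & 31 = 5  -> bit 5 within that EMR/ESR
 */
#define MY_LEVEL(ev, base)	(((ev) - (base)) / 32)
#define MY_EVENT_BIT(ev, base)	(((ev) - (base)) & 31)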
/linux-4.1.27/drivers/staging/iio/
H A Diio_simple_dummy_events.c24 * iio_simple_dummy_read_event_config() - is event enabled?
26 * @chan: channel for the event whose state is being queried
27 * @type: type of the event whose state is being queried
31 * discover if the event generation is enabled on the device.
44 * iio_simple_dummy_write_event_config() - set whether event is enabled
46 * @chan: channel for the event whose state is being set
47 * @type: type of the event whose state is being set
52 * so that it generates the specified event. Here it just sets up a cached
103 * iio_simple_dummy_read_event_value() - get value associated with event
105 * @chan: channel for the event whose value is being read
106 * @type: type of the event whose value is being read
108 * @info: info type of the event whose value is being read
109 * @val: value for the event code.
113 * on the event enabled. This often means that the driver must cache the values
115 * the enabled event is changed.
132 * iio_simple_dummy_write_event_value() - set value associate with event
134 * @chan: channel for the event whose value is being set
135 * @type: type of the event whose value is being set
137 * @info: info type of the event whose value is being set
155 * iio_simple_dummy_event_handler() - identify and pass on event
156 * @irq: irq of event line
160 * event occurred and for then pushing that event towards userspace.
161 * Here only one event occurs so we push that directly on with locally
169 dev_dbg(&indio_dev->dev, "id %x event %x\n", iio_simple_dummy_event_handler()
230 /* Fire up event source - normally not present */ iio_simple_dummy_events_register()
/linux-4.1.27/arch/s390/kernel/
H A Dperf_cpum_sf.c2 * Performance event support for the System z CPU-measurement Sampling Facility
87 struct perf_event *event; /* Scheduled perf event */ member in struct:cpu_hw_sf
385 * 3. Store the raw sample buffer pointer in the perf event allocate_buffers()
431 * that the sampling facility is enabled too. If the event to be allocate_buffers()
436 * before the event is started. allocate_buffers()
511 * @hwc: Perf event hardware structure
514 * and postponed allocation extents stored in the specified Perf event hardware.
625 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
628 if (RAWSAMPLE_REG(&event->hw)) hw_perf_event_destroy()
629 kfree((void *) RAWSAMPLE_REG(&event->hw)); hw_perf_event_destroy()
631 /* Release PMC if this is the last perf event */ hw_perf_event_destroy()
668 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
672 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
673 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
687 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
694 * The event->cpu value can be -1 to count on every CPU, for example, __hw_perf_event_init()
696 * sampling info from the current CPU, otherwise use event->cpu to __hw_perf_event_init()
703 if (event->cpu == -1) __hw_perf_event_init()
709 cpuhw = &per_cpu(cpu_hw_sf, event->cpu); __hw_perf_event_init()
779 * from the event. If the event is not pinned to a particular __hw_perf_event_init()
780 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling __hw_perf_event_init()
801 static int cpumsf_pmu_event_init(struct perf_event *event) cpumsf_pmu_event_init() argument
806 if (has_branch_stack(event)) cpumsf_pmu_event_init()
809 switch (event->attr.type) { cpumsf_pmu_event_init()
811 if ((event->attr.config != PERF_EVENT_CPUM_SF) && cpumsf_pmu_event_init()
812 (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) cpumsf_pmu_event_init()
821 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) cpumsf_pmu_event_init()
823 if (!is_sampling_event(event)) cpumsf_pmu_event_init()
830 /* Check online status of the CPU to which the event is pinned */ cpumsf_pmu_event_init()
831 if (event->cpu >= nr_cpumask_bits || cpumsf_pmu_event_init()
832 (event->cpu >= 0 && !cpu_online(event->cpu))) cpumsf_pmu_event_init()
838 if (event->attr.exclude_hv) cpumsf_pmu_event_init()
839 event->attr.exclude_hv = 0; cpumsf_pmu_event_init()
840 if (event->attr.exclude_idle) cpumsf_pmu_event_init()
841 event->attr.exclude_idle = 0; cpumsf_pmu_event_init()
843 err = __hw_perf_event_init(event); cpumsf_pmu_event_init()
845 if (event->destroy) cpumsf_pmu_event_init()
846 event->destroy(event); cpumsf_pmu_event_init()
865 * perf event: cpumsf_pmu_enable()
866 * 1. Postponed buffer allocations from the event initialization. cpumsf_pmu_enable()
873 if (cpuhw->event) { cpumsf_pmu_enable()
874 hwc = &cpuhw->event->hw; cpumsf_pmu_enable()
942 /* perf_exclude_event() - Filter event
943 * @event: The perf event
949 * Return non-zero if the event shall be excluded.
951 static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, perf_exclude_event() argument
954 if (event->attr.exclude_user && user_mode(regs)) perf_exclude_event()
956 if (event->attr.exclude_kernel && !user_mode(regs)) perf_exclude_event()
958 if (event->attr.exclude_guest && sde_regs->in_guest) perf_exclude_event()
960 if (event->attr.exclude_host && !sde_regs->in_guest) perf_exclude_event()
966 * @event: The perf event
969 * Use the hardware sample data to create perf event sample. The sample
970 * is then pushed to the event subsystem and the function checks for perf_push_sample()
971 * possible event overflows. If an event overflow occurs, the PMU is
974 * Return non-zero if an event overflow occurred.
976 static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) perf_push_sample() argument
985 perf_sample_data_init(&data, 0, event->hw.last_period); perf_push_sample()
1033 if (perf_exclude_event(event, &regs, sde_regs)) perf_push_sample()
1035 if (perf_event_overflow(event, &data, &regs)) { perf_push_sample()
1037 event->pmu->stop(event, 0); perf_push_sample()
1039 perf_event_update_userpage(event); perf_push_sample()
1044 static void perf_event_count_update(struct perf_event *event, u64 count) perf_event_count_update() argument
1046 local64_add(count, &event->count); perf_event_count_update()
1121 * @event: The perf event
1126 * then pushed to the perf event subsystem. Depending on the sampling function,
1130 * event hardware structure. The function always works with a combined-sampling
1138 * due to a perf event overflow.
1140 static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, hw_collect_samples() argument
1143 unsigned long flags = SAMPL_FLAGS(&event->hw); hw_collect_samples()
1150 sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw); hw_collect_samples()
1153 sample_size = event_sample_size(&event->hw); hw_collect_samples()
1161 /* Update perf event period */ hw_collect_samples()
1162 perf_event_count_update(event, SAMPL_RATE(&event->hw)); hw_collect_samples()
1166 /* If an event overflow occurred, the PMU is stopped to hw_collect_samples()
1167 * throttle event delivery. Remaining sample data is hw_collect_samples()
1174 *overflow = perf_push_sample(event, sfr); hw_collect_samples()
1202 * @event: The perf event
1205 * Processes the sampling buffer and creates perf event samples. hw_perf_event_update()
1207 * register of the specified perf event.
1215 static void hw_perf_event_update(struct perf_event *event, int flush_all) hw_perf_event_update() argument
1217 struct hw_perf_event *hwc = &event->hw; hw_perf_event_update()
1254 * flag if an (perf) event overflow happened. If so, the PMU hw_perf_event_update()
1257 hw_collect_samples(event, sdbt, &event_overflow); hw_perf_event_update()
1273 /* Update event hardware registers */ hw_perf_event_update()
1282 /* If an event overflow happened, discard samples by hw_perf_event_update()
1289 /* Account sample overflows in the event hardware structure */ hw_perf_event_update()
1295 "overflow stats: sample=%llu event=%llu\n", hw_perf_event_update()
1299 static void cpumsf_pmu_read(struct perf_event *event) cpumsf_pmu_read() argument
1307 static void cpumsf_pmu_start(struct perf_event *event, int flags) cpumsf_pmu_start() argument
1311 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) cpumsf_pmu_start()
1315 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); cpumsf_pmu_start()
1317 perf_pmu_disable(event->pmu); cpumsf_pmu_start()
1318 event->hw.state = 0; cpumsf_pmu_start()
1320 if (SAMPL_DIAG_MODE(&event->hw)) cpumsf_pmu_start()
1322 perf_pmu_enable(event->pmu); cpumsf_pmu_start()
1328 static void cpumsf_pmu_stop(struct perf_event *event, int flags) cpumsf_pmu_stop() argument
1332 if (event->hw.state & PERF_HES_STOPPED) cpumsf_pmu_stop()
1335 perf_pmu_disable(event->pmu); cpumsf_pmu_stop()
1338 event->hw.state |= PERF_HES_STOPPED; cpumsf_pmu_stop()
1340 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { cpumsf_pmu_stop()
1341 hw_perf_event_update(event, 1); cpumsf_pmu_stop()
1342 event->hw.state |= PERF_HES_UPTODATE; cpumsf_pmu_stop()
1344 perf_pmu_enable(event->pmu); cpumsf_pmu_stop()
1347 static int cpumsf_pmu_add(struct perf_event *event, int flags) cpumsf_pmu_add() argument
1359 perf_pmu_disable(event->pmu); cpumsf_pmu_add()
1361 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; cpumsf_pmu_add()
1364 * using the SDB-table start. Reset TEAR_REG event hardware register cpumsf_pmu_add()
1372 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); cpumsf_pmu_add()
1373 hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); cpumsf_pmu_add()
1382 if (SAMPL_DIAG_MODE(&event->hw)) cpumsf_pmu_add()
1385 /* Set in_use flag and store event */ cpumsf_pmu_add()
1386 cpuhw->event = event; cpumsf_pmu_add()
1390 cpumsf_pmu_start(event, PERF_EF_RELOAD); cpumsf_pmu_add()
1392 perf_event_update_userpage(event); cpumsf_pmu_add()
1393 perf_pmu_enable(event->pmu); cpumsf_pmu_add()
1397 static void cpumsf_pmu_del(struct perf_event *event, int flags) cpumsf_pmu_del() argument
1401 perf_pmu_disable(event->pmu); cpumsf_pmu_del()
1402 cpumsf_pmu_stop(event, PERF_EF_UPDATE); cpumsf_pmu_del()
1407 cpuhw->event = NULL; cpumsf_pmu_del()
1409 perf_event_update_userpage(event); cpumsf_pmu_del()
1410 perf_pmu_enable(event->pmu); cpumsf_pmu_del()
1422 PMU_FORMAT_ATTR(event, "config:0-63");
1479 hw_perf_event_update(cpuhw->event, 0); cpumf_measurement_alert()
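The sampling-facility code above uses the standard set of perf PMU callbacks (event_init, add, del, start, stop, read). As a hedged sketch only — not taken from perf_cpum_sf.c — the skeleton below shows how such callbacks are wired into a struct pmu and registered with the perf core; the "demo" names are hypothetical and the callback bodies are assumed to be defined elsewhere.

	/* Illustrative sketch: minimal PMU registration. The demo_pmu_* callbacks
	 * are hypothetical placeholders for functions like the ones excerpted above.
	 */
	static struct pmu demo_pmu = {
		.pmu_enable	= demo_pmu_enable,	/* enable PMU hardware */
		.pmu_disable	= demo_pmu_disable,	/* disable PMU hardware */
		.event_init	= demo_pmu_event_init,	/* validate/prepare an event */
		.add		= demo_pmu_add,		/* schedule event onto a counter */
		.del		= demo_pmu_del,		/* remove event from its counter */
		.start		= demo_pmu_start,	/* start counting */
		.stop		= demo_pmu_stop,	/* stop counting, fold in the count */
		.read		= demo_pmu_read,	/* update event->count on demand */
	};

	static int __init demo_pmu_init(void)
	{
		/* -1 asks the perf core to allocate a dynamic PMU type id */
		return perf_pmu_register(&demo_pmu, "demo", -1);
	}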
H A Dperf_cpum_cf.c2 * Performance event support for s390x - CPU-measurement Counter Facility
69 /* Local CPUMF event structure */
87 static int get_counter_set(u64 event) get_counter_set() argument
91 if (event < 32) get_counter_set()
93 else if (event < 64) get_counter_set()
95 else if (event < 128) get_counter_set()
97 else if (event < 256) get_counter_set()
291 /* Release the PMU if event is the last perf event */ hw_perf_event_destroy()
292 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
302 /* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
312 /* CPUMF <-> perf event mappings for userspace (problem-state set) */
323 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
325 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
326 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
370 /* Use the hardware perf event structure to store the counter number __hw_perf_event_init()
378 /* Validate the counter that is assigned to this event. __hw_perf_event_init()
381 * validate event groups (event->group_leader != event). __hw_perf_event_init()
396 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
406 static int cpumf_pmu_event_init(struct perf_event *event) cpumf_pmu_event_init() argument
410 switch (event->attr.type) { cpumf_pmu_event_init()
414 err = __hw_perf_event_init(event); cpumf_pmu_event_init()
420 if (unlikely(err) && event->destroy) cpumf_pmu_event_init()
421 event->destroy(event); cpumf_pmu_event_init()
426 static int hw_perf_event_reset(struct perf_event *event) hw_perf_event_reset() argument
432 prev = local64_read(&event->hw.prev_count); hw_perf_event_reset()
433 err = ecctr(event->hw.config, &new); hw_perf_event_reset()
444 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); hw_perf_event_reset()
449 static int hw_perf_event_update(struct perf_event *event) hw_perf_event_update() argument
455 prev = local64_read(&event->hw.prev_count); hw_perf_event_update()
456 err = ecctr(event->hw.config, &new); hw_perf_event_update()
459 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); hw_perf_event_update()
463 local64_add(delta, &event->count); hw_perf_event_update()
468 static void cpumf_pmu_read(struct perf_event *event) cpumf_pmu_read() argument
470 if (event->hw.state & PERF_HES_STOPPED) cpumf_pmu_read()
473 hw_perf_event_update(event); cpumf_pmu_read()
476 static void cpumf_pmu_start(struct perf_event *event, int flags) cpumf_pmu_start() argument
479 struct hw_perf_event *hwc = &event->hw; cpumf_pmu_start()
497 * Because all counters in a set are active, the event->hw.prev_count cpumf_pmu_start()
501 hw_perf_event_reset(event); cpumf_pmu_start()
507 static void cpumf_pmu_stop(struct perf_event *event, int flags) cpumf_pmu_stop() argument
510 struct hw_perf_event *hwc = &event->hw; cpumf_pmu_stop()
519 event->hw.state |= PERF_HES_STOPPED; cpumf_pmu_stop()
523 hw_perf_event_update(event); cpumf_pmu_stop()
524 event->hw.state |= PERF_HES_UPTODATE; cpumf_pmu_stop()
528 static int cpumf_pmu_add(struct perf_event *event, int flags) cpumf_pmu_add() argument
538 if (validate_ctr_auth(&event->hw)) cpumf_pmu_add()
541 ctr_set_enable(&cpuhw->state, event->hw.config_base); cpumf_pmu_add()
542 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; cpumf_pmu_add()
545 cpumf_pmu_start(event, PERF_EF_RELOAD); cpumf_pmu_add()
547 perf_event_update_userpage(event); cpumf_pmu_add()
552 static void cpumf_pmu_del(struct perf_event *event, int flags) cpumf_pmu_del() argument
556 cpumf_pmu_stop(event, PERF_EF_UPDATE); cpumf_pmu_del()
562 * When a new perf event has been added but not yet started, this can cpumf_pmu_del()
566 if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base])) cpumf_pmu_del()
567 ctr_set_disable(&cpuhw->state, event->hw.config_base); cpumf_pmu_del()
569 perf_event_update_userpage(event); cpumf_pmu_del()
/linux-4.1.27/drivers/isdn/sc/
H A DMakefile9 sc-y := shmem.o init.o packet.o command.o event.o \
H A Devent.c1 /* $Id: event.c,v 1.4.8.1 2001/09/23 22:24:59 kai Exp $
41 int indicate_status(int card, int event, ulong Channel, char *Data) indicate_status() argument
46 pr_debug("%s: Indicating event %s on Channel %d\n", indicate_status()
47 sc_adapter[card]->devicename, events[event - 256], Channel); indicate_status()
52 switch (event) { indicate_status()
64 cmd.command = event; indicate_status()
/linux-4.1.27/drivers/s390/net/
H A Dfsm.h56 int event; member in struct:__anon8474
78 * Description of a state-event combination
126 fsm_record_history(fsm_instance *fi, int state, int event);
130 * Emits an event to a FSM.
131 * If an action function is defined for the current state/event combination,
134 * @param fi Pointer to FSM which should receive the event.
135 * @param event The event do be delivered.
139 * 1 if current state or event is out of range
140 * !0 if state and event in range, but no action defined.
143 fsm_event(fsm_instance *fi, int event, void *arg) fsm_event() argument
149 (event >= fi->f->nr_events) ) { fsm_event()
151 fi->name, (long)state,(long)fi->f->nr_states, event, fsm_event()
158 r = fi->f->jumpmatrix[fi->f->nr_states * event + state]; fsm_event()
161 printk(KERN_DEBUG "fsm(%s): state %s event %s\n", fsm_event()
163 fi->f->event_names[event]); fsm_event()
166 fsm_record_history(fi, state, event); fsm_event()
168 r(fi, event, arg); fsm_event()
172 printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n", fsm_event()
173 fi->name, fi->f->event_names[event], fsm_event()
185 * This does <em>not</em> trigger an event or call an action function.
248 * @param event Event, to trigger if timer expires.
253 extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg);
260 * @param event Event, to trigger if timer expires.
263 extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg);
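The fsm.h excerpt documents the fsm_event() contract: 0 when an action was run, 1 when the state or event is out of range, and another non-zero value when no action is defined for the state/event pair. A hedged sketch of delivering an event follows; the event number MY_EV_RX and the helper name are hypothetical, only the fsm_event() declaration shown above is assumed.

	/* Sketch: deliver an event to an s390 net FSM instance and report
	 * the outcome according to the return values documented in fsm.h.
	 */
	static void demo_deliver(fsm_instance *fi, struct sk_buff *skb)
	{
		int rc;

		/* Run the action registered for (current state, MY_EV_RX);
		 * the skb is passed through as the opaque argument.
		 */
		rc = fsm_event(fi, MY_EV_RX, skb);
		if (rc)
			/* 1: state or event out of range; other non-zero:
			 * no action defined for this state/event combination.
			 */
			pr_debug("fsm: event %d not handled, rc=%d\n", MY_EV_RX, rc);
	}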
H A Dctcm_fsms.c131 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
137 static void chx_txdone(fsm_instance *fi, int event, void *arg);
138 static void chx_rx(fsm_instance *fi, int event, void *arg);
139 static void chx_rxidle(fsm_instance *fi, int event, void *arg);
140 static void chx_firstio(fsm_instance *fi, int event, void *arg);
141 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
142 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
143 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
144 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
145 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
146 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
147 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
148 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
149 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
150 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
151 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
152 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
153 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
154 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
160 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
161 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
162 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
164 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
165 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
166 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
167 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
168 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
169 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
170 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
171 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
172 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
173 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
174 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
175 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
176 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
177 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
179 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
182 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
228 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg) ctcm_action_nop() argument
242 * event The event, just happened.
245 static void chx_txdone(fsm_instance *fi, int event, void *arg) chx_txdone() argument
331 * event The event, just happened.
334 void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg) ctcm_chx_txidle() argument
352 * event The event, just happened.
355 static void chx_rx(fsm_instance *fi, int event, void *arg) chx_rx() argument
429 * event The event, just happened.
432 static void chx_firstio(fsm_instance *fi, int event, void *arg) chx_firstio() argument
457 chx_rxidle(fi, event, arg); chx_firstio()
508 * event The event, just happened.
511 static void chx_rxidle(fsm_instance *fi, int event, void *arg) chx_rxidle() argument
540 chx_firstio(fi, event, arg); chx_rxidle()
548 * event The event, just happened.
551 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) ctcm_chx_setmode() argument
568 if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ ctcm_chx_setmode()
575 if (event == CTC_EVENT_TIMER) /* see above comments */ ctcm_chx_setmode()
589 * event The event, just happened.
592 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) ctcm_chx_start() argument
652 * event The event, just happened.
655 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) ctcm_chx_haltio() argument
668 if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */ ctcm_chx_haltio()
676 if (event == CTC_EVENT_STOP) ctcm_chx_haltio()
682 if (event != CTC_EVENT_STOP) { ctcm_chx_haltio()
740 * event The event, just happened.
743 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg) ctcm_chx_stopped() argument
753 * event The event, just happened.
756 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg) ctcm_chx_stop() argument
767 * event The event, just happened.
770 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg) ctcm_chx_fail() argument
779 * event The event, just happened.
782 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) ctcm_chx_setuperr() argument
794 ((event == CTC_EVENT_UC_RCRESET) || ctcm_chx_setuperr()
795 (event == CTC_EVENT_UC_RSRESET))) { ctcm_chx_setuperr()
811 CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], ctcm_chx_setuperr()
828 * event The event, just happened.
831 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg) ctcm_chx_restart() argument
841 CTCM_FUNTAIL, ch->id, event, dev->name); ctcm_chx_restart()
848 if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ ctcm_chx_restart()
854 if (event == CTC_EVENT_TIMER) ctcm_chx_restart()
870 * event The event, just happened.
873 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) ctcm_chx_rxiniterr() argument
879 if (event == CTC_EVENT_TIMER) { ctcm_chx_rxiniterr()
884 ctcm_chx_restart(fi, event, arg); ctcm_chx_rxiniterr()
892 ctc_ch_event_names[event], fsm_getstate_str(fi)); ctcm_chx_rxiniterr()
896 "error %s\n", ctc_ch_event_names[event]); ctcm_chx_rxiniterr()
905 * event The event, just happened.
908 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg) ctcm_chx_rxinitfail() argument
925 * event The event, just happened.
928 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) ctcm_chx_rxdisc() argument
957 * event The event, just happened.
960 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) ctcm_chx_txiniterr() argument
966 if (event == CTC_EVENT_TIMER) { ctcm_chx_txiniterr()
969 ctcm_chx_restart(fi, event, arg); ctcm_chx_txiniterr()
977 ctc_ch_event_names[event], fsm_getstate_str(fi)); ctcm_chx_txiniterr()
981 "error %s\n", ctc_ch_event_names[event]); ctcm_chx_txiniterr()
989 * event The event, just happened.
992 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) ctcm_chx_txretry() argument
1012 ctcm_chx_restart(fi, event, arg); ctcm_chx_txretry()
1030 ctcm_chx_restart(fi, event, arg); ctcm_chx_txretry()
1034 if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */ ctcm_chx_txretry()
1045 if (event == CTC_EVENT_TIMER) ctcm_chx_txretry()
1062 * event The event, just happened.
1065 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg) ctcm_chx_iofatal() argument
1212 * event The event, just happened.
1215 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) ctcmpc_chx_txdone() argument
1382 * event The event, just happened.
1385 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) ctcmpc_chx_rx() argument
1485 * event The event, just happened.
1488 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) ctcmpc_chx_firstio() argument
1514 ctcmpc_chx_rxidle(fi, event, arg); ctcmpc_chx_firstio()
1539 * event The event, just happened.
1542 void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) ctcmpc_chx_rxidle() argument
1569 if (event == CTC_EVENT_START) ctcmpc_chx_rxidle()
1574 if (event == CTC_EVENT_START) ctcmpc_chx_rxidle()
1597 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg) ctcmpc_chx_attn() argument
1656 static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) ctcmpc_chx_attnbusy() argument
1741 static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg) ctcmpc_chx_resend() argument
1757 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) ctcmpc_chx_send_sweep() argument
2065 * event The event, just happened.
2068 static void dev_action_start(fsm_instance *fi, int event, void *arg) dev_action_start() argument
2090 * event The event, just happened.
2093 static void dev_action_stop(fsm_instance *fi, int event, void *arg) dev_action_stop() argument
2113 static void dev_action_restart(fsm_instance *fi, int event, void *arg) dev_action_restart() argument
2128 dev_action_stop(fi, event, arg); dev_action_restart()
2145 * event The event, just happened.
2148 static void dev_action_chup(fsm_instance *fi, int event, void *arg) dev_action_chup() argument
2156 dev->name, dev->ml_priv, dev_stat, event); dev_action_chup()
2160 if (event == DEV_EVENT_RXUP) dev_action_chup()
2166 if (event == DEV_EVENT_RXUP) { dev_action_chup()
2174 if (event == DEV_EVENT_TXUP) { dev_action_chup()
2182 if (event == DEV_EVENT_RXUP) dev_action_chup()
2186 if (event == DEV_EVENT_TXUP) dev_action_chup()
2192 if (event == DEV_EVENT_RXUP) dev_action_chup()
2206 * event The event, just happened.
2209 static void dev_action_chdown(fsm_instance *fi, int event, void *arg) dev_action_chdown() argument
2219 if (event == DEV_EVENT_TXDOWN) dev_action_chdown()
2225 if (event == DEV_EVENT_TXDOWN) dev_action_chdown()
2229 if (event == DEV_EVENT_RXDOWN) dev_action_chdown()
2233 if (event == DEV_EVENT_TXDOWN) dev_action_chdown()
2239 if (event == DEV_EVENT_RXDOWN) dev_action_chdown()
2243 if (event == DEV_EVENT_TXDOWN) dev_action_chdown()
2248 if (event == DEV_EVENT_RXDOWN) dev_action_chdown()
/linux-4.1.27/drivers/net/wireless/ti/wl1251/
H A DMakefile1 wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \
/linux-4.1.27/drivers/net/wireless/ti/wlcore/
H A DMakefile1 wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
/linux-4.1.27/drivers/cpufreq/
H A Dcpufreq_performance.c20 unsigned int event) cpufreq_governor_performance()
22 switch (event) { cpufreq_governor_performance()
25 pr_debug("setting to %u kHz because of event %u\n", cpufreq_governor_performance()
26 policy->max, event); cpufreq_governor_performance()
19 cpufreq_governor_performance(struct cpufreq_policy *policy, unsigned int event) cpufreq_governor_performance() argument
H A Dcpufreq_powersave.c20 unsigned int event) cpufreq_governor_powersave()
22 switch (event) { cpufreq_governor_powersave()
25 pr_debug("setting to %u kHz because of event %u\n", cpufreq_governor_powersave()
26 policy->min, event); cpufreq_governor_powersave()
19 cpufreq_governor_powersave(struct cpufreq_policy *policy, unsigned int event) cpufreq_governor_powersave() argument
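Both governor excerpts above follow the same pattern: a single callback receives a governor event code and, on start or limit changes, retargets the policy to a fixed frequency. A hedged skeleton of that pattern is sketched below; the governor name "demo" is hypothetical, while CPUFREQ_GOV_START, CPUFREQ_GOV_LIMITS, __cpufreq_driver_target() and CPUFREQ_RELATION_H are the symbols the excerpts imply.

	/* Skeleton of the performance/powersave-style event handler. */
	static int cpufreq_governor_demo(struct cpufreq_policy *policy,
					 unsigned int event)
	{
		switch (event) {
		case CPUFREQ_GOV_START:
		case CPUFREQ_GOV_LIMITS:
			/* Pin the CPU to the highest frequency the policy allows */
			__cpufreq_driver_target(policy, policy->max,
						CPUFREQ_RELATION_H);
			break;
		default:
			break;
		}
		return 0;
	}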
/linux-4.1.27/arch/sparc/kernel/
H A Dperf_event.c1 /* Performance event support for sparc64.
5 * This code is based almost entirely upon the x86 perf event
48 * event fields, one for each of the two counters. It's thus nearly
63 * implemented. The event selections on SPARC-T4 lack any
84 * This works because the perf event layer always adds new
90 struct perf_event *event[MAX_HWEVENTS]; member in struct:cpu_hw_events
98 /* The current counter index assigned to an event. When the
99 * event hasn't been programmed into the cpu yet, this will
100 * hold PIC_NO_INDEX. The event->hw.idx value tells us where
101 * we ought to schedule the event.
115 /* An event map describes the characteristics of a performance
116 * counter event. In particular it gives the encoding as well as
117 * a mask telling which counters the event can be measured on.
762 * generates the overflow event for precise events via a trap
764 * we happen to be in the hypervisor when the event triggers.
765 * Essentially, the overflow event reporting is completely
861 static u64 sparc_perf_event_update(struct perf_event *event, sparc_perf_event_update() argument
879 local64_add(delta, &event->count); sparc_perf_event_update()
885 static int sparc_perf_event_set_period(struct perf_event *event, sparc_perf_event_set_period() argument
912 perf_event_update_userpage(event); sparc_perf_event_set_period()
922 struct perf_event *cp = cpuc->event[i]; read_in_all_counters()
948 struct perf_event *cp = cpuc->event[i]; calculate_single_pcr()
967 cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; calculate_single_pcr()
970 static void sparc_pmu_start(struct perf_event *event, int flags);
981 struct perf_event *cp = cpuc->event[i]; calculate_multiple_pcrs()
994 struct perf_event *cp = cpuc->event[i]; calculate_multiple_pcrs()
1001 /* If performance event entries have been added, move existing events
1056 struct perf_event *event) active_event_index()
1061 if (cpuc->event[i] == event) active_event_index()
1068 static void sparc_pmu_start(struct perf_event *event, int flags) sparc_pmu_start() argument
1071 int idx = active_event_index(cpuc, event); sparc_pmu_start()
1074 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); sparc_pmu_start()
1075 sparc_perf_event_set_period(event, &event->hw, idx); sparc_pmu_start()
1078 event->hw.state = 0; sparc_pmu_start()
1080 sparc_pmu_enable_event(cpuc, &event->hw, idx); sparc_pmu_start()
1083 static void sparc_pmu_stop(struct perf_event *event, int flags) sparc_pmu_stop() argument
1086 int idx = active_event_index(cpuc, event); sparc_pmu_stop()
1088 if (!(event->hw.state & PERF_HES_STOPPED)) { sparc_pmu_stop()
1089 sparc_pmu_disable_event(cpuc, &event->hw, idx); sparc_pmu_stop()
1090 event->hw.state |= PERF_HES_STOPPED; sparc_pmu_stop()
1093 if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) { sparc_pmu_stop()
1094 sparc_perf_event_update(event, &event->hw, idx); sparc_pmu_stop()
1095 event->hw.state |= PERF_HES_UPTODATE; sparc_pmu_stop()
1099 static void sparc_pmu_del(struct perf_event *event, int _flags) sparc_pmu_del() argument
1108 if (event == cpuc->event[i]) { sparc_pmu_del()
1110 * event. sparc_pmu_del()
1112 sparc_pmu_stop(event, PERF_EF_UPDATE); sparc_pmu_del()
1118 cpuc->event[i - 1] = cpuc->event[i]; sparc_pmu_del()
1124 perf_event_update_userpage(event); sparc_pmu_del()
1134 static void sparc_pmu_read(struct perf_event *event) sparc_pmu_read() argument
1137 int idx = active_event_index(cpuc, event); sparc_pmu_read()
1138 struct hw_perf_event *hwc = &event->hw; sparc_pmu_read()
1140 sparc_perf_event_update(event, hwc, idx); sparc_pmu_read()
1212 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
1264 /* If one event is limited to a specific counter, sparc_check_constraints()
1302 struct perf_event *event; check_excludes() local
1314 event = evts[i]; check_excludes()
1316 eu = event->attr.exclude_user; check_excludes()
1317 ek = event->attr.exclude_kernel; check_excludes()
1318 eh = event->attr.exclude_hv; check_excludes()
1320 } else if (event->attr.exclude_user != eu || check_excludes()
1321 event->attr.exclude_kernel != ek || check_excludes()
1322 event->attr.exclude_hv != eh) { check_excludes()
1334 struct perf_event *event; collect_events() local
1344 list_for_each_entry(event, &group->sibling_list, group_entry) { collect_events()
1345 if (!is_software_event(event) && collect_events()
1346 event->state != PERF_EVENT_STATE_OFF) { collect_events()
1349 evts[n] = event; collect_events()
1350 events[n] = event->hw.event_base; collect_events()
1357 static int sparc_pmu_add(struct perf_event *event, int ef_flags) sparc_pmu_add() argument
1369 cpuc->event[n0] = event; sparc_pmu_add()
1370 cpuc->events[n0] = event->hw.event_base; sparc_pmu_add()
1373 event->hw.state = PERF_HES_UPTODATE; sparc_pmu_add()
1375 event->hw.state |= PERF_HES_STOPPED; sparc_pmu_add()
1385 if (check_excludes(cpuc->event, n0, 1)) sparc_pmu_add()
1387 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) sparc_pmu_add()
1400 static int sparc_pmu_event_init(struct perf_event *event) sparc_pmu_event_init() argument
1402 struct perf_event_attr *attr = &event->attr; sparc_pmu_event_init()
1404 struct hw_perf_event *hwc = &event->hw; sparc_pmu_event_init()
1414 if (has_branch_stack(event)) sparc_pmu_event_init()
1459 if (event->group_leader != event) { sparc_pmu_event_init()
1460 n = collect_events(event->group_leader, sparc_pmu_event_init()
1467 evts[n] = event; sparc_pmu_event_init()
1481 event->destroy = hw_perf_event_destroy; sparc_pmu_event_init()
1533 if (check_excludes(cpuc->event, 0, n)) sparc_pmu_commit_txn()
1535 if (sparc_check_constraints(cpuc->event, cpuc->events, n)) sparc_pmu_commit_txn()
1616 struct perf_event *event = cpuc->event[i]; perf_event_nmi_handler() local
1625 hwc = &event->hw; perf_event_nmi_handler()
1626 val = sparc_perf_event_update(event, hwc, idx); perf_event_nmi_handler()
1631 if (!sparc_perf_event_set_period(event, hwc, idx)) perf_event_nmi_handler()
1634 if (perf_event_overflow(event, &data, regs)) perf_event_nmi_handler()
1635 sparc_pmu_stop(event, 0); perf_event_nmi_handler()
1055 active_event_index(struct cpu_hw_events *cpuc, struct perf_event *event) active_event_index() argument
/linux-4.1.27/include/trace/events/
H A Dcontext_tracking.h28 * @dummy: dummy arg to make trace event macro happy
30 * This event occurs when the kernel resumes to userspace after
42 * @dummy: dummy arg to make trace event macro happy
44 * This event occurs when userspace enters the kernel through
/linux-4.1.27/arch/powerpc/include/asm/
H A Dperf_event_fsl_emb.h2 * Performance event support - Freescale embedded specific definitions.
18 /* event flags */
22 /* upper half of event flags is PMLCb */
40 /* Returns event flags and PMLCb (FSL_EMB_EVENT_*) */
/linux-4.1.27/tools/iio/
H A Diio_event_monitor.c1 /* Industrialio event test code.
104 static bool event_is_known(struct iio_event_data *event) event_is_known() argument
106 enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id); event_is_known()
107 enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id); event_is_known()
108 enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id); event_is_known()
109 enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id); event_is_known()
200 static void print_event(struct iio_event_data *event) print_event() argument
202 enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id); print_event()
203 enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id); print_event()
204 enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id); print_event()
205 enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id); print_event()
206 int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event->id); print_event()
207 int chan2 = IIO_EVENT_CODE_EXTRACT_CHAN2(event->id); print_event()
208 bool diff = IIO_EVENT_CODE_EXTRACT_DIFF(event->id); print_event()
210 if (!event_is_known(event)) { print_event()
211 printf("Unknown event: time: %lld, id: %llx\n", print_event()
212 event->timestamp, event->id); print_event()
216 printf("Event: time: %lld, ", event->timestamp); print_event()
241 struct iio_event_data event; main() local
282 fprintf(stdout, "Failed to retrieve event fd\n"); main()
288 ret = read(event_fd, &event, sizeof(event)); main()
294 perror("Failed to read event from device"); main()
300 print_event(&event); main()
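iio_event_monitor.c obtains an event file descriptor from the IIO character device and then reads fixed-size struct iio_event_data records from it. The hedged userspace sketch below shows that read loop in isolation; the device path, the IIO_GET_EVENT_FD_IOCTL request from <linux/iio/events.h>, and the omitted error handling are assumptions, not part of the excerpt.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/iio/events.h>

	int main(void)
	{
		struct iio_event_data event;
		int fd, event_fd;

		fd = open("/dev/iio:device0", 0);	/* device index assumed */
		ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
		close(fd);				/* event_fd remains usable */

		/* Each read returns one event record: an id plus a timestamp */
		while (read(event_fd, &event, sizeof(event)) == sizeof(event))
			printf("event id %llx at %lld\n",
			       (unsigned long long)event.id,
			       (long long)event.timestamp);
		return 0;
	}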
/linux-4.1.27/drivers/net/ethernet/ti/
H A Dcpts.c39 static int event_expired(struct cpts_event *event) event_expired() argument
41 return time_after(jiffies, event->tmo); event_expired()
44 static int event_type(struct cpts_event *event) event_type() argument
46 return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; event_type()
63 * Returns zero if matching event type was found.
69 struct cpts_event *event; cpts_fifo_read() local
75 pr_err("cpts: event pool is empty\n"); cpts_fifo_read()
78 event = list_first_entry(&cpts->pool, struct cpts_event, list); cpts_fifo_read()
79 event->tmo = jiffies + 2; cpts_fifo_read()
80 event->high = hi; cpts_fifo_read()
81 event->low = lo; cpts_fifo_read()
82 type = event_type(event); cpts_fifo_read()
87 list_del_init(&event->list); cpts_fifo_read()
88 list_add_tail(&event->list, &cpts->events); cpts_fifo_read()
95 pr_err("cpts: unknown event type\n"); cpts_fifo_read()
107 struct cpts_event *event; cpts_systim_read() local
116 event = list_entry(this, struct cpts_event, list); cpts_systim_read()
117 if (event_type(event) == CPTS_EV_PUSH) { cpts_systim_read()
118 list_del_init(&event->list); cpts_systim_read()
119 list_add(&event->list, &cpts->pool); cpts_systim_read()
120 val = event->low; cpts_systim_read()
289 struct cpts_event *event; cpts_find_ts() local
302 event = list_entry(this, struct cpts_event, list); cpts_find_ts()
303 if (event_expired(event)) { cpts_find_ts()
304 list_del_init(&event->list); cpts_find_ts()
305 list_add(&event->list, &cpts->pool); cpts_find_ts()
308 mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; cpts_find_ts()
309 seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; cpts_find_ts()
310 if (ev_type == event_type(event) && cpts_find_ts()
312 ns = timecounter_cyc2time(&cpts->tc, event->low); cpts_find_ts()
313 list_del_init(&event->list); cpts_find_ts()
314 list_add(&event->list, &cpts->pool); cpts_find_ts()
/linux-4.1.27/include/linux/platform_data/
H A Dkeyboard-pxa930_rotary.h6 * rotary can be interpreted either as a relative input event (e.g.
7 * REL_WHEEL or REL_HWHEEL) or a specific key event (e.g. UP/DOWN
/linux-4.1.27/tools/perf/python/
H A Dtwatch.py32 event = evlist.read_on_cpu(cpu)
33 if not event:
35 print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
36 event.sample_pid,
37 event.sample_tid),
38 print event
/linux-4.1.27/arch/arm64/kernel/
H A Dperf_event.c7 * This code is based heavily on the ARMv7 perf event code.
128 static int map_cpu_event(struct perf_event *event, map_cpu_event() argument
136 u64 config = event->attr.config; map_cpu_event()
138 switch (event->attr.type) { map_cpu_event()
151 armpmu_event_set_period(struct perf_event *event, armpmu_event_set_period() argument
155 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_event_set_period()
187 perf_event_update_userpage(event); armpmu_event_set_period()
193 armpmu_event_update(struct perf_event *event, armpmu_event_update() argument
197 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_event_update()
210 local64_add(delta, &event->count); armpmu_event_update()
217 armpmu_read(struct perf_event *event) armpmu_read() argument
219 struct hw_perf_event *hwc = &event->hw; armpmu_read()
225 armpmu_event_update(event, hwc, hwc->idx); armpmu_read()
229 armpmu_stop(struct perf_event *event, int flags) armpmu_stop() argument
231 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_stop()
232 struct hw_perf_event *hwc = &event->hw; armpmu_stop()
241 armpmu_event_update(event, hwc, hwc->idx); armpmu_stop()
247 armpmu_start(struct perf_event *event, int flags) armpmu_start() argument
249 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_start()
250 struct hw_perf_event *hwc = &event->hw; armpmu_start()
267 armpmu_event_set_period(event, hwc, hwc->idx); armpmu_start()
272 armpmu_del(struct perf_event *event, int flags) armpmu_del() argument
274 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_del()
276 struct hw_perf_event *hwc = &event->hw; armpmu_del()
281 armpmu_stop(event, PERF_EF_UPDATE); armpmu_del()
285 perf_event_update_userpage(event); armpmu_del()
289 armpmu_add(struct perf_event *event, int flags) armpmu_add() argument
291 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_add()
293 struct hw_perf_event *hwc = &event->hw; armpmu_add()
297 perf_pmu_disable(event->pmu); armpmu_add()
307 * If there is an event in the counter we are going to use then make armpmu_add()
310 event->hw.idx = idx; armpmu_add()
312 hw_events->events[idx] = event; armpmu_add()
316 armpmu_start(event, PERF_EF_RELOAD); armpmu_add()
319 perf_event_update_userpage(event); armpmu_add()
322 perf_pmu_enable(event->pmu); armpmu_add()
328 struct perf_event *event) validate_event()
331 struct hw_perf_event fake_event = event->hw; validate_event()
332 struct pmu *leader_pmu = event->group_leader->pmu; validate_event()
334 if (is_software_event(event)) validate_event()
340 * until after pmu->event_init(event). validate_event()
342 if (event->pmu != pmu) validate_event()
345 if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) validate_event()
348 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) validate_event()
351 armpmu = to_arm_pmu(event->pmu); validate_event()
356 validate_group(struct perf_event *event) validate_group() argument
358 struct perf_event *sibling, *leader = event->group_leader; validate_group()
369 if (!validate_event(event->pmu, &fake_pmu, leader)) validate_group()
373 if (!validate_event(event->pmu, &fake_pmu, sibling)) validate_group()
377 if (!validate_event(event->pmu, &fake_pmu, event)) validate_group()
508 hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
510 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); hw_perf_event_destroy()
528 __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
530 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); __hw_perf_event_init()
531 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
534 mapping = armpmu->map_event(event); __hw_perf_event_init()
537 pr_debug("event %x:%llx not supported\n", event->attr.type, __hw_perf_event_init()
538 event->attr.config); __hw_perf_event_init()
543 * We don't assign an index until we actually place the event onto __hw_perf_event_init()
557 armpmu->set_event_filter(hwc, &event->attr)) && __hw_perf_event_init()
558 event_requires_mode_exclusion(&event->attr)) { __hw_perf_event_init()
564 * Store the event encoding into the config_base field. __hw_perf_event_init()
581 if (event->group_leader != event) { __hw_perf_event_init()
582 err = validate_group(event); __hw_perf_event_init()
590 static int armpmu_event_init(struct perf_event *event) armpmu_event_init() argument
592 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); armpmu_event_init()
596 if (armpmu->map_event(event) == -ENOENT) armpmu_event_init()
599 event->destroy = hw_perf_event_destroy; armpmu_event_init()
614 err = __hw_perf_event_init(event); armpmu_event_init()
616 hw_perf_event_destroy(event); armpmu_event_init()
662 * Common event types.
1048 * the event that we're interested in. armv8pmu_enable_event()
1058 * Set event (if destined for PMNx counters). armv8pmu_enable_event()
1124 struct perf_event *event = cpuc->events[idx]; armv8pmu_handle_irq() local
1127 /* Ignore if we don't have an event. */ armv8pmu_handle_irq()
1128 if (!event) armv8pmu_handle_irq()
1138 hwc = &event->hw; armv8pmu_handle_irq()
1139 armpmu_event_update(event, hwc, idx); armv8pmu_handle_irq()
1141 if (!armpmu_event_set_period(event, hwc, idx)) armv8pmu_handle_irq()
1144 if (perf_event_overflow(event, &data, regs)) armv8pmu_handle_irq()
1183 struct hw_perf_event *event) armv8pmu_get_event_idx()
1186 unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; armv8pmu_get_event_idx()
1210 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1212 static int armv8pmu_set_event_filter(struct hw_perf_event *event, armv8pmu_set_event_filter() argument
1228 * construct the event type. armv8pmu_set_event_filter()
1230 event->config_base = config_base; armv8pmu_set_event_filter()
1247 static int armv8_pmuv3_map_event(struct perf_event *event) armv8_pmuv3_map_event() argument
1249 return map_cpu_event(event, &armv8_pmuv3_perf_map, armv8_pmuv3_map_event()
327 validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, struct perf_event *event) validate_event() argument
1182 armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, struct hw_perf_event *event) armv8pmu_get_event_idx() argument
/linux-4.1.27/drivers/usb/dwc3/
H A Ddebug.h128 * dwc3_gadget_event_string - returns event name
129 * @event: the event code
131 static inline const char *dwc3_gadget_event_string(u8 event) dwc3_gadget_event_string() argument
133 switch (event) { dwc3_gadget_event_string()
160 * dwc3_ep_event_string - returns event name
161 * @event: the event code dwc3_ep_event_string()
163 static inline const char *dwc3_ep_event_string(u8 event) dwc3_ep_event_string() argument
165 switch (event) { dwc3_ep_event_string()
184 * dwc3_gadget_event_type_string - return event name
185 * @event: the event code
187 static inline const char *dwc3_gadget_event_type_string(u8 event) dwc3_gadget_event_type_string() argument
189 switch (event) { dwc3_gadget_event_type_string()
/linux-4.1.27/net/llc/
H A Dllc_c_st.c4 * Description of event functions and actions there is in 802.2 LLC standard,
35 /* State transitions for LLC_CONN_EV_DISC_REQ event */
52 /* State transitions for LLC_CONN_EV_RESET_REQ event */
69 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
89 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
105 /* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */
124 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */
139 /* State transitions for LLC_CONN_EV_RX_ZZZ_CMD_Pbit_SET_X_INVAL_Nr event */
155 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_INVAL_Ns event */
171 /* State transitions for LLC_CONN_EV_RX_ZZZ_RSP_Fbit_SET_X_INVAL_Nr event */
187 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_INVAL_Ns event */
203 /* State transitions for LLC_CONN_EV_RX_BAD_PDU event */
219 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */
235 /* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_1 event */
258 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
280 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
302 /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
324 /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
353 /* State transitions for LLC_CONN_EV_CONN_REQ event */
369 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
388 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
402 /* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_1 event */
416 /* State transitions for LLC_CONN_EV_RX_XXX_YYY event */
447 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
463 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */
487 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
508 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
529 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */
549 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
570 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
611 /* State transitions for LLC_CONN_EV_DATA_REQ event */
632 /* State transitions for LLC_CONN_EV_DATA_REQ event */
653 /* State transitions for LLC_CONN_EV_DATA_REQ event */
670 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
690 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
710 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
733 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
756 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
779 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
800 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
821 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
837 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
860 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
873 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
894 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
915 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
932 /* State transitions for * LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
947 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
962 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
983 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
1000 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
1015 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
1030 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
1050 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
1067 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
1090 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
1113 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
1135 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
1157 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
1174 /* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
1193 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
1215 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
1238 /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
1261 /* State transitions for LLC_CONN_EV_TX_BUFF_FULL event */
1344 /* State transitions for LLC_CONN_EV_DATA_REQ event */
1364 /* State transitions for LLC_CONN_EV_DATA_REQ event */
1384 /* State transitions for LLC_CONN_EV_DATA_REQ event */
1401 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1421 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1441 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1460 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1479 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1498 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
1517 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_UNEXPD_Ns event */
1539 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
1561 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
1581 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
1601 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
1616 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
1634 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
1659 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
1684 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
1707 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
1730 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
1745 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
1760 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
1780 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
1795 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
1810 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
1825 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
1845 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
1860 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
1882 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
1904 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
1925 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
1946 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
1963 /* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
1982 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
2003 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
2025 /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
2047 /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
2070 /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
2157 /* State transitions for LLC_CONN_EV_DATA_REQ event */
2176 /* State transitions for LLC_CONN_EV_DATA_REQ event */
2195 /* State transitions for LLC_CONN_EV_DATA_REQ event */
2213 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
2232 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
2251 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
2266 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
2281 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
2301 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
2315 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
2340 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
2364 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
2386 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
2408 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
2425 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
2440 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
2455 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
2475 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
2490 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
2505 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
2520 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
2540 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
2555 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
2577 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
2599 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
2620 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
2641 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
2657 /* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
2676 /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
2698 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
2720 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
2743 /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
2828 /* State transitions for LLC_CONN_EV_DATA_REQ event */
2844 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
2858 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
2877 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
2893 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
2909 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
2926 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
2945 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
2962 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
2979 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
2996 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
3013 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
3030 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
3045 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
3060 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
3075 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
3090 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
3106 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
3122 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
3138 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
3153 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
3168 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
3184 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
3257 /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
3273 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
3292 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
3310 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
3328 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
3347 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
3363 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
3379 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
3395 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
3416 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
3434 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
3452 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
3470 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
3487 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
3504 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
3519 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
3534 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
3549 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
3564 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
3580 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
3596 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
3612 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
3627 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
3642 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
3658 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
3733 /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
3749 /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
3763 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
3777 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
3791 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
3806 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
3826 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
3844 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
3862 /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
3880 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
3897 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
3914 /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
3931 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
3946 /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
3961 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
3976 /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
3991 /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
4007 /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
4023 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
4039 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
4054 /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
4069 /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
4085 /* State transitions for LLC_CONN_EV_P_TMR_EXP event */
4158 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event,
4181 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event,
4204 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
4228 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
4251 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
4264 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
4287 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
4311 * LLC_CONN_EV_DATA_CONN_REQ event
4328 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4348 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 1 */
4369 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 0 */
4413 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
4429 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
4457 /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
4485 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4505 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event,
4528 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event,
4551 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
4574 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
4596 /* State transitions for DATA_CONN_REQ event */
4612 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4633 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4654 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4700 /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
4720 /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
4736 /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */
4751 /* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */
4767 /* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_X event */
4780 /* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X event */
4788 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4808 /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
4830 /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
4868 /* State transitions for LLC_CONN_EV_DISC_REQ event */
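
The llc_c_st.c hits above are all banner comments over per-event entries in the LLC connection state-transition tables. As a hedged, self-contained sketch of that table-driven pattern (all identifiers below are invented for illustration, not the kernel's structures), a (state, event) pair indexes a transition that names an action and the next state:

    /* Minimal userspace sketch of a table-driven (state, event) machine,
     * loosely modeled on the transition tables listed above.
     * Names are illustrative only. */
    #include <stdio.h>

    enum state { ST_ADM, ST_SETUP, ST_NORMAL, ST_MAX };
    enum event { EV_CONN_REQ, EV_RX_UA, EV_RX_DISC, EV_MAX };

    struct transition {
        enum state next;                 /* state entered when the event fires */
        int (*action)(const char *why);  /* work done on the transition */
    };

    static int do_log(const char *why) { printf("action: %s\n", why); return 0; }

    /* One row per current state, one column per event type. */
    static const struct transition table[ST_MAX][EV_MAX] = {
        [ST_ADM]    = { [EV_CONN_REQ] = { ST_SETUP,  do_log } },
        [ST_SETUP]  = { [EV_RX_UA]    = { ST_NORMAL, do_log },
                        [EV_RX_DISC]  = { ST_ADM,    do_log } },
        [ST_NORMAL] = { [EV_RX_DISC]  = { ST_ADM,    do_log } },
    };

    static enum state step(enum state cur, enum event ev)
    {
        const struct transition *t = &table[cur][ev];

        if (!t->action)            /* unhandled (state, event) pair: stay put */
            return cur;
        t->action("transition");
        return t->next;
    }

    int main(void)
    {
        enum state s = ST_ADM;

        s = step(s, EV_CONN_REQ);  /* ADM -> SETUP */
        s = step(s, EV_RX_UA);     /* SETUP -> NORMAL */
        printf("final state %d\n", s);
        return 0;
    }
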
H A Dllc_s_st.c30 * LLC_SAP_EV_ACTIVATION_REQ event
49 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_UI event */
61 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_UNITDATA_REQ event */
73 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_XID_REQ event */
85 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_C event */
97 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_R event */
109 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_TEST_REQ event */
121 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_C event */
133 /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_R event */
146 * LLC_SAP_EV_DEACTIVATION_REQ event
H A Dllc_s_ac.c7 * All functions have one sap and one event as input argument. All of
32 * @skb: the event to forward
35 * UNITDATA INDICATION; verify our event is the kind we expect
46 * @skb: the event to send
49 * primitive from the network layer. Verifies event is a primitive type of
50 * event. Verify the primitive is a UNITDATA REQUEST.
69 * @skb: the event to send
72 * primitive from the network layer. Verify event is a primitive type
73 * event. Verify the primitive is a XID REQUEST.
92 * @skb: the event to send
95 * command PDU. Verify event is a PDU type event
123 * @skb: the event to send
126 * primitive from the network layer. Verify event is a primitive type
127 * event; verify the primitive is a TEST REQUEST.
172 * @skb: the event to send
174 * Report data link status to layer management. Verify our event is the
185 * @skb: the event to send
199 * @skb: the event to send
202 * primitive. Verify our event is a PDU type event.
/linux-4.1.27/net/rds/
H A Drdma_transport.c69 struct rdma_cm_event *event) rds_rdma_cm_event_handler()
76 rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id, rds_rdma_cm_event_handler()
77 event->event, rds_cm_event_str(event->event)); rds_rdma_cm_event_handler()
95 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) rds_rdma_cm_event_handler()
101 switch (event->event) { rds_rdma_cm_event_handler()
103 ret = trans->cm_handle_connect(cm_id, event); rds_rdma_cm_event_handler()
118 trans->cm_connect_complete(conn, event); rds_rdma_cm_event_handler()
133 rdsdebug("DISCONNECT event - dropping connection " rds_rdma_cm_event_handler()
141 printk(KERN_ERR "RDS: unknown event %u (%s)!\n", rds_rdma_cm_event_handler()
142 event->event, rds_cm_event_str(event->event)); rds_rdma_cm_event_handler()
150 rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event, rds_rdma_cm_event_handler()
151 rds_cm_event_str(event->event), ret); rds_rdma_cm_event_handler()
68 rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) rds_rdma_cm_event_handler() argument
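
rds_rdma_cm_event_handler() above switches on event->event and logs it through a string helper before dispatching. The RDS callbacks themselves are kernel-internal; a hedged userspace analogue, assuming librdmacm is available, drains a connection-manager event channel the same way:

    /* Hedged sketch: draining CM events from an rdma_event_channel with
     * librdmacm and dispatching on event->event, much like the kernel
     * handler above.  Error handling is abbreviated. */
    #include <stdio.h>
    #include <rdma/rdma_cma.h>

    static int handle_events(struct rdma_event_channel *ch)
    {
        struct rdma_cm_event *event;

        while (rdma_get_cm_event(ch, &event) == 0) {
            printf("cm event %u (%s)\n",
                   event->event, rdma_event_str(event->event));

            switch (event->event) {
            case RDMA_CM_EVENT_CONNECT_REQUEST:
                /* accept or reject the incoming connection here */
                break;
            case RDMA_CM_EVENT_ESTABLISHED:
                /* connection is up; post receives, etc. */
                break;
            case RDMA_CM_EVENT_DISCONNECTED:
                rdma_ack_cm_event(event);
                return 0;             /* drop the connection and stop */
            default:
                /* unexpected event types are only logged */
                break;
            }
            rdma_ack_cm_event(event); /* every event must be acknowledged */
        }
        return -1;
    }
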
/linux-4.1.27/sound/firewire/dice/
H A Ddice-hwdep.c17 union snd_firewire_event event; hwdep_read() local
31 memset(&event, 0, sizeof(event)); hwdep_read()
33 event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; hwdep_read()
34 event.lock_status.status = dice->dev_lock_count > 0; hwdep_read()
37 count = min_t(long, count, sizeof(event.lock_status)); hwdep_read()
39 event.dice_notification.type = hwdep_read()
41 event.dice_notification.notification = dice->notification_bits; hwdep_read()
44 count = min_t(long, count, sizeof(event.dice_notification)); hwdep_read()
49 if (copy_to_user(buf, &event, count)) hwdep_read()
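
The hwdep_read() hits show the driver filling a fixed-size event record, clamping the caller's count to the record size, and copying only that much out. A minimal userspace analogue of that read pattern, with invented names and memcpy() standing in for copy_to_user():

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct lock_status_event {
        uint32_t type;      /* which member of the event union is valid */
        uint32_t status;    /* 1 if some client currently holds the lock */
    };

    static size_t event_read(void *buf, size_t count, int locked)
    {
        struct lock_status_event ev;

        memset(&ev, 0, sizeof(ev));
        ev.type = 1;                      /* "lock status" in this sketch */
        ev.status = locked ? 1 : 0;

        if (count > sizeof(ev))           /* never copy past the record */
            count = sizeof(ev);
        memcpy(buf, &ev, count);
        return count;                     /* bytes actually delivered */
    }

    int main(void)
    {
        struct lock_status_event out;
        size_t n = event_read(&out, sizeof(out), 1);

        printf("read %zu bytes, status=%u\n", n, out.status);
        return 0;
    }
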
/linux-4.1.27/include/media/
H A Dv4l2-event.h2 * v4l2-event.h
35 * Events are subscribed per-filehandle. An event specification consists of a
37 * 'id' field. So an event is uniquely identified by the (type, id) tuple.
40 * struct is added to that list, one for every subscribed event.
45 * v4l2_fh struct so VIDIOC_DQEVENT will know which event to dequeue first.
47 * Finally, if the event subscription is associated with a particular object
49 * so that an event can be raised by that object. So the 'node' field can
63 * it knows who subscribed an event to that object.
71 /** struct v4l2_kevent - Internal kernel event struct.
74 * @event: The event itself.
79 struct v4l2_event event; member in struct:v4l2_kevent
82 /** struct v4l2_subscribed_event_ops - Subscribed event operations.
85 * @replace: Optional callback that can replace event 'old' with event 'new'.
86 * @merge: Optional callback that can merge event 'old' into event 'new'.
95 /** struct v4l2_subscribed_event - Internal struct representing a subscribed event.
100 * @fh: Filehandle that subscribed to this event.
101 * @node: List node that hooks into the object's event list (if there is one).
104 * @first: The index of the events containing the oldest available event.
122 int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
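
The v4l2-event.h comments describe per-filehandle subscriptions keyed by a (type, id) tuple, with v4l2_event_dequeue() backing VIDIOC_DQEVENT. A hedged userspace sketch of driving that interface, assuming /dev/video0 exists and its driver emits control events:

    #include <fcntl.h>
    #include <string.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);
        if (fd < 0) { perror("open"); return 1; }

        /* Subscribe: the (type, id) tuple identifies the event source. */
        struct v4l2_event_subscription sub;
        memset(&sub, 0, sizeof(sub));
        sub.type = V4L2_EVENT_CTRL;          /* control-change events */
        sub.id   = V4L2_CID_BRIGHTNESS;      /* ...for this control only */
        if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
            perror("VIDIOC_SUBSCRIBE_EVENT");

        /* Dequeue one pending event (blocks unless O_NONBLOCK is set). */
        struct v4l2_event ev;
        memset(&ev, 0, sizeof(ev));
        if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
            printf("event type=%u id=%u pending=%u\n",
                   ev.type, ev.id, ev.pending);

        close(fd);
        return 0;
    }
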
/linux-4.1.27/drivers/scsi/libsas/
H A Dsas_event.c43 static void sas_queue_event(int event, unsigned long *pending, sas_queue_event() argument
47 if (!test_and_set_bit(event, pending)) { sas_queue_event()
119 static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) notify_ha_event() argument
121 BUG_ON(event >= HA_NUM_EVENTS); notify_ha_event()
123 sas_queue_event(event, &sas_ha->pending, notify_ha_event()
124 &sas_ha->ha_events[event].work, sas_ha); notify_ha_event()
127 static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) notify_port_event() argument
131 BUG_ON(event >= PORT_NUM_EVENTS); notify_port_event()
133 sas_queue_event(event, &phy->port_events_pending, notify_port_event()
134 &phy->port_events[event].work, ha); notify_port_event()
137 void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) sas_notify_phy_event() argument
141 BUG_ON(event >= PHY_NUM_EVENTS); sas_notify_phy_event()
143 sas_queue_event(event, &phy->phy_events_pending, sas_notify_phy_event()
144 &phy->phy_events[event].work, ha); sas_notify_phy_event()
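
sas_queue_event() only schedules the work item when test_and_set_bit() reports the event bit was clear, so each event type is pending at most once. A hedged userspace sketch of the same idiom using C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_EVENTS 8

    static atomic_ulong pending;    /* one bit per event type */

    /* Returns 1 if the event was newly queued, 0 if already pending. */
    static int queue_event(int event)
    {
        unsigned long bit = 1UL << event;
        unsigned long old = atomic_fetch_or(&pending, bit);

        if (old & bit)
            return 0;               /* already queued; nothing to do */
        printf("queued event %d\n", event);
        return 1;
    }

    /* Consumer side: claim and clear one pending event, -1 if none. */
    static int dequeue_event(void)
    {
        unsigned long snap = atomic_load(&pending);

        for (int ev = 0; ev < MAX_EVENTS; ev++) {
            unsigned long bit = 1UL << ev;
            if ((snap & bit) && (atomic_fetch_and(&pending, ~bit) & bit))
                return ev;
        }
        return -1;
    }

    int main(void)
    {
        queue_event(3);
        queue_event(3);             /* duplicate: suppressed */
        int a = dequeue_event();
        int b = dequeue_event();
        printf("dequeued %d, then %d\n", a, b);
        return 0;
    }
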
/linux-4.1.27/sound/core/seq/oss/
H A Dseq_oss_rw.c160 * insert event record to write queue
167 struct snd_seq_event event; insert_queue() local
169 /* if this is a timing event, process the current time */ insert_queue()
173 /* parse this event */ insert_queue()
174 memset(&event, 0, sizeof(event)); insert_queue()
176 event.type = SNDRV_SEQ_EVENT_NOTEOFF; insert_queue()
177 snd_seq_oss_fill_addr(dp, &event, dp->addr.port, dp->addr.client); insert_queue()
179 if (snd_seq_oss_process_event(dp, rec, &event)) insert_queue()
180 return 0; /* invalid event - no need to insert queue */ insert_queue()
182 event.time.tick = snd_seq_oss_timer_cur_tick(dp->timer); insert_queue()
184 snd_seq_oss_dispatch(dp, &event, 0, 0); insert_queue()
187 rc = snd_seq_kernel_client_enqueue(dp->cseq, &event, 0, 0); insert_queue()
189 rc = snd_seq_kernel_client_enqueue_blocking(dp->cseq, &event, opt, 0, 0); insert_queue()
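
insert_queue() zeroes a snd_seq_event, fills in type, address and tick time, then enqueues it through the kernel sequencer client API. A hedged userspace counterpart using alsa-lib (assuming alsa-lib is installed; error handling abbreviated) performs the same clear/fill/enqueue steps:

    #include <alsa/asoundlib.h>

    int main(void)
    {
        snd_seq_t *seq;
        if (snd_seq_open(&seq, "default", SND_SEQ_OPEN_OUTPUT, 0) < 0)
            return 1;
        snd_seq_set_client_name(seq, "event-sketch");

        int port = snd_seq_create_simple_port(seq, "out",
                        SND_SEQ_PORT_CAP_READ | SND_SEQ_PORT_CAP_SUBS_READ,
                        SND_SEQ_PORT_TYPE_MIDI_GENERIC |
                        SND_SEQ_PORT_TYPE_APPLICATION);

        snd_seq_event_t ev;
        snd_seq_ev_clear(&ev);               /* like memset(&event, 0, ...) */
        snd_seq_ev_set_source(&ev, port);    /* who the event comes from */
        snd_seq_ev_set_subs(&ev);            /* deliver to subscribers */
        snd_seq_ev_set_direct(&ev);          /* no queue: dispatch now */
        snd_seq_ev_set_noteon(&ev, 0, 60, 100);

        snd_seq_event_output(seq, &ev);      /* enqueue in the output pool */
        snd_seq_drain_output(seq);           /* push it to the kernel */
        snd_seq_close(seq);
        return 0;
    }
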
/linux-4.1.27/arch/alpha/kernel/
H A Dperf_event.c40 struct perf_event *event[MAX_HWEVENTS]; member in struct:cpu_hw_events
41 /* Event type of each scheduled event. */
43 /* Current index of each scheduled event; if not yet determined
61 /* Mapping of the perf system hw event types to indigenous event types */
86 /* Subroutine for checking validity of a raw event for this PMU. */
106 * EV67 PMC event types
108 * There is no one-to-one mapping of the possible hw event types to the
110 * own hw event type identifiers.
122 /* Mapping of the hw event types to the perf tool interface */
136 * The mapping used for one event only - these must be in same order as enum
151 static int ev67_check_constraints(struct perf_event **event, ev67_check_constraints() argument
198 event[0]->hw.idx = idx0; ev67_check_constraints()
199 event[0]->hw.config_base = config; ev67_check_constraints()
201 event[1]->hw.idx = idx0 ^ 1; ev67_check_constraints()
202 event[1]->hw.config_base = config; ev67_check_constraints()
251 static int alpha_perf_event_set_period(struct perf_event *event, alpha_perf_event_set_period() argument
286 perf_event_update_userpage(event); alpha_perf_event_set_period()
306 static unsigned long alpha_perf_event_update(struct perf_event *event, alpha_perf_event_update() argument
329 local64_add(delta, &event->count); alpha_perf_event_update()
337 * Collect all HW events into the array event[].
340 struct perf_event *event[], unsigned long *evtype, collect_events()
349 event[n] = group; collect_events()
357 event[n] = pe; collect_events()
399 struct perf_event *pe = cpuc->event[j]; maybe_change_configuration()
411 struct perf_event *pe = cpuc->event[j]; maybe_change_configuration()
423 cpuc->config = cpuc->event[0]->hw.config_base; maybe_change_configuration()
428 /* Schedule perf HW event on to PMU.
430 * returned from perf event initialisation.
432 static int alpha_pmu_add(struct perf_event *event, int flags) alpha_pmu_add() argument
435 struct hw_perf_event *hwc = &event->hw; alpha_pmu_add()
448 perf_pmu_disable(event->pmu); alpha_pmu_add()
454 /* Insert event on to PMU and if successful modify ret to valid return */ alpha_pmu_add()
457 cpuc->event[n0] = event; alpha_pmu_add()
458 cpuc->evtype[n0] = event->hw.event_base; alpha_pmu_add()
461 if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) { alpha_pmu_add()
473 perf_pmu_enable(event->pmu); alpha_pmu_add()
482 * returned from perf event initialisation.
484 static void alpha_pmu_del(struct perf_event *event, int flags) alpha_pmu_del() argument
487 struct hw_perf_event *hwc = &event->hw; alpha_pmu_del()
491 perf_pmu_disable(event->pmu); alpha_pmu_del()
495 if (event == cpuc->event[j]) { alpha_pmu_del()
502 cpuc->event[j - 1] = cpuc->event[j]; alpha_pmu_del()
508 /* Absorb the final count and turn off the event. */ alpha_pmu_del()
509 alpha_perf_event_update(event, hwc, idx, 0); alpha_pmu_del()
510 perf_event_update_userpage(event); alpha_pmu_del()
519 perf_pmu_enable(event->pmu); alpha_pmu_del()
523 static void alpha_pmu_read(struct perf_event *event) alpha_pmu_read() argument
525 struct hw_perf_event *hwc = &event->hw; alpha_pmu_read()
527 alpha_perf_event_update(event, hwc, hwc->idx, 0); alpha_pmu_read()
531 static void alpha_pmu_stop(struct perf_event *event, int flags) alpha_pmu_stop() argument
533 struct hw_perf_event *hwc = &event->hw; alpha_pmu_stop()
542 alpha_perf_event_update(event, hwc, hwc->idx, 0); alpha_pmu_stop()
551 static void alpha_pmu_start(struct perf_event *event, int flags) alpha_pmu_start() argument
553 struct hw_perf_event *hwc = &event->hw; alpha_pmu_start()
561 alpha_perf_event_set_period(event, hwc, hwc->idx); alpha_pmu_start()
593 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
601 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
603 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
604 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
611 /* We only support a limited range of HARDWARE event types with one __hw_perf_event_init()
612 * only programmable via a RAW event type. __hw_perf_event_init()
639 * We place the event type in event_base here and leave calculation __hw_perf_event_init()
654 if (event->group_leader != event) { __hw_perf_event_init()
655 n = collect_events(event->group_leader, __hw_perf_event_init()
662 evts[n] = event; __hw_perf_event_init()
671 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
693 * Main entry point to initialise a HW performance event.
695 static int alpha_pmu_event_init(struct perf_event *event) alpha_pmu_event_init() argument
700 if (has_branch_stack(event)) alpha_pmu_event_init()
703 switch (event->attr.type) { alpha_pmu_event_init()
717 err = __hw_perf_event_init(event); alpha_pmu_event_init()
813 struct perf_event *event; alpha_perf_event_irq_handler() local
844 /* This can occur if the event is disabled right on a PMC overflow. */ alpha_perf_event_irq_handler()
849 event = cpuc->event[j]; alpha_perf_event_irq_handler()
851 if (unlikely(!event)) { alpha_perf_event_irq_handler()
854 pr_warning("PMI: No event at index %d!\n", idx); alpha_perf_event_irq_handler()
859 hwc = &event->hw; alpha_perf_event_irq_handler()
860 alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1); alpha_perf_event_irq_handler()
863 if (alpha_perf_event_set_period(event, hwc, idx)) { alpha_perf_event_irq_handler()
864 if (perf_event_overflow(event, &data, regs)) { alpha_perf_event_irq_handler()
868 alpha_pmu_stop(event, 0); alpha_perf_event_irq_handler()
339 collect_events(struct perf_event *group, int max_count, struct perf_event *event[], unsigned long *evtype, int *current_idx) collect_events() argument
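
The alpha perf_event.c hits implement the architecture half of the perf interface: mapping events onto PMCs, programming sample periods, and accumulating count deltas. From userspace the same machinery is reached through perf_event_open(); a hedged sketch (raw syscall, since glibc provides no wrapper) that counts retired instructions around a busy loop:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;                 /* start stopped, enable explicitly */
        attr.exclude_kernel = 1;
        attr.exclude_hv = 1;

        int fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */,
                                 -1 /* no group */, 0);
        if (fd < 0) { perror("perf_event_open"); return 1; }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

        for (volatile int i = 0; i < 1000000; i++)   /* measured work */
            ;

        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        uint64_t count;
        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("instructions: %llu\n", (unsigned long long)count);

        close(fd);
        return 0;
    }
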
/linux-4.1.27/drivers/uwb/
H A Duwbd.c29 * communicate with this daemon through an event queue. Daemon wakes
31 * function is extracted from a table based on the event's type and
34 * . Lock protecting the event list has to be an spinlock and locked
40 * uwbd_event_queue(). They just get the event, chew it to make it
47 * data blob, which depends on the event. The header is 'struct
52 * To find a handling function for an event, the type is used to index
54 * with the subtype to get the function that handles the event. Start
81 * Return !0 if the event needs not to be freed (ie the handler
83 * event.
91 * Properties of a UWBD event
93 * @handler: the function that will handle this event
94 * @name: text name of event
162 * Handle an URC event passed to the UWB Daemon
164 * @evt: the event to handle
165 * @returns: 0 if the event can be kfreed, !0 on the contrary
173 * The event structure passed to the event handler has the radio
185 u16 event; uwbd_event_handle_urc() local
188 event = le16_to_cpu(evt->notif.rceb->wEvent); uwbd_event_handle_urc()
196 if (event >= type_table->size) uwbd_event_handle_urc()
198 handler = type_table->uwbd_events[event].handler; uwbd_event_handle_urc()
206 "UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n", uwbd_event_handle_urc()
207 type, event, context, result); uwbd_event_handle_urc()
247 dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type); uwbd_event_handle()
322 * Queue an event for the management daemon
324 * When some lower layer receives an event, it uses this function to
327 * Once you pass the event, you don't own it any more, but the daemon
331 * If the daemon is not running, we just free the event.
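
The uwbd comments describe finding a handler by indexing a per-type table with the event's subtype, with bounds checks at each level before dispatch. A hedged, self-contained sketch of that two-level lookup (all names invented for illustration):

    #include <stdio.h>
    #include <stddef.h>

    struct evt { unsigned type, subtype; };
    typedef int (*evt_handler)(struct evt *evt);

    static int handle_beacon(struct evt *e) { (void)e; printf("beacon\n"); return 0; }
    static int handle_drp(struct evt *e)    { (void)e; printf("drp\n");    return 0; }

    struct evt_type_table {
        const evt_handler *handlers;
        size_t size;
    };

    static const evt_handler rc_handlers[] = { handle_beacon, handle_drp };

    static const struct evt_type_table type_tables[] = {
        [0] = { rc_handlers, sizeof(rc_handlers) / sizeof(rc_handlers[0]) },
    };

    static int dispatch(struct evt *e)
    {
        if (e->type >= sizeof(type_tables) / sizeof(type_tables[0]))
            return -1;                         /* unknown event type */
        const struct evt_type_table *t = &type_tables[e->type];
        if (e->subtype >= t->size || !t->handlers[e->subtype])
            return -1;                         /* unknown subtype */
        return t->handlers[e->subtype](e);     /* found a handler: run it */
    }

    int main(void)
    {
        struct evt ok = { 0, 1 }, bad = { 0, 7 };
        printf("ok=%d bad=%d\n", dispatch(&ok), dispatch(&bad));
        return 0;
    }
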
/linux-4.1.27/drivers/scsi/bfa/
H A Dbfa_fcs.c245 enum bfa_fcs_fabric_event event);
247 enum bfa_fcs_fabric_event event);
249 enum bfa_fcs_fabric_event event);
251 enum bfa_fcs_fabric_event event);
253 enum bfa_fcs_fabric_event event);
255 enum bfa_fcs_fabric_event event);
257 enum bfa_fcs_fabric_event event);
259 enum bfa_fcs_fabric_event event);
261 enum bfa_fcs_fabric_event event);
263 enum bfa_fcs_fabric_event event);
265 enum bfa_fcs_fabric_event event);
267 enum bfa_fcs_fabric_event event);
269 enum bfa_fcs_fabric_event event);
275 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_uninit()
278 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_uninit()
280 switch (event) { bfa_fcs_fabric_sm_uninit()
292 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_uninit()
301 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_created()
306 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_created()
308 switch (event) { bfa_fcs_fabric_sm_created()
339 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_created()
344 * Link is down, awaiting LINK UP event from port. This is also the
349 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_linkdown()
354 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_linkdown()
356 switch (event) { bfa_fcs_fabric_sm_linkdown()
386 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_linkdown()
395 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_flogi()
398 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_flogi()
400 switch (event) { bfa_fcs_fabric_sm_flogi()
409 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_flogi()
449 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_flogi()
456 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_flogi_retry()
459 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_flogi_retry()
461 switch (event) { bfa_fcs_fabric_sm_flogi_retry()
479 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_flogi_retry()
488 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_auth()
491 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_auth()
493 switch (event) { bfa_fcs_fabric_sm_auth()
519 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_auth()
528 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_auth_failed()
531 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_auth_failed()
533 switch (event) { bfa_fcs_fabric_sm_auth_failed()
545 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_auth_failed()
554 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_loopback()
557 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_loopback()
559 switch (event) { bfa_fcs_fabric_sm_loopback()
571 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_loopback()
580 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_nofabric()
583 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_nofabric()
585 switch (event) { bfa_fcs_fabric_sm_nofabric()
607 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_nofabric()
616 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_online()
621 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_online()
623 switch (event) { bfa_fcs_fabric_sm_online()
653 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_online()
662 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_evfp()
665 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_evfp()
667 switch (event) { bfa_fcs_fabric_sm_evfp()
677 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_evfp()
686 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_evfp_done()
689 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_evfp_done()
697 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_isolated()
703 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_isolated()
718 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_deleting()
721 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_deleting()
723 switch (event) { bfa_fcs_fabric_sm_deleting()
737 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_deleting()
746 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_stopping()
751 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_stopping()
753 switch (event) { bfa_fcs_fabric_sm_stopping()
774 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_stopping()
783 enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_cleanup()
786 bfa_trc(fabric->fcs, event); bfa_fcs_fabric_sm_cleanup()
788 switch (event) { bfa_fcs_fabric_sm_cleanup()
797 * Ignore - can get this event if we get notified about IOC down bfa_fcs_fabric_sm_cleanup()
803 bfa_sm_fault(fabric->fcs, event); bfa_fcs_fabric_sm_cleanup()
1028 * notify online event to base and then virtual ports bfa_fcs_fabric_notify_online()
1048 * notify offline event first to vports and then base port. bfa_fcs_fabric_notify_offline()
1494 enum bfa_port_aen_event event) bfa_fcs_fabric_aen_post()
1508 BFA_AEN_CAT_PORT, event); bfa_fcs_fabric_aen_post()
1531 * Don't generate a fabric name change event in this case. bfa_fcs_fabric_set_fabric_name()
1615 bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) bfa_fcs_port_event_handler() argument
1619 bfa_trc(fcs, event); bfa_fcs_port_event_handler()
1621 switch (event) { bfa_fcs_port_event_handler()
274 bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_uninit() argument
300 bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_created() argument
348 bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_linkdown() argument
394 bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_flogi() argument
455 bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_flogi_retry() argument
487 bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_auth() argument
527 bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_auth_failed() argument
553 bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_loopback() argument
579 bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_nofabric() argument
615 bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_online() argument
661 bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_evfp() argument
685 bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_evfp_done() argument
696 bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_isolated() argument
717 bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_deleting() argument
745 bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_stopping() argument
782 bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) bfa_fcs_fabric_sm_cleanup() argument
1493 bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port, enum bfa_port_aen_event event) bfa_fcs_fabric_aen_post() argument
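
Every bfa_fcs_fabric_sm_* hit above follows the same shape: the current state is a function pointer, each state function switches on the incoming event, installs the next state's handler, and routes unexpected events to bfa_sm_fault(). A hedged, self-contained sketch of that "state as function" pattern (identifiers invented for illustration, not the driver's):

    #include <stdio.h>

    struct fabric;
    enum fabric_event { EV_CREATE, EV_LINK_UP, EV_LINK_DOWN, EV_DELETE };
    typedef void (*fabric_sm)(struct fabric *f, enum fabric_event ev);

    struct fabric { fabric_sm sm; };

    static void sm_set_state(struct fabric *f, fabric_sm next) { f->sm = next; }
    static void sm_fault(struct fabric *f, enum fabric_event ev)
    {
        (void)f;
        fprintf(stderr, "unhandled event %d in this state\n", ev);
    }

    static void sm_uninit(struct fabric *f, enum fabric_event ev);
    static void sm_linkdown(struct fabric *f, enum fabric_event ev);
    static void sm_online(struct fabric *f, enum fabric_event ev);

    static void sm_uninit(struct fabric *f, enum fabric_event ev)
    {
        switch (ev) {
        case EV_CREATE:  sm_set_state(f, sm_linkdown); break;
        default:         sm_fault(f, ev);
        }
    }

    static void sm_linkdown(struct fabric *f, enum fabric_event ev)
    {
        switch (ev) {
        case EV_LINK_UP: sm_set_state(f, sm_online);   break;
        case EV_DELETE:  sm_set_state(f, sm_uninit);   break;
        default:         sm_fault(f, ev);
        }
    }

    static void sm_online(struct fabric *f, enum fabric_event ev)
    {
        switch (ev) {
        case EV_LINK_DOWN: sm_set_state(f, sm_linkdown); break;
        default:           sm_fault(f, ev);
        }
    }

    static void send_event(struct fabric *f, enum fabric_event ev) { f->sm(f, ev); }

    int main(void)
    {
        struct fabric f = { .sm = sm_uninit };

        send_event(&f, EV_CREATE);
        send_event(&f, EV_LINK_UP);
        send_event(&f, EV_DELETE);   /* not handled while online: fault path */
        return 0;
    }
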
H A Dbfa_fcs_fcpim.c41 enum bfa_itnim_aen_event event);
44 enum bfa_fcs_itnim_event event);
46 enum bfa_fcs_itnim_event event);
48 enum bfa_fcs_itnim_event event);
50 enum bfa_fcs_itnim_event event);
52 enum bfa_fcs_itnim_event event);
54 enum bfa_fcs_itnim_event event);
56 enum bfa_fcs_itnim_event event);
58 enum bfa_fcs_itnim_event event);
60 enum bfa_fcs_itnim_event event);
79 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_offline()
82 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_offline()
84 switch (event) { bfa_fcs_itnim_sm_offline()
104 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_offline()
111 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli_send()
114 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_prli_send()
116 switch (event) { bfa_fcs_itnim_sm_prli_send()
140 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_prli_send()
146 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli()
149 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_prli()
151 switch (event) { bfa_fcs_itnim_sm_prli()
192 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_prli()
198 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hal_rport_online()
201 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_hal_rport_online()
203 switch (event) { bfa_fcs_itnim_sm_hal_rport_online()
230 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_hal_rport_online()
236 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli_retry()
239 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_prli_retry()
241 switch (event) { bfa_fcs_itnim_sm_prli_retry()
275 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_prli_retry()
281 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hcb_online()
288 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_hcb_online()
290 switch (event) { bfa_fcs_itnim_sm_hcb_online()
313 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_hcb_online()
319 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_online()
326 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_online()
328 switch (event) { bfa_fcs_itnim_sm_online()
354 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_online()
360 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hcb_offline()
363 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_hcb_offline()
365 switch (event) { bfa_fcs_itnim_sm_hcb_offline()
377 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_hcb_offline()
388 enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_initiator()
391 bfa_trc(itnim->fcs, event); bfa_fcs_itnim_sm_initiator()
393 switch (event) { bfa_fcs_itnim_sm_initiator()
416 bfa_sm_fault(itnim->fcs, event); bfa_fcs_itnim_sm_initiator()
422 enum bfa_itnim_aen_event event) bfa_fcs_itnim_aen_post()
444 BFA_AEN_CAT_ITNIM, event); bfa_fcs_itnim_aen_post()
78 bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_offline() argument
110 bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli_send() argument
145 bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli() argument
197 bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hal_rport_online() argument
235 bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_prli_retry() argument
280 bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hcb_online() argument
318 bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_online() argument
359 bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_hcb_offline() argument
387 bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) bfa_fcs_itnim_sm_initiator() argument
421 bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, enum bfa_itnim_aen_event event) bfa_fcs_itnim_aen_post() argument
H A Dbfa_fcs_lport.c116 enum bfa_fcs_lport_event event);
118 enum bfa_fcs_lport_event event);
120 enum bfa_fcs_lport_event event);
122 enum bfa_fcs_lport_event event);
124 enum bfa_fcs_lport_event event);
126 enum bfa_fcs_lport_event event);
131 enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_uninit()
134 bfa_trc(port->fcs, event); bfa_fcs_lport_sm_uninit()
136 switch (event) { bfa_fcs_lport_sm_uninit()
142 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_sm_uninit()
148 enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_init()
151 bfa_trc(port->fcs, event); bfa_fcs_lport_sm_init()
153 switch (event) { bfa_fcs_lport_sm_init()
176 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_sm_init()
183 enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_online()
189 bfa_trc(port->fcs, event); bfa_fcs_lport_sm_online()
191 switch (event) { bfa_fcs_lport_sm_online()
236 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_sm_online()
243 enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_offline()
249 bfa_trc(port->fcs, event); bfa_fcs_lport_sm_offline()
251 switch (event) { bfa_fcs_lport_sm_offline()
292 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_sm_offline()
298 enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_stopping()
301 bfa_trc(port->fcs, event); bfa_fcs_lport_sm_stopping()
303 switch (event) { bfa_fcs_lport_sm_stopping()
316 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_sm_stopping()
323 enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_deleting()
326 bfa_trc(port->fcs, event); bfa_fcs_lport_sm_deleting()
328 switch (event) { bfa_fcs_lport_sm_deleting()
337 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_sm_deleting()
350 enum bfa_lport_aen_event event) bfa_fcs_lport_aen_post()
367 BFA_AEN_CAT_LPORT, event); bfa_fcs_lport_aen_post()
1440 enum port_fdmi_event event);
1443 enum port_fdmi_event event);
1445 enum port_fdmi_event event);
1448 enum port_fdmi_event event);
1451 enum port_fdmi_event event);
1453 enum port_fdmi_event event);
1456 enum port_fdmi_event event);
1459 enum port_fdmi_event event);
1461 enum port_fdmi_event event);
1464 enum port_fdmi_event event);
1466 enum port_fdmi_event event);
1469 enum port_fdmi_event event);
1475 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_offline()
1480 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_offline()
1484 switch (event) { bfa_fcs_lport_fdmi_sm_offline()
1509 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_offline()
1515 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_sending_rhba()
1520 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_sending_rhba()
1522 switch (event) { bfa_fcs_lport_fdmi_sm_sending_rhba()
1534 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_sending_rhba()
1540 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rhba()
1545 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_rhba()
1547 switch (event) { bfa_fcs_lport_fdmi_sm_rhba()
1583 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_rhba()
1589 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rhba_retry()
1594 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_rhba_retry()
1596 switch (event) { bfa_fcs_lport_fdmi_sm_rhba_retry()
1611 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_rhba_retry()
1620 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_sending_rprt()
1625 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_sending_rprt()
1627 switch (event) { bfa_fcs_lport_fdmi_sm_sending_rprt()
1639 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_sending_rprt()
1645 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rprt()
1650 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_rprt()
1652 switch (event) { bfa_fcs_lport_fdmi_sm_rprt()
1686 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_rprt()
1692 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rprt_retry()
1697 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_rprt_retry()
1699 switch (event) { bfa_fcs_lport_fdmi_sm_rprt_retry()
1714 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_rprt_retry()
1723 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_sending_rpa()
1728 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_sending_rpa()
1730 switch (event) { bfa_fcs_lport_fdmi_sm_sending_rpa()
1742 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_sending_rpa()
1748 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rpa()
1753 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_rpa()
1755 switch (event) { bfa_fcs_lport_fdmi_sm_rpa()
1787 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_rpa()
1793 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rpa_retry()
1798 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_rpa_retry()
1800 switch (event) { bfa_fcs_lport_fdmi_sm_rpa_retry()
1815 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_rpa_retry()
1821 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_online()
1826 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_online()
1828 switch (event) { bfa_fcs_lport_fdmi_sm_online()
1834 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_fdmi_sm_online()
1842 enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_disabled()
1847 bfa_trc(port->fcs, event); bfa_fcs_lport_fdmi_sm_disabled()
2868 enum port_ms_event event);
2870 enum port_ms_event event);
2872 enum port_ms_event event);
2874 enum port_ms_event event);
2876 enum port_ms_event event);
2878 enum port_ms_event event);
2880 enum port_ms_event event);
2882 enum port_ms_event event);
2884 enum port_ms_event event);
2886 enum port_ms_event event);
2888 enum port_ms_event event);
2894 enum port_ms_event event) bfa_fcs_lport_ms_sm_offline()
2897 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_offline()
2899 switch (event) { bfa_fcs_lport_ms_sm_offline()
2909 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_offline()
2915 enum port_ms_event event) bfa_fcs_lport_ms_sm_plogi_sending()
2918 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_plogi_sending()
2920 switch (event) { bfa_fcs_lport_ms_sm_plogi_sending()
2932 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_plogi_sending()
2938 enum port_ms_event event) bfa_fcs_lport_ms_sm_plogi()
2941 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_plogi()
2943 switch (event) { bfa_fcs_lport_ms_sm_plogi()
2983 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_plogi()
2989 enum port_ms_event event) bfa_fcs_lport_ms_sm_plogi_retry()
2992 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_plogi_retry()
2994 switch (event) { bfa_fcs_lport_ms_sm_plogi_retry()
3009 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_plogi_retry()
3015 enum port_ms_event event) bfa_fcs_lport_ms_sm_online()
3018 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_online()
3020 switch (event) { bfa_fcs_lport_ms_sm_online()
3032 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_online()
3038 enum port_ms_event event) bfa_fcs_lport_ms_sm_gmal_sending()
3041 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gmal_sending()
3043 switch (event) { bfa_fcs_lport_ms_sm_gmal_sending()
3055 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gmal_sending()
3061 enum port_ms_event event) bfa_fcs_lport_ms_sm_gmal()
3064 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gmal()
3066 switch (event) { bfa_fcs_lport_ms_sm_gmal()
3095 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gmal()
3101 enum port_ms_event event) bfa_fcs_lport_ms_sm_gmal_retry()
3104 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gmal_retry()
3106 switch (event) { bfa_fcs_lport_ms_sm_gmal_retry()
3121 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gmal_retry()
3241 enum port_ms_event event) bfa_fcs_lport_ms_sm_gfn_sending()
3244 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gfn_sending()
3246 switch (event) { bfa_fcs_lport_ms_sm_gfn_sending()
3258 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gfn_sending()
3264 enum port_ms_event event) bfa_fcs_lport_ms_sm_gfn()
3267 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gfn()
3269 switch (event) { bfa_fcs_lport_ms_sm_gfn()
3296 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gfn()
3302 enum port_ms_event event) bfa_fcs_lport_ms_sm_gfn_retry()
3305 bfa_trc(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gfn_retry()
3307 switch (event) { bfa_fcs_lport_ms_sm_gfn_retry()
3322 bfa_sm_fault(ms->port->fcs, event); bfa_fcs_lport_ms_sm_gfn_retry()
3655 enum vport_ns_event event);
3657 enum vport_ns_event event);
3659 enum vport_ns_event event);
3661 enum vport_ns_event event);
3664 enum vport_ns_event event);
3666 enum vport_ns_event event);
3668 enum vport_ns_event event);
3671 enum vport_ns_event event);
3673 enum vport_ns_event event);
3675 enum vport_ns_event event);
3678 enum vport_ns_event event);
3680 enum vport_ns_event event);
3682 enum vport_ns_event event);
3685 enum vport_ns_event event);
3687 enum vport_ns_event event);
3689 enum vport_ns_event event);
3691 enum vport_ns_event event);
3694 enum vport_ns_event event);
3696 enum vport_ns_event event);
3698 enum vport_ns_event event);
3701 enum vport_ns_event event);
3703 enum vport_ns_event event);
3706 enum vport_ns_event event);
3712 enum vport_ns_event event) bfa_fcs_lport_ns_sm_offline()
3715 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_offline()
3717 switch (event) { bfa_fcs_lport_ns_sm_offline()
3727 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_offline()
3733 enum vport_ns_event event) bfa_fcs_lport_ns_sm_plogi_sending()
3736 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_plogi_sending()
3738 switch (event) { bfa_fcs_lport_ns_sm_plogi_sending()
3750 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_plogi_sending()
3756 enum vport_ns_event event) bfa_fcs_lport_ns_sm_plogi()
3759 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_plogi()
3761 switch (event) { bfa_fcs_lport_ns_sm_plogi()
3785 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_plogi()
3791 enum vport_ns_event event) bfa_fcs_lport_ns_sm_plogi_retry()
3794 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_plogi_retry()
3796 switch (event) { bfa_fcs_lport_ns_sm_plogi_retry()
3811 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_plogi_retry()
3817 enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rnn_id()
3820 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rnn_id()
3822 switch (event) { bfa_fcs_lport_ns_sm_sending_rnn_id()
3833 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rnn_id()
3839 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rnn_id()
3842 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rnn_id()
3844 switch (event) { bfa_fcs_lport_ns_sm_rnn_id()
3873 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rnn_id()
3879 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rnn_id_retry()
3882 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rnn_id_retry()
3884 switch (event) { bfa_fcs_lport_ns_sm_rnn_id_retry()
3896 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rnn_id_retry()
3902 enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rsnn_nn()
3905 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rsnn_nn()
3907 switch (event) { bfa_fcs_lport_ns_sm_sending_rsnn_nn()
3919 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rsnn_nn()
3925 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rsnn_nn()
3928 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rsnn_nn()
3930 switch (event) { bfa_fcs_lport_ns_sm_rsnn_nn()
3958 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rsnn_nn()
3964 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rsnn_nn_retry()
3967 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rsnn_nn_retry()
3969 switch (event) { bfa_fcs_lport_ns_sm_rsnn_nn_retry()
3981 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rsnn_nn_retry()
3987 enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rspn_id()
3990 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rspn_id()
3992 switch (event) { bfa_fcs_lport_ns_sm_sending_rspn_id()
4004 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rspn_id()
4010 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rspn_id()
4013 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rspn_id()
4015 switch (event) { bfa_fcs_lport_ns_sm_rspn_id()
4038 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rspn_id()
4044 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rspn_id_retry()
4047 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rspn_id_retry()
4049 switch (event) { bfa_fcs_lport_ns_sm_rspn_id_retry()
4064 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rspn_id_retry()
4070 enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rft_id()
4073 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rft_id()
4075 switch (event) { bfa_fcs_lport_ns_sm_sending_rft_id()
4087 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rft_id()
4093 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rft_id()
4096 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rft_id()
4098 switch (event) { bfa_fcs_lport_ns_sm_rft_id()
4122 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rft_id()
4128 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rft_id_retry()
4131 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rft_id_retry()
4133 switch (event) { bfa_fcs_lport_ns_sm_rft_id_retry()
4145 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rft_id_retry()
4151 enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rff_id()
4154 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rff_id()
4156 switch (event) { bfa_fcs_lport_ns_sm_sending_rff_id()
4168 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_rff_id()
4174 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rff_id()
4177 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rff_id()
4179 switch (event) { bfa_fcs_lport_ns_sm_rff_id()
4225 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rff_id()
4231 enum vport_ns_event event) bfa_fcs_lport_ns_sm_rff_id_retry()
4234 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rff_id_retry()
4236 switch (event) { bfa_fcs_lport_ns_sm_rff_id_retry()
4248 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_rff_id_retry()
4253 enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_gid_ft()
4256 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_gid_ft()
4258 switch (event) { bfa_fcs_lport_ns_sm_sending_gid_ft()
4270 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_sending_gid_ft()
4276 enum vport_ns_event event) bfa_fcs_lport_ns_sm_gid_ft()
4279 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_gid_ft()
4281 switch (event) { bfa_fcs_lport_ns_sm_gid_ft()
4309 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_gid_ft()
4315 enum vport_ns_event event) bfa_fcs_lport_ns_sm_gid_ft_retry()
4318 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_gid_ft_retry()
4320 switch (event) { bfa_fcs_lport_ns_sm_gid_ft_retry()
4332 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_gid_ft_retry()
4338 enum vport_ns_event event) bfa_fcs_lport_ns_sm_online()
4341 bfa_trc(ns->port->fcs, event); bfa_fcs_lport_ns_sm_online()
4343 switch (event) { bfa_fcs_lport_ns_sm_online()
4361 bfa_sm_fault(ns->port->fcs, event); bfa_fcs_lport_ns_sm_online()
5257 enum port_scn_event event);
5260 enum port_scn_event event);
5262 enum port_scn_event event);
5264 enum port_scn_event event);
5266 enum port_scn_event event);
5273 enum port_scn_event event) bfa_fcs_lport_scn_sm_offline()
5275 switch (event) { bfa_fcs_lport_scn_sm_offline()
5285 bfa_sm_fault(scn->port->fcs, event); bfa_fcs_lport_scn_sm_offline()
5291 enum port_scn_event event) bfa_fcs_lport_scn_sm_sending_scr()
5293 switch (event) { bfa_fcs_lport_scn_sm_sending_scr()
5304 bfa_sm_fault(scn->port->fcs, event); bfa_fcs_lport_scn_sm_sending_scr()
5310 enum port_scn_event event) bfa_fcs_lport_scn_sm_scr()
5314 switch (event) { bfa_fcs_lport_scn_sm_scr()
5332 bfa_sm_fault(port->fcs, event); bfa_fcs_lport_scn_sm_scr()
5338 enum port_scn_event event) bfa_fcs_lport_scn_sm_scr_retry()
5340 switch (event) { bfa_fcs_lport_scn_sm_scr_retry()
5352 bfa_sm_fault(scn->port->fcs, event); bfa_fcs_lport_scn_sm_scr_retry()
5358 enum port_scn_event event) bfa_fcs_lport_scn_sm_online()
5360 switch (event) { bfa_fcs_lport_scn_sm_online()
5366 bfa_sm_fault(scn->port->fcs, event); bfa_fcs_lport_scn_sm_online()
5572 * Otherwise let rport handle the RSCN event. bfa_fcs_lport_scn_portid_rscn()
5635 sizeof(u32)) / sizeof(rscn->event[0]); bfa_fcs_lport_scn_process_rscn()
5644 rscn_pid = rscn->event[i].portid; bfa_fcs_lport_scn_process_rscn()
5646 bfa_trc(port->fcs, rscn->event[i].format); bfa_fcs_lport_scn_process_rscn()
5652 if (rscn->event[j].portid == rscn_pid) { bfa_fcs_lport_scn_process_rscn()
5664 switch (rscn->event[i].format) { bfa_fcs_lport_scn_process_rscn()
5666 if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) { bfa_fcs_lport_scn_process_rscn()
5668 * Ignore this event. bfa_fcs_lport_scn_process_rscn()
5679 if (rscn->event[i].qualifier == bfa_fcs_lport_scn_process_rscn()
5690 rscn->event[i].format, bfa_fcs_lport_scn_process_rscn()
5998 BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */
5999 BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */
6003 BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */
6007 BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */
6016 enum bfa_fcs_vport_event event);
6018 enum bfa_fcs_vport_event event);
6020 enum bfa_fcs_vport_event event);
6022 enum bfa_fcs_vport_event event);
6024 enum bfa_fcs_vport_event event);
6026 enum bfa_fcs_vport_event event);
6028 enum bfa_fcs_vport_event event);
6030 enum bfa_fcs_vport_event event);
6032 enum bfa_fcs_vport_event event);
6034 enum bfa_fcs_vport_event event);
6036 enum bfa_fcs_vport_event event);
6038 enum bfa_fcs_vport_event event);
6040 enum bfa_fcs_vport_event event);
6061 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_uninit()
6064 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_uninit() local
6066 switch (event) { bfa_fcs_vport_sm_uninit()
6073 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_uninit() local
6078 * Created state - a start event is required to start up the state machine.
6082 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_created()
6085 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_created() local
6087 switch (event) { bfa_fcs_vport_sm_created()
6118 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_created() local
6123 * Offline state - awaiting ONLINE event from fabric SM.
6127 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_offline()
6130 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_offline() local
6132 switch (event) { bfa_fcs_vport_sm_offline()
6154 * However, the link can go down and cause this event to bfa_fcs_vport_sm_offline()
6160 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_offline() local
6170 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_fdisc()
6173 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_fdisc() local
6175 switch (event) { bfa_fcs_vport_sm_fdisc()
6207 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_fdisc() local
6216 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_fdisc_retry()
6219 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_fdisc_retry() local
6221 switch (event) { bfa_fcs_vport_sm_fdisc_retry()
6241 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_fdisc_retry() local
6252 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_fdisc_rsp_wait()
6255 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_fdisc_rsp_wait() local
6257 switch (event) { bfa_fcs_vport_sm_fdisc_rsp_wait()
6277 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_fdisc_rsp_wait() local
6286 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_online()
6289 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_online() local
6291 switch (event) { bfa_fcs_vport_sm_online()
6309 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_online() local
6319 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_stopping()
6322 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_stopping() local
6324 switch (event) { bfa_fcs_vport_sm_stopping()
6335 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_stopping() local
6345 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_deleting()
6348 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_deleting() local
6350 switch (event) { bfa_fcs_vport_sm_deleting()
6364 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_deleting() local
6376 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_error()
6379 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_error() local
6381 switch (event) { bfa_fcs_vport_sm_error()
6388 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_error() local
6398 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_cleanup()
6401 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_cleanup() local
6403 switch (event) { bfa_fcs_vport_sm_cleanup()
6417 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_cleanup() local
6427 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_logo_for_stop()
6430 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_logo_for_stop() local
6432 switch (event) { bfa_fcs_vport_sm_logo_for_stop()
6445 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_logo_for_stop() local
6455 enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_logo()
6458 bfa_trc(__vport_fcs(vport), event); bfa_fcs_vport_sm_logo() local
6460 switch (event) { bfa_fcs_vport_sm_logo()
6477 bfa_sm_fault(__vport_fcs(vport), event); bfa_fcs_vport_sm_logo() local
6491 enum bfa_lport_aen_event event) bfa_fcs_vport_aen_post()
6508 BFA_AEN_CAT_LPORT, event); bfa_fcs_vport_aen_post()
129 bfa_fcs_lport_sm_uninit( struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_uninit() argument
147 bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_init() argument
181 bfa_fcs_lport_sm_online( struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_online() argument
241 bfa_fcs_lport_sm_offline( struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_offline() argument
297 bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_stopping() argument
321 bfa_fcs_lport_sm_deleting( struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) bfa_fcs_lport_sm_deleting() argument
349 bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port, enum bfa_lport_aen_event event) bfa_fcs_lport_aen_post() argument
1474 bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_offline() argument
1514 bfa_fcs_lport_fdmi_sm_sending_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_sending_rhba() argument
1539 bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rhba() argument
1588 bfa_fcs_lport_fdmi_sm_rhba_retry(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rhba_retry() argument
1619 bfa_fcs_lport_fdmi_sm_sending_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_sending_rprt() argument
1644 bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rprt() argument
1691 bfa_fcs_lport_fdmi_sm_rprt_retry(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rprt_retry() argument
1722 bfa_fcs_lport_fdmi_sm_sending_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_sending_rpa() argument
1747 bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rpa() argument
1792 bfa_fcs_lport_fdmi_sm_rpa_retry(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_rpa_retry() argument
1820 bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_online() argument
1841 bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) bfa_fcs_lport_fdmi_sm_disabled() argument
2893 bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_offline() argument
2914 bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_plogi_sending() argument
2937 bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_plogi() argument
2988 bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_plogi_retry() argument
3014 bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_online() argument
3037 bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_gmal_sending() argument
3060 bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_gmal() argument
3100 bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_gmal_retry() argument
3240 bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_gfn_sending() argument
3263 bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_gfn() argument
3301 bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) bfa_fcs_lport_ms_sm_gfn_retry() argument
3711 bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_offline() argument
3732 bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_plogi_sending() argument
3755 bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_plogi() argument
3790 bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_plogi_retry() argument
3816 bfa_fcs_lport_ns_sm_sending_rnn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rnn_id() argument
3838 bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rnn_id() argument
3878 bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rnn_id_retry() argument
3901 bfa_fcs_lport_ns_sm_sending_rsnn_nn(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rsnn_nn() argument
3924 bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rsnn_nn() argument
3963 bfa_fcs_lport_ns_sm_rsnn_nn_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rsnn_nn_retry() argument
3986 bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rspn_id() argument
4009 bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rspn_id() argument
4043 bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rspn_id_retry() argument
4069 bfa_fcs_lport_ns_sm_sending_rft_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rft_id() argument
4092 bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rft_id() argument
4127 bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rft_id_retry() argument
4150 bfa_fcs_lport_ns_sm_sending_rff_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_rff_id() argument
4173 bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rff_id() argument
4230 bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_rff_id_retry() argument
4252 bfa_fcs_lport_ns_sm_sending_gid_ft(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_sending_gid_ft() argument
4275 bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_gid_ft() argument
4314 bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_gid_ft_retry() argument
4337 bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) bfa_fcs_lport_ns_sm_online() argument
5272 bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) bfa_fcs_lport_scn_sm_offline() argument
5290 bfa_fcs_lport_scn_sm_sending_scr(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) bfa_fcs_lport_scn_sm_sending_scr() argument
5309 bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) bfa_fcs_lport_scn_sm_scr() argument
5337 bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) bfa_fcs_lport_scn_sm_scr_retry() argument
5357 bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) bfa_fcs_lport_scn_sm_online() argument
6060 bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_uninit() argument
6081 bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_created() argument
6126 bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_offline() argument
6169 bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_fdisc() argument
6215 bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_fdisc_retry() argument
6251 bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_fdisc_rsp_wait() argument
6285 bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_online() argument
6318 bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_stopping() argument
6344 bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_deleting() argument
6375 bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_error() argument
6397 bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_cleanup() argument
6426 bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_logo_for_stop() argument
6454 bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) bfa_fcs_vport_sm_logo() argument
6490 bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port, enum bfa_lport_aen_event event) bfa_fcs_vport_aen_post() argument
H A Dbfa_fcs_rport.c90 enum rport_event event);
92 enum rport_event event);
94 enum rport_event event);
96 enum rport_event event);
98 enum rport_event event);
100 enum rport_event event);
102 enum rport_event event);
104 enum rport_event event);
106 enum rport_event event);
108 enum rport_event event);
110 struct bfa_fcs_rport_s *rport, enum rport_event event);
112 enum rport_event event);
114 *rport, enum rport_event event);
116 enum rport_event event);
118 enum rport_event event);
120 enum rport_event event);
122 enum rport_event event);
124 enum rport_event event);
126 enum rport_event event);
128 enum rport_event event);
130 enum rport_event event);
132 enum rport_event event);
134 enum rport_event event);
136 enum rport_event event);
138 enum rport_event event);
140 enum rport_event event);
142 enum rport_event event);
144 enum rport_event event);
178 bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_uninit() argument
182 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_uninit()
184 switch (event) { bfa_fcs_rport_sm_uninit()
208 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_uninit()
217 enum rport_event event) bfa_fcs_rport_sm_plogi_sending()
221 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_plogi_sending()
223 switch (event) { bfa_fcs_rport_sm_plogi_sending()
269 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_plogi_sending()
278 enum rport_event event) bfa_fcs_rport_sm_plogiacc_sending()
282 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_plogiacc_sending()
284 switch (event) { bfa_fcs_rport_sm_plogiacc_sending()
335 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_plogiacc_sending()
344 enum rport_event event) bfa_fcs_rport_sm_plogi_retry()
348 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_plogi_retry()
350 switch (event) { bfa_fcs_rport_sm_plogi_retry()
406 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_plogi_retry()
414 bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_plogi() argument
418 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_plogi()
420 switch (event) { bfa_fcs_rport_sm_plogi()
515 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_plogi()
524 enum rport_event event) bfa_fcs_rport_sm_fc4_fcs_online()
528 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_fc4_fcs_online()
530 switch (event) { bfa_fcs_rport_sm_fc4_fcs_online()
579 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_fc4_fcs_online()
590 enum rport_event event) bfa_fcs_rport_sm_hal_online()
594 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_hal_online()
596 switch (event) { bfa_fcs_rport_sm_hal_online()
631 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_hal_online()
639 bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_online() argument
643 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_online()
645 switch (event) { bfa_fcs_rport_sm_online()
683 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_online()
688 * An SCN event is received in ONLINE state. NS query is being sent
693 enum rport_event event) bfa_fcs_rport_sm_nsquery_sending()
697 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_nsquery_sending()
699 switch (event) { bfa_fcs_rport_sm_nsquery_sending()
733 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_nsquery_sending()
738 * An SCN event is received in ONLINE state. NS query is sent to rport.
742 bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_nsquery() argument
746 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_nsquery()
748 switch (event) { bfa_fcs_rport_sm_nsquery()
792 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_nsquery()
797 * An SCN event is received in ONLINE state. ADISC is being sent for
802 enum rport_event event) bfa_fcs_rport_sm_adisc_online_sending()
806 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_adisc_online_sending()
808 switch (event) { bfa_fcs_rport_sm_adisc_online_sending()
843 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_adisc_online_sending()
848 * An SCN event is received in ONLINE state. ADISC is sent to the rport. bfa_fcs_rport_sm_adisc_online()
853 enum rport_event event) bfa_fcs_rport_sm_adisc_online()
857 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_adisc_online()
859 switch (event) { bfa_fcs_rport_sm_adisc_online()
906 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_adisc_online()
916 enum rport_event event) bfa_fcs_rport_sm_adisc_offline_sending()
920 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_adisc_offline_sending()
922 switch (event) { bfa_fcs_rport_sm_adisc_offline_sending()
947 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_adisc_offline_sending()
957 enum rport_event event) bfa_fcs_rport_sm_adisc_offline()
961 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_adisc_offline()
963 switch (event) { bfa_fcs_rport_sm_adisc_offline()
995 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_adisc_offline()
1004 enum rport_event event) bfa_fcs_rport_sm_fc4_logorcv()
1008 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_fc4_logorcv()
1010 switch (event) { bfa_fcs_rport_sm_fc4_logorcv()
1034 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_fc4_logorcv()
1044 enum rport_event event) bfa_fcs_rport_sm_fc4_logosend()
1048 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_fc4_logosend()
1050 switch (event) { bfa_fcs_rport_sm_fc4_logosend()
1070 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_fc4_logosend()
1079 enum rport_event event) bfa_fcs_rport_sm_fc4_offline()
1083 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_fc4_offline()
1085 switch (event) { bfa_fcs_rport_sm_fc4_offline()
1120 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_fc4_offline()
1130 enum rport_event event) bfa_fcs_rport_sm_hcb_offline()
1134 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_hcb_offline()
1136 switch (event) { bfa_fcs_rport_sm_hcb_offline()
1202 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_hcb_offline()
1212 enum rport_event event) bfa_fcs_rport_sm_hcb_logorcv()
1216 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_hcb_logorcv()
1218 switch (event) { bfa_fcs_rport_sm_hcb_logorcv()
1280 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_hcb_logorcv()
1291 enum rport_event event) bfa_fcs_rport_sm_hcb_logosend()
1295 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_hcb_logosend()
1297 switch (event) { bfa_fcs_rport_sm_hcb_logosend()
1318 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_hcb_logosend()
1327 enum rport_event event) bfa_fcs_rport_sm_logo_sending()
1331 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_logo_sending()
1333 switch (event) { bfa_fcs_rport_sm_logo_sending()
1358 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_logo_sending()
1367 bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_offline() argument
1371 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_offline()
1373 switch (event) { bfa_fcs_rport_sm_offline()
1427 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_offline()
1436 enum rport_event event) bfa_fcs_rport_sm_nsdisc_sending()
1440 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_nsdisc_sending()
1442 switch (event) { bfa_fcs_rport_sm_nsdisc_sending()
1484 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_nsdisc_sending()
1493 enum rport_event event) bfa_fcs_rport_sm_nsdisc_retry()
1497 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_nsdisc_retry()
1499 switch (event) { bfa_fcs_rport_sm_nsdisc_retry()
1548 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_nsdisc_retry()
1557 enum rport_event event) bfa_fcs_rport_sm_nsdisc_sent()
1561 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_nsdisc_sent()
1563 switch (event) { bfa_fcs_rport_sm_nsdisc_sent()
1638 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_nsdisc_sent()
1648 enum rport_event event) bfa_fcs_rport_sm_fc4_off_delete()
1652 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_fc4_off_delete()
1654 switch (event) { bfa_fcs_rport_sm_fc4_off_delete()
1666 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_fc4_off_delete()
1677 enum rport_event event) bfa_fcs_rport_sm_delete_pending()
1681 bfa_trc(rport->fcs, event); bfa_fcs_rport_sm_delete_pending()
1683 switch (event) { bfa_fcs_rport_sm_delete_pending()
1696 bfa_sm_fault(rport->fcs, event); bfa_fcs_rport_sm_delete_pending()
2407 enum bfa_rport_aen_event event, bfa_fcs_rport_aen_post()
2418 if (event == BFA_RPORT_AEN_QOS_PRIO) bfa_fcs_rport_aen_post()
2420 else if (event == BFA_RPORT_AEN_QOS_FLOWID) bfa_fcs_rport_aen_post()
2431 BFA_AEN_CAT_RPORT, event); bfa_fcs_rport_aen_post()
3133 RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
3140 enum rpf_event event);
3142 enum rpf_event event);
3144 enum rpf_event event);
3146 enum rpf_event event);
3148 enum rpf_event event);
3150 enum rpf_event event);
3153 bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_fcs_rpf_sm_uninit() argument
3160 bfa_trc(rport->fcs, event); bfa_fcs_rpf_sm_uninit()
3162 switch (event) { bfa_fcs_rpf_sm_uninit()
3179 bfa_sm_fault(rport->fcs, event); bfa_fcs_rpf_sm_uninit()
3184 bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_fcs_rpf_sm_rpsc_sending() argument
3188 bfa_trc(rport->fcs, event); bfa_fcs_rpf_sm_rpsc_sending()
3190 switch (event) { bfa_fcs_rpf_sm_rpsc_sending()
3202 bfa_sm_fault(rport->fcs, event); bfa_fcs_rpf_sm_rpsc_sending()
3207 bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_fcs_rpf_sm_rpsc() argument
3212 bfa_trc(rport->fcs, event); bfa_fcs_rpf_sm_rpsc()
3214 switch (event) { bfa_fcs_rpf_sm_rpsc()
3248 bfa_sm_fault(rport->fcs, event); bfa_fcs_rpf_sm_rpsc()
3253 bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_fcs_rpf_sm_rpsc_retry() argument
3258 bfa_trc(rport->fcs, event); bfa_fcs_rpf_sm_rpsc_retry()
3260 switch (event) { bfa_fcs_rpf_sm_rpsc_retry()
3274 bfa_sm_fault(rport->fcs, event); bfa_fcs_rpf_sm_rpsc_retry()
3279 bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_fcs_rpf_sm_online() argument
3285 bfa_trc(rport->fcs, event); bfa_fcs_rpf_sm_online()
3287 switch (event) { bfa_fcs_rpf_sm_online()
3294 bfa_sm_fault(rport->fcs, event); bfa_fcs_rpf_sm_online()
3299 bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_fcs_rpf_sm_offline() argument
3305 bfa_trc(rport->fcs, event); bfa_fcs_rpf_sm_offline()
3307 switch (event) { bfa_fcs_rpf_sm_offline()
3317 bfa_sm_fault(rport->fcs, event); bfa_fcs_rpf_sm_offline()
216 bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_plogi_sending() argument
277 bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_plogiacc_sending() argument
343 bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_plogi_retry() argument
523 bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_fc4_fcs_online() argument
589 bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_hal_online() argument
692 bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_nsquery_sending() argument
801 bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_adisc_online_sending() argument
852 bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_adisc_online() argument
915 bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_adisc_offline_sending() argument
956 bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_adisc_offline() argument
1003 bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_fc4_logorcv() argument
1043 bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_fc4_logosend() argument
1078 bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_fc4_offline() argument
1129 bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_hcb_offline() argument
1211 bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_hcb_logorcv() argument
1290 bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_hcb_logosend() argument
1326 bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_logo_sending() argument
1435 bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_nsdisc_sending() argument
1492 bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_nsdisc_retry() argument
1556 bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_nsdisc_sent() argument
1647 bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_fc4_off_delete() argument
1676 bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_sm_delete_pending() argument
2406 bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport, enum bfa_rport_aen_event event, struct bfa_rport_aen_data_s *data) bfa_fcs_rport_aen_post() argument
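Every bfa_fcs_rport_sm_* handler listed above follows the same per-state idiom: the current state is a function pointer, the handler traces the incoming event, switches on it to pick a transition, and treats anything unexpected as a state-machine fault. A minimal, self-contained sketch of that idiom follows; every name in it (struct rport, RPSM_EVENT_*, the handlers) is an illustrative stand-in, not the bfa code itself.

/*
 * Simplified illustration of the per-state handler pattern used above.
 * All names are hypothetical stand-ins, not the real bfa definitions.
 */
#include <stdio.h>

enum rport_event { RPSM_EVENT_PLOGI_SENT, RPSM_EVENT_TIMEOUT, RPSM_EVENT_DELETE };

struct rport;
typedef void (*rport_sm_t)(struct rport *rp, enum rport_event event);

struct rport {
	rport_sm_t sm;		/* current state == current handler */
};

static void rport_sm_offline(struct rport *rp, enum rport_event event);

/* One handler per state: switch on the event and transition by swapping
 * the function pointer; unexpected events are reported as faults. */
static void rport_sm_online(struct rport *rp, enum rport_event event)
{
	switch (event) {
	case RPSM_EVENT_TIMEOUT:
	case RPSM_EVENT_DELETE:
		rp->sm = rport_sm_offline;	/* online -> offline */
		break;
	default:
		fprintf(stderr, "sm fault: event %d in online\n", event);
	}
}

static void rport_sm_offline(struct rport *rp, enum rport_event event)
{
	if (event == RPSM_EVENT_PLOGI_SENT)
		rp->sm = rport_sm_online;	/* offline -> online */
}

int main(void)
{
	struct rport rp = { .sm = rport_sm_offline };

	rp.sm(&rp, RPSM_EVENT_PLOGI_SENT);	/* offline -> online */
	rp.sm(&rp, RPSM_EVENT_DELETE);		/* online -> offline */
	return 0;
}
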
/linux-4.1.27/arch/metag/kernel/perf/
H A Dperf_event.c144 static int metag_pmu_event_init(struct perf_event *event) metag_pmu_event_init() argument
154 if (has_branch_stack(event)) metag_pmu_event_init()
157 event->destroy = _hw_perf_event_destroy; metag_pmu_event_init()
171 switch (event->attr.type) { metag_pmu_event_init()
175 err = _hw_perf_event_init(event); metag_pmu_event_init()
183 event->destroy(event); metag_pmu_event_init()
189 void metag_pmu_event_update(struct perf_event *event, metag_pmu_event_update() argument
217 local64_add(delta, &event->count); metag_pmu_event_update()
221 int metag_pmu_event_set_period(struct perf_event *event, metag_pmu_event_set_period() argument
254 perf_event_update_userpage(event); metag_pmu_event_set_period()
259 static void metag_pmu_start(struct perf_event *event, int flags) metag_pmu_start() argument
262 struct hw_perf_event *hwc = &event->hw; metag_pmu_start()
287 metag_pmu_event_set_period(event, hwc, hwc->idx); metag_pmu_start()
288 cpuc->events[idx] = event; metag_pmu_start()
292 static void metag_pmu_stop(struct perf_event *event, int flags) metag_pmu_stop() argument
294 struct hw_perf_event *hwc = &event->hw; metag_pmu_stop()
301 metag_pmu_event_update(event, hwc, hwc->idx); metag_pmu_stop()
307 static int metag_pmu_add(struct perf_event *event, int flags) metag_pmu_add() argument
310 struct hw_perf_event *hwc = &event->hw; metag_pmu_add()
313 perf_pmu_disable(event->pmu); metag_pmu_add()
341 metag_pmu_start(event, PERF_EF_RELOAD); metag_pmu_add()
343 perf_event_update_userpage(event); metag_pmu_add()
345 perf_pmu_enable(event->pmu); metag_pmu_add()
349 static void metag_pmu_del(struct perf_event *event, int flags) metag_pmu_del() argument
352 struct hw_perf_event *hwc = &event->hw; metag_pmu_del()
356 metag_pmu_stop(event, PERF_EF_UPDATE); metag_pmu_del()
360 perf_event_update_userpage(event); metag_pmu_del()
363 static void metag_pmu_read(struct perf_event *event) metag_pmu_read() argument
365 struct hw_perf_event *hwc = &event->hw; metag_pmu_read()
371 metag_pmu_event_update(event, hwc, hwc->idx); metag_pmu_read()
503 static void _hw_perf_event_destroy(struct perf_event *event) _hw_perf_event_destroy() argument
541 static int _hw_perf_event_init(struct perf_event *event) _hw_perf_event_init() argument
543 struct perf_event_attr *attr = &event->attr; _hw_perf_event_init()
544 struct hw_perf_event *hwc = &event->hw; _hw_perf_event_init()
566 /* Return early if the event is unsupported */ _hw_perf_event_init()
571 * Don't assign an index until the event is placed into the hardware. _hw_perf_event_init()
578 /* Store the event encoding */ _hw_perf_event_init()
598 static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx) metag_pmu_enable_counter() argument
601 unsigned int config = event->config; metag_pmu_enable_counter()
615 local64_set(&event->prev_count, __core_reg_get(TXTACTCYC)); metag_pmu_enable_counter()
619 /* Check for a core internal or performance channel event. */ metag_pmu_enable_counter()
645 * Now we use the high nibble as the performance event to metag_pmu_enable_counter()
664 local64_set(&event->prev_count, 0); metag_pmu_enable_counter()
671 static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx) metag_pmu_disable_counter() argument
691 * Here we remove the thread id AND the event nibble (there are at metag_pmu_disable_counter()
694 * performance counts, and event 0x00 requires a thread id mask! metag_pmu_disable_counter()
733 * We'll keep the thread mask and event id, and just update the metag_pmu_write_counter()
755 struct perf_event *event = cpuhw->events[idx]; metag_pmu_counter_overflow() local
756 struct hw_perf_event *hwc = &event->hw; metag_pmu_counter_overflow()
773 metag_pmu_event_update(event, hwc, idx); metag_pmu_counter_overflow()
775 metag_pmu_event_set_period(event, hwc, idx); metag_pmu_counter_overflow()
782 if (!perf_event_overflow(event, &sampledata, regs)) { metag_pmu_counter_overflow()
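The metag driver above wires its event_init/add/del/start/stop/read callbacks into a struct pmu that perf core then drives. A hedged sketch of that registration step is below; everything prefixed mypmu_ is a hypothetical placeholder, while struct pmu and perf_pmu_register() are the interfaces the driver actually uses.

/* Sketch of registering the callback set listed above with perf core.
 * The mypmu_* bodies are placeholders; a real driver programs hardware
 * counters in them, as this metag driver does. */
#include <linux/module.h>
#include <linux/perf_event.h>

static int  mypmu_event_init(struct perf_event *event) { return 0; }
static int  mypmu_add(struct perf_event *event, int flags) { return 0; }
static void mypmu_del(struct perf_event *event, int flags) { }
static void mypmu_start(struct perf_event *event, int flags) { }
static void mypmu_stop(struct perf_event *event, int flags) { }
static void mypmu_read(struct perf_event *event) { }

static struct pmu my_pmu = {
	.event_init	= mypmu_event_init,	/* validate/translate the event */
	.add		= mypmu_add,		/* claim a hardware counter */
	.del		= mypmu_del,		/* release the counter */
	.start		= mypmu_start,		/* program and enable counting */
	.stop		= mypmu_stop,		/* disable and fold in the count */
	.read		= mypmu_read,		/* update event->count on demand */
};

static int __init mypmu_init(void)
{
	/* -1: let perf core pick a dynamic PMU type id */
	return perf_pmu_register(&my_pmu, "mypmu", -1);
}
module_init(mypmu_init);
MODULE_LICENSE("GPL");
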
/linux-4.1.27/drivers/platform/x86/
H A Ddell-wmi-aio.c38 /* 0x000: A hot key pressed or an event occurred
41 u16 event[]; member in struct:dell_wmi_event
69 * The new WMI event data format will follow the dell_wmi_event structure
74 struct dell_wmi_event *event = (struct dell_wmi_event *)buffer; dell_wmi_aio_event_check() local
76 if (event == NULL || length < 6) dell_wmi_aio_event_check()
79 if ((event->type == 0 || event->type == 0xf) && dell_wmi_aio_event_check()
80 event->length >= 2) dell_wmi_aio_event_check()
90 struct dell_wmi_event *event; dell_wmi_aio_notify() local
95 pr_info("bad event status 0x%x\n", status); dell_wmi_aio_notify()
113 event = (struct dell_wmi_event *) dell_wmi_aio_notify()
115 scancode = event->event[0]; dell_wmi_aio_notify()
/linux-4.1.27/sound/soc/codecs/
H A Dwm_adsp.h72 .reg = SND_SOC_NOPM, .shift = num, .event = wm_adsp2_early_event, \
75 .reg = SND_SOC_NOPM, .shift = num, .event = wm_adsp2_event, \
84 struct snd_kcontrol *kcontrol, int event);
86 struct snd_kcontrol *kcontrol, int event);
88 struct snd_kcontrol *kcontrol, int event);
/linux-4.1.27/drivers/media/v4l2-core/
H A Dv4l2-event.c2 * v4l2-event.c
27 #include <media/v4l2-event.h>
39 static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) __v4l2_event_dequeue() argument
57 kev->event.pending = fh->navailable; __v4l2_event_dequeue()
58 *event = kev->event; __v4l2_event_dequeue()
67 int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, v4l2_event_dequeue() argument
73 return __v4l2_event_dequeue(fh, event); v4l2_event_dequeue()
85 ret = __v4l2_event_dequeue(fh, event); v4l2_event_dequeue()
123 * If the event has been added to the fh->subscribed list, but its __v4l2_event_queue_fh()
130 /* Increase event sequence number on fh. */ __v4l2_event_queue_fh()
143 sev->ops->replace(&kev->event, ev); __v4l2_event_queue_fh()
149 sev->ops->merge(&kev->event, &second_oldest->event); __v4l2_event_queue_fh()
155 kev->event.type = ev->type; __v4l2_event_queue_fh()
157 kev->event.u = ev->u; __v4l2_event_queue_fh()
158 kev->event.id = ev->id; __v4l2_event_queue_fh()
159 kev->event.timestamp = *ts; __v4l2_event_queue_fh()
160 kev->event.sequence = fh->sequence; __v4l2_event_queue_fh()
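The dequeue path above is what userspace reaches through VIDIOC_DQEVENT after subscribing with VIDIOC_SUBSCRIBE_EVENT. A small, hedged userspace counterpart follows; the device node and the chosen event type are placeholders.

/* Userspace counterpart of the dequeue path above: subscribe to an
 * event class, then block in VIDIOC_DQEVENT until the driver queues
 * one. "/dev/video0" and V4L2_EVENT_EOS are illustrative choices. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_EOS;	/* end-of-stream notifications */
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return 1;

	/* Blocks (fd opened without O_NONBLOCK) until an event is queued. */
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
		printf("type=%u sequence=%u pending=%u\n",
		       ev.type, ev.sequence, ev.pending);
	return 0;
}
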
/linux-4.1.27/drivers/gpu/drm/atmel-hlcdc/
H A Datmel_hlcdc_crtc.c39 * @event: pointer to the current page flip event
46 struct drm_pending_vblank_event *event; member in struct:atmel_hlcdc_crtc
246 if (c->state->event) { atmel_hlcdc_crtc_atomic_begin()
247 c->state->event->pipe = drm_crtc_index(c); atmel_hlcdc_crtc_atomic_begin()
251 crtc->event = c->state->event; atmel_hlcdc_crtc_atomic_begin()
252 c->state->event = NULL; atmel_hlcdc_crtc_atomic_begin()
285 struct drm_pending_vblank_event *event; atmel_hlcdc_crtc_cancel_page_flip() local
290 event = crtc->event; atmel_hlcdc_crtc_cancel_page_flip()
291 if (event && event->base.file_priv == file) { atmel_hlcdc_crtc_cancel_page_flip()
292 event->base.destroy(&event->base); atmel_hlcdc_crtc_cancel_page_flip()
294 crtc->event = NULL; atmel_hlcdc_crtc_cancel_page_flip()
305 if (crtc->event) { atmel_hlcdc_crtc_finish_page_flip()
306 drm_send_vblank_event(dev, crtc->id, crtc->event); atmel_hlcdc_crtc_finish_page_flip()
308 crtc->event = NULL; atmel_hlcdc_crtc_finish_page_flip()
/linux-4.1.27/include/net/netfilter/
H A Dnf_conntrack_ecache.h2 * connection tracking event cache.
59 /* This structure is passed to event handler */
78 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) nf_conntrack_event_cache() argument
90 set_bit(event, &e->cache); nf_conntrack_event_cache()
119 /* This is a resent of a destroy event? If so, skip missed */ nf_conntrack_eventmask_report()
129 /* This is a destroy event that has been nf_conntrack_eventmask_report()
148 nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, nf_conntrack_event_report() argument
151 return nf_conntrack_eventmask_report(1 << event, ct, portid, report); nf_conntrack_event_report()
155 nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) nf_conntrack_event() argument
157 return nf_conntrack_eventmask_report(1 << event, ct, 0, 0); nf_conntrack_event()
176 nf_ct_expect_event_report(enum ip_conntrack_expect_events event, nf_ct_expect_event_report() argument
194 if (e->expmask & (1 << event)) { nf_ct_expect_event_report()
200 notify->fcn(1 << event, &item); nf_ct_expect_event_report()
207 nf_ct_expect_event(enum ip_conntrack_expect_events event, nf_ct_expect_event() argument
210 nf_ct_expect_event_report(event, exp, 0, 0); nf_ct_expect_event()
235 static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, nf_conntrack_event_cache() argument
241 static inline int nf_conntrack_event(enum ip_conntrack_events event, nf_conntrack_event() argument
243 static inline int nf_conntrack_event_report(enum ip_conntrack_events event, nf_conntrack_event_report() argument
248 static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, nf_ct_expect_event() argument
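The cache helper above is meant to be called by code that has just changed conntrack state; the ctnetlink notification is then batched and delivered later. A hedged sketch of such a caller: my_update_ct() is hypothetical, while nf_conntrack_event_cache() and IPCT_PROTOINFO come from the real headers.

/* Producer side of the event cache above: mark that conntrack state
 * changed and let the ecache layer batch the netlink notification. */
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static void my_update_ct(struct nf_conn *ct)
{
	/* ... protocol-specific state change on ct happens here ... */

	nf_conntrack_event_cache(IPCT_PROTOINFO, ct);	/* queue the event */
}
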
/linux-4.1.27/include/net/sctp/
H A Dulpevent.h62 /* Retrieve the skb this event sits inside of. */ sctp_event2skb()
68 /* Retrieve & cast the event sitting inside the skb. */ sctp_skb2event()
131 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
133 void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
135 void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
138 __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
140 /* Is this event type enabled? */ sctp_ulpevent_type_enabled()
148 /* Given an event subscription, is this event enabled? */ sctp_ulpevent_is_enabled()
149 static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event, sctp_ulpevent_is_enabled() argument
155 if (sctp_ulpevent_is_notification(event)) { sctp_ulpevent_is_enabled()
156 sn_type = sctp_ulpevent_get_notification_type(event); sctp_ulpevent_is_enabled()
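The sctp_ulpevent_is_enabled() check above consults the per-socket subscription that userspace sets with the SCTP_EVENTS socket option. A hedged userspace sketch of enabling two of those notification classes (the lksctp API is assumed; the chosen flags are illustrative):

/* Userspace side of the subscription check above: tell the kernel which
 * SCTP notifications this socket should receive. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int enable_sctp_events(int sd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_data_io_event = 1;		/* per-message SNDRCV info */
	events.sctp_association_event = 1;	/* assoc up/down notifications */

	return setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS,
			  &events, sizeof(events));
}
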
/linux-4.1.27/include/trace/
H A Dsyscall.h20 * @enter_fields: list of fields for syscall_enter trace event
21 * @enter_event: associated syscall_enter trace event
22 * @exit_event: associated syscall_exit trace event
/linux-4.1.27/include/uapi/sound/
H A Dasequencer.h31 * definition of sequencer event types
35 * event data type = #snd_seq_result
41 * event data type = #snd_seq_ev_note
49 * event data type = #snd_seq_ev_ctrl
60 * event data type = #snd_seq_ev_ctrl
65 #define SNDRV_SEQ_EVENT_TIMESIGN 23 /* SMF Time Signature event */
66 #define SNDRV_SEQ_EVENT_KEYSIGN 24 /* SMF Key Signature event */
69 * event data type = snd_seq_ev_queue_control
76 #define SNDRV_SEQ_EVENT_TEMPO 35 /* (SMF) Tempo event */
82 * event data type = none
86 #define SNDRV_SEQ_EVENT_SENSING 42 /* "active sensing" event */
89 * event data type = any type
91 #define SNDRV_SEQ_EVENT_ECHO 50 /* echo event */
92 #define SNDRV_SEQ_EVENT_OSS 51 /* OSS raw event */
95 * event data type = snd_seq_addr
105 * event data type = snd_seq_connect
113 * event data type = any
130 * event data type = snd_seq_ev_ext
134 #define SNDRV_SEQ_EVENT_BOUNCE 131 /* error event */
150 /* 255: special event */
156 /** event address */
170 #define SNDRV_SEQ_ADDRESS_SUBSCRIBERS 254 /* send event to all subscribed ports */
171 #define SNDRV_SEQ_ADDRESS_BROADCAST 255 /* send event to all queues/clients/ports/channels */
174 /* event mode flag - NOTE: only 8 bits available! */
183 #define SNDRV_SEQ_EVENT_LENGTH_FIXED (0<<2) /* fixed event size */
184 #define SNDRV_SEQ_EVENT_LENGTH_VARIABLE (1<<2) /* variable event size */
185 #define SNDRV_SEQ_EVENT_LENGTH_VARUSR (2<<2) /* variable event size - user memory space */
189 #define SNDRV_SEQ_PRIORITY_HIGH (1<<4) /* event should be processed before others */
193 /* note event */
202 /* controller event */
227 int event; /* processed event type */ member in struct:snd_seq_result
263 /* quoted event - inside the kernel only */
267 struct snd_seq_event *event; /* quoted event */ member in struct:snd_seq_ev_quote
271 /* sequencer event */
273 snd_seq_event_type_t type; /* event type */
274 unsigned char flags; /* event flags */
284 union { /* event data... */
301 * bounce event - stored as variable size data
305 struct snd_seq_event event; member in struct:snd_seq_event_bounce
345 /* event filter flags */
348 #define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */
349 #define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */
357 unsigned char event_filter[32]; /* event filter bitmap */
385 #define SNDRV_SEQ_REMOVE_EVENT_TYPE (1<<7) /* Restrict to event type */
512 #define SNDRV_SEQ_TIMER_MIDI_CLOCK 1 /* Midi Clock (CLOCK event) */
513 #define SNDRV_SEQ_TIMER_MIDI_TICK 2 /* Midi Timer Tick (TICK event) */
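Putting the definitions above together, a note-on event is just the fixed-size struct with the type, flags and note payload filled in. A hedged sketch follows; most applications go through alsa-lib helpers rather than the raw uapi struct, and the values here are illustrative.

/* Sketch of filling in the raw event structure defined in this header. */
#include <string.h>
#include <sound/asequencer.h>

static void fill_noteon(struct snd_seq_event *ev)
{
	memset(ev, 0, sizeof(*ev));

	ev->type  = SNDRV_SEQ_EVENT_NOTEON;		/* event type */
	ev->flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;	/* fixed-size data */

	/* Deliver to every client/port subscribed to our source port. */
	ev->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
	ev->dest.port   = SNDRV_SEQ_ADDRESS_UNKNOWN;

	ev->data.note.channel  = 0;
	ev->data.note.note     = 60;			/* middle C */
	ev->data.note.velocity = 100;
}
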
/linux-4.1.27/arch/c6x/include/asm/
H A Dsoc.h16 /* Return active exception event or -1 if none */
19 /* Assert an event */
26 extern void soc_assert_event(unsigned int event);
/linux-4.1.27/arch/arm/include/asm/
H A Dpmu.h48 * The ARMv7 CPU PMU supports up to 32 event counters.
75 * an event. A 0 means that the counter can be used.
98 void (*enable)(struct perf_event *event);
99 void (*disable)(struct perf_event *event);
101 struct perf_event *event);
103 struct perf_event *event);
106 u32 (*read_counter)(struct perf_event *event);
107 void (*write_counter)(struct perf_event *event, u32 val);
113 int (*map_event)(struct perf_event *event);
129 u64 armpmu_event_update(struct perf_event *event);
131 int armpmu_event_set_period(struct perf_event *event);
133 int armpmu_map_event(struct perf_event *event,
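The used_mask mentioned above ("a 0 means that the counter can be used") is the usual bitmap-based counter allocator behind get_event_idx(). A simplified, self-contained sketch of that allocation step; the structure and counter count are illustrative, not the arm_pmu definitions.

/* Find a free counter bit and claim it atomically, as the used_mask
 * comment above describes. */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define MY_NUM_COUNTERS	4

struct my_hw_events {
	unsigned long used_mask[BITS_TO_LONGS(MY_NUM_COUNTERS)];
};

static int my_get_event_idx(struct my_hw_events *hw)
{
	int idx = find_first_zero_bit(hw->used_mask, MY_NUM_COUNTERS);

	if (idx == MY_NUM_COUNTERS)
		return -EAGAIN;			/* all counters busy */
	if (test_and_set_bit(idx, hw->used_mask))
		return -EAGAIN;			/* lost a race for it */
	return idx;
}
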
/linux-4.1.27/drivers/input/serio/
H A Dserio.c147 * Serio event processing.
170 struct serio_event *event = NULL; serio_get_event() local
176 event = list_first_entry(&serio_event_list, serio_get_event()
178 list_del_init(&event->node); serio_get_event()
182 return event; serio_get_event()
185 static void serio_free_event(struct serio_event *event) serio_free_event() argument
187 module_put(event->owner); serio_free_event()
188 kfree(event); serio_free_event()
202 * If this event is of different type we should not serio_remove_duplicate_events()
219 struct serio_event *event; serio_handle_event() local
223 while ((event = serio_get_event())) { serio_handle_event()
225 switch (event->type) { serio_handle_event()
228 serio_add_port(event->object); serio_handle_event()
232 serio_reconnect_port(event->object); serio_handle_event()
236 serio_disconnect_port(event->object); serio_handle_event()
237 serio_find_driver(event->object); serio_handle_event()
241 serio_reconnect_subtree(event->object); serio_handle_event()
245 serio_attach_driver(event->object); serio_handle_event()
249 serio_remove_duplicate_events(event->object, event->type); serio_handle_event()
250 serio_free_event(event); serio_handle_event()
262 struct serio_event *event; serio_queue_event() local
268 * Scan event list for the other events for the same serio port, serio_queue_event()
269 * starting with the most recent one. If event is the same we serio_queue_event()
270 * do not need to add a new one. If event is of different type we serio_queue_event()
271 * need to add this event and should not look further because serio_queue_event()
274 list_for_each_entry_reverse(event, &serio_event_list, node) { serio_queue_event()
275 if (event->object == object) { serio_queue_event()
276 if (event->type == event_type) serio_queue_event()
282 event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC); serio_queue_event()
283 if (!event) { serio_queue_event()
284 pr_err("Not enough memory to queue event %d\n", event_type); serio_queue_event()
290 pr_warning("Can't get module reference, dropping event %d\n", serio_queue_event()
292 kfree(event); serio_queue_event()
297 event->type = event_type; serio_queue_event()
298 event->object = object; serio_queue_event()
299 event->owner = owner; serio_queue_event()
301 list_add_tail(&event->node, &serio_event_list); serio_queue_event()
315 struct serio_event *event, *next; serio_remove_pending_events() local
320 list_for_each_entry_safe(event, next, &serio_event_list, node) { serio_remove_pending_events()
321 if (event->object == object) { serio_remove_pending_events()
322 list_del_init(&event->node); serio_remove_pending_events()
323 serio_free_event(event); serio_remove_pending_events()
338 struct serio_event *event; serio_get_pending_child() local
344 list_for_each_entry(event, &serio_event_list, node) { serio_get_pending_child()
345 if (event->type == SERIO_REGISTER_PORT) { serio_get_pending_child()
346 serio = event->object; serio_get_pending_child()
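The queueing comments above describe a small duplicate-suppression rule: walk the pending list newest-first, skip queueing if an identical event for the same object is already there, but stop looking as soon as a different event type for that object is found. A self-contained sketch of just that decision (the list layout is simplified and is not the serio_event structure):

#include <stdbool.h>

struct pending_event {
	void *object;
	int type;
	struct pending_event *older;	/* singly linked, newest first */
};

static bool need_to_queue(const struct pending_event *newest,
			  const void *object, int type)
{
	const struct pending_event *e;

	for (e = newest; e; e = e->older) {
		if (e->object != object)
			continue;
		if (e->type == type)
			return false;	/* identical event already pending */
		break;			/* different type: must queue ours */
	}
	return true;
}
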
/linux-4.1.27/arch/mips/kernel/
H A Dperf_event_mipsxx.c9 * based on the sparc64 perf event code and the x86 code. Performance
40 * is used for an event.
111 #define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
327 * when the former kind of event takes the counter the mipsxx_pmu_alloc_counter()
328 * latter kind of event wants to use, then the "counter mipsxx_pmu_alloc_counter()
329 * allocation" for the latter event will fail. In fact if mipsxx_pmu_alloc_counter()
375 static int mipspmu_event_set_period(struct perf_event *event, mipspmu_event_set_period() argument
406 perf_event_update_userpage(event); mipspmu_event_set_period()
411 static void mipspmu_event_update(struct perf_event *event, mipspmu_event_update() argument
428 local64_add(delta, &event->count); mipspmu_event_update()
432 static void mipspmu_start(struct perf_event *event, int flags) mipspmu_start() argument
434 struct hw_perf_event *hwc = &event->hw; mipspmu_start()
441 /* Set the period for the event. */ mipspmu_start()
442 mipspmu_event_set_period(event, hwc, hwc->idx); mipspmu_start()
444 /* Enable the event. */ mipspmu_start()
448 static void mipspmu_stop(struct perf_event *event, int flags) mipspmu_stop() argument
450 struct hw_perf_event *hwc = &event->hw; mipspmu_stop()
453 /* We are working on a local event. */ mipspmu_stop()
456 mipspmu_event_update(event, hwc, hwc->idx); mipspmu_stop()
461 static int mipspmu_add(struct perf_event *event, int flags) mipspmu_add() argument
464 struct hw_perf_event *hwc = &event->hw; mipspmu_add()
468 perf_pmu_disable(event->pmu); mipspmu_add()
470 /* To look for a free counter for this event. */ mipspmu_add()
478 * If there is an event in the counter we are going to use then mipspmu_add()
481 event->hw.idx = idx; mipspmu_add()
483 cpuc->events[idx] = event; mipspmu_add()
487 mipspmu_start(event, PERF_EF_RELOAD); mipspmu_add()
490 perf_event_update_userpage(event); mipspmu_add()
493 perf_pmu_enable(event->pmu); mipspmu_add()
497 static void mipspmu_del(struct perf_event *event, int flags) mipspmu_del() argument
500 struct hw_perf_event *hwc = &event->hw; mipspmu_del()
505 mipspmu_stop(event, PERF_EF_UPDATE); mipspmu_del()
509 perf_event_update_userpage(event); mipspmu_del()
512 static void mipspmu_read(struct perf_event *event) mipspmu_read() argument
514 struct hw_perf_event *hwc = &event->hw; mipspmu_read()
520 mipspmu_event_update(event, hwc, hwc->idx); mipspmu_read()
597 static int __hw_perf_event_init(struct perf_event *event);
599 static void hw_perf_event_destroy(struct perf_event *event) hw_perf_event_destroy() argument
614 static int mipspmu_event_init(struct perf_event *event) mipspmu_event_init() argument
619 if (has_branch_stack(event)) mipspmu_event_init()
622 switch (event->attr.type) { mipspmu_event_init()
632 if (event->cpu >= nr_cpumask_bits || mipspmu_event_init()
633 (event->cpu >= 0 && !cpu_online(event->cpu))) mipspmu_event_init()
649 return __hw_perf_event_init(event); mipspmu_event_init()
716 static int validate_group(struct perf_event *event) validate_group() argument
718 struct perf_event *sibling, *leader = event->group_leader; validate_group()
731 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) validate_group()
742 struct perf_event *event = cpuc->events[idx]; handle_associated_event() local
743 struct hw_perf_event *hwc = &event->hw; handle_associated_event()
745 mipspmu_event_update(event, hwc, idx); handle_associated_event()
746 data->period = event->hw.last_period; handle_associated_event()
747 if (!mipspmu_event_set_period(event, hwc, idx)) handle_associated_event()
750 if (perf_event_overflow(event, data, regs)) handle_associated_event()
810 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
819 /* 74K/proAptiv core has different branch event code. */
861 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
942 /* 74K/proAptiv core has completely different cache event map. */
1156 * Only general DTLB misses are counted; use the same event for
1205 * Only general DTLB misses are counted; use the same event for
1231 static void check_and_calc_range(struct perf_event *event, check_and_calc_range() argument
1234 struct hw_perf_event *hwc = &event->hw; check_and_calc_range()
1236 if (event->cpu >= 0) { check_and_calc_range()
1239 * The user selected an event that is processor check_and_calc_range()
1245 * FIXME: cpu_data[event->cpu].vpe_id reports 0 check_and_calc_range()
1248 hwc->config_base |= M_PERFCTL_VPEID(event->cpu); check_and_calc_range()
1255 static void check_and_calc_range(struct perf_event *event, check_and_calc_range() argument
1261 static int __hw_perf_event_init(struct perf_event *event) __hw_perf_event_init() argument
1263 struct perf_event_attr *attr = &event->attr; __hw_perf_event_init()
1264 struct hw_perf_event *hwc = &event->hw; __hw_perf_event_init()
1268 /* Returning MIPS event descriptor for generic perf event. */ __hw_perf_event_init()
1269 if (PERF_TYPE_HARDWARE == event->attr.type) { __hw_perf_event_init()
1270 if (event->attr.config >= PERF_COUNT_HW_MAX) __hw_perf_event_init()
1272 pev = mipspmu_map_general_event(event->attr.config); __hw_perf_event_init()
1273 } else if (PERF_TYPE_HW_CACHE == event->attr.type) { __hw_perf_event_init()
1274 pev = mipspmu_map_cache_event(event->attr.config); __hw_perf_event_init()
1275 } else if (PERF_TYPE_RAW == event->attr.type) { __hw_perf_event_init()
1276 /* We are working on the global raw event. */ __hw_perf_event_init()
1278 pev = mipspmu.map_raw_event(event->attr.config); __hw_perf_event_init()
1280 /* The event type is not (yet) supported. */ __hw_perf_event_init()
1285 if (PERF_TYPE_RAW == event->attr.type) __hw_perf_event_init()
1298 check_and_calc_range(event, pev); __hw_perf_event_init()
1301 if (PERF_TYPE_RAW == event->attr.type) __hw_perf_event_init()
1316 * The event can belong to another cpu. We do not assign a local __hw_perf_event_init()
1329 if (event->group_leader != event) __hw_perf_event_init()
1330 err = validate_group(event); __hw_perf_event_init()
1332 event->destroy = hw_perf_event_destroy; __hw_perf_event_init()
1335 event->destroy(event); __hw_perf_event_init()
1494 * 128 needs to be added to 15 as the input for the event config, i.e., 143 (0x8F)
1503 /* currently most cores have 7-bit event numbers */ mipsxx_pmu_map_raw_event()
1559 /* 8-bit event numbers */ mipsxx_pmu_map_raw_event()
/linux-4.1.27/drivers/pps/
H A Dkapi.c55 static void pps_echo_client_default(struct pps_device *pps, int event, pps_echo_client_default() argument
59 event & PPS_CAPTUREASSERT ? "assert" : "", pps_echo_client_default()
60 event & PPS_CAPTURECLEAR ? "clear" : ""); pps_echo_client_default()
159 /* pps_event - register a PPS event into the system
161 * @ts: the event timestamp
162 * @event: the event type
166 * PPS event into the system (it's usually called inside an IRQ handler).
170 * pps->info.echo(pps, event, data);
172 void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, pps_event() argument
179 /* check event type */ pps_event()
180 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); pps_event()
182 dev_dbg(pps->dev, "PPS event at %ld.%09ld\n", pps_event()
191 pps->info.echo(pps, event, data); pps_event()
193 /* Check the event */ pps_event()
195 if (event & pps->params.mode & PPS_CAPTUREASSERT) { pps_event()
209 if (event & pps->params.mode & PPS_CAPTURECLEAR) { pps_event()
224 pps_kc_event(pps, ts, event); pps_event()
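pps_event() above is "usually called inside an IRQ handler", so the typical caller looks like the sketch below: take the timestamp first, then report the edge. The IRQ wiring and device pointer are hypothetical; pps_get_ts() and pps_event() are the interfaces shown above.

/* Sketch of the usual caller of pps_event(): an interrupt handler that
 * timestamps the pulse and reports the assert edge. */
#include <linux/interrupt.h>
#include <linux/pps_kernel.h>

static irqreturn_t my_pps_irq(int irq, void *dev_id)
{
	struct pps_device *pps = dev_id;
	struct pps_event_time ts;

	pps_get_ts(&ts);				/* grab the timestamp first */
	pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);	/* report the edge */

	return IRQ_HANDLED;
}
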
/linux-4.1.27/drivers/input/gameport/
H A Dgameport.c255 * Gameport event processing.
275 struct gameport_event *event = NULL; gameport_get_event() local
281 event = list_first_entry(&gameport_event_list, gameport_get_event()
283 list_del_init(&event->node); gameport_get_event()
287 return event; gameport_get_event()
290 static void gameport_free_event(struct gameport_event *event) gameport_free_event() argument
292 module_put(event->owner); gameport_free_event()
293 kfree(event); gameport_free_event()
296 static void gameport_remove_duplicate_events(struct gameport_event *event) gameport_remove_duplicate_events() argument
304 if (event->object == e->object) { gameport_remove_duplicate_events()
306 * If this event is of different type we should not gameport_remove_duplicate_events()
310 if (event->type != e->type) gameport_remove_duplicate_events()
324 struct gameport_event *event; gameport_handle_events() local
329 * Note that we handle only one event here to give swsusp gameport_handle_events()
334 if ((event = gameport_get_event())) { gameport_handle_events()
336 switch (event->type) { gameport_handle_events()
339 gameport_add_port(event->object); gameport_handle_events()
343 gameport_attach_driver(event->object); gameport_handle_events()
347 gameport_remove_duplicate_events(event); gameport_handle_events()
348 gameport_free_event(event); gameport_handle_events()
360 struct gameport_event *event; gameport_queue_event() local
366 * Scan event list for the other events for the same gameport port, gameport_queue_event()
367 * starting with the most recent one. If event is the same we gameport_queue_event()
368 * do not need to add a new one. If event is of different type we gameport_queue_event()
369 * need to add this event and should not look further because gameport_queue_event()
372 list_for_each_entry_reverse(event, &gameport_event_list, node) { gameport_queue_event()
373 if (event->object == object) { gameport_queue_event()
374 if (event->type == event_type) gameport_queue_event()
380 event = kmalloc(sizeof(struct gameport_event), GFP_ATOMIC); gameport_queue_event()
381 if (!event) { gameport_queue_event()
382 pr_err("Not enough memory to queue event %d\n", event_type); gameport_queue_event()
388 pr_warning("Can't get module reference, dropping event %d\n", gameport_queue_event()
390 kfree(event); gameport_queue_event()
395 event->type = event_type; gameport_queue_event()
396 event->object = object; gameport_queue_event()
397 event->owner = owner; gameport_queue_event()
399 list_add_tail(&event->node, &gameport_event_list); gameport_queue_event()
413 struct gameport_event *event, *next; gameport_remove_pending_events() local
418 list_for_each_entry_safe(event, next, &gameport_event_list, node) { gameport_remove_pending_events()
419 if (event->object == object) { gameport_remove_pending_events()
420 list_del_init(&event->node); gameport_remove_pending_events()
421 gameport_free_event(event); gameport_remove_pending_events()
438 struct gameport_event *event; gameport_get_pending_child() local
444 list_for_each_entry(event, &gameport_event_list, node) { gameport_get_pending_child()
445 if (event->type == GAMEPORT_REGISTER_PORT) { gameport_get_pending_child()
446 gameport = event->object; gameport_get_pending_child()
/linux-4.1.27/drivers/infiniband/core/
H A Dcma.c167 struct rdma_cm_event event; member in struct:cma_work
173 struct rdma_cm_event event; member in struct:cma_ndev_work
917 if (ib_event->event == IB_CM_REQ_RECEIVED) cma_save_net_info()
919 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) cma_save_net_info()
1109 static void cma_set_rep_event_data(struct rdma_cm_event *event, cma_set_rep_event_data() argument
1113 event->param.conn.private_data = private_data; cma_set_rep_event_data()
1114 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; cma_set_rep_event_data()
1115 event->param.conn.responder_resources = rep_data->responder_resources; cma_set_rep_event_data()
1116 event->param.conn.initiator_depth = rep_data->initiator_depth; cma_set_rep_event_data()
1117 event->param.conn.flow_control = rep_data->flow_control; cma_set_rep_event_data()
1118 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; cma_set_rep_event_data()
1119 event->param.conn.srq = rep_data->srq; cma_set_rep_event_data()
1120 event->param.conn.qp_num = rep_data->remote_qpn; cma_set_rep_event_data()
1126 struct rdma_cm_event event; cma_ib_handler() local
1129 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && cma_ib_handler()
1131 (ib_event->event == IB_CM_TIMEWAIT_EXIT && cma_ib_handler()
1135 memset(&event, 0, sizeof event); cma_ib_handler()
1136 switch (ib_event->event) { cma_ib_handler()
1139 event.event = RDMA_CM_EVENT_UNREACHABLE; cma_ib_handler()
1140 event.status = -ETIMEDOUT; cma_ib_handler()
1144 event.status = cma_rep_recv(id_priv); cma_ib_handler()
1145 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : cma_ib_handler()
1148 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; cma_ib_handler()
1150 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, cma_ib_handler()
1155 event.event = RDMA_CM_EVENT_ESTABLISHED; cma_ib_handler()
1158 event.status = -ETIMEDOUT; /* fall through */ cma_ib_handler()
1164 event.event = RDMA_CM_EVENT_DISCONNECTED; cma_ib_handler()
1167 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; cma_ib_handler()
1170 /* ignore event */ cma_ib_handler()
1174 event.status = ib_event->param.rej_rcvd.reason; cma_ib_handler()
1175 event.event = RDMA_CM_EVENT_REJECTED; cma_ib_handler()
1176 event.param.conn.private_data = ib_event->private_data; cma_ib_handler()
1177 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; cma_ib_handler()
1180 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n", cma_ib_handler()
1181 ib_event->event); cma_ib_handler()
1185 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_ib_handler()
1275 static void cma_set_req_event_data(struct rdma_cm_event *event, cma_set_req_event_data() argument
1279 event->param.conn.private_data = private_data + offset; cma_set_req_event_data()
1280 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; cma_set_req_event_data()
1281 event->param.conn.responder_resources = req_data->responder_resources; cma_set_req_event_data()
1282 event->param.conn.initiator_depth = req_data->initiator_depth; cma_set_req_event_data()
1283 event->param.conn.flow_control = req_data->flow_control; cma_set_req_event_data()
1284 event->param.conn.retry_count = req_data->retry_count; cma_set_req_event_data()
1285 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; cma_set_req_event_data()
1286 event->param.conn.srq = req_data->srq; cma_set_req_event_data()
1287 event->param.conn.qp_num = req_data->remote_qpn; cma_set_req_event_data()
1292 return (((ib_event->event == IB_CM_REQ_RECEIVED) && cma_check_req_qp_type()
1294 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && cma_check_req_qp_type()
1302 struct rdma_cm_event event; cma_req_handler() local
1312 memset(&event, 0, sizeof event); cma_req_handler()
1314 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; cma_req_handler()
1315 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { cma_req_handler()
1317 event.param.ud.private_data = ib_event->private_data + offset; cma_req_handler()
1318 event.param.ud.private_data_len = cma_req_handler()
1322 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, cma_req_handler()
1344 ret = conn_id->id.event_handler(&conn_id->id, &event); cma_req_handler()
1423 struct rdma_cm_event event; cma_iw_handler() local
1431 memset(&event, 0, sizeof event); cma_iw_handler()
1432 switch (iw_event->event) { cma_iw_handler()
1434 event.event = RDMA_CM_EVENT_DISCONNECTED; cma_iw_handler()
1443 event.event = RDMA_CM_EVENT_ESTABLISHED; cma_iw_handler()
1444 event.param.conn.initiator_depth = iw_event->ird; cma_iw_handler()
1445 event.param.conn.responder_resources = iw_event->ord; cma_iw_handler()
1449 event.event = RDMA_CM_EVENT_REJECTED; cma_iw_handler()
1452 event.event = RDMA_CM_EVENT_UNREACHABLE; cma_iw_handler()
1455 event.event = RDMA_CM_EVENT_CONNECT_ERROR; cma_iw_handler()
1460 event.event = RDMA_CM_EVENT_ESTABLISHED; cma_iw_handler()
1461 event.param.conn.initiator_depth = iw_event->ird; cma_iw_handler()
1462 event.param.conn.responder_resources = iw_event->ord; cma_iw_handler()
1468 event.status = iw_event->status; cma_iw_handler()
1469 event.param.conn.private_data = iw_event->private_data; cma_iw_handler()
1470 event.param.conn.private_data_len = iw_event->private_data_len; cma_iw_handler()
1471 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_iw_handler()
1490 struct rdma_cm_event event; iw_conn_req_handler() local
1540 memset(&event, 0, sizeof event); iw_conn_req_handler()
1541 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; iw_conn_req_handler()
1542 event.param.conn.private_data = iw_event->private_data; iw_conn_req_handler()
1543 event.param.conn.private_data_len = iw_event->private_data_len; iw_conn_req_handler()
1544 event.param.conn.initiator_depth = iw_event->ird; iw_conn_req_handler()
1545 event.param.conn.responder_resources = iw_event->ord; iw_conn_req_handler()
1552 ret = conn_id->id.event_handler(&conn_id->id, &event); iw_conn_req_handler()
1629 struct rdma_cm_event *event) cma_listen_handler()
1635 return id_priv->id.event_handler(id, event); cma_listen_handler()
1706 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; cma_query_handler()
1707 work->event.status = status; cma_query_handler()
1770 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { cma_work_handler()
1793 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { cma_ndev_work_handler()
1820 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; cma_resolve_ib_route()
1879 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; cma_resolve_iw_route()
1962 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; cma_resolve_iboe_route()
1963 work->event.status = 0; cma_resolve_iboe_route()
2097 struct rdma_cm_event event; addr_handler() local
2099 memset(&event, 0, sizeof event); addr_handler()
2113 event.event = RDMA_CM_EVENT_ADDR_ERROR; addr_handler()
2114 event.status = status; addr_handler()
2116 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; addr_handler()
2118 if (id_priv->id.event_handler(&id_priv->id, &event)) { addr_handler()
2153 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; cma_resolve_loopback()
2183 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; cma_resolve_ib_addr()
2667 struct rdma_cm_event event; cma_sidr_rep_handler() local
2674 memset(&event, 0, sizeof event); cma_sidr_rep_handler()
2675 switch (ib_event->event) { cma_sidr_rep_handler()
2677 event.event = RDMA_CM_EVENT_UNREACHABLE; cma_sidr_rep_handler()
2678 event.status = -ETIMEDOUT; cma_sidr_rep_handler()
2681 event.param.ud.private_data = ib_event->private_data; cma_sidr_rep_handler()
2682 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; cma_sidr_rep_handler()
2684 event.event = RDMA_CM_EVENT_UNREACHABLE; cma_sidr_rep_handler()
2685 event.status = ib_event->param.sidr_rep_rcvd.status; cma_sidr_rep_handler()
2690 event.event = RDMA_CM_EVENT_ADDR_ERROR; cma_sidr_rep_handler()
2691 event.status = ret; cma_sidr_rep_handler()
2696 &event.param.ud.ah_attr); cma_sidr_rep_handler()
2697 event.param.ud.qp_num = rep->qpn; cma_sidr_rep_handler()
2698 event.param.ud.qkey = rep->qkey; cma_sidr_rep_handler()
2699 event.event = RDMA_CM_EVENT_ESTABLISHED; cma_sidr_rep_handler()
2700 event.status = 0; cma_sidr_rep_handler()
2703 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n", cma_sidr_rep_handler()
2704 ib_event->event); cma_sidr_rep_handler()
2708 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_sidr_rep_handler()
3057 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) rdma_notify() argument
3068 ret = ib_cm_notify(id_priv->cm_id.ib, event); rdma_notify()
3144 struct rdma_cm_event event; cma_ib_mc_handler() local
3160 memset(&event, 0, sizeof event); cma_ib_mc_handler()
3161 event.status = status; cma_ib_mc_handler()
3162 event.param.ud.private_data = mc->context; cma_ib_mc_handler()
3164 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; cma_ib_mc_handler()
3167 &event.param.ud.ah_attr); cma_ib_mc_handler()
3168 event.param.ud.qp_num = 0xFFFFFF; cma_ib_mc_handler()
3169 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); cma_ib_mc_handler()
3171 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; cma_ib_mc_handler()
3173 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_ib_mc_handler()
3462 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; cma_netdev_change()
3470 static int cma_netdev_callback(struct notifier_block *self, unsigned long event, cma_netdev_callback() argument
3481 if (event != NETDEV_BONDING_FAILOVER) cma_netdev_callback()
3529 struct rdma_cm_event event; cma_remove_id_dev() local
3545 memset(&event, 0, sizeof event); cma_remove_id_dev()
3546 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; cma_remove_id_dev()
3547 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_remove_id_dev()
1628 cma_listen_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) cma_listen_handler() argument
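All of the cma.c paths above end by invoking the consumer's id->event_handler with the rdma_cm_event they built. A hedged sketch of such a handler on the consumer side; the connection-setup details inside each case are omitted, and the event names follow <rdma/rdma_cm.h>.

/* Consumer-side rdma_cm event handler of the kind cma.c calls above. */
#include <linux/printk.h>
#include <rdma/rdma_cm.h>

static int my_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		return rdma_resolve_route(id, 2000);	/* next step, 2s timeout */
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		/* create the QP and call rdma_connect() here */
		return 0;
	case RDMA_CM_EVENT_ESTABLISHED:
		pr_info("rdma_cm: connection established\n");
		return 0;
	case RDMA_CM_EVENT_DISCONNECTED:
		rdma_disconnect(id);
		return 0;
	default:
		pr_info("rdma_cm: unhandled event %d\n", event->event);
		return 0;
	}
}
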
/linux-4.1.27/fs/notify/inotify/
H A Dinotify_fsnotify.c57 struct fsnotify_event *event) inotify_merge()
62 return event_compare(last_event, event); inotify_merge()
73 struct inotify_event_info *event; inotify_handle_event() local
99 event = kmalloc(alloc_len, GFP_KERNEL); inotify_handle_event()
100 if (unlikely(!event)) inotify_handle_event()
103 fsn_event = &event->fse; inotify_handle_event()
105 event->wd = i_mark->wd; inotify_handle_event()
106 event->sync_cookie = cookie; inotify_handle_event()
107 event->name_len = len; inotify_handle_event()
109 strcpy(event->name, file_name); inotify_handle_event()
113 /* Our event wasn't used in the end. Free it. */ inotify_handle_event()
56 inotify_merge(struct list_head *list, struct fsnotify_event *event) inotify_merge() argument
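The kernel code above builds the records that userspace later reads from the inotify file descriptor. For reference, a minimal reader of those records; the watched path and mask are placeholders.

/* Minimal userspace reader for the records queued by the code above. */
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	char *p;
	ssize_t len;
	int fd = inotify_init1(0);

	if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0)
		return 1;

	len = read(fd, buf, sizeof(buf));	/* blocks until events arrive */
	p = buf;
	while (len > 0 && p < buf + len) {
		struct inotify_event *ev = (struct inotify_event *)p;

		printf("wd=%d mask=0x%x cookie=%u name=%s\n",
		       ev->wd, ev->mask, ev->cookie, ev->len ? ev->name : "");
		p += sizeof(*ev) + ev->len;
	}
	close(fd);
	return 0;
}
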
