Searched refs:events (Results 1 - 200 of 2322) sorted by relevance


/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/
multi_counter_test.c
14 * Test counting multiple events using EBBs.
18 struct event events[6]; multi_counter() local
21 event_init_named(&events[0], 0x1001C, "PM_CMPLU_STALL_THRD"); multi_counter()
22 event_init_named(&events[1], 0x2D016, "PM_CMPLU_STALL_FXU"); multi_counter()
23 event_init_named(&events[2], 0x30006, "PM_CMPLU_STALL_OTHER_CMPL"); multi_counter()
24 event_init_named(&events[3], 0x4000A, "PM_CMPLU_STALL"); multi_counter()
25 event_init_named(&events[4], 0x600f4, "PM_RUN_CYC"); multi_counter()
26 event_init_named(&events[5], 0x500fa, "PM_RUN_INST_CMPL"); multi_counter()
28 event_leader_ebb_init(&events[0]); multi_counter()
30 event_ebb_init(&events[i]); multi_counter()
34 events[i].attr.exclude_kernel = 1; multi_counter()
35 events[i].attr.exclude_hv = 1; multi_counter()
36 events[i].attr.exclude_idle = 1; multi_counter()
38 FAIL_IF(event_open_with_group(&events[i], group_fd)); multi_counter()
40 group_fd = events[0].fd; multi_counter()
51 FAIL_IF(ioctl(events[0].fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP)); multi_counter()
52 FAIL_IF(event_read(&events[0])); multi_counter()
81 event_close(&events[i]); multi_counter()
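
For context, the grouping pattern this selftest exercises can be reproduced in plain userspace code. A minimal sketch follows; open_counter() is a hypothetical helper, and the selftest's event_* wrappers perform the equivalent steps.

#include <asm/unistd.h>
#include <linux/perf_event.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Open one hardware counter; pass group_fd = -1 for the group leader,
 * the leader's fd for other members (cf. event_open_with_group()). */
static int open_counter(__u64 config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = config;
	attr.disabled = (group_fd == -1);	/* only the leader starts disabled */
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;
	/* pid 0 = this process, cpu -1 = any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

/* usage:
 *	int leader = open_counter(PERF_COUNT_HW_INSTRUCTIONS, -1);
 *	int member = open_counter(PERF_COUNT_HW_CPU_CYCLES, leader);
 *	ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */
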
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/
count_instructions.c
29 static int do_count_loop(struct event *events, u64 instructions, do_count_loop() argument
42 event_read(&events[0]); do_count_loop()
43 event_read(&events[1]); do_count_loop()
46 difference = events[0].result.value - expected; do_count_loop()
47 percentage = (double)difference / events[0].result.value * 100; do_count_loop()
50 event_report(&events[0]); do_count_loop()
51 event_report(&events[1]); do_count_loop()
55 printf("Actual %llu\n", events[0].result.value); do_count_loop()
59 event_reset(&events[0]); do_count_loop()
60 event_reset(&events[1]); do_count_loop()
67 if (difference / events[0].result.value) do_count_loop()
74 static u64 determine_overhead(struct event *events) determine_overhead() argument
79 do_count_loop(events, 0, 0, false); determine_overhead()
80 overhead = events[0].result.value; determine_overhead()
83 do_count_loop(events, 0, 0, false); determine_overhead()
84 current = events[0].result.value; determine_overhead()
96 struct event events[2]; test_body() local
99 setup_event(&events[0], PERF_COUNT_HW_INSTRUCTIONS, "instructions"); test_body()
100 setup_event(&events[1], PERF_COUNT_HW_CPU_CYCLES, "cycles"); test_body()
102 if (event_open(&events[0])) { test_body()
107 if (event_open_with_group(&events[1], events[0].fd)) { test_body()
112 overhead = determine_overhead(events); test_body()
116 FAIL_IF(do_count_loop(events, 1000000, overhead, true)); test_body()
119 FAIL_IF(do_count_loop(events, 10000000, overhead, true)); test_body()
122 FAIL_IF(do_count_loop(events, 100000000, overhead, true)); test_body()
125 FAIL_IF(do_count_loop(events, 1000000000, overhead, true)); test_body()
128 FAIL_IF(do_count_loop(events, 16000000000, overhead, true)); test_body()
131 FAIL_IF(do_count_loop(events, 64000000000, overhead, true)); test_body()
133 event_close(&events[0]); test_body()
134 event_close(&events[1]); test_body()
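
The calibration above measures a fixed loop overhead, then subtracts it before comparing counts. Below is a distilled, hypothetical version of that pass/fail logic; counts_match() is not in the selftest, it condenses the difference/percentage computation visible at lines 46-47 and 67.

#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

/* The measured count should equal the programmed instruction count plus
 * the previously measured fixed overhead, within a small margin. */
static int counts_match(u64 actual, u64 instructions, u64 overhead)
{
	u64 expected = instructions + overhead;
	s64 difference = (s64)(actual - expected);
	double percentage = (double)difference / actual * 100;

	/* accept small positive drift; reject undercounts outright */
	return difference >= 0 && percentage < 1.0;
}
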
per_event_excludes.c
25 struct event *e, events[4]; per_event_excludes() local
34 * We need to create the events disabled, otherwise the running/enabled per_event_excludes()
37 e = &events[0]; per_event_excludes()
42 e = &events[1]; per_event_excludes()
49 e = &events[2]; per_event_excludes()
56 e = &events[3]; per_event_excludes()
63 FAIL_IF(event_open(&events[0])); per_event_excludes()
68 * and we're asking for the events to be in a group. per_event_excludes()
71 FAIL_IF(event_open_with_group(&events[i], events[0].fd)); per_event_excludes()
86 FAIL_IF(event_read(&events[i])); per_event_excludes()
87 event_report(&events[i]); per_event_excludes()
91 * We should see that all events have enabled == running. That per_event_excludes()
95 FAIL_IF(events[i].result.running != events[i].result.enabled); per_event_excludes()
103 FAIL_IF(events[0].result.value < events[i].result.value); per_event_excludes()
106 event_close(&events[i]); per_event_excludes()
/linux-4.1.27/samples/trace_events/
Makefile
1 # builds the trace events example kernel modules;
4 # If you include a trace header outside of include/trace/events
10 # Here trace-events-sample.c does the CREATE_TRACE_POINTS.
12 CFLAGS_trace-events-sample.o := -I$(src)
14 obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace-events-sample.o
trace-events-sample.c
11 #include "trace-events-sample.h"
129 MODULE_DESCRIPTION("trace-events-sample");
/linux-4.1.27/net/core/
net-traces.c
29 #include <trace/events/skb.h>
30 #include <trace/events/net.h>
31 #include <trace/events/napi.h>
32 #include <trace/events/sock.h>
33 #include <trace/events/udp.h>
/linux-4.1.27/drivers/scsi/lpfc/
lpfc_logmsg.h
21 #define LOG_ELS 0x00000001 /* ELS events */
22 #define LOG_DISCOVERY 0x00000002 /* Link discovery events */
23 #define LOG_MBOX 0x00000004 /* Mailbox events */
24 #define LOG_INIT 0x00000008 /* Initialization events */
25 #define LOG_LINK_EVENT 0x00000010 /* Link events */
28 #define LOG_NODE 0x00000080 /* Node table events */
29 #define LOG_TEMP 0x00000100 /* Temperature sensor events */
30 #define LOG_BG 0x00000200 /* BlockGuard events */
31 #define LOG_MISC 0x00000400 /* Miscellaneous events */
32 #define LOG_SLI 0x00000800 /* SLI events */
34 #define LOG_LIBDFC 0x00002000 /* Libdfc events */
35 #define LOG_VPORT 0x00004000 /* NPIV events */
36 #define LOG_SECURITY 0x00008000 /* Security events */
38 #define LOG_FIP 0x00020000 /* FIP events */
lpfc_nl.h
22 #define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
23 #define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
24 #define FC_REG_CT_EVENT 0x0004 /* CT request events */
25 #define FC_REG_DUMP_EVENT 0x0010 /* Dump events */
26 #define FC_REG_TEMPERATURE_EVENT 0x0020 /* temperature events */
27 #define FC_REG_VPORTRSCN_EVENT 0x0040 /* Vport RSCN events */
28 #define FC_REG_ELS_EVENT 0x0080 /* lpfc els events */
29 #define FC_REG_FABRIC_EVENT 0x0100 /* lpfc fabric events */
30 #define FC_REG_SCSI_EVENT 0x0200 /* lpfc scsi events */
31 #define FC_REG_BOARD_EVENT 0x0400 /* lpfc board events */
32 #define FC_REG_ADAPTER_EVENT 0x0800 /* lpfc adapter events */
44 /* Temperature events */
/linux-4.1.27/drivers/media/pci/cx23885/
cx23885-ir.c
37 u32 events = 0; cx23885_ir_rx_work_handler() local
41 events |= V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN; cx23885_ir_rx_work_handler()
43 events |= V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN; cx23885_ir_rx_work_handler()
45 events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED; cx23885_ir_rx_work_handler()
47 events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ; cx23885_ir_rx_work_handler()
49 if (events == 0) cx23885_ir_rx_work_handler()
53 cx23885_input_rx_work_handler(dev, events); cx23885_ir_rx_work_handler()
60 u32 events = 0; cx23885_ir_tx_work_handler() local
64 events |= V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ; cx23885_ir_tx_work_handler()
66 if (events == 0) cx23885_ir_tx_work_handler()
72 void cx23885_ir_rx_v4l2_dev_notify(struct v4l2_subdev *sd, u32 events) cx23885_ir_rx_v4l2_dev_notify() argument
77 if (events & V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ) cx23885_ir_rx_v4l2_dev_notify()
79 if (events & V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED) cx23885_ir_rx_v4l2_dev_notify()
81 if (events & V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN) cx23885_ir_rx_v4l2_dev_notify()
83 if (events & V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN) cx23885_ir_rx_v4l2_dev_notify()
97 void cx23885_ir_tx_v4l2_dev_notify(struct v4l2_subdev *sd, u32 events) cx23885_ir_tx_v4l2_dev_notify() argument
102 if (events & V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ) cx23885_ir_tx_v4l2_dev_notify()
cx23885-ir.h
21 void cx23885_ir_rx_v4l2_dev_notify(struct v4l2_subdev *sd, u32 events);
22 void cx23885_ir_tx_v4l2_dev_notify(struct v4l2_subdev *sd, u32 events);
cx23885-input.h
21 void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events);
/linux-4.1.27/lib/
flex_proportions.c
13 * Where x_{i,j} is j's number of events in i-th last time period and x_i is
14 * total number of events in i-th last time period.
42 /* Use 1 to avoid dealing with periods with 0 events... */ fprop_global_init()
43 err = percpu_counter_init(&p->events, 1, gfp); fprop_global_init()
52 percpu_counter_destroy(&p->events); fprop_global_destroy()
60 * if aging zeroed out all events. This can be used to detect whether declaring
65 s64 events; fprop_new_period() local
69 events = percpu_counter_sum(&p->events); fprop_new_period()
71 * Don't do anything if there are no events. fprop_new_period()
73 if (events <= 1) { fprop_new_period()
79 events -= events >> periods; fprop_new_period()
80 /* Use addition to avoid losing events happening between sum and set */ fprop_new_period()
81 percpu_counter_add(&p->events, -events); fprop_new_period()
95 pl->events = 0; fprop_local_init_single()
122 pl->events >>= period - pl->period; fprop_reflect_period_single()
124 pl->events = 0; fprop_reflect_period_single()
133 pl->events++; __fprop_inc_single()
134 percpu_counter_add(&p->events, 1); __fprop_inc_single()
137 /* Return fraction of events of type pl */ fprop_fraction_single()
148 num = pl->events; fprop_fraction_single()
149 den = percpu_counter_read_positive(&p->events); fprop_fraction_single()
175 err = percpu_counter_init(&pl->events, 0, gfp); fprop_local_init_percpu()
185 percpu_counter_destroy(&pl->events); fprop_local_destroy_percpu()
205 s64 val = percpu_counter_read(&pl->events); fprop_reflect_period_percpu()
208 val = percpu_counter_sum(&pl->events); fprop_reflect_period_percpu()
210 __percpu_counter_add(&pl->events, fprop_reflect_period_percpu()
213 percpu_counter_set(&pl->events, 0); fprop_reflect_period_percpu()
222 __percpu_counter_add(&pl->events, 1, PROP_BATCH); __fprop_inc_percpu()
223 percpu_counter_add(&p->events, 1); __fprop_inc_percpu()
236 num = percpu_counter_read_positive(&pl->events); fprop_fraction_percpu()
237 den = percpu_counter_read_positive(&p->events); fprop_fraction_percpu()
270 __percpu_counter_add(&pl->events, 1, PROP_BATCH); __fprop_inc_percpu_max()
271 percpu_counter_add(&p->events, 1); __fprop_inc_percpu_max()
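
For reference, the proportion these fragments maintain is, per the full comment block in lib/flex_proportions.c, an exponentially decaying fraction; in LaTeX:

  p_j = \frac{\sum_{i \ge 0} x_{i,j}/2^{i+1}}{\sum_{i \ge 0} x_i/2^{i+1}}

Each elapsed period halves the weight of older events; the "events -= events >> periods" aging in fprop_new_period() above is the batched form of this decay.
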
proportions.c
13 * Where j is an element from {prop_local}, x_{j} is j's number of events,
24 * Furthermore, if we measure time (t) in the same events as x; so that:
86 err = percpu_counter_init(&pd->pg[0].events, 0, gfp); prop_descriptor_init()
90 err = percpu_counter_init(&pd->pg[1].events, 0, gfp); prop_descriptor_init()
92 percpu_counter_destroy(&pd->pg[0].events); prop_descriptor_init()
100 * update. The update is not really atomic wrt the events counter, but
103 * We copy the events count, move the bits around and flip the index.
109 u64 events; prop_change_shift() local
125 events = percpu_counter_sum(&pd->pg[pd->index].events); prop_change_shift()
127 events <<= -offset; prop_change_shift()
129 events >>= offset; prop_change_shift()
130 percpu_counter_set(&pd->pg[index].events, events); prop_change_shift()
196 return percpu_counter_init(&pl->events, 0, gfp); prop_local_init_percpu()
201 percpu_counter_destroy(&pl->events); prop_local_destroy_percpu()
219 global_period = percpu_counter_read(&pg->events); prop_norm_percpu()
235 * pl->events >> (global_period - pl->period); prop_norm_percpu()
239 s64 val = percpu_counter_read(&pl->events); prop_norm_percpu()
242 val = percpu_counter_sum(&pl->events); prop_norm_percpu()
244 __percpu_counter_add(&pl->events, -val + (val >> period), prop_norm_percpu()
247 percpu_counter_set(&pl->events, 0); prop_norm_percpu()
261 __percpu_counter_add(&pl->events, 1, PROP_BATCH); __prop_inc_percpu()
262 percpu_counter_add(&pg->events, 1); __prop_inc_percpu()
268 * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded.
283 numerator = percpu_counter_read_positive(&pl->events); __prop_inc_percpu_max()
284 global_count = percpu_counter_read(&pg->events); __prop_inc_percpu_max()
291 percpu_counter_add(&pl->events, 1); __prop_inc_percpu_max()
292 percpu_counter_add(&pg->events, 1); __prop_inc_percpu_max()
313 *numerator = percpu_counter_read_positive(&pl->events); prop_fraction_percpu()
315 global_count = percpu_counter_read(&pg->events); prop_fraction_percpu()
330 pl->events = 0; prop_local_init_single()
349 global_period = percpu_counter_read(&pg->events); prop_norm_single()
366 pl->events >>= period; prop_norm_single()
368 pl->events = 0; prop_norm_single()
381 pl->events++; __prop_inc_single()
382 percpu_counter_add(&pg->events, 1); __prop_inc_single()
401 *numerator = pl->events; prop_fraction_single()
403 global_count = percpu_counter_read(&pg->events); prop_fraction_single()
/linux-4.1.27/tools/perf/tests/
parse-no-sample-id-all.c
38 static int process_events(union perf_event **events, size_t count) process_events() argument
45 err = process_event(&evlist, events[i]); process_events()
62 * sample_id_all bit. Without the sample_id_all bit, non-sample events (such as
63 * mmap events) do not have an id sample appended, and consequently logic
65 * more than one selected event, so this test processes three events: 2
66 * attributes representing the selected events and one mmap event.
98 union perf_event *events[] = { test__parse_no_sample_id_all() local
104 err = process_events(events, ARRAY_SIZE(events)); test__parse_no_sample_id_all()
switch-tracking.c
6 #include "parse-events.h"
138 * Check for no missing sched_switch events i.e. that the process_sample_event()
143 pr_debug("Missing sched_switch events\n"); process_sample_event()
203 static int add_event(struct perf_evlist *evlist, struct list_head *events, add_event() argument
215 list_add(&node->list, events); add_event()
232 static void free_event_nodes(struct list_head *events) free_event_nodes() argument
236 while (!list_empty(events)) { free_event_nodes()
237 node = list_entry(events->next, struct event_node, list); free_event_nodes()
257 LIST_HEAD(events); process_events()
264 ret = add_event(evlist, &events, event); process_events()
279 list_for_each_entry(node, &events, list) process_events()
293 pr_debug("%u events recorded\n", cnt); process_events()
296 free_event_nodes(&events); process_events()
301 * test__switch_tracking - test using sched_switch and tracking events.
303 * This function implements a test that checks that sched_switch events and
304 * tracking events can be recorded for a workload (current process) using the
305 * evsel->system_wide and evsel->tracking flags (respectively) with other events
419 /* Config events */ test__switch_tracking()
434 /* Check non-tracking events are not tracking */ evlist__for_each()
534 /* Check all 4 comm events were seen i.e. that evsel->tracking works */
537 pr_debug("Missing comm events\n");
543 pr_debug("Missing cycles events\n");
549 pr_debug("cycles events even though event was disabled\n");
555 pr_debug("Missing cycles events\n");
mmap-basic.c
9 * then establish an mmap for a group of events that are created to monitor
12 * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
15 * Then it checks if the number of syscalls reported as perf events by
92 pr_debug("failed to mmap events: %d (%s)\n", errno, test__basic_mmap()
132 pr_debug("expected %d %s events, got %d\n", evlist__for_each()
attr.py
139 log.debug(" loading expected events");
148 def load_events(self, path, events):
168 events[section] = e
186 # events in result. Fail if there's not any.
207 # For each defined group in the expected events
225 def resolve_groups(self, events):
226 for name, event in events.items():
231 for iname, ievent in events.items():
244 # load events expectation for the test
245 log.debug(" loading result events");
keep-tracking.c
5 #include "parse-events.h"
48 * This function implements a test that checks that tracking events continue
/linux-4.1.27/arch/mips/loongson/common/
pm.c
27 /* disable all mips events */ arch_suspend_disable_irqs()
31 /* disable all events of i8259A */ arch_suspend_disable_irqs()
40 /* disable all events of bonito */ arch_suspend_disable_irqs()
48 /* enable all mips events */ arch_suspend_enable_irqs()
51 /* only enable the cached events of i8259A */ arch_suspend_enable_irqs()
55 /* enable all cached events of bonito */ arch_suspend_enable_irqs()
61 * Setup the board-specific events for waking up loongson from wait mode
68 * Check wakeup events
76 * If the events are really what we want to wakeup the CPU, wake it up
100 /* setup wakeup events via enabling the IRQs */ loongson_suspend_enter()
110 /* wait for the given events to wakeup cpu from wait mode */ loongson_suspend_enter()
/linux-4.1.27/include/linux/
flex_proportions.h
18 * bound on the number of events per period like
28 /* Number of events in the current period */
29 struct percpu_counter events; member in struct:fprop_global
44 /* the local events counter */
45 unsigned long events; member in struct:fprop_local_single
46 /* Period in which we last updated events */
76 /* the local events counter */
77 struct percpu_counter events; member in struct:fprop_local_percpu
78 /* Period in which we last updated events */
vt.h
7 /* Virtual Terminal events. */
proportions.h
30 struct percpu_counter events; member in struct:prop_global
53 * the local events counter
55 struct percpu_counter events; member in struct:prop_local_percpu
104 * the local events counter
106 unsigned long events; member in struct:prop_local_single
vmpressure.h
19 struct list_head events; member in struct:vmpressure
20 /* Have to grab the lock on events traversal or modifications. */
fsl_hypervisor.h
46 * fsl_hv_event_register() - register a callback for failover events
50 * functions for fail-over events.
58 * fsl_hv_event_unregister() - unregister a callback for failover events
wireless.h
33 /* Size of the various events for compat */
mei_cl_bus.h
32 u32 events, void *context);
fsnotify_backend.h
24 * holes if it needs more events.
46 #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
57 /* This is a list of all events that may get sent to a parent based on fs event
122 * events. The mask holds the subset of event types this group cares about.
143 unsigned int q_len; /* events on the queue */
144 unsigned int max_events; /* maximum events allowed on the list */
201 * fsnotify listener to indicate they are either no longer interested in events
202 * of a type matching mask or only interested in those events.
227 __u32 ignored_mask; /* events types to ignore */
241 /* main fsnotify call to send events */
254 /* this inode might care about child events, does it care about the fsnotify_inode_watches_children()
255 * specific set of events that can happen on a child? */ fsnotify_inode_watches_children()
261 * filesystem events when those events happens to this dentry->d_inode.
input.h
46 * @evbit: bitmap of types of events supported by the device (EV_KEY,
51 * @mscbit: bitmap of miscellaneous events supported by the device
56 * @hint_events_per_packet: average number of events generated by the
57 * device in a packet (between EV_SYN/SYN_REPORT events). Used by
59 * events.
84 * to start generating events (start polling thread,
91 * @event: event handler for events sent _to_ the device, like EV_LED
97 * recipient for all input events coming from the device
250 * @events: event sequence handler. This method is being called by
289 void (*events)(struct input_handle *handle, member in struct:input_handler
311 * events from its device
317 * it gets events
427 * @n_events: the average number of events between calls to input_sync()
/linux-4.1.27/drivers/iommu/
iommu-traces.c
12 #include <trace/events/iommu.h>
/linux-4.1.27/kernel/trace/
power-traces.c
14 #include <trace/events/power.h>
rpm-traces.c
15 #include <trace/events/rpm.h>
trace_events.c
60 list_for_each_entry(file, &tr->events, list)
65 list_for_each_entry_safe(file, ___n, &tr->events, list)
421 list_for_each_entry(file, &tr->events, list) { ftrace_clear_events()
512 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
523 list_for_each_entry(file, &tr->events, list) { __ftrace_set_clr_event_nolock()
575 * <subsystem>:* means all events in that subsystem ftrace_set_clr_event()
578 * <name> (no ':') means all events in a subsystem with ftrace_set_clr_event()
606 * @event: event name to match (NULL for all events, within system)
613 * registered events.
680 list_for_each_entry_continue(file, &tr->events, list) { t_next()
702 file = list_entry(&tr->events, struct ftrace_event_file, list); t_start()
719 list_for_each_entry_continue(file, &tr->events, list) { s_next()
735 file = list_entry(&tr->events, struct ftrace_event_file, list); s_start()
842 list_for_each_entry(file, &tr->events, list) { system_enable_read()
851 * We need to find out if all the events are set system_enable_read()
852 * or if all events or cleared, or if we have system_enable_read()
1149 /* Don't open systems with no events */ subsystem_open()
1614 * Other events may have the same class. Only update event_create_dir()
1621 pr_warn("Could not initialize trace point events/%s\n", event_create_dir()
1699 pr_warn("Could not initialize trace events/%s\n", name); event_init()
1827 /* events are usually grouped together with systems */ trace_event_enum_update()
1860 list_add(&file->list, &tr->events); trace_create_new_event()
1880 * for enabling events at boot. We want to enable events before
1992 pr_err("%s: module has bad taint, not creating trace events\n", trace_module_add_events()
2023 * registered any events that were used. The only worry is if trace_module_remove_events()
2024 * a new module gets loaded, and takes on the same id as the events trace_module_remove_events()
2025 * of this module. When printing out the buffer, traced events left trace_module_remove_events()
2026 * over from this module may be passed to the new module events and trace_module_remove_events()
2082 list_for_each_entry(file, &tr->events, list) { find_event_file()
2368 * descriptors created in order to allow for early events to register_event_cmds()
2371 * to the events. register_event_cmds()
2380 list_for_each_entry(file, &tr->events, list) { __trace_early_add_event_dirs()
2390 * a list of events that can be enabled. This must be done before
2391 * the filesystem is set up in order to allow events to be traced
2418 list_for_each_entry_safe(file, next, &tr->events, list) __trace_remove_event_dirs()
2459 d_events = tracefs_create_dir("events", parent); create_event_toplevel_files()
2461 pr_warn("Could not create tracefs 'events' directory\n"); create_event_toplevel_files()
2483 * event_trace_add_tracer - add a instance of a trace_array to events
2484 * @parent: The parent dentry to place the files/directories for events in
2485 * @tr: The trace array associated with these events
2487 * When a new instance is created, it needs to set up its events
2488 * directory, as well as other files associated with events. It also
2489 * creates the event hierarchy in the @parent/events directory.
2542 /* Disable any event triggers and associated soft-disabled events */ event_trace_del_tracer()
2545 /* Disable any running events */ event_trace_del_tracer()
2548 /* Access to events are within rcu_read_lock_sched() */ event_trace_del_tracer()
2637 * initialize events and perhaps start any events that are on the
2638 * command line. Unfortunately, there are some events that will not
2690 pr_warn("Failed to register trace events module notifier\n"); event_trace_init()
2746 * Do various things that may trigger events.
2752 test_thread = kthread_run(event_test_thread, NULL, "test-events"); event_test_stuff()
2774 pr_info("Running tests on trace events:\n"); event_trace_self_tests()
2776 list_for_each_entry(file, &tr->events, list) { event_trace_self_tests()
2785 * Testing syscall events here is pretty useless, but event_trace_self_tests()
2848 /* Test with all events enabled */ event_trace_self_tests()
2850 pr_info("Running tests on all trace events:\n"); event_trace_self_tests()
2851 pr_info("Testing all events: "); event_trace_self_tests()
2855 pr_warn("error enabling all events\n"); event_trace_self_tests()
2864 pr_warn("error disabling all events\n"); event_trace_self_tests()
/linux-4.1.27/arch/sparc/include/asm/
pcr.h
19 #define PCR_STRACE 0x00000002 /* Trace supervisor events */
20 #define PCR_UTRACE 0x00000004 /* Trace user events */
21 #define PCR_N2_HTRACE 0x00000008 /* Trace hypervisor events */
37 #define PCR_N4_UTRACE 0x00000004 /* Trace user events */
38 #define PCR_N4_STRACE 0x00000008 /* Trace supervisor events */
39 #define PCR_N4_HTRACE 0x00000010 /* Trace hypervisor events */
estate.h
14 * errors 2) uncorrectable E-cache errors. Such events only occur on reads
16 * fetches 3) atomic operations. Such events _cannot_ occur for: 1) merge
46 * log the events even though the trap will not be generated by the processor.
pil.h
15 * Finally, in order to handle profiling events even when a
/linux-4.1.27/include/uapi/linux/
eventpoll.h
30 * Request the handling of system wakeup events so as to prevent system suspends
31 * from happening while those events are being processed.
60 __u32 events; member in struct:epoll_event
67 if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND)) ep_take_care_of_epollwakeup()
68 epev->events &= ~EPOLLWAKEUP; ep_take_care_of_epollwakeup()
73 epev->events &= ~EPOLLWAKEUP; ep_take_care_of_epollwakeup()
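
A minimal userspace sketch of how the events mask and EPOLLWAKEUP interact; as ep_take_care_of_epollwakeup() above shows, the kernel silently clears EPOLLWAKEUP for callers without CAP_BLOCK_SUSPEND:

#include <sys/epoll.h>

/* Register fd on an existing epoll instance and block until it is
 * readable; returns the number of ready events or -1 on error. */
static int wait_readable(int epfd, int fd)
{
	struct epoll_event ev = {
		/* EPOLLWAKEUP holds off system suspend while the event is
		 * being handled; it requires CAP_BLOCK_SUSPEND */
		.events = EPOLLIN | EPOLLWAKEUP,
		.data.fd = fd,
	};
	struct epoll_event out;

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
		return -1;
	return epoll_wait(epfd, &out, 1, -1);
}
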
fanotify.h
6 /* the following events that user-space can register for */
20 #define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */
22 /* helper events */
63 * All of the events - we build the list by hand so that we can add flags in
65 * events that they originally wanted. Be sure to add new events here!
73 * All events which require a permission response from userspace
inotify.h
17 * When you are watching a directory, you will receive the filename for events
23 __u32 cookie; /* cookie to synchronize two events */
28 /* the following are legal, implemented events that user-space can watch for */
42 /* the following are legal events. they are sent as needed to any watch */
47 /* helper events */
54 #define IN_EXCL_UNLINK 0x04000000 /* exclude events on unlinked objects */
60 * All of the events - we build the list by hand so that we can add flags in
62 * events that they originally wanted. Be sure to add new events here!
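
The inotify flags above are consumed from userspace roughly as follows. A minimal sketch; the watched path and event mask are arbitrary choices for illustration:

#include <limits.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

/* Watch a directory for create/delete and print the first event. */
static int watch_dir(const char *path)
{
	char buf[sizeof(struct inotify_event) + NAME_MAX + 1]
		__attribute__((aligned(__alignof__(struct inotify_event))));
	struct inotify_event *ev;
	ssize_t len;
	int fd;

	fd = inotify_init1(IN_CLOEXEC);
	if (fd < 0)
		return -1;
	if (inotify_add_watch(fd, path, IN_CREATE | IN_DELETE) < 0) {
		close(fd);
		return -1;
	}
	len = read(fd, buf, sizeof(buf));	/* blocks until an event */
	if (len > 0) {
		ev = (struct inotify_event *)buf;
		printf("mask=0x%x name=%s\n", ev->mask,
		       ev->len ? ev->name : "");
	}
	close(fd);
	return len > 0 ? 0 : -1;
}
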
thermal.h
12 enum events { enum
cn_proc.h
2 * cn_proc.h - process events connector
25 * for events on the connector.
48 * sets of events as well
perf_event.h
2 * Performance events:
46 * Common hardware events, generalized by the kernel:
63 * Generalized hardware cache events:
97 * Special "software" events provided by the kernel, even if the hardware
98 * does not support performance events. These events measure various
99 * physical and sw events of the kernel (and allow the profiling of them as
209 * abort events. Multiple bits can be set.
321 sample_id_all : 1, /* sample_type all events */
329 comm_exec : 1, /* flag comm events that are due to an exec */
334 __u32 wakeup_events; /* wakeup every n events */
405 * Bits needed to read the hw events in user-space.
568 * different events so can reuse the same bit position.
616 * The MMAP events record the PROT_EXEC mappings so that we can
/linux-4.1.27/arch/m68k/mac/
oss.c
68 int events = oss->irq_pending & oss_irq() local
72 if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) { oss_irq()
73 printk("oss_irq: irq %u events = 0x%04X\n", irq, oss_irq()
78 if (events & OSS_IP_IOPSCC) { oss_irq()
83 if (events & OSS_IP_SCSI) { oss_irq()
88 if (events & OSS_IP_IOPISM) { oss_irq()
102 int events, irq_bit, i; oss_nubus_irq() local
104 events = oss->irq_pending & OSS_IP_NUBUS; oss_nubus_irq()
105 if (!events) oss_nubus_irq()
110 printk("oss_nubus_irq: events = 0x%04X\n", events); oss_nubus_irq()
120 if (events & irq_bit) { oss_nubus_irq()
124 } while(events & (irq_bit - 1)); oss_nubus_irq()
baboon.c
51 unsigned char events; baboon_irq() local
59 events = baboon->mb_ifr & 0x07; baboon_irq()
60 if (!events) baboon_irq()
66 if (events & irq_bit) { baboon_irq()
72 } while(events >= irq_bit); baboon_irq()
76 baboon->mb_ifr &= ~events; baboon_irq()
via.c
452 unsigned char irq_bit, events; via1_irq() local
454 events = via1[vIFR] & via1[vIER] & 0x7F; via1_irq()
455 if (!events) via1_irq()
461 if (events & irq_bit) { via1_irq()
467 } while (events >= irq_bit); via1_irq()
473 unsigned char irq_bit, events; via2_irq() local
475 events = via2[gIFR] & via2[gIER] & 0x7F; via2_irq()
476 if (!events) via2_irq()
482 if (events & irq_bit) { via2_irq()
488 } while (events >= irq_bit); via2_irq()
499 unsigned char slot_bit, events; via_nubus_irq() local
501 events = ~via2[gBufA] & 0x7F; via_nubus_irq()
503 events &= via2[rSIER]; via_nubus_irq()
505 events &= ~via2[vDirA]; via_nubus_irq()
506 if (!events) via_nubus_irq()
513 if (events & slot_bit) { via_nubus_irq()
514 events &= ~slot_bit; via_nubus_irq()
519 } while (events); via_nubus_irq()
523 events = ~via2[gBufA] & 0x7F; via_nubus_irq()
525 events &= via2[rSIER]; via_nubus_irq()
527 events &= ~via2[vDirA]; via_nubus_irq()
528 } while (events); via_nubus_irq()
psc.c
122 unsigned char irq_bit, events; psc_irq() local
129 events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF; psc_irq()
130 if (!events) psc_irq()
136 if (events & irq_bit) { psc_irq()
142 } while (events >= irq_bit); psc_irq()
/linux-4.1.27/include/sound/
asequencer.h
35 /* result events: 0-4 */
37 /* channel specific events: 5-19 */
39 /* note events: 5-9 */
41 /* control events: 10-19 */
43 /* queue control events: 30-39 */
51 /* fixed length events: 0-99 */
53 /* variable length events: 130-139 */
58 /* direct dispatched events */
64 /* prior events */
seq_virmidi.h
67 * ATTACH = input/output events from midi device are routed to the
71 * incoming events via snd_virmidi_receive()
72 * DISPATCH = input/output events are routed to subscribers.
seq_kernel.h
39 /* max number of events in memory pool */
42 /* default number of events in memory pool */
45 /* max number of events in memory pool for one client (outqueue) */
48 /* default number of events in memory pool for one client (outqueue) */
/linux-4.1.27/arch/powerpc/perf/
power4-pmu.c
20 #define PM_PMC_SH 12 /* PMC number (1-based) for direct events */
118 * 54: FPU events needed 0x0040_0000_0000_0000
119 * 53: ISU1 events needed 0x0020_0000_0000_0000
120 * 52: IDU0|ISU2 events needed 0x0010_0000_0000_0000
124 * 50: FPU events needed 0x0004_0000_0000_0000
125 * 49: IFU events needed 0x0002_0000_0000_0000
126 * 48: LSU0 events needed 0x0001_0000_0000_0000
130 * 46: LSU0 events needed 0x4000_0000_0000
131 * 45: IFU events needed 0x2000_0000_0000
132 * 44: IDU0|ISU2 events needed 0x1000_0000_0000
133 * 43: ISU1 events needed 0x0800_0000_0000
136 * 42: 0 = IDU0 events needed
137 * 1 = ISU2 events needed 0x0400_0000_0000
140 * 41: 0 = IFU.U events needed
141 * 1 = IFU.L events needed 0x0200_0000_0000
144 * 40: 0 = LSU1.U events needed
145 * 1 = LSU1.L events needed 0x0100_0000_0000
149 * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
153 * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
171 * 14-15: Count of events needing PMC8
174 * 0-13: Count of events needing PMC1..PMC7
176 * Note: this doesn't allow events using IFU.U to be combined with events
178 * there are no listed events for IFU.L (they are debug events not
228 if (psel == 0) /* add events */ p4_marked_instr_event()
230 else if (psel == 6) /* decode events */ p4_marked_instr_event()
276 * Bus events on bytes 0 and 2 can be counted p4_get_constraint()
307 /* Marked instruction events need sample_enable set */ p4_get_constraint()
313 /* PMCSEL=6 decode events on byte 2 need sample_enable clear */ p4_get_constraint()
496 /* add events on higher-numbered bus */ p4_compute_mmcr()
555 * Table of generalized cache-related events.
e6500-pmu.c
21 * Map of generic hardware event types to hardware events
35 * Table of generalized cache-related events.
54 * It does not have separate read/write events (but it does have
55 * separate instruction/data events).
91 /* Upper half of event id is PMLCb, for threshold events */ e6500_xlate_event()
e500-pmu.c
18 * Map of generic hardware event types to hardware events
34 * Table of generalized cache-related events.
56 * does not have separate read/write events (but it does have
57 * separate instruction/data events).
89 /* Upper half of event id is PMLCb, for threshold events */ e500_xlate_event()
ppc970-pmu.c
19 #define PM_PMC_SH 12 /* PMC number (1-based) for direct events */
104 * 42: FPU|IFU|VPU events needed 0x0400_0000_0000
105 * 41: ISU events needed 0x0200_0000_0000
106 * 40: IDU|STS events needed 0x0100_0000_0000
110 * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
114 * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
125 * 14-15: Count of events needing PMC1
128 * 0-13: Count of events needing PMC2..PMC8
157 if (psel == 0) /* add events */ p970_marked_instr_event()
159 else if (psel == 7 || psel == 13) /* decode events */ p970_marked_instr_event()
184 /* Masks and values for using events from the various units */
218 * Bus events on bytes 0 and 2 can be counted p970_get_constraint()
377 /* add events on higher-numbered bus */ p970_compute_mmcr()
435 * Table of generalized cache-related events.
power7-pmu.c
20 #define PM_PMC_SH 16 /* PMC number (1-based) for direct events */
60 #include "power7-events-list.h"
75 * 12-14: number of events needing PMC1-4 0x7000
79 * 10-11: Count of events needing PMC6
82 * 0-9: Count of events needing PMC1..PMC5
109 /* L2SEL must be identical across events */ power7_get_constraint()
150 /* this only handles the 4x decode events */ find_alternative_decode()
294 if (unit == 6) /* L2 events */ power7_compute_mmcr()
334 * Table of generalized cache-related events.
387 #include "power7-events-list.h"
402 #include "power7-events-list.h"
408 .name = "events",
power5+-pmu.c
20 #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */
84 * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
101 * 32: FPU|IFU|ISU1 events needed 0x01_0000_0000
102 * 31: ISU0 events needed 0x01_8000_0000
103 * 30: IDU|GRS events needed 0x00_4000_0000
114 * 10-11: Count of events needing PMC6
117 * 0-9: Count of events needing PMC1..PMC5
126 /* Masks and values for using events from the various units */
240 * Some direct events for decodes of event bus byte 3 have alternative
243 * alternative PCMSEL values for add events.
334 /* remove the limited PMC events */ power5p_get_alternatives()
344 /* remove all but the limited PMC events */ power5p_get_alternatives()
359 * Map of which direct events on which PMCs are marked instruction events.
571 /* add events on higher-numbered bus */ power5p_compute_mmcr()
621 * Table of generalized cache-related events.
core-book3s.c
37 u64 events[MAX_HWEVENTS]; member in struct:cpu_hw_events
67 * Normally, to ignore kernel events we set the FCS (freeze counters
71 * then we need to use the FCHV bit to ignore kernel events.
285 * If the PMU doesn't update the SIAR for non marked events use perf_read_regs()
378 * events are active on the PMU. power_pmu_bhrb_disable()
597 * unfreeze counters, it should not set exclude_xxx in its events and ebb_switch_in()
823 * Check if a set of events can all go on the PMU at once.
824 * If they can't, this will look at alternative codes for the events
842 /* First see if the events will go on as-is */ power_check_constraints()
936 * Check if newly-added events have consistent settings for
938 * added events.
995 * number of events to rollback at once. If we detect a rollback check_and_compute_delta()
1104 * Since limited events don't respect the freeze conditions, we
1106 * other events. We try to keep the values from the limited
1107 * events as consistent as possible by keeping the delay (in
1109 * the limited events as small and consistent as possible.
1110 * Therefore, if any limited events are in use, we read them
1127 * events, we first write MMCR0 with the event overflow write_mmcr0()
1150 * Disable all events to prevent PMU interrupts and to allow
1151 * events to be added or removed.
1182 * executed and the PMU has frozen the events etc. power_pmu_disable()
1207 * Re-enable all events if disable == 0.
1208 * If we were previously disabled and events were added, then
1240 * EBB requires an exclusive group and all events must have the EBB power_pmu_enable()
1247 * If we didn't change anything, or only removed events, power_pmu_enable()
1250 * (possibly updated for removal of events). power_pmu_enable()
1259 * Clear all MMCR settings and recompute them for the new set of events. power_pmu_enable()
1263 if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, power_pmu_enable()
1274 * events have the same value for these bits as the first event. power_pmu_enable()
1287 * bit set and set the hardware events to their initial values. power_pmu_enable()
1288 * Then unfreeze the events. power_pmu_enable()
1299 * Read off any pre-existing events that need to move power_pmu_enable()
1312 * Initialize the PMCs for all the new and moved events. power_pmu_enable()
1374 struct perf_event *ctrs[], u64 *events, collect_events()
1385 events[n++] = group->hw.config; collect_events()
1394 events[n++] = event->hw.config; collect_events()
1402 * If all events are not already frozen, then we disable and
1425 cpuhw->events[n0] = event->hw.config; power_pmu_add()
1440 * If group events scheduling transaction was started, power_pmu_add()
1449 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) power_pmu_add()
1451 event->hw.config = cpuhw->events[n0]; power_pmu_add()
1491 cpuhw->events[i-1] = cpuhw->events[i]; power_pmu_del()
1515 /* disable exceptions if no events are running */ power_pmu_del()
1586 * Start group events scheduling transaction
1600 * Stop group events scheduling transaction
1613 * Commit group events scheduling transaction
1628 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); power_pmu_commit_txn()
1633 cpuhw->event[i]->hw.config = cpuhw->events[i]; power_pmu_commit_txn()
1692 /* Number of perf_events counting hardware events */
1745 u64 events[MAX_HWEVENTS]; power_pmu_event_init() local
1792 * PM_RUN_* events interchangeably with their non RUN_* power_pmu_event_init()
1801 * If this machine has limited events, check whether this power_pmu_event_init()
1826 * other hardware events in the group. We assume the event power_pmu_event_init()
1832 ctrs, events, cflags); power_pmu_event_init()
1836 events[n] = ev; power_pmu_event_init()
1843 err = power_check_constraints(cpuhw, events, cflags, n + 1); power_pmu_event_init()
1859 event->hw.config = events[n]; power_pmu_event_init()
1865 * For EBB events we just context switch the PMC value, we don't do any power_pmu_event_init()
1873 * If no events are currently in use, then we have to take a power_pmu_event_init()
2126 * XXX might want to use MSR.PM to keep the events frozen until perf_event_interrupt()
2177 * Use FCHV to ignore kernel events if MSR.HV is set. register_power_pmu()
1373 collect_events(struct perf_event *group, int max_count, struct perf_event *ctrs[], u64 *events, unsigned int *flags) collect_events() argument
power5-pmu.c
20 #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */
90 * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
101 * 36: FPU|IFU|ISU1 events needed 0x10_0000_0000
102 * 35: ISU0 events needed 0x08_0000_0000
103 * 34: IDU|GRS events needed 0x04_0000_0000
107 * 31-32: count of events needing PMC1/2 0x1_8000_0000
111 * 28-29: count of events needing PMC3/4 0x3000_0000
121 * 0-11: Count of events needing PMC1..PMC6
130 /* Masks and values for using events from the various units */
185 * Bus events on bytes 0 and 2 can be counted power5_get_constraint()
249 * Some direct events for decodes of event bus byte 3 have alternative
295 * Map of which direct events on which PMCs are marked instruction events.
516 /* add events on higher-numbered bus */ power5_compute_mmcr()
563 * Table of generalized cache-related events.
power6-pmu.c
20 #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */
54 * Map of which direct events on which PMCs are marked instruction events.
117 * Masks showing for each unit which bits are marked events.
121 0x01000000, /* direct events set 1: byte 3 bit 0 */
122 0x00010000, /* direct events set 2: byte 2 bit 0 */
175 * Assign PMC numbers and compute MMCR1 value for a set of events
223 /* Nest events have a further mux */ p6_compute_mmcr()
386 /* Check for alternative ways of computing sum events */ p6_get_alternatives()
440 /* remove the limited PMC events */ p6_get_alternatives()
450 /* remove all but the limited PMC events */ p6_get_alternatives()
483 * Table of generalized cache-related events.
486 * The "DTLB" and "ITLB" events relate to the DERAT and IERAT.
mpc7450-pmu.c
30 * Classify events according to how specific their PMC requirements are.
120 * 0 - 11: Count of events needing PMC1 .. PMC6
123 * 12 - 14: Count of events needing PMC1 or PMC2
126 * 16 - 18: Count of events needing PMC1, PMC2 or PMC4
129 * 20 - 23: Count of events needing PMC1, PMC2, PMC3 or PMC4
261 * Compute MMCR0/1/2 values for a set of events.
357 * Table of generalized cache-related events.
core-fsl-emb.c
33 /* Number of perf_events counting hardware events */
204 * Disable all events to prevent PMU interrupts and to allow
205 * events to be added or removed.
231 * the events before we return. fsl_emb_pmu_disable()
242 * Re-enable all events if disable == 0.
243 * If we were previously disabled and events were added, then
489 struct perf_event *events[MAX_HWEVENTS]; fsl_emb_pmu_event_init() local
529 * other hardware events in the group. We assume the event fsl_emb_pmu_event_init()
535 ppmu->n_counter - 1, events); fsl_emb_pmu_event_init()
543 if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED) fsl_emb_pmu_event_init()
568 * If no events are currently in use, then we have to take a fsl_emb_pmu_event_init()
power8-pmu.c
81 * | *- sampling mode for marked events *- combine
198 * BHRB IFM -* | | | Count of events for each PMC.
229 * For NC we are counting up to 4 events. This requires three bits, and we need
240 * events ask for the same PMC the sum will overflow, setting the high bit,
318 * Add to number of counters in use. Note this includes events with power8_get_constraint()
320 * Don't count events on PMC 5 & 6, there is only one valid event power8_get_constraint()
329 * L2/L3 events contain a cache selector field, which is power8_get_constraint()
333 * field to zeroes, and for us to only ever allow events that power8_get_constraint()
375 /* EBB events must specify the PMC */ power8_get_constraint()
380 /* Only EBB events can request BHRB */ power8_get_constraint()
388 * All events must agree on EBB, either all request it or none. power8_get_constraint()
389 * EBB events are pinned & exclusive, so this should never actually power8_get_constraint()
661 /* BHRB and regular PMU events share the same privilege state power8_bhrb_filter_map()
697 * Table of generalized cache-related events.
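
The constraint encoding described in these PMU comments relies on guard bits: each event adds a small value into a packed per-resource field, and over-subscription shows up as a carry into the field's top bit. Below is a simplified standalone sketch with an invented 3-bit-per-field layout; the real per-CPU layouts are the bit maps documented above, and the kernel's check also handles match ("and") fields and alternative event codes:

#include <stdbool.h>
#include <stdint.h>

/* Invented layout: 21 fields of 3 bits. The low two bits of each field
 * count events needing that resource; bit 2 of each field is a guard. */
#define GUARD_BITS 0x4924924924924924ULL

/* Each event's value has a 1 in the low bit of the field for the
 * resource it needs. Checking after every addition means a field can
 * hold at most three events before its guard bit trips. */
static bool events_fit(const uint64_t *event_values, int n)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		sum += event_values[i];
		if (sum & GUARD_BITS)	/* some resource over-subscribed */
			return false;
	}
	return true;
}
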
/linux-4.1.27/arch/x86/xen/
trace.c
20 #include <trace/events/xen.h>
xen-asm.S
21 * Enable events. This clears the event mask and tests the pending
22 * event status with one and operation. If there are pending events,
26 /* Unmask events */
48 * Disabling events is simply a matter of making the event mask
81 * interrupt mask state, it checks for unmasked pending events and
/linux-4.1.27/scripts/gdb/linux/
cpus.py
59 gdb.events.stop.disconnect(cpu_mask_invalidate)
60 if hasattr(gdb.events, 'new_objfile'):
61 gdb.events.new_objfile.disconnect(cpu_mask_invalidate)
71 if hasattr(gdb, 'events'):
73 gdb.events.stop.connect(cpu_mask_invalidate)
74 if hasattr(gdb.events, 'new_objfile'):
75 gdb.events.new_objfile.connect(cpu_mask_invalidate)
utils.py
24 gdb.events.new_objfile.disconnect(self._new_objfile_handler)
32 if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
33 gdb.events.new_objfile.connect(self._new_objfile_handler)
133 gdb.events.exited.disconnect(exit_handler)
154 if not gdbserver_type is None and hasattr(gdb, 'events'):
155 gdb.events.exited.connect(exit_handler)
/linux-4.1.27/drivers/net/wireless/ti/wlcore/
event.h
29 * Mbox events
33 * buffer while the other buffer continues to collect events. If the host
34 * is not processing events, an interrupt is issued to signal that a buffer
35 * is ready. Once the host is done with processing events from one buffer,
53 /* events the driver might want to wait for */
/linux-4.1.27/arch/arm/kernel/
perf_event_xscale.c
166 struct perf_event *event = cpuc->events[idx]; xscale1pmu_handle_irq()
201 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); xscale1pmu_enable_event() local
224 raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale1pmu_enable_event()
229 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); xscale1pmu_enable_event()
237 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); xscale1pmu_disable_event() local
258 raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale1pmu_disable_event()
263 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); xscale1pmu_disable_event()
290 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); xscale1pmu_start() local
292 raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale1pmu_start()
296 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); xscale1pmu_start()
302 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); xscale1pmu_stop() local
304 raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale1pmu_stop()
308 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); xscale1pmu_stop()
507 struct perf_event *event = cpuc->events[idx]; xscale2pmu_handle_irq()
542 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); xscale2pmu_enable_event() local
577 raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale2pmu_enable_event()
580 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); xscale2pmu_enable_event()
588 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); xscale2pmu_disable_event() local
628 raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale2pmu_disable_event()
632 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); xscale2pmu_disable_event()
654 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); xscale2pmu_start() local
656 raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale2pmu_start()
660 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); xscale2pmu_start()
666 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); xscale2pmu_stop() local
668 raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale2pmu_stop()
672 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); xscale2pmu_stop()
perf_event_v6.c
13 * performance counters can export events to the event bus, and the event bus
14 * itself can be monitored. This requires that we *don't* export the events to
63 * The hardware events that we support. We do support cache operations but
131 * The hardware events that we support. We do support cache operations but
265 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); armv6pmu_enable_event() local
288 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv6pmu_enable_event()
293 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv6pmu_enable_event()
320 struct perf_event *event = cpuc->events[idx]; armv6pmu_handle_irq()
345 * Handle the pending perf events. armv6pmu_handle_irq()
359 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); armv6pmu_start() local
361 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv6pmu_start()
365 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv6pmu_start()
371 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); armv6pmu_stop() local
373 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv6pmu_stop()
377 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv6pmu_stop()
412 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); armv6pmu_disable_event() local
434 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv6pmu_disable_event()
439 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv6pmu_disable_event()
447 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); armv6mpcore_pmu_disable_event() local
465 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv6mpcore_pmu_disable_event()
470 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv6mpcore_pmu_disable_event()
517 * that some of the events have different enumerations and that there is no
perf_event_v7.c
28 * Note: An implementation may not be able to count all of these events
61 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
161 * Cortex-A8 HW events mapping
163 * The hardware events that we support. We do support cache operations but
214 * Cortex-A9 HW events mapping
258 * Cortex-A5 HW events mapping
304 * Cortex-A15 HW events mapping
353 * Cortex-A7 HW events mapping
402 * Cortex-A12 HW events mapping
452 * Krait HW events mapping
502 * Scorpion HW events mapping
764 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); armv7pmu_enable_event() local
777 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv7pmu_enable_event()
802 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv7pmu_enable_event()
810 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); armv7pmu_disable_event() local
822 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv7pmu_disable_event()
834 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv7pmu_disable_event()
863 struct perf_event *event = cpuc->events[idx]; armv7pmu_handle_irq()
888 * Handle the pending perf events. armv7pmu_handle_irq()
902 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); armv7pmu_start() local
904 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv7pmu_start()
907 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv7pmu_start()
913 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); armv7pmu_stop() local
915 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv7pmu_stop()
918 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv7pmu_stop()
939 * the events counters armv7pmu_get_event_idx()
1155 * CC = class of events the group G is choosing from
1162 * events (interrupts for example). An event code is broken down into
1347 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); krait_pmu_disable_event() local
1350 raw_spin_lock_irqsave(&events->pmu_lock, flags); krait_pmu_disable_event()
1364 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); krait_pmu_disable_event()
1373 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); krait_pmu_enable_event() local
1379 raw_spin_lock_irqsave(&events->pmu_lock, flags); krait_pmu_enable_event()
1400 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); krait_pmu_enable_event()
1452 * Two events can't use the same group within a pmresr register.
1467 /* Ignore invalid events */ krait_pmu_get_event_idx()
1505 /* Some early versions of Krait don't support PC write events */ krait_pmu_init()
1545 * CC = class of events the group G is choosing from
1552 * events (interrupts for example). An event code is broken down into
1680 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); scorpion_pmu_disable_event() local
1683 raw_spin_lock_irqsave(&events->pmu_lock, flags); scorpion_pmu_disable_event()
1697 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); scorpion_pmu_disable_event()
1706 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); scorpion_pmu_enable_event() local
1712 raw_spin_lock_irqsave(&events->pmu_lock, flags); scorpion_pmu_enable_event()
1733 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); scorpion_pmu_enable_event()
1785 * Two events can't use the same group within a pmresr register.
1799 /* Ignore invalid events */ scorpion_pmu_get_event_idx()
sys_oabi-compat.c
248 __u32 events; member in struct:oabi_epoll_event
264 kernel.events = user.events; sys_oabi_epoll_ctl()
274 struct oabi_epoll_event __user *events, sys_oabi_epoll_wait()
292 __put_user_error(kbuf[i].events, &events->events, err); sys_oabi_epoll_wait()
293 __put_user_error(kbuf[i].data, &events->data, err); sys_oabi_epoll_wait()
294 events++; sys_oabi_epoll_wait()
273 sys_oabi_epoll_wait(int epfd, struct oabi_epoll_event __user *events, int maxevents, int timeout) sys_oabi_epoll_wait() argument
/linux-4.1.27/drivers/power/
abx500_chargalg.c
232 * @events: structure for information about events triggered
259 struct abx500_chargalg_events events; member in struct:abx500_chargalg
297 di->events.safety_timer_expired = true; abx500_chargalg_safety_timer_expired()
321 di->events.maintenance_timer_expired = true; abx500_chargalg_maintenance_timer_expired()
459 di->events.safety_timer_expired = false; abx500_chargalg_start_safety_timer()
475 di->events.safety_timer_expired = false; abx500_chargalg_stop_safety_timer()
493 di->events.maintenance_timer_expired = false; abx500_chargalg_start_maintenance_timer()
507 di->events.maintenance_timer_expired = false; abx500_chargalg_stop_maintenance_timer()
763 di->events.btemp_underover = false; abx500_chargalg_check_temp()
764 di->events.btemp_lowhigh = false; abx500_chargalg_check_temp()
775 di->events.btemp_underover = false; abx500_chargalg_check_temp()
776 di->events.btemp_lowhigh = true; abx500_chargalg_check_temp()
782 di->events.btemp_underover = true; abx500_chargalg_check_temp()
783 di->events.btemp_lowhigh = false; abx500_chargalg_check_temp()
830 di->events.usb_cv_active || di->events.ac_cv_active) && abx500_chargalg_end_of_charge()
885 if (di->events.vbus_collapsed) { abx500_chargalg_chg_curr_maxim()
1029 di->events.batt_rem = false; abx500_chargalg_get_ext_psy_data()
1032 di->events.batt_rem = true; abx500_chargalg_get_ext_psy_data()
1123 di->events.mainextchnotok = true; abx500_chargalg_get_ext_psy_data()
1124 di->events.main_thermal_prot = false; abx500_chargalg_get_ext_psy_data()
1125 di->events.main_ovv = false; abx500_chargalg_get_ext_psy_data()
1126 di->events.ac_wd_expired = false; abx500_chargalg_get_ext_psy_data()
1129 di->events.ac_wd_expired = true; abx500_chargalg_get_ext_psy_data()
1130 di->events.mainextchnotok = false; abx500_chargalg_get_ext_psy_data()
1131 di->events.main_ovv = false; abx500_chargalg_get_ext_psy_data()
1132 di->events.main_thermal_prot = false; abx500_chargalg_get_ext_psy_data()
1136 di->events.main_thermal_prot = true; abx500_chargalg_get_ext_psy_data()
1137 di->events.mainextchnotok = false; abx500_chargalg_get_ext_psy_data()
1138 di->events.main_ovv = false; abx500_chargalg_get_ext_psy_data()
1139 di->events.ac_wd_expired = false; abx500_chargalg_get_ext_psy_data()
1142 di->events.main_ovv = true; abx500_chargalg_get_ext_psy_data()
1143 di->events.mainextchnotok = false; abx500_chargalg_get_ext_psy_data()
1144 di->events.main_thermal_prot = false; abx500_chargalg_get_ext_psy_data()
1145 di->events.ac_wd_expired = false; abx500_chargalg_get_ext_psy_data()
1148 di->events.main_thermal_prot = false; abx500_chargalg_get_ext_psy_data()
1149 di->events.mainextchnotok = false; abx500_chargalg_get_ext_psy_data()
1150 di->events.main_ovv = false; abx500_chargalg_get_ext_psy_data()
1151 di->events.ac_wd_expired = false; abx500_chargalg_get_ext_psy_data()
1161 di->events.usbchargernotok = true; abx500_chargalg_get_ext_psy_data()
1162 di->events.usb_thermal_prot = false; abx500_chargalg_get_ext_psy_data()
1163 di->events.vbus_ovv = false; abx500_chargalg_get_ext_psy_data()
1164 di->events.usb_wd_expired = false; abx500_chargalg_get_ext_psy_data()
1167 di->events.usb_wd_expired = true; abx500_chargalg_get_ext_psy_data()
1168 di->events.usbchargernotok = false; abx500_chargalg_get_ext_psy_data()
1169 di->events.usb_thermal_prot = false; abx500_chargalg_get_ext_psy_data()
1170 di->events.vbus_ovv = false; abx500_chargalg_get_ext_psy_data()
1174 di->events.usb_thermal_prot = true; abx500_chargalg_get_ext_psy_data()
1175 di->events.usbchargernotok = false; abx500_chargalg_get_ext_psy_data()
1176 di->events.vbus_ovv = false; abx500_chargalg_get_ext_psy_data()
1177 di->events.usb_wd_expired = false; abx500_chargalg_get_ext_psy_data()
1180 di->events.vbus_ovv = true; abx500_chargalg_get_ext_psy_data()
1181 di->events.usbchargernotok = false; abx500_chargalg_get_ext_psy_data()
1182 di->events.usb_thermal_prot = false; abx500_chargalg_get_ext_psy_data()
1183 di->events.usb_wd_expired = false; abx500_chargalg_get_ext_psy_data()
1186 di->events.usbchargernotok = false; abx500_chargalg_get_ext_psy_data()
1187 di->events.usb_thermal_prot = false; abx500_chargalg_get_ext_psy_data()
1188 di->events.vbus_ovv = false; abx500_chargalg_get_ext_psy_data()
1189 di->events.usb_wd_expired = false; abx500_chargalg_get_ext_psy_data()
1221 di->events.ac_cv_active = true; abx500_chargalg_get_ext_psy_data()
1223 di->events.ac_cv_active = false; abx500_chargalg_get_ext_psy_data()
1230 di->events.usb_cv_active = true; abx500_chargalg_get_ext_psy_data()
1232 di->events.usb_cv_active = false; abx500_chargalg_get_ext_psy_data()
1244 di->events.batt_unknown = false; abx500_chargalg_get_ext_psy_data()
1246 di->events.batt_unknown = true; abx500_chargalg_get_ext_psy_data()
1283 di->events.vbus_collapsed = true; abx500_chargalg_get_ext_psy_data()
1285 di->events.vbus_collapsed = false; abx500_chargalg_get_ext_psy_data()
1360 (di->events.batt_unknown && !di->bm->chg_unknown_bat)) { abx500_chargalg_algorithm()
1362 di->events.safety_timer_expired = false; abx500_chargalg_algorithm()
1374 else if (di->events.safety_timer_expired) { abx500_chargalg_algorithm()
1385 else if (di->events.batt_rem) { abx500_chargalg_algorithm()
1390 else if (di->events.mainextchnotok || di->events.usbchargernotok) { abx500_chargalg_algorithm()
1396 !di->events.vbus_collapsed) abx500_chargalg_algorithm()
1400 else if (di->events.vbus_ovv || abx500_chargalg_algorithm()
1401 di->events.main_ovv || abx500_chargalg_algorithm()
1402 di->events.batt_ovv || abx500_chargalg_algorithm()
1409 else if (di->events.main_thermal_prot || abx500_chargalg_algorithm()
1410 di->events.usb_thermal_prot) { abx500_chargalg_algorithm()
1416 else if (di->events.btemp_underover) { abx500_chargalg_algorithm()
1422 else if (di->events.ac_wd_expired || abx500_chargalg_algorithm()
1423 di->events.usb_wd_expired) { abx500_chargalg_algorithm()
1428 else if (di->events.btemp_lowhigh) { abx500_chargalg_algorithm()
1451 di->events.ac_cv_active, abx500_chargalg_algorithm()
1452 di->events.usb_cv_active, abx500_chargalg_algorithm()
1493 if (!di->events.batt_rem) abx500_chargalg_algorithm()
1503 if (!di->events.main_thermal_prot && abx500_chargalg_algorithm()
1504 !di->events.usb_thermal_prot) abx500_chargalg_algorithm()
1514 if (!di->events.vbus_ovv && abx500_chargalg_algorithm()
1515 !di->events.main_ovv && abx500_chargalg_algorithm()
1516 !di->events.batt_ovv && abx500_chargalg_algorithm()
1528 if (!di->events.mainextchnotok && abx500_chargalg_algorithm()
1529 !di->events.usbchargernotok) abx500_chargalg_algorithm()
1630 if (di->events.maintenance_timer_expired) { abx500_chargalg_algorithm()
1650 if (di->events.maintenance_timer_expired) { abx500_chargalg_algorithm()
1669 if (!di->events.btemp_lowhigh) abx500_chargalg_algorithm()
1679 if (!di->events.ac_wd_expired && abx500_chargalg_algorithm()
1680 !di->events.usb_wd_expired) abx500_chargalg_algorithm()
1690 if (!di->events.btemp_underover) abx500_chargalg_algorithm()
1788 if (di->events.batt_ovv) { abx500_chargalg_get_property()
1790 } else if (di->events.btemp_underover) { abx500_chargalg_get_property()
H A Dab8500_btemp.c86 * @events: Structure for information about events triggered
103 struct ab8500_btemp_events events; member in struct:ab8500_btemp
665 if (di->events.ac_conn || di->events.usb_conn) ab8500_btemp_periodic_work()
688 di->events.batt_rem = true; ab8500_btemp_batctrlindb_handler()
711 di->events.btemp_low = true; ab8500_btemp_templow_handler()
712 di->events.btemp_high = false; ab8500_btemp_templow_handler()
713 di->events.btemp_medhigh = false; ab8500_btemp_templow_handler()
714 di->events.btemp_lowmed = false; ab8500_btemp_templow_handler()
734 di->events.btemp_high = true; ab8500_btemp_temphigh_handler()
735 di->events.btemp_medhigh = false; ab8500_btemp_temphigh_handler()
736 di->events.btemp_lowmed = false; ab8500_btemp_temphigh_handler()
737 di->events.btemp_low = false; ab8500_btemp_temphigh_handler()
756 di->events.btemp_lowmed = true; ab8500_btemp_lowmed_handler()
757 di->events.btemp_medhigh = false; ab8500_btemp_lowmed_handler()
758 di->events.btemp_high = false; ab8500_btemp_lowmed_handler()
759 di->events.btemp_low = false; ab8500_btemp_lowmed_handler()
778 di->events.btemp_medhigh = true; ab8500_btemp_medhigh_handler()
779 di->events.btemp_lowmed = false; ab8500_btemp_medhigh_handler()
780 di->events.btemp_high = false; ab8500_btemp_medhigh_handler()
781 di->events.btemp_low = false; ab8500_btemp_medhigh_handler()
821 * The BTEMP events are not reliable on AB8500 cut3.3 ab8500_btemp_get_temp()
827 if (di->events.btemp_low) { ab8500_btemp_get_temp()
832 } else if (di->events.btemp_high) { ab8500_btemp_get_temp()
837 } else if (di->events.btemp_lowmed) { ab8500_btemp_get_temp()
842 } else if (di->events.btemp_medhigh) { ab8500_btemp_get_temp()
889 if (di->events.batt_rem) ab8500_btemp_get_property()
944 if (!ret.intval && di->events.ac_conn) { ab8500_btemp_get_ext_psy_data()
945 di->events.ac_conn = false; ab8500_btemp_get_ext_psy_data()
948 else if (ret.intval && !di->events.ac_conn) { ab8500_btemp_get_ext_psy_data()
949 di->events.ac_conn = true; ab8500_btemp_get_ext_psy_data()
950 if (!di->events.usb_conn) ab8500_btemp_get_ext_psy_data()
956 if (!ret.intval && di->events.usb_conn) { ab8500_btemp_get_ext_psy_data()
957 di->events.usb_conn = false; ab8500_btemp_get_ext_psy_data()
960 else if (ret.intval && !di->events.usb_conn) { ab8500_btemp_get_ext_psy_data()
961 di->events.usb_conn = true; ab8500_btemp_get_ext_psy_data()
962 if (!di->events.ac_conn) ab8500_btemp_get_ext_psy_data()
/linux-4.1.27/drivers/mfd/
H A Dda903x.c54 int (*unmask_events)(struct da903x_chip *, unsigned int events);
55 int (*mask_events)(struct da903x_chip *, unsigned int events);
56 int (*read_events)(struct da903x_chip *, unsigned int *events);
130 unsigned int events) da903x_register_notifier()
134 chip->ops->unmask_events(chip, events); da903x_register_notifier()
140 unsigned int events) da903x_unregister_notifier()
144 chip->ops->mask_events(chip, events); da903x_unregister_notifier()
266 static int da9030_unmask_events(struct da903x_chip *chip, unsigned int events) da9030_unmask_events() argument
270 chip->events_mask &= ~events; da9030_unmask_events()
279 static int da9030_mask_events(struct da903x_chip *chip, unsigned int events) da9030_mask_events() argument
283 chip->events_mask |= events; da9030_mask_events()
292 static int da9030_read_events(struct da903x_chip *chip, unsigned int *events) da9030_read_events() argument
301 *events = (v[2] << 16) | (v[1] << 8) | v[0]; da9030_read_events()
343 static int da9034_unmask_events(struct da903x_chip *chip, unsigned int events) da9034_unmask_events() argument
347 chip->events_mask &= ~events; da9034_unmask_events()
357 static int da9034_mask_events(struct da903x_chip *chip, unsigned int events) da9034_mask_events() argument
361 chip->events_mask |= events; da9034_mask_events()
371 static int da9034_read_events(struct da903x_chip *chip, unsigned int *events) da9034_read_events() argument
380 *events = (v[3] << 24) | (v[2] << 16) | (v[1] << 8) | v[0]; da9034_read_events()
401 unsigned int events = 0; da903x_irq_work() local
404 if (chip->ops->read_events(chip, &events)) da903x_irq_work()
407 events &= ~chip->events_mask; da903x_irq_work()
408 if (events == 0) da903x_irq_work()
412 &chip->notifier_list, events, NULL); da903x_irq_work()
129 da903x_register_notifier(struct device *dev, struct notifier_block *nb, unsigned int events) da903x_register_notifier() argument
139 da903x_unregister_notifier(struct device *dev, struct notifier_block *nb, unsigned int events) da903x_unregister_notifier() argument
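
The da903x hits above show the driver's event delivery pattern: registering a notifier unmasks the requested event bits, the IRQ worker reads the raw event registers, drops anything still masked, and fans the remainder out through a notifier chain. A minimal stand-alone sketch of that pattern (all names hypothetical; plain C in place of the kernel notifier API):

    #include <stdio.h>

    typedef void (*notifier_fn)(unsigned int events);

    struct chip {
        unsigned int events_mask;   /* 1 = masked, not delivered */
        unsigned int hw_pending;    /* stand-in for the event registers */
        notifier_fn  notifier;
    };

    static void register_notifier(struct chip *chip, notifier_fn fn,
                                  unsigned int events)
    {
        chip->notifier = fn;
        chip->events_mask &= ~events;      /* unmask requested events */
    }

    static void irq_work(struct chip *chip)
    {
        unsigned int events = chip->hw_pending;  /* read_events() */

        chip->hw_pending = 0;
        events &= ~chip->events_mask;            /* drop masked bits */
        if (events && chip->notifier)
            chip->notifier(events);              /* notifier chain */
    }

    static void on_events(unsigned int events)
    {
        printf("events delivered: %#x\n", events);
    }

    int main(void)
    {
        struct chip chip = { .events_mask = ~0u };

        register_notifier(&chip, on_events, 0x5); /* want bits 0 and 2 */
        chip.hw_pending = 0x7;                    /* bits 0..2 fired */
        irq_work(&chip);                          /* prints 0x5 */
        return 0;
    }
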
H A Dadp5520.c143 unsigned int events) adp5520_register_notifier()
149 events & (ADP5520_KP_IEN | ADP5520_KR_IEN | adp5520_register_notifier()
161 unsigned int events) adp5520_unregister_notifier()
166 events & (ADP5520_KP_IEN | ADP5520_KR_IEN | adp5520_unregister_notifier()
176 unsigned int events; adp5520_irq_thread() local
184 events = reg_val & (ADP5520_OVP_INT | ADP5520_CMPR_INT | adp5520_irq_thread()
187 blocking_notifier_call_chain(&chip->notifier_list, events, NULL); adp5520_irq_thread()
189 __adp5520_ack_bits(chip->client, ADP5520_MODE_STATUS, events); adp5520_irq_thread()
142 adp5520_register_notifier(struct device *dev, struct notifier_block *nb, unsigned int events) adp5520_register_notifier() argument
160 adp5520_unregister_notifier(struct device *dev, struct notifier_block *nb, unsigned int events) adp5520_unregister_notifier() argument
/linux-4.1.27/arch/metag/kernel/perf/
H A Dperf_event.h26 * one time, with the returned count being an aggregate of events. A small
27 * number of events are thread global, i.e. they count the aggregate of all
28 * threads' events, regardless of the thread selected.
42 * struct cpu_hw_events - a processor core's performance events
43 * @events: an array of perf_events active for a given index.
51 struct perf_event *events[MAX_HWEVENTS]; member in struct:cpu_hw_events
H A Dperf_event.c288 cpuc->events[idx] = event; metag_pmu_start()
357 cpuc->events[idx] = NULL; metag_pmu_del()
600 struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events); metag_pmu_enable_counter() local
605 raw_spin_lock_irqsave(&events->pmu_lock, flags); metag_pmu_enable_counter()
668 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); metag_pmu_enable_counter()
673 struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events); metag_pmu_disable_counter() local
692 * least two events that count events that are core global and ignore metag_pmu_disable_counter()
696 raw_spin_lock_irqsave(&events->pmu_lock, flags); metag_pmu_disable_counter()
702 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); metag_pmu_disable_counter()
721 struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events); metag_pmu_write_counter() local
736 raw_spin_lock_irqsave(&events->pmu_lock, flags); metag_pmu_write_counter()
743 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); metag_pmu_write_counter()
755 struct perf_event *event = cpuhw->events[idx]; metag_pmu_counter_overflow()
868 /* Initialise the active events and reservation mutex */ init_hw_perf_events()
/linux-4.1.27/sound/core/seq/oss/
H A Dseq_oss_event.h39 /* short note events (4bytes) */
47 /* long timer events (8bytes) */
55 /* long extended events (8bytes) */
64 /* long channel events (8bytes) */
74 /* channel voice events (8bytes) */
84 /* sysex events (8bytes) */
/linux-4.1.27/net/rxrpc/
H A Dar-ack.c164 !test_and_set_bit(RXRPC_CALL_ACK, &call->events)) __rxrpc_propose_ACK()
196 set_bit(RXRPC_CALL_RESEND, &call->events); rxrpc_set_resend()
206 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); rxrpc_set_resend()
558 set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events); rxrpc_insert_oos_packet()
755 /* connection level events - also handled elsewhere */ rxrpc_process_rx_queue()
794 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); rxrpc_process_rx_queue()
896 call->debug_id, rxrpc_call_states[call->state], call->events, rxrpc_process_call()
927 /* deal with events of a final nature */ rxrpc_process_call()
928 if (test_bit(RXRPC_CALL_RELEASE, &call->events)) { rxrpc_process_call()
930 clear_bit(RXRPC_CALL_RELEASE, &call->events); rxrpc_process_call()
933 if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) { rxrpc_process_call()
936 clear_bit(RXRPC_CALL_CONN_ABORT, &call->events); rxrpc_process_call()
937 clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events); rxrpc_process_call()
938 clear_bit(RXRPC_CALL_ABORT, &call->events); rxrpc_process_call()
946 clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events); rxrpc_process_call()
950 if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) { rxrpc_process_call()
953 clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events); rxrpc_process_call()
954 clear_bit(RXRPC_CALL_ABORT, &call->events); rxrpc_process_call()
961 clear_bit(RXRPC_CALL_CONN_ABORT, &call->events); rxrpc_process_call()
965 if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) { rxrpc_process_call()
971 if (test_bit(RXRPC_CALL_ABORT, &call->events)) { rxrpc_process_call()
985 if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) { rxrpc_process_call()
1012 if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) | rxrpc_process_call()
1017 if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events)) rxrpc_process_call()
1027 clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events); rxrpc_process_call()
1028 clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events); rxrpc_process_call()
1032 if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) { rxrpc_process_call()
1037 if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) { rxrpc_process_call()
1042 set_bit(RXRPC_CALL_ABORT, &call->events); rxrpc_process_call()
1051 clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events); rxrpc_process_call()
1072 if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) rxrpc_process_call()
1074 if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events)) rxrpc_process_call()
1078 if (test_bit(RXRPC_CALL_ACK, &call->events)) { rxrpc_process_call()
1086 clear_bit(RXRPC_CALL_ACK, &call->events); rxrpc_process_call()
1153 if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) { rxrpc_process_call()
1161 !test_bit(RXRPC_CALL_RELEASE, &call->events)) { rxrpc_process_call()
1170 set_bit(RXRPC_CALL_POST_ACCEPT, &call->events); rxrpc_process_call()
1175 if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) rxrpc_process_call()
1180 if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) { rxrpc_process_call()
1185 clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events); rxrpc_process_call()
1190 if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) { rxrpc_process_call()
1196 set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events); rxrpc_process_call()
1202 if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) { rxrpc_process_call()
1209 /* other events may have been raised since we started checking */ rxrpc_process_call()
1282 clear_bit(genbit, &call->events); rxrpc_process_call()
1283 clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events); rxrpc_process_call()
1294 clear_bit(genbit, &call->events); rxrpc_process_call()
1311 if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events)) rxrpc_process_call()
1313 clear_bit(RXRPC_CALL_ACK, &call->events); rxrpc_process_call()
1316 if (call->events || !skb_queue_empty(&call->rx_queue)) { rxrpc_process_call()
1327 call, call->events, call->flags, rxrpc_process_call()
1332 !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) rxrpc_process_call()
1345 if (call->events && !work_pending(&call->processor)) { rxrpc_process_call()
/linux-4.1.27/drivers/scsi/esas2r/
H A Desas2r_log.h50 ESAS2R_LOG_NONE = 0, /* no events logged */
51 ESAS2R_LOG_CRIT = 1, /* critical events */
52 ESAS2R_LOG_WARN = 2, /* warning events */
53 ESAS2R_LOG_INFO = 3, /* info events */
54 ESAS2R_LOG_DEBG = 4, /* debugging events */
55 ESAS2R_LOG_TRCE = 5, /* tracing events */
76 * debugging and tracing events. esas2r_hdebug is provided specifically for
77 * hardware layer debugging and tracing events.
/linux-4.1.27/drivers/video/fbdev/core/
H A Dfb_notify.c21 * @nb: notifier block to callback on events
31 * @nb: notifier block to callback on events
/linux-4.1.27/arch/powerpc/include/asm/
H A Dperf_event_fsl_emb.h32 * can hold restricted events, or zero if there are no
33 * restricted events.
H A Dperf_event_server.h34 int (*compute_mmcr)(u64 events[], int n_ev,
94 * other events.
103 * MMCR* bits. The constraint checking code will ensure that two events
108 * N events in a particular class. A field of k bits can be used for
116 * NAND field: this expresses the constraint that you may not have events
118 * events from the FPU, ISU and IDU simultaneously, although any two are
/linux-4.1.27/drivers/isdn/hisax/
H A Darcofi.h19 /* events */
/linux-4.1.27/drivers/md/bcache/
H A Dtrace.c8 #include <trace/events/bcache.h>
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A DMakefile12 ptlrpc_objs += events.o ptlrpc_module.o service.o pinger.o
/linux-4.1.27/drivers/nfc/
H A Dmei_phy.h26 void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context);
/linux-4.1.27/arch/sparc/kernel/
H A Dsparc_ksyms_32.c18 short events; member in struct:poll
H A Dperf_event.c55 * "sw_count0" and "sw_count1" events. These count how many times
77 /* Number of events currently scheduled onto this cpu.
83 /* Number of new events added since the last hw_perf_disable().
85 * events inside of a perf_{disable,enable}() sequence.
89 /* Array of events current scheduled on this cpu. */
96 unsigned long events[MAX_HWEVENTS]; member in struct:cpu_hw_events
762 * generates the overflow event for precise events via a trap
833 enc = perf_event_get_enc(cpuc->events[idx]); sparc_pmu_enable_event()
936 * For such chips we require that all of the events have the same
946 /* Assign to counters all unassigned events. */ calculate_single_pcr()
959 enc = perf_event_get_enc(cpuc->events[i]); calculate_single_pcr()
1001 /* If performance event entries have been added, move existing events
1119 cpuc->events[i - 1] = cpuc->events[i]; sparc_pmu_del()
1217 /* Make sure all events can be scheduled into the hardware at
1219 * need to support 2 simultaneous HW events.
1222 * on success. These are pending indexes. When the events are
1228 unsigned long *events, int n_ev) sparc_check_constraints()
1250 msk0 = perf_event_get_msk(events[0]); sparc_check_constraints()
1257 msk1 = perf_event_get_msk(events[1]); sparc_check_constraints()
1259 /* If both events can go on any counter, OK. */ sparc_check_constraints()
1281 /* If the events are fixed to different counters, OK. */ sparc_check_constraints()
1331 struct perf_event *evts[], unsigned long *events, collect_events()
1341 events[n] = group->hw.event_base; collect_events()
1350 events[n] = event->hw.event_base; collect_events()
1370 cpuc->events[n0] = event->hw.event_base; sparc_pmu_add()
1378 * If group events scheduling transaction was started, sparc_pmu_add()
1387 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) sparc_pmu_add()
1405 unsigned long events[MAX_HWEVENTS]; sparc_pmu_event_init() local
1444 * PERF_TYPE_RAW events. sparc_pmu_event_init()
1462 evts, events, current_idx_dmy); sparc_pmu_event_init()
1466 events[n] = hwc->event_base; sparc_pmu_event_init()
1472 if (sparc_check_constraints(evts, events, n + 1)) sparc_pmu_event_init()
1493 * Start group events scheduling transaction
1506 * Stop group events scheduling transaction
1519 * Commit group events scheduling transaction
1535 if (sparc_check_constraints(cpuc->event, cpuc->events, n)) sparc_pmu_commit_txn()
1609 * overflow so we don't lose any events. perf_event_nmi_handler()
1679 pr_info("Performance events: "); init_hw_perf_events()
1227 sparc_check_constraints(struct perf_event **evts, unsigned long *events, int n_ev) sparc_check_constraints() argument
1330 collect_events(struct perf_event *group, int max_count, struct perf_event *evts[], unsigned long *events, int *current_idx) collect_events() argument
H A Dsparc_ksyms_64.c21 short events; member in struct:poll
/linux-4.1.27/arch/um/include/shared/
H A Dirq_user.h17 int events; member in struct:irq_fd
/linux-4.1.27/fs/notify/
H A Dfsnotify.h9 /* destroy all events sitting in this groups notification queue */
15 /* Calculate mask of events for a list of marks */
52 * about events that happen to its children.
56 /* allocate and destroy and event holder to attach events to notification/access queues */
H A Dgroup.c43 * Trying to get rid of a group. Remove all marks, flush all events and release
55 /* clear the notification queue of all events */ fsnotify_destroy_group()
60 * that deliberately ignores overflow events. fsnotify_destroy_group()
/linux-4.1.27/include/net/irda/
H A Dirlap_event.h62 /* Services events */
71 /* Send events */
75 /* Receive events */
98 /* Timer events */
H A Dirlmp_event.h55 /* LSAP events */
71 /* IrLAP events */
/linux-4.1.27/arch/arm64/kernel/
H A Dtrace-events-emulation.h34 #define TRACE_INCLUDE_FILE trace-events-emulation
H A Dperf_event.c42 * ARMv8 supports a maximum of 32 events.
282 hw_events->events[idx] = NULL; armpmu_del()
312 hw_events->events[idx] = event; armpmu_add()
665 /* Required events. */
677 /* Common architectural events. */
689 /* Common microarchitectural events. */
704 /* PMUv3 HW events mapping. */
1044 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); armv8pmu_enable_event() local
1050 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv8pmu_enable_event()
1072 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv8pmu_enable_event()
1078 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); armv8pmu_disable_event() local
1083 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv8pmu_disable_event()
1095 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv8pmu_disable_event()
1124 struct perf_event *event = cpuc->events[idx]; armv8pmu_handle_irq()
1149 * Handle the pending perf events. armv8pmu_handle_irq()
1163 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); armv8pmu_start() local
1165 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv8pmu_start()
1168 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv8pmu_start()
1174 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); armv8pmu_stop() local
1176 raw_spin_lock_irqsave(&events->pmu_lock, flags); armv8pmu_stop()
1179 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); armv8pmu_stop()
1198 * the events counters armv8pmu_get_event_idx()
1383 struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); for_each_possible_cpu() local
1384 events->events = per_cpu(hw_events, cpu); for_each_possible_cpu()
1385 events->used_mask = per_cpu(used_mask, cpu); for_each_possible_cpu()
1386 raw_spin_lock_init(&events->pmu_lock); for_each_possible_cpu()
/linux-4.1.27/include/media/
H A Dv4l2-event.h4 * V4L2 events.
39 * The v4l2-fh struct has a list of subscribed events. The v4l2_subscribed_event
43 * This array (ringbuffer, really) is used to store any events raised by the
55 * struct v4l2_fh has two lists: one of the subscribed events, and one of the
56 * pending events.
58 * struct v4l2_subscribed_event has a ringbuffer of raised (pending) events of
103 * @elems: The number of elements in the events array.
104 * @first: The index of the events containing the oldest available event.
105 * @in_use: The number of queued events.
106 * @events: An array of @elems events.
119 struct v4l2_kevent events[]; member in struct:v4l2_subscribed_event
H A Dadv7511.h23 /* notify events */
/linux-4.1.27/tools/perf/
H A Dbuiltin-list.c14 #include "util/parse-events.h"
24 OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"), cmd_list()
40 printf("\nList of pre-defined events (to be used in -e):\n\n"); cmd_list()
H A Dbuiltin-probe.c4 * Builtin probe command: Set up probe events by C expression
61 struct perf_probe_event events[MAX_PROBES]; member in struct:__anon14786
72 struct perf_probe_event *pev = &params.events[params.nevents]; parse_probe_event()
244 struct perf_probe_event *pev = &params.events[params.nevents]; opt_show_vars()
292 clear_perf_probe_event(params.events + i); cleanup_params()
332 "list up current probe events"), __cmd_probe()
359 OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events" __cmd_probe()
476 ret = show_available_vars(params.events, params.nevents, __cmd_probe()
492 pr_err_with_code(" Error: Failed to delete events.", ret); __cmd_probe()
504 ret = add_perf_probe_events(params.events, params.nevents, __cmd_probe()
508 pr_err_with_code(" Error: Failed to add events.", ret); __cmd_probe()
/linux-4.1.27/include/xen/interface/io/
H A Dkbdif.h29 /* In events (backend -> frontend) */
32 * Frontends should ignore unknown in events.
78 /* Out events (frontend -> backend) */
81 * Out events may be sent only when requested by backend, and receipt
83 * No out events currently defined.
H A Dfbif.h29 /* Out events (frontend -> backend) */
32 * Out events may be sent only when requested by backend, and receipt
76 /* In events (backend -> frontend) */
79 * Frontends should ignore unknown in events.
80 * No in events currently defined.
/linux-4.1.27/drivers/pcmcia/
H A Dvrc4173_cardu.c135 socket->events = 0; cardu_init()
403 uint16_t events; cardu_bh() local
406 events = socket->events; cardu_bh()
407 socket->events = 0; cardu_bh()
411 socket->handler(socket->info, events); cardu_bh()
416 uint16_t events = 0; get_events() local
423 events |= SS_DETECT; get_events()
426 events |= SS_READY; get_events()
430 events |= SS_STSCHG; get_events()
434 if (status == BV_DETECT_WARN) events |= SS_BATWARN; get_events()
435 else events |= SS_BATDEAD; get_events()
440 return events; get_events()
446 uint16_t events; cardu_interrupt() local
450 events = get_events(socket); cardu_interrupt()
451 if (events) { cardu_interrupt()
453 socket->events |= events; cardu_interrupt()
H A Di82092.c301 unsigned int events, active=0; i82092aa_interrupt() local
321 if (csc==0) /* no events on this socket */ i82092aa_interrupt()
324 events = 0; i82092aa_interrupt()
327 events |= SS_DETECT; i82092aa_interrupt()
333 events |= (csc & I365_CSC_STSCHG) ? SS_STSCHG : 0; i82092aa_interrupt()
335 /* Check for battery/ready events */ i82092aa_interrupt()
336 events |= (csc & I365_CSC_BVD1) ? SS_BATDEAD : 0; i82092aa_interrupt()
337 events |= (csc & I365_CSC_BVD2) ? SS_BATWARN : 0; i82092aa_interrupt()
338 events |= (csc & I365_CSC_READY) ? SS_READY : 0; i82092aa_interrupt()
341 if (events) { i82092aa_interrupt()
342 pcmcia_parse_events(&sockets[i].socket, events); i82092aa_interrupt()
344 active |= events; i82092aa_interrupt()
347 if (active==0) /* no more events to handle */ i82092aa_interrupt()
529 /* Enable specific interrupt events */ i82092aa_set_socket()
H A Dvrc4171_card.c493 unsigned int events = 0; get_events() local
501 events |= SS_STSCHG; get_events()
505 events |= SS_BATDEAD; get_events()
507 events |= SS_BATWARN; get_events()
511 events |= SS_READY; get_events()
513 events |= SS_DETECT; get_events()
515 return events; get_events()
521 unsigned int events; pccard_interrupt() local
530 events = get_events(CARD_SLOTA); pccard_interrupt()
531 if (events != 0) { pccard_interrupt()
532 pcmcia_parse_events(&socket->pcmcia_socket, events); pccard_interrupt()
543 events = get_events(CARD_SLOTB); pccard_interrupt()
544 if (events != 0) { pccard_interrupt()
545 pcmcia_parse_events(&socket->pcmcia_socket, events); pccard_interrupt()
H A Dcs.c284 * when card insertion and removal events are received.
572 * Some i82365-based systems send multiple SS_DETECT events during card
626 unsigned int events; pccardd() local
632 events = skt->thread_events; pccardd()
639 if (events & SS_DETECT) pccardd()
672 if (events || sysfs_events) pccardd()
702 void pcmcia_parse_events(struct pcmcia_socket *s, u_int events) pcmcia_parse_events() argument
705 dev_dbg(&s->dev, "parse_events: events %08x\n", events); pcmcia_parse_events()
708 s->thread_events |= events; pcmcia_parse_events()
719 * @events: events to pass to pccardd
722 * handled by pccardd to avoid any sysfs-related deadlocks. Valid events
727 void pcmcia_parse_uevents(struct pcmcia_socket *s, u_int events) pcmcia_parse_uevents() argument
730 dev_dbg(&s->dev, "parse_uevents: events %08x\n", events); pcmcia_parse_uevents()
733 s->sysfs_events |= events; pcmcia_parse_uevents()
H A Dpd6729.c179 unsigned int events, active = 0; pd6729_interrupt() local
196 if (csc == 0) /* no events on this socket */ pd6729_interrupt()
200 events = 0; pd6729_interrupt()
203 events |= SS_DETECT; pd6729_interrupt()
211 events |= (csc & I365_CSC_STSCHG) pd6729_interrupt()
214 /* Check for battery/ready events */ pd6729_interrupt()
215 events |= (csc & I365_CSC_BVD1) pd6729_interrupt()
217 events |= (csc & I365_CSC_BVD2) pd6729_interrupt()
219 events |= (csc & I365_CSC_READY) pd6729_interrupt()
223 if (events) pd6729_interrupt()
224 pcmcia_parse_events(&socket[i].socket, events); pd6729_interrupt()
226 active |= events; pd6729_interrupt()
229 if (active == 0) /* no more events to handle */ pd6729_interrupt()
388 /* Enable specific interrupt events */ pd6729_set_socket()
H A Dsoc_common.c331 unsigned int events; soc_common_check_status() local
342 events = (status ^ skt->status) & skt->cs_state.csc_mask; soc_common_check_status()
346 debug(skt, 4, "events: %s%s%s%s%s%s\n", soc_common_check_status()
347 events == 0 ? "<NONE>" : "", soc_common_check_status()
348 events & SS_DETECT ? "DETECT " : "", soc_common_check_status()
349 events & SS_READY ? "READY " : "", soc_common_check_status()
350 events & SS_BATDEAD ? "BATDEAD " : "", soc_common_check_status()
351 events & SS_BATWARN ? "BATWARN " : "", soc_common_check_status()
352 events & SS_STSCHG ? "STSCHG " : ""); soc_common_check_status()
354 if (events) soc_common_check_status()
355 pcmcia_parse_events(&skt->socket, events); soc_common_check_status()
356 } while (events); soc_common_check_status()
359 /* Let's poll for events in addition to IRQs since IRQ only is unreliable... */ soc_common_pcmcia_poll_event()
363 debug(skt, 4, "polling for events\n"); soc_common_pcmcia_poll_event()
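
soc_common_check_status above derives events by XOR-ing the freshly read status against the previous one and masking with csc_mask, then loops until a read produces no new changes. A stand-alone sketch of that edge-detect loop (the fake status array stands in for the hardware reads):

    #include <stdio.h>

    #define SS_DETECT 0x01
    #define SS_READY  0x02

    static unsigned int fake_status[] = {
        SS_DETECT, SS_DETECT | SS_READY, SS_DETECT | SS_READY
    };
    static unsigned int fake_idx;

    static unsigned int read_hw_status(void)
    {
        return fake_status[fake_idx++];
    }

    int main(void)
    {
        unsigned int last = 0, csc_mask = SS_DETECT | SS_READY;
        unsigned int status, events;

        do {
            status = read_hw_status();
            events = (status ^ last) & csc_mask;  /* changed bits only */
            last = status;
            if (events)
                printf("events: %s%s\n",
                       events & SS_DETECT ? "DETECT " : "",
                       events & SS_READY  ? "READY "  : "");
        } while (events);     /* stop once a poll sees no change */
        return 0;
    }
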
/linux-4.1.27/tools/perf/util/
H A Dordered-events.c4 #include "ordered-events.h"
26 list_add(&new->list, &oe->events); queue_event()
39 if (p == &oe->events) { queue_event()
40 list_add_tail(&new->list, &oe->events); queue_event()
50 if (p == &oe->events) { queue_event()
51 list_add(&new->list, &oe->events); queue_event()
187 struct list_head *head = &oe->events; __ordered_events__flush()
199 ui_progress__init(&prog, oe->nr_events, "Processing time ordered events..."); __ordered_events__flush()
247 struct list_head *head = &oe->events; ordered_events__flush()
289 INIT_LIST_HEAD(&oe->events); ordered_events__init()
H A Dparse-events.y14 #include "parse-events.h"
15 #include "parse-events-bison.h"
77 %type <head> events
138 PE_NAME '{' events '}'
147 '{' events '}'
156 events: label
157 events ',' event
176 * Apply modifier on all events added by single event definition
177 * (there could be more events added for multiple tracepoint
H A Dtrace-event-parse.c39 if (!pevent->events) get_common_field()
42 event = pevent->events[0]; get_common_field()
209 if (!pevent || !pevent->events) trace_find_next_event()
214 return pevent->events[0]; trace_find_next_event()
217 if (idx < pevent->nr_events && event == pevent->events[idx]) { trace_find_next_event()
221 return pevent->events[idx]; trace_find_next_event()
225 if (event == pevent->events[idx - 1]) trace_find_next_event()
226 return pevent->events[idx]; trace_find_next_event()
H A Dprobe-finder.h88 struct probe_trace_event *tevs; /* Found trace events */
89 int ntevs; /* Number of trace events */
90 int max_tevs; /* Max number of trace events */
H A Dordered-events.h33 struct list_head events; member in struct:ordered_events
H A Dtrace-event-info.c111 path = get_tracing_file("events/header_page"); record_header_files()
113 pr_debug("can't get tracing/events/header_page"); record_header_files()
134 path = get_tracing_file("events/header_event"); record_header_files()
136 pr_debug("can't get tracing/events/header_event"); record_header_files()
245 path = get_tracing_file("events/ftrace"); record_ftrace_files()
247 pr_debug("can't get tracing/events/ftrace"); record_ftrace_files()
280 path = get_tracing_file("events"); record_event_files()
282 pr_debug("can't get tracing/events"); record_event_files()
/linux-4.1.27/drivers/misc/ibmasm/
H A Devent.c36 * The driver does not interpret the events, it simply stores them in a
55 * responsible for keeping up with the writer, or they will lose events.
67 event = &buffer->events[buffer->next_index]; ibmasm_receive_event()
110 event = &buffer->events[index]; ibmasm_get_next_event()
113 event = &buffer->events[index]; ibmasm_get_next_event()
163 event = buffer->events; ibmasm_event_buffer_init()
/linux-4.1.27/arch/um/os-Linux/
H A Dirq.c52 int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds) os_create_pollfd() argument
72 .events = events, os_create_pollfd()
/linux-4.1.27/drivers/hid/
H A Dhid-speedlink.c34 * able to map keyboard events to the button presses. speedlink_input_mapping()
50 /* This fixes the "jumpy" cursor occurring due to invalid events sent speedlink_event()
57 /* Drop useless distance 0 events (on button clicks etc.) as well */ speedlink_event()
H A Dhid-roccat-pyra.h98 * Mouse sends tilt events on report_number 1 and 3
99 * Tilt events are sent repeatedly with 0.94s between first and second
/linux-4.1.27/arch/arm64/include/asm/
H A Dpmu.h24 /* The events for a given PMU register set. */
27 * The events that are active on the PMU for the given index.
29 struct perf_event **events; member in struct:pmu_hw_events
/linux-4.1.27/drivers/isdn/hardware/eicon/
H A Ddebug_if.h78 #define DIVA_MGT_DBG_MDM_PROGRESS 0x00000004 /* Modem progress events */
79 #define DIVA_MGT_DBG_FAX_PROGRESS 0x00000008 /* Fax progress events */
83 #define DIVA_MGT_DBG_LINE_EVENTS 0x00000080 /* Line state events */
84 #define DIVA_MGT_DBG_IFC_EVENTS 0x00000100 /* Interface/L1/L2 state events */
/linux-4.1.27/drivers/net/wireless/ti/wl1251/
H A Devent.h27 * Mbox events
31 * buffer while the other buffer continues to collect events. If the host
32 * is not processing events, an interrupt is issued to signal that a buffer
33 * is ready. Once the host is done with processing events from one buffer,
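
The wl1251 comments above describe a double-buffered mailbox: firmware fills one buffer while the host drains the other, and the acknowledgement frees the drained buffer for reuse. A minimal single-threaded sketch of that scheme (illustrative only; the real interface is interrupt driven):

    #include <stdio.h>

    #define EVENTS_PER_BUF 4

    struct mbox {
        unsigned int buf[2][EVENTS_PER_BUF];
        int count[2];
        int fw_buf;              /* buffer the firmware is filling */
    };

    static void fw_post_event(struct mbox *m, unsigned int ev)
    {
        if (m->count[m->fw_buf] < EVENTS_PER_BUF)
            m->buf[m->fw_buf][m->count[m->fw_buf]++] = ev;
    }

    static void host_process(struct mbox *m)
    {
        int ready = m->fw_buf;
        int i;

        m->fw_buf ^= 1;          /* firmware keeps collecting here */
        for (i = 0; i < m->count[ready]; i++)
            printf("event %#x\n", m->buf[ready][i]);
        m->count[ready] = 0;     /* "ack": buffer is reusable */
    }

    int main(void)
    {
        struct mbox m = { .fw_buf = 0 };

        fw_post_event(&m, 0x10);
        fw_post_event(&m, 0x11);
        host_process(&m);        /* drains buffer 0, fw fills 1 */
        fw_post_event(&m, 0x12);
        host_process(&m);        /* drains buffer 1 */
        return 0;
    }
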
/linux-4.1.27/include/uapi/asm-generic/
H A Dpoll.h37 short events; member in struct:pollfd
/linux-4.1.27/arch/sh/kernel/cpu/sh4/
H A DMakefile12 # Perf events
H A Dperf_event.c2 * Performance events support for SH7750-style performance counters
34 * There are a number of events supported by each counter (33 in total).
262 pr_notice("HW perf events unsupported, software events only.\n"); sh7750_pmu_init()
/linux-4.1.27/fs/f2fs/
H A Dtrace.h15 #include <trace/events/f2fs.h>
/linux-4.1.27/net/mac80211/
H A Devent.c8 * mac80211 - events
/linux-4.1.27/kernel/gcov/
H A Dbase.c98 * Turn on reporting of profiling data load/unload-events through the
99 * gcov_event() callback. Also replay all previous events once. This function
100 * is needed because some events are potentially generated too early for the
125 /* Update list and generate events when modules are unloaded. */ gcov_module_notifier()
/linux-4.1.27/sound/firewire/bebob/
H A Dbebob_hwdep.c60 unsigned int events; hwdep_poll() local
66 events = POLLIN | POLLRDNORM; hwdep_poll()
68 events = 0; hwdep_poll()
71 return events; hwdep_poll()
/linux-4.1.27/sound/firewire/dice/
H A Ddice-hwdep.c59 unsigned int events; hwdep_poll() local
65 events = POLLIN | POLLRDNORM; hwdep_poll()
67 events = 0; hwdep_poll()
70 return events; hwdep_poll()
/linux-4.1.27/sound/firewire/oxfw/
H A Doxfw-hwdep.c59 unsigned int events; hwdep_poll() local
65 events = POLLIN | POLLRDNORM; hwdep_poll()
67 events = 0; hwdep_poll()
70 return events; hwdep_poll()
/linux-4.1.27/include/linux/mfd/
H A Dipaq-micro.h103 * @key: callback for asynchronous key events
104 * @key_data: data to pass along with key events
105 * @ts: callback for asynchronous touchscreen events
106 * @ts_data: data to pass along with key events
H A Dda903x.h135 /* platform callbacks for battery low and critical events */
151 /* bit definitions for DA9030 events */
175 /* bit definitions for DA9034 events */
204 struct notifier_block *nb, unsigned int events);
206 struct notifier_block *nb, unsigned int events);
/linux-4.1.27/drivers/net/wireless/libertas/
H A Ddebugfs.c236 int events = 0; lbs_threshold_read() local
259 events = le16_to_cpu(subscribed->events); lbs_threshold_read()
262 !!(events & event_mask)); lbs_threshold_read()
281 struct cmd_ds_802_11_subscribe_event *events; lbs_threshold_write() local
304 events = kzalloc(sizeof(*events), GFP_KERNEL); lbs_threshold_write()
305 if (!events) { lbs_threshold_write()
310 events->hdr.size = cpu_to_le16(sizeof(*events)); lbs_threshold_write()
311 events->action = cpu_to_le16(CMD_ACT_GET); lbs_threshold_write()
313 ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events); lbs_threshold_write()
317 curr_mask = le16_to_cpu(events->events); lbs_threshold_write()
326 tlv = (void *)events->tlv; lbs_threshold_write()
328 events->action = cpu_to_le16(CMD_ACT_SET); lbs_threshold_write()
329 events->events = cpu_to_le16(new_mask); lbs_threshold_write()
337 events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 4 + sizeof(*tlv)); lbs_threshold_write()
339 ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events); lbs_threshold_write()
344 kfree(events); lbs_threshold_write()
/linux-4.1.27/drivers/net/ethernet/sfc/
H A Dvfdi.h28 * direct or disable delivery of these events by setting
31 * The PF driver can send arbitrary events to arbitrary event queues.
32 * However, for consistency, VFDI events from the PF are defined to
36 * The general form of the variable bits of VFDI events is:
46 * address of the request (ADDR) in a series of 4 events:
55 * series of events, the PF driver will attempt to read the request
57 * sequence of events or a DMA error, there will be no response.
138 * @u.init_rxq.evq: Instance of event queue to target receive events at.
139 * @u.init_rxq.label: Label used in receive events.
146 * events at.
147 * @u.init_txq.label: Label used in transmit completion events.
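
The vfdi.h comments above say the VF hands the PF the DMA address of a request as a series of four events. A sketch of how a 64-bit address can ride in four events, each carrying a 2-bit sequence number plus 16 address bits (the bit layout here is invented, not the actual VFDI format):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t make_event(unsigned int seq, uint16_t data)
    {
        return ((uint32_t)seq << 16) | data;
    }

    int main(void)
    {
        uint64_t addr = 0x123456789abcdef0ULL, rebuilt = 0;
        uint32_t ev;
        unsigned int seq;

        for (seq = 0; seq < 4; seq++) {
            ev = make_event(seq, (uint16_t)(addr >> (16 * seq)));

            /* receiver: check the sequence, accumulate the address */
            if (((ev >> 16) & 0x3) != seq)
                return 1;                   /* out of sequence: drop */
            rebuilt |= (uint64_t)(ev & 0xffff) << (16 * seq);
        }
        printf("rebuilt %#llx\n", (unsigned long long)rebuilt);
        return 0;
    }

As the comments note, a break in the sequence (or a DMA error) simply produces no response, which the sketch models by bailing out.
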
/linux-4.1.27/kernel/time/
H A Dtimer_stats.c68 * Number of timeout events:
285 long events = 0; tstats_show() local
322 events += entry->count; tstats_show()
329 if (events && period.tv_sec) tstats_show()
330 seq_printf(m, "%ld total events, %ld.%03ld events/sec\n", tstats_show()
331 events, events * 1000 / ms, tstats_show()
332 (events * 1000000 / ms) % 1000); tstats_show()
334 seq_printf(m, "%ld total events\n", events); tstats_show()
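
The tstats_show fragment above prints an events-per-second rate with integer arithmetic only: events*1000/ms gives the whole part, and (events*1000000/ms) % 1000 recovers three fractional digits without floating point. A stand-alone check of that fixed-point trick:

    #include <stdio.h>

    int main(void)
    {
        long long events = 12345;
        long long ms = 7200;            /* 7.2 s of collection time */

        printf("%lld total events, %lld.%03lld events/sec\n",
               events,
               events * 1000 / ms,
               (events * 1000000 / ms) % 1000);
        /* 12345 / 7.2 = 1714.583..., so this prints "1714.583" */
        return 0;
    }
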
/linux-4.1.27/fs/fscache/
H A Dobject.c62 * scheduler if none of the events in which the wait state has an interest are
74 { .events = (emask), .transit_to = STATE(state) }
112 * events, such as an I/O error. If an OOB event occurs, the state machine
171 unsigned long events, event_mask; fscache_object_sm_dispatcher() local
177 object->debug_id, object->state->name, object->events); fscache_object_sm_dispatcher()
184 events = object->events; fscache_object_sm_dispatcher()
186 /* Handle any out-of-band events (typically an error) */ fscache_object_sm_dispatcher()
187 if (events & object->oob_event_mask) { fscache_object_sm_dispatcher()
189 object->debug_id, events & object->oob_event_mask); fscache_object_sm_dispatcher()
190 for (t = object->oob_table; t->events; t++) { fscache_object_sm_dispatcher()
191 if (events & t->events) { fscache_object_sm_dispatcher()
194 event = fls(events & t->events) - 1; fscache_object_sm_dispatcher()
196 clear_bit(event, &object->events); fscache_object_sm_dispatcher()
204 if (events & event_mask) { fscache_object_sm_dispatcher()
205 for (t = state->transitions; t->events; t++) { fscache_object_sm_dispatcher()
206 if (events & t->events) { fscache_object_sm_dispatcher()
208 event = fls(events & t->events) - 1; fscache_object_sm_dispatcher()
209 clear_bit(event, &object->events); fscache_object_sm_dispatcher()
251 for (t = state->transitions; t->events; t++) fscache_object_sm_dispatcher()
252 event_mask |= t->events; fscache_object_sm_dispatcher()
257 events = object->events; fscache_object_sm_dispatcher()
258 if (events & event_mask) fscache_object_sm_dispatcher()
311 object->events = 0; fscache_object_init()
322 for (t = object->oob_table; t->events; t++) fscache_object_init()
323 object->oob_event_mask |= t->events; fscache_object_init()
325 for (t = object->state->transitions; t->events; t++) fscache_object_init()
326 object->event_mask |= t->events; fscache_object_init()
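
The fscache hits above show the table-driven dispatcher: each state carries transitions of the form { .events = (emask), .transit_to = STATE(state) }, and the highest set event bit matching an entry (fls(events & t->events) - 1) is consumed before the object moves on. A reduced sketch of that dispatch (invented states and events; fls is open-coded because it is a kernel helper):

    #include <stdio.h>

    enum state { INIT, READY, DEAD };

    struct transition {
        unsigned long events;       /* event mask this entry handles */
        enum state    transit_to;
    };

    /* table for INIT; a real object has one table per state plus an
     * out-of-band table that is consulted first */
    static const struct transition init_table[] = {
        { 1UL << 3, DEAD },         /* error-style event wins */
        { 1UL << 0, READY },
        { 0, INIT }                 /* terminator: .events == 0 */
    };

    static int fls_ul(unsigned long x)
    {
        int n = 0;

        while (x) {
            n++;
            x >>= 1;
        }
        return n;
    }

    int main(void)
    {
        unsigned long events = (1UL << 0) | (1UL << 3);
        enum state st = INIT;
        const struct transition *t;

        for (t = init_table; t->events; t++)
            if (events & t->events) {
                int event = fls_ul(events & t->events) - 1;

                events &= ~(1UL << event);   /* consume the bit */
                st = t->transit_to;
                printf("event %d -> state %d\n", event, st);
                break;
            }
        return 0;
    }
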
/linux-4.1.27/drivers/isdn/sc/
H A Devent.c24 static char *events[] = { "ISDN_STAT_STAVAIL", variable
47 sc_adapter[card]->devicename, events[event - 256], Channel); indicate_status()
/linux-4.1.27/drivers/md/
H A Ddm-uevent.h33 extern void dm_send_uevents(struct list_head *events, struct kobject *kobj);
47 static inline void dm_send_uevents(struct list_head *events, dm_send_uevents() argument
H A Ddm-uevent.c130 * @events: list of events to send
134 void dm_send_uevents(struct list_head *events, struct kobject *kobj) dm_send_uevents() argument
139 list_for_each_entry_safe(event, next, events, elist) { list_for_each_entry_safe()
144 * discard these unsent events. list_for_each_entry_safe()
/linux-4.1.27/drivers/media/v4l2-core/
H A Dv4l2-event.c4 * V4L2 events.
133 /* Do we have any free events? */ __v4l2_event_queue_fh()
136 kev = sev->events + sev_pos(sev, 0); __v4l2_event_queue_fh()
148 sev->events + sev_pos(sev, 0); __v4l2_event_queue_fh()
154 kev = sev->events + sev_pos(sev, sev->in_use); __v4l2_event_queue_fh()
223 sev->events[i].sev = sev; v4l2_event_subscribe()
296 /* Remove any pending events for this subscription */ v4l2_event_unsubscribe()
298 list_del(&sev->events[sev_pos(sev, i)].list); v4l2_event_unsubscribe()
/linux-4.1.27/drivers/spi/
H A Dspi-fsl-cpm.h29 extern void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events);
38 static inline void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { } fsl_spi_cpm_init() argument
H A Dspi-fsl-espi.c525 void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) fsl_espi_cpu_irq() argument
530 if (events & SPIE_NE) { fsl_espi_cpu_irq()
535 while (SPIE_RXCNT(events) < min(4, mspi->len)) { fsl_espi_cpu_irq()
537 events = mpc8xxx_spi_read_reg(&reg_base->event); fsl_espi_cpu_irq()
559 if (!(events & SPIE_NF)) { fsl_espi_cpu_irq()
563 ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( fsl_espi_cpu_irq()
571 /* Clear the events */ fsl_espi_cpu_irq()
572 mpc8xxx_spi_write_reg(&reg_base->event, events); fsl_espi_cpu_irq()
589 u32 events; fsl_espi_irq() local
591 /* Get interrupt events(tx/rx) */ fsl_espi_irq()
592 events = mpc8xxx_spi_read_reg(&reg_base->event); fsl_espi_irq()
593 if (events) fsl_espi_irq()
596 dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); fsl_espi_irq()
598 fsl_espi_cpu_irq(mspi, events); fsl_espi_irq()
H A Dspi-fsl-spi.c508 static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) fsl_spi_cpu_irq() argument
513 if (events & SPIE_NE) { fsl_spi_cpu_irq()
520 if ((events & SPIE_NF) == 0) fsl_spi_cpu_irq()
522 while (((events = fsl_spi_cpu_irq()
527 /* Clear the events */ fsl_spi_cpu_irq()
528 mpc8xxx_spi_write_reg(&reg_base->event, events); fsl_spi_cpu_irq()
544 u32 events; fsl_spi_irq() local
547 /* Get interrupt events (tx/rx) */ fsl_spi_irq()
548 events = mpc8xxx_spi_read_reg(&reg_base->event); fsl_spi_irq()
549 if (events) fsl_spi_irq()
552 dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); fsl_spi_irq()
555 fsl_spi_cpm_irq(mspi, events); fsl_spi_irq()
557 fsl_spi_cpu_irq(mspi, events); fsl_spi_irq()
/linux-4.1.27/drivers/staging/iio/
H A Diio_simple_dummy_events.c17 #include <linux/iio/events.h>
20 /* Evgen 'fakes' interrupt events for this example */
65 * how this is done when multiple events exist. iio_simple_dummy_write_event_config()
111 * Many devices provide a large set of events of which only a subset may
114 * associated with each possible event so that the right value is in place when
215 * iio_simple_dummy_events_register() - setup interrupt handling for events
218 * This function requests the threaded interrupt to handle the events.
222 * no way forms part of this example. Just assume that events magically
/linux-4.1.27/drivers/input/misc/
H A Ddm355evm_keys.c26 * read those events from the small (32 event) queue and reports them.
59 * remote controls could easily send more RC5-encoded events.
114 * events until we get the "queue empty" indicator. dm355evm_keys_irq()
136 /* Press and release a button: two events, same code. dm355evm_keys_irq()
137 * Press and hold (autorepeat), then release: N events dm355evm_keys_irq()
142 * So we must synthesize release events. We do that by dm355evm_keys_irq()
143 * mapping events to a press/release event pair; then dm355evm_keys_irq()
144 * to avoid adding extra events, skip the second event dm355evm_keys_irq()
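
The dm355evm comments above explain why the driver synthesizes release events: the hardware queue only reports "key seen" codes, so each code is mapped to a press/release pair and a repeated code is skipped instead of producing a second pair. A sketch of that mapping (plain printf standing in for input_report_key):

    #include <stdio.h>

    static void report_key(int code, int pressed)
    {
        printf("key %#x %s\n", code, pressed ? "press" : "release");
    }

    static void handle_hw_event(int code)
    {
        static int last_code = -1;

        if (code == last_code) {    /* repeat: skip the second event */
            last_code = -1;
            return;
        }
        report_key(code, 1);
        report_key(code, 0);        /* synthesized release */
        last_code = code;
    }

    int main(void)
    {
        handle_hw_event(0x39);      /* press + synthesized release */
        handle_hw_event(0x39);      /* hardware repeat: collapsed */
        handle_hw_event(0x3a);      /* a different key */
        return 0;
    }
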
H A Dpcap_keys.c2 * Input driver for PCAP events:
128 MODULE_DESCRIPTION("Motorola PCAP2 input events driver");
/linux-4.1.27/drivers/atm/
H A DuPD98402.c171 unsigned char events; stat_event() local
173 events = GET(PCR); stat_event()
174 if (events & uPD98402_PFM_PFEB) ADD_LIMITED(path_febe,PFECB); stat_event()
175 if (events & uPD98402_PFM_LFEB) ADD_LIMITED(line_febe,LECCT); stat_event()
176 if (events & uPD98402_PFM_B3E) ADD_LIMITED(path_bip,B3ECT); stat_event()
177 if (events & uPD98402_PFM_B2E) ADD_LIMITED(line_bip,B2ECT); stat_event()
178 if (events & uPD98402_PFM_B1E) ADD_LIMITED(section_bip,B1ECT); stat_event()
217 (void) GET(PCR); /* clear performance events */ uPD98402_start()
/linux-4.1.27/drivers/thermal/
H A Duser_space.c2 * user_space.c - A simple user space Thermal events notifier
30 * notify_user_space - Notifies user space about thermal events
/linux-4.1.27/drivers/usb/core/
H A Dnotify.c23 * @nb: pointer to the notifier block for the callback events.
35 * @nb: pointer to the notifier block for the callback events.
/linux-4.1.27/fs/notify/fanotify/
H A Dfanotify.h9 * Structure for normal fanotify events. It gets allocated in
25 * Structure for permission fanotify events. It gets allocated and freed in
/linux-4.1.27/fs/nfs/
H A Diostat.h22 unsigned long events[__NFSIOS_COUNTSMAX]; member in struct:nfs_iostats
28 this_cpu_inc(server->io_stats->events[stat]); nfs_inc_server_stats()
/linux-4.1.27/include/net/
H A Dllc_s_ev.h17 /* Defines SAP component events */
18 /* Types of events (possible values in 'ev->type') */
/linux-4.1.27/arch/sh/include/mach-dreamcast/mach/
H A Dsysasic.h18 /* Hardware events -
20 Each of these events correspond to a bit within the Event Mask Registers/
/linux-4.1.27/net/netfilter/
H A Dnf_conntrack_ecache.c116 /* deliver cached events and clear cache entry - must be called with locally
121 unsigned long events, missed; nf_ct_deliver_cached_events() local
136 events = xchg(&e->cache, 0); nf_ct_deliver_cached_events()
138 if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events) nf_ct_deliver_cached_events()
142 * the lock, thus we may send missed events twice. However, nf_ct_deliver_cached_events()
146 if (!((events | missed) & e->ctmask)) nf_ct_deliver_cached_events()
153 ret = notify->fcn(events | missed, &item); nf_ct_deliver_cached_events()
160 e->missed |= events; nf_ct_deliver_cached_events()
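
nf_ct_deliver_cached_events above snatches the cached event bits with an atomic exchange, delivers them together with previously missed bits, and folds them back into the missed set if delivery fails. A single-threaded sketch of that bookkeeping using C11 atomics (the failing notifier is a stand-in):

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_uint cache;
    static unsigned int missed;

    static int notify(unsigned int events)
    {
        static int fail_first = 1;

        printf("notify %#x\n", events);
        if (fail_first) {           /* pretend the first try fails */
            fail_first = 0;
            return -1;
        }
        return 0;
    }

    static void deliver_cached_events(void)
    {
        unsigned int events = atomic_exchange(&cache, 0);

        if (!(events | missed))
            return;
        if (notify(events | missed) < 0)
            missed |= events;       /* redeliver them next time */
        else
            missed = 0;
    }

    int main(void)
    {
        atomic_fetch_or(&cache, 0x9);
        deliver_cached_events();    /* fails: 0x9 kept as missed */
        atomic_fetch_or(&cache, 0x2);
        deliver_cached_events();    /* succeeds with 0x9 | 0x2 */
        return 0;
    }
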
/linux-4.1.27/sound/core/seq/
H A Dseq_prioq.h49 /* return number of events available in prioq */
58 /* Remove events */
H A Dseq_system.c34 * - send tempo /start/stop etc. events to this port to manipulate the
37 * - this port supports subscription. The received timer events are
93 /* entry points for broadcasting system events */ snd_seq_system_broadcast()
104 /* entry points for broadcasting system events */ snd_seq_system_notify()
115 /* call-back handler for timer events */ event_input_timer()
/linux-4.1.27/tools/perf/scripts/perl/
H A Dcheck-perf-trace.pl7 # events, etc. Basically, if this script runs successfully and
81 print "\nunhandled events:\n\n";
H A Dwakeup-latency.pl8 # all events. They don't necessarily correspond to the 'common_*' fields
90 print "\nunhandled events:\n\n";
H A Drw-by-file.pl8 # all events. They don't necessarily correspond to the 'common_*' fields
87 print "\nunhandled events:\n\n";
/linux-4.1.27/tools/perf/scripts/python/
H A Dcheck-perf-trace.py7 # events, etc. Basically, if this script runs successfully and
75 print "\nunhandled events:\n\n",
H A Dsctop.py65 print "\nsyscall events for %s:\n\n" % (for_comm),
67 print "\nsyscall events:\n\n",
H A Dsyscall-counts-by-pid.py58 print "\nsyscall events for %s:\n\n" % (for_comm),
60 print "\nsyscall events by comm/pid:\n\n",
H A Dsyscall-counts.py54 print "\nsyscall events for %s:\n\n" % (for_comm),
56 print "\nsyscall events:\n\n",
/linux-4.1.27/block/
H A Dgenhd.c1387 * Disk events - monitor disk events like media change and eject request.
1396 unsigned int pending; /* events already sent out */
1397 unsigned int clearing; /* events being cleared */
1427 * the default is being used, poll iff there are events which disk_events_poll_jiffies()
1432 else if (disk->events & ~disk->async_events) disk_events_poll_jiffies()
1440 * @disk: disk to block events for
1510 * @disk: disk to unblock events for
1513 * starts events polling if configured.
1526 * @disk: disk to check and flush events for
1527 * @mask: events to flush
1531 * doesn't clear the events from @disk->ev.
1552 * disk_clear_events - synchronously check, clear and return pending events
1553 * @disk: disk to fetch and clear events from
1554 * @mask: mask of events to be fetched and cleared
1556 * Disk events are synchronously checked and pending events in @mask
1582 * can still be modified even if events are blocked). disk_clear_events()
1596 /* then, fetch and clear pending events */ disk_clear_events()
1624 unsigned int events; disk_check_events() local
1628 /* check events */ disk_check_events()
1629 events = disk->fops->check_events(disk, clearing); disk_check_events()
1631 /* accumulate pending events and schedule next poll if necessary */ disk_check_events()
1634 events &= ~ev->pending; disk_check_events()
1635 ev->pending |= events; disk_check_events()
1646 * Tell userland about new events. Only the events listed in disk_check_events()
1647 * @disk->events are reported. Unlisted events are processed the disk_check_events()
1651 if (events & disk->events & (1 << i)) disk_check_events()
1659 * A disk events enabled device has the following sysfs nodes under
1662 * events : list of all supported events
1663 * events_async : list of events which can be detected w/o polling
1666 static ssize_t __disk_events_show(unsigned int events, char *buf) __disk_events_show() argument
1673 if (events & (1 << i)) { __disk_events_show()
1688 return __disk_events_show(disk->events, buf); disk_events_show()
1728 static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL);
1790 pr_warn("%s: failed to initialize events\n", disk->disk_name); disk_alloc_events()
1812 pr_warn("%s: failed to create sysfs files for events\n", disk_add_events()
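
disk_check_events above accumulates freshly detected bits into a pending mask so nothing is reported twice, and only events listed in disk->events reach userland; the rest are processed internally. A sketch of that filtering (event names and the uevent print are illustrative):

    #include <stdio.h>

    #define EV_MEDIA_CHANGE 0x1
    #define EV_EJECT_REQ    0x2

    struct disk {
        unsigned int supported;     /* like disk->events */
        unsigned int pending;       /* sent but not yet cleared */
    };

    static void check_events(struct disk *d, unsigned int detected)
    {
        unsigned int events = detected & ~d->pending;  /* new bits */

        d->pending |= events;
        events &= d->supported;     /* unlisted events stay internal */
        if (events & EV_MEDIA_CHANGE)
            printf("uevent: media change\n");
        if (events & EV_EJECT_REQ)
            printf("uevent: eject request\n");
    }

    int main(void)
    {
        struct disk d = { .supported = EV_MEDIA_CHANGE, .pending = 0 };

        check_events(&d, EV_MEDIA_CHANGE);  /* reported */
        check_events(&d, EV_MEDIA_CHANGE);  /* still pending: quiet */
        check_events(&d, EV_EJECT_REQ);     /* unlisted: quiet */
        return 0;
    }
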
/linux-4.1.27/arch/tile/kernel/
H A Dperf_event.c52 struct perf_event *events[TILE_MAX_COUNTERS]; /* counter order */ member in struct:cpu_hw_events
64 const int *hw_events; /* generic hw events table */
65 /* generic hw cache events table */
70 hw events */
72 cache events */
77 int max_events; /* max generic hw events
89 /* TILEPro hardware events map */
102 /* TILEGx hardware events map */
215 /* TILEGx hardware events map */
500 * Returns the delta events processed.
599 cpuc->events[hwc->idx] = NULL; tile_pmu_stop()
635 cpuc->events[idx] = event; tile_pmu_start()
648 * The event is added to the group of enabled events
649 * but only if it can be scheduled with existing events.
704 * The event is deleted from the group of enabled events.
720 cpuc->events[event->hw.idx] = NULL; tile_pmu_del()
727 * If there are no events left, then mask PMU interrupt. tile_pmu_del()
743 * Map generic events to Tile PMU.
753 * Map generic hardware cache events to Tile PMU.
900 event = cpuc->events[bit]; tile_pmu_handle_irq()
/linux-4.1.27/include/ras/
H A Dras_event.h18 * These events are generated when hardware detects a corrected or
81 * Those events are generated when hardware detected a corrected or
85 * FIXME: Add events for handling memory errors originated from the
90 * Hardware-independent Memory Controller specific events
166 * These events are generated when hardware detects a corrected or
/linux-4.1.27/arch/powerpc/platforms/pseries/
H A Dio_event_irq.c28 * information about hardware error and non-error events. Device
29 * drivers can register their event handlers to receive events.
106 * multiple events through a single interrupt, it must ensure that the
108 * process all out-standing events for that interrupt.
154 np = of_find_node_by_path("/event-sources/ibm,io-events"); ioei_init()
/linux-4.1.27/drivers/media/pci/ttpci/
H A Dav7110_av.c880 * Video MPEG decoder events
884 struct dvb_video_events *events = &av7110->video_events; dvb_video_add_event() local
887 spin_lock_bh(&events->lock); dvb_video_add_event()
889 wp = (events->eventw + 1) % MAX_VIDEO_EVENT; dvb_video_add_event()
890 if (wp == events->eventr) { dvb_video_add_event()
891 events->overflow = 1; dvb_video_add_event()
892 events->eventr = (events->eventr + 1) % MAX_VIDEO_EVENT; dvb_video_add_event()
896 memcpy(&events->events[events->eventw], event, sizeof(struct video_event)); dvb_video_add_event()
897 events->eventw = wp; dvb_video_add_event()
899 spin_unlock_bh(&events->lock); dvb_video_add_event()
901 wake_up_interruptible(&events->wait_queue); dvb_video_add_event()
907 struct dvb_video_events *events = &av7110->video_events; dvb_video_get_event() local
909 if (events->overflow) { dvb_video_get_event()
910 events->overflow = 0; dvb_video_get_event()
913 if (events->eventw == events->eventr) { dvb_video_get_event()
919 ret = wait_event_interruptible(events->wait_queue, dvb_video_get_event()
920 events->eventw != events->eventr); dvb_video_get_event()
925 spin_lock_bh(&events->lock); dvb_video_get_event()
927 memcpy(event, &events->events[events->eventr], dvb_video_get_event()
929 events->eventr = (events->eventr + 1) % MAX_VIDEO_EVENT; dvb_video_get_event()
931 spin_unlock_bh(&events->lock); dvb_video_get_event()
/linux-4.1.27/drivers/acpi/acpica/
H A Devevent.c64 * DESCRIPTION: Initialize global data structures for ACPI events (Fixed, GPE)
74 /* If Hardware Reduced flag is set, there are no fixed events */ acpi_ev_initialize_events()
88 "Unable to initialize fixed events")); acpi_ev_initialize_events()
95 "Unable to initialize general purpose events")); acpi_ev_initialize_events()
156 * DESCRIPTION: Install the fixed event handlers and disable all fixed events.
167 * enable the fixed events. acpi_ev_fixed_event_initialize()
197 * DESCRIPTION: Checks the PM status register for active fixed events
/linux-4.1.27/drivers/iio/dac/
H A Dad5421.c21 #include <linux/iio/events.h>
65 * @fault_mask: software masking of events
207 unsigned int events; ad5421_fault_handler() local
235 /* we are only interested in new events */ ad5421_fault_handler()
236 events = (old_fault ^ fault) & fault; ad5421_fault_handler()
237 events &= st->fault_mask; ad5421_fault_handler()
239 if (events & AD5421_FAULT_OVER_CURRENT) { ad5421_fault_handler()
248 if (events & AD5421_FAULT_UNDER_CURRENT) { ad5421_fault_handler()
257 if (events & AD5421_FAULT_TEMP_OVER_140) { ad5421_fault_handler()
/linux-4.1.27/include/rdma/
H A Diw_cm.h62 * iw_cm_handler - Function to be called by the IW CM when delivering events
73 * events to the IW CM. Returns either 0 indicating the event was processed
90 events */
131 * @event_handler: User callback invoked to report events associated with the
143 * The client can assume that no events will be delivered for the CM ID after
190 * CONNECT_REQUEST event. Subsequent events related to this connection will be
193 * client can assume that no events will be delivered to the specified IW CM
206 * The client can assume that no events will be delivered to the specified IW
222 * client can assume that no events will be delivered to the specified IW CM
/linux-4.1.27/arch/s390/kvm/
H A Dguestdbg.c75 * If the guest is not interrested in branching events, we can savely enable_all_hw_bp()
88 * report instruction-fetching events enable_all_hw_bp()
115 * spaces, enable all events and give all to the guest */ enable_all_hw_wp()
153 * This reduces the amount of reported events. kvm_s390_patch_guest_per_regs()
412 /* remove duplicate events if PC==PER address */ debug_exit_required()
450 /* filter all events, demanded by the guest */ filter_guest_per_event()
456 /* filter "successful-branching" events */ filter_guest_per_event()
462 /* filter "instruction-fetching" events */ filter_guest_per_event()
467 /* All other PER events will be given to the guest */ filter_guest_per_event()
/linux-4.1.27/arch/x86/kernel/cpu/
H A Dperf_event_amd.c195 * AMD64 events are detected based on their event codes.
256 * AMD64 NorthBridge events need special treatment because
260 * NB events are events measuring L3 cache, Hypertransport
262 * They measure events on the NorthBride which is shared
263 * by all cores on a package. NB events are counted on a
270 * can be measuring NB events using the same counters. Thus,
275 * the fact that only NB events have restrictions. Consequently,
282 * counters to host NB events, this is why we use atomic ops. Some
289 * Non NB events are not impacted by this restriction.
709 /* Reload all events */ amd_pmu_enable_virt()
727 /* Reload all events */ amd_pmu_disable_virt()
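
The perf_event_amd.c comments above explain that NorthBridge counters are shared by all cores on a package, so counter ownership is arbitrated with atomic ops. A toy claim routine built on a compare-and-swap (the real code also refcounts and releases counters):

    #include <stdio.h>
    #include <stdatomic.h>

    #define NB_COUNTERS 4
    #define NB_FREE     (-1)

    static atomic_int owner[NB_COUNTERS] = {
        NB_FREE, NB_FREE, NB_FREE, NB_FREE
    };

    static int claim_nb_counter(int cpu)
    {
        int i, expected;

        for (i = 0; i < NB_COUNTERS; i++) {
            expected = NB_FREE;
            if (atomic_compare_exchange_strong(&owner[i], &expected, cpu))
                return i;           /* this core owns counter i now */
            if (expected == cpu)
                return i;           /* already ours */
        }
        return -1;                  /* all NB counters busy */
    }

    int main(void)
    {
        printf("cpu0 got counter %d\n", claim_nb_counter(0));
        printf("cpu1 got counter %d\n", claim_nb_counter(1));
        printf("cpu0 again got %d\n", claim_nb_counter(0));
        return 0;
    }
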
H A Dperf_event.h2 * Performance events x86 architecture header
87 /* The maximal number of PEBS events: */
121 * among events on a single PMU.
167 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ member in struct:cpu_hw_events
172 int n_events; /* the # of events in the below arrays */
173 int n_added; /* the # last events in the below arrays;
175 int n_txn; /* the # last events in the below arrays;
183 int n_excl; /* the number of exclusive events */
265 * cycle because it needs to know which subsequent events will be
266 * scheduled. It may fail to schedule the events then. So we set the
268 * events to select for counter rescheduling.
287 * filter mask to validate fixed counter events.
382 * to enable blacklisting of events using a counter bitmask
396 * Extra registers for specific events.
398 * Some events need large masks and require external MSRs.
399 * Those extra MSRs end up being shared for all events on
600 * Extra registers for events
H A Dperf_event.c2 * Performance events x86 architecture code
62 * Returns the delta events processed.
263 printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); check_hw_exists()
353 * Check if we can create event of a certain type (that no conflicting events
427 /* disallow bts if conflicting events are present */ x86_setup_perfctr()
527 * Count user and OS events unless requested not to x86_pmu_hw_config()
614 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; x86_pmu_enable_all()
633 * Assign events iterating over all events and counters, beginning
634 * with events with least weights first. Keep the current iterator
641 int unassigned; /* number of events to be assigned left */
660 * Initialize iterator that runs through all events and counters.
764 * Go through all unassigned events and find the next one to schedule.
765 * Take events with the least weight first. Return true on success.
870 * N/2 counters can be used. This helps with events with x86_schedule_events()
882 * In case of success (unsched = 0), mark events as committed, x86_schedule_events()
883 * so we do not put_constraint() in case new events are added x86_schedule_events()
905 * do not put_constraint() on committed events, x86_schedule_events()
912 * release events that failed scheduling x86_schedule_events()
926 * dogrp: true if must collect siblings events (group)
927 * returns total number of events and error code
936 /* current number of events already accepted */ collect_events()
1015 * step1: save events moving to new counters x86_pmu_enable()
1042 * step2: reprogram moved events into new counters x86_pmu_enable()
1147 * The event is added to the group of enabled events
1148 * but only if it can be scheduled with existing events.
1169 * If group events scheduling transaction was started, x86_pmu_add()
1217 cpuc->events[idx] = event; x86_pmu_start()
1291 cpuc->events[hwc->idx] = NULL; x86_pmu_stop()
1318 * The events never got scheduled and ->cancel_txn will truncate x86_pmu_del()
1389 event = cpuc->events[idx]; x86_pmu_handle_irq()
1505 * events (user-space has to fall back and pmu_check_apic()
1518 * Remove all undefined events (x86_pmu.event_map(id) == 0)
1612 .name = "events",
1673 pr_cont("no PMU driver, software events only.\n"); init_hw_perf_events()
1723 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); init_hw_perf_events()
1739 * Start group events scheduling transaction
1751 * Stop group events scheduling transaction
1759 * Truncate collected array by the number of events added in this x86_pmu_cancel_txn()
1768 * Commit group events scheduling transaction
1802 * per_core and per_cpu structure. Otherwise, group events
1865 * - check events are compatible which each other
1866 * - events do not compete for the same counter
1867 * - number of events <= number of counters
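All of this scheduling machinery is ultimately driven from the perf_event_open(2) syscall. A minimal userspace counter for a code region, assuming a Linux host; there is no glibc wrapper, so the raw syscall is used and error handling is kept minimal:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;              /* create disabled, enable explicitly */
        attr.exclude_kernel = 1;

        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

        for (volatile int i = 0; i < 1000000; i++)
                ;                       /* the measured region */

        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                printf("instructions: %llu\n", (unsigned long long)count);

        close(fd);
        return 0;
}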
/linux-4.1.27/arch/arm/mach-omap2/
H A Dprm3xxx.c34 static void omap3xxx_prm_read_pending_irqs(unsigned long *events);
149 * omap3xxx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events
150 * @events: ptr to a u32, preallocated by caller
153 * MPU IRQs, and store the result into the u32 pointed to by @events.
156 static void omap3xxx_prm_read_pending_irqs(unsigned long *events) omap3xxx_prm_read_pending_irqs() argument
164 events[0] = mask & st; omap3xxx_prm_read_pending_irqs()
218 * omap3xxx_prm_clear_mod_irqs - clear wake-up events from PRCM interrupt
223 * The purpose of this function is to clear any wake-up events latched
227 * that any peripheral wake-up events occurring while attempting to
396 * Clear any previously-latched I/O wakeup events and ensure that the
425 * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
427 * Activates the I/O wakeup event latches and allows events logged by
H A Dprm44xx.c35 static void omap44xx_prm_read_pending_irqs(unsigned long *events);
210 * omap44xx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events
211 * @events: ptr to two consecutive u32s, preallocated by caller
214 * MPU IRQs, and store the result into the two u32s pointed to by @events.
217 static void omap44xx_prm_read_pending_irqs(unsigned long *events) omap44xx_prm_read_pending_irqs() argument
219 events[0] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_OFFSET, omap44xx_prm_read_pending_irqs()
222 events[1] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_2_OFFSET, omap44xx_prm_read_pending_irqs()
291 * Clear any previously-latched I/O wakeup events and ensure that the
336 * omap44xx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
338 * Activates the I/O wakeup event latches and allows events logged by
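Both PRM variants compute pending IRQs the same way: a bit counts as pending only if it is latched in the status register and enabled in the mask register. A standalone sketch with the register reads stubbed out (the values are invented):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the MPU IRQENABLE/IRQSTATUS register reads. */
static uint32_t read_irqenable(void) { return 0x00000014; }
static uint32_t read_irqstatus(void) { return 0x00000016; }

static void read_pending_irqs(unsigned long *events)
{
        /* Pending = latched in status AND enabled in the mask. */
        events[0] = read_irqenable() & read_irqstatus();
}

int main(void)
{
        unsigned long events[1];

        read_pending_irqs(events);
        printf("pending: 0x%lx\n", events[0]);  /* 0x14 */
        return 0;
}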
/linux-4.1.27/drivers/base/power/
H A Dwakeup.c2 * drivers/base/power/wakeup.c - System wakeup events framework
17 #include <trace/events/power.h>
23 * if wakeup events are registered during or immediately before the transition.
31 * Combined counters of registered wakeup events and wakeup events in progress.
48 /* A preserved old value of the events counter. */
387 * core of the event by incrementing the counter of wakeup events being
406 /* Increment the counter of events in progress. */ wakeup_source_activate()
490 * become inactive by decrementing the counter of wakeup events being processed
491 * and incrementing the counter of registered wakeup events.
530 * Increment the counter of registered wakeup events and decrement the wakeup_source_deactivate()
531 * counter of wakeup events in progress simultaneously. wakeup_source_deactivate()
545 * Call this function for wakeup events whose processing started with calling
700 * Compare the current number of registered wakeup events with its preserved
701 * value from the past and return true if new wakeup events have been registered
703 * wakeup events being processed is different from zero.
741 * pm_get_wakeup_count - Read the number of registered wakeup events.
745 * Store the number of registered wakeup events at the address in @count. If
746 * @block is set, block until the current number of wakeup events being
749 * Return 'false' if the current number of wakeup events being processed is
777 * pm_save_wakeup_count - Save the current number of registered wakeup events.
778 * @count: Value to compare with the current number of registered wakeup events.
780 * If @count is equal to the current number of registered wakeup events and the
781 * current number of wakeup events being processed is zero, store @count as the
782 * old number of registered wakeup events for pm_check_wakeup_events(), enable
783 * wakeup events detection and return 'true'. Otherwise disable wakeup events
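Userspace drives this counter protocol through /sys/power/wakeup_count: read the count, write it back (the write fails if new wakeup events were registered in between), and only then request suspend. A hedged sketch of that sequence with minimal error handling:

#include <stdio.h>

int main(void)
{
        char buf[32];
        FILE *f = fopen("/sys/power/wakeup_count", "r");

        if (!f || !fgets(buf, sizeof(buf), f)) {
                perror("read wakeup_count");
                return 1;
        }
        fclose(f);

        f = fopen("/sys/power/wakeup_count", "w");
        if (!f || fputs(buf, f) == EOF || fclose(f) != 0) {
                /* A wakeup event arrived meanwhile: abort the suspend. */
                perror("write wakeup_count");
                return 1;
        }

        f = fopen("/sys/power/state", "w");
        if (!f || fputs("mem\n", f) == EOF || fclose(f) != 0) {
                perror("suspend");
                return 1;
        }
        return 0;
}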
/linux-4.1.27/drivers/char/
H A Dsonypi.c99 "set this if you would like sonypi to feed events to the input subsystem");
150 /* ioports used for brightness and type2 events */
255 /* The set of possible button release events */
261 /* The set of possible jogger events */
279 /* The set of possible capture button events */
287 /* The set of possible fnkeys events */
314 /* The set of possible program key events */
323 /* The set of possible bluetooth events */
331 /* The set of possible wireless events */
338 /* The set of possible back button events */
344 /* The set of possible help button events */
351 /* The set of possible lid events */
358 /* The set of possible zoom events */
364 /* The set of possible thumbphrase events */
370 /* The set of possible motioneye camera events */
377 /* The set of possible memorystick events */
384 /* The set of possible battery events */
395 struct sonypi_event * events; member in struct:sonypi_eventtypes
434 /* Correspondence table between sonypi events and input layer events */
854 for (j = 0; sonypi_eventtypes[i].events[j].event; j++) { sonypi_irq()
855 if (v1 == sonypi_eventtypes[i].events[j].data) { sonypi_irq()
856 event = sonypi_eventtypes[i].events[j].event; sonypi_irq()
867 * events belonging to the sonypi device we don't know about, sonypi_irq()
1098 /* Enable ACPI mode to get Fn key events */ sonypi_enable()
1108 sonypi_call2(0x81, 0); /* make sure we don't get any more events */ sonypi_disable()
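The loop at lines 854-856 is a plain table scan: each event type carries an array of (data, event) pairs terminated by a zero event code. A generic sketch of the same lookup; the table contents are invented, not the driver's:

#include <stdio.h>

struct ev_map {
        unsigned char data;     /* raw byte from the device */
        int event;              /* decoded event code; 0 terminates */
};

static const struct ev_map jogger_events[] = {
        { 0x1f, 101 },          /* e.g. jog dial pressed */
        { 0x10, 102 },          /* e.g. jog dial turned up */
        { 0x00, 0 },
};

static int decode(const struct ev_map *tbl, unsigned char v)
{
        for (int j = 0; tbl[j].event; j++)
                if (tbl[j].data == v)
                        return tbl[j].event;
        return 0;               /* unknown event */
}

int main(void)
{
        printf("event = %d\n", decode(jogger_events, 0x10)); /* 102 */
        return 0;
}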
H A Dsnsc_event.c14 * These routines deal with environmental events arriving from the
34 * Pull incoming environmental events off the physical link to the
231 * to send it on its way. Keep trying to read events until SAL indicates
283 /* ask the system controllers to send events to this node */ scdrv_event_init()
295 IRQF_SHARED, "system controller events", event_sd); scdrv_event_init()
/linux-4.1.27/fs/
H A Deventpoll.c171 /* The structure that describe the interested events and the source fd */
206 * happened while transferring ready events to userspace w/out
251 struct epoll_event __user *events; member in struct:ep_send_events_data
370 * ep_events_available - Checks if ready events might be available.
374 * Returns: Returns a value different than zero if ready events are available,
473 unsigned long events, int subclass) ep_wake_up_nested()
478 wake_up_locked_poll(wqueue, events); ep_wake_up_nested()
483 unsigned long events, int subclass) ep_wake_up_nested()
485 wake_up_poll(wqueue, events); ep_wake_up_nested()
612 * empty list. Also, set ep->ovflist to NULL so that events ep_scan_ready_list()
631 * other events might have been queued by the poll callback. ep_scan_ready_list()
649 * releasing the lock, events will be queued in the normal way inside ep_scan_ready_list()
798 pt->_key = epi->event.events; ep_item_poll()
800 return epi->ffd.file->f_op->poll(epi->ffd.file, pt) & epi->event.events; ep_item_poll()
818 * caller requested events goes. We can remove it here. list_for_each_entry_safe()
861 * Proceed to find out if wanted events are really available inside ep_eventpoll_poll()
882 seq_printf(m, "tfd: %8d events: %8x data: %16llx\n", ep_show_fdinfo()
883 epi->ffd.fd, epi->event.events, ep_show_fdinfo()
997 * have events to report.
1025 if (!(epi->event.events & ~EP_PRIVATE_BITS)) ep_poll_callback()
1029 * Check the events coming with the callback. At this stage, not ep_poll_callback()
1030 * every device reports the events in the "key" parameter of the ep_poll_callback()
1034 if (key && !((unsigned long) key & epi->event.events)) ep_poll_callback()
1038 * If we are transferring events to userspace, we can hold no locks ep_poll_callback()
1040 * semantics). All the events that happen during that period of time are ep_poll_callback()
1288 if (epi->event.events & EPOLLWAKEUP) { ep_insert()
1338 if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { ep_insert()
1342 /* Notify waiting tasks that events are available */ ep_insert()
1405 epi->event.events = event->events; /* need barrier below */ ep_modify()
1407 if (epi->event.events & EPOLLWAKEUP) { ep_modify()
1418 * we do not miss events from ep_poll_callback if an ep_modify()
1424 * 2) We also need to ensure we do not miss _past_ events ep_modify()
1444 if (revents & event->events) { ep_modify()
1450 /* Notify waiting tasks that events are available */ ep_modify()
1484 for (eventcnt = 0, uevent = esed->events; ep_send_events_proc()
1515 if (__put_user(revents, &uevent->events) || ep_send_events_proc()
1523 if (epi->event.events & EPOLLONESHOT) ep_send_events_proc()
1524 epi->event.events &= EP_PRIVATE_BITS; ep_send_events_proc()
1525 else if (!(epi->event.events & EPOLLET)) { ep_send_events_proc()
1530 * epoll_wait() will check again the events ep_send_events_proc()
1547 struct epoll_event __user *events, int maxevents) ep_send_events()
1552 esed.events = events; ep_send_events()
1569 * ep_poll - Retrieves ready events, and delivers them to the caller supplied
1573 * @events: Pointer to the userspace buffer where the ready events should be
1575 * @maxevents: Size (in terms of number of events) of the caller event buffer.
1576 * @timeout: Maximum timeout for the ready events fetch operation, in
1582 * Returns: Returns the number of ready events which have been fetched, or an
1585 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, ep_poll() argument
1617 * ep_poll_callback() when events will become available. ep_poll()
1647 /* Is it worth to try to dig for events ? */ ep_poll()
1653 * Try to transfer events to user space. In case we get 0 events and ep_poll()
1658 !(res = ep_send_events(ep, events, maxevents)) && !timed_out) ep_poll()
1920 epds.events |= POLLERR | POLLHUP; SYSCALL_DEFINE4()
1935 epds.events |= POLLERR | POLLHUP; SYSCALL_DEFINE4()
1961 SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events, SYSCALL_DEFINE4()
1973 if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) SYSCALL_DEFINE4()
1995 /* Time to fish for events ... */ SYSCALL_DEFINE4()
1996 error = ep_poll(ep, events, maxevents, timeout); SYSCALL_DEFINE4()
2007 SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, SYSCALL_DEFINE6()
2027 error = sys_epoll_wait(epfd, events, maxevents, timeout); SYSCALL_DEFINE6()
2049 struct epoll_event __user *, events, COMPAT_SYSCALL_DEFINE6()
2072 err = sys_epoll_wait(epfd, events, maxevents, timeout); COMPAT_SYSCALL_DEFINE6()
472 ep_wake_up_nested(wait_queue_head_t *wqueue, unsigned long events, int subclass) ep_wake_up_nested() argument
482 ep_wake_up_nested(wait_queue_head_t *wqueue, unsigned long events, int subclass) ep_wake_up_nested() argument
1546 ep_send_events(struct eventpoll *ep, struct epoll_event __user *events, int maxevents) ep_send_events() argument
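From userspace, the machinery above is exercised through epoll_create1(), epoll_ctl() and epoll_wait(). A minimal, self-contained usage sketch watching stdin for readability:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct epoll_event ev, ready[8];
        int epfd = epoll_create1(0);
        int n;

        if (epfd < 0)
                return 1;

        ev.events = EPOLLIN;            /* interested in readability */
        ev.data.fd = STDIN_FILENO;
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0)
                return 1;

        /* Block up to 1000 ms for ready events (cf. ep_poll() above). */
        n = epoll_wait(epfd, ready, 8, 1000);
        for (int i = 0; i < n; i++)
                printf("fd %d ready, events 0x%x\n",
                       ready[i].data.fd, ready[i].events);

        close(epfd);
        return n < 0;
}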
/linux-4.1.27/drivers/iio/adc/
H A DMakefile38 xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
H A Dxilinx-xadc-events.c10 #include <linux/iio/events.h>
44 * events. xadc_handle_event()
63 void xadc_handle_events(struct iio_dev *indio_dev, unsigned long events) xadc_handle_events() argument
67 for_each_set_bit(i, &events, 8) xadc_handle_events()
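xadc_handle_events() dispatches one handler call per set bit in the event word. A userspace stand-in for the kernel's for_each_set_bit() shows the same idiom:

#include <stdio.h>

static void handle_event(unsigned int i)
{
        printf("handling event %u\n", i);
}

static void handle_events(unsigned long events)
{
        /* Visit every set bit in the low 8 bits of 'events'. */
        for (unsigned int i = 0; i < 8; i++)
                if (events & (1UL << i))
                        handle_event(i);
}

int main(void)
{
        handle_events(0x15);    /* events 0, 2 and 4 */
        return 0;
}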
/linux-4.1.27/drivers/net/ethernet/freescale/fs_enet/
H A Dfec.h9 /* Interrupt events/masks.
/linux-4.1.27/drivers/net/wan/lmc/
H A Dlmc_debug.h18 #define LMC_EVENTLOGSIZE 1024 /* number of events in eventlog */
/linux-4.1.27/drivers/devfreq/
H A Dgovernor.h21 /* Devfreq events */
/linux-4.1.27/arch/mips/include/asm/ip32/
H A Dip32_ints.h16 * interrupting events. Order is fairly irrelevant to handling
/linux-4.1.27/arch/sh/boards/mach-dreamcast/
H A Dirq.c22 * hardware events from system peripherals and triggering an SH7750 IRQ.
23 * Hardware events can trigger IRQs 13, 11, or 9 depending on which bits are
36 * In the kernel, these events are mapped to virtual IRQs so that drivers can
38 * mapping simple, the events are mapped as:
/linux-4.1.27/arch/blackfin/include/mach-common/
H A Dirq.h13 * Core events interrupt source definitions
/linux-4.1.27/drivers/xen/
H A DMakefile6 obj-y += events/
/linux-4.1.27/drivers/staging/unisys/common-spar/include/channels/
H A Ddiagchannel.h82 #define SUBSYSTEM_DEBUG 0 /* Standard subsystem for debug events */
88 * events */
194 /* Levels of severity for diagnostic events, in order from lowest severity to
196 * but info events rarely need to be logged except during debugging). The values
201 * DIAG_SEVERITY_SHUTOFF are not valid severity values for logging events but
221 * Levels of cause for diagnostic events, in order from least to greatest cause
241 * DiagSwitch to segregate events into block types. The files are transferred in
248 /* The Diag DiagWriter appends event blocks to events.raw as today, and for data
362 * whether events are logged. Any event's severity for a
415 * Events: Area where diagnostic events (up to MAX_EVENTS) are written.
423 struct diag_channel_event events[(DIAG_CH_SIZE - DIAG_CH_EVENT_OFFSET) / member in struct:spar_diag_channel_protocol
/linux-4.1.27/drivers/iio/
H A Dindustrialio-event.c26 #include <linux/iio/events.h>
30 * @wait: wait queue to allow blocking reads of events
31 * @det_events: list of detected events
84 unsigned int events = 0; iio_event_poll() local
87 return events; iio_event_poll()
92 events = POLLIN | POLLRDNORM; iio_event_poll()
94 return events; iio_event_poll()
443 static const char *iio_event_group_name = "events"; iio_device_register_eventset()
/linux-4.1.27/fs/notify/dnotify/
H A Ddnotify.c46 * When a process starts or stops watching an inode the set of events which
48 * list of everything receiving dnotify events about this directory and calculates
49 * the set of all those events. After it updates what dnotify is interested in
50 * it calls the fsnotify function so it can update the set of all events relevant
77 * Main fsnotify call where events are delivered to dnotify.
80 * events of this type. When found, send the correct process and signal and
82 * events.
229 /* adding more events to existing dnotify_struct? */ attach_dn()
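From userspace, dnotify is driven with fcntl(fd, F_NOTIFY, ...) on a directory descriptor; the kernel then delivers a signal (SIGIO by default) when a watched event occurs. A minimal sketch of that usage (new code should prefer inotify, which superseded this interface):

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t hit;

static void on_sigio(int sig) { (void)sig; hit = 1; }

int main(void)
{
        int fd = open(".", O_RDONLY);

        if (fd < 0)
                return 1;

        signal(SIGIO, on_sigio);
        /* Watch for file creation until cancelled (DN_MULTISHOT). */
        if (fcntl(fd, F_NOTIFY, DN_CREATE | DN_MULTISHOT) < 0)
                return 1;

        pause();                /* wait for a directory event */
        if (hit)
                printf("directory changed\n");

        close(fd);
        return 0;
}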
/linux-4.1.27/samples/bpf/
H A Dtracex3_user.c134 printf(" - many events with this latency\n"); main()
140 printf(" - few events\n"); main()
/linux-4.1.27/arch/sh/kernel/
H A Dperf_event.c32 struct perf_event *events[MAX_HWEVENTS]; member in struct:cpu_hw_events
41 /* Number of perf_events counting hardware events */
134 * If no events are currently in use, then we have to take a __hw_perf_event_init()
195 * As there is no interrupt associated with the overflow events, sh_perf_event_update()
228 cpuc->events[idx] = NULL; sh_pmu_stop()
250 cpuc->events[idx] = event; sh_pmu_start()
/linux-4.1.27/drivers/uwb/
H A Dneh.c24 * card delivers a stream of notifications and events to the
27 * notifications and events and then deliver those.
54 * it up in a discrete series of events, look up who is listening for
62 * - Most notifications/events are small (less thank .5k), copying
65 * - Notifications/events are ALWAYS smaller than PAGE_SIZE
67 * - Notifications/events always come in a single piece (ie: a buffer
68 * will always contain entire notifications/events).
77 * - Most notifications/events are fixed size; only a few are variable
80 * - Listeners of events expect them, so they usually provide a
158 * avoid surprises with late events that timed out a long time ago). So
441 * Given a buffer with one or more UWB RC events/notifications, break
445 * @buf: Buffer with the stream of notifications/events
451 * The device may pass us events formatted differently than expected.
470 * incoming event. this buffer may contain events that are not
493 "process incoming events (%zu left, minimum is " uwb_rc_neh_grok()
/linux-4.1.27/drivers/rtc/
H A Drtc-bfin.c17 * Since all events are maintained in the same interrupt mask register, if
114 * turning on interrupt events. Consider this:
172 * Since we handle all RTC events here, we have to make sure the requested
176 * and say that other events have happened as well (e.g. second). We do not
184 unsigned long events = 0; bfin_rtc_interrupt() local
205 events |= RTC_AF | RTC_IRQF; bfin_rtc_interrupt()
213 events |= RTC_UF | RTC_IRQF; bfin_rtc_interrupt()
217 if (events) bfin_rtc_interrupt()
218 rtc_update_irq(rtc->rtc_dev, 1, events); bfin_rtc_interrupt()
220 if (write_complete || events) { bfin_rtc_interrupt()
H A Drtc-at91sam9.c82 unsigned long events; member in struct:sam9_rtc
292 rtc->events |= (RTC_AF | RTC_IRQF); at91_rtc_cache_events()
296 rtc->events |= (RTC_UF | RTC_IRQF); at91_rtc_cache_events()
303 if (!rtc->events) at91_rtc_flush_events()
306 rtc_update_irq(rtc->rtcdev, 1, rtc->events); at91_rtc_flush_events()
307 rtc->events = 0; at91_rtc_flush_events()
309 pr_debug("%s: num=%ld, events=0x%02lx\n", __func__, at91_rtc_flush_events()
310 rtc->events >> 8, rtc->events & 0x000000FF); at91_rtc_flush_events()
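Both RTC handlers accumulate event bits the same way: each source ORs its flag plus RTC_IRQF into one word, and a single rtc_update_irq() call reports the whole set. A standalone sketch of that accumulation; the constants mirror include/uapi/linux/rtc.h and the report function is a stand-in:

#include <stdio.h>

#define RTC_IRQF 0x80   /* any of the following flags is active */
#define RTC_AF   0x20   /* alarm */
#define RTC_UF   0x10   /* update (once per second) */

static void report(unsigned long events)
{
        if (!events)
                return;         /* nothing latched, stay silent */
        printf("reporting events 0x%02lx\n", events);
}

int main(void)
{
        unsigned long events = 0;
        int alarm_fired = 1, second_tick = 1;

        if (alarm_fired)
                events |= RTC_AF | RTC_IRQF;
        if (second_tick)
                events |= RTC_UF | RTC_IRQF;

        report(events);         /* 0xb0 */
        return 0;
}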
/linux-4.1.27/drivers/dma-buf/
H A Ddma-buf.c139 unsigned long events; dma_buf_poll() local
150 events = poll_requested_events(poll) & (POLLIN | POLLOUT); dma_buf_poll()
151 if (!events) dma_buf_poll()
169 if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) { dma_buf_poll()
179 events &= ~pevents; dma_buf_poll()
184 if (events & pevents) { dma_buf_poll()
187 events &= ~pevents; dma_buf_poll()
191 events &= ~pevents; dma_buf_poll()
204 if ((events & POLLOUT) && shared_count > 0) { dma_buf_poll()
211 events &= ~POLLOUT; dma_buf_poll()
216 if (!(events & POLLOUT)) dma_buf_poll()
229 events &= ~POLLOUT; dma_buf_poll()
236 events &= ~POLLOUT; dma_buf_poll()
249 return events; dma_buf_poll()
/linux-4.1.27/arch/x86/platform/olpc/
H A Dolpc-xo1-sci.c170 * If propagate_events is false, the queue is drained without events being
303 /* Enable all EC events */ xo1_sci_resume()
342 /* Enable interesting SCI events, and clear pending interrupts */ setup_sci_interrupt()
364 /* Clear pending EC SCI events */ setup_ec_sci()
369 * Enable EC SCI events, and map them to both a PME and the SCI setup_ec_sci()
374 * Management Events (PMEs) - events that bring the system out of setup_ec_sci()
579 /* Enable PME generation for EC-generated events */ xo1_sci_probe()
583 /* Clear pending events */ xo1_sci_probe()
596 /* Enable all EC events */ xo1_sci_probe()
/linux-4.1.27/drivers/usb/host/
H A Dehci-timer.c44 * Lots of different events are triggered from ehci->hrtimer. Whenever
45 * the timer routine runs, it checks each possible event; events that are
47 * The set of enabled events is stored as a collection of bitflags in
51 * Rather than implementing a sorted list or tree of all pending events,
56 * As a result, events might not get handled right away; the actual delay
58 * matter, because none of the events are especially time-critical. The
409 unsigned long events; ehci_hrtimer_func() local
415 events = ehci->enabled_hrtimer_events; ehci_hrtimer_func()
424 for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) { ehci_hrtimer_func()
