event 41 arch/alpha/kernel/perf_event.c struct perf_event *event[MAX_HWEVENTS];
event 152 arch/alpha/kernel/perf_event.c static int ev67_check_constraints(struct perf_event **event,
event 199 arch/alpha/kernel/perf_event.c event[0]->hw.idx = idx0;
event 200 arch/alpha/kernel/perf_event.c event[0]->hw.config_base = config;
event 202 arch/alpha/kernel/perf_event.c event[1]->hw.idx = idx0 ^ 1;
event 203 arch/alpha/kernel/perf_event.c event[1]->hw.config_base = config;
event 252 arch/alpha/kernel/perf_event.c static int alpha_perf_event_set_period(struct perf_event *event,
event 287 arch/alpha/kernel/perf_event.c perf_event_update_userpage(event);
event 307 arch/alpha/kernel/perf_event.c static unsigned long alpha_perf_event_update(struct perf_event *event,
event 330 arch/alpha/kernel/perf_event.c local64_add(delta, &event->count);
event 341 arch/alpha/kernel/perf_event.c struct perf_event *event[], unsigned long *evtype,
event 350 arch/alpha/kernel/perf_event.c event[n] = group;
event 358 arch/alpha/kernel/perf_event.c event[n] = pe;
event 400 arch/alpha/kernel/perf_event.c struct perf_event *pe = cpuc->event[j];
event 412 arch/alpha/kernel/perf_event.c struct perf_event *pe = cpuc->event[j];
event 424 arch/alpha/kernel/perf_event.c cpuc->config = cpuc->event[0]->hw.config_base;
event 433 arch/alpha/kernel/perf_event.c static int alpha_pmu_add(struct perf_event *event, int flags)
event 436 arch/alpha/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 449 arch/alpha/kernel/perf_event.c perf_pmu_disable(event->pmu);
event 458 arch/alpha/kernel/perf_event.c cpuc->event[n0] = event;
event 459 arch/alpha/kernel/perf_event.c cpuc->evtype[n0] = event->hw.event_base;
event 462 arch/alpha/kernel/perf_event.c if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
event 474 arch/alpha/kernel/perf_event.c perf_pmu_enable(event->pmu);
event 485 arch/alpha/kernel/perf_event.c static void alpha_pmu_del(struct perf_event *event, int flags)
event 488 arch/alpha/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 492 arch/alpha/kernel/perf_event.c perf_pmu_disable(event->pmu);
event 496 arch/alpha/kernel/perf_event.c if (event == cpuc->event[j]) {
event 503 arch/alpha/kernel/perf_event.c cpuc->event[j - 1] = cpuc->event[j];
event 510 arch/alpha/kernel/perf_event.c alpha_perf_event_update(event, hwc, idx, 0);
event 511 arch/alpha/kernel/perf_event.c perf_event_update_userpage(event);
event 520 arch/alpha/kernel/perf_event.c perf_pmu_enable(event->pmu);
event 524 arch/alpha/kernel/perf_event.c static void alpha_pmu_read(struct perf_event *event)
event 526 arch/alpha/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 528 arch/alpha/kernel/perf_event.c alpha_perf_event_update(event, hwc, hwc->idx, 0);
event 532 arch/alpha/kernel/perf_event.c static void alpha_pmu_stop(struct perf_event *event, int flags)
event 534 arch/alpha/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 543 arch/alpha/kernel/perf_event.c alpha_perf_event_update(event, hwc, hwc->idx, 0);
event 552 arch/alpha/kernel/perf_event.c static void alpha_pmu_start(struct perf_event *event, int flags)
event 554 arch/alpha/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 562 arch/alpha/kernel/perf_event.c alpha_perf_event_set_period(event, hwc, hwc->idx);
event 594 arch/alpha/kernel/perf_event.c static void hw_perf_event_destroy(struct perf_event *event)
event 602 arch/alpha/kernel/perf_event.c static int __hw_perf_event_init(struct perf_event *event)
event 604 arch/alpha/kernel/perf_event.c struct perf_event_attr *attr = &event->attr;
event 605 arch/alpha/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 649 arch/alpha/kernel/perf_event.c if (event->group_leader != event) {
event 650 arch/alpha/kernel/perf_event.c n = collect_events(event->group_leader,
event 657 arch/alpha/kernel/perf_event.c evts[n] = event;
event 666 arch/alpha/kernel/perf_event.c event->destroy = hw_perf_event_destroy;
event 690 arch/alpha/kernel/perf_event.c static int alpha_pmu_event_init(struct perf_event *event)
event 695 arch/alpha/kernel/perf_event.c if (has_branch_stack(event))
event 698 arch/alpha/kernel/perf_event.c switch (event->attr.type) {
event 712 arch/alpha/kernel/perf_event.c err = __hw_perf_event_init(event);
event 809 arch/alpha/kernel/perf_event.c struct perf_event *event;
event 845 arch/alpha/kernel/perf_event.c event = cpuc->event[j];
event 847 arch/alpha/kernel/perf_event.c if (unlikely(!event)) {
event 855 arch/alpha/kernel/perf_event.c hwc = &event->hw;
event 856 arch/alpha/kernel/perf_event.c alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
event 859 arch/alpha/kernel/perf_event.c if (alpha_perf_event_set_period(event, hwc, idx)) {
event 860 arch/alpha/kernel/perf_event.c if (perf_event_overflow(event, &data, regs)) {
event 864 arch/alpha/kernel/perf_event.c alpha_pmu_stop(event, 0);
event 1434 arch/alpha/kernel/setup.c alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
event 121 arch/alpha/oprofile/common.c oprofilefs_create_ulong(dir, "event", &ctr[i].event);
event 16 arch/alpha/oprofile/op_impl.h unsigned long event;
event 39 arch/alpha/oprofile/op_model_ev4.c ctl |= (ctr[0].enabled ? ctr[0].event << 8 : 14 << 8);
event 40 arch/alpha/oprofile/op_model_ev4.c ctl |= (ctr[1].enabled ? (ctr[1].event - 16) << 32 : 7ul << 32);
event 47 arch/alpha/oprofile/op_model_ev5.c unsigned long event = ctr[i].event;
event 53 arch/alpha/oprofile/op_model_ev5.c if (event == 0)
event 54 arch/alpha/oprofile/op_model_ev5.c event = 12+48;
event 55 arch/alpha/oprofile/op_model_ev5.c else if (event == 2+41)
event 56 arch/alpha/oprofile/op_model_ev5.c event = 4+65;
event 60 arch/alpha/oprofile/op_model_ev5.c if (event < 2)
event 61 arch/alpha/oprofile/op_model_ev5.c ctl |= event << 31;
event 62 arch/alpha/oprofile/op_model_ev5.c else if (event < 24)
event 64 arch/alpha/oprofile/op_model_ev5.c else if (event < 40)
event 65 arch/alpha/oprofile/op_model_ev5.c ctl |= (event - 24) << 4;
event 66 arch/alpha/oprofile/op_model_ev5.c else if (event < 48)
event 67 arch/alpha/oprofile/op_model_ev5.c ctl |= (event - 40) << cbox1_ofs | 15 << 4;
event 68 arch/alpha/oprofile/op_model_ev5.c else if (event < 64)
event 69 arch/alpha/oprofile/op_model_ev5.c ctl |= event - 48;
event 70 arch/alpha/oprofile/op_model_ev5.c else if (event < 72)
event 71 arch/alpha/oprofile/op_model_ev5.c ctl |= (event - 64) << cbox2_ofs | 15;
event 29 arch/alpha/oprofile/op_model_ev6.c if (ctr[0].enabled && ctr[0].event)
event 30 arch/alpha/oprofile/op_model_ev6.c ctl |= (ctr[0].event & 1) << 4;
event 32 arch/alpha/oprofile/op_model_ev6.c ctl |= (ctr[1].event - 2) & 15;
event 33 arch/alpha/oprofile/op_model_ev67.c ctl |= (ctr[1].event & 3) << 2;
event 35 arch/alpha/oprofile/op_model_ev67.c if (ctr[0].event == 0) /* cycles */
event 133 arch/alpha/oprofile/op_model_ev67.c struct op_counter_config *ctr, unsigned long event)
event 135 arch/alpha/oprofile/op_model_ev67.c unsigned long fake_counter = 2 + event;
event 57 arch/arc/include/asm/ptrace.h unsigned long event;
event 78 arch/arc/include/asm/ptrace.h unsigned long event;
event 48 arch/arc/kernel/asm-offsets.c DEFINE(PT_event, offsetof(struct pt_regs, event));
event 118 arch/arc/kernel/perf_event.c static void arc_perf_event_update(struct perf_event *event,
event 130 arch/arc/kernel/perf_event.c local64_add(delta, &event->count);
event 134 arch/arc/kernel/perf_event.c static void arc_pmu_read(struct perf_event *event)
event 136 arch/arc/kernel/perf_event.c arc_perf_event_update(event, &event->hw, event->hw.idx);
event 167 arch/arc/kernel/perf_event.c static int arc_pmu_event_init(struct perf_event *event)
event 169 arch/arc/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 172 arch/arc/kernel/perf_event.c if (!is_sampling_event(event)) {
event 182 arch/arc/kernel/perf_event.c if (event->attr.exclude_user)
event 186 arch/arc/kernel/perf_event.c if (event->attr.exclude_kernel)
event 190 arch/arc/kernel/perf_event.c switch (event->attr.type) {
event 192 arch/arc/kernel/perf_event.c if (event->attr.config >= PERF_COUNT_HW_MAX)
event 194 arch/arc/kernel/perf_event.c if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
event 196 arch/arc/kernel/perf_event.c hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
event 198 arch/arc/kernel/perf_event.c (int)event->attr.config, (int)hwc->config,
event 199 arch/arc/kernel/perf_event.c arc_pmu_ev_hw_map[event->attr.config]);
event 203 arch/arc/kernel/perf_event.c ret = arc_pmu_cache_event(event->attr.config);
event 212 arch/arc/kernel/perf_event.c if (event->attr.config >= arc_pmu->n_events)
event 215 arch/arc/kernel/perf_event.c hwc->config |= event->attr.config;
event 217 arch/arc/kernel/perf_event.c event->attr.config,
event 218 arch/arc/kernel/perf_event.c arc_pmu->raw_entry[event->attr.config].name);
event 243 arch/arc/kernel/perf_event.c static int arc_pmu_event_set_period(struct perf_event *event)
event 245 arch/arc/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 279 arch/arc/kernel/perf_event.c perf_event_update_userpage(event);
event 289 arch/arc/kernel/perf_event.c static void arc_pmu_start(struct perf_event *event, int flags)
event 291 arch/arc/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 302 arch/arc/kernel/perf_event.c arc_pmu_event_set_period(event);
event 305 arch/arc/kernel/perf_event.c if (is_sampling_event(event))
event 314 arch/arc/kernel/perf_event.c static void arc_pmu_stop(struct perf_event *event, int flags)
event 316 arch/arc/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 320 arch/arc/kernel/perf_event.c if (is_sampling_event(event)) {
event 330 arch/arc/kernel/perf_event.c if (!(event->hw.state & PERF_HES_STOPPED)) {
event 337 arch/arc/kernel/perf_event.c event->hw.state |= PERF_HES_STOPPED;
event 341 arch/arc/kernel/perf_event.c !(event->hw.state & PERF_HES_UPTODATE)) {
event 342 arch/arc/kernel/perf_event.c arc_perf_event_update(event, &event->hw, idx);
event 343 arch/arc/kernel/perf_event.c event->hw.state |= PERF_HES_UPTODATE;
event 347 arch/arc/kernel/perf_event.c static void arc_pmu_del(struct perf_event *event, int flags)
event 351 arch/arc/kernel/perf_event.c arc_pmu_stop(event, PERF_EF_UPDATE);
event 352 arch/arc/kernel/perf_event.c __clear_bit(event->hw.idx, pmu_cpu->used_mask);
event 354 arch/arc/kernel/perf_event.c pmu_cpu->act_counter[event->hw.idx] = 0;
event 356 arch/arc/kernel/perf_event.c perf_event_update_userpage(event);
event 360 arch/arc/kernel/perf_event.c static int arc_pmu_add(struct perf_event *event, int flags)
event 363 arch/arc/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 375 arch/arc/kernel/perf_event.c pmu_cpu->act_counter[idx] = event;
event 377 arch/arc/kernel/perf_event.c if (is_sampling_event(event)) {
event 392 arch/arc/kernel/perf_event.c arc_pmu_start(event, PERF_EF_RELOAD);
event 394 arch/arc/kernel/perf_event.c perf_event_update_userpage(event);
event 417 arch/arc/kernel/perf_event.c struct perf_event *event;
event 433 arch/arc/kernel/perf_event.c event = pmu_cpu->act_counter[idx];
event 434 arch/arc/kernel/perf_event.c hwc = &event->hw;
event 438 arch/arc/kernel/perf_event.c arc_perf_event_update(event, &event->hw, event->hw.idx);
event 440 arch/arc/kernel/perf_event.c if (arc_pmu_event_set_period(event)) {
event 441 arch/arc/kernel/perf_event.c if (perf_event_overflow(event, &data, regs))
event 442 arch/arc/kernel/perf_event.c arc_pmu_stop(event, 0);
event 473 arch/arc/kernel/perf_event.c PMU_FORMAT_ATTR(event, "config:0-14");
event 123 arch/arc/kernel/troubleshoot.c pr_info("\n[ECR ]: 0x%08lx => ", regs->event);
event 236 arch/arm/kernel/perf_event_v6.c static inline u64 armv6pmu_read_counter(struct perf_event *event)
event 238 arch/arm/kernel/perf_event_v6.c struct hw_perf_event *hwc = &event->hw;
event 254 arch/arm/kernel/perf_event_v6.c static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
event 256 arch/arm/kernel/perf_event_v6.c struct hw_perf_event *hwc = &event->hw;
event 269 arch/arm/kernel/perf_event_v6.c static void armv6pmu_enable_event(struct perf_event *event)
event 272 arch/arm/kernel/perf_event_v6.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 273 arch/arm/kernel/perf_event_v6.c struct hw_perf_event *hwc = &event->hw;
event 327 arch/arm/kernel/perf_event_v6.c struct perf_event *event = cpuc->events[idx];
event 331 arch/arm/kernel/perf_event_v6.c if (!event)
event 341 arch/arm/kernel/perf_event_v6.c hwc = &event->hw;
event 342 arch/arm/kernel/perf_event_v6.c armpmu_event_update(event);
event 344 arch/arm/kernel/perf_event_v6.c if (!armpmu_event_set_period(event))
event 347 arch/arm/kernel/perf_event_v6.c if (perf_event_overflow(event, &data, regs))
event 348 arch/arm/kernel/perf_event_v6.c cpu_pmu->disable(event);
event 389 arch/arm/kernel/perf_event_v6.c struct perf_event *event)
event 391 arch/arm/kernel/perf_event_v6.c struct hw_perf_event *hwc = &event->hw;
event 415 arch/arm/kernel/perf_event_v6.c struct perf_event *event)
event 417 arch/arm/kernel/perf_event_v6.c clear_bit(event->hw.idx, cpuc->used_mask);
event 420 arch/arm/kernel/perf_event_v6.c static void armv6pmu_disable_event(struct perf_event *event)
event 423 arch/arm/kernel/perf_event_v6.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 424 arch/arm/kernel/perf_event_v6.c struct hw_perf_event *hwc = &event->hw;
event 455 arch/arm/kernel/perf_event_v6.c static void armv6mpcore_pmu_disable_event(struct perf_event *event)
event 458 arch/arm/kernel/perf_event_v6.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 459 arch/arm/kernel/perf_event_v6.c struct hw_perf_event *hwc = &event->hw;
event 486 arch/arm/kernel/perf_event_v6.c static int armv6_map_event(struct perf_event *event)
event 488 arch/arm/kernel/perf_event_v6.c return armpmu_map_event(event, &armv6_perf_map,
event 536 arch/arm/kernel/perf_event_v6.c static int armv6mpcore_map_event(struct perf_event *event)
event 538 arch/arm/kernel/perf_event_v6.c return armpmu_map_event(event, &armv6mpcore_perf_map,
event 535 arch/arm/kernel/perf_event_v7.c PMU_FORMAT_ATTR(event, "config:0-7");
event 746 arch/arm/kernel/perf_event_v7.c static inline u64 armv7pmu_read_counter(struct perf_event *event)
event 748 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 749 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 766 arch/arm/kernel/perf_event_v7.c static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
event 768 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 769 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 871 arch/arm/kernel/perf_event_v7.c static void armv7pmu_enable_event(struct perf_event *event)
event 874 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 875 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 917 arch/arm/kernel/perf_event_v7.c static void armv7pmu_disable_event(struct perf_event *event)
event 920 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 921 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 974 arch/arm/kernel/perf_event_v7.c struct perf_event *event = cpuc->events[idx];
event 978 arch/arm/kernel/perf_event_v7.c if (!event)
event 988 arch/arm/kernel/perf_event_v7.c hwc = &event->hw;
event 989 arch/arm/kernel/perf_event_v7.c armpmu_event_update(event);
event 991 arch/arm/kernel/perf_event_v7.c if (!armpmu_event_set_period(event))
event 994 arch/arm/kernel/perf_event_v7.c if (perf_event_overflow(event, &data, regs))
event 995 arch/arm/kernel/perf_event_v7.c cpu_pmu->disable(event);
event 1033 arch/arm/kernel/perf_event_v7.c struct perf_event *event)
event 1036 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 1037 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1062 arch/arm/kernel/perf_event_v7.c struct perf_event *event)
event 1064 arch/arm/kernel/perf_event_v7.c clear_bit(event->hw.idx, cpuc->used_mask);
event 1070 arch/arm/kernel/perf_event_v7.c static int armv7pmu_set_event_filter(struct hw_perf_event *event,
event 1088 arch/arm/kernel/perf_event_v7.c event->config_base = config_base;
event 1114 arch/arm/kernel/perf_event_v7.c static int armv7_a8_map_event(struct perf_event *event)
event 1116 arch/arm/kernel/perf_event_v7.c return armpmu_map_event(event, &armv7_a8_perf_map,
event 1120 arch/arm/kernel/perf_event_v7.c static int armv7_a9_map_event(struct perf_event *event)
event 1122 arch/arm/kernel/perf_event_v7.c return armpmu_map_event(event, &armv7_a9_perf_map,
event 1126 arch/arm/kernel/perf_event_v7.c static int armv7_a5_map_event(struct perf_event *event)
event 1128 arch/arm/kernel/perf_event_v7.c return armpmu_map_event(event, &armv7_a5_perf_map,
event 1132 arch/arm/kernel/perf_event_v7.c static int armv7_a15_map_event(struct perf_event *event)
event 1134 arch/arm/kernel/perf_event_v7.c return armpmu_map_event(event, &armv7_a15_perf_map,
event 1138 arch/arm/kernel/perf_event_v7.c static int armv7_a7_map_event(struct perf_event *event)
event 1140 arch/arm/kernel/perf_event_v7.c return armpmu_map_event(event, &armv7_a7_perf_map,
event 1144 arch/arm/kernel/perf_event_v7.c static int armv7_a12_map_event(struct perf_event *event)
event 1146 arch/arm/kernel/perf_event_v7.c return armpmu_map_event(event, &armv7_a12_perf_map,
event 1150 arch/arm/kernel/perf_event_v7.c static int krait_map_event(struct perf_event *event)
event 1152 arch/arm/kernel/perf_event_v7.c return armpmu_map_event(event, &krait_perf_map,
event 1156 arch/arm/kernel/perf_event_v7.c static int krait_map_event_no_branch(struct perf_event *event)
event 1158 arch/arm/kernel/perf_event_v7.c return armpmu_map_event(event, &krait_perf_map_no_branch,
event 1162 arch/arm/kernel/perf_event_v7.c static int scorpion_map_event(struct perf_event *event)
event 1164 arch/arm/kernel/perf_event_v7.c return armpmu_map_event(event, &scorpion_perf_map,
event 1324 arch/arm/kernel/perf_event_v7.c #define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */
event 1325 arch/arm/kernel/perf_event_v7.c #define EVENT_GROUP(event) ((event) & 0xf) /* G */
event 1326 arch/arm/kernel/perf_event_v7.c #define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */
event 1327 arch/arm/kernel/perf_event_v7.c #define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */
event 1328 arch/arm/kernel/perf_event_v7.c #define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */
event 1493 arch/arm/kernel/perf_event_v7.c static void krait_pmu_disable_event(struct perf_event *event)
event 1496 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1498 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 1519 arch/arm/kernel/perf_event_v7.c static void krait_pmu_enable_event(struct perf_event *event)
event 1522 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1524 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 1580 arch/arm/kernel/perf_event_v7.c static int krait_event_to_bit(struct perf_event *event, unsigned int region,
event 1584 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1585 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 1607 arch/arm/kernel/perf_event_v7.c struct perf_event *event)
event 1611 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1625 arch/arm/kernel/perf_event_v7.c bit = krait_event_to_bit(event, region, group);
event 1630 arch/arm/kernel/perf_event_v7.c idx = armv7pmu_get_event_idx(cpuc, event);
event 1638 arch/arm/kernel/perf_event_v7.c struct perf_event *event)
event 1641 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1647 arch/arm/kernel/perf_event_v7.c armv7pmu_clear_event_idx(cpuc, event);
event 1649 arch/arm/kernel/perf_event_v7.c bit = krait_event_to_bit(event, region, group);
event 1826 arch/arm/kernel/perf_event_v7.c static void scorpion_pmu_disable_event(struct perf_event *event)
event 1829 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1831 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 1852 arch/arm/kernel/perf_event_v7.c static void scorpion_pmu_enable_event(struct perf_event *event)
event 1855 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1857 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 1913 arch/arm/kernel/perf_event_v7.c static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
event 1917 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1918 arch/arm/kernel/perf_event_v7.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 1940 arch/arm/kernel/perf_event_v7.c struct perf_event *event)
event 1944 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1955 arch/arm/kernel/perf_event_v7.c bit = scorpion_event_to_bit(event, region, group);
event 1960 arch/arm/kernel/perf_event_v7.c idx = armv7pmu_get_event_idx(cpuc, event);
event 1968 arch/arm/kernel/perf_event_v7.c struct perf_event *event)
event 1971 arch/arm/kernel/perf_event_v7.c struct hw_perf_event *hwc = &event->hw;
event 1977 arch/arm/kernel/perf_event_v7.c armv7pmu_clear_event_idx(cpuc, event);
event 1979 arch/arm/kernel/perf_event_v7.c bit = scorpion_event_to_bit(event, region, group);
event 174 arch/arm/kernel/perf_event_xscale.c struct perf_event *event = cpuc->events[idx];
event 177 arch/arm/kernel/perf_event_xscale.c if (!event)
event 183 arch/arm/kernel/perf_event_xscale.c hwc = &event->hw;
event 184 arch/arm/kernel/perf_event_xscale.c armpmu_event_update(event);
event 186 arch/arm/kernel/perf_event_xscale.c if (!armpmu_event_set_period(event))
event 189 arch/arm/kernel/perf_event_xscale.c if (perf_event_overflow(event, &data, regs))
event 190 arch/arm/kernel/perf_event_xscale.c cpu_pmu->disable(event);
event 204 arch/arm/kernel/perf_event_xscale.c static void xscale1pmu_enable_event(struct perf_event *event)
event 207 arch/arm/kernel/perf_event_xscale.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 208 arch/arm/kernel/perf_event_xscale.c struct hw_perf_event *hwc = &event->hw;
event 240 arch/arm/kernel/perf_event_xscale.c static void xscale1pmu_disable_event(struct perf_event *event)
event 243 arch/arm/kernel/perf_event_xscale.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 244 arch/arm/kernel/perf_event_xscale.c struct hw_perf_event *hwc = &event->hw;
event 276 arch/arm/kernel/perf_event_xscale.c struct perf_event *event)
event 278 arch/arm/kernel/perf_event_xscale.c struct hw_perf_event *hwc = &event->hw;
event 296 arch/arm/kernel/perf_event_xscale.c struct perf_event *event)
event 298 arch/arm/kernel/perf_event_xscale.c clear_bit(event->hw.idx, cpuc->used_mask);
event 325 arch/arm/kernel/perf_event_xscale.c static inline u64 xscale1pmu_read_counter(struct perf_event *event)
event 327 arch/arm/kernel/perf_event_xscale.c struct hw_perf_event *hwc = &event->hw;
event 346 arch/arm/kernel/perf_event_xscale.c static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
event 348 arch/arm/kernel/perf_event_xscale.c struct hw_perf_event *hwc = &event->hw;
event 364 arch/arm/kernel/perf_event_xscale.c static int xscale_map_event(struct perf_event *event)
event 366 arch/arm/kernel/perf_event_xscale.c return armpmu_map_event(event, &xscale_perf_map,
event 520 arch/arm/kernel/perf_event_xscale.c struct perf_event *event = cpuc->events[idx];
event 523 arch/arm/kernel/perf_event_xscale.c if (!event)
event 529 arch/arm/kernel/perf_event_xscale.c hwc = &event->hw;
event 530 arch/arm/kernel/perf_event_xscale.c armpmu_event_update(event);
event 532 arch/arm/kernel/perf_event_xscale.c if (!armpmu_event_set_period(event))
event 535 arch/arm/kernel/perf_event_xscale.c if (perf_event_overflow(event, &data, regs))
event 536 arch/arm/kernel/perf_event_xscale.c cpu_pmu->disable(event);
event 550 arch/arm/kernel/perf_event_xscale.c static void xscale2pmu_enable_event(struct perf_event *event)
event 553 arch/arm/kernel/perf_event_xscale.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 554 arch/arm/kernel/perf_event_xscale.c struct hw_perf_event *hwc = &event->hw;
event 596 arch/arm/kernel/perf_event_xscale.c static void xscale2pmu_disable_event(struct perf_event *event)
event 599 arch/arm/kernel/perf_event_xscale.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 600 arch/arm/kernel/perf_event_xscale.c struct hw_perf_event *hwc = &event->hw;
event 650 arch/arm/kernel/perf_event_xscale.c struct perf_event *event)
event 652 arch/arm/kernel/perf_event_xscale.c int idx = xscale1pmu_get_event_idx(cpuc, event);
event 688 arch/arm/kernel/perf_event_xscale.c static inline u64 xscale2pmu_read_counter(struct perf_event *event)
event 690 arch/arm/kernel/perf_event_xscale.c struct hw_perf_event *hwc = &event->hw;
event 715 arch/arm/kernel/perf_event_xscale.c static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
event 717 arch/arm/kernel/perf_event_xscale.c struct hw_perf_event *hwc = &event->hw;
event 252 arch/arm/kernel/sys_oabi-compat.c struct oabi_epoll_event __user *event)
event 261 arch/arm/kernel/sys_oabi-compat.c if (copy_from_user(&user, event, sizeof(user)))
event 122 arch/arm/mach-davinci/board-mityomapl138.c unsigned long event, void *data)
event 68 arch/arm/mach-highbank/highbank.c unsigned long event, void *__dev)
event 75 arch/arm/mach-highbank/highbank.c if (event != BUS_NOTIFY_ADD_DEVICE)
event 119 arch/arm/mach-highbank/highbank.c static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data)
event 123 arch/arm/mach-highbank/highbank.c if (event != 0x1000)
event 161 arch/arm/mach-imx/mmdc.c PMU_FORMAT_ATTR(event, "config:0-63");
event 232 arch/arm/mach-imx/mmdc.c static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
event 236 arch/arm/mach-imx/mmdc.c int cfg = event->attr.config;
event 238 arch/arm/mach-imx/mmdc.c if (is_software_event(event))
event 241 arch/arm/mach-imx/mmdc.c if (event->pmu != pmu)
event 253 arch/arm/mach-imx/mmdc.c static bool mmdc_pmu_group_is_valid(struct perf_event *event)
event 255 arch/arm/mach-imx/mmdc.c struct pmu *pmu = event->pmu;
event 256 arch/arm/mach-imx/mmdc.c struct perf_event *leader = event->group_leader;
event 262 arch/arm/mach-imx/mmdc.c if (event != leader) {
event 263 arch/arm/mach-imx/mmdc.c if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
event 275 arch/arm/mach-imx/mmdc.c static int mmdc_pmu_event_init(struct perf_event *event)
event 277 arch/arm/mach-imx/mmdc.c struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
event 278 arch/arm/mach-imx/mmdc.c int cfg = event->attr.config;
event 280 arch/arm/mach-imx/mmdc.c if (event->attr.type != event->pmu->type)
event 283 arch/arm/mach-imx/mmdc.c if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
event 286 arch/arm/mach-imx/mmdc.c if (event->cpu < 0) {
event 291 arch/arm/mach-imx/mmdc.c if (event->attr.sample_period)
event 297 arch/arm/mach-imx/mmdc.c if (!mmdc_pmu_group_is_valid(event))
event 300 arch/arm/mach-imx/mmdc.c event->cpu = cpumask_first(&pmu_mmdc->cpu);
event 304 arch/arm/mach-imx/mmdc.c static void mmdc_pmu_event_update(struct perf_event *event)
event 306 arch/arm/mach-imx/mmdc.c struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
event 307 arch/arm/mach-imx/mmdc.c struct hw_perf_event *hwc = &event->hw;
event 313 arch/arm/mach-imx/mmdc.c event->attr.config);
event 319 arch/arm/mach-imx/mmdc.c local64_add(delta, &event->count);
event 322 arch/arm/mach-imx/mmdc.c static void mmdc_pmu_event_start(struct perf_event *event, int flags)
event 324 arch/arm/mach-imx/mmdc.c struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
event 325 arch/arm/mach-imx/mmdc.c struct hw_perf_event *hwc = &event->hw;
event 346 arch/arm/mach-imx/mmdc.c val = event->attr.config1;
event 358 arch/arm/mach-imx/mmdc.c static int mmdc_pmu_event_add(struct perf_event *event, int flags)
event 360 arch/arm/mach-imx/mmdc.c struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
event 361 arch/arm/mach-imx/mmdc.c struct hw_perf_event *hwc = &event->hw;
event 363 arch/arm/mach-imx/mmdc.c int cfg = event->attr.config;
event 366 arch/arm/mach-imx/mmdc.c mmdc_pmu_event_start(event, flags);
event 371 arch/arm/mach-imx/mmdc.c pmu_mmdc->mmdc_events[cfg] = event;
event 379 arch/arm/mach-imx/mmdc.c static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
event 381 arch/arm/mach-imx/mmdc.c struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
event 392 arch/arm/mach-imx/mmdc.c mmdc_pmu_event_update(event);
event 395 arch/arm/mach-imx/mmdc.c static void mmdc_pmu_event_del(struct perf_event *event, int flags)
event 397 arch/arm/mach-imx/mmdc.c struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
event 398 arch/arm/mach-imx/mmdc.c int cfg = event->attr.config;
event 406 arch/arm/mach-imx/mmdc.c mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
event 414 arch/arm/mach-imx/mmdc.c struct perf_event *event = pmu_mmdc->mmdc_events[i];
event 416 arch/arm/mach-imx/mmdc.c if (event)
event 417 arch/arm/mach-imx/mmdc.c mmdc_pmu_event_update(event);
event 27 arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h u32 event; /* 0x04 Time Sync Event Register */
event 30 arch/arm/mach-keystone/keystone.c unsigned long event, void *data)
event 34 arch/arm/mach-keystone/keystone.c if (event != BUS_NOTIFY_ADD_DEVICE)
event 95 arch/arm/mach-mvebu/coherency.c unsigned long event, void *__dev)
event 99 arch/arm/mach-mvebu/coherency.c if (event != BUS_NOTIFY_ADD_DEVICE)
event 215 arch/arm/mach-omap2/omap_device.c unsigned long event, void *dev)
event 221 arch/arm/mach-omap2/omap_device.c switch (event) {
event 251 arch/arm/mach-omap2/omap_device.c od->_driver_status = event;
event 556 arch/arm/mach-omap2/prcm-common.h extern int omap_prcm_event_to_irq(const char *event);
event 177 arch/arm/mach-pxa/am200epd.c unsigned long event, void *data)
event 184 arch/arm/mach-pxa/am200epd.c if (event == FB_EVENT_FB_REGISTERED)
event 186 arch/arm/mach-pxa/am200epd.c else if (event == FB_EVENT_FB_UNREGISTERED)
event 223 arch/arm/mach-s3c24xx/common.c void __init samsung_set_timer_source(unsigned int event, unsigned int source)
event 226 arch/arm/mach-s3c24xx/common.c s3c24xx_pwm_variant.output_mask &= ~(BIT(event) | BIT(source));
event 173 arch/arm/mach-s3c64xx/common.c void __init samsung_set_timer_source(unsigned int event, unsigned int source)
event 176 arch/arm/mach-s3c64xx/common.c s3c64xx_pwm_variant.output_mask &= ~(BIT(event) | BIT(source));
event 117 arch/arm/mm/cache-l2x0-pmu.c static void l2x0_pmu_event_read(struct perf_event *event)
event 119 arch/arm/mm/cache-l2x0-pmu.c struct hw_perf_event *hw = &event->hw;
event 128 arch/arm/mm/cache-l2x0-pmu.c local64_add((new_count - prev_count) & mask, &event->count);
event 133 arch/arm/mm/cache-l2x0-pmu.c static void l2x0_pmu_event_configure(struct perf_event *event)
event 135 arch/arm/mm/cache-l2x0-pmu.c struct hw_perf_event *hw = &event->hw;
event 160 arch/arm/mm/cache-l2x0-pmu.c struct perf_event *event = events[i];
event 162 arch/arm/mm/cache-l2x0-pmu.c if (!event)
event 165 arch/arm/mm/cache-l2x0-pmu.c l2x0_pmu_event_read(event);
event 166 arch/arm/mm/cache-l2x0-pmu.c l2x0_pmu_event_configure(event);
event 177 arch/arm/mm/cache-l2x0-pmu.c static void __l2x0_pmu_event_enable(int idx, u32 event)
event 181 arch/arm/mm/cache-l2x0-pmu.c val = event << L2X0_EVENT_CNT_CFG_SRC_SHIFT;
event 186 arch/arm/mm/cache-l2x0-pmu.c static void l2x0_pmu_event_start(struct perf_event *event, int flags)
event 188 arch/arm/mm/cache-l2x0-pmu.c struct hw_perf_event *hw = &event->hw;
event 190 arch/arm/mm/cache-l2x0-pmu.c if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
event 195 arch/arm/mm/cache-l2x0-pmu.c l2x0_pmu_event_configure(event);
event 212 arch/arm/mm/cache-l2x0-pmu.c static void l2x0_pmu_event_stop(struct perf_event *event, int flags)
event 214 arch/arm/mm/cache-l2x0-pmu.c struct hw_perf_event *hw = &event->hw;
event 216 arch/arm/mm/cache-l2x0-pmu.c if (WARN_ON_ONCE(event->hw.state & PERF_HES_STOPPED))
event 224 arch/arm/mm/cache-l2x0-pmu.c l2x0_pmu_event_read(event);
event 229 arch/arm/mm/cache-l2x0-pmu.c static int l2x0_pmu_event_add(struct perf_event *event, int flags)
event 231 arch/arm/mm/cache-l2x0-pmu.c struct hw_perf_event *hw = &event->hw;
event 246 arch/arm/mm/cache-l2x0-pmu.c events[idx] = event;
event 249 arch/arm/mm/cache-l2x0-pmu.c l2x0_pmu_event_configure(event);
event 254 arch/arm/mm/cache-l2x0-pmu.c l2x0_pmu_event_start(event, 0);
event 259 arch/arm/mm/cache-l2x0-pmu.c static void l2x0_pmu_event_del(struct perf_event *event, int flags)
event 261 arch/arm/mm/cache-l2x0-pmu.c struct hw_perf_event *hw = &event->hw;
event 263 arch/arm/mm/cache-l2x0-pmu.c l2x0_pmu_event_stop(event, PERF_EF_UPDATE);
event 272 arch/arm/mm/cache-l2x0-pmu.c static bool l2x0_pmu_group_is_valid(struct perf_event *event)
event 274 arch/arm/mm/cache-l2x0-pmu.c struct pmu *pmu = event->pmu;
event 275 arch/arm/mm/cache-l2x0-pmu.c struct perf_event *leader = event->group_leader;
event 294 arch/arm/mm/cache-l2x0-pmu.c static int l2x0_pmu_event_init(struct perf_event *event)
event 296 arch/arm/mm/cache-l2x0-pmu.c struct hw_perf_event *hw = &event->hw;
event 298 arch/arm/mm/cache-l2x0-pmu.c if (event->attr.type != l2x0_pmu->type)
event 301 arch/arm/mm/cache-l2x0-pmu.c if (is_sampling_event(event) ||
event 302 arch/arm/mm/cache-l2x0-pmu.c event->attach_state & PERF_ATTACH_TASK)
event 305 arch/arm/mm/cache-l2x0-pmu.c if (event->cpu < 0)
event 308 arch/arm/mm/cache-l2x0-pmu.c if (event->attr.config & ~L2X0_EVENT_CNT_CFG_SRC_MASK)
event 311 arch/arm/mm/cache-l2x0-pmu.c hw->config_base = event->attr.config;
event 313 arch/arm/mm/cache-l2x0-pmu.c if (!l2x0_pmu_group_is_valid(event))
event 316 arch/arm/mm/cache-l2x0-pmu.c event->cpu = cpumask_first(&pmu_cpu);
event 21 arch/arm/plat-samsung/include/plat/samsung-time.h extern void __init samsung_set_timer_source(enum samsung_timer_mode event,
event 72 arch/arm64/include/asm/arm_dsu_pmu.h static inline void __dsu_pmu_set_event(int counter, u32 event)
event 75 arch/arm64/include/asm/arm_dsu_pmu.h write_sysreg_s(event, CLUSTERPMXEVTYPER_EL1);
event 320 arch/arm64/kernel/perf_event.c PMU_FORMAT_ATTR(event, "config:0-15");
event 323 arch/arm64/kernel/perf_event.c static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
event 325 arch/arm64/kernel/perf_event.c return event->attr.config1 & 0x1;
event 353 arch/arm64/kernel/perf_event.c static inline bool armv8pmu_event_is_chained(struct perf_event *event)
event 355 arch/arm64/kernel/perf_event.c int idx = event->hw.idx;
event 358 arch/arm64/kernel/perf_event.c armv8pmu_event_is_64bit(event) &&
event 413 arch/arm64/kernel/perf_event.c static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
event 415 arch/arm64/kernel/perf_event.c int idx = event->hw.idx;
event 419 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_chained(event))
event 424 arch/arm64/kernel/perf_event.c static u64 armv8pmu_read_counter(struct perf_event *event)
event 426 arch/arm64/kernel/perf_event.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 427 arch/arm64/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 437 arch/arm64/kernel/perf_event.c value = armv8pmu_read_hw_counter(event);
event 448 arch/arm64/kernel/perf_event.c static inline void armv8pmu_write_hw_counter(struct perf_event *event,
event 451 arch/arm64/kernel/perf_event.c int idx = event->hw.idx;
event 453 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_chained(event)) {
event 461 arch/arm64/kernel/perf_event.c static void armv8pmu_write_counter(struct perf_event *event, u64 value)
event 463 arch/arm64/kernel/perf_event.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 464 arch/arm64/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 477 arch/arm64/kernel/perf_event.c if (!armv8pmu_event_is_64bit(event))
event 481 arch/arm64/kernel/perf_event.c armv8pmu_write_hw_counter(event, value);
event 491 arch/arm64/kernel/perf_event.c static inline void armv8pmu_write_event_type(struct perf_event *event)
event 493 arch/arm64/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 501 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_chained(event)) {
event 519 arch/arm64/kernel/perf_event.c static inline void armv8pmu_enable_event_counter(struct perf_event *event)
event 521 arch/arm64/kernel/perf_event.c struct perf_event_attr *attr = &event->attr;
event 522 arch/arm64/kernel/perf_event.c int idx = event->hw.idx;
event 525 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_chained(event))
event 533 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_chained(event))
event 545 arch/arm64/kernel/perf_event.c static inline void armv8pmu_disable_event_counter(struct perf_event *event)
event 547 arch/arm64/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 548 arch/arm64/kernel/perf_event.c struct perf_event_attr *attr = &event->attr;
event 552 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_chained(event))
event 559 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_chained(event))
event 572 arch/arm64/kernel/perf_event.c static inline int armv8pmu_enable_event_irq(struct perf_event *event)
event 574 arch/arm64/kernel/perf_event.c return armv8pmu_enable_intens(event->hw.idx);
event 589 arch/arm64/kernel/perf_event.c static inline int armv8pmu_disable_event_irq(struct perf_event *event)
event 591 arch/arm64/kernel/perf_event.c return armv8pmu_disable_intens(event->hw.idx);
event 608 arch/arm64/kernel/perf_event.c static void armv8pmu_enable_event(struct perf_event *event)
event 611 arch/arm64/kernel/perf_event.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 623 arch/arm64/kernel/perf_event.c armv8pmu_disable_event_counter(event);
event 628 arch/arm64/kernel/perf_event.c armv8pmu_write_event_type(event);
event 633 arch/arm64/kernel/perf_event.c armv8pmu_enable_event_irq(event);
event 638 arch/arm64/kernel/perf_event.c armv8pmu_enable_event_counter(event);
event 643 arch/arm64/kernel/perf_event.c static void armv8pmu_disable_event(struct perf_event *event)
event 646 arch/arm64/kernel/perf_event.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 657 arch/arm64/kernel/perf_event.c armv8pmu_disable_event_counter(event);
event 662 arch/arm64/kernel/perf_event.c armv8pmu_disable_event_irq(event);
event 719 arch/arm64/kernel/perf_event.c struct perf_event *event = cpuc->events[idx];
event 723 arch/arm64/kernel/perf_event.c if (!event)
event 733 arch/arm64/kernel/perf_event.c hwc = &event->hw;
event 734 arch/arm64/kernel/perf_event.c armpmu_event_update(event);
event 736 arch/arm64/kernel/perf_event.c if (!armpmu_event_set_period(event))
event 739 arch/arm64/kernel/perf_event.c if (perf_event_overflow(event, &data, regs))
event 740 arch/arm64/kernel/perf_event.c cpu_pmu->disable(event);
event 790 arch/arm64/kernel/perf_event.c struct perf_event *event)
event 792 arch/arm64/kernel/perf_event.c struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
event 793 arch/arm64/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 805 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_64bit(event))
event 812 arch/arm64/kernel/perf_event.c struct perf_event *event)
event 814 arch/arm64/kernel/perf_event.c int idx = event->hw.idx;
event 817 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_chained(event))
event 824 arch/arm64/kernel/perf_event.c static int armv8pmu_set_event_filter(struct hw_perf_event *event,
event 863 arch/arm64/kernel/perf_event.c event->config_base = config_base;
event 868 arch/arm64/kernel/perf_event.c static int armv8pmu_filter_match(struct perf_event *event)
event 870 arch/arm64/kernel/perf_event.c unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
event 896 arch/arm64/kernel/perf_event.c static int __armv8_pmuv3_map_event(struct perf_event *event,
event 905 arch/arm64/kernel/perf_event.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
event 907 arch/arm64/kernel/perf_event.c hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
event 911 arch/arm64/kernel/perf_event.c if (armv8pmu_event_is_64bit(event))
event 912 arch/arm64/kernel/perf_event.c event->hw.flags |= ARMPMU_EVT_64BIT;
event 920 arch/arm64/kernel/perf_event.c return armpmu_map_event(event, extra_event_map, extra_cache_map,
event 924 arch/arm64/kernel/perf_event.c static int armv8_pmuv3_map_event(struct perf_event *event)
event 926 arch/arm64/kernel/perf_event.c return __armv8_pmuv3_map_event(event, NULL, NULL);
event 929 arch/arm64/kernel/perf_event.c static int armv8_a53_map_event(struct perf_event *event)
event 931 arch/arm64/kernel/perf_event.c return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
event 934 arch/arm64/kernel/perf_event.c static int armv8_a57_map_event(struct perf_event *event)
event 936 arch/arm64/kernel/perf_event.c return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
event 939 arch/arm64/kernel/perf_event.c static int armv8_a73_map_event(struct perf_event *event)
event 941 arch/arm64/kernel/perf_event.c return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
event 944 arch/arm64/kernel/perf_event.c static int armv8_thunder_map_event(struct perf_event *event)
event 946 arch/arm64/kernel/perf_event.c return __armv8_pmuv3_map_event(event, NULL,
event 950 arch/arm64/kernel/perf_event.c static int armv8_vulcan_map_event(struct perf_event *event)
event 952 arch/arm64/kernel/perf_event.c return __armv8_pmuv3_map_event(event, NULL,
event 1200 arch/arm64/kernel/perf_event.c void arch_perf_update_userpage(struct perf_event *event,
event 26 arch/c6x/include/asm/soc.h extern void soc_assert_event(unsigned int event);
event 878 arch/csky/kernel/perf_event.c int csky_pmu_event_set_period(struct perf_event *event)
event 880 arch/csky/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 914 arch/csky/kernel/perf_event.c perf_event_update_userpage(event);
event 919 arch/csky/kernel/perf_event.c static void csky_perf_event_update(struct perf_event *event,
event 936 arch/csky/kernel/perf_event.c local64_add(delta, &event->count);
event 945 arch/csky/kernel/perf_event.c static void csky_pmu_read(struct perf_event *event)
event 947 arch/csky/kernel/perf_event.c csky_perf_event_update(event, &event->hw);
event 968 arch/csky/kernel/perf_event.c static int csky_pmu_event_init(struct perf_event *event)
event 970 arch/csky/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 973 arch/csky/kernel/perf_event.c switch (event->attr.type) {
event 975 arch/csky/kernel/perf_event.c if (event->attr.config >= PERF_COUNT_HW_MAX)
event 977 arch/csky/kernel/perf_event.c ret = csky_pmu_hw_map[event->attr.config];
event 983 arch/csky/kernel/perf_event.c ret = csky_pmu_cache_event(event->attr.config);
event 989 arch/csky/kernel/perf_event.c if (hw_raw_read_mapping[event->attr.config] == NULL)
event 991 arch/csky/kernel/perf_event.c hwc->idx = event->attr.config;
event 997 arch/csky/kernel/perf_event.c if (event->attr.exclude_user)
event 999 arch/csky/kernel/perf_event.c else if (event->attr.exclude_kernel)
event 1021 arch/csky/kernel/perf_event.c static void csky_pmu_start(struct perf_event *event, int flags)
event 1024 arch/csky/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 1035 arch/csky/kernel/perf_event.c csky_pmu_event_set_period(event);
event 1045 arch/csky/kernel/perf_event.c static void csky_pmu_stop_event(struct perf_event *event)
event 1048 arch/csky/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 1059 arch/csky/kernel/perf_event.c static void csky_pmu_stop(struct perf_event *event, int flags)
event 1061 arch/csky/kernel/perf_event.c if (!(event->hw.state & PERF_HES_STOPPED)) {
event 1062 arch/csky/kernel/perf_event.c csky_pmu_stop_event(event);
event 1063 arch/csky/kernel/perf_event.c event->hw.state |= PERF_HES_STOPPED;
event 1067 arch/csky/kernel/perf_event.c !(event->hw.state & PERF_HES_UPTODATE)) {
event 1068 arch/csky/kernel/perf_event.c csky_perf_event_update(event, &event->hw);
event 1069 arch/csky/kernel/perf_event.c event->hw.state |= PERF_HES_UPTODATE;
event 1073 arch/csky/kernel/perf_event.c static void csky_pmu_del(struct perf_event *event, int flags)
event 1076 arch/csky/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 1078 arch/csky/kernel/perf_event.c csky_pmu_stop(event, PERF_EF_UPDATE);
event 1082 arch/csky/kernel/perf_event.c perf_event_update_userpage(event);
event 1086 arch/csky/kernel/perf_event.c static int csky_pmu_add(struct perf_event *event, int flags)
event 1089 arch/csky/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw;
event 1091 arch/csky/kernel/perf_event.c hw_events->events[hwc->idx] = event;
event 1096 arch/csky/kernel/perf_event.c csky_pmu_start(event, PERF_EF_RELOAD);
event 1098 arch/csky/kernel/perf_event.c perf_event_update_userpage(event);
event 1124 arch/csky/kernel/perf_event.c struct perf_event *event = cpuc->events[idx];
event 1128 arch/csky/kernel/perf_event.c if (!event)
event 1137 arch/csky/kernel/perf_event.c hwc = &event->hw;
event 1138 arch/csky/kernel/perf_event.c csky_perf_event_update(event, &event->hw);
event 1140 arch/csky/kernel/perf_event.c csky_pmu_event_set_period(event);
event 1142 arch/csky/kernel/perf_event.c if (perf_event_overflow(event, &data, regs))
event 1143 arch/csky/kernel/perf_event.c csky_pmu_stop_event(event);
event 114 arch/ia64/kernel/mca.c #define NOTIFY_INIT(event, regs, arg, spin) \
event 116 arch/ia64/kernel/mca.c if ((notify_die((event), "INIT", (regs), (arg), 0, 0) \
event 121 arch/ia64/kernel/mca.c #define NOTIFY_MCA(event, regs, arg, spin) \
event 123 arch/ia64/kernel/mca.c if ((notify_die((event), "MCA", (regs), (arg), 0, 0) \
event 23 arch/ia64/oprofile/perfmon.c int event = arg->pmd_eventid;
event 31 arch/ia64/oprofile/perfmon.c oprofile_add_sample(regs, event);
event 10213 arch/m68k/ifpsp060/src/fplsp.S # if enabled so the operating system can log the event. #
event 169 arch/mips/cavium-octeon/executive/cvmx-l2c.c void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
event 179 arch/mips/cavium-octeon/executive/cvmx-l2c.c pfctl.s.cnt0sel = event;
event 184 arch/mips/cavium-octeon/executive/cvmx-l2c.c pfctl.s.cnt1sel = event;
event 189 arch/mips/cavium-octeon/executive/cvmx-l2c.c pfctl.s.cnt2sel = event;
event 195 arch/mips/cavium-octeon/executive/cvmx-l2c.c pfctl.s.cnt3sel = event;
event 214 arch/mips/cavium-octeon/executive/cvmx-l2c.c l2c_tadx_prf.s.cnt0sel = event;
event 217 arch/mips/cavium-octeon/executive/cvmx-l2c.c l2c_tadx_prf.s.cnt1sel = event;
event 220 arch/mips/cavium-octeon/executive/cvmx-l2c.c l2c_tadx_prf.s.cnt2sel = event;
event 224 arch/mips/cavium-octeon/executive/cvmx-l2c.c l2c_tadx_prf.s.cnt3sel = event;
event 54 arch/mips/dec/ecc-berr.c const char *kind, *agent, *cycle, *event;
event 82 arch/mips/dec/ecc-berr.c event = eccstr;
event 86 arch/mips/dec/ecc-berr.c event = (erraddr & KN0X_EAR_CPU) ? timestr : overstr;
event 185 arch/mips/dec/ecc-berr.c kind, agent, cycle, event, address);
event 81 arch/mips/dec/kn01-berr.c const char *kind, *agent, *cycle, *event;
event 127 arch/mips/dec/kn01-berr.c event = paritystr;
event 130 arch/mips/dec/kn01-berr.c event = timestr;
event 138 arch/mips/dec/kn01-berr.c kind, agent, cycle, event, address);
event 53 arch/mips/dec/kn02xa-berr.c const char *kind, *agent, *cycle, *event;
event 73 arch/mips/dec/kn02xa-berr.c event = paritystr;
event 76 arch/mips/dec/kn02xa-berr.c event = timestr;
event 84 arch/mips/dec/kn02xa-berr.c kind, agent, cycle, event, address);
event 169 arch/mips/include/asm/octeon/cvmx-l2c.h void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
event 95 arch/mips/kernel/perf_event_mipsxx.c #define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
event 317 arch/mips/kernel/perf_event_mipsxx.c struct perf_event *event = container_of(evt, struct perf_event, hw);
event 344 arch/mips/kernel/perf_event_mipsxx.c cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
event 370 arch/mips/kernel/perf_event_mipsxx.c static int mipspmu_event_set_period(struct perf_event *event,
event 401 arch/mips/kernel/perf_event_mipsxx.c perf_event_update_userpage(event);
event 406 arch/mips/kernel/perf_event_mipsxx.c static void mipspmu_event_update(struct perf_event *event,
event 423 arch/mips/kernel/perf_event_mipsxx.c local64_add(delta, &event->count);
event 427 arch/mips/kernel/perf_event_mipsxx.c static void mipspmu_start(struct perf_event *event, int flags)
event 429 arch/mips/kernel/perf_event_mipsxx.c struct hw_perf_event *hwc = &event->hw;
event 437 arch/mips/kernel/perf_event_mipsxx.c mipspmu_event_set_period(event, hwc, hwc->idx);
event 443 arch/mips/kernel/perf_event_mipsxx.c static void mipspmu_stop(struct perf_event *event, int flags)
event 445 arch/mips/kernel/perf_event_mipsxx.c struct hw_perf_event *hwc = &event->hw;
event 451 arch/mips/kernel/perf_event_mipsxx.c mipspmu_event_update(event, hwc, hwc->idx);
event 456 arch/mips/kernel/perf_event_mipsxx.c static int mipspmu_add(struct perf_event *event, int flags)
event 459 arch/mips/kernel/perf_event_mipsxx.c struct hw_perf_event *hwc = &event->hw;
event 463 arch/mips/kernel/perf_event_mipsxx.c perf_pmu_disable(event->pmu);
event 476 arch/mips/kernel/perf_event_mipsxx.c event->hw.idx = idx;
event 478 arch/mips/kernel/perf_event_mipsxx.c cpuc->events[idx] = event;
event 482 arch/mips/kernel/perf_event_mipsxx.c mipspmu_start(event, PERF_EF_RELOAD);
event 485 arch/mips/kernel/perf_event_mipsxx.c perf_event_update_userpage(event);
event 488 arch/mips/kernel/perf_event_mipsxx.c perf_pmu_enable(event->pmu);
event 492 arch/mips/kernel/perf_event_mipsxx.c static void mipspmu_del(struct perf_event *event, int flags)
event 495 arch/mips/kernel/perf_event_mipsxx.c struct hw_perf_event *hwc = &event->hw;
event 500 arch/mips/kernel/perf_event_mipsxx.c mipspmu_stop(event, PERF_EF_UPDATE);
event 504 arch/mips/kernel/perf_event_mipsxx.c perf_event_update_userpage(event);
event 507 arch/mips/kernel/perf_event_mipsxx.c static void mipspmu_read(struct perf_event *event)
event 509 arch/mips/kernel/perf_event_mipsxx.c struct hw_perf_event *hwc = &event->hw;
event 515 arch/mips/kernel/perf_event_mipsxx.c mipspmu_event_update(event, hwc, hwc->idx);
event 592 arch/mips/kernel/perf_event_mipsxx.c static int __hw_perf_event_init(struct perf_event *event);
event 594 arch/mips/kernel/perf_event_mipsxx.c static void hw_perf_event_destroy(struct perf_event *event)
event 609 arch/mips/kernel/perf_event_mipsxx.c static int mipspmu_event_init(struct perf_event *event)
event 614 arch/mips/kernel/perf_event_mipsxx.c if (has_branch_stack(event))
event 617 arch/mips/kernel/perf_event_mipsxx.c switch (event->attr.type) {
event 627 arch/mips/kernel/perf_event_mipsxx.c if (event->cpu >= 0 && !cpu_online(event->cpu))
event 643 arch/mips/kernel/perf_event_mipsxx.c return __hw_perf_event_init(event);
event 711 arch/mips/kernel/perf_event_mipsxx.c static int validate_group(struct perf_event *event)
event 713 arch/mips/kernel/perf_event_mipsxx.c struct perf_event *sibling, *leader = event->group_leader;
event 726 arch/mips/kernel/perf_event_mipsxx.c if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
event 737 arch/mips/kernel/perf_event_mipsxx.c struct perf_event *event = cpuc->events[idx];
event 738 arch/mips/kernel/perf_event_mipsxx.c struct hw_perf_event *hwc = &event->hw;
event 740 arch/mips/kernel/perf_event_mipsxx.c mipspmu_event_update(event, hwc, idx);
event 741 arch/mips/kernel/perf_event_mipsxx.c data->period = event->hw.last_period;
event 742 arch/mips/kernel/perf_event_mipsxx.c if (!mipspmu_event_set_period(event, hwc, idx))
event 745 arch/mips/kernel/perf_event_mipsxx.c if (perf_event_overflow(event, data, regs))
event 1279 arch/mips/kernel/perf_event_mipsxx.c static int __hw_perf_event_init(struct perf_event *event)
event 1281 arch/mips/kernel/perf_event_mipsxx.c struct perf_event_attr *attr = &event->attr;
event 1282 arch/mips/kernel/perf_event_mipsxx.c struct hw_perf_event *hwc = &event->hw;
event 1287 arch/mips/kernel/perf_event_mipsxx.c if (PERF_TYPE_HARDWARE == event->attr.type) {
event 1288 arch/mips/kernel/perf_event_mipsxx.c if (event->attr.config >= PERF_COUNT_HW_MAX)
event 1290 arch/mips/kernel/perf_event_mipsxx.c pev = mipspmu_map_general_event(event->attr.config);
event 1291 arch/mips/kernel/perf_event_mipsxx.c } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
event 1292 arch/mips/kernel/perf_event_mipsxx.c pev = mipspmu_map_cache_event(event->attr.config);
event 1293 arch/mips/kernel/perf_event_mipsxx.c } else if (PERF_TYPE_RAW == event->attr.type) {
event 1296 arch/mips/kernel/perf_event_mipsxx.c pev = mipspmu.map_raw_event(event->attr.config);
event 1303 arch/mips/kernel/perf_event_mipsxx.c if (PERF_TYPE_RAW == event->attr.type)
event 1315 arch/mips/kernel/perf_event_mipsxx.c if (PERF_TYPE_RAW == event->attr.type)
event 1343 arch/mips/kernel/perf_event_mipsxx.c if (event->group_leader != event)
event 1344 arch/mips/kernel/perf_event_mipsxx.c err = validate_group(event);
event 1346 arch/mips/kernel/perf_event_mipsxx.c event->destroy = hw_perf_event_destroy;
event 1349 arch/mips/kernel/perf_event_mipsxx.c event->destroy(event);
event 671 arch/mips/kernel/pm-cps.c unsigned long event, void *ptr)
event 675 arch/mips/kernel/pm-cps.c switch (event) {
event 79 arch/mips/lasat/setup.c unsigned long event, void *ptr)
event 91 arch/mips/lasat/setup.c unsigned long event, void *ptr)
event 50 arch/mips/oprofile/common.c oprofilefs_create_ulong(dir, "event", &ctr[i].event);
event 18 arch/mips/oprofile/op_impl.h unsigned long event;
event 28 arch/mips/oprofile/op_model_loongson2.c #define LOONGSON2_PERFCTRL_EVENT(idx, event) \
event 29 arch/mips/oprofile/op_model_loongson2.c (((event) & 0x0f) << ((idx) ? 9 : 5))
event 64 arch/mips/oprofile/op_model_loongson2.c ctrl |= LOONGSON2_PERFCTRL_EVENT(0, cfg[0].event);
event 69 arch/mips/oprofile/op_model_loongson2.c ctrl |= LOONGSON2_PERFCTRL_EVENT(1, cfg[1].event);
event 28 arch/mips/oprofile/op_model_loongson3.c #define LOONGSON3_PERFCTRL_EVENT(idx, event) \
event 29 arch/mips/oprofile/op_model_loongson3.c (((event) & (idx ? 0x0f : 0x3f)) << 5)
event 72 arch/mips/oprofile/op_model_loongson3.c control1 |= LOONGSON3_PERFCTRL_EVENT(0, ctr[0].event) |
event 82 arch/mips/oprofile/op_model_loongson3.c control2 |= LOONGSON3_PERFCTRL_EVENT(1, ctr[1].event) |
event 18 arch/mips/oprofile/op_model_mipsxx.c #define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
event 148 arch/mips/oprofile/op_model_mipsxx.c reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
event 166 arch/mips/sgi-ip22/ip22-reset.c static int panic_event(struct notifier_block *this, unsigned long event,
event 111 arch/mips/sgi-ip32/ip32-reset.c static int panic_event(struct notifier_block *this, unsigned long event,
event 332 arch/mips/txx9/generic/setup_tx4939.c unsigned long event,
event 337 arch/mips/txx9/generic/setup_tx4939.c if (event == NETDEV_CHANGE && netif_carrier_ok(dev)) {
event 68 arch/nds32/include/asm/pmu.h void (*enable)(struct perf_event *event);
event 69 arch/nds32/include/asm/pmu.h void (*disable)(struct perf_event *event);
event 71 arch/nds32/include/asm/pmu.h struct perf_event *event);
event 74 arch/nds32/include/asm/pmu.h u32 (*read_counter)(struct perf_event *event);
event 75 arch/nds32/include/asm/pmu.h void (*write_counter)(struct perf_event *event, u32 val);
event 81 arch/nds32/include/asm/pmu.h int (*map_event)(struct perf_event *event);
event 93 arch/nds32/include/asm/pmu.h u64 nds32_pmu_event_update(struct perf_event *event);
event 95 arch/nds32/include/asm/pmu.h int nds32_pmu_event_set_period(struct perf_event *event);
event 188 arch/nds32/include/asm/pmu.h static inline int get_converted_event_idx(unsigned long event)
event 192 arch/nds32/include/asm/pmu.h if ((event) > SPAV3_0_SEL_BASE && event < SPAV3_0_SEL_LAST) {
event 194 arch/nds32/include/asm/pmu.h } else if ((event) > SPAV3_1_SEL_BASE && event < SPAV3_1_SEL_LAST) {
event 196 arch/nds32/include/asm/pmu.h } else if ((event) > SPAV3_2_SEL_BASE && event < SPAV3_2_SEL_LAST) {
event 207 arch/nds32/include/asm/pmu.h static inline u32 get_converted_evet_hw_num(u32 event)
event 209 arch/nds32/include/asm/pmu.h if (event > SPAV3_0_SEL_BASE && event < SPAV3_0_SEL_LAST)
event 210 arch/nds32/include/asm/pmu.h event -= PFM_OFFSET_MAGIC_0;
event 211 arch/nds32/include/asm/pmu.h else if (event > SPAV3_1_SEL_BASE && event < SPAV3_1_SEL_LAST)
event 212 arch/nds32/include/asm/pmu.h event -= PFM_OFFSET_MAGIC_1;
event 213 arch/nds32/include/asm/pmu.h else if (event > SPAV3_2_SEL_BASE && event < SPAV3_2_SEL_LAST)
event 214 arch/nds32/include/asm/pmu.h event -= PFM_OFFSET_MAGIC_2;
event 215 arch/nds32/include/asm/pmu.h else if (event != 0)
event 218 arch/nds32/include/asm/pmu.h return event;
event 380 arch/nds32/include/asm/pmu.h int nds32_pmu_map_event(struct perf_event *event,
event 110 arch/nds32/kernel/perf_event_cpu.c nds32_pmu_map_event(struct perf_event *event,
event 117 arch/nds32/kernel/perf_event_cpu.c u64 config = event->attr.config;
event 119 arch/nds32/kernel/perf_event_cpu.c switch (event->attr.type) {
event 131 arch/nds32/kernel/perf_event_cpu.c static int nds32_spav3_map_event(struct perf_event *event)
event 133 arch/nds32/kernel/perf_event_cpu.c return nds32_pmu_map_event(event, &nds32_pfm_perf_map,
event 183 arch/nds32/kernel/perf_event_cpu.c int nds32_pmu_event_set_period(struct perf_event *event)
event 185 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
event 186 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 218 arch/nds32/kernel/perf_event_cpu.c nds32_pmu->write_counter(event, (u64)(-left) & nds32_pmu->max_period);
event 220 arch/nds32/kernel/perf_event_cpu.c perf_event_update_userpage(event);
event 251 arch/nds32/kernel/perf_event_cpu.c struct perf_event *event = cpuc->events[idx];
event 255 arch/nds32/kernel/perf_event_cpu.c if (!event)
event 265 arch/nds32/kernel/perf_event_cpu.c hwc = &event->hw;
event 266 arch/nds32/kernel/perf_event_cpu.c nds32_pmu_event_update(event);
event 268 arch/nds32/kernel/perf_event_cpu.c if (!nds32_pmu_event_set_period(event))
event 271 arch/nds32/kernel/perf_event_cpu.c if (perf_event_overflow(event, &data, regs))
event 272 arch/nds32/kernel/perf_event_cpu.c cpu_pmu->disable(event);
event 307 arch/nds32/kernel/perf_event_cpu.c static int nds32_pmu_set_event_filter(struct hw_perf_event *event,
event 311 arch/nds32/kernel/perf_event_cpu.c int idx = event->idx;
event 333 arch/nds32/kernel/perf_event_cpu.c event->config_base |= config_base;
event 412 arch/nds32/kernel/perf_event_cpu.c static void nds32_pmu_enable_event(struct perf_event *event)
event 416 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 417 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
event 441 arch/nds32/kernel/perf_event_cpu.c cpu_pmu->set_event_filter(hwc, &event->attr)) &&
event 442 arch/nds32/kernel/perf_event_cpu.c event_requires_mode_exclusion(&event->attr)) {
event 464 arch/nds32/kernel/perf_event_cpu.c static void nds32_pmu_disable_event(struct perf_event *event)
event 467 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 468 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
event 495 arch/nds32/kernel/perf_event_cpu.c static inline u32 nds32_pmu_read_counter(struct perf_event *event)
event 497 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
event 498 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 524 arch/nds32/kernel/perf_event_cpu.c static inline void nds32_pmu_write_counter(struct perf_event *event, u32 value)
event 526 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
event 527 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 552 arch/nds32/kernel/perf_event_cpu.c struct perf_event *event)
event 555 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 752 arch/nds32/kernel/perf_event_cpu.c struct perf_event *event)
event 754 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
event 756 arch/nds32/kernel/perf_event_cpu.c if (is_software_event(event))
event 759 arch/nds32/kernel/perf_event_cpu.c if (event->pmu != pmu)
event 762 arch/nds32/kernel/perf_event_cpu.c if (event->state < PERF_EVENT_STATE_OFF)
event 765 arch/nds32/kernel/perf_event_cpu.c if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
event 768 arch/nds32/kernel/perf_event_cpu.c return nds32_pmu->get_event_idx(hw_events, event) >= 0;
event 771 arch/nds32/kernel/perf_event_cpu.c static int validate_group(struct perf_event *event)
event 773 arch/nds32/kernel/perf_event_cpu.c struct perf_event *sibling, *leader = event->group_leader;
event 782 arch/nds32/kernel/perf_event_cpu.c if (!validate_event(event->pmu, &fake_pmu, leader))
event 786 arch/nds32/kernel/perf_event_cpu.c if (!validate_event(event->pmu, &fake_pmu, sibling))
event 790 arch/nds32/kernel/perf_event_cpu.c if (!validate_event(event->pmu, &fake_pmu, event))
event 796 arch/nds32/kernel/perf_event_cpu.c static int __hw_perf_event_init(struct perf_event *event)
event 798 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
event 799 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 802 arch/nds32/kernel/perf_event_cpu.c mapping = nds32_pmu->map_event(event);
event 805 arch/nds32/kernel/perf_event_cpu.c pr_debug("event %x:%llx not supported\n", event->attr.type,
event 806 arch/nds32/kernel/perf_event_cpu.c event->attr.config);
event 825 arch/nds32/kernel/perf_event_cpu.c nds32_pmu->set_event_filter(hwc, &event->attr)) &&
event 826 arch/nds32/kernel/perf_event_cpu.c event_requires_mode_exclusion(&event->attr)) {
event 849 arch/nds32/kernel/perf_event_cpu.c if (event->group_leader != event) {
event 850 arch/nds32/kernel/perf_event_cpu.c if (validate_group(event) != 0)
event 857 arch/nds32/kernel/perf_event_cpu.c static int nds32_pmu_event_init(struct perf_event *event)
event 859 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
event 864 arch/nds32/kernel/perf_event_cpu.c if (has_branch_stack(event))
event 867 arch/nds32/kernel/perf_event_cpu.c if (nds32_pmu->map_event(event) == -ENOENT)
event 883 arch/nds32/kernel/perf_event_cpu.c err = __hw_perf_event_init(event);
event 888 arch/nds32/kernel/perf_event_cpu.c static void nds32_start(struct perf_event *event, int flags)
event 890 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
event 891 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 901 arch/nds32/kernel/perf_event_cpu.c nds32_pmu_event_set_period(event);
event 903 arch/nds32/kernel/perf_event_cpu.c nds32_pmu->enable(event);
event 906 arch/nds32/kernel/perf_event_cpu.c static int nds32_pmu_add(struct perf_event *event, int flags)
event 908 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
event 910 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 914 arch/nds32/kernel/perf_event_cpu.c perf_pmu_disable(event->pmu);
event 917 arch/nds32/kernel/perf_event_cpu.c idx = nds32_pmu->get_event_idx(hw_events, event);
event 927 arch/nds32/kernel/perf_event_cpu.c event->hw.idx = idx;
event 928 arch/nds32/kernel/perf_event_cpu.c nds32_pmu->disable(event);
event 929 arch/nds32/kernel/perf_event_cpu.c hw_events->events[idx] = event;
event 933 arch/nds32/kernel/perf_event_cpu.c nds32_start(event, PERF_EF_RELOAD);
event 936 arch/nds32/kernel/perf_event_cpu.c perf_event_update_userpage(event);
event 939 arch/nds32/kernel/perf_event_cpu.c perf_pmu_enable(event->pmu);
event 943 arch/nds32/kernel/perf_event_cpu.c u64 nds32_pmu_event_update(struct perf_event *event)
event 945 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
event 946 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw;
event 951 arch/nds32/kernel/perf_event_cpu.c new_raw_count = nds32_pmu->read_counter(event);
event 963 arch/nds32/kernel/perf_event_cpu.c local64_add(delta, &event->count);
event 969
arch/nds32/kernel/perf_event_cpu.c static void nds32_stop(struct perf_event *event, int flags) event 971 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu); event 972 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw; event 978 arch/nds32/kernel/perf_event_cpu.c nds32_pmu->disable(event); event 979 arch/nds32/kernel/perf_event_cpu.c nds32_pmu_event_update(event); event 984 arch/nds32/kernel/perf_event_cpu.c static void nds32_pmu_del(struct perf_event *event, int flags) event 986 arch/nds32/kernel/perf_event_cpu.c struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu); event 988 arch/nds32/kernel/perf_event_cpu.c struct hw_perf_event *hwc = &event->hw; event 991 arch/nds32/kernel/perf_event_cpu.c nds32_stop(event, PERF_EF_UPDATE); event 995 arch/nds32/kernel/perf_event_cpu.c perf_event_update_userpage(event); event 998 arch/nds32/kernel/perf_event_cpu.c static void nds32_pmu_read(struct perf_event *event) event 1000 arch/nds32/kernel/perf_event_cpu.c nds32_pmu_event_update(event); event 1004 arch/nds32/kernel/perf_event_cpu.c PMU_FORMAT_ATTR(event, "config:0-63"); event 87 arch/parisc/kernel/pdc_chassis.c unsigned long event, void *ptr) event 107 arch/parisc/kernel/pdc_chassis.c unsigned long event, void *ptr) event 17 arch/powerpc/include/asm/oprofile_impl.h unsigned long event; event 42 arch/powerpc/kernel/eeh_event.c struct eeh_event *event; event 50 arch/powerpc/kernel/eeh_event.c event = NULL; event 52 arch/powerpc/kernel/eeh_event.c event = list_entry(eeh_eventlist.next, event 54 arch/powerpc/kernel/eeh_event.c list_del(&event->list); event 57 arch/powerpc/kernel/eeh_event.c if (!event) event 61 arch/powerpc/kernel/eeh_event.c if (event->pe) event 62 arch/powerpc/kernel/eeh_event.c eeh_handle_normal_event(event->pe); event 66 arch/powerpc/kernel/eeh_event.c kfree(event); event 105 arch/powerpc/kernel/eeh_event.c struct eeh_event *event; event 107 arch/powerpc/kernel/eeh_event.c event = kzalloc(sizeof(*event), GFP_ATOMIC); event 108 arch/powerpc/kernel/eeh_event.c if (!event) { event 112 arch/powerpc/kernel/eeh_event.c event->pe = pe; event 134 arch/powerpc/kernel/eeh_event.c list_add(&event->list, &eeh_eventlist); event 170 arch/powerpc/kernel/eeh_event.c struct eeh_event *event, *tmp; event 182 arch/powerpc/kernel/eeh_event.c list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) { event 183 arch/powerpc/kernel/eeh_event.c if (!force && event->pe && event 184 arch/powerpc/kernel/eeh_event.c (event->pe->state & EEH_PE_ISOLATED)) event 188 arch/powerpc/kernel/eeh_event.c list_del(&event->list); event 189 arch/powerpc/kernel/eeh_event.c kfree(event); event 191 arch/powerpc/kernel/eeh_event.c if (event->pe && event->pe->phb == pe->phb) { event 192 arch/powerpc/kernel/eeh_event.c list_del(&event->list); event 193 arch/powerpc/kernel/eeh_event.c kfree(event); event 195 arch/powerpc/kernel/eeh_event.c } else if (event->pe == pe) { event 196 arch/powerpc/kernel/eeh_event.c list_del(&event->list); event 197 arch/powerpc/kernel/eeh_event.c kfree(event); event 695 arch/powerpc/kernel/setup-common.c unsigned long event, void *ptr) event 162 arch/powerpc/oprofile/common.c oprofilefs_create_ulong(dir, "event", &ctr[i].event); event 35 arch/powerpc/oprofile/op_model_7450.c #define mmcr0_event1(event) \ event 36 arch/powerpc/oprofile/op_model_7450.c ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL) event 37 arch/powerpc/oprofile/op_model_7450.c #define mmcr0_event2(event) \ event 38 arch/powerpc/oprofile/op_model_7450.c ((event << 
MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL) event 40 arch/powerpc/oprofile/op_model_7450.c #define mmcr1_event3(event) \ event 41 arch/powerpc/oprofile/op_model_7450.c ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL) event 42 arch/powerpc/oprofile/op_model_7450.c #define mmcr1_event4(event) \ event 43 arch/powerpc/oprofile/op_model_7450.c ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL) event 44 arch/powerpc/oprofile/op_model_7450.c #define mmcr1_event5(event) \ event 45 arch/powerpc/oprofile/op_model_7450.c ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL) event 46 arch/powerpc/oprofile/op_model_7450.c #define mmcr1_event6(event) \ event 47 arch/powerpc/oprofile/op_model_7450.c ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL) event 108 arch/powerpc/oprofile/op_model_7450.c mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event) event 109 arch/powerpc/oprofile/op_model_7450.c | mmcr0_event2(ctr[1].event); event 119 arch/powerpc/oprofile/op_model_7450.c mmcr1_val = mmcr1_event3(ctr[2].event) event 120 arch/powerpc/oprofile/op_model_7450.c | mmcr1_event4(ctr[3].event); event 122 arch/powerpc/oprofile/op_model_7450.c mmcr1_val |= mmcr1_event5(ctr[4].event) event 123 arch/powerpc/oprofile/op_model_7450.c | mmcr1_event6(ctr[5].event); event 282 arch/powerpc/oprofile/op_model_cell.c static void set_pm_event(u32 ctr, int event, u32 unit_mask) event 289 arch/powerpc/oprofile/op_model_cell.c if (event == PPU_CYCLES_EVENT_NUM) { event 307 arch/powerpc/oprofile/op_model_cell.c signal_bit = (event % 100); event 311 arch/powerpc/oprofile/op_model_cell.c p->signal_group = event / 100; event 736 arch/powerpc/oprofile/op_model_cell.c set_pm_event(0, ctr[0].event, ctr[0].unit_mask); event 771 arch/powerpc/oprofile/op_model_cell.c pmc_cntrl[0][i].evnts = ctr[i].event; event 785 arch/powerpc/oprofile/op_model_cell.c if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111)) event 786 arch/powerpc/oprofile/op_model_cell.c pmc_cntrl[1][i].evnts = ctr[i].event + 19; event 787 arch/powerpc/oprofile/op_model_cell.c else if (ctr[i].event == 2203) event 788 arch/powerpc/oprofile/op_model_cell.c pmc_cntrl[1][i].evnts = ctr[i].event; event 789 arch/powerpc/oprofile/op_model_cell.c else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215)) event 790 arch/powerpc/oprofile/op_model_cell.c pmc_cntrl[1][i].evnts = ctr[i].event + 16; event 792 arch/powerpc/oprofile/op_model_cell.c pmc_cntrl[1][i].evnts = ctr[i].event; event 866 arch/powerpc/oprofile/op_model_cell.c if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { event 869 arch/powerpc/oprofile/op_model_cell.c } else if ((ctr[0].event >= SPU_EVENT_NUM_START) && event 870 arch/powerpc/oprofile/op_model_cell.c (ctr[0].event <= SPU_EVENT_NUM_STOP)) { event 166 arch/powerpc/oprofile/op_model_fsl_emb.c static void set_pmc_event(int ctr, int event) event 173 arch/powerpc/oprofile/op_model_fsl_emb.c ((event << PMLCA_EVENT_SHIFT) & event 265 arch/powerpc/oprofile/op_model_fsl_emb.c set_pmc_event(i, ctr[i].event); event 48 arch/powerpc/perf/8xx-pmu.c static int event_type(struct perf_event *event) event 50 arch/powerpc/perf/8xx-pmu.c switch (event->attr.type) { event 52 arch/powerpc/perf/8xx-pmu.c if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) event 54 arch/powerpc/perf/8xx-pmu.c if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) event 58 arch/powerpc/perf/8xx-pmu.c if (event->attr.config == ITLB_LOAD_MISS) event 60 arch/powerpc/perf/8xx-pmu.c if (event->attr.config == DTLB_LOAD_MISS) event 71 arch/powerpc/perf/8xx-pmu.c static int mpc8xx_pmu_event_init(struct perf_event *event) event 73 
arch/powerpc/perf/8xx-pmu.c int type = event_type(event); event 80 arch/powerpc/perf/8xx-pmu.c static int mpc8xx_pmu_add(struct perf_event *event, int flags) event 82 arch/powerpc/perf/8xx-pmu.c int type = event_type(event); event 119 arch/powerpc/perf/8xx-pmu.c local64_set(&event->hw.prev_count, val); event 123 arch/powerpc/perf/8xx-pmu.c static void mpc8xx_pmu_read(struct perf_event *event) event 125 arch/powerpc/perf/8xx-pmu.c int type = event_type(event); event 132 arch/powerpc/perf/8xx-pmu.c prev = local64_read(&event->hw.prev_count); event 153 arch/powerpc/perf/8xx-pmu.c } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); event 155 arch/powerpc/perf/8xx-pmu.c local64_add(delta, &event->count); event 158 arch/powerpc/perf/8xx-pmu.c static void mpc8xx_pmu_del(struct perf_event *event, int flags) event 164 arch/powerpc/perf/8xx-pmu.c mpc8xx_pmu_read(event); event 167 arch/powerpc/perf/8xx-pmu.c switch (event_type(event)) { event 37 arch/powerpc/perf/core-book3s.c struct perf_event *event[MAX_HWEVENTS]; event 118 arch/powerpc/perf/core-book3s.c static bool is_ebb_event(struct perf_event *event) { return false; } event 119 arch/powerpc/perf/core-book3s.c static int ebb_event_check(struct perf_event *event) { return 0; } event 120 arch/powerpc/perf/core-book3s.c static void ebb_event_add(struct perf_event *event) { } event 127 arch/powerpc/perf/core-book3s.c static inline void power_pmu_bhrb_enable(struct perf_event *event) {} event 128 arch/powerpc/perf/core-book3s.c static inline void power_pmu_bhrb_disable(struct perf_event *event) {} event 367 arch/powerpc/perf/core-book3s.c static void power_pmu_bhrb_enable(struct perf_event *event) event 375 arch/powerpc/perf/core-book3s.c if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { event 377 arch/powerpc/perf/core-book3s.c cpuhw->bhrb_context = event->ctx; event 380 arch/powerpc/perf/core-book3s.c perf_sched_cb_inc(event->ctx->pmu); event 383 arch/powerpc/perf/core-book3s.c static void power_pmu_bhrb_disable(struct perf_event *event) event 392 arch/powerpc/perf/core-book3s.c perf_sched_cb_dec(event->ctx->pmu); event 532 arch/powerpc/perf/core-book3s.c static bool is_ebb_event(struct perf_event *event) event 540 arch/powerpc/perf/core-book3s.c ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); event 543 arch/powerpc/perf/core-book3s.c static int ebb_event_check(struct perf_event *event) event 545 arch/powerpc/perf/core-book3s.c struct perf_event *leader = event->group_leader; event 548 arch/powerpc/perf/core-book3s.c if (is_ebb_event(leader) != is_ebb_event(event)) event 551 arch/powerpc/perf/core-book3s.c if (is_ebb_event(event)) { event 552 arch/powerpc/perf/core-book3s.c if (!(event->attach_state & PERF_ATTACH_TASK)) event 558 arch/powerpc/perf/core-book3s.c if (event->attr.freq || event 559 arch/powerpc/perf/core-book3s.c event->attr.inherit || event 560 arch/powerpc/perf/core-book3s.c event->attr.sample_type || event 561 arch/powerpc/perf/core-book3s.c event->attr.sample_period || event 562 arch/powerpc/perf/core-book3s.c event->attr.enable_on_exec) event 569 arch/powerpc/perf/core-book3s.c static void ebb_event_add(struct perf_event *event) event 571 arch/powerpc/perf/core-book3s.c if (!is_ebb_event(event) || current->thread.used_ebb) event 993 arch/powerpc/perf/core-book3s.c struct perf_event *event; event 1013 arch/powerpc/perf/core-book3s.c event = ctrs[i]; event 1015 arch/powerpc/perf/core-book3s.c eu = event->attr.exclude_user; event 1016 arch/powerpc/perf/core-book3s.c ek = 
event->attr.exclude_kernel; event 1017 arch/powerpc/perf/core-book3s.c eh = event->attr.exclude_hv; event 1019 arch/powerpc/perf/core-book3s.c } else if (event->attr.exclude_user != eu || event 1020 arch/powerpc/perf/core-book3s.c event->attr.exclude_kernel != ek || event 1021 arch/powerpc/perf/core-book3s.c event->attr.exclude_hv != eh) { event 1053 arch/powerpc/perf/core-book3s.c static void power_pmu_read(struct perf_event *event) event 1057 arch/powerpc/perf/core-book3s.c if (event->hw.state & PERF_HES_STOPPED) event 1060 arch/powerpc/perf/core-book3s.c if (!event->hw.idx) event 1063 arch/powerpc/perf/core-book3s.c if (is_ebb_event(event)) { event 1064 arch/powerpc/perf/core-book3s.c val = read_pmc(event->hw.idx); event 1065 arch/powerpc/perf/core-book3s.c local64_set(&event->hw.prev_count, val); event 1075 arch/powerpc/perf/core-book3s.c prev = local64_read(&event->hw.prev_count); event 1077 arch/powerpc/perf/core-book3s.c val = read_pmc(event->hw.idx); event 1081 arch/powerpc/perf/core-book3s.c } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); event 1083 arch/powerpc/perf/core-book3s.c local64_add(delta, &event->count); event 1095 arch/powerpc/perf/core-book3s.c prev = local64_read(&event->hw.period_left); event 1099 arch/powerpc/perf/core-book3s.c } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); event 1116 arch/powerpc/perf/core-book3s.c struct perf_event *event; event 1121 arch/powerpc/perf/core-book3s.c event = cpuhw->limited_counter[i]; event 1122 arch/powerpc/perf/core-book3s.c if (!event->hw.idx) event 1124 arch/powerpc/perf/core-book3s.c val = (event->hw.idx == 5) ? pmc5 : pmc6; event 1125 arch/powerpc/perf/core-book3s.c prev = local64_read(&event->hw.prev_count); event 1126 arch/powerpc/perf/core-book3s.c event->hw.idx = 0; event 1129 arch/powerpc/perf/core-book3s.c local64_add(delta, &event->count); event 1136 arch/powerpc/perf/core-book3s.c struct perf_event *event; event 1141 arch/powerpc/perf/core-book3s.c event = cpuhw->limited_counter[i]; event 1142 arch/powerpc/perf/core-book3s.c event->hw.idx = cpuhw->limited_hwidx[i]; event 1143 arch/powerpc/perf/core-book3s.c val = (event->hw.idx == 5) ? 
pmc5 : pmc6; event 1144 arch/powerpc/perf/core-book3s.c prev = local64_read(&event->hw.prev_count); event 1146 arch/powerpc/perf/core-book3s.c local64_set(&event->hw.prev_count, val); event 1147 arch/powerpc/perf/core-book3s.c perf_event_update_userpage(event); event 1276 arch/powerpc/perf/core-book3s.c struct perf_event *event; event 1307 arch/powerpc/perf/core-book3s.c ebb = is_ebb_event(cpuhw->event[0]); event 1327 arch/powerpc/perf/core-book3s.c cpuhw->mmcr, cpuhw->event)) { event 1339 arch/powerpc/perf/core-book3s.c event = cpuhw->event[0]; event 1340 arch/powerpc/perf/core-book3s.c if (event->attr.exclude_user) event 1342 arch/powerpc/perf/core-book3s.c if (event->attr.exclude_kernel) event 1344 arch/powerpc/perf/core-book3s.c if (event->attr.exclude_hv) event 1366 arch/powerpc/perf/core-book3s.c event = cpuhw->event[i]; event 1367 arch/powerpc/perf/core-book3s.c if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { event 1368 arch/powerpc/perf/core-book3s.c power_pmu_read(event); event 1369 arch/powerpc/perf/core-book3s.c write_pmc(event->hw.idx, 0); event 1370 arch/powerpc/perf/core-book3s.c event->hw.idx = 0; event 1379 arch/powerpc/perf/core-book3s.c event = cpuhw->event[i]; event 1380 arch/powerpc/perf/core-book3s.c if (event->hw.idx) event 1384 arch/powerpc/perf/core-book3s.c cpuhw->limited_counter[n_lim] = event; event 1391 arch/powerpc/perf/core-book3s.c val = local64_read(&event->hw.prev_count); event 1394 arch/powerpc/perf/core-book3s.c if (event->hw.sample_period) { event 1395 arch/powerpc/perf/core-book3s.c left = local64_read(&event->hw.period_left); event 1399 arch/powerpc/perf/core-book3s.c local64_set(&event->hw.prev_count, val); event 1402 arch/powerpc/perf/core-book3s.c event->hw.idx = idx; event 1403 arch/powerpc/perf/core-book3s.c if (event->hw.state & PERF_HES_STOPPED) event 1407 arch/powerpc/perf/core-book3s.c perf_event_update_userpage(event); event 1441 arch/powerpc/perf/core-book3s.c struct perf_event *event; event 1450 arch/powerpc/perf/core-book3s.c for_each_sibling_event(event, group) { event 1451 arch/powerpc/perf/core-book3s.c if (event->pmu->task_ctx_nr == perf_hw_context && event 1452 arch/powerpc/perf/core-book3s.c event->state != PERF_EVENT_STATE_OFF) { event 1455 arch/powerpc/perf/core-book3s.c ctrs[n] = event; event 1456 arch/powerpc/perf/core-book3s.c flags[n] = event->hw.event_base; event 1457 arch/powerpc/perf/core-book3s.c events[n++] = event->hw.config; event 1469 arch/powerpc/perf/core-book3s.c static int power_pmu_add(struct perf_event *event, int ef_flags) event 1477 arch/powerpc/perf/core-book3s.c perf_pmu_disable(event->pmu); event 1487 arch/powerpc/perf/core-book3s.c cpuhw->event[n0] = event; event 1488 arch/powerpc/perf/core-book3s.c cpuhw->events[n0] = event->hw.config; event 1489 arch/powerpc/perf/core-book3s.c cpuhw->flags[n0] = event->hw.event_base; event 1498 arch/powerpc/perf/core-book3s.c event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; event 1500 arch/powerpc/perf/core-book3s.c event->hw.state = 0; event 1510 arch/powerpc/perf/core-book3s.c if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) event 1514 arch/powerpc/perf/core-book3s.c event->hw.config = cpuhw->events[n0]; event 1517 arch/powerpc/perf/core-book3s.c ebb_event_add(event); event 1524 arch/powerpc/perf/core-book3s.c if (has_branch_stack(event)) { event 1525 arch/powerpc/perf/core-book3s.c power_pmu_bhrb_enable(event); event 1527 arch/powerpc/perf/core-book3s.c event->attr.branch_sample_type); event 1530 arch/powerpc/perf/core-book3s.c 
perf_pmu_enable(event->pmu); event 1538 arch/powerpc/perf/core-book3s.c static void power_pmu_del(struct perf_event *event, int ef_flags) event 1545 arch/powerpc/perf/core-book3s.c perf_pmu_disable(event->pmu); event 1547 arch/powerpc/perf/core-book3s.c power_pmu_read(event); event 1551 arch/powerpc/perf/core-book3s.c if (event == cpuhw->event[i]) { event 1553 arch/powerpc/perf/core-book3s.c cpuhw->event[i-1] = cpuhw->event[i]; event 1558 arch/powerpc/perf/core-book3s.c ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); event 1559 arch/powerpc/perf/core-book3s.c if (event->hw.idx) { event 1560 arch/powerpc/perf/core-book3s.c write_pmc(event->hw.idx, 0); event 1561 arch/powerpc/perf/core-book3s.c event->hw.idx = 0; event 1563 arch/powerpc/perf/core-book3s.c perf_event_update_userpage(event); event 1568 arch/powerpc/perf/core-book3s.c if (event == cpuhw->limited_counter[i]) event 1582 arch/powerpc/perf/core-book3s.c if (has_branch_stack(event)) event 1583 arch/powerpc/perf/core-book3s.c power_pmu_bhrb_disable(event); event 1585 arch/powerpc/perf/core-book3s.c perf_pmu_enable(event->pmu); event 1594 arch/powerpc/perf/core-book3s.c static void power_pmu_start(struct perf_event *event, int ef_flags) event 1600 arch/powerpc/perf/core-book3s.c if (!event->hw.idx || !event->hw.sample_period) event 1603 arch/powerpc/perf/core-book3s.c if (!(event->hw.state & PERF_HES_STOPPED)) event 1607 arch/powerpc/perf/core-book3s.c WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); event 1610 arch/powerpc/perf/core-book3s.c perf_pmu_disable(event->pmu); event 1612 arch/powerpc/perf/core-book3s.c event->hw.state = 0; event 1613 arch/powerpc/perf/core-book3s.c left = local64_read(&event->hw.period_left); event 1619 arch/powerpc/perf/core-book3s.c write_pmc(event->hw.idx, val); event 1621 arch/powerpc/perf/core-book3s.c perf_event_update_userpage(event); event 1622 arch/powerpc/perf/core-book3s.c perf_pmu_enable(event->pmu); event 1626 arch/powerpc/perf/core-book3s.c static void power_pmu_stop(struct perf_event *event, int ef_flags) event 1630 arch/powerpc/perf/core-book3s.c if (!event->hw.idx || !event->hw.sample_period) event 1633 arch/powerpc/perf/core-book3s.c if (event->hw.state & PERF_HES_STOPPED) event 1637 arch/powerpc/perf/core-book3s.c perf_pmu_disable(event->pmu); event 1639 arch/powerpc/perf/core-book3s.c power_pmu_read(event); event 1640 arch/powerpc/perf/core-book3s.c event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; event 1641 arch/powerpc/perf/core-book3s.c write_pmc(event->hw.idx, 0); event 1643 arch/powerpc/perf/core-book3s.c perf_event_update_userpage(event); event 1644 arch/powerpc/perf/core-book3s.c perf_pmu_enable(event->pmu); event 1713 arch/powerpc/perf/core-book3s.c if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) event 1720 arch/powerpc/perf/core-book3s.c cpuhw->event[i]->hw.config = cpuhw->events[i]; event 1734 arch/powerpc/perf/core-book3s.c static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, event 1740 arch/powerpc/perf/core-book3s.c if (event->attr.exclude_user event 1741 arch/powerpc/perf/core-book3s.c || event->attr.exclude_kernel event 1742 arch/powerpc/perf/core-book3s.c || event->attr.exclude_hv event 1743 arch/powerpc/perf/core-book3s.c || event->attr.sample_period) event 1787 arch/powerpc/perf/core-book3s.c static void hw_perf_event_destroy(struct perf_event *event) event 1839 arch/powerpc/perf/core-book3s.c static int power_pmu_event_init(struct perf_event *event) event 1854 arch/powerpc/perf/core-book3s.c if (has_branch_stack(event)) { event 1860 
arch/powerpc/perf/core-book3s.c switch (event->attr.type) { event 1862 arch/powerpc/perf/core-book3s.c ev = event->attr.config; event 1871 arch/powerpc/perf/core-book3s.c err = hw_perf_cache_event(event->attr.config, &ev); event 1879 arch/powerpc/perf/core-book3s.c ev = event->attr.config; event 1888 arch/powerpc/perf/core-book3s.c event->hw.config_base = ev; event 1889 arch/powerpc/perf/core-book3s.c event->hw.idx = 0; event 1897 arch/powerpc/perf/core-book3s.c event->attr.exclude_hv = 0; event 1906 arch/powerpc/perf/core-book3s.c if (event->attach_state & PERF_ATTACH_TASK) event 1914 arch/powerpc/perf/core-book3s.c if (can_go_on_limited_pmc(event, ev, flags)) { event 1929 arch/powerpc/perf/core-book3s.c err = ebb_event_check(event); event 1939 arch/powerpc/perf/core-book3s.c if (event->group_leader != event) { event 1940 arch/powerpc/perf/core-book3s.c n = collect_events(event->group_leader, ppmu->n_counter - 1, event 1946 arch/powerpc/perf/core-book3s.c ctrs[n] = event; event 1954 arch/powerpc/perf/core-book3s.c if (has_branch_stack(event)) { event 1956 arch/powerpc/perf/core-book3s.c event->attr.branch_sample_type); event 1969 arch/powerpc/perf/core-book3s.c event->hw.config = events[n]; event 1970 arch/powerpc/perf/core-book3s.c event->hw.event_base = cflags[n]; event 1971 arch/powerpc/perf/core-book3s.c event->hw.last_period = event->hw.sample_period; event 1972 arch/powerpc/perf/core-book3s.c local64_set(&event->hw.period_left, event->hw.last_period); event 1978 arch/powerpc/perf/core-book3s.c if (is_ebb_event(event)) event 1979 arch/powerpc/perf/core-book3s.c local64_set(&event->hw.prev_count, 0); event 1997 arch/powerpc/perf/core-book3s.c event->destroy = hw_perf_event_destroy; event 2002 arch/powerpc/perf/core-book3s.c static int power_pmu_event_idx(struct perf_event *event) event 2004 arch/powerpc/perf/core-book3s.c return event->hw.idx; event 2038 arch/powerpc/perf/core-book3s.c static void record_and_restart(struct perf_event *event, unsigned long val, event 2041 arch/powerpc/perf/core-book3s.c u64 period = event->hw.sample_period; event 2045 arch/powerpc/perf/core-book3s.c if (event->hw.state & PERF_HES_STOPPED) { event 2046 arch/powerpc/perf/core-book3s.c write_pmc(event->hw.idx, 0); event 2051 arch/powerpc/perf/core-book3s.c prev = local64_read(&event->hw.prev_count); event 2053 arch/powerpc/perf/core-book3s.c local64_add(delta, &event->count); event 2060 arch/powerpc/perf/core-book3s.c left = local64_read(&event->hw.period_left) - delta; event 2069 arch/powerpc/perf/core-book3s.c event->hw.last_period = event->hw.sample_period; event 2075 arch/powerpc/perf/core-book3s.c write_pmc(event->hw.idx, val); event 2076 arch/powerpc/perf/core-book3s.c local64_set(&event->hw.prev_count, val); event 2077 arch/powerpc/perf/core-book3s.c local64_set(&event->hw.period_left, left); event 2078 arch/powerpc/perf/core-book3s.c perf_event_update_userpage(event); event 2086 arch/powerpc/perf/core-book3s.c perf_sample_data_init(&data, ~0ULL, event->hw.last_period); event 2088 arch/powerpc/perf/core-book3s.c if (event->attr.sample_type & event 2092 arch/powerpc/perf/core-book3s.c if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { event 2099 arch/powerpc/perf/core-book3s.c if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && event 2103 arch/powerpc/perf/core-book3s.c if (event->attr.sample_type & PERF_SAMPLE_WEIGHT && event 2107 arch/powerpc/perf/core-book3s.c if (perf_event_overflow(event, &data, regs)) event 2108 arch/powerpc/perf/core-book3s.c power_pmu_stop(event, 0); event 2176 
arch/powerpc/perf/core-book3s.c struct perf_event *event; event 2212 arch/powerpc/perf/core-book3s.c event = cpuhw->event[j]; event 2213 arch/powerpc/perf/core-book3s.c if (event->hw.idx == (i + 1)) { event 2215 arch/powerpc/perf/core-book3s.c record_and_restart(event, val[i], regs); event 2226 arch/powerpc/perf/core-book3s.c event = cpuhw->event[i]; event 2227 arch/powerpc/perf/core-book3s.c if (!event->hw.idx || is_limited_pmc(event->hw.idx)) event 2229 arch/powerpc/perf/core-book3s.c if (pmc_overflow_power7(val[event->hw.idx - 1])) { event 2232 arch/powerpc/perf/core-book3s.c record_and_restart(event, event 2233 arch/powerpc/perf/core-book3s.c val[event->hw.idx - 1], event 23 arch/powerpc/perf/core-fsl-emb.c struct perf_event *event[MAX_HWEVENTS]; event 175 arch/powerpc/perf/core-fsl-emb.c static void fsl_emb_pmu_read(struct perf_event *event) event 179 arch/powerpc/perf/core-fsl-emb.c if (event->hw.state & PERF_HES_STOPPED) event 188 arch/powerpc/perf/core-fsl-emb.c prev = local64_read(&event->hw.prev_count); event 190 arch/powerpc/perf/core-fsl-emb.c val = read_pmc(event->hw.idx); event 191 arch/powerpc/perf/core-fsl-emb.c } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); event 195 arch/powerpc/perf/core-fsl-emb.c local64_add(delta, &event->count); event 196 arch/powerpc/perf/core-fsl-emb.c local64_sub(delta, &event->hw.period_left); event 268 arch/powerpc/perf/core-fsl-emb.c struct perf_event *event; event 276 arch/powerpc/perf/core-fsl-emb.c for_each_sibling_event(event, group) { event 277 arch/powerpc/perf/core-fsl-emb.c if (!is_software_event(event) && event 278 arch/powerpc/perf/core-fsl-emb.c event->state != PERF_EVENT_STATE_OFF) { event 281 arch/powerpc/perf/core-fsl-emb.c ctrs[n] = event; event 289 arch/powerpc/perf/core-fsl-emb.c static int fsl_emb_pmu_add(struct perf_event *event, int flags) event 297 arch/powerpc/perf/core-fsl-emb.c perf_pmu_disable(event->pmu); event 300 arch/powerpc/perf/core-fsl-emb.c if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) event 308 arch/powerpc/perf/core-fsl-emb.c if (cpuhw->event[i]) event 317 arch/powerpc/perf/core-fsl-emb.c event->hw.idx = i; event 318 arch/powerpc/perf/core-fsl-emb.c cpuhw->event[i] = event; event 322 arch/powerpc/perf/core-fsl-emb.c if (event->hw.sample_period) { event 323 arch/powerpc/perf/core-fsl-emb.c s64 left = local64_read(&event->hw.period_left); event 327 arch/powerpc/perf/core-fsl-emb.c local64_set(&event->hw.prev_count, val); event 330 arch/powerpc/perf/core-fsl-emb.c event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; event 333 arch/powerpc/perf/core-fsl-emb.c event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE); event 337 arch/powerpc/perf/core-fsl-emb.c perf_event_update_userpage(event); event 339 arch/powerpc/perf/core-fsl-emb.c write_pmlcb(i, event->hw.config >> 32); event 340 arch/powerpc/perf/core-fsl-emb.c write_pmlca(i, event->hw.config_base); event 345 arch/powerpc/perf/core-fsl-emb.c perf_pmu_enable(event->pmu); event 350 arch/powerpc/perf/core-fsl-emb.c static void fsl_emb_pmu_del(struct perf_event *event, int flags) event 353 arch/powerpc/perf/core-fsl-emb.c int i = event->hw.idx; event 355 arch/powerpc/perf/core-fsl-emb.c perf_pmu_disable(event->pmu); event 359 arch/powerpc/perf/core-fsl-emb.c fsl_emb_pmu_read(event); event 363 arch/powerpc/perf/core-fsl-emb.c WARN_ON(event != cpuhw->event[event->hw.idx]); event 369 arch/powerpc/perf/core-fsl-emb.c cpuhw->event[i] = NULL; event 370 arch/powerpc/perf/core-fsl-emb.c event->hw.idx = -1; event 383 
arch/powerpc/perf/core-fsl-emb.c perf_pmu_enable(event->pmu); event 387 arch/powerpc/perf/core-fsl-emb.c static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) event 393 arch/powerpc/perf/core-fsl-emb.c if (event->hw.idx < 0 || !event->hw.sample_period) event 396 arch/powerpc/perf/core-fsl-emb.c if (!(event->hw.state & PERF_HES_STOPPED)) event 400 arch/powerpc/perf/core-fsl-emb.c WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); event 403 arch/powerpc/perf/core-fsl-emb.c perf_pmu_disable(event->pmu); event 405 arch/powerpc/perf/core-fsl-emb.c event->hw.state = 0; event 406 arch/powerpc/perf/core-fsl-emb.c left = local64_read(&event->hw.period_left); event 410 arch/powerpc/perf/core-fsl-emb.c write_pmc(event->hw.idx, val); event 412 arch/powerpc/perf/core-fsl-emb.c perf_event_update_userpage(event); event 413 arch/powerpc/perf/core-fsl-emb.c perf_pmu_enable(event->pmu); event 417 arch/powerpc/perf/core-fsl-emb.c static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) event 421 arch/powerpc/perf/core-fsl-emb.c if (event->hw.idx < 0 || !event->hw.sample_period) event 424 arch/powerpc/perf/core-fsl-emb.c if (event->hw.state & PERF_HES_STOPPED) event 428 arch/powerpc/perf/core-fsl-emb.c perf_pmu_disable(event->pmu); event 430 arch/powerpc/perf/core-fsl-emb.c fsl_emb_pmu_read(event); event 431 arch/powerpc/perf/core-fsl-emb.c event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; event 432 arch/powerpc/perf/core-fsl-emb.c write_pmc(event->hw.idx, 0); event 434 arch/powerpc/perf/core-fsl-emb.c perf_event_update_userpage(event); event 435 arch/powerpc/perf/core-fsl-emb.c perf_pmu_enable(event->pmu); event 442 arch/powerpc/perf/core-fsl-emb.c static void hw_perf_event_destroy(struct perf_event *event) event 482 arch/powerpc/perf/core-fsl-emb.c static int fsl_emb_pmu_event_init(struct perf_event *event) event 497 arch/powerpc/perf/core-fsl-emb.c switch (event->attr.type) { event 499 arch/powerpc/perf/core-fsl-emb.c ev = event->attr.config; event 506 arch/powerpc/perf/core-fsl-emb.c err = hw_perf_cache_event(event->attr.config, &ev); event 512 arch/powerpc/perf/core-fsl-emb.c ev = event->attr.config; event 519 arch/powerpc/perf/core-fsl-emb.c event->hw.config = ppmu->xlate_event(ev); event 520 arch/powerpc/perf/core-fsl-emb.c if (!(event->hw.config & FSL_EMB_EVENT_VALID)) event 529 arch/powerpc/perf/core-fsl-emb.c if (event->group_leader != event) { event 530 arch/powerpc/perf/core-fsl-emb.c n = collect_events(event->group_leader, event 536 arch/powerpc/perf/core-fsl-emb.c if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { event 547 arch/powerpc/perf/core-fsl-emb.c event->hw.idx = -1; event 549 arch/powerpc/perf/core-fsl-emb.c event->hw.config_base = PMLCA_CE | PMLCA_FCM1 | event 552 arch/powerpc/perf/core-fsl-emb.c if (event->attr.exclude_user) event 553 arch/powerpc/perf/core-fsl-emb.c event->hw.config_base |= PMLCA_FCU; event 554 arch/powerpc/perf/core-fsl-emb.c if (event->attr.exclude_kernel) event 555 arch/powerpc/perf/core-fsl-emb.c event->hw.config_base |= PMLCA_FCS; event 556 arch/powerpc/perf/core-fsl-emb.c if (event->attr.exclude_idle) event 559 arch/powerpc/perf/core-fsl-emb.c event->hw.last_period = event->hw.sample_period; event 560 arch/powerpc/perf/core-fsl-emb.c local64_set(&event->hw.period_left, event->hw.last_period); event 581 arch/powerpc/perf/core-fsl-emb.c event->destroy = hw_perf_event_destroy; event 602 arch/powerpc/perf/core-fsl-emb.c static void record_and_restart(struct perf_event *event, unsigned long val, event 605 
arch/powerpc/perf/core-fsl-emb.c u64 period = event->hw.sample_period; event 609 arch/powerpc/perf/core-fsl-emb.c if (event->hw.state & PERF_HES_STOPPED) { event 610 arch/powerpc/perf/core-fsl-emb.c write_pmc(event->hw.idx, 0); event 615 arch/powerpc/perf/core-fsl-emb.c prev = local64_read(&event->hw.prev_count); event 617 arch/powerpc/perf/core-fsl-emb.c local64_add(delta, &event->count); event 624 arch/powerpc/perf/core-fsl-emb.c left = local64_read(&event->hw.period_left) - delta; event 631 arch/powerpc/perf/core-fsl-emb.c event->hw.last_period = event->hw.sample_period; event 637 arch/powerpc/perf/core-fsl-emb.c write_pmc(event->hw.idx, val); event 638 arch/powerpc/perf/core-fsl-emb.c local64_set(&event->hw.prev_count, val); event 639 arch/powerpc/perf/core-fsl-emb.c local64_set(&event->hw.period_left, left); event 640 arch/powerpc/perf/core-fsl-emb.c perf_event_update_userpage(event); event 648 arch/powerpc/perf/core-fsl-emb.c perf_sample_data_init(&data, 0, event->hw.last_period); event 650 arch/powerpc/perf/core-fsl-emb.c if (perf_event_overflow(event, &data, regs)) event 651 arch/powerpc/perf/core-fsl-emb.c fsl_emb_pmu_stop(event, 0); event 659 arch/powerpc/perf/core-fsl-emb.c struct perf_event *event; event 671 arch/powerpc/perf/core-fsl-emb.c event = cpuhw->event[i]; event 675 arch/powerpc/perf/core-fsl-emb.c if (event) { event 678 arch/powerpc/perf/core-fsl-emb.c record_and_restart(event, val, regs); event 64 arch/powerpc/perf/generic-compat-pmu.c PMU_FORMAT_ATTR(event, "config:0-19"); event 339 arch/powerpc/perf/hv-24x7.c static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain) event 367 arch/powerpc/perf/hv-24x7.c be16_to_cpu(event->event_counter_offs) + event 368 arch/powerpc/perf/hv-24x7.c be16_to_cpu(event->event_group_record_offs), event 447 arch/powerpc/perf/hv-24x7.c struct hv_24x7_event_data *event, event 461 arch/powerpc/perf/hv-24x7.c val = event_fmt(event, domain); event 465 arch/powerpc/perf/hv-24x7.c ev_name = event_name(event, &event_name_len); event 488 arch/powerpc/perf/hv-24x7.c static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event, event 492 arch/powerpc/perf/hv-24x7.c char *name = event_name(event, &nl); event 493 arch/powerpc/perf/hv-24x7.c char *desc = event_desc(event, &dl); event 503 arch/powerpc/perf/hv-24x7.c event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce) event 506 arch/powerpc/perf/hv-24x7.c char *name = event_name(event, &nl); event 507 arch/powerpc/perf/hv-24x7.c char *desc = event_long_desc(event, &dl); event 517 arch/powerpc/perf/hv-24x7.c struct hv_24x7_event_data *event, int nonce) event 519 arch/powerpc/perf/hv-24x7.c *attrs = event_to_attr(ix, event, event->domain, nonce); event 626 arch/powerpc/perf/hv-24x7.c static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event, event 644 arch/powerpc/perf/hv-24x7.c if (!event_fixed_portion_is_within(event, end)) { event 650 arch/powerpc/perf/hv-24x7.c ev_len = be16_to_cpu(event->length); event 654 arch/powerpc/perf/hv-24x7.c event_idx, ev_len, event); event 656 arch/powerpc/perf/hv-24x7.c ev_end = (__u8 *)event + ev_len; event 664 arch/powerpc/perf/hv-24x7.c calc_ev_end = event_end(event, end); event 667 arch/powerpc/perf/hv-24x7.c event_idx, event_data_bytes, event, end, event 674 arch/powerpc/perf/hv-24x7.c event_idx, event, ev_end, offset, calc_ev_end); event 699 arch/powerpc/perf/hv-24x7.c struct hv_24x7_event_data *event; event 795 arch/powerpc/perf/hv-24x7.c for (junk_events = 0, event = event_data, event_idx = 0, 
attr_max = 0; event 797 arch/powerpc/perf/hv-24x7.c event_idx++, event = (void *)event + ev_len) { event 798 arch/powerpc/perf/hv-24x7.c size_t offset = (void *)event - (void *)event_data; event 802 arch/powerpc/perf/hv-24x7.c ev_len = catalog_event_len_validate(event, event_idx, event 809 arch/powerpc/perf/hv-24x7.c name = event_name(event, &nl); event 811 arch/powerpc/perf/hv-24x7.c if (event->event_group_record_len == 0) { event 818 arch/powerpc/perf/hv-24x7.c if (!catalog_entry_domain_is_valid(event->domain)) { event 820 arch/powerpc/perf/hv-24x7.c event_idx, nl, name, event->domain); event 855 arch/powerpc/perf/hv-24x7.c event = event_data, event_idx = 0; event 857 arch/powerpc/perf/hv-24x7.c event_idx++, ev_len = be16_to_cpu(event->length), event 858 arch/powerpc/perf/hv-24x7.c event = (void *)event + ev_len) { event 866 arch/powerpc/perf/hv-24x7.c if (event->event_group_record_len == 0) event 868 arch/powerpc/perf/hv-24x7.c if (!catalog_entry_domain_is_valid(event->domain)) event 871 arch/powerpc/perf/hv-24x7.c name = event_name(event, &nl); event 872 arch/powerpc/perf/hv-24x7.c nonce = event_uniq_add(&ev_uniq, name, nl, event->domain); event 874 arch/powerpc/perf/hv-24x7.c event, nonce); event 881 arch/powerpc/perf/hv-24x7.c event_descs[desc_ct] = event_to_desc_attr(event, nonce); event 885 arch/powerpc/perf/hv-24x7.c event_to_long_desc_attr(event, nonce); event 1117 arch/powerpc/perf/hv-24x7.c static int add_event_to_24x7_request(struct perf_event *event, event 1132 arch/powerpc/perf/hv-24x7.c switch (event_get_domain(event)) { event 1134 arch/powerpc/perf/hv-24x7.c idx = event_get_chip(event); event 1137 arch/powerpc/perf/hv-24x7.c idx = event_get_core(event); event 1140 arch/powerpc/perf/hv-24x7.c idx = event_get_vcpu(event); event 1148 arch/powerpc/perf/hv-24x7.c req->performance_domain = event_get_domain(event); event 1150 arch/powerpc/perf/hv-24x7.c req->data_offset = cpu_to_be32(event_get_offset(event)); event 1151 arch/powerpc/perf/hv-24x7.c req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)); event 1180 arch/powerpc/perf/hv-24x7.c static int get_count_from_result(struct perf_event *event, event 1211 arch/powerpc/perf/hv-24x7.c !domain_needs_aggregation(event_get_domain(event))) { event 1247 arch/powerpc/perf/hv-24x7.c static int single_24x7_request(struct perf_event *event, u64 *count) event 1261 arch/powerpc/perf/hv-24x7.c ret = add_event_to_24x7_request(event, request_buffer); event 1270 arch/powerpc/perf/hv-24x7.c ret = get_count_from_result(event, result_buffer, event 1280 arch/powerpc/perf/hv-24x7.c static int h_24x7_event_init(struct perf_event *event) event 1288 arch/powerpc/perf/hv-24x7.c if (event->attr.type != event->pmu->type) event 1292 arch/powerpc/perf/hv-24x7.c if (event_get_reserved1(event) || event 1293 arch/powerpc/perf/hv-24x7.c event_get_reserved2(event) || event 1294 arch/powerpc/perf/hv-24x7.c event_get_reserved3(event)) { event 1296 arch/powerpc/perf/hv-24x7.c event->attr.config, event 1297 arch/powerpc/perf/hv-24x7.c event_get_reserved1(event), event 1298 arch/powerpc/perf/hv-24x7.c event->attr.config1, event 1299 arch/powerpc/perf/hv-24x7.c event_get_reserved2(event), event 1300 arch/powerpc/perf/hv-24x7.c event->attr.config2, event 1301 arch/powerpc/perf/hv-24x7.c event_get_reserved3(event)); event 1306 arch/powerpc/perf/hv-24x7.c if (has_branch_stack(event)) event 1310 arch/powerpc/perf/hv-24x7.c if (event_get_offset(event) % 8) { event 1315 arch/powerpc/perf/hv-24x7.c domain = event_get_domain(event); event 1329 
arch/powerpc/perf/hv-24x7.c (event_get_lpar(event) != event_get_lpar_max()))) { event 1332 arch/powerpc/perf/hv-24x7.c event_get_lpar(event)); event 1337 arch/powerpc/perf/hv-24x7.c if (single_24x7_request(event, &ct)) { event 1341 arch/powerpc/perf/hv-24x7.c (void)local64_xchg(&event->hw.prev_count, ct); event 1346 arch/powerpc/perf/hv-24x7.c static u64 h_24x7_get_value(struct perf_event *event) event 1350 arch/powerpc/perf/hv-24x7.c if (single_24x7_request(event, &ct)) event 1357 arch/powerpc/perf/hv-24x7.c static void update_event_count(struct perf_event *event, u64 now) event 1361 arch/powerpc/perf/hv-24x7.c prev = local64_xchg(&event->hw.prev_count, now); event 1362 arch/powerpc/perf/hv-24x7.c local64_add(now - prev, &event->count); event 1365 arch/powerpc/perf/hv-24x7.c static void h_24x7_event_read(struct perf_event *event) event 1390 arch/powerpc/perf/hv-24x7.c ret = add_event_to_24x7_request(event, request_buffer); event 1401 arch/powerpc/perf/hv-24x7.c h24x7hw->events[i] = event; event 1412 arch/powerpc/perf/hv-24x7.c local64_set(&event->count, 0); event 1417 arch/powerpc/perf/hv-24x7.c now = h_24x7_get_value(event); event 1418 arch/powerpc/perf/hv-24x7.c update_event_count(event, now); event 1422 arch/powerpc/perf/hv-24x7.c static void h_24x7_event_start(struct perf_event *event, int flags) event 1425 arch/powerpc/perf/hv-24x7.c local64_set(&event->hw.prev_count, h_24x7_get_value(event)); event 1428 arch/powerpc/perf/hv-24x7.c static void h_24x7_event_stop(struct perf_event *event, int flags) event 1430 arch/powerpc/perf/hv-24x7.c h_24x7_event_read(event); event 1433 arch/powerpc/perf/hv-24x7.c static int h_24x7_event_add(struct perf_event *event, int flags) event 1436 arch/powerpc/perf/hv-24x7.c h_24x7_event_start(event, flags); event 1521 arch/powerpc/perf/hv-24x7.c struct perf_event *event = h24x7hw->events[res->result_ix]; event 1523 arch/powerpc/perf/hv-24x7.c ret = get_count_from_result(event, result_buffer, res, &count, event 1528 arch/powerpc/perf/hv-24x7.c update_event_count(event, count); event 41 arch/powerpc/perf/hv-common.h static u64 event_get_##name(struct perf_event *event) \ event 43 arch/powerpc/perf/hv-common.h return (event->attr.attr_var >> (bit_start)) & \ event 175 arch/powerpc/perf/hv-gpci.c static u64 h_gpci_get_value(struct perf_event *event) event 178 arch/powerpc/perf/hv-gpci.c unsigned long ret = single_gpci_request(event_get_request(event), event 179 arch/powerpc/perf/hv-gpci.c event_get_starting_index(event), event 180 arch/powerpc/perf/hv-gpci.c event_get_secondary_index(event), event 181 arch/powerpc/perf/hv-gpci.c event_get_counter_info_version(event), event 182 arch/powerpc/perf/hv-gpci.c event_get_offset(event), event 183 arch/powerpc/perf/hv-gpci.c event_get_length(event), event 190 arch/powerpc/perf/hv-gpci.c static void h_gpci_event_update(struct perf_event *event) event 193 arch/powerpc/perf/hv-gpci.c u64 now = h_gpci_get_value(event); event 194 arch/powerpc/perf/hv-gpci.c prev = local64_xchg(&event->hw.prev_count, now); event 195 arch/powerpc/perf/hv-gpci.c local64_add(now - prev, &event->count); event 198 arch/powerpc/perf/hv-gpci.c static void h_gpci_event_start(struct perf_event *event, int flags) event 200 arch/powerpc/perf/hv-gpci.c local64_set(&event->hw.prev_count, h_gpci_get_value(event)); event 203 arch/powerpc/perf/hv-gpci.c static void h_gpci_event_stop(struct perf_event *event, int flags) event 205 arch/powerpc/perf/hv-gpci.c h_gpci_event_update(event); event 208 arch/powerpc/perf/hv-gpci.c static int h_gpci_event_add(struct 
perf_event *event, int flags) event 211 arch/powerpc/perf/hv-gpci.c h_gpci_event_start(event, flags); event 216 arch/powerpc/perf/hv-gpci.c static int h_gpci_event_init(struct perf_event *event) event 222 arch/powerpc/perf/hv-gpci.c if (event->attr.type != event->pmu->type) event 226 arch/powerpc/perf/hv-gpci.c if (event->attr.config2) { event 232 arch/powerpc/perf/hv-gpci.c if (has_branch_stack(event)) event 235 arch/powerpc/perf/hv-gpci.c length = event_get_length(event); event 242 arch/powerpc/perf/hv-gpci.c if ((event_get_offset(event) + length) > HGPCI_MAX_DATA_BYTES) { event 244 arch/powerpc/perf/hv-gpci.c (size_t)event_get_offset(event) + length, event 250 arch/powerpc/perf/hv-gpci.c if (single_gpci_request(event_get_request(event), event 251 arch/powerpc/perf/hv-gpci.c event_get_starting_index(event), event 252 arch/powerpc/perf/hv-gpci.c event_get_secondary_index(event), event 253 arch/powerpc/perf/hv-gpci.c event_get_counter_info_version(event), event 254 arch/powerpc/perf/hv-gpci.c event_get_offset(event), event 47 arch/powerpc/perf/imc-pmu.c static struct imc_pmu *imc_event_to_pmu(struct perf_event *event) event 49 arch/powerpc/perf/imc-pmu.c return container_of(event->pmu, struct imc_pmu, pmu); event 52 arch/powerpc/perf/imc-pmu.c PMU_FORMAT_ATTR(event, "config:0-61"); event 142 arch/powerpc/perf/imc-pmu.c u32 base, struct imc_events *event) event 150 arch/powerpc/perf/imc-pmu.c event->value = base + reg; event 155 arch/powerpc/perf/imc-pmu.c event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s); event 156 arch/powerpc/perf/imc-pmu.c if (!event->name) event 163 arch/powerpc/perf/imc-pmu.c event->scale = kstrdup(s, GFP_KERNEL); event 164 arch/powerpc/perf/imc-pmu.c if (!event->scale) event 172 arch/powerpc/perf/imc-pmu.c event->unit = kstrdup(s, GFP_KERNEL); event 173 arch/powerpc/perf/imc-pmu.c if (!event->unit) event 179 arch/powerpc/perf/imc-pmu.c kfree(event->unit); event 180 arch/powerpc/perf/imc-pmu.c kfree(event->scale); event 181 arch/powerpc/perf/imc-pmu.c kfree(event->name); event 435 arch/powerpc/perf/imc-pmu.c static void nest_imc_counters_release(struct perf_event *event) event 440 arch/powerpc/perf/imc-pmu.c if (event->cpu < 0) event 443 arch/powerpc/perf/imc-pmu.c node_id = cpu_to_node(event->cpu); event 451 arch/powerpc/perf/imc-pmu.c ref = get_nest_pmu_ref(event->cpu); event 474 arch/powerpc/perf/imc-pmu.c get_hard_smp_processor_id(event->cpu)); event 487 arch/powerpc/perf/imc-pmu.c static int nest_imc_event_init(struct perf_event *event) event 490 arch/powerpc/perf/imc-pmu.c u32 l_config, config = event->attr.config; event 496 arch/powerpc/perf/imc-pmu.c if (event->attr.type != event->pmu->type) event 500 arch/powerpc/perf/imc-pmu.c if (event->hw.sample_period) event 503 arch/powerpc/perf/imc-pmu.c if (event->cpu < 0) event 506 arch/powerpc/perf/imc-pmu.c pmu = imc_event_to_pmu(event); event 516 arch/powerpc/perf/imc-pmu.c chip_id = cpu_to_chip_id(event->cpu); event 538 arch/powerpc/perf/imc-pmu.c event->hw.event_base = (u64)pcni->vbase + l_config; event 539 arch/powerpc/perf/imc-pmu.c node_id = cpu_to_node(event->cpu); event 546 arch/powerpc/perf/imc-pmu.c ref = get_nest_pmu_ref(event->cpu); event 553 arch/powerpc/perf/imc-pmu.c get_hard_smp_processor_id(event->cpu)); event 564 arch/powerpc/perf/imc-pmu.c event->destroy = nest_imc_counters_release; event 713 arch/powerpc/perf/imc-pmu.c static void core_imc_counters_release(struct perf_event *event) event 718 arch/powerpc/perf/imc-pmu.c if (event->cpu < 0) event 726 arch/powerpc/perf/imc-pmu.c core_id = 
event->cpu / threads_per_core; event 751 arch/powerpc/perf/imc-pmu.c get_hard_smp_processor_id(event->cpu)); event 764 arch/powerpc/perf/imc-pmu.c static int core_imc_event_init(struct perf_event *event) event 767 arch/powerpc/perf/imc-pmu.c u64 config = event->attr.config; event 772 arch/powerpc/perf/imc-pmu.c if (event->attr.type != event->pmu->type) event 776 arch/powerpc/perf/imc-pmu.c if (event->hw.sample_period) event 779 arch/powerpc/perf/imc-pmu.c if (event->cpu < 0) event 782 arch/powerpc/perf/imc-pmu.c event->hw.idx = -1; event 783 arch/powerpc/perf/imc-pmu.c pmu = imc_event_to_pmu(event); event 789 arch/powerpc/perf/imc-pmu.c if (!is_core_imc_mem_inited(event->cpu)) event 792 arch/powerpc/perf/imc-pmu.c core_id = event->cpu / threads_per_core; event 811 arch/powerpc/perf/imc-pmu.c get_hard_smp_processor_id(event->cpu)); event 822 arch/powerpc/perf/imc-pmu.c event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK); event 823 arch/powerpc/perf/imc-pmu.c event->destroy = core_imc_counters_release; event 892 arch/powerpc/perf/imc-pmu.c static int thread_imc_event_init(struct perf_event *event) event 894 arch/powerpc/perf/imc-pmu.c u32 config = event->attr.config; event 898 arch/powerpc/perf/imc-pmu.c if (event->attr.type != event->pmu->type) event 905 arch/powerpc/perf/imc-pmu.c if (event->hw.sample_period) event 908 arch/powerpc/perf/imc-pmu.c event->hw.idx = -1; event 909 arch/powerpc/perf/imc-pmu.c pmu = imc_event_to_pmu(event); event 915 arch/powerpc/perf/imc-pmu.c target = event->hw.target; event 919 arch/powerpc/perf/imc-pmu.c event->pmu->task_ctx_nr = perf_sw_context; event 923 arch/powerpc/perf/imc-pmu.c static bool is_thread_imc_pmu(struct perf_event *event) event 925 arch/powerpc/perf/imc-pmu.c if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc"))) event 931 arch/powerpc/perf/imc-pmu.c static u64 * get_event_base_addr(struct perf_event *event) event 935 arch/powerpc/perf/imc-pmu.c if (is_thread_imc_pmu(event)) { event 937 arch/powerpc/perf/imc-pmu.c return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK)); event 940 arch/powerpc/perf/imc-pmu.c return (u64 *)event->hw.event_base; event 962 arch/powerpc/perf/imc-pmu.c static u64 imc_read_counter(struct perf_event *event) event 972 arch/powerpc/perf/imc-pmu.c addr = get_event_base_addr(event); event 974 arch/powerpc/perf/imc-pmu.c local64_set(&event->hw.prev_count, data); event 979 arch/powerpc/perf/imc-pmu.c static void imc_event_update(struct perf_event *event) event 983 arch/powerpc/perf/imc-pmu.c counter_prev = local64_read(&event->hw.prev_count); event 984 arch/powerpc/perf/imc-pmu.c counter_new = imc_read_counter(event); event 988 arch/powerpc/perf/imc-pmu.c local64_add(final_count, &event->count); event 991 arch/powerpc/perf/imc-pmu.c static void imc_event_start(struct perf_event *event, int flags) event 999 arch/powerpc/perf/imc-pmu.c imc_read_counter(event); event 1002 arch/powerpc/perf/imc-pmu.c static void imc_event_stop(struct perf_event *event, int flags) event 1008 arch/powerpc/perf/imc-pmu.c imc_event_update(event); event 1011 arch/powerpc/perf/imc-pmu.c static int imc_event_add(struct perf_event *event, int flags) event 1014 arch/powerpc/perf/imc-pmu.c imc_event_start(event, flags); event 1019 arch/powerpc/perf/imc-pmu.c static int thread_imc_event_add(struct perf_event *event, int flags) event 1026 arch/powerpc/perf/imc-pmu.c imc_event_start(event, flags); event 1060 arch/powerpc/perf/imc-pmu.c static void thread_imc_event_del(struct perf_event *event, int flags) 
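The entries above for arch/powerpc/perf/imc-pmu.c (imc_event_update), core-book3s.c (power_pmu_read), core-fsl-emb.c (fsl_emb_pmu_read), 8xx-pmu.c (mpc8xx_pmu_read) and arch/nds32 (nds32_pmu_event_update) all index the same counter-update idiom: re-read the raw hardware counter, compare-and-swap it against hw.prev_count, and fold the delta into event->count. The fragment below is a minimal user-space sketch of that idiom only; it uses C11 atomics in place of the kernel's local64_* helpers, and the names sample_counter, read_hw_counter and counter_update are hypothetical, introduced purely for illustration rather than taken from any of the files listed here.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the prev_count/count pair kept in hw_perf_event. */
struct sample_counter {
	_Atomic uint64_t prev_count;	/* last raw hardware value observed */
	_Atomic uint64_t count;		/* accumulated event count */
};

/* Stand-in for read_pmc()/imc_read_counter(): returns a raw, ever-growing value. */
static uint64_t read_hw_counter(void)
{
	static uint64_t fake;
	return fake += 123;		/* pretend the hardware keeps counting */
}

/*
 * Mirrors the retry loop indexed above: keep re-reading prev_count and the
 * hardware counter until the compare-exchange advances prev_count atomically,
 * then add the observed delta to the running total.
 */
static void counter_update(struct sample_counter *c)
{
	uint64_t prev, now;

	do {
		prev = atomic_load(&c->prev_count);
		now = read_hw_counter();
	} while (!atomic_compare_exchange_weak(&c->prev_count, &prev, now));

	atomic_fetch_add(&c->count, now - prev);	/* unsigned wrap is harmless */
}

int main(void)
{
	struct sample_counter c = { 0, 0 };

	counter_update(&c);
	counter_update(&c);
	printf("accumulated: %llu\n", (unsigned long long)atomic_load(&c.count));
	return 0;
}

The retry loop is the point of the pattern: the delta is always computed against the exact prev_count value that was atomically replaced, so concurrent or interrupt-context readers never double-count an interval.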
event 1089 arch/powerpc/perf/imc-pmu.c imc_event_update(event); event 1161 arch/powerpc/perf/imc-pmu.c struct perf_event *event) event 1175 arch/powerpc/perf/imc-pmu.c data->period = event->hw.last_period; event 1178 arch/powerpc/perf/imc-pmu.c header->size = sizeof(*header) + event->header_size; event 1186 arch/powerpc/perf/imc-pmu.c perf_event_header__init_id(header, data, event); event 1191 arch/powerpc/perf/imc-pmu.c static void dump_trace_imc_data(struct perf_event *event) event 1203 arch/powerpc/perf/imc-pmu.c ret = trace_imc_prepare_sample(mem, &data, &prev_tb, &header, event); event 1210 arch/powerpc/perf/imc-pmu.c if (perf_output_begin(&handle, event, header.size)) event 1213 arch/powerpc/perf/imc-pmu.c perf_output_sample(&handle, &header, &data, event); event 1219 arch/powerpc/perf/imc-pmu.c static int trace_imc_event_add(struct perf_event *event, int flags) event 1255 arch/powerpc/perf/imc-pmu.c static void trace_imc_event_read(struct perf_event *event) event 1260 arch/powerpc/perf/imc-pmu.c static void trace_imc_event_stop(struct perf_event *event, int flags) event 1263 arch/powerpc/perf/imc-pmu.c dump_trace_imc_data(event); event 1267 arch/powerpc/perf/imc-pmu.c static void trace_imc_event_start(struct perf_event *event, int flags) event 1272 arch/powerpc/perf/imc-pmu.c static void trace_imc_event_del(struct perf_event *event, int flags) event 1300 arch/powerpc/perf/imc-pmu.c trace_imc_event_stop(event, flags); event 1303 arch/powerpc/perf/imc-pmu.c static int trace_imc_event_init(struct perf_event *event) event 1307 arch/powerpc/perf/imc-pmu.c if (event->attr.type != event->pmu->type) event 1314 arch/powerpc/perf/imc-pmu.c if (event->attr.sample_period == 0) event 1317 arch/powerpc/perf/imc-pmu.c event->hw.idx = -1; event 1318 arch/powerpc/perf/imc-pmu.c target = event->hw.target; event 1320 arch/powerpc/perf/imc-pmu.c event->pmu->task_ctx_nr = perf_hw_context; event 11 arch/powerpc/perf/isa207-common.c PMU_FORMAT_ATTR(event, "config:0-49"); event 45 arch/powerpc/perf/isa207-common.c static inline bool event_is_fab_match(u64 event) event 48 arch/powerpc/perf/isa207-common.c event &= 0xff0fe; event 51 arch/powerpc/perf/isa207-common.c return (event == 0x30056 || event == 0x4f052); event 54 arch/powerpc/perf/isa207-common.c static bool is_event_valid(u64 event) event 61 arch/powerpc/perf/isa207-common.c return !(event & ~valid_mask); event 64 arch/powerpc/perf/isa207-common.c static inline bool is_event_marked(u64 event) event 66 arch/powerpc/perf/isa207-common.c if (event & EVENT_IS_MARKED) event 72 arch/powerpc/perf/isa207-common.c static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) event 91 arch/powerpc/perf/isa207-common.c if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE)) event 93 arch/powerpc/perf/isa207-common.c else if (p9_SDAR_MODE(event)) event 94 arch/powerpc/perf/isa207-common.c *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; event 109 arch/powerpc/perf/isa207-common.c static unsigned long combine_from_event(u64 event) event 112 arch/powerpc/perf/isa207-common.c return p9_EVENT_COMBINE(event); event 114 arch/powerpc/perf/isa207-common.c return EVENT_COMBINE(event); event 125 arch/powerpc/perf/isa207-common.c static inline bool event_is_threshold(u64 event) event 127 arch/powerpc/perf/isa207-common.c return (event >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK; event 130 arch/powerpc/perf/isa207-common.c static bool is_thresh_cmp_valid(u64 event) event 138 arch/powerpc/perf/isa207-common.c cmp = (event >> EVENT_THR_CMP_SHIFT) & 
EVENT_THR_CMP_MASK; event 147 arch/powerpc/perf/isa207-common.c static unsigned int dc_ic_rld_quad_l1_sel(u64 event) event 151 arch/powerpc/perf/isa207-common.c cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK; event 242 arch/powerpc/perf/isa207-common.c int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) event 249 arch/powerpc/perf/isa207-common.c if (!is_event_valid(event)) event 252 arch/powerpc/perf/isa207-common.c pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; event 253 arch/powerpc/perf/isa207-common.c unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; event 254 arch/powerpc/perf/isa207-common.c cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; event 255 arch/powerpc/perf/isa207-common.c ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK; event 264 arch/powerpc/perf/isa207-common.c base_event = event & ~EVENT_LINUX_MASK; event 288 arch/powerpc/perf/isa207-common.c value |= CNST_CACHE_GROUP_VAL(event & 0xff); event 306 arch/powerpc/perf/isa207-common.c } else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) { event 311 arch/powerpc/perf/isa207-common.c if (is_event_marked(event)) { event 313 arch/powerpc/perf/isa207-common.c value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); event 317 arch/powerpc/perf/isa207-common.c if (event_is_threshold(event) && is_thresh_cmp_valid(event)) { event 319 arch/powerpc/perf/isa207-common.c value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); event 326 arch/powerpc/perf/isa207-common.c if (event_is_fab_match(event)) { event 328 arch/powerpc/perf/isa207-common.c value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT); event 330 arch/powerpc/perf/isa207-common.c if (!is_thresh_cmp_valid(event)) event 334 arch/powerpc/perf/isa207-common.c value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); event 342 arch/powerpc/perf/isa207-common.c if (event & EVENT_WANTS_BHRB) { event 348 arch/powerpc/perf/isa207-common.c value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT); event 365 arch/powerpc/perf/isa207-common.c int isa207_compute_mmcr(u64 event[], int n_ev, event 377 arch/powerpc/perf/isa207-common.c pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; event 386 arch/powerpc/perf/isa207-common.c pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; event 387 arch/powerpc/perf/isa207-common.c unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; event 388 arch/powerpc/perf/isa207-common.c combine = combine_from_event(event[i]); event 389 arch/powerpc/perf/isa207-common.c psel = event[i] & EVENT_PSEL_MASK; event 407 arch/powerpc/perf/isa207-common.c mmcra_sdar_mode(event[i], &mmcra); event 410 arch/powerpc/perf/isa207-common.c cache = dc_ic_rld_quad_l1_sel(event[i]); event 413 arch/powerpc/perf/isa207-common.c if (event[i] & EVENT_IS_L1) { event 414 arch/powerpc/perf/isa207-common.c cache = dc_ic_rld_quad_l1_sel(event[i]); event 419 arch/powerpc/perf/isa207-common.c if (is_event_marked(event[i])) { event 422 arch/powerpc/perf/isa207-common.c val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; event 433 arch/powerpc/perf/isa207-common.c if (!cpu_has_feature(CPU_FTR_ARCH_300) && event_is_fab_match(event[i])) { event 434 arch/powerpc/perf/isa207-common.c mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) & event 437 arch/powerpc/perf/isa207-common.c val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; event 439 arch/powerpc/perf/isa207-common.c val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK; event 441 arch/powerpc/perf/isa207-common.c val = (event[i] >> 
EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; event 445 arch/powerpc/perf/isa207-common.c if (event[i] & EVENT_WANTS_BHRB) { event 446 arch/powerpc/perf/isa207-common.c val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK; event 493 arch/powerpc/perf/isa207-common.c static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size) event 498 arch/powerpc/perf/isa207-common.c if (event < ev_alt[i][0]) event 502 arch/powerpc/perf/isa207-common.c if (event == ev_alt[i][j]) event 509 arch/powerpc/perf/isa207-common.c int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags, event 515 arch/powerpc/perf/isa207-common.c alt[num_alt++] = event; event 516 arch/powerpc/perf/isa207-common.c i = find_alternative(event, ev_alt, size); event 521 arch/powerpc/perf/isa207-common.c if (alt_event && alt_event != event) event 218 arch/powerpc/perf/isa207-common.h int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp); event 219 arch/powerpc/perf/isa207-common.h int isa207_compute_mmcr(u64 event[], int n_ev, event 223 arch/powerpc/perf/isa207-common.h int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags, event 37 arch/powerpc/perf/mpc7450-pmu.c static int mpc7450_classify_event(u32 event) event 41 arch/powerpc/perf/mpc7450-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 47 arch/powerpc/perf/mpc7450-pmu.c event &= PM_PMCSEL_MSK; event 48 arch/powerpc/perf/mpc7450-pmu.c if (event <= 1) event 50 arch/powerpc/perf/mpc7450-pmu.c if (event <= 7) event 52 arch/powerpc/perf/mpc7450-pmu.c if (event <= 13) event 54 arch/powerpc/perf/mpc7450-pmu.c if (event <= 22) event 77 arch/powerpc/perf/mpc7450-pmu.c static int mpc7450_threshold_use(u32 event) event 81 arch/powerpc/perf/mpc7450-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 82 arch/powerpc/perf/mpc7450-pmu.c sel = event & PM_PMCSEL_MSK; event 150 arch/powerpc/perf/mpc7450-pmu.c static int mpc7450_get_constraint(u64 event, unsigned long *maskp, event 157 arch/powerpc/perf/mpc7450-pmu.c class = mpc7450_classify_event(event); event 161 arch/powerpc/perf/mpc7450-pmu.c pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK; event 169 arch/powerpc/perf/mpc7450-pmu.c tuse = mpc7450_threshold_use(event); event 171 arch/powerpc/perf/mpc7450-pmu.c thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK; event 176 arch/powerpc/perf/mpc7450-pmu.c if ((unsigned int)event & PM_THRMULT_MSKS) event 208 arch/powerpc/perf/mpc7450-pmu.c static int find_alternative(u32 event) event 213 arch/powerpc/perf/mpc7450-pmu.c if (event < event_alternatives[i][0]) event 216 arch/powerpc/perf/mpc7450-pmu.c if (event == event_alternatives[i][j]) event 222 arch/powerpc/perf/mpc7450-pmu.c static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[]) event 227 arch/powerpc/perf/mpc7450-pmu.c alt[0] = event; event 229 arch/powerpc/perf/mpc7450-pmu.c i = find_alternative((u32)event); event 233 arch/powerpc/perf/mpc7450-pmu.c if (ae && ae != (u32)event) event 259 arch/powerpc/perf/mpc7450-pmu.c static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], event 277 arch/powerpc/perf/mpc7450-pmu.c class = mpc7450_classify_event(event[i]); event 287 arch/powerpc/perf/mpc7450-pmu.c ev = event[event_index[class][i]]; event 132 arch/powerpc/perf/power5+-pmu.c static int power5p_get_constraint(u64 event, unsigned long *maskp, event 139 arch/powerpc/perf/power5+-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 146 arch/powerpc/perf/power5+-pmu.c if (pmc >= 5 && !(event == 
0x500009 || event == 0x600005)) event 149 arch/powerpc/perf/power5+-pmu.c if (event & PM_BUSEVENT_MSK) { event 150 arch/powerpc/perf/power5+-pmu.c unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; event 157 arch/powerpc/perf/power5+-pmu.c byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; event 166 arch/powerpc/perf/power5+-pmu.c bit = event & 7; event 170 arch/powerpc/perf/power5+-pmu.c value |= (unsigned long)((event >> PM_GRS_SH) & fmask) event 187 arch/powerpc/perf/power5+-pmu.c static int power5p_limited_pmc_event(u64 event) event 189 arch/powerpc/perf/power5+-pmu.c int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 214 arch/powerpc/perf/power5+-pmu.c static int find_alternative(unsigned int event) event 219 arch/powerpc/perf/power5+-pmu.c if (event < event_alternatives[i][0]) event 222 arch/powerpc/perf/power5+-pmu.c if (event == event_alternatives[i][j]) event 241 arch/powerpc/perf/power5+-pmu.c static s64 find_alternative_bdecode(u64 event) event 245 arch/powerpc/perf/power5+-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 249 arch/powerpc/perf/power5+-pmu.c pp = event & PM_PMCSEL_MSK; event 252 arch/powerpc/perf/power5+-pmu.c return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | event 260 arch/powerpc/perf/power5+-pmu.c return event + (2 << PM_PMC_SH) + (0x2e - 0x0d); event 262 arch/powerpc/perf/power5+-pmu.c return event - (2 << PM_PMC_SH) - (0x2e - 0x0d); event 266 arch/powerpc/perf/power5+-pmu.c return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) | event 272 arch/powerpc/perf/power5+-pmu.c static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[]) event 278 arch/powerpc/perf/power5+-pmu.c alt[0] = event; event 280 arch/powerpc/perf/power5+-pmu.c nlim = power5p_limited_pmc_event(event); event 281 arch/powerpc/perf/power5+-pmu.c i = find_alternative(event); event 285 arch/powerpc/perf/power5+-pmu.c if (ae && ae != event) event 290 arch/powerpc/perf/power5+-pmu.c ae = find_alternative_bdecode(event); event 401 arch/powerpc/perf/power5+-pmu.c static int power5p_marked_instr_event(u64 event) event 407 arch/powerpc/perf/power5+-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 408 arch/powerpc/perf/power5+-pmu.c psel = event & PM_PMCSEL_MSK; event 432 arch/powerpc/perf/power5+-pmu.c if (!(event & PM_BUSEVENT_MSK) || bit == -1) event 435 arch/powerpc/perf/power5+-pmu.c byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; event 436 arch/powerpc/perf/power5+-pmu.c unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; event 450 arch/powerpc/perf/power5+-pmu.c static int power5p_compute_mmcr(u64 event[], int n_ev, event 470 arch/powerpc/perf/power5+-pmu.c pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; event 478 arch/powerpc/perf/power5+-pmu.c if (event[i] & PM_BUSEVENT_MSK) { event 479 arch/powerpc/perf/power5+-pmu.c unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; event 480 arch/powerpc/perf/power5+-pmu.c byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; event 548 arch/powerpc/perf/power5+-pmu.c pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; event 549 arch/powerpc/perf/power5+-pmu.c unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; event 550 arch/powerpc/perf/power5+-pmu.c byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; event 551 arch/powerpc/perf/power5+-pmu.c psel = event[i] & PM_PMCSEL_MSK; event 552 arch/powerpc/perf/power5+-pmu.c isbus = event[i] & PM_BUSEVENT_MSK; event 575 arch/powerpc/perf/power5+-pmu.c grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; event 578 arch/powerpc/perf/power5+-pmu.c if (power5p_marked_instr_event(event[i])) event 136 arch/powerpc/perf/power5-pmu.c static int power5_get_constraint(u64 
event, unsigned long *maskp, event 144 arch/powerpc/perf/power5-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 153 arch/powerpc/perf/power5-pmu.c else if (event != 0x500009 && event != 0x600005) event 156 arch/powerpc/perf/power5-pmu.c if (event & PM_BUSEVENT_MSK) { event 157 arch/powerpc/perf/power5-pmu.c unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; event 164 arch/powerpc/perf/power5-pmu.c byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; event 173 arch/powerpc/perf/power5-pmu.c bit = event & 7; event 177 arch/powerpc/perf/power5-pmu.c value |= (unsigned long)((event >> PM_GRS_SH) & fmask) event 223 arch/powerpc/perf/power5-pmu.c static int find_alternative(u64 event) event 228 arch/powerpc/perf/power5-pmu.c if (event < event_alternatives[i][0]) event 231 arch/powerpc/perf/power5-pmu.c if (event == event_alternatives[i][j]) event 249 arch/powerpc/perf/power5-pmu.c static s64 find_alternative_bdecode(u64 event) event 253 arch/powerpc/perf/power5-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 257 arch/powerpc/perf/power5-pmu.c pp = event & PM_PMCSEL_MSK; event 260 arch/powerpc/perf/power5-pmu.c return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | event 268 arch/powerpc/perf/power5-pmu.c static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[]) event 273 arch/powerpc/perf/power5-pmu.c alt[0] = event; event 275 arch/powerpc/perf/power5-pmu.c i = find_alternative(event); event 279 arch/powerpc/perf/power5-pmu.c if (ae && ae != event) event 283 arch/powerpc/perf/power5-pmu.c ae = find_alternative_bdecode(event); event 337 arch/powerpc/perf/power5-pmu.c static int power5_marked_instr_event(u64 event) event 343 arch/powerpc/perf/power5-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 344 arch/powerpc/perf/power5-pmu.c psel = event & PM_PMCSEL_MSK; event 363 arch/powerpc/perf/power5-pmu.c if (!(event & PM_BUSEVENT_MSK)) event 366 arch/powerpc/perf/power5-pmu.c byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; event 367 arch/powerpc/perf/power5-pmu.c unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; event 381 arch/powerpc/perf/power5-pmu.c static int power5_compute_mmcr(u64 event[], int n_ev, event 403 arch/powerpc/perf/power5-pmu.c pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; event 414 arch/powerpc/perf/power5-pmu.c if (event[i] & PM_BUSEVENT_MSK) { event 415 arch/powerpc/perf/power5-pmu.c unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; event 416 arch/powerpc/perf/power5-pmu.c byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; event 488 arch/powerpc/perf/power5-pmu.c pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; event 489 arch/powerpc/perf/power5-pmu.c unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; event 490 arch/powerpc/perf/power5-pmu.c byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; event 491 arch/powerpc/perf/power5-pmu.c psel = event[i] & PM_PMCSEL_MSK; event 492 arch/powerpc/perf/power5-pmu.c isbus = event[i] & PM_BUSEVENT_MSK; event 520 arch/powerpc/perf/power5-pmu.c grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; event 523 arch/powerpc/perf/power5-pmu.c if (power5_marked_instr_event(event[i])) event 135 arch/powerpc/perf/power6-pmu.c static int power6_marked_instr_event(u64 event) event 141 arch/powerpc/perf/power6-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 142 arch/powerpc/perf/power6-pmu.c psel = (event & PM_PMCSEL_MSK) >> 1; /* drop edge/level bit */ event 161 arch/powerpc/perf/power6-pmu.c if (!(event & PM_BUSEVENT_MSK) || bit == -1) event 164 arch/powerpc/perf/power6-pmu.c byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; event 165 arch/powerpc/perf/power6-pmu.c unit = (event >> 
PM_UNIT_SH) & PM_UNIT_MSK; event 173 arch/powerpc/perf/power6-pmu.c static int p6_compute_mmcr(u64 event[], int n_ev, event 186 arch/powerpc/perf/power6-pmu.c pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; event 194 arch/powerpc/perf/power6-pmu.c ev = event[i]; event 241 arch/powerpc/perf/power6-pmu.c if (power6_marked_instr_event(event[i])) event 266 arch/powerpc/perf/power6-pmu.c static int p6_get_constraint(u64 event, unsigned long *maskp, event 272 arch/powerpc/perf/power6-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 274 arch/powerpc/perf/power6-pmu.c if (pmc > 4 && !(event == 0x500009 || event == 0x600005)) event 280 arch/powerpc/perf/power6-pmu.c if (event & PM_BUSEVENT_MSK) { event 281 arch/powerpc/perf/power6-pmu.c byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; event 284 arch/powerpc/perf/power6-pmu.c value |= (unsigned long)(event & PM_UNIT_MSKS) << sh; event 285 arch/powerpc/perf/power6-pmu.c if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { event 286 arch/powerpc/perf/power6-pmu.c subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; event 300 arch/powerpc/perf/power6-pmu.c static int p6_limited_pmc_event(u64 event) event 302 arch/powerpc/perf/power6-pmu.c int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 339 arch/powerpc/perf/power6-pmu.c static int find_alternatives_list(u64 event) event 345 arch/powerpc/perf/power6-pmu.c if (event < event_alternatives[i][0]) event 349 arch/powerpc/perf/power6-pmu.c if (!alt || event < alt) event 351 arch/powerpc/perf/power6-pmu.c if (event == alt) event 358 arch/powerpc/perf/power6-pmu.c static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[]) event 365 arch/powerpc/perf/power6-pmu.c alt[0] = event; event 366 arch/powerpc/perf/power6-pmu.c nlim = p6_limited_pmc_event(event); event 369 arch/powerpc/perf/power6-pmu.c i = find_alternatives_list(event); event 376 arch/powerpc/perf/power6-pmu.c if (aevent != event) event 384 arch/powerpc/perf/power6-pmu.c psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */ event 385 arch/powerpc/perf/power6-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 387 arch/powerpc/perf/power6-pmu.c alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) | event 392 arch/powerpc/perf/power6-pmu.c alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) | event 81 arch/powerpc/perf/power7-pmu.c static int power7_get_constraint(u64 event, unsigned long *maskp, event 87 arch/powerpc/perf/power7-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 94 arch/powerpc/perf/power7-pmu.c if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4)) event 103 arch/powerpc/perf/power7-pmu.c unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; event 106 arch/powerpc/perf/power7-pmu.c int l2sel = (event >> PM_L2SEL_SH) & PM_L2SEL_MSK; event 128 arch/powerpc/perf/power7-pmu.c static int find_alternative(u64 event) event 133 arch/powerpc/perf/power7-pmu.c if (event < event_alternatives[i][0]) event 136 arch/powerpc/perf/power7-pmu.c if (event == event_alternatives[i][j]) event 142 arch/powerpc/perf/power7-pmu.c static s64 find_alternative_decode(u64 event) event 147 arch/powerpc/perf/power7-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 148 arch/powerpc/perf/power7-pmu.c psel = event & PM_PMCSEL_MSK; event 150 arch/powerpc/perf/power7-pmu.c return event - (1 << PM_PMC_SH) + 8; event 152 arch/powerpc/perf/power7-pmu.c return event + (1 << PM_PMC_SH) - 8; event 156 arch/powerpc/perf/power7-pmu.c static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[]) event 161 arch/powerpc/perf/power7-pmu.c alt[0] = event; event 163 
arch/powerpc/perf/power7-pmu.c i = find_alternative(event); event 167 arch/powerpc/perf/power7-pmu.c if (ae && ae != event) event 171 arch/powerpc/perf/power7-pmu.c ae = find_alternative_decode(event); event 211 arch/powerpc/perf/power7-pmu.c static int power7_marked_instr_event(u64 event) event 216 arch/powerpc/perf/power7-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 217 arch/powerpc/perf/power7-pmu.c unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; event 218 arch/powerpc/perf/power7-pmu.c psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */ event 244 arch/powerpc/perf/power7-pmu.c static int power7_compute_mmcr(u64 event[], int n_ev, event 255 arch/powerpc/perf/power7-pmu.c pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; event 267 arch/powerpc/perf/power7-pmu.c pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; event 268 arch/powerpc/perf/power7-pmu.c unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; event 269 arch/powerpc/perf/power7-pmu.c combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK; event 270 arch/powerpc/perf/power7-pmu.c l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK; event 271 arch/powerpc/perf/power7-pmu.c psel = event[i] & PM_PMCSEL_MSK; event 295 arch/powerpc/perf/power7-pmu.c if (power7_marked_instr_event(event[i])) event 409 arch/powerpc/perf/power7-pmu.c PMU_FORMAT_ATTR(event, "config:0-19"); event 112 arch/powerpc/perf/power8-pmu.c static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[]) event 116 arch/powerpc/perf/power8-pmu.c num_alt = isa207_get_alternatives(event, alt, event 141 arch/powerpc/perf/power9-pmu.c static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[]) event 145 arch/powerpc/perf/power9-pmu.c num_alt = isa207_get_alternatives(event, alt, event 211 arch/powerpc/perf/power9-pmu.c PMU_FORMAT_ATTR(event, "config:0-51"); event 143 arch/powerpc/perf/ppc970-pmu.c static int p970_marked_instr_event(u64 event) event 148 arch/powerpc/perf/ppc970-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 149 arch/powerpc/perf/ppc970-pmu.c psel = event & PM_PMCSEL_MSK; event 162 arch/powerpc/perf/ppc970-pmu.c byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; event 163 arch/powerpc/perf/ppc970-pmu.c unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; event 190 arch/powerpc/perf/ppc970-pmu.c static int p970_get_constraint(u64 event, unsigned long *maskp, event 197 arch/powerpc/perf/ppc970-pmu.c pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; event 206 arch/powerpc/perf/ppc970-pmu.c unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; event 212 arch/powerpc/perf/ppc970-pmu.c byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; event 232 arch/powerpc/perf/ppc970-pmu.c spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; event 242 arch/powerpc/perf/ppc970-pmu.c static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[]) event 244 arch/powerpc/perf/ppc970-pmu.c alt[0] = event; event 247 arch/powerpc/perf/ppc970-pmu.c if (event == 0x2002 || event == 0x3002) { event 248 arch/powerpc/perf/ppc970-pmu.c alt[1] = event ^ 0x1000; event 255 arch/powerpc/perf/ppc970-pmu.c static int p970_compute_mmcr(u64 event[], int n_ev, event 279 arch/powerpc/perf/ppc970-pmu.c pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; event 287 arch/powerpc/perf/ppc970-pmu.c unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; event 288 arch/powerpc/perf/ppc970-pmu.c byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; event 346 arch/powerpc/perf/ppc970-pmu.c pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; event 347 arch/powerpc/perf/ppc970-pmu.c unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; event 348 
arch/powerpc/perf/ppc970-pmu.c byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; event 349 arch/powerpc/perf/ppc970-pmu.c psel = event[i] & PM_PMCSEL_MSK; event 378 arch/powerpc/perf/ppc970-pmu.c spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; event 380 arch/powerpc/perf/ppc970-pmu.c if (p970_marked_instr_event(event[i])) event 74 arch/powerpc/platforms/512x/pdm360ng.c unsigned long event, void *__dev) event 78 arch/powerpc/platforms/512x/pdm360ng.c if ((event == BUS_NOTIFY_ADD_DEVICE) && event 47 arch/powerpc/platforms/83xx/suspend.c u32 event; event 146 arch/powerpc/platforms/83xx/suspend.c u32 event = in_be32(&pmc_regs->event); event 152 arch/powerpc/platforms/83xx/suspend.c if (event) { event 153 arch/powerpc/platforms/83xx/suspend.c out_be32(&pmc_regs->event, event); event 34 arch/powerpc/platforms/cell/spufs/backing_ops.c static void gen_spu_event(struct spu_context *ctx, u32 event) event 43 arch/powerpc/platforms/cell/spufs/backing_ops.c ctx->csa.spu_chnldata_RW[0] |= event; event 44 arch/powerpc/platforms/cell/spufs/backing_ops.c if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) { event 353 arch/powerpc/platforms/cell/spufs/run.c long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) event 451 arch/powerpc/platforms/cell/spufs/run.c *event = ctx->event_return; event 37 arch/powerpc/platforms/powermac/pic.c unsigned int event; event 209 arch/powerpc/platforms/powermac/pic.c bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; event 239 arch/powerpc/platforms/powermac/pic.c bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; event 623 arch/powerpc/platforms/powermac/pic.c (void)in_le32(&pmac_irq_hw[0]->event); event 119 arch/powerpc/platforms/pseries/io_event_irq.c struct pseries_io_event *event; event 132 arch/powerpc/platforms/pseries/io_event_irq.c event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf); event 133 arch/powerpc/platforms/pseries/io_event_irq.c if (!event) event 137 arch/powerpc/platforms/pseries/io_event_irq.c 0, event); event 214 arch/riscv/kernel/perf_event.c static void riscv_pmu_read(struct perf_event *event) event 216 arch/riscv/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 235 arch/riscv/kernel/perf_event.c local64_add(delta, &event->count); event 251 arch/riscv/kernel/perf_event.c static void riscv_pmu_stop(struct perf_event *event, int flags) event 253 arch/riscv/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 259 arch/riscv/kernel/perf_event.c riscv_pmu->pmu->read(event); event 267 arch/riscv/kernel/perf_event.c static void riscv_pmu_start(struct perf_event *event, int flags) event 269 arch/riscv/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 271 arch/riscv/kernel/perf_event.c if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) event 275 arch/riscv/kernel/perf_event.c WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); event 284 arch/riscv/kernel/perf_event.c perf_event_update_userpage(event); event 297 arch/riscv/kernel/perf_event.c static int riscv_pmu_add(struct perf_event *event, int flags) event 300 arch/riscv/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 314 arch/riscv/kernel/perf_event.c cpuc->events[hwc->idx] = event; event 320 arch/riscv/kernel/perf_event.c riscv_pmu->pmu->start(event, PERF_EF_RELOAD); event 328 arch/riscv/kernel/perf_event.c static void riscv_pmu_del(struct perf_event *event, int flags) event 331 arch/riscv/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 335 
arch/riscv/kernel/perf_event.c riscv_pmu->pmu->stop(event, PERF_EF_UPDATE); event 336 arch/riscv/kernel/perf_event.c perf_event_update_userpage(event); event 378 arch/riscv/kernel/perf_event.c static void riscv_event_destroy(struct perf_event *event) event 384 arch/riscv/kernel/perf_event.c static int riscv_event_init(struct perf_event *event) event 386 arch/riscv/kernel/perf_event.c struct perf_event_attr *attr = &event->attr; event 387 arch/riscv/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 401 arch/riscv/kernel/perf_event.c switch (event->attr.type) { event 414 arch/riscv/kernel/perf_event.c event->destroy = riscv_event_destroy; event 416 arch/riscv/kernel/perf_event.c event->destroy(event); event 109 arch/s390/include/asm/eadm.h void (*notify) (struct scm_device *scmdev, enum scm_event event); event 1666 arch/s390/kernel/ipl.c unsigned long event, void *data) event 19 arch/s390/kernel/perf_cpum_cf.c static enum cpumf_ctr_set get_counter_set(u64 event) event 23 arch/s390/kernel/perf_cpum_cf.c if (event < 32) event 25 arch/s390/kernel/perf_cpum_cf.c else if (event < 64) event 27 arch/s390/kernel/perf_cpum_cf.c else if (event < 128) event 29 arch/s390/kernel/perf_cpum_cf.c else if (event < 288) event 31 arch/s390/kernel/perf_cpum_cf.c else if (event >= 448 && event < 496) event 171 arch/s390/kernel/perf_cpum_cf.c static void hw_perf_event_destroy(struct perf_event *event) event 202 arch/s390/kernel/perf_cpum_cf.c static int __hw_perf_event_init(struct perf_event *event, unsigned int type) event 204 arch/s390/kernel/perf_cpum_cf.c struct perf_event_attr *attr = &event->attr; event 205 arch/s390/kernel/perf_cpum_cf.c struct hw_perf_event *hwc = &event->hw; event 221 arch/s390/kernel/perf_cpum_cf.c if (is_sampling_event(event)) /* No sampling support */ event 285 arch/s390/kernel/perf_cpum_cf.c event->destroy = hw_perf_event_destroy; event 295 arch/s390/kernel/perf_cpum_cf.c static int cpumf_pmu_event_init(struct perf_event *event) event 297 arch/s390/kernel/perf_cpum_cf.c unsigned int type = event->attr.type; event 301 arch/s390/kernel/perf_cpum_cf.c err = __hw_perf_event_init(event, type); event 302 arch/s390/kernel/perf_cpum_cf.c else if (event->pmu->type == type) event 304 arch/s390/kernel/perf_cpum_cf.c err = __hw_perf_event_init(event, PERF_TYPE_RAW); event 308 arch/s390/kernel/perf_cpum_cf.c if (unlikely(err) && event->destroy) event 309 arch/s390/kernel/perf_cpum_cf.c event->destroy(event); event 314 arch/s390/kernel/perf_cpum_cf.c static int hw_perf_event_reset(struct perf_event *event) event 320 arch/s390/kernel/perf_cpum_cf.c prev = local64_read(&event->hw.prev_count); event 321 arch/s390/kernel/perf_cpum_cf.c err = ecctr(event->hw.config, &new); event 332 arch/s390/kernel/perf_cpum_cf.c } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); event 337 arch/s390/kernel/perf_cpum_cf.c static void hw_perf_event_update(struct perf_event *event) event 343 arch/s390/kernel/perf_cpum_cf.c prev = local64_read(&event->hw.prev_count); event 344 arch/s390/kernel/perf_cpum_cf.c err = ecctr(event->hw.config, &new); event 347 arch/s390/kernel/perf_cpum_cf.c } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); event 351 arch/s390/kernel/perf_cpum_cf.c local64_add(delta, &event->count); event 354 arch/s390/kernel/perf_cpum_cf.c static void cpumf_pmu_read(struct perf_event *event) event 356 arch/s390/kernel/perf_cpum_cf.c if (event->hw.state & PERF_HES_STOPPED) event 359 arch/s390/kernel/perf_cpum_cf.c hw_perf_event_update(event); event 362 
arch/s390/kernel/perf_cpum_cf.c static void cpumf_pmu_start(struct perf_event *event, int flags) event 365 arch/s390/kernel/perf_cpum_cf.c struct hw_perf_event *hwc = &event->hw; event 387 arch/s390/kernel/perf_cpum_cf.c hw_perf_event_reset(event); event 393 arch/s390/kernel/perf_cpum_cf.c static void cpumf_pmu_stop(struct perf_event *event, int flags) event 396 arch/s390/kernel/perf_cpum_cf.c struct hw_perf_event *hwc = &event->hw; event 405 arch/s390/kernel/perf_cpum_cf.c event->hw.state |= PERF_HES_STOPPED; event 409 arch/s390/kernel/perf_cpum_cf.c hw_perf_event_update(event); event 410 arch/s390/kernel/perf_cpum_cf.c event->hw.state |= PERF_HES_UPTODATE; event 414 arch/s390/kernel/perf_cpum_cf.c static int cpumf_pmu_add(struct perf_event *event, int flags) event 424 arch/s390/kernel/perf_cpum_cf.c if (validate_ctr_auth(&event->hw)) event 427 arch/s390/kernel/perf_cpum_cf.c ctr_set_enable(&cpuhw->state, event->hw.config_base); event 428 arch/s390/kernel/perf_cpum_cf.c event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; event 431 arch/s390/kernel/perf_cpum_cf.c cpumf_pmu_start(event, PERF_EF_RELOAD); event 433 arch/s390/kernel/perf_cpum_cf.c perf_event_update_userpage(event); event 438 arch/s390/kernel/perf_cpum_cf.c static void cpumf_pmu_del(struct perf_event *event, int flags) event 442 arch/s390/kernel/perf_cpum_cf.c cpumf_pmu_stop(event, PERF_EF_UPDATE); event 452 arch/s390/kernel/perf_cpum_cf.c if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base])) event 453 arch/s390/kernel/perf_cpum_cf.c ctr_set_disable(&cpuhw->state, event->hw.config_base); event 455 arch/s390/kernel/perf_cpum_cf.c perf_event_update_userpage(event); event 183 arch/s390/kernel/perf_cpum_cf_diag.c static void cf_diag_perf_event_destroy(struct perf_event *event) event 187 arch/s390/kernel/perf_cpum_cf_diag.c __func__, event, event->cpu, event 197 arch/s390/kernel/perf_cpum_cf_diag.c static int __hw_perf_event_init(struct perf_event *event) event 199 arch/s390/kernel/perf_cpum_cf_diag.c struct perf_event_attr *attr = &event->attr; event 205 arch/s390/kernel/perf_cpum_cf_diag.c event, event->cpu); event 207 arch/s390/kernel/perf_cpum_cf_diag.c event->hw.config = attr->config; event 208 arch/s390/kernel/perf_cpum_cf_diag.c event->hw.config_base = 0; event 221 arch/s390/kernel/perf_cpum_cf_diag.c event->hw.config_base |= cpumf_ctr_ctl[i]; event 225 arch/s390/kernel/perf_cpum_cf_diag.c if (!event->hw.config_base) { event 231 arch/s390/kernel/perf_cpum_cf_diag.c event->hw.sample_period = attr->sample_period; event 232 arch/s390/kernel/perf_cpum_cf_diag.c local64_set(&event->hw.period_left, event->hw.sample_period); event 233 arch/s390/kernel/perf_cpum_cf_diag.c event->hw.last_period = event->hw.sample_period; event 236 arch/s390/kernel/perf_cpum_cf_diag.c __func__, err, event->hw.config_base); event 240 arch/s390/kernel/perf_cpum_cf_diag.c static int cf_diag_event_init(struct perf_event *event) event 242 arch/s390/kernel/perf_cpum_cf_diag.c struct perf_event_attr *attr = &event->attr; event 248 arch/s390/kernel/perf_cpum_cf_diag.c event, event->cpu, attr->config, event->pmu->type, event 251 arch/s390/kernel/perf_cpum_cf_diag.c if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG || event 252 arch/s390/kernel/perf_cpum_cf_diag.c event->attr.type != event->pmu->type) event 274 arch/s390/kernel/perf_cpum_cf_diag.c event->destroy = cf_diag_perf_event_destroy; event 276 arch/s390/kernel/perf_cpum_cf_diag.c err = __hw_perf_event_init(event); event 278 arch/s390/kernel/perf_cpum_cf_diag.c event->destroy(event); event 284 
arch/s390/kernel/perf_cpum_cf_diag.c static void cf_diag_read(struct perf_event *event) event 286 arch/s390/kernel/perf_cpum_cf_diag.c debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event); event 487 arch/s390/kernel/perf_cpum_cf_diag.c static int cf_diag_push_sample(struct perf_event *event, event 496 arch/s390/kernel/perf_cpum_cf_diag.c perf_sample_data_init(&data, 0, event->hw.last_period); event 500 arch/s390/kernel/perf_cpum_cf_diag.c if (event->attr.sample_type & PERF_SAMPLE_CPU) event 501 arch/s390/kernel/perf_cpum_cf_diag.c data.cpu_entry.cpu = event->cpu; event 502 arch/s390/kernel/perf_cpum_cf_diag.c if (event->attr.sample_type & PERF_SAMPLE_RAW) { event 509 arch/s390/kernel/perf_cpum_cf_diag.c overflow = perf_event_overflow(event, &data, &regs); event 512 arch/s390/kernel/perf_cpum_cf_diag.c "ov %d\n", __func__, event, event->cpu, event 513 arch/s390/kernel/perf_cpum_cf_diag.c event->attr.sample_type, raw.size, overflow); event 515 arch/s390/kernel/perf_cpum_cf_diag.c event->pmu->stop(event, 0); event 517 arch/s390/kernel/perf_cpum_cf_diag.c perf_event_update_userpage(event); event 521 arch/s390/kernel/perf_cpum_cf_diag.c static void cf_diag_start(struct perf_event *event, int flags) event 525 arch/s390/kernel/perf_cpum_cf_diag.c struct hw_perf_event *hwc = &event->hw; event 529 arch/s390/kernel/perf_cpum_cf_diag.c __func__, event, event->cpu, flags, hwc->state); event 539 arch/s390/kernel/perf_cpum_cf_diag.c event->hw.config_base); event 544 arch/s390/kernel/perf_cpum_cf_diag.c static void cf_diag_stop(struct perf_event *event, int flags) event 548 arch/s390/kernel/perf_cpum_cf_diag.c struct hw_perf_event *hwc = &event->hw; event 552 arch/s390/kernel/perf_cpum_cf_diag.c __func__, event, event->cpu, flags, hwc->state); event 556 arch/s390/kernel/perf_cpum_cf_diag.c local64_inc(&event->count); event 558 arch/s390/kernel/perf_cpum_cf_diag.c event->hw.config_base); event 559 arch/s390/kernel/perf_cpum_cf_diag.c if (cf_diag_diffctr(csd, event->hw.config_base)) event 560 arch/s390/kernel/perf_cpum_cf_diag.c cf_diag_push_sample(event, csd); event 564 arch/s390/kernel/perf_cpum_cf_diag.c static int cf_diag_add(struct perf_event *event, int flags) event 571 arch/s390/kernel/perf_cpum_cf_diag.c __func__, event, event->cpu, flags, cpuhw); event 578 arch/s390/kernel/perf_cpum_cf_diag.c event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; event 582 arch/s390/kernel/perf_cpum_cf_diag.c cf_diag_start(event, PERF_EF_RELOAD); event 588 arch/s390/kernel/perf_cpum_cf_diag.c static void cf_diag_del(struct perf_event *event, int flags) event 594 arch/s390/kernel/perf_cpum_cf_diag.c __func__, event, event->cpu, flags); event 596 arch/s390/kernel/perf_cpum_cf_diag.c cf_diag_stop(event, PERF_EF_UPDATE); event 597 arch/s390/kernel/perf_cpum_cf_diag.c ctr_set_multiple_stop(&cpuhw->state, event->hw.config_base); event 598 arch/s390/kernel/perf_cpum_cf_diag.c ctr_set_multiple_disable(&cpuhw->state, event->hw.config_base); event 609 arch/s390/kernel/perf_cpum_cf_diag.c PMU_FORMAT_ATTR(event, "config:0-63"); event 525 arch/s390/kernel/perf_cpum_cf_events.c PMU_FORMAT_ATTR(event, "config:0-63"); event 94 arch/s390/kernel/perf_cpum_sf.c struct perf_event *event; /* Scheduled perf event */ event 596 arch/s390/kernel/perf_cpum_sf.c static void hw_perf_event_destroy(struct perf_event *event) event 628 arch/s390/kernel/perf_cpum_sf.c static u32 cpumsf_pid_type(struct perf_event *event, event 644 arch/s390/kernel/perf_cpum_sf.c if (event->parent) event 645 arch/s390/kernel/perf_cpum_sf.c event =
event->parent; event 646 arch/s390/kernel/perf_cpum_sf.c pid = __task_pid_nr_ns(tsk, type, event->ns); event 658 arch/s390/kernel/perf_cpum_sf.c static void cpumsf_output_event_pid(struct perf_event *event, event 675 arch/s390/kernel/perf_cpum_sf.c perf_prepare_sample(&header, data, event, regs); event 676 arch/s390/kernel/perf_cpum_sf.c if (perf_output_begin(&handle, event, header.size)) event 680 arch/s390/kernel/perf_cpum_sf.c data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID); event 681 arch/s390/kernel/perf_cpum_sf.c data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID); event 683 arch/s390/kernel/perf_cpum_sf.c perf_output_sample(&handle, &header, data, event); event 738 arch/s390/kernel/perf_cpum_sf.c static int __hw_perf_event_init_rate(struct perf_event *event, event 741 arch/s390/kernel/perf_cpum_sf.c struct perf_event_attr *attr = &event->attr; event 742 arch/s390/kernel/perf_cpum_sf.c struct hw_perf_event *hwc = &event->hw; event 760 arch/s390/kernel/perf_cpum_sf.c "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu, event 761 arch/s390/kernel/perf_cpum_sf.c event->attr.sample_period, event->attr.freq, event 766 arch/s390/kernel/perf_cpum_sf.c static int __hw_perf_event_init(struct perf_event *event) event 770 arch/s390/kernel/perf_cpum_sf.c struct perf_event_attr *attr = &event->attr; event 771 arch/s390/kernel/perf_cpum_sf.c struct hw_perf_event *hwc = &event->hw; event 784 arch/s390/kernel/perf_cpum_sf.c event->destroy = hw_perf_event_destroy; event 800 arch/s390/kernel/perf_cpum_sf.c if (event->cpu == -1) event 806 arch/s390/kernel/perf_cpum_sf.c cpuhw = &per_cpu(cpu_hw_sf, event->cpu); event 843 arch/s390/kernel/perf_cpum_sf.c err = __hw_perf_event_init_rate(event, &si); event 879 arch/s390/kernel/perf_cpum_sf.c if (event->attr.sample_type & PERF_SAMPLE_TID) event 880 arch/s390/kernel/perf_cpum_sf.c if (is_default_overflow_handler(event)) event 881 arch/s390/kernel/perf_cpum_sf.c event->overflow_handler = cpumsf_output_event_pid; event 886 arch/s390/kernel/perf_cpum_sf.c static int cpumsf_pmu_event_init(struct perf_event *event) event 891 arch/s390/kernel/perf_cpum_sf.c if (has_branch_stack(event)) event 894 arch/s390/kernel/perf_cpum_sf.c switch (event->attr.type) { event 896 arch/s390/kernel/perf_cpum_sf.c if ((event->attr.config != PERF_EVENT_CPUM_SF) && event 897 arch/s390/kernel/perf_cpum_sf.c (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) event 906 arch/s390/kernel/perf_cpum_sf.c if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) event 908 arch/s390/kernel/perf_cpum_sf.c if (!is_sampling_event(event)) event 916 arch/s390/kernel/perf_cpum_sf.c if (event->cpu >= 0 && !cpu_online(event->cpu)) event 922 arch/s390/kernel/perf_cpum_sf.c if (event->attr.exclude_hv) event 923 arch/s390/kernel/perf_cpum_sf.c event->attr.exclude_hv = 0; event 924 arch/s390/kernel/perf_cpum_sf.c if (event->attr.exclude_idle) event 925 arch/s390/kernel/perf_cpum_sf.c event->attr.exclude_idle = 0; event 927 arch/s390/kernel/perf_cpum_sf.c err = __hw_perf_event_init(event); event 929 arch/s390/kernel/perf_cpum_sf.c if (event->destroy) event 930 arch/s390/kernel/perf_cpum_sf.c event->destroy(event); event 957 arch/s390/kernel/perf_cpum_sf.c if (cpuhw->event) { event 958 arch/s390/kernel/perf_cpum_sf.c hwc = &cpuhw->event->hw; event 969 arch/s390/kernel/perf_cpum_sf.c cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw); event 1047 arch/s390/kernel/perf_cpum_sf.c static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, event 1050 
arch/s390/kernel/perf_cpum_sf.c if (event->attr.exclude_user && user_mode(regs)) event 1052 arch/s390/kernel/perf_cpum_sf.c if (event->attr.exclude_kernel && !user_mode(regs)) event 1054 arch/s390/kernel/perf_cpum_sf.c if (event->attr.exclude_guest && sde_regs->in_guest) event 1056 arch/s390/kernel/perf_cpum_sf.c if (event->attr.exclude_host && !sde_regs->in_guest) event 1072 arch/s390/kernel/perf_cpum_sf.c static int perf_push_sample(struct perf_event *event, event 1081 arch/s390/kernel/perf_cpum_sf.c perf_sample_data_init(&data, 0, event->hw.last_period); event 1130 arch/s390/kernel/perf_cpum_sf.c if (perf_exclude_event(event, &regs, sde_regs)) event 1132 arch/s390/kernel/perf_cpum_sf.c if (perf_event_overflow(event, &data, &regs)) { event 1134 arch/s390/kernel/perf_cpum_sf.c event->pmu->stop(event, 0); event 1136 arch/s390/kernel/perf_cpum_sf.c perf_event_update_userpage(event); event 1141 arch/s390/kernel/perf_cpum_sf.c static void perf_event_count_update(struct perf_event *event, u64 count) event 1143 arch/s390/kernel/perf_cpum_sf.c local64_add(count, &event->count); event 1175 arch/s390/kernel/perf_cpum_sf.c static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, event 1189 arch/s390/kernel/perf_cpum_sf.c perf_event_count_update(event, SAMPL_RATE(&event->hw)); event 1201 arch/s390/kernel/perf_cpum_sf.c *overflow = perf_push_sample(event, event 1243 arch/s390/kernel/perf_cpum_sf.c static void hw_perf_event_update(struct perf_event *event, int flush_all) event 1245 arch/s390/kernel/perf_cpum_sf.c struct hw_perf_event *hwc = &event->hw; event 1255 arch/s390/kernel/perf_cpum_sf.c if (SAMPL_DIAG_MODE(&event->hw)) event 1292 arch/s390/kernel/perf_cpum_sf.c hw_collect_samples(event, sdbt, &event_overflow); event 1595 arch/s390/kernel/perf_cpum_sf.c aux = perf_aux_output_begin(handle, cpuhw->event); event 1691 arch/s390/kernel/perf_cpum_sf.c static void *aux_buffer_setup(struct perf_event *event, void **pages, event 1790 arch/s390/kernel/perf_cpum_sf.c static void cpumsf_pmu_read(struct perf_event *event) event 1799 arch/s390/kernel/perf_cpum_sf.c static int cpumsf_pmu_check_period(struct perf_event *event, u64 value) event 1806 arch/s390/kernel/perf_cpum_sf.c if (event->cpu == -1) { event 1813 arch/s390/kernel/perf_cpum_sf.c struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu); event 1818 arch/s390/kernel/perf_cpum_sf.c do_freq = !!SAMPLE_FREQ_MODE(&event->hw); event 1823 arch/s390/kernel/perf_cpum_sf.c event->attr.sample_period = rate; event 1824 arch/s390/kernel/perf_cpum_sf.c SAMPL_RATE(&event->hw) = rate; event 1825 arch/s390/kernel/perf_cpum_sf.c hw_init_period(&event->hw, SAMPL_RATE(&event->hw)); event 1828 arch/s390/kernel/perf_cpum_sf.c event->cpu, value, event 1829 arch/s390/kernel/perf_cpum_sf.c event->attr.sample_period, do_freq); event 1836 arch/s390/kernel/perf_cpum_sf.c static void cpumsf_pmu_start(struct perf_event *event, int flags) event 1840 arch/s390/kernel/perf_cpum_sf.c if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) event 1844 arch/s390/kernel/perf_cpum_sf.c WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); event 1846 arch/s390/kernel/perf_cpum_sf.c perf_pmu_disable(event->pmu); event 1847 arch/s390/kernel/perf_cpum_sf.c event->hw.state = 0; event 1849 arch/s390/kernel/perf_cpum_sf.c if (SAMPL_DIAG_MODE(&event->hw)) event 1851 arch/s390/kernel/perf_cpum_sf.c perf_pmu_enable(event->pmu); event 1857 arch/s390/kernel/perf_cpum_sf.c static void cpumsf_pmu_stop(struct perf_event *event, int flags) event 1861 arch/s390/kernel/perf_cpum_sf.c
if (event->hw.state & PERF_HES_STOPPED) event 1864 arch/s390/kernel/perf_cpum_sf.c perf_pmu_disable(event->pmu); event 1867 arch/s390/kernel/perf_cpum_sf.c event->hw.state |= PERF_HES_STOPPED; event 1869 arch/s390/kernel/perf_cpum_sf.c if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { event 1870 arch/s390/kernel/perf_cpum_sf.c hw_perf_event_update(event, 1); event 1871 arch/s390/kernel/perf_cpum_sf.c event->hw.state |= PERF_HES_UPTODATE; event 1873 arch/s390/kernel/perf_cpum_sf.c perf_pmu_enable(event->pmu); event 1876 arch/s390/kernel/perf_cpum_sf.c static int cpumsf_pmu_add(struct perf_event *event, int flags) event 1885 arch/s390/kernel/perf_cpum_sf.c if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt) event 1889 arch/s390/kernel/perf_cpum_sf.c perf_pmu_disable(event->pmu); event 1891 arch/s390/kernel/perf_cpum_sf.c event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; event 1900 arch/s390/kernel/perf_cpum_sf.c cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); event 1901 arch/s390/kernel/perf_cpum_sf.c if (!SAMPL_DIAG_MODE(&event->hw)) { event 1904 arch/s390/kernel/perf_cpum_sf.c hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); event 1913 arch/s390/kernel/perf_cpum_sf.c if (SAMPL_DIAG_MODE(&event->hw)) { event 1914 arch/s390/kernel/perf_cpum_sf.c aux = perf_aux_output_begin(&cpuhw->handle, event); event 1927 arch/s390/kernel/perf_cpum_sf.c cpuhw->event = event; event 1931 arch/s390/kernel/perf_cpum_sf.c cpumsf_pmu_start(event, PERF_EF_RELOAD); event 1933 arch/s390/kernel/perf_cpum_sf.c perf_event_update_userpage(event); event 1934 arch/s390/kernel/perf_cpum_sf.c perf_pmu_enable(event->pmu); event 1938 arch/s390/kernel/perf_cpum_sf.c static void cpumsf_pmu_del(struct perf_event *event, int flags) event 1942 arch/s390/kernel/perf_cpum_sf.c perf_pmu_disable(event->pmu); event 1943 arch/s390/kernel/perf_cpum_sf.c cpumsf_pmu_stop(event, PERF_EF_UPDATE); event 1948 arch/s390/kernel/perf_cpum_sf.c cpuhw->event = NULL; event 1950 arch/s390/kernel/perf_cpum_sf.c if (SAMPL_DIAG_MODE(&event->hw)) event 1952 arch/s390/kernel/perf_cpum_sf.c perf_event_update_userpage(event); event 1953 arch/s390/kernel/perf_cpum_sf.c perf_pmu_enable(event->pmu); event 1985 arch/s390/kernel/perf_cpum_sf.c PMU_FORMAT_ATTR(event, "config:0-63"); event 2049 arch/s390/kernel/perf_cpum_sf.c if (SAMPL_DIAG_MODE(&cpuhw->event->hw)) event 2052 arch/s390/kernel/perf_cpum_sf.c hw_perf_event_update(cpuhw->event, 0); event 409 arch/s390/mm/cmm.c unsigned long event, void *ptr) event 411 arch/s390/mm/cmm.c switch (event) { event 52 arch/sh/boards/mach-dreamcast/irq.c #define LEVEL(event) (((event) - HW_EVENT_IRQ_BASE) / 32) event 55 arch/sh/boards/mach-dreamcast/irq.c #define EVENT_BIT(event) (((event) - HW_EVENT_IRQ_BASE) & 31) event 197 arch/sh/kernel/cpu/sh4/perf_event.c static int sh7750_event_map(int event) event 199 arch/sh/kernel/cpu/sh4/perf_event.c return sh7750_general_events[event]; event 222 arch/sh/kernel/cpu/sh4a/perf_event.c static int sh4a_event_map(int event) event 224 arch/sh/kernel/cpu/sh4a/perf_event.c return sh4a_general_events[event]; event 81 arch/sh/kernel/perf_event.c static void hw_perf_event_destroy(struct perf_event *event) event 118 arch/sh/kernel/perf_event.c static int __hw_perf_event_init(struct perf_event *event) event 120 arch/sh/kernel/perf_event.c struct perf_event_attr *attr = &event->attr; event 121 arch/sh/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 149 arch/sh/kernel/perf_event.c event->destroy = hw_perf_event_destroy; event 176 
arch/sh/kernel/perf_event.c static void sh_perf_event_update(struct perf_event *event, event 214 arch/sh/kernel/perf_event.c local64_add(delta, &event->count); event 217 arch/sh/kernel/perf_event.c static void sh_pmu_stop(struct perf_event *event, int flags) event 220 arch/sh/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 223 arch/sh/kernel/perf_event.c if (!(event->hw.state & PERF_HES_STOPPED)) { event 226 arch/sh/kernel/perf_event.c event->hw.state |= PERF_HES_STOPPED; event 229 arch/sh/kernel/perf_event.c if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { event 230 arch/sh/kernel/perf_event.c sh_perf_event_update(event, &event->hw, idx); event 231 arch/sh/kernel/perf_event.c event->hw.state |= PERF_HES_UPTODATE; event 235 arch/sh/kernel/perf_event.c static void sh_pmu_start(struct perf_event *event, int flags) event 238 arch/sh/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 245 arch/sh/kernel/perf_event.c WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); event 247 arch/sh/kernel/perf_event.c cpuc->events[idx] = event; event 248 arch/sh/kernel/perf_event.c event->hw.state = 0; event 252 arch/sh/kernel/perf_event.c static void sh_pmu_del(struct perf_event *event, int flags) event 256 arch/sh/kernel/perf_event.c sh_pmu_stop(event, PERF_EF_UPDATE); event 257 arch/sh/kernel/perf_event.c __clear_bit(event->hw.idx, cpuc->used_mask); event 259 arch/sh/kernel/perf_event.c perf_event_update_userpage(event); event 262 arch/sh/kernel/perf_event.c static int sh_pmu_add(struct perf_event *event, int flags) event 265 arch/sh/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 269 arch/sh/kernel/perf_event.c perf_pmu_disable(event->pmu); event 282 arch/sh/kernel/perf_event.c event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; event 284 arch/sh/kernel/perf_event.c sh_pmu_start(event, PERF_EF_RELOAD); event 286 arch/sh/kernel/perf_event.c perf_event_update_userpage(event); event 289 arch/sh/kernel/perf_event.c perf_pmu_enable(event->pmu); event 293 arch/sh/kernel/perf_event.c static void sh_pmu_read(struct perf_event *event) event 295 arch/sh/kernel/perf_event.c sh_perf_event_update(event, &event->hw, event->hw.idx); event 298 arch/sh/kernel/perf_event.c static int sh_pmu_event_init(struct perf_event *event) event 303 arch/sh/kernel/perf_event.c if (has_branch_stack(event)) event 306 arch/sh/kernel/perf_event.c switch (event->attr.type) { event 310 arch/sh/kernel/perf_event.c err = __hw_perf_event_init(event); event 318 arch/sh/kernel/perf_event.c if (event->destroy) event 319 arch/sh/kernel/perf_event.c event->destroy(event); event 16 arch/sparc/include/asm/ebus_dma.h void (*callback)(struct ebus_dma_info *p, int event, void *cookie); event 432 arch/sparc/include/asm/floppy_64.h void sun_pci_fd_dma_callback(struct ebus_dma_info *p, int event, void *cookie) event 23 arch/sparc/include/asm/ldc.h void (*event)(void *arg, int event); event 497 arch/sparc/include/asm/vio.h void vio_link_state_change(struct vio_driver_state *vio, int event); event 1093 arch/sparc/kernel/ds.c static void ds_event(void *arg, int event) event 1102 arch/sparc/kernel/ds.c if (event == LDC_EVENT_UP) { event 1108 arch/sparc/kernel/ds.c if (event == LDC_EVENT_RESET) { event 1114 arch/sparc/kernel/ds.c if (event != LDC_EVENT_DATA_READY) { event 1116 arch/sparc/kernel/ds.c dp->id, event); event 1162 arch/sparc/kernel/ds.c .event = ds_event, event 782 arch/sparc/kernel/ldc.c lp->cfg.event(lp->event_arg, LDC_EVENT_RESET); event 784 arch/sparc/kernel/ldc.c 
lp->cfg.event(lp->event_arg, LDC_EVENT_UP); event 786 arch/sparc/kernel/ldc.c lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY); event 842 arch/sparc/kernel/ldc.c unsigned int event = LDC_EVENT_RESET; event 845 arch/sparc/kernel/ldc.c event = LDC_EVENT_UP; event 847 arch/sparc/kernel/ldc.c event_mask |= event; event 1160 arch/sparc/kernel/ldc.c if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq) event 92 arch/sparc/kernel/perf_event.c struct perf_event *event[MAX_HWEVENTS]; event 863 arch/sparc/kernel/perf_event.c static u64 sparc_perf_event_update(struct perf_event *event, event 881 arch/sparc/kernel/perf_event.c local64_add(delta, &event->count); event 887 arch/sparc/kernel/perf_event.c static int sparc_perf_event_set_period(struct perf_event *event, event 918 arch/sparc/kernel/perf_event.c perf_event_update_userpage(event); event 928 arch/sparc/kernel/perf_event.c struct perf_event *cp = cpuc->event[i]; event 956 arch/sparc/kernel/perf_event.c struct perf_event *cp = cpuc->event[i]; event 977 arch/sparc/kernel/perf_event.c cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; event 980 arch/sparc/kernel/perf_event.c static void sparc_pmu_start(struct perf_event *event, int flags); event 991 arch/sparc/kernel/perf_event.c struct perf_event *cp = cpuc->event[i]; event 1007 arch/sparc/kernel/perf_event.c struct perf_event *cp = cpuc->event[i]; event 1069 arch/sparc/kernel/perf_event.c struct perf_event *event) event 1074 arch/sparc/kernel/perf_event.c if (cpuc->event[i] == event) event 1081 arch/sparc/kernel/perf_event.c static void sparc_pmu_start(struct perf_event *event, int flags) event 1084 arch/sparc/kernel/perf_event.c int idx = active_event_index(cpuc, event); event 1087 arch/sparc/kernel/perf_event.c WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); event 1088 arch/sparc/kernel/perf_event.c sparc_perf_event_set_period(event, &event->hw, idx); event 1091 arch/sparc/kernel/perf_event.c event->hw.state = 0; event 1093 arch/sparc/kernel/perf_event.c sparc_pmu_enable_event(cpuc, &event->hw, idx); event 1095 arch/sparc/kernel/perf_event.c perf_event_update_userpage(event); event 1098 arch/sparc/kernel/perf_event.c static void sparc_pmu_stop(struct perf_event *event, int flags) event 1101 arch/sparc/kernel/perf_event.c int idx = active_event_index(cpuc, event); event 1103 arch/sparc/kernel/perf_event.c if (!(event->hw.state & PERF_HES_STOPPED)) { event 1104 arch/sparc/kernel/perf_event.c sparc_pmu_disable_event(cpuc, &event->hw, idx); event 1105 arch/sparc/kernel/perf_event.c event->hw.state |= PERF_HES_STOPPED; event 1108 arch/sparc/kernel/perf_event.c if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) { event 1109 arch/sparc/kernel/perf_event.c sparc_perf_event_update(event, &event->hw, idx); event 1110 arch/sparc/kernel/perf_event.c event->hw.state |= PERF_HES_UPTODATE; event 1114 arch/sparc/kernel/perf_event.c static void sparc_pmu_del(struct perf_event *event, int _flags) event 1123 arch/sparc/kernel/perf_event.c if (event == cpuc->event[i]) { event 1127 arch/sparc/kernel/perf_event.c sparc_pmu_stop(event, PERF_EF_UPDATE); event 1133 arch/sparc/kernel/perf_event.c cpuc->event[i - 1] = cpuc->event[i]; event 1139 arch/sparc/kernel/perf_event.c perf_event_update_userpage(event); event 1149 arch/sparc/kernel/perf_event.c static void sparc_pmu_read(struct perf_event *event) event 1152 arch/sparc/kernel/perf_event.c int idx = active_event_index(cpuc, event); event 1153 arch/sparc/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 1155 
arch/sparc/kernel/perf_event.c sparc_perf_event_update(event, hwc, idx); event 1227 arch/sparc/kernel/perf_event.c static void hw_perf_event_destroy(struct perf_event *event) event 1317 arch/sparc/kernel/perf_event.c struct perf_event *event; event 1329 arch/sparc/kernel/perf_event.c event = evts[i]; event 1331 arch/sparc/kernel/perf_event.c eu = event->attr.exclude_user; event 1332 arch/sparc/kernel/perf_event.c ek = event->attr.exclude_kernel; event 1333 arch/sparc/kernel/perf_event.c eh = event->attr.exclude_hv; event 1335 arch/sparc/kernel/perf_event.c } else if (event->attr.exclude_user != eu || event 1336 arch/sparc/kernel/perf_event.c event->attr.exclude_kernel != ek || event 1337 arch/sparc/kernel/perf_event.c event->attr.exclude_hv != eh) { event 1349 arch/sparc/kernel/perf_event.c struct perf_event *event; event 1359 arch/sparc/kernel/perf_event.c for_each_sibling_event(event, group) { event 1360 arch/sparc/kernel/perf_event.c if (!is_software_event(event) && event 1361 arch/sparc/kernel/perf_event.c event->state != PERF_EVENT_STATE_OFF) { event 1364 arch/sparc/kernel/perf_event.c evts[n] = event; event 1365 arch/sparc/kernel/perf_event.c events[n] = event->hw.event_base; event 1372 arch/sparc/kernel/perf_event.c static int sparc_pmu_add(struct perf_event *event, int ef_flags) event 1384 arch/sparc/kernel/perf_event.c cpuc->event[n0] = event; event 1385 arch/sparc/kernel/perf_event.c cpuc->events[n0] = event->hw.event_base; event 1388 arch/sparc/kernel/perf_event.c event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; event 1390 arch/sparc/kernel/perf_event.c event->hw.state |= PERF_HES_ARCH; event 1400 arch/sparc/kernel/perf_event.c if (check_excludes(cpuc->event, n0, 1)) event 1402 arch/sparc/kernel/perf_event.c if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) event 1415 arch/sparc/kernel/perf_event.c static int sparc_pmu_event_init(struct perf_event *event) event 1417 arch/sparc/kernel/perf_event.c struct perf_event_attr *attr = &event->attr; event 1419 arch/sparc/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 1429 arch/sparc/kernel/perf_event.c if (has_branch_stack(event)) event 1474 arch/sparc/kernel/perf_event.c if (event->group_leader != event) { event 1475 arch/sparc/kernel/perf_event.c n = collect_events(event->group_leader, event 1482 arch/sparc/kernel/perf_event.c evts[n] = event; event 1496 arch/sparc/kernel/perf_event.c event->destroy = hw_perf_event_destroy; event 1566 arch/sparc/kernel/perf_event.c if (check_excludes(cpuc->event, 0, n)) event 1568 arch/sparc/kernel/perf_event.c if (sparc_check_constraints(cpuc->event, cpuc->events, n)) event 1653 arch/sparc/kernel/perf_event.c struct perf_event *event = cpuc->event[i]; event 1662 arch/sparc/kernel/perf_event.c hwc = &event->hw; event 1663 arch/sparc/kernel/perf_event.c val = sparc_perf_event_update(event, hwc, idx); event 1668 arch/sparc/kernel/perf_event.c if (!sparc_perf_event_set_period(event, hwc, idx)) event 1671 arch/sparc/kernel/perf_event.c if (perf_event_overflow(event, &data, regs)) event 1672 arch/sparc/kernel/perf_event.c sparc_pmu_stop(event, 0); event 78 arch/sparc/kernel/sstate.c static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr) event 101 arch/sparc/kernel/viohs.c void vio_link_state_change(struct vio_driver_state *vio, int event) event 103 arch/sparc/kernel/viohs.c if (event == LDC_EVENT_UP) { event 121 arch/sparc/kernel/viohs.c } else if (event == LDC_EVENT_RESET) { event 737 arch/um/drivers/net_kern.c static int 
uml_inetaddr_event(struct notifier_block *this, unsigned long event, event 752 arch/um/drivers/net_kern.c switch (event) { event 1639 arch/um/drivers/vector_kern.c unsigned long event, event 94 arch/um/os-Linux/irq.c struct epoll_event event; event 97 arch/um/os-Linux/irq.c event.data.ptr = data; event 98 arch/um/os-Linux/irq.c event.events = events | EPOLLET; event 99 arch/um/os-Linux/irq.c result = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event); event 112 arch/um/os-Linux/irq.c struct epoll_event event; event 115 arch/um/os-Linux/irq.c event.data.ptr = data; event 116 arch/um/os-Linux/irq.c event.events = events; event 117 arch/um/os-Linux/irq.c result = epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &event); event 129 arch/um/os-Linux/irq.c struct epoll_event event; event 134 arch/um/os-Linux/irq.c result = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &event); event 96 arch/x86/boot/main.c boot_params.ist_info.event = oreg.ecx; event 324 arch/x86/events/amd/core.c static int amd_core_hw_config(struct perf_event *event) event 326 arch/x86/events/amd/core.c if (event->attr.exclude_host && event->attr.exclude_guest) event 332 arch/x86/events/amd/core.c event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | event 334 arch/x86/events/amd/core.c else if (event->attr.exclude_host) event 335 arch/x86/events/amd/core.c event->hw.config |= AMD64_EVENTSEL_GUESTONLY; event 336 arch/x86/events/amd/core.c else if (event->attr.exclude_guest) event 337 arch/x86/events/amd/core.c event->hw.config |= AMD64_EVENTSEL_HOSTONLY; event 354 arch/x86/events/amd/core.c static int amd_pmu_hw_config(struct perf_event *event) event 359 arch/x86/events/amd/core.c if (event->attr.precise_ip && get_ibs_caps()) event 362 arch/x86/events/amd/core.c if (has_branch_stack(event)) event 365 arch/x86/events/amd/core.c ret = x86_pmu_hw_config(event); event 369 arch/x86/events/amd/core.c if (event->attr.type == PERF_TYPE_RAW) event 370 arch/x86/events/amd/core.c event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; event 372 arch/x86/events/amd/core.c return amd_core_hw_config(event); event 376 arch/x86/events/amd/core.c struct perf_event *event) event 390 arch/x86/events/amd/core.c if (cmpxchg(nb->owners + i, event, NULL) == event) event 432 arch/x86/events/amd/core.c __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, event 435 arch/x86/events/amd/core.c struct hw_perf_event *hwc = &event->hw; event 459 arch/x86/events/amd/core.c old = cmpxchg(nb->owners + idx, NULL, event); event 460 arch/x86/events/amd/core.c else if (nb->owners[idx] == event) event 462 arch/x86/events/amd/core.c old = event; event 466 arch/x86/events/amd/core.c if (old && old != event) event 471 arch/x86/events/amd/core.c cmpxchg(nb->owners + new, event, NULL); event 475 arch/x86/events/amd/core.c if (old == event) event 630 arch/x86/events/amd/core.c static void amd_pmu_disable_event(struct perf_event *event) event 632 arch/x86/events/amd/core.c x86_pmu_disable_event(event); event 644 arch/x86/events/amd/core.c amd_pmu_wait_on_overflow(event->hw.idx); event 699 arch/x86/events/amd/core.c struct perf_event *event) event 704 arch/x86/events/amd/core.c if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) event 707 arch/x86/events/amd/core.c return __amd_get_nb_event_constraints(cpuc, event, NULL); event 711 arch/x86/events/amd/core.c struct perf_event *event) event 713 arch/x86/events/amd/core.c if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)) event 714 arch/x86/events/amd/core.c __amd_put_nb_event_constraints(cpuc, event); event 
717 arch/x86/events/amd/core.c PMU_FORMAT_ATTR(event, "config:0-7,32-35"); event 808 arch/x86/events/amd/core.c struct perf_event *event) event 810 arch/x86/events/amd/core.c struct hw_perf_event *hwc = &event->hw; event 883 arch/x86/events/amd/core.c struct perf_event *event) event 885 arch/x86/events/amd/core.c struct hw_perf_event *hwc = &event->hw; event 895 arch/x86/events/amd/core.c u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) | event 898 arch/x86/events/amd/core.c return x86_event_sysfs_show(page, config, event); event 78 arch/x86/events/amd/ibs.c struct perf_event *event; event 154 arch/x86/events/amd/ibs.c perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width) event 156 arch/x86/events/amd/ibs.c struct hw_perf_event *hwc = &event->hw; event 184 arch/x86/events/amd/ibs.c local64_add(delta, &event->count); event 219 arch/x86/events/amd/ibs.c static int perf_ibs_precise_event(struct perf_event *event, u64 *config) event 221 arch/x86/events/amd/ibs.c switch (event->attr.precise_ip) { event 231 arch/x86/events/amd/ibs.c switch (event->attr.type) { event 233 arch/x86/events/amd/ibs.c switch (event->attr.config) { event 240 arch/x86/events/amd/ibs.c switch (event->attr.config) { event 256 arch/x86/events/amd/ibs.c static int perf_ibs_init(struct perf_event *event) event 258 arch/x86/events/amd/ibs.c struct hw_perf_event *hwc = &event->hw; event 263 arch/x86/events/amd/ibs.c perf_ibs = get_ibs_pmu(event->attr.type); event 265 arch/x86/events/amd/ibs.c config = event->attr.config; event 268 arch/x86/events/amd/ibs.c ret = perf_ibs_precise_event(event, &config); event 273 arch/x86/events/amd/ibs.c if (event->pmu != &perf_ibs->pmu) event 283 arch/x86/events/amd/ibs.c if (!event->attr.sample_freq && hwc->sample_period & 0x0f) event 296 arch/x86/events/amd/ibs.c event->attr.sample_period = max_cnt << 4; event 297 arch/x86/events/amd/ibs.c hwc->sample_period = event->attr.sample_period; event 347 arch/x86/events/amd/ibs.c perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event, event 357 arch/x86/events/amd/ibs.c while (!perf_event_try_update(event, count, 64)) { event 358 arch/x86/events/amd/ibs.c rdmsrl(event->hw.config_base, *config); event 392 arch/x86/events/amd/ibs.c static void perf_ibs_start(struct perf_event *event, int flags) event 394 arch/x86/events/amd/ibs.c struct hw_perf_event *hwc = &event->hw; event 395 arch/x86/events/amd/ibs.c struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); event 414 arch/x86/events/amd/ibs.c perf_event_update_userpage(event); event 417 arch/x86/events/amd/ibs.c static void perf_ibs_stop(struct perf_event *event, int flags) event 419 arch/x86/events/amd/ibs.c struct hw_perf_event *hwc = &event->hw; event 420 arch/x86/events/amd/ibs.c struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); event 467 arch/x86/events/amd/ibs.c perf_ibs_event_update(perf_ibs, event, &config); event 471 arch/x86/events/amd/ibs.c static int perf_ibs_add(struct perf_event *event, int flags) event 473 arch/x86/events/amd/ibs.c struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); event 479 arch/x86/events/amd/ibs.c event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; event 481 arch/x86/events/amd/ibs.c pcpu->event = event; event 484 arch/x86/events/amd/ibs.c perf_ibs_start(event, PERF_EF_RELOAD); event 489 arch/x86/events/amd/ibs.c static void perf_ibs_del(struct perf_event *event, int flags) event 491 arch/x86/events/amd/ibs.c struct perf_ibs *perf_ibs = 
container_of(event->pmu, struct perf_ibs, pmu); event 497 arch/x86/events/amd/ibs.c perf_ibs_stop(event, PERF_EF_UPDATE); event 499 arch/x86/events/amd/ibs.c pcpu->event = NULL; event 501 arch/x86/events/amd/ibs.c perf_event_update_userpage(event); event 504 arch/x86/events/amd/ibs.c static void perf_ibs_read(struct perf_event *event) { } event 572 arch/x86/events/amd/ibs.c struct perf_event *event = pcpu->event; event 596 arch/x86/events/amd/ibs.c if (WARN_ON_ONCE(!event)) event 599 arch/x86/events/amd/ibs.c hwc = &event->hw; event 607 arch/x86/events/amd/ibs.c perf_ibs_event_update(perf_ibs, event, config); event 616 arch/x86/events/amd/ibs.c if (event->attr.sample_type & PERF_SAMPLE_RAW) event 629 arch/x86/events/amd/ibs.c if (event->attr.sample_type & PERF_SAMPLE_RAW) { event 654 arch/x86/events/amd/ibs.c if (event->attr.sample_type & PERF_SAMPLE_RAW) { event 664 arch/x86/events/amd/ibs.c throttle = perf_event_overflow(event, &data, &regs); event 667 arch/x86/events/amd/ibs.c perf_ibs_stop(event, 0); event 678 arch/x86/events/amd/ibs.c perf_event_update_userpage(event); event 85 arch/x86/events/amd/iommu.c const char *event; event 91 arch/x86/events/amd/iommu.c struct amd_iommu_event_desc *event = event 93 arch/x86/events/amd/iommu.c return sprintf(buf, "%s\n", event->event); event 99 arch/x86/events/amd/iommu.c .event = _event, \ event 154 arch/x86/events/amd/iommu.c static int get_next_avail_iommu_bnk_cntr(struct perf_event *event) event 156 arch/x86/events/amd/iommu.c struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu); event 172 arch/x86/events/amd/iommu.c event->hw.iommu_bank = bank; event 173 arch/x86/events/amd/iommu.c event->hw.iommu_cntr = cntr; event 207 arch/x86/events/amd/iommu.c static int perf_iommu_event_init(struct perf_event *event) event 209 arch/x86/events/amd/iommu.c struct hw_perf_event *hwc = &event->hw; event 212 arch/x86/events/amd/iommu.c if (event->attr.type != event->pmu->type) event 220 arch/x86/events/amd/iommu.c if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) event 223 arch/x86/events/amd/iommu.c if (event->cpu < 0) event 227 arch/x86/events/amd/iommu.c hwc->conf = event->attr.config; event 228 arch/x86/events/amd/iommu.c hwc->conf1 = event->attr.config1; event 268 arch/x86/events/amd/iommu.c static void perf_iommu_disable_event(struct perf_event *event) event 270 arch/x86/events/amd/iommu.c struct amd_iommu *iommu = perf_event_2_iommu(event); event 271 arch/x86/events/amd/iommu.c struct hw_perf_event *hwc = &event->hw; event 278 arch/x86/events/amd/iommu.c static void perf_iommu_start(struct perf_event *event, int flags) event 280 arch/x86/events/amd/iommu.c struct hw_perf_event *hwc = &event->hw; event 290 arch/x86/events/amd/iommu.c struct amd_iommu *iommu = perf_event_2_iommu(event); event 296 arch/x86/events/amd/iommu.c perf_iommu_enable_event(event); event 297 arch/x86/events/amd/iommu.c perf_event_update_userpage(event); event 301 arch/x86/events/amd/iommu.c static void perf_iommu_read(struct perf_event *event) event 304 arch/x86/events/amd/iommu.c struct hw_perf_event *hwc = &event->hw; event 305 arch/x86/events/amd/iommu.c struct amd_iommu *iommu = perf_event_2_iommu(event); event 321 arch/x86/events/amd/iommu.c local64_add(delta, &event->count); event 324 arch/x86/events/amd/iommu.c static void perf_iommu_stop(struct perf_event *event, int flags) event 326 arch/x86/events/amd/iommu.c struct hw_perf_event *hwc = &event->hw; event 331 arch/x86/events/amd/iommu.c perf_iommu_disable_event(event);
event 338 arch/x86/events/amd/iommu.c perf_iommu_read(event); event 342 arch/x86/events/amd/iommu.c static int perf_iommu_add(struct perf_event *event, int flags) event 346 arch/x86/events/amd/iommu.c event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; event 349 arch/x86/events/amd/iommu.c retval = get_next_avail_iommu_bnk_cntr(event); event 354 arch/x86/events/amd/iommu.c perf_iommu_start(event, PERF_EF_RELOAD); event 359 arch/x86/events/amd/iommu.c static void perf_iommu_del(struct perf_event *event, int flags) event 361 arch/x86/events/amd/iommu.c struct hw_perf_event *hwc = &event->hw; event 363 arch/x86/events/amd/iommu.c container_of(event->pmu, struct perf_amd_iommu, pmu); event 365 arch/x86/events/amd/iommu.c perf_iommu_stop(event, PERF_EF_UPDATE); event 371 arch/x86/events/amd/iommu.c perf_event_update_userpage(event); event 47 arch/x86/events/amd/power.c static void event_update(struct perf_event *event) event 49 arch/x86/events/amd/power.c struct hw_perf_event *hwc = &event->hw; event 72 arch/x86/events/amd/power.c local64_add(delta, &event->count); event 75 arch/x86/events/amd/power.c static void __pmu_event_start(struct perf_event *event) event 77 arch/x86/events/amd/power.c if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) event 80 arch/x86/events/amd/power.c event->hw.state = 0; event 82 arch/x86/events/amd/power.c rdmsrl(MSR_F15H_PTSC, event->hw.ptsc); event 83 arch/x86/events/amd/power.c rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc); event 86 arch/x86/events/amd/power.c static void pmu_event_start(struct perf_event *event, int mode) event 88 arch/x86/events/amd/power.c __pmu_event_start(event); event 91 arch/x86/events/amd/power.c static void pmu_event_stop(struct perf_event *event, int mode) event 93 arch/x86/events/amd/power.c struct hw_perf_event *hwc = &event->hw; event 105 arch/x86/events/amd/power.c event_update(event); event 110 arch/x86/events/amd/power.c static int pmu_event_add(struct perf_event *event, int mode) event 112 arch/x86/events/amd/power.c struct hw_perf_event *hwc = &event->hw; event 117 arch/x86/events/amd/power.c __pmu_event_start(event); event 122 arch/x86/events/amd/power.c static void pmu_event_del(struct perf_event *event, int flags) event 124 arch/x86/events/amd/power.c pmu_event_stop(event, PERF_EF_UPDATE); event 127 arch/x86/events/amd/power.c static int pmu_event_init(struct perf_event *event) event 129 arch/x86/events/amd/power.c u64 cfg = event->attr.config & AMD_POWER_EVENT_MASK; event 132 arch/x86/events/amd/power.c if (event->attr.type != pmu_class.type) event 136 arch/x86/events/amd/power.c if (event->attr.sample_period) event 145 arch/x86/events/amd/power.c static void pmu_event_read(struct perf_event *event) event 147 arch/x86/events/amd/power.c event_update(event); event 190 arch/x86/events/amd/power.c PMU_FORMAT_ATTR(event, "config:0-7"); event 62 arch/x86/events/amd/uncore.c static bool is_nb_event(struct perf_event *event) event 64 arch/x86/events/amd/uncore.c return event->pmu->type == amd_nb_pmu.type; event 67 arch/x86/events/amd/uncore.c static bool is_llc_event(struct perf_event *event) event 69 arch/x86/events/amd/uncore.c return event->pmu->type == amd_llc_pmu.type; event 72 arch/x86/events/amd/uncore.c static struct amd_uncore *event_to_amd_uncore(struct perf_event *event) event 74 arch/x86/events/amd/uncore.c if (is_nb_event(event) && amd_uncore_nb) event 75 arch/x86/events/amd/uncore.c return *per_cpu_ptr(amd_uncore_nb, event->cpu); event 76 arch/x86/events/amd/uncore.c else if (is_llc_event(event) && 
amd_uncore_llc) event 77 arch/x86/events/amd/uncore.c return *per_cpu_ptr(amd_uncore_llc, event->cpu); event 82 arch/x86/events/amd/uncore.c static void amd_uncore_read(struct perf_event *event) event 84 arch/x86/events/amd/uncore.c struct hw_perf_event *hwc = &event->hw; event 98 arch/x86/events/amd/uncore.c local64_add(delta, &event->count); event 101 arch/x86/events/amd/uncore.c static void amd_uncore_start(struct perf_event *event, int flags) event 103 arch/x86/events/amd/uncore.c struct hw_perf_event *hwc = &event->hw; event 110 arch/x86/events/amd/uncore.c perf_event_update_userpage(event); event 113 arch/x86/events/amd/uncore.c static void amd_uncore_stop(struct perf_event *event, int flags) event 115 arch/x86/events/amd/uncore.c struct hw_perf_event *hwc = &event->hw; event 121 arch/x86/events/amd/uncore.c amd_uncore_read(event); event 126 arch/x86/events/amd/uncore.c static int amd_uncore_add(struct perf_event *event, int flags) event 129 arch/x86/events/amd/uncore.c struct amd_uncore *uncore = event_to_amd_uncore(event); event 130 arch/x86/events/amd/uncore.c struct hw_perf_event *hwc = &event->hw; event 133 arch/x86/events/amd/uncore.c if (hwc->idx != -1 && uncore->events[hwc->idx] == event) event 137 arch/x86/events/amd/uncore.c if (uncore->events[i] == event) { event 146 arch/x86/events/amd/uncore.c if (cmpxchg(&uncore->events[i], NULL, event) == NULL) { event 162 arch/x86/events/amd/uncore.c amd_uncore_start(event, PERF_EF_RELOAD); event 167 arch/x86/events/amd/uncore.c static void amd_uncore_del(struct perf_event *event, int flags) event 170 arch/x86/events/amd/uncore.c struct amd_uncore *uncore = event_to_amd_uncore(event); event 171 arch/x86/events/amd/uncore.c struct hw_perf_event *hwc = &event->hw; event 173 arch/x86/events/amd/uncore.c amd_uncore_stop(event, PERF_EF_UPDATE); event 176 arch/x86/events/amd/uncore.c if (cmpxchg(&uncore->events[i], event, NULL) == event) event 183 arch/x86/events/amd/uncore.c static int amd_uncore_event_init(struct perf_event *event) event 186 arch/x86/events/amd/uncore.c struct hw_perf_event *hwc = &event->hw; event 188 arch/x86/events/amd/uncore.c if (event->attr.type != event->pmu->type) event 199 arch/x86/events/amd/uncore.c hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; event 202 arch/x86/events/amd/uncore.c if (event->cpu < 0) event 209 arch/x86/events/amd/uncore.c if (l3_mask && is_llc_event(event)) { event 210 arch/x86/events/amd/uncore.c int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4); event 213 arch/x86/events/amd/uncore.c thread += cpu_data(event->cpu).apicid & 1; event 219 arch/x86/events/amd/uncore.c uncore = event_to_amd_uncore(event); event 227 arch/x86/events/amd/uncore.c event->cpu = uncore->cpu; event 291 arch/x86/events/amd/uncore.c AMD_FORMAT_ATTR(event, , "config:0-7,32-35"); event 293 arch/x86/events/amd/uncore.c AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60"); event 294 arch/x86/events/amd/uncore.c AMD_FORMAT_ATTR(event, _l3, "config:0-7"); event 68 arch/x86/events/core.c u64 x86_perf_event_update(struct perf_event *event) event 70 arch/x86/events/core.c struct hw_perf_event *hwc = &event->hw; event 105 arch/x86/events/core.c local64_add(delta, &event->count); event 114 arch/x86/events/core.c static int x86_pmu_extra_regs(u64 config, struct perf_event *event) event 119 arch/x86/events/core.c reg = &event->hw.extra_reg; event 125 arch/x86/events/core.c if (er->event != (config & er->config_mask)) event 127 arch/x86/events/core.c if (event->attr.config1 & ~er->valid_mask) event 134 
arch/x86/events/core.c reg->config = event->attr.config1; event 280 arch/x86/events/core.c static void hw_perf_event_destroy(struct perf_event *event) event 286 arch/x86/events/core.c void hw_perf_lbr_event_destroy(struct perf_event *event) event 288 arch/x86/events/core.c hw_perf_event_destroy(event); event 300 arch/x86/events/core.c set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) event 302 arch/x86/events/core.c struct perf_event_attr *attr = &event->attr; event 333 arch/x86/events/core.c return x86_pmu_extra_regs(val, event); event 412 arch/x86/events/core.c int x86_setup_perfctr(struct perf_event *event) event 414 arch/x86/events/core.c struct perf_event_attr *attr = &event->attr; event 415 arch/x86/events/core.c struct hw_perf_event *hwc = &event->hw; event 418 arch/x86/events/core.c if (!is_sampling_event(event)) { event 425 arch/x86/events/core.c return x86_pmu_extra_regs(event->attr.config, event); event 428 arch/x86/events/core.c return set_ext_hw_attr(hwc, event); event 457 arch/x86/events/core.c static inline int precise_br_compat(struct perf_event *event) event 459 arch/x86/events/core.c u64 m = event->attr.branch_sample_type; event 468 arch/x86/events/core.c if (!event->attr.exclude_user) event 471 arch/x86/events/core.c if (!event->attr.exclude_kernel) event 499 arch/x86/events/core.c int x86_pmu_hw_config(struct perf_event *event) event 501 arch/x86/events/core.c if (event->attr.precise_ip) { event 504 arch/x86/events/core.c if (event->attr.precise_ip > precise) event 508 arch/x86/events/core.c if (!is_sampling_event(event)) event 515 arch/x86/events/core.c if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { event 516 arch/x86/events/core.c u64 *br_type = &event->attr.branch_sample_type; event 518 arch/x86/events/core.c if (has_branch_stack(event)) { event 519 arch/x86/events/core.c if (!precise_br_compat(event)) event 534 arch/x86/events/core.c if (!event->attr.exclude_user) event 537 arch/x86/events/core.c if (!event->attr.exclude_kernel) event 542 arch/x86/events/core.c if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) event 543 arch/x86/events/core.c event->attach_state |= PERF_ATTACH_TASK_DATA; event 549 arch/x86/events/core.c event->hw.config = ARCH_PERFMON_EVENTSEL_INT; event 554 arch/x86/events/core.c if (!event->attr.exclude_user) event 555 arch/x86/events/core.c event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; event 556 arch/x86/events/core.c if (!event->attr.exclude_kernel) event 557 arch/x86/events/core.c event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; event 559 arch/x86/events/core.c if (event->attr.type == PERF_TYPE_RAW) event 560 arch/x86/events/core.c event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; event 562 arch/x86/events/core.c if (event->attr.sample_period && x86_pmu.limit_period) { event 563 arch/x86/events/core.c if (x86_pmu.limit_period(event, event->attr.sample_period) > event 564 arch/x86/events/core.c event->attr.sample_period) event 569 arch/x86/events/core.c if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK)) event 575 arch/x86/events/core.c if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) { event 576 arch/x86/events/core.c if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) event 579 arch/x86/events/core.c if (!event->attr.precise_ip) event 583 arch/x86/events/core.c return x86_setup_perfctr(event); event 589 arch/x86/events/core.c static int __x86_pmu_event_init(struct perf_event *event) event 601 arch/x86/events/core.c 
event->destroy = hw_perf_event_destroy; event 603 arch/x86/events/core.c event->hw.idx = -1; event 604 arch/x86/events/core.c event->hw.last_cpu = -1; event 605 arch/x86/events/core.c event->hw.last_tag = ~0ULL; event 608 arch/x86/events/core.c event->hw.extra_reg.idx = EXTRA_REG_NONE; event 609 arch/x86/events/core.c event->hw.branch_reg.idx = EXTRA_REG_NONE; event 611 arch/x86/events/core.c return x86_pmu.hw_config(event); event 679 arch/x86/events/core.c static inline int is_x86_event(struct perf_event *event) event 681 arch/x86/events/core.c return event->pmu == &pmu; event 697 arch/x86/events/core.c int event; /* event index */ event 736 arch/x86/events/core.c sched->state.event = idx; /* start with min weight */ event 776 arch/x86/events/core.c if (sched->state.event >= sched->max_events) event 779 arch/x86/events/core.c c = sched->constraints[sched->state.event]; event 834 arch/x86/events/core.c sched->state.event++; event 835 arch/x86/events/core.c if (sched->state.event >= sched->max_events) { event 837 arch/x86/events/core.c sched->state.event = 0; event 842 arch/x86/events/core.c c = sched->constraints[sched->state.event]; event 864 arch/x86/events/core.c assign[sched.state.event] = sched.state.counter; event 1006 arch/x86/events/core.c struct perf_event *event; event 1044 arch/x86/events/core.c for_each_sibling_event(event, leader) { event 1045 arch/x86/events/core.c if (!is_x86_event(event) || event 1046 arch/x86/events/core.c event->state <= PERF_EVENT_STATE_OFF) event 1052 arch/x86/events/core.c cpuc->event_list[n] = event; event 1058 arch/x86/events/core.c static inline void x86_assign_hw_event(struct perf_event *event, event 1061 arch/x86/events/core.c struct hw_perf_event *hwc = &event->hw; event 1095 arch/x86/events/core.c int x86_perf_rdpmc_index(struct perf_event *event) event 1099 arch/x86/events/core.c return event->hw.event_base_rdpmc; event 1111 arch/x86/events/core.c static void x86_pmu_start(struct perf_event *event, int flags); event 1116 arch/x86/events/core.c struct perf_event *event; event 1135 arch/x86/events/core.c event = cpuc->event_list[i]; event 1136 arch/x86/events/core.c hwc = &event->hw; event 1155 arch/x86/events/core.c x86_pmu_stop(event, PERF_EF_UPDATE); event 1162 arch/x86/events/core.c event = cpuc->event_list[i]; event 1163 arch/x86/events/core.c hwc = &event->hw; event 1166 arch/x86/events/core.c x86_assign_hw_event(event, cpuc, i); event 1173 arch/x86/events/core.c x86_pmu_start(event, PERF_EF_RELOAD); event 1191 arch/x86/events/core.c int x86_perf_event_set_period(struct perf_event *event) event 1193 arch/x86/events/core.c struct hw_perf_event *hwc = &event->hw; event 1227 arch/x86/events/core.c left = x86_pmu.limit_period(event, left); event 1249 arch/x86/events/core.c perf_event_update_userpage(event); event 1254 arch/x86/events/core.c void x86_pmu_enable_event(struct perf_event *event) event 1257 arch/x86/events/core.c __x86_pmu_enable_event(&event->hw, event 1267 arch/x86/events/core.c static int x86_pmu_add(struct perf_event *event, int flags) event 1274 arch/x86/events/core.c hwc = &event->hw; event 1277 arch/x86/events/core.c ret = n = collect_events(cpuc, event, false); event 1319 arch/x86/events/core.c x86_pmu.add(event); event 1327 arch/x86/events/core.c static void x86_pmu_start(struct perf_event *event, int flags) event 1330 arch/x86/events/core.c int idx = event->hw.idx; event 1332 arch/x86/events/core.c if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) event 1339 arch/x86/events/core.c WARN_ON_ONCE(!(event->hw.state & 
PERF_HES_UPTODATE)); event 1340 arch/x86/events/core.c x86_perf_event_set_period(event); event 1343 arch/x86/events/core.c event->hw.state = 0; event 1345 arch/x86/events/core.c cpuc->events[idx] = event; event 1348 arch/x86/events/core.c x86_pmu.enable(event); event 1349 arch/x86/events/core.c perf_event_update_userpage(event); event 1412 arch/x86/events/core.c void x86_pmu_stop(struct perf_event *event, int flags) event 1415 arch/x86/events/core.c struct hw_perf_event *hwc = &event->hw; event 1418 arch/x86/events/core.c x86_pmu.disable(event); event 1430 arch/x86/events/core.c x86_perf_event_update(event); event 1435 arch/x86/events/core.c static void x86_pmu_del(struct perf_event *event, int flags) event 1454 arch/x86/events/core.c x86_pmu_stop(event, PERF_EF_UPDATE); event 1457 arch/x86/events/core.c if (event == cpuc->event_list[i]) event 1469 arch/x86/events/core.c x86_pmu.put_event_constraints(cpuc, event); event 1479 arch/x86/events/core.c perf_event_update_userpage(event); event 1487 arch/x86/events/core.c x86_pmu.del(event); event 1495 arch/x86/events/core.c struct perf_event *event; event 1515 arch/x86/events/core.c event = cpuc->events[idx]; event 1517 arch/x86/events/core.c val = x86_perf_event_update(event); event 1525 arch/x86/events/core.c perf_sample_data_init(&data, 0, event->hw.last_period); event 1527 arch/x86/events/core.c if (!x86_perf_event_set_period(event)) event 1530 arch/x86/events/core.c if (perf_event_overflow(event, &data, regs)) event 1531 arch/x86/events/core.c x86_pmu_stop(event, 0); event 1737 arch/x86/events/core.c ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) event 1751 arch/x86/events/core.c ret = sprintf(page, "event=0x%02llx", event); event 1879 arch/x86/events/core.c static inline void x86_pmu_read(struct perf_event *event) event 1882 arch/x86/events/core.c return x86_pmu.read(event); event 1883 arch/x86/events/core.c x86_perf_event_update(event); event 2010 arch/x86/events/core.c static int validate_event(struct perf_event *event) event 2020 arch/x86/events/core.c c = x86_pmu.get_event_constraints(fake_cpuc, 0, event); event 2026 arch/x86/events/core.c x86_pmu.put_event_constraints(fake_cpuc, event); event 2044 arch/x86/events/core.c static int validate_group(struct perf_event *event) event 2046 arch/x86/events/core.c struct perf_event *leader = event->group_leader; event 2064 arch/x86/events/core.c n = collect_events(fake_cpuc, event, false); event 2076 arch/x86/events/core.c static int x86_pmu_event_init(struct perf_event *event) event 2081 arch/x86/events/core.c switch (event->attr.type) { event 2091 arch/x86/events/core.c err = __x86_pmu_event_init(event); event 2098 arch/x86/events/core.c tmp = event->pmu; event 2099 arch/x86/events/core.c event->pmu = &pmu; event 2101 arch/x86/events/core.c if (event->group_leader != event) event 2102 arch/x86/events/core.c err = validate_group(event); event 2104 arch/x86/events/core.c err = validate_event(event); event 2106 arch/x86/events/core.c event->pmu = tmp; event 2109 arch/x86/events/core.c if (event->destroy) event 2110 arch/x86/events/core.c event->destroy(event); event 2114 arch/x86/events/core.c !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) event 2115 arch/x86/events/core.c event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; event 2125 arch/x86/events/core.c static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm) event 2127 arch/x86/events/core.c if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) event 2146 arch/x86/events/core.c static void 
x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm) event 2149 arch/x86/events/core.c if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) event 2156 arch/x86/events/core.c static int x86_pmu_event_idx(struct perf_event *event) event 2158 arch/x86/events/core.c int idx = event->hw.idx; event 2160 arch/x86/events/core.c if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) event 2263 arch/x86/events/core.c static int x86_pmu_check_period(struct perf_event *event, u64 value) event 2265 arch/x86/events/core.c if (x86_pmu.check_period && x86_pmu.check_period(event, value)) event 2269 arch/x86/events/core.c if (x86_pmu.limit_period(event, value) > value) event 2276 arch/x86/events/core.c static int x86_pmu_aux_output_match(struct perf_event *event) event 2282 arch/x86/events/core.c return x86_pmu.aux_output_match(event); event 2316 arch/x86/events/core.c void arch_perf_update_userpage(struct perf_event *event, event 2325 arch/x86/events/core.c !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); event 2348 arch/x86/events/core.c if (!event->attr.use_clockid) { event 80 arch/x86/events/intel/bts.c bts_buffer_setup_aux(struct perf_event *event, void **pages, event 85 arch/x86/events/intel/bts.c int cpu = event->cpu; event 226 arch/x86/events/intel/bts.c static void __bts_event_start(struct perf_event *event) event 234 arch/x86/events/intel/bts.c if (!event->attr.exclude_kernel) event 236 arch/x86/events/intel/bts.c if (!event->attr.exclude_user) event 254 arch/x86/events/intel/bts.c static void bts_event_start(struct perf_event *event, int flags) event 260 arch/x86/events/intel/bts.c buf = perf_aux_output_begin(&bts->handle, event); event 271 arch/x86/events/intel/bts.c perf_event_itrace_started(event); event 272 arch/x86/events/intel/bts.c event->hw.state = 0; event 274 arch/x86/events/intel/bts.c __bts_event_start(event); event 282 arch/x86/events/intel/bts.c event->hw.state = PERF_HES_STOPPED; event 285 arch/x86/events/intel/bts.c static void __bts_event_stop(struct perf_event *event, int state) event 299 arch/x86/events/intel/bts.c static void bts_event_stop(struct perf_event *event, int flags) event 307 arch/x86/events/intel/bts.c __bts_event_stop(event, BTS_STATE_STOPPED); event 312 arch/x86/events/intel/bts.c event->hw.state |= PERF_HES_STOPPED; event 349 arch/x86/events/intel/bts.c if (bts->handle.event) event 350 arch/x86/events/intel/bts.c __bts_event_start(bts->handle.event); event 364 arch/x86/events/intel/bts.c if (bts->handle.event) event 365 arch/x86/events/intel/bts.c __bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE); event 448 arch/x86/events/intel/bts.c struct perf_event *event = bts->handle.event; event 488 arch/x86/events/intel/bts.c buf = perf_aux_output_begin(&bts->handle, event); event 508 arch/x86/events/intel/bts.c static void bts_event_del(struct perf_event *event, int mode) event 510 arch/x86/events/intel/bts.c bts_event_stop(event, PERF_EF_UPDATE); event 513 arch/x86/events/intel/bts.c static int bts_event_add(struct perf_event *event, int mode) event 517 arch/x86/events/intel/bts.c struct hw_perf_event *hwc = &event->hw; event 519 arch/x86/events/intel/bts.c event->hw.state = PERF_HES_STOPPED; event 524 arch/x86/events/intel/bts.c if (bts->handle.event) event 528 arch/x86/events/intel/bts.c bts_event_start(event, 0); event 536 arch/x86/events/intel/bts.c static void bts_event_destroy(struct perf_event *event) event 542 arch/x86/events/intel/bts.c static int bts_event_init(struct perf_event *event) event 546 arch/x86/events/intel/bts.c if 
(event->attr.type != bts_pmu.type) event 558 arch/x86/events/intel/bts.c if (event->attr.exclude_kernel && perf_paranoid_kernel() && event 571 arch/x86/events/intel/bts.c event->destroy = bts_event_destroy; event 576 arch/x86/events/intel/bts.c static void bts_event_read(struct perf_event *event) event 1977 arch/x86/events/intel/core.c struct perf_event *event = event 1980 arch/x86/events/intel/core.c if (WARN_ON_ONCE(!event)) event 1983 arch/x86/events/intel/core.c intel_pmu_enable_bts(event->hw.config); event 2015 arch/x86/events/intel/core.c struct perf_event *event; event 2041 arch/x86/events/intel/core.c event = cpuc->events[i]; event 2042 arch/x86/events/intel/core.c if (event) event 2043 arch/x86/events/intel/core.c x86_perf_event_update(event); event 2055 arch/x86/events/intel/core.c event = cpuc->events[i]; event 2057 arch/x86/events/intel/core.c if (event) { event 2058 arch/x86/events/intel/core.c x86_perf_event_set_period(event); event 2059 arch/x86/events/intel/core.c __x86_pmu_enable_event(&event->hw, event 2144 arch/x86/events/intel/core.c static inline bool event_is_checkpointed(struct perf_event *event) event 2146 arch/x86/events/intel/core.c return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; event 2149 arch/x86/events/intel/core.c static void intel_pmu_disable_event(struct perf_event *event) event 2151 arch/x86/events/intel/core.c struct hw_perf_event *hwc = &event->hw; event 2167 arch/x86/events/intel/core.c x86_pmu_disable_event(event); event 2173 arch/x86/events/intel/core.c if (unlikely(event->attr.precise_ip)) event 2174 arch/x86/events/intel/core.c intel_pmu_pebs_disable(event); event 2177 arch/x86/events/intel/core.c static void intel_pmu_del_event(struct perf_event *event) event 2179 arch/x86/events/intel/core.c if (needs_branch_stack(event)) event 2180 arch/x86/events/intel/core.c intel_pmu_lbr_del(event); event 2181 arch/x86/events/intel/core.c if (event->attr.precise_ip) event 2182 arch/x86/events/intel/core.c intel_pmu_pebs_del(event); event 2185 arch/x86/events/intel/core.c static void intel_pmu_read_event(struct perf_event *event) event 2187 arch/x86/events/intel/core.c if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) event 2188 arch/x86/events/intel/core.c intel_pmu_auto_reload_read(event); event 2190 arch/x86/events/intel/core.c x86_perf_event_update(event); event 2193 arch/x86/events/intel/core.c static void intel_pmu_enable_fixed(struct perf_event *event) event 2195 arch/x86/events/intel/core.c struct hw_perf_event *hwc = &event->hw; event 2204 arch/x86/events/intel/core.c if (!event->attr.precise_ip) event 2220 arch/x86/events/intel/core.c if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { event 2231 arch/x86/events/intel/core.c static void intel_pmu_enable_event(struct perf_event *event) event 2233 arch/x86/events/intel/core.c struct hw_perf_event *hwc = &event->hw; event 2244 arch/x86/events/intel/core.c if (event->attr.exclude_host) event 2246 arch/x86/events/intel/core.c if (event->attr.exclude_guest) event 2249 arch/x86/events/intel/core.c if (unlikely(event_is_checkpointed(event))) event 2252 arch/x86/events/intel/core.c if (unlikely(event->attr.precise_ip)) event 2253 arch/x86/events/intel/core.c intel_pmu_pebs_enable(event); event 2256 arch/x86/events/intel/core.c intel_pmu_enable_fixed(event); event 2263 arch/x86/events/intel/core.c static void intel_pmu_add_event(struct perf_event *event) event 2265 arch/x86/events/intel/core.c if (event->attr.precise_ip) event 2266 arch/x86/events/intel/core.c intel_pmu_pebs_add(event); 
event 2267 arch/x86/events/intel/core.c if (needs_branch_stack(event)) event 2268 arch/x86/events/intel/core.c intel_pmu_lbr_add(event); event 2275 arch/x86/events/intel/core.c int intel_pmu_save_and_restart(struct perf_event *event) event 2277 arch/x86/events/intel/core.c x86_perf_event_update(event); event 2284 arch/x86/events/intel/core.c if (unlikely(event_is_checkpointed(event))) { event 2286 arch/x86/events/intel/core.c wrmsrl(event->hw.event_base, 0); event 2287 arch/x86/events/intel/core.c local64_set(&event->hw.prev_count, 0); event 2289 arch/x86/events/intel/core.c return x86_perf_event_set_period(event); event 2402 arch/x86/events/intel/core.c struct perf_event *event = cpuc->events[bit]; event 2409 arch/x86/events/intel/core.c if (!intel_pmu_save_and_restart(event)) event 2412 arch/x86/events/intel/core.c perf_sample_data_init(&data, 0, event->hw.last_period); event 2414 arch/x86/events/intel/core.c if (has_branch_stack(event)) event 2417 arch/x86/events/intel/core.c if (perf_event_overflow(event, &data, regs)) event 2418 arch/x86/events/intel/core.c x86_pmu_stop(event, 0); event 2590 arch/x86/events/intel/core.c intel_bts_constraints(struct perf_event *event) event 2592 arch/x86/events/intel/core.c if (unlikely(intel_pmu_has_bts(event))) event 2617 arch/x86/events/intel/core.c static void intel_fixup_er(struct perf_event *event, int idx) event 2619 arch/x86/events/intel/core.c event->hw.extra_reg.idx = idx; event 2622 arch/x86/events/intel/core.c event->hw.config &= ~INTEL_ARCH_EVENT_MASK; event 2623 arch/x86/events/intel/core.c event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event; event 2624 arch/x86/events/intel/core.c event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; event 2626 arch/x86/events/intel/core.c event->hw.config &= ~INTEL_ARCH_EVENT_MASK; event 2627 arch/x86/events/intel/core.c event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event; event 2628 arch/x86/events/intel/core.c event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; event 2641 arch/x86/events/intel/core.c struct perf_event *event, event 2679 arch/x86/events/intel/core.c intel_fixup_er(event, idx); event 2742 arch/x86/events/intel/core.c struct perf_event *event) event 2747 arch/x86/events/intel/core.c xreg = &event->hw.extra_reg; event 2749 arch/x86/events/intel/core.c c = __intel_shared_reg_get_constraints(cpuc, event, xreg); event 2753 arch/x86/events/intel/core.c breg = &event->hw.branch_reg; event 2755 arch/x86/events/intel/core.c d = __intel_shared_reg_get_constraints(cpuc, event, breg); event 2766 arch/x86/events/intel/core.c struct perf_event *event) event 2772 arch/x86/events/intel/core.c if (constraint_match(c, event->hw.config)) { event 2773 arch/x86/events/intel/core.c event->hw.flags |= c->flags; event 2784 arch/x86/events/intel/core.c struct perf_event *event) event 2788 arch/x86/events/intel/core.c c = intel_bts_constraints(event); event 2792 arch/x86/events/intel/core.c c = intel_shared_regs_constraints(cpuc, event); event 2796 arch/x86/events/intel/core.c c = intel_pebs_constraints(event); event 2800 arch/x86/events/intel/core.c return x86_get_event_constraints(cpuc, idx, event); event 2916 arch/x86/events/intel/core.c intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, event 2964 arch/x86/events/intel/core.c if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { event 2965 arch/x86/events/intel/core.c event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; event 3017 arch/x86/events/intel/core.c struct perf_event *event) event 3028 
arch/x86/events/intel/core.c c2 = __intel_get_event_constraints(cpuc, idx, event); event 3037 arch/x86/events/intel/core.c return intel_get_excl_constraints(cpuc, event, idx, c2); event 3043 arch/x86/events/intel/core.c struct perf_event *event) event 3045 arch/x86/events/intel/core.c struct hw_perf_event *hwc = &event->hw; event 3089 arch/x86/events/intel/core.c struct perf_event *event) event 3093 arch/x86/events/intel/core.c reg = &event->hw.extra_reg; event 3097 arch/x86/events/intel/core.c reg = &event->hw.branch_reg; event 3103 arch/x86/events/intel/core.c struct perf_event *event) event 3105 arch/x86/events/intel/core.c intel_put_shared_regs_event_constraints(cpuc, event); event 3113 arch/x86/events/intel/core.c intel_put_excl_constraints(cpuc, event); event 3116 arch/x86/events/intel/core.c static void intel_pebs_aliases_core2(struct perf_event *event) event 3118 arch/x86/events/intel/core.c if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { event 3137 arch/x86/events/intel/core.c u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); event 3139 arch/x86/events/intel/core.c alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); event 3140 arch/x86/events/intel/core.c event->hw.config = alt_config; event 3144 arch/x86/events/intel/core.c static void intel_pebs_aliases_snb(struct perf_event *event) event 3146 arch/x86/events/intel/core.c if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { event 3165 arch/x86/events/intel/core.c u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); event 3167 arch/x86/events/intel/core.c alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); event 3168 arch/x86/events/intel/core.c event->hw.config = alt_config; event 3172 arch/x86/events/intel/core.c static void intel_pebs_aliases_precdist(struct perf_event *event) event 3174 arch/x86/events/intel/core.c if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { event 3189 arch/x86/events/intel/core.c u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16); event 3191 arch/x86/events/intel/core.c alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); event 3192 arch/x86/events/intel/core.c event->hw.config = alt_config; event 3196 arch/x86/events/intel/core.c static void intel_pebs_aliases_ivb(struct perf_event *event) event 3198 arch/x86/events/intel/core.c if (event->attr.precise_ip < 3) event 3199 arch/x86/events/intel/core.c return intel_pebs_aliases_snb(event); event 3200 arch/x86/events/intel/core.c return intel_pebs_aliases_precdist(event); event 3203 arch/x86/events/intel/core.c static void intel_pebs_aliases_skl(struct perf_event *event) event 3205 arch/x86/events/intel/core.c if (event->attr.precise_ip < 3) event 3206 arch/x86/events/intel/core.c return intel_pebs_aliases_core2(event); event 3207 arch/x86/events/intel/core.c return intel_pebs_aliases_precdist(event); event 3210 arch/x86/events/intel/core.c static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) event 3214 arch/x86/events/intel/core.c if (event->attr.use_clockid) event 3216 arch/x86/events/intel/core.c if (!event->attr.exclude_kernel) event 3218 arch/x86/events/intel/core.c if (event->attr.sample_regs_user & ~PEBS_GP_REGS) event 3223 arch/x86/events/intel/core.c static int intel_pmu_bts_config(struct perf_event *event) event 3225 arch/x86/events/intel/core.c struct perf_event_attr *attr = &event->attr; event 3227 arch/x86/events/intel/core.c if (unlikely(intel_pmu_has_bts(event))) { event 3244 arch/x86/events/intel/core.c event->destroy = 
hw_perf_lbr_event_destroy; event 3250 arch/x86/events/intel/core.c static int core_pmu_hw_config(struct perf_event *event) event 3252 arch/x86/events/intel/core.c int ret = x86_pmu_hw_config(event); event 3257 arch/x86/events/intel/core.c return intel_pmu_bts_config(event); event 3260 arch/x86/events/intel/core.c static int intel_pmu_hw_config(struct perf_event *event) event 3262 arch/x86/events/intel/core.c int ret = x86_pmu_hw_config(event); event 3267 arch/x86/events/intel/core.c ret = intel_pmu_bts_config(event); event 3271 arch/x86/events/intel/core.c if (event->attr.precise_ip) { event 3272 arch/x86/events/intel/core.c if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { event 3273 arch/x86/events/intel/core.c event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; event 3274 arch/x86/events/intel/core.c if (!(event->attr.sample_type & event 3275 arch/x86/events/intel/core.c ~intel_pmu_large_pebs_flags(event))) event 3276 arch/x86/events/intel/core.c event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS; event 3279 arch/x86/events/intel/core.c x86_pmu.pebs_aliases(event); event 3281 arch/x86/events/intel/core.c if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) event 3282 arch/x86/events/intel/core.c event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY; event 3285 arch/x86/events/intel/core.c if (needs_branch_stack(event)) { event 3286 arch/x86/events/intel/core.c ret = intel_pmu_setup_lbr_filter(event); event 3293 arch/x86/events/intel/core.c if (!unlikely(intel_pmu_has_bts(event))) { event 3298 arch/x86/events/intel/core.c event->destroy = hw_perf_lbr_event_destroy; event 3302 arch/x86/events/intel/core.c if (event->attr.aux_output) { event 3303 arch/x86/events/intel/core.c if (!event->attr.precise_ip) event 3306 arch/x86/events/intel/core.c event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT; event 3309 arch/x86/events/intel/core.c if (event->attr.type != PERF_TYPE_RAW) event 3312 arch/x86/events/intel/core.c if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) event 3321 arch/x86/events/intel/core.c event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY; event 3374 arch/x86/events/intel/core.c struct perf_event *event = cpuc->events[idx]; event 3383 arch/x86/events/intel/core.c event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; event 3385 arch/x86/events/intel/core.c if (event->attr.exclude_host) event 3387 arch/x86/events/intel/core.c else if (event->attr.exclude_guest) event 3395 arch/x86/events/intel/core.c static void core_pmu_enable_event(struct perf_event *event) event 3397 arch/x86/events/intel/core.c if (!event->attr.exclude_host) event 3398 arch/x86/events/intel/core.c x86_pmu_enable_event(event); event 3417 arch/x86/events/intel/core.c static int hsw_hw_config(struct perf_event *event) event 3419 arch/x86/events/intel/core.c int ret = intel_pmu_hw_config(event); event 3425 arch/x86/events/intel/core.c event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); event 3432 arch/x86/events/intel/core.c if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && event 3433 arch/x86/events/intel/core.c ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || event 3434 arch/x86/events/intel/core.c event->attr.precise_ip > 0)) event 3437 arch/x86/events/intel/core.c if (event_is_checkpointed(event)) { event 3447 arch/x86/events/intel/core.c if (event->attr.sample_period > 0 && event 3448 arch/x86/events/intel/core.c event->attr.sample_period < 0x7fffffff) event 3468 arch/x86/events/intel/core.c struct perf_event *event) event 3472 
arch/x86/events/intel/core.c c = intel_get_event_constraints(cpuc, idx, event); event 3475 arch/x86/events/intel/core.c if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { event 3486 arch/x86/events/intel/core.c struct perf_event *event) event 3492 arch/x86/events/intel/core.c if ((event->attr.precise_ip == 3) && event 3493 arch/x86/events/intel/core.c constraint_match(&fixed0_constraint, event->hw.config)) event 3496 arch/x86/events/intel/core.c return hsw_get_event_constraints(cpuc, idx, event); event 3501 arch/x86/events/intel/core.c struct perf_event *event) event 3506 arch/x86/events/intel/core.c if (event->attr.precise_ip == 3) event 3509 arch/x86/events/intel/core.c c = intel_get_event_constraints(cpuc, idx, event); event 3516 arch/x86/events/intel/core.c struct perf_event *event) event 3524 arch/x86/events/intel/core.c if (event->attr.precise_ip == 3) { event 3526 arch/x86/events/intel/core.c if (constraint_match(&fixed0_constraint, event->hw.config)) event 3532 arch/x86/events/intel/core.c c = intel_get_event_constraints(cpuc, idx, event); event 3541 arch/x86/events/intel/core.c struct perf_event *event) event 3543 arch/x86/events/intel/core.c struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); event 3572 arch/x86/events/intel/core.c static u64 bdw_limit_period(struct perf_event *event, u64 left) event 3574 arch/x86/events/intel/core.c if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == event 3575 arch/x86/events/intel/core.c X86_CONFIG(.event=0xc0, .umask=0x01)) { event 3583 arch/x86/events/intel/core.c static u64 nhm_limit_period(struct perf_event *event, u64 left) event 3588 arch/x86/events/intel/core.c PMU_FORMAT_ATTR(event, "config:0-7" ); event 3610 arch/x86/events/intel/core.c u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); event 3612 arch/x86/events/intel/core.c return x86_event_sysfs_show(page, config, event); event 3822 arch/x86/events/intel/core.c static int intel_pmu_check_period(struct perf_event *event, u64 value) event 3824 arch/x86/events/intel/core.c return intel_pmu_has_bts_period(event, value) ? 
-EINVAL : 0; event 3827 arch/x86/events/intel/core.c static int intel_pmu_aux_output_match(struct perf_event *event) event 3832 arch/x86/events/intel/core.c return is_intel_pt_event(event); event 4638 arch/x86/events/intel/core.c X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); event 4641 arch/x86/events/intel/core.c X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); event 4795 arch/x86/events/intel/core.c X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); event 4798 arch/x86/events/intel/core.c X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); event 4835 arch/x86/events/intel/core.c X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); event 4838 arch/x86/events/intel/core.c X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); event 4876 arch/x86/events/intel/core.c X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); event 5060 arch/x86/events/intel/core.c x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02); event 192 arch/x86/events/intel/cstate.c DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63"); event 271 arch/x86/events/intel/cstate.c DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63"); event 304 arch/x86/events/intel/cstate.c static int cstate_pmu_event_init(struct perf_event *event) event 306 arch/x86/events/intel/cstate.c u64 cfg = event->attr.config; event 309 arch/x86/events/intel/cstate.c if (event->attr.type != event->pmu->type) event 313 arch/x86/events/intel/cstate.c if (event->attr.sample_period) /* no sampling */ event 316 arch/x86/events/intel/cstate.c if (event->cpu < 0) event 319 arch/x86/events/intel/cstate.c if (event->pmu == &cstate_core_pmu) { event 325 arch/x86/events/intel/cstate.c event->hw.event_base = core_msr[cfg].msr; event 327 arch/x86/events/intel/cstate.c topology_sibling_cpumask(event->cpu)); event 328 arch/x86/events/intel/cstate.c } else if (event->pmu == &cstate_pkg_pmu) { event 334 arch/x86/events/intel/cstate.c event->hw.event_base = pkg_msr[cfg].msr; event 336 arch/x86/events/intel/cstate.c topology_die_cpumask(event->cpu)); event 344 arch/x86/events/intel/cstate.c event->cpu = cpu; event 345 arch/x86/events/intel/cstate.c event->hw.config = cfg; event 346 arch/x86/events/intel/cstate.c event->hw.idx = -1; event 350 arch/x86/events/intel/cstate.c static inline u64 cstate_pmu_read_counter(struct perf_event *event) event 354 arch/x86/events/intel/cstate.c rdmsrl(event->hw.event_base, val); event 358 arch/x86/events/intel/cstate.c static void cstate_pmu_event_update(struct perf_event *event) event 360 arch/x86/events/intel/cstate.c struct hw_perf_event *hwc = &event->hw; event 365 arch/x86/events/intel/cstate.c new_raw_count = cstate_pmu_read_counter(event); event 371 arch/x86/events/intel/cstate.c local64_add(new_raw_count - prev_raw_count, &event->count); event 374 arch/x86/events/intel/cstate.c static void cstate_pmu_event_start(struct perf_event *event, int mode) event 376 arch/x86/events/intel/cstate.c local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event)); event 379 arch/x86/events/intel/cstate.c static void cstate_pmu_event_stop(struct perf_event *event, int mode) event 381 arch/x86/events/intel/cstate.c cstate_pmu_event_update(event); event 384 arch/x86/events/intel/cstate.c static void cstate_pmu_event_del(struct perf_event *event, int mode) event 386 arch/x86/events/intel/cstate.c cstate_pmu_event_stop(event, PERF_EF_UPDATE); event 389 arch/x86/events/intel/cstate.c static int cstate_pmu_event_add(struct perf_event *event, int mode) event 392 arch/x86/events/intel/cstate.c 
cstate_pmu_event_start(event, mode); event 137 arch/x86/events/intel/ds.c static u64 precise_datala_hsw(struct perf_event *event, u64 status) event 143 arch/x86/events/intel/ds.c if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) event 145 arch/x86/events/intel/ds.c else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW) event 156 arch/x86/events/intel/ds.c if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) { event 589 arch/x86/events/intel/ds.c struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; event 597 arch/x86/events/intel/ds.c if (!event) event 613 arch/x86/events/intel/ds.c perf_sample_data_init(&data, 0, event->hw.last_period); event 631 arch/x86/events/intel/ds.c if (event->attr.exclude_kernel && event 642 arch/x86/events/intel/ds.c perf_prepare_sample(&header, &data, event, &regs); event 644 arch/x86/events/intel/ds.c if (perf_output_begin(&handle, event, header.size * event 650 arch/x86/events/intel/ds.c if (event->attr.exclude_kernel && event 657 arch/x86/events/intel/ds.c perf_output_sample(&handle, &header, &data, event); event 663 arch/x86/events/intel/ds.c event->hw.interrupts++; event 664 arch/x86/events/intel/ds.c event->pending_kill = POLL_IN; event 872 arch/x86/events/intel/ds.c struct event_constraint *intel_pebs_constraints(struct perf_event *event) event 876 arch/x86/events/intel/ds.c if (!event->attr.precise_ip) event 881 arch/x86/events/intel/ds.c if (constraint_match(c, event->hw.config)) { event 882 arch/x86/events/intel/ds.c event->hw.flags |= c->flags; event 965 arch/x86/events/intel/ds.c static u64 pebs_update_adaptive_cfg(struct perf_event *event) event 967 arch/x86/events/intel/ds.c struct perf_event_attr *attr = &event->attr; event 1013 arch/x86/events/intel/ds.c struct perf_event *event, bool add) event 1015 arch/x86/events/intel/ds.c struct pmu *pmu = event->ctx->pmu; event 1045 arch/x86/events/intel/ds.c pebs_data_cfg = pebs_update_adaptive_cfg(event); event 1059 arch/x86/events/intel/ds.c void intel_pmu_pebs_add(struct perf_event *event) event 1062 arch/x86/events/intel/ds.c struct hw_perf_event *hwc = &event->hw; event 1071 arch/x86/events/intel/ds.c pebs_update_state(needed_cb, cpuc, event, true); event 1074 arch/x86/events/intel/ds.c static void intel_pmu_pebs_via_pt_disable(struct perf_event *event) event 1078 arch/x86/events/intel/ds.c if (!is_pebs_pt(event)) event 1085 arch/x86/events/intel/ds.c static void intel_pmu_pebs_via_pt_enable(struct perf_event *event) event 1088 arch/x86/events/intel/ds.c struct hw_perf_event *hwc = &event->hw; event 1091 arch/x86/events/intel/ds.c if (!is_pebs_pt(event)) event 1094 arch/x86/events/intel/ds.c if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) event 1102 arch/x86/events/intel/ds.c void intel_pmu_pebs_enable(struct perf_event *event) event 1105 arch/x86/events/intel/ds.c struct hw_perf_event *hwc = &event->hw; event 1112 arch/x86/events/intel/ds.c if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) event 1114 arch/x86/events/intel/ds.c else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) event 1140 arch/x86/events/intel/ds.c intel_pmu_pebs_via_pt_enable(event); event 1143 arch/x86/events/intel/ds.c void intel_pmu_pebs_del(struct perf_event *event) event 1146 arch/x86/events/intel/ds.c struct hw_perf_event *hwc = &event->hw; event 1155 arch/x86/events/intel/ds.c pebs_update_state(needed_cb, cpuc, event, false); event 1158 arch/x86/events/intel/ds.c void intel_pmu_pebs_disable(struct perf_event *event) event 1161 arch/x86/events/intel/ds.c struct hw_perf_event *hwc =
&event->hw; event 1169 arch/x86/events/intel/ds.c if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && event 1172 arch/x86/events/intel/ds.c else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) event 1175 arch/x86/events/intel/ds.c intel_pmu_pebs_via_pt_disable(event); event 1324 arch/x86/events/intel/ds.c static u64 get_data_src(struct perf_event *event, u64 aux) event 1327 arch/x86/events/intel/ds.c int fl = event->hw.flags; event 1333 arch/x86/events/intel/ds.c val = precise_datala_hsw(event, aux); event 1339 arch/x86/events/intel/ds.c static void setup_pebs_fixed_sample_data(struct perf_event *event, event 1356 arch/x86/events/intel/ds.c sample_type = event->attr.sample_type; event 1357 arch/x86/events/intel/ds.c fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; event 1359 arch/x86/events/intel/ds.c perf_sample_data_init(data, 0, event->hw.last_period); event 1361 arch/x86/events/intel/ds.c data->period = event->hw.last_period; event 1373 arch/x86/events/intel/ds.c data->data_src.val = get_data_src(event, pebs->dse); event 1382 arch/x86/events/intel/ds.c data->callchain = perf_callchain(event, iregs); event 1423 arch/x86/events/intel/ds.c if (event->attr.precise_ip > 1) { event 1474 arch/x86/events/intel/ds.c event->attr.use_clockid == 0) event 1477 arch/x86/events/intel/ds.c if (has_branch_stack(event)) event 1508 arch/x86/events/intel/ds.c static void setup_pebs_adaptive_sample_data(struct perf_event *event, event 1528 arch/x86/events/intel/ds.c sample_type = event->attr.sample_type; event 1530 arch/x86/events/intel/ds.c perf_sample_data_init(data, 0, event->hw.last_period); event 1531 arch/x86/events/intel/ds.c data->period = event->hw.last_period; event 1533 arch/x86/events/intel/ds.c if (event->attr.use_clockid == 0) event 1543 arch/x86/events/intel/ds.c data->callchain = perf_callchain(event, iregs); event 1564 arch/x86/events/intel/ds.c if (event->attr.precise_ip < 2) { event 1579 arch/x86/events/intel/ds.c data->data_src.val = get_data_src(event, meminfo->aux); event 1602 arch/x86/events/intel/ds.c if (has_branch_stack(event)) { event 1653 arch/x86/events/intel/ds.c void intel_pmu_auto_reload_read(struct perf_event *event) event 1655 arch/x86/events/intel/ds.c WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)); event 1657 arch/x86/events/intel/ds.c perf_pmu_disable(event->pmu); event 1659 arch/x86/events/intel/ds.c perf_pmu_enable(event->pmu); event 1666 arch/x86/events/intel/ds.c intel_pmu_save_and_restart_reload(struct perf_event *event, int count) event 1668 arch/x86/events/intel/ds.c struct hw_perf_event *hwc = &event->hw; event 1714 arch/x86/events/intel/ds.c local64_add(new - old + count * period, &event->count); event 1718 arch/x86/events/intel/ds.c perf_event_update_userpage(event); event 1723 arch/x86/events/intel/ds.c static void __intel_pmu_pebs_event(struct perf_event *event, event 1734 arch/x86/events/intel/ds.c struct hw_perf_event *hwc = &event->hw; event 1747 arch/x86/events/intel/ds.c intel_pmu_save_and_restart_reload(event, count); event 1748 arch/x86/events/intel/ds.c } else if (!intel_pmu_save_and_restart(event)) event 1752 arch/x86/events/intel/ds.c setup_sample(event, iregs, at, &data, regs); event 1753 arch/x86/events/intel/ds.c perf_event_output(event, &data, regs); event 1759 arch/x86/events/intel/ds.c setup_sample(event, iregs, at, &data, regs); event 1765 arch/x86/events/intel/ds.c if (perf_event_overflow(event, &data, regs)) { event 1766 arch/x86/events/intel/ds.c x86_pmu_stop(event, 0); event 1776 arch/x86/events/intel/ds.c struct perf_event 
*event = cpuc->events[0]; /* PMC0 only */ event 1794 arch/x86/events/intel/ds.c WARN_ON_ONCE(!event); event 1796 arch/x86/events/intel/ds.c if (!event->attr.precise_ip) event 1801 arch/x86/events/intel/ds.c if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) event 1802 arch/x86/events/intel/ds.c intel_pmu_save_and_restart_reload(event, 0); event 1806 arch/x86/events/intel/ds.c __intel_pmu_pebs_event(event, iregs, at, top, 0, n, event 1812 arch/x86/events/intel/ds.c struct perf_event *event; event 1823 arch/x86/events/intel/ds.c event = cpuc->events[bit]; event 1824 arch/x86/events/intel/ds.c if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) event 1825 arch/x86/events/intel/ds.c intel_pmu_save_and_restart_reload(event, 0); event 1833 arch/x86/events/intel/ds.c struct perf_event *event; event 1920 arch/x86/events/intel/ds.c event = cpuc->events[bit]; event 1921 arch/x86/events/intel/ds.c if (WARN_ON_ONCE(!event)) event 1924 arch/x86/events/intel/ds.c if (WARN_ON_ONCE(!event->attr.precise_ip)) event 1929 arch/x86/events/intel/ds.c perf_log_lost_samples(event, error[bit]); event 1931 arch/x86/events/intel/ds.c if (perf_event_account_interrupt(event)) event 1932 arch/x86/events/intel/ds.c x86_pmu_stop(event, 0); event 1936 arch/x86/events/intel/ds.c __intel_pmu_pebs_event(event, iregs, base, event 1948 arch/x86/events/intel/ds.c struct perf_event *event; event 1984 arch/x86/events/intel/ds.c event = cpuc->events[bit]; event 1985 arch/x86/events/intel/ds.c if (WARN_ON_ONCE(!event)) event 1988 arch/x86/events/intel/ds.c if (WARN_ON_ONCE(!event->attr.precise_ip)) event 1991 arch/x86/events/intel/ds.c __intel_pmu_pebs_event(event, iregs, base, event 177 arch/x86/events/intel/knc.c knc_pmu_disable_event(struct perf_event *event) event 179 arch/x86/events/intel/knc.c struct hw_perf_event *hwc = &event->hw; event 188 arch/x86/events/intel/knc.c static void knc_pmu_enable_event(struct perf_event *event) event 190 arch/x86/events/intel/knc.c struct hw_perf_event *hwc = &event->hw; event 243 arch/x86/events/intel/knc.c struct perf_event *event = cpuc->events[bit]; event 250 arch/x86/events/intel/knc.c if (!intel_pmu_save_and_restart(event)) event 253 arch/x86/events/intel/knc.c perf_sample_data_init(&data, 0, event->hw.last_period); event 255 arch/x86/events/intel/knc.c if (perf_event_overflow(event, &data, regs)) event 256 arch/x86/events/intel/knc.c x86_pmu_stop(event, 0); event 275 arch/x86/events/intel/knc.c PMU_FORMAT_ATTR(event, "config:0-7" ); event 457 arch/x86/events/intel/lbr.c void intel_pmu_lbr_add(struct perf_event *event) event 465 arch/x86/events/intel/lbr.c cpuc->br_sel = event->hw.branch_reg.reg; event 467 arch/x86/events/intel/lbr.c if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) { event 468 arch/x86/events/intel/lbr.c task_ctx = event->ctx->task_ctx_data; event 491 arch/x86/events/intel/lbr.c if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0) event 493 arch/x86/events/intel/lbr.c perf_sched_cb_inc(event->ctx->pmu); event 494 arch/x86/events/intel/lbr.c if (!cpuc->lbr_users++ && !event->total_time_running) event 498 arch/x86/events/intel/lbr.c void intel_pmu_lbr_del(struct perf_event *event) event 507 arch/x86/events/intel/lbr.c event->ctx->task_ctx_data) { event 508 arch/x86/events/intel/lbr.c task_ctx = event->ctx->task_ctx_data; event 512 arch/x86/events/intel/lbr.c if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0) event 517 arch/x86/events/intel/lbr.c perf_sched_cb_dec(event->ctx->pmu); event 688 arch/x86/events/intel/lbr.c static 
int intel_pmu_setup_sw_lbr_filter(struct perf_event *event) event 690 arch/x86/events/intel/lbr.c u64 br_type = event->attr.branch_sample_type; event 747 arch/x86/events/intel/lbr.c event->hw.branch_reg.reg = mask; event 756 arch/x86/events/intel/lbr.c static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) event 759 arch/x86/events/intel/lbr.c u64 br_type = event->attr.branch_sample_type; event 775 arch/x86/events/intel/lbr.c reg = &event->hw.branch_reg; event 795 arch/x86/events/intel/lbr.c int intel_pmu_setup_lbr_filter(struct perf_event *event) event 808 arch/x86/events/intel/lbr.c ret = intel_pmu_setup_sw_lbr_filter(event); event 816 arch/x86/events/intel/lbr.c ret = intel_pmu_setup_hw_lbr_filter(event); event 508 arch/x86/events/intel/p4.c #define P4_GEN_CACHE_EVENT(event, bit, metric) \ event 509 arch/x86/events/intel/p4.c p4_config_pack_escr(P4_ESCR_EVENT(event) | \ event 510 arch/x86/events/intel/p4.c P4_ESCR_EMASK_BIT(event, bit)) | \ event 512 arch/x86/events/intel/p4.c P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event)))) event 749 arch/x86/events/intel/p4.c static int p4_validate_raw_event(struct perf_event *event) event 754 arch/x86/events/intel/p4.c v = p4_config_unpack_event(event->attr.config); event 784 arch/x86/events/intel/p4.c emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK; event 791 arch/x86/events/intel/p4.c if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) event 794 arch/x86/events/intel/p4.c v = p4_config_unpack_metric(event->attr.config); event 801 arch/x86/events/intel/p4.c static int p4_hw_config(struct perf_event *event) event 814 arch/x86/events/intel/p4.c escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel, event 815 arch/x86/events/intel/p4.c event->attr.exclude_user); event 816 arch/x86/events/intel/p4.c event->hw.config = p4_config_pack_escr(escr) | event 820 arch/x86/events/intel/p4.c event->hw.config = p4_set_ht_bit(event->hw.config); event 822 arch/x86/events/intel/p4.c if (event->attr.type == PERF_TYPE_RAW) { event 829 arch/x86/events/intel/p4.c event->attr.config &= P4_CONFIG_MASK; event 831 arch/x86/events/intel/p4.c rc = p4_validate_raw_event(event); event 839 arch/x86/events/intel/p4.c event->hw.config |= event->attr.config; event 840 arch/x86/events/intel/p4.c bind = p4_config_get_bind(event->attr.config); event 846 arch/x86/events/intel/p4.c event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel)); event 849 arch/x86/events/intel/p4.c rc = x86_setup_perfctr(event); event 903 arch/x86/events/intel/p4.c static inline void p4_pmu_disable_event(struct perf_event *event) event 905 arch/x86/events/intel/p4.c struct hw_perf_event *hwc = &event->hw; event 922 arch/x86/events/intel/p4.c struct perf_event *event = cpuc->events[idx]; event 925 arch/x86/events/intel/p4.c p4_pmu_disable_event(event); event 949 arch/x86/events/intel/p4.c static void p4_pmu_enable_event(struct perf_event *event) event 951 arch/x86/events/intel/p4.c struct hw_perf_event *hwc = &event->hw; event 991 arch/x86/events/intel/p4.c struct perf_event *event = cpuc->events[idx]; event 994 arch/x86/events/intel/p4.c p4_pmu_enable_event(event); event 1002 arch/x86/events/intel/p4.c struct perf_event *event; event 1019 arch/x86/events/intel/p4.c event = cpuc->events[idx]; event 1020 arch/x86/events/intel/p4.c hwc = &event->hw; event 1027 arch/x86/events/intel/p4.c val = x86_perf_event_update(event); event 1036 arch/x86/events/intel/p4.c if (!x86_perf_event_set_period(event)) event 1040 arch/x86/events/intel/p4.c if 
(perf_event_overflow(event, &data, regs)) event 1041 arch/x86/events/intel/p4.c x86_pmu_stop(event, 0); event 159 arch/x86/events/intel/p6.c p6_pmu_disable_event(struct perf_event *event) event 161 arch/x86/events/intel/p6.c struct hw_perf_event *hwc = &event->hw; event 167 arch/x86/events/intel/p6.c static void p6_pmu_enable_event(struct perf_event *event) event 169 arch/x86/events/intel/p6.c struct hw_perf_event *hwc = &event->hw; event 184 arch/x86/events/intel/p6.c PMU_FORMAT_ATTR(event, "config:0-7" ); event 304 arch/x86/events/intel/pt.c static bool pt_event_valid(struct perf_event *event) event 306 arch/x86/events/intel/pt.c u64 config = event->attr.config; event 428 arch/x86/events/intel/pt.c static u64 pt_config_filters(struct perf_event *event) event 430 arch/x86/events/intel/pt.c struct pt_filters *filters = event->hw.addr_filters; event 438 arch/x86/events/intel/pt.c perf_event_addr_filters_sync(event); event 469 arch/x86/events/intel/pt.c static void pt_config(struct perf_event *event) event 475 arch/x86/events/intel/pt.c if (!event->hw.config) { event 476 arch/x86/events/intel/pt.c perf_event_itrace_started(event); event 480 arch/x86/events/intel/pt.c reg = pt_config_filters(event); event 490 arch/x86/events/intel/pt.c if (event->attr.config & BIT(0)) { event 491 arch/x86/events/intel/pt.c reg |= event->attr.config & RTIT_CTL_BRANCH_EN; event 496 arch/x86/events/intel/pt.c if (!event->attr.exclude_kernel) event 498 arch/x86/events/intel/pt.c if (!event->attr.exclude_user) event 501 arch/x86/events/intel/pt.c reg |= (event->attr.config & PT_CONFIG_MASK); event 503 arch/x86/events/intel/pt.c event->hw.config = reg; event 510 arch/x86/events/intel/pt.c static void pt_config_stop(struct perf_event *event) event 513 arch/x86/events/intel/pt.c u64 ctl = READ_ONCE(event->hw.config); event 523 arch/x86/events/intel/pt.c WRITE_ONCE(event->hw.config, ctl); event 1192 arch/x86/events/intel/pt.c pt_buffer_setup_aux(struct perf_event *event, void **pages, event 1196 arch/x86/events/intel/pt.c int node, ret, cpu = event->cpu; event 1237 arch/x86/events/intel/pt.c static int pt_addr_filters_init(struct perf_event *event) event 1240 arch/x86/events/intel/pt.c int node = event->cpu == -1 ? 
-1 : cpu_to_node(event->cpu); event 1249 arch/x86/events/intel/pt.c if (event->parent) event 1250 arch/x86/events/intel/pt.c memcpy(filters, event->parent->hw.addr_filters, event 1253 arch/x86/events/intel/pt.c event->hw.addr_filters = filters; event 1258 arch/x86/events/intel/pt.c static void pt_addr_filters_fini(struct perf_event *event) event 1260 arch/x86/events/intel/pt.c kfree(event->hw.addr_filters); event 1261 arch/x86/events/intel/pt.c event->hw.addr_filters = NULL; event 1298 arch/x86/events/intel/pt.c static void pt_event_addr_filters_sync(struct perf_event *event) event 1300 arch/x86/events/intel/pt.c struct perf_addr_filters_head *head = perf_event_addr_filters(event); event 1302 arch/x86/events/intel/pt.c struct perf_addr_filter_range *fr = event->addr_filter_ranges; event 1303 arch/x86/events/intel/pt.c struct pt_filters *filters = event->hw.addr_filters; event 1338 arch/x86/events/intel/pt.c struct perf_event *event = pt->handle.event; event 1348 arch/x86/events/intel/pt.c if (!event) event 1351 arch/x86/events/intel/pt.c pt_config_stop(event); event 1365 arch/x86/events/intel/pt.c if (!event->hw.state) { event 1368 arch/x86/events/intel/pt.c buf = perf_aux_output_begin(&pt->handle, event); event 1370 arch/x86/events/intel/pt.c event->hw.state = PERF_HES_STOPPED; event 1384 arch/x86/events/intel/pt.c pt_config(event); event 1391 arch/x86/events/intel/pt.c struct perf_event *event; event 1411 arch/x86/events/intel/pt.c event = pt->handle.event; event 1412 arch/x86/events/intel/pt.c if (event) event 1417 arch/x86/events/intel/pt.c if (!on && event) event 1418 arch/x86/events/intel/pt.c wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config); event 1428 arch/x86/events/intel/pt.c static void pt_event_start(struct perf_event *event, int mode) event 1430 arch/x86/events/intel/pt.c struct hw_perf_event *hwc = &event->hw; event 1434 arch/x86/events/intel/pt.c buf = perf_aux_output_begin(&pt->handle, event); event 1449 arch/x86/events/intel/pt.c pt_config(event); event 1459 arch/x86/events/intel/pt.c static void pt_event_stop(struct perf_event *event, int mode) event 1469 arch/x86/events/intel/pt.c pt_config_stop(event); event 1471 arch/x86/events/intel/pt.c if (event->hw.state == PERF_HES_STOPPED) event 1474 arch/x86/events/intel/pt.c event->hw.state = PERF_HES_STOPPED; event 1482 arch/x86/events/intel/pt.c if (WARN_ON_ONCE(pt->handle.event != event)) event 1499 arch/x86/events/intel/pt.c static void pt_event_del(struct perf_event *event, int mode) event 1501 arch/x86/events/intel/pt.c pt_event_stop(event, PERF_EF_UPDATE); event 1504 arch/x86/events/intel/pt.c static int pt_event_add(struct perf_event *event, int mode) event 1507 arch/x86/events/intel/pt.c struct hw_perf_event *hwc = &event->hw; event 1510 arch/x86/events/intel/pt.c if (pt->handle.event) event 1514 arch/x86/events/intel/pt.c pt_event_start(event, 0); event 1528 arch/x86/events/intel/pt.c static void pt_event_read(struct perf_event *event) event 1532 arch/x86/events/intel/pt.c static void pt_event_destroy(struct perf_event *event) event 1534 arch/x86/events/intel/pt.c pt_addr_filters_fini(event); event 1538 arch/x86/events/intel/pt.c static int pt_event_init(struct perf_event *event) event 1540 arch/x86/events/intel/pt.c if (event->attr.type != pt_pmu.pmu.type) event 1543 arch/x86/events/intel/pt.c if (!pt_event_valid(event)) event 1549 arch/x86/events/intel/pt.c if (pt_addr_filters_init(event)) { event 1554 arch/x86/events/intel/pt.c event->destroy = pt_event_destroy; event 1563 arch/x86/events/intel/pt.c if 
(pt->handle.event) event 1564 arch/x86/events/intel/pt.c pt_event_stop(pt->handle.event, PERF_EF_UPDATE); event 1567 arch/x86/events/intel/pt.c int is_intel_pt_event(struct perf_event *event) event 1569 arch/x86/events/intel/pt.c return event->pmu == &pt_pmu.pmu; event 154 arch/x86/events/intel/rapl.c static inline u64 rapl_read_counter(struct perf_event *event) event 157 arch/x86/events/intel/rapl.c rdmsrl(event->hw.event_base, raw); event 176 arch/x86/events/intel/rapl.c static u64 rapl_event_update(struct perf_event *event) event 178 arch/x86/events/intel/rapl.c struct hw_perf_event *hwc = &event->hw; event 185 arch/x86/events/intel/rapl.c rdmsrl(event->hw.event_base, new_raw_count); event 204 arch/x86/events/intel/rapl.c sdelta = rapl_scale(delta, event->hw.config); event 206 arch/x86/events/intel/rapl.c local64_add(sdelta, &event->count); event 220 arch/x86/events/intel/rapl.c struct perf_event *event; event 228 arch/x86/events/intel/rapl.c list_for_each_entry(event, &pmu->active_list, active_entry) event 229 arch/x86/events/intel/rapl.c rapl_event_update(event); event 247 arch/x86/events/intel/rapl.c struct perf_event *event) event 249 arch/x86/events/intel/rapl.c if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) event 252 arch/x86/events/intel/rapl.c event->hw.state = 0; event 254 arch/x86/events/intel/rapl.c list_add_tail(&event->active_entry, &pmu->active_list); event 256 arch/x86/events/intel/rapl.c local64_set(&event->hw.prev_count, rapl_read_counter(event)); event 263 arch/x86/events/intel/rapl.c static void rapl_pmu_event_start(struct perf_event *event, int mode) event 265 arch/x86/events/intel/rapl.c struct rapl_pmu *pmu = event->pmu_private; event 269 arch/x86/events/intel/rapl.c __rapl_pmu_event_start(pmu, event); event 273 arch/x86/events/intel/rapl.c static void rapl_pmu_event_stop(struct perf_event *event, int mode) event 275 arch/x86/events/intel/rapl.c struct rapl_pmu *pmu = event->pmu_private; event 276 arch/x86/events/intel/rapl.c struct hw_perf_event *hwc = &event->hw; event 288 arch/x86/events/intel/rapl.c list_del(&event->active_entry); event 300 arch/x86/events/intel/rapl.c rapl_event_update(event); event 307 arch/x86/events/intel/rapl.c static int rapl_pmu_event_add(struct perf_event *event, int mode) event 309 arch/x86/events/intel/rapl.c struct rapl_pmu *pmu = event->pmu_private; event 310 arch/x86/events/intel/rapl.c struct hw_perf_event *hwc = &event->hw; event 318 arch/x86/events/intel/rapl.c __rapl_pmu_event_start(pmu, event); event 325 arch/x86/events/intel/rapl.c static void rapl_pmu_event_del(struct perf_event *event, int flags) event 327 arch/x86/events/intel/rapl.c rapl_pmu_event_stop(event, PERF_EF_UPDATE); event 330 arch/x86/events/intel/rapl.c static int rapl_pmu_event_init(struct perf_event *event) event 332 arch/x86/events/intel/rapl.c u64 cfg = event->attr.config & RAPL_EVENT_MASK; event 337 arch/x86/events/intel/rapl.c if (event->attr.type != rapl_pmus->pmu.type) event 341 arch/x86/events/intel/rapl.c if (event->attr.config & ~RAPL_EVENT_MASK) event 344 arch/x86/events/intel/rapl.c if (event->cpu < 0) event 347 arch/x86/events/intel/rapl.c event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; event 360 arch/x86/events/intel/rapl.c if (event->attr.sample_period) /* no sampling */ event 364 arch/x86/events/intel/rapl.c pmu = cpu_to_rapl_pmu(event->cpu); event 367 arch/x86/events/intel/rapl.c event->cpu = pmu->cpu; event 368 arch/x86/events/intel/rapl.c event->pmu_private = pmu; event 369 arch/x86/events/intel/rapl.c event->hw.event_base = 
rapl_msrs[bit].msr; event 370 arch/x86/events/intel/rapl.c event->hw.config = cfg; event 371 arch/x86/events/intel/rapl.c event->hw.idx = bit; event 376 arch/x86/events/intel/rapl.c static void rapl_pmu_event_read(struct perf_event *event) event 378 arch/x86/events/intel/rapl.c rapl_event_update(event); event 433 arch/x86/events/intel/rapl.c DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7"); event 98 arch/x86/events/intel/uncore.c struct uncore_event_desc *event = event 100 arch/x86/events/intel/uncore.c return sprintf(buf, "%s", event->config); event 114 arch/x86/events/intel/uncore.c u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) event 118 arch/x86/events/intel/uncore.c rdmsrl(event->hw.event_base, count); event 130 arch/x86/events/intel/uncore.c struct perf_event *event) event 135 arch/x86/events/intel/uncore.c return readq(box->io_addr + event->hw.event_base); event 142 arch/x86/events/intel/uncore.c uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) event 145 arch/x86/events/intel/uncore.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 146 arch/x86/events/intel/uncore.c struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; event 179 arch/x86/events/intel/uncore.c void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) event 182 arch/x86/events/intel/uncore.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 216 arch/x86/events/intel/uncore.c struct perf_event *event, int idx) event 218 arch/x86/events/intel/uncore.c struct hw_perf_event *hwc = &event->hw; event 233 arch/x86/events/intel/uncore.c void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) event 238 arch/x86/events/intel/uncore.c if (uncore_pmc_freerunning(event->hw.idx)) event 239 arch/x86/events/intel/uncore.c shift = 64 - uncore_freerunning_bits(box, event); event 240 arch/x86/events/intel/uncore.c else if (uncore_pmc_fixed(event->hw.idx)) event 247 arch/x86/events/intel/uncore.c prev_count = local64_read(&event->hw.prev_count); event 248 arch/x86/events/intel/uncore.c new_count = uncore_read_counter(box, event); event 249 arch/x86/events/intel/uncore.c if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) event 255 arch/x86/events/intel/uncore.c local64_add(delta, &event->count); event 266 arch/x86/events/intel/uncore.c struct perf_event *event; event 283 arch/x86/events/intel/uncore.c list_for_each_entry(event, &box->active_list, active_entry) { event 284 arch/x86/events/intel/uncore.c uncore_perf_event_update(box, event); event 345 arch/x86/events/intel/uncore.c static int uncore_pmu_event_init(struct perf_event *event); event 347 arch/x86/events/intel/uncore.c static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event) event 349 arch/x86/events/intel/uncore.c return &box->pmu->pmu == event->pmu; event 356 arch/x86/events/intel/uncore.c struct perf_event *event; event 376 arch/x86/events/intel/uncore.c for_each_sibling_event(event, leader) { event 377 arch/x86/events/intel/uncore.c if (!is_box_event(box, event) || event 378 arch/x86/events/intel/uncore.c event->state <= PERF_EVENT_STATE_OFF) event 384 arch/x86/events/intel/uncore.c box->event_list[n] = event; event 391 arch/x86/events/intel/uncore.c uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) event 397 arch/x86/events/intel/uncore.c c = type->ops->get_constraint(box, event); event 402 arch/x86/events/intel/uncore.c if (event->attr.config == 
UNCORE_FIXED_EVENT) event 407 arch/x86/events/intel/uncore.c if ((event->hw.config & c->cmask) == c->code) event 416 arch/x86/events/intel/uncore.c struct perf_event *event) event 419 arch/x86/events/intel/uncore.c box->pmu->type->ops->put_constraint(box, event); event 471 arch/x86/events/intel/uncore.c void uncore_pmu_event_start(struct perf_event *event, int flags) event 473 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event); event 474 arch/x86/events/intel/uncore.c int idx = event->hw.idx; event 485 arch/x86/events/intel/uncore.c if (uncore_pmc_freerunning(event->hw.idx)) { event 486 arch/x86/events/intel/uncore.c list_add_tail(&event->active_entry, &box->active_list); event 487 arch/x86/events/intel/uncore.c local64_set(&event->hw.prev_count, event 488 arch/x86/events/intel/uncore.c uncore_read_counter(box, event)); event 494 arch/x86/events/intel/uncore.c if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) event 497 arch/x86/events/intel/uncore.c event->hw.state = 0; event 498 arch/x86/events/intel/uncore.c box->events[idx] = event; event 502 arch/x86/events/intel/uncore.c local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); event 503 arch/x86/events/intel/uncore.c uncore_enable_event(box, event); event 509 arch/x86/events/intel/uncore.c void uncore_pmu_event_stop(struct perf_event *event, int flags) event 511 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event); event 512 arch/x86/events/intel/uncore.c struct hw_perf_event *hwc = &event->hw; event 516 arch/x86/events/intel/uncore.c list_del(&event->active_entry); event 519 arch/x86/events/intel/uncore.c uncore_perf_event_update(box, event); event 524 arch/x86/events/intel/uncore.c uncore_disable_event(box, event); event 539 arch/x86/events/intel/uncore.c uncore_perf_event_update(box, event); event 544 arch/x86/events/intel/uncore.c int uncore_pmu_event_add(struct perf_event *event, int flags) event 546 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event); event 547 arch/x86/events/intel/uncore.c struct hw_perf_event *hwc = &event->hw; event 561 arch/x86/events/intel/uncore.c uncore_pmu_event_start(event, 0); event 565 arch/x86/events/intel/uncore.c ret = n = uncore_collect_events(box, event, false); event 579 arch/x86/events/intel/uncore.c event = box->event_list[i]; event 580 arch/x86/events/intel/uncore.c hwc = &event->hw; event 592 arch/x86/events/intel/uncore.c uncore_pmu_event_stop(event, PERF_EF_UPDATE); event 597 arch/x86/events/intel/uncore.c event = box->event_list[i]; event 598 arch/x86/events/intel/uncore.c hwc = &event->hw; event 602 arch/x86/events/intel/uncore.c uncore_assign_hw_event(box, event, assign[i]); event 609 arch/x86/events/intel/uncore.c uncore_pmu_event_start(event, 0); event 616 arch/x86/events/intel/uncore.c void uncore_pmu_event_del(struct perf_event *event, int flags) event 618 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event); event 621 arch/x86/events/intel/uncore.c uncore_pmu_event_stop(event, PERF_EF_UPDATE); event 628 arch/x86/events/intel/uncore.c if (uncore_pmc_freerunning(event->hw.idx)) event 632 arch/x86/events/intel/uncore.c if (event == box->event_list[i]) { event 633 arch/x86/events/intel/uncore.c uncore_put_event_constraint(box, event); event 643 arch/x86/events/intel/uncore.c event->hw.idx = -1; event 644 arch/x86/events/intel/uncore.c event->hw.last_tag = ~0ULL; event 647 arch/x86/events/intel/uncore.c void 
uncore_pmu_event_read(struct perf_event *event) event 649 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event); event 650 arch/x86/events/intel/uncore.c uncore_perf_event_update(box, event); event 658 arch/x86/events/intel/uncore.c struct perf_event *event) event 660 arch/x86/events/intel/uncore.c struct perf_event *leader = event->group_leader; event 665 arch/x86/events/intel/uncore.c if (uncore_pmc_freerunning(event->hw.idx)) event 684 arch/x86/events/intel/uncore.c n = uncore_collect_events(fake_box, event, false); event 696 arch/x86/events/intel/uncore.c static int uncore_pmu_event_init(struct perf_event *event) event 700 arch/x86/events/intel/uncore.c struct hw_perf_event *hwc = &event->hw; event 703 arch/x86/events/intel/uncore.c if (event->attr.type != event->pmu->type) event 706 arch/x86/events/intel/uncore.c pmu = uncore_event_to_pmu(event); event 719 arch/x86/events/intel/uncore.c if (event->cpu < 0) event 721 arch/x86/events/intel/uncore.c box = uncore_pmu_to_box(pmu, event->cpu); event 724 arch/x86/events/intel/uncore.c event->cpu = box->cpu; event 725 arch/x86/events/intel/uncore.c event->pmu_private = box; event 727 arch/x86/events/intel/uncore.c event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; event 729 arch/x86/events/intel/uncore.c event->hw.idx = -1; event 730 arch/x86/events/intel/uncore.c event->hw.last_tag = ~0ULL; event 731 arch/x86/events/intel/uncore.c event->hw.extra_reg.idx = EXTRA_REG_NONE; event 732 arch/x86/events/intel/uncore.c event->hw.branch_reg.idx = EXTRA_REG_NONE; event 734 arch/x86/events/intel/uncore.c if (event->attr.config == UNCORE_FIXED_EVENT) { event 747 arch/x86/events/intel/uncore.c } else if (is_freerunning_event(event)) { event 748 arch/x86/events/intel/uncore.c hwc->config = event->attr.config; event 749 arch/x86/events/intel/uncore.c if (!check_valid_freerunning_event(box, event)) event 751 arch/x86/events/intel/uncore.c event->hw.idx = UNCORE_PMC_IDX_FREERUNNING; event 758 arch/x86/events/intel/uncore.c event->hw.event_base = uncore_freerunning_counter(box, event); event 760 arch/x86/events/intel/uncore.c hwc->config = event->attr.config & event 763 arch/x86/events/intel/uncore.c ret = pmu->type->ops->hw_config(box, event); event 769 arch/x86/events/intel/uncore.c if (event->group_leader != event) event 770 arch/x86/events/intel/uncore.c ret = uncore_validate_group(pmu, event); event 305 arch/x86/events/intel/uncore.h struct perf_event *event) event 307 arch/x86/events/intel/uncore.h unsigned int type = uncore_freerunning_type(event->hw.config); event 308 arch/x86/events/intel/uncore.h unsigned int idx = uncore_freerunning_idx(event->hw.config); event 390 arch/x86/events/intel/uncore.h struct perf_event *event) event 392 arch/x86/events/intel/uncore.h unsigned int type = uncore_freerunning_type(event->hw.config); event 398 arch/x86/events/intel/uncore.h struct perf_event *event) event 400 arch/x86/events/intel/uncore.h unsigned int type = uncore_freerunning_type(event->hw.config); event 406 arch/x86/events/intel/uncore.h struct perf_event *event) event 412 arch/x86/events/intel/uncore.h struct perf_event *event) event 414 arch/x86/events/intel/uncore.h unsigned int type = uncore_freerunning_type(event->hw.config); event 415 arch/x86/events/intel/uncore.h unsigned int idx = uncore_freerunning_idx(event->hw.config); event 417 arch/x86/events/intel/uncore.h return (type < uncore_num_freerunning_types(box, event)) && event 418 arch/x86/events/intel/uncore.h (idx < uncore_num_freerunning(box, event)); event 
426 arch/x86/events/intel/uncore.h static inline bool is_freerunning_event(struct perf_event *event) event 428 arch/x86/events/intel/uncore.h u64 cfg = event->attr.config; event 436 arch/x86/events/intel/uncore.h struct perf_event *event) event 438 arch/x86/events/intel/uncore.h if (is_freerunning_event(event)) event 445 arch/x86/events/intel/uncore.h struct perf_event *event) event 447 arch/x86/events/intel/uncore.h box->pmu->type->ops->disable_event(box, event); event 451 arch/x86/events/intel/uncore.h struct perf_event *event) event 453 arch/x86/events/intel/uncore.h box->pmu->type->ops->enable_event(box, event); event 457 arch/x86/events/intel/uncore.h struct perf_event *event) event 459 arch/x86/events/intel/uncore.h return box->pmu->type->ops->read_counter(box, event); event 483 arch/x86/events/intel/uncore.h static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) event 485 arch/x86/events/intel/uncore.h return container_of(event->pmu, struct intel_uncore_pmu, pmu); event 488 arch/x86/events/intel/uncore.h static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) event 490 arch/x86/events/intel/uncore.h return event->pmu_private; event 494 arch/x86/events/intel/uncore.h u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event); event 497 arch/x86/events/intel/uncore.h struct perf_event *event); event 500 arch/x86/events/intel/uncore.h void uncore_pmu_event_start(struct perf_event *event, int flags); event 501 arch/x86/events/intel/uncore.h void uncore_pmu_event_stop(struct perf_event *event, int flags); event 502 arch/x86/events/intel/uncore.h int uncore_pmu_event_add(struct perf_event *event, int flags); event 503 arch/x86/events/intel/uncore.h void uncore_pmu_event_del(struct perf_event *event, int flags); event 504 arch/x86/events/intel/uncore.h void uncore_pmu_event_read(struct perf_event *event); event 505 arch/x86/events/intel/uncore.h void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event); event 507 arch/x86/events/intel/uncore.h uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event); event 508 arch/x86/events/intel/uncore.h void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event); event 190 arch/x86/events/intel/uncore_nhmex.c DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); event 191 arch/x86/events/intel/uncore_nhmex.c DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); event 240 arch/x86/events/intel/uncore_nhmex.c static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) event 242 arch/x86/events/intel/uncore_nhmex.c wrmsrl(event->hw.config_base, 0); event 245 arch/x86/events/intel/uncore_nhmex.c static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 247 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 350 arch/x86/events/intel/uncore_nhmex.c static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 352 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 372 arch/x86/events/intel/uncore_nhmex.c reg1->config = event->attr.config1; event 373 arch/x86/events/intel/uncore_nhmex.c reg2->config = event->attr.config2; event 377 arch/x86/events/intel/uncore_nhmex.c static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 379 arch/x86/events/intel/uncore_nhmex.c struct 
hw_perf_event *hwc = &event->hw; event 441 arch/x86/events/intel/uncore_nhmex.c static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 443 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 457 arch/x86/events/intel/uncore_nhmex.c reg1->config = event->attr.config1; event 458 arch/x86/events/intel/uncore_nhmex.c reg2->config = event->attr.config2; event 462 arch/x86/events/intel/uncore_nhmex.c static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 464 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 629 arch/x86/events/intel/uncore_nhmex.c static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) event 631 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 669 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) event 671 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 672 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; event 706 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_alter_er(event, idx[0], true); event 726 arch/x86/events/intel/uncore_nhmex.c config1 = nhmex_mbox_alter_er(event, idx[0], false); event 738 arch/x86/events/intel/uncore_nhmex.c static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) event 740 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 741 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; event 762 arch/x86/events/intel/uncore_nhmex.c return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; event 765 arch/x86/events/intel/uncore_nhmex.c static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 768 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 769 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; event 779 arch/x86/events/intel/uncore_nhmex.c if (er->event != (event->hw.config & er->config_mask)) event 781 arch/x86/events/intel/uncore_nhmex.c if (event->attr.config1 & ~er->valid_mask) event 798 arch/x86/events/intel/uncore_nhmex.c reg1->config = event->attr.config1; event 807 arch/x86/events/intel/uncore_nhmex.c if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) event 808 arch/x86/events/intel/uncore_nhmex.c reg2->config = event->attr.config2; event 835 arch/x86/events/intel/uncore_nhmex.c static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 837 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 944 arch/x86/events/intel/uncore_nhmex.c static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) event 946 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 978 arch/x86/events/intel/uncore_nhmex.c nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) event 980 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 1054 arch/x86/events/intel/uncore_nhmex.c nhmex_rbox_alter_er(box, event); event 1062 arch/x86/events/intel/uncore_nhmex.c static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) 
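The uncore entries above (uncore_pmu_event_init rejecting events whose attr.type does not match the PMU's dynamic type or whose cpu is negative, plus the DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7") format attribute) describe how such counting-only PMUs are meant to be driven from user space. As a minimal illustrative sketch only, and not code taken from the indexed sources: the PMU sysfs name "uncore_imc_0", the event code 0x01, and the choice of CPU 0 below are assumptions used for illustration; the structure simply mirrors the checks visible in the listing (dynamic type id read from sysfs, per-package binding with pid = -1 and an explicit CPU, and sample_period left at 0 because these PMUs reject sampling).

```c
/*
 * Illustrative sketch (not from the indexed sources): open a counting-only
 * event on an uncore-style dynamic PMU via perf_event_open(2).
 * Assumed for illustration: PMU name "uncore_imc_0", event code 0x01, CPU 0.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* glibc has no wrapper for perf_event_open, so call the syscall directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	uint64_t count;
	FILE *f;
	int fd;

	/* Dynamic PMUs export their type id in sysfs; this path is an example. */
	f = fopen("/sys/bus/event_source/devices/uncore_imc_0/type", "r");
	if (!f) {
		perror("open pmu type");
		return 1;
	}
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		fprintf(stderr, "could not parse PMU type id\n");
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;	/* must match event->pmu->type in event_init */
	attr.config = 0x01;	/* the "event" format field, config:0-7 (assumed code) */
	attr.sample_period = 0;	/* counting only: sampling is rejected by these PMUs */

	/* Package-scoped counter: pid = -1, bind to an explicit CPU (cpu >= 0). */
	fd = perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}
```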
event 1065 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 1086 arch/x86/events/intel/uncore_nhmex.c static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 1088 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 1089 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 1090 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; event 1093 arch/x86/events/intel/uncore_nhmex.c idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> event 1099 arch/x86/events/intel/uncore_nhmex.c reg1->config = event->attr.config1; event 1104 arch/x86/events/intel/uncore_nhmex.c hwc->config |= event->attr.config & (~0ULL << 32); event 1105 arch/x86/events/intel/uncore_nhmex.c reg2->config = event->attr.config2; event 1111 arch/x86/events/intel/uncore_nhmex.c static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 1113 arch/x86/events/intel/uncore_nhmex.c struct hw_perf_event *hwc = &event->hw; event 113 arch/x86/events/intel/uncore_snb.c DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); event 121 arch/x86/events/intel/uncore_snb.c static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 123 arch/x86/events/intel/uncore_snb.c struct hw_perf_event *hwc = &event->hw; event 131 arch/x86/events/intel/uncore_snb.c static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) event 133 arch/x86/events/intel/uncore_snb.c wrmsrl(event->hw.config_base, 0); event 438 arch/x86/events/intel/uncore_snb.c static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 441 arch/x86/events/intel/uncore_snb.c static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event) event 448 arch/x86/events/intel/uncore_snb.c static int snb_uncore_imc_event_init(struct perf_event *event) event 452 arch/x86/events/intel/uncore_snb.c struct hw_perf_event *hwc = &event->hw; event 453 arch/x86/events/intel/uncore_snb.c u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK; event 456 arch/x86/events/intel/uncore_snb.c if (event->attr.type != event->pmu->type) event 459 arch/x86/events/intel/uncore_snb.c pmu = uncore_event_to_pmu(event); event 469 arch/x86/events/intel/uncore_snb.c if (event->attr.sample_period) /* no sampling */ event 476 arch/x86/events/intel/uncore_snb.c if (event->cpu < 0) event 480 arch/x86/events/intel/uncore_snb.c if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK) event 483 arch/x86/events/intel/uncore_snb.c box = uncore_pmu_to_box(pmu, event->cpu); event 487 arch/x86/events/intel/uncore_snb.c event->cpu = box->cpu; event 488 arch/x86/events/intel/uncore_snb.c event->pmu_private = box; event 490 arch/x86/events/intel/uncore_snb.c event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; event 492 arch/x86/events/intel/uncore_snb.c event->hw.idx = -1; event 493 arch/x86/events/intel/uncore_snb.c event->hw.last_tag = ~0ULL; event 494 arch/x86/events/intel/uncore_snb.c event->hw.extra_reg.idx = EXTRA_REG_NONE; event 495 arch/x86/events/intel/uncore_snb.c event->hw.branch_reg.idx = EXTRA_REG_NONE; event 513 arch/x86/events/intel/uncore_snb.c event->hw.event_base = base; event 514 arch/x86/events/intel/uncore_snb.c event->hw.idx = idx; event 517 arch/x86/events/intel/uncore_snb.c event->hw.config = ((cfg - 1) << 
8) | 0x10ff; event 524 arch/x86/events/intel/uncore_snb.c static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 933 arch/x86/events/intel/uncore_snb.c static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 935 arch/x86/events/intel/uncore_snb.c struct hw_perf_event *hwc = &event->hw; event 102 arch/x86/events/intel/uncore_snbep.c .event = (e), \ event 385 arch/x86/events/intel/uncore_snbep.c DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); event 386 arch/x86/events/intel/uncore_snbep.c DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6"); event 387 arch/x86/events/intel/uncore_snbep.c DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); event 487 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 490 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 495 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event) event 498 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 503 arch/x86/events/intel/uncore_snbep.c static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event) event 506 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 549 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 551 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 561 arch/x86/events/intel/uncore_snbep.c struct perf_event *event) event 563 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 841 arch/x86/events/intel/uncore_snbep.c static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) event 843 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 858 arch/x86/events/intel/uncore_snbep.c __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event, event 861 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 921 arch/x86/events/intel/uncore_snbep.c snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) event 923 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask); event 926 arch/x86/events/intel/uncore_snbep.c static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 928 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 933 arch/x86/events/intel/uncore_snbep.c if (er->event != (event->hw.config & er->config_mask)) event 941 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx); event 970 arch/x86/events/intel/uncore_snbep.c static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify) event 972 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 990 arch/x86/events/intel/uncore_snbep.c snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event) event 992 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 1017 arch/x86/events/intel/uncore_snbep.c config1 = snbep_pcu_alter_er(event, 
idx, false); event 1025 arch/x86/events/intel/uncore_snbep.c snbep_pcu_alter_er(event, idx, true); event 1031 arch/x86/events/intel/uncore_snbep.c static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event) event 1033 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 1043 arch/x86/events/intel/uncore_snbep.c static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 1045 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 1052 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8)); event 1099 arch/x86/events/intel/uncore_snbep.c static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 1101 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 1108 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1; event 1110 arch/x86/events/intel/uncore_snbep.c reg2->config = event->attr.config2; event 1115 arch/x86/events/intel/uncore_snbep.c static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 1118 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 1605 arch/x86/events/intel/uncore_snbep.c ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) event 1607 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask); event 1610 arch/x86/events/intel/uncore_snbep.c static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 1612 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 1617 arch/x86/events/intel/uncore_snbep.c if (er->event != (event->hw.config & er->config_mask)) event 1625 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx); event 1631 arch/x86/events/intel/uncore_snbep.c static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 1633 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 1732 arch/x86/events/intel/uncore_snbep.c static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event) event 1735 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 1741 arch/x86/events/intel/uncore_snbep.c static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event) event 1744 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 1749 arch/x86/events/intel/uncore_snbep.c static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) event 1752 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 2032 arch/x86/events/intel/uncore_snbep.c knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event) event 2034 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask); event 2038 arch/x86/events/intel/uncore_snbep.c struct perf_event *event) event 2040 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 2045 arch/x86/events/intel/uncore_snbep.c if (er->event != (event->hw.config & er->config_mask)) event 2053 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & 
knl_cha_filter_mask(idx); event 2064 arch/x86/events/intel/uncore_snbep.c struct perf_event *event); event 2146 arch/x86/events/intel/uncore_snbep.c struct perf_event *event) event 2149 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 2151 arch/x86/events/intel/uncore_snbep.c if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK) event 2460 arch/x86/events/intel/uncore_snbep.c static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 2462 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 2464 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK; event 2588 arch/x86/events/intel/uncore_snbep.c hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) event 2590 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask); event 2593 arch/x86/events/intel/uncore_snbep.c static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 2595 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 2600 arch/x86/events/intel/uncore_snbep.c if (er->event != (event->hw.config & er->config_mask)) event 2608 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx); event 2615 arch/x86/events/intel/uncore_snbep.c struct perf_event *event) event 2617 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 2710 arch/x86/events/intel/uncore_snbep.c static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 2712 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 2719 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & (0xff << reg1->idx); event 2806 arch/x86/events/intel/uncore_snbep.c static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) event 2809 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 3481 arch/x86/events/intel/uncore_snbep.c skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event) event 3483 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask); event 3486 arch/x86/events/intel/uncore_snbep.c static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 3488 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 3493 arch/x86/events/intel/uncore_snbep.c if (er->event != (event->hw.config & er->config_mask)) event 3501 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & skx_cha_filter_mask(idx); event 3562 arch/x86/events/intel/uncore_snbep.c struct perf_event *event) event 3564 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 4067 arch/x86/events/intel/uncore_snbep.c static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 4069 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; event 4073 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID; event 4080 arch/x86/events/intel/uncore_snbep.c struct perf_event *event) event 4082 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 4175 
arch/x86/events/intel/uncore_snbep.c static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) event 4177 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 4184 arch/x86/events/intel/uncore_snbep.c reg1->config = event->attr.config1 & (0xff << reg1->idx); event 4433 arch/x86/events/intel/uncore_snbep.c struct perf_event *event) event 4435 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 4445 arch/x86/events/intel/uncore_snbep.c struct perf_event *event) event 4447 arch/x86/events/intel/uncore_snbep.c struct hw_perf_event *hwc = &event->hw; event 162 arch/x86/events/msr.c PMU_FORMAT_ATTR(event, "config:0-63"); event 189 arch/x86/events/msr.c static int msr_event_init(struct perf_event *event) event 191 arch/x86/events/msr.c u64 cfg = event->attr.config; event 193 arch/x86/events/msr.c if (event->attr.type != event->pmu->type) event 197 arch/x86/events/msr.c if (event->attr.sample_period) /* no sampling */ event 208 arch/x86/events/msr.c event->hw.idx = -1; event 209 arch/x86/events/msr.c event->hw.event_base = msr[cfg].msr; event 210 arch/x86/events/msr.c event->hw.config = cfg; event 215 arch/x86/events/msr.c static inline u64 msr_read_counter(struct perf_event *event) event 219 arch/x86/events/msr.c if (event->hw.event_base) event 220 arch/x86/events/msr.c rdmsrl(event->hw.event_base, now); event 227 arch/x86/events/msr.c static void msr_event_update(struct perf_event *event) event 234 arch/x86/events/msr.c prev = local64_read(&event->hw.prev_count); event 235 arch/x86/events/msr.c now = msr_read_counter(event); event 237 arch/x86/events/msr.c if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev) event 241 arch/x86/events/msr.c if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) { event 243 arch/x86/events/msr.c local64_add(delta, &event->count); event 244 arch/x86/events/msr.c } else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) { event 247 arch/x86/events/msr.c local64_set(&event->count, now); event 249 arch/x86/events/msr.c local64_add(delta, &event->count); event 253 arch/x86/events/msr.c static void msr_event_start(struct perf_event *event, int flags) event 255 arch/x86/events/msr.c u64 now = msr_read_counter(event); event 257 arch/x86/events/msr.c local64_set(&event->hw.prev_count, now); event 260 arch/x86/events/msr.c static void msr_event_stop(struct perf_event *event, int flags) event 262 arch/x86/events/msr.c msr_event_update(event); event 265 arch/x86/events/msr.c static void msr_event_del(struct perf_event *event, int flags) event 267 arch/x86/events/msr.c msr_event_stop(event, PERF_EF_UPDATE); event 270 arch/x86/events/msr.c static int msr_event_add(struct perf_event *event, int flags) event 273 arch/x86/events/msr.c msr_event_start(event, flags); event 477 arch/x86/events/perf_event.h unsigned int event; event 486 arch/x86/events/perf_event.h .event = (e), \ event 494 arch/x86/events/perf_event.h #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ event 495 arch/x86/events/perf_event.h EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) event 497 arch/x86/events/perf_event.h #define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \ event 498 arch/x86/events/perf_event.h EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \ event 535 arch/x86/events/perf_event.h u64 event:8, event 579 arch/x86/events/perf_event.h void (*read)(struct perf_event *event); event 580 arch/x86/events/perf_event.h int (*hw_config)(struct perf_event *event); event 
602 arch/x86/events/perf_event.h struct perf_event *event); event 605 arch/x86/events/perf_event.h struct perf_event *event); event 616 arch/x86/events/perf_event.h u64 (*limit_period)(struct perf_event *event, u64 l); event 667 arch/x86/events/perf_event.h void (*pebs_aliases)(struct perf_event *event); event 705 arch/x86/events/perf_event.h int (*check_period) (struct perf_event *event, u64 period); event 707 arch/x86/events/perf_event.h int (*aux_output_match) (struct perf_event *event); event 777 arch/x86/events/perf_event.h int x86_perf_event_set_period(struct perf_event *event); event 798 arch/x86/events/perf_event.h u64 x86_perf_event_update(struct perf_event *event); event 827 arch/x86/events/perf_event.h void hw_perf_lbr_event_destroy(struct perf_event *event); event 829 arch/x86/events/perf_event.h int x86_setup_perfctr(struct perf_event *event); event 831 arch/x86/events/perf_event.h int x86_pmu_hw_config(struct perf_event *event); event 851 arch/x86/events/perf_event.h void x86_pmu_stop(struct perf_event *event, int flags); event 853 arch/x86/events/perf_event.h static inline void x86_pmu_disable_event(struct perf_event *event) event 855 arch/x86/events/perf_event.h struct hw_perf_event *hwc = &event->hw; event 860 arch/x86/events/perf_event.h void x86_pmu_enable_event(struct perf_event *event); event 897 arch/x86/events/perf_event.h ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event); event 918 arch/x86/events/perf_event.h static inline int is_pebs_pt(struct perf_event *event) event 920 arch/x86/events/perf_event.h return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT); event 925 arch/x86/events/perf_event.h static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) event 927 arch/x86/events/perf_event.h struct hw_perf_event *hwc = &event->hw; event 930 arch/x86/events/perf_event.h if (event->attr.freq) event 939 arch/x86/events/perf_event.h static inline bool intel_pmu_has_bts(struct perf_event *event) event 941 arch/x86/events/perf_event.h struct hw_perf_event *hwc = &event->hw; event 943 arch/x86/events/perf_event.h return intel_pmu_has_bts_period(event, hwc->sample_period); event 946 arch/x86/events/perf_event.h int intel_pmu_save_and_restart(struct perf_event *event); event 950 arch/x86/events/perf_event.h struct perf_event *event); event 999 arch/x86/events/perf_event.h struct event_constraint *intel_pebs_constraints(struct perf_event *event); event 1001 arch/x86/events/perf_event.h void intel_pmu_pebs_add(struct perf_event *event); event 1003 arch/x86/events/perf_event.h void intel_pmu_pebs_del(struct perf_event *event); event 1005 arch/x86/events/perf_event.h void intel_pmu_pebs_enable(struct perf_event *event); event 1007 arch/x86/events/perf_event.h void intel_pmu_pebs_disable(struct perf_event *event); event 1015 arch/x86/events/perf_event.h void intel_pmu_auto_reload_read(struct perf_event *event); event 1027 arch/x86/events/perf_event.h void intel_pmu_lbr_add(struct perf_event *event); event 1029 arch/x86/events/perf_event.h void intel_pmu_lbr_del(struct perf_event *event); event 1057 arch/x86/events/perf_event.h int intel_pmu_setup_lbr_filter(struct perf_event *event); event 31 arch/x86/include/asm/intel_pt.h extern int is_intel_pt_event(struct perf_event *event); event 36 arch/x86/include/asm/intel_pt.h static inline int is_intel_pt_event(struct perf_event *event) { return 0; } event 328 arch/x86/include/asm/perf_event.h extern int x86_perf_rdpmc_index(struct perf_event *event); event 294 arch/x86/include/asm/perf_event_p4.h 
#define P4_OPCODE(event) event##_OPCODE event 297 arch/x86/include/asm/perf_event_p4.h #define P4_OPCODE_PACK(event, sel) (((event) << 8) | sel) event 26 arch/x86/include/uapi/asm/ist.h __u32 event; event 772 arch/x86/kernel/apm_32.c static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info) event 782 arch/x86/kernel/apm_32.c *event = call.ebx; event 1176 arch/x86/kernel/apm_32.c static void queue_event(apm_event_t event, struct apm_user *sender) event 1197 arch/x86/kernel/apm_32.c as->events[as->event_head] = event; event 1200 arch/x86/kernel/apm_32.c switch (event) { event 1304 arch/x86/kernel/apm_32.c apm_event_t event = APM_NO_EVENTS; /* silence gcc */ event 1310 arch/x86/kernel/apm_32.c error = apm_get_event(&event, &info); event 1312 arch/x86/kernel/apm_32.c return event; event 1322 arch/x86/kernel/apm_32.c apm_event_t event; event 1326 arch/x86/kernel/apm_32.c while ((event = get_event()) != 0) { event 1328 arch/x86/kernel/apm_32.c if (event <= NR_APM_EVENT_NAME) event 1330 arch/x86/kernel/apm_32.c apm_event_name[event - 1]); event 1333 arch/x86/kernel/apm_32.c "event 0x%02x\n", event); event 1339 arch/x86/kernel/apm_32.c switch (event) { event 1342 arch/x86/kernel/apm_32.c queue_event(event, NULL); event 1370 arch/x86/kernel/apm_32.c queue_event(event, NULL); event 1381 arch/x86/kernel/apm_32.c if ((event != APM_NORMAL_RESUME) event 1384 arch/x86/kernel/apm_32.c queue_event(event, NULL); event 1392 arch/x86/kernel/apm_32.c queue_event(event, NULL); event 1466 arch/x86/kernel/apm_32.c apm_event_t event; event 1477 arch/x86/kernel/apm_32.c while ((i >= sizeof(event)) && !queue_empty(as)) { event 1478 arch/x86/kernel/apm_32.c event = get_queued_event(as); event 1479 arch/x86/kernel/apm_32.c if (copy_to_user(buf, &event, sizeof(event))) { event 1484 arch/x86/kernel/apm_32.c switch (event) { event 1495 arch/x86/kernel/apm_32.c buf += sizeof(event); event 1496 arch/x86/kernel/apm_32.c i -= sizeof(event); event 48 arch/x86/kernel/cpu/mce/therm_throt.c int event; event 91 arch/x86/kernel/cpu/mce/therm_throt.c #define define_therm_throt_device_show_func(event, name) \ event 93 arch/x86/kernel/cpu/mce/therm_throt.c static ssize_t therm_throt_device_show_##event##_##name( \ event 104 arch/x86/kernel/cpu/mce/therm_throt.c per_cpu(thermal_state, cpu).event.name); \ event 149 arch/x86/kernel/cpu/mce/therm_throt.c static void therm_throt_process(bool new_event, int event, int level) event 159 arch/x86/kernel/cpu/mce/therm_throt.c if (event == THERMAL_THROTTLING_EVENT) event 161 arch/x86/kernel/cpu/mce/therm_throt.c else if (event == POWER_LIMIT_EVENT) event 166 arch/x86/kernel/cpu/mce/therm_throt.c if (event == THERMAL_THROTTLING_EVENT) event 168 arch/x86/kernel/cpu/mce/therm_throt.c else if (event == POWER_LIMIT_EVENT) event 190 arch/x86/kernel/cpu/mce/therm_throt.c if (event == THERMAL_THROTTLING_EVENT) event 198 arch/x86/kernel/cpu/mce/therm_throt.c if (event == THERMAL_THROTTLING_EVENT) event 205 arch/x86/kernel/cpu/mce/therm_throt.c static int thresh_event_valid(int level, int event) event 213 arch/x86/kernel/cpu/mce/therm_throt.c state = (event == 0) ? &pstate->pkg_thresh0 : event 216 arch/x86/kernel/cpu/mce/therm_throt.c state = (event == 0) ? 
&pstate->core_thresh0 : event 1063 arch/x86/kernel/cpu/resctrl/pseudo_lock.c perf_miss_attr.config = X86_CONFIG(.event = 0xd1, event 1065 arch/x86/kernel/cpu/resctrl/pseudo_lock.c perf_hit_attr.config = X86_CONFIG(.event = 0xd1, event 1102 arch/x86/kernel/cpu/resctrl/pseudo_lock.c perf_hit_attr.config = X86_CONFIG(.event = 0x2e, event 1104 arch/x86/kernel/cpu/resctrl/pseudo_lock.c perf_miss_attr.config = X86_CONFIG(.event = 0x2e, event 626 arch/x86/kernel/kgdb.c static void kgdb_hw_overflow_handler(struct perf_event *event, event 104 arch/x86/kvm/pmu.c struct perf_event *event; event 130 arch/x86/kvm/pmu.c event = perf_event_create_kernel_counter(&attr, -1, current, event 133 arch/x86/kvm/pmu.c if (IS_ERR(event)) { event 135 arch/x86/kvm/pmu.c PTR_ERR(event), pmc->idx); event 139 arch/x86/kvm/pmu.c pmc->perf_event = event; event 87 arch/x86/kvm/vmx/pmu_intel.c u32 event; event 93 arch/x86/kvm/vmx/pmu_intel.c event = fixed_pmc_events[array_index_nospec(idx, size)]; event 94 arch/x86/kvm/vmx/pmu_intel.c return intel_arch_events[event].event_type; event 46 arch/x86/oprofile/nmi_int.c u16 event = (u16)counter_config->event; event 56 arch/x86/oprofile/nmi_int.c event &= model->event_mask ? model->event_mask : 0xFF; event 57 arch/x86/oprofile/nmi_int.c val |= event & 0xFF; event 58 arch/x86/oprofile/nmi_int.c val |= (u64)(event & 0x0F00) << 24; event 432 arch/x86/oprofile/nmi_int.c oprofilefs_create_ulong(dir, "event", &counter_config[i].event); event 21 arch/x86/oprofile/op_counter.h unsigned long event; event 521 arch/x86/oprofile/op_model_p4.c if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) { event 524 arch/x86/oprofile/op_model_p4.c counter_config[ctr].event); event 528 arch/x86/oprofile/op_model_p4.c ev = &(p4_events[counter_config[ctr].event - 1]); event 565 arch/x86/oprofile/op_model_p4.c counter_config[ctr].event, stag, ctr); event 280 arch/x86/xen/enlighten.c xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) event 52 arch/xtensa/kernel/perf_event.c struct perf_event *event[XCHAL_NUM_PERF_COUNTERS]; event 136 arch/xtensa/kernel/perf_event.c static void xtensa_perf_event_update(struct perf_event *event, event 144 arch/xtensa/kernel/perf_event.c new_raw_count = xtensa_pmu_read_counter(event->hw.idx); event 150 arch/xtensa/kernel/perf_event.c local64_add(delta, &event->count); event 154 arch/xtensa/kernel/perf_event.c static bool xtensa_perf_event_set_period(struct perf_event *event, event 160 arch/xtensa/kernel/perf_event.c if (!is_sampling_event(event)) { event 183 arch/xtensa/kernel/perf_event.c perf_event_update_userpage(event); event 198 arch/xtensa/kernel/perf_event.c static int xtensa_pmu_event_init(struct perf_event *event) event 202 arch/xtensa/kernel/perf_event.c switch (event->attr.type) { event 204 arch/xtensa/kernel/perf_event.c if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) || event 205 arch/xtensa/kernel/perf_event.c xtensa_hw_ctl[event->attr.config] == 0) event 207 arch/xtensa/kernel/perf_event.c event->hw.config = xtensa_hw_ctl[event->attr.config]; event 211 arch/xtensa/kernel/perf_event.c ret = xtensa_pmu_cache_event(event->attr.config); event 214 arch/xtensa/kernel/perf_event.c event->hw.config = ret; event 219 arch/xtensa/kernel/perf_event.c if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) == event 222 arch/xtensa/kernel/perf_event.c event->hw.config = (event->attr.config & event 240 arch/xtensa/kernel/perf_event.c static void xtensa_pmu_start(struct perf_event *event, int flags) event 242 
arch/xtensa/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 249 arch/xtensa/kernel/perf_event.c WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); event 250 arch/xtensa/kernel/perf_event.c xtensa_perf_event_set_period(event, hwc, idx); event 258 arch/xtensa/kernel/perf_event.c static void xtensa_pmu_stop(struct perf_event *event, int flags) event 260 arch/xtensa/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 271 arch/xtensa/kernel/perf_event.c !(event->hw.state & PERF_HES_UPTODATE)) { event 272 arch/xtensa/kernel/perf_event.c xtensa_perf_event_update(event, &event->hw, idx); event 273 arch/xtensa/kernel/perf_event.c event->hw.state |= PERF_HES_UPTODATE; event 281 arch/xtensa/kernel/perf_event.c static int xtensa_pmu_add(struct perf_event *event, int flags) event 284 arch/xtensa/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 296 arch/xtensa/kernel/perf_event.c ev->event[idx] = event; event 301 arch/xtensa/kernel/perf_event.c xtensa_pmu_start(event, PERF_EF_RELOAD); event 303 arch/xtensa/kernel/perf_event.c perf_event_update_userpage(event); event 307 arch/xtensa/kernel/perf_event.c static void xtensa_pmu_del(struct perf_event *event, int flags) event 311 arch/xtensa/kernel/perf_event.c xtensa_pmu_stop(event, PERF_EF_UPDATE); event 312 arch/xtensa/kernel/perf_event.c __clear_bit(event->hw.idx, ev->used_mask); event 313 arch/xtensa/kernel/perf_event.c perf_event_update_userpage(event); event 316 arch/xtensa/kernel/perf_event.c static void xtensa_pmu_read(struct perf_event *event) event 318 arch/xtensa/kernel/perf_event.c xtensa_perf_event_update(event, &event->hw, event->hw.idx); event 369 arch/xtensa/kernel/perf_event.c struct perf_event *event = ev->event[i]; event 370 arch/xtensa/kernel/perf_event.c struct hw_perf_event *hwc = &event->hw; event 377 arch/xtensa/kernel/perf_event.c xtensa_perf_event_update(event, hwc, i); event 379 arch/xtensa/kernel/perf_event.c if (xtensa_perf_event_set_period(event, hwc, i)) { event 384 arch/xtensa/kernel/perf_event.c if (perf_event_overflow(event, &data, regs)) event 385 arch/xtensa/kernel/perf_event.c xtensa_pmu_stop(event, 0); event 64 arch/xtensa/platforms/iss/setup.c iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr) event 1562 block/genhd.c char event[] = "DISK_RO=1"; event 1563 block/genhd.c char *envp[] = { event, NULL }; event 1566 block/genhd.c event[8] = '0'; event 45 drivers/acpi/ac.c static void acpi_ac_notify(struct acpi_device *device, u32 event); event 228 drivers/acpi/ac.c static void acpi_ac_notify(struct acpi_device *device, u32 event) event 235 drivers/acpi/ac.c switch (event) { event 238 drivers/acpi/ac.c "Unsupported event [0x%x]\n", event)); event 255 drivers/acpi/ac.c dev_name(&device->dev), event, event 257 drivers/acpi/ac.c acpi_notifier_call_chain(device, event, (u32) ac->state); event 268 drivers/acpi/ac.c struct acpi_bus_event *event = (struct acpi_bus_event *)data; event 277 drivers/acpi/ac.c if (strcmp(event->device_class, ACPI_BATTERY_CLASS) == 0 && event 278 drivers/acpi/ac.c event->type == ACPI_BATTERY_NOTIFY_STATUS) event 411 drivers/acpi/acpi_pad.c static void acpi_pad_notify(acpi_handle handle, u32 event, event 416 drivers/acpi/acpi_pad.c switch (event) { event 420 drivers/acpi/acpi_pad.c dev_name(&device->dev), event, 0); event 423 drivers/acpi/acpi_pad.c pr_warn("Unsupported event [0x%x]\n", event); event 85 drivers/acpi/acpi_video.c static void acpi_video_bus_notify(struct acpi_device *device, u32 event); event 214 
drivers/acpi/acpi_video.c static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data); event 225 drivers/acpi/acpi_video.c u32 level_current, u32 event); event 1375 drivers/acpi/acpi_video.c u32 level_current, u32 event) event 1403 drivers/acpi/acpi_video.c switch (event) { event 1424 drivers/acpi/acpi_video.c int event = device->switch_brightness_event; event 1440 drivers/acpi/acpi_video.c level_next = acpi_video_get_next_level(device, level_current, event); event 1565 drivers/acpi/acpi_video.c static void acpi_video_bus_notify(struct acpi_device *device, u32 event) event 1576 drivers/acpi/acpi_video.c switch (event) { event 1601 drivers/acpi/acpi_video.c "Unsupported event [0x%x]\n", event)); event 1605 drivers/acpi/acpi_video.c if (acpi_notifier_call_chain(device, event, 0)) event 1620 drivers/acpi/acpi_video.c u32 event) event 1625 drivers/acpi/acpi_video.c video_device->switch_brightness_event = event; event 1629 drivers/acpi/acpi_video.c static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) event 1648 drivers/acpi/acpi_video.c acpi_notifier_call_chain(device, event, 0); event 1652 drivers/acpi/acpi_video.c switch (event) { event 1654 drivers/acpi/acpi_video.c brightness_switch_event(video_device, event); event 1658 drivers/acpi/acpi_video.c brightness_switch_event(video_device, event); event 1662 drivers/acpi/acpi_video.c brightness_switch_event(video_device, event); event 1666 drivers/acpi/acpi_video.c brightness_switch_event(video_device, event); event 1670 drivers/acpi/acpi_video.c brightness_switch_event(video_device, event); event 1675 drivers/acpi/acpi_video.c "Unsupported event [0x%x]\n", event)); event 1679 drivers/acpi/acpi_video.c acpi_notifier_call_chain(device, event, 0); event 372 drivers/acpi/acpica/acobject.h struct acpi_object_event event; event 102 drivers/acpi/acpica/actables.h void acpi_tb_notify_table(u32 event, void *table); event 20 drivers/acpi/acpica/evevent.c static u32 acpi_ev_fixed_event_dispatch(u32 event); event 236 drivers/acpi/acpica/evevent.c static u32 acpi_ev_fixed_event_dispatch(u32 event) event 243 drivers/acpi/acpica/evevent.c (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. event 250 drivers/acpi/acpica/evevent.c if (!acpi_gbl_fixed_event_handlers[event].handler) { event 251 drivers/acpi/acpica/evevent.c (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. event 257 drivers/acpi/acpica/evevent.c acpi_ut_get_event_name(event), event)); event 264 drivers/acpi/acpica/evevent.c return ((acpi_gbl_fixed_event_handlers[event]. 
event 265 drivers/acpi/acpica/evevent.c handler) (acpi_gbl_fixed_event_handlers[event].context)); event 584 drivers/acpi/acpica/evxface.c acpi_install_fixed_event_handler(u32 event, event 593 drivers/acpi/acpica/evxface.c if (event > ACPI_EVENT_MAX) { event 604 drivers/acpi/acpica/evxface.c if (acpi_gbl_fixed_event_handlers[event].handler) { event 611 drivers/acpi/acpica/evxface.c acpi_gbl_fixed_event_handlers[event].handler = handler; event 612 drivers/acpi/acpica/evxface.c acpi_gbl_fixed_event_handlers[event].context = context; event 614 drivers/acpi/acpica/evxface.c status = acpi_clear_event(event); event 616 drivers/acpi/acpica/evxface.c status = acpi_enable_event(event, 0); event 620 drivers/acpi/acpica/evxface.c acpi_ut_get_event_name(event), event)); event 624 drivers/acpi/acpica/evxface.c acpi_gbl_fixed_event_handlers[event].handler = NULL; event 625 drivers/acpi/acpica/evxface.c acpi_gbl_fixed_event_handlers[event].context = NULL; event 629 drivers/acpi/acpica/evxface.c acpi_ut_get_event_name(event), event, event 653 drivers/acpi/acpica/evxface.c acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler) event 661 drivers/acpi/acpica/evxface.c if (event > ACPI_EVENT_MAX) { event 672 drivers/acpi/acpica/evxface.c status = acpi_disable_event(event, 0); event 676 drivers/acpi/acpica/evxface.c acpi_gbl_fixed_event_handlers[event].handler = NULL; event 677 drivers/acpi/acpica/evxface.c acpi_gbl_fixed_event_handlers[event].context = NULL; event 682 drivers/acpi/acpica/evxface.c acpi_ut_get_event_name(event), event)); event 686 drivers/acpi/acpica/evxface.c acpi_ut_get_event_name(event), event)); event 142 drivers/acpi/acpica/evxfevnt.c acpi_status acpi_enable_event(u32 event, u32 flags) event 157 drivers/acpi/acpica/evxfevnt.c if (event > ACPI_EVENT_MAX) { event 166 drivers/acpi/acpica/evxfevnt.c acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. event 175 drivers/acpi/acpica/evxfevnt.c acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. event 184 drivers/acpi/acpica/evxfevnt.c acpi_ut_get_event_name(event))); event 205 drivers/acpi/acpica/evxfevnt.c acpi_status acpi_disable_event(u32 event, u32 flags) event 220 drivers/acpi/acpica/evxfevnt.c if (event > ACPI_EVENT_MAX) { event 229 drivers/acpi/acpica/evxfevnt.c acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. event 236 drivers/acpi/acpica/evxfevnt.c acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. event 245 drivers/acpi/acpica/evxfevnt.c acpi_ut_get_event_name(event))); event 265 drivers/acpi/acpica/evxfevnt.c acpi_status acpi_clear_event(u32 event) event 279 drivers/acpi/acpica/evxfevnt.c if (event > ACPI_EVENT_MAX) { event 288 drivers/acpi/acpica/evxfevnt.c acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. event 309 drivers/acpi/acpica/evxfevnt.c acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) event 323 drivers/acpi/acpica/evxfevnt.c if (event > ACPI_EVENT_MAX) { event 329 drivers/acpi/acpica/evxfevnt.c if (acpi_gbl_fixed_event_handlers[event].handler) { event 336 drivers/acpi/acpica/evxfevnt.c acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. event 350 drivers/acpi/acpica/evxfevnt.c acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 
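The drivers/acpi/acpica/evxface.c and evxfevnt.c entries above all belong to the ACPICA fixed-event interface, where event is a small index that must not exceed ACPI_EVENT_MAX. The sketch below is a hedged illustration of how that interface is typically consumed; demo_power_button_handler and demo_install_fixed_handler are invented names, while acpi_install_fixed_event_handler(), ACPI_EVENT_POWER_BUTTON and ACPI_INTERRUPT_HANDLED are the real symbols these entries reference.

    #include <linux/acpi.h>
    #include <linux/errno.h>

    /* Hypothetical consumer of the ACPICA fixed-event API indexed above. */
    static u32 demo_power_button_handler(void *context)
    {
            /* React to the fixed event, then report it as handled. */
            return ACPI_INTERRUPT_HANDLED;
    }

    static int demo_install_fixed_handler(void)
    {
            acpi_status status;

            status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
                                                      demo_power_button_handler,
                                                      NULL);
            if (ACPI_FAILURE(status))
                    return -ENODEV;

            /*
             * No explicit acpi_enable_event() call is needed here: as the
             * evxface.c entries above show, the install path already clears
             * and enables the event before returning.
             */
            return 0;
    }

Removal goes through acpi_remove_fixed_event_handler(), which, per the entries above, disables the event before dropping the handler.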
event 122 drivers/acpi/acpica/excreate.c &obj_desc->event.os_semaphore); event 87 drivers/acpi/acpica/exdump.c {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"} event 204 drivers/acpi/acpica/exsystem.c acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1); event 235 drivers/acpi/acpica/exsystem.c acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore, event 269 drivers/acpi/acpica/exsystem.c (void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore); event 270 drivers/acpi/acpica/exsystem.c obj_desc->event.os_semaphore = temp_semaphore; event 1053 drivers/acpi/acpica/tbdata.c void acpi_tb_notify_table(u32 event, void *table) event 1058 drivers/acpi/acpica/tbdata.c (void)acpi_gbl_table_handler(event, table, event 761 drivers/acpi/acpica/utcopy.c &dest_desc->event. event 157 drivers/acpi/acpica/utdelete.c object, object->event.os_semaphore)); event 159 drivers/acpi/acpica/utdelete.c (void)acpi_os_delete_semaphore(object->event.os_semaphore); event 160 drivers/acpi/acpica/utdelete.c object->event.os_semaphore = NULL; event 787 drivers/acpi/apei/ghes.c static int ghes_notify_hed(struct notifier_block *this, unsigned long event, event 1251 drivers/acpi/battery.c static void acpi_battery_notify(struct acpi_device *device, u32 event) event 1267 drivers/acpi/battery.c if (event == ACPI_BATTERY_NOTIFY_INFO) event 1271 drivers/acpi/battery.c dev_name(&device->dev), event, event 1273 drivers/acpi/battery.c acpi_notifier_call_chain(device, event, acpi_battery_present(battery)); event 416 drivers/acpi/bus.c static void acpi_device_notify(acpi_handle handle, u32 event, void *data) event 420 drivers/acpi/bus.c device->driver->ops.notify(device, event); event 500 drivers/acpi/bus.c static void acpi_sb_notify(acpi_handle handle, u32 event, void *data) event 504 drivers/acpi/bus.c if (event == ACPI_SB_NOTIFY_SHUTDOWN_REQUEST) { event 508 drivers/acpi/bus.c pr_warn("event %x is not supported by \\_SB device\n", event); event 1113 drivers/acpi/bus.c static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context) event 1115 drivers/acpi/bus.c acpi_scan_table_handler(event, table, context); event 1117 drivers/acpi/bus.c return acpi_sysfs_table_handler(event, table, context); event 108 drivers/acpi/button.c static void acpi_button_notify(struct acpi_device *device, u32 event); event 407 drivers/acpi/button.c static void acpi_button_notify(struct acpi_device *device, u32 event) event 413 drivers/acpi/button.c switch (event) { event 415 drivers/acpi/button.c event = ACPI_BUTTON_NOTIFY_STATUS; event 442 drivers/acpi/button.c event, ++button->pushed); event 447 drivers/acpi/button.c "Unsupported event [0x%x]\n", event)); event 89 drivers/acpi/dock.c static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event, event 114 drivers/acpi/dock.c uevent(adev, event); event 123 drivers/acpi/dock.c notify(adev, event); event 252 drivers/acpi/dock.c static void hotplug_dock_devices(struct dock_station *ds, u32 event) event 258 drivers/acpi/dock.c dock_hotplug_event(dd, event, DOCK_CALL_FIXUP); event 262 drivers/acpi/dock.c dock_hotplug_event(dd, event, DOCK_CALL_HANDLER); event 281 drivers/acpi/dock.c static void dock_event(struct dock_station *ds, u32 event, int num) event 301 drivers/acpi/dock.c dock_hotplug_event(dd, event, DOCK_CALL_UEVENT); event 387 drivers/acpi/dock.c static int handle_eject_request(struct dock_station *ds, u32 event) event 399 drivers/acpi/dock.c dock_event(ds, event, UNDOCK_EVENT); event 422 drivers/acpi/dock.c int dock_notify(struct 
acpi_device *adev, u32 event) event 436 drivers/acpi/dock.c if ((ds->flags & DOCK_IS_DOCK) && event == ACPI_NOTIFY_DEVICE_CHECK) event 437 drivers/acpi/dock.c event = ACPI_NOTIFY_EJECT_REQUEST; event 448 drivers/acpi/dock.c switch (event) { event 459 drivers/acpi/dock.c hotplug_dock_devices(ds, event); event 461 drivers/acpi/dock.c dock_event(ds, event, DOCK_EVENT); event 470 drivers/acpi/dock.c event = ACPI_NOTIFY_EJECT_REQUEST; event 477 drivers/acpi/dock.c handle_eject_request(ds, event); event 479 drivers/acpi/dock.c dock_event(ds, event, UNDOCK_EVENT); event 30 drivers/acpi/event.c struct acpi_bus_event event; event 32 drivers/acpi/event.c strcpy(event.device_class, dev->pnp.device_class); event 33 drivers/acpi/event.c strcpy(event.bus_id, dev->pnp.bus_id); event 34 drivers/acpi/event.c event.type = type; event 35 drivers/acpi/event.c event.data = data; event 36 drivers/acpi/event.c return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event) event 101 drivers/acpi/event.c struct acpi_genl_event *event; event 131 drivers/acpi/event.c event = nla_data(attr); event 132 drivers/acpi/event.c memset(event, 0, sizeof(struct acpi_genl_event)); event 134 drivers/acpi/event.c strscpy(event->device_class, device_class, sizeof(event->device_class)); event 135 drivers/acpi/event.c strscpy(event->bus_id, bus_id, sizeof(event->bus_id)); event 136 drivers/acpi/event.c event->type = type; event 137 drivers/acpi/event.c event->data = data; event 58 drivers/acpi/evged.c struct acpi_ged_event *event = data; event 61 drivers/acpi/evged.c acpi_ret = acpi_execute_simple_method(event->handle, NULL, event->gsi); event 63 drivers/acpi/evged.c dev_err_once(event->dev, "IRQ method execution failed\n"); event 71 drivers/acpi/evged.c struct acpi_ged_event *event; event 118 drivers/acpi/evged.c event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL); event 119 drivers/acpi/evged.c if (!event) event 122 drivers/acpi/evged.c event->gsi = gsi; event 123 drivers/acpi/evged.c event->dev = dev; event 124 drivers/acpi/evged.c event->irq = irq; event 125 drivers/acpi/evged.c event->handle = evt_handle; event 131 drivers/acpi/evged.c irqflags, "ACPI:Ged", event)) { event 137 drivers/acpi/evged.c list_add_tail(&event->node, &geddev->event_list); event 166 drivers/acpi/evged.c struct acpi_ged_event *event, *next; event 168 drivers/acpi/evged.c list_for_each_entry_safe(event, next, &geddev->event_list, node) { event 169 drivers/acpi/evged.c free_irq(event->irq, event); event 170 drivers/acpi/evged.c list_del(&event->node); event 172 drivers/acpi/evged.c event->gsi, event->irq); event 45 drivers/acpi/hed.c static void acpi_hed_notify(struct acpi_device *device, u32 event) event 48 drivers/acpi/internal.h int dock_notify(struct acpi_device *adev, u32 event); event 53 drivers/acpi/internal.h static inline int dock_notify(struct acpi_device *adev, u32 event) { return -ENODEV; } event 88 drivers/acpi/internal.h acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context); event 89 drivers/acpi/internal.h void acpi_scan_table_handler(u32 event, void *table, void *context); event 1720 drivers/acpi/nfit/core.c void __acpi_nvdimm_notify(struct device *dev, u32 event) event 1726 drivers/acpi/nfit/core.c event); event 1728 drivers/acpi/nfit/core.c if (event != NFIT_NOTIFY_DIMM_HEALTH) { event 1730 drivers/acpi/nfit/core.c event); event 1748 drivers/acpi/nfit/core.c static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) event 1754 drivers/acpi/nfit/core.c __acpi_nvdimm_notify(dev, event); 
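Most of the drivers/acpi entries above share a single pattern: an acpi_driver provides a .notify callback with the signature (struct acpi_device *device, u32 event), and the bus.c entry at line 420 above dispatches incoming notifications to it, often forwarding them with acpi_notifier_call_chain(). The fragment below is a hedged sketch of that wiring only; the driver name, the "DEMO0001" HID and the 0x80 event value are assumptions, not taken from the indexed files.

    #include <linux/acpi.h>
    #include <linux/module.h>

    /* Hypothetical skeleton of the notify pattern used by ac.c, battery.c,
     * button.c, thermal.c and acpi_video.c above.
     */
    static void demo_acpi_notify(struct acpi_device *device, u32 event)
    {
            switch (event) {
            case 0x80:      /* assumed device-specific "status changed" code */
                    acpi_notifier_call_chain(device, event, 0);
                    break;
            default:
                    dev_dbg(&device->dev, "Unsupported event [0x%x]\n", event);
                    break;
            }
    }

    static const struct acpi_device_id demo_acpi_ids[] = {
            { "DEMO0001", 0 },      /* hypothetical HID */
            { }
    };

    static struct acpi_driver demo_acpi_driver = {
            .name = "demo",
            .ids  = demo_acpi_ids,
            .ops  = {
                    .notify = demo_acpi_notify,
            },
    };
    module_acpi_driver(demo_acpi_driver);
    MODULE_LICENSE("GPL");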
event 3734 drivers/acpi/nfit/core.c void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) event 3736 drivers/acpi/nfit/core.c dev_dbg(dev, "event: 0x%x\n", event); event 3738 drivers/acpi/nfit/core.c switch (event) { event 3749 drivers/acpi/nfit/core.c static void acpi_nfit_notify(struct acpi_device *adev, u32 event) event 3752 drivers/acpi/nfit/core.c __acpi_nfit_notify(&adev->dev, adev->handle, event); event 343 drivers/acpi/nfit/nfit.h void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event); event 344 drivers/acpi/nfit/nfit.h void __acpi_nvdimm_notify(struct device *dev, u32 event); event 56 drivers/acpi/processor_driver.c static void acpi_processor_notify(acpi_handle handle, u32 event, void *data) event 69 drivers/acpi/processor_driver.c switch (event) { event 76 drivers/acpi/processor_driver.c dev_name(&device->dev), event, event 82 drivers/acpi/processor_driver.c dev_name(&device->dev), event, 0); event 87 drivers/acpi/processor_driver.c dev_name(&device->dev), event, 0); event 91 drivers/acpi/processor_driver.c "Unsupported event [0x%x]\n", event)); event 290 drivers/acpi/processor_driver.c unsigned long event, void *data) event 294 drivers/acpi/processor_driver.c if (event == CPUFREQ_CREATE_POLICY) { event 297 drivers/acpi/processor_driver.c } else if (event == CPUFREQ_REMOVE_POLICY) { event 208 drivers/acpi/processor_throttling.c static int acpi_processor_throttling_notifier(unsigned long event, void *data) event 230 drivers/acpi/processor_throttling.c switch (event) { event 2297 drivers/acpi/scan.c u32 event; event 2306 drivers/acpi/scan.c if (tew->event == ACPI_TABLE_EVENT_LOAD) { event 2315 drivers/acpi/scan.c void acpi_scan_table_handler(u32 event, void *table, void *context) event 2322 drivers/acpi/scan.c if (event != ACPI_TABLE_EVENT_LOAD) event 2331 drivers/acpi/scan.c tew->event = event; event 1159 drivers/acpi/sleep.c if (stage.event == PM_EVENT_HIBERNATE) event 1243 drivers/acpi/sleep.c if (stage.event == PM_EVENT_HIBERNATE) event 403 drivers/acpi/sysfs.c acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context) event 407 drivers/acpi/sysfs.c switch (event) { event 783 drivers/acpi/sysfs.c int event = index - num_gpes; event 786 drivers/acpi/sysfs.c result = acpi_disable_event(event, ACPI_NOT_ISR); event 789 drivers/acpi/sysfs.c result = acpi_enable_event(event, ACPI_NOT_ISR); event 792 drivers/acpi/sysfs.c result = acpi_clear_event(event); event 80 drivers/acpi/thermal.c static void acpi_thermal_notify(struct acpi_device *device, u32 event); event 935 drivers/acpi/thermal.c static void acpi_thermal_notify(struct acpi_device *device, u32 event) event 943 drivers/acpi/thermal.c switch (event) { event 951 drivers/acpi/thermal.c dev_name(&device->dev), event, 0); event 957 drivers/acpi/thermal.c dev_name(&device->dev), event, 0); event 961 drivers/acpi/thermal.c "Unsupported event [0x%x]\n", event)); event 119 drivers/ata/acard-ahci.c if (mesg.event & PM_EVENT_SUSPEND && event 126 drivers/ata/acard-ahci.c if (mesg.event & PM_EVENT_SLEEP) { event 149 drivers/ata/acard-ahci.c if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { event 877 drivers/ata/ahci.c if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { event 1003 drivers/ata/ata_piix.c if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) { event 766 drivers/ata/libahci_platform.c if (dev->power.power_state.event == PM_EVENT_SUSPEND) { event 101 drivers/ata/libata-acpi.c u32 event) event 113 drivers/ata/libata-acpi.c switch (event) { event 
135 drivers/ata/libata-acpi.c static int ata_acpi_dev_notify_dock(struct acpi_device *adev, u32 event) event 138 drivers/ata/libata-acpi.c ata_acpi_handle_hotplug(dev->link->ap, dev, event); event 142 drivers/ata/libata-acpi.c static int ata_acpi_ap_notify_dock(struct acpi_device *adev, u32 event) event 144 drivers/ata/libata-acpi.c ata_acpi_handle_hotplug(ata_hotplug_data(adev->hp).ap, NULL, event); event 149 drivers/ata/libata-acpi.c u32 event) event 162 drivers/ata/libata-acpi.c snprintf(event_string, 20, "BAY_EVENT=%d", event); event 167 drivers/ata/libata-acpi.c static void ata_acpi_ap_uevent(struct acpi_device *adev, u32 event) event 169 drivers/ata/libata-acpi.c ata_acpi_uevent(ata_hotplug_data(adev->hp).ap, NULL, event); event 172 drivers/ata/libata-acpi.c static void ata_acpi_dev_uevent(struct acpi_device *adev, u32 event) event 175 drivers/ata/libata-acpi.c ata_acpi_uevent(dev->link->ap, dev, event); event 904 drivers/ata/libata-acpi.c if (!(state.event & PM_EVENT_RESUME)) { event 932 drivers/ata/libata-acpi.c if (state.event & PM_EVENT_RESUME) event 940 drivers/ata/libata-acpi.c acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ? event 944 drivers/ata/libata-acpi.c if (!(state.event & PM_EVENT_RESUME)) event 6825 drivers/ata/libata-core.c if (mesg.event & PM_EVENT_SLEEP) event 4075 drivers/ata/libata-eh.c ap->pm_mesg.event & PM_EVENT_RESUME) { event 4140 drivers/ata/libata-eh.c !(ap->pm_mesg.event & PM_EVENT_RESUME)) { event 240 drivers/ata/libata-zpodd.c static void zpodd_wake_dev(acpi_handle handle, u32 event, void *context) event 246 drivers/ata/libata-zpodd.c if (event == ACPI_NOTIFY_DEVICE_WAKE && pm_runtime_suspended(dev)) { event 603 drivers/ata/sata_highbank.c if (dev->power.power_state.event == PM_EVENT_SUSPEND) { event 805 drivers/ata/sata_inic162x.c if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { event 2406 drivers/ata/sata_nv.c if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { event 1338 drivers/ata/sata_sil24.c if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) event 65 drivers/base/power/main.c static const char *pm_verb(int event) event 67 drivers/base/power/main.c switch (event) { event 346 drivers/base/power/main.c switch (state.event) { event 381 drivers/base/power/main.c switch (state.event) { event 415 drivers/base/power/main.c switch (state.event) { event 441 drivers/base/power/main.c dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), event 442 drivers/base/power/main.c ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? event 450 drivers/base/power/main.c dev_name(dev), pm_verb(state.event), info, error); event 468 drivers/base/power/main.c info ?: "", info ? 
" " : "", pm_verb(state.event), event 485 drivers/base/power/main.c trace_device_pm_callback_start(dev, info, state.event); event 566 drivers/base/power/main.c switch (resume_msg.event) { event 587 drivers/base/power/main.c return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE; event 677 drivers/base/power/main.c if (state.event == PM_EVENT_THAW) { event 753 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true); event 789 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); event 903 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); event 938 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); event 1066 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_resume"), state.event, true); event 1104 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_resume"), state.event, false); event 1162 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_complete"), state.event, true); event 1175 drivers/base/power/main.c trace_device_pm_callback_start(dev, "", state.event); event 1187 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_complete"), state.event, false); event 1216 drivers/base/power/main.c switch (sleep_state.event) { event 1289 drivers/base/power/main.c (resume_msg.event != PM_EVENT_RESUME || event 1394 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); event 1431 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); event 1594 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); event 1632 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); event 1675 drivers/base/power/main.c trace_device_pm_callback_start(dev, info, state.event); event 1862 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_suspend"), state.event, true); event 1901 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_suspend"), state.event, false); event 1971 drivers/base/power/main.c dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && event 1989 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_prepare"), state.event, true); event 2013 drivers/base/power/main.c trace_device_pm_callback_start(dev, "", state.event); event 2035 drivers/base/power/main.c trace_suspend_resume(TPS("dpm_prepare"), state.event, false); event 375 drivers/block/skd_main.c static void skd_log_skdev(struct skd_device *skdev, const char *event); event 377 drivers/block/skd_main.c struct skd_request_context *skreq, const char *event); event 3564 drivers/block/skd_main.c static void skd_log_skdev(struct skd_device *skdev, const char *event) event 3566 drivers/block/skd_main.c dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event); event 3578 drivers/block/skd_main.c struct skd_request_context *skreq, const char *event) event 3584 drivers/block/skd_main.c dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event); event 352 drivers/block/sunvdc.c static void vdc_event(void *arg, int event) event 361 drivers/block/sunvdc.c if (unlikely(event == LDC_EVENT_RESET)) { event 362 drivers/block/sunvdc.c vio_link_state_change(vio, event); event 367 drivers/block/sunvdc.c if (unlikely(event == LDC_EVENT_UP)) { event 368 drivers/block/sunvdc.c vio_link_state_change(vio, event); event 372 drivers/block/sunvdc.c if (unlikely(event != LDC_EVENT_DATA_READY)) { event 373 
drivers/block/sunvdc.c pr_warn(PFX "Unexpected LDC event %d\n", event); event 907 drivers/block/sunvdc.c .event = vdc_event, event 86 drivers/bluetooth/btmrvl_main.c struct btmrvl_event *event; event 89 drivers/bluetooth/btmrvl_main.c event = (struct btmrvl_event *) skb->data; event 90 drivers/bluetooth/btmrvl_main.c if (event->ec != 0xff) { event 91 drivers/bluetooth/btmrvl_main.c BT_DBG("Not Marvell Event=%x", event->ec); event 96 drivers/bluetooth/btmrvl_main.c switch (event->data[0]) { event 98 drivers/bluetooth/btmrvl_main.c if (!event->data[2]) { event 99 drivers/bluetooth/btmrvl_main.c if (event->data[1] == BT_PS_ENABLE) event 111 drivers/bluetooth/btmrvl_main.c if (!event->data[3]) event 112 drivers/bluetooth/btmrvl_main.c BT_DBG("gpio=%x, gap=%x", event->data[1], event 113 drivers/bluetooth/btmrvl_main.c event->data[2]); event 119 drivers/bluetooth/btmrvl_main.c if (!event->data[1]) { event 132 drivers/bluetooth/btmrvl_main.c event->data[1] == MODULE_BRINGUP_REQ) { event 134 drivers/bluetooth/btmrvl_main.c ((event->data[2] == MODULE_BROUGHT_UP) || event 135 drivers/bluetooth/btmrvl_main.c (event->data[2] == MODULE_ALREADY_UP)) ? event 138 drivers/bluetooth/btmrvl_main.c if (event->length > 3 && event->data[3]) event 145 drivers/bluetooth/btmrvl_main.c event->data[1] == MODULE_SHUTDOWN_REQ) { event 146 drivers/bluetooth/btmrvl_main.c BT_DBG("EVENT:%s", (event->data[2]) ? event 155 drivers/bluetooth/btmrvl_main.c if (event->data[1] == BT_PS_SLEEP) event 162 drivers/bluetooth/btmrvl_main.c BT_DBG("Unknown Event=%d", event->data[0]); event 2022 drivers/bus/ti-sysc.c unsigned long event, void *device) event 2032 drivers/bus/ti-sysc.c switch (event) { event 177 drivers/char/apm-emulation.c static void queue_add_event(struct apm_queue *q, apm_event_t event) event 187 drivers/char/apm-emulation.c q->events[q->event_head] = event; event 190 drivers/char/apm-emulation.c static void queue_event(apm_event_t event) event 197 drivers/char/apm-emulation.c queue_add_event(&as->queue, event); event 206 drivers/char/apm-emulation.c apm_event_t event; event 217 drivers/char/apm-emulation.c while ((i >= sizeof(event)) && !queue_empty(&as->queue)) { event 218 drivers/char/apm-emulation.c event = queue_get_event(&as->queue); event 221 drivers/char/apm-emulation.c if (copy_to_user(buf, &event, sizeof(event))) event 226 drivers/char/apm-emulation.c (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)) event 230 drivers/char/apm-emulation.c buf += sizeof(event); event 231 drivers/char/apm-emulation.c i -= sizeof(event); event 470 drivers/char/apm-emulation.c apm_event_t event; event 479 drivers/char/apm-emulation.c event = 0; event 481 drivers/char/apm-emulation.c event = queue_get_event(&kapmd_queue); event 484 drivers/char/apm-emulation.c switch (event) { event 490 drivers/char/apm-emulation.c queue_event(event); event 510 drivers/char/apm-emulation.c unsigned long event, event 521 drivers/char/apm-emulation.c switch (event) { event 524 drivers/char/apm-emulation.c apm_event = (event == PM_SUSPEND_PREPARE) ? event 590 drivers/char/apm-emulation.c apm_event = (event == PM_POST_SUSPEND) ? 
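The drivers/char/apm-emulation.c entries above (the notifier around lines 510-590 of that file) use the kernel's generic notifier convention that recurs throughout this index: a callback receives an unsigned long event code plus an opaque pointer and returns a NOTIFY_* value. Below is a hedged sketch of that convention applied to suspend/resume, registered with register_pm_notifier(); every demo_* name is invented for illustration.

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/suspend.h>

    /* Hypothetical PM notifier illustrating the "unsigned long event"
     * callback shape seen in apm-emulation.c and ti-sysc.c above.
     */
    static int demo_pm_notifier(struct notifier_block *nb,
                                unsigned long event, void *unused)
    {
            switch (event) {
            case PM_SUSPEND_PREPARE:
                    /* quiesce before the system suspends */
                    break;
            case PM_POST_SUSPEND:
                    /* resume normal operation */
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_pm_nb = {
            .notifier_call = demo_pm_notifier,
    };

    static int __init demo_pm_init(void)
    {
            return register_pm_notifier(&demo_pm_nb);
    }
    module_init(demo_pm_init);

    static void __exit demo_pm_exit(void)
    {
            unregister_pm_notifier(&demo_pm_nb);
    }
    module_exit(demo_pm_exit);
    MODULE_LICENSE("GPL");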
event 716 drivers/char/apm-emulation.c void apm_queue_event(apm_event_t event) event 721 drivers/char/apm-emulation.c queue_add_event(&kapmd_queue, event); event 236 drivers/char/ipmi/ipmb_dev_int.c enum i2c_slave_event event, u8 *val) event 243 drivers/char/ipmi/ipmb_dev_int.c switch (event) { event 690 drivers/char/ipmi/ipmi_bt_sm.c .event = bt_event, event 527 drivers/char/ipmi/ipmi_kcs_sm.c .event = kcs_event, event 5066 drivers/char/ipmi/ipmi_msghandler.c unsigned long event, event 761 drivers/char/ipmi/ipmi_si_intf.c si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); event 764 drivers/char/ipmi/ipmi_si_intf.c si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); event 1304 drivers/char/ipmi/ipmi_si_intf.c smi_result = smi_info->handlers->event(smi_info->si_sm, 0); event 1309 drivers/char/ipmi/ipmi_si_intf.c smi_result = smi_info->handlers->event( event 1312 drivers/char/ipmi/ipmi_si_intf.c smi_result = smi_info->handlers->event( event 84 drivers/char/ipmi/ipmi_si_sm.h enum si_sm_result (*event)(struct si_sm_data *smi, long time); event 572 drivers/char/ipmi/ipmi_smic_sm.c .event = smic_event, event 238 drivers/char/sonypi.c u8 event; event 774 drivers/char/sonypi.c static void sonypi_report_input_event(u8 event) event 781 drivers/char/sonypi.c switch (event) { event 805 drivers/char/sonypi.c if (event == sonypi_inputkeys[i].sonypiev) { event 826 drivers/char/sonypi.c u8 v1, v2, event = 0; event 840 drivers/char/sonypi.c for (j = 0; sonypi_eventtypes[i].events[j].event; j++) { event 842 drivers/char/sonypi.c event = sonypi_eventtypes[i].events[j].event; event 863 drivers/char/sonypi.c sonypi_report_input_event(event); event 865 drivers/char/sonypi.c kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event, event 866 drivers/char/sonypi.c sizeof(event), &sonypi_device.fifo_lock); event 77 drivers/char/tpm/eventlog/tpm1.c struct tcpa_event *event; event 83 drivers/char/tpm/eventlog/tpm1.c event = addr; event 90 drivers/char/tpm/eventlog/tpm1.c do_endian_conversion(event->event_size); event 92 drivers/char/tpm/eventlog/tpm1.c do_endian_conversion(event->event_type); event 111 drivers/char/tpm/eventlog/tpm1.c struct tcpa_event *event = v; event 119 drivers/char/tpm/eventlog/tpm1.c converted_event_size = do_endian_conversion(event->event_size); event 127 drivers/char/tpm/eventlog/tpm1.c event = v; event 129 drivers/char/tpm/eventlog/tpm1.c converted_event_size = do_endian_conversion(event->event_size); event 130 drivers/char/tpm/eventlog/tpm1.c converted_event_type = do_endian_conversion(event->event_type); event 143 drivers/char/tpm/eventlog/tpm1.c static int get_event_name(char *dest, struct tcpa_event *event, event 152 drivers/char/tpm/eventlog/tpm1.c switch (do_endian_conversion(event->event_type)) { event 169 drivers/char/tpm/eventlog/tpm1.c (event->event_type)]; event 175 drivers/char/tpm/eventlog/tpm1.c do_endian_conversion(event->event_size)) { event 177 drivers/char/tpm/eventlog/tpm1.c n_len = do_endian_conversion(event->event_size); event 224 drivers/char/tpm/eventlog/tpm1.c struct tcpa_event *event = v; event 229 drivers/char/tpm/eventlog/tpm1.c memcpy(&temp_event, event, sizeof(struct tcpa_event)); event 232 drivers/char/tpm/eventlog/tpm1.c temp_event.pcr_index = do_endian_conversion(event->pcr_index); event 233 drivers/char/tpm/eventlog/tpm1.c temp_event.event_type = do_endian_conversion(event->event_type); event 234 drivers/char/tpm/eventlog/tpm1.c temp_event.event_size = do_endian_conversion(event->event_size); event 255 
drivers/char/tpm/eventlog/tpm1.c struct tcpa_event *event = v; event 267 drivers/char/tpm/eventlog/tpm1.c seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index)); event 270 drivers/char/tpm/eventlog/tpm1.c seq_printf(m, "%20phN", event->pcr_value); event 273 drivers/char/tpm/eventlog/tpm1.c seq_printf(m, " %02x", do_endian_conversion(event->event_type)); event 275 drivers/char/tpm/eventlog/tpm1.c len += get_event_name(eventname, event, event_entry); event 36 drivers/char/tpm/eventlog/tpm2.c static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event, event 39 drivers/char/tpm/eventlog/tpm2.c return __calc_tpm2_event_size(event, event_header, false); event 49 drivers/char/tpm/eventlog/tpm2.c struct tcg_pcr_event2_head *event; event 54 drivers/char/tpm/eventlog/tpm2.c size = sizeof(struct tcg_pcr_event) - sizeof(event_header->event) event 68 drivers/char/tpm/eventlog/tpm2.c event = addr; event 69 drivers/char/tpm/eventlog/tpm2.c size = calc_tpm2_event_size(event, event_header); event 75 drivers/char/tpm/eventlog/tpm2.c event = addr; event 76 drivers/char/tpm/eventlog/tpm2.c size = calc_tpm2_event_size(event, event_header); event 90 drivers/char/tpm/eventlog/tpm2.c struct tcg_pcr_event2_head *event; event 102 drivers/char/tpm/eventlog/tpm2.c sizeof(event_header->event) + event_header->event_size; event 105 drivers/char/tpm/eventlog/tpm2.c event = v; event 106 drivers/char/tpm/eventlog/tpm2.c event_size = calc_tpm2_event_size(event, event_header); event 109 drivers/char/tpm/eventlog/tpm2.c marker = event; event 116 drivers/char/tpm/eventlog/tpm2.c event = v; event 118 drivers/char/tpm/eventlog/tpm2.c event_size = calc_tpm2_event_size(event, event_header); event 134 drivers/char/tpm/eventlog/tpm2.c struct tcg_pcr_event2_head *event = v; event 140 drivers/char/tpm/eventlog/tpm2.c sizeof(event_header->event) + event_header->event_size; event 147 drivers/char/tpm/eventlog/tpm2.c size = calc_tpm2_event_size(event, event_header); event 148 drivers/char/tpm/eventlog/tpm2.c temp_ptr = event; event 548 drivers/char/virtio_console.c unsigned int event, unsigned int value) event 562 drivers/char/virtio_console.c portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event); event 578 drivers/char/virtio_console.c static ssize_t send_control_msg(struct port *port, unsigned int event, event 583 drivers/char/virtio_console.c return __send_control_msg(port->portdev, port->id, event, value); event 1579 drivers/char/virtio_console.c cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) { event 1586 drivers/char/virtio_console.c switch (virtio16_to_cpu(vdev, cpkt->event)) { event 1973 drivers/clk/clk.c unsigned long event) event 1982 drivers/clk/clk.c ret = __clk_notify(core, event, core->rate, core->new_rate); event 1991 drivers/clk/clk.c tmp_clk = clk_propagate_rate_change(child, event); event 1998 drivers/clk/clk.c tmp_clk = clk_propagate_rate_change(core->new_child, event); event 909 drivers/clk/meson/g12a.c unsigned long event, void *data) event 911 drivers/clk/meson/g12a.c if (event == POST_RATE_CHANGE || event == PRE_RATE_CHANGE) { event 934 drivers/clk/meson/g12a.c unsigned long event, void *data) event 939 drivers/clk/meson/g12a.c switch (event) { event 1037 drivers/clk/meson/g12a.c unsigned long event, void *data) event 1042 drivers/clk/meson/g12a.c switch (event) { event 3595 drivers/clk/meson/meson8b.c unsigned long event, void *data) event 3604 drivers/clk/meson/meson8b.c switch (event) { event 31 drivers/clk/qcom/apcs-msm8916.c static int a53cc_notifier_cb(struct 
notifier_block *nb, unsigned long event, event 38 drivers/clk/qcom/apcs-msm8916.c if (event == PRE_RATE_CHANGE) event 34 drivers/clk/qcom/krait-cc.c unsigned long event, event 41 drivers/clk/qcom/krait-cc.c if (event == PRE_RATE_CHANGE) { event 50 drivers/clk/qcom/krait-cc.c } else if (event == POST_RATE_CHANGE) { event 230 drivers/clk/rockchip/clk-cpu.c unsigned long event, void *data) event 237 drivers/clk/rockchip/clk-cpu.c __func__, event, ndata->old_rate, ndata->new_rate); event 238 drivers/clk/rockchip/clk-cpu.c if (event == PRE_RATE_CHANGE) event 240 drivers/clk/rockchip/clk-cpu.c else if (event == POST_RATE_CHANGE) event 159 drivers/clk/rockchip/clk-mmc-phase.c unsigned long event, void *data) event 181 drivers/clk/rockchip/clk-mmc-phase.c if (event == PRE_RATE_CHANGE) event 185 drivers/clk/rockchip/clk-mmc-phase.c event == POST_RATE_CHANGE) event 141 drivers/clk/rockchip/clk.c unsigned long event, void *data) event 149 drivers/clk/rockchip/clk.c __func__, event, ndata->old_rate, ndata->new_rate); event 150 drivers/clk/rockchip/clk.c if (event == PRE_RATE_CHANGE) { event 158 drivers/clk/rockchip/clk.c } else if (event == POST_RATE_CHANGE) { event 361 drivers/clk/samsung/clk-cpu.c unsigned long event, void *data) event 371 drivers/clk/samsung/clk-cpu.c if (event == PRE_RATE_CHANGE) event 373 drivers/clk/samsung/clk-cpu.c else if (event == POST_RATE_CHANGE) event 384 drivers/clk/samsung/clk-cpu.c unsigned long event, void *data) event 394 drivers/clk/samsung/clk-cpu.c if (event == PRE_RATE_CHANGE) event 396 drivers/clk/samsung/clk-cpu.c else if (event == POST_RATE_CHANGE) event 192 drivers/clk/samsung/clk-s3c2410-dclk.c unsigned long event, void *data) event 196 drivers/clk/samsung/clk-s3c2410-dclk.c if (event == POST_RATE_CHANGE) { event 205 drivers/clk/samsung/clk-s3c2410-dclk.c unsigned long event, void *data) event 209 drivers/clk/samsung/clk-s3c2410-dclk.c if (event == POST_RATE_CHANGE) { event 54 drivers/clk/sunxi-ng/ccu_common.c unsigned long event, void *data) event 59 drivers/clk/sunxi-ng/ccu_common.c if (event != POST_RATE_CHANGE) event 262 drivers/clk/sunxi-ng/ccu_mux.c unsigned long event, void *data) event 267 drivers/clk/sunxi-ng/ccu_mux.c if (event == PRE_RATE_CHANGE) { event 272 drivers/clk/sunxi-ng/ccu_mux.c } else if (event == POST_RATE_CHANGE) { event 44 drivers/clocksource/bcm2835_timer.c static int bcm2835_time_set_next_event(unsigned long event, event 49 drivers/clocksource/bcm2835_timer.c writel_relaxed(readl_relaxed(system_clock) + event, event 225 drivers/clocksource/timer-cadence-ttc.c unsigned long event, void *data) event 232 drivers/clocksource/timer-cadence-ttc.c switch (event) { event 379 drivers/clocksource/timer-cadence-ttc.c unsigned long event, void *data) event 386 drivers/clocksource/timer-cadence-ttc.c switch (event) { event 190 drivers/clocksource/timer-mediatek.c static int mtk_gpt_clkevt_next_event(unsigned long event, event 196 drivers/clocksource/timer-mediatek.c mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT); event 114 drivers/clocksource/timer-milbeaut.c static int mlb_clkevt_next_event(unsigned long event, event 120 drivers/clocksource/timer-milbeaut.c mlb_evt_timer_register_count(to, event); event 164 drivers/clocksource/timer-sun5i.c unsigned long event, void *data) event 170 drivers/clocksource/timer-sun5i.c switch (event) { event 250 drivers/clocksource/timer-sun5i.c unsigned long event, void *data) event 256 drivers/clocksource/timer-sun5i.c if (event == POST_RATE_CHANGE) { event 303 drivers/cpufreq/s3c2416-cpufreq.c unsigned 
long event, void *ptr) event 555 drivers/cpufreq/s5pv210-cpufreq.c unsigned long event, void *ptr) event 106 drivers/cpufreq/speedstep-smi.c if (!(ist_info.event & 0xFFFF)) { event 343 drivers/cpufreq/speedstep-smi.c ist_info.event, ist_info.perf_level); event 52 drivers/crypto/chelsio/chtls/chtls_main.c unsigned long event, void *data) event 59 drivers/crypto/chelsio/chtls/chtls_main.c switch (event) { event 89 drivers/crypto/qat/qat_common/adf_common_drv.h enum adf_event event); event 338 drivers/crypto/qat/qat_common/qat_crypto.c enum adf_event event) event 342 drivers/crypto/qat/qat_common/qat_crypto.c switch (event) { event 54 drivers/devfreq/governor.h unsigned int event, void *data); event 126 drivers/devfreq/governor_passive.c unsigned long event, void *ptr) event 135 drivers/devfreq/governor_passive.c switch (event) { event 150 drivers/devfreq/governor_passive.c unsigned int event, void *data) event 161 drivers/devfreq/governor_passive.c switch (event) { event 25 drivers/devfreq/governor_performance.c unsigned int event, void *data) event 29 drivers/devfreq/governor_performance.c if (event == DEVFREQ_GOV_START) { event 25 drivers/devfreq/governor_powersave.c unsigned int event, void *data) event 29 drivers/devfreq/governor_powersave.c if (event == DEVFREQ_GOV_START) { event 88 drivers/devfreq/governor_simpleondemand.c unsigned int event, void *data) event 90 drivers/devfreq/governor_simpleondemand.c switch (event) { event 115 drivers/devfreq/governor_userspace.c unsigned int event, void *data) event 119 drivers/devfreq/governor_userspace.c switch (event) { event 551 drivers/devfreq/tegra30-devfreq.c unsigned int event, void *data) event 555 drivers/devfreq/tegra30-devfreq.c switch (event) { event 617 drivers/dma/imx-sdma.c static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) event 620 drivers/dma/imx-sdma.c return chnenbl0 + event * 4; event 722 drivers/dma/imx-sdma.c static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event) event 727 drivers/dma/imx-sdma.c u32 chnenbl = chnenbl_ofs(sdma, event); event 734 drivers/dma/imx-sdma.c static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) event 738 drivers/dma/imx-sdma.c u32 chnenbl = chnenbl_ofs(sdma, event); event 1156 drivers/dma/ste_dma40.c enum d40_events event_type, u32 event, event 1167 drivers/dma/ste_dma40.c writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) event 1168 drivers/dma/ste_dma40.c | ~D40_EVENTLINE_MASK(event), addr); event 1172 drivers/dma/ste_dma40.c status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> event 1173 drivers/dma/ste_dma40.c D40_EVENTLINE_POS(event); event 1179 drivers/dma/ste_dma40.c writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event)) event 1180 drivers/dma/ste_dma40.c | ~D40_EVENTLINE_MASK(event), addr); event 1184 drivers/dma/ste_dma40.c status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> event 1185 drivers/dma/ste_dma40.c D40_EVENTLINE_POS(event); event 1215 drivers/dma/ste_dma40.c D40_EVENTLINE_POS(event)) | event 1216 drivers/dma/ste_dma40.c ~D40_EVENTLINE_MASK(event), addr); event 1218 drivers/dma/ste_dma40.c if (readl(addr) & D40_EVENTLINE_MASK(event)) event 1241 drivers/dma/ste_dma40.c u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); event 1246 drivers/dma/ste_dma40.c __d40_config_set_event(d40c, event_type, event, event 1250 drivers/dma/ste_dma40.c __d40_config_set_event(d40c, event_type, event, event 1993 drivers/dma/ste_dma40.c u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); event 2028 
drivers/dma/ste_dma40.c d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); event 2056 drivers/dma/ste_dma40.c u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); event 2084 drivers/dma/ste_dma40.c status = (status & D40_EVENTLINE_MASK(event)) >> event 2085 drivers/dma/ste_dma40.c D40_EVENTLINE_POS(event); event 2298 drivers/dma/ste_dma40.c u32 event = D40_TYPE_TO_EVENT(dev_type); event 2300 drivers/dma/ste_dma40.c u32 bit = BIT(event); event 51 drivers/dma/ti/dma-crossbar.c static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) event 58 drivers/dma/ti/dma-crossbar.c if (event >= 60 && event <= 63) event 59 drivers/dma/ti/dma-crossbar.c writeb_relaxed(val, iomem + (63 - event % 4)); event 61 drivers/dma/ti/dma-crossbar.c writeb_relaxed(val, iomem + event); event 2128 drivers/edac/altera_edac.c unsigned long event, void *ptr) event 39 drivers/edac/octeon_edac-pc.c unsigned long event, void *ptr) event 49 drivers/edac/octeon_edac-pc.c if (event) { event 69 drivers/edac/octeon_edac-pc.c if (event) event 28 drivers/edac/sifive_edac.c int ecc_err_event(struct notifier_block *this, unsigned long event, void *ptr) event 35 drivers/edac/sifive_edac.c if (event == SIFIVE_L2_ERR_TYPE_UE) event 37 drivers/edac/sifive_edac.c else if (event == SIFIVE_L2_ERR_TYPE_CE) event 285 drivers/extcon/extcon-axp288.c unsigned long event, void *param) event 297 drivers/extcon/extcon-rt8973a.c enum rt8973a_event_type event) event 305 drivers/extcon/extcon-rt8973a.c switch (event) { event 318 drivers/extcon/extcon-rt8973a.c event == RT8973A_EVENT_OVP ? "Voltage" : "Temperature"); event 324 drivers/extcon/extcon-rt8973a.c "Cannot handle this event (event:%d)\n", event); event 162 drivers/firewire/core-cdev.c struct event event; event 167 drivers/firewire/core-cdev.c struct event event; event 174 drivers/firewire/core-cdev.c struct event event; event 182 drivers/firewire/core-cdev.c struct event event; event 187 drivers/firewire/core-cdev.c struct event event; event 192 drivers/firewire/core-cdev.c struct event event; event 197 drivers/firewire/core-cdev.c struct event event; event 204 drivers/firewire/core-cdev.c struct event event; event 271 drivers/firewire/core-cdev.c static void queue_event(struct client *client, struct event *event, event 276 drivers/firewire/core-cdev.c event->v[0].data = data0; event 277 drivers/firewire/core-cdev.c event->v[0].size = size0; event 278 drivers/firewire/core-cdev.c event->v[1].data = data1; event 279 drivers/firewire/core-cdev.c event->v[1].size = size1; event 283 drivers/firewire/core-cdev.c kfree(event); event 285 drivers/firewire/core-cdev.c list_add_tail(&event->link, &client->event_list); event 294 drivers/firewire/core-cdev.c struct event *event; event 309 drivers/firewire/core-cdev.c event = list_first_entry(&client->event_list, struct event, link); event 310 drivers/firewire/core-cdev.c list_del(&event->link); event 314 drivers/firewire/core-cdev.c for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { event 315 drivers/firewire/core-cdev.c size = min(event->v[i].size, count - total); event 316 drivers/firewire/core-cdev.c if (copy_to_user(buffer + total, event->v[i].data, size)) { event 325 drivers/firewire/core-cdev.c kfree(event); event 338 drivers/firewire/core-cdev.c static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, event 345 drivers/firewire/core-cdev.c event->closure = client->bus_reset_closure; event 346 drivers/firewire/core-cdev.c event->type = FW_CDEV_EVENT_BUS_RESET; event 347 
drivers/firewire/core-cdev.c event->generation = client->device->generation; event 348 drivers/firewire/core-cdev.c event->node_id = client->device->node_id; event 349 drivers/firewire/core-cdev.c event->local_node_id = card->local_node->node_id; event 350 drivers/firewire/core-cdev.c event->bm_node_id = card->bm_node_id; event 351 drivers/firewire/core-cdev.c event->irm_node_id = card->irm_node->node_id; event 352 drivers/firewire/core-cdev.c event->root_node_id = card->root_node->node_id; event 385 drivers/firewire/core-cdev.c queue_event(client, &e->event, event 565 drivers/firewire/core-cdev.c queue_event(client, &e->event, rsp, sizeof(*rsp), event 568 drivers/firewire/core-cdev.c queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, event 736 drivers/firewire/core-cdev.c queue_event(handler->client, &e->event, event 926 drivers/firewire/core-cdev.c queue_event(client, &e->event, &e->interrupt, event 944 drivers/firewire/core-cdev.c queue_event(client, &e->event, &e->interrupt, event 1320 drivers/firewire/core-cdev.c queue_event(client, &e->event, event 1501 drivers/firewire/core-cdev.c queue_event(e->client, &e->event, &e->phy_packet, event 1577 drivers/firewire/core-cdev.c queue_event(client, &e->event, event 1740 drivers/firewire/core-cdev.c struct event *event, *next_event; event 1766 drivers/firewire/core-cdev.c list_for_each_entry_safe(event, next_event, &client->event_list, link) event 1767 drivers/firewire/core-cdev.c kfree(event); event 1203 drivers/firewire/core-device.c void fw_node_event(struct fw_card *card, struct fw_node *node, int event) event 1207 drivers/firewire/core-device.c switch (event) { event 413 drivers/firewire/core-topology.c int i, event; event 427 drivers/firewire/core-topology.c event = FW_NODE_LINK_OFF; event 429 drivers/firewire/core-topology.c event = FW_NODE_LINK_ON; event 431 drivers/firewire/core-topology.c event = FW_NODE_INITIATED_RESET; event 433 drivers/firewire/core-topology.c event = FW_NODE_UPDATED; event 441 drivers/firewire/core-topology.c fw_node_event(card, node0, event); event 153 drivers/firewire/core.h void fw_node_event(struct fw_card *card, struct fw_node *node, int event); event 2065 drivers/firewire/ohci.c u32 event, iso_event; event 2068 drivers/firewire/ohci.c event = reg_read(ohci, OHCI1394_IntEventClear); event 2070 drivers/firewire/ohci.c if (!event || !~event) event 2078 drivers/firewire/ohci.c event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); event 2079 drivers/firewire/ohci.c log_irqs(ohci, event); event 2081 drivers/firewire/ohci.c if (event & OHCI1394_selfIDComplete) event 2084 drivers/firewire/ohci.c if (event & OHCI1394_RQPkt) event 2087 drivers/firewire/ohci.c if (event & OHCI1394_RSPkt) event 2090 drivers/firewire/ohci.c if (event & OHCI1394_reqTxComplete) event 2093 drivers/firewire/ohci.c if (event & OHCI1394_respTxComplete) event 2096 drivers/firewire/ohci.c if (event & OHCI1394_isochRx) { event 2108 drivers/firewire/ohci.c if (event & OHCI1394_isochTx) { event 2120 drivers/firewire/ohci.c if (unlikely(event & OHCI1394_regAccessFail)) event 2123 drivers/firewire/ohci.c if (unlikely(event & OHCI1394_postedWriteErr)) { event 2132 drivers/firewire/ohci.c if (unlikely(event & OHCI1394_cycleTooLong)) { event 2139 drivers/firewire/ohci.c if (unlikely(event & OHCI1394_cycleInconsistent)) { event 2150 drivers/firewire/ohci.c if (unlikely(event & OHCI1394_unrecoverableError)) event 2153 drivers/firewire/ohci.c if (event & OHCI1394_cycle64Seconds) { event 76 drivers/firmware/arm_sdei.c struct sdei_event 
*event; event 81 drivers/firmware/arm_sdei.c #define CROSSCALL_INIT(arg, event) (arg.event = event, \ event 85 drivers/firmware/arm_sdei.c static inline int sdei_do_cross_call(void *fn, struct sdei_event * event) event 89 drivers/firmware/arm_sdei.c CROSSCALL_INIT(arg, event); event 195 drivers/firmware/arm_sdei.c static int sdei_api_event_get_info(u32 event, u32 info, u64 *result) event 197 drivers/firmware/arm_sdei.c return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0, event 207 drivers/firmware/arm_sdei.c struct sdei_event *event; event 212 drivers/firmware/arm_sdei.c event = kzalloc(sizeof(*event), GFP_KERNEL); event 213 drivers/firmware/arm_sdei.c if (!event) event 216 drivers/firmware/arm_sdei.c INIT_LIST_HEAD(&event->list); event 217 drivers/firmware/arm_sdei.c event->event_num = event_num; event 222 drivers/firmware/arm_sdei.c kfree(event); event 225 drivers/firmware/arm_sdei.c event->priority = result; event 230 drivers/firmware/arm_sdei.c kfree(event); event 233 drivers/firmware/arm_sdei.c event->type = result; event 235 drivers/firmware/arm_sdei.c if (event->type == SDEI_EVENT_TYPE_SHARED) { event 238 drivers/firmware/arm_sdei.c kfree(event); event 243 drivers/firmware/arm_sdei.c reg->priority = event->priority; event 247 drivers/firmware/arm_sdei.c event->registered = reg; event 254 drivers/firmware/arm_sdei.c kfree(event); event 261 drivers/firmware/arm_sdei.c reg->event_num = event->event_num; event 262 drivers/firmware/arm_sdei.c reg->priority = event->priority; event 267 drivers/firmware/arm_sdei.c event->private_registered = regs; event 271 drivers/firmware/arm_sdei.c kfree(event->registered); event 272 drivers/firmware/arm_sdei.c kfree(event); event 273 drivers/firmware/arm_sdei.c event = ERR_PTR(-EBUSY); event 276 drivers/firmware/arm_sdei.c list_add(&event->list, &sdei_list); event 280 drivers/firmware/arm_sdei.c return event; event 283 drivers/firmware/arm_sdei.c static void sdei_event_destroy(struct sdei_event *event) event 288 drivers/firmware/arm_sdei.c list_del(&event->list); event 291 drivers/firmware/arm_sdei.c if (event->type == SDEI_EVENT_TYPE_SHARED) event 292 drivers/firmware/arm_sdei.c kfree(event->registered); event 294 drivers/firmware/arm_sdei.c free_percpu(event->private_registered); event 296 drivers/firmware/arm_sdei.c kfree(event); event 398 drivers/firmware/arm_sdei.c err = sdei_api_event_enable(arg->event->event_num); event 406 drivers/firmware/arm_sdei.c struct sdei_event *event; event 409 drivers/firmware/arm_sdei.c event = sdei_event_find(event_num); event 410 drivers/firmware/arm_sdei.c if (!event) { event 416 drivers/firmware/arm_sdei.c event->reenable = true; event 419 drivers/firmware/arm_sdei.c if (event->type == SDEI_EVENT_TYPE_SHARED) event 420 drivers/firmware/arm_sdei.c err = sdei_api_event_enable(event->event_num); event 422 drivers/firmware/arm_sdei.c err = sdei_do_cross_call(_local_event_enable, event); event 440 drivers/firmware/arm_sdei.c err = sdei_api_event_disable(arg->event->event_num); event 448 drivers/firmware/arm_sdei.c struct sdei_event *event; event 451 drivers/firmware/arm_sdei.c event = sdei_event_find(event_num); event 452 drivers/firmware/arm_sdei.c if (!event) { event 458 drivers/firmware/arm_sdei.c event->reenable = false; event 461 drivers/firmware/arm_sdei.c if (event->type == SDEI_EVENT_TYPE_SHARED) event 462 drivers/firmware/arm_sdei.c err = sdei_api_event_disable(event->event_num); event 464 drivers/firmware/arm_sdei.c err = sdei_do_cross_call(_ipi_event_disable, event); event 485 
drivers/firmware/arm_sdei.c err = sdei_api_event_unregister(arg->event->event_num); event 490 drivers/firmware/arm_sdei.c static int _sdei_event_unregister(struct sdei_event *event) event 494 drivers/firmware/arm_sdei.c if (event->type == SDEI_EVENT_TYPE_SHARED) event 495 drivers/firmware/arm_sdei.c return sdei_api_event_unregister(event->event_num); event 497 drivers/firmware/arm_sdei.c return sdei_do_cross_call(_local_event_unregister, event); event 503 drivers/firmware/arm_sdei.c struct sdei_event *event; event 508 drivers/firmware/arm_sdei.c event = sdei_event_find(event_num); event 510 drivers/firmware/arm_sdei.c if (!event) { event 517 drivers/firmware/arm_sdei.c event->reregister = false; event 518 drivers/firmware/arm_sdei.c event->reenable = false; event 521 drivers/firmware/arm_sdei.c err = _sdei_event_unregister(event); event 525 drivers/firmware/arm_sdei.c sdei_event_destroy(event); event 540 drivers/firmware/arm_sdei.c struct sdei_event *event; event 544 drivers/firmware/arm_sdei.c list_for_each_entry(event, &sdei_list, list) { event 545 drivers/firmware/arm_sdei.c if (event->type != SDEI_EVENT_TYPE_SHARED) event 548 drivers/firmware/arm_sdei.c err = _sdei_event_unregister(event); event 575 drivers/firmware/arm_sdei.c reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id()); event 576 drivers/firmware/arm_sdei.c err = sdei_api_event_register(arg->event->event_num, sdei_entry_point, event 582 drivers/firmware/arm_sdei.c static int _sdei_event_register(struct sdei_event *event) event 588 drivers/firmware/arm_sdei.c if (event->type == SDEI_EVENT_TYPE_SHARED) event 589 drivers/firmware/arm_sdei.c return sdei_api_event_register(event->event_num, event 591 drivers/firmware/arm_sdei.c event->registered, event 594 drivers/firmware/arm_sdei.c err = sdei_do_cross_call(_local_event_register, event); event 596 drivers/firmware/arm_sdei.c sdei_do_cross_call(_local_event_unregister, event); event 604 drivers/firmware/arm_sdei.c struct sdei_event *event; event 616 drivers/firmware/arm_sdei.c event = sdei_event_create(event_num, cb, arg); event 617 drivers/firmware/arm_sdei.c if (IS_ERR(event)) { event 618 drivers/firmware/arm_sdei.c err = PTR_ERR(event); event 625 drivers/firmware/arm_sdei.c event->reregister = true; event 628 drivers/firmware/arm_sdei.c err = _sdei_event_register(event); event 631 drivers/firmware/arm_sdei.c event->reregister = false; event 632 drivers/firmware/arm_sdei.c event->reenable = false; event 635 drivers/firmware/arm_sdei.c sdei_event_destroy(event); event 646 drivers/firmware/arm_sdei.c static int sdei_reregister_event(struct sdei_event *event) event 652 drivers/firmware/arm_sdei.c err = _sdei_event_register(event); event 654 drivers/firmware/arm_sdei.c pr_err("Failed to re-register event %u\n", event->event_num); event 655 drivers/firmware/arm_sdei.c sdei_event_destroy(event); event 659 drivers/firmware/arm_sdei.c if (event->reenable) { event 660 drivers/firmware/arm_sdei.c if (event->type == SDEI_EVENT_TYPE_SHARED) event 661 drivers/firmware/arm_sdei.c err = sdei_api_event_enable(event->event_num); event 663 drivers/firmware/arm_sdei.c err = sdei_do_cross_call(_local_event_enable, event); event 667 drivers/firmware/arm_sdei.c pr_err("Failed to re-enable event %u\n", event->event_num); event 675 drivers/firmware/arm_sdei.c struct sdei_event *event; event 679 drivers/firmware/arm_sdei.c list_for_each_entry(event, &sdei_list, list) { event 680 drivers/firmware/arm_sdei.c if (event->type != SDEI_EVENT_TYPE_SHARED) event 683 drivers/firmware/arm_sdei.c 
if (event->reregister) { event 684 drivers/firmware/arm_sdei.c err = sdei_reregister_event(event); event 697 drivers/firmware/arm_sdei.c struct sdei_event *event; event 702 drivers/firmware/arm_sdei.c list_for_each_entry(event, &sdei_list, list) { event 703 drivers/firmware/arm_sdei.c if (event->type == SDEI_EVENT_TYPE_SHARED) event 706 drivers/firmware/arm_sdei.c CROSSCALL_INIT(arg, event); event 711 drivers/firmware/arm_sdei.c event->event_num, arg.first_error); event 720 drivers/firmware/arm_sdei.c struct sdei_event *event; event 725 drivers/firmware/arm_sdei.c list_for_each_entry(event, &sdei_list, list) { event 726 drivers/firmware/arm_sdei.c if (event->type == SDEI_EVENT_TYPE_SHARED) event 729 drivers/firmware/arm_sdei.c if (event->reregister) { event 730 drivers/firmware/arm_sdei.c CROSSCALL_INIT(arg, event); event 735 drivers/firmware/arm_sdei.c event->event_num, arg.first_error); event 738 drivers/firmware/arm_sdei.c if (event->reenable) { event 739 drivers/firmware/arm_sdei.c CROSSCALL_INIT(arg, event); event 743 drivers/firmware/arm_sdei.c event->event_num, arg.first_error); event 286 drivers/firmware/efi/capsule.c static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd) event 62 drivers/firmware/efi/efibc.c unsigned long event, void *data) event 67 drivers/firmware/efi/efibc.c if (event == SYS_RESTART) event 405 drivers/gpio/gpio-dln2.c } __packed *event = data; event 408 drivers/gpio/gpio-dln2.c if (len < sizeof(*event)) { event 413 drivers/gpio/gpio-dln2.c pin = le16_to_cpu(event->pin); event 427 drivers/gpio/gpio-dln2.c if (event->value) event 431 drivers/gpio/gpio-dln2.c if (!event->value) event 133 drivers/gpio/gpiolib-acpi.c struct acpi_gpio_event *event = data; event 135 drivers/gpio/gpiolib-acpi.c acpi_evaluate_object(event->handle, NULL, NULL, NULL); event 142 drivers/gpio/gpiolib-acpi.c struct acpi_gpio_event *event = data; event 144 drivers/gpio/gpiolib-acpi.c acpi_execute_simple_method(event->handle, NULL, event->pin); event 172 drivers/gpio/gpiolib-acpi.c struct acpi_gpio_event *event) event 176 drivers/gpio/gpiolib-acpi.c ret = request_threaded_irq(event->irq, NULL, event->handler, event 177 drivers/gpio/gpiolib-acpi.c event->irqflags, "ACPI:Event", event); event 181 drivers/gpio/gpiolib-acpi.c event->irq); event 185 drivers/gpio/gpiolib-acpi.c if (event->irq_is_wake) event 186 drivers/gpio/gpiolib-acpi.c enable_irq_wake(event->irq); event 188 drivers/gpio/gpiolib-acpi.c event->irq_requested = true; event 192 drivers/gpio/gpiolib-acpi.c (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) { event 193 drivers/gpio/gpiolib-acpi.c value = gpiod_get_raw_value_cansleep(event->desc); event 194 drivers/gpio/gpiolib-acpi.c if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) || event 195 drivers/gpio/gpiolib-acpi.c ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0)) event 196 drivers/gpio/gpiolib-acpi.c event->handler(event->irq, event); event 202 drivers/gpio/gpiolib-acpi.c struct acpi_gpio_event *event; event 204 drivers/gpio/gpiolib-acpi.c list_for_each_entry(event, &acpi_gpio->events, node) event 205 drivers/gpio/gpiolib-acpi.c acpi_gpiochip_request_irq(acpi_gpio, event); event 266 drivers/gpio/gpiolib-acpi.c struct acpi_gpio_event *event; event 311 drivers/gpio/gpiolib-acpi.c event = kzalloc(sizeof(*event), GFP_KERNEL); event 312 drivers/gpio/gpiolib-acpi.c if (!event) event 315 drivers/gpio/gpiolib-acpi.c event->irqflags = IRQF_ONESHOT; event 318 drivers/gpio/gpiolib-acpi.c event->irqflags |= 
IRQF_TRIGGER_HIGH; event 320 drivers/gpio/gpiolib-acpi.c event->irqflags |= IRQF_TRIGGER_LOW; event 324 drivers/gpio/gpiolib-acpi.c event->irqflags |= IRQF_TRIGGER_RISING; event 327 drivers/gpio/gpiolib-acpi.c event->irqflags |= IRQF_TRIGGER_FALLING; event 330 drivers/gpio/gpiolib-acpi.c event->irqflags |= IRQF_TRIGGER_RISING | event 336 drivers/gpio/gpiolib-acpi.c event->handle = evt_handle; event 337 drivers/gpio/gpiolib-acpi.c event->handler = handler; event 338 drivers/gpio/gpiolib-acpi.c event->irq = irq; event 339 drivers/gpio/gpiolib-acpi.c event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio); event 340 drivers/gpio/gpiolib-acpi.c event->pin = pin; event 341 drivers/gpio/gpiolib-acpi.c event->desc = desc; event 343 drivers/gpio/gpiolib-acpi.c list_add_tail(&event->node, &acpi_gpio->events); event 410 drivers/gpio/gpiolib-acpi.c struct acpi_gpio_event *event, *ep; event 430 drivers/gpio/gpiolib-acpi.c list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { event 431 drivers/gpio/gpiolib-acpi.c if (event->irq_requested) { event 432 drivers/gpio/gpiolib-acpi.c if (event->irq_is_wake) event 433 drivers/gpio/gpiolib-acpi.c disable_irq_wake(event->irq); event 435 drivers/gpio/gpiolib-acpi.c free_irq(event->irq, event); event 438 drivers/gpio/gpiolib-acpi.c gpiochip_unlock_as_irq(chip, event->pin); event 439 drivers/gpio/gpiolib-acpi.c gpiochip_free_own_desc(event->desc); event 440 drivers/gpio/gpiolib-acpi.c list_del(&event->node); event 441 drivers/gpio/gpiolib-acpi.c kfree(event); event 1026 drivers/gpio/gpiolib-acpi.c struct acpi_gpio_event *event; event 1028 drivers/gpio/gpiolib-acpi.c list_for_each_entry(event, &achip->events, node) { event 1029 drivers/gpio/gpiolib-acpi.c if (event->pin == pin) { event 1030 drivers/gpio/gpiolib-acpi.c desc = event->desc; event 390 drivers/gpu/drm/amd/amdgpu/amdgpu.h struct drm_pending_vblank_event *event; event 414 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c struct acpi_bus_event *event) event 420 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c event->device_class, event->type); event 422 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) event 428 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c event->type != atif->notification_cfg.command_code) { event 430 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c if (event->type == ACPI_VIDEO_NOTIFY_PROBE) event 150 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c struct drm_pending_vblank_event *event, event 171 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c work->event = event; event 429 drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h struct drm_pending_vblank_event *event; event 631 drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h struct drm_pending_vblank_event *event, event 46 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c static int amdgpu_perf_event_init(struct perf_event *event) event 48 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct hw_perf_event *hwc = &event->hw; event 51 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c if (event->attr.type != event->pmu->type) event 55 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c hwc->conf = event->attr.config; event 61 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c static void amdgpu_perf_start(struct perf_event *event, int flags) event 63 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct hw_perf_event *hwc = &event->hw; event 64 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct amdgpu_pmu_entry *pe = container_of(event->pmu, event 85 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c perf_event_update_userpage(event); event 90 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c static void 
amdgpu_perf_read(struct perf_event *event) event 92 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct hw_perf_event *hwc = &event->hw; event 93 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct amdgpu_pmu_entry *pe = container_of(event->pmu, event 113 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c local64_add(count - prev, &event->count); event 117 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c static void amdgpu_perf_stop(struct perf_event *event, int flags) event 119 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct hw_perf_event *hwc = &event->hw; event 120 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct amdgpu_pmu_entry *pe = container_of(event->pmu, event 141 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c amdgpu_perf_read(event); event 146 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c static int amdgpu_perf_add(struct perf_event *event, int flags) event 148 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct hw_perf_event *hwc = &event->hw; event 151 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct amdgpu_pmu_entry *pe = container_of(event->pmu, event 155 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; event 169 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c amdgpu_perf_start(event, PERF_EF_RELOAD); event 176 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c static void amdgpu_perf_del(struct perf_event *event, int flags) event 178 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct hw_perf_event *hwc = &event->hw; event 179 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c struct amdgpu_pmu_entry *pe = container_of(event->pmu, event 183 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c amdgpu_perf_stop(event, PERF_EF_UPDATE); event 193 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c perf_event_update_userpage(event); event 3153 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c if (works->event) event 3154 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); event 3279 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c if(works->event) event 3280 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); event 3030 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c if (works->event) event 3031 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); event 3122 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c if (works->event) event 3123 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); event 674 drivers/gpu/drm/amd/amdgpu/dce_virtual.c if (works->event) event 675 drivers/gpu/drm/amd/amdgpu/dce_virtual.c drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); event 34 drivers/gpu/drm/amd/amdgpu/df_v3_6.c AMDGPU_PMU_ATTR(event, "config:0-7"); event 62 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c enum idh_event event) event 68 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c if (reg != event) event 99 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event) event 104 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c r = xgpu_ai_mailbox_rcv_msg(adev, event); event 112 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); event 369 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c enum idh_event event = xgpu_ai_mailbox_peek_msg(adev); event 371 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c switch (event) { event 365 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c enum idh_event event) event 371 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c if (event != IDH_FLR_NOTIFICATION_CMPL) { event 378 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c if (reg != event) event 409 
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event) event 413 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c r = xgpu_vi_mailbox_rcv_msg(adev, event); event 423 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c r = xgpu_vi_mailbox_rcv_msg(adev, event); event 41 drivers/gpu/drm/amd/amdkfd/kfd_events.c struct kfd_event *event; /* Event to wait for */ event 244 drivers/gpu/drm/amd/amdkfd/kfd_events.c waiter->event = NULL; event 553 drivers/gpu/drm/amd/amdkfd/kfd_events.c waiter->event = ev; event 562 drivers/gpu/drm/amd/amdkfd/kfd_events.c struct kfd_event *ev = waiter->event; event 588 drivers/gpu/drm/amd/amdkfd/kfd_events.c if (!event_waiters[i].event) event 614 drivers/gpu/drm/amd/amdkfd/kfd_events.c struct kfd_event *event; event 619 drivers/gpu/drm/amd/amdkfd/kfd_events.c event = waiter->event; event 620 drivers/gpu/drm/amd/amdkfd/kfd_events.c if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) { event 622 drivers/gpu/drm/amd/amdkfd/kfd_events.c src = &event->memory_exception_data; event 658 drivers/gpu/drm/amd/amdkfd/kfd_events.c if (waiters[i].event) event 659 drivers/gpu/drm/amd/amdkfd/kfd_events.c remove_wait_queue(&waiters[i].event->wq, event 299 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c e = amdgpu_crtc->event; event 300 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c amdgpu_crtc->event = NULL; event 3497 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING; event 5483 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c WARN_ON(acrtc->event); event 5485 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c acrtc->event = acrtc->base.state->event; event 5491 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c acrtc->base.state->event = NULL; event 5876 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c if (acrtc_attach->base.state->event) { event 6408 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c if (new_crtc_state->event) event 6409 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c drm_send_event_locked(dev, &new_crtc_state->event->base); event 6411 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c new_crtc_state->event = NULL; event 753 drivers/gpu/drm/amd/display/dc/dc_hw_types.h enum crtc_event event; event 1589 drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c switch (crtc_tp->event) { event 716 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c switch (crtc_tp->event) { event 141 drivers/gpu/drm/arc/arcpgu_crtc.c struct drm_pending_vblank_event *event = crtc->state->event; event 143 drivers/gpu/drm/arc/arcpgu_crtc.c if (event) { event 144 drivers/gpu/drm/arc/arcpgu_crtc.c crtc->state->event = NULL; event 147 drivers/gpu/drm/arc/arcpgu_crtc.c drm_crtc_send_vblank_event(crtc, event); event 194 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c struct drm_pending_vblank_event *event; event 200 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c } else if (crtc->state->event) { event 201 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c event = crtc->state->event; event 206 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c crtc->state->event = NULL; event 207 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c drm_crtc_send_vblank_event(crtc, event); event 210 drivers/gpu/drm/arm/hdlcd_crtc.c struct drm_pending_vblank_event *event = crtc->state->event; event 212 drivers/gpu/drm/arm/hdlcd_crtc.c if (event) { event 213 drivers/gpu/drm/arm/hdlcd_crtc.c crtc->state->event = NULL; event 217 drivers/gpu/drm/arm/hdlcd_crtc.c drm_crtc_arm_vblank_event(crtc, event); event 
219 drivers/gpu/drm/arm/hdlcd_crtc.c drm_crtc_send_vblank_event(crtc, event); event 195 drivers/gpu/drm/arm/malidp_drv.c malidp->event = malidp->crtc.state->event; event 196 drivers/gpu/drm/arm/malidp_drv.c malidp->crtc.state->event = NULL; event 204 drivers/gpu/drm/arm/malidp_drv.c if (malidp->event) event 220 drivers/gpu/drm/arm/malidp_drv.c } else if (malidp->event) { event 223 drivers/gpu/drm/arm/malidp_drv.c drm_crtc_send_vblank_event(&malidp->crtc, malidp->event); event 224 drivers/gpu/drm/arm/malidp_drv.c malidp->event = NULL; event 36 drivers/gpu/drm/arm/malidp_drv.h struct drm_pending_vblank_event *event; event 1186 drivers/gpu/drm/arm/malidp_hw.c if (malidp->event != NULL) { event 1188 drivers/gpu/drm/arm/malidp_hw.c drm_crtc_send_vblank_event(&malidp->crtc, malidp->event); event 1189 drivers/gpu/drm/arm/malidp_hw.c malidp->event = NULL; event 124 drivers/gpu/drm/armada/armada_crtc.c struct drm_pending_vblank_event *event; event 127 drivers/gpu/drm/armada/armada_crtc.c event = xchg(&crtc->state->event, NULL); event 128 drivers/gpu/drm/armada/armada_crtc.c if (event) { event 130 drivers/gpu/drm/armada/armada_crtc.c dcrtc->event = event; event 248 drivers/gpu/drm/armada/armada_crtc.c struct drm_pending_vblank_event *event; event 295 drivers/gpu/drm/armada/armada_crtc.c event = xchg(&dcrtc->event, NULL); event 296 drivers/gpu/drm/armada/armada_crtc.c if (event) { event 298 drivers/gpu/drm/armada/armada_crtc.c drm_crtc_send_vblank_event(&dcrtc->crtc, event); event 473 drivers/gpu/drm/armada/armada_crtc.c struct drm_pending_vblank_event *event; event 495 drivers/gpu/drm/armada/armada_crtc.c event = crtc->state->event; event 496 drivers/gpu/drm/armada/armada_crtc.c crtc->state->event = NULL; event 497 drivers/gpu/drm/armada/armada_crtc.c if (event) { event 499 drivers/gpu/drm/armada/armada_crtc.c drm_crtc_send_vblank_event(crtc, event); event 66 drivers/gpu/drm/armada/armada_crtc.h struct drm_pending_vblank_event *event; event 168 drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c struct drm_pending_vblank_event *event; event 172 drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c event = crtc->state->event; event 173 drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c if (event) { event 174 drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c crtc->state->event = NULL; event 177 drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c drm_crtc_arm_vblank_event(crtc, event); event 179 drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c drm_crtc_send_vblank_event(crtc, event); event 55 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct drm_pending_vblank_event *event; event 348 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c if (c->state->event) { event 349 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c c->state->event->pipe = drm_crtc_index(c); event 353 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c crtc->event = c->state->event; event 354 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c c->state->event = NULL; event 388 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c if (crtc->event) { event 389 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c drm_crtc_send_vblank_event(&crtc->base, crtc->event); event 391 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c crtc->event = NULL; event 64 drivers/gpu/drm/bochs/bochs_kms.c if (crtc->state->event) { event 66 drivers/gpu/drm/bochs/bochs_kms.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 67 drivers/gpu/drm/bochs/bochs_kms.c crtc->state->event = NULL; event 1172 drivers/gpu/drm/bridge/analogix-anx78xx.c bool event = false; event 1181 drivers/gpu/drm/bridge/analogix-anx78xx.c return event; event 1186 
drivers/gpu/drm/bridge/analogix-anx78xx.c event = true; event 1193 drivers/gpu/drm/bridge/analogix-anx78xx.c event = true; event 1196 drivers/gpu/drm/bridge/analogix-anx78xx.c return event; event 1242 drivers/gpu/drm/bridge/analogix-anx78xx.c bool event = false; event 1267 drivers/gpu/drm/bridge/analogix-anx78xx.c event = anx78xx_handle_common_int_4(anx78xx, irq); event 1286 drivers/gpu/drm/bridge/analogix-anx78xx.c if (event) event 2151 drivers/gpu/drm/bridge/sil-sii8620.c unsigned long event, void *ptr) event 438 drivers/gpu/drm/cirrus/cirrus.c if (crtc->state->event) { event 440 drivers/gpu/drm/cirrus/cirrus.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 441 drivers/gpu/drm/cirrus/cirrus.c crtc->state->event = NULL; event 369 drivers/gpu/drm/drm_atomic.c if (new_crtc_state->event && event 2052 drivers/gpu/drm/drm_atomic_helper.c if (!new_crtc_state->event) { event 2053 drivers/gpu/drm/drm_atomic_helper.c commit->event = kzalloc(sizeof(*commit->event), event 2055 drivers/gpu/drm/drm_atomic_helper.c if (!commit->event) event 2058 drivers/gpu/drm/drm_atomic_helper.c new_crtc_state->event = commit->event; event 2061 drivers/gpu/drm/drm_atomic_helper.c new_crtc_state->event->base.completion = &commit->flip_done; event 2062 drivers/gpu/drm/drm_atomic_helper.c new_crtc_state->event->base.completion_release = release_crtc_commit; event 2223 drivers/gpu/drm/drm_atomic_helper.c if (new_crtc_state->event) { event 2225 drivers/gpu/drm/drm_atomic_helper.c new_crtc_state->event); event 2226 drivers/gpu/drm/drm_atomic_helper.c new_crtc_state->event = NULL; event 2271 drivers/gpu/drm/drm_atomic_helper.c WARN_ON(new_crtc_state->event); event 2780 drivers/gpu/drm/drm_atomic_helper.c new_crtc_state->commit->event = NULL; event 3298 drivers/gpu/drm/drm_atomic_helper.c struct drm_pending_vblank_event *event, event 3310 drivers/gpu/drm/drm_atomic_helper.c crtc_state->event = event; event 3352 drivers/gpu/drm/drm_atomic_helper.c struct drm_pending_vblank_event *event, event 3366 drivers/gpu/drm/drm_atomic_helper.c ret = page_flip_common(state, crtc, fb, event, flags); event 3395 drivers/gpu/drm/drm_atomic_helper.c struct drm_pending_vblank_event *event, event 3411 drivers/gpu/drm/drm_atomic_helper.c ret = page_flip_common(state, crtc, fb, event, flags); event 130 drivers/gpu/drm/drm_atomic_state_helper.c state->event = NULL; event 182 drivers/gpu/drm/drm_atomic_state_helper.c if (state->event && state->commit->abort_completion) event 185 drivers/gpu/drm/drm_atomic_state_helper.c kfree(state->commit->event); event 186 drivers/gpu/drm/drm_atomic_state_helper.c state->commit->event = NULL; event 904 drivers/gpu/drm/drm_atomic_uapi.c e->event.base.type = DRM_EVENT_FLIP_COMPLETE; event 905 drivers/gpu/drm/drm_atomic_uapi.c e->event.base.length = sizeof(e->event); event 906 drivers/gpu/drm/drm_atomic_uapi.c e->event.vbl.crtc_id = crtc->base.id; event 907 drivers/gpu/drm/drm_atomic_uapi.c e->event.vbl.user_data = user_data; event 1131 drivers/gpu/drm/drm_atomic_uapi.c crtc_state->event = e; event 1135 drivers/gpu/drm/drm_atomic_uapi.c struct drm_pending_vblank_event *e = crtc_state->event; event 1141 drivers/gpu/drm/drm_atomic_uapi.c &e->event.base); event 1144 drivers/gpu/drm/drm_atomic_uapi.c crtc_state->event = NULL; event 1173 drivers/gpu/drm/drm_atomic_uapi.c crtc_state->event->base.fence = fence; event 1246 drivers/gpu/drm/drm_atomic_uapi.c struct drm_pending_vblank_event *event = crtc_state->event; event 1252 drivers/gpu/drm/drm_atomic_uapi.c if (event && (event->base.fence || 
event->base.file_priv)) { event 1253 drivers/gpu/drm/drm_atomic_uapi.c drm_event_cancel_free(dev, &event->base); event 1254 drivers/gpu/drm/drm_atomic_uapi.c crtc_state->event = NULL; event 503 drivers/gpu/drm/drm_file.c file_priv->event_space += e->event->length; event 525 drivers/gpu/drm/drm_file.c unsigned length = e->event->length; event 537 drivers/gpu/drm/drm_file.c if (copy_to_user(buffer + ret, e->event, length)) { event 617 drivers/gpu/drm/drm_file.c p->event = e; event 680 drivers/gpu/drm/drm_file.c p->file_priv->event_space += p->event->length; event 308 drivers/gpu/drm/drm_mipi_dbi.c if (crtc->state->event) { event 310 drivers/gpu/drm/drm_mipi_dbi.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 312 drivers/gpu/drm/drm_mipi_dbi.c crtc->state->event = NULL; event 1169 drivers/gpu/drm/drm_plane.c e->event.base.type = DRM_EVENT_FLIP_COMPLETE; event 1170 drivers/gpu/drm/drm_plane.c e->event.base.length = sizeof(e->event); event 1171 drivers/gpu/drm/drm_plane.c e->event.vbl.user_data = page_flip->user_data; event 1172 drivers/gpu/drm/drm_plane.c e->event.vbl.crtc_id = crtc->base.id; event 1174 drivers/gpu/drm/drm_plane.c ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); event 835 drivers/gpu/drm/drm_vblank.c switch (e->event.base.type) { event 839 drivers/gpu/drm/drm_vblank.c e->event.vbl.sequence = seq; event 845 drivers/gpu/drm/drm_vblank.c e->event.vbl.tv_sec = tv.tv_sec; event 846 drivers/gpu/drm/drm_vblank.c e->event.vbl.tv_usec = tv.tv_nsec / 1000; event 850 drivers/gpu/drm/drm_vblank.c e->event.seq.sequence = seq; event 851 drivers/gpu/drm/drm_vblank.c e->event.seq.time_ns = ktime_to_ns(now); event 1469 drivers/gpu/drm/drm_vblank.c e->event.base.type = DRM_EVENT_VBLANK; event 1470 drivers/gpu/drm/drm_vblank.c e->event.base.length = sizeof(e->event.vbl); event 1471 drivers/gpu/drm/drm_vblank.c e->event.vbl.user_data = vblwait->request.signal; event 1472 drivers/gpu/drm/drm_vblank.c e->event.vbl.crtc_id = 0; event 1476 drivers/gpu/drm/drm_vblank.c e->event.vbl.crtc_id = crtc->base.id; event 1493 drivers/gpu/drm/drm_vblank.c &e->event.base); event 1935 drivers/gpu/drm/drm_vblank.c e->event.base.type = DRM_EVENT_CRTC_SEQUENCE; event 1936 drivers/gpu/drm/drm_vblank.c e->event.base.length = sizeof(e->event.seq); event 1937 drivers/gpu/drm/drm_vblank.c e->event.seq.user_data = queue_seq->user_data; event 1953 drivers/gpu/drm/drm_vblank.c &e->event.base); event 275 drivers/gpu/drm/etnaviv/etnaviv_buffer.c void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event) event 291 drivers/gpu/drm/etnaviv/etnaviv_buffer.c CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | event 315 drivers/gpu/drm/etnaviv/etnaviv_buffer.c struct etnaviv_iommu_context *mmu_context, unsigned int event, event 455 drivers/gpu/drm/etnaviv/etnaviv_buffer.c CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | event 475 drivers/gpu/drm/etnaviv/etnaviv_buffer.c pr_info("event: %d\n", event); event 75 drivers/gpu/drm/etnaviv/etnaviv_drv.h void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event); event 78 drivers/gpu/drm/etnaviv/etnaviv_drv.h unsigned int event, struct etnaviv_cmdbuf *cmdbuf); event 787 drivers/gpu/drm/etnaviv/etnaviv_gpu.c for (i = 0; i < ARRAY_SIZE(gpu->event); i++) event 1102 drivers/gpu/drm/etnaviv/etnaviv_gpu.c int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS); event 1104 drivers/gpu/drm/etnaviv/etnaviv_gpu.c events[i] = event; event 1105 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 
memset(&gpu->event[event], 0, sizeof(struct etnaviv_event)); event 1106 drivers/gpu/drm/etnaviv/etnaviv_gpu.c set_bit(event, gpu->event_bitmap); event 1120 drivers/gpu/drm/etnaviv/etnaviv_gpu.c static void event_free(struct etnaviv_gpu *gpu, unsigned int event) event 1122 drivers/gpu/drm/etnaviv/etnaviv_gpu.c if (!test_bit(event, gpu->event_bitmap)) { event 1124 drivers/gpu/drm/etnaviv/etnaviv_gpu.c event); event 1126 drivers/gpu/drm/etnaviv/etnaviv_gpu.c clear_bit(event, gpu->event_bitmap); event 1204 drivers/gpu/drm/etnaviv/etnaviv_gpu.c struct etnaviv_event *event, unsigned int flags) event 1206 drivers/gpu/drm/etnaviv/etnaviv_gpu.c const struct etnaviv_gem_submit *submit = event->submit; event 1218 drivers/gpu/drm/etnaviv/etnaviv_gpu.c struct etnaviv_event *event) event 1232 drivers/gpu/drm/etnaviv/etnaviv_gpu.c sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE); event 1236 drivers/gpu/drm/etnaviv/etnaviv_gpu.c struct etnaviv_event *event) event 1238 drivers/gpu/drm/etnaviv/etnaviv_gpu.c const struct etnaviv_gem_submit *submit = event->submit; event 1242 drivers/gpu/drm/etnaviv/etnaviv_gpu.c sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST); event 1267 drivers/gpu/drm/etnaviv/etnaviv_gpu.c unsigned int i, nr_events = 1, event[3]; event 1287 drivers/gpu/drm/etnaviv/etnaviv_gpu.c ret = event_alloc(gpu, nr_events, event); event 1298 drivers/gpu/drm/etnaviv/etnaviv_gpu.c event_free(gpu, event[i]); event 1313 drivers/gpu/drm/etnaviv/etnaviv_gpu.c gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre; event 1315 drivers/gpu/drm/etnaviv/etnaviv_gpu.c gpu->event[event[1]].submit = submit; event 1316 drivers/gpu/drm/etnaviv/etnaviv_gpu.c etnaviv_sync_point_queue(gpu, event[1]); event 1319 drivers/gpu/drm/etnaviv/etnaviv_gpu.c gpu->event[event[0]].fence = gpu_fence; event 1322 drivers/gpu/drm/etnaviv/etnaviv_gpu.c event[0], &submit->cmdbuf); event 1325 drivers/gpu/drm/etnaviv/etnaviv_gpu.c gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post; event 1327 drivers/gpu/drm/etnaviv/etnaviv_gpu.c gpu->event[event[2]].submit = submit; event 1328 drivers/gpu/drm/etnaviv/etnaviv_gpu.c etnaviv_sync_point_queue(gpu, event[2]); event 1341 drivers/gpu/drm/etnaviv/etnaviv_gpu.c struct etnaviv_event *event = &gpu->event[gpu->sync_point_event]; event 1344 drivers/gpu/drm/etnaviv/etnaviv_gpu.c event->sync_point(gpu, event); event 1345 drivers/gpu/drm/etnaviv/etnaviv_gpu.c etnaviv_submit_put(event->submit); event 1389 drivers/gpu/drm/etnaviv/etnaviv_gpu.c int event; event 1405 drivers/gpu/drm/etnaviv/etnaviv_gpu.c while ((event = ffs(intr)) != 0) { event 1408 drivers/gpu/drm/etnaviv/etnaviv_gpu.c event -= 1; event 1410 drivers/gpu/drm/etnaviv/etnaviv_gpu.c intr &= ~(1 << event); event 1412 drivers/gpu/drm/etnaviv/etnaviv_gpu.c dev_dbg(gpu->dev, "event %u\n", event); event 1414 drivers/gpu/drm/etnaviv/etnaviv_gpu.c if (gpu->event[event].sync_point) { event 1415 drivers/gpu/drm/etnaviv/etnaviv_gpu.c gpu->sync_point_event = event; event 1419 drivers/gpu/drm/etnaviv/etnaviv_gpu.c fence = gpu->event[event].fence; event 1423 drivers/gpu/drm/etnaviv/etnaviv_gpu.c gpu->event[event].fence = NULL; event 1438 drivers/gpu/drm/etnaviv/etnaviv_gpu.c event_free(gpu, event); event 85 drivers/gpu/drm/etnaviv/etnaviv_gpu.h void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event); event 111 drivers/gpu/drm/etnaviv/etnaviv_gpu.h struct etnaviv_event event[ETNA_NR_EVENTS]; event 42 drivers/gpu/drm/exynos/exynos_drm_crtc.c if (crtc->state->event && !crtc->state->active) { event 
44 drivers/gpu/drm/exynos/exynos_drm_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 47 drivers/gpu/drm/exynos/exynos_drm_crtc.c crtc->state->event = NULL; event 121 drivers/gpu/drm/exynos/exynos_drm_crtc.c struct drm_pending_vblank_event *event = crtc->state->event; event 124 drivers/gpu/drm/exynos/exynos_drm_crtc.c if (!event) event 126 drivers/gpu/drm/exynos/exynos_drm_crtc.c crtc->state->event = NULL; event 131 drivers/gpu/drm/exynos/exynos_drm_crtc.c drm_crtc_arm_vblank_event(crtc, event); event 200 drivers/gpu/drm/exynos/exynos_drm_g2d.c struct drm_exynos_g2d_event event; event 220 drivers/gpu/drm/exynos/exynos_drm_g2d.c struct drm_exynos_pending_g2d_event *event; event 372 drivers/gpu/drm/exynos/exynos_drm_g2d.c if (node->event) event 373 drivers/gpu/drm/exynos/exynos_drm_g2d.c list_add_tail(&node->event->base.link, &file_priv->event_list); event 923 drivers/gpu/drm/exynos/exynos_drm_g2d.c e->event.tv_sec = now.tv_sec; event 924 drivers/gpu/drm/exynos/exynos_drm_g2d.c e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; event 925 drivers/gpu/drm/exynos/exynos_drm_g2d.c e->event.cmdlist_no = cmdlist_no; event 1165 drivers/gpu/drm/exynos/exynos_drm_g2d.c node->event = NULL; event 1168 drivers/gpu/drm/exynos/exynos_drm_g2d.c e = kzalloc(sizeof(*node->event), GFP_KERNEL); event 1174 drivers/gpu/drm/exynos/exynos_drm_g2d.c e->event.base.type = DRM_EXYNOS_G2D_EVENT; event 1175 drivers/gpu/drm/exynos/exynos_drm_g2d.c e->event.base.length = sizeof(e->event); event 1176 drivers/gpu/drm/exynos/exynos_drm_g2d.c e->event.user_data = req->user_data; event 1178 drivers/gpu/drm/exynos/exynos_drm_g2d.c ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base); event 1184 drivers/gpu/drm/exynos/exynos_drm_g2d.c node->event = e; event 1212 drivers/gpu/drm/exynos/exynos_drm_g2d.c if (node->event) { event 1288 drivers/gpu/drm/exynos/exynos_drm_g2d.c if (node->event) event 1289 drivers/gpu/drm/exynos/exynos_drm_g2d.c drm_event_cancel_free(drm_dev, &node->event->base); event 253 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct drm_exynos_ipp_event event; event 400 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (task->event) event 401 drivers/gpu/drm/exynos/exynos_drm_ipp.c drm_event_cancel_free(ipp->drm_dev, &task->event->base); event 704 drivers/gpu/drm/exynos/exynos_drm_ipp.c e->event.base.type = DRM_EXYNOS_IPP_EVENT; event 705 drivers/gpu/drm/exynos/exynos_drm_ipp.c e->event.base.length = sizeof(e->event); event 706 drivers/gpu/drm/exynos/exynos_drm_ipp.c e->event.user_data = user_data; event 709 drivers/gpu/drm/exynos/exynos_drm_ipp.c &e->event.base); event 713 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event = e; event 725 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event->event.tv_sec = now.tv_sec; event 726 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; event 727 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event->event.sequence = atomic_inc_return(&task->ipp->sequence); event 729 drivers/gpu/drm/exynos/exynos_drm_ipp.c drm_send_event(task->ipp->drm_dev, &task->event->base); event 736 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (ret == 0 && task->event) { event 739 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->event = NULL; event 97 drivers/gpu/drm/exynos/exynos_drm_ipp.h struct drm_pending_exynos_ipp_event *event; event 28 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c struct drm_pending_vblank_event *event = crtc->state->event; event 33 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c if (event) { event 34 
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c crtc->state->event = NULL; event 38 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c drm_crtc_arm_vblank_event(crtc, event); event 40 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c drm_crtc_send_vblank_event(crtc, event); event 422 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c if (crtc->state->event) event 423 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 424 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c crtc->state->event = NULL; event 506 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c struct drm_pending_vblank_event *event = crtc->state->event; event 516 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c if (event) { event 517 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c crtc->state->event = NULL; event 521 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c drm_crtc_arm_vblank_event(crtc, event); event 523 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c drm_crtc_send_vblank_event(crtc, event); event 14007 drivers/gpu/drm/i915/display/intel_display.c if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) { event 14009 drivers/gpu/drm/i915/display/intel_display.c drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event); event 14012 drivers/gpu/drm/i915/display/intel_display.c new_crtc_state->base.event = NULL; event 628 drivers/gpu/drm/i915/display/intel_opregion.c struct acpi_bus_event *event = data; event 632 drivers/gpu/drm/i915/display/intel_opregion.c if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) event 637 drivers/gpu/drm/i915/display/intel_opregion.c if (event->type == 0x80 && ((acpi->cevt & 1) == 0)) event 219 drivers/gpu/drm/i915/display/intel_sprite.c if (new_crtc_state->base.event) { event 223 drivers/gpu/drm/i915/display/intel_sprite.c drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event); event 226 drivers/gpu/drm/i915/display/intel_sprite.c new_crtc_state->base.event = NULL; event 382 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) event 422 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) event 1195 drivers/gpu/drm/i915/gvt/cmd_parser.c int event; event 1208 drivers/gpu/drm/i915/gvt/cmd_parser.c int event; event 1236 drivers/gpu/drm/i915/gvt/cmd_parser.c info->event = gen8_plane_code[v].event; event 1272 drivers/gpu/drm/i915/gvt/cmd_parser.c info->event = PRIMARY_A_FLIP_DONE; event 1276 drivers/gpu/drm/i915/gvt/cmd_parser.c info->event = PRIMARY_B_FLIP_DONE; event 1280 drivers/gpu/drm/i915/gvt/cmd_parser.c info->event = PRIMARY_C_FLIP_DONE; event 1285 drivers/gpu/drm/i915/gvt/cmd_parser.c info->event = SPRITE_A_FLIP_DONE; event 1290 drivers/gpu/drm/i915/gvt/cmd_parser.c info->event = SPRITE_B_FLIP_DONE; event 1295 drivers/gpu/drm/i915/gvt/cmd_parser.c info->event = SPRITE_C_FLIP_DONE; event 1369 drivers/gpu/drm/i915/gvt/cmd_parser.c intel_vgpu_trigger_virtual_event(vgpu, info->event); event 1371 drivers/gpu/drm/i915/gvt/cmd_parser.c set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]); event 438 drivers/gpu/drm/i915/gvt/display.c int event; event 443 drivers/gpu/drm/i915/gvt/display.c for_each_set_bit(event, irq->flip_done_event[pipe], event 445 drivers/gpu/drm/i915/gvt/display.c clear_bit(event, irq->flip_done_event[pipe]); event 449 drivers/gpu/drm/i915/gvt/display.c intel_vgpu_trigger_virtual_event(vgpu, event); event 756 drivers/gpu/drm/i915/gvt/handlers.c int event 
= SKL_FLIP_EVENT(pipe, PLANE_PRIMARY); event 764 drivers/gpu/drm/i915/gvt/handlers.c intel_vgpu_trigger_virtual_event(vgpu, event); event 766 drivers/gpu/drm/i915/gvt/handlers.c set_bit(event, vgpu->irq.flip_done_event[pipe]); event 778 drivers/gpu/drm/i915/gvt/handlers.c int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0); event 784 drivers/gpu/drm/i915/gvt/handlers.c intel_vgpu_trigger_virtual_event(vgpu, event); event 786 drivers/gpu/drm/i915/gvt/handlers.c set_bit(event, vgpu->irq.flip_done_event[pipe]); event 798 drivers/gpu/drm/i915/gvt/handlers.c int event = SKL_FLIP_EVENT(pipe, plane); event 809 drivers/gpu/drm/i915/gvt/handlers.c intel_vgpu_trigger_virtual_event(vgpu, event); event 811 drivers/gpu/drm/i915/gvt/handlers.c set_bit(event, vgpu->irq.flip_done_event[pipe]); event 820 drivers/gpu/drm/i915/gvt/handlers.c enum intel_gvt_event_type event; event 823 drivers/gpu/drm/i915/gvt/handlers.c event = AUX_CHANNEL_A; event 825 drivers/gpu/drm/i915/gvt/handlers.c event = AUX_CHANNEL_B; event 827 drivers/gpu/drm/i915/gvt/handlers.c event = AUX_CHANNEL_C; event 829 drivers/gpu/drm/i915/gvt/handlers.c event = AUX_CHANNEL_D; event 835 drivers/gpu/drm/i915/gvt/handlers.c intel_vgpu_trigger_virtual_event(vgpu, event); event 401 drivers/gpu/drm/i915/gvt/interrupt.c enum intel_gvt_event_type event, struct intel_vgpu *vgpu) event 407 drivers/gpu/drm/i915/gvt/interrupt.c info = get_irq_info(irq, event); event 412 drivers/gpu/drm/i915/gvt/interrupt.c bit = irq->events[event].bit; event 416 drivers/gpu/drm/i915/gvt/interrupt.c trace_propagate_event(vgpu->id, irq_name[event], bit); event 424 drivers/gpu/drm/i915/gvt/interrupt.c enum intel_gvt_event_type event, struct intel_vgpu *vgpu) event 426 drivers/gpu/drm/i915/gvt/interrupt.c if (!vgpu->irq.irq_warn_once[event]) { event 428 drivers/gpu/drm/i915/gvt/interrupt.c vgpu->id, event, irq_name[event]); event 429 drivers/gpu/drm/i915/gvt/interrupt.c vgpu->irq.irq_warn_once[event] = true; event 431 drivers/gpu/drm/i915/gvt/interrupt.c propagate_event(irq, event, vgpu); event 619 drivers/gpu/drm/i915/gvt/interrupt.c enum intel_gvt_event_type event) event 626 drivers/gpu/drm/i915/gvt/interrupt.c handler = get_event_virt_handler(irq, event); event 629 drivers/gpu/drm/i915/gvt/interrupt.c handler(irq, event, vgpu); event 140 drivers/gpu/drm/i915/gvt/interrupt.h enum intel_gvt_event_type event, struct intel_vgpu *vgpu); event 218 drivers/gpu/drm/i915/gvt/interrupt.h enum intel_gvt_event_type event); event 912 drivers/gpu/drm/i915/gvt/scheduler.c int event; event 941 drivers/gpu/drm/i915/gvt/scheduler.c for_each_set_bit(event, workload->pending_events, event 943 drivers/gpu/drm/i915/gvt/scheduler.c intel_vgpu_trigger_virtual_event(vgpu, event); event 292 drivers/gpu/drm/i915/i915_drv.c pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; event 1898 drivers/gpu/drm/i915/i915_drv.c if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && event 1899 drivers/gpu/drm/i915/i915_drv.c state.event != PM_EVENT_FREEZE)) event 37 drivers/gpu/drm/i915/i915_pmu.c static u8 engine_event_sample(struct perf_event *event) event 39 drivers/gpu/drm/i915/i915_pmu.c return engine_config_sample(event->attr.config); event 42 drivers/gpu/drm/i915/i915_pmu.c static u8 engine_event_class(struct perf_event *event) event 44 drivers/gpu/drm/i915/i915_pmu.c return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff; event 47 drivers/gpu/drm/i915/i915_pmu.c static u8 engine_event_instance(struct perf_event *event) event 49 drivers/gpu/drm/i915/i915_pmu.c return (event->attr.config >> 
I915_PMU_SAMPLE_BITS) & 0xff; event 70 drivers/gpu/drm/i915/i915_pmu.c static bool is_engine_event(struct perf_event *event) event 72 drivers/gpu/drm/i915/i915_pmu.c return is_engine_config(event->attr.config); event 75 drivers/gpu/drm/i915/i915_pmu.c static unsigned int event_enabled_bit(struct perf_event *event) event 77 drivers/gpu/drm/i915/i915_pmu.c return config_enabled_bit(event->attr.config); event 299 drivers/gpu/drm/i915/i915_pmu.c static void engine_event_destroy(struct perf_event *event) event 302 drivers/gpu/drm/i915/i915_pmu.c container_of(event->pmu, typeof(*i915), pmu.base); event 306 drivers/gpu/drm/i915/i915_pmu.c engine_event_class(event), event 307 drivers/gpu/drm/i915/i915_pmu.c engine_event_instance(event)); event 311 drivers/gpu/drm/i915/i915_pmu.c if (engine_event_sample(event) == I915_SAMPLE_BUSY && event 316 drivers/gpu/drm/i915/i915_pmu.c static void i915_pmu_event_destroy(struct perf_event *event) event 318 drivers/gpu/drm/i915/i915_pmu.c WARN_ON(event->parent); event 320 drivers/gpu/drm/i915/i915_pmu.c if (is_engine_event(event)) event 321 drivers/gpu/drm/i915/i915_pmu.c engine_event_destroy(event); event 369 drivers/gpu/drm/i915/i915_pmu.c static int engine_event_init(struct perf_event *event) event 372 drivers/gpu/drm/i915/i915_pmu.c container_of(event->pmu, typeof(*i915), pmu.base); event 377 drivers/gpu/drm/i915/i915_pmu.c engine = intel_engine_lookup_user(i915, engine_event_class(event), event 378 drivers/gpu/drm/i915/i915_pmu.c engine_event_instance(event)); event 382 drivers/gpu/drm/i915/i915_pmu.c sample = engine_event_sample(event); event 393 drivers/gpu/drm/i915/i915_pmu.c static int i915_pmu_event_init(struct perf_event *event) event 396 drivers/gpu/drm/i915/i915_pmu.c container_of(event->pmu, typeof(*i915), pmu.base); event 399 drivers/gpu/drm/i915/i915_pmu.c if (event->attr.type != event->pmu->type) event 403 drivers/gpu/drm/i915/i915_pmu.c if (event->attr.sample_period) /* no sampling */ event 406 drivers/gpu/drm/i915/i915_pmu.c if (has_branch_stack(event)) event 409 drivers/gpu/drm/i915/i915_pmu.c if (event->cpu < 0) event 413 drivers/gpu/drm/i915/i915_pmu.c if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask)) event 416 drivers/gpu/drm/i915/i915_pmu.c if (is_engine_event(event)) event 417 drivers/gpu/drm/i915/i915_pmu.c ret = engine_event_init(event); event 419 drivers/gpu/drm/i915/i915_pmu.c ret = config_status(i915, event->attr.config); event 423 drivers/gpu/drm/i915/i915_pmu.c if (!event->parent) event 424 drivers/gpu/drm/i915/i915_pmu.c event->destroy = i915_pmu_event_destroy; event 527 drivers/gpu/drm/i915/i915_pmu.c static u64 __i915_pmu_event_read(struct perf_event *event) event 530 drivers/gpu/drm/i915/i915_pmu.c container_of(event->pmu, typeof(*i915), pmu.base); event 534 drivers/gpu/drm/i915/i915_pmu.c if (is_engine_event(event)) { event 535 drivers/gpu/drm/i915/i915_pmu.c u8 sample = engine_event_sample(event); event 539 drivers/gpu/drm/i915/i915_pmu.c engine_event_class(event), event 540 drivers/gpu/drm/i915/i915_pmu.c engine_event_instance(event)); event 551 drivers/gpu/drm/i915/i915_pmu.c switch (event->attr.config) { event 574 drivers/gpu/drm/i915/i915_pmu.c static void i915_pmu_event_read(struct perf_event *event) event 576 drivers/gpu/drm/i915/i915_pmu.c struct hw_perf_event *hwc = &event->hw; event 581 drivers/gpu/drm/i915/i915_pmu.c new = __i915_pmu_event_read(event); event 586 drivers/gpu/drm/i915/i915_pmu.c local64_add(new - prev, &event->count); event 589 drivers/gpu/drm/i915/i915_pmu.c static void i915_pmu_enable(struct 
perf_event *event) event 592 drivers/gpu/drm/i915/i915_pmu.c container_of(event->pmu, typeof(*i915), pmu.base); event 593 drivers/gpu/drm/i915/i915_pmu.c unsigned int bit = event_enabled_bit(event); event 618 drivers/gpu/drm/i915/i915_pmu.c if (is_engine_event(event)) { event 619 drivers/gpu/drm/i915/i915_pmu.c u8 sample = engine_event_sample(event); event 623 drivers/gpu/drm/i915/i915_pmu.c engine_event_class(event), event 624 drivers/gpu/drm/i915/i915_pmu.c engine_event_instance(event)); event 645 drivers/gpu/drm/i915/i915_pmu.c local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); event 648 drivers/gpu/drm/i915/i915_pmu.c static void i915_pmu_disable(struct perf_event *event) event 651 drivers/gpu/drm/i915/i915_pmu.c container_of(event->pmu, typeof(*i915), pmu.base); event 652 drivers/gpu/drm/i915/i915_pmu.c unsigned int bit = event_enabled_bit(event); event 658 drivers/gpu/drm/i915/i915_pmu.c if (is_engine_event(event)) { event 659 drivers/gpu/drm/i915/i915_pmu.c u8 sample = engine_event_sample(event); event 663 drivers/gpu/drm/i915/i915_pmu.c engine_event_class(event), event 664 drivers/gpu/drm/i915/i915_pmu.c engine_event_instance(event)); event 692 drivers/gpu/drm/i915/i915_pmu.c static void i915_pmu_event_start(struct perf_event *event, int flags) event 694 drivers/gpu/drm/i915/i915_pmu.c i915_pmu_enable(event); event 695 drivers/gpu/drm/i915/i915_pmu.c event->hw.state = 0; event 698 drivers/gpu/drm/i915/i915_pmu.c static void i915_pmu_event_stop(struct perf_event *event, int flags) event 701 drivers/gpu/drm/i915/i915_pmu.c i915_pmu_event_read(event); event 702 drivers/gpu/drm/i915/i915_pmu.c i915_pmu_disable(event); event 703 drivers/gpu/drm/i915/i915_pmu.c event->hw.state = PERF_HES_STOPPED; event 706 drivers/gpu/drm/i915/i915_pmu.c static int i915_pmu_event_add(struct perf_event *event, int flags) event 709 drivers/gpu/drm/i915/i915_pmu.c i915_pmu_event_start(event, flags); event 714 drivers/gpu/drm/i915/i915_pmu.c static void i915_pmu_event_del(struct perf_event *event, int flags) event 716 drivers/gpu/drm/i915/i915_pmu.c i915_pmu_event_stop(event, PERF_EF_UPDATE); event 719 drivers/gpu/drm/i915/i915_pmu.c static int i915_pmu_event_event_idx(struct perf_event *event) event 41 drivers/gpu/drm/imx/ipuv3-crtc.c struct drm_pending_vblank_event *event; event 101 drivers/gpu/drm/imx/ipuv3-crtc.c if (crtc->state->event && !crtc->state->active) { event 102 drivers/gpu/drm/imx/ipuv3-crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 103 drivers/gpu/drm/imx/ipuv3-crtc.c crtc->state->event = NULL; event 187 drivers/gpu/drm/imx/ipuv3-crtc.c if (ipu_crtc->event) { event 200 drivers/gpu/drm/imx/ipuv3-crtc.c drm_crtc_send_vblank_event(crtc, ipu_crtc->event); event 201 drivers/gpu/drm/imx/ipuv3-crtc.c ipu_crtc->event = NULL; event 253 drivers/gpu/drm/imx/ipuv3-crtc.c if (crtc->state->event) { event 257 drivers/gpu/drm/imx/ipuv3-crtc.c ipu_crtc->event = crtc->state->event; event 258 drivers/gpu/drm/imx/ipuv3-crtc.c crtc->state->event = NULL; event 341 drivers/gpu/drm/ingenic/ingenic-drm.c struct drm_pending_vblank_event *event = state->event; event 356 drivers/gpu/drm/ingenic/ingenic-drm.c if (event) { event 357 drivers/gpu/drm/ingenic/ingenic-drm.c state->event = NULL; event 361 drivers/gpu/drm/ingenic/ingenic-drm.c drm_crtc_arm_vblank_event(crtc, event); event 363 drivers/gpu/drm/ingenic/ingenic-drm.c drm_crtc_send_vblank_event(crtc, event); event 1012 drivers/gpu/drm/mcde/mcde_display.c struct drm_pending_vblank_event *event = crtc->state->event; event 1022 
drivers/gpu/drm/mcde/mcde_display.c if (event) { event 1023 drivers/gpu/drm/mcde/mcde_display.c crtc->state->event = NULL; event 1035 drivers/gpu/drm/mcde/mcde_display.c drm_crtc_arm_vblank_event(crtc, event); event 1038 drivers/gpu/drm/mcde/mcde_display.c drm_crtc_send_vblank_event(crtc, event); event 40 drivers/gpu/drm/mediatek/mtk_drm_crtc.c struct drm_pending_vblank_event *event; event 77 drivers/gpu/drm/mediatek/mtk_drm_crtc.c drm_crtc_send_vblank_event(crtc, mtk_crtc->event); event 79 drivers/gpu/drm/mediatek/mtk_drm_crtc.c mtk_crtc->event = NULL; event 324 drivers/gpu/drm/mediatek/mtk_drm_crtc.c if (crtc->state->event && !crtc->state->active) { event 326 drivers/gpu/drm/mediatek/mtk_drm_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 327 drivers/gpu/drm/mediatek/mtk_drm_crtc.c crtc->state->event = NULL; event 431 drivers/gpu/drm/mediatek/mtk_drm_crtc.c if (mtk_crtc->event && state->base.event) event 434 drivers/gpu/drm/mediatek/mtk_drm_crtc.c if (state->base.event) { event 435 drivers/gpu/drm/mediatek/mtk_drm_crtc.c state->base.event->pipe = drm_crtc_index(crtc); event 437 drivers/gpu/drm/mediatek/mtk_drm_crtc.c mtk_crtc->event = state->base.event; event 438 drivers/gpu/drm/mediatek/mtk_drm_crtc.c state->base.event = NULL; event 450 drivers/gpu/drm/mediatek/mtk_drm_crtc.c if (mtk_crtc->event) event 34 drivers/gpu/drm/meson/meson_crtc.c struct drm_pending_vblank_event *event; event 154 drivers/gpu/drm/meson/meson_crtc.c if (crtc->state->event && !crtc->state->active) { event 156 drivers/gpu/drm/meson/meson_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 159 drivers/gpu/drm/meson/meson_crtc.c crtc->state->event = NULL; event 184 drivers/gpu/drm/meson/meson_crtc.c if (crtc->state->event && !crtc->state->active) { event 186 drivers/gpu/drm/meson/meson_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 189 drivers/gpu/drm/meson/meson_crtc.c crtc->state->event = NULL; event 199 drivers/gpu/drm/meson/meson_crtc.c if (crtc->state->event) { event 203 drivers/gpu/drm/meson/meson_crtc.c meson_crtc->event = crtc->state->event; event 205 drivers/gpu/drm/meson/meson_crtc.c crtc->state->event = NULL; event 549 drivers/gpu/drm/meson/meson_crtc.c if (meson_crtc->event) { event 550 drivers/gpu/drm/meson/meson_crtc.c drm_crtc_send_vblank_event(priv->crtc, meson_crtc->event); event 552 drivers/gpu/drm/meson/meson_crtc.c meson_crtc->event = NULL; event 255 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c if (dpu_crtc->event) { event 257 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c dpu_crtc->event); event 259 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c drm_crtc_send_vblank_event(crtc, dpu_crtc->event); event 260 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c dpu_crtc->event = NULL; event 308 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event, event 311 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE event 320 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c fevent->event); event 324 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c fevent->event); event 327 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE) event 330 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE event 335 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD) event 357 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c static void dpu_crtc_frame_event_cb(void *data, u32 event) event 367 
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c if (event & DPU_ENCODER_FRAME_EVENT_IDLE) event 374 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c trace_dpu_crtc_frame_event_cb(DRMID(crtc), event); event 384 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event); event 388 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c fevent->event = event; event 450 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c if (dpu_crtc->event) { event 451 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c WARN_ON(dpu_crtc->event); event 454 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c dpu_crtc->event = crtc->state->event; event 455 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c crtc->state->event = NULL; event 513 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c if (dpu_crtc->event) { event 517 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c dpu_crtc->event = crtc->state->event; event 518 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c crtc->state->event = NULL; event 751 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c if (crtc->state->event && !crtc->state->active) { event 753 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 754 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c crtc->state->event = NULL; event 99 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h u32 event; event 145 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h struct drm_pending_vblank_event *event; event 268 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h struct drm_crtc *crtc_drm, u32 event, bool en); event 190 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c void (*crtc_frame_event_cb)(void *, u32 event); event 1332 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c void (*frame_event_cb)(void *, u32 event), event 1355 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c struct dpu_encoder_phys *ready_phys, u32 event) event 1360 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c if (event & (DPU_ENCODER_FRAME_EVENT_DONE event 1370 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c event, ready_phys->intf_idx); event 1393 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c event); event 1398 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c dpu_enc->crtc_frame_event_cb_data, event); event 2137 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c u32 event; event 2156 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c event = DPU_ENCODER_FRAME_EVENT_ERROR; event 2157 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); event 2158 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event); event 2253 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c enum msm_event_wait event) event 2271 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c switch (event) { event 2283 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c event); event 111 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h enum msm_event_wait event); event 76 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h struct dpu_encoder_phys *phys, u32 event); event 80 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c u32 event = DPU_ENCODER_FRAME_EVENT_DONE; event 89 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c phys_enc, event); event 97 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c new_cnt, event); event 402 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx), event 403 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, event, intf_idx), event 406 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __field( u32, event ) event 411 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->event = event; event 414 
drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event, event 489 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(uint32_t drm_id, u32 event), event 490 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, event), event 493 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __field( u32, event ) event 497 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->event = event; event 499 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event) event 502 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(uint32_t drm_id, u32 event), event 503 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, event) event 506 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(uint32_t drm_id, u32 event), event 507 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, event) event 510 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(uint32_t drm_id, u32 event), event 511 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, event) event 514 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(uint32_t drm_id, u32 event), event 515 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, event) event 566 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h u32 event), event 567 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, pp, new_count, event), event 572 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __field( u32, event ) event 578 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->event = event; event 581 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->pp, __entry->new_count, __entry->event) event 586 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h int kickoff_count, u32 event), event 587 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event), event 593 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __field( u32, event ) event 600 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->event = event; event 604 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->kickoff_count, __entry->event) event 42 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c struct drm_pending_vblank_event *event; event 101 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c struct drm_pending_vblank_event *event; event 105 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c event = mdp4_crtc->event; event 106 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c if (event) { event 107 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c mdp4_crtc->event = NULL; event 108 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c DBG("%s: send event: %p", mdp4_crtc->name, event); event 109 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c drm_crtc_send_vblank_event(crtc, event); event 332 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event); event 334 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c WARN_ON(mdp4_crtc->event); event 337 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c mdp4_crtc->event = crtc->state->event; event 338 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c crtc->state->event = NULL; event 30 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c struct drm_pending_vblank_event *event; event 141 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c struct drm_pending_vblank_event *event; event 145 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c event = mdp5_crtc->event; event 146 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c if (event) { event 147 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c mdp5_crtc->event = NULL; event 148 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c DBG("%s: send event: %p", crtc->name, event); event 149 
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c drm_crtc_send_vblank_event(crtc, event); event 434 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c if (crtc->state->event && !crtc->state->active) { event 435 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c WARN_ON(mdp5_crtc->event); event 437 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 438 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c crtc->state->event = NULL; event 711 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c DBG("%s: event: %p", crtc->name, crtc->state->event); event 713 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c WARN_ON(mdp5_crtc->event); event 716 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c mdp5_crtc->event = crtc->state->event; event 717 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c crtc->state->event = NULL; event 25 drivers/gpu/drm/msm/msm_fence.c init_waitqueue_head(&fctx->event); event 60 drivers/gpu/drm/msm/msm_fence.c ret = wait_event_interruptible_timeout(fctx->event, event 64 drivers/gpu/drm/msm/msm_fence.c ret = wait_event_timeout(fctx->event, event 87 drivers/gpu/drm/msm/msm_fence.c wake_up_all(&fctx->event); event 19 drivers/gpu/drm/msm/msm_fence.h wait_queue_head_t event; event 96 drivers/gpu/drm/msm/msm_gem_shrinker.c msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) event 307 drivers/gpu/drm/mxsfb/mxsfb_crtc.c struct drm_pending_vblank_event *event; event 311 drivers/gpu/drm/mxsfb/mxsfb_crtc.c event = crtc->state->event; event 312 drivers/gpu/drm/mxsfb/mxsfb_crtc.c if (event) { event 313 drivers/gpu/drm/mxsfb/mxsfb_crtc.c crtc->state->event = NULL; event 316 drivers/gpu/drm/mxsfb/mxsfb_crtc.c drm_crtc_arm_vblank_event(crtc, event); event 318 drivers/gpu/drm/mxsfb/mxsfb_crtc.c drm_crtc_send_vblank_event(crtc, event); event 118 drivers/gpu/drm/mxsfb/mxsfb_drv.c struct drm_pending_vblank_event *event; event 126 drivers/gpu/drm/mxsfb/mxsfb_drv.c event = crtc->state->event; event 127 drivers/gpu/drm/mxsfb/mxsfb_drv.c if (event) { event 128 drivers/gpu/drm/mxsfb/mxsfb_drv.c crtc->state->event = NULL; event 129 drivers/gpu/drm/mxsfb/mxsfb_drv.c drm_crtc_send_vblank_event(crtc, event); event 1035 drivers/gpu/drm/nouveau/dispnv04/crtc.c struct drm_pending_vblank_event *event; event 1060 drivers/gpu/drm/nouveau/dispnv04/crtc.c if (s->event) { event 1061 drivers/gpu/drm/nouveau/dispnv04/crtc.c drm_crtc_arm_vblank_event(s->crtc, s->event); event 1140 drivers/gpu/drm/nouveau/dispnv04/crtc.c struct drm_pending_vblank_event *event, u32 flags, event 1193 drivers/gpu/drm/nouveau/dispnv04/crtc.c { { }, event, crtc, fb->format->cpp[0] * 8, fb->pitches[0], event 1948 drivers/gpu/drm/nouveau/dispnv50/disp.c if (new_crtc_state->event) event 1993 drivers/gpu/drm/nouveau/dispnv50/disp.c if (new_crtc_state->event) { event 1999 drivers/gpu/drm/nouveau/dispnv50/disp.c drm_crtc_send_vblank_event(crtc, new_crtc_state->event); event 2002 drivers/gpu/drm/nouveau/dispnv50/disp.c new_crtc_state->event = NULL; event 102 drivers/gpu/drm/nouveau/include/nvif/ioctl.h __u8 event; event 103 drivers/gpu/drm/nouveau/include/nvkm/core/device.h struct nvkm_event event; event 8 drivers/gpu/drm/nouveau/include/nvkm/core/notify.h struct nvkm_event *event; event 13 drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h struct nvkm_event event; event 26 drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h struct nvkm_event event; event 82 drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h struct nvkm_event event; event 212 drivers/gpu/drm/nouveau/nouveau_usif.c ntfy->p->base.event = &ntfy->p->e.base; event 166 
drivers/gpu/drm/nouveau/nvif/notify.c bool work, u8 event, void *data, u32 size, u32 reply, event 195 drivers/gpu/drm/nouveau/nvif/notify.c args->ntfy.event = event; event 129 drivers/gpu/drm/nouveau/nvkm/core/client.c struct nvkm_event *event, void *data, u32 size) event 165 drivers/gpu/drm/nouveau/nvkm/core/client.c ret = nvkm_notify_init(object, event, nvkm_client_notify, event 26 drivers/gpu/drm/nouveau/nvkm/core/event.c nvkm_event_put(struct nvkm_event *event, u32 types, int index) event 28 drivers/gpu/drm/nouveau/nvkm/core/event.c assert_spin_locked(&event->refs_lock); event 31 drivers/gpu/drm/nouveau/nvkm/core/event.c if (--event->refs[index * event->types_nr + type] == 0) { event 32 drivers/gpu/drm/nouveau/nvkm/core/event.c if (event->func->fini) event 33 drivers/gpu/drm/nouveau/nvkm/core/event.c event->func->fini(event, 1 << type, index); event 39 drivers/gpu/drm/nouveau/nvkm/core/event.c nvkm_event_get(struct nvkm_event *event, u32 types, int index) event 41 drivers/gpu/drm/nouveau/nvkm/core/event.c assert_spin_locked(&event->refs_lock); event 44 drivers/gpu/drm/nouveau/nvkm/core/event.c if (++event->refs[index * event->types_nr + type] == 1) { event 45 drivers/gpu/drm/nouveau/nvkm/core/event.c if (event->func->init) event 46 drivers/gpu/drm/nouveau/nvkm/core/event.c event->func->init(event, 1 << type, index); event 52 drivers/gpu/drm/nouveau/nvkm/core/event.c nvkm_event_send(struct nvkm_event *event, u32 types, int index, event 58 drivers/gpu/drm/nouveau/nvkm/core/event.c if (!event->refs || WARN_ON(index >= event->index_nr)) event 61 drivers/gpu/drm/nouveau/nvkm/core/event.c spin_lock_irqsave(&event->list_lock, flags); event 62 drivers/gpu/drm/nouveau/nvkm/core/event.c list_for_each_entry(notify, &event->list, head) { event 64 drivers/gpu/drm/nouveau/nvkm/core/event.c if (event->func->send) { event 65 drivers/gpu/drm/nouveau/nvkm/core/event.c event->func->send(data, size, notify); event 71 drivers/gpu/drm/nouveau/nvkm/core/event.c spin_unlock_irqrestore(&event->list_lock, flags); event 75 drivers/gpu/drm/nouveau/nvkm/core/event.c nvkm_event_fini(struct nvkm_event *event) event 77 drivers/gpu/drm/nouveau/nvkm/core/event.c if (event->refs) { event 78 drivers/gpu/drm/nouveau/nvkm/core/event.c kfree(event->refs); event 79 drivers/gpu/drm/nouveau/nvkm/core/event.c event->refs = NULL; event 85 drivers/gpu/drm/nouveau/nvkm/core/event.c struct nvkm_event *event) event 87 drivers/gpu/drm/nouveau/nvkm/core/event.c event->refs = kzalloc(array3_size(index_nr, types_nr, event 88 drivers/gpu/drm/nouveau/nvkm/core/event.c sizeof(*event->refs)), event 90 drivers/gpu/drm/nouveau/nvkm/core/event.c if (!event->refs) event 93 drivers/gpu/drm/nouveau/nvkm/core/event.c event->func = func; event 94 drivers/gpu/drm/nouveau/nvkm/core/event.c event->types_nr = types_nr; event 95 drivers/gpu/drm/nouveau/nvkm/core/event.c event->index_nr = index_nr; event 96 drivers/gpu/drm/nouveau/nvkm/core/event.c spin_lock_init(&event->refs_lock); event 97 drivers/gpu/drm/nouveau/nvkm/core/event.c spin_lock_init(&event->list_lock); event 98 drivers/gpu/drm/nouveau/nvkm/core/event.c INIT_LIST_HEAD(&event->list); event 303 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c struct nvkm_event *event; event 309 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c args->v0.version, args->v0.event); event 310 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c ret = nvkm_object_ntfy(object, args->v0.event, &event); event 312 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c ret = nvkm_client_notify_new(object, event, data, size); event 31 
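The nvkm_event core above has a simple lifecycle: nvkm_event_init() allocates the refs array sized for types_nr * index_nr, interrupt paths call nvkm_event_send() to walk the registered notifiers, and nvkm_event_fini() releases the refs. A sketch of how a subdev might wire that up, assuming the usual nvkm relative includes; the my_* names are illustrative stubs operating directly on a struct nvkm_event:

#include <core/event.h>

static const struct nvkm_event_func my_event_func = {
    /* optional .ctor/.init/.fini/.send hooks; all may be left NULL */
};

static int
my_subdev_event_ctor(struct nvkm_event *event)
{
    /* one event type, one index, as device->event does above */
    return nvkm_event_init(&my_event_func, 1, 1, event);
}

static void
my_subdev_intr(struct nvkm_event *event)
{
    /* notify everything registered for type bit 0 on index 0 */
    nvkm_event_send(event, 1, 0, NULL, 0);
}

static void
my_subdev_event_dtor(struct nvkm_event *event)
{
    nvkm_event_fini(event);
}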
drivers/gpu/drm/nouveau/nvkm/core/notify.c nvkm_event_put(notify->event, notify->types, notify->index); event 37 drivers/gpu/drm/nouveau/nvkm/core/notify.c struct nvkm_event *event = notify->event; event 39 drivers/gpu/drm/nouveau/nvkm/core/notify.c if (likely(event) && event 41 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_lock_irqsave(&event->refs_lock, flags); event 43 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_unlock_irqrestore(&event->refs_lock, flags); event 53 drivers/gpu/drm/nouveau/nvkm/core/notify.c nvkm_event_get(notify->event, notify->types, notify->index); event 59 drivers/gpu/drm/nouveau/nvkm/core/notify.c struct nvkm_event *event = notify->event; event 61 drivers/gpu/drm/nouveau/nvkm/core/notify.c if (likely(event) && event 63 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_lock_irqsave(&event->refs_lock, flags); event 65 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_unlock_irqrestore(&event->refs_lock, flags); event 72 drivers/gpu/drm/nouveau/nvkm/core/notify.c struct nvkm_event *event = notify->event; event 77 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_lock_irqsave(&event->refs_lock, flags); event 79 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_unlock_irqrestore(&event->refs_lock, flags); event 93 drivers/gpu/drm/nouveau/nvkm/core/notify.c struct nvkm_event *event = notify->event; event 96 drivers/gpu/drm/nouveau/nvkm/core/notify.c assert_spin_locked(&event->list_lock); event 99 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_lock_irqsave(&event->refs_lock, flags); event 101 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_unlock_irqrestore(&event->refs_lock, flags); event 105 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_unlock_irqrestore(&event->refs_lock, flags); event 121 drivers/gpu/drm/nouveau/nvkm/core/notify.c if (notify->event) { event 123 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_lock_irqsave(&notify->event->list_lock, flags); event 125 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_unlock_irqrestore(&notify->event->list_lock, flags); event 127 drivers/gpu/drm/nouveau/nvkm/core/notify.c notify->event = NULL; event 132 drivers/gpu/drm/nouveau/nvkm/core/notify.c nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event, event 139 drivers/gpu/drm/nouveau/nvkm/core/notify.c if ((notify->event = event), event->refs) { event 140 drivers/gpu/drm/nouveau/nvkm/core/notify.c ret = event->func->ctor(object, data, size, notify); event 155 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_lock_irqsave(&event->list_lock, flags); event 156 drivers/gpu/drm/nouveau/nvkm/core/notify.c list_add_tail(&notify->head, &event->list); event 157 drivers/gpu/drm/nouveau/nvkm/core/notify.c spin_unlock_irqrestore(&event->list_lock, flags); event 161 drivers/gpu/drm/nouveau/nvkm/core/notify.c notify->event = NULL; event 37 drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c nvkm_event_send(&device->event, 1, 0, NULL, 0); event 2870 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c nvkm_event_fini(&device->event); event 2915 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event); event 43 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int id) event 45 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank); event 52 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c nvkm_disp_vblank_init(struct nvkm_event *event, int type, int id) event 54 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c struct 
nvkm_disp *disp = container_of(event, typeof(*disp), vblank); event 65 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c container_of(notify->event, typeof(*disp), vblank); event 102 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c container_of(notify->event, typeof(*disp), hpd); event 113 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c if (ret = -ENODEV, outp->conn->hpd.event) { event 132 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event) event 137 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c *event = &disp->vblank; event 140 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c *event = &disp->hpd; event 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c gf119_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index) event 29 drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c struct nv50_disp *disp = container_of(event, typeof(*disp), uevent); event 36 drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c gf119_disp_chan_uevent_init(struct nvkm_event *event, int types, int index) event 38 drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c struct nv50_disp *disp = container_of(event, typeof(*disp), uevent); event 107 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index) event 109 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c struct nv50_disp *disp = container_of(event, typeof(*disp), uevent); event 116 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index) event 118 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c struct nv50_disp *disp = container_of(event, typeof(*disp), uevent); event 108 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c ret = nvkm_notify_init(NULL, &gpio->event, nvkm_conn_hpd, event 660 drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c ret = nvkm_notify_init(NULL, &i2c->event, nvkm_dp_hpd, true, event 172 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index) event 174 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); event 179 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index) event 181 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); event 39 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c nvkm_event_send(&chan->event, 1, 0, NULL, 0); event 82 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c nvkm_event_fini(&chan->event); event 110 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c return nvkm_event_init(&nvkm_sw_chan_event, 1, 1, &chan->event); event 17 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h struct nvkm_event event; event 45 drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c *pevent = &nvsw->chan->event; event 693 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true, event 28 drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index) event 30 drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c struct nvkm_fault *fault = container_of(event, typeof(*fault), event); event 35 drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index) event 37 drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c struct nvkm_fault *fault 
= container_of(event, typeof(*fault), event); event 134 drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c &fault->event); event 150 drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c nvkm_event_fini(&fault->event); event 62 drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c nvkm_event_send(&fault->event, 1, 0, NULL, 0); event 166 drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c nvkm_event_send(&fault->event, 1, 0, NULL, 0); event 173 drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c nvkm_event_send(&fault->event, 1, 1, NULL, 0); event 203 drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c return nvkm_notify_init(&fault->buffer[0]->object, &fault->event, event 113 drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c nvkm_event_send(&fault->event, 1, 0, NULL, 0); event 121 drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c nvkm_event_send(&fault->event, 1, 1, NULL, 0); event 48 drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c *pevent = &buffer->fault->event; event 113 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c nvkm_gpio_intr_fini(struct nvkm_event *event, int type, int index) event 115 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event); event 120 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c nvkm_gpio_intr_init(struct nvkm_event *event, int type, int index) event 122 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event); event 160 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c nvkm_event_send(&gpio->event, rep.mask, i, &rep, sizeof(rep)); event 230 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c nvkm_event_fini(&gpio->event); event 255 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c &gpio->event); event 90 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int id) event 92 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event); event 99 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c nvkm_i2c_intr_init(struct nvkm_event *event, int type, int id) event 101 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event); event 152 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c nvkm_event_send(&i2c->event, rep.mask, aux->id, event 234 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c nvkm_event_fini(&i2c->event); event 430 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c return nvkm_event_init(&nvkm_i2c_intr_func, 4, i, &i2c->event); event 45 drivers/gpu/drm/omapdrm/omap_crtc.c struct drm_pending_vblank_event *event; event 309 drivers/gpu/drm/omapdrm/omap_crtc.c if (omap_crtc->event) { event 310 drivers/gpu/drm/omapdrm/omap_crtc.c drm_crtc_send_vblank_event(crtc, omap_crtc->event); event 311 drivers/gpu/drm/omapdrm/omap_crtc.c omap_crtc->event = NULL; event 338 drivers/gpu/drm/omapdrm/omap_crtc.c if (omap_crtc->event) { event 339 drivers/gpu/drm/omapdrm/omap_crtc.c drm_crtc_send_vblank_event(crtc, omap_crtc->event); event 340 drivers/gpu/drm/omapdrm/omap_crtc.c omap_crtc->event = NULL; event 432 drivers/gpu/drm/omapdrm/omap_crtc.c if (crtc->state->event) { event 433 drivers/gpu/drm/omapdrm/omap_crtc.c omap_crtc->event = crtc->state->event; event 434 drivers/gpu/drm/omapdrm/omap_crtc.c crtc->state->event = NULL; event 473 drivers/gpu/drm/omapdrm/omap_crtc.c if (crtc->state->event) { event 474 drivers/gpu/drm/omapdrm/omap_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 475 drivers/gpu/drm/omapdrm/omap_crtc.c 
crtc->state->event = NULL; event 395 drivers/gpu/drm/pl111/pl111_display.c struct drm_pending_vblank_event *event = crtc->state->event; event 406 drivers/gpu/drm/pl111/pl111_display.c if (event) { event 407 drivers/gpu/drm/pl111/pl111_display.c crtc->state->event = NULL; event 411 drivers/gpu/drm/pl111/pl111_display.c drm_crtc_arm_vblank_event(crtc, event); event 413 drivers/gpu/drm/pl111/pl111_display.c drm_crtc_send_vblank_event(crtc, event); event 376 drivers/gpu/drm/qxl/qxl_display.c struct drm_pending_vblank_event *event; event 379 drivers/gpu/drm/qxl/qxl_display.c if (crtc->state && crtc->state->event) { event 380 drivers/gpu/drm/qxl/qxl_display.c event = crtc->state->event; event 381 drivers/gpu/drm/qxl/qxl_display.c crtc->state->event = NULL; event 384 drivers/gpu/drm/qxl/qxl_display.c drm_crtc_send_vblank_event(crtc, event); event 742 drivers/gpu/drm/radeon/radeon.h struct drm_pending_vblank_event *event; event 359 drivers/gpu/drm/radeon/radeon_acpi.c struct acpi_bus_event *event) event 367 drivers/gpu/drm/radeon/radeon_acpi.c event->device_class, event->type); event 369 drivers/gpu/drm/radeon/radeon_acpi.c if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) event 373 drivers/gpu/drm/radeon/radeon_acpi.c event->type != atif->notification_cfg.command_code) event 391 drivers/gpu/drm/radeon/radeon_display.c if (work->event) event 392 drivers/gpu/drm/radeon/radeon_display.c drm_crtc_send_vblank_event(&radeon_crtc->base, work->event); event 482 drivers/gpu/drm/radeon/radeon_display.c struct drm_pending_vblank_event *event, event 507 drivers/gpu/drm/radeon/radeon_display.c work->event = event; event 135 drivers/gpu/drm/radeon/radeon_mn.c .event = MMU_NOTIFY_UNMAP, event 430 drivers/gpu/drm/rcar-du/rcar_du_crtc.c struct drm_pending_vblank_event *event; event 435 drivers/gpu/drm/rcar-du/rcar_du_crtc.c event = rcrtc->event; event 436 drivers/gpu/drm/rcar-du/rcar_du_crtc.c rcrtc->event = NULL; event 439 drivers/gpu/drm/rcar-du/rcar_du_crtc.c if (event == NULL) event 443 drivers/gpu/drm/rcar-du/rcar_du_crtc.c drm_crtc_send_vblank_event(&rcrtc->crtc, event); event 457 drivers/gpu/drm/rcar-du/rcar_du_crtc.c pending = rcrtc->event != NULL; event 714 drivers/gpu/drm/rcar-du/rcar_du_crtc.c if (crtc->state->event) { event 715 drivers/gpu/drm/rcar-du/rcar_du_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 716 drivers/gpu/drm/rcar-du/rcar_du_crtc.c crtc->state->event = NULL; event 755 drivers/gpu/drm/rcar-du/rcar_du_crtc.c if (crtc->state->event) { event 759 drivers/gpu/drm/rcar-du/rcar_du_crtc.c rcrtc->event = crtc->state->event; event 760 drivers/gpu/drm/rcar-du/rcar_du_crtc.c crtc->state->event = NULL; event 59 drivers/gpu/drm/rcar-du/rcar_du_crtc.h struct drm_pending_vblank_event *event; event 990 drivers/gpu/drm/rockchip/cdn-dp-core.c unsigned long event, void *priv) event 478 drivers/gpu/drm/rockchip/cdn-dp-reg.c u8 msg, event[2]; event 499 drivers/gpu/drm/rockchip/cdn-dp-reg.c sizeof(event)); event 503 drivers/gpu/drm/rockchip/cdn-dp-reg.c ret = cdn_dp_mailbox_read_receive(dp, event, sizeof(event)); event 507 drivers/gpu/drm/rockchip/cdn-dp-reg.c if (event[1] & EQ_PHASE_FINISHED) event 131 drivers/gpu/drm/rockchip/rockchip_drm_vop.c struct drm_pending_vblank_event *event; event 652 drivers/gpu/drm/rockchip/rockchip_drm_vop.c WARN_ON(vop->event); event 698 drivers/gpu/drm/rockchip/rockchip_drm_vop.c if (crtc->state->event && !crtc->state->active) { event 700 drivers/gpu/drm/rockchip/rockchip_drm_vop.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 703 
drivers/gpu/drm/rockchip/rockchip_drm_vop.c crtc->state->event = NULL; event 1111 drivers/gpu/drm/rockchip/rockchip_drm_vop.c WARN_ON(vop->event); event 1251 drivers/gpu/drm/rockchip/rockchip_drm_vop.c if (crtc->state->event) { event 1253 drivers/gpu/drm/rockchip/rockchip_drm_vop.c WARN_ON(vop->event); event 1255 drivers/gpu/drm/rockchip/rockchip_drm_vop.c vop->event = crtc->state->event; event 1256 drivers/gpu/drm/rockchip/rockchip_drm_vop.c crtc->state->event = NULL; event 1412 drivers/gpu/drm/rockchip/rockchip_drm_vop.c if (vop->event) { event 1413 drivers/gpu/drm/rockchip/rockchip_drm_vop.c drm_crtc_send_vblank_event(crtc, vop->event); event 1415 drivers/gpu/drm/rockchip/rockchip_drm_vop.c vop->event = NULL; event 252 drivers/gpu/drm/savage/savage_bci.c uint16_t event; event 258 drivers/gpu/drm/savage/savage_bci.c event = dev_priv->status_ptr[1] & 0xffff; event 260 drivers/gpu/drm/savage/savage_bci.c event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; event 262 drivers/gpu/drm/savage/savage_bci.c if (event > dev_priv->event_counter) event 265 drivers/gpu/drm/savage/savage_bci.c DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); event 266 drivers/gpu/drm/savage/savage_bci.c DRM_DEBUG(" head=0x%04x %d\n", event, wrap); event 268 drivers/gpu/drm/savage/savage_bci.c if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) { event 286 drivers/gpu/drm/savage/savage_bci.c DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap); event 331 drivers/gpu/drm/savage/savage_bci.c uint16_t event; event 333 drivers/gpu/drm/savage/savage_bci.c event = savage_bci_emit_event(dev_priv, 0); event 336 drivers/gpu/drm/savage/savage_bci.c SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); event 340 drivers/gpu/drm/savage/savage_bci.c SET_AGE(&dev_priv->last_dma_age, event, wrap); event 346 drivers/gpu/drm/savage/savage_bci.c uint16_t event; event 355 drivers/gpu/drm/savage/savage_bci.c event = dev_priv->status_ptr[1] & 0xffff; event 357 drivers/gpu/drm/savage/savage_bci.c event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; event 359 drivers/gpu/drm/savage/savage_bci.c if (event > dev_priv->event_counter) event 364 drivers/gpu/drm/savage/savage_bci.c dev_priv->dma_pages[page].age.event > event)) { event 366 drivers/gpu/drm/savage/savage_bci.c dev_priv->dma_pages[page].age.event) event 432 drivers/gpu/drm/savage/savage_bci.c uint16_t event; event 483 drivers/gpu/drm/savage/savage_bci.c event = savage_bci_emit_event(dev_priv, 0); event 486 drivers/gpu/drm/savage/savage_bci.c SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); event 492 drivers/gpu/drm/savage/savage_bci.c SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap); event 504 drivers/gpu/drm/savage/savage_bci.c SET_AGE(&dev_priv->last_dma_age, event, wrap); event 941 drivers/gpu/drm/savage/savage_bci.c drm_savage_event_emit_t *event = data; event 947 drivers/gpu/drm/savage/savage_bci.c event->count = savage_bci_emit_event(dev_priv, event->flags); event 948 drivers/gpu/drm/savage/savage_bci.c event->count |= dev_priv->event_wrap << 16; event 956 drivers/gpu/drm/savage/savage_bci.c drm_savage_event_wait_t *event = data; event 971 drivers/gpu/drm/savage/savage_bci.c event_e = event->count & 0xffff; event 972 drivers/gpu/drm/savage/savage_bci.c event_w = event->count >> 16; event 1072 drivers/gpu/drm/savage/savage_bci.c uint16_t event; event 1074 drivers/gpu/drm/savage/savage_bci.c event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); event 1075 drivers/gpu/drm/savage/savage_bci.c SET_AGE(&buf_priv->age, event, 
dev_priv->event_wrap); event 59 drivers/gpu/drm/savage/savage_drv.h uint16_t event; event 573 drivers/gpu/drm/savage/savage_drv.h (age)->event = e; \ event 578 drivers/gpu/drm/savage/savage_drv.h ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) ) event 1156 drivers/gpu/drm/savage/savage_state.c uint16_t event; event 1157 drivers/gpu/drm/savage/savage_state.c event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); event 1158 drivers/gpu/drm/savage/savage_state.c SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); event 391 drivers/gpu/drm/shmobile/shmob_drm_crtc.c struct drm_pending_vblank_event *event; event 396 drivers/gpu/drm/shmobile/shmob_drm_crtc.c event = scrtc->event; event 397 drivers/gpu/drm/shmobile/shmob_drm_crtc.c scrtc->event = NULL; event 398 drivers/gpu/drm/shmobile/shmob_drm_crtc.c if (event) { event 399 drivers/gpu/drm/shmobile/shmob_drm_crtc.c drm_crtc_send_vblank_event(&scrtc->crtc, event); event 407 drivers/gpu/drm/shmobile/shmob_drm_crtc.c struct drm_pending_vblank_event *event, event 416 drivers/gpu/drm/shmobile/shmob_drm_crtc.c if (scrtc->event != NULL) { event 425 drivers/gpu/drm/shmobile/shmob_drm_crtc.c if (event) { event 426 drivers/gpu/drm/shmobile/shmob_drm_crtc.c event->pipe = 0; event 429 drivers/gpu/drm/shmobile/shmob_drm_crtc.c scrtc->event = event; event 25 drivers/gpu/drm/shmobile/shmob_drm_crtc.h struct drm_pending_vblank_event *event; event 142 drivers/gpu/drm/sti/sti_crtc.c struct drm_pending_vblank_event *event; event 209 drivers/gpu/drm/sti/sti_crtc.c event = crtc->state->event; event 210 drivers/gpu/drm/sti/sti_crtc.c if (event) { event 211 drivers/gpu/drm/sti/sti_crtc.c crtc->state->event = NULL; event 215 drivers/gpu/drm/sti/sti_crtc.c drm_crtc_arm_vblank_event(crtc, event); event 217 drivers/gpu/drm/sti/sti_crtc.c drm_crtc_send_vblank_event(crtc, event); event 244 drivers/gpu/drm/sti/sti_crtc.c unsigned long event, void *data) event 255 drivers/gpu/drm/sti/sti_crtc.c if ((event != VTG_TOP_FIELD_EVENT) && event 256 drivers/gpu/drm/sti/sti_crtc.c (event != VTG_BOTTOM_FIELD_EVENT)) { event 257 drivers/gpu/drm/sti/sti_crtc.c DRM_ERROR("unknown event: %lu\n", event); event 21 drivers/gpu/drm/sti/sti_crtc.h unsigned long event, void *data); event 487 drivers/gpu/drm/sti/sti_gdp.c unsigned long event, void *data) event 499 drivers/gpu/drm/sti/sti_gdp.c switch (event) { event 507 drivers/gpu/drm/sti/sti_gdp.c DRM_ERROR("unsupported event: %lu\n", event); event 355 drivers/gpu/drm/sti/sti_vtg.c u32 event; event 357 drivers/gpu/drm/sti/sti_vtg.c event = (vtg->irq_status & VTG_IRQ_TOP) ? 
event 360 drivers/gpu/drm/sti/sti_vtg.c raw_notifier_call_chain(&vtg->notifier_list, event, vtg->crtc); event 623 drivers/gpu/drm/stm/ltdc.c struct drm_pending_vblank_event *event = crtc->state->event; event 632 drivers/gpu/drm/stm/ltdc.c if (event) { event 633 drivers/gpu/drm/stm/ltdc.c crtc->state->event = NULL; event 637 drivers/gpu/drm/stm/ltdc.c drm_crtc_arm_vblank_event(crtc, event); event 639 drivers/gpu/drm/stm/ltdc.c drm_crtc_send_vblank_event(crtc, event); event 68 drivers/gpu/drm/sun4i/sun4i_crtc.c if (crtc->state->event) { event 72 drivers/gpu/drm/sun4i/sun4i_crtc.c scrtc->event = crtc->state->event; event 74 drivers/gpu/drm/sun4i/sun4i_crtc.c crtc->state->event = NULL; event 85 drivers/gpu/drm/sun4i/sun4i_crtc.c struct drm_pending_vblank_event *event = crtc->state->event; event 91 drivers/gpu/drm/sun4i/sun4i_crtc.c if (event) { event 92 drivers/gpu/drm/sun4i/sun4i_crtc.c crtc->state->event = NULL; event 96 drivers/gpu/drm/sun4i/sun4i_crtc.c drm_crtc_arm_vblank_event(crtc, event); event 98 drivers/gpu/drm/sun4i/sun4i_crtc.c drm_crtc_send_vblank_event(crtc, event); event 115 drivers/gpu/drm/sun4i/sun4i_crtc.c if (crtc->state->event && !crtc->state->active) { event 117 drivers/gpu/drm/sun4i/sun4i_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 120 drivers/gpu/drm/sun4i/sun4i_crtc.c crtc->state->event = NULL; event 14 drivers/gpu/drm/sun4i/sun4i_crtc.h struct drm_pending_vblank_event *event; event 707 drivers/gpu/drm/sun4i/sun4i_tcon.c if (scrtc->event) { event 708 drivers/gpu/drm/sun4i/sun4i_tcon.c drm_crtc_send_vblank_event(&scrtc->crtc, scrtc->event); event 710 drivers/gpu/drm/sun4i/sun4i_tcon.c scrtc->event = NULL; event 1767 drivers/gpu/drm/tegra/dc.c if (crtc->state->event) { event 1768 drivers/gpu/drm/tegra/dc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 1769 drivers/gpu/drm/tegra/dc.c crtc->state->event = NULL; event 1896 drivers/gpu/drm/tegra/dc.c if (crtc->state->event) { event 1900 drivers/gpu/drm/tegra/dc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 1902 drivers/gpu/drm/tegra/dc.c drm_crtc_arm_vblank_event(crtc, crtc->state->event); event 1906 drivers/gpu/drm/tegra/dc.c crtc->state->event = NULL; event 34 drivers/gpu/drm/tilcdc/tilcdc_crtc.c struct drm_pending_vblank_event *event; event 583 drivers/gpu/drm/tilcdc/tilcdc_crtc.c struct drm_pending_vblank_event *event) event 588 drivers/gpu/drm/tilcdc/tilcdc_crtc.c if (tilcdc_crtc->event) { event 593 drivers/gpu/drm/tilcdc/tilcdc_crtc.c tilcdc_crtc->event = event; event 897 drivers/gpu/drm/tilcdc/tilcdc_crtc.c struct drm_pending_vblank_event *event; event 901 drivers/gpu/drm/tilcdc/tilcdc_crtc.c event = tilcdc_crtc->event; event 902 drivers/gpu/drm/tilcdc/tilcdc_crtc.c tilcdc_crtc->event = NULL; event 903 drivers/gpu/drm/tilcdc/tilcdc_crtc.c if (event) event 904 drivers/gpu/drm/tilcdc/tilcdc_crtc.c drm_crtc_send_vblank_event(crtc, event); event 165 drivers/gpu/drm/tilcdc/tilcdc_drv.h struct drm_pending_vblank_event *event); event 89 drivers/gpu/drm/tilcdc/tilcdc_plane.c state->crtc->state->event); event 619 drivers/gpu/drm/tiny/gm12u320.c if (crtc->state->event) { event 621 drivers/gpu/drm/tiny/gm12u320.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 622 drivers/gpu/drm/tiny/gm12u320.c crtc->state->event = NULL; event 174 drivers/gpu/drm/tiny/ili9225.c if (crtc->state->event) { event 176 drivers/gpu/drm/tiny/ili9225.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 178 drivers/gpu/drm/tiny/ili9225.c crtc->state->event = NULL; event 865 
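Several of the surrounding references (sti_vtg's raw_notifier_call_chain above, and the hsi, regulator and extcon callbacks elsewhere in this listing) are ordinary Linux notifier chains: a producer fires *_notifier_call_chain() with an event code, and each registered notifier_block callback switches on it. A generic sketch with illustrative names:

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(example_chain);

static int example_event_cb(struct notifier_block *nb,
                            unsigned long event, void *data)
{
    switch (event) {
    case 0:
        /* handle event code 0 */
        break;
    default:
        /* ignore events we do not care about */
        break;
    }
    return NOTIFY_OK;
}

static struct notifier_block example_nb = {
    .notifier_call = example_event_cb,
};

void example_register(void)
{
    blocking_notifier_chain_register(&example_chain, &example_nb);
}

void example_fire(unsigned long event, void *data)
{
    blocking_notifier_call_chain(&example_chain, event, data);
}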
drivers/gpu/drm/tiny/repaper.c if (crtc->state->event) { event 867 drivers/gpu/drm/tiny/repaper.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 869 drivers/gpu/drm/tiny/repaper.c crtc->state->event = NULL; event 168 drivers/gpu/drm/tiny/st7586.c if (crtc->state->event) { event 170 drivers/gpu/drm/tiny/st7586.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 172 drivers/gpu/drm/tiny/st7586.c crtc->state->event = NULL; event 245 drivers/gpu/drm/tve200/tve200_display.c struct drm_pending_vblank_event *event = crtc->state->event; event 264 drivers/gpu/drm/tve200/tve200_display.c if (event) { event 265 drivers/gpu/drm/tve200/tve200_display.c crtc->state->event = NULL; event 269 drivers/gpu/drm/tve200/tve200_display.c drm_crtc_arm_vblank_event(crtc, event); event 271 drivers/gpu/drm/tve200/tve200_display.c drm_crtc_send_vblank_event(crtc, event); event 361 drivers/gpu/drm/udl/udl_modeset.c struct drm_pending_vblank_event *event, event 378 drivers/gpu/drm/udl/udl_modeset.c if (event) event 379 drivers/gpu/drm/udl/udl_modeset.c drm_crtc_send_vblank_event(crtc, event); event 228 drivers/gpu/drm/vboxvideo/vbox_mode.c struct drm_pending_vblank_event *event; event 231 drivers/gpu/drm/vboxvideo/vbox_mode.c if (crtc->state && crtc->state->event) { event 232 drivers/gpu/drm/vboxvideo/vbox_mode.c event = crtc->state->event; event 233 drivers/gpu/drm/vboxvideo/vbox_mode.c crtc->state->event = NULL; event 236 drivers/gpu/drm/vboxvideo/vbox_mode.c drm_crtc_send_vblank_event(crtc, event); event 497 drivers/gpu/drm/vc4/vc4_crtc.c if (crtc->state->event) { event 501 drivers/gpu/drm/vc4/vc4_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 502 drivers/gpu/drm/vc4/vc4_crtc.c crtc->state->event = NULL; event 521 drivers/gpu/drm/vc4/vc4_crtc.c if (crtc->state->event) { event 524 drivers/gpu/drm/vc4/vc4_crtc.c crtc->state->event->pipe = drm_crtc_index(crtc); event 531 drivers/gpu/drm/vc4/vc4_crtc.c vc4_crtc->event = crtc->state->event; event 532 drivers/gpu/drm/vc4/vc4_crtc.c crtc->state->event = NULL; event 791 drivers/gpu/drm/vc4/vc4_crtc.c if (vc4_crtc->event && event 794 drivers/gpu/drm/vc4/vc4_crtc.c drm_crtc_send_vblank_event(crtc, vc4_crtc->event); event 795 drivers/gpu/drm/vc4/vc4_crtc.c vc4_crtc->event = NULL; event 835 drivers/gpu/drm/vc4/vc4_crtc.c struct drm_pending_vblank_event *event; event 854 drivers/gpu/drm/vc4/vc4_crtc.c if (flip_state->event) { event 858 drivers/gpu/drm/vc4/vc4_crtc.c drm_crtc_send_vblank_event(crtc, flip_state->event); event 894 drivers/gpu/drm/vc4/vc4_crtc.c struct drm_pending_vblank_event *event, event 925 drivers/gpu/drm/vc4/vc4_crtc.c flip_state->event = event; event 965 drivers/gpu/drm/vc4/vc4_crtc.c struct drm_pending_vblank_event *event, event 970 drivers/gpu/drm/vc4/vc4_crtc.c return vc4_async_page_flip(crtc, fb, event, flags); event 972 drivers/gpu/drm/vc4/vc4_crtc.c return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx); event 467 drivers/gpu/drm/vc4/vc4_drv.h struct drm_pending_vblank_event *event; event 124 drivers/gpu/drm/virtio/virtgpu_display.c if (crtc->state->event) event 125 drivers/gpu/drm/virtio/virtgpu_display.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 126 drivers/gpu/drm/virtio/virtgpu_display.c crtc->state->event = NULL; event 237 drivers/gpu/drm/vkms/vkms_crtc.c if (crtc->state->event) { event 241 drivers/gpu/drm/vkms/vkms_crtc.c drm_crtc_send_vblank_event(crtc, crtc->state->event); event 243 drivers/gpu/drm/vkms/vkms_crtc.c drm_crtc_arm_vblank_event(crtc, crtc->state->event); event 247 
drivers/gpu/drm/vkms/vkms_crtc.c crtc->state->event = NULL; event 68 drivers/gpu/drm/vkms/vkms_drv.h struct drm_pending_vblank_event *event; event 1563 drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h SVGADXEventQueryResult event; event 1272 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c dummy.event = 0; event 76 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c struct drm_pending_event *event; event 912 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c struct drm_pending_event *event = eaction->event; event 914 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c if (unlikely(event == NULL)) event 928 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c drm_send_event_locked(dev, eaction->event); event 929 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c eaction->event = NULL; event 1017 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c struct drm_pending_event *event, event 1029 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c eaction->event = event; event 1047 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c struct drm_vmw_event_fence event; event 1056 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c struct vmw_event_fence_pending *event; event 1061 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c event = kzalloc(sizeof(*event), GFP_KERNEL); event 1062 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c if (unlikely(!event)) { event 1068 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; event 1069 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c event->event.base.length = sizeof(*event); event 1070 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c event->event.user_data = user_data; event 1072 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base); event 1076 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c kfree(event); event 1082 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c &event->base, event 1083 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c &event->event.tv_sec, event 1084 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c &event->event.tv_usec, event 1088 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c &event->base, event 1098 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c drm_event_cancel_free(dev, &event->base); event 128 drivers/gpu/drm/vmwgfx/vmwgfx_fence.h struct drm_pending_event *event, event 563 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c struct drm_pending_vblank_event *event = crtc->state->event; event 565 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c if (event) { event 566 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c crtc->state->event = NULL; event 569 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c drm_crtc_send_vblank_event(crtc, event); event 731 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c struct drm_pending_vblank_event *event = NULL; event 756 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c event = crtc->state->event; event 757 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c if (event && fence) { event 758 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c struct drm_file *file_priv = event->base.file_priv; event 762 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c &event->base, event 763 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c &event->event.vbl.tv_sec, event 764 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c &event->event.vbl.tv_usec, event 770 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c crtc->state->event = NULL; event 1595 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c struct drm_pending_vblank_event *event; event 1645 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c event = crtc->state->event; event 1646 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c if (event && fence) { event 1647 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c struct drm_file *file_priv = event->base.file_priv; event 1651 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c &event->base, event 1652 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c &event->event.vbl.tv_sec, event 1653 
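The vmwgfx fence-event code above uses the generic DRM mechanism for delivering a driver-defined event to user space: reserve room on the file's event queue with drm_event_reserve_init(), then send the event once its condition occurs (or drop it with drm_event_cancel_free() if delivery is abandoned). A minimal sketch under those assumptions; the struct name and event type are hypothetical:

#include <linux/slab.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>

struct example_pending {
    struct drm_pending_event base;
    struct drm_event event;        /* payload sent to user space */
};

static int example_queue_event(struct drm_device *dev,
                               struct drm_file *file_priv, u32 type)
{
    struct example_pending *e;
    int ret;

    e = kzalloc(sizeof(*e), GFP_KERNEL);
    if (!e)
        return -ENOMEM;

    e->event.type = type;          /* hypothetical event code */
    e->event.length = sizeof(e->event);

    ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
    if (ret) {
        kfree(e);                  /* reservation failed, nothing to cancel */
        return ret;
    }

    /* later, when the condition the event describes has happened: */
    drm_send_event(dev, &e->base);
    return 0;
}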
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c &event->event.vbl.tv_usec, event 1658 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c crtc->state->event = NULL; event 101 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c struct xendispl_evt *event; event 103 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c event = &XENDISPL_IN_RING_REF(page, cons); event 104 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c if (unlikely(event->id != evtchnl->evt_id++)) event 107 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c switch (event->type) { event 110 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c event->op.pg_flip.fb_cookie); event 236 drivers/gpu/drm/xen/xen_drm_front_kms.c struct drm_pending_vblank_event *event; event 239 drivers/gpu/drm/xen/xen_drm_front_kms.c event = crtc->state->event; event 240 drivers/gpu/drm/xen/xen_drm_front_kms.c if (event) { event 247 drivers/gpu/drm/xen/xen_drm_front_kms.c crtc->state->event = NULL; event 249 drivers/gpu/drm/xen/xen_drm_front_kms.c pipeline->pending_event = event; event 478 drivers/gpu/drm/zte/zx_vou.c struct drm_pending_vblank_event *event = crtc->state->event; event 480 drivers/gpu/drm/zte/zx_vou.c if (!event) event 483 drivers/gpu/drm/zte/zx_vou.c crtc->state->event = NULL; event 487 drivers/gpu/drm/zte/zx_vou.c drm_crtc_arm_vblank_event(crtc, event); event 489 drivers/gpu/drm/zte/zx_vou.c drm_crtc_send_vblank_event(crtc, event); event 186 drivers/gpu/host1x/cdma.c enum cdma_event event) event 192 drivers/gpu/host1x/cdma.c switch (event) { event 210 drivers/gpu/host1x/cdma.c event); event 213 drivers/gpu/host1x/cdma.c if (cdma->event != CDMA_EVENT_NONE) { event 220 drivers/gpu/host1x/cdma.c cdma->event = event; event 253 drivers/gpu/host1x/cdma.c if (cdma->event != CDMA_EVENT_NONE) { event 260 drivers/gpu/host1x/cdma.c cdma->event = CDMA_EVENT_PUSH_BUFFER_SPACE; event 352 drivers/gpu/host1x/cdma.c if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE) event 360 drivers/gpu/host1x/cdma.c if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY && event 365 drivers/gpu/host1x/cdma.c cdma->event = CDMA_EVENT_NONE; event 458 drivers/gpu/host1x/cdma.c cdma->event = CDMA_EVENT_NONE; event 62 drivers/gpu/host1x/cdma.h enum cdma_event event; /* event that complete is waiting for */ event 89 drivers/gpu/host1x/cdma.h enum cdma_event event); event 109 drivers/gpu/ipu-v3/ipu-dc.c static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority) event 113 drivers/gpu/ipu-v3/ipu-dc.c reg = readl(dc->base + DC_RL_CH(event)); event 114 drivers/gpu/ipu-v3/ipu-dc.c reg &= ~(0xffff << (16 * (event & 0x1))); event 115 drivers/gpu/ipu-v3/ipu-dc.c reg |= ((addr << 8) | priority) << (16 * (event & 0x1)); event 116 drivers/gpu/ipu-v3/ipu-dc.c writel(reg, dc->base + DC_RL_CH(event)); event 159 drivers/hid/hid-a4tech.c .event = a4_event, event 585 drivers/hid/hid-apple.c .event = apple_event, event 1075 drivers/hid/hid-asus.c .event = asus_event, event 1501 drivers/hid/hid-core.c if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { event 1502 drivers/hid/hid-core.c ret = hdrv->event(hid, field, usage, value); event 738 drivers/hid/hid-corsair.c .event = corsair_event, event 140 drivers/hid/hid-cypress.c .event = cp_event, event 74 drivers/hid/hid-ezkey.c .event = ez_event, event 493 drivers/hid/hid-google-hammer.c .event = hammer_event, event 86 drivers/hid/hid-gyration.c .event = gyration_event, event 136 drivers/hid/hid-holtek-kbd.c return boot_hid_input->input->event(boot_hid_input->input, type, code, event 152 drivers/hid/hid-holtek-kbd.c hidinput->input->event = holtek_kbd_input_event; event 231 
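The .event = xxx_event initializers above plug into hid-core's dispatch (hid-core.c lines 1501-1502, where hdrv->event(hid, field, usage, value) is called per parsed usage). A sketch of such a handler in a hypothetical driver; the names and vendor/product IDs are placeholders:

#include <linux/hid.h>
#include <linux/module.h>

static int example_event(struct hid_device *hdev, struct hid_field *field,
                         struct hid_usage *usage, __s32 value)
{
    /* return 0 to let generic hid-input handling continue,
     * or 1 to claim the usage and suppress generic handling */
    if (usage->hid == HID_GD_X)
        hid_dbg(hdev, "X axis: %d\n", value);
    return 0;
}

static const struct hid_device_id example_devices[] = {
    { HID_USB_DEVICE(0x1234, 0x5678) },    /* placeholder VID/PID */
    { }
};
MODULE_DEVICE_TABLE(hid, example_devices);

static struct hid_driver example_driver = {
    .name     = "example-hid",
    .id_table = example_devices,
    .event    = example_event,
};
module_hid_driver(example_driver);

MODULE_LICENSE("GPL");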
drivers/hid/hid-icade.c .event = icade_event, event 1720 drivers/hid/hid-input.c input_dev->event = hidinput_input_event; event 54 drivers/hid/hid-ite.c .event = ite_event, event 936 drivers/hid/hid-lenovo.c .event = lenovo_event, event 950 drivers/hid/hid-lg.c .event = lg_event, event 3829 drivers/hid/hid-logitech-hidpp.c .event = hidpp_event, event 464 drivers/hid/hid-microsoft.c .event = ms_event, event 2178 drivers/hid/hid-multitouch.c .event = mt_event, event 1028 drivers/hid/hid-ntrig.c .event = ntrig_event, event 774 drivers/hid/hid-rmi.c .event = rmi_event, event 388 drivers/hid/hid-roccat-arvo.c roccat_report.button = special_report->event & event 390 drivers/hid/hid-roccat-arvo.c if ((special_report->event & ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION) == event 48 drivers/hid/hid-roccat-arvo.h uint8_t event; event 99 drivers/hid/hid-roccat-isku.c roccat_report.event = ISKU_REPORT_BUTTON_EVENT_PROFILE; event 367 drivers/hid/hid-roccat-isku.c switch (button_report->event) { event 387 drivers/hid/hid-roccat-isku.c roccat_report.event = button_report->event; event 68 drivers/hid/hid-roccat-isku.h uint8_t event; event 82 drivers/hid/hid-roccat-isku.h uint8_t event; event 47 drivers/hid/hid-roccat-kone.c roccat_report.event = kone_mouse_event_switch_profile; event 777 drivers/hid/hid-roccat-kone.c struct kone_mouse_event const *event) event 779 drivers/hid/hid-roccat-kone.c switch (event->event) { event 781 drivers/hid/hid-roccat-kone.c kone->actual_dpi = kone->profiles[event->value - 1]. event 785 drivers/hid/hid-roccat-kone.c kone->actual_profile = event->value; event 789 drivers/hid/hid-roccat-kone.c kone->actual_dpi = event->value; event 795 drivers/hid/hid-roccat-kone.c struct kone_mouse_event const *event) event 799 drivers/hid/hid-roccat-kone.c switch (event->event) { event 804 drivers/hid/hid-roccat-kone.c roccat_report.event = event->event; event 805 drivers/hid/hid-roccat-kone.c roccat_report.value = event->value; event 812 drivers/hid/hid-roccat-kone.c if (event->value == kone_keystroke_action_press) { event 813 drivers/hid/hid-roccat-kone.c roccat_report.event = event->event; event 815 drivers/hid/hid-roccat-kone.c roccat_report.key = event->macro_key; event 833 drivers/hid/hid-roccat-kone.c struct kone_mouse_event *event = (struct kone_mouse_event *)data; event 847 drivers/hid/hid-roccat-kone.c if (memcmp(&kone->last_mouse_event.tilt, &event->tilt, 5)) event 848 drivers/hid/hid-roccat-kone.c memcpy(&kone->last_mouse_event, event, event 851 drivers/hid/hid-roccat-kone.c memset(&event->tilt, 0, 5); event 853 drivers/hid/hid-roccat-kone.c kone_keep_values_up_to_date(kone, event); event 856 drivers/hid/hid-roccat-kone.c kone_report_to_chrdev(kone, event); event 157 drivers/hid/hid-roccat-kone.h uint8_t event; event 186 drivers/hid/hid-roccat-kone.h uint8_t event; event 201 drivers/hid/hid-saitek.c .event = saitek_event, event 74 drivers/hid/hid-speedlink.c .event = speedlink_event, event 55 drivers/hid/hid-xinmo.c .event = xinmo_event event 88 drivers/hid/uhid.c static int uhid_queue_event(struct uhid_device *uhid, __u32 event) event 97 drivers/hid/uhid.c ev->type = event; event 400 drivers/hid/uhid.c struct uhid_event *event) event 429 drivers/hid/uhid.c event->type = type; event 431 drivers/hid/uhid.c memcpy(event->u.create.name, compat->name, event 433 drivers/hid/uhid.c memcpy(event->u.create.phys, compat->phys, event 435 drivers/hid/uhid.c memcpy(event->u.create.uniq, compat->uniq, event 438 drivers/hid/uhid.c event->u.create.rd_data = compat_ptr(compat->rd_data); event 439 
drivers/hid/uhid.c event->u.create.rd_size = compat->rd_size; event 441 drivers/hid/uhid.c event->u.create.bus = compat->bus; event 442 drivers/hid/uhid.c event->u.create.vendor = compat->vendor; event 443 drivers/hid/uhid.c event->u.create.product = compat->product; event 444 drivers/hid/uhid.c event->u.create.version = compat->version; event 445 drivers/hid/uhid.c event->u.create.country = compat->country; event 453 drivers/hid/uhid.c if (copy_from_user(event, buffer, min(len, sizeof(*event)))) event 460 drivers/hid/uhid.c struct uhid_event *event) event 462 drivers/hid/uhid.c if (copy_from_user(event, buffer, min(len, sizeof(*event)))) event 378 drivers/hid/usbhid/hiddev.c struct hiddev_event event; event 380 drivers/hid/usbhid/hiddev.c event.hid = list->buffer[list->tail].usage_code; event 381 drivers/hid/usbhid/hiddev.c event.value = list->buffer[list->tail].value; event 382 drivers/hid/usbhid/hiddev.c if (copy_to_user(buffer + retval, &event, sizeof(struct hiddev_event))) { event 331 drivers/hid/usbhid/usbkbd.c input_dev->event = usb_kbd_event; event 905 drivers/hsi/clients/ssi_protocol.c static void ssip_port_event(struct hsi_client *cl, unsigned long event) event 907 drivers/hsi/clients/ssi_protocol.c switch (event) { event 268 drivers/hsi/controllers/omap_ssi_core.c static int ssi_clk_event(struct notifier_block *nb, unsigned long event, event 278 drivers/hsi/controllers/omap_ssi_core.c switch (event) { event 645 drivers/hsi/hsi_core.c unsigned long event, void *data __maybe_unused) event 649 drivers/hsi/hsi_core.c (*cl->ehandler)(cl, event); event 721 drivers/hsi/hsi_core.c int hsi_event(struct hsi_port *port, unsigned long event) event 723 drivers/hsi/hsi_core.c return blocking_notifier_call_chain(&port->n_head, event, NULL); event 1185 drivers/hv/vmbus_drv.c union hv_synic_event_flags *event event 1190 drivers/hv/vmbus_drv.c recv_int_page = event->flags; event 1243 drivers/hv/vmbus_drv.c union hv_synic_event_flags *event; event 1249 drivers/hv/vmbus_drv.c event = (union hv_synic_event_flags *)page_addr + event 1261 drivers/hv/vmbus_drv.c if (sync_test_and_clear_bit(0, event->flags)) event 810 drivers/hwmon/acpi_power_meter.c static void acpi_power_meter_notify(struct acpi_device *device, u32 event) event 821 drivers/hwmon/acpi_power_meter.c switch (event) { event 848 drivers/hwmon/acpi_power_meter.c WARN(1, "Unexpected event %d\n", event); event 854 drivers/hwmon/acpi_power_meter.c dev_name(&device->dev), event, 0); event 896 drivers/hwmon/sht15.c unsigned long event, event 901 drivers/hwmon/sht15.c if (event == REGULATOR_EVENT_VOLTAGE_CHANGE) event 189 drivers/hwtracing/coresight/coresight-etb10.c pid = task_pid_nr(handle->event->owner); event 374 drivers/hwtracing/coresight/coresight-etb10.c struct perf_event *event, void **pages, event 380 drivers/hwtracing/coresight/coresight-etb10.c node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu); event 79 drivers/hwtracing/coresight/coresight-etm-perf.c static void etm_event_read(struct perf_event *event) {} event 81 drivers/hwtracing/coresight/coresight-etm-perf.c static int etm_addr_filters_alloc(struct perf_event *event) event 84 drivers/hwtracing/coresight/coresight-etm-perf.c int node = event->cpu == -1 ? 
-1 : cpu_to_node(event->cpu); event 90 drivers/hwtracing/coresight/coresight-etm-perf.c if (event->parent) event 91 drivers/hwtracing/coresight/coresight-etm-perf.c memcpy(filters, event->parent->hw.addr_filters, event 94 drivers/hwtracing/coresight/coresight-etm-perf.c event->hw.addr_filters = filters; event 99 drivers/hwtracing/coresight/coresight-etm-perf.c static void etm_event_destroy(struct perf_event *event) event 101 drivers/hwtracing/coresight/coresight-etm-perf.c kfree(event->hw.addr_filters); event 102 drivers/hwtracing/coresight/coresight-etm-perf.c event->hw.addr_filters = NULL; event 105 drivers/hwtracing/coresight/coresight-etm-perf.c static int etm_event_init(struct perf_event *event) event 109 drivers/hwtracing/coresight/coresight-etm-perf.c if (event->attr.type != etm_pmu.type) { event 114 drivers/hwtracing/coresight/coresight-etm-perf.c ret = etm_addr_filters_alloc(event); event 118 drivers/hwtracing/coresight/coresight-etm-perf.c event->destroy = etm_event_destroy; event 207 drivers/hwtracing/coresight/coresight-etm-perf.c static void *etm_setup_aux(struct perf_event *event, void **pages, event 211 drivers/hwtracing/coresight/coresight-etm-perf.c int cpu = event->cpu; event 222 drivers/hwtracing/coresight/coresight-etm-perf.c if (event->attr.config2) { event 223 drivers/hwtracing/coresight/coresight-etm-perf.c id = (u32)event->attr.config2; event 280 drivers/hwtracing/coresight/coresight-etm-perf.c sink_ops(sink)->alloc_buffer(sink, event, pages, event 294 drivers/hwtracing/coresight/coresight-etm-perf.c static void etm_event_start(struct perf_event *event, int flags) event 309 drivers/hwtracing/coresight/coresight-etm-perf.c event_data = perf_aux_output_begin(handle, event); event 324 drivers/hwtracing/coresight/coresight-etm-perf.c event->hw.state = 0; event 327 drivers/hwtracing/coresight/coresight-etm-perf.c if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) event 339 drivers/hwtracing/coresight/coresight-etm-perf.c event->hw.state = PERF_HES_STOPPED; event 343 drivers/hwtracing/coresight/coresight-etm-perf.c static void etm_event_stop(struct perf_event *event, int mode) event 352 drivers/hwtracing/coresight/coresight-etm-perf.c if (event->hw.state == PERF_HES_STOPPED) event 367 drivers/hwtracing/coresight/coresight-etm-perf.c source_ops(csdev)->disable(csdev, event); event 370 drivers/hwtracing/coresight/coresight-etm-perf.c event->hw.state = PERF_HES_STOPPED; event 373 drivers/hwtracing/coresight/coresight-etm-perf.c if (WARN_ON_ONCE(handle->event != event)) event 389 drivers/hwtracing/coresight/coresight-etm-perf.c static int etm_event_add(struct perf_event *event, int mode) event 392 drivers/hwtracing/coresight/coresight-etm-perf.c struct hw_perf_event *hwc = &event->hw; event 395 drivers/hwtracing/coresight/coresight-etm-perf.c etm_event_start(event, 0); event 405 drivers/hwtracing/coresight/coresight-etm-perf.c static void etm_event_del(struct perf_event *event, int mode) event 407 drivers/hwtracing/coresight/coresight-etm-perf.c etm_event_stop(event, PERF_EF_UPDATE); event 449 drivers/hwtracing/coresight/coresight-etm-perf.c static void etm_addr_filters_sync(struct perf_event *event) event 451 drivers/hwtracing/coresight/coresight-etm-perf.c struct perf_addr_filters_head *head = perf_event_addr_filters(event); event 453 drivers/hwtracing/coresight/coresight-etm-perf.c struct perf_addr_filter_range *fr = event->addr_filter_ranges; event 454 drivers/hwtracing/coresight/coresight-etm-perf.c struct etm_filters *filters = event->hw.addr_filters; event 309 
drivers/hwtracing/coresight/coresight-etm3x.c struct perf_event *event) event 312 drivers/hwtracing/coresight/coresight-etm3x.c struct perf_event_attr *attr = &event->attr; event 491 drivers/hwtracing/coresight/coresight-etm3x.c struct perf_event *event) event 499 drivers/hwtracing/coresight/coresight-etm3x.c etm_parse_event_config(drvdata, event); event 536 drivers/hwtracing/coresight/coresight-etm3x.c struct perf_event *event, u32 mode) event 553 drivers/hwtracing/coresight/coresight-etm3x.c ret = etm_enable_perf(csdev, event); event 638 drivers/hwtracing/coresight/coresight-etm3x.c struct perf_event *event) event 520 drivers/hwtracing/coresight/coresight-etm4x-sysfs.c static DEVICE_ATTR_RW(event); event 45 drivers/hwtracing/coresight/coresight-etm4x.c struct perf_event *event); event 302 drivers/hwtracing/coresight/coresight-etm4x.c struct perf_event *event) event 306 drivers/hwtracing/coresight/coresight-etm4x.c struct perf_event_attr *attr = &event->attr; event 326 drivers/hwtracing/coresight/coresight-etm4x.c ret = etm4_set_event_filters(drvdata, event); event 369 drivers/hwtracing/coresight/coresight-etm4x.c struct perf_event *event) event 380 drivers/hwtracing/coresight/coresight-etm4x.c ret = etm4_parse_event_config(drvdata, event); event 417 drivers/hwtracing/coresight/coresight-etm4x.c struct perf_event *event, u32 mode) event 434 drivers/hwtracing/coresight/coresight-etm4x.c ret = etm4_enable_perf(csdev, event); event 482 drivers/hwtracing/coresight/coresight-etm4x.c struct perf_event *event) event 485 drivers/hwtracing/coresight/coresight-etm4x.c struct etm_filters *filters = event->hw.addr_filters; event 532 drivers/hwtracing/coresight/coresight-etm4x.c struct perf_event *event) event 551 drivers/hwtracing/coresight/coresight-etm4x.c etm4_disable_perf(csdev, event); event 931 drivers/hwtracing/coresight/coresight-etm4x.c struct perf_event *event) event 936 drivers/hwtracing/coresight/coresight-etm4x.c struct etm_filters *filters = event->hw.addr_filters; event 942 drivers/hwtracing/coresight/coresight-etm4x.c perf_event_addr_filters_sync(event); event 195 drivers/hwtracing/coresight/coresight-stm.c struct perf_event *event, u32 mode) event 258 drivers/hwtracing/coresight/coresight-stm.c struct perf_event *event) event 246 drivers/hwtracing/coresight/coresight-tmc-etf.c pid = task_pid_nr(handle->event->owner); event 389 drivers/hwtracing/coresight/coresight-tmc-etf.c struct perf_event *event, void **pages, event 395 drivers/hwtracing/coresight/coresight-tmc-etf.c node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu); event 1203 drivers/hwtracing/coresight/coresight-tmc-etr.c alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event, event 1210 drivers/hwtracing/coresight/coresight-tmc-etr.c node = (event->cpu == -1) ? 
NUMA_NO_NODE : cpu_to_node(event->cpu); event 1242 drivers/hwtracing/coresight/coresight-tmc-etr.c struct perf_event *event, int nr_pages, event 1246 drivers/hwtracing/coresight/coresight-tmc-etr.c pid_t pid = task_pid_nr(event->owner); event 1279 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); event 1306 drivers/hwtracing/coresight/coresight-tmc-etr.c struct perf_event *event, int nr_pages, event 1313 drivers/hwtracing/coresight/coresight-tmc-etr.c return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); event 1317 drivers/hwtracing/coresight/coresight-tmc-etr.c get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event, event 1320 drivers/hwtracing/coresight/coresight-tmc-etr.c if (event->cpu == -1) event 1321 drivers/hwtracing/coresight/coresight-tmc-etr.c return get_perf_etr_buf_per_thread(drvdata, event, nr_pages, event 1324 drivers/hwtracing/coresight/coresight-tmc-etr.c return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages, event 1329 drivers/hwtracing/coresight/coresight-tmc-etr.c tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event, event 1336 drivers/hwtracing/coresight/coresight-tmc-etr.c node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu); event 1342 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot); event 1362 drivers/hwtracing/coresight/coresight-tmc-etr.c struct perf_event *event, void **pages, event 1368 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_perf = tmc_etr_setup_perf_buf(drvdata, event, event 1375 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_perf->pid = task_pid_nr(event->owner); event 88 drivers/i2c/busses/i2c-amd-mp2-plat.c union i2c_event *event = &i2c_common->eventval; event 90 drivers/i2c/busses/i2c-amd-mp2-plat.c if (event->r.status == i2c_readcomplete_event) event 92 drivers/i2c/busses/i2c-amd-mp2-plat.c __func__, event->r.length, event 774 drivers/i2c/busses/i2c-cadence.c event, void *data) event 782 drivers/i2c/busses/i2c-cadence.c switch (event) { event 231 drivers/i2c/busses/i2c-emev2.c enum i2c_slave_event event; event 267 drivers/i2c/busses/i2c-emev2.c event = status & I2C_BIT_STD0 ? 
event 270 drivers/i2c/busses/i2c-emev2.c i2c_slave_event(priv->slave, event, &value); event 576 drivers/i2c/busses/i2c-pxa.c i2c->slave->event(i2c->slave->data, event 610 drivers/i2c/busses/i2c-pxa.c i2c->slave->event(i2c->slave->data, I2C_SLAVE_EVENT_STOP); event 924 drivers/i2c/busses/i2c-rk3x.c event, void *data) event 930 drivers/i2c/busses/i2c-rk3x.c switch (event) { event 561 drivers/i2c/busses/i2c-stm32f4.c u32 status, ien, event, cr2; event 571 drivers/i2c/busses/i2c-stm32f4.c event = status & possible_status; event 572 drivers/i2c/busses/i2c-stm32f4.c if (!event) { event 580 drivers/i2c/busses/i2c-stm32f4.c if (event & STM32F4_I2C_SR1_SB) event 584 drivers/i2c/busses/i2c-stm32f4.c if (event & STM32F4_I2C_SR1_ADDR) { event 599 drivers/i2c/busses/i2c-stm32f4.c if ((event & STM32F4_I2C_SR1_TXE) && !(msg->addr & I2C_M_RD)) event 603 drivers/i2c/busses/i2c-stm32f4.c if ((event & STM32F4_I2C_SR1_RXNE) && (msg->addr & I2C_M_RD)) event 613 drivers/i2c/busses/i2c-stm32f4.c if (event & STM32F4_I2C_SR1_BTF) { event 46 drivers/i2c/i2c-slave-eeprom.c enum i2c_slave_event event, u8 *val) event 50 drivers/i2c/i2c-slave-eeprom.c switch (event) { event 26 drivers/ide/ide-pm.c if (mesg.event == PM_EVENT_PRETHAW) event 27 drivers/ide/ide-pm.c mesg.event = PM_EVENT_FREEZE; event 28 drivers/ide/ide-pm.c rqpm.pm_state = mesg.event; event 1223 drivers/ide/pmac.c if (mesg.event != mdev->ofdev.dev.power.power_state.event event 1224 drivers/ide/pmac.c && (mesg.event & PM_EVENT_SLEEP)) { event 1239 drivers/ide/pmac.c if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) { event 1326 drivers/ide/pmac.c if (mesg.event != pdev->dev.power.power_state.event event 1327 drivers/ide/pmac.c && (mesg.event & PM_EVENT_SLEEP)) { event 1342 drivers/ide/pmac.c if (pdev->dev.power.power_state.event != PM_EVENT_ON) { event 232 drivers/ide/sc1200.c printk("SC1200: suspend(%u)\n", state.event); event 237 drivers/ide/sc1200.c if (state.event == PM_EVENT_ON) { event 716 drivers/iio/accel/mma9553.c struct mma9553_event *event; event 718 drivers/iio/accel/mma9553.c event = mma9553_get_event(data, chan->type, chan->channel2, dir); event 719 drivers/iio/accel/mma9553.c if (!event) event 722 drivers/iio/accel/mma9553.c return event->enabled; event 731 drivers/iio/accel/mma9553.c struct mma9553_event *event; event 734 drivers/iio/accel/mma9553.c event = mma9553_get_event(data, chan->type, chan->channel2, dir); event 735 drivers/iio/accel/mma9553.c if (!event) event 738 drivers/iio/accel/mma9553.c if (event->enabled == state) event 746 drivers/iio/accel/mma9553.c event->enabled = state; event 758 drivers/iio/accel/mma9553.c event->enabled = false; event 16 drivers/iio/adc/xilinx-xadc-events.c struct iio_dev *indio_dev, unsigned int event) event 18 drivers/iio/adc/xilinx-xadc-events.c switch (event) { event 24 drivers/iio/adc/xilinx-xadc-events.c return &indio_dev->channels[event]; event 26 drivers/iio/adc/xilinx-xadc-events.c return &indio_dev->channels[event-1]; event 30 drivers/iio/adc/xilinx-xadc-events.c static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event) event 35 drivers/iio/adc/xilinx-xadc-events.c if (event == 0) event 38 drivers/iio/adc/xilinx-xadc-events.c chan = xadc_event_to_channel(indio_dev, event); event 133 drivers/iio/dummy/iio_dummy_evgen.c unsigned long event; event 136 drivers/iio/dummy/iio_dummy_evgen.c ret = kstrtoul(buf, 10, &event); event 141 drivers/iio/dummy/iio_dummy_evgen.c iio_evgen->regs[this_attr->address].reg_data = event; event 103 drivers/iio/light/acpi-als.c static void 
acpi_als_notify(struct acpi_device *device, u32 event) event 116 drivers/iio/light/acpi-als.c switch (event) { event 126 drivers/iio/light/acpi-als.c event); event 538 drivers/iio/light/stk3310.c u64 event; event 551 drivers/iio/light/stk3310.c event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, event 555 drivers/iio/light/stk3310.c iio_push_event(indio_dev, event, data->timestamp); event 847 drivers/infiniband/core/addr.c static int netevent_callback(struct notifier_block *self, unsigned long event, event 852 drivers/infiniband/core/addr.c if (event == NETEVENT_NEIGH_UPDATE) { event 54 drivers/infiniband/core/cache.c struct ib_event event; event 126 drivers/infiniband/core/cache.c struct ib_event event; event 128 drivers/infiniband/core/cache.c event.device = ib_dev; event 129 drivers/infiniband/core/cache.c event.element.port_num = port; event 130 drivers/infiniband/core/cache.c event.event = IB_EVENT_GID_CHANGE; event 132 drivers/infiniband/core/cache.c ib_dispatch_event_clients(&event); event 1472 drivers/infiniband/core/cache.c ret = ib_cache_update(work->event.device, work->event.element.port_num, event 1479 drivers/infiniband/core/cache.c if (!ret && work->event.event != IB_EVENT_GID_CHANGE) event 1480 drivers/infiniband/core/cache.c ib_dispatch_event_clients(&work->event); event 1490 drivers/infiniband/core/cache.c ib_dispatch_event_clients(&work->event); event 1494 drivers/infiniband/core/cache.c static bool is_cache_update_event(const struct ib_event *event) event 1496 drivers/infiniband/core/cache.c return (event->event == IB_EVENT_PORT_ERR || event 1497 drivers/infiniband/core/cache.c event->event == IB_EVENT_PORT_ACTIVE || event 1498 drivers/infiniband/core/cache.c event->event == IB_EVENT_LID_CHANGE || event 1499 drivers/infiniband/core/cache.c event->event == IB_EVENT_PKEY_CHANGE || event 1500 drivers/infiniband/core/cache.c event->event == IB_EVENT_CLIENT_REREGISTER || event 1501 drivers/infiniband/core/cache.c event->event == IB_EVENT_GID_CHANGE); event 1512 drivers/infiniband/core/cache.c void ib_dispatch_event(const struct ib_event *event) event 1520 drivers/infiniband/core/cache.c if (is_cache_update_event(event)) event 1525 drivers/infiniband/core/cache.c work->event = *event; event 1526 drivers/infiniband/core/cache.c if (event->event == IB_EVENT_PKEY_CHANGE || event 1527 drivers/infiniband/core/cache.c event->event == IB_EVENT_GID_CHANGE) event 957 drivers/infiniband/core/cm.c timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; event 3750 drivers/infiniband/core/cm.c cm_event.event = IB_CM_REQ_ERROR; event 3755 drivers/infiniband/core/cm.c cm_event.event = IB_CM_REP_ERROR; event 3759 drivers/infiniband/core/cm.c cm_event.event = IB_CM_DREQ_ERROR; event 3763 drivers/infiniband/core/cm.c cm_event.event = IB_CM_SIDR_REQ_ERROR; event 3827 drivers/infiniband/core/cm.c switch (work->cm_event.event) { event 3868 drivers/infiniband/core/cm.c pr_debug("cm_event.event: 0x%x\n", work->cm_event.event); event 3926 drivers/infiniband/core/cm.c work->cm_event.event = IB_CM_USER_ESTABLISHED; event 3971 drivers/infiniband/core/cm.c int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) event 3975 drivers/infiniband/core/cm.c switch (event) { event 3995 drivers/infiniband/core/cm.c enum ib_cm_event_type event; event 4006 drivers/infiniband/core/cm.c event = IB_CM_REQ_RECEIVED; event 4009 drivers/infiniband/core/cm.c event = IB_CM_MRA_RECEIVED; event 4012 drivers/infiniband/core/cm.c event = IB_CM_REJ_RECEIVED; event 4015 drivers/infiniband/core/cm.c event = 
IB_CM_REP_RECEIVED; event 4018 drivers/infiniband/core/cm.c event = IB_CM_RTU_RECEIVED; event 4021 drivers/infiniband/core/cm.c event = IB_CM_DREQ_RECEIVED; event 4024 drivers/infiniband/core/cm.c event = IB_CM_DREP_RECEIVED; event 4027 drivers/infiniband/core/cm.c event = IB_CM_SIDR_REQ_RECEIVED; event 4030 drivers/infiniband/core/cm.c event = IB_CM_SIDR_REP_RECEIVED; event 4034 drivers/infiniband/core/cm.c event = IB_CM_LAP_RECEIVED; event 4037 drivers/infiniband/core/cm.c event = IB_CM_APR_RECEIVED; event 4055 drivers/infiniband/core/cm.c work->cm_event.event = event; event 97 drivers/infiniband/core/cma.c const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) event 99 drivers/infiniband/core/cma.c size_t index = event; event 378 drivers/infiniband/core/cma.c struct rdma_cm_event event; event 384 drivers/infiniband/core/cma.c struct rdma_cm_event event; event 1357 drivers/infiniband/core/cma.c if (ib_event->event == IB_CM_REQ_RECEIVED) event 1360 drivers/infiniband/core/cma.c else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) event 1376 drivers/infiniband/core/cma.c switch (ib_event->event) { event 1491 drivers/infiniband/core/cma.c if (ib_event->event == IB_CM_REQ_RECEIVED) event 1493 drivers/infiniband/core/cma.c else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) event 1906 drivers/infiniband/core/cma.c static void cma_set_rep_event_data(struct rdma_cm_event *event, event 1910 drivers/infiniband/core/cma.c event->param.conn.private_data = private_data; event 1911 drivers/infiniband/core/cma.c event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; event 1912 drivers/infiniband/core/cma.c event->param.conn.responder_resources = rep_data->responder_resources; event 1913 drivers/infiniband/core/cma.c event->param.conn.initiator_depth = rep_data->initiator_depth; event 1914 drivers/infiniband/core/cma.c event->param.conn.flow_control = rep_data->flow_control; event 1915 drivers/infiniband/core/cma.c event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; event 1916 drivers/infiniband/core/cma.c event->param.conn.srq = rep_data->srq; event 1917 drivers/infiniband/core/cma.c event->param.conn.qp_num = rep_data->remote_qpn; event 1924 drivers/infiniband/core/cma.c struct rdma_cm_event event = {}; event 1928 drivers/infiniband/core/cma.c if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && event 1930 drivers/infiniband/core/cma.c (ib_event->event == IB_CM_TIMEWAIT_EXIT && event 1934 drivers/infiniband/core/cma.c switch (ib_event->event) { event 1937 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_UNREACHABLE; event 1938 drivers/infiniband/core/cma.c event.status = -ETIMEDOUT; event 1945 drivers/infiniband/core/cma.c event.status = cma_rep_recv(id_priv); event 1946 drivers/infiniband/core/cma.c event.event = event.status ? 
RDMA_CM_EVENT_CONNECT_ERROR : event 1949 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; event 1951 drivers/infiniband/core/cma.c cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, event 1956 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_ESTABLISHED; event 1959 drivers/infiniband/core/cma.c event.status = -ETIMEDOUT; /* fall through */ event 1965 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_DISCONNECTED; event 1968 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; event 1977 drivers/infiniband/core/cma.c event.status = ib_event->param.rej_rcvd.reason; event 1978 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_REJECTED; event 1979 drivers/infiniband/core/cma.c event.param.conn.private_data = ib_event->private_data; event 1980 drivers/infiniband/core/cma.c event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; event 1984 drivers/infiniband/core/cma.c ib_event->event); event 1988 drivers/infiniband/core/cma.c ret = id_priv->id.event_handler(&id_priv->id, &event); event 2110 drivers/infiniband/core/cma.c static void cma_set_req_event_data(struct rdma_cm_event *event, event 2114 drivers/infiniband/core/cma.c event->param.conn.private_data = private_data + offset; event 2115 drivers/infiniband/core/cma.c event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; event 2116 drivers/infiniband/core/cma.c event->param.conn.responder_resources = req_data->responder_resources; event 2117 drivers/infiniband/core/cma.c event->param.conn.initiator_depth = req_data->initiator_depth; event 2118 drivers/infiniband/core/cma.c event->param.conn.flow_control = req_data->flow_control; event 2119 drivers/infiniband/core/cma.c event->param.conn.retry_count = req_data->retry_count; event 2120 drivers/infiniband/core/cma.c event->param.conn.rnr_retry_count = req_data->rnr_retry_count; event 2121 drivers/infiniband/core/cma.c event->param.conn.srq = req_data->srq; event 2122 drivers/infiniband/core/cma.c event->param.conn.qp_num = req_data->remote_qpn; event 2128 drivers/infiniband/core/cma.c return (((ib_event->event == IB_CM_REQ_RECEIVED) && event 2130 drivers/infiniband/core/cma.c ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && event 2139 drivers/infiniband/core/cma.c struct rdma_cm_event event = {}; event 2161 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_CONNECT_REQUEST; event 2162 drivers/infiniband/core/cma.c if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { event 2164 drivers/infiniband/core/cma.c event.param.ud.private_data = ib_event->private_data + offset; event 2165 drivers/infiniband/core/cma.c event.param.ud.private_data_len = event 2169 drivers/infiniband/core/cma.c cma_set_req_event_data(&event, &ib_event->param.req_rcvd, event 2191 drivers/infiniband/core/cma.c ret = conn_id->id.event_handler(&conn_id->id, &event); event 2268 drivers/infiniband/core/cma.c struct rdma_cm_event event = {}; event 2277 drivers/infiniband/core/cma.c switch (iw_event->event) { event 2279 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_DISCONNECTED; event 2288 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_ESTABLISHED; event 2289 drivers/infiniband/core/cma.c event.param.conn.initiator_depth = iw_event->ird; event 2290 drivers/infiniband/core/cma.c event.param.conn.responder_resources = iw_event->ord; event 2294 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_REJECTED; event 2297 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_UNREACHABLE; event 2300 
drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_CONNECT_ERROR; event 2305 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_ESTABLISHED; event 2306 drivers/infiniband/core/cma.c event.param.conn.initiator_depth = iw_event->ird; event 2307 drivers/infiniband/core/cma.c event.param.conn.responder_resources = iw_event->ord; event 2313 drivers/infiniband/core/cma.c event.status = iw_event->status; event 2314 drivers/infiniband/core/cma.c event.param.conn.private_data = iw_event->private_data; event 2315 drivers/infiniband/core/cma.c event.param.conn.private_data_len = iw_event->private_data_len; event 2316 drivers/infiniband/core/cma.c ret = id_priv->id.event_handler(&id_priv->id, &event); event 2336 drivers/infiniband/core/cma.c struct rdma_cm_event event = {}; event 2341 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_CONNECT_REQUEST; event 2342 drivers/infiniband/core/cma.c event.param.conn.private_data = iw_event->private_data; event 2343 drivers/infiniband/core/cma.c event.param.conn.private_data_len = iw_event->private_data_len; event 2344 drivers/infiniband/core/cma.c event.param.conn.initiator_depth = iw_event->ird; event 2345 drivers/infiniband/core/cma.c event.param.conn.responder_resources = iw_event->ord; event 2393 drivers/infiniband/core/cma.c ret = conn_id->id.event_handler(&conn_id->id, &event); event 2459 drivers/infiniband/core/cma.c struct rdma_cm_event *event) event 2465 drivers/infiniband/core/cma.c return id_priv->id.event_handler(id, event); event 2567 drivers/infiniband/core/cma.c work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; event 2568 drivers/infiniband/core/cma.c work->event.status = status; event 2639 drivers/infiniband/core/cma.c if (id_priv->id.event_handler(&id_priv->id, &work->event)) { event 2662 drivers/infiniband/core/cma.c if (id_priv->id.event_handler(&id_priv->id, &work->event)) { event 2682 drivers/infiniband/core/cma.c work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; event 2692 drivers/infiniband/core/cma.c work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; event 3029 drivers/infiniband/core/cma.c struct rdma_cm_event event = {}; event 3061 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_ADDR_ERROR; event 3062 drivers/infiniband/core/cma.c event.status = status; event 3064 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_ADDR_RESOLVED; event 3066 drivers/infiniband/core/cma.c if (id_priv->id.event_handler(&id_priv->id, &event)) { event 3675 drivers/infiniband/core/cma.c struct rdma_cm_event event = {}; event 3684 drivers/infiniband/core/cma.c switch (ib_event->event) { event 3686 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_UNREACHABLE; event 3687 drivers/infiniband/core/cma.c event.status = -ETIMEDOUT; event 3690 drivers/infiniband/core/cma.c event.param.ud.private_data = ib_event->private_data; event 3691 drivers/infiniband/core/cma.c event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; event 3693 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_UNREACHABLE; event 3694 drivers/infiniband/core/cma.c event.status = ib_event->param.sidr_rep_rcvd.status; event 3696 drivers/infiniband/core/cma.c event.status); event 3702 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_ADDR_ERROR; event 3703 drivers/infiniband/core/cma.c event.status = ret; event 3709 drivers/infiniband/core/cma.c &event.param.ud.ah_attr, event 3711 drivers/infiniband/core/cma.c event.param.ud.qp_num = rep->qpn; event 3712 drivers/infiniband/core/cma.c event.param.ud.qkey = rep->qkey; event 3713 
drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_ESTABLISHED; event 3714 drivers/infiniband/core/cma.c event.status = 0; event 3718 drivers/infiniband/core/cma.c ib_event->event); event 3722 drivers/infiniband/core/cma.c ret = id_priv->id.event_handler(&id_priv->id, &event); event 3724 drivers/infiniband/core/cma.c rdma_destroy_ah_attr(&event.param.ud.ah_attr); event 4074 drivers/infiniband/core/cma.c int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) event 4085 drivers/infiniband/core/cma.c ret = ib_cm_notify(id_priv->cm_id.ib, event); event 4153 drivers/infiniband/core/cma.c struct rdma_cm_event event = {}; event 4177 drivers/infiniband/core/cma.c event.status = status; event 4178 drivers/infiniband/core/cma.c event.param.ud.private_data = mc->context; event 4188 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_MULTICAST_JOIN; event 4193 drivers/infiniband/core/cma.c &event.param.ud.ah_attr); event 4195 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_MULTICAST_ERROR; event 4197 drivers/infiniband/core/cma.c event.param.ud.qp_num = 0xFFFFFF; event 4198 drivers/infiniband/core/cma.c event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); event 4202 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_MULTICAST_ERROR; event 4204 drivers/infiniband/core/cma.c ret = id_priv->id.event_handler(&id_priv->id, &event); event 4206 drivers/infiniband/core/cma.c rdma_destroy_ah_attr(&event.param.ud.ah_attr); event 4523 drivers/infiniband/core/cma.c work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; event 4531 drivers/infiniband/core/cma.c static int cma_netdev_callback(struct notifier_block *self, unsigned long event, event 4539 drivers/infiniband/core/cma.c if (event != NETDEV_BONDING_FAILOVER) event 4622 drivers/infiniband/core/cma.c struct rdma_cm_event event = {}; event 4638 drivers/infiniband/core/cma.c event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; event 4639 drivers/infiniband/core/cma.c ret = id_priv->id.event_handler(&id_priv->id, &event); event 152 drivers/infiniband/core/core_priv.h void ib_dispatch_event_clients(struct ib_event *event); event 191 drivers/infiniband/core/device.c static int ib_security_change(struct notifier_block *nb, unsigned long event, event 840 drivers/infiniband/core/device.c static int ib_security_change(struct notifier_block *nb, unsigned long event, event 843 drivers/infiniband/core/device.c if (event != LSM_POLICY_CHANGE) event 1965 drivers/infiniband/core/device.c void ib_dispatch_event_clients(struct ib_event *event) event 1969 drivers/infiniband/core/device.c down_read(&event->device->event_handler_rwsem); event 1971 drivers/infiniband/core/device.c list_for_each_entry(handler, &event->device->event_handler_list, list) event 1972 drivers/infiniband/core/device.c handler->handler(handler, event); event 1974 drivers/infiniband/core/device.c up_read(&event->device->event_handler_rwsem); event 99 drivers/infiniband/core/iwcm.c struct iw_cm_event event; event 191 drivers/infiniband/core/iwcm.c static int copy_private_data(struct iw_cm_event *event) event 195 drivers/infiniband/core/iwcm.c p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC); event 198 drivers/infiniband/core/iwcm.c event->private_data = p; event 240 drivers/infiniband/core/iwcm.c static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event); event 984 drivers/infiniband/core/iwcm.c switch (iw_event->event) { event 1032 drivers/infiniband/core/iwcm.c levent = work->event; event 1041 drivers/infiniband/core/iwcm.c 
pr_debug("dropping event %d\n", levent.event); event 1085 drivers/infiniband/core/iwcm.c work->event = *iw_event; event 1087 drivers/infiniband/core/iwcm.c if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST || event 1088 drivers/infiniband/core/iwcm.c work->event.event == IW_CM_EVENT_CONNECT_REPLY) && event 1089 drivers/infiniband/core/iwcm.c work->event.private_data_len) { event 1090 drivers/infiniband/core/iwcm.c ret = copy_private_data(&work->event); event 3115 drivers/infiniband/core/mad.c static void qp_event_handler(struct ib_event *event, void *qp_context) event 3122 drivers/infiniband/core/mad.c event->event, qp_info->qp->qp_num); event 793 drivers/infiniband/core/multicast.c struct ib_event *event) event 799 drivers/infiniband/core/multicast.c if (!rdma_cap_ib_mcast(dev->device, event->element.port_num)) event 802 drivers/infiniband/core/multicast.c index = event->element.port_num - dev->start_port; event 804 drivers/infiniband/core/multicast.c switch (event->event) { event 734 drivers/infiniband/core/roce_gid_mgmt.c static int netdevice_event(struct notifier_block *this, unsigned long event, event 757 drivers/infiniband/core/roce_gid_mgmt.c switch (event) { event 814 drivers/infiniband/core/roce_gid_mgmt.c static int addr_event(struct notifier_block *this, unsigned long event, event 823 drivers/infiniband/core/roce_gid_mgmt.c switch (event) { event 854 drivers/infiniband/core/roce_gid_mgmt.c static int inetaddr_event(struct notifier_block *this, unsigned long event, event 865 drivers/infiniband/core/roce_gid_mgmt.c return addr_event(this, event, (struct sockaddr *)&in, ndev); event 868 drivers/infiniband/core/roce_gid_mgmt.c static int inet6addr_event(struct notifier_block *this, unsigned long event, event 879 drivers/infiniband/core/roce_gid_mgmt.c return addr_event(this, event, (struct sockaddr *)&in6, ndev); event 2287 drivers/infiniband/core/sa_query.c struct ib_event *event) event 2289 drivers/infiniband/core/sa_query.c if (event->event == IB_EVENT_PORT_ERR || event 2290 drivers/infiniband/core/sa_query.c event->event == IB_EVENT_PORT_ACTIVE || event 2291 drivers/infiniband/core/sa_query.c event->event == IB_EVENT_LID_CHANGE || event 2292 drivers/infiniband/core/sa_query.c event->event == IB_EVENT_PKEY_CHANGE || event 2293 drivers/infiniband/core/sa_query.c event->event == IB_EVENT_SM_CHANGE || event 2294 drivers/infiniband/core/sa_query.c event->event == IB_EVENT_CLIENT_REREGISTER) { event 2298 drivers/infiniband/core/sa_query.c u8 port_num = event->element.port_num - sa_dev->start_port; event 2310 drivers/infiniband/core/sa_query.c if (event->event == IB_EVENT_SM_CHANGE || event 2311 drivers/infiniband/core/sa_query.c event->event == IB_EVENT_CLIENT_REREGISTER || event 2312 drivers/infiniband/core/sa_query.c event->event == IB_EVENT_LID_CHANGE || event 2313 drivers/infiniband/core/sa_query.c event->event == IB_EVENT_PORT_ACTIVE) { event 159 drivers/infiniband/core/security.c struct ib_event event = { event 160 drivers/infiniband/core/security.c .event = IB_EVENT_QP_FATAL event 175 drivers/infiniband/core/security.c event.element.qp = sec->qp; event 176 drivers/infiniband/core/security.c sec->qp->event_handler(&event, event 186 drivers/infiniband/core/security.c event.element.qp = qp; event 187 drivers/infiniband/core/security.c event.device = qp->device; event 188 drivers/infiniband/core/security.c qp->event_handler(&event, event 283 drivers/infiniband/core/ucma.c struct rdma_cm_event *event, event 287 drivers/infiniband/core/ucma.c switch (event->event) { event 291 
drivers/infiniband/core/ucma.c event->param.ud.private_data; event 328 drivers/infiniband/core/ucma.c con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { event 341 drivers/infiniband/core/ucma.c struct rdma_cm_event *event) event 349 drivers/infiniband/core/ucma.c return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; event 353 drivers/infiniband/core/ucma.c ucma_set_event_context(ctx, event, uevent); event 354 drivers/infiniband/core/ucma.c uevent->resp.event = event->event; event 355 drivers/infiniband/core/ucma.c uevent->resp.status = event->status; event 358 drivers/infiniband/core/ucma.c &event->param.ud); event 361 drivers/infiniband/core/ucma.c &event->param.conn); event 363 drivers/infiniband/core/ucma.c if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { event 379 drivers/infiniband/core/ucma.c if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) event 388 drivers/infiniband/core/ucma.c if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) event 429 drivers/infiniband/core/ucma.c if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { event 588 drivers/infiniband/core/ucma.c if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) event 1281 drivers/infiniband/core/ucma.c struct rdma_cm_event event; event 1319 drivers/infiniband/core/ucma.c memset(&event, 0, sizeof event); event 1320 drivers/infiniband/core/ucma.c event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; event 1321 drivers/infiniband/core/ucma.c return ucma_event_handler(ctx->cm_id, &event); event 1411 drivers/infiniband/core/ucma.c ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); event 235 drivers/infiniband/core/uverbs.h void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); event 236 drivers/infiniband/core/uverbs.h void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); event 237 drivers/infiniband/core/uverbs.h void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr); event 238 drivers/infiniband/core/uverbs.h void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); event 240 drivers/infiniband/core/uverbs.h struct ib_event *event); event 227 drivers/infiniband/core/uverbs_main.c struct ib_uverbs_event *event; event 252 drivers/infiniband/core/uverbs_main.c event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list); event 256 drivers/infiniband/core/uverbs_main.c event = NULL; event 259 drivers/infiniband/core/uverbs_main.c if (event->counter) { event 260 drivers/infiniband/core/uverbs_main.c ++(*event->counter); event 261 drivers/infiniband/core/uverbs_main.c list_del(&event->obj_list); event 267 drivers/infiniband/core/uverbs_main.c if (event) { event 268 drivers/infiniband/core/uverbs_main.c if (copy_to_user(buf, event, eventsz)) event 274 drivers/infiniband/core/uverbs_main.c kfree(event); event 453 drivers/infiniband/core/uverbs_main.c __u64 element, __u64 event, event 473 drivers/infiniband/core/uverbs_main.c entry->desc.async.event_type = event; event 486 drivers/infiniband/core/uverbs_main.c void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr) event 488 drivers/infiniband/core/uverbs_main.c struct ib_ucq_object *uobj = container_of(event->element.cq->uobject, event 492 drivers/infiniband/core/uverbs_main.c event->event, &uobj->async_list, event 496 drivers/infiniband/core/uverbs_main.c void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) event 501 drivers/infiniband/core/uverbs_main.c if (!event->element.qp->uobject) event 504 
drivers/infiniband/core/uverbs_main.c uobj = container_of(event->element.qp->uobject, event 508 drivers/infiniband/core/uverbs_main.c event->event, &uobj->event_list, event 512 drivers/infiniband/core/uverbs_main.c void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr) event 514 drivers/infiniband/core/uverbs_main.c struct ib_uevent_object *uobj = container_of(event->element.wq->uobject, event 518 drivers/infiniband/core/uverbs_main.c event->event, &uobj->event_list, event 522 drivers/infiniband/core/uverbs_main.c void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) event 526 drivers/infiniband/core/uverbs_main.c uobj = container_of(event->element.srq->uobject, event 530 drivers/infiniband/core/uverbs_main.c event->event, &uobj->event_list, event 535 drivers/infiniband/core/uverbs_main.c struct ib_event *event) event 540 drivers/infiniband/core/uverbs_main.c ib_uverbs_async_handler(file, event->element.port_num, event->event, event 1332 drivers/infiniband/core/uverbs_main.c struct ib_event event; event 1336 drivers/infiniband/core/uverbs_main.c event.event = IB_EVENT_DEVICE_FATAL; event 1337 drivers/infiniband/core/uverbs_main.c event.element.port_num = 0; event 1338 drivers/infiniband/core/uverbs_main.c event.device = ib_dev; event 1354 drivers/infiniband/core/uverbs_main.c ib_uverbs_event_handler(&file->event_handler, &event); event 81 drivers/infiniband/core/verbs.c const char *__attribute_const__ ib_event_msg(enum ib_event_type event) event 83 drivers/infiniband/core/verbs.c size_t index = event; event 1049 drivers/infiniband/core/verbs.c static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) event 1055 drivers/infiniband/core/verbs.c list_for_each_entry(event->element.qp, &qp->open_list, open_list) event 1056 drivers/infiniband/core/verbs.c if (event->element.qp->event_handler) event 1057 drivers/infiniband/core/verbs.c event->element.qp->event_handler(event, event->element.qp->qp_context); event 94 drivers/infiniband/hw/bnxt_re/bnxt_re.h unsigned long event; event 757 drivers/infiniband/hw/bnxt_re/main.c switch (unaffi_async->event) { event 789 drivers/infiniband/hw/bnxt_re/main.c struct ib_event event; event 798 drivers/infiniband/hw/bnxt_re/main.c memset(&event, 0, sizeof(event)); event 800 drivers/infiniband/hw/bnxt_re/main.c event.device = &qp->rdev->ibdev; event 801 drivers/infiniband/hw/bnxt_re/main.c event.element.qp = &qp->ib_qp; event 802 drivers/infiniband/hw/bnxt_re/main.c event.event = IB_EVENT_QP_LAST_WQE_REACHED; event 805 drivers/infiniband/hw/bnxt_re/main.c if (event.device && qp->ib_qp.event_handler) event 806 drivers/infiniband/hw/bnxt_re/main.c qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context); event 815 drivers/infiniband/hw/bnxt_re/main.c u8 event; event 820 drivers/infiniband/hw/bnxt_re/main.c event = affi_async->event; event 821 drivers/infiniband/hw/bnxt_re/main.c if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) { event 851 drivers/infiniband/hw/bnxt_re/main.c struct bnxt_qplib_srq *handle, u8 event) event 866 drivers/infiniband/hw/bnxt_re/main.c if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT) event 867 drivers/infiniband/hw/bnxt_re/main.c ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED; event 869 drivers/infiniband/hw/bnxt_re/main.c ib_event.event = IB_EVENT_SRQ_ERR; event 1043 drivers/infiniband/hw/bnxt_re/main.c u8 port_num, enum ib_event_type event) event 1050 drivers/infiniband/hw/bnxt_re/main.c ib_event.event = event; event 1056 drivers/infiniband/hw/bnxt_re/main.c 
ib_event.event = event; event 1550 drivers/infiniband/hw/bnxt_re/main.c if (re_work->event != NETDEV_REGISTER && event 1554 drivers/infiniband/hw/bnxt_re/main.c switch (re_work->event) { event 1610 drivers/infiniband/hw/bnxt_re/main.c unsigned long event, void *ptr) event 1623 drivers/infiniband/hw/bnxt_re/main.c if (!rdev && event != NETDEV_REGISTER) event 1628 drivers/infiniband/hw/bnxt_re/main.c switch (event) { event 1664 drivers/infiniband/hw/bnxt_re/main.c re_work->event = event; event 298 drivers/infiniband/hw/bnxt_re/qplib_fp.c nqsrqe->event)) event 303 drivers/infiniband/hw/bnxt_re/qplib_fp.c nqsrqe->event); event 421 drivers/infiniband/hw/bnxt_re/qplib_fp.c u8 event)) event 493 drivers/infiniband/hw/bnxt_re/qplib_fp.h u8 event); event 514 drivers/infiniband/hw/bnxt_re/qplib_fp.h u8 event)); event 256 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c switch (func_event->event) { event 305 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c switch (qp_event->event) { event 820 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 839 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 1973 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 1990 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2022 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2073 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2093 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2113 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2147 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2246 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2266 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2290 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2325 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2345 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2371 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2391 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2411 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2432 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2452 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2473 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2493 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2513 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2533 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2585 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2605 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2625 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2645 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2665 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2685 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2705 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2725 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2783 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2803 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2826 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2850 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2870 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2917 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 2999 drivers/infiniband/hw/bnxt_re/roce_hsi.h u8 event; event 199 drivers/infiniband/hw/cxgb3/iwch.c struct ib_event event; event 210 drivers/infiniband/hw/cxgb3/iwch.c event.event = IB_EVENT_DEVICE_FATAL; event 215 drivers/infiniband/hw/cxgb3/iwch.c event.event = 
IB_EVENT_PORT_ERR; event 220 drivers/infiniband/hw/cxgb3/iwch.c event.event = IB_EVENT_PORT_ACTIVE; event 250 drivers/infiniband/hw/cxgb3/iwch.c event.device = &rnicp->ibdev; event 251 drivers/infiniband/hw/cxgb3/iwch.c event.element.port_num = portnum; event 252 drivers/infiniband/hw/cxgb3/iwch.c ib_dispatch_event(&event); event 666 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iw_cm_event event; event 669 drivers/infiniband/hw/cxgb3/iwch_cm.c memset(&event, 0, sizeof(event)); event 670 drivers/infiniband/hw/cxgb3/iwch_cm.c event.event = IW_CM_EVENT_CLOSE; event 674 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 683 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iw_cm_event event; event 686 drivers/infiniband/hw/cxgb3/iwch_cm.c memset(&event, 0, sizeof(event)); event 687 drivers/infiniband/hw/cxgb3/iwch_cm.c event.event = IW_CM_EVENT_DISCONNECT; event 691 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 697 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iw_cm_event event; event 700 drivers/infiniband/hw/cxgb3/iwch_cm.c memset(&event, 0, sizeof(event)); event 701 drivers/infiniband/hw/cxgb3/iwch_cm.c event.event = IW_CM_EVENT_CLOSE; event 702 drivers/infiniband/hw/cxgb3/iwch_cm.c event.status = -ECONNRESET; event 706 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 715 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iw_cm_event event; event 718 drivers/infiniband/hw/cxgb3/iwch_cm.c memset(&event, 0, sizeof(event)); event 719 drivers/infiniband/hw/cxgb3/iwch_cm.c event.event = IW_CM_EVENT_CONNECT_REPLY; event 720 drivers/infiniband/hw/cxgb3/iwch_cm.c event.status = status; event 721 drivers/infiniband/hw/cxgb3/iwch_cm.c memcpy(&event.local_addr, &ep->com.local_addr, event 723 drivers/infiniband/hw/cxgb3/iwch_cm.c memcpy(&event.remote_addr, &ep->com.remote_addr, event 727 drivers/infiniband/hw/cxgb3/iwch_cm.c event.private_data_len = ep->plen; event 728 drivers/infiniband/hw/cxgb3/iwch_cm.c event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); event 733 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 744 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iw_cm_event event; event 747 drivers/infiniband/hw/cxgb3/iwch_cm.c memset(&event, 0, sizeof(event)); event 748 drivers/infiniband/hw/cxgb3/iwch_cm.c event.event = IW_CM_EVENT_CONNECT_REQUEST; event 749 drivers/infiniband/hw/cxgb3/iwch_cm.c memcpy(&event.local_addr, &ep->com.local_addr, event 751 drivers/infiniband/hw/cxgb3/iwch_cm.c memcpy(&event.remote_addr, &ep->com.remote_addr, event 753 drivers/infiniband/hw/cxgb3/iwch_cm.c event.private_data_len = ep->plen; event 754 drivers/infiniband/hw/cxgb3/iwch_cm.c event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); event 755 drivers/infiniband/hw/cxgb3/iwch_cm.c event.provider_data = ep; event 760 drivers/infiniband/hw/cxgb3/iwch_cm.c event.ird = event.ord = 8; event 765 drivers/infiniband/hw/cxgb3/iwch_cm.c &event); event 773 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iw_cm_event event; event 776 drivers/infiniband/hw/cxgb3/iwch_cm.c memset(&event, 0, sizeof(event)); event 777 drivers/infiniband/hw/cxgb3/iwch_cm.c event.event = IW_CM_EVENT_ESTABLISHED; event 782 drivers/infiniband/hw/cxgb3/iwch_cm.c event.ird = event.ord = 8; event 785 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 46 drivers/infiniband/hw/cxgb3/iwch_ev.c struct ib_event event; 
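The iwch.c hits just above and the cxgb4/device.c, hfi1/mad.c, hns_roce_hw_v2.c, i40iw_verbs.c and mlx4/mad.c hits that follow all converge on the same pattern: a hardware driver fills a struct ib_event and hands it to ib_dispatch_event(), which the IB core delivers to the handlers registered against that device (for cache-affecting event types via the update work shown in the cache.c hits earlier). Below is a minimal sketch of that pattern, assuming only the struct ib_event and ib_dispatch_event() declarations from <rdma/ib_verbs.h>; the example_report_port_active() helper and its parameters are hypothetical, not taken from any file indexed here.

    #include <rdma/ib_verbs.h>

    /* Hypothetical helper: report that a port on this device became active. */
    static void example_report_port_active(struct ib_device *ibdev, u8 port_num)
    {
            struct ib_event event = {};

            event.device = ibdev;               /* device the event belongs to  */
            event.element.port_num = port_num;  /* port whose state changed     */
            event.event = IB_EVENT_PORT_ACTIVE; /* asynchronous event type      */

            ib_dispatch_event(&event);          /* hand off to the IB core for
                                                 * delivery to registered
                                                 * ib_event_handler clients     */
    }

Consumers observe these events through an ib_event_handler registered with the core; the cache.c and sa_query.c hits earlier in this index show the core-side handlers switching on event->event in exactly this way.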
event 89 drivers/infiniband/hw/cxgb3/iwch_ev.c event.event = ib_event; event 90 drivers/infiniband/hw/cxgb3/iwch_ev.c event.device = chp->ibcq.device; event 92 drivers/infiniband/hw/cxgb3/iwch_ev.c event.element.cq = &chp->ibcq; event 94 drivers/infiniband/hw/cxgb3/iwch_ev.c event.element.qp = &qhp->ibqp; event 97 drivers/infiniband/hw/cxgb3/iwch_ev.c (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); event 1269 drivers/infiniband/hw/cxgb4/cm.c struct iw_cm_event event; event 1272 drivers/infiniband/hw/cxgb4/cm.c memset(&event, 0, sizeof(event)); event 1273 drivers/infiniband/hw/cxgb4/cm.c event.event = IW_CM_EVENT_CLOSE; event 1274 drivers/infiniband/hw/cxgb4/cm.c event.status = status; event 1278 drivers/infiniband/hw/cxgb4/cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 1286 drivers/infiniband/hw/cxgb4/cm.c struct iw_cm_event event; event 1289 drivers/infiniband/hw/cxgb4/cm.c memset(&event, 0, sizeof(event)); event 1290 drivers/infiniband/hw/cxgb4/cm.c event.event = IW_CM_EVENT_DISCONNECT; event 1294 drivers/infiniband/hw/cxgb4/cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 1301 drivers/infiniband/hw/cxgb4/cm.c struct iw_cm_event event; event 1304 drivers/infiniband/hw/cxgb4/cm.c memset(&event, 0, sizeof(event)); event 1305 drivers/infiniband/hw/cxgb4/cm.c event.event = IW_CM_EVENT_CLOSE; event 1306 drivers/infiniband/hw/cxgb4/cm.c event.status = -ECONNRESET; event 1310 drivers/infiniband/hw/cxgb4/cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 1318 drivers/infiniband/hw/cxgb4/cm.c struct iw_cm_event event; event 1322 drivers/infiniband/hw/cxgb4/cm.c memset(&event, 0, sizeof(event)); event 1323 drivers/infiniband/hw/cxgb4/cm.c event.event = IW_CM_EVENT_CONNECT_REPLY; event 1324 drivers/infiniband/hw/cxgb4/cm.c event.status = status; event 1325 drivers/infiniband/hw/cxgb4/cm.c memcpy(&event.local_addr, &ep->com.local_addr, event 1327 drivers/infiniband/hw/cxgb4/cm.c memcpy(&event.remote_addr, &ep->com.remote_addr, event 1333 drivers/infiniband/hw/cxgb4/cm.c event.ord = ep->ird; event 1334 drivers/infiniband/hw/cxgb4/cm.c event.ird = ep->ord; event 1335 drivers/infiniband/hw/cxgb4/cm.c event.private_data_len = ep->plen - event 1337 drivers/infiniband/hw/cxgb4/cm.c event.private_data = ep->mpa_pkt + event 1342 drivers/infiniband/hw/cxgb4/cm.c event.ord = cur_max_read_depth(ep->com.dev); event 1343 drivers/infiniband/hw/cxgb4/cm.c event.ird = cur_max_read_depth(ep->com.dev); event 1344 drivers/infiniband/hw/cxgb4/cm.c event.private_data_len = ep->plen; event 1345 drivers/infiniband/hw/cxgb4/cm.c event.private_data = ep->mpa_pkt + event 1353 drivers/infiniband/hw/cxgb4/cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 1361 drivers/infiniband/hw/cxgb4/cm.c struct iw_cm_event event; event 1365 drivers/infiniband/hw/cxgb4/cm.c memset(&event, 0, sizeof(event)); event 1366 drivers/infiniband/hw/cxgb4/cm.c event.event = IW_CM_EVENT_CONNECT_REQUEST; event 1367 drivers/infiniband/hw/cxgb4/cm.c memcpy(&event.local_addr, &ep->com.local_addr, event 1369 drivers/infiniband/hw/cxgb4/cm.c memcpy(&event.remote_addr, &ep->com.remote_addr, event 1371 drivers/infiniband/hw/cxgb4/cm.c event.provider_data = ep; event 1374 drivers/infiniband/hw/cxgb4/cm.c event.ord = ep->ord; event 1375 drivers/infiniband/hw/cxgb4/cm.c event.ird = ep->ird; event 1376 drivers/infiniband/hw/cxgb4/cm.c event.private_data_len = ep->plen - event 1378 drivers/infiniband/hw/cxgb4/cm.c event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + event 1382 
drivers/infiniband/hw/cxgb4/cm.c event.ord = cur_max_read_depth(ep->com.dev); event 1383 drivers/infiniband/hw/cxgb4/cm.c event.ird = cur_max_read_depth(ep->com.dev); event 1384 drivers/infiniband/hw/cxgb4/cm.c event.private_data_len = ep->plen; event 1385 drivers/infiniband/hw/cxgb4/cm.c event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); event 1389 drivers/infiniband/hw/cxgb4/cm.c &event); event 1399 drivers/infiniband/hw/cxgb4/cm.c struct iw_cm_event event; event 1402 drivers/infiniband/hw/cxgb4/cm.c memset(&event, 0, sizeof(event)); event 1403 drivers/infiniband/hw/cxgb4/cm.c event.event = IW_CM_EVENT_ESTABLISHED; event 1404 drivers/infiniband/hw/cxgb4/cm.c event.ird = ep->ord; event 1405 drivers/infiniband/hw/cxgb4/cm.c event.ord = ep->ird; event 1408 drivers/infiniband/hw/cxgb4/cm.c ep->com.cm_id->event_handler(ep->com.cm_id, &event); event 1249 drivers/infiniband/hw/cxgb4/device.c struct ib_event event = {}; event 1252 drivers/infiniband/hw/cxgb4/device.c event.event = IB_EVENT_DEVICE_FATAL; event 1253 drivers/infiniband/hw/cxgb4/device.c event.device = &ctx->dev->ibdev; event 1254 drivers/infiniband/hw/cxgb4/device.c ib_dispatch_event(&event); event 92 drivers/infiniband/hw/cxgb4/ev.c struct ib_event event; event 104 drivers/infiniband/hw/cxgb4/ev.c event.event = ib_event; event 105 drivers/infiniband/hw/cxgb4/ev.c event.device = chp->ibcq.device; event 107 drivers/infiniband/hw/cxgb4/ev.c event.element.cq = &chp->ibcq; event 109 drivers/infiniband/hw/cxgb4/ev.c event.element.qp = &qhp->ibqp; event 111 drivers/infiniband/hw/cxgb4/ev.c (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); event 2426 drivers/infiniband/hw/cxgb4/qp.c struct ib_event event = {}; event 2428 drivers/infiniband/hw/cxgb4/qp.c event.device = &srq->rhp->ibdev; event 2429 drivers/infiniband/hw/cxgb4/qp.c event.element.srq = &srq->ibsrq; event 2430 drivers/infiniband/hw/cxgb4/qp.c event.event = IB_EVENT_SRQ_LIMIT_REACHED; event 2431 drivers/infiniband/hw/cxgb4/qp.c ib_dispatch_event(&event); event 10631 drivers/infiniband/hw/hfi1/chip.c struct ib_event event = {.device = NULL}; event 10771 drivers/infiniband/hw/hfi1/chip.c event.device = &dd->verbs_dev.rdi.ibdev; event 10772 drivers/infiniband/hw/hfi1/chip.c event.element.port_num = ppd->port; event 10773 drivers/infiniband/hw/hfi1/chip.c event.event = IB_EVENT_PORT_ACTIVE; event 10923 drivers/infiniband/hw/hfi1/chip.c if (event.device) event 10924 drivers/infiniband/hw/hfi1/chip.c ib_dispatch_event(&event); event 125 drivers/infiniband/hw/hfi1/intr.c struct ib_event event; event 135 drivers/infiniband/hw/hfi1/intr.c event.device = &dd->verbs_dev.rdi.ibdev; event 136 drivers/infiniband/hw/hfi1/intr.c event.element.port_num = ppd->port; event 137 drivers/infiniband/hw/hfi1/intr.c event.event = ev; event 138 drivers/infiniband/hw/hfi1/intr.c ib_dispatch_event(&event); event 113 drivers/infiniband/hw/hfi1/mad.c struct ib_event event; event 115 drivers/infiniband/hw/hfi1/mad.c event.event = IB_EVENT_PKEY_CHANGE; event 116 drivers/infiniband/hw/hfi1/mad.c event.device = &dd->verbs_dev.rdi.ibdev; event 117 drivers/infiniband/hw/hfi1/mad.c event.element.port_num = port; event 118 drivers/infiniband/hw/hfi1/mad.c ib_dispatch_event(&event); event 1356 drivers/infiniband/hw/hfi1/mad.c struct ib_event event; event 1402 drivers/infiniband/hw/hfi1/mad.c event.device = ibdev; event 1403 drivers/infiniband/hw/hfi1/mad.c event.element.port_num = port; event 1410 drivers/infiniband/hw/hfi1/mad.c event.event = IB_EVENT_GID_CHANGE; event 1411 
drivers/infiniband/hw/hfi1/mad.c ib_dispatch_event(&event); event 1428 drivers/infiniband/hw/hfi1/mad.c event.event = IB_EVENT_LID_CHANGE; event 1429 drivers/infiniband/hw/hfi1/mad.c ib_dispatch_event(&event); event 1437 drivers/infiniband/hw/hfi1/mad.c event.event = IB_EVENT_GID_CHANGE; event 1438 drivers/infiniband/hw/hfi1/mad.c ib_dispatch_event(&event); event 1467 drivers/infiniband/hw/hfi1/mad.c event.event = IB_EVENT_SM_CHANGE; event 1468 drivers/infiniband/hw/hfi1/mad.c ib_dispatch_event(&event); event 1625 drivers/infiniband/hw/hfi1/mad.c event.event = IB_EVENT_CLIENT_REREGISTER; event 1626 drivers/infiniband/hw/hfi1/mad.c ib_dispatch_event(&event); event 842 drivers/infiniband/hw/hfi1/qp.c ev.event = IB_EVENT_PATH_MIG; event 957 drivers/infiniband/hw/hfi1/qp.c ev.event = IB_EVENT_QP_LAST_WQE_REACHED; event 244 drivers/infiniband/hw/hfi1/sdma.c enum sdma_events event); event 247 drivers/infiniband/hw/hfi1/sdma.c enum sdma_events event); event 2544 drivers/infiniband/hw/hfi1/sdma.c static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) event 2551 drivers/infiniband/hw/hfi1/sdma.c __sdma_process_event(sde, event); event 2561 drivers/infiniband/hw/hfi1/sdma.c enum sdma_events event) event 2570 drivers/infiniband/hw/hfi1/sdma.c sdma_event_names[event]); event 2575 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2621 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2662 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2703 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2745 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2784 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2826 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2867 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2907 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2946 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 2990 drivers/infiniband/hw/hfi1/sdma.c switch (event) { event 3038 drivers/infiniband/hw/hfi1/sdma.c ss->last_event = event; event 3342 drivers/infiniband/hw/hfi1/sdma.c enum sdma_events event = link_down ? 
sdma_event_e85_link_down : event 3350 drivers/infiniband/hw/hfi1/sdma.c sdma_process_event(&dd->per_sdma[i], event); event 4003 drivers/infiniband/hw/hfi1/tid_rdma.c ev.event = IB_EVENT_QP_FATAL; event 46 drivers/infiniband/hw/hns/hns_roce_cmd.c int event) event 53 drivers/infiniband/hw/hns/hns_roce_cmd.c op_modifier, op, token, event); event 53 drivers/infiniband/hw/hns/hns_roce_cq.c struct ib_event event; event 69 drivers/infiniband/hw/hns/hns_roce_cq.c event.device = ibcq->device; event 70 drivers/infiniband/hw/hns/hns_roce_cq.c event.event = IB_EVENT_CQ_ERR; event 71 drivers/infiniband/hw/hns/hns_roce_cq.c event.element.cq = ibcq; event 72 drivers/infiniband/hw/hns/hns_roce_cq.c ibcq->event_handler(&event, ibcq->cq_context); event 467 drivers/infiniband/hw/hns/hns_roce_cq.c hr_cq->event = hns_roce_ib_cq_event; event 551 drivers/infiniband/hw/hns/hns_roce_cq.c cq->event(cq, (enum hns_roce_event)event_type); event 498 drivers/infiniband/hw/hns/hns_roce_device.h void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type); event 524 drivers/infiniband/hw/hns/hns_roce_device.h void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event); event 684 drivers/infiniband/hw/hns/hns_roce_device.h void (*event)(struct hns_roce_qp *qp, event 750 drivers/infiniband/hw/hns/hns_roce_device.h } event; event 937 drivers/infiniband/hw/hns/hns_roce_device.h u16 token, int event); event 1674 drivers/infiniband/hw/hns/hns_roce_hw_v1.c u16 op, u16 token, int event) event 1696 drivers/infiniband/hw/hns/hns_roce_hw_v1.c roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event); event 3777 drivers/infiniband/hw/hns/hns_roce_hw_v1.c qpn = roce_get_field(aeqe->event.qp_event.qp, event 3780 drivers/infiniband/hw/hns/hns_roce_hw_v1.c phy_port = roce_get_field(aeqe->event.qp_event.qp, event 3811 drivers/infiniband/hw/hns/hns_roce_hw_v1.c cqn = roce_get_field(aeqe->event.cq_event.cq, event 3936 drivers/infiniband/hw/hns/hns_roce_hw_v1.c le16_to_cpu(aeqe->event.cmd.token), event 3937 drivers/infiniband/hw/hns/hns_roce_hw_v1.c aeqe->event.cmd.status, event 3938 drivers/infiniband/hw/hns/hns_roce_hw_v1.c le64_to_cpu(aeqe->event.cmd.out_param event 3946 drivers/infiniband/hw/hns/hns_roce_hw_v1.c roce_get_field(aeqe->event.ce_event.ceqe, event 2065 drivers/infiniband/hw/hns/hns_roce_hw_v2.c u16 op, u16 token, int event) event 2077 drivers/infiniband/hw/hns/hns_roce_hw_v2.c mb->token_event_en = cpu_to_le32(event << 16 | token); event 2084 drivers/infiniband/hw/hns/hns_roce_hw_v2.c u16 op, u16 token, int event) event 2101 drivers/infiniband/hw/hns/hns_roce_hw_v2.c op_modifier, op, token, event); event 5047 drivers/infiniband/hw/hns/hns_roce_hw_v2.c qpn = roce_get_field(aeqe->event.qp_event.qp, event 5050 drivers/infiniband/hw/hns/hns_roce_hw_v2.c cqn = roce_get_field(aeqe->event.cq_event.cq, event 5053 drivers/infiniband/hw/hns/hns_roce_hw_v2.c srqn = roce_get_field(aeqe->event.srq_event.srq, event 5080 drivers/infiniband/hw/hns/hns_roce_hw_v2.c le16_to_cpu(aeqe->event.cmd.token), event 5081 drivers/infiniband/hw/hns/hns_roce_hw_v2.c aeqe->event.cmd.status, event 5082 drivers/infiniband/hw/hns/hns_roce_hw_v2.c le64_to_cpu(aeqe->event.cmd.out_param)); event 6528 drivers/infiniband/hw/hns/hns_roce_hw_v2.c struct ib_event event; event 6546 drivers/infiniband/hw/hns/hns_roce_hw_v2.c event.event = IB_EVENT_DEVICE_FATAL; event 6547 drivers/infiniband/hw/hns/hns_roce_hw_v2.c event.device = &hr_dev->ib_dev; event 6548 drivers/infiniband/hw/hns/hns_roce_hw_v2.c event.element.port_num = 1; event 6549 
drivers/infiniband/hw/hns/hns_roce_hw_v2.c ib_dispatch_event(&event); event 106 drivers/infiniband/hw/hns/hns_roce_main.c unsigned long event) event 118 drivers/infiniband/hw/hns/hns_roce_main.c switch (event) { event 131 drivers/infiniband/hw/hns/hns_roce_main.c dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event)); event 139 drivers/infiniband/hw/hns/hns_roce_main.c unsigned long event, void *ptr) event 152 drivers/infiniband/hw/hns/hns_roce_main.c ret = handle_en_event(hr_dev, port, event); event 62 drivers/infiniband/hw/hns/hns_roce_qp.c qp->event(qp, (enum hns_roce_event)event_type); event 71 drivers/infiniband/hw/hns/hns_roce_qp.c struct ib_event event; event 75 drivers/infiniband/hw/hns/hns_roce_qp.c event.device = ibqp->device; event 76 drivers/infiniband/hw/hns/hns_roce_qp.c event.element.qp = ibqp; event 79 drivers/infiniband/hw/hns/hns_roce_qp.c event.event = IB_EVENT_PATH_MIG; event 82 drivers/infiniband/hw/hns/hns_roce_qp.c event.event = IB_EVENT_COMM_EST; event 85 drivers/infiniband/hw/hns/hns_roce_qp.c event.event = IB_EVENT_SQ_DRAINED; event 88 drivers/infiniband/hw/hns/hns_roce_qp.c event.event = IB_EVENT_QP_LAST_WQE_REACHED; event 91 drivers/infiniband/hw/hns/hns_roce_qp.c event.event = IB_EVENT_QP_FATAL; event 94 drivers/infiniband/hw/hns/hns_roce_qp.c event.event = IB_EVENT_PATH_MIG_ERR; event 97 drivers/infiniband/hw/hns/hns_roce_qp.c event.event = IB_EVENT_QP_REQ_ERR; event 100 drivers/infiniband/hw/hns/hns_roce_qp.c event.event = IB_EVENT_QP_ACCESS_ERR; event 107 drivers/infiniband/hw/hns/hns_roce_qp.c ibqp->event_handler(&event, ibqp->qp_context); event 952 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->event = hns_roce_ib_qp_event; event 28 drivers/infiniband/hw/hns/hns_roce_srq.c srq->event(srq, event_type); event 39 drivers/infiniband/hw/hns/hns_roce_srq.c struct ib_event event; event 42 drivers/infiniband/hw/hns/hns_roce_srq.c event.device = ibsrq->device; event 43 drivers/infiniband/hw/hns/hns_roce_srq.c event.element.srq = ibsrq; event 46 drivers/infiniband/hw/hns/hns_roce_srq.c event.event = IB_EVENT_SRQ_LIMIT_REACHED; event 49 drivers/infiniband/hw/hns/hns_roce_srq.c event.event = IB_EVENT_SRQ_ERR; event 58 drivers/infiniband/hw/hns/hns_roce_srq.c ibsrq->event_handler(&event, ibsrq->srq_context); event 418 drivers/infiniband/hw/hns/hns_roce_srq.c srq->event = hns_roce_ib_srq_event; event 590 drivers/infiniband/hw/i40iw/i40iw.h unsigned long event, event 593 drivers/infiniband/hw/i40iw/i40iw.h unsigned long event, event 596 drivers/infiniband/hw/i40iw/i40iw.h unsigned long event, event 599 drivers/infiniband/hw/i40iw/i40iw.h unsigned long event, event 67 drivers/infiniband/hw/i40iw/i40iw_cm.c static void i40iw_cm_post_event(struct i40iw_cm_event *event); event 171 drivers/infiniband/hw/i40iw/i40iw_cm.c struct iw_cm_event *event) event 173 drivers/infiniband/hw/i40iw/i40iw_cm.c struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr; event 174 drivers/infiniband/hw/i40iw/i40iw_cm.c struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr; event 192 drivers/infiniband/hw/i40iw/i40iw_cm.c struct iw_cm_event *event) event 194 drivers/infiniband/hw/i40iw/i40iw_cm.c struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr; event 195 drivers/infiniband/hw/i40iw/i40iw_cm.c struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr; event 234 drivers/infiniband/hw/i40iw/i40iw_cm.c struct iw_cm_event *event) event 236 drivers/infiniband/hw/i40iw/i40iw_cm.c memcpy(&event->local_addr, &cm_id->m_local_addr, event 237 
drivers/infiniband/hw/i40iw/i40iw_cm.c sizeof(event->local_addr)); event 238 drivers/infiniband/hw/i40iw/i40iw_cm.c memcpy(&event->remote_addr, &cm_id->m_remote_addr, event 239 drivers/infiniband/hw/i40iw/i40iw_cm.c sizeof(event->remote_addr)); event 241 drivers/infiniband/hw/i40iw/i40iw_cm.c event->private_data = (void *)cm_node->pdata_buf; event 242 drivers/infiniband/hw/i40iw/i40iw_cm.c event->private_data_len = (u8)cm_node->pdata.size; event 243 drivers/infiniband/hw/i40iw/i40iw_cm.c event->ird = cm_node->ird_size; event 244 drivers/infiniband/hw/i40iw/i40iw_cm.c event->ord = cm_node->ord_size; event 260 drivers/infiniband/hw/i40iw/i40iw_cm.c struct iw_cm_event event; event 262 drivers/infiniband/hw/i40iw/i40iw_cm.c memset(&event, 0, sizeof(event)); event 263 drivers/infiniband/hw/i40iw/i40iw_cm.c event.event = type; event 264 drivers/infiniband/hw/i40iw/i40iw_cm.c event.status = status; event 268 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_fill_sockaddr4(cm_node, &event); event 270 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_fill_sockaddr6(cm_node, &event); event 271 drivers/infiniband/hw/i40iw/i40iw_cm.c event.provider_data = (void *)cm_node; event 272 drivers/infiniband/hw/i40iw/i40iw_cm.c event.private_data = (void *)cm_node->pdata_buf; event 273 drivers/infiniband/hw/i40iw/i40iw_cm.c event.private_data_len = (u8)cm_node->pdata.size; event 274 drivers/infiniband/hw/i40iw/i40iw_cm.c event.ird = cm_node->ird_size; event 277 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_get_cmevent_info(cm_node, cm_id, &event); event 280 drivers/infiniband/hw/i40iw/i40iw_cm.c event.ird = cm_node->ird_size; event 281 drivers/infiniband/hw/i40iw/i40iw_cm.c event.ord = cm_node->ord_size; event 291 drivers/infiniband/hw/i40iw/i40iw_cm.c return cm_id->event_handler(cm_id, &event); event 302 drivers/infiniband/hw/i40iw/i40iw_cm.c struct i40iw_cm_event *event; event 307 drivers/infiniband/hw/i40iw/i40iw_cm.c event = kzalloc(sizeof(*event), GFP_ATOMIC); event 309 drivers/infiniband/hw/i40iw/i40iw_cm.c if (!event) event 312 drivers/infiniband/hw/i40iw/i40iw_cm.c event->type = type; event 313 drivers/infiniband/hw/i40iw/i40iw_cm.c event->cm_node = cm_node; event 314 drivers/infiniband/hw/i40iw/i40iw_cm.c memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr)); event 315 drivers/infiniband/hw/i40iw/i40iw_cm.c memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr)); event 316 drivers/infiniband/hw/i40iw/i40iw_cm.c event->cm_info.rem_port = cm_node->rem_port; event 317 drivers/infiniband/hw/i40iw/i40iw_cm.c event->cm_info.loc_port = cm_node->loc_port; event 318 drivers/infiniband/hw/i40iw/i40iw_cm.c event->cm_info.cm_id = cm_node->cm_id; event 324 drivers/infiniband/hw/i40iw/i40iw_cm.c event, event 326 drivers/infiniband/hw/i40iw/i40iw_cm.c event->cm_info.loc_addr, event 327 drivers/infiniband/hw/i40iw/i40iw_cm.c event->cm_info.rem_addr); event 329 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_cm_post_event(event); event 330 drivers/infiniband/hw/i40iw/i40iw_cm.c return event; event 607 drivers/infiniband/hw/i40iw/i40iw_cm.c static void i40iw_event_connect_error(struct i40iw_cm_event *event) event 612 drivers/infiniband/hw/i40iw/i40iw_cm.c cm_id = event->cm_node->cm_id; event 623 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_send_cm_event(event->cm_node, cm_id, event 627 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_rem_ref_cm_node(event->cm_node); event 1890 drivers/infiniband/hw/i40iw/i40iw_cm.c struct i40iw_cm_event event; event 1892 
drivers/infiniband/hw/i40iw/i40iw_cm.c event.cm_node = loopback; event 1893 drivers/infiniband/hw/i40iw/i40iw_cm.c memcpy(event.cm_info.rem_addr, event 1894 drivers/infiniband/hw/i40iw/i40iw_cm.c loopback->rem_addr, sizeof(event.cm_info.rem_addr)); event 1895 drivers/infiniband/hw/i40iw/i40iw_cm.c memcpy(event.cm_info.loc_addr, event 1896 drivers/infiniband/hw/i40iw/i40iw_cm.c loopback->loc_addr, sizeof(event.cm_info.loc_addr)); event 1897 drivers/infiniband/hw/i40iw/i40iw_cm.c event.cm_info.rem_port = loopback->rem_port; event 1898 drivers/infiniband/hw/i40iw/i40iw_cm.c event.cm_info.loc_port = loopback->loc_port; event 1899 drivers/infiniband/hw/i40iw/i40iw_cm.c event.cm_info.cm_id = loopback->cm_id; event 1900 drivers/infiniband/hw/i40iw/i40iw_cm.c event.cm_info.ipv4 = loopback->ipv4; event 1903 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_event_connect_error(&event); event 3581 drivers/infiniband/hw/i40iw/i40iw_cm.c ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ? event 4078 drivers/infiniband/hw/i40iw/i40iw_cm.c static void i40iw_cm_event_connected(struct i40iw_cm_event *event) event 4091 drivers/infiniband/hw/i40iw/i40iw_cm.c cm_node = event->cm_node; event 4129 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_send_cm_event(event->cm_node, event 4134 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_rem_ref_cm_node(event->cm_node); event 4141 drivers/infiniband/hw/i40iw/i40iw_cm.c static void i40iw_cm_event_reset(struct i40iw_cm_event *event) event 4143 drivers/infiniband/hw/i40iw/i40iw_cm.c struct i40iw_cm_node *cm_node = event->cm_node; event 4157 drivers/infiniband/hw/i40iw/i40iw_cm.c event->cm_node, cm_id); event 4170 drivers/infiniband/hw/i40iw/i40iw_cm.c struct i40iw_cm_event *event = container_of(work, event 4175 drivers/infiniband/hw/i40iw/i40iw_cm.c if (!event || !event->cm_node || !event->cm_node->cm_core) event 4178 drivers/infiniband/hw/i40iw/i40iw_cm.c cm_node = event->cm_node; event 4180 drivers/infiniband/hw/i40iw/i40iw_cm.c switch (event->type) { event 4188 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_cm_event_reset(event); event 4191 drivers/infiniband/hw/i40iw/i40iw_cm.c if (!event->cm_node->cm_id || event 4192 drivers/infiniband/hw/i40iw/i40iw_cm.c (event->cm_node->state != I40IW_CM_STATE_OFFLOADED)) event 4194 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_cm_event_connected(event); event 4197 drivers/infiniband/hw/i40iw/i40iw_cm.c if (!event->cm_node->cm_id || event 4206 drivers/infiniband/hw/i40iw/i40iw_cm.c if (!event->cm_node->cm_id || event 4207 drivers/infiniband/hw/i40iw/i40iw_cm.c (event->cm_node->state == I40IW_CM_STATE_OFFLOADED)) event 4209 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_event_connect_error(event); event 4212 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_pr_err("event type = %d\n", event->type); event 4216 drivers/infiniband/hw/i40iw/i40iw_cm.c event->cm_info.cm_id->rem_ref(event->cm_info.cm_id); event 4217 drivers/infiniband/hw/i40iw/i40iw_cm.c i40iw_rem_ref_cm_node(event->cm_node); event 4218 drivers/infiniband/hw/i40iw/i40iw_cm.c kfree(event); event 4225 drivers/infiniband/hw/i40iw/i40iw_cm.c static void i40iw_cm_post_event(struct i40iw_cm_event *event) event 4227 drivers/infiniband/hw/i40iw/i40iw_cm.c atomic_inc(&event->cm_node->ref_count); event 4228 drivers/infiniband/hw/i40iw/i40iw_cm.c event->cm_info.cm_id->add_ref(event->cm_info.cm_id); event 4229 drivers/infiniband/hw/i40iw/i40iw_cm.c INIT_WORK(&event->event_work, i40iw_cm_event_handler); event 4231 drivers/infiniband/hw/i40iw/i40iw_cm.c queue_work(event->cm_node->cm_core->event_wq, 
&event->event_work); event 385 drivers/infiniband/hw/i40iw/i40iw_hw.c ibevent.event = IB_EVENT_CQ_ERR; event 146 drivers/infiniband/hw/i40iw/i40iw_utils.c unsigned long event, event 190 drivers/infiniband/hw/i40iw/i40iw_utils.c switch (event) { event 223 drivers/infiniband/hw/i40iw/i40iw_utils.c unsigned long event, event 247 drivers/infiniband/hw/i40iw/i40iw_utils.c switch (event) { event 274 drivers/infiniband/hw/i40iw/i40iw_utils.c int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr) event 282 drivers/infiniband/hw/i40iw/i40iw_utils.c switch (event) { event 320 drivers/infiniband/hw/i40iw/i40iw_utils.c unsigned long event, event 344 drivers/infiniband/hw/i40iw/i40iw_utils.c switch (event) { event 2741 drivers/infiniband/hw/i40iw/i40iw_verbs.c struct ib_event event; event 2743 drivers/infiniband/hw/i40iw/i40iw_verbs.c event.device = &iwibdev->ibdev; event 2744 drivers/infiniband/hw/i40iw/i40iw_verbs.c event.element.port_num = 1; event 2745 drivers/infiniband/hw/i40iw/i40iw_verbs.c event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; event 2746 drivers/infiniband/hw/i40iw/i40iw_verbs.c ib_dispatch_event(&event); event 51 drivers/infiniband/hw/mlx4/cq.c struct ib_event event; event 62 drivers/infiniband/hw/mlx4/cq.c event.device = ibcq->device; event 63 drivers/infiniband/hw/mlx4/cq.c event.event = IB_EVENT_CQ_ERR; event 64 drivers/infiniband/hw/mlx4/cq.c event.element.cq = ibcq; event 65 drivers/infiniband/hw/mlx4/cq.c ibcq->event_handler(&event, ibcq->cq_context); event 260 drivers/infiniband/hw/mlx4/cq.c cq->mcq.event = mlx4_ib_cq_event; event 63 drivers/infiniband/hw/mlx4/mad.c #define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr) event 64 drivers/infiniband/hw/mlx4/mad.c #define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask) event 1190 drivers/infiniband/hw/mlx4/mad.c u8 port = eqe->event.port_mgmt_change.port; event 1197 drivers/infiniband/hw/mlx4/mad.c changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr); event 1202 drivers/infiniband/hw/mlx4/mad.c u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid); event 1203 drivers/infiniband/hw/mlx4/mad.c u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf; event 1217 drivers/infiniband/hw/mlx4/mad.c if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix) event 1221 drivers/infiniband/hw/mlx4/mad.c eqe->event.port_mgmt_change.params.port_info.gid_prefix; event 1272 drivers/infiniband/hw/mlx4/mad.c eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj]; event 1290 drivers/infiniband/hw/mlx4/mad.c struct ib_event event; event 1292 drivers/infiniband/hw/mlx4/mad.c event.device = &dev->ib_dev; event 1293 drivers/infiniband/hw/mlx4/mad.c event.element.port_num = port_num; event 1294 drivers/infiniband/hw/mlx4/mad.c event.event = type; event 1296 drivers/infiniband/hw/mlx4/mad.c ib_dispatch_event(&event); event 1787 drivers/infiniband/hw/mlx4/mad.c static void pv_qp_event_handler(struct ib_event *event, void *qp_context) event 1793 drivers/infiniband/hw/mlx4/mad.c event->event, sqp->port); event 2331 drivers/infiniband/hw/mlx4/main.c unsigned long event) event 2349 drivers/infiniband/hw/mlx4/main.c (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || event 2350 drivers/infiniband/hw/mlx4/main.c event == NETDEV_UP || event == NETDEV_CHANGE)) event 2354 
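Aside: the i40iw and mlx4/cq.c entries above share one shape -- the driver fills a stack-allocated event structure (type, device, affected object) and hands it to a handler that the consumer registered earlier. A minimal, self-contained C sketch of that callback-dispatch shape follows; demo_event, demo_cq and on_cq_event are invented names for illustration and are not kernel APIs.

#include <stdio.h>

/* Invented, simplified stand-ins for the structures in the entries above. */
enum demo_event_type { DEMO_EVENT_CQ_ERR, DEMO_EVENT_PORT_ACTIVE };

struct demo_event {
	enum demo_event_type event;	/* what happened */
	void *element;			/* object it happened to */
};

struct demo_cq {
	/* consumer-registered callback, in the spirit of ibcq->event_handler */
	void (*event_handler)(struct demo_event *ev, void *ctx);
	void *cq_context;
};

/* Producer side: fill the event on the stack and dispatch it. */
static void demo_cq_event(struct demo_cq *cq)
{
	struct demo_event ev;

	ev.event = DEMO_EVENT_CQ_ERR;
	ev.element = cq;
	if (cq->event_handler)
		cq->event_handler(&ev, cq->cq_context);
}

/* Consumer side: the registered handler. */
static void on_cq_event(struct demo_event *ev, void *ctx)
{
	printf("event %d on cq %p (ctx %s)\n", ev->event, ev->element,
	       (const char *)ctx);
}

int main(void)
{
	struct demo_cq cq = { .event_handler = on_cq_event,
			      .cq_context = "my-cq" };

	demo_cq_event(&cq);	/* prints: event 0 on cq ... */
	return 0;
}

The event lives on the producer's stack because the handler is called synchronously and must not keep the pointer afterwards; that is the same contract the listed handlers follow.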
drivers/infiniband/hw/mlx4/main.c (event == NETDEV_UP || event == NETDEV_DOWN)) { event 2362 drivers/infiniband/hw/mlx4/main.c if (event == NETDEV_UP && event 2366 drivers/infiniband/hw/mlx4/main.c if (event == NETDEV_DOWN && event 2374 drivers/infiniband/hw/mlx4/main.c ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE : event 2387 drivers/infiniband/hw/mlx4/main.c unsigned long event, void *ptr) event 2396 drivers/infiniband/hw/mlx4/main.c mlx4_ib_scan_netdevs(ibdev, dev, event); event 3194 drivers/infiniband/hw/mlx4/main.c ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ? event 3240 drivers/infiniband/hw/mlx4/main.c enum mlx4_dev_event event, unsigned long param) event 3249 drivers/infiniband/hw/mlx4/main.c ((event == MLX4_DEV_EVENT_PORT_UP) || event 3250 drivers/infiniband/hw/mlx4/main.c (event == MLX4_DEV_EVENT_PORT_DOWN))) { event 3260 drivers/infiniband/hw/mlx4/main.c if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) event 3265 drivers/infiniband/hw/mlx4/main.c switch (event) { event 3278 drivers/infiniband/hw/mlx4/main.c ibev.event = IB_EVENT_PORT_ACTIVE; event 3284 drivers/infiniband/hw/mlx4/main.c ibev.event = IB_EVENT_PORT_ERR; event 3289 drivers/infiniband/hw/mlx4/main.c ibev.event = IB_EVENT_DEVICE_FATAL; event 3353 drivers/infiniband/hw/mlx4/main.c .event = mlx4_ib_event, event 231 drivers/infiniband/hw/mlx4/qp.c struct ib_event event; event 238 drivers/infiniband/hw/mlx4/qp.c event.device = ibqp->device; event 239 drivers/infiniband/hw/mlx4/qp.c event.element.qp = ibqp; event 242 drivers/infiniband/hw/mlx4/qp.c event.event = IB_EVENT_PATH_MIG; event 245 drivers/infiniband/hw/mlx4/qp.c event.event = IB_EVENT_COMM_EST; event 248 drivers/infiniband/hw/mlx4/qp.c event.event = IB_EVENT_SQ_DRAINED; event 251 drivers/infiniband/hw/mlx4/qp.c event.event = IB_EVENT_QP_LAST_WQE_REACHED; event 254 drivers/infiniband/hw/mlx4/qp.c event.event = IB_EVENT_QP_FATAL; event 257 drivers/infiniband/hw/mlx4/qp.c event.event = IB_EVENT_PATH_MIG_ERR; event 260 drivers/infiniband/hw/mlx4/qp.c event.event = IB_EVENT_QP_REQ_ERR; event 263 drivers/infiniband/hw/mlx4/qp.c event.event = IB_EVENT_QP_ACCESS_ERR; event 271 drivers/infiniband/hw/mlx4/qp.c ibqp->event_handler(&event, ibqp->qp_context); event 956 drivers/infiniband/hw/mlx4/qp.c qp->mqp.event = mlx4_ib_wq_event; event 1240 drivers/infiniband/hw/mlx4/qp.c qp->mqp.event = mlx4_ib_qp_event; event 49 drivers/infiniband/hw/mlx4/srq.c struct ib_event event; event 53 drivers/infiniband/hw/mlx4/srq.c event.device = ibsrq->device; event 54 drivers/infiniband/hw/mlx4/srq.c event.element.srq = ibsrq; event 57 drivers/infiniband/hw/mlx4/srq.c event.event = IB_EVENT_SRQ_LIMIT_REACHED; event 60 drivers/infiniband/hw/mlx4/srq.c event.event = IB_EVENT_SRQ_ERR; event 68 drivers/infiniband/hw/mlx4/srq.c ibsrq->event_handler(&event, ibsrq->srq_context); event 184 drivers/infiniband/hw/mlx4/srq.c srq->msrq.event = mlx4_ib_srq_event; event 52 drivers/infiniband/hw/mlx5/cq.c struct ib_event event; event 61 drivers/infiniband/hw/mlx5/cq.c event.device = &dev->ib_dev; event 62 drivers/infiniband/hw/mlx5/cq.c event.event = IB_EVENT_CQ_ERR; event 63 drivers/infiniband/hw/mlx5/cq.c event.element.cq = ibcq; event 64 drivers/infiniband/hw/mlx5/cq.c ibcq->event_handler(&event, ibcq->cq_context); event 991 drivers/infiniband/hw/mlx5/cq.c cq->mcq.event = mlx5_ib_cq_event; event 1304 drivers/infiniband/hw/mlx5/devx.c struct devx_event *event; event 1318 drivers/infiniband/hw/mlx5/devx.c event = xa_load(&dev->devx_event_table.event_xa, event 1320 
drivers/infiniband/hw/mlx5/devx.c WARN_ON(!event); event 1322 drivers/infiniband/hw/mlx5/devx.c xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2); event 1324 drivers/infiniband/hw/mlx5/devx.c xa_erase(&event->object_ids, event 1378 drivers/infiniband/hw/mlx5/devx.c struct devx_event *event; event 1384 drivers/infiniband/hw/mlx5/devx.c event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP); event 1385 drivers/infiniband/hw/mlx5/devx.c if (!event) event 1388 drivers/infiniband/hw/mlx5/devx.c obj_event = xa_load(&event->object_ids, obj_id); event 1778 drivers/infiniband/hw/mlx5/devx.c struct devx_event *event; event 1785 drivers/infiniband/hw/mlx5/devx.c event = xa_load(&devx_event_table->event_xa, key_level1); event 1786 drivers/infiniband/hw/mlx5/devx.c WARN_ON(!event); event 1788 drivers/infiniband/hw/mlx5/devx.c xa_val_level2 = xa_load(&event->object_ids, event 1791 drivers/infiniband/hw/mlx5/devx.c xa_erase(&event->object_ids, event 1804 drivers/infiniband/hw/mlx5/devx.c struct devx_event *event; event 1807 drivers/infiniband/hw/mlx5/devx.c event = xa_load(&devx_event_table->event_xa, key_level1); event 1808 drivers/infiniband/hw/mlx5/devx.c if (!event) { event 1809 drivers/infiniband/hw/mlx5/devx.c event = kzalloc(sizeof(*event), GFP_KERNEL); event 1810 drivers/infiniband/hw/mlx5/devx.c if (!event) event 1813 drivers/infiniband/hw/mlx5/devx.c INIT_LIST_HEAD(&event->unaffiliated_list); event 1814 drivers/infiniband/hw/mlx5/devx.c xa_init(&event->object_ids); event 1818 drivers/infiniband/hw/mlx5/devx.c event, event 1821 drivers/infiniband/hw/mlx5/devx.c kfree(event); event 1829 drivers/infiniband/hw/mlx5/devx.c obj_event = xa_load(&event->object_ids, key_level2); event 1836 drivers/infiniband/hw/mlx5/devx.c err = xa_insert(&event->object_ids, event 2047 drivers/infiniband/hw/mlx5/devx.c struct devx_event *event; event 2057 drivers/infiniband/hw/mlx5/devx.c event = xa_load(&devx_event_table->event_xa, event 2059 drivers/infiniband/hw/mlx5/devx.c WARN_ON(!event); event 2063 drivers/infiniband/hw/mlx5/devx.c &event->unaffiliated_list); event 2067 drivers/infiniband/hw/mlx5/devx.c obj_event = xa_load(&event->object_ids, obj_id); event 2370 drivers/infiniband/hw/mlx5/devx.c struct devx_event *event; event 2389 drivers/infiniband/hw/mlx5/devx.c event = xa_load(&table->event_xa, event_type | (obj_type << 16)); event 2390 drivers/infiniband/hw/mlx5/devx.c if (!event) { event 2396 drivers/infiniband/hw/mlx5/devx.c dispatch_event_fd(&event->unaffiliated_list, data); event 2402 drivers/infiniband/hw/mlx5/devx.c obj_event = xa_load(&event->object_ids, obj_id); event 2428 drivers/infiniband/hw/mlx5/devx.c struct devx_event *event; event 2435 drivers/infiniband/hw/mlx5/devx.c event = entry; event 2436 drivers/infiniband/hw/mlx5/devx.c list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list, event 2450 drivers/infiniband/hw/mlx5/devx.c struct devx_async_data *event; event 2476 drivers/infiniband/hw/mlx5/devx.c event = list_entry(ev_queue->event_list.next, event 2478 drivers/infiniband/hw/mlx5/devx.c eventsz = event->cmd_out_len + event 2489 drivers/infiniband/hw/mlx5/devx.c if (copy_to_user(buf, &event->hdr, eventsz)) event 2494 drivers/infiniband/hw/mlx5/devx.c atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use); event 2495 drivers/infiniband/hw/mlx5/devx.c kvfree(event); event 2548 drivers/infiniband/hw/mlx5/devx.c struct devx_async_event_data *uninitialized_var(event); event 2595 drivers/infiniband/hw/mlx5/devx.c event = list_first_entry(&ev_file->event_list, event 2599 
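Aside: the mlx5 devx entries above resolve an event in two hops -- the event type keys a first table (event_xa), and the object id keys a per-type table (object_ids). The toy lookup below mimics that shape with plain fixed-size arrays; the kernel uses xarrays, and every name here (lvl1, find_subscription, struct sub) is invented.

#include <stdio.h>

#define MAX_EVENT_TYPES 8
#define MAX_OBJECTS     16

/* Invented stand-in for a per-object subscription record. */
struct sub {
	int obj_id;
	const char *owner;
};

/* Level 2: per-event-type table of object subscriptions. */
struct event_entry {
	struct sub *objs[MAX_OBJECTS];
};

/* Level 1: table indexed by event type. */
static struct event_entry *lvl1[MAX_EVENT_TYPES];

static struct sub *find_subscription(int event_type, int obj_id)
{
	struct event_entry *e;

	if (event_type < 0 || event_type >= MAX_EVENT_TYPES)
		return NULL;
	e = lvl1[event_type];		/* first hop: by event type */
	if (!e)
		return NULL;
	if (obj_id < 0 || obj_id >= MAX_OBJECTS)
		return NULL;
	return e->objs[obj_id];		/* second hop: by object id */
}

int main(void)
{
	static struct event_entry cq_events;
	static struct sub s = { .obj_id = 3, .owner = "demo" };

	cq_events.objs[3] = &s;
	lvl1[1] = &cq_events;

	printf("%s\n", find_subscription(1, 3) ? "subscribed" : "not subscribed");
	return 0;
}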
drivers/infiniband/hw/mlx5/devx.c event_data = &event->hdr; event 2610 drivers/infiniband/hw/mlx5/devx.c list_del(&event->list); event 2621 drivers/infiniband/hw/mlx5/devx.c kfree(event); event 92 drivers/infiniband/hw/mlx5/main.c unsigned int event; event 189 drivers/infiniband/hw/mlx5/main.c unsigned long event, void *ptr) event 202 drivers/infiniband/hw/mlx5/main.c switch (event) { event 251 drivers/infiniband/hw/mlx5/main.c ibev.event = IB_EVENT_PORT_ERR; event 253 drivers/infiniband/hw/mlx5/main.c ibev.event = IB_EVENT_PORT_ACTIVE; event 4580 drivers/infiniband/hw/mlx5/main.c ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ? event 4585 drivers/infiniband/hw/mlx5/main.c ibev->event = IB_EVENT_LID_CHANGE; event 4589 drivers/infiniband/hw/mlx5/main.c ibev->event = IB_EVENT_PKEY_CHANGE; event 4594 drivers/infiniband/hw/mlx5/main.c ibev->event = IB_EVENT_GID_CHANGE; event 4598 drivers/infiniband/hw/mlx5/main.c ibev->event = IB_EVENT_CLIENT_REREGISTER; event 4623 drivers/infiniband/hw/mlx5/main.c switch (work->event) { event 4625 drivers/infiniband/hw/mlx5/main.c ibev.event = IB_EVENT_DEVICE_FATAL; event 4658 drivers/infiniband/hw/mlx5/main.c unsigned long event, void *param) event 4670 drivers/infiniband/hw/mlx5/main.c work->event = event; event 4678 drivers/infiniband/hw/mlx5/main.c unsigned long event, void *param) event 4690 drivers/infiniband/hw/mlx5/main.c work->event = event; event 286 drivers/infiniband/hw/mlx5/qp.c struct ib_event event; event 294 drivers/infiniband/hw/mlx5/qp.c event.device = ibqp->device; event 295 drivers/infiniband/hw/mlx5/qp.c event.element.qp = ibqp; event 298 drivers/infiniband/hw/mlx5/qp.c event.event = IB_EVENT_PATH_MIG; event 301 drivers/infiniband/hw/mlx5/qp.c event.event = IB_EVENT_COMM_EST; event 304 drivers/infiniband/hw/mlx5/qp.c event.event = IB_EVENT_SQ_DRAINED; event 307 drivers/infiniband/hw/mlx5/qp.c event.event = IB_EVENT_QP_LAST_WQE_REACHED; event 310 drivers/infiniband/hw/mlx5/qp.c event.event = IB_EVENT_QP_FATAL; event 313 drivers/infiniband/hw/mlx5/qp.c event.event = IB_EVENT_PATH_MIG_ERR; event 316 drivers/infiniband/hw/mlx5/qp.c event.event = IB_EVENT_QP_REQ_ERR; event 319 drivers/infiniband/hw/mlx5/qp.c event.event = IB_EVENT_QP_ACCESS_ERR; event 326 drivers/infiniband/hw/mlx5/qp.c ibqp->event_handler(&event, ibqp->qp_context); event 1483 drivers/infiniband/hw/mlx5/qp.c sq->base.mqp.event = mlx5_ib_qp_event; event 2298 drivers/infiniband/hw/mlx5/qp.c base->mqp.event = mlx5_ib_qp_event; event 5879 drivers/infiniband/hw/mlx5/qp.c struct ib_event event; event 5882 drivers/infiniband/hw/mlx5/qp.c event.device = rwq->ibwq.device; event 5883 drivers/infiniband/hw/mlx5/qp.c event.element.wq = &rwq->ibwq; event 5886 drivers/infiniband/hw/mlx5/qp.c event.event = IB_EVENT_WQ_FATAL; event 5893 drivers/infiniband/hw/mlx5/qp.c rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context); event 6170 drivers/infiniband/hw/mlx5/qp.c rwq->core_qp.event = mlx5_ib_wq_event; event 21 drivers/infiniband/hw/mlx5/srq.c struct ib_event event; event 25 drivers/infiniband/hw/mlx5/srq.c event.device = ibsrq->device; event 26 drivers/infiniband/hw/mlx5/srq.c event.element.srq = ibsrq; event 29 drivers/infiniband/hw/mlx5/srq.c event.event = IB_EVENT_SRQ_LIMIT_REACHED; event 32 drivers/infiniband/hw/mlx5/srq.c event.event = IB_EVENT_SRQ_ERR; event 40 drivers/infiniband/hw/mlx5/srq.c ibsrq->event_handler(&event, ibsrq->srq_context); event 310 drivers/infiniband/hw/mlx5/srq.c srq->msrq.event = mlx5_ib_srq_event; event 47 drivers/infiniband/hw/mlx5/srq.h void 
(*event)(struct mlx5_core_srq *srq, enum mlx5_event e); event 684 drivers/infiniband/hw/mlx5/srq_cmd.c srq->event(srq, eqe->type); event 89 drivers/infiniband/hw/mthca/mthca_catas.c struct ib_event event; event 94 drivers/infiniband/hw/mthca/mthca_catas.c event.device = &dev->ib_dev; event 95 drivers/infiniband/hw/mthca/mthca_catas.c event.event = IB_EVENT_DEVICE_FATAL; event 96 drivers/infiniband/hw/mthca/mthca_catas.c event.element.port_num = 0; event 99 drivers/infiniband/hw/mthca/mthca_catas.c ib_dispatch_event(&event); event 237 drivers/infiniband/hw/mthca/mthca_cmd.c int event) event 239 drivers/infiniband/hw/mthca/mthca_cmd.c if (event) { event 268 drivers/infiniband/hw/mthca/mthca_cmd.c (event ? (1 << HCA_E_BIT) : 0) | event 282 drivers/infiniband/hw/mthca/mthca_cmd.c int event) event 288 drivers/infiniband/hw/mthca/mthca_cmd.c if (event && dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS && fw_cmd_doorbell) event 293 drivers/infiniband/hw/mthca/mthca_cmd.c op_modifier, op, token, event); event 237 drivers/infiniband/hw/mthca/mthca_cq.c struct ib_event event; event 252 drivers/infiniband/hw/mthca/mthca_cq.c event.device = &dev->ib_dev; event 253 drivers/infiniband/hw/mthca/mthca_cq.c event.event = event_type; event 254 drivers/infiniband/hw/mthca/mthca_cq.c event.element.cq = &cq->ibcq; event 256 drivers/infiniband/hw/mthca/mthca_cq.c cq->ibcq.event_handler(&event, cq->ibcq.cq_context); event 158 drivers/infiniband/hw/mthca/mthca_eq.c } event; event 254 drivers/infiniband/hw/mthca/mthca_eq.c record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; event 276 drivers/infiniband/hw/mthca/mthca_eq.c disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; event 282 drivers/infiniband/hw/mthca/mthca_eq.c mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, event 287 drivers/infiniband/hw/mthca/mthca_eq.c mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, event 292 drivers/infiniband/hw/mthca/mthca_eq.c mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, event 297 drivers/infiniband/hw/mthca/mthca_eq.c mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, event 302 drivers/infiniband/hw/mthca/mthca_eq.c mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, event 307 drivers/infiniband/hw/mthca/mthca_eq.c mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, event 312 drivers/infiniband/hw/mthca/mthca_eq.c mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, event 317 drivers/infiniband/hw/mthca/mthca_eq.c mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, event 322 drivers/infiniband/hw/mthca/mthca_eq.c mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, event 328 drivers/infiniband/hw/mthca/mthca_eq.c be16_to_cpu(eqe->event.cmd.token), event 329 drivers/infiniband/hw/mthca/mthca_eq.c eqe->event.cmd.status, event 330 drivers/infiniband/hw/mthca/mthca_eq.c be64_to_cpu(eqe->event.cmd.out_param)); event 335 drivers/infiniband/hw/mthca/mthca_eq.c (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3, event 341 drivers/infiniband/hw/mthca/mthca_eq.c eqe->event.cq_err.syndrome == 1 ? 
event 343 drivers/infiniband/hw/mthca/mthca_eq.c be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); event 344 drivers/infiniband/hw/mthca/mthca_eq.c mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), event 112 drivers/infiniband/hw/mthca/mthca_mad.c struct ib_event event; event 127 drivers/infiniband/hw/mthca/mthca_mad.c event.device = ibdev; event 128 drivers/infiniband/hw/mthca/mthca_mad.c event.element.port_num = port_num; event 131 drivers/infiniband/hw/mthca/mthca_mad.c event.event = IB_EVENT_CLIENT_REREGISTER; event 132 drivers/infiniband/hw/mthca/mthca_mad.c ib_dispatch_event(&event); event 136 drivers/infiniband/hw/mthca/mthca_mad.c event.event = IB_EVENT_LID_CHANGE; event 137 drivers/infiniband/hw/mthca/mthca_mad.c ib_dispatch_event(&event); event 142 drivers/infiniband/hw/mthca/mthca_mad.c event.device = ibdev; event 143 drivers/infiniband/hw/mthca/mthca_mad.c event.event = IB_EVENT_PKEY_CHANGE; event 144 drivers/infiniband/hw/mthca/mthca_mad.c event.element.port_num = port_num; event 145 drivers/infiniband/hw/mthca/mthca_mad.c ib_dispatch_event(&event); event 242 drivers/infiniband/hw/mthca/mthca_qp.c struct ib_event event; event 259 drivers/infiniband/hw/mthca/mthca_qp.c event.device = &dev->ib_dev; event 260 drivers/infiniband/hw/mthca/mthca_qp.c event.event = event_type; event 261 drivers/infiniband/hw/mthca/mthca_qp.c event.element.qp = &qp->ibqp; event 263 drivers/infiniband/hw/mthca/mthca_qp.c qp->ibqp.event_handler(&event, qp->ibqp.qp_context); event 434 drivers/infiniband/hw/mthca/mthca_srq.c struct ib_event event; event 450 drivers/infiniband/hw/mthca/mthca_srq.c event.device = &dev->ib_dev; event 451 drivers/infiniband/hw/mthca/mthca_srq.c event.event = event_type; event 452 drivers/infiniband/hw/mthca/mthca_srq.c event.element.srq = &srq->ibsrq; event 453 drivers/infiniband/hw/mthca/mthca_srq.c srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); event 722 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_CQ_ERR; event 728 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_CQ_ERR; event 734 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_QP_FATAL; event 739 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_QP_ACCESS_ERR; event 743 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_COMM_EST; event 747 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_SQ_DRAINED; event 751 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_DEVICE_FATAL; event 757 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_SRQ_ERR; event 763 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED; event 769 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; event 390 drivers/infiniband/hw/ocrdma/ocrdma_main.c port_event.event = IB_EVENT_PORT_ACTIVE; event 401 drivers/infiniband/hw/ocrdma/ocrdma_main.c err_event.event = IB_EVENT_PORT_ERR; event 418 drivers/infiniband/hw/ocrdma/ocrdma_main.c static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) event 420 drivers/infiniband/hw/ocrdma/ocrdma_main.c switch (event) { event 63 drivers/infiniband/hw/qedr/main.c ibev.event = type; event 668 drivers/infiniband/hw/qedr/main.c struct ib_event event; event 681 drivers/infiniband/hw/qedr/main.c event.event = IB_EVENT_CQ_ERR; event 685 drivers/infiniband/hw/qedr/main.c event.event = IB_EVENT_SQ_DRAINED; event 689 drivers/infiniband/hw/qedr/main.c event.event = IB_EVENT_QP_FATAL; event 693 
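Aside: the mthca, ocrdma and qedr entries above all translate a device-specific async code into a generic IB_EVENT_* value before invoking the handler. A translation helper of that kind could look like the sketch below; hw_async_code and the DEMO_* constants are invented stand-ins, not the drivers' real codes.

#include <stdio.h>

/* Invented hardware-side async event codes. */
enum hw_async_code {
	HW_ASYNC_CQ_OVERRUN,
	HW_ASYNC_QP_FATAL,
	HW_ASYNC_SRQ_LIMIT,
	HW_ASYNC_PORT_UP,
};

/* Invented generic codes standing in for IB_EVENT_*. */
enum demo_ib_event {
	DEMO_EVENT_CQ_ERR,
	DEMO_EVENT_QP_FATAL,
	DEMO_EVENT_SRQ_LIMIT_REACHED,
	DEMO_EVENT_PORT_ACTIVE,
	DEMO_EVENT_UNKNOWN,
};

/* One switch maps every hardware code onto the generic namespace. */
static enum demo_ib_event translate(enum hw_async_code hw_code)
{
	switch (hw_code) {
	case HW_ASYNC_CQ_OVERRUN:
		return DEMO_EVENT_CQ_ERR;
	case HW_ASYNC_QP_FATAL:
		return DEMO_EVENT_QP_FATAL;
	case HW_ASYNC_SRQ_LIMIT:
		return DEMO_EVENT_SRQ_LIMIT_REACHED;
	case HW_ASYNC_PORT_UP:
		return DEMO_EVENT_PORT_ACTIVE;
	default:
		return DEMO_EVENT_UNKNOWN;
	}
}

int main(void)
{
	printf("%d -> %d\n", HW_ASYNC_SRQ_LIMIT, translate(HW_ASYNC_SRQ_LIMIT));
	return 0;
}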
drivers/infiniband/hw/qedr/main.c event.event = IB_EVENT_QP_REQ_ERR; event 697 drivers/infiniband/hw/qedr/main.c event.event = IB_EVENT_QP_ACCESS_ERR; event 701 drivers/infiniband/hw/qedr/main.c event.event = IB_EVENT_SRQ_LIMIT_REACHED; event 705 drivers/infiniband/hw/qedr/main.c event.event = IB_EVENT_SRQ_ERR; event 715 drivers/infiniband/hw/qedr/main.c event.event = IB_EVENT_SRQ_LIMIT_REACHED; event 719 drivers/infiniband/hw/qedr/main.c event.event = IB_EVENT_SRQ_ERR; event 733 drivers/infiniband/hw/qedr/main.c event.device = ibcq->device; event 734 drivers/infiniband/hw/qedr/main.c event.element.cq = ibcq; event 735 drivers/infiniband/hw/qedr/main.c ibcq->event_handler(&event, ibcq->cq_context); event 749 drivers/infiniband/hw/qedr/main.c event.device = ibqp->device; event 750 drivers/infiniband/hw/qedr/main.c event.element.qp = ibqp; event 751 drivers/infiniband/hw/qedr/main.c ibqp->event_handler(&event, ibqp->qp_context); event 767 drivers/infiniband/hw/qedr/main.c event.device = ibsrq->device; event 768 drivers/infiniband/hw/qedr/main.c event.element.srq = ibsrq; event 769 drivers/infiniband/hw/qedr/main.c ibsrq->event_handler(&event, event 1014 drivers/infiniband/hw/qedr/main.c static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event) event 1016 drivers/infiniband/hw/qedr/main.c switch (event) { event 44 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct iw_cm_event *event) event 46 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr; event 47 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr; event 61 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct iw_cm_event *event) event 63 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr; event 65 drivers/infiniband/hw/qedr/qedr_iw_cm.c (struct sockaddr_in6 *)&event->remote_addr; event 108 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct iw_cm_event event; event 119 drivers/infiniband/hw/qedr/qedr_iw_cm.c memset(&event, 0, sizeof(event)); event 120 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.event = IW_CM_EVENT_CONNECT_REQUEST; event 121 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.status = params->status; event 125 drivers/infiniband/hw/qedr/qedr_iw_cm.c qedr_fill_sockaddr4(params->cm_info, &event); event 127 drivers/infiniband/hw/qedr/qedr_iw_cm.c qedr_fill_sockaddr6(params->cm_info, &event); event 129 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.provider_data = (void *)ep; event 130 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.private_data = (void *)params->cm_info->private_data; event 131 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.private_data_len = (u8)params->cm_info->private_data_len; event 132 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.ord = params->cm_info->ord; event 133 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.ird = params->cm_info->ird; event 135 drivers/infiniband/hw/qedr/qedr_iw_cm.c listener->cm_id->event_handler(listener->cm_id, &event); event 144 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct iw_cm_event event; event 146 drivers/infiniband/hw/qedr/qedr_iw_cm.c memset(&event, 0, sizeof(event)); event 147 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.status = params->status; event 148 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.event = event_type; event 151 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.ird = params->cm_info->ird; event 152 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.ord = params->cm_info->ord; event 153 
drivers/infiniband/hw/qedr/qedr_iw_cm.c event.private_data_len = params->cm_info->private_data_len; event 154 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.private_data = (void *)params->cm_info->private_data; event 158 drivers/infiniband/hw/qedr/qedr_iw_cm.c ep->cm_id->event_handler(ep->cm_id, &event); event 180 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct ib_event event; event 185 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.event = ib_event; event 186 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.device = ibqp->device; event 187 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.element.qp = ibqp; event 188 drivers/infiniband/hw/qedr/qedr_iw_cm.c ibqp->event_handler(&event, ibqp->qp_context); event 195 drivers/infiniband/hw/qedr/qedr_iw_cm.c enum qed_iwarp_event_type event; event 207 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct iw_cm_event event; event 217 drivers/infiniband/hw/qedr/qedr_iw_cm.c memset(&event, 0, sizeof(event)); event 218 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.status = dwork->status; event 219 drivers/infiniband/hw/qedr/qedr_iw_cm.c event.event = IW_CM_EVENT_DISCONNECT; event 231 drivers/infiniband/hw/qedr/qedr_iw_cm.c ep->cm_id->event_handler(ep->cm_id, &event); event 263 drivers/infiniband/hw/qedr/qedr_iw_cm.c work->event = params->event; event 325 drivers/infiniband/hw/qedr/qedr_iw_cm.c switch (params->event) { event 380 drivers/infiniband/hw/qedr/qedr_iw_cm.c DP_NOTICE(dev, "Unknown event received %d\n", params->event); event 837 drivers/infiniband/hw/qib/qib.h int (*f_notify_dca)(struct qib_devdata *, unsigned long event); event 3409 drivers/infiniband/hw/qib/qib_iba6120.c static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event) event 4457 drivers/infiniband/hw/qib/qib_iba7220.c static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event) event 2668 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event) event 2670 drivers/infiniband/hw/qib/qib_iba7322.c switch (event) { event 1196 drivers/infiniband/hw/qib/qib_init.c unsigned long event = *(unsigned long *)data; event 1198 drivers/infiniband/hw/qib/qib_init.c return dd->f_notify_dca(dd, event); event 1201 drivers/infiniband/hw/qib/qib_init.c static int qib_notify_dca(struct notifier_block *nb, unsigned long event, event 1207 drivers/infiniband/hw/qib/qib_init.c &event, qib_notify_dca_device); event 74 drivers/infiniband/hw/qib/qib_intr.c struct ib_event event; event 77 drivers/infiniband/hw/qib/qib_intr.c event.device = &dd->verbs_dev.rdi.ibdev; event 78 drivers/infiniband/hw/qib/qib_intr.c event.element.port_num = ppd->port; event 79 drivers/infiniband/hw/qib/qib_intr.c event.event = ev; event 80 drivers/infiniband/hw/qib/qib_intr.c ib_dispatch_event(&event); event 657 drivers/infiniband/hw/qib/qib_mad.c struct ib_event event; event 687 drivers/infiniband/hw/qib/qib_mad.c event.device = ibdev; event 688 drivers/infiniband/hw/qib/qib_mad.c event.element.port_num = port; event 704 drivers/infiniband/hw/qib/qib_mad.c event.event = IB_EVENT_LID_CHANGE; event 705 drivers/infiniband/hw/qib/qib_mad.c ib_dispatch_event(&event); event 727 drivers/infiniband/hw/qib/qib_mad.c event.event = IB_EVENT_SM_CHANGE; event 728 drivers/infiniband/hw/qib/qib_mad.c ib_dispatch_event(&event); event 870 drivers/infiniband/hw/qib/qib_mad.c event.event = IB_EVENT_CLIENT_REREGISTER; event 871 drivers/infiniband/hw/qib/qib_mad.c ib_dispatch_event(&event); event 1027 drivers/infiniband/hw/qib/qib_mad.c struct ib_event event; event 1031 
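Aside: the qib_sdma entries above run each event through a switch on the current state with an inner switch on the event, then record last_event for debugging. A minimal state machine of the same shape, with invented states and events:

#include <stdio.h>

enum demo_state { ST_IDLE, ST_RUNNING, ST_HALTED };
enum demo_sm_event { EV_START, EV_ERROR, EV_RESET };

struct demo_sm {
	enum demo_state state;
	enum demo_sm_event last_event;
};

/* Outer switch on state, inner switch on event. */
static void demo_process_event(struct demo_sm *sm, enum demo_sm_event ev)
{
	switch (sm->state) {
	case ST_IDLE:
		if (ev == EV_START)
			sm->state = ST_RUNNING;
		break;
	case ST_RUNNING:
		switch (ev) {
		case EV_ERROR:
			sm->state = ST_HALTED;
			break;
		case EV_RESET:
			sm->state = ST_IDLE;
			break;
		default:
			break;	/* ignore events that do not apply here */
		}
		break;
	case ST_HALTED:
		if (ev == EV_RESET)
			sm->state = ST_IDLE;
		break;
	}
	sm->last_event = ev;	/* remembered for later inspection */
}

int main(void)
{
	struct demo_sm sm = { .state = ST_IDLE };

	demo_process_event(&sm, EV_START);
	demo_process_event(&sm, EV_ERROR);
	printf("state=%d last_event=%d\n", sm.state, sm.last_event);
	return 0;
}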
drivers/infiniband/hw/qib/qib_mad.c event.event = IB_EVENT_PKEY_CHANGE; event 1032 drivers/infiniband/hw/qib/qib_mad.c event.device = &dd->verbs_dev.rdi.ibdev; event 1033 drivers/infiniband/hw/qib/qib_mad.c event.element.port_num = port; event 1034 drivers/infiniband/hw/qib/qib_mad.c ib_dispatch_event(&event); event 55 drivers/infiniband/hw/qib/qib_ruc.c ev.event = IB_EVENT_PATH_MIG; event 734 drivers/infiniband/hw/qib/qib_sdma.c enum qib_sdma_events event) event 740 drivers/infiniband/hw/qib/qib_sdma.c __qib_sdma_process_event(ppd, event); event 749 drivers/infiniband/hw/qib/qib_sdma.c enum qib_sdma_events event) event 755 drivers/infiniband/hw/qib/qib_sdma.c switch (event) { event 794 drivers/infiniband/hw/qib/qib_sdma.c switch (event) { event 828 drivers/infiniband/hw/qib/qib_sdma.c switch (event) { event 859 drivers/infiniband/hw/qib/qib_sdma.c switch (event) { event 892 drivers/infiniband/hw/qib/qib_sdma.c switch (event) { event 926 drivers/infiniband/hw/qib/qib_sdma.c switch (event) { event 960 drivers/infiniband/hw/qib/qib_sdma.c switch (event) { event 998 drivers/infiniband/hw/qib/qib_sdma.c ss->last_event = event; event 136 drivers/infiniband/hw/usnic/usnic_ib_main.c unsigned long event) event 145 drivers/infiniband/hw/usnic/usnic_ib_main.c switch (event) { event 149 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.event = IB_EVENT_PORT_ERR; event 162 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.event = IB_EVENT_PORT_ACTIVE; event 172 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.event = IB_EVENT_PORT_ERR; event 178 drivers/infiniband/hw/usnic/usnic_ib_main.c netdev_cmd_to_name(event), event 194 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.event = IB_EVENT_GID_CHANGE; event 215 drivers/infiniband/hw/usnic/usnic_ib_main.c netdev_cmd_to_name(event), event 222 drivers/infiniband/hw/usnic/usnic_ib_main.c unsigned long event, void *ptr) event 234 drivers/infiniband/hw/usnic/usnic_ib_main.c usnic_ib_handle_usdev_event(us_ibdev, event); event 246 drivers/infiniband/hw/usnic/usnic_ib_main.c unsigned long event, void *ptr) event 253 drivers/infiniband/hw/usnic/usnic_ib_main.c switch (event) { event 256 drivers/infiniband/hw/usnic/usnic_ib_main.c netdev_cmd_to_name(event)); event 259 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.event = IB_EVENT_GID_CHANGE; event 267 drivers/infiniband/hw/usnic/usnic_ib_main.c netdev_cmd_to_name(event), event 269 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.event = IB_EVENT_GID_CHANGE; event 276 drivers/infiniband/hw/usnic/usnic_ib_main.c netdev_cmd_to_name(event), event 285 drivers/infiniband/hw/usnic/usnic_ib_main.c unsigned long event, void *ptr) event 297 drivers/infiniband/hw/usnic/usnic_ib_main.c usnic_ib_handle_inet_event(us_ibdev, event, ptr); event 488 drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c ib_event.event = IB_EVENT_QP_FATAL; event 255 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h unsigned long event; event 328 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c e.event = type; /* 1:1 mapping for now. */ event 354 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c e.event = type; /* 1:1 mapping for now. */ event 383 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c e.event = type; /* 1:1 mapping for now. 
*/ event 393 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c enum ib_event_type event) event 400 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_event.event = event; event 690 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c unsigned long event) event 695 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c switch (event) { event 734 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c event, dev_name(&dev->ib_dev.dev)); event 748 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if ((netdev_work->event == NETDEV_REGISTER) || event 752 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c netdev_work->event); event 762 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c unsigned long event, void *ptr) event 773 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c netdev_work->event = event; event 115 drivers/infiniband/sw/rdmavt/cq.c ev.event = IB_EVENT_CQ_ERR; event 755 drivers/infiniband/sw/rdmavt/qp.c ev.event = IB_EVENT_QP_LAST_WQE_REACHED; event 1690 drivers/infiniband/sw/rdmavt/qp.c ev.event = IB_EVENT_QP_LAST_WQE_REACHED; event 1696 drivers/infiniband/sw/rdmavt/qp.c ev.event = IB_EVENT_PATH_MIG; event 2495 drivers/infiniband/sw/rdmavt/qp.c ev.event = IB_EVENT_SRQ_LIMIT_REACHED; event 2520 drivers/infiniband/sw/rdmavt/qp.c ev.event = IB_EVENT_COMM_EST; event 2540 drivers/infiniband/sw/rdmavt/qp.c ev.event = IB_EVENT_QP_LAST_WQE_REACHED; event 3260 drivers/infiniband/sw/rdmavt/qp.c ev.event = IB_EVENT_QP_LAST_WQE_REACHED; event 499 drivers/infiniband/sw/rxe/rxe_comp.c ev.event = IB_EVENT_SQ_DRAINED; event 144 drivers/infiniband/sw/rxe/rxe_cq.c ev.event = IB_EVENT_CQ_ERR; event 549 drivers/infiniband/sw/rxe/rxe_net.c enum ib_event_type event) event 555 drivers/infiniband/sw/rxe/rxe_net.c ev.event = event; event 594 drivers/infiniband/sw/rxe/rxe_net.c unsigned long event, event 603 drivers/infiniband/sw/rxe/rxe_net.c switch (event) { event 627 drivers/infiniband/sw/rxe/rxe_net.c event, ndev->name); event 167 drivers/infiniband/sw/rxe/rxe_req.c ev.event = IB_EVENT_SQ_DRAINED; event 343 drivers/infiniband/sw/rxe/rxe_resp.c goto event; event 349 drivers/infiniband/sw/rxe/rxe_resp.c event: event 353 drivers/infiniband/sw/rxe/rxe_resp.c ev.event = IB_EVENT_SRQ_LIMIT_REACHED; event 312 drivers/infiniband/sw/siw/siw_cm.c struct iw_cm_event event; event 315 drivers/infiniband/sw/siw/siw_cm.c memset(&event, 0, sizeof(event)); event 316 drivers/infiniband/sw/siw/siw_cm.c event.status = status; event 317 drivers/infiniband/sw/siw/siw_cm.c event.event = reason; event 320 drivers/infiniband/sw/siw/siw_cm.c event.provider_data = cep; event 329 drivers/infiniband/sw/siw/siw_cm.c event.ird = cep->ird; event 330 drivers/infiniband/sw/siw/siw_cm.c event.ord = cep->ord; event 332 drivers/infiniband/sw/siw/siw_cm.c event.ird = cep->ord; event 333 drivers/infiniband/sw/siw/siw_cm.c event.ord = cep->ird; event 344 drivers/infiniband/sw/siw/siw_cm.c event.private_data_len = pd_len; event 345 drivers/infiniband/sw/siw/siw_cm.c event.private_data = cep->mpa.pdata; event 349 drivers/infiniband/sw/siw/siw_cm.c event.private_data_len -= event 351 drivers/infiniband/sw/siw/siw_cm.c event.private_data += event 355 drivers/infiniband/sw/siw/siw_cm.c getname_local(cep->sock, &event.local_addr); event 356 drivers/infiniband/sw/siw/siw_cm.c getname_peer(cep->sock, &event.remote_addr); event 361 drivers/infiniband/sw/siw/siw_cm.c return id->event_handler(id, &event); event 465 drivers/infiniband/sw/siw/siw_main.c static int siw_netdev_event(struct notifier_block *nb, unsigned long event, event 472 drivers/infiniband/sw/siw/siw_main.c 
dev_dbg(&netdev->dev, "siw: event %lu\n", event); event 483 drivers/infiniband/sw/siw/siw_main.c switch (event) { event 1802 drivers/infiniband/sw/siw/siw_verbs.c struct ib_event event; event 1812 drivers/infiniband/sw/siw/siw_verbs.c event.event = etype; event 1813 drivers/infiniband/sw/siw/siw_verbs.c event.device = base_qp->device; event 1814 drivers/infiniband/sw/siw/siw_verbs.c event.element.qp = base_qp; event 1818 drivers/infiniband/sw/siw/siw_verbs.c base_qp->event_handler(&event, base_qp->qp_context); event 1824 drivers/infiniband/sw/siw/siw_verbs.c struct ib_event event; event 1827 drivers/infiniband/sw/siw/siw_verbs.c event.event = etype; event 1828 drivers/infiniband/sw/siw/siw_verbs.c event.device = base_cq->device; event 1829 drivers/infiniband/sw/siw/siw_verbs.c event.element.cq = base_cq; event 1833 drivers/infiniband/sw/siw/siw_verbs.c base_cq->event_handler(&event, base_cq->cq_context); event 1839 drivers/infiniband/sw/siw/siw_verbs.c struct ib_event event; event 1842 drivers/infiniband/sw/siw/siw_verbs.c event.event = etype; event 1843 drivers/infiniband/sw/siw/siw_verbs.c event.device = base_srq->device; event 1844 drivers/infiniband/sw/siw/siw_verbs.c event.element.srq = base_srq; event 1849 drivers/infiniband/sw/siw/siw_verbs.c base_srq->event_handler(&event, base_srq->srq_context); event 1855 drivers/infiniband/sw/siw/siw_verbs.c struct ib_event event; event 1857 drivers/infiniband/sw/siw/siw_verbs.c event.event = etype; event 1858 drivers/infiniband/sw/siw/siw_verbs.c event.device = &sdev->base_dev; event 1859 drivers/infiniband/sw/siw/siw_verbs.c event.element.port_num = port; event 1863 drivers/infiniband/sw/siw/siw_verbs.c ib_dispatch_event(&event); event 81 drivers/infiniband/ulp/ipoib/ipoib_cm.c const struct ib_cm_event *event); event 233 drivers/infiniband/ulp/ipoib/ipoib_cm.c static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx) event 239 drivers/infiniband/ulp/ipoib/ipoib_cm.c if (event->event != IB_EVENT_QP_LAST_WQE_REACHED) event 443 drivers/infiniband/ulp/ipoib/ipoib_cm.c const struct ib_cm_event *event) event 489 drivers/infiniband/ulp/ipoib/ipoib_cm.c ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn); event 505 drivers/infiniband/ulp/ipoib/ipoib_cm.c const struct ib_cm_event *event) event 510 drivers/infiniband/ulp/ipoib/ipoib_cm.c switch (event->event) { event 512 drivers/infiniband/ulp/ipoib/ipoib_cm.c return ipoib_cm_req_handler(cm_id, event); event 986 drivers/infiniband/ulp/ipoib/ipoib_cm.c const struct ib_cm_event *event) event 990 drivers/infiniband/ulp/ipoib/ipoib_cm.c struct ipoib_cm_data *data = event->private_data; event 1254 drivers/infiniband/ulp/ipoib/ipoib_cm.c const struct ib_cm_event *event) event 1263 drivers/infiniband/ulp/ipoib/ipoib_cm.c switch (event->event) { event 1270 drivers/infiniband/ulp/ipoib/ipoib_cm.c ret = ipoib_cm_rep_handler(cm_id, event); event 1278 drivers/infiniband/ulp/ipoib/ipoib_cm.c ipoib_dbg(priv, "CM error %d.\n", event->event); event 113 drivers/infiniband/ulp/ipoib/ipoib_main.c unsigned long event, void *ptr) event 121 drivers/infiniband/ulp/ipoib/ipoib_main.c switch (event) { event 276 drivers/infiniband/ulp/ipoib/ipoib_verbs.c ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event, event 279 drivers/infiniband/ulp/ipoib/ipoib_verbs.c if (record->event == IB_EVENT_CLIENT_REREGISTER) { event 281 drivers/infiniband/ulp/ipoib/ipoib_verbs.c } else if (record->event == IB_EVENT_PORT_ERR || event 282 drivers/infiniband/ulp/ipoib/ipoib_verbs.c record->event == 
IB_EVENT_PORT_ACTIVE || event 283 drivers/infiniband/ulp/ipoib/ipoib_verbs.c record->event == IB_EVENT_LID_CHANGE) { event 285 drivers/infiniband/ulp/ipoib/ipoib_verbs.c } else if (record->event == IB_EVENT_PKEY_CHANGE) { event 287 drivers/infiniband/ulp/ipoib/ipoib_verbs.c } else if (record->event == IB_EVENT_GID_CHANGE && event 50 drivers/infiniband/ulp/iser/iser_verbs.c ib_event_msg(cause->event), cause->event); event 54 drivers/infiniband/ulp/iser/iser_verbs.c struct ib_event *event) event 57 drivers/infiniband/ulp/iser/iser_verbs.c ib_event_msg(event->event), event->event, event 58 drivers/infiniband/ulp/iser/iser_verbs.c dev_name(&event->device->dev), event->element.port_num); event 835 drivers/infiniband/ulp/iser/iser_verbs.c static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) event 842 drivers/infiniband/ulp/iser/iser_verbs.c rdma_event_msg(event->event), event->event, event 843 drivers/infiniband/ulp/iser/iser_verbs.c event->status, cma_id->context, cma_id); event 846 drivers/infiniband/ulp/iser/iser_verbs.c switch (event->event) { event 854 drivers/infiniband/ulp/iser/iser_verbs.c iser_connected_handler(cma_id, event->param.conn.private_data); event 858 drivers/infiniband/ulp/iser/iser_verbs.c rdma_reject_msg(cma_id, event->status)); event 886 drivers/infiniband/ulp/iser/iser_verbs.c rdma_event_msg(event->event), event->event); event 70 drivers/infiniband/ulp/isert/ib_isert.c ib_event_msg(e->event), e->event, isert_conn); event 72 drivers/infiniband/ulp/isert/ib_isert.c switch (e->event) { event 493 drivers/infiniband/ulp/isert/ib_isert.c isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) event 530 drivers/infiniband/ulp/isert/ib_isert.c isert_set_nego_params(isert_conn, &event->param.conn); event 680 drivers/infiniband/ulp/isert/ib_isert.c enum rdma_cm_event_type event) event 683 drivers/infiniband/ulp/isert/ib_isert.c rdma_event_msg(event), event, isert_np); event 685 drivers/infiniband/ulp/isert/ib_isert.c switch (event) { event 699 drivers/infiniband/ulp/isert/ib_isert.c isert_np, event); event 707 drivers/infiniband/ulp/isert/ib_isert.c enum rdma_cm_event_type event) event 747 drivers/infiniband/ulp/isert/ib_isert.c isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) event 754 drivers/infiniband/ulp/isert/ib_isert.c rdma_event_msg(event->event), event->event, event 755 drivers/infiniband/ulp/isert/ib_isert.c event->status, cma_id, cma_id->context); event 758 drivers/infiniband/ulp/isert/ib_isert.c return isert_np_cma_handler(cma_id->context, event->event); event 760 drivers/infiniband/ulp/isert/ib_isert.c switch (event->event) { event 762 drivers/infiniband/ulp/isert/ib_isert.c ret = isert_connect_request(cma_id, event); event 772 drivers/infiniband/ulp/isert/ib_isert.c ret = isert_disconnected_handler(cma_id, event->event); event 777 drivers/infiniband/ulp/isert/ib_isert.c isert_disconnected_handler(cma_id, event->event); event 788 drivers/infiniband/ulp/isert/ib_isert.c rdma_reject_msg(cma_id, event->status)); event 795 drivers/infiniband/ulp/isert/ib_isert.c isert_err("Unhandled RDMA CMA event: %d\n", event->event); event 325 drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event); event 229 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c static void opa_vnic_mac_send_event(struct net_device *netdev, u8 event) event 237 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c switch (event) { event 259 
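Aside: the siw, rxe, ipoib and usnic entries above register a notifier whose callback receives an unsigned long event code plus an opaque pointer and switches on the code (in the kernel this goes through register_netdevice_notifier()). The toy chain below only illustrates that observer shape; demo_notifier, demo_register and demo_call_chain are invented.

#include <stdio.h>

#define DEMO_NETDEV_UP   1UL
#define DEMO_NETDEV_DOWN 2UL

struct demo_notifier {
	int (*call)(struct demo_notifier *nb, unsigned long event, void *ptr);
	struct demo_notifier *next;
};

static struct demo_notifier *chain;

static void demo_register(struct demo_notifier *nb)
{
	nb->next = chain;
	chain = nb;
}

/* Walk the chain, handing every observer the event code and payload. */
static void demo_call_chain(unsigned long event, void *ptr)
{
	struct demo_notifier *nb;

	for (nb = chain; nb; nb = nb->next)
		nb->call(nb, event, ptr);
}

static int demo_port_notifier(struct demo_notifier *nb, unsigned long event,
			      void *ptr)
{
	switch (event) {
	case DEMO_NETDEV_UP:
		printf("%s: link up\n", (const char *)ptr);
		break;
	case DEMO_NETDEV_DOWN:
		printf("%s: link down\n", (const char *)ptr);
		break;
	default:
		break;
	}
	return 0;
}

int main(void)
{
	struct demo_notifier nb = { .call = demo_port_notifier };

	demo_register(&nb);
	demo_call_chain(DEMO_NETDEV_UP, "eth0");
	return 0;
}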
drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c opa_vnic_vema_report_event(adapter, event); event 869 drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c record->event, dev_name(&record->device->dev), event 872 drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c if (record->event != IB_EVENT_PORT_ERR && event 873 drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c record->event != IB_EVENT_PORT_ACTIVE) event 877 drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c if (record->event == IB_EVENT_PORT_ACTIVE) event 61 drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event) event 71 drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c trap_data.opcode = event; event 156 drivers/infiniband/ulp/srp/ib_srp.c const struct ib_cm_event *event); event 158 drivers/infiniband/ulp/srp/ib_srp.c struct rdma_cm_event *event); event 274 drivers/infiniband/ulp/srp/ib_srp.c static void srp_qp_event(struct ib_event *event, void *context) event 277 drivers/infiniband/ulp/srp/ib_srp.c ib_event_msg(event->event), event->event); event 2618 drivers/infiniband/ulp/srp/ib_srp.c const struct ib_cm_event *event, event 2627 drivers/infiniband/ulp/srp/ib_srp.c switch (event->param.rej_rcvd.reason) { event 2629 drivers/infiniband/ulp/srp/ib_srp.c cpi = event->param.rej_rcvd.ari; event 2648 drivers/infiniband/ulp/srp/ib_srp.c memcpy(dgid->raw, event->param.rej_rcvd.ari, 16); event 2670 drivers/infiniband/ulp/srp/ib_srp.c opcode = *(u8 *) event->private_data; event 2672 drivers/infiniband/ulp/srp/ib_srp.c struct srp_login_rej *rej = event->private_data; event 2698 drivers/infiniband/ulp/srp/ib_srp.c event->param.rej_rcvd.reason); event 2704 drivers/infiniband/ulp/srp/ib_srp.c const struct ib_cm_event *event) event 2710 drivers/infiniband/ulp/srp/ib_srp.c switch (event->event) { event 2720 drivers/infiniband/ulp/srp/ib_srp.c srp_cm_rep_handler(cm_id, event->private_data, ch); event 2727 drivers/infiniband/ulp/srp/ib_srp.c srp_ib_cm_rej_handler(cm_id, event, ch); event 2755 drivers/infiniband/ulp/srp/ib_srp.c PFX "Unhandled CM event %d\n", event->event); event 2766 drivers/infiniband/ulp/srp/ib_srp.c struct rdma_cm_event *event) event 2772 drivers/infiniband/ulp/srp/ib_srp.c switch (event->status) { event 2780 drivers/infiniband/ulp/srp/ib_srp.c opcode = *(u8 *) event->param.conn.private_data; event 2784 drivers/infiniband/ulp/srp/ib_srp.c event->param.conn.private_data; event 2809 drivers/infiniband/ulp/srp/ib_srp.c event->status); event 2816 drivers/infiniband/ulp/srp/ib_srp.c struct rdma_cm_event *event) event 2822 drivers/infiniband/ulp/srp/ib_srp.c switch (event->event) { event 2853 drivers/infiniband/ulp/srp/ib_srp.c srp_cm_rep_handler(NULL, event->param.conn.private_data, ch); event 2860 drivers/infiniband/ulp/srp/ib_srp.c srp_rdma_cm_rej_handler(ch, event); event 2884 drivers/infiniband/ulp/srp/ib_srp.c PFX "Unhandled CM event %d\n", event->event); event 136 drivers/infiniband/ulp/srpt/ib_srpt.c struct ib_event *event) event 142 drivers/infiniband/ulp/srpt/ib_srpt.c sdev = ib_get_client_data(event->device, &srpt_client); event 143 drivers/infiniband/ulp/srpt/ib_srpt.c if (!sdev || sdev->device != event->device) event 146 drivers/infiniband/ulp/srpt/ib_srpt.c pr_debug("ASYNC event= %d on device= %s\n", event->event, event 149 drivers/infiniband/ulp/srpt/ib_srpt.c switch (event->event) { event 151 drivers/infiniband/ulp/srpt/ib_srpt.c port_num = event->element.port_num - 1; event 158 drivers/infiniband/ulp/srpt/ib_srpt.c event->event, port_num + 1, event 169 
drivers/infiniband/ulp/srpt/ib_srpt.c port_num = event->element.port_num - 1; event 176 drivers/infiniband/ulp/srpt/ib_srpt.c event->event, port_num + 1, event 181 drivers/infiniband/ulp/srpt/ib_srpt.c pr_err("received unrecognized IB event %d\n", event->event); event 191 drivers/infiniband/ulp/srpt/ib_srpt.c static void srpt_srq_event(struct ib_event *event, void *ctx) event 193 drivers/infiniband/ulp/srpt/ib_srpt.c pr_debug("SRQ event %d\n", event->event); event 218 drivers/infiniband/ulp/srpt/ib_srpt.c static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch) event 221 drivers/infiniband/ulp/srpt/ib_srpt.c event->event, ch, ch->sess_name, ch->state); event 223 drivers/infiniband/ulp/srpt/ib_srpt.c switch (event->event) { event 226 drivers/infiniband/ulp/srpt/ib_srpt.c rdma_notify(ch->rdma_cm.cm_id, event->event); event 228 drivers/infiniband/ulp/srpt/ib_srpt.c ib_cm_notify(ch->ib_cm.cm_id, event->event); event 236 drivers/infiniband/ulp/srpt/ib_srpt.c pr_err("received unrecognized IB QP event %d\n", event->event); event 2527 drivers/infiniband/ulp/srpt/ib_srpt.c struct rdma_cm_event *event) event 2538 drivers/infiniband/ulp/srpt/ib_srpt.c if (event->param.conn.private_data_len < sizeof(*req_rdma)) event 2542 drivers/infiniband/ulp/srpt/ib_srpt.c req_rdma = event->param.conn.private_data; event 2627 drivers/infiniband/ulp/srpt/ib_srpt.c const struct ib_cm_event *event) event 2633 drivers/infiniband/ulp/srpt/ib_srpt.c switch (event->event) { event 2635 drivers/infiniband/ulp/srpt/ib_srpt.c ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd, event 2636 drivers/infiniband/ulp/srpt/ib_srpt.c event->private_data); event 2639 drivers/infiniband/ulp/srpt/ib_srpt.c srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason, event 2640 drivers/infiniband/ulp/srpt/ib_srpt.c event->private_data, event 2671 drivers/infiniband/ulp/srpt/ib_srpt.c pr_err("received unrecognized CM event %d\n", event->event); event 2679 drivers/infiniband/ulp/srpt/ib_srpt.c struct rdma_cm_event *event) event 2684 drivers/infiniband/ulp/srpt/ib_srpt.c switch (event->event) { event 2686 drivers/infiniband/ulp/srpt/ib_srpt.c ret = srpt_rdma_cm_req_recv(cm_id, event); event 2689 drivers/infiniband/ulp/srpt/ib_srpt.c srpt_cm_rej_recv(ch, event->status, event 2690 drivers/infiniband/ulp/srpt/ib_srpt.c event->param.conn.private_data, event 2691 drivers/infiniband/ulp/srpt/ib_srpt.c event->param.conn.private_data_len); event 2714 drivers/infiniband/ulp/srpt/ib_srpt.c event->event); event 100 drivers/input/apm-power.c .event = apmpower_event, event 85 drivers/input/evbug.c .event = evbug_event, event 215 drivers/input/evdev.c const struct input_event *event) event 217 drivers/input/evdev.c client->buffer[client->head++] = *event; event 228 drivers/input/evdev.c .input_event_sec = event->input_event_sec, event 229 drivers/input/evdev.c .input_event_usec = event->input_event_usec, event 238 drivers/input/evdev.c if (event->type == EV_SYN && event->code == SYN_REPORT) { event 250 drivers/input/evdev.c struct input_event event; event 258 drivers/input/evdev.c event.input_event_sec = ts.tv_sec; event 259 drivers/input/evdev.c event.input_event_usec = ts.tv_nsec / NSEC_PER_USEC; event 276 drivers/input/evdev.c event.type = v->type; event 277 drivers/input/evdev.c event.code = v->code; event 278 drivers/input/evdev.c event.value = v->value; event 279 drivers/input/evdev.c __pass_event(client, &event); event 506 drivers/input/evdev.c struct input_event event; event 523 drivers/input/evdev.c if (input_event_from_user(buffer + 
retval, &event)) { event 530 drivers/input/evdev.c event.type, event.code, event.value); event 540 drivers/input/evdev.c struct input_event *event) event 548 drivers/input/evdev.c *event = client->buffer[client->tail++]; event 562 drivers/input/evdev.c struct input_event event; event 585 drivers/input/evdev.c evdev_fetch_next_event(client, &event)) { event 587 drivers/input/evdev.c if (input_event_to_user(buffer + read, &event)) event 1421 drivers/input/evdev.c .event = evdev_event, event 342 drivers/input/ff-core.c dev->event = input_ff_event; event 271 drivers/input/gameport/gameport.c struct gameport_event *event = NULL; event 277 drivers/input/gameport/gameport.c event = list_first_entry(&gameport_event_list, event 279 drivers/input/gameport/gameport.c list_del_init(&event->node); event 283 drivers/input/gameport/gameport.c return event; event 286 drivers/input/gameport/gameport.c static void gameport_free_event(struct gameport_event *event) event 288 drivers/input/gameport/gameport.c module_put(event->owner); event 289 drivers/input/gameport/gameport.c kfree(event); event 292 drivers/input/gameport/gameport.c static void gameport_remove_duplicate_events(struct gameport_event *event) event 300 drivers/input/gameport/gameport.c if (event->object == e->object) { event 306 drivers/input/gameport/gameport.c if (event->type != e->type) event 320 drivers/input/gameport/gameport.c struct gameport_event *event; event 330 drivers/input/gameport/gameport.c if ((event = gameport_get_event())) { event 332 drivers/input/gameport/gameport.c switch (event->type) { event 335 drivers/input/gameport/gameport.c gameport_add_port(event->object); event 339 drivers/input/gameport/gameport.c gameport_attach_driver(event->object); event 343 drivers/input/gameport/gameport.c gameport_remove_duplicate_events(event); event 344 drivers/input/gameport/gameport.c gameport_free_event(event); event 356 drivers/input/gameport/gameport.c struct gameport_event *event; event 368 drivers/input/gameport/gameport.c list_for_each_entry_reverse(event, &gameport_event_list, node) { event 369 drivers/input/gameport/gameport.c if (event->object == object) { event 370 drivers/input/gameport/gameport.c if (event->type == event_type) event 376 drivers/input/gameport/gameport.c event = kmalloc(sizeof(struct gameport_event), GFP_ATOMIC); event 377 drivers/input/gameport/gameport.c if (!event) { event 386 drivers/input/gameport/gameport.c kfree(event); event 391 drivers/input/gameport/gameport.c event->type = event_type; event 392 drivers/input/gameport/gameport.c event->object = object; event 393 drivers/input/gameport/gameport.c event->owner = owner; event 395 drivers/input/gameport/gameport.c list_add_tail(&event->node, &gameport_event_list); event 409 drivers/input/gameport/gameport.c struct gameport_event *event, *next; event 414 drivers/input/gameport/gameport.c list_for_each_entry_safe(event, next, &gameport_event_list, node) { event 415 drivers/input/gameport/gameport.c if (event->object == object) { event 416 drivers/input/gameport/gameport.c list_del_init(&event->node); event 417 drivers/input/gameport/gameport.c gameport_free_event(event); event 434 drivers/input/gameport/gameport.c struct gameport_event *event; event 440 drivers/input/gameport/gameport.c list_for_each_entry(event, &gameport_event_list, node) { event 441 drivers/input/gameport/gameport.c if (event->type == GAMEPORT_REGISTER_PORT) { event 442 drivers/input/gameport/gameport.c gameport = event->object; event 15 drivers/input/input-compat.c struct input_event 
*event) event 24 drivers/input/input-compat.c event->input_event_sec = compat_event.sec; event 25 drivers/input/input-compat.c event->input_event_usec = compat_event.usec; event 26 drivers/input/input-compat.c event->type = compat_event.type; event 27 drivers/input/input-compat.c event->code = compat_event.code; event 28 drivers/input/input-compat.c event->value = compat_event.value; event 31 drivers/input/input-compat.c if (copy_from_user(event, buffer, sizeof(struct input_event))) event 39 drivers/input/input-compat.c const struct input_event *event) event 44 drivers/input/input-compat.c compat_event.sec = event->input_event_sec; event 45 drivers/input/input-compat.c compat_event.usec = event->input_event_usec; event 46 drivers/input/input-compat.c compat_event.type = event->type; event 47 drivers/input/input-compat.c compat_event.code = event->code; event 48 drivers/input/input-compat.c compat_event.value = event->value; event 55 drivers/input/input-compat.c if (copy_to_user(buffer, event, sizeof(struct input_event))) event 100 drivers/input/input-compat.c struct input_event *event) event 102 drivers/input/input-compat.c if (copy_from_user(event, buffer, sizeof(struct input_event))) event 109 drivers/input/input-compat.c const struct input_event *event) event 111 drivers/input/input-compat.c if (copy_to_user(buffer, event, sizeof(struct input_event))) event 70 drivers/input/input-compat.h struct input_event *event); event 73 drivers/input/input-compat.h const struct input_event *event); event 198 drivers/input/input-leds.c .event = input_leds_event, event 116 drivers/input/input.c else if (handler->event) event 118 drivers/input/input.c handler->event(handle, v->type, v->code, v->value); event 375 drivers/input/input.c if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) event 376 drivers/input/input.c dev->event(dev, type, code, value); event 1667 drivers/input/input.c dev->event(dev, EV_##type, i, on ? 
active : 0); \ event 1673 drivers/input/input.c if (!dev->event) event 1680 drivers/input/input.c dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]); event 1681 drivers/input/input.c dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]); event 90 drivers/input/joydev.c struct js_event *event) event 99 drivers/input/joydev.c client->buffer[client->head] = *event; event 118 drivers/input/joydev.c struct js_event event; event 125 drivers/input/joydev.c event.type = JS_EVENT_BUTTON; event 126 drivers/input/joydev.c event.number = joydev->keymap[code - BTN_MISC]; event 127 drivers/input/joydev.c event.value = value; event 131 drivers/input/joydev.c event.type = JS_EVENT_AXIS; event 132 drivers/input/joydev.c event.number = joydev->absmap[code]; event 133 drivers/input/joydev.c event.value = joydev_correct(value, event 134 drivers/input/joydev.c &joydev->corr[event.number]); event 135 drivers/input/joydev.c if (event.value == joydev->abs[event.number]) event 137 drivers/input/joydev.c joydev->abs[event.number] = event.value; event 144 drivers/input/joydev.c event.time = jiffies_to_msecs(jiffies); event 148 drivers/input/joydev.c joydev_pass_event(client, &event); event 290 drivers/input/joydev.c struct js_event *event) event 301 drivers/input/joydev.c event->time = jiffies_to_msecs(jiffies); event 303 drivers/input/joydev.c event->type = JS_EVENT_BUTTON | JS_EVENT_INIT; event 304 drivers/input/joydev.c event->number = client->startup; event 305 drivers/input/joydev.c event->value = !!test_bit(joydev->keypam[event->number], event 308 drivers/input/joydev.c event->type = JS_EVENT_AXIS | JS_EVENT_INIT; event 309 drivers/input/joydev.c event->number = client->startup - joydev->nkey; event 310 drivers/input/joydev.c event->value = joydev->abs[event->number]; event 321 drivers/input/joydev.c struct js_event *event) event 329 drivers/input/joydev.c *event = client->buffer[client->tail++]; event 390 drivers/input/joydev.c struct js_event event; event 414 drivers/input/joydev.c joydev_generate_startup_event(client, input, &event)) { event 416 drivers/input/joydev.c if (copy_to_user(buf + retval, &event, sizeof(struct js_event))) event 423 drivers/input/joydev.c joydev_fetch_next_event(client, &event)) { event 425 drivers/input/joydev.c if (copy_to_user(buf + retval, &event, sizeof(struct js_event))) event 1065 drivers/input/joydev.c .event = joydev_event, event 36 drivers/input/keyboard/adp5520-keys.c unsigned long event, void *data) event 44 drivers/input/keyboard/adp5520-keys.c if (event & ADP5520_KP_INT) { event 56 drivers/input/keyboard/adp5520-keys.c if (event & ADP5520_KR_INT) { event 1713 drivers/input/keyboard/applespi.c applespi->keyboard_input_dev->event = applespi_event; event 1077 drivers/input/keyboard/atkbd.c input_dev->event = atkbd_event; event 639 drivers/input/keyboard/lkkbd.c input_dev->event = lkkbd_event; event 236 drivers/input/keyboard/lm8323.c static inline u8 lm8323_whichkey(u8 event) event 238 drivers/input/keyboard/lm8323.c return event & 0x7f; event 241 drivers/input/keyboard/lm8323.c static inline int lm8323_ispress(u8 event) event 243 drivers/input/keyboard/lm8323.c return (event & 0x80) ? 
1 : 0; event 248 drivers/input/keyboard/lm8323.c u8 event; event 266 drivers/input/keyboard/lm8323.c while ((event = key_fifo[i++])) { event 267 drivers/input/keyboard/lm8323.c u8 key = lm8323_whichkey(event); event 268 drivers/input/keyboard/lm8323.c int isdown = lm8323_ispress(event); event 280 drivers/input/keyboard/sunkbd.c input_dev->event = sunkbd_event; event 790 drivers/input/misc/cm109.c input_dev->event = cm109_input_ev; event 825 drivers/input/misc/cm109.c dev_info(&intf->dev, "cm109: usb_suspend (event=%d)\n", message.event); event 106 drivers/input/misc/dm355evm_keys.c u16 event; event 119 drivers/input/misc/dm355evm_keys.c event = status << 8; event 127 drivers/input/misc/dm355evm_keys.c event |= status; event 128 drivers/input/misc/dm355evm_keys.c if (event == 0xdead) event 142 drivers/input/misc/dm355evm_keys.c if (event == last_event) { event 146 drivers/input/misc/dm355evm_keys.c last_event = event; event 149 drivers/input/misc/dm355evm_keys.c event &= ~0x0800; event 152 drivers/input/misc/dm355evm_keys.c ke = sparse_keymap_entry_from_scancode(keys->input, event); event 156 drivers/input/misc/dm355evm_keys.c event, keycode); event 86 drivers/input/misc/gpio-beeper.c input->event = gpio_beeper_event; event 110 drivers/input/misc/ixp4xx-beeper.c input_dev->event = ixp4xx_spkr_event; event 65 drivers/input/misc/m68kspkr.c input_dev->event = m68kspkr_event; event 84 drivers/input/misc/pcspkr.c pcspkr_dev->event = pcspkr_event; event 367 drivers/input/misc/powermate.c input_dev->event = powermate_input_event; event 190 drivers/input/misc/pwm-beeper.c beeper->input->event = pwm_beeper_event; event 27 drivers/input/misc/rave-sp-pwrbutton.c const u8 event = rave_sp_action_unpack_event(action); event 31 drivers/input/misc/rave-sp-pwrbutton.c if (event == RAVE_SP_EVNT_BUTTON_PRESS) { event 33 drivers/input/misc/sparcspkr.c int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); event 164 drivers/input/misc/sparcspkr.c input_dev->event = state->event; event 183 drivers/input/misc/sparcspkr.c state->event(input_dev, EV_SND, SND_BELL, 0); event 198 drivers/input/misc/sparcspkr.c state->event = bbc_spkr_event; event 239 drivers/input/misc/sparcspkr.c state->event(input_dev, EV_SND, SND_BELL, 0); event 280 drivers/input/misc/sparcspkr.c state->event = grover_spkr_event; event 318 drivers/input/misc/sparcspkr.c state->event(input_dev, EV_SND, SND_BELL, 0); event 360 drivers/input/misc/uinput.c dev->event = uinput_dev_event; event 622 drivers/input/misc/uinput.c struct input_event *event) event 630 drivers/input/misc/uinput.c *event = udev->buff[udev->tail]; event 642 drivers/input/misc/uinput.c struct input_event event; event 646 drivers/input/misc/uinput.c uinput_fetch_next_event(udev, &event)) { event 648 drivers/input/misc/uinput.c if (input_event_to_user(buffer + read, &event)) event 160 drivers/input/misc/xen-kbdfront.c union xenkbd_in_event *event) event 162 drivers/input/misc/xen-kbdfront.c switch (event->type) { event 164 drivers/input/misc/xen-kbdfront.c xenkbd_handle_motion_event(info, &event->motion); event 168 drivers/input/misc/xen-kbdfront.c xenkbd_handle_key_event(info, &event->key); event 172 drivers/input/misc/xen-kbdfront.c xenkbd_handle_position_event(info, &event->pos); event 176 drivers/input/misc/xen-kbdfront.c xenkbd_handle_mt_event(info, &event->mtouch); event 761 drivers/input/mouse/hgpk.c if (psmouse->ps2dev.serio->dev.power.power_state.event != event 1054 drivers/input/mousedev.c .event = mousedev_event, event 152 
drivers/input/serio/serio.c struct serio_event *event = NULL; event 158 drivers/input/serio/serio.c event = list_first_entry(&serio_event_list, event 160 drivers/input/serio/serio.c list_del_init(&event->node); event 164 drivers/input/serio/serio.c return event; event 167 drivers/input/serio/serio.c static void serio_free_event(struct serio_event *event) event 169 drivers/input/serio/serio.c module_put(event->owner); event 170 drivers/input/serio/serio.c kfree(event); event 201 drivers/input/serio/serio.c struct serio_event *event; event 205 drivers/input/serio/serio.c while ((event = serio_get_event())) { event 207 drivers/input/serio/serio.c switch (event->type) { event 210 drivers/input/serio/serio.c serio_add_port(event->object); event 214 drivers/input/serio/serio.c serio_reconnect_port(event->object); event 218 drivers/input/serio/serio.c serio_disconnect_port(event->object); event 219 drivers/input/serio/serio.c serio_find_driver(event->object); event 223 drivers/input/serio/serio.c serio_reconnect_subtree(event->object); event 227 drivers/input/serio/serio.c serio_attach_driver(event->object); event 231 drivers/input/serio/serio.c serio_remove_duplicate_events(event->object, event->type); event 232 drivers/input/serio/serio.c serio_free_event(event); event 244 drivers/input/serio/serio.c struct serio_event *event; event 256 drivers/input/serio/serio.c list_for_each_entry_reverse(event, &serio_event_list, node) { event 257 drivers/input/serio/serio.c if (event->object == object) { event 258 drivers/input/serio/serio.c if (event->type == event_type) event 264 drivers/input/serio/serio.c event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC); event 265 drivers/input/serio/serio.c if (!event) { event 274 drivers/input/serio/serio.c kfree(event); event 279 drivers/input/serio/serio.c event->type = event_type; event 280 drivers/input/serio/serio.c event->object = object; event 281 drivers/input/serio/serio.c event->owner = owner; event 283 drivers/input/serio/serio.c list_add_tail(&event->node, &serio_event_list); event 297 drivers/input/serio/serio.c struct serio_event *event, *next; event 302 drivers/input/serio/serio.c list_for_each_entry_safe(event, next, &serio_event_list, node) { event 303 drivers/input/serio/serio.c if (event->object == object) { event 304 drivers/input/serio/serio.c list_del_init(&event->node); event 305 drivers/input/serio/serio.c serio_free_event(event); event 320 drivers/input/serio/serio.c struct serio_event *event; event 326 drivers/input/serio/serio.c list_for_each_entry(event, &serio_event_list, node) { event 327 drivers/input/serio/serio.c if (event->type == SERIO_REGISTER_PORT) { event 328 drivers/input/serio/serio.c serio = event->object; event 34 drivers/input/touchscreen/chipone_icn8318.c __u8 event; event 76 drivers/input/touchscreen/chipone_icn8318.c static inline bool icn8318_touch_active(u8 event) event 78 drivers/input/touchscreen/chipone_icn8318.c return (event == ICN8318_EVENT_UPDATE1) || event 79 drivers/input/touchscreen/chipone_icn8318.c (event == ICN8318_EVENT_UPDATE2); event 113 drivers/input/touchscreen/chipone_icn8318.c bool act = icn8318_touch_active(touch->event); event 54 drivers/input/touchscreen/chipone_icn8505.c u8 event; event 316 drivers/input/touchscreen/chipone_icn8505.c static bool icn8505_touch_active(u8 event) event 318 drivers/input/touchscreen/chipone_icn8505.c return event == ICN8505_EVENT_UPDATE1 || event 319 drivers/input/touchscreen/chipone_icn8505.c event == ICN8505_EVENT_UPDATE2; event 344 
drivers/input/touchscreen/chipone_icn8505.c bool act = icn8505_touch_active(touch->event); event 141 drivers/input/touchscreen/da9034-ts.c static void da9034_event_handler(struct da9034_touch *touch, int event) event 147 drivers/input/touchscreen/da9034-ts.c if (event != EVENT_PEN_DOWN) event 161 drivers/input/touchscreen/da9034-ts.c if (event != EVENT_TSI_READY) event 189 drivers/input/touchscreen/da9034-ts.c if (event == EVENT_PEN_DOWN) { event 196 drivers/input/touchscreen/da9034-ts.c if (event == EVENT_PEN_UP) { event 203 drivers/input/touchscreen/da9034-ts.c if (event != EVENT_TIMEDOUT) event 232 drivers/input/touchscreen/da9034-ts.c unsigned long event, void *data) event 237 drivers/input/touchscreen/da9034-ts.c if (event & DA9034_EVENT_TSI_READY) event 240 drivers/input/touchscreen/da9034-ts.c if ((event & DA9034_EVENT_PEN_DOWN) && touch->state == STATE_IDLE) event 693 drivers/input/touchscreen/hideep.c const struct hideep_event *event) event 695 drivers/input/touchscreen/hideep.c input_mt_slot(input, event->index & 0x0f); event 697 drivers/input/touchscreen/hideep.c __GET_MT_TOOL_TYPE(event->type), event 698 drivers/input/touchscreen/hideep.c !(event->flag & HIDEEP_MT_RELEASED)); event 699 drivers/input/touchscreen/hideep.c if (!(event->flag & HIDEEP_MT_RELEASED)) { event 701 drivers/input/touchscreen/hideep.c le16_to_cpup(&event->x)); event 703 drivers/input/touchscreen/hideep.c le16_to_cpup(&event->y)); event 705 drivers/input/touchscreen/hideep.c le16_to_cpup(&event->z)); event 706 drivers/input/touchscreen/hideep.c input_report_abs(input, ABS_MT_TOUCH_MAJOR, event->w); event 37 drivers/input/touchscreen/migor_ts.c unsigned char event; event 68 drivers/input/touchscreen/migor_ts.c event = buf[12]; event 70 drivers/input/touchscreen/migor_ts.c switch (event) { event 143 drivers/input/touchscreen/s6sy761.c u8 *event, u8 tid) event 145 drivers/input/touchscreen/s6sy761.c u8 major = event[4]; event 146 drivers/input/touchscreen/s6sy761.c u8 minor = event[5]; event 147 drivers/input/touchscreen/s6sy761.c u8 z = event[6] & S6SY761_MASK_Z; event 148 drivers/input/touchscreen/s6sy761.c u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4); event 149 drivers/input/touchscreen/s6sy761.c u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y); event 164 drivers/input/touchscreen/s6sy761.c u8 *event, u8 tid) event 172 drivers/input/touchscreen/s6sy761.c static void s6sy761_handle_coordinates(struct s6sy761_data *sdata, u8 *event) event 177 drivers/input/touchscreen/s6sy761.c if (unlikely(!(event[0] & S6SY761_MASK_TID))) event 180 drivers/input/touchscreen/s6sy761.c tid = ((event[0] & S6SY761_MASK_TID) >> 2) - 1; event 181 drivers/input/touchscreen/s6sy761.c touch_state = (event[0] & S6SY761_MASK_TOUCH_STATE) >> 6; event 188 drivers/input/touchscreen/s6sy761.c s6sy761_report_release(sdata, event, tid); event 192 drivers/input/touchscreen/s6sy761.c s6sy761_report_coordinates(sdata, event, tid); event 202 drivers/input/touchscreen/s6sy761.c u8 *event = &sdata->data[i * S6SY761_EVENT_SIZE]; event 203 drivers/input/touchscreen/s6sy761.c u8 event_id = event[0] & S6SY761_MASK_EID; event 205 drivers/input/touchscreen/s6sy761.c if (!event[0]) event 211 drivers/input/touchscreen/s6sy761.c s6sy761_handle_coordinates(sdata, event); event 297 drivers/input/touchscreen/s6sy761.c u8 event; event 315 drivers/input/touchscreen/s6sy761.c event = (buffer[0] >> 2) & 0xf; event 317 drivers/input/touchscreen/s6sy761.c if ((event != S6SY761_EVENT_INFO && event 318 drivers/input/touchscreen/s6sy761.c event != 
S6SY761_EVENT_VENDOR_INFO) || event 172 drivers/input/touchscreen/stmfts.c const u8 event[]) event 174 drivers/input/touchscreen/stmfts.c u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4; event 175 drivers/input/touchscreen/stmfts.c u16 x = event[1] | ((event[2] & STMFTS_MASK_X_MSB) << 8); event 176 drivers/input/touchscreen/stmfts.c u16 y = (event[2] >> 4) | (event[3] << 4); event 177 drivers/input/touchscreen/stmfts.c u8 maj = event[4]; event 178 drivers/input/touchscreen/stmfts.c u8 min = event[5]; event 179 drivers/input/touchscreen/stmfts.c u8 orientation = event[6]; event 180 drivers/input/touchscreen/stmfts.c u8 area = event[7]; event 196 drivers/input/touchscreen/stmfts.c const u8 event[]) event 198 drivers/input/touchscreen/stmfts.c u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4; event 207 drivers/input/touchscreen/stmfts.c const u8 event[]) event 209 drivers/input/touchscreen/stmfts.c u16 x = (event[2] << 4) | (event[4] >> 4); event 210 drivers/input/touchscreen/stmfts.c u16 y = (event[3] << 4) | (event[4] & STMFTS_MASK_Y_LSB); event 211 drivers/input/touchscreen/stmfts.c u8 z = event[5]; event 220 drivers/input/touchscreen/stmfts.c static void stmfts_report_key_event(struct stmfts_data *sdata, const u8 event[]) event 222 drivers/input/touchscreen/stmfts.c switch (event[2]) { event 238 drivers/input/touchscreen/stmfts.c "unknown key event: %#02x\n", event[2]); event 250 drivers/input/touchscreen/stmfts.c u8 *event = &sdata->data[i * STMFTS_EVENT_SIZE]; event 252 drivers/input/touchscreen/stmfts.c switch (event[0]) { event 265 drivers/input/touchscreen/stmfts.c switch (event[0] & STMFTS_MASK_EVENT_ID) { event 269 drivers/input/touchscreen/stmfts.c stmfts_report_contact_event(sdata, event); event 273 drivers/input/touchscreen/stmfts.c stmfts_report_contact_release(sdata, event); event 279 drivers/input/touchscreen/stmfts.c stmfts_report_hover_event(sdata, event); event 283 drivers/input/touchscreen/stmfts.c stmfts_report_key_event(sdata, event); event 289 drivers/input/touchscreen/stmfts.c event[6], event[5], event[4], event 290 drivers/input/touchscreen/stmfts.c event[3], event[2], event[1]); event 295 drivers/input/touchscreen/stmfts.c "unknown event %#02x\n", event[0]); event 562 drivers/iommu/amd_iommu.c volatile u32 *event = __evt; event 567 drivers/iommu/amd_iommu.c type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; event 568 drivers/iommu/amd_iommu.c devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; event 569 drivers/iommu/amd_iommu.c pasid = (event[0] & EVENT_DOMID_MASK_HI) | event 570 drivers/iommu/amd_iommu.c (event[1] & EVENT_DOMID_MASK_LO); event 571 drivers/iommu/amd_iommu.c flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; event 572 drivers/iommu/amd_iommu.c address = (u64)(((u64)event[3]) << 32) | event[2]; event 626 drivers/iommu/amd_iommu.c pasid = ((event[0] >> 16) & 0xFFFF) event 627 drivers/iommu/amd_iommu.c | ((event[1] << 6) & 0xF0000); event 628 drivers/iommu/amd_iommu.c tag = event[1] & 0x03FF; event 635 drivers/iommu/amd_iommu.c event[0], event[1], event[2], event[3]); event 123 drivers/iommu/dmar.c dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) event 140 drivers/iommu/dmar.c if (event == BUS_NOTIFY_ADD_DEVICE) event 158 drivers/iommu/dmar.c info->event = event; event 162 drivers/iommu/dmar.c if (event == BUS_NOTIFY_ADD_DEVICE) { event 4612 drivers/iommu/intel-iommu.c if (info->event == BUS_NOTIFY_ADD_DEVICE) { event 4619 drivers/iommu/intel-iommu.c } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) 
{ event 4630 drivers/iommu/intel-iommu.c if (info->event == BUS_NOTIFY_ADD_DEVICE) { event 4639 drivers/iommu/intel-iommu.c } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { event 195 drivers/irqchip/irq-gic-v3-its.c u32 event) event 199 drivers/irqchip/irq-gic-v3-its.c return its->collections + its_dev->event_map.col_map[event]; event 1063 drivers/irqchip/irq-gic-v3-its.c u32 event = its_get_event_id(d); event 1067 drivers/irqchip/irq-gic-v3-its.c map = &its_dev->event_map.vlpi_maps[event]; event 1104 drivers/irqchip/irq-gic-v3-its.c u32 event = its_get_event_id(d); event 1106 drivers/irqchip/irq-gic-v3-its.c if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) event 1109 drivers/irqchip/irq-gic-v3-its.c its_dev->event_map.vlpi_maps[event].db_enabled = enable; event 1121 drivers/irqchip/irq-gic-v3-its.c its_send_vmovi(its_dev, event); event 1206 drivers/irqchip/irq-gic-v3-its.c u32 event = its_get_event_id(d); event 1212 drivers/irqchip/irq-gic-v3-its.c its_send_int(its_dev, event); event 1214 drivers/irqchip/irq-gic-v3-its.c its_send_clear(its_dev, event); event 1276 drivers/irqchip/irq-gic-v3-its.c u32 event = its_get_event_id(d); event 1302 drivers/irqchip/irq-gic-v3-its.c its_dev->event_map.vlpi_maps[event] = *info->map; event 1306 drivers/irqchip/irq-gic-v3-its.c its_send_vmovi(its_dev, event); event 1321 drivers/irqchip/irq-gic-v3-its.c its_send_discard(its_dev, event); event 1324 drivers/irqchip/irq-gic-v3-its.c its_send_vmapti(its_dev, event); event 1338 drivers/irqchip/irq-gic-v3-its.c u32 event = its_get_event_id(d); event 1344 drivers/irqchip/irq-gic-v3-its.c !its_dev->event_map.vlpi_maps[event].vm) { event 1350 drivers/irqchip/irq-gic-v3-its.c *info->map = its_dev->event_map.vlpi_maps[event]; event 1360 drivers/irqchip/irq-gic-v3-its.c u32 event = its_get_event_id(d); event 1371 drivers/irqchip/irq-gic-v3-its.c its_send_discard(its_dev, event); event 1375 drivers/irqchip/irq-gic-v3-its.c its_send_mapti(its_dev, d->hwirq, event); event 2616 drivers/irqchip/irq-gic-v3-its.c u32 event = its_get_event_id(d); event 2633 drivers/irqchip/irq-gic-v3-its.c its_dev->event_map.col_map[event] = cpu; event 2637 drivers/irqchip/irq-gic-v3-its.c its_send_mapti(its_dev, d->hwirq, event); event 2645 drivers/irqchip/irq-gic-v3-its.c u32 event = its_get_event_id(d); event 2648 drivers/irqchip/irq-gic-v3-its.c its_send_discard(its_dev, event); event 272 drivers/isdn/capi/kcapi.c struct capictr_event *event = event 275 drivers/isdn/capi/kcapi.c blocking_notifier_call_chain(&ctr_notifier_list, event->type, event 276 drivers/isdn/capi/kcapi.c (void *)(long)event->controller); event 277 drivers/isdn/capi/kcapi.c kfree(event); event 286 drivers/isdn/capi/kcapi.c struct capictr_event *event = kmalloc(sizeof(*event), GFP_ATOMIC); event 288 drivers/isdn/capi/kcapi.c if (!event) event 291 drivers/isdn/capi/kcapi.c INIT_WORK(&event->work, do_notify_work); event 292 drivers/isdn/capi/kcapi.c event->type = event_type; event 293 drivers/isdn/capi/kcapi.c event->controller = controller; event 295 drivers/isdn/capi/kcapi.c queue_work(kcapi_wq, &event->work); event 139 drivers/isdn/hardware/mISDN/hfcsusb.c handle_led(struct hfcsusb *hw, int event) event 149 drivers/isdn/hardware/mISDN/hfcsusb.c switch (event) { event 35 drivers/isdn/mISDN/fsm.c (fnlist[i].event >= fsm->event_count)) { event 39 drivers/isdn/mISDN/fsm.c (long)fnlist[i].event, (long)fsm->event_count); event 41 drivers/isdn/mISDN/fsm.c fsm->jumpmatrix[fsm->state_count * fnlist[i].event + event 55 drivers/isdn/mISDN/fsm.c 
mISDN_FsmEvent(struct FsmInst *fi, int event, void *arg) event 60 drivers/isdn/mISDN/fsm.c (event >= fi->fsm->event_count)) { event 63 drivers/isdn/mISDN/fsm.c (long)fi->state, (long)fi->fsm->state_count, event, event 67 drivers/isdn/mISDN/fsm.c r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state]; event 72 drivers/isdn/mISDN/fsm.c fi->fsm->strEvent[event]); event 73 drivers/isdn/mISDN/fsm.c r(fi, event, arg); event 79 drivers/isdn/mISDN/fsm.c fi->fsm->strEvent[event]); event 103 drivers/isdn/mISDN/fsm.c mISDN_FsmEvent(ft->fi, ft->event, ft->arg); event 132 drivers/isdn/mISDN/fsm.c int millisec, int event, void *arg, int where) event 150 drivers/isdn/mISDN/fsm.c ft->event = event; event 160 drivers/isdn/mISDN/fsm.c int millisec, int event, void *arg, int where) event 171 drivers/isdn/mISDN/fsm.c ft->event = event; event 38 drivers/isdn/mISDN/fsm.h int state, event; event 45 drivers/isdn/mISDN/fsm.h int event; event 109 drivers/isdn/mISDN/layer1.c l1_reset(struct FsmInst *fi, int event, void *arg) event 115 drivers/isdn/mISDN/layer1.c l1_deact_cnf(struct FsmInst *fi, int event, void *arg) event 125 drivers/isdn/mISDN/layer1.c l1_deact_req_s(struct FsmInst *fi, int event, void *arg) event 135 drivers/isdn/mISDN/layer1.c l1_power_up_s(struct FsmInst *fi, int event, void *arg) event 147 drivers/isdn/mISDN/layer1.c l1_go_F5(struct FsmInst *fi, int event, void *arg) event 153 drivers/isdn/mISDN/layer1.c l1_go_F8(struct FsmInst *fi, int event, void *arg) event 159 drivers/isdn/mISDN/layer1.c l1_info2_ind(struct FsmInst *fi, int event, void *arg) event 168 drivers/isdn/mISDN/layer1.c l1_info4_ind(struct FsmInst *fi, int event, void *arg) event 185 drivers/isdn/mISDN/layer1.c l1_timer3(struct FsmInst *fi, int event, void *arg) event 202 drivers/isdn/mISDN/layer1.c l1_timer_act(struct FsmInst *fi, int event, void *arg) event 212 drivers/isdn/mISDN/layer1.c l1_timer_deact(struct FsmInst *fi, int event, void *arg) event 225 drivers/isdn/mISDN/layer1.c l1_activate_s(struct FsmInst *fi, int event, void *arg) event 236 drivers/isdn/mISDN/layer1.c l1_activate_no(struct FsmInst *fi, int event, void *arg) event 308 drivers/isdn/mISDN/layer1.c l1_event(struct layer1 *l1, u_int event) event 314 drivers/isdn/mISDN/layer1.c switch (event) { event 354 drivers/isdn/mISDN/layer1.c if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) { event 355 drivers/isdn/mISDN/layer1.c int val = event & HW_TIMER3_VMASK; event 366 drivers/isdn/mISDN/layer1.c __func__, event); event 278 drivers/isdn/mISDN/layer2.c l2_timeout(struct FsmInst *fi, int event, void *arg) event 288 drivers/isdn/mISDN/layer2.c l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203"); event 292 drivers/isdn/mISDN/layer2.c hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND; event 297 drivers/isdn/mISDN/layer2.c l2->ch.nr, event == EV_L2_T200 ? 
"T200" : "T203"); event 714 drivers/isdn/mISDN/layer2.c l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg) event 727 drivers/isdn/mISDN/layer2.c l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg) event 742 drivers/isdn/mISDN/layer2.c l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg) event 756 drivers/isdn/mISDN/layer2.c l2_go_st3(struct FsmInst *fi, int event, void *arg) event 763 drivers/isdn/mISDN/layer2.c l2_mdl_assign(struct FsmInst *fi, int event, void *arg) event 773 drivers/isdn/mISDN/layer2.c l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg) event 784 drivers/isdn/mISDN/layer2.c l2_queue_ui(struct FsmInst *fi, int event, void *arg) event 810 drivers/isdn/mISDN/layer2.c l2_send_ui(struct FsmInst *fi, int event, void *arg) event 820 drivers/isdn/mISDN/layer2.c l2_got_ui(struct FsmInst *fi, int event, void *arg) event 836 drivers/isdn/mISDN/layer2.c l2_establish(struct FsmInst *fi, int event, void *arg) event 847 drivers/isdn/mISDN/layer2.c l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg) event 859 drivers/isdn/mISDN/layer2.c l2_l3_reestablish(struct FsmInst *fi, int event, void *arg) event 871 drivers/isdn/mISDN/layer2.c l2_release(struct FsmInst *fi, int event, void *arg) event 881 drivers/isdn/mISDN/layer2.c l2_pend_rel(struct FsmInst *fi, int event, void *arg) event 891 drivers/isdn/mISDN/layer2.c l2_disconnect(struct FsmInst *fi, int event, void *arg) event 907 drivers/isdn/mISDN/layer2.c l2_start_multi(struct FsmInst *fi, int event, void *arg) event 927 drivers/isdn/mISDN/layer2.c l2_send_UA(struct FsmInst *fi, int event, void *arg) event 936 drivers/isdn/mISDN/layer2.c l2_send_DM(struct FsmInst *fi, int event, void *arg) event 945 drivers/isdn/mISDN/layer2.c l2_restart_multi(struct FsmInst *fi, int event, void *arg) event 980 drivers/isdn/mISDN/layer2.c l2_stop_multi(struct FsmInst *fi, int event, void *arg) event 998 drivers/isdn/mISDN/layer2.c l2_connected(struct FsmInst *fi, int event, void *arg) event 1005 drivers/isdn/mISDN/layer2.c l2_mdl_error_ua(fi, event, arg); event 1010 drivers/isdn/mISDN/layer2.c l2_disconnect(fi, event, NULL); event 1035 drivers/isdn/mISDN/layer2.c l2_released(struct FsmInst *fi, int event, void *arg) event 1041 drivers/isdn/mISDN/layer2.c l2_mdl_error_ua(fi, event, arg); event 1053 drivers/isdn/mISDN/layer2.c l2_reestablish(struct FsmInst *fi, int event, void *arg) event 1065 drivers/isdn/mISDN/layer2.c l2_st5_dm_release(struct FsmInst *fi, int event, void *arg) event 1085 drivers/isdn/mISDN/layer2.c l2_st6_dm_release(struct FsmInst *fi, int event, void *arg) event 1183 drivers/isdn/mISDN/layer2.c l2_st7_got_super(struct FsmInst *fi, int event, void *arg) event 1244 drivers/isdn/mISDN/layer2.c l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg) event 1256 drivers/isdn/mISDN/layer2.c l2_feed_i_pull(struct FsmInst *fi, int event, void *arg) event 1266 drivers/isdn/mISDN/layer2.c l2_feed_iqueue(struct FsmInst *fi, int event, void *arg) event 1275 drivers/isdn/mISDN/layer2.c l2_got_iframe(struct FsmInst *fi, int event, void *arg) event 1344 drivers/isdn/mISDN/layer2.c l2_got_tei(struct FsmInst *fi, int event, void *arg) event 1363 drivers/isdn/mISDN/layer2.c l2_st5_tout_200(struct FsmInst *fi, int event, void *arg) event 1390 drivers/isdn/mISDN/layer2.c l2_st6_tout_200(struct FsmInst *fi, int event, void *arg) event 1413 drivers/isdn/mISDN/layer2.c l2_st7_tout_200(struct FsmInst *fi, int event, void *arg) event 1430 drivers/isdn/mISDN/layer2.c l2_st8_tout_200(struct FsmInst *fi, int event, void 
*arg) event 1451 drivers/isdn/mISDN/layer2.c l2_st7_tout_203(struct FsmInst *fi, int event, void *arg) event 1466 drivers/isdn/mISDN/layer2.c l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) event 1516 drivers/isdn/mISDN/layer2.c l2_st8_got_super(struct FsmInst *fi, int event, void *arg) event 1570 drivers/isdn/mISDN/layer2.c l2_got_FRMR(struct FsmInst *fi, int event, void *arg) event 1587 drivers/isdn/mISDN/layer2.c l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg) event 1597 drivers/isdn/mISDN/layer2.c l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg) event 1608 drivers/isdn/mISDN/layer2.c l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg) event 1622 drivers/isdn/mISDN/layer2.c l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg) event 1634 drivers/isdn/mISDN/layer2.c l2_tei_remove(struct FsmInst *fi, int event, void *arg) event 1653 drivers/isdn/mISDN/layer2.c l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg) event 1667 drivers/isdn/mISDN/layer2.c l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg) event 1684 drivers/isdn/mISDN/layer2.c l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg) event 1698 drivers/isdn/mISDN/layer2.c l2_persistent_da(struct FsmInst *fi, int event, void *arg) event 1715 drivers/isdn/mISDN/layer2.c l2_set_own_busy(struct FsmInst *fi, int event, void *arg) event 1728 drivers/isdn/mISDN/layer2.c l2_clear_own_busy(struct FsmInst *fi, int event, void *arg) event 1741 drivers/isdn/mISDN/layer2.c l2_frame_error(struct FsmInst *fi, int event, void *arg) event 1749 drivers/isdn/mISDN/layer2.c l2_frame_error_reest(struct FsmInst *fi, int event, void *arg) event 90 drivers/isdn/mISDN/tei.c da_activate(struct FsmInst *fi, int event, void *arg) event 100 drivers/isdn/mISDN/tei.c da_deactivate_ind(struct FsmInst *fi, int event, void *arg) event 106 drivers/isdn/mISDN/tei.c da_deactivate(struct FsmInst *fi, int event, void *arg) event 130 drivers/isdn/mISDN/tei.c da_ui(struct FsmInst *fi, int event, void *arg) event 143 drivers/isdn/mISDN/tei.c da_timer(struct FsmInst *fi, int event, void *arg) event 456 drivers/isdn/mISDN/tei.c tei_id_request(struct FsmInst *fi, int event, void *arg) event 477 drivers/isdn/mISDN/tei.c tei_id_assign(struct FsmInst *fi, int event, void *arg) event 506 drivers/isdn/mISDN/tei.c tei_id_test_dup(struct FsmInst *fi, int event, void *arg) event 531 drivers/isdn/mISDN/tei.c tei_id_denied(struct FsmInst *fi, int event, void *arg) event 547 drivers/isdn/mISDN/tei.c tei_id_chk_req(struct FsmInst *fi, int event, void *arg) event 565 drivers/isdn/mISDN/tei.c tei_id_remove(struct FsmInst *fi, int event, void *arg) event 583 drivers/isdn/mISDN/tei.c tei_id_verify(struct FsmInst *fi, int event, void *arg) event 597 drivers/isdn/mISDN/tei.c tei_id_req_tout(struct FsmInst *fi, int event, void *arg) event 616 drivers/isdn/mISDN/tei.c tei_id_ver_tout(struct FsmInst *fi, int event, void *arg) event 660 drivers/isdn/mISDN/tei.c tei_assign_req(struct FsmInst *fi, int event, void *arg) event 680 drivers/isdn/mISDN/tei.c tei_id_chk_req_net(struct FsmInst *fi, int event, void *arg) event 695 drivers/isdn/mISDN/tei.c tei_id_chk_resp(struct FsmInst *fi, int event, void *arg) event 709 drivers/isdn/mISDN/tei.c tei_id_verify_net(struct FsmInst *fi, int event, void *arg) event 720 drivers/isdn/mISDN/tei.c tei_id_chk_req_net(fi, event, arg); event 724 drivers/isdn/mISDN/tei.c tei_id_ver_tout_net(struct FsmInst *fi, int event, void *arg) event 105 drivers/leds/led-triggers.c char *event = NULL; 
event 114 drivers/leds/led-triggers.c event = kasprintf(GFP_KERNEL, "TRIGGER=%s", name);
event 153 drivers/leds/led-triggers.c if (event) {
event 154 drivers/leds/led-triggers.c envp[0] = event;
event 159 drivers/leds/led-triggers.c kfree(event);
event 176 drivers/leds/led-triggers.c kfree(event);
event 418 drivers/leds/leds-pca9532.c data->idev->event = pca9532_event;
event 29 drivers/leds/trigger/ledtrig-backlight.c unsigned long event, void *data)
event 39 drivers/leds/trigger/ledtrig-backlight.c if (event != FB_EVENT_BLANK)
event 841 drivers/macintosh/adbhid.c input_dev->event = adbhid_kbd_event;
event 619 drivers/macintosh/mediabay.c if (state.event != mdev->ofdev.dev.power.power_state.event
event 620 drivers/macintosh/mediabay.c && (state.event & PM_EVENT_SLEEP)) {
event 635 drivers/macintosh/mediabay.c if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
event 69 drivers/macintosh/windfarm_core.c static inline void wf_notify(int event, void *param)
event 71 drivers/macintosh/windfarm_core.c blocking_notifier_call_chain(&wf_client_list, event, param);
event 632 drivers/macintosh/windfarm_pm112.c unsigned long event, void *data)
event 634 drivers/macintosh/windfarm_pm112.c switch (event) {
event 941 drivers/macintosh/windfarm_pm121.c unsigned long event, void *data)
event 943 drivers/macintosh/windfarm_pm121.c switch (event) {
event 752 drivers/macintosh/windfarm_pm72.c unsigned long event, void *data)
event 754 drivers/macintosh/windfarm_pm72.c switch (event) {
event 676 drivers/macintosh/windfarm_pm81.c unsigned long event, void *data)
event 678 drivers/macintosh/windfarm_pm81.c switch(event) {
event 609 drivers/macintosh/windfarm_pm91.c unsigned long event, void *data)
event 611 drivers/macintosh/windfarm_pm91.c switch(event) {
event 645 drivers/macintosh/windfarm_rm31.c unsigned long event, void *data)
event 647 drivers/macintosh/windfarm_rm31.c switch (event) {
event 39 drivers/md/dm-uevent.c static void dm_uevent_free(struct dm_uevent *event)
event 41 drivers/md/dm-uevent.c kmem_cache_free(_dm_event_cache, event);
event 46 drivers/md/dm-uevent.c struct dm_uevent *event;
event 48 drivers/md/dm-uevent.c event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
event 49 drivers/md/dm-uevent.c if (!event)
event 52 drivers/md/dm-uevent.c INIT_LIST_HEAD(&event->elist);
event 53 drivers/md/dm-uevent.c event->md = md;
event 55 drivers/md/dm-uevent.c return event;
event 65 drivers/md/dm-uevent.c struct dm_uevent *event;
event 67 drivers/md/dm-uevent.c event = dm_uevent_alloc(md);
event 68 drivers/md/dm-uevent.c if (!event) {
event 73 drivers/md/dm-uevent.c event->action = action;
event 75 drivers/md/dm-uevent.c if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
event 81 drivers/md/dm-uevent.c if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
event 87 drivers/md/dm-uevent.c if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
event 94 drivers/md/dm-uevent.c if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
event 99 drivers/md/dm-uevent.c if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
event 106 drivers/md/dm-uevent.c return event;
event 109 drivers/md/dm-uevent.c dm_uevent_free(event);
event 124 drivers/md/dm-uevent.c struct dm_uevent *event, *next;
event 126 drivers/md/dm-uevent.c list_for_each_entry_safe(event, next, events, elist) {
event 127 drivers/md/dm-uevent.c list_del_init(&event->elist);
event 133 drivers/md/dm-uevent.c if (dm_copy_name_and_uuid(event->md, event->name,
event 134 drivers/md/dm-uevent.c event->uuid)) {
event 140 drivers/md/dm-uevent.c if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) { event 146 drivers/md/dm-uevent.c if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) { event 152 drivers/md/dm-uevent.c r = kobject_uevent_env(kobj, event->action, event->ku_env.envp); event 156 drivers/md/dm-uevent.c dm_uevent_free(event); event 174 drivers/md/dm-uevent.c struct dm_uevent *event; event 181 drivers/md/dm-uevent.c event = dm_build_path_uevent(md, ti, event 185 drivers/md/dm-uevent.c if (IS_ERR(event)) event 188 drivers/md/dm-uevent.c dm_uevent_add(md, &event->elist); event 106 drivers/media/cec/cec-adap.c unsigned int ev_idx = new_ev->event - 1; event 120 drivers/media/cec/cec-adap.c if (new_ev->event == CEC_EVENT_LOST_MSGS && event 175 drivers/media/cec/cec-adap.c .event = is_high ? CEC_EVENT_PIN_CEC_HIGH : event 193 drivers/media/cec/cec-adap.c .event = is_high ? CEC_EVENT_PIN_HPD_HIGH : event 209 drivers/media/cec/cec-adap.c .event = is_high ? CEC_EVENT_PIN_5V_HIGH : event 231 drivers/media/cec/cec-adap.c .event = CEC_EVENT_LOST_MSGS, event 317 drivers/media/cec/cec-adap.c .event = CEC_EVENT_STATE_CHANGE, event 460 drivers/media/cec/cec-api.c ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH : event 540 drivers/media/cec/cec-api.c .event = CEC_EVENT_STATE_CHANGE, event 586 drivers/media/cec/cec-api.c ev.event = err ? CEC_EVENT_PIN_HPD_HIGH : event 594 drivers/media/cec/cec-api.c ev.event = err ? CEC_EVENT_PIN_5V_HIGH : event 65 drivers/media/common/siano/smsdvb-main.c enum SMS_DVB3_EVENTS event) { event 68 drivers/media/common/siano/smsdvb-main.c switch (event) { event 282 drivers/media/dvb-core/dvb_frontend.c struct dvb_frontend_event *event, int flags) event 308 drivers/media/dvb-core/dvb_frontend.c *event = events->events[events->eventr]; event 4274 drivers/media/pci/bt8xx/bttv-driver.c dprintk("%d: suspend %d\n", btv->c.nr, state.event); event 602 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct v4l2_event event = { event 607 drivers/media/pci/intel/ipu3/ipu3-cio2.c v4l2_event_queue(q->subdev.devnode, &event); event 499 drivers/media/pci/ttpci/av7110.c struct video_event event; event 509 drivers/media/pci/ttpci/av7110.c event.type = VIDEO_EVENT_SIZE_CHANGED; event 510 drivers/media/pci/ttpci/av7110.c event.u.size.w = av7110->video_size.w; event 511 drivers/media/pci/ttpci/av7110.c event.u.size.h = av7110->video_size.h; event 516 drivers/media/pci/ttpci/av7110.c event.u.size.aspect_ratio = VIDEO_FORMAT_16_9; event 521 drivers/media/pci/ttpci/av7110.c event.u.size.aspect_ratio = VIDEO_FORMAT_221_1; event 526 drivers/media/pci/ttpci/av7110.c event.u.size.aspect_ratio = VIDEO_FORMAT_4_3; event 534 drivers/media/pci/ttpci/av7110.c dvb_video_add_event(av7110, &event); event 867 drivers/media/pci/ttpci/av7110_av.c void dvb_video_add_event(struct av7110 *av7110, struct video_event *event) event 881 drivers/media/pci/ttpci/av7110_av.c memcpy(&events->events[events->eventw], event, sizeof(struct video_event)); event 890 drivers/media/pci/ttpci/av7110_av.c static int dvb_video_get_event (struct av7110 *av7110, struct video_event *event, int flags) event 912 drivers/media/pci/ttpci/av7110_av.c memcpy(event, &events->events[events->eventr], event 1103 drivers/media/pci/ttpci/av7110_av.c struct compat_video_event *event, int flags) event 1110 drivers/media/pci/ttpci/av7110_av.c *event = (struct compat_video_event) { event 21 drivers/media/pci/ttpci/av7110_av.h extern void dvb_video_add_event(struct av7110 *av7110, struct video_event *event); event 112 
drivers/media/platform/davinci/vpbe_display.c unsigned event = 0; event 120 drivers/media/platform/davinci/vpbe_display.c event |= VENC_SECOND_FIELD; event 122 drivers/media/platform/davinci/vpbe_display.c event |= VENC_FIRST_FIELD; event 124 drivers/media/platform/davinci/vpbe_display.c if (event == (last_event & ~VENC_END_OF_FRAME)) { event 132 drivers/media/platform/davinci/vpbe_display.c event |= VENC_END_OF_FRAME; event 133 drivers/media/platform/davinci/vpbe_display.c } else if (event == VENC_SECOND_FIELD) { event 135 drivers/media/platform/davinci/vpbe_display.c event |= VENC_END_OF_FRAME; event 137 drivers/media/platform/davinci/vpbe_display.c last_event = event; event 151 drivers/media/platform/davinci/vpbe_display.c (event & VENC_END_OF_FRAME)) { event 160 drivers/media/platform/davinci/vpbe_display.c if (event & VENC_FIRST_FIELD) event 356 drivers/media/platform/marvell-ccic/mmp-driver.c if (state.event != PM_EVENT_SUSPEND) event 1420 drivers/media/platform/omap3isp/ispccdc.c static int ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event) event 1424 drivers/media/platform/omap3isp/ispccdc.c switch ((ccdc->stopping & 3) | event) { event 1460 drivers/media/platform/omap3isp/ispccdc.c struct v4l2_event event; event 1465 drivers/media/platform/omap3isp/ispccdc.c memset(&event, 0, sizeof(event)); event 1466 drivers/media/platform/omap3isp/ispccdc.c event.type = V4L2_EVENT_FRAME_SYNC; event 1467 drivers/media/platform/omap3isp/ispccdc.c event.u.frame_sync.frame_sequence = atomic_read(&pipe->frame_number); event 1469 drivers/media/platform/omap3isp/ispccdc.c v4l2_event_queue(vdev, &event); event 460 drivers/media/platform/omap3isp/ispstat.c struct v4l2_event event; event 461 drivers/media/platform/omap3isp/ispstat.c struct omap3isp_stat_event_status *status = (void *)event.u.data; event 463 drivers/media/platform/omap3isp/ispstat.c memset(&event, 0, sizeof(event)); event 470 drivers/media/platform/omap3isp/ispstat.c event.type = stat->event_type; event 471 drivers/media/platform/omap3isp/ispstat.c v4l2_event_queue(vdev, &event); event 25 drivers/media/platform/qcom/venus/core.c static void venus_event_notify(struct venus_core *core, u32 event) event 29 drivers/media/platform/qcom/venus/core.c switch (event) { event 96 drivers/media/platform/qcom/venus/hfi.h void (*event_notify)(struct venus_core *core, u32 event); event 103 drivers/media/platform/qcom/venus/hfi.h void (*event_notify)(struct venus_inst *inst, u32 event, event 21 drivers/media/platform/qcom/venus/hfi_msgs.c struct hfi_event_data event = {0}; event 44 drivers/media/platform/qcom/venus/hfi_msgs.c event.event_type = pkt->event_data1; event 59 drivers/media/platform/qcom/venus/hfi_msgs.c event.width = frame_sz->width; event 60 drivers/media/platform/qcom/venus/hfi_msgs.c event.height = frame_sz->height; event 66 drivers/media/platform/qcom/venus/hfi_msgs.c event.profile = profile_level->profile; event 67 drivers/media/platform/qcom/venus/hfi_msgs.c event.level = profile_level->level; event 73 drivers/media/platform/qcom/venus/hfi_msgs.c event.bit_depth = pixel_depth->bit_depth; event 79 drivers/media/platform/qcom/venus/hfi_msgs.c event.pic_struct = pic_struct->progressive_only; event 85 drivers/media/platform/qcom/venus/hfi_msgs.c event.colour_space = colour_info->colour_space; event 90 drivers/media/platform/qcom/venus/hfi_msgs.c event.entropy_mode = *(u32 *)data_ptr; event 96 drivers/media/platform/qcom/venus/hfi_msgs.c event.buf_count = HFI_BUFREQ_COUNT_MIN(bufreq, ver); event 102 
drivers/media/platform/qcom/venus/hfi_msgs.c event.input_crop.left = crop->left; event 103 drivers/media/platform/qcom/venus/hfi_msgs.c event.input_crop.top = crop->top; event 104 drivers/media/platform/qcom/venus/hfi_msgs.c event.input_crop.width = crop->width; event 105 drivers/media/platform/qcom/venus/hfi_msgs.c event.input_crop.height = crop->height; event 115 drivers/media/platform/qcom/venus/hfi_msgs.c inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); event 122 drivers/media/platform/qcom/venus/hfi_msgs.c struct hfi_event_data event = {0}; event 128 drivers/media/platform/qcom/venus/hfi_msgs.c event.event_type = HFI_EVENT_RELEASE_BUFFER_REFERENCE; event 129 drivers/media/platform/qcom/venus/hfi_msgs.c event.packet_buffer = data->packet_buffer; event 130 drivers/media/platform/qcom/venus/hfi_msgs.c event.extradata_buffer = data->extradata_buffer; event 131 drivers/media/platform/qcom/venus/hfi_msgs.c event.tag = data->output_tag; event 134 drivers/media/platform/qcom/venus/hfi_msgs.c inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); event 137 drivers/media/platform/qcom/venus/hfi_msgs.c static void event_sys_error(struct venus_core *core, u32 event, event 146 drivers/media/platform/qcom/venus/hfi_msgs.c core->core_ops->event_notify(core, event); event 1215 drivers/media/platform/qcom/venus/vdec.c static void vdec_event_notify(struct venus_inst *inst, u32 event, event 1221 drivers/media/platform/qcom/venus/vdec.c switch (event) { event 1064 drivers/media/platform/qcom/venus/venc.c static void venc_event_notify(struct venus_inst *inst, u32 event, event 1069 drivers/media/platform/qcom/venus/venc.c if (event == EVT_SESSION_ERROR) { event 523 drivers/media/platform/ti-vpe/vpdma_priv.h u32 event; event 605 drivers/media/platform/ti-vpe/vpdma_priv.h return ctd->event & CTD_EVENT_MASK; event 278 drivers/media/rc/fintek-cir.c bool event = false; event 317 drivers/media/rc/fintek-cir.c event = true; event 327 drivers/media/rc/fintek-cir.c if (event) { event 125 drivers/media/rc/iguanair.c bool event = false; event 138 drivers/media/rc/iguanair.c event = true; event 141 drivers/media/rc/iguanair.c if (event) event 1260 drivers/media/rc/mceusb.c bool event = false; event 1301 drivers/media/rc/mceusb.c event = true; event 1336 drivers/media/rc/mceusb.c event = true; event 1354 drivers/media/rc/mceusb.c if (event) { event 37 drivers/media/rc/rc-core-priv.h int (*decode)(struct rc_dev *dev, struct ir_raw_event event); event 113 drivers/media/rc/ttusbir.c bool event = false; event 122 drivers/media/rc/ttusbir.c event = true; event 128 drivers/media/rc/ttusbir.c event = true; event 142 drivers/media/rc/ttusbir.c event = true; event 147 drivers/media/rc/ttusbir.c event = true; event 153 drivers/media/rc/ttusbir.c if (event) event 73 drivers/media/usb/dvb-usb/af9005-remote.c int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event, event 88 drivers/media/usb/dvb-usb/af9005-remote.c *event = d->last_event; event 90 drivers/media/usb/dvb-usb/af9005-remote.c *event); event 123 drivers/media/usb/dvb-usb/af9005-remote.c *event = rc_map_af9005_table[i].keycode; event 126 drivers/media/usb/dvb-usb/af9005-remote.c ("key pressed, event %x\n", *event); event 33 drivers/media/usb/dvb-usb/af9005.c u32 *event, int *state); event 823 drivers/media/usb/dvb-usb/af9005.c static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state) event 866 drivers/media/usb/dvb-usb/af9005.c ret = rc_decode(d, &st->data[6], len, event, state); event 871 
drivers/media/usb/dvb-usb/af9005.c deb_rc("rc_decode state %x event %x\n", *state, *event); event 873 drivers/media/usb/dvb-usb/af9005.c *event = d->last_event; event 3479 drivers/media/usb/dvb-usb/af9005.h u32 * event, int *state); event 392 drivers/media/usb/dvb-usb/az6027.c static int az6027_rc_query(struct dvb_usb_device *d, u32 *event, int *state) event 145 drivers/media/usb/dvb-usb/cinergyT2-core.c static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) event 166 drivers/media/usb/dvb-usb/cinergyT2-core.c *event = d->last_event; event 168 drivers/media/usb/dvb-usb/cinergyT2-core.c *event); event 179 drivers/media/usb/dvb-usb/cinergyT2-core.c dvb_usb_nec_rc_key_to_event(d, st->data, event, state); event 181 drivers/media/usb/dvb-usb/cinergyT2-core.c if (*event != d->last_event) event 374 drivers/media/usb/dvb-usb/dibusb-common.c int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) event 389 drivers/media/usb/dvb-usb/dibusb-common.c dvb_usb_nec_rc_key_to_event(d, buf, event, state); event 231 drivers/media/usb/dvb-usb/digitv.c static int digitv_rc_query(struct dvb_usb_device *d, u32 *event, int *state) event 237 drivers/media/usb/dvb-usb/digitv.c *event = 0; event 256 drivers/media/usb/dvb-usb/digitv.c *event = d->props.rc.legacy.rc_map_table[i].keycode; event 113 drivers/media/usb/dvb-usb/dvb-usb-remote.c u32 event; event 123 drivers/media/usb/dvb-usb/dvb-usb-remote.c if (d->props.rc.legacy.rc_query(d,&event,&state)) { event 134 drivers/media/usb/dvb-usb/dvb-usb-remote.c d->last_event = event; event 135 drivers/media/usb/dvb-usb/dvb-usb-remote.c input_event(d->input_dev, EV_KEY, event, 1); event 142 drivers/media/usb/dvb-usb/dvb-usb-remote.c input_event(d->input_dev, EV_KEY, event, 1); event 363 drivers/media/usb/dvb-usb/dvb-usb-remote.c u8 keybuf[5], u32 *event, int *state) event 367 drivers/media/usb/dvb-usb/dvb-usb-remote.c *event = 0; event 382 drivers/media/usb/dvb-usb/dvb-usb-remote.c *event = keymap[i].keycode; event 179 drivers/media/usb/dvb-usb/m920x.c static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state) event 202 drivers/media/usb/dvb-usb/m920x.c *event = d->props.rc.legacy.rc_map_table[i].keycode; event 72 drivers/media/usb/dvb-usb/nova-t-usb2.c static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state) event 107 drivers/media/usb/dvb-usb/nova-t-usb2.c *event = rc_map_haupp_table[i].keycode; event 37 drivers/media/usb/dvb-usb/opera1.c u32 event; event 370 drivers/media/usb/dvb-usb/opera1.c static int opera1_rc_query(struct dvb_usb_device *dev, u32 * event, int *state) event 394 drivers/media/usb/dvb-usb/opera1.c *event = opst->last_key_pressed; event 416 drivers/media/usb/dvb-usb/opera1.c *event = rc_map_opera1_table[i].keycode; event 257 drivers/media/usb/dvb-usb/vp702x.c static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state) event 281 drivers/media/usb/dvb-usb/vp702x.c *event = rc_map_vp702x_table[i].keycode; event 582 drivers/media/usb/siano/smsusb.c printk(KERN_INFO "%s Entering status %d.\n", __func__, msg.event); event 27 drivers/media/v4l2-core/v4l2-event.c static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) event 45 drivers/media/v4l2-core/v4l2-event.c kev->event.pending = fh->navailable; event 46 drivers/media/v4l2-core/v4l2-event.c *event = kev->event; event 47 drivers/media/v4l2-core/v4l2-event.c event->timestamp = ns_to_timespec(kev->ts); event 56 drivers/media/v4l2-core/v4l2-event.c int v4l2_event_dequeue(struct v4l2_fh *fh, struct 
v4l2_event *event, event 62 drivers/media/v4l2-core/v4l2-event.c return __v4l2_event_dequeue(fh, event); event 74 drivers/media/v4l2-core/v4l2-event.c ret = __v4l2_event_dequeue(fh, event); event 124 drivers/media/v4l2-core/v4l2-event.c sev->ops->replace(&kev->event, ev); event 130 drivers/media/v4l2-core/v4l2-event.c sev->ops->merge(&kev->event, &second_oldest->event); event 136 drivers/media/v4l2-core/v4l2-event.c kev->event.type = ev->type; event 138 drivers/media/v4l2-core/v4l2-event.c kev->event.u = ev->u; event 139 drivers/media/v4l2-core/v4l2-event.c kev->event.id = ev->id; event 141 drivers/media/v4l2-core/v4l2-event.c kev->event.sequence = fh->sequence; event 631 drivers/message/fusion/mptbase.c u8 event; event 640 drivers/message/fusion/mptbase.c event = le32_to_cpu(pEventReply->Event) & 0xFF; event 643 drivers/message/fusion/mptbase.c if (event != MPI_EVENT_EVENT_CHANGE) event 7244 drivers/message/fusion/mptbase.c u8 event; event 7247 drivers/message/fusion/mptbase.c event = le32_to_cpu(pEventReply->Event) & 0xFF; event 7250 drivers/message/fusion/mptbase.c switch(event) { event 7673 drivers/message/fusion/mptbase.c ioc->name, event, evStr)); event 7703 drivers/message/fusion/mptbase.c u8 event; event 7708 drivers/message/fusion/mptbase.c event = le32_to_cpu(pEventReply->Event) & 0xFF; event 7722 drivers/message/fusion/mptbase.c switch(event) { event 7747 drivers/message/fusion/mptbase.c if (ioc->events && (ioc->eventTypes & ( 1 << event))) { event 7752 drivers/message/fusion/mptbase.c ioc->events[idx].event = event; event 463 drivers/message/fusion/mptbase.h u32 event; /* Specified by define above */ event 555 drivers/message/fusion/mptctl.c u8 event; event 557 drivers/message/fusion/mptctl.c event = le32_to_cpu(pEvReply->Event) & 0xFF; event 568 drivers/message/fusion/mptctl.c if (event == 0x21) { event 588 drivers/message/fusion/mptctl.c if (ioc->events && (ioc->eventTypes & ( 1 << event))) { event 270 drivers/message/fusion/mptctl.h uint event; event 1370 drivers/message/fusion/mptfc.c u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; event 1378 drivers/message/fusion/mptfc.c ioc->name, event)); event 1384 drivers/message/fusion/mptfc.c switch (event) { event 1004 drivers/message/fusion/mptsas.c fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE; event 1020 drivers/message/fusion/mptsas.c fw_event->event = -1; event 1624 drivers/message/fusion/mptsas.c if (fw_event->event == -1) { event 1650 drivers/message/fusion/mptsas.c (fw_event->event & 0xFF))); event 1652 drivers/message/fusion/mptsas.c switch (fw_event->event) { event 4975 drivers/message/fusion/mptsas.c u32 event = le32_to_cpu(reply->Event); event 4988 drivers/message/fusion/mptsas.c switch (event) { event 5093 drivers/message/fusion/mptsas.c fw_event->event = event; event 111 drivers/message/fusion/mptsas.h u32 event; event 2434 drivers/message/fusion/mptscsih.c ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; event 2583 drivers/message/fusion/mptscsih.c u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; event 2587 drivers/message/fusion/mptscsih.c ioc->name, event)); event 2589 drivers/message/fusion/mptscsih.c if ((event == MPI_EVENT_IOC_BUS_RESET || event 2590 drivers/message/fusion/mptscsih.c event == MPI_EVENT_EXT_BUS_RESET) && event 1172 drivers/message/fusion/mptspi.c u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; event 1178 drivers/message/fusion/mptspi.c if (hd && event == MPI_EVENT_INTEGRATED_RAID) { event 381 drivers/mfd/ab3100-core.c u8 *event) event 387 drivers/mfd/ab3100-core.c memcpy(event, 
ab3100->startup_events, 3); event 128 drivers/mfd/abx500-core.c int abx500_event_registers_startup_state_get(struct device *dev, u8 *event) event 134 drivers/mfd/abx500-core.c return ops->event_registers_startup_state_get(dev, event); event 696 drivers/misc/cardreader/rtsx_usb.c __func__, message.event); event 178 drivers/misc/cb710/core.c if (state.event & PM_EVENT_SLEEP) event 45 drivers/misc/cs5535-mfgpt.c int event, int enable) event 59 drivers/misc/cs5535-mfgpt.c switch (event) { event 391 drivers/misc/cxl/file.c struct cxl_event *event, event 401 drivers/misc/cxl/file.c event->header.size += pl->data_size; event 402 drivers/misc/cxl/file.c if (event->header.size > CXL_READ_MIN_SIZE) { event 408 drivers/misc/cxl/file.c if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) { event 421 drivers/misc/cxl/file.c return event->header.size; event 429 drivers/misc/cxl/file.c struct cxl_event event; event 471 drivers/misc/cxl/file.c memset(&event, 0, sizeof(event)); event 472 drivers/misc/cxl/file.c event.header.process_element = ctx->pe; event 473 drivers/misc/cxl/file.c event.header.size = sizeof(struct cxl_event_header); event 478 drivers/misc/cxl/file.c event.header.type = CXL_EVENT_AFU_DRIVER; event 481 drivers/misc/cxl/file.c event.header.size += sizeof(struct cxl_event_afu_interrupt); event 482 drivers/misc/cxl/file.c event.header.type = CXL_EVENT_AFU_INTERRUPT; event 483 drivers/misc/cxl/file.c event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1; event 484 drivers/misc/cxl/file.c clear_bit(event.irq.irq - 1, ctx->irq_bitmap); event 489 drivers/misc/cxl/file.c event.header.size += sizeof(struct cxl_event_data_storage); event 490 drivers/misc/cxl/file.c event.header.type = CXL_EVENT_DATA_STORAGE; event 491 drivers/misc/cxl/file.c event.fault.addr = ctx->fault_addr; event 492 drivers/misc/cxl/file.c event.fault.dsisr = ctx->fault_dsisr; event 496 drivers/misc/cxl/file.c event.header.size += sizeof(struct cxl_event_afu_error); event 497 drivers/misc/cxl/file.c event.header.type = CXL_EVENT_AFU_ERROR; event 498 drivers/misc/cxl/file.c event.afu_error.error = ctx->afu_err; event 509 drivers/misc/cxl/file.c if (event.header.type == CXL_EVENT_AFU_DRIVER) event 510 drivers/misc/cxl/file.c return afu_driver_event_copy(ctx, buf, &event, pl); event 512 drivers/misc/cxl/file.c if (copy_to_user(buf, &event, event.header.size)) event 514 drivers/misc/cxl/file.c return event.header.size; event 46 drivers/misc/ibmasm/event.c struct ibmasm_event *event; event 53 drivers/misc/ibmasm/event.c event = &buffer->events[buffer->next_index]; event 54 drivers/misc/ibmasm/event.c memcpy_fromio(event->data, data, data_size); event 55 drivers/misc/ibmasm/event.c event->data_size = data_size; event 56 drivers/misc/ibmasm/event.c event->serial_number = buffer->next_serial_number; event 80 drivers/misc/ibmasm/event.c struct ibmasm_event *event; event 96 drivers/misc/ibmasm/event.c event = &buffer->events[index]; event 97 drivers/misc/ibmasm/event.c while (event->serial_number < reader->next_serial_number) { event 99 drivers/misc/ibmasm/event.c event = &buffer->events[index]; event 101 drivers/misc/ibmasm/event.c memcpy(reader->data, event->data, event->data_size); event 102 drivers/misc/ibmasm/event.c reader->data_size = event->data_size; event 103 drivers/misc/ibmasm/event.c reader->next_serial_number = event->serial_number + 1; event 107 drivers/misc/ibmasm/event.c return event->data_size; event 139 drivers/misc/ibmasm/event.c struct ibmasm_event *event; event 149 drivers/misc/ibmasm/event.c 
event = buffer->events; event 150 drivers/misc/ibmasm/event.c for (i=0; i<IBMASM_NUM_EVENTS; i++, event++) event 151 drivers/misc/ibmasm/event.c event->serial_number = 0; event 28 drivers/misc/mic/cosm_client/cosm_scif_client.c static int cosm_reboot_event(struct notifier_block *this, unsigned long event, event 34 drivers/misc/mic/cosm_client/cosm_scif_client.c event = (event == SYS_RESTART) ? SYSTEM_RESTART : event; event 36 drivers/misc/mic/cosm_client/cosm_scif_client.c __func__, __LINE__, event); event 38 drivers/misc/mic/cosm_client/cosm_scif_client.c msg.shutdown_status = event; event 28 drivers/misc/pvpanic.c pvpanic_send_event(unsigned int event) event 30 drivers/misc/pvpanic.c iowrite8(event, base); event 1054 drivers/misc/sgi-xp/xpc_main.c xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) event 1058 drivers/misc/sgi-xp/xpc_main.c switch (event) { event 1162 drivers/misc/sgi-xp/xpc_main.c xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) event 1165 drivers/misc/sgi-xp/xpc_main.c switch (event) { event 1196 drivers/misc/sgi-xp/xpc_main.c switch (event) { event 254 drivers/misc/vmw_vmci/vmci_context.c ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED; event 261 drivers/misc/vmw_vmci/vmci_context.c ev.msg.event_data.event, event 24 drivers/misc/vmw_vmci/vmci_event.c u32 event; event 91 drivers/misc/vmw_vmci/vmci_event.c subscriber_list = &subscriber_array[event_msg->event_data.event]; event 111 drivers/misc/vmw_vmci/vmci_event.c if (!VMCI_EVENT_VALID(event_msg->event_data.event)) event 129 drivers/misc/vmw_vmci/vmci_event.c int vmci_event_subscribe(u32 event, event 144 drivers/misc/vmw_vmci/vmci_event.c if (!VMCI_EVENT_VALID(event) || !callback) { event 146 drivers/misc/vmw_vmci/vmci_event.c __func__, event, callback, callback_data); event 155 drivers/misc/vmw_vmci/vmci_event.c sub->event = event; event 179 drivers/misc/vmw_vmci/vmci_event.c list_add_rcu(&sub->node, &subscriber_array[event]); event 152 drivers/misc/vmw_vmci/vmci_guest.c vm_context_id, ev_payload->context_id, event_data->event); event 157 drivers/misc/vmw_vmci/vmci_queue_pair.c wait_queue_head_t event; event 854 drivers/misc/vmw_vmci/vmci_queue_pair.c ev.msg.event_data.event = event 1467 drivers/misc/vmw_vmci/vmci_queue_pair.c ev.msg.event_data.event = attach ? 
event 2489 drivers/misc/vmw_vmci/vmci_queue_pair.c wake_up(&qpair->event);
event 2508 drivers/misc/vmw_vmci/vmci_queue_pair.c wait_event(qpair->event, generation != qpair->generation);
event 2718 drivers/misc/vmw_vmci/vmci_queue_pair.c init_waitqueue_head(&my_qpair->event);
event 395 drivers/mmc/host/atmel-mci.c #define atmci_test_and_clear_pending(host, event) \
event 396 drivers/mmc/host/atmel-mci.c test_and_clear_bit(event, &host->pending_events)
event 397 drivers/mmc/host/atmel-mci.c #define atmci_set_completed(host, event) \
event 398 drivers/mmc/host/atmel-mci.c set_bit(event, &host->completed_events)
event 399 drivers/mmc/host/atmel-mci.c #define atmci_set_pending(host, event) \
event 400 drivers/mmc/host/atmel-mci.c set_bit(event, &host->pending_events)
event 339 drivers/net/arcnet/arcdevice.h void arcnet_led_event(struct net_device *netdev, enum arcnet_led_event event);
event 196 drivers/net/arcnet/arcnet.c void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event)
event 202 drivers/net/arcnet/arcnet.c switch (event) {
event 3069 drivers/net/bonding/bond_main.c static int bond_master_netdev_event(unsigned long event,
event 3076 drivers/net/bonding/bond_main.c switch (event) {
event 3092 drivers/net/bonding/bond_main.c static int bond_slave_netdev_event(unsigned long event,
event 3114 drivers/net/bonding/bond_main.c switch (event) {
event 3194 drivers/net/bonding/bond_main.c call_netdevice_notifiers(event, slave->bond->dev);
event 3211 drivers/net/bonding/bond_main.c unsigned long event, void *ptr)
event 3216 drivers/net/bonding/bond_main.c __func__, netdev_cmd_to_name(event));
event 3224 drivers/net/bonding/bond_main.c ret = bond_master_netdev_event(event, event_dev);
event 3230 drivers/net/bonding/bond_main.c return bond_slave_netdev_event(event, event_dev);
event 22 drivers/net/can/led.c void can_led_event(struct net_device *netdev, enum can_led_event event)
event 26 drivers/net/can/led.c switch (event) {
event 155 drivers/net/can/usb/ems_usb.c u8 event;
event 119 drivers/net/dsa/mv88e6xxx/ptp.c static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
event 136 drivers/net/dsa/mv88e6xxx/ptp.c if (event == PTP_CLOCK_PPS) {
event 138 drivers/net/dsa/mv88e6xxx/ptp.c } else if (event == PTP_CLOCK_EXTTS) {
event 724 drivers/net/ethernet/alteon/acenic.c size = (sizeof(struct event) * EVT_RING_ENTRIES);
event 775 drivers/net/ethernet/alteon/acenic.c size = (sizeof(struct event) * EVT_RING_ENTRIES);
event 1190 drivers/net/ethernet/alteon/acenic.c memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
event 314 drivers/net/ethernet/alteon/acenic.h #define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event))
event 672 drivers/net/ethernet/alteon/acenic.h struct event *evt_ring;
event 436 drivers/net/ethernet/amd/xgbe/xgbe-main.c static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event,
event 445 drivers/net/ethernet/amd/xgbe/xgbe-main.c switch (event) {
event 1019 drivers/net/ethernet/aquantia/atlantic/aq_nic.c if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
event 2376 drivers/net/ethernet/broadcom/bcmsysport.c unsigned long event, void *ptr)
event 2380 drivers/net/ethernet/broadcom/bcmsysport.c switch (event) {
event 2802 drivers/net/ethernet/broadcom/bnx2.c bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
event 2808 drivers/net/ethernet/broadcom/bnx2.c new_link_state = sblk->status_attn_bits & event;
event 2809 drivers/net/ethernet/broadcom/bnx2.c old_link_state = sblk->status_attn_bits_ack & event;
event 2812 drivers/net/ethernet/broadcom/bnx2.c BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
event 2814 drivers/net/ethernet/broadcom/bnx2.c BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
event 3737 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
event 3742 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c if (event & DRV_STATUS_DCC_EVENT_MASK &&
event 3743 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c event & DRV_STATUS_OEM_EVENT_MASK) {
event 3744 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c BNX2X_ERR("Received simultaneous events %08x\n", event);
event 3748 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c if (event & DRV_STATUS_DCC_EVENT_MASK) {
event 3756 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
event 3758 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
event 3775 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
event 3779 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
event 3782 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
event 3787 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c if (event)
event 1366 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
event 1378 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c if (event == STATS_EVENT_UPDATE)
event 1382 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c "Unlikely stats' lock contention [event %d]\n", event);
event 1385 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c event);
event 1390 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c bnx2x_stats_stm[state][event].action(bp);
event 1391 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c bp->stats_state = bnx2x_stats_stm[state][event].next_state;
event 1395 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
event 1397 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c state, event, bp->stats_state);
event 546 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
event 1506 drivers/net/ethernet/broadcom/bnxt/bnxt.c u8 *event)
event 1538 drivers/net/ethernet/broadcom/bnxt/bnxt.c *event |= BNXT_AGG_EVENT;
event 1551 drivers/net/ethernet/broadcom/bnxt/bnxt.c *event |= BNXT_AGG_EVENT;
event 1672 drivers/net/ethernet/broadcom/bnxt/bnxt.c u32 *raw_cons, u8 *event)
event 1714 drivers/net/ethernet/broadcom/bnxt/bnxt.c *event |= BNXT_RX_EVENT;
event 1720 drivers/net/ethernet/broadcom/bnxt/bnxt.c (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
event 1730 drivers/net/ethernet/broadcom/bnxt/bnxt.c *event |= BNXT_RX_EVENT;
event 1756 drivers/net/ethernet/broadcom/bnxt/bnxt.c *event |= BNXT_AGG_EVENT;
event 1758 drivers/net/ethernet/broadcom/bnxt/bnxt.c *event |= BNXT_RX_EVENT;
event 1784 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
event 1880 drivers/net/ethernet/broadcom/bnxt/bnxt.c u32 *raw_cons, u8 *event)
event 1911 drivers/net/ethernet/broadcom/bnxt/bnxt.c return bnxt_rx_pkt(bp, cpr, raw_cons, event);
event 2159 drivers/net/ethernet/broadcom/bnxt/bnxt.c u8 event = 0;
event 2189 drivers/net/ethernet/broadcom/bnxt/bnxt.c rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
event 2192 drivers/net/ethernet/broadcom/bnxt/bnxt.c &event);
event 2220 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (event & BNXT_REDIRECT_EVENT)
event 2223 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (event & BNXT_TX_EVENT) {
event 2235 drivers/net/ethernet/broadcom/bnxt/bnxt.c bnapi->events |= event;
event 2285 drivers/net/ethernet/broadcom/bnxt/bnxt.c u8 event = 0;
event 2309 drivers/net/ethernet/broadcom/bnxt/bnxt.c rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
event 2331 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (event & BNXT_AGG_EVENT)
event 85 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c unsigned long event;
event 90 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c event = fw_reporter_ctx->sp_event;
event 91 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
event 93 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
event 186 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
event 191 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c fw_reporter_ctx.sp_event = event;
event 192 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c switch (event) {
event 59 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
event 112 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
event 152 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c *event &= ~BNXT_RX_EVENT;
event 170 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c *event = BNXT_TX_EVENT;
event 199 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c *event |= BNXT_REDIRECT_EVENT;
event 19 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h u8 *event);
event 5651 drivers/net/ethernet/broadcom/cnic.c static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
event 5673 drivers/net/ethernet/broadcom/cnic.c ulp_ops->indicate_netevent(ctx, event, vlan_id);
event 5680 drivers/net/ethernet/broadcom/cnic.c static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
event 5689 drivers/net/ethernet/broadcom/cnic.c if (!dev && event == NETDEV_REGISTER) {
event 5702 drivers/net/ethernet/broadcom/cnic.c else if (event == NETDEV_UNREGISTER)
event 5705 drivers/net/ethernet/broadcom/cnic.c if (event == NETDEV_UP) {
event 5714 drivers/net/ethernet/broadcom/cnic.c cnic_rcv_netevent(cp, event, 0);
event 5716 drivers/net/ethernet/broadcom/cnic.c if (event == NETDEV_GOING_DOWN) {
event 5720 drivers/net/ethernet/broadcom/cnic.c } else if (event == NETDEV_UNREGISTER) {
event 5739 drivers/net/ethernet/broadcom/cnic.c cnic_rcv_netevent(dev->cnic_priv, event, vid);
event 371 drivers/net/ethernet/broadcom/cnic_if.h void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
event 914 drivers/net/ethernet/broadcom/tg3.c static int tg3_ape_send_event(struct tg3 *tp, u32 event)
event 933 drivers/net/ethernet/broadcom/tg3.c event | APE_EVENT_STATUS_EVENT_PENDING);
event 943 drivers/net/ethernet/broadcom/tg3.c u32 event;
event 965 drivers/net/ethernet/broadcom/tg3.c event = APE_EVENT_STATUS_STATE_START;
event 978 drivers/net/ethernet/broadcom/tg3.c event = APE_EVENT_STATUS_STATE_UNLOAD;
event 984 drivers/net/ethernet/broadcom/tg3.c event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
event 986 drivers/net/ethernet/broadcom/tg3.c tg3_ape_send_event(tp, event);
event 217 drivers/net/ethernet/brocade/bna/bfa_cee.c bfa_cee_notify(void *arg, enum
bfa_ioc_event event) event 222 drivers/net/ethernet/brocade/bna/bfa_cee.c switch (event) { event 21 drivers/net/ethernet/brocade/bna/bfa_cs.h typedef void (*bfa_sm_t)(void *sm, int event); event 32 drivers/net/ethernet/brocade/bna/bfa_cs.h typedef void (*bfa_fsm_t)(void *fsm, int event); event 40 drivers/net/ethernet/brocade/bna/bfa_cs.h static void oc ## _sm_ ## st(otype * fsm, etype event); \ event 213 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) event 215 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 221 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 234 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) event 236 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 250 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 264 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) event 266 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 276 drivers/net/ethernet/brocade/bna/bfa_ioc.c if (event != IOC_E_PFFAILED) event 298 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 313 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) event 315 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 328 drivers/net/ethernet/brocade/bna/bfa_ioc.c if (event != IOC_E_PFFAILED) event 341 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 354 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) event 356 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 377 drivers/net/ethernet/brocade/bna/bfa_ioc.c if (event != IOC_E_PFFAILED) event 382 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 394 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) event 396 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 416 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 428 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) event 430 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 445 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 456 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) event 458 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 470 drivers/net/ethernet/brocade/bna/bfa_ioc.c if (event != IOC_E_PFFAILED) event 492 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 503 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) event 505 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 524 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 535 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) event 537 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 552 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 568 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) event 570 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 579 drivers/net/ethernet/brocade/bna/bfa_ioc.c 
bfa_sm_fault(event); event 593 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) event 597 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 632 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 651 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) event 655 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 672 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 685 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) event 689 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 712 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 727 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) event 731 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 750 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 770 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) event 774 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 787 drivers/net/ethernet/brocade/bna/bfa_ioc.c if (event == IOCPF_E_TIMEOUT) event 799 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 810 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event) event 812 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 826 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 840 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) event 844 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 863 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 875 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) event 879 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 895 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 908 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) event 912 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 923 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 936 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) event 940 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 969 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 980 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) event 984 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 995 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 1016 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) event 1020 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 1053 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 1064 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) event 1066 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 1072 
drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_sm_fault(event); event 1080 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event) event 1085 drivers/net/ethernet/brocade/bna/bfa_ioc.c notify->cbfn(notify->cbarg, event); event 3036 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_flash_notify(void *cbarg, enum bfa_ioc_event event) event 3040 drivers/net/ethernet/brocade/bna/bfa_ioc.c switch (event) { event 69 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) event 71 drivers/net/ethernet/brocade/bna/bfa_msgq.c switch (event) { event 86 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_sm_fault(event); event 97 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) event 99 drivers/net/ethernet/brocade/bna/bfa_msgq.c switch (event) { event 118 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_sm_fault(event); event 128 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) event 130 drivers/net/ethernet/brocade/bna/bfa_msgq.c switch (event) { event 141 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_sm_fault(event); event 152 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) event 154 drivers/net/ethernet/brocade/bna/bfa_msgq.c switch (event) { event 173 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_sm_fault(event); event 338 drivers/net/ethernet/brocade/bna/bfa_msgq.c rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event) event 340 drivers/net/ethernet/brocade/bna/bfa_msgq.c switch (event) { event 351 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_sm_fault(event); event 362 drivers/net/ethernet/brocade/bna/bfa_msgq.c rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) event 364 drivers/net/ethernet/brocade/bna/bfa_msgq.c switch (event) { event 375 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_sm_fault(event); event 385 drivers/net/ethernet/brocade/bna/bfa_msgq.c rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event) event 387 drivers/net/ethernet/brocade/bna/bfa_msgq.c switch (event) { event 398 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_sm_fault(event); event 410 drivers/net/ethernet/brocade/bna/bfa_msgq.c rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) event 412 drivers/net/ethernet/brocade/bna/bfa_msgq.c switch (event) { event 431 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_sm_fault(event); event 553 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event) event 557 drivers/net/ethernet/brocade/bna/bfa_msgq.c switch (event) { event 515 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ethport_event event) event 517 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 536 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 547 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ethport_event event) event 549 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 564 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 575 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ethport_event event) event 577 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 608 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 624 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ethport_event event) event 
626 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 650 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 661 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ethport_event event) event 663 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 679 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 690 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ethport_event event) event 692 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 716 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 915 drivers/net/ethernet/brocade/bna/bna_enet.c bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event) event 917 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 946 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 958 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_enet_event event) event 960 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 990 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1002 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_enet_event event) event 1004 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1011 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1027 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_enet_event event) event 1029 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1050 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1061 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_enet_event event) event 1063 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1101 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1114 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_enet_event event) event 1116 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1128 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1140 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_enet_event event) event 1142 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1153 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1389 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ioceth_event event) event 1391 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1411 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1426 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ioceth_event event) event 1428 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1448 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1460 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ioceth_event event) event 1462 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1477 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1490 drivers/net/ethernet/brocade/bna/bna_enet.c bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event) event 1492 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1505 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1516 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ioceth_event event) event 1518 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1531 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1544 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ioceth_event event) event 1546 
drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1561 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1572 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ioceth_event event) event 1574 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1586 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 1598 drivers/net/ethernet/brocade/bna/bna_enet.c enum bna_ioceth_event event) event 1600 drivers/net/ethernet/brocade/bna/bna_enet.c switch (event) { event 1615 drivers/net/ethernet/brocade/bna/bna_enet.c bfa_sm_fault(event); event 68 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event) event 70 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 88 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 102 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event) event 104 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 128 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 140 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event) event 142 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 154 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 164 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event) event 166 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 174 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1332 drivers/net/ethernet/brocade/bna/bna_tx_rx.c enum bna_rx_event event) event 1334 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1348 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1364 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event) event 1366 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1378 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1384 drivers/net/ethernet/brocade/bna/bna_tx_rx.c enum bna_rx_event event) event 1386 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1400 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1417 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event) event 1419 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1438 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1450 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event) event 1452 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1463 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1481 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) event 1483 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1499 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1505 drivers/net/ethernet/brocade/bna/bna_tx_rx.c enum bna_rx_event event) event 1507 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1524 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1535 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rx_sm_cleanup_wait(struct 
bna_rx *rx, enum bna_rx_event event) event 1537 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1548 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1559 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event) event 1561 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1581 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 1591 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event) event 1593 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 1607 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 2806 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) event 2808 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 2826 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 2837 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) event 2839 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 2863 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 2882 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) event 2884 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 2902 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 2912 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) event 2914 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 2934 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 2944 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) event 2946 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 2957 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 2969 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) event 2971 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 2990 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 3001 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) event 3003 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 3021 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 3031 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) event 3033 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 3051 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 3061 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) event 3063 drivers/net/ethernet/brocade/bna/bna_tx_rx.c switch (event) { event 3081 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfa_sm_fault(event); event 608 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c unsigned long event, void *ptr) event 616 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c switch (event) { event 164 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port) event 171 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c client->event_handler(tdev, 
event, port); event 962 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int nb_callback(struct notifier_block *self, unsigned long event, event 965 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c switch (event) { event 67 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port); event 90 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port); event 1986 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static int netevent_cb(struct notifier_block *nb, unsigned long event, event 1989 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c switch (event) { event 2277 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c unsigned long event, void *data) event 2290 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c switch (event) { event 2311 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c switch (event) { event 1849 drivers/net/ethernet/dec/tulip/tulip_core.c if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) { event 225 drivers/net/ethernet/emulex/benet/be_hw.h u8 event; event 889 drivers/net/ethernet/emulex/benet/be_main.c SET_TX_WRB_HDR_BITS(event, hdr, event 78 drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c struct ptp_clock_event event; event 95 drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c event.type = PTP_CLOCK_PPS; event 96 drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c ptp_clock_event(ptp_qoriq->clock, &event); event 539 drivers/net/ethernet/freescale/fec_ptp.c struct ptp_clock_event event; event 555 drivers/net/ethernet/freescale/fec_ptp.c event.type = PTP_CLOCK_PPS; event 556 drivers/net/ethernet/freescale/fec_ptp.c ptp_clock_event(fep->ptp_clock, &event); event 1227 drivers/net/ethernet/freescale/fman/fman.c u32 event, mask, force; event 1231 drivers/net/ethernet/freescale/fman/fman.c event = ioread32be(&bmi_rg->fmbm_ievr); event 1233 drivers/net/ethernet/freescale/fman/fman.c event &= mask; event 1236 drivers/net/ethernet/freescale/fman/fman.c if (force & event) event 1237 drivers/net/ethernet/freescale/fman/fman.c iowrite32be(force & ~event, &bmi_rg->fmbm_ifr); event 1239 drivers/net/ethernet/freescale/fman/fman.c iowrite32be(event, &bmi_rg->fmbm_ievr); event 1241 drivers/net/ethernet/freescale/fman/fman.c if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC) event 1243 drivers/net/ethernet/freescale/fman/fman.c if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC) event 1245 drivers/net/ethernet/freescale/fman/fman.c if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC) event 1247 drivers/net/ethernet/freescale/fman/fman.c if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC) event 1255 drivers/net/ethernet/freescale/fman/fman.c u32 event, mask, force; event 1259 drivers/net/ethernet/freescale/fman/fman.c event = ioread32be(&qmi_rg->fmqm_eie); event 1261 drivers/net/ethernet/freescale/fman/fman.c event &= mask; event 1265 drivers/net/ethernet/freescale/fman/fman.c if (force & event) event 1266 drivers/net/ethernet/freescale/fman/fman.c iowrite32be(force & ~event, &qmi_rg->fmqm_eif); event 1268 drivers/net/ethernet/freescale/fman/fman.c iowrite32be(event, &qmi_rg->fmqm_eie); event 1270 drivers/net/ethernet/freescale/fman/fman.c if (event & QMI_ERR_INTR_EN_DOUBLE_ECC) event 1272 drivers/net/ethernet/freescale/fman/fman.c if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF) event 1335 drivers/net/ethernet/freescale/fman/fman.c u32 event; event 1339 drivers/net/ethernet/freescale/fman/fman.c event = ioread32be(&fpm_rg->fmfp_ee); event 1341 drivers/net/ethernet/freescale/fman/fman.c 
iowrite32be(event, &fpm_rg->fmfp_ee); event 1343 drivers/net/ethernet/freescale/fman/fman.c if ((event & FPM_EV_MASK_DOUBLE_ECC) && event 1344 drivers/net/ethernet/freescale/fman/fman.c (event & FPM_EV_MASK_DOUBLE_ECC_EN)) event 1346 drivers/net/ethernet/freescale/fman/fman.c if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN)) event 1348 drivers/net/ethernet/freescale/fman/fman.c if ((event & FPM_EV_MASK_SINGLE_ECC) && event 1349 drivers/net/ethernet/freescale/fman/fman.c (event & FPM_EV_MASK_SINGLE_ECC_EN)) event 1357 drivers/net/ethernet/freescale/fman/fman.c u32 event, mask; event 1361 drivers/net/ethernet/freescale/fman/fman.c event = ioread32be(&fpm_rg->fm_rcr); event 1365 drivers/net/ethernet/freescale/fman/fman.c iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr); event 1367 drivers/net/ethernet/freescale/fman/fman.c if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC)) event 1375 drivers/net/ethernet/freescale/fman/fman.c u32 event, mask, force; event 1379 drivers/net/ethernet/freescale/fman/fman.c event = ioread32be(&qmi_rg->fmqm_ie); event 1381 drivers/net/ethernet/freescale/fman/fman.c event &= mask; event 1384 drivers/net/ethernet/freescale/fman/fman.c if (force & event) event 1385 drivers/net/ethernet/freescale/fman/fman.c iowrite32be(force & ~event, &qmi_rg->fmqm_if); event 1387 drivers/net/ethernet/freescale/fman/fman.c iowrite32be(event, &qmi_rg->fmqm_ie); event 1389 drivers/net/ethernet/freescale/fman/fman.c if (event & QMI_INTR_EN_SINGLE_ECC) event 1524 drivers/net/ethernet/freescale/fman/fman.c int event; event 1529 drivers/net/ethernet/freescale/fman/fman.c event = FMAN_EV_ERR_MAC0 + mod_id; event 1531 drivers/net/ethernet/freescale/fman/fman.c event = FMAN_EV_MAC0 + mod_id; event 1535 drivers/net/ethernet/freescale/fman/fman.c event = FMAN_EV_CNT; event 1537 drivers/net/ethernet/freescale/fman/fman.c event = (FMAN_EV_FMAN_CTRL_0 + mod_id); event 1540 drivers/net/ethernet/freescale/fman/fman.c event = FMAN_EV_CNT; event 1543 drivers/net/ethernet/freescale/fman/fman.c event = FMAN_EV_CNT; event 1547 drivers/net/ethernet/freescale/fman/fman.c return event; event 2077 drivers/net/ethernet/freescale/fman/fman.c int event = 0; event 2079 drivers/net/ethernet/freescale/fman/fman.c event = get_module_event(module, mod_id, intr_type); event 2080 drivers/net/ethernet/freescale/fman/fman.c WARN_ON(event >= FMAN_EV_CNT); event 2083 drivers/net/ethernet/freescale/fman/fman.c fman->intr_mng[event].isr_cb = isr_cb; event 2084 drivers/net/ethernet/freescale/fman/fman.c fman->intr_mng[event].src_handle = src_arg; event 2102 drivers/net/ethernet/freescale/fman/fman.c int event = 0; event 2104 drivers/net/ethernet/freescale/fman/fman.c event = get_module_event(module, mod_id, intr_type); event 2105 drivers/net/ethernet/freescale/fman/fman.c WARN_ON(event >= FMAN_EV_CNT); event 2107 drivers/net/ethernet/freescale/fman/fman.c fman->intr_mng[event].isr_cb = NULL; event 2108 drivers/net/ethernet/freescale/fman/fman.c fman->intr_mng[event].src_handle = NULL; event 687 drivers/net/ethernet/freescale/fman/fman_dtsec.c u32 event; event 690 drivers/net/ethernet/freescale/fman/fman_dtsec.c event = ioread32be(&regs->ievent) & event 693 drivers/net/ethernet/freescale/fman/fman_dtsec.c event &= ioread32be(&regs->imask); event 695 drivers/net/ethernet/freescale/fman/fman_dtsec.c iowrite32be(event, &regs->ievent); event 697 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_BREN) event 699 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event &
DTSEC_IMASK_RXCEN) event 701 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_GTSCEN) event 704 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_BTEN) event 706 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_TXCEN) event 708 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_TXEEN) event 710 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_LCEN) event 712 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_CRLEN) event 714 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_XFUNEN) { event 798 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_MAGEN) event 800 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_GRSCEN) event 803 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_TDPEEN) event 805 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event & DTSEC_IMASK_RDPEEN) event 809 drivers/net/ethernet/freescale/fman/fman_dtsec.c WARN_ON(event & DTSEC_IMASK_ABRTEN); event 810 drivers/net/ethernet/freescale/fman/fman_dtsec.c WARN_ON(event & DTSEC_IMASK_IFERREN); event 817 drivers/net/ethernet/freescale/fman/fman_dtsec.c u32 event; event 820 drivers/net/ethernet/freescale/fman/fman_dtsec.c event = ioread32be(&regs->tmr_pevent); event 821 drivers/net/ethernet/freescale/fman/fman_dtsec.c event &= ioread32be(&regs->tmr_pemask); event 823 drivers/net/ethernet/freescale/fman/fman_dtsec.c if (event) { event 824 drivers/net/ethernet/freescale/fman/fman_dtsec.c iowrite32be(event, &regs->tmr_pevent); event 825 drivers/net/ethernet/freescale/fman/fman_dtsec.c WARN_ON(event & TMR_PEVENT_TSRE); event 644 drivers/net/ethernet/freescale/fman/fman_memac.c u32 event, imask; event 646 drivers/net/ethernet/freescale/fman/fman_memac.c event = ioread32be(&regs->ievent); event 654 drivers/net/ethernet/freescale/fman/fman_memac.c event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16); event 656 drivers/net/ethernet/freescale/fman/fman_memac.c iowrite32be(event, &regs->ievent); event 658 drivers/net/ethernet/freescale/fman/fman_memac.c if (event & MEMAC_IEVNT_TS_ECC_ER) event 660 drivers/net/ethernet/freescale/fman/fman_memac.c if (event & MEMAC_IEVNT_TX_ECC_ER) event 662 drivers/net/ethernet/freescale/fman/fman_memac.c if (event & MEMAC_IEVNT_RX_ECC_ER) event 670 drivers/net/ethernet/freescale/fman/fman_memac.c u32 event, imask; event 672 drivers/net/ethernet/freescale/fman/fman_memac.c event = ioread32be(&regs->ievent); event 680 drivers/net/ethernet/freescale/fman/fman_memac.c event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16); event 682 drivers/net/ethernet/freescale/fman/fman_memac.c iowrite32be(event, &regs->ievent); event 684 drivers/net/ethernet/freescale/fman/fman_memac.c if (event & MEMAC_IEVNT_MGI) event 360 drivers/net/ethernet/freescale/fman/fman_tgec.c u32 event; event 363 drivers/net/ethernet/freescale/fman/fman_tgec.c event = ioread32be(&regs->ievent) & event 367 drivers/net/ethernet/freescale/fman/fman_tgec.c event &= ioread32be(&regs->imask); event 369 drivers/net/ethernet/freescale/fman/fman_tgec.c iowrite32be(event, &regs->ievent); event 371 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_REM_FAULT) event 373 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_LOC_FAULT) event 375 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_TX_ECC_ER) event 377 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_TX_FIFO_UNFL) event 379
drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_TX_FIFO_OVFL) event 381 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_TX_ER) event 383 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_RX_FIFO_OVFL) event 385 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_RX_ECC_ER) event 387 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_RX_JAB_FRM) event 389 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_RX_OVRSZ_FRM) event 391 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_RX_RUNT_FRM) event 393 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_RX_FRAG_FRM) event 395 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_RX_LEN_ER) event 397 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_RX_CRC_ER) event 399 drivers/net/ethernet/freescale/fman/fman_tgec.c if (event & TGEC_IMASK_RX_ALIGN_ER) event 111 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c enum hinic_aeq_type event, void *handle, event 115 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; event 128 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c enum hinic_aeq_type event) event 130 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; event 148 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c enum hinic_ceq_type event, void *handle, event 151 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; event 164 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c enum hinic_ceq_type event) event 166 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; event 223 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c enum hinic_aeq_type event; event 240 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); event 241 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c if (event >= HINIC_MAX_AEQ_EVENTS) { event 242 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event); event 247 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c hwe_cb = &aeqs->hwe_cb[event]; event 261 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c event); event 285 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c enum hinic_ceq_type event; event 288 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c event = CEQE_TYPE(ceqe); event 289 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c if (event >= HINIC_MAX_CEQ_EVENTS) { event 290 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event); event 294 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c ceq_cb = &ceqs->ceq_cb[event]; event 303 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event); event 230 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h enum hinic_aeq_type event, void *handle, event 235 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h enum hinic_aeq_type event); event 238 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h enum hinic_ceq_type event, void *handle, event 242 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h enum hinic_ceq_type event); event 9219 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_arq_event_info event; event 9273 drivers/net/ethernet/intel/i40e/i40e_main.c event.buf_len = I40E_MAX_AQ_BUF_SIZE; event 
9274 drivers/net/ethernet/intel/i40e/i40e_main.c event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); event 9275 drivers/net/ethernet/intel/i40e/i40e_main.c if (!event.msg_buf) event 9279 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_clean_arq_element(hw, &event, &pending); event 9287 drivers/net/ethernet/intel/i40e/i40e_main.c opcode = le16_to_cpu(event.desc.opcode); event 9291 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_handle_link_event(pf, &event); event 9295 drivers/net/ethernet/intel/i40e/i40e_main.c le16_to_cpu(event.desc.retval), event 9296 drivers/net/ethernet/intel/i40e/i40e_main.c le32_to_cpu(event.desc.cookie_high), event 9297 drivers/net/ethernet/intel/i40e/i40e_main.c le32_to_cpu(event.desc.cookie_low), event 9298 drivers/net/ethernet/intel/i40e/i40e_main.c event.msg_buf, event 9299 drivers/net/ethernet/intel/i40e/i40e_main.c event.msg_len); event 9305 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_handle_lldp_event(pf, &event); event 9311 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_handle_lan_overflow_event(pf, &event); event 9340 drivers/net/ethernet/intel/i40e/i40e_main.c kfree(event.msg_buf); event 10275 drivers/net/ethernet/intel/i40e/i40e_main.c u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> event 10282 drivers/net/ethernet/intel/i40e/i40e_main.c event, queue, pf_num, vf_num); event 10290 drivers/net/ethernet/intel/i40e/i40e_main.c u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> event 10297 drivers/net/ethernet/intel/i40e/i40e_main.c event, queue, func); event 56 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; event 102 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; event 130 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; event 4395 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; event 2272 drivers/net/ethernet/intel/iavf/iavf_main.c struct iavf_arq_event_info event; event 2281 drivers/net/ethernet/intel/iavf/iavf_main.c event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event 2282 drivers/net/ethernet/intel/iavf/iavf_main.c event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); event 2283 drivers/net/ethernet/intel/iavf/iavf_main.c if (!event.msg_buf) event 2287 drivers/net/ethernet/intel/iavf/iavf_main.c ret = iavf_clean_arq_element(hw, &event, &pending); event 2288 drivers/net/ethernet/intel/iavf/iavf_main.c v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); event 2289 drivers/net/ethernet/intel/iavf/iavf_main.c v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); event 2294 drivers/net/ethernet/intel/iavf/iavf_main.c iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event 2295 drivers/net/ethernet/intel/iavf/iavf_main.c event.msg_len); event 2297 drivers/net/ethernet/intel/iavf/iavf_main.c memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); event 2343 drivers/net/ethernet/intel/iavf/iavf_main.c kfree(event.msg_buf); event 70 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c struct iavf_arq_event_info event; event 74 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event 75 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); event 76 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c if (!event.msg_buf) { event 82 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c err = iavf_clean_arq_element(hw, &event, NULL); event 89 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c 
(enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); event 95 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); event 106 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c pf_vvi = (struct virtchnl_version_info *)event.msg_buf; event 115 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c kfree(event.msg_buf); event 192 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c struct iavf_arq_event_info event; event 199 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c event.buf_len = len; event 200 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); event 201 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c if (!event.msg_buf) { event 210 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c err = iavf_clean_arq_element(hw, &event, NULL); event 214 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); event 219 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); event 220 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); event 229 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c kfree(event.msg_buf); event 1192 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c switch (vpe->event) { event 1241 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c vpe->event); event 623 drivers/net/ethernet/intel/ice/ice_dcb_lib.c struct ice_rq_event_info *event) event 644 drivers/net/ethernet/intel/ice/ice_dcb_lib.c mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; event 24 drivers/net/ethernet/intel/ice/ice_dcb_lib.h struct ice_rq_event_info *event); event 60 drivers/net/ethernet/intel/ice/ice_dcb_lib.h #define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0) event 935 drivers/net/ethernet/intel/ice/ice_main.c ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) event 941 drivers/net/ethernet/intel/ice/ice_main.c link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; event 963 drivers/net/ethernet/intel/ice/ice_main.c struct ice_rq_event_info event; event 1036 drivers/net/ethernet/intel/ice/ice_main.c event.buf_len = cq->rq_buf_size; event 1037 drivers/net/ethernet/intel/ice/ice_main.c event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len, event 1039 drivers/net/ethernet/intel/ice/ice_main.c if (!event.msg_buf) event 1046 drivers/net/ethernet/intel/ice/ice_main.c ret = ice_clean_rq_elem(hw, cq, &event, &pending); event 1056 drivers/net/ethernet/intel/ice/ice_main.c opcode = le16_to_cpu(event.desc.opcode); event 1060 drivers/net/ethernet/intel/ice/ice_main.c if (ice_handle_link_event(pf, &event)) event 1065 drivers/net/ethernet/intel/ice/ice_main.c ice_vc_process_vf_msg(pf, &event); event 1068 drivers/net/ethernet/intel/ice/ice_main.c ice_output_fw_log(hw, &event.desc, event.msg_buf); event 1071 drivers/net/ethernet/intel/ice/ice_main.c ice_dcb_process_lldp_set_mib_change(pf, &event); event 1081 drivers/net/ethernet/intel/ice/ice_main.c devm_kfree(&pf->pdev->dev, event.msg_buf); event 1240 drivers/net/ethernet/intel/ice/ice_main.c u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> event 1247 drivers/net/ethernet/intel/ice/ice_main.c event, queue, pf_num, vf_num); event 1258 drivers/net/ethernet/intel/ice/ice_main.c u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> event 1265 drivers/net/ethernet/intel/ice/ice_main.c event, queue, pf_num, vf_num); event 1276 drivers/net/ethernet/intel/ice/ice_main.c u8 event 
= (reg & GL_MDET_RX_MAL_TYPE_M) >> event 1283 drivers/net/ethernet/intel/ice/ice_main.c event, queue, pf_num, vf_num); event 129 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; event 1273 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; event 1299 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; event 2902 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) event 2904 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c u32 v_opcode = le32_to_cpu(event->desc.cookie_high); event 2905 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c s16 vf_id = le16_to_cpu(event->desc.retval); event 2906 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c u16 msglen = event->msg_len; event 2907 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c u8 *msg = event->msg_buf; event 3247 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; event 106 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); event 127 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h #define ice_vc_process_vf_msg(pf, event) do {} while (0) event 5171 drivers/net/ethernet/intel/igb/igb_main.c static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) event 5183 drivers/net/ethernet/intel/igb/igb_main.c ret = !!(thstat & event); event 6451 drivers/net/ethernet/intel/igb/igb_main.c struct ptp_clock_event event; event 6456 drivers/net/ethernet/intel/igb/igb_main.c event.type = PTP_CLOCK_PPS; event 6458 drivers/net/ethernet/intel/igb/igb_main.c ptp_clock_event(adapter->ptp_clock, &event); event 6500 drivers/net/ethernet/intel/igb/igb_main.c event.type = PTP_CLOCK_EXTTS; event 6501 drivers/net/ethernet/intel/igb/igb_main.c event.index = 0; event 6502 drivers/net/ethernet/intel/igb/igb_main.c event.timestamp = sec * 1000000000ULL + nsec; event 6503 drivers/net/ethernet/intel/igb/igb_main.c ptp_clock_event(adapter->ptp_clock, &event); event 6510 drivers/net/ethernet/intel/igb/igb_main.c event.type = PTP_CLOCK_EXTTS; event 6511 drivers/net/ethernet/intel/igb/igb_main.c event.index = 1; event 6512 drivers/net/ethernet/intel/igb/igb_main.c event.timestamp = sec * 1000000000ULL + nsec; event 6513 drivers/net/ethernet/intel/igb/igb_main.c ptp_clock_event(adapter->ptp_clock, &event); event 6676 drivers/net/ethernet/intel/igb/igb_main.c unsigned long event = *(unsigned long *)data; event 6678 drivers/net/ethernet/intel/igb/igb_main.c switch (event) { event 6706 drivers/net/ethernet/intel/igb/igb_main.c static int igb_notify_dca(struct notifier_block *nb, unsigned long event, event 6711 drivers/net/ethernet/intel/igb/igb_main.c ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, event 139 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, event 1384 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned long event = *(unsigned long *)data; event 1389 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c switch (event) { event 11555 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, event 11560 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, event 670 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c struct ptp_clock_event event; event 672 
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c event.type = PTP_CLOCK_PPS; event 683 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c ptp_clock_event(adapter->ptp_clock, &event); event 2735 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c bool event = false, link = false; event 2743 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c event = true; event 2753 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c event = true; event 2765 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c if (!netif_running(dev) || !event) event 443 drivers/net/ethernet/marvell/octeontx2/af/cgx.c struct cgx_link_event event; event 449 drivers/net/ethernet/marvell/octeontx2/af/cgx.c link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id); event 452 drivers/net/ethernet/marvell/octeontx2/af/cgx.c event.cgx_id = cgx->cgx_id; event 453 drivers/net/ethernet/marvell/octeontx2/af/cgx.c event.lmac_id = lmac->lmac_id; event 456 drivers/net/ethernet/marvell/octeontx2/af/cgx.c lmac->link_info = event.link_uinfo; event 475 drivers/net/ethernet/marvell/octeontx2/af/cgx.c if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data)) event 481 drivers/net/ethernet/marvell/octeontx2/af/cgx.c static inline bool cgx_cmdresp_is_linkevent(u64 event) event 485 drivers/net/ethernet/marvell/octeontx2/af/cgx.c id = FIELD_GET(EVTREG_ID, event); event 493 drivers/net/ethernet/marvell/octeontx2/af/cgx.c static inline bool cgx_event_is_linkevent(u64 event) event 495 drivers/net/ethernet/marvell/octeontx2/af/cgx.c if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE) event 559 drivers/net/ethernet/marvell/octeontx2/af/cgx.c u64 event; event 563 drivers/net/ethernet/marvell/octeontx2/af/cgx.c event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); event 565 drivers/net/ethernet/marvell/octeontx2/af/cgx.c if (!FIELD_GET(EVTREG_ACK, event)) event 568 drivers/net/ethernet/marvell/octeontx2/af/cgx.c switch (FIELD_GET(EVTREG_EVT_TYPE, event)) { event 573 drivers/net/ethernet/marvell/octeontx2/af/cgx.c lmac->resp = event; event 580 drivers/net/ethernet/marvell/octeontx2/af/cgx.c if (cgx_cmdresp_is_linkevent(event)) event 581 drivers/net/ethernet/marvell/octeontx2/af/cgx.c cgx_link_change_handler(event, lmac); event 588 drivers/net/ethernet/marvell/octeontx2/af/cgx.c if (cgx_event_is_linkevent(event)) event 589 drivers/net/ethernet/marvell/octeontx2/af/cgx.c cgx_link_change_handler(event, lmac); event 92 drivers/net/ethernet/marvell/octeontx2/af/cgx.h int (*notify_link_chg)(struct cgx_link_event *event, void *data); event 139 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c static int cgx_lmac_postevent(struct cgx_link_event *event, void *data) event 148 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c qentry->link_event = *event; event 159 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) event 166 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c linfo = &event->link_uinfo; event 167 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); event 176 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c event->cgx_id, event->lmac_id, event 198 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c struct cgx_link_event *event; event 213 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c event = &qentry->link_event; event 216 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c cgx_notify_pfs(event, rvu); event 3730 drivers/net/ethernet/marvell/skge.c unsigned long event, void *ptr) event 3739 
drivers/net/ethernet/marvell/skge.c switch (event) { event 4616 drivers/net/ethernet/marvell/sky2.c unsigned long event, void *ptr) event 4624 drivers/net/ethernet/marvell/sky2.c switch (event) { event 434 drivers/net/ethernet/mellanox/mlx4/cmd.c int event) event 457 drivers/net/ethernet/mellanox/mlx4/cmd.c if (event) event 494 drivers/net/ethernet/mellanox/mlx4/cmd.c (event ? (1 << HCR_E_BIT) : 0) | event 143 drivers/net/ethernet/mellanox/mlx4/cq.c cq->event(cq, event_type); event 40 drivers/net/ethernet/mellanox/mlx4/en_cq.c static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) event 147 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.event = mlx4_en_cq_event; event 194 drivers/net/ethernet/mellanox/mlx4/en_main.c enum mlx4_dev_event event, unsigned long port) event 199 drivers/net/ethernet/mellanox/mlx4/en_main.c switch (event) { event 207 drivers/net/ethernet/mellanox/mlx4/en_main.c priv->link_state = event; event 222 drivers/net/ethernet/mellanox/mlx4/en_main.c mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, event 357 drivers/net/ethernet/mellanox/mlx4/en_main.c .event = mlx4_en_event, event 3033 drivers/net/ethernet/mellanox/mlx4/en_netdev.c unsigned long event, void *ptr) event 3073 drivers/net/ethernet/mellanox/mlx4/en_netdev.c if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) event 111 drivers/net/ethernet/mellanox/mlx4/en_resources.c void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) event 1074 drivers/net/ethernet/mellanox/mlx4/en_rx.c qp->event = mlx4_en_sqp_event; event 1193 drivers/net/ethernet/mellanox/mlx4/en_rx.c rss_map->indir_qp->event = mlx4_en_sqp_event; event 118 drivers/net/ethernet/mellanox/mlx4/en_tx.c ring->sp_qp.event = mlx4_en_sqp_event; event 171 drivers/net/ethernet/mellanox/mlx4/eq.c phys_port = eqe->event.port_mgmt_change.port; event 175 drivers/net/ethernet/mellanox/mlx4/eq.c eqe->event.port_mgmt_change.port = slave_port; event 181 drivers/net/ethernet/mellanox/mlx4/eq.c eqe->event.port_mgmt_change.port = phys_port; event 267 drivers/net/ethernet/mellanox/mlx4/eq.c eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); event 284 drivers/net/ethernet/mellanox/mlx4/eq.c eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); event 303 drivers/net/ethernet/mellanox/mlx4/eq.c eqe.event.port_change.port = cpu_to_be32(slave_port << 28); event 345 drivers/net/ethernet/mellanox/mlx4/eq.c static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) event 355 drivers/net/ethernet/mellanox/mlx4/eq.c event, &gen_event); event 367 drivers/net/ethernet/mellanox/mlx4/eq.c u8 port, int event, event 392 drivers/net/ethernet/mellanox/mlx4/eq.c if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) event 397 drivers/net/ethernet/mellanox/mlx4/eq.c if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) event 400 drivers/net/ethernet/mellanox/mlx4/eq.c else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) { event 407 drivers/net/ethernet/mellanox/mlx4/eq.c if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) { event 412 drivers/net/ethernet/mellanox/mlx4/eq.c event) { event 440 drivers/net/ethernet/mellanox/mlx4/eq.c eqe.event.port_mgmt_change.port = port; event 441 drivers/net/ethernet/mellanox/mlx4/eq.c eqe.event.port_mgmt_change.params.port_info.changed_attr = event 521 drivers/net/ethernet/mellanox/mlx4/eq.c cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; event 538 drivers/net/ethernet/mellanox/mlx4/eq.c be32_to_cpu(eqe->event.qp.qpn) event 553 
drivers/net/ethernet/mellanox/mlx4/eq.c mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & event 559 drivers/net/ethernet/mellanox/mlx4/eq.c __func__, be32_to_cpu(eqe->event.srq.srqn), event 567 drivers/net/ethernet/mellanox/mlx4/eq.c be32_to_cpu(eqe->event.srq.srqn) event 580 drivers/net/ethernet/mellanox/mlx4/eq.c be32_to_cpu(eqe->event.srq.srqn), event 593 drivers/net/ethernet/mellanox/mlx4/eq.c mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & event 599 drivers/net/ethernet/mellanox/mlx4/eq.c be16_to_cpu(eqe->event.cmd.token), event 600 drivers/net/ethernet/mellanox/mlx4/eq.c eqe->event.cmd.status, event 601 drivers/net/ethernet/mellanox/mlx4/eq.c be64_to_cpu(eqe->event.cmd.out_param)); event 606 drivers/net/ethernet/mellanox/mlx4/eq.c port = be32_to_cpu(eqe->event.port_change.port) >> 28; event 627 drivers/net/ethernet/mellanox/mlx4/eq.c eqe->event.port_change.port = event 629 drivers/net/ethernet/mellanox/mlx4/eq.c (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) event 641 drivers/net/ethernet/mellanox/mlx4/eq.c eqe->event.port_change.port = event 643 drivers/net/ethernet/mellanox/mlx4/eq.c (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) event 668 drivers/net/ethernet/mellanox/mlx4/eq.c eqe->event.port_change.port = event 670 drivers/net/ethernet/mellanox/mlx4/eq.c (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) event 686 drivers/net/ethernet/mellanox/mlx4/eq.c eqe->event.cq_err.syndrome == 1 ? event 688 drivers/net/ethernet/mellanox/mlx4/eq.c be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); event 692 drivers/net/ethernet/mellanox/mlx4/eq.c be32_to_cpu(eqe->event.cq_err.cqn) event 707 drivers/net/ethernet/mellanox/mlx4/eq.c be32_to_cpu(eqe->event.cq_err.cqn) event 730 drivers/net/ethernet/mellanox/mlx4/eq.c eqe->event.comm_channel_arm.bit_vec, event 731 drivers/net/ethernet/mellanox/mlx4/eq.c sizeof(eqe->event.comm_channel_arm.bit_vec)); event 737 drivers/net/ethernet/mellanox/mlx4/eq.c flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); event 777 drivers/net/ethernet/mellanox/mlx4/eq.c be16_to_cpu(eqe->event.warming.warning_threshold), event 778 drivers/net/ethernet/mellanox/mlx4/eq.c be16_to_cpu(eqe->event.warming.current_temperature)); event 798 drivers/net/ethernet/mellanox/mlx4/eq.c eqe->event.bad_cable.port); event 192 drivers/net/ethernet/mellanox/mlx4/intf.c if (dev_ctx->intf->event) event 193 drivers/net/ethernet/mellanox/mlx4/intf.c dev_ctx->intf->event(dev, dev_ctx->context, type, param); event 753 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); event 798 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h unsigned long event, void *ptr); event 67 drivers/net/ethernet/mellanox/mlx4/qp.c qp->event(qp, event_type); event 3382 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c eqe->event.cmd.token = cpu_to_be16(event_eq->token); event 58 drivers/net/ethernet/mellanox/mlx4/srq.c srq->event(srq, event_type); event 915 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); event 61 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c unsigned long event, void *eqe) event 201 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int async_event(struct notifier_block *nb, unsigned long event, void *data) event 206 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (event != MLX5_EVENT_TYPE_PORT_CHANGE) event 1559 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->event = mlx5e_cq_error_event; event 857 
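The mlx4 eq.c entries above read different members of eqe->event (comp.cqn, qp.qpn, srq.srqn, cmd.token, port_change.port, ...) depending on the event type, i.e. each event-queue entry carries a tagged union. A hedged sketch of that shape follows; the structure layout, field widths and enum values are invented for illustration and are not mlx4's real EQE format.

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical EQE layout: a type tag selects which union member is valid. */
struct my_eqe {
	u8 type;
	u8 reserved[3];
	union {
		struct { __be32 cqn; } comp;
		struct { __be32 qpn; } qp;
		struct { __be16 token; u8 status; } cmd;
	} event;
};

enum { MY_EQE_COMP, MY_EQE_QP, MY_EQE_CMD };

static void my_handle_eqe(const struct my_eqe *eqe)
{
	switch (eqe->type) {
	case MY_EQE_COMP:	/* completion events carry a CQ number */
		pr_debug("completion on cqn %u\n",
			 be32_to_cpu(eqe->event.comp.cqn) & 0xffffff);
		break;
	case MY_EQE_QP:		/* async QP events carry a QP number */
		pr_debug("async event on qpn %u\n",
			 be32_to_cpu(eqe->event.qp.qpn) & 0xffffff);
		break;
	case MY_EQE_CMD:	/* command-done events carry token and status */
		pr_debug("command done, token %u status %u\n",
			 be16_to_cpu(eqe->event.cmd.token),
			 eqe->event.cmd.status);
		break;
	default:
		break;
	}
}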
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c unsigned long event, void *ptr) event 868 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c switch (event) { event 906 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c unsigned long event, void *ptr) event 919 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c switch (event) { event 1694 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data) event 1698 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c if (event == MLX5_EVENT_TYPE_PORT_CHANGE) { event 1713 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c if (event == MLX5_DEV_EVENT_PORT_AFFINITY) { event 4080 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c unsigned long event, void *ptr) event 4089 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c event != NETDEV_UNREGISTER || event 208 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) event 215 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c __func__, mcq->cqn, event); event 499 drivers/net/ethernet/mellanox/mlx5/core/eq.c if (cq->event) event 500 drivers/net/ethernet/mellanox/mlx5/core/eq.c cq->event(cq, type); event 1666 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int mlx5_esw_offloads_devcom_event(int event, event 1675 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c switch (event) { event 1714 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c event, err); event 326 drivers/net/ethernet/mellanox/mlx5/core/events.c static int forward_event(struct notifier_block *nb, unsigned long event, void *data) event 334 drivers/net/ethernet/mellanox/mlx5/core/events.c atomic_notifier_call_chain(&events->nh, event, data); event 402 drivers/net/ethernet/mellanox/mlx5/core/events.c int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data) event 404 drivers/net/ethernet/mellanox/mlx5/core/events.c return atomic_notifier_call_chain(&events->nh, event, data); event 366 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c enum mlx5_event event) event 371 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn); event 374 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event) event 379 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn); event 496 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.event = mlx5_fpga_conn_cq_event; event 607 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.mqp.event = mlx5_fpga_conn_event; event 151 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c static int fpga_err_event(struct notifier_block *nb, unsigned long event, void *eqe) event 155 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c return mlx5_fpga_event(fdev, event, eqe); event 158 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c static int fpga_qp_err_event(struct notifier_block *nb, unsigned long event, void *eqe) event 162 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c return mlx5_fpga_event(fdev, event, eqe); event 314 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c unsigned long event, void *eqe) event 322 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c switch (event) { event 457 drivers/net/ethernet/mellanox/mlx5/core/lag.c unsigned long event, void *ptr) event 467 
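The mlx5 events.c entries above (forward_event(), mlx5_notifier_call_chain()) are built on the kernel's atomic notifier chains: firmware events arrive in one place and are re-published to whichever sub-drivers registered. A minimal, self-contained sketch of that mechanism; the chain and handler names are invented, while the notifier calls themselves are the standard <linux/notifier.h> API.

#include <linux/notifier.h>
#include <linux/printk.h>

/* One chain per device instance in the real drivers; a single static one here. */
static ATOMIC_NOTIFIER_HEAD(my_event_chain);

/* A consumer: called with the event number and an opaque payload pointer. */
static int my_event_listener(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	pr_debug("got event %lu\n", event);
	return NOTIFY_OK;
}

static struct notifier_block my_event_nb = {
	.notifier_call = my_event_listener,
};

/* Producer side: register once, then forward each hardware event. */
static void my_events_init(void)
{
	atomic_notifier_chain_register(&my_event_chain, &my_event_nb);
}

static void my_forward_event(unsigned long event, void *payload)
{
	atomic_notifier_call_chain(&my_event_chain, event, payload);
}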
drivers/net/ethernet/mellanox/mlx5/core/lag.c if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE)) event 473 drivers/net/ethernet/mellanox/mlx5/core/lag.c switch (event) { event 102 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c unsigned long event; event 110 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c unsigned long event, event 118 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c if (event == FIB_EVENT_ENTRY_DEL) { event 165 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c unsigned long event, event 176 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c if (event == FIB_EVENT_NH_DEL) { event 183 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c } else if (event == FIB_EVENT_NH_ADD && event 198 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c switch (fib_work->event) { event 203 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c mlx5_lag_fib_route_event(ldev, fib_work->event, event 211 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c fib_work->event, event 223 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c mlx5_lag_init_fib_work(struct mlx5_lag *ldev, unsigned long event) event 233 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c fib_work->event = event; event 239 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c unsigned long event, event 260 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c switch (event) { event 277 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c fib_work = mlx5_lag_init_fib_work(ldev, event); event 290 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c fib_work = mlx5_lag_init_fib_work(ldev, event); event 183 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c int event, event 196 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c err = comp->handler(event, comp->device[i].data, event 15 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h typedef int (*mlx5_devcom_event_handler_t)(int event, event 31 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h int event, event 80 drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data); event 180 drivers/net/ethernet/mellanox/mlx5/core/qp.c qp->event(qp, event_type); event 103 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static void dr_qp_event(struct mlx5_core_qp *mqp, int event) event 105 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c pr_info("DR QP event %u on QP #%u\n", event, mqp->qpn); event 190 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_qp->mqp.event = dr_qp_event; event 687 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c enum mlx5_event event) event 689 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn); event 758 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.event = dr_cq_event; event 4760 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr); event 5977 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr) event 5991 drivers/net/ethernet/mellanox/mlxsw/spectrum.c switch (event) { event 6090 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr) event 6099 drivers/net/ethernet/mellanox/mlxsw/spectrum.c switch (event) { event 6115 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr) event 6117 drivers/net/ethernet/mellanox/mlxsw/spectrum.c switch (event) { event 6121 drivers/net/ethernet/mellanox/mlxsw/spectrum.c event, ptr); event 6123 
drivers/net/ethernet/mellanox/mlxsw/spectrum.c return mlxsw_sp_netdevice_port_lower_event(port_dev, event, event 6131 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr) event 6139 drivers/net/ethernet/mellanox/mlxsw/spectrum.c ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, event 6151 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr, event 6163 drivers/net/ethernet/mellanox/mlxsw/spectrum.c switch (event) { event 6218 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, event 6228 drivers/net/ethernet/mellanox/mlxsw/spectrum.c event, ptr, event 6240 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr, event 6253 drivers/net/ethernet/mellanox/mlxsw/spectrum.c switch (event) { event 6281 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr) event 6288 drivers/net/ethernet/mellanox/mlxsw/spectrum.c event, ptr, vid); event 6291 drivers/net/ethernet/mellanox/mlxsw/spectrum.c real_dev, event, event 6295 drivers/net/ethernet/mellanox/mlxsw/spectrum.c event, ptr, vid); event 6301 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr) event 6313 drivers/net/ethernet/mellanox/mlxsw/spectrum.c switch (event) { event 6343 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr) event 6349 drivers/net/ethernet/mellanox/mlxsw/spectrum.c if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) event 6360 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) event 6364 drivers/net/ethernet/mellanox/mlxsw/spectrum.c if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) event 6371 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr) event 6380 drivers/net/ethernet/mellanox/mlxsw/spectrum.c switch (event) { event 6438 drivers/net/ethernet/mellanox/mlxsw/spectrum.c unsigned long event, void *ptr) event 6446 drivers/net/ethernet/mellanox/mlxsw/spectrum.c if (event == NETDEV_UNREGISTER) { event 6454 drivers/net/ethernet/mellanox/mlxsw/spectrum.c err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); event 6457 drivers/net/ethernet/mellanox/mlxsw/spectrum.c event, ptr); event 6460 drivers/net/ethernet/mellanox/mlxsw/spectrum.c event, ptr); event 6461 drivers/net/ethernet/mellanox/mlxsw/spectrum.c else if (event == NETDEV_PRE_CHANGEADDR || event 6462 drivers/net/ethernet/mellanox/mlxsw/spectrum.c event == NETDEV_CHANGEADDR || event 6463 drivers/net/ethernet/mellanox/mlxsw/spectrum.c event == NETDEV_CHANGEMTU) event 6464 drivers/net/ethernet/mellanox/mlxsw/spectrum.c err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); event 6465 drivers/net/ethernet/mellanox/mlxsw/spectrum.c else if (mlxsw_sp_is_vrf_event(event, ptr)) event 6466 drivers/net/ethernet/mellanox/mlxsw/spectrum.c err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); event 6468 drivers/net/ethernet/mellanox/mlxsw/spectrum.c err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); event 6470 drivers/net/ethernet/mellanox/mlxsw/spectrum.c err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); event 6472 drivers/net/ethernet/mellanox/mlxsw/spectrum.c err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); event 6474 drivers/net/ethernet/mellanox/mlxsw/spectrum.c err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); event 6476 drivers/net/ethernet/mellanox/mlxsw/spectrum.c err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); event 530 
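Most of the mlxsw spectrum.c entries above (and the earlier skge, sky2 and mlx4_en ones) are a single netdevice notifier whose handler identifies which net_device the event concerns and which NETDEV_* event it is, then fans out to per-device-type helpers. A condensed sketch of that shape, with a hypothetical helper; register_netdevice_notifier(), netdev_notifier_info_to_dev(), notifier_from_errno() and the NETDEV_* constants are real kernel API.

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_netdevice_event(struct notifier_block *nb, unsigned long event,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		/* topology change: dev gained or will gain an upper device */
		/* err = my_handle_changeupper(dev, ptr);  (hypothetical helper) */
		break;
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		/* router-facing drivers typically revalidate L3 state here */
		break;
	default:
		break;
	}

	return notifier_from_errno(err);
}

static struct notifier_block my_netdevice_nb = {
	.notifier_call = my_netdevice_event,
};

static int my_register(void)
{
	return register_netdevice_notifier(&my_netdevice_nb);
}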
drivers/net/ethernet/mellanox/mlxsw/spectrum.h unsigned long event, void *ptr); event 534 drivers/net/ethernet/mellanox/mlxsw/spectrum.h unsigned long event, void *ptr); event 536 drivers/net/ethernet/mellanox/mlxsw/spectrum.h unsigned long event, void *ptr); event 537 drivers/net/ethernet/mellanox/mlxsw/spectrum.h int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, event 545 drivers/net/ethernet/mellanox/mlxsw/spectrum.h unsigned long event, event 550 drivers/net/ethernet/mellanox/mlxsw/spectrum.h unsigned long event, event 1725 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, event 1731 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 1765 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, event 1771 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 1796 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, event 1806 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c ul_dev, event, info); event 2552 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, void *ptr) event 2561 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 3797 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, struct fib_nh *fib_nh) event 3810 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 5931 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event; event 5994 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (fib_work->event) { event 5998 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; event 5999 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c append = fib_work->event == FIB_EVENT_ENTRY_APPEND; event 6018 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event, event 6038 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (fib_work->event) { event 6041 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; event 6076 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (fib_work->event) { event 6079 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; event 6120 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (fib_work->event) { event 6149 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (fib_work->event) { event 6169 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (fib_work->event) { event 6184 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static int mlxsw_sp_router_fib_rule_event(unsigned long event, event 6194 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (event == FIB_EVENT_RULE_DEL) event 6234 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, void *ptr) event 6249 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 6252 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = mlxsw_sp_router_fib_rule_event(event, info, event 6294 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c fib_work->event = event; event 6361 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event) event 6367 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 6742 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, u16 vid, event 6752 
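The spectrum_router.c entries above (fib_work->event, switch (fib_work->event)) and the earlier mlx5 lag_mp.c ones rely on the same trick: FIB notifier callbacks run in a context where the driver cannot sleep or take its heavy locks, so the event and its payload are copied into a work item and processed later from a workqueue. A hedged sketch of just the deferral, with an invented payload structure; the real drivers also copy the fib notifier info and take a reference on it.

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Invented payload; real drivers store the FIB notifier info here as well. */
struct my_fib_work {
	struct work_struct work;
	unsigned long event;
};

static void my_fib_work_handler(struct work_struct *work)
{
	struct my_fib_work *fib_work =
		container_of(work, struct my_fib_work, work);

	switch (fib_work->event) {
	/* e.g. FIB_EVENT_ENTRY_REPLACE / FIB_EVENT_ENTRY_DEL in the drivers */
	default:
		break;
	}
	kfree(fib_work);
}

/* Called from the notifier: atomic context, so GFP_ATOMIC and no waiting. */
static int my_fib_event(unsigned long event)
{
	struct my_fib_work *fib_work;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (!fib_work)
		return -ENOMEM;

	INIT_WORK(&fib_work->work, my_fib_work_handler);
	fib_work->event = event;
	schedule_work(&fib_work->work);
	return 0;
}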
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 6765 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, event 6773 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, event 6779 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, u16 vid, event 6790 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c event, vid, event 6801 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, event 6807 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, event 6813 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, event 6821 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 6838 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, event 6849 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c event, vid, extack); event 6851 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, event 6854 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event, event 6958 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, event 6961 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 7004 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, event 7008 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return mlxsw_sp_inetaddr_port_event(dev, event, extack); event 7010 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return mlxsw_sp_inetaddr_lag_event(dev, event, extack); event 7012 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event, event 7015 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event, event 7018 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event, event 7025 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, void *ptr) event 7034 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (event == NETDEV_UP) event 7039 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!mlxsw_sp_rif_should_config(rif, dev, event)) event 7042 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL); event 7048 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, void *ptr) event 7061 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!mlxsw_sp_rif_should_config(rif, dev, event)) event 7069 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack); event 7078 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event; event 7087 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event = inet6addr_work->event; event 7093 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!mlxsw_sp_rif_should_config(rif, dev, event)) event 7096 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL); event 7105 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, void *ptr) event 7113 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (event == NETDEV_UP) event 7124 
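The inetaddr entries above show the router code reacting to IPv4 address changes on an interface: NETDEV_UP when an address appears, NETDEV_DOWN when it goes away, with the affected net_device reached through the struct in_ifaddr that the notifier passes in. A minimal sketch; the RIF bookkeeping is omitted and the handler name is invented, while the notifier signature and ifa->ifa_dev->dev access follow the standard inetaddr notifier API.

#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_inetaddr_event(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;

	switch (event) {
	case NETDEV_UP:
		/* first address on dev: create an L3 interface for it */
		break;
	case NETDEV_DOWN:
		/* last address gone: tear the L3 interface down */
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block my_inetaddr_nb = {
	.notifier_call = my_inetaddr_event,
};

/* register_inetaddr_notifier(&my_inetaddr_nb) hooks this into the chain. */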
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c inet6addr_work->event = event; event 7132 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, void *ptr) event 7145 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!mlxsw_sp_rif_should_config(rif, dev, event)) event 7153 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack); event 7237 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned long event, void *ptr) event 7250 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 7289 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, event 7301 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (event) { event 2769 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c unsigned long event; event 2793 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c switch (switchdev_work->event) { event 2833 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE && event 2834 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE) event 2837 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE && event 2890 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c switch (switchdev_work->event) { event 3060 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c switch (switchdev_work->event) { event 3119 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c unsigned long event, void *ptr) event 3128 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c if (event == SWITCHDEV_PORT_ATTR_SET) { event 3149 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c switchdev_work->event = event; event 3151 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c switch (event) { event 3436 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c unsigned long event, void *ptr) event 3441 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c switch (event) { event 1677 drivers/net/ethernet/mscc/ocelot.c unsigned long event, event 1683 drivers/net/ethernet/mscc/ocelot.c switch (event) { event 1713 drivers/net/ethernet/mscc/ocelot.c unsigned long event, void *ptr) event 1722 drivers/net/ethernet/mscc/ocelot.c if (event == NETDEV_PRECHANGEUPPER && event 1742 drivers/net/ethernet/mscc/ocelot.c ret = ocelot_netdevice_port_event(slave, event, info); event 1747 drivers/net/ethernet/mscc/ocelot.c ret = ocelot_netdevice_port_event(dev, event, info); event 1760 drivers/net/ethernet/mscc/ocelot.c unsigned long event, void *ptr) event 1765 drivers/net/ethernet/mscc/ocelot.c switch (event) { event 1782 drivers/net/ethernet/mscc/ocelot.c unsigned long event, void *ptr) event 1787 drivers/net/ethernet/mscc/ocelot.c switch (event) { event 1147 drivers/net/ethernet/myricom/myri10ge/myri10ge.c unsigned long event; event 1150 drivers/net/ethernet/myricom/myri10ge/myri10ge.c event = *(unsigned long *)data; event 1152 drivers/net/ethernet/myricom/myri10ge/myri10ge.c if (event == DCA_PROVIDER_ADD) event 1154 drivers/net/ethernet/myricom/myri10ge/myri10ge.c else if (event == DCA_PROVIDER_REMOVE) event 4046 drivers/net/ethernet/myricom/myri10ge/myri10ge.c myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p) event 4049 drivers/net/ethernet/myricom/myri10ge/myri10ge.c NULL, &event, event 1605 
drivers/net/ethernet/neterion/vxge/vxge-main.c static int do_vxge_reset(struct vxgedev *vdev, int event) event 1612 drivers/net/ethernet/neterion/vxge/vxge-main.c if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) { event 1622 drivers/net/ethernet/neterion/vxge/vxge-main.c if (event == VXGE_LL_FULL_RESET) { event 1644 drivers/net/ethernet/neterion/vxge/vxge-main.c if (event == VXGE_LL_FULL_RESET) { event 1706 drivers/net/ethernet/neterion/vxge/vxge-main.c if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) event 1709 drivers/net/ethernet/neterion/vxge/vxge-main.c if (event == VXGE_LL_FULL_RESET) { event 1720 drivers/net/ethernet/neterion/vxge/vxge-main.c if (event == VXGE_LL_COMPL_RESET) { event 1742 drivers/net/ethernet/neterion/vxge/vxge-main.c if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) { event 1778 drivers/net/ethernet/neterion/vxge/vxge-main.c if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) event 633 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c unsigned long event, void *ptr) event 638 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c switch (event) { event 125 drivers/net/ethernet/netronome/nfp/flower/main.c unsigned long event) event 127 drivers/net/ethernet/netronome/nfp/flower/main.c if (event == NETDEV_UNREGISTER && event 905 drivers/net/ethernet/netronome/nfp/flower/main.c unsigned long event, void *ptr) event 911 drivers/net/ethernet/netronome/nfp/flower/main.c ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr); event 916 drivers/net/ethernet/netronome/nfp/flower/main.c ret = nfp_flower_reg_indir_block_handler(app, netdev, event); event 920 drivers/net/ethernet/netronome/nfp/flower/main.c ret = nfp_flower_internal_port_event_handler(app, netdev, event); event 924 drivers/net/ethernet/netronome/nfp/flower/main.c return nfp_tunnel_mac_event_handler(app, netdev, event, ptr); event 396 drivers/net/ethernet/netronome/nfp/flower/main.h unsigned long event, void *ptr); event 406 drivers/net/ethernet/netronome/nfp/flower/main.h unsigned long event, void *ptr); event 421 drivers/net/ethernet/netronome/nfp/flower/main.h unsigned long event); event 1653 drivers/net/ethernet/netronome/nfp/flower/offload.c unsigned long event) event 1660 drivers/net/ethernet/netronome/nfp/flower/offload.c if (event == NETDEV_REGISTER) { event 1668 drivers/net/ethernet/netronome/nfp/flower/offload.c } else if (event == NETDEV_UNREGISTER) { event 326 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, event 337 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c switch (event) { event 864 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c unsigned long event, void *ptr) event 868 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c if (event == NETDEV_DOWN) { event 874 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c } else if (event == NETDEV_UP) { event 880 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c } else if (event == NETDEV_CHANGEADDR) { event 890 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c } else if (event == NETDEV_CHANGEUPPER) { event 174 drivers/net/ethernet/netronome/nfp/nfp_app.c nfp_app_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr) event 183 drivers/net/ethernet/netronome/nfp/nfp_app.c switch (event) { event 191 drivers/net/ethernet/netronome/nfp/nfp_app.c return app->type->netdev_event(app, netdev, event, ptr); event 127 
drivers/net/ethernet/netronome/nfp/nfp_app.h unsigned long event, void *ptr); event 2464 drivers/net/ethernet/pensando/ionic/ionic_if.h struct ionic_notifyq_event event; event 666 drivers/net/ethernet/pensando/ionic/ionic_lif.c eid = le64_to_cpu(comp->event.eid); event 678 drivers/net/ethernet/pensando/ionic/ionic_lif.c switch (le16_to_cpu(comp->event.ecode)) { event 691 drivers/net/ethernet/pensando/ionic/ionic_lif.c comp->event.ecode, eid); event 2104 drivers/net/ethernet/pensando/ionic/ionic_lif.c unsigned long event, void *info) event 2113 drivers/net/ethernet/pensando/ionic/ionic_lif.c switch (event) { event 3197 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct in_ifaddr *ifa, unsigned long event) event 3209 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c switch (event) { event 3248 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct net_device *dev, unsigned long event) event 3262 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c switch (event) { event 3278 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) event 3285 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; event 3286 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_config_indev_addr(adapter, netdev, event); event 3316 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c static void netxen_config_master(struct net_device *dev, unsigned long event) event 3331 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_config_indev_addr(adapter, master, event); event 3335 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_config_indev_addr(adapter, slave, event); event 3348 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c unsigned long event, void *ptr) event 3363 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c if (event == NETDEV_UP || event == NETDEV_DOWN) { event 3372 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c orig_dev, event); event 3381 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_config_master(dev, event); event 3382 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_config_indev_addr(adapter, orig_dev, event); event 3391 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c unsigned long event, void *ptr) event 3399 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c ip_event = (event == NETDEV_UP) ? 
NX_IP_UP : NX_IP_DOWN; event 3408 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c if (event == NETDEV_UP || event == NETDEV_DOWN) { event 3439 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_restore_indev_addr(struct net_device *dev, unsigned long event) event 824 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_MPA_REQUEST; event 986 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY; event 1016 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; event 1018 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE; event 2848 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_CLOSE; event 2877 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_DISCONNECT; event 2882 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_DISCONNECT; event 2886 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_RQ_EMPTY; event 2890 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_IRQ_FULL; event 2894 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_LLP_TIMEOUT; event 2898 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR; event 2902 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_CQ_OVERFLOW; event 2906 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_QP_CATASTROPHIC; event 2910 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR; event 2914 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR; event 2918 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED; event 2942 drivers/net/ethernet/qlogic/qed/qed_iwarp.c params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; event 231 drivers/net/ethernet/qlogic/qede/qede_main.c static int qede_netdev_event(struct notifier_block *this, unsigned long event, event 238 drivers/net/ethernet/qlogic/qede/qede_main.c if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR) event 251 drivers/net/ethernet/qlogic/qede/qede_main.c switch (event) { event 294 drivers/net/ethernet/qlogic/qede/qede_rdma.c enum qede_rdma_event event; event 298 drivers/net/ethernet/qlogic/qede/qede_rdma.c event = event_node->event; event 301 drivers/net/ethernet/qlogic/qede/qede_rdma.c switch (event) { event 315 drivers/net/ethernet/qlogic/qede/qede_rdma.c DP_NOTICE(edev, "Invalid rdma event %d", event); event 320 drivers/net/ethernet/qlogic/qede/qede_rdma.c enum qede_rdma_event event) event 341 drivers/net/ethernet/qlogic/qede/qede_rdma.c event_node->event = event; event 503 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; event 512 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); event 513 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c if (event & QLCNIC_MBX_ASYNC_EVENT) { event 969 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c u32 event[QLC_83XX_MBX_AEN_CNT]; event 973 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c event[i] = readl(QLCNIC_MBX_FW(ahw, i)); event 975 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c switch (QLCNIC_MBX_RSP(event[0])) { event 978 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c qlcnic_83xx_handle_link_aen(adapter, 
event); event 981 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c qlcnic_83xx_handle_idc_comp_aen(adapter, event); event 985 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]); event 990 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c ahw->extend_lb_time = event[1] >> 8 & 0xf; event 993 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c qlcnic_sriov_handle_bc_event(adapter, event[1]); event 997 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c QLCNIC_MBX_RSP(event[0])); event 1001 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c QLCNIC_MBX_RSP(event[0])); event 1004 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]); event 1008 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c QLCNIC_MBX_RSP(event[0])); event 1017 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; event 1025 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c event = readl(QLCNIC_MBX_FW(ahw, 0)); event 1026 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c if (event & QLCNIC_MBX_ASYNC_EVENT) { event 2344 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; event 2355 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); event 2356 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c if (event & QLCNIC_MBX_ASYNC_EVENT) { event 4120 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct net_device *dev, unsigned long event) event 4130 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c switch (event) { event 4147 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) event 4153 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c qlcnic_config_indev_addr(adapter, netdev, event); event 4160 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c qlcnic_config_indev_addr(adapter, dev, event); event 4166 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c unsigned long event, void *ptr) event 4191 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c qlcnic_config_indev_addr(adapter, dev, event); event 4198 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c unsigned long event, void *ptr) event 4227 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c switch (event) { event 4252 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event) event 1295 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event) event 1303 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c pci_func = qlcnic_sriov_target_func_id(event); event 1312 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c if (qlcnic_sriov_channel_free_check(event)) event 1315 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c if (qlcnic_sriov_flr_check(event)) { event 1320 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c if (qlcnic_sriov_bc_msg_check(event)) event 494 drivers/net/ethernet/qualcomm/qca_spi.c qcaspi_qca7k_sync(struct qcaspi *qca, int event) event 500 drivers/net/ethernet/qualcomm/qca_spi.c if (event == QCASPI_EVENT_CPUON) { event 237 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c unsigned long event, void *data) event 244 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c switch (event) { event 311 drivers/net/ethernet/renesas/ravb_ptp.c struct 
ptp_clock_event event; event 313 drivers/net/ethernet/renesas/ravb_ptp.c event.type = PTP_CLOCK_EXTTS; event 314 drivers/net/ethernet/renesas/ravb_ptp.c event.index = 0; event 315 drivers/net/ethernet/renesas/ravb_ptp.c event.timestamp = ravb_read(ndev, GCPT); event 316 drivers/net/ethernet/renesas/ravb_ptp.c ptp_clock_event(priv->ptp.clock, &event); event 2148 drivers/net/ethernet/rocker/rocker_main.c unsigned long event; event 2161 drivers/net/ethernet/rocker/rocker_main.c switch (fib_work->event) { event 2186 drivers/net/ethernet/rocker/rocker_main.c unsigned long event, void *ptr) event 2204 drivers/net/ethernet/rocker/rocker_main.c fib_work->event = event; event 2206 drivers/net/ethernet/rocker/rocker_main.c switch (event) { event 2739 drivers/net/ethernet/rocker/rocker_main.c unsigned long event; event 2764 drivers/net/ethernet/rocker/rocker_main.c switch (switchdev_work->event) { event 2794 drivers/net/ethernet/rocker/rocker_main.c unsigned long event, void *ptr) event 2804 drivers/net/ethernet/rocker/rocker_main.c if (event == SWITCHDEV_PORT_ATTR_SET) event 2814 drivers/net/ethernet/rocker/rocker_main.c switchdev_work->event = event; event 2816 drivers/net/ethernet/rocker/rocker_main.c switch (event) { event 2843 drivers/net/ethernet/rocker/rocker_main.c rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev, event 2848 drivers/net/ethernet/rocker/rocker_main.c switch (event) { event 2863 drivers/net/ethernet/rocker/rocker_main.c unsigned long event, void *ptr) event 2870 drivers/net/ethernet/rocker/rocker_main.c switch (event) { event 2873 drivers/net/ethernet/rocker/rocker_main.c return rocker_switchdev_port_obj_event(event, dev, ptr); event 3134 drivers/net/ethernet/rocker/rocker_main.c unsigned long event, void *ptr) event 3144 drivers/net/ethernet/rocker/rocker_main.c switch (event) { event 3177 drivers/net/ethernet/rocker/rocker_main.c unsigned long event, void *ptr) event 3184 drivers/net/ethernet/rocker/rocker_main.c switch (event) { event 3201 drivers/net/ethernet/sfc/ef10.c efx_qword_t event; event 3203 drivers/net/ethernet/sfc/ef10.c EFX_POPULATE_QWORD_2(event, event 3212 drivers/net/ethernet/sfc/ef10.c memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], event 3445 drivers/net/ethernet/sfc/ef10.c const efx_qword_t *event) event 3450 drivers/net/ethernet/sfc/ef10.c if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { event 3458 drivers/net/ethernet/sfc/ef10.c if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { event 3467 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3474 drivers/net/ethernet/sfc/ef10.c if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { event 3483 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3490 drivers/net/ethernet/sfc/ef10.c if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { event 3495 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3503 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3508 drivers/net/ethernet/sfc/ef10.c if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { event 3513 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3521 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3532 drivers/net/ethernet/sfc/ef10.c const efx_qword_t *event) event 3548 drivers/net/ethernet/sfc/ef10.c rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); event 3549 drivers/net/ethernet/sfc/ef10.c next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); event 3550 drivers/net/ethernet/sfc/ef10.c 
rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); event 3551 drivers/net/ethernet/sfc/ef10.c rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); event 3552 drivers/net/ethernet/sfc/ef10.c rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS); event 3553 drivers/net/ethernet/sfc/ef10.c rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); event 3557 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : event 3560 drivers/net/ethernet/sfc/ef10.c if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) event 3563 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3583 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3623 drivers/net/ethernet/sfc/ef10.c EFX_AND_QWORD(errors, *event, errors); event 3628 drivers/net/ethernet/sfc/ef10.c event); event 3648 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3672 drivers/net/ethernet/sfc/ef10.c static u32 efx_ef10_extract_event_ts(efx_qword_t *event) event 3676 drivers/net/ethernet/sfc/ef10.c tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI); event 3678 drivers/net/ethernet/sfc/ef10.c tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO); event 3684 drivers/net/ethernet/sfc/ef10.c efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) event 3696 drivers/net/ethernet/sfc/ef10.c if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) event 3700 drivers/net/ethernet/sfc/ef10.c tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); event 3706 drivers/net/ethernet/sfc/ef10.c tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); event 3720 drivers/net/ethernet/sfc/ef10.c tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); event 3731 drivers/net/ethernet/sfc/ef10.c tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, event 3738 drivers/net/ethernet/sfc/ef10.c ts_part = efx_ef10_extract_event_ts(event); event 3743 drivers/net/ethernet/sfc/ef10.c ts_part = efx_ef10_extract_event_ts(event); event 3755 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3761 drivers/net/ethernet/sfc/ef10.c efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) event 3766 drivers/net/ethernet/sfc/ef10.c subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); event 3780 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3786 drivers/net/ethernet/sfc/ef10.c efx_qword_t *event) event 3791 drivers/net/ethernet/sfc/ef10.c subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); event 3809 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(*event)); event 3816 drivers/net/ethernet/sfc/ef10.c efx_qword_t event, *p_event; event 3828 drivers/net/ethernet/sfc/ef10.c event = *p_event; event 3830 drivers/net/ethernet/sfc/ef10.c if (!efx_event_present(&event)) event 3837 drivers/net/ethernet/sfc/ef10.c ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); event 3841 drivers/net/ethernet/sfc/ef10.c channel->channel, EFX_QWORD_VAL(event)); event 3845 drivers/net/ethernet/sfc/ef10.c efx_mcdi_process_event(channel, &event); event 3848 drivers/net/ethernet/sfc/ef10.c spent += efx_ef10_handle_rx_event(channel, &event); event 3858 drivers/net/ethernet/sfc/ef10.c efx_ef10_handle_tx_event(channel, &event); event 3861 drivers/net/ethernet/sfc/ef10.c efx_ef10_handle_driver_event(channel, &event); event 3866 drivers/net/ethernet/sfc/ef10.c efx_ef10_handle_driver_generated_event(channel, &event); event 3873 drivers/net/ethernet/sfc/ef10.c EFX_QWORD_VAL(event)); event 3920 drivers/net/ethernet/sfc/ef10.c efx_qword_t event; event 3923 
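Both the octeontx2 cgx.c entries near the start of this block (FIELD_GET(EVTREG_ID, event)) and the sfc entries above (EFX_QWORD_FIELD(*event, ...)) are doing the same thing: pulling named bit-fields out of a 64-bit event word. Outside of sfc's own macros, <linux/bitfield.h> is the generic helper for this. A sketch with made-up field positions and event IDs, not the layout of either driver:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical event-word layout: an ack bit, an event type, an ID, a status. */
#define MY_EVT_ACK	BIT_ULL(63)
#define MY_EVT_TYPE	GENMASK_ULL(62, 56)
#define MY_EVT_ID	GENMASK_ULL(55, 48)
#define MY_EVT_STATUS	GENMASK_ULL(47, 40)

static bool my_event_is_link_change(u64 event)
{
	if (!FIELD_GET(MY_EVT_ACK, event))
		return false;				/* nothing posted yet */

	return FIELD_GET(MY_EVT_ID, event) == 1;	/* 1: link change (made up) */
}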
drivers/net/ethernet/sfc/ef10.c EFX_POPULATE_QWORD_2(event, event 3932 drivers/net/ethernet/sfc/ef10.c memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], event 2503 drivers/net/ethernet/sfc/efx.c unsigned long event, void *ptr) event 2508 drivers/net/ethernet/sfc/efx.c event == NETDEV_CHANGENAME) event 2241 drivers/net/ethernet/sfc/falcon/efx.c unsigned long event, void *ptr) event 2246 drivers/net/ethernet/sfc/falcon/efx.c event == NETDEV_CHANGENAME) event 1741 drivers/net/ethernet/sfc/falcon/falcon.c falcon_handle_global_event(struct ef4_channel *channel, ef4_qword_t *event) event 1746 drivers/net/ethernet/sfc/falcon/falcon.c if (EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) || event 1747 drivers/net/ethernet/sfc/falcon/falcon.c EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) || event 1748 drivers/net/ethernet/sfc/falcon/falcon.c EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) event 1753 drivers/net/ethernet/sfc/falcon/falcon.c EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) { event 1759 drivers/net/ethernet/sfc/falcon/falcon.c EF4_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : event 1760 drivers/net/ethernet/sfc/falcon/falcon.c EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { event 796 drivers/net/ethernet/sfc/falcon/farch.c ef4_qword_t *event) event 802 drivers/net/ethernet/sfc/falcon/farch.c drv_ev_reg.u32[0] = event->u32[0]; event 803 drivers/net/ethernet/sfc/falcon/farch.c drv_ev_reg.u32[1] = event->u32[1]; event 812 drivers/net/ethernet/sfc/falcon/farch.c ef4_qword_t event; event 814 drivers/net/ethernet/sfc/falcon/farch.c EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, event 817 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_generate_event(channel->efx, channel->channel, &event); event 826 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event) event 837 drivers/net/ethernet/sfc/falcon/farch.c if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { event 839 drivers/net/ethernet/sfc/falcon/farch.c tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); event 840 drivers/net/ethernet/sfc/falcon/farch.c tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); event 846 drivers/net/ethernet/sfc/falcon/farch.c } else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { event 848 drivers/net/ethernet/sfc/falcon/farch.c tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); event 855 drivers/net/ethernet/sfc/falcon/farch.c } else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { event 861 drivers/net/ethernet/sfc/falcon/farch.c EF4_QWORD_VAL(*event)); event 869 drivers/net/ethernet/sfc/falcon/farch.c const ef4_qword_t *event) event 880 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); event 881 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); event 882 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); event 883 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_pkt_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); event 884 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event, event 886 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event, event 888 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event, event 890 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_eth_crc_err = 
EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); event 891 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); event 893 drivers/net/ethernet/sfc/falcon/farch.c 0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); event 894 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); event 923 drivers/net/ethernet/sfc/falcon/farch.c ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event), event 980 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) event 993 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); event 994 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); event 995 drivers/net/ethernet/sfc/falcon/farch.c WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != event 1000 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); event 1040 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); event 1041 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); event 1042 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); event 1061 drivers/net/ethernet/sfc/falcon/farch.c flags = ef4_farch_handle_rx_not_ok(rx_queue, event); event 1065 drivers/net/ethernet/sfc/falcon/farch.c rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); event 1068 drivers/net/ethernet/sfc/falcon/farch.c EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); event 1091 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event) event 1096 drivers/net/ethernet/sfc/falcon/farch.c qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); event 1112 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event) event 1119 drivers/net/ethernet/sfc/falcon/farch.c qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); event 1120 drivers/net/ethernet/sfc/falcon/farch.c failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); event 1154 drivers/net/ethernet/sfc/falcon/farch.c ef4_qword_t *event) event 1162 drivers/net/ethernet/sfc/falcon/farch.c magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); event 1179 drivers/net/ethernet/sfc/falcon/farch.c channel->channel, EF4_QWORD_VAL(*event)); event 1184 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event) event 1190 drivers/net/ethernet/sfc/falcon/farch.c ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); event 1191 drivers/net/ethernet/sfc/falcon/farch.c ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); event 1197 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_tx_flush_done(efx, event); event 1202 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_rx_flush_done(efx, event); event 1260 drivers/net/ethernet/sfc/falcon/farch.c ef4_qword_t event, *p_event; event 1272 drivers/net/ethernet/sfc/falcon/farch.c event = *p_event; event 1274 drivers/net/ethernet/sfc/falcon/farch.c if (!ef4_event_present(&event)) event 1280 drivers/net/ethernet/sfc/falcon/farch.c channel->channel, EF4_QWORD_VAL(event)); event 1287 drivers/net/ethernet/sfc/falcon/farch.c ev_code = 
EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE); event 1291 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_rx_event(channel, &event); event 1297 drivers/net/ethernet/sfc/falcon/farch.c &event); event 1304 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_generated_event(channel, &event); event 1307 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_handle_driver_event(channel, &event); event 1311 drivers/net/ethernet/sfc/falcon/farch.c efx->type->handle_global_event(channel, &event)) event 1318 drivers/net/ethernet/sfc/falcon/farch.c ev_code, EF4_QWORD_VAL(event)); event 1548 drivers/net/ethernet/sfc/falcon/farch.c ef4_qword_t *event; event 1560 drivers/net/ethernet/sfc/falcon/farch.c event = ef4_event(channel, event 1562 drivers/net/ethernet/sfc/falcon/farch.c if (ef4_event_present(event)) event 53 drivers/net/ethernet/sfc/falcon/nic.h static inline int ef4_event_present(ef4_qword_t *event) event 55 drivers/net/ethernet/sfc/falcon/nic.h return !(EF4_DWORD_IS_ALL_ONES(event->dword[0]) | event 56 drivers/net/ethernet/sfc/falcon/nic.h EF4_DWORD_IS_ALL_ONES(event->dword[1])); event 510 drivers/net/ethernet/sfc/falcon/nic.h ef4_qword_t *event); event 789 drivers/net/ethernet/sfc/farch.c efx_qword_t *event) event 795 drivers/net/ethernet/sfc/farch.c drv_ev_reg.u32[0] = event->u32[0]; event 796 drivers/net/ethernet/sfc/farch.c drv_ev_reg.u32[1] = event->u32[1]; event 805 drivers/net/ethernet/sfc/farch.c efx_qword_t event; event 807 drivers/net/ethernet/sfc/farch.c EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, event 810 drivers/net/ethernet/sfc/farch.c efx_farch_generate_event(channel->efx, channel->channel, &event); event 819 drivers/net/ethernet/sfc/farch.c efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) event 829 drivers/net/ethernet/sfc/farch.c if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { event 831 drivers/net/ethernet/sfc/farch.c tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); event 832 drivers/net/ethernet/sfc/farch.c tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); event 836 drivers/net/ethernet/sfc/farch.c } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { event 838 drivers/net/ethernet/sfc/farch.c tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); event 845 drivers/net/ethernet/sfc/farch.c } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { event 851 drivers/net/ethernet/sfc/farch.c EFX_QWORD_VAL(*event)); event 857 drivers/net/ethernet/sfc/farch.c const efx_qword_t *event) event 868 drivers/net/ethernet/sfc/farch.c rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); event 869 drivers/net/ethernet/sfc/farch.c rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); event 870 drivers/net/ethernet/sfc/farch.c rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); event 871 drivers/net/ethernet/sfc/farch.c rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); event 872 drivers/net/ethernet/sfc/farch.c rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, event 874 drivers/net/ethernet/sfc/farch.c rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, event 876 drivers/net/ethernet/sfc/farch.c rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, event 878 drivers/net/ethernet/sfc/farch.c rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); event 879 drivers/net/ethernet/sfc/farch.c rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); event 880 drivers/net/ethernet/sfc/farch.c rx_ev_pause_frm = EFX_QWORD_FIELD(*event, 
FSF_AZ_RX_EV_PAUSE_FRM_ERR); event 909 drivers/net/ethernet/sfc/farch.c efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), event 968 drivers/net/ethernet/sfc/farch.c efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) event 981 drivers/net/ethernet/sfc/farch.c rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); event 982 drivers/net/ethernet/sfc/farch.c rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); event 983 drivers/net/ethernet/sfc/farch.c WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != event 988 drivers/net/ethernet/sfc/farch.c rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); event 1028 drivers/net/ethernet/sfc/farch.c rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); event 1029 drivers/net/ethernet/sfc/farch.c rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); event 1030 drivers/net/ethernet/sfc/farch.c rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); event 1049 drivers/net/ethernet/sfc/farch.c flags = efx_farch_handle_rx_not_ok(rx_queue, event); event 1053 drivers/net/ethernet/sfc/farch.c rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); event 1056 drivers/net/ethernet/sfc/farch.c EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); event 1079 drivers/net/ethernet/sfc/farch.c efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) event 1084 drivers/net/ethernet/sfc/farch.c qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); event 1100 drivers/net/ethernet/sfc/farch.c efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) event 1107 drivers/net/ethernet/sfc/farch.c qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); event 1108 drivers/net/ethernet/sfc/farch.c failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); event 1142 drivers/net/ethernet/sfc/farch.c efx_qword_t *event) event 1150 drivers/net/ethernet/sfc/farch.c magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); event 1167 drivers/net/ethernet/sfc/farch.c channel->channel, EFX_QWORD_VAL(*event)); event 1172 drivers/net/ethernet/sfc/farch.c efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) event 1178 drivers/net/ethernet/sfc/farch.c ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); event 1179 drivers/net/ethernet/sfc/farch.c ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); event 1185 drivers/net/ethernet/sfc/farch.c efx_farch_handle_tx_flush_done(efx, event); event 1187 drivers/net/ethernet/sfc/farch.c efx_siena_sriov_tx_flush_done(efx, event); event 1193 drivers/net/ethernet/sfc/farch.c efx_farch_handle_rx_flush_done(efx, event); event 1195 drivers/net/ethernet/sfc/farch.c efx_siena_sriov_rx_flush_done(efx, event); event 1263 drivers/net/ethernet/sfc/farch.c efx_qword_t event, *p_event; event 1274 drivers/net/ethernet/sfc/farch.c event = *p_event; event 1276 drivers/net/ethernet/sfc/farch.c if (!efx_event_present(&event)) event 1282 drivers/net/ethernet/sfc/farch.c channel->channel, EFX_QWORD_VAL(event)); event 1289 drivers/net/ethernet/sfc/farch.c ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); event 1293 drivers/net/ethernet/sfc/farch.c efx_farch_handle_rx_event(channel, &event); event 1298 drivers/net/ethernet/sfc/farch.c efx_farch_handle_tx_event(channel, &event); event 1301 drivers/net/ethernet/sfc/farch.c efx_farch_handle_generated_event(channel, &event); event 1304 drivers/net/ethernet/sfc/farch.c efx_farch_handle_driver_event(channel, &event); event 1308 
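The farch.c entries above outline the classic event-queue consumer: peek at the next ring entry, use an all-ones pattern to detect whether hardware has written it yet (efx_event_present()), dispatch on the event code, then advance the read pointer. A generic, hedged version of that loop follows; it is not sfc's actual code, and the ring layout, code field and budget handling are simplified for illustration.

#include <linux/types.h>

#define MY_EVQ_ENTRIES	256	/* must be a power of two for the mask below */

struct my_evq {
	u64 ring[MY_EVQ_ENTRIES];	/* hardware writes events here */
	unsigned int read_ptr;
};

/* Unwritten entries stay at all-ones, so "present" means "not all ones". */
static bool my_event_present(u64 ev)
{
	return ev != ~0ULL;
}

static int my_process_eventq(struct my_evq *evq, int budget)
{
	int spent = 0;

	while (spent < budget) {
		unsigned int idx = evq->read_ptr & (MY_EVQ_ENTRIES - 1);
		u64 ev = evq->ring[idx];

		if (!my_event_present(ev))
			break;			/* caught up with hardware */

		evq->ring[idx] = ~0ULL;		/* mark the slot empty again */
		evq->read_ptr++;

		switch (ev >> 60) {		/* top bits: event code (made up) */
		case 0:				/* RX completion */
		case 1:				/* TX completion */
		default:
			break;
		}
		spent++;
	}

	return spent;			/* caller re-arms the IRQ if spent < budget */
}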
drivers/net/ethernet/sfc/farch.c efx_siena_sriov_event(channel, &event); event 1312 drivers/net/ethernet/sfc/farch.c efx_mcdi_process_event(channel, &event); event 1316 drivers/net/ethernet/sfc/farch.c efx->type->handle_global_event(channel, &event)) event 1323 drivers/net/ethernet/sfc/farch.c ev_code, EFX_QWORD_VAL(event)); event 1557 drivers/net/ethernet/sfc/farch.c efx_qword_t *event; event 1569 drivers/net/ethernet/sfc/farch.c event = efx_event(channel, event 1571 drivers/net/ethernet/sfc/farch.c if (efx_event_present(event)) event 1302 drivers/net/ethernet/sfc/mcdi.c efx_qword_t *event) event 1305 drivers/net/ethernet/sfc/mcdi.c int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); event 1306 drivers/net/ethernet/sfc/mcdi.c u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); event 1321 drivers/net/ethernet/sfc/mcdi.c MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), event 1322 drivers/net/ethernet/sfc/mcdi.c MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), event 1323 drivers/net/ethernet/sfc/mcdi.c MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); event 1327 drivers/net/ethernet/sfc/mcdi.c efx_mcdi_process_link_change(efx, event); event 1330 drivers/net/ethernet/sfc/mcdi.c efx_mcdi_sensor_event(efx, event); event 1351 drivers/net/ethernet/sfc/mcdi.c MCDI_EVENT_FIELD(*event, FLR_VF)); event 1356 drivers/net/ethernet/sfc/mcdi.c efx_ptp_event(efx, event); event 1359 drivers/net/ethernet/sfc/mcdi.c efx_time_sync_event(channel, event); event 1371 drivers/net/ethernet/sfc/mcdi.c if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER)) event 1379 drivers/net/ethernet/sfc/mcdi.c EFX_QWORD_VAL(*event)); event 1384 drivers/net/ethernet/sfc/mcdi.c MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE), event 1385 drivers/net/ethernet/sfc/mcdi.c MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC)); event 1390 drivers/net/ethernet/sfc/mcdi.c EFX_QWORD_VAL(*event)); event 185 drivers/net/ethernet/sfc/mcdi.h void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); event 51 drivers/net/ethernet/sfc/nic.h static inline int efx_event_present(efx_qword_t *event) event 53 drivers/net/ethernet/sfc/nic.h return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | event 54 drivers/net/ethernet/sfc/nic.h EFX_DWORD_IS_ALL_ONES(event->dword[1])); event 691 drivers/net/ethernet/sfc/nic.h efx_qword_t *event); event 407 drivers/net/ethernet/sfc/siena_sriov.c efx_qword_t event; event 466 drivers/net/ethernet/sfc/siena_sriov.c EFX_POPULATE_QWORD_3(event, event 473 drivers/net/ethernet/sfc/siena_sriov.c &event); event 944 drivers/net/ethernet/sfc/siena_sriov.c efx_qword_t event; event 957 drivers/net/ethernet/sfc/siena_sriov.c EFX_POPULATE_QWORD_3(event, event 962 drivers/net/ethernet/sfc/siena_sriov.c for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event)) event 963 drivers/net/ethernet/sfc/siena_sriov.c memcpy(buffer->addr + pos, &event, sizeof(event)); event 1401 drivers/net/ethernet/sfc/siena_sriov.c void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event) event 1407 drivers/net/ethernet/sfc/siena_sriov.c qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID); event 1411 drivers/net/ethernet/sfc/siena_sriov.c seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ); event 1412 drivers/net/ethernet/sfc/siena_sriov.c type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE); event 1413 drivers/net/ethernet/sfc/siena_sriov.c data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA); event 1490 drivers/net/ethernet/sfc/siena_sriov.c void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) event 1495 drivers/net/ethernet/sfc/siena_sriov.c queue = 
EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); event 1509 drivers/net/ethernet/sfc/siena_sriov.c void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) event 1514 drivers/net/ethernet/sfc/siena_sriov.c queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); event 1515 drivers/net/ethernet/sfc/siena_sriov.c ev_failed = EFX_QWORD_FIELD(*event, event 71 drivers/net/ethernet/sfc/siena_sriov.h void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); event 72 drivers/net/ethernet/sfc/siena_sriov.h void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); event 73 drivers/net/ethernet/sfc/siena_sriov.h void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); event 4178 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c unsigned long event, void *ptr) event 4186 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c switch (event) { event 265 drivers/net/ethernet/sun/ldmvsw.c .event = sunvnet_event_common, event 410 drivers/net/ethernet/sun/sunvnet.c .event = sunvnet_event_common, event 928 drivers/net/ethernet/sun/sunvnet_common.c void sunvnet_event_common(void *arg, int event) event 933 drivers/net/ethernet/sun/sunvnet_common.c port->rx_event |= event; event 146 drivers/net/ethernet/sun/sunvnet_common.h void sunvnet_event_common(void *arg, int event); event 35 drivers/net/ethernet/ti/cpts.c static int event_expired(struct cpts_event *event) event 37 drivers/net/ethernet/ti/cpts.c return time_after(jiffies, event->tmo); event 40 drivers/net/ethernet/ti/cpts.c static int event_type(struct cpts_event *event) event 42 drivers/net/ethernet/ti/cpts.c return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; event 61 drivers/net/ethernet/ti/cpts.c struct cpts_event *event; event 65 drivers/net/ethernet/ti/cpts.c event = list_entry(this, struct cpts_event, list); event 66 drivers/net/ethernet/ti/cpts.c if (event_expired(event)) { event 67 drivers/net/ethernet/ti/cpts.c list_del_init(&event->list); event 68 drivers/net/ethernet/ti/cpts.c list_add(&event->list, &cpts->pool); event 97 drivers/net/ethernet/ti/cpts.c static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) event 104 drivers/net/ethernet/ti/cpts.c mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; event 105 drivers/net/ethernet/ti/cpts.c seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; event 115 drivers/net/ethernet/ti/cpts.c u64 ns = timecounter_cyc2time(&cpts->tc, event->low); event 146 drivers/net/ethernet/ti/cpts.c struct cpts_event *event; event 157 drivers/net/ethernet/ti/cpts.c event = list_first_entry(&cpts->pool, struct cpts_event, list); event 158 drivers/net/ethernet/ti/cpts.c event->tmo = jiffies + 2; event 159 drivers/net/ethernet/ti/cpts.c event->high = hi; event 160 drivers/net/ethernet/ti/cpts.c event->low = lo; event 161 drivers/net/ethernet/ti/cpts.c type = event_type(event); event 164 drivers/net/ethernet/ti/cpts.c if (cpts_match_tx_ts(cpts, event)) { event 173 drivers/net/ethernet/ti/cpts.c list_del_init(&event->list); event 174 drivers/net/ethernet/ti/cpts.c list_add_tail(&event->list, &cpts->events); event 193 drivers/net/ethernet/ti/cpts.c struct cpts_event *event; event 202 drivers/net/ethernet/ti/cpts.c event = list_entry(this, struct cpts_event, list); event 203 drivers/net/ethernet/ti/cpts.c if (event_type(event) == CPTS_EV_PUSH) { event 204 drivers/net/ethernet/ti/cpts.c list_del_init(&event->list); event 205 drivers/net/ethernet/ti/cpts.c list_add(&event->list, &cpts->pool); event 206 
drivers/net/ethernet/ti/cpts.c val = event->low; event 370 drivers/net/ethernet/ti/cpts.c struct cpts_event *event; event 383 drivers/net/ethernet/ti/cpts.c event = list_entry(this, struct cpts_event, list); event 384 drivers/net/ethernet/ti/cpts.c if (event_expired(event)) { event 385 drivers/net/ethernet/ti/cpts.c list_del_init(&event->list); event 386 drivers/net/ethernet/ti/cpts.c list_add(&event->list, &cpts->pool); event 389 drivers/net/ethernet/ti/cpts.c mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; event 390 drivers/net/ethernet/ti/cpts.c seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; event 391 drivers/net/ethernet/ti/cpts.c if (ev_type == event_type(event) && event 393 drivers/net/ethernet/ti/cpts.c ns = timecounter_cyc2time(&cpts->tc, event->low); event 394 drivers/net/ethernet/ti/cpts.c list_del_init(&event->list); event 395 drivers/net/ethernet/ti/cpts.c list_add(&event->list, &cpts->pool); event 2024 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c u64 event) event 2038 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c if (desired_event == event) { event 2044 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c __func__, event); event 2051 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c u64 event) event 2088 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c static const char *eventstr(enum gelic_lv1_wl_event event) event 2092 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c if (event & GELIC_LV1_WL_EVENT_DEVICE_READY) event 2094 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c else if (event & GELIC_LV1_WL_EVENT_SCAN_COMPLETED) event 2096 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c else if (event & GELIC_LV1_WL_EVENT_DEAUTH) event 2098 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c else if (event & GELIC_LV1_WL_EVENT_BEACON_LOST) event 2100 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c else if (event & GELIC_LV1_WL_EVENT_CONNECTED) event 2102 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c else if (event & GELIC_LV1_WL_EVENT_WPA_CONNECTED) event 2104 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c else if (event & GELIC_LV1_WL_EVENT_WPA_ERROR) event 2107 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c sprintf(buf, "Unknown(%#x)", event); event 2113 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c static const char *eventstr(enum gelic_lv1_wl_event event) event 2122 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c u64 event, tmp; event 2131 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c &event, &tmp); event 2140 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c pr_debug("%s: event=%s\n", __func__, eventstr(event)); event 2141 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c switch (event) { event 2147 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c gelic_wl_disconnect_event(wl, event); event 2151 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c gelic_wl_connected_event(wl, event); event 151 drivers/net/fddi/skfp/cfm.c static void cem_priv_state(struct s_smc *smc, int event) event 161 drivers/net/fddi/skfp/cfm.c np = event - CF_JOIN; event 207 drivers/net/fddi/skfp/cfm.c void cfm(struct s_smc *smc, int event) event 223 drivers/net/fddi/skfp/cfm.c cem_priv_state (smc, event); event 230 drivers/net/fddi/skfp/cfm.c cfm_events[event]); event 232 drivers/net/fddi/skfp/cfm.c cfm_fsm(smc,event) ; event 233 drivers/net/fddi/skfp/cfm.c event = 0 ; event 91 drivers/net/fddi/skfp/ecm.c static void start_ecm_timer(struct s_smc *smc, u_long value, int event); event 117 drivers/net/fddi/skfp/ecm.c void ecm(struct s_smc *smc, int 
event) event 125 drivers/net/fddi/skfp/ecm.c ecm_events[event]); event 127 drivers/net/fddi/skfp/ecm.c ecm_fsm(smc,event) ; event 128 drivers/net/fddi/skfp/ecm.c event = 0 ; event 514 drivers/net/fddi/skfp/ecm.c static void start_ecm_timer(struct s_smc *smc, u_long value, int event) event 516 drivers/net/fddi/skfp/ecm.c smt_timer_start(smc,&smc->e.ecm_timer,value,EV_TOKEN(EVENT_ECM,event)); event 179 drivers/net/fddi/skfp/h/cmtdef.h #define EV_TOKEN(class,event) (((u_long)(class)<<16L)|((u_long)(event))) event 495 drivers/net/fddi/skfp/h/cmtdef.h void queue_event(struct s_smc *smc, int class, int event); event 496 drivers/net/fddi/skfp/h/cmtdef.h void ecm(struct s_smc *smc, int event); event 498 drivers/net/fddi/skfp/h/cmtdef.h void rmt(struct s_smc *smc, int event); event 500 drivers/net/fddi/skfp/h/cmtdef.h void pcm(struct s_smc *smc, const int np, int event); event 502 drivers/net/fddi/skfp/h/cmtdef.h void cfm(struct s_smc *smc, int event); event 555 drivers/net/fddi/skfp/h/cmtdef.h void smt_event(struct s_smc *smc, int event); event 60 drivers/net/fddi/skfp/h/smc.h u_short event ; /* event value */ event 209 drivers/net/fddi/skfp/pcmplc.c static void start_pcm_timer0(struct s_smc *smc, u_long value, int event, event 214 drivers/net/fddi/skfp/pcmplc.c EV_TOKEN(EVENT_PCM+phy->np,event)) ; event 588 drivers/net/fddi/skfp/pcmplc.c void pcm(struct s_smc *smc, const int np, int event) event 610 drivers/net/fddi/skfp/pcmplc.c pcm_events[event]); event 612 drivers/net/fddi/skfp/pcmplc.c pcm_fsm(smc,phy,event) ; event 613 drivers/net/fddi/skfp/pcmplc.c event = 0 ; event 38 drivers/net/fddi/skfp/queue.c void queue_event(struct s_smc *smc, int class, int event) event 40 drivers/net/fddi/skfp/queue.c PRINTF("queue class %d event %d\n",class,event) ; event 42 drivers/net/fddi/skfp/queue.c smc->q.ev_put->event = event ; event 77 drivers/net/fddi/skfp/queue.c PRINTF("dispatch class %d event %d\n",ev->class,ev->event) ; event 80 drivers/net/fddi/skfp/queue.c ecm(smc,(int)ev->event) ; event 83 drivers/net/fddi/skfp/queue.c cfm(smc,(int)ev->event) ; event 86 drivers/net/fddi/skfp/queue.c rmt(smc,(int)ev->event) ; event 89 drivers/net/fddi/skfp/queue.c smt_event(smc,(int)ev->event) ; event 93 drivers/net/fddi/skfp/queue.c timer_test_event(smc,(int)ev->event) ; event 101 drivers/net/fddi/skfp/queue.c pcm(smc,class - EVENT_PCMA,(int)ev->event) ; event 100 drivers/net/fddi/skfp/rmt.c static void start_rmt_timer0(struct s_smc *smc, u_long value, int event); event 101 drivers/net/fddi/skfp/rmt.c static void start_rmt_timer1(struct s_smc *smc, u_long value, int event); event 102 drivers/net/fddi/skfp/rmt.c static void start_rmt_timer2(struct s_smc *smc, u_long value, int event); event 141 drivers/net/fddi/skfp/rmt.c void rmt(struct s_smc *smc, int event) event 149 drivers/net/fddi/skfp/rmt.c rmt_events[event]); event 151 drivers/net/fddi/skfp/rmt.c rmt_fsm(smc,event) ; event 152 drivers/net/fddi/skfp/rmt.c event = 0 ; event 591 drivers/net/fddi/skfp/rmt.c static void start_rmt_timer0(struct s_smc *smc, u_long value, int event) event 594 drivers/net/fddi/skfp/rmt.c smt_timer_start(smc,&smc->r.rmt_timer0,value,EV_TOKEN(EVENT_RMT,event)); event 601 drivers/net/fddi/skfp/rmt.c static void start_rmt_timer1(struct s_smc *smc, u_long value, int event) event 604 drivers/net/fddi/skfp/rmt.c smt_timer_start(smc,&smc->r.rmt_timer1,value,EV_TOKEN(EVENT_RMT,event)); event 611 drivers/net/fddi/skfp/rmt.c static void start_rmt_timer2(struct s_smc *smc, u_long value, int event) event 614 drivers/net/fddi/skfp/rmt.c 
smt_timer_start(smc,&smc->r.rmt_timer2,value,EV_TOKEN(EVENT_RMT,event)); event 232 drivers/net/fddi/skfp/smt.c void smt_event(struct s_smc *smc, int event) event 248 drivers/net/fddi/skfp/smt.c if (event == SM_FAST) event 1933 drivers/net/fddi/skfp/smt.c int event ; event 1988 drivers/net/fddi/skfp/smt.c event = PC_ENABLE ; event 1991 drivers/net/fddi/skfp/smt.c event = PC_DISABLE ; event 1994 drivers/net/fddi/skfp/smt.c event = PC_MAINT ; event 1997 drivers/net/fddi/skfp/smt.c event = PC_START ; event 2000 drivers/net/fddi/skfp/smt.c event = PC_STOP ; event 2005 drivers/net/fddi/skfp/smt.c queue_event(smc,EVENT_PCM+index,event) ; event 1800 drivers/net/geneve.c unsigned long event, void *ptr) event 1804 drivers/net/geneve.c if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || event 1805 drivers/net/geneve.c event == NETDEV_UDP_TUNNEL_DROP_INFO) { event 1806 drivers/net/geneve.c geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); event 1807 drivers/net/geneve.c } else if (event == NETDEV_UNREGISTER) { event 1809 drivers/net/geneve.c } else if (event == NETDEV_REGISTER) { event 507 drivers/net/hamradio/bpqether.c unsigned long event, void *ptr) event 517 drivers/net/hamradio/bpqether.c switch (event) { event 322 drivers/net/hamradio/scc.c static inline void scc_notify(struct scc_channel *scc, int event) event 335 drivers/net/hamradio/scc.c *bp++ = event; event 581 drivers/net/hippi/rrunner.c rrpriv->info->evt_ctrl.entry_size = sizeof(struct event); event 355 drivers/net/hippi/rrunner.h #define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event)) event 804 drivers/net/hippi/rrunner.h struct event *evt_ring; event 906 drivers/net/hyperv/hyperv_net.h u32 event; event 720 drivers/net/hyperv/netvsc_drv.c struct netvsc_reconfig *event; event 742 drivers/net/hyperv/netvsc_drv.c event = kzalloc(sizeof(*event), GFP_ATOMIC); event 743 drivers/net/hyperv/netvsc_drv.c if (!event) event 745 drivers/net/hyperv/netvsc_drv.c event->event = indicate->status; event 748 drivers/net/hyperv/netvsc_drv.c list_add_tail(&event->list, &ndev_ctx->reconfig_events); event 1896 drivers/net/hyperv/netvsc_drv.c struct netvsc_reconfig *event = NULL; event 1927 drivers/net/hyperv/netvsc_drv.c event = list_first_entry(&ndev_ctx->reconfig_events, event 1929 drivers/net/hyperv/netvsc_drv.c list_del(&event->list); event 1934 drivers/net/hyperv/netvsc_drv.c if (!event) event 1937 drivers/net/hyperv/netvsc_drv.c switch (event->event) { event 1949 drivers/net/hyperv/netvsc_drv.c kfree(event); event 1957 drivers/net/hyperv/netvsc_drv.c kfree(event); event 1965 drivers/net/hyperv/netvsc_drv.c event->event = RNDIS_STATUS_MEDIA_CONNECT; event 1967 drivers/net/hyperv/netvsc_drv.c list_add(&event->list, &ndev_ctx->reconfig_events); event 2452 drivers/net/hyperv/netvsc_drv.c unsigned long event, void *ptr) event 2473 drivers/net/hyperv/netvsc_drv.c switch (event) { event 689 drivers/net/ipvlan/ipvlan_main.c unsigned long event, void *ptr) event 704 drivers/net/ipvlan/ipvlan_main.c switch (event) { event 855 drivers/net/ipvlan/ipvlan_main.c unsigned long event, void *ptr) event 864 drivers/net/ipvlan/ipvlan_main.c switch (event) { event 879 drivers/net/ipvlan/ipvlan_main.c unsigned long event, void *ptr) event 888 drivers/net/ipvlan/ipvlan_main.c switch (event) { event 923 drivers/net/ipvlan/ipvlan_main.c unsigned long event, void *ptr) event 933 drivers/net/ipvlan/ipvlan_main.c switch (event) { event 950 drivers/net/ipvlan/ipvlan_main.c unsigned long event, void *ptr) event 959 drivers/net/ipvlan/ipvlan_main.c switch (event) { 
event 138 drivers/net/ipvlan/ipvtap.c unsigned long event, void *ptr) event 153 drivers/net/ipvlan/ipvtap.c switch (event) { event 3474 drivers/net/macsec.c static int macsec_notify(struct notifier_block *this, unsigned long event, event 3483 drivers/net/macsec.c switch (event) { event 1665 drivers/net/macvlan.c unsigned long event, void *ptr) event 1677 drivers/net/macvlan.c switch (event) { event 1729 drivers/net/macvlan.c call_netdevice_notifiers(event, vlan->dev); event 145 drivers/net/macvtap.c unsigned long event, void *ptr) event 160 drivers/net/macvtap.c switch (event) { event 692 drivers/net/netconsole.c unsigned long event, void *ptr) event 699 drivers/net/netconsole.c if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER || event 700 drivers/net/netconsole.c event == NETDEV_RELEASE || event == NETDEV_JOIN)) event 708 drivers/net/netconsole.c switch (event) { event 736 drivers/net/netconsole.c switch (event) { event 178 drivers/net/netdevsim/fib.c static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, event 184 drivers/net/netdevsim/fib.c switch (event) { event 187 drivers/net/netdevsim/fib.c err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD); event 192 drivers/net/netdevsim/fib.c err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD); event 743 drivers/net/phy/dp83640.c struct ptp_clock_event event; event 784 drivers/net/phy/dp83640.c event.type = PTP_CLOCK_EXTTS; event 785 drivers/net/phy/dp83640.c event.timestamp = phy2txts(&dp83640->edata); event 788 drivers/net/phy/dp83640.c event.timestamp -= 35; event 792 drivers/net/phy/dp83640.c event.index = i; event 793 drivers/net/phy/dp83640.c ptp_clock_event(dp83640->clock->ptp_clock, &event); event 104 drivers/net/phy/sfp.c static const char *event_to_str(unsigned short event) event 106 drivers/net/phy/sfp.c if (event >= ARRAY_SIZE(event_strings)) event 108 drivers/net/phy/sfp.c return event_strings[event]; event 1322 drivers/net/phy/sfp.c static bool sfp_los_event_active(struct sfp *sfp, unsigned int event) event 1325 drivers/net/phy/sfp.c event == SFP_E_LOS_LOW) || event 1327 drivers/net/phy/sfp.c event == SFP_E_LOS_HIGH); event 1330 drivers/net/phy/sfp.c static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event) event 1333 drivers/net/phy/sfp.c event == SFP_E_LOS_HIGH) || event 1335 drivers/net/phy/sfp.c event == SFP_E_LOS_LOW); event 1547 drivers/net/phy/sfp.c static void sfp_sm_event(struct sfp *sfp, unsigned int event) event 1555 drivers/net/phy/sfp.c event_to_str(event)); event 1562 drivers/net/phy/sfp.c if (event == SFP_E_INSERT && sfp->attached) { event 1569 drivers/net/phy/sfp.c if (event == SFP_E_REMOVE) { event 1571 drivers/net/phy/sfp.c } else if (event == SFP_E_TIMEOUT) { event 1586 drivers/net/phy/sfp.c if (event == SFP_E_TIMEOUT) { event 1593 drivers/net/phy/sfp.c if (event == SFP_E_REMOVE) { event 1603 drivers/net/phy/sfp.c if (event == SFP_E_DEV_UP) event 1608 drivers/net/phy/sfp.c if (event == SFP_E_DEV_DOWN) { event 1643 drivers/net/phy/sfp.c if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) event 1645 drivers/net/phy/sfp.c else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) event 1650 drivers/net/phy/sfp.c if (event == SFP_E_TX_FAULT) event 1652 drivers/net/phy/sfp.c else if (sfp_los_event_inactive(sfp, event)) event 1657 drivers/net/phy/sfp.c if (event == SFP_E_TX_FAULT) { event 1660 drivers/net/phy/sfp.c } else if (sfp_los_event_active(sfp, event)) { event 1667 drivers/net/phy/sfp.c if (event == SFP_E_TIMEOUT) { event 1674 
drivers/net/phy/sfp.c if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) { event 1676 drivers/net/phy/sfp.c } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) { event 333 drivers/net/ppp/pppoe.c unsigned long event, void *ptr) event 338 drivers/net/ppp/pppoe.c switch (event) { event 2979 drivers/net/team/team.c unsigned long event, void *ptr) event 2988 drivers/net/team/team.c switch (event) { event 3017 drivers/net/team/team.c call_netdevice_notifiers(event, port->team->dev); event 3643 drivers/net/tun.c unsigned long event, void *ptr) event 3652 drivers/net/tun.c switch (event) { event 45 drivers/net/usb/asix_devices.c struct ax88172_int_data *event; event 51 drivers/net/usb/asix_devices.c event = urb->transfer_buffer; event 52 drivers/net/usb/asix_devices.c link = event->link & 0x01; event 335 drivers/net/usb/ax88179_178a.c struct ax88179_int_data *event; event 341 drivers/net/usb/ax88179_178a.c event = urb->transfer_buffer; event 342 drivers/net/usb/ax88179_178a.c le32_to_cpus((void *)&event->intdata1); event 344 drivers/net/usb/ax88179_178a.c link = (((__force u32)event->intdata1) & AX_INT_PPLS_LINK) >> 16; event 393 drivers/net/usb/cdc_ether.c struct usb_cdc_notification *event; event 395 drivers/net/usb/cdc_ether.c if (urb->actual_length < sizeof(*event)) event 404 drivers/net/usb/cdc_ether.c event = urb->transfer_buffer; event 405 drivers/net/usb/cdc_ether.c switch (event->bNotificationType) { event 408 drivers/net/usb/cdc_ether.c event->wValue ? "on" : "off"); event 409 drivers/net/usb/cdc_ether.c usbnet_link_change(dev, !!event->wValue, 0); event 414 drivers/net/usb/cdc_ether.c if (urb->actual_length != (sizeof(*event) + 8)) event 417 drivers/net/usb/cdc_ether.c dumpspeed(dev, (__le32 *) &event[1]); event 424 drivers/net/usb/cdc_ether.c event->bNotificationType); event 489 drivers/net/usb/cdc_ether.c struct usb_cdc_notification *event; event 491 drivers/net/usb/cdc_ether.c if (urb->actual_length < sizeof(*event)) event 494 drivers/net/usb/cdc_ether.c event = urb->transfer_buffer; event 496 drivers/net/usb/cdc_ether.c if (event->bNotificationType != USB_CDC_NOTIFY_NETWORK_CONNECTION) { event 502 drivers/net/usb/cdc_ether.c event->wValue ? "on" : "off"); event 504 drivers/net/usb/cdc_ether.c if (event->wValue && event 508 drivers/net/usb/cdc_ether.c usbnet_link_change(dev, !!event->wValue, 0); event 1607 drivers/net/usb/cdc_ncm.c struct usb_cdc_notification *event; event 1609 drivers/net/usb/cdc_ncm.c if (urb->actual_length < sizeof(*event)) event 1619 drivers/net/usb/cdc_ncm.c event = urb->transfer_buffer; event 1621 drivers/net/usb/cdc_ncm.c switch (event->bNotificationType) { event 1630 drivers/net/usb/cdc_ncm.c !!event->wValue ? 
"" : "dis"); event 1631 drivers/net/usb/cdc_ncm.c usbnet_link_change(dev, !!event->wValue, 0); event 1635 drivers/net/usb/cdc_ncm.c if (urb->actual_length < (sizeof(*event) + event 1640 drivers/net/usb/cdc_ncm.c (struct usb_cdc_speed_change *)&event[1]); event 1646 drivers/net/usb/cdc_ncm.c event->bNotificationType); event 4012 drivers/net/usb/lan78xx.c int event; event 4014 drivers/net/usb/lan78xx.c event = message.event; event 591 drivers/net/usb/sierra_net.c struct usb_cdc_notification *event; event 595 drivers/net/usb/sierra_net.c if (urb->actual_length < sizeof *event) event 599 drivers/net/usb/sierra_net.c event = urb->transfer_buffer; event 600 drivers/net/usb/sierra_net.c switch (event->bNotificationType) { event 610 drivers/net/usb/sierra_net.c event->bNotificationType); event 1806 drivers/net/usb/smsc75xx.c if (message.event == PM_EVENT_AUTO_SUSPEND) { event 1600 drivers/net/usb/smsc95xx.c if (message.event == PM_EVENT_AUTO_SUSPEND && event 152 drivers/net/usb/sr9800.c struct sr9800_int_data *event; event 158 drivers/net/usb/sr9800.c event = urb->transfer_buffer; event 159 drivers/net/usb/sr9800.c link = event->link & 0x01; event 1420 drivers/net/vrf.c unsigned long event, void *ptr) event 1425 drivers/net/vrf.c if (event == NETDEV_UNREGISTER) { event 4249 drivers/net/vxlan.c unsigned long event, void *ptr) event 4254 drivers/net/vxlan.c if (event == NETDEV_UNREGISTER) { event 4257 drivers/net/vxlan.c } else if (event == NETDEV_REGISTER) { event 4259 drivers/net/vxlan.c } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || event 4260 drivers/net/vxlan.c event == NETDEV_UDP_TUNNEL_DROP_INFO) { event 4261 drivers/net/vxlan.c vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); event 4358 drivers/net/vxlan.c unsigned long event, void *ptr) event 4364 drivers/net/vxlan.c switch (event) { event 486 drivers/net/wan/dlci.c unsigned long event, void *ptr) event 493 drivers/net/wan/dlci.c if (event == NETDEV_UNREGISTER) { event 1466 drivers/net/wan/farsync.c int event; /* Actual event for processing */ event 1554 drivers/net/wan/farsync.c event = FST_RDB(card, interruptEvent.evntbuff[rdidx]); event 1555 drivers/net/wan/farsync.c port = &card->ports[event & 0x03]; event 1557 drivers/net/wan/farsync.c dbg(DBG_INTR, "Processing Interrupt event: %x\n", event); event 1559 drivers/net/wan/farsync.c switch (event) { event 1605 drivers/net/wan/farsync.c pr_err("intr: unknown card event %d. 
ignored\n", event); event 88 drivers/net/wan/hdlc.c static int hdlc_device_event(struct notifier_block *this, unsigned long event, event 102 drivers/net/wan/hdlc.c if (event != NETDEV_CHANGE) event 301 drivers/net/wan/hdlc_ppp.c static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code, event 310 drivers/net/wan/hdlc_ppp.c BUG_ON(event >= EVENTS); event 314 drivers/net/wan/hdlc_ppp.c proto_name(pid), event_names[event], state_names[proto->state]); event 317 drivers/net/wan/hdlc_ppp.c action = cp_table[event][old_state]; event 367 drivers/net/wan/hdlc_ppp.c proto_name(pid), event_names[event], state_names[proto->state]); event 363 drivers/net/wan/lapbether.c unsigned long event, void *ptr) event 374 drivers/net/wan/lapbether.c switch (event) { event 338 drivers/net/wan/z85230.h unsigned long event; /* Pending events */ event 591 drivers/net/wimax/i2400m/usb.c d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event); event 632 drivers/net/wimax/i2400m/usb.c iface, pm_msg.event, result); event 784 drivers/net/wireless/ath/ath10k/qmi.c struct ath10k_qmi_driver_event *event; event 786 drivers/net/wireless/ath/ath10k/qmi.c event = kzalloc(sizeof(*event), GFP_ATOMIC); event 787 drivers/net/wireless/ath/ath10k/qmi.c if (!event) event 790 drivers/net/wireless/ath/ath10k/qmi.c event->type = type; event 791 drivers/net/wireless/ath/ath10k/qmi.c event->data = data; event 794 drivers/net/wireless/ath/ath10k/qmi.c list_add_tail(&event->list, &qmi->event_list); event 922 drivers/net/wireless/ath/ath10k/qmi.c struct ath10k_qmi_driver_event *event; event 927 drivers/net/wireless/ath/ath10k/qmi.c event = list_first_entry(&qmi->event_list, event 929 drivers/net/wireless/ath/ath10k/qmi.c list_del(&event->list); event 932 drivers/net/wireless/ath/ath10k/qmi.c switch (event->type) { event 946 drivers/net/wireless/ath/ath10k/qmi.c ath10k_warn(ar, "invalid event type: %d", event->type); event 949 drivers/net/wireless/ath/ath10k/qmi.c kfree(event); event 171 drivers/net/wireless/ath/ath10k/wmi-ops.h enum wmi_wow_wakeup_event event, event 1317 drivers/net/wireless/ath/ath10k/wmi-ops.h enum wmi_wow_wakeup_event event, event 1326 drivers/net/wireless/ath/ath10k/wmi-ops.h skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable); event 3465 drivers/net/wireless/ath/ath10k/wmi-tlv.c enum wmi_wow_wakeup_event event, event 3485 drivers/net/wireless/ath/ath10k/wmi-tlv.c cmd->event_bitmap = __cpu_to_le32(1 << event); event 3488 drivers/net/wireless/ath/ath10k/wmi-tlv.c wow_wakeup_event(event), enable, vdev_id); event 536 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c struct ath9k_htc_tx_event *event, *tmp; event 562 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) { event 563 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c list_del(&event->list); event 564 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c kfree(event); event 759 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c struct ath9k_htc_tx_event *event, *tmp; event 763 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) { event 765 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c skb = ath9k_htc_tx_get_packet(priv, &event->txs); event 769 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c event->txs.cookie, event 770 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID)); event 772 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c ath9k_htc_tx_process(priv, skb, 
&event->txs); event 773 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c list_del(&event->list); event 774 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c kfree(event); event 778 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c if (++event->count >= ATH9K_HTC_TX_TIMEOUT_COUNT) { event 779 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c list_del(&event->list); event 780 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c kfree(event); event 271 drivers/net/wireless/ath/dfs_pattern_detector.c dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event, event 284 drivers/net/wireless/ath/dfs_pattern_detector.c cd = channel_detector_get(dpd, event->freq); event 289 drivers/net/wireless/ath/dfs_pattern_detector.c if (event->ts < dpd->last_pulse_ts) event 291 drivers/net/wireless/ath/dfs_pattern_detector.c dpd->last_pulse_ts = event->ts; event 296 drivers/net/wireless/ath/dfs_pattern_detector.c struct pri_sequence *ps = pd->add_pulse(pd, event); event 303 drivers/net/wireless/ath/dfs_pattern_detector.c event->freq, pd->rs->type_id, event 383 drivers/net/wireless/ath/dfs_pri_detector.c struct pulse_event *event) event 387 drivers/net/wireless/ath/dfs_pri_detector.c u64 ts = event->ts; event 391 drivers/net/wireless/ath/dfs_pri_detector.c if ((rs->width_min > event->width) || (rs->width_max < event->width)) event 398 drivers/net/wireless/ath/dfs_pri_detector.c if (rs->chirp && rs->chirp != event->chirp) event 503 drivers/net/wireless/ath/wil6210/wil6210.h } __packed event; event 1948 drivers/net/wireless/ath/wil6210/wmi.c event.wmi) + len, 4), event 1953 drivers/net/wireless/ath/wil6210/wmi.c evt->event.hdr = hdr; event 1954 drivers/net/wireless/ath/wil6210/wmi.c cmd = (void *)&evt->event.wmi; event 1962 drivers/net/wireless/ath/wil6210/wmi.c struct wmi_cmd_hdr *wmi = &evt->event.wmi; event 1997 drivers/net/wireless/ath/wil6210/wmi.c &evt->event.hdr, sizeof(hdr) + len, true); event 3401 drivers/net/wireless/ath/wil6210/wmi.c wmi_event_handle(wil, &evt->event.hdr); event 5352 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c u32 event = e->event_code; event 5356 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c event == BRCMF_E_PSK_SUP && event 5359 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) { event 5379 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c u32 event = e->event_code; event 5382 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c if ((event == BRCMF_E_DEAUTH) || (event == BRCMF_E_DEAUTH_IND) || event 5383 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c (event == BRCMF_E_DISASSOC_IND) || event 5384 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) { event 5394 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c u32 event = e->event_code; event 5397 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) { event 5403 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) { event 5408 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c if (event == BRCMF_E_PSK_SUP && event 5606 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c u32 event = e->event_code; event 5611 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c brcmf_fweh_event_name(event), event, reason); event 5612 
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c if (event == BRCMF_E_LINK && reason == BRCMF_E_REASON_LINK_BSSCFG_DIS && event 5619 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) && event 5637 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c } else if ((event == BRCMF_E_DISASSOC_IND) || event 5638 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c (event == BRCMF_E_DEAUTH_IND) || event 5639 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c (event == BRCMF_E_DEAUTH)) { event 5705 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c u32 event = e->event_code; event 5708 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) { event 5744 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; event 5751 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_lock(&event->vif_event_lock); event 5752 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c event->action = ifevent->action; event 5753 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c vif = event->vif; event 5759 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_unlock(&event->vif_event_lock); event 5770 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_unlock(&event->vif_event_lock); event 5771 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c wake_up(&event->vif_wq); event 5775 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_unlock(&event->vif_event_lock); event 5778 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c wake_up(&event->vif_wq); event 5782 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_unlock(&event->vif_event_lock); event 5783 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c wake_up(&event->vif_wq); event 5787 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_unlock(&event->vif_event_lock); event 5906 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c static void init_vif_event(struct brcmf_cfg80211_vif_event *event) event 5908 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c init_waitqueue_head(&event->vif_wq); event 5909 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_lock_init(&event->vif_event_lock); event 6862 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event, event 6867 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_lock(&event->vif_event_lock); event 6868 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c evt_action = event->action; event 6869 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_unlock(&event->vif_event_lock); event 6876 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; event 6878 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_lock(&event->vif_event_lock); event 6879 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c event->vif = vif; event 6880 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c event->action = 0; event 6881 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_unlock(&event->vif_event_lock); event 6886 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; 
event 6889 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_lock(&event->vif_event_lock); event 6890 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c armed = event->vif != NULL; event 6891 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c spin_unlock(&event->vif_event_lock); event 6899 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; event 6901 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c return wait_event_timeout(event->vif_wq, event 6902 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c vif_event_equals(event, action), timeout); event 84 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c struct brcmf_fweh_queue_item *event) event 89 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c list_add_tail(&event->q, &fweh->event_q); event 194 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c struct brcmf_fweh_queue_item *event = NULL; event 199 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c event = list_first_entry(&fweh->event_q, event 201 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c list_del(&event->q); event 205 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c return event; event 218 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c struct brcmf_fweh_queue_item *event; event 226 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c while ((event = brcmf_fweh_dequeue_event(fweh))) { event 228 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c brcmf_fweh_event_name(event->code), event->code, event 229 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c event->emsg.ifidx, event->emsg.bsscfgidx, event 230 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c event->emsg.addr); event 233 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c emsg_be = &event->emsg; event 236 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c emsg.event_code = event->code; event 248 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data, event 253 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c if (event->code == BRCMF_E_IF) { event 254 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c brcmf_fweh_handle_if_event(drvr, &emsg, event->data); event 258 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c if (event->code == BRCMF_E_TDLS_PEER_EVENT) event 262 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c err = brcmf_fweh_call_event_handler(drvr, ifp, event->code, event 263 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c &emsg, event->data); event 266 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c event->code); event 270 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c kfree(event); event 394 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c struct brcmf_fweh_queue_item *event; event 417 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c event = kzalloc(sizeof(*event) + datalen, alloc_flag); event 418 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c if (!event) event 421 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c event->code = code; event 422 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c event->ifidx = event_packet->msg.ifidx; event 425 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg)); event 426 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c memcpy(event->data, data, datalen); event 427 
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c event->datalen = datalen; event 428 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN); event 430 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c brcmf_fweh_queue_event(fweh, event); event 1097 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c struct msgbuf_rx_event *event; event 1103 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c event = (struct msgbuf_rx_event *)buf; event 1104 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c idx = le32_to_cpu(event->msg.request_id); event 1105 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c buflen = le16_to_cpu(event->event_data_len); event 1121 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx); event 1124 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c event->msg.ifidx); event 152 drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h u32 event; event 2252 drivers/net/wireless/cisco/airo.c if (ai->power.event) { event 3129 drivers/net/wireless/cisco/airo.c if (ai->power.event || test_bit(FLAG_FLASHING, &ai->flags)) { event 7635 drivers/net/wireless/cisco/airo.c if (ai->power.event) event 7713 drivers/net/wireless/cisco/airo.c if (local->power.event) { event 668 drivers/net/wireless/intel/ipw2x00/ipw2200.c error->log[i].data, error->log[i].event); event 1286 drivers/net/wireless/intel/ipw2x00/ipw2200.c log[i].time, log[i].event, log[i].data); event 1323 drivers/net/wireless/intel/ipw2x00/ipw2200.c priv->error->log[i].event, event 1098 drivers/net/wireless/intel/ipw2x00/ipw2200.h u32 event; event 3446 drivers/net/wireless/intel/iwlwifi/dvm/commands.h u8 event; event 1124 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c const struct ieee80211_event *event) event 1128 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c if (event->type != RSSI_EVENT) event 1135 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c if (event->u.rssi.data == RSSI_EVENT_LOW) event 1137 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c else if (event->u.rssi.data == RSSI_EVENT_HIGH) event 4786 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c const struct ieee80211_event *event) event 4805 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c if (event->u.mlme.data == ASSOC_EVENT) { event 4806 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c if (event->u.mlme.status == MLME_DENIED) event 4809 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c event->u.mlme.reason); event 4810 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c else if (event->u.mlme.status == MLME_TIMEOUT) event 4813 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c } else if (event->u.mlme.data == AUTH_EVENT) { event 4814 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c if (event->u.mlme.status == MLME_DENIED) event 4817 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c event->u.mlme.reason); event 4818 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c else if (event->u.mlme.status == MLME_TIMEOUT) event 4821 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { event 4823 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c "DEAUTH RX %d", event->u.mlme.reason); event 4824 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { event 4826 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c "DEAUTH TX %d", event->u.mlme.reason); event 4833 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c const 
struct ieee80211_event *event) event 4845 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) event 4850 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c event->u.ba.sta->addr, event->u.ba.tid, event 4851 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c event->u.ba.ssn); event 4856 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c const struct ieee80211_event *event) event 4860 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c switch (event->type) { event 4862 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c iwl_mvm_event_mlme_callback(mvm, vif, event); event 4865 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c iwl_mvm_event_bar_rx_callback(mvm, vif, event); event 4868 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, event 4869 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c event->u.ba.tid); event 1042 drivers/net/wireless/intersil/hostap/hostap_wlan.h void prism2_callback(local_info_t *local, int event); event 196 drivers/net/wireless/intersil/p54/lmac.h __le16 event; event 614 drivers/net/wireless/intersil/p54/txrx.c u16 event = le16_to_cpu(trap->event); event 617 drivers/net/wireless/intersil/p54/txrx.c switch (event) { event 641 drivers/net/wireless/intersil/p54/txrx.c event, freq); event 831 drivers/net/wireless/marvell/libertas/cfg.c void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event) event 835 drivers/net/wireless/marvell/libertas/cfg.c event == MACREG_INT_CODE_MIC_ERR_MULTICAST ? event 16 drivers/net/wireless/marvell/libertas/cfg.h void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); event 79 drivers/net/wireless/marvell/libertas/cmd.h int lbs_process_event(struct lbs_private *priv, u32 event); event 223 drivers/net/wireless/marvell/libertas/cmdresp.c int lbs_process_event(struct lbs_private *priv, u32 event) event 228 drivers/net/wireless/marvell/libertas/cmdresp.c switch (event) { event 312 drivers/net/wireless/marvell/libertas/cmdresp.c lbs_send_mic_failureevent(priv, event); event 317 drivers/net/wireless/marvell/libertas/cmdresp.c lbs_send_mic_failureevent(priv, event); event 351 drivers/net/wireless/marvell/libertas/cmdresp.c netdev_alert(priv->dev, "EVENT: unknown event id %d\n", event); event 65 drivers/net/wireless/marvell/libertas/decl.h void lbs_queue_event(struct lbs_private *priv, u32 event); event 268 drivers/net/wireless/marvell/libertas/if_sdio.c u32 event; event 271 drivers/net/wireless/marvell/libertas/if_sdio.c event = sdio_readb(card->func, IF_SDIO_EVENT, &ret); event 276 drivers/net/wireless/marvell/libertas/if_sdio.c event >>= 3; event 284 drivers/net/wireless/marvell/libertas/if_sdio.c event = buffer[3] << 24; event 285 drivers/net/wireless/marvell/libertas/if_sdio.c event |= buffer[2] << 16; event 286 drivers/net/wireless/marvell/libertas/if_sdio.c event |= buffer[1] << 8; event 287 drivers/net/wireless/marvell/libertas/if_sdio.c event |= buffer[0] << 0; event 290 drivers/net/wireless/marvell/libertas/if_sdio.c lbs_queue_event(card->priv, event & 0xFF); event 662 drivers/net/wireless/marvell/libertas/if_usb.c uint32_t event; event 693 drivers/net/wireless/marvell/libertas/if_usb.c event = le32_to_cpu(pkt[1]); event 694 drivers/net/wireless/marvell/libertas/if_usb.c lbs_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event); event 698 drivers/net/wireless/marvell/libertas/if_usb.c if (event & 0xffff0000) { event 699 drivers/net/wireless/marvell/libertas/if_usb.c u32 trycount = (event & 0xffff0000) 
>> 16; event 703 drivers/net/wireless/marvell/libertas/if_usb.c lbs_queue_event(priv, event & 0xFF); event 528 drivers/net/wireless/marvell/libertas/main.c u32 event; event 531 drivers/net/wireless/marvell/libertas/main.c (unsigned char *) &event, sizeof(event)) != event 532 drivers/net/wireless/marvell/libertas/main.c sizeof(event)) event 535 drivers/net/wireless/marvell/libertas/main.c lbs_process_event(priv, event); event 1105 drivers/net/wireless/marvell/libertas/main.c void lbs_queue_event(struct lbs_private *priv, u32 event) event 1114 drivers/net/wireless/marvell/libertas/main.c kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); event 778 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct host_cmd_ds_11n_batimeout *event) event 783 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN); event 786 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c cpu_to_le16((u16) event->tid << DELBA_TID_POS); event 788 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c (u16) event->origninator << DELBA_INITIATOR_POS); event 63 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h struct host_cmd_ds_11n_batimeout *event); event 1698 drivers/net/wireless/marvell/mwifiex/main.h struct sk_buff *event); event 1839 drivers/net/wireless/marvell/mwifiex/pcie.c u32 wrptr, event; event 1887 drivers/net/wireless/marvell/mwifiex/pcie.c event = get_unaligned_le32( event 1889 drivers/net/wireless/marvell/mwifiex/pcie.c adapter->event_cause = event; event 32 drivers/net/wireless/marvell/mwifiex/sta_event.c struct sk_buff *event) event 41 drivers/net/wireless/marvell/mwifiex/sta_event.c skb_pull(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE); event 42 drivers/net/wireless/marvell/mwifiex/sta_event.c evt_len = event->len; event 43 drivers/net/wireless/marvell/mwifiex/sta_event.c curr = event->data; event 46 drivers/net/wireless/marvell/mwifiex/sta_event.c event->data, event->len); event 48 drivers/net/wireless/marvell/mwifiex/sta_event.c skb_push(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE); event 27 drivers/net/wireless/marvell/mwifiex/uap_event.c struct sk_buff *event) event 37 drivers/net/wireless/marvell/mwifiex/uap_event.c skb_pull(event, MWIFIEX_BSS_START_EVT_FIX_SIZE); event 38 drivers/net/wireless/marvell/mwifiex/uap_event.c evt_len = event->len; event 39 drivers/net/wireless/marvell/mwifiex/uap_event.c curr = event->data; event 42 drivers/net/wireless/marvell/mwifiex/uap_event.c event->data, event->len); event 44 drivers/net/wireless/marvell/mwifiex/uap_event.c skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE); event 112 drivers/net/wireless/marvell/mwifiex/uap_event.c struct mwifiex_assoc_event *event; event 124 drivers/net/wireless/marvell/mwifiex/uap_event.c event = (struct mwifiex_assoc_event *) event 126 drivers/net/wireless/marvell/mwifiex/uap_event.c if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { event 129 drivers/net/wireless/marvell/mwifiex/uap_event.c if (ieee80211_is_assoc_req(event->frame_control)) event 131 drivers/net/wireless/marvell/mwifiex/uap_event.c else if (ieee80211_is_reassoc_req(event->frame_control)) event 138 drivers/net/wireless/marvell/mwifiex/uap_event.c sinfo->assoc_req_ies = &event->data[len]; event 140 drivers/net/wireless/marvell/mwifiex/uap_event.c (u8 *)&event->frame_control; event 142 drivers/net/wireless/marvell/mwifiex/uap_event.c le16_to_cpu(event->len) - (u16)len; event 145 drivers/net/wireless/marvell/mwifiex/uap_event.c cfg80211_new_sta(priv->netdev, event->sta_addr, sinfo, 
event 148 drivers/net/wireless/marvell/mwifiex/uap_event.c node = mwifiex_add_sta_entry(priv, event->sta_addr); event 363 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c struct mt76x02_dfs_event *event) event 380 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c event->engine = MT_DFS_EVENT_ENGINE(data); event 382 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c event->ts = MT_DFS_EVENT_TIMESTAMP(data); event 384 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c event->width = MT_DFS_EVENT_WIDTH(data); event 390 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c struct mt76x02_dfs_event *event) event 392 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c if (event->engine == 2) { event 400 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c delta_ts = event->ts - event_buff->data[last_event_idx].ts; event 409 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c struct mt76x02_dfs_event *event) event 415 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c event_buff = event->engine == 2 ? &dfs_pd->event_rb[1] event 417 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c event_buff->data[event_buff->t_rb] = *event; event 427 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c struct mt76x02_dfs_event *event, event 438 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c event_rb = event->engine == 2 ? &dfs_pd->event_rb[1] event 446 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c with_sum = event->width + cur_event->width; event 458 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c if (event->engine == 2) event 470 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c pri = event->ts - cur_event->ts; event 471 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c if (abs(event->width - cur_event->width) > width_delta || event 478 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c seq.pri = event->ts - cur_event->ts; event 480 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c seq.last_ts = event->ts; event 481 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c seq.engine = event->engine; event 487 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c cur_pri = event->ts - cur_event->ts; event 514 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c struct mt76x02_dfs_event *event) event 524 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) { event 530 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c if (event->engine != seq->engine) event 533 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c pri = event->ts - seq->last_ts; event 537 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c seq->last_ts = event->ts; event 565 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c struct mt76x02_dfs_event event; event 571 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c if (!mt76x02_dfs_fetch_event(dev, &event)) event 574 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c if (dfs_pd->last_event_ts > event.ts) event 576 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c dfs_pd->last_event_ts = event.ts; event 578 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c if (!mt76x02_dfs_check_event(dev, &event)) event 581 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c seq_len = mt76x02_dfs_add_event_to_sequence(dev, &event); event 582 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c mt76x02_dfs_create_sequence(dev, &event, seq_len); event 584 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c mt76x02_dfs_queue_event(dev, &event); event 593 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c struct mt76x02_dfs_event *event; event 600 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c event = 
&event_buff->data[event_buff->h_rb]; event 603 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c if (time_is_after_jiffies(event->fetch_ts + event 542 drivers/net/wireless/quantenna/qtnfmac/event.c vif->netdev->name, ev->event, event 546 drivers/net/wireless/quantenna/qtnfmac/event.c switch (ev->event) { event 577 drivers/net/wireless/quantenna/qtnfmac/event.c vif->netdev->name, ev->event); event 624 drivers/net/wireless/quantenna/qtnfmac/event.c const struct qlink_event *event; event 630 drivers/net/wireless/quantenna/qtnfmac/event.c event = (const struct qlink_event *)event_skb->data; event 631 drivers/net/wireless/quantenna/qtnfmac/event.c event_id = le16_to_cpu(event->event_id); event 632 drivers/net/wireless/quantenna/qtnfmac/event.c event_len = le16_to_cpu(event->mhdr.len); event 634 drivers/net/wireless/quantenna/qtnfmac/event.c if (likely(event->vifid < QTNF_MAX_INTF)) { event 635 drivers/net/wireless/quantenna/qtnfmac/event.c vif = &mac->iflist[event->vifid]; event 637 drivers/net/wireless/quantenna/qtnfmac/event.c pr_err("invalid vif(%u)\n", event->vifid); event 643 drivers/net/wireless/quantenna/qtnfmac/event.c ret = qtnf_event_handle_sta_assoc(mac, vif, (const void *)event, event 648 drivers/net/wireless/quantenna/qtnfmac/event.c (const void *)event, event 652 drivers/net/wireless/quantenna/qtnfmac/event.c ret = qtnf_event_handle_mgmt_received(vif, (const void *)event, event 656 drivers/net/wireless/quantenna/qtnfmac/event.c ret = qtnf_event_handle_scan_results(vif, (const void *)event, event 660 drivers/net/wireless/quantenna/qtnfmac/event.c ret = qtnf_event_handle_scan_complete(mac, (const void *)event, event 664 drivers/net/wireless/quantenna/qtnfmac/event.c ret = qtnf_event_handle_bss_join(vif, (const void *)event, event 668 drivers/net/wireless/quantenna/qtnfmac/event.c ret = qtnf_event_handle_bss_leave(vif, (const void *)event, event 672 drivers/net/wireless/quantenna/qtnfmac/event.c ret = qtnf_event_handle_freq_change(mac, (const void *)event, event 676 drivers/net/wireless/quantenna/qtnfmac/event.c ret = qtnf_event_handle_radar(vif, (const void *)event, event 680 drivers/net/wireless/quantenna/qtnfmac/event.c ret = qtnf_event_handle_external_auth(vif, (const void *)event, event 694 drivers/net/wireless/quantenna/qtnfmac/event.c const struct qlink_event *event; event 698 drivers/net/wireless/quantenna/qtnfmac/event.c if (unlikely(!skb || skb->len < sizeof(*event))) { event 703 drivers/net/wireless/quantenna/qtnfmac/event.c event = (struct qlink_event *)skb->data; event 705 drivers/net/wireless/quantenna/qtnfmac/event.c mac = qtnf_core_get_mac(bus, event->macid); event 708 drivers/net/wireless/quantenna/qtnfmac/event.c le16_to_cpu(event->event_id), le16_to_cpu(event->mhdr.len), event 709 drivers/net/wireless/quantenna/qtnfmac/event.c event->macid, event->vifid); event 1132 drivers/net/wireless/quantenna/qtnfmac/qlink.h u8 event; event 3165 drivers/net/wireless/rndis_wlan.c enum nl80211_cqm_rssi_threshold_event event; event 3178 drivers/net/wireless/rndis_wlan.c event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; event 3180 drivers/net/wireless/rndis_wlan.c event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; event 3185 drivers/net/wireless/rndis_wlan.c cfg80211_cqm_rssi_notify(usbdev->net, event, rssi, GFP_KERNEL); event 60 drivers/net/wireless/rsi/rsi_91x_coex.c rsi_wait_event(&coex_cb->coex_tx_thread.event, timeout); event 61 drivers/net/wireless/rsi/rsi_91x_coex.c rsi_reset_event(&coex_cb->coex_tx_thread.event); event 118 drivers/net/wireless/rsi/rsi_91x_coex.c 
rsi_set_event(&coex_cb->coex_tx_thread.event); event 155 drivers/net/wireless/rsi/rsi_91x_coex.c rsi_init_event(&coex_cb->coex_tx_thread.event); event 483 drivers/net/wireless/rsi/rsi_91x_core.c rsi_set_event(&common->tx_thread.event); event 489 drivers/net/wireless/rsi/rsi_91x_core.c rsi_set_event(&common->tx_thread.event); event 1250 drivers/net/wireless/rsi/rsi_91x_mac80211.c enum nl80211_cqm_rssi_threshold_event event; event 1253 drivers/net/wireless/rsi/rsi_91x_mac80211.c event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; event 1256 drivers/net/wireless/rsi/rsi_91x_mac80211.c event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; event 1261 drivers/net/wireless/rsi/rsi_91x_mac80211.c rsi_dbg(INFO_ZONE, "CQM: Notifying event: %d\n", event); event 1262 drivers/net/wireless/rsi/rsi_91x_mac80211.c ieee80211_cqm_rssi_notify(vif, event, rssi, GFP_KERNEL); event 255 drivers/net/wireless/rsi/rsi_91x_main.c rsi_wait_event(&common->tx_thread.event, timeout); event 256 drivers/net/wireless/rsi/rsi_91x_main.c rsi_reset_event(&common->tx_thread.event); event 310 drivers/net/wireless/rsi/rsi_91x_main.c rsi_init_event(&common->tx_thread.event); event 359 drivers/net/wireless/rsi/rsi_91x_mgmt.c rsi_set_event(&common->tx_thread.event); event 613 drivers/net/wireless/rsi/rsi_91x_mgmt.c u8 event, event 638 drivers/net/wireless/rsi/rsi_91x_mgmt.c if (event == STA_TX_ADDBA_DONE) { event 642 drivers/net/wireless/rsi/rsi_91x_mgmt.c } else if (event == STA_RX_ADDBA_DONE) { event 646 drivers/net/wireless/rsi/rsi_91x_mgmt.c } else if (event == STA_RX_DELBA) { event 1763 drivers/net/wireless/rsi/rsi_91x_mgmt.c rsi_set_event(&common->tx_thread.event); event 1055 drivers/net/wireless/rsi/rsi_91x_sdio.c rsi_init_event(&sdev->rx_thread.event); event 71 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c rsi_wait_event(&sdev->rx_thread.event, EVENT_WAIT_FOREVER); event 72 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c rsi_reset_event(&sdev->rx_thread.event); event 162 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c rsi_set_event(&dev->rx_thread.event); event 286 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c rsi_set_event(&common->tx_thread.event); event 309 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c rsi_set_event(&common->tx_thread.event); event 285 drivers/net/wireless/rsi/rsi_91x_usb.c rsi_set_event(&dev->rx_thread.event); event 581 drivers/net/wireless/rsi/rsi_91x_usb.c rsi_init_event(&dev->rx_thread.event); event 36 drivers/net/wireless/rsi/rsi_91x_usb_ops.c rsi_wait_event(&dev->rx_thread.event, EVENT_WAIT_FOREVER); event 37 drivers/net/wireless/rsi/rsi_91x_usb_ops.c rsi_reset_event(&dev->rx_thread.event); event 34 drivers/net/wireless/rsi/rsi_common.h static inline int rsi_wait_event(struct rsi_event *event, u32 timeout) event 39 drivers/net/wireless/rsi/rsi_common.h status = wait_event_interruptible(event->event_queue, event 40 drivers/net/wireless/rsi/rsi_common.h (atomic_read(&event->event_condition) == 0)); event 42 drivers/net/wireless/rsi/rsi_common.h status = wait_event_interruptible_timeout(event->event_queue, event 43 drivers/net/wireless/rsi/rsi_common.h (atomic_read(&event->event_condition) == 0), event 48 drivers/net/wireless/rsi/rsi_common.h static inline void rsi_set_event(struct rsi_event *event) event 50 drivers/net/wireless/rsi/rsi_common.h atomic_set(&event->event_condition, 0); event 51 drivers/net/wireless/rsi/rsi_common.h wake_up_interruptible(&event->event_queue); event 54 drivers/net/wireless/rsi/rsi_common.h static inline void rsi_reset_event(struct rsi_event *event) event 56 drivers/net/wireless/rsi/rsi_common.h 
atomic_set(&event->event_condition, 1); event 76 drivers/net/wireless/rsi/rsi_common.h rsi_set_event(&handle->event); event 205 drivers/net/wireless/rsi/rsi_main.h struct rsi_event event; event 718 drivers/net/wireless/rsi/rsi_mgmt.h u16 ssn, u8 buf_size, u8 event, event 726 drivers/net/wireless/rsi/rsi_mgmt.h int rsi_send_block_unblock_frame(struct rsi_common *common, bool event); event 44 drivers/net/wireless/st/cw1200/sta.c struct cw1200_wsm_event *event, *tmp; event 45 drivers/net/wireless/st/cw1200/sta.c list_for_each_entry_safe(event, tmp, list, link) { event 46 drivers/net/wireless/st/cw1200/sta.c list_del(&event->link); event 47 drivers/net/wireless/st/cw1200/sta.c kfree(event); event 967 drivers/net/wireless/st/cw1200/sta.c struct cw1200_wsm_event *event; event 974 drivers/net/wireless/st/cw1200/sta.c list_for_each_entry(event, &list, link) { event 975 drivers/net/wireless/st/cw1200/sta.c switch (event->evt.id) { event 1008 drivers/net/wireless/st/cw1200/sta.c int rcpi_rssi = (int)(event->evt.data & 0xFF); event 918 drivers/net/wireless/st/cw1200/wsm.c struct cw1200_wsm_event *event; event 925 drivers/net/wireless/st/cw1200/wsm.c event = kzalloc(sizeof(struct cw1200_wsm_event), GFP_KERNEL); event 926 drivers/net/wireless/st/cw1200/wsm.c if (!event) event 929 drivers/net/wireless/st/cw1200/wsm.c event->evt.id = WSM_GET32(buf); event 930 drivers/net/wireless/st/cw1200/wsm.c event->evt.data = WSM_GET32(buf); event 933 drivers/net/wireless/st/cw1200/wsm.c event->evt.id, event->evt.data); event 937 drivers/net/wireless/st/cw1200/wsm.c list_add_tail(&event->link, &priv->event_queue); event 946 drivers/net/wireless/st/cw1200/wsm.c kfree(event); event 1071 drivers/net/wireless/ti/wl1251/acx.h struct acx_event_statistics event; event 169 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u"); event 170 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u"); event 171 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u"); event 172 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u"); event 173 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u"); event 174 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u"); event 175 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u"); event 176 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u"); event 309 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_DEL(event, heart_beat); event 310 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_DEL(event, calibration); event 311 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_DEL(event, rx_mismatch); event 312 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_DEL(event, rx_mem_empty); event 313 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_DEL(event, rx_pool); event 314 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_DEL(event, oom_late); event 315 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_DEL(event, phy_transmit_error); event 316 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_DEL(event, tx_stuck); event 408 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_ADD(event, heart_beat); event 409 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_ADD(event, calibration); event 410 drivers/net/wireless/ti/wl1251/debugfs.c 
DEBUGFS_FWSTATS_ADD(event, rx_mismatch); event 411 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_ADD(event, rx_mem_empty); event 412 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_ADD(event, rx_pool); event 413 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_ADD(event, oom_late); event 414 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_ADD(event, phy_transmit_error); event 415 drivers/net/wireless/ti/wl1251/debugfs.c DEBUGFS_FWSTATS_ADD(event, tx_stuck); event 160 drivers/net/wireless/ti/wl1251/event.c u32 events_vector, event; event 174 drivers/net/wireless/ti/wl1251/event.c event = events_vector & mask; event 177 drivers/net/wireless/ti/wl1251/event.c event |= events_vector & mask; event 178 drivers/net/wireless/ti/wl1251/event.c } while (!event); event 252 drivers/net/wireless/ti/wl12xx/acx.h struct wl12xx_acx_event_statistics event; event 87 drivers/net/wireless/ti/wl12xx/debugfs.c WL12XX_DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u"); event 88 drivers/net/wireless/ti/wl12xx/debugfs.c WL12XX_DEBUGFS_FWSTATS_FILE(event, calibration, "%u"); event 89 drivers/net/wireless/ti/wl12xx/debugfs.c WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u"); event 90 drivers/net/wireless/ti/wl12xx/debugfs.c WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u"); event 91 drivers/net/wireless/ti/wl12xx/debugfs.c WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u"); event 92 drivers/net/wireless/ti/wl12xx/debugfs.c WL12XX_DEBUGFS_FWSTATS_FILE(event, oom_late, "%u"); event 93 drivers/net/wireless/ti/wl12xx/debugfs.c WL12XX_DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u"); event 94 drivers/net/wireless/ti/wl12xx/debugfs.c WL12XX_DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u"); event 187 drivers/net/wireless/ti/wl12xx/debugfs.c DEBUGFS_FWSTATS_ADD(event, heart_beat); event 188 drivers/net/wireless/ti/wl12xx/debugfs.c DEBUGFS_FWSTATS_ADD(event, calibration); event 189 drivers/net/wireless/ti/wl12xx/debugfs.c DEBUGFS_FWSTATS_ADD(event, rx_mismatch); event 190 drivers/net/wireless/ti/wl12xx/debugfs.c DEBUGFS_FWSTATS_ADD(event, rx_mem_empty); event 191 drivers/net/wireless/ti/wl12xx/debugfs.c DEBUGFS_FWSTATS_ADD(event, rx_pool); event 192 drivers/net/wireless/ti/wl12xx/debugfs.c DEBUGFS_FWSTATS_ADD(event, oom_late); event 193 drivers/net/wireless/ti/wl12xx/debugfs.c DEBUGFS_FWSTATS_ADD(event, phy_transmit_error); event 194 drivers/net/wireless/ti/wl12xx/debugfs.c DEBUGFS_FWSTATS_ADD(event, tx_stuck); event 13 drivers/net/wireless/ti/wl12xx/event.c int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, event 18 drivers/net/wireless/ti/wl12xx/event.c switch (event) { event 92 drivers/net/wireless/ti/wl12xx/event.h int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, event 16 drivers/net/wireless/ti/wl18xx/event.c int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, event 21 drivers/net/wireless/ti/wl18xx/event.c switch (event) { event 106 drivers/net/wireless/ti/wl18xx/event.h int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, event 168 drivers/net/wireless/ti/wlcore/cmd.c u32 event; event 207 drivers/net/wireless/ti/wlcore/cmd.c event = *events_vector & mask; event 214 drivers/net/wireless/ti/wlcore/cmd.c event |= *events_vector & mask; event 215 drivers/net/wireless/ti/wlcore/cmd.c } while (!event); event 104 drivers/net/wireless/ti/wlcore/event.c enum nl80211_cqm_rssi_threshold_event event; event 112 drivers/net/wireless/ti/wlcore/event.c event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; 
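The rndis_wlan, rsi_91x_mac80211 and wlcore references around this point all follow the same connection-quality-monitor pattern: the driver compares a reported RSSI sample against its configured threshold, picks NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW or _HIGH accordingly, and notifies mac80211/cfg80211. A minimal kernel-context sketch of that pattern follows; it is illustrative only, and the threshold and last-event parameters stand in for per-driver state (wlcore, for example, caches the previous event as wlvif->last_rssi_event so it only notifies on a state change).

/*
 * Illustrative sketch, not the exact driver code: map a measured RSSI
 * to an nl80211 CQM threshold event and notify mac80211.  "thold" and
 * "last" are placeholders for per-vif driver state.
 */
#include <net/mac80211.h>

static void example_cqm_rssi_report(struct ieee80211_vif *vif, s32 rssi,
				    s32 thold,
				    enum nl80211_cqm_rssi_threshold_event *last)
{
	enum nl80211_cqm_rssi_threshold_event event;

	/* Signal dropped below the threshold -> LOW, otherwise HIGH. */
	if (rssi < thold)
		event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
	else
		event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;

	/* Only notify on a state change, as the wlcore entries show. */
	if (event != *last)
		ieee80211_cqm_rssi_notify(vif, event, rssi, GFP_KERNEL);
	*last = event;
}

Drivers that talk to cfg80211 directly (rndis_wlan above) use cfg80211_cqm_rssi_notify() with the same event values instead of the mac80211 helper.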
event 114 drivers/net/wireless/ti/wlcore/event.c event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; event 117 drivers/net/wireless/ti/wlcore/event.c if (event != wlvif->last_rssi_event) event 118 drivers/net/wireless/ti/wlcore/event.c ieee80211_cqm_rssi_notify(vif, event, metric, event 120 drivers/net/wireless/ti/wlcore/event.c wlvif->last_rssi_event = event; event 46 drivers/net/wireless/ti/wlcore/wlcore.h int (*wait_for_event)(struct wl1271 *wl, enum wlcore_wait_event event, event 600 drivers/net/wireless/virt_wifi.c static int virt_wifi_event(struct notifier_block *this, unsigned long event, event 611 drivers/net/wireless/virt_wifi.c switch (event) { event 257 drivers/net/wireless/zydas/zd1201.c int event; event 263 drivers/net/wireless/zydas/zd1201.c event = IWEVREGISTERED; event 269 drivers/net/wireless/zydas/zd1201.c event = IWEVEXPIRED; event 275 drivers/net/wireless/zydas/zd1201.c wireless_send_event(zd->dev, event, &wrqu, NULL); event 550 drivers/nfc/microread/microread.c u8 event, struct sk_buff *skb) event 556 drivers/nfc/microread/microread.c pr_info("Microread received event 0x%x to gate 0x%x\n", event, gate); event 558 drivers/nfc/microread/microread.c switch (event) { event 716 drivers/nfc/pn544/pn544.c static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, event 723 drivers/nfc/pn544/pn544.c pr_debug("hci event %d\n", event); event 724 drivers/nfc/pn544/pn544.c switch (event) { event 252 drivers/nfc/st-nci/se.c u8 event, struct sk_buff *skb) event 256 drivers/nfc/st-nci/se.c switch (event) { event 276 drivers/nfc/st-nci/se.c u8 event, event 282 drivers/nfc/st-nci/se.c pr_debug("apdu reader gate event: %x\n", event); event 284 drivers/nfc/st-nci/se.c switch (event) { event 310 drivers/nfc/st-nci/se.c u8 host, u8 event, event 317 drivers/nfc/st-nci/se.c pr_debug("connectivity gate event: %x\n", event); event 319 drivers/nfc/st-nci/se.c switch (event) { event 362 drivers/nfc/st-nci/se.c u8 event, struct sk_buff *skb) event 369 drivers/nfc/st-nci/se.c st_nci_hci_admin_event_received(ndev, event, skb); event 372 drivers/nfc/st-nci/se.c st_nci_hci_apdu_reader_event_received(ndev, event, skb); event 375 drivers/nfc/st-nci/se.c st_nci_hci_connectivity_event_received(ndev, host, event, skb); event 136 drivers/nfc/st-nci/st-nci.h u8 event, struct sk_buff *skb); event 858 drivers/nfc/st21nfca/core.c static int st21nfca_admin_event_received(struct nfc_hci_dev *hdev, u8 event, event 863 drivers/nfc/st21nfca/core.c pr_debug("admin event: %x\n", event); event 865 drivers/nfc/st21nfca/core.c switch (event) { event 892 drivers/nfc/st21nfca/core.c u8 event, struct sk_buff *skb) event 897 drivers/nfc/st21nfca/core.c pr_debug("hci event: %d gate: %x\n", event, gate); event 901 drivers/nfc/st21nfca/core.c return st21nfca_admin_event_received(hdev, event, skb); event 903 drivers/nfc/st21nfca/core.c return st21nfca_dep_event_received(hdev, event, skb); event 906 drivers/nfc/st21nfca/core.c event, skb); event 908 drivers/nfc/st21nfca/core.c return st21nfca_apdu_reader_event_received(hdev, event, skb); event 910 drivers/nfc/st21nfca/core.c return st21nfca_hci_loopback_event_received(hdev, event, skb); event 407 drivers/nfc/st21nfca/dep.c u8 event, struct sk_buff *skb) event 412 drivers/nfc/st21nfca/dep.c pr_debug("dep event: %d\n", event); event 414 drivers/nfc/st21nfca/dep.c switch (event) { event 294 drivers/nfc/st21nfca/se.c u8 event, struct sk_buff *skb) event 300 drivers/nfc/st21nfca/se.c pr_debug("connectivity gate event: %x\n", event); event 302 
drivers/nfc/st21nfca/se.c switch (event) { event 348 drivers/nfc/st21nfca/se.c u8 event, struct sk_buff *skb) event 353 drivers/nfc/st21nfca/se.c pr_debug("apdu reader gate event: %x\n", event); event 355 drivers/nfc/st21nfca/se.c switch (event) { event 173 drivers/nfc/st21nfca/st21nfca.h u8 event, struct sk_buff *skb); event 182 drivers/nfc/st21nfca/st21nfca.h u8 event, struct sk_buff *skb); event 184 drivers/nfc/st21nfca/st21nfca.h u8 event, struct sk_buff *skb); event 196 drivers/nfc/st21nfca/st21nfca.h int st21nfca_hci_loopback_event_received(struct nfc_hci_dev *ndev, u8 event, event 230 drivers/nfc/st21nfca/vendor_cmds.c int st21nfca_hci_loopback_event_received(struct nfc_hci_dev *hdev, u8 event, event 235 drivers/nfc/st21nfca/vendor_cmds.c switch (event) { event 145 drivers/nvdimm/bus.c void nd_device_notify(struct device *dev, enum nvdimm_event event) event 153 drivers/nvdimm/bus.c nd_drv->notify(dev, event); event 159 drivers/nvdimm/bus.c void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event) event 167 drivers/nvdimm/bus.c nd_device_notify(&nd_region->dev, event); event 224 drivers/nvdimm/nd.h void nd_device_notify(struct device *dev, enum nvdimm_event event); event 562 drivers/nvdimm/pmem.c static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) event 572 drivers/nvdimm/pmem.c if (event != NVDIMM_REVALIDATE_POISON) event 120 drivers/nvdimm/region.c static void nd_region_notify(struct device *dev, enum nvdimm_event event) event 122 drivers/nvdimm/region.c if (event == NVDIMM_REVALIDATE_POISON) { event 136 drivers/nvdimm/region.c device_for_each_child(dev, &event, child_notify); event 139 drivers/nvme/host/rdma.c struct rdma_cm_event *event); event 233 drivers/nvme/host/rdma.c static void nvme_rdma_qp_event(struct ib_event *event, void *context) event 236 drivers/nvme/host/rdma.c ib_event_msg(event->event), event->event); event 1642 drivers/nvme/host/rdma.c rdma_event_msg(ev->event), ev->event, event 1645 drivers/nvme/host/rdma.c switch (ev->event) { event 1668 drivers/nvme/host/rdma.c "CM error event %d\n", ev->event); event 1683 drivers/nvme/host/rdma.c "Unexpected RDMA CM event (%d)\n", ev->event); event 132 drivers/nvme/target/rdma.c static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); event 1139 drivers/nvme/target/rdma.c struct rdma_cm_event *event) event 1156 drivers/nvme/target/rdma.c ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); event 1228 drivers/nvme/target/rdma.c static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) event 1232 drivers/nvme/target/rdma.c switch (event->event) { event 1234 drivers/nvme/target/rdma.c rdma_notify(queue->cm_id, event->event); event 1238 drivers/nvme/target/rdma.c ib_event_msg(event->event), event->event); event 1268 drivers/nvme/target/rdma.c struct rdma_cm_event *event) event 1280 drivers/nvme/target/rdma.c queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); event 1292 drivers/nvme/target/rdma.c ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); event 1440 drivers/nvme/target/rdma.c struct rdma_cm_event *event) event 1449 drivers/nvme/target/rdma.c rdma_event_msg(event->event), event->event, event 1450 drivers/nvme/target/rdma.c event->status, cm_id); event 1452 drivers/nvme/target/rdma.c switch (event->event) { event 1454 drivers/nvme/target/rdma.c ret = nvmet_rdma_queue_connect(cm_id, event); event 1469 drivers/nvme/target/rdma.c rdma_reject_msg(cm_id, event->status)); event 1477 drivers/nvme/target/rdma.c event->event); event 368 
drivers/oprofile/buffer_sync.c static inline void add_sample_entry(unsigned long offset, unsigned long event) event 371 drivers/oprofile/buffer_sync.c add_event_entry(event); event 387 drivers/oprofile/buffer_sync.c add_sample_entry(s->eip, s->event); event 410 drivers/oprofile/buffer_sync.c add_sample_entry(offset, s->event); event 521 drivers/oprofile/buffer_sync.c flags = sample->event; event 146 drivers/oprofile/cpu_buffer.c entry->event = ring_buffer_lock_reserve event 149 drivers/oprofile/cpu_buffer.c if (!entry->event) event 151 drivers/oprofile/cpu_buffer.c entry->sample = ring_buffer_event_data(entry->event); event 160 drivers/oprofile/cpu_buffer.c return ring_buffer_unlock_commit(op_ring_buffer, entry->event); event 170 drivers/oprofile/cpu_buffer.c entry->event = e; event 226 drivers/oprofile/cpu_buffer.c sample->event = flags; event 238 drivers/oprofile/cpu_buffer.c unsigned long pc, unsigned long event) event 248 drivers/oprofile/cpu_buffer.c sample->event = event; event 263 drivers/oprofile/cpu_buffer.c unsigned long backtrace, int is_kernel, unsigned long event, event 277 drivers/oprofile/cpu_buffer.c if (op_add_sample(cpu_buf, pc, event)) event 299 drivers/oprofile/cpu_buffer.c unsigned long event, int is_kernel, event 309 drivers/oprofile/cpu_buffer.c if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task)) event 322 drivers/oprofile/cpu_buffer.c unsigned long event, int is_kernel, event 325 drivers/oprofile/cpu_buffer.c __oprofile_add_ext_sample(pc, regs, event, is_kernel, task); event 329 drivers/oprofile/cpu_buffer.c unsigned long event, int is_kernel) event 331 drivers/oprofile/cpu_buffer.c __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); event 334 drivers/oprofile/cpu_buffer.c void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) event 347 drivers/oprofile/cpu_buffer.c __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); event 374 drivers/oprofile/cpu_buffer.c sample->event = 0; /* no flags */ event 382 drivers/oprofile/cpu_buffer.c entry->event = NULL; event 388 drivers/oprofile/cpu_buffer.c if (!entry->event) event 395 drivers/oprofile/cpu_buffer.c if (!entry->event) event 410 drivers/oprofile/cpu_buffer.c if (!entry->event) event 415 drivers/oprofile/cpu_buffer.c void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) event 418 drivers/oprofile/cpu_buffer.c log_sample(cpu_buf, pc, 0, is_kernel, event, NULL); event 35 drivers/oprofile/cpu_buffer.h unsigned long event; event 29 drivers/oprofile/nmi_timer_int.c static void nmi_timer_callback(struct perf_event *event, event 33 drivers/oprofile/nmi_timer_int.c event->hw.interrupts = 0; /* don't throttle interrupts */ event 39 drivers/oprofile/nmi_timer_int.c struct perf_event *event = per_cpu(nmi_timer_events, cpu); event 41 drivers/oprofile/nmi_timer_int.c if (!event) { event 42 drivers/oprofile/nmi_timer_int.c event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL, event 44 drivers/oprofile/nmi_timer_int.c if (IS_ERR(event)) event 45 drivers/oprofile/nmi_timer_int.c return PTR_ERR(event); event 46 drivers/oprofile/nmi_timer_int.c per_cpu(nmi_timer_events, cpu) = event; event 49 drivers/oprofile/nmi_timer_int.c if (event && ctr_running) event 50 drivers/oprofile/nmi_timer_int.c perf_event_enable(event); event 57 drivers/oprofile/nmi_timer_int.c struct perf_event *event = per_cpu(nmi_timer_events, cpu); event 59 drivers/oprofile/nmi_timer_int.c if (event && ctr_running) event 60 drivers/oprofile/nmi_timer_int.c perf_event_disable(event); 
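The nmi_timer_int.c references above outline oprofile's timer fallback: one kernel-owned perf event per CPU, created with perf_event_create_kernel_counter() and enabled or disabled as profiling starts and stops. A condensed, hedged sketch of that setup path is shown below; symbols prefixed example_ and the attr argument are placeholders, while the perf calls themselves are the in-kernel API visible in the listing.

/*
 * Sketch of the per-CPU kernel counter pattern referenced above
 * (drivers/oprofile/nmi_timer_int.c).  example_* names are placeholders.
 */
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/err.h>

static DEFINE_PER_CPU(struct perf_event *, example_events);

static void example_overflow(struct perf_event *event,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	event->hw.interrupts = 0;	/* don't throttle, as the driver does */
	/* ...record a profiling sample here... */
}

static int example_start_cpu(struct perf_event_attr *attr, int cpu)
{
	struct perf_event *event = per_cpu(example_events, cpu);

	if (!event) {
		event = perf_event_create_kernel_counter(attr, cpu, NULL,
							 example_overflow,
							 NULL);
		if (IS_ERR(event))
			return PTR_ERR(event);
		per_cpu(example_events, cpu) = event;
	}

	perf_event_enable(event);
	return 0;
}

Teardown reverses this with perf_event_disable() and perf_event_release_kernel(), as the nmi_timer_int.c entries that follow show.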
event 102 drivers/oprofile/nmi_timer_int.c struct perf_event *event; event 107 drivers/oprofile/nmi_timer_int.c event = per_cpu(nmi_timer_events, cpu); event 108 drivers/oprofile/nmi_timer_int.c if (!event) event 110 drivers/oprofile/nmi_timer_int.c perf_event_disable(event); event 112 drivers/oprofile/nmi_timer_int.c perf_event_release_kernel(event); event 19 drivers/oprofile/oprofile_perf.c unsigned long event; event 36 drivers/oprofile/oprofile_perf.c static void op_overflow_handler(struct perf_event *event, event 43 drivers/oprofile/oprofile_perf.c if (per_cpu(perf_events, cpu)[id] == event) event 69 drivers/oprofile/oprofile_perf.c attr->config = counter_config[i].event; event 75 drivers/oprofile/oprofile_perf.c static int op_create_counter(int cpu, int event) event 79 drivers/oprofile/oprofile_perf.c if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) event 82 drivers/oprofile/oprofile_perf.c pevent = perf_event_create_kernel_counter(&counter_config[event].attr, event 92 drivers/oprofile/oprofile_perf.c "on CPU %d\n", event, cpu); event 96 drivers/oprofile/oprofile_perf.c per_cpu(perf_events, cpu)[event] = pevent; event 101 drivers/oprofile/oprofile_perf.c static void op_destroy_counter(int cpu, int event) event 103 drivers/oprofile/oprofile_perf.c struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; event 107 drivers/oprofile/oprofile_perf.c per_cpu(perf_events, cpu)[event] = NULL; event 117 drivers/oprofile/oprofile_perf.c int cpu, event, ret = 0; event 120 drivers/oprofile/oprofile_perf.c for (event = 0; event < num_counters; ++event) { event 121 drivers/oprofile/oprofile_perf.c ret = op_create_counter(cpu, event); event 135 drivers/oprofile/oprofile_perf.c int cpu, event; event 138 drivers/oprofile/oprofile_perf.c for (event = 0; event < num_counters; ++event) event 139 drivers/oprofile/oprofile_perf.c op_destroy_counter(cpu, event); event 153 drivers/oprofile/oprofile_perf.c oprofilefs_create_ulong(dir, "event", &counter_config[i].event); event 258 drivers/oprofile/oprofile_perf.c struct perf_event *event; event 262 drivers/oprofile/oprofile_perf.c event = per_cpu(perf_events, cpu)[id]; event 263 drivers/oprofile/oprofile_perf.c if (event) event 264 drivers/oprofile/oprofile_perf.c perf_event_release_kernel(event); event 505 drivers/parisc/led.c static int led_halt(struct notifier_block *nb, unsigned long event, void *buf) event 513 drivers/parisc/led.c switch (event) { event 183 drivers/parisc/power.c unsigned long event, void *ptr) event 478 drivers/pci/controller/dwc/pcie-tegra194.c static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event) event 485 drivers/pci/controller/dwc/pcie-tegra194.c val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT; event 1590 drivers/pci/controller/pci-hyperv.c struct completion *event; event 1595 drivers/pci/controller/pci-hyperv.c event = xchg(&hbus->survey_event, NULL); event 1596 drivers/pci/controller/pci-hyperv.c if (!event) event 1601 drivers/pci/controller/pci-hyperv.c complete(event); event 1642 drivers/pci/controller/pci-hyperv.c complete(event); event 79 drivers/pci/hotplug/acpiphp_ibm.c u8 event; event 84 drivers/pci/hotplug/acpiphp_ibm.c static void ibm_handle_events(acpi_handle handle, u32 event, void *context); event 251 drivers/pci/hotplug/acpiphp_ibm.c static void ibm_handle_events(acpi_handle handle, u32 event, void *context) event 253 drivers/pci/hotplug/acpiphp_ibm.c u8 detail = event & 0x0f; event 254 drivers/pci/hotplug/acpiphp_ibm.c u8 subevent = event & 0xf0; event 257 
drivers/pci/hotplug/acpiphp_ibm.c pr_debug("%s: Received notification %02x\n", __func__, event); event 263 drivers/pci/hotplug/acpiphp_ibm.c note->event, detail); event 265 drivers/pci/hotplug/acpiphp_ibm.c note->event = event; event 740 drivers/pci/hotplug/pnv_php.c struct pnv_php_event *event = event 742 drivers/pci/hotplug/pnv_php.c struct pnv_php_slot *php_slot = event->php_slot; event 744 drivers/pci/hotplug/pnv_php.c if (event->added) event 749 drivers/pci/hotplug/pnv_php.c kfree(event); event 758 drivers/pci/hotplug/pnv_php.c struct pnv_php_event *event; event 810 drivers/pci/hotplug/pnv_php.c event = kzalloc(sizeof(*event), GFP_ATOMIC); event 811 drivers/pci/hotplug/pnv_php.c if (!event) { event 819 drivers/pci/hotplug/pnv_php.c INIT_WORK(&event->work, pnv_php_event_handler); event 820 drivers/pci/hotplug/pnv_php.c event->added = added; event 821 drivers/pci/hotplug/pnv_php.c event->php_slot = php_slot; event 822 drivers/pci/hotplug/pnv_php.c queue_work(php_slot->wq, &event->work); event 1172 drivers/pci/pci.c switch (state.event) { event 1183 drivers/pci/pci.c state.event); event 193 drivers/perf/arm-cci.c #define CCI400_PMU_EVENT_SOURCE(event) \ event 194 drivers/perf/arm-cci.c ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \ event 196 drivers/perf/arm-cci.c #define CCI400_PMU_EVENT_CODE(event) \ event 197 drivers/perf/arm-cci.c ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK) event 217 drivers/perf/arm-cci.c CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), event 432 drivers/perf/arm-cci.c #define CCI5xx_PMU_EVENT_SOURCE(event) \ event 433 drivers/perf/arm-cci.c ((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK) event 434 drivers/perf/arm-cci.c #define CCI5xx_PMU_EVENT_CODE(event) \ event 435 drivers/perf/arm-cci.c ((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK) event 453 drivers/perf/arm-cci.c CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), event 650 drivers/perf/arm-cci.c struct perf_event *event = cci_hw->events[i]; event 652 drivers/perf/arm-cci.c if (WARN_ON(!event)) event 656 drivers/perf/arm-cci.c if (event->hw.state & PERF_HES_STOPPED) event 658 drivers/perf/arm-cci.c if (event->hw.state & PERF_HES_ARCH) { event 660 drivers/perf/arm-cci.c event->hw.state &= ~PERF_HES_ARCH; event 746 drivers/perf/arm-cci.c static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) event 748 drivers/perf/arm-cci.c pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); event 799 drivers/perf/arm-cci.c static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) event 801 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 802 drivers/perf/arm-cci.c unsigned long cci_event = event->hw.config_base; event 817 drivers/perf/arm-cci.c static int pmu_map_event(struct perf_event *event) event 819 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 821 drivers/perf/arm-cci.c if (event->attr.type < PERF_TYPE_MAX || event 825 drivers/perf/arm-cci.c return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); event 875 drivers/perf/arm-cci.c static u32 pmu_read_counter(struct perf_event *event) event 877 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 878 drivers/perf/arm-cci.c struct hw_perf_event *hw_counter = &event->hw; event 902 drivers/perf/arm-cci.c struct perf_event *event = cci_hw->events[i]; event 904 drivers/perf/arm-cci.c if (WARN_ON(!event)) event 906 drivers/perf/arm-cci.c 
pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); event 964 drivers/perf/arm-cci.c struct perf_event *event = cci_pmu->hw_events.events[i]; event 966 drivers/perf/arm-cci.c if (WARN_ON(!event)) event 971 drivers/perf/arm-cci.c pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); event 973 drivers/perf/arm-cci.c pmu_set_event(cci_pmu, i, event->hw.config_base); event 983 drivers/perf/arm-cci.c static u64 pmu_event_update(struct perf_event *event) event 985 drivers/perf/arm-cci.c struct hw_perf_event *hwc = &event->hw; event 990 drivers/perf/arm-cci.c new_raw_count = pmu_read_counter(event); event 996 drivers/perf/arm-cci.c local64_add(delta, &event->count); event 1001 drivers/perf/arm-cci.c static void pmu_read(struct perf_event *event) event 1003 drivers/perf/arm-cci.c pmu_event_update(event); event 1006 drivers/perf/arm-cci.c static void pmu_event_set_period(struct perf_event *event) event 1008 drivers/perf/arm-cci.c struct hw_perf_event *hwc = &event->hw; event 1044 drivers/perf/arm-cci.c struct perf_event *event = events->events[idx]; event 1046 drivers/perf/arm-cci.c if (!event) event 1057 drivers/perf/arm-cci.c pmu_event_update(event); event 1058 drivers/perf/arm-cci.c pmu_event_set_period(event); event 1084 drivers/perf/arm-cci.c static void hw_perf_event_destroy(struct perf_event *event) event 1086 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 1133 drivers/perf/arm-cci.c static void cci_pmu_start(struct perf_event *event, int pmu_flags) event 1135 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 1137 drivers/perf/arm-cci.c struct hw_perf_event *hwc = &event->hw; event 1161 drivers/perf/arm-cci.c pmu_event_set_period(event); event 1167 drivers/perf/arm-cci.c static void cci_pmu_stop(struct perf_event *event, int pmu_flags) event 1169 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 1170 drivers/perf/arm-cci.c struct hw_perf_event *hwc = &event->hw; event 1186 drivers/perf/arm-cci.c pmu_event_update(event); event 1190 drivers/perf/arm-cci.c static int cci_pmu_add(struct perf_event *event, int flags) event 1192 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 1194 drivers/perf/arm-cci.c struct hw_perf_event *hwc = &event->hw; event 1198 drivers/perf/arm-cci.c idx = pmu_get_event_idx(hw_events, event); event 1202 drivers/perf/arm-cci.c event->hw.idx = idx; event 1203 drivers/perf/arm-cci.c hw_events->events[idx] = event; event 1207 drivers/perf/arm-cci.c cci_pmu_start(event, PERF_EF_RELOAD); event 1210 drivers/perf/arm-cci.c perf_event_update_userpage(event); event 1215 drivers/perf/arm-cci.c static void cci_pmu_del(struct perf_event *event, int flags) event 1217 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 1219 drivers/perf/arm-cci.c struct hw_perf_event *hwc = &event->hw; event 1222 drivers/perf/arm-cci.c cci_pmu_stop(event, PERF_EF_UPDATE); event 1226 drivers/perf/arm-cci.c perf_event_update_userpage(event); event 1231 drivers/perf/arm-cci.c struct perf_event *event) event 1233 drivers/perf/arm-cci.c if (is_software_event(event)) event 1241 drivers/perf/arm-cci.c if (event->pmu != cci_pmu) event 1244 drivers/perf/arm-cci.c if (event->state < PERF_EVENT_STATE_OFF) event 1247 drivers/perf/arm-cci.c if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) event 1250 drivers/perf/arm-cci.c return pmu_get_event_idx(hw_events, event) >= 0; event 1253 drivers/perf/arm-cci.c static int 
validate_group(struct perf_event *event) event 1255 drivers/perf/arm-cci.c struct perf_event *sibling, *leader = event->group_leader; event 1256 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 1267 drivers/perf/arm-cci.c if (!validate_event(event->pmu, &fake_pmu, leader)) event 1271 drivers/perf/arm-cci.c if (!validate_event(event->pmu, &fake_pmu, sibling)) event 1275 drivers/perf/arm-cci.c if (!validate_event(event->pmu, &fake_pmu, event)) event 1281 drivers/perf/arm-cci.c static int __hw_perf_event_init(struct perf_event *event) event 1283 drivers/perf/arm-cci.c struct hw_perf_event *hwc = &event->hw; event 1286 drivers/perf/arm-cci.c mapping = pmu_map_event(event); event 1289 drivers/perf/arm-cci.c pr_debug("event %x:%llx not supported\n", event->attr.type, event 1290 drivers/perf/arm-cci.c event->attr.config); event 1309 drivers/perf/arm-cci.c if (event->group_leader != event) { event 1310 drivers/perf/arm-cci.c if (validate_group(event) != 0) event 1317 drivers/perf/arm-cci.c static int cci_pmu_event_init(struct perf_event *event) event 1319 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); event 1323 drivers/perf/arm-cci.c if (event->attr.type != event->pmu->type) event 1327 drivers/perf/arm-cci.c if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) event 1339 drivers/perf/arm-cci.c if (event->cpu < 0) event 1341 drivers/perf/arm-cci.c event->cpu = cci_pmu->cpu; event 1343 drivers/perf/arm-cci.c event->destroy = hw_perf_event_destroy; event 1355 drivers/perf/arm-cci.c err = __hw_perf_event_init(event); event 1357 drivers/perf/arm-cci.c hw_perf_event_destroy(event); event 154 drivers/perf/arm-ccn.c struct perf_event *event; event 235 drivers/perf/arm-ccn.c static CCN_FORMAT_ATTR(event, "config:16-23"); event 268 drivers/perf/arm-ccn.c u32 event; event 287 drivers/perf/arm-ccn.c .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \ event 293 drivers/perf/arm-ccn.c .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ event 298 drivers/perf/arm-ccn.c .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ event 302 drivers/perf/arm-ccn.c .type = CCN_TYPE_HNF, .event = _event, } event 305 drivers/perf/arm-ccn.c .type = CCN_TYPE_XP, .event = _event, \ event 314 drivers/perf/arm-ccn.c .type = CCN_TYPE_RNI_3P, .event = _event, } event 317 drivers/perf/arm-ccn.c .type = CCN_TYPE_SBAS, .event = _event, } event 327 drivers/perf/arm-ccn.c struct arm_ccn_pmu_event *event = container_of(attr, event 331 drivers/perf/arm-ccn.c res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type); event 332 drivers/perf/arm-ccn.c if (event->event) event 334 drivers/perf/arm-ccn.c event->event); event 335 drivers/perf/arm-ccn.c if (event->def) event 337 drivers/perf/arm-ccn.c event->def); event 338 drivers/perf/arm-ccn.c if (event->mask) event 340 drivers/perf/arm-ccn.c event->mask); event 343 drivers/perf/arm-ccn.c switch (event->type) { event 349 drivers/perf/arm-ccn.c if (event->event == CCN_EVENT_WATCHPOINT) event 377 drivers/perf/arm-ccn.c struct arm_ccn_pmu_event *event = container_of(dev_attr, event 380 drivers/perf/arm-ccn.c if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present) event 382 drivers/perf/arm-ccn.c if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present) event 636 drivers/perf/arm-ccn.c static int arm_ccn_pmu_event_alloc(struct perf_event *event) event 638 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 639 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; 
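Several of the PMU drivers indexed here (arm-cci above, arm_dsu_pmu and arm_pmu further down) share the same read-side idiom: re-read the hardware counter, atomically swap the new raw value into hw.prev_count, and fold the masked delta into event->count. A compact sketch of that idiom, assuming a driver-supplied pmu_read_counter() accessor and counter-width mask, looks like this:

/*
 * Sketch of the common perf counter update idiom (cf. pmu_event_update()
 * in arm-cci.c above and armpmu_event_update() in arm_pmu.c below).
 * pmu_read_counter() and "mask" are assumed to come from the driver.
 */
static u64 example_event_update(struct perf_event *event, u64 mask)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				 new_raw_count) != prev_raw_count);

	/* Mask to the counter width so wraparound is handled correctly. */
	delta = (new_raw_count - prev_raw_count) & mask;
	local64_add(delta, &event->count);

	return new_raw_count;
}

The start/stop/add/del callbacks listed for these drivers funnel through this update before a counter is reprogrammed or released, and perf_event_update_userpage() is called afterwards so user space sees the new count.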
event 644 drivers/perf/arm-ccn.c node_xp = CCN_CONFIG_NODE(event->attr.config); event 645 drivers/perf/arm-ccn.c type = CCN_CONFIG_TYPE(event->attr.config); event 646 drivers/perf/arm-ccn.c event_id = CCN_CONFIG_EVENT(event->attr.config); event 655 drivers/perf/arm-ccn.c ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; event 689 drivers/perf/arm-ccn.c ccn->dt.pmu_counters[hw->idx].event = event; event 694 drivers/perf/arm-ccn.c static void arm_ccn_pmu_event_release(struct perf_event *event) event 696 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 697 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 705 drivers/perf/arm-ccn.c if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && event 706 drivers/perf/arm-ccn.c CCN_CONFIG_EVENT(event->attr.config) == event 715 drivers/perf/arm-ccn.c ccn->dt.pmu_counters[hw->idx].event = NULL; event 718 drivers/perf/arm-ccn.c static int arm_ccn_pmu_event_init(struct perf_event *event) event 721 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 727 drivers/perf/arm-ccn.c if (event->attr.type != event->pmu->type) event 730 drivers/perf/arm-ccn.c ccn = pmu_to_arm_ccn(event->pmu); event 737 drivers/perf/arm-ccn.c if (has_branch_stack(event)) { event 742 drivers/perf/arm-ccn.c if (event->cpu < 0) { event 755 drivers/perf/arm-ccn.c event->cpu = ccn->dt.cpu; event 757 drivers/perf/arm-ccn.c node_xp = CCN_CONFIG_NODE(event->attr.config); event 758 drivers/perf/arm-ccn.c type = CCN_CONFIG_TYPE(event->attr.config); event 759 drivers/perf/arm-ccn.c event_id = CCN_CONFIG_EVENT(event->attr.config); event 794 drivers/perf/arm-ccn.c u32 port = CCN_CONFIG_PORT(event->attr.config); event 795 drivers/perf/arm-ccn.c u32 vc = CCN_CONFIG_VC(event->attr.config); event 799 drivers/perf/arm-ccn.c if (event_id != e->event) event 827 drivers/perf/arm-ccn.c arm_ccn_pmu_config_set(&event->attr.config, event 836 drivers/perf/arm-ccn.c if (event->group_leader->pmu != event->pmu && event 837 drivers/perf/arm-ccn.c !is_software_event(event->group_leader)) event 840 drivers/perf/arm-ccn.c for_each_sibling_event(sibling, event->group_leader) { event 841 drivers/perf/arm-ccn.c if (sibling->pmu != event->pmu && event 873 drivers/perf/arm-ccn.c static void arm_ccn_pmu_event_update(struct perf_event *event) event 875 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 876 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 886 drivers/perf/arm-ccn.c local64_add((new_count - prev_count) & mask, &event->count); event 889 drivers/perf/arm-ccn.c static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) event 891 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 892 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 900 drivers/perf/arm-ccn.c if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) event 901 drivers/perf/arm-ccn.c xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; event 904 drivers/perf/arm-ccn.c CCN_CONFIG_NODE(event->attr.config))]; event 922 drivers/perf/arm-ccn.c static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) event 924 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 925 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 927 drivers/perf/arm-ccn.c local64_set(&event->hw.prev_count, event 932 drivers/perf/arm-ccn.c arm_ccn_pmu_xp_dt_config(event, 1); event 935 drivers/perf/arm-ccn.c static void 
arm_ccn_pmu_event_stop(struct perf_event *event, int flags) event 937 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 940 drivers/perf/arm-ccn.c arm_ccn_pmu_xp_dt_config(event, 0); event 943 drivers/perf/arm-ccn.c arm_ccn_pmu_event_update(event); event 948 drivers/perf/arm-ccn.c static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) event 950 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 951 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 956 drivers/perf/arm-ccn.c u64 cmp_l = event->attr.config1; event 957 drivers/perf/arm-ccn.c u64 cmp_h = event->attr.config2; event 958 drivers/perf/arm-ccn.c u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l; event 959 drivers/perf/arm-ccn.c u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h; event 967 drivers/perf/arm-ccn.c val |= CCN_CONFIG_DIR(event->attr.config) << event 971 drivers/perf/arm-ccn.c val |= CCN_CONFIG_PORT(event->attr.config) << event 975 drivers/perf/arm-ccn.c val |= CCN_CONFIG_VC(event->attr.config) << event 996 drivers/perf/arm-ccn.c static void arm_ccn_pmu_xp_event_config(struct perf_event *event) event 998 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 999 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 1006 drivers/perf/arm-ccn.c id = (CCN_CONFIG_VC(event->attr.config) << 4) | event 1007 drivers/perf/arm-ccn.c (CCN_CONFIG_BUS(event->attr.config) << 3) | event 1008 drivers/perf/arm-ccn.c (CCN_CONFIG_EVENT(event->attr.config) << 0); event 1017 drivers/perf/arm-ccn.c static void arm_ccn_pmu_node_event_config(struct perf_event *event) event 1019 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 1020 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 1023 drivers/perf/arm-ccn.c u32 type = CCN_CONFIG_TYPE(event->attr.config); event 1026 drivers/perf/arm-ccn.c port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config)); event 1049 drivers/perf/arm-ccn.c val |= CCN_CONFIG_EVENT(event->attr.config) << event 1054 drivers/perf/arm-ccn.c static void arm_ccn_pmu_event_config(struct perf_event *event) event 1056 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 1057 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 1064 drivers/perf/arm-ccn.c if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) event 1065 drivers/perf/arm-ccn.c xp = CCN_CONFIG_XP(event->attr.config); event 1067 drivers/perf/arm-ccn.c xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config)); event 1079 drivers/perf/arm-ccn.c if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) { event 1080 drivers/perf/arm-ccn.c if (CCN_CONFIG_EVENT(event->attr.config) == event 1082 drivers/perf/arm-ccn.c arm_ccn_pmu_xp_watchpoint_config(event); event 1084 drivers/perf/arm-ccn.c arm_ccn_pmu_xp_event_config(event); event 1086 drivers/perf/arm-ccn.c arm_ccn_pmu_node_event_config(event); event 1098 drivers/perf/arm-ccn.c static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) event 1101 drivers/perf/arm-ccn.c struct hw_perf_event *hw = &event->hw; event 1102 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 1104 drivers/perf/arm-ccn.c err = arm_ccn_pmu_event_alloc(event); event 1117 drivers/perf/arm-ccn.c arm_ccn_pmu_event_config(event); event 1122 drivers/perf/arm-ccn.c arm_ccn_pmu_event_start(event, PERF_EF_UPDATE); event 1127 drivers/perf/arm-ccn.c static void 
arm_ccn_pmu_event_del(struct perf_event *event, int flags) event 1129 drivers/perf/arm-ccn.c struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); event 1131 drivers/perf/arm-ccn.c arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); event 1133 drivers/perf/arm-ccn.c arm_ccn_pmu_event_release(event); event 1139 drivers/perf/arm-ccn.c static void arm_ccn_pmu_event_read(struct perf_event *event) event 1141 drivers/perf/arm-ccn.c arm_ccn_pmu_event_update(event); event 1175 drivers/perf/arm-ccn.c struct perf_event *event = dt->pmu_counters[idx].event; event 1178 drivers/perf/arm-ccn.c WARN_ON_ONCE(overflowed && !event && event 1181 drivers/perf/arm-ccn.c if (!event || !overflowed) event 1184 drivers/perf/arm-ccn.c arm_ccn_pmu_event_update(event); event 176 drivers/perf/arm_dsu_pmu.c DSU_FORMAT_ATTR(event, "config:0-31"), event 248 drivers/perf/arm_dsu_pmu.c static inline u64 dsu_pmu_read_counter(struct perf_event *event) event 252 drivers/perf/arm_dsu_pmu.c struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); event 253 drivers/perf/arm_dsu_pmu.c int idx = event->hw.idx; event 260 drivers/perf/arm_dsu_pmu.c dev_err(event->pmu->dev, event 275 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_write_counter(struct perf_event *event, u64 val) event 278 drivers/perf/arm_dsu_pmu.c struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); event 279 drivers/perf/arm_dsu_pmu.c int idx = event->hw.idx; event 286 drivers/perf/arm_dsu_pmu.c dev_err(event->pmu->dev, event 300 drivers/perf/arm_dsu_pmu.c struct perf_event *event) event 303 drivers/perf/arm_dsu_pmu.c unsigned long evtype = event->attr.config; event 304 drivers/perf/arm_dsu_pmu.c struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); event 333 drivers/perf/arm_dsu_pmu.c struct perf_event *event) event 335 drivers/perf/arm_dsu_pmu.c int idx = event->hw.idx; event 339 drivers/perf/arm_dsu_pmu.c dev_err(event->pmu->dev, event 345 drivers/perf/arm_dsu_pmu.c __dsu_pmu_set_event(idx, event->hw.config_base); event 349 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_event_update(struct perf_event *event) event 351 drivers/perf/arm_dsu_pmu.c struct hw_perf_event *hwc = &event->hw; event 357 drivers/perf/arm_dsu_pmu.c new_count = dsu_pmu_read_counter(event); event 361 drivers/perf/arm_dsu_pmu.c local64_add(delta, &event->count); event 364 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_read(struct perf_event *event) event 366 drivers/perf/arm_dsu_pmu.c dsu_pmu_event_update(event); event 381 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_set_event_period(struct perf_event *event) event 383 drivers/perf/arm_dsu_pmu.c int idx = event->hw.idx; event 386 drivers/perf/arm_dsu_pmu.c local64_set(&event->hw.prev_count, val); event 387 drivers/perf/arm_dsu_pmu.c dsu_pmu_write_counter(event, val); event 403 drivers/perf/arm_dsu_pmu.c struct perf_event *event = hw_events->events[i]; event 405 drivers/perf/arm_dsu_pmu.c if (!event) event 407 drivers/perf/arm_dsu_pmu.c dsu_pmu_event_update(event); event 408 drivers/perf/arm_dsu_pmu.c dsu_pmu_set_event_period(event); event 415 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_start(struct perf_event *event, int pmu_flags) event 417 drivers/perf/arm_dsu_pmu.c struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); event 421 drivers/perf/arm_dsu_pmu.c WARN_ON(!(event->hw.state & PERF_HES_UPTODATE)); event 422 drivers/perf/arm_dsu_pmu.c dsu_pmu_set_event_period(event); event 423 drivers/perf/arm_dsu_pmu.c if (event->hw.idx != DSU_PMU_IDX_CYCLE_COUNTER) event 424 drivers/perf/arm_dsu_pmu.c dsu_pmu_set_event(dsu_pmu, event); event 425 drivers/perf/arm_dsu_pmu.c 
event->hw.state = 0; event 426 drivers/perf/arm_dsu_pmu.c dsu_pmu_enable_counter(dsu_pmu, event->hw.idx); event 429 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_stop(struct perf_event *event, int pmu_flags) event 431 drivers/perf/arm_dsu_pmu.c struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); event 433 drivers/perf/arm_dsu_pmu.c if (event->hw.state & PERF_HES_STOPPED) event 435 drivers/perf/arm_dsu_pmu.c dsu_pmu_disable_counter(dsu_pmu, event->hw.idx); event 436 drivers/perf/arm_dsu_pmu.c dsu_pmu_event_update(event); event 437 drivers/perf/arm_dsu_pmu.c event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; event 440 drivers/perf/arm_dsu_pmu.c static int dsu_pmu_add(struct perf_event *event, int flags) event 442 drivers/perf/arm_dsu_pmu.c struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); event 444 drivers/perf/arm_dsu_pmu.c struct hw_perf_event *hwc = &event->hw; event 451 drivers/perf/arm_dsu_pmu.c idx = dsu_pmu_get_event_idx(hw_events, event); event 456 drivers/perf/arm_dsu_pmu.c hw_events->events[idx] = event; event 460 drivers/perf/arm_dsu_pmu.c dsu_pmu_start(event, PERF_EF_RELOAD); event 462 drivers/perf/arm_dsu_pmu.c perf_event_update_userpage(event); event 466 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_del(struct perf_event *event, int flags) event 468 drivers/perf/arm_dsu_pmu.c struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); event 470 drivers/perf/arm_dsu_pmu.c struct hw_perf_event *hwc = &event->hw; event 473 drivers/perf/arm_dsu_pmu.c dsu_pmu_stop(event, PERF_EF_UPDATE); event 476 drivers/perf/arm_dsu_pmu.c perf_event_update_userpage(event); event 511 drivers/perf/arm_dsu_pmu.c struct perf_event *event) event 513 drivers/perf/arm_dsu_pmu.c if (is_software_event(event)) event 516 drivers/perf/arm_dsu_pmu.c if (event->pmu != pmu) event 518 drivers/perf/arm_dsu_pmu.c return dsu_pmu_get_event_idx(hw_events, event) >= 0; event 525 drivers/perf/arm_dsu_pmu.c static bool dsu_pmu_validate_group(struct perf_event *event) event 527 drivers/perf/arm_dsu_pmu.c struct perf_event *sibling, *leader = event->group_leader; event 530 drivers/perf/arm_dsu_pmu.c if (event->group_leader == event) event 534 drivers/perf/arm_dsu_pmu.c if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader)) event 537 drivers/perf/arm_dsu_pmu.c if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling)) event 540 drivers/perf/arm_dsu_pmu.c return dsu_pmu_validate_event(event->pmu, &fake_hw, event); event 543 drivers/perf/arm_dsu_pmu.c static int dsu_pmu_event_init(struct perf_event *event) event 545 drivers/perf/arm_dsu_pmu.c struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); event 547 drivers/perf/arm_dsu_pmu.c if (event->attr.type != event->pmu->type) event 551 drivers/perf/arm_dsu_pmu.c if (is_sampling_event(event)) { event 557 drivers/perf/arm_dsu_pmu.c if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) { event 562 drivers/perf/arm_dsu_pmu.c if (has_branch_stack(event)) { event 567 drivers/perf/arm_dsu_pmu.c if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) { event 578 drivers/perf/arm_dsu_pmu.c event->cpu = cpumask_first(&dsu_pmu->active_cpu); event 579 drivers/perf/arm_dsu_pmu.c if (event->cpu >= nr_cpu_ids) event 581 drivers/perf/arm_dsu_pmu.c if (!dsu_pmu_validate_group(event)) event 584 drivers/perf/arm_dsu_pmu.c event->hw.config_base = event->attr.config; event 32 drivers/perf/arm_pmu.c static inline u64 arm_pmu_event_max_period(struct perf_event *event) event 34 drivers/perf/arm_pmu.c if (event->hw.flags & ARMPMU_EVT_64BIT) event 94 drivers/perf/arm_pmu.c 
armpmu_map_event(struct perf_event *event, event 102 drivers/perf/arm_pmu.c u64 config = event->attr.config; event 103 drivers/perf/arm_pmu.c int type = event->attr.type; event 105 drivers/perf/arm_pmu.c if (type == event->pmu->type) event 120 drivers/perf/arm_pmu.c int armpmu_event_set_period(struct perf_event *event) event 122 drivers/perf/arm_pmu.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu); event 123 drivers/perf/arm_pmu.c struct hw_perf_event *hwc = &event->hw; event 129 drivers/perf/arm_pmu.c max_period = arm_pmu_event_max_period(event); event 155 drivers/perf/arm_pmu.c armpmu->write_counter(event, (u64)(-left) & max_period); event 157 drivers/perf/arm_pmu.c perf_event_update_userpage(event); event 162 drivers/perf/arm_pmu.c u64 armpmu_event_update(struct perf_event *event) event 164 drivers/perf/arm_pmu.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu); event 165 drivers/perf/arm_pmu.c struct hw_perf_event *hwc = &event->hw; event 167 drivers/perf/arm_pmu.c u64 max_period = arm_pmu_event_max_period(event); event 171 drivers/perf/arm_pmu.c new_raw_count = armpmu->read_counter(event); event 179 drivers/perf/arm_pmu.c local64_add(delta, &event->count); event 186 drivers/perf/arm_pmu.c armpmu_read(struct perf_event *event) event 188 drivers/perf/arm_pmu.c armpmu_event_update(event); event 192 drivers/perf/arm_pmu.c armpmu_stop(struct perf_event *event, int flags) event 194 drivers/perf/arm_pmu.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu); event 195 drivers/perf/arm_pmu.c struct hw_perf_event *hwc = &event->hw; event 202 drivers/perf/arm_pmu.c armpmu->disable(event); event 203 drivers/perf/arm_pmu.c armpmu_event_update(event); event 208 drivers/perf/arm_pmu.c static void armpmu_start(struct perf_event *event, int flags) event 210 drivers/perf/arm_pmu.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu); event 211 drivers/perf/arm_pmu.c struct hw_perf_event *hwc = &event->hw; event 228 drivers/perf/arm_pmu.c armpmu_event_set_period(event); event 229 drivers/perf/arm_pmu.c armpmu->enable(event); event 233 drivers/perf/arm_pmu.c armpmu_del(struct perf_event *event, int flags) event 235 drivers/perf/arm_pmu.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu); event 237 drivers/perf/arm_pmu.c struct hw_perf_event *hwc = &event->hw; event 240 drivers/perf/arm_pmu.c armpmu_stop(event, PERF_EF_UPDATE); event 242 drivers/perf/arm_pmu.c armpmu->clear_event_idx(hw_events, event); event 243 drivers/perf/arm_pmu.c perf_event_update_userpage(event); event 249 drivers/perf/arm_pmu.c armpmu_add(struct perf_event *event, int flags) event 251 drivers/perf/arm_pmu.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu); event 253 drivers/perf/arm_pmu.c struct hw_perf_event *hwc = &event->hw; event 261 drivers/perf/arm_pmu.c idx = armpmu->get_event_idx(hw_events, event); event 269 drivers/perf/arm_pmu.c event->hw.idx = idx; event 270 drivers/perf/arm_pmu.c armpmu->disable(event); event 271 drivers/perf/arm_pmu.c hw_events->events[idx] = event; event 275 drivers/perf/arm_pmu.c armpmu_start(event, PERF_EF_RELOAD); event 278 drivers/perf/arm_pmu.c perf_event_update_userpage(event); event 285 drivers/perf/arm_pmu.c struct perf_event *event) event 289 drivers/perf/arm_pmu.c if (is_software_event(event)) event 297 drivers/perf/arm_pmu.c if (event->pmu != pmu) event 300 drivers/perf/arm_pmu.c if (event->state < PERF_EVENT_STATE_OFF) event 303 drivers/perf/arm_pmu.c if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) event 306 drivers/perf/arm_pmu.c armpmu = to_arm_pmu(event->pmu); event 307 
drivers/perf/arm_pmu.c return armpmu->get_event_idx(hw_events, event) >= 0; event 311 drivers/perf/arm_pmu.c validate_group(struct perf_event *event) event 313 drivers/perf/arm_pmu.c struct perf_event *sibling, *leader = event->group_leader; event 322 drivers/perf/arm_pmu.c if (!validate_event(event->pmu, &fake_pmu, leader)) event 326 drivers/perf/arm_pmu.c if (!validate_event(event->pmu, &fake_pmu, sibling)) event 330 drivers/perf/arm_pmu.c if (!validate_event(event->pmu, &fake_pmu, event)) event 361 drivers/perf/arm_pmu.c __hw_perf_event_init(struct perf_event *event) event 363 drivers/perf/arm_pmu.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu); event 364 drivers/perf/arm_pmu.c struct hw_perf_event *hwc = &event->hw; event 368 drivers/perf/arm_pmu.c mapping = armpmu->map_event(event); event 371 drivers/perf/arm_pmu.c pr_debug("event %x:%llx not supported\n", event->attr.type, event 372 drivers/perf/arm_pmu.c event->attr.config); event 391 drivers/perf/arm_pmu.c armpmu->set_event_filter(hwc, &event->attr)) { event 402 drivers/perf/arm_pmu.c if (!is_sampling_event(event)) { event 409 drivers/perf/arm_pmu.c hwc->sample_period = arm_pmu_event_max_period(event) >> 1; event 414 drivers/perf/arm_pmu.c if (event->group_leader != event) { event 415 drivers/perf/arm_pmu.c if (validate_group(event) != 0) event 422 drivers/perf/arm_pmu.c static int armpmu_event_init(struct perf_event *event) event 424 drivers/perf/arm_pmu.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu); event 433 drivers/perf/arm_pmu.c if (event->cpu != -1 && event 434 drivers/perf/arm_pmu.c !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus)) event 438 drivers/perf/arm_pmu.c if (has_branch_stack(event)) event 441 drivers/perf/arm_pmu.c if (armpmu->map_event(event) == -ENOENT) event 444 drivers/perf/arm_pmu.c return __hw_perf_event_init(event); event 477 drivers/perf/arm_pmu.c static int armpmu_filter_match(struct perf_event *event) event 479 drivers/perf/arm_pmu.c struct arm_pmu *armpmu = to_arm_pmu(event->pmu); event 485 drivers/perf/arm_pmu.c return armpmu->filter_match(event); event 663 drivers/perf/arm_pmu.c struct perf_event *event; event 667 drivers/perf/arm_pmu.c event = hw_events->events[idx]; event 668 drivers/perf/arm_pmu.c if (!event) event 676 drivers/perf/arm_pmu.c armpmu_stop(event, PERF_EF_UPDATE); event 692 drivers/perf/arm_pmu.c RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD)); event 121 drivers/perf/arm_smmuv3_pmu.c static inline u32 get_##_name(struct perf_event *event) \ event 124 drivers/perf/arm_smmuv3_pmu.c event->attr._config); \ event 127 drivers/perf/arm_smmuv3_pmu.c SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15); event 202 drivers/perf/arm_smmuv3_pmu.c static void smmu_pmu_event_update(struct perf_event *event) event 204 drivers/perf/arm_smmuv3_pmu.c struct hw_perf_event *hwc = &event->hw; event 205 drivers/perf/arm_smmuv3_pmu.c struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); event 218 drivers/perf/arm_smmuv3_pmu.c local64_add(delta, &event->count); event 250 drivers/perf/arm_smmuv3_pmu.c static void smmu_pmu_set_event_filter(struct perf_event *event, event 253 drivers/perf/arm_smmuv3_pmu.c struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); event 256 drivers/perf/arm_smmuv3_pmu.c evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT; event 275 drivers/perf/arm_smmuv3_pmu.c struct perf_event *event, int idx) event 279 drivers/perf/arm_smmuv3_pmu.c bool filter_en = !!get_filter_enable(event); event 281 drivers/perf/arm_smmuv3_pmu.c span = filter_en ? 
get_filter_span(event) : event 283 drivers/perf/arm_smmuv3_pmu.c sid = filter_en ? get_filter_stream_id(event) : event 288 drivers/perf/arm_smmuv3_pmu.c smmu_pmu_set_event_filter(event, idx, span, sid); event 295 drivers/perf/arm_smmuv3_pmu.c smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) { event 296 drivers/perf/arm_smmuv3_pmu.c smmu_pmu_set_event_filter(event, 0, span, sid); event 304 drivers/perf/arm_smmuv3_pmu.c struct perf_event *event) event 314 drivers/perf/arm_smmuv3_pmu.c err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx); event 341 drivers/perf/arm_smmuv3_pmu.c static int smmu_pmu_event_init(struct perf_event *event) event 343 drivers/perf/arm_smmuv3_pmu.c struct hw_perf_event *hwc = &event->hw; event 344 drivers/perf/arm_smmuv3_pmu.c struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); event 350 drivers/perf/arm_smmuv3_pmu.c if (event->attr.type != event->pmu->type) event 358 drivers/perf/arm_smmuv3_pmu.c if (event->cpu < 0) { event 364 drivers/perf/arm_smmuv3_pmu.c event_id = get_event(event); event 372 drivers/perf/arm_smmuv3_pmu.c if (!is_software_event(event->group_leader)) { event 373 drivers/perf/arm_smmuv3_pmu.c if (!smmu_pmu_events_compatible(event->group_leader, event)) event 380 drivers/perf/arm_smmuv3_pmu.c for_each_sibling_event(sibling, event->group_leader) { event 384 drivers/perf/arm_smmuv3_pmu.c if (!smmu_pmu_events_compatible(sibling, event)) event 397 drivers/perf/arm_smmuv3_pmu.c event->cpu = smmu_pmu->on_cpu; event 402 drivers/perf/arm_smmuv3_pmu.c static void smmu_pmu_event_start(struct perf_event *event, int flags) event 404 drivers/perf/arm_smmuv3_pmu.c struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); event 405 drivers/perf/arm_smmuv3_pmu.c struct hw_perf_event *hwc = &event->hw; event 415 drivers/perf/arm_smmuv3_pmu.c static void smmu_pmu_event_stop(struct perf_event *event, int flags) event 417 drivers/perf/arm_smmuv3_pmu.c struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); event 418 drivers/perf/arm_smmuv3_pmu.c struct hw_perf_event *hwc = &event->hw; event 426 drivers/perf/arm_smmuv3_pmu.c smmu_pmu_event_update(event); event 430 drivers/perf/arm_smmuv3_pmu.c static int smmu_pmu_event_add(struct perf_event *event, int flags) event 432 drivers/perf/arm_smmuv3_pmu.c struct hw_perf_event *hwc = &event->hw; event 434 drivers/perf/arm_smmuv3_pmu.c struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); event 436 drivers/perf/arm_smmuv3_pmu.c idx = smmu_pmu_get_event_idx(smmu_pmu, event); event 442 drivers/perf/arm_smmuv3_pmu.c smmu_pmu->events[idx] = event; event 448 drivers/perf/arm_smmuv3_pmu.c smmu_pmu_event_start(event, flags); event 451 drivers/perf/arm_smmuv3_pmu.c perf_event_update_userpage(event); event 456 drivers/perf/arm_smmuv3_pmu.c static void smmu_pmu_event_del(struct perf_event *event, int flags) event 458 drivers/perf/arm_smmuv3_pmu.c struct hw_perf_event *hwc = &event->hw; event 459 drivers/perf/arm_smmuv3_pmu.c struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); event 462 drivers/perf/arm_smmuv3_pmu.c smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE); event 467 drivers/perf/arm_smmuv3_pmu.c perf_event_update_userpage(event); event 470 drivers/perf/arm_smmuv3_pmu.c static void smmu_pmu_event_read(struct perf_event *event) event 472 drivers/perf/arm_smmuv3_pmu.c smmu_pmu_event_update(event); event 556 drivers/perf/arm_smmuv3_pmu.c PMU_FORMAT_ATTR(event, "config:0-15"); event 618 drivers/perf/arm_smmuv3_pmu.c struct perf_event *event = smmu_pmu->events[idx]; event 621 drivers/perf/arm_smmuv3_pmu.c if 
(WARN_ON_ONCE(!event)) event 624 drivers/perf/arm_smmuv3_pmu.c smmu_pmu_event_update(event); event 625 drivers/perf/arm_smmuv3_pmu.c hwc = &event->hw; event 262 drivers/perf/arm_spe_pmu.c static u64 arm_spe_event_to_pmscr(struct perf_event *event) event 264 drivers/perf/arm_spe_pmu.c struct perf_event_attr *attr = &event->attr; event 283 drivers/perf/arm_spe_pmu.c static void arm_spe_event_sanitise_period(struct perf_event *event) event 285 drivers/perf/arm_spe_pmu.c struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); event 286 drivers/perf/arm_spe_pmu.c u64 period = event->hw.sample_period; event 297 drivers/perf/arm_spe_pmu.c event->hw.sample_period = period; event 300 drivers/perf/arm_spe_pmu.c static u64 arm_spe_event_to_pmsirr(struct perf_event *event) event 302 drivers/perf/arm_spe_pmu.c struct perf_event_attr *attr = &event->attr; event 305 drivers/perf/arm_spe_pmu.c arm_spe_event_sanitise_period(event); event 308 drivers/perf/arm_spe_pmu.c reg |= event->hw.sample_period; event 313 drivers/perf/arm_spe_pmu.c static u64 arm_spe_event_to_pmsfcr(struct perf_event *event) event 315 drivers/perf/arm_spe_pmu.c struct perf_event_attr *attr = &event->attr; event 334 drivers/perf/arm_spe_pmu.c static u64 arm_spe_event_to_pmsevfr(struct perf_event *event) event 336 drivers/perf/arm_spe_pmu.c struct perf_event_attr *attr = &event->attr; event 340 drivers/perf/arm_spe_pmu.c static u64 arm_spe_event_to_pmslatfr(struct perf_event *event) event 342 drivers/perf/arm_spe_pmu.c struct perf_event_attr *attr = &event->attr; event 360 drivers/perf/arm_spe_pmu.c struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); event 388 drivers/perf/arm_spe_pmu.c struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); event 459 drivers/perf/arm_spe_pmu.c struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); event 476 drivers/perf/arm_spe_pmu.c struct perf_event *event) event 482 drivers/perf/arm_spe_pmu.c buf = perf_aux_output_begin(handle, event); event 484 drivers/perf/arm_spe_pmu.c event->hw.state |= PERF_HES_STOPPED; event 609 drivers/perf/arm_spe_pmu.c struct perf_event *event = handle->event; event 644 drivers/perf/arm_spe_pmu.c arm_spe_perf_aux_output_begin(handle, event); event 659 drivers/perf/arm_spe_pmu.c static int arm_spe_pmu_event_init(struct perf_event *event) event 662 drivers/perf/arm_spe_pmu.c struct perf_event_attr *attr = &event->attr; event 663 drivers/perf/arm_spe_pmu.c struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); event 666 drivers/perf/arm_spe_pmu.c if (attr->type != event->pmu->type) event 669 drivers/perf/arm_spe_pmu.c if (event->cpu >= 0 && event 670 drivers/perf/arm_spe_pmu.c !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus)) event 673 drivers/perf/arm_spe_pmu.c if (arm_spe_event_to_pmsevfr(event) & SYS_PMSEVFR_EL1_RES0) event 689 drivers/perf/arm_spe_pmu.c reg = arm_spe_event_to_pmsfcr(event); event 702 drivers/perf/arm_spe_pmu.c reg = arm_spe_event_to_pmscr(event); event 712 drivers/perf/arm_spe_pmu.c static void arm_spe_pmu_start(struct perf_event *event, int flags) event 715 drivers/perf/arm_spe_pmu.c struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); event 716 drivers/perf/arm_spe_pmu.c struct hw_perf_event *hwc = &event->hw; event 720 drivers/perf/arm_spe_pmu.c arm_spe_perf_aux_output_begin(handle, event); event 724 drivers/perf/arm_spe_pmu.c reg = arm_spe_event_to_pmsfcr(event); event 727 drivers/perf/arm_spe_pmu.c reg = arm_spe_event_to_pmsevfr(event); event 730 drivers/perf/arm_spe_pmu.c reg = arm_spe_event_to_pmslatfr(event); event 734 
drivers/perf/arm_spe_pmu.c reg = arm_spe_event_to_pmsirr(event); event 741 drivers/perf/arm_spe_pmu.c reg = arm_spe_event_to_pmscr(event); event 746 drivers/perf/arm_spe_pmu.c static void arm_spe_pmu_stop(struct perf_event *event, int flags) event 748 drivers/perf/arm_spe_pmu.c struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); event 749 drivers/perf/arm_spe_pmu.c struct hw_perf_event *hwc = &event->hw; event 787 drivers/perf/arm_spe_pmu.c static int arm_spe_pmu_add(struct perf_event *event, int flags) event 790 drivers/perf/arm_spe_pmu.c struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); event 791 drivers/perf/arm_spe_pmu.c struct hw_perf_event *hwc = &event->hw; event 792 drivers/perf/arm_spe_pmu.c int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu; event 800 drivers/perf/arm_spe_pmu.c arm_spe_pmu_start(event, PERF_EF_RELOAD); event 808 drivers/perf/arm_spe_pmu.c static void arm_spe_pmu_del(struct perf_event *event, int flags) event 810 drivers/perf/arm_spe_pmu.c arm_spe_pmu_stop(event, PERF_EF_UPDATE); event 813 drivers/perf/arm_spe_pmu.c static void arm_spe_pmu_read(struct perf_event *event) event 817 drivers/perf/arm_spe_pmu.c static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages, event 820 drivers/perf/arm_spe_pmu.c int i, cpu = event->cpu; event 159 drivers/perf/fsl_imx8_ddr_perf.c PMU_FORMAT_ATTR(event, "config:0-7"); event 182 drivers/perf/fsl_imx8_ddr_perf.c static bool ddr_perf_is_filtered(struct perf_event *event) event 184 drivers/perf/fsl_imx8_ddr_perf.c return event->attr.config == 0x41 || event->attr.config == 0x42; event 187 drivers/perf/fsl_imx8_ddr_perf.c static u32 ddr_perf_filter_val(struct perf_event *event) event 189 drivers/perf/fsl_imx8_ddr_perf.c return event->attr.config1; event 202 drivers/perf/fsl_imx8_ddr_perf.c static bool ddr_perf_is_enhanced_filtered(struct perf_event *event) event 205 drivers/perf/fsl_imx8_ddr_perf.c struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); event 209 drivers/perf/fsl_imx8_ddr_perf.c ddr_perf_is_filtered(event); event 212 drivers/perf/fsl_imx8_ddr_perf.c static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event) event 221 drivers/perf/fsl_imx8_ddr_perf.c if (event == EVENT_CYCLES_ID) { event 243 drivers/perf/fsl_imx8_ddr_perf.c struct perf_event *event = pmu->events[counter]; event 251 drivers/perf/fsl_imx8_ddr_perf.c base += ddr_perf_is_enhanced_filtered(event) ? 
COUNTER_DPCR1 : event 256 drivers/perf/fsl_imx8_ddr_perf.c static int ddr_perf_event_init(struct perf_event *event) event 258 drivers/perf/fsl_imx8_ddr_perf.c struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); event 259 drivers/perf/fsl_imx8_ddr_perf.c struct hw_perf_event *hwc = &event->hw; event 262 drivers/perf/fsl_imx8_ddr_perf.c if (event->attr.type != event->pmu->type) event 265 drivers/perf/fsl_imx8_ddr_perf.c if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) event 268 drivers/perf/fsl_imx8_ddr_perf.c if (event->cpu < 0) { event 278 drivers/perf/fsl_imx8_ddr_perf.c if (event->group_leader->pmu != event->pmu && event 279 drivers/perf/fsl_imx8_ddr_perf.c !is_software_event(event->group_leader)) event 283 drivers/perf/fsl_imx8_ddr_perf.c if (!ddr_perf_filters_compatible(event, event->group_leader)) event 285 drivers/perf/fsl_imx8_ddr_perf.c for_each_sibling_event(sibling, event->group_leader) { event 286 drivers/perf/fsl_imx8_ddr_perf.c if (!ddr_perf_filters_compatible(event, sibling)) event 291 drivers/perf/fsl_imx8_ddr_perf.c for_each_sibling_event(sibling, event->group_leader) { event 292 drivers/perf/fsl_imx8_ddr_perf.c if (sibling->pmu != event->pmu && event 297 drivers/perf/fsl_imx8_ddr_perf.c event->cpu = pmu->cpu; event 304 drivers/perf/fsl_imx8_ddr_perf.c static void ddr_perf_event_update(struct perf_event *event) event 306 drivers/perf/fsl_imx8_ddr_perf.c struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); event 307 drivers/perf/fsl_imx8_ddr_perf.c struct hw_perf_event *hwc = &event->hw; event 319 drivers/perf/fsl_imx8_ddr_perf.c local64_add(delta, &event->count); event 346 drivers/perf/fsl_imx8_ddr_perf.c static void ddr_perf_event_start(struct perf_event *event, int flags) event 348 drivers/perf/fsl_imx8_ddr_perf.c struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); event 349 drivers/perf/fsl_imx8_ddr_perf.c struct hw_perf_event *hwc = &event->hw; event 354 drivers/perf/fsl_imx8_ddr_perf.c ddr_perf_counter_enable(pmu, event->attr.config, counter, true); event 359 drivers/perf/fsl_imx8_ddr_perf.c static int ddr_perf_event_add(struct perf_event *event, int flags) event 361 drivers/perf/fsl_imx8_ddr_perf.c struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); event 362 drivers/perf/fsl_imx8_ddr_perf.c struct hw_perf_event *hwc = &event->hw; event 364 drivers/perf/fsl_imx8_ddr_perf.c int cfg = event->attr.config; event 365 drivers/perf/fsl_imx8_ddr_perf.c int cfg1 = event->attr.config1; event 372 drivers/perf/fsl_imx8_ddr_perf.c !ddr_perf_filters_compatible(event, pmu->events[i])) event 376 drivers/perf/fsl_imx8_ddr_perf.c if (ddr_perf_is_filtered(event)) { event 389 drivers/perf/fsl_imx8_ddr_perf.c pmu->events[counter] = event; event 396 drivers/perf/fsl_imx8_ddr_perf.c ddr_perf_event_start(event, flags); event 401 drivers/perf/fsl_imx8_ddr_perf.c static void ddr_perf_event_stop(struct perf_event *event, int flags) event 403 drivers/perf/fsl_imx8_ddr_perf.c struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); event 404 drivers/perf/fsl_imx8_ddr_perf.c struct hw_perf_event *hwc = &event->hw; event 407 drivers/perf/fsl_imx8_ddr_perf.c ddr_perf_counter_enable(pmu, event->attr.config, counter, false); event 408 drivers/perf/fsl_imx8_ddr_perf.c ddr_perf_event_update(event); event 413 drivers/perf/fsl_imx8_ddr_perf.c static void ddr_perf_event_del(struct perf_event *event, int flags) event 415 drivers/perf/fsl_imx8_ddr_perf.c struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); event 416 drivers/perf/fsl_imx8_ddr_perf.c struct hw_perf_event *hwc = &event->hw; event 419 
drivers/perf/fsl_imx8_ddr_perf.c ddr_perf_event_stop(event, PERF_EF_UPDATE); event 478 drivers/perf/fsl_imx8_ddr_perf.c struct perf_event *event, *cycle_event = NULL; event 500 drivers/perf/fsl_imx8_ddr_perf.c event = pmu->events[i]; event 502 drivers/perf/fsl_imx8_ddr_perf.c ddr_perf_event_update(event); event 504 drivers/perf/fsl_imx8_ddr_perf.c if (event->hw.idx == EVENT_CYCLES_COUNTER) event 505 drivers/perf/fsl_imx8_ddr_perf.c cycle_event = event; event 143 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event) event 145 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu); event 147 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c struct hw_perf_event *hwc = &event->hw; event 184 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c struct perf_event *event; event 202 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c event = ddrc_pmu->pmu_events.hw_events[idx]; event 203 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c if (!event) event 206 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c hisi_uncore_pmu_event_update(event); event 207 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c hisi_uncore_pmu_set_event_period(event); event 277 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c HISI_PMU_FORMAT_ATTR(event, "config:0-4"), event 174 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c struct perf_event *event; event 192 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c event = hha_pmu->pmu_events.hw_events[idx]; event 193 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c if (!event) event 196 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c hisi_uncore_pmu_event_update(event); event 197 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c hisi_uncore_pmu_set_event_period(event); event 270 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c HISI_PMU_FORMAT_ATTR(event, "config:0-7"), event 173 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c struct perf_event *event; event 191 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c event = l3c_pmu->pmu_events.hw_events[idx]; event 192 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c if (!event) event 195 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c hisi_uncore_pmu_event_update(event); event 196 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c hisi_uncore_pmu_set_event_period(event); event 273 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c HISI_PMU_FORMAT_ATTR(event, "config:0-7"), event 62 drivers/perf/hisilicon/hisi_uncore_pmu.c static bool hisi_validate_event_group(struct perf_event *event) event 64 drivers/perf/hisilicon/hisi_uncore_pmu.c struct perf_event *sibling, *leader = event->group_leader; event 65 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); event 74 drivers/perf/hisilicon/hisi_uncore_pmu.c if (leader->pmu != event->pmu) event 78 drivers/perf/hisilicon/hisi_uncore_pmu.c if (leader != event) event 82 drivers/perf/hisilicon/hisi_uncore_pmu.c for_each_sibling_event(sibling, event->group_leader) { event 85 drivers/perf/hisilicon/hisi_uncore_pmu.c if (sibling->pmu != event->pmu) event 100 drivers/perf/hisilicon/hisi_uncore_pmu.c int hisi_uncore_pmu_get_event_idx(struct perf_event *event) event 102 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); event 126 drivers/perf/hisilicon/hisi_uncore_pmu.c int hisi_uncore_pmu_event_init(struct perf_event *event) event 128 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hw_perf_event *hwc = &event->hw; event 131 drivers/perf/hisilicon/hisi_uncore_pmu.c if (event->attr.type != 
event->pmu->type) event 139 drivers/perf/hisilicon/hisi_uncore_pmu.c if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) event 146 drivers/perf/hisilicon/hisi_uncore_pmu.c if (event->cpu < 0) event 153 drivers/perf/hisilicon/hisi_uncore_pmu.c if (!hisi_validate_event_group(event)) event 156 drivers/perf/hisilicon/hisi_uncore_pmu.c hisi_pmu = to_hisi_pmu(event->pmu); event 157 drivers/perf/hisilicon/hisi_uncore_pmu.c if (event->attr.config > hisi_pmu->check_event) event 168 drivers/perf/hisilicon/hisi_uncore_pmu.c hwc->config_base = event->attr.config; event 171 drivers/perf/hisilicon/hisi_uncore_pmu.c event->cpu = hisi_pmu->on_cpu; event 180 drivers/perf/hisilicon/hisi_uncore_pmu.c static void hisi_uncore_pmu_enable_event(struct perf_event *event) event 182 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); event 183 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hw_perf_event *hwc = &event->hw; event 186 drivers/perf/hisilicon/hisi_uncore_pmu.c HISI_GET_EVENTID(event)); event 195 drivers/perf/hisilicon/hisi_uncore_pmu.c static void hisi_uncore_pmu_disable_event(struct perf_event *event) event 197 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); event 198 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hw_perf_event *hwc = &event->hw; event 204 drivers/perf/hisilicon/hisi_uncore_pmu.c void hisi_uncore_pmu_set_event_period(struct perf_event *event) event 206 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); event 207 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hw_perf_event *hwc = &event->hw; event 223 drivers/perf/hisilicon/hisi_uncore_pmu.c void hisi_uncore_pmu_event_update(struct perf_event *event) event 225 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); event 226 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hw_perf_event *hwc = &event->hw; event 240 drivers/perf/hisilicon/hisi_uncore_pmu.c local64_add(delta, &event->count); event 243 drivers/perf/hisilicon/hisi_uncore_pmu.c void hisi_uncore_pmu_start(struct perf_event *event, int flags) event 245 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); event 246 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hw_perf_event *hwc = &event->hw; event 253 drivers/perf/hisilicon/hisi_uncore_pmu.c hisi_uncore_pmu_set_event_period(event); event 261 drivers/perf/hisilicon/hisi_uncore_pmu.c hisi_uncore_pmu_enable_event(event); event 262 drivers/perf/hisilicon/hisi_uncore_pmu.c perf_event_update_userpage(event); event 265 drivers/perf/hisilicon/hisi_uncore_pmu.c void hisi_uncore_pmu_stop(struct perf_event *event, int flags) event 267 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hw_perf_event *hwc = &event->hw; event 269 drivers/perf/hisilicon/hisi_uncore_pmu.c hisi_uncore_pmu_disable_event(event); event 277 drivers/perf/hisilicon/hisi_uncore_pmu.c hisi_uncore_pmu_event_update(event); event 281 drivers/perf/hisilicon/hisi_uncore_pmu.c int hisi_uncore_pmu_add(struct perf_event *event, int flags) event 283 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); event 284 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hw_perf_event *hwc = &event->hw; event 290 drivers/perf/hisilicon/hisi_uncore_pmu.c idx = hisi_pmu->ops->get_event_idx(event); event 294 drivers/perf/hisilicon/hisi_uncore_pmu.c event->hw.idx = idx; event 295 drivers/perf/hisilicon/hisi_uncore_pmu.c 
hisi_pmu->pmu_events.hw_events[idx] = event; event 298 drivers/perf/hisilicon/hisi_uncore_pmu.c hisi_uncore_pmu_start(event, PERF_EF_RELOAD); event 303 drivers/perf/hisilicon/hisi_uncore_pmu.c void hisi_uncore_pmu_del(struct perf_event *event, int flags) event 305 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); event 306 drivers/perf/hisilicon/hisi_uncore_pmu.c struct hw_perf_event *hwc = &event->hw; event 308 drivers/perf/hisilicon/hisi_uncore_pmu.c hisi_uncore_pmu_stop(event, PERF_EF_UPDATE); event 310 drivers/perf/hisilicon/hisi_uncore_pmu.c perf_event_update_userpage(event); event 314 drivers/perf/hisilicon/hisi_uncore_pmu.c void hisi_uncore_pmu_read(struct perf_event *event) event 317 drivers/perf/hisilicon/hisi_uncore_pmu.c hisi_uncore_pmu_event_update(event); event 80 drivers/perf/hisilicon/hisi_uncore_pmu.h int hisi_uncore_pmu_get_event_idx(struct perf_event *event); event 81 drivers/perf/hisilicon/hisi_uncore_pmu.h void hisi_uncore_pmu_read(struct perf_event *event); event 82 drivers/perf/hisilicon/hisi_uncore_pmu.h int hisi_uncore_pmu_add(struct perf_event *event, int flags); event 83 drivers/perf/hisilicon/hisi_uncore_pmu.h void hisi_uncore_pmu_del(struct perf_event *event, int flags); event 84 drivers/perf/hisilicon/hisi_uncore_pmu.h void hisi_uncore_pmu_start(struct perf_event *event, int flags); event 85 drivers/perf/hisilicon/hisi_uncore_pmu.h void hisi_uncore_pmu_stop(struct perf_event *event, int flags); event 86 drivers/perf/hisilicon/hisi_uncore_pmu.h void hisi_uncore_pmu_set_event_period(struct perf_event *event); event 87 drivers/perf/hisilicon/hisi_uncore_pmu.h void hisi_uncore_pmu_event_update(struct perf_event *event); event 88 drivers/perf/hisilicon/hisi_uncore_pmu.h int hisi_uncore_pmu_event_init(struct perf_event *event); event 74 drivers/perf/qcom_l2_pmu.c #define L2_EVT_CODE(event) (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT) event 75 drivers/perf/qcom_l2_pmu.c #define L2_EVT_GROUP(event) (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT) event 339 drivers/perf/qcom_l2_pmu.c static void l2_cache_event_update(struct perf_event *event) event 341 drivers/perf/qcom_l2_pmu.c struct hw_perf_event *hwc = &event->hw; event 358 drivers/perf/qcom_l2_pmu.c local64_add(delta, &event->count); event 382 drivers/perf/qcom_l2_pmu.c struct perf_event *event) event 384 drivers/perf/qcom_l2_pmu.c struct hw_perf_event *hwc = &event->hw; event 417 drivers/perf/qcom_l2_pmu.c struct perf_event *event) event 419 drivers/perf/qcom_l2_pmu.c struct hw_perf_event *hwc = &event->hw; event 439 drivers/perf/qcom_l2_pmu.c struct perf_event *event = cluster->events[idx]; event 442 drivers/perf/qcom_l2_pmu.c if (WARN_ON_ONCE(!event)) event 448 drivers/perf/qcom_l2_pmu.c l2_cache_event_update(event); event 449 drivers/perf/qcom_l2_pmu.c hwc = &event->hw; event 480 drivers/perf/qcom_l2_pmu.c static int l2_cache_event_init(struct perf_event *event) event 482 drivers/perf/qcom_l2_pmu.c struct hw_perf_event *hwc = &event->hw; event 487 drivers/perf/qcom_l2_pmu.c if (event->attr.type != event->pmu->type) event 490 drivers/perf/qcom_l2_pmu.c l2cache_pmu = to_l2cache_pmu(event->pmu); event 498 drivers/perf/qcom_l2_pmu.c if (event->cpu < 0) { event 504 drivers/perf/qcom_l2_pmu.c if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) || event 505 drivers/perf/qcom_l2_pmu.c ((event->attr.config & ~L2_EVT_MASK) != 0)) && event 506 drivers/perf/qcom_l2_pmu.c (event->attr.config != L2CYCLE_CTR_RAW_CODE)) { event 509 drivers/perf/qcom_l2_pmu.c 
event->attr.config); event 514 drivers/perf/qcom_l2_pmu.c if (event->group_leader->pmu != event->pmu && event 515 drivers/perf/qcom_l2_pmu.c !is_software_event(event->group_leader)) { event 521 drivers/perf/qcom_l2_pmu.c for_each_sibling_event(sibling, event->group_leader) { event 522 drivers/perf/qcom_l2_pmu.c if (sibling->pmu != event->pmu && event 530 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(l2cache_pmu, event->cpu); event 534 drivers/perf/qcom_l2_pmu.c "CPU%d not associated with L2 cluster\n", event->cpu); event 539 drivers/perf/qcom_l2_pmu.c if ((event->group_leader != event) && event 540 drivers/perf/qcom_l2_pmu.c (cluster->on_cpu != event->group_leader->cpu)) { event 543 drivers/perf/qcom_l2_pmu.c event->cpu, event->group_leader->cpu); event 547 drivers/perf/qcom_l2_pmu.c if ((event != event->group_leader) && event 548 drivers/perf/qcom_l2_pmu.c !is_software_event(event->group_leader) && event 549 drivers/perf/qcom_l2_pmu.c (L2_EVT_GROUP(event->group_leader->attr.config) == event 550 drivers/perf/qcom_l2_pmu.c L2_EVT_GROUP(event->attr.config))) { event 553 drivers/perf/qcom_l2_pmu.c event->group_leader->attr.config, event 554 drivers/perf/qcom_l2_pmu.c event->attr.config); event 558 drivers/perf/qcom_l2_pmu.c for_each_sibling_event(sibling, event->group_leader) { event 559 drivers/perf/qcom_l2_pmu.c if ((sibling != event) && event 562 drivers/perf/qcom_l2_pmu.c L2_EVT_GROUP(event->attr.config))) { event 566 drivers/perf/qcom_l2_pmu.c event->attr.config); event 572 drivers/perf/qcom_l2_pmu.c hwc->config_base = event->attr.config; event 578 drivers/perf/qcom_l2_pmu.c event->cpu = cluster->on_cpu; event 583 drivers/perf/qcom_l2_pmu.c static void l2_cache_event_start(struct perf_event *event, int flags) event 586 drivers/perf/qcom_l2_pmu.c struct hw_perf_event *hwc = &event->hw; event 593 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); event 614 drivers/perf/qcom_l2_pmu.c static void l2_cache_event_stop(struct perf_event *event, int flags) event 616 drivers/perf/qcom_l2_pmu.c struct hw_perf_event *hwc = &event->hw; event 626 drivers/perf/qcom_l2_pmu.c l2_cache_event_update(event); event 630 drivers/perf/qcom_l2_pmu.c static int l2_cache_event_add(struct perf_event *event, int flags) event 632 drivers/perf/qcom_l2_pmu.c struct hw_perf_event *hwc = &event->hw; event 637 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); event 639 drivers/perf/qcom_l2_pmu.c idx = l2_cache_get_event_idx(cluster, event); event 645 drivers/perf/qcom_l2_pmu.c cluster->events[idx] = event; event 649 drivers/perf/qcom_l2_pmu.c l2_cache_event_start(event, flags); event 652 drivers/perf/qcom_l2_pmu.c perf_event_update_userpage(event); event 657 drivers/perf/qcom_l2_pmu.c static void l2_cache_event_del(struct perf_event *event, int flags) event 659 drivers/perf/qcom_l2_pmu.c struct hw_perf_event *hwc = &event->hw; event 663 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); event 665 drivers/perf/qcom_l2_pmu.c l2_cache_event_stop(event, flags | PERF_EF_UPDATE); event 667 drivers/perf/qcom_l2_pmu.c l2_cache_clear_event_idx(cluster, event); event 669 drivers/perf/qcom_l2_pmu.c perf_event_update_userpage(event); event 672 drivers/perf/qcom_l2_pmu.c static void l2_cache_event_read(struct perf_event *event) event 674 drivers/perf/qcom_l2_pmu.c l2_cache_event_update(event); event 701 drivers/perf/qcom_l2_pmu.c PMU_FORMAT_ATTR(event, "config:0-11"); event 136 
drivers/perf/qcom_l3_pmu.c static inline u32 get_event_type(struct perf_event *event) event 138 drivers/perf/qcom_l3_pmu.c return (event->attr.config) & L3_EVTYPE_MASK; event 141 drivers/perf/qcom_l3_pmu.c static inline bool event_uses_long_counter(struct perf_event *event) event 143 drivers/perf/qcom_l3_pmu.c return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT)); event 146 drivers/perf/qcom_l3_pmu.c static inline int event_num_counters(struct perf_event *event) event 148 drivers/perf/qcom_l3_pmu.c return event_uses_long_counter(event) ? 2 : 1; event 175 drivers/perf/qcom_l3_pmu.c void (*start)(struct perf_event *event); event 177 drivers/perf/qcom_l3_pmu.c void (*stop)(struct perf_event *event, int flags); event 179 drivers/perf/qcom_l3_pmu.c void (*update)(struct perf_event *event); event 193 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__64bit_counter_start(struct perf_event *event) event 195 drivers/perf/qcom_l3_pmu.c struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); event 196 drivers/perf/qcom_l3_pmu.c int idx = event->hw.idx; event 197 drivers/perf/qcom_l3_pmu.c u32 evsel = get_event_type(event); event 206 drivers/perf/qcom_l3_pmu.c local64_set(&event->hw.prev_count, 0); event 224 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__64bit_counter_stop(struct perf_event *event, event 227 drivers/perf/qcom_l3_pmu.c struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); event 228 drivers/perf/qcom_l3_pmu.c int idx = event->hw.idx; event 239 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__64bit_counter_update(struct perf_event *event) event 241 drivers/perf/qcom_l3_pmu.c struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); event 242 drivers/perf/qcom_l3_pmu.c int idx = event->hw.idx; event 247 drivers/perf/qcom_l3_pmu.c prev = local64_read(&event->hw.prev_count); event 253 drivers/perf/qcom_l3_pmu.c } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); event 255 drivers/perf/qcom_l3_pmu.c local64_add(new - prev, &event->count); event 274 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__32bit_counter_start(struct perf_event *event) event 276 drivers/perf/qcom_l3_pmu.c struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); event 277 drivers/perf/qcom_l3_pmu.c int idx = event->hw.idx; event 278 drivers/perf/qcom_l3_pmu.c u32 evsel = get_event_type(event); event 285 drivers/perf/qcom_l3_pmu.c local64_set(&event->hw.prev_count, 0); event 299 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__32bit_counter_stop(struct perf_event *event, event 302 drivers/perf/qcom_l3_pmu.c struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); event 303 drivers/perf/qcom_l3_pmu.c int idx = event->hw.idx; event 316 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__32bit_counter_update(struct perf_event *event) event 318 drivers/perf/qcom_l3_pmu.c struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); event 319 drivers/perf/qcom_l3_pmu.c int idx = event->hw.idx; event 323 drivers/perf/qcom_l3_pmu.c prev = local64_read(&event->hw.prev_count); event 325 drivers/perf/qcom_l3_pmu.c } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); event 327 drivers/perf/qcom_l3_pmu.c local64_add(new - prev, &event->count); event 338 drivers/perf/qcom_l3_pmu.c const struct l3cache_event_ops *l3cache_event_get_ops(struct perf_event *event) event 340 drivers/perf/qcom_l3_pmu.c if (event_uses_long_counter(event)) event 402 drivers/perf/qcom_l3_pmu.c struct perf_event *event; event 405 drivers/perf/qcom_l3_pmu.c event = l3pmu->events[idx]; event 406 
drivers/perf/qcom_l3_pmu.c if (!event) event 415 drivers/perf/qcom_l3_pmu.c ops = l3cache_event_get_ops(event); event 416 drivers/perf/qcom_l3_pmu.c ops->update(event); event 451 drivers/perf/qcom_l3_pmu.c static bool qcom_l3_cache__validate_event_group(struct perf_event *event) event 453 drivers/perf/qcom_l3_pmu.c struct perf_event *leader = event->group_leader; event 457 drivers/perf/qcom_l3_pmu.c if (leader->pmu != event->pmu && !is_software_event(leader)) event 460 drivers/perf/qcom_l3_pmu.c counters = event_num_counters(event); event 466 drivers/perf/qcom_l3_pmu.c if (sibling->pmu != event->pmu) event 478 drivers/perf/qcom_l3_pmu.c static int qcom_l3_cache__event_init(struct perf_event *event) event 480 drivers/perf/qcom_l3_pmu.c struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); event 481 drivers/perf/qcom_l3_pmu.c struct hw_perf_event *hwc = &event->hw; event 486 drivers/perf/qcom_l3_pmu.c if (event->attr.type != event->pmu->type) event 499 drivers/perf/qcom_l3_pmu.c if (event->cpu < 0) event 503 drivers/perf/qcom_l3_pmu.c if (!qcom_l3_cache__validate_event_group(event)) event 519 drivers/perf/qcom_l3_pmu.c event->cpu = cpumask_first(&l3pmu->cpumask); event 524 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__event_start(struct perf_event *event, int flags) event 526 drivers/perf/qcom_l3_pmu.c struct hw_perf_event *hwc = &event->hw; event 527 drivers/perf/qcom_l3_pmu.c const struct l3cache_event_ops *ops = l3cache_event_get_ops(event); event 530 drivers/perf/qcom_l3_pmu.c ops->start(event); event 533 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__event_stop(struct perf_event *event, int flags) event 535 drivers/perf/qcom_l3_pmu.c struct hw_perf_event *hwc = &event->hw; event 536 drivers/perf/qcom_l3_pmu.c const struct l3cache_event_ops *ops = l3cache_event_get_ops(event); event 541 drivers/perf/qcom_l3_pmu.c ops->stop(event, flags); event 543 drivers/perf/qcom_l3_pmu.c ops->update(event); event 547 drivers/perf/qcom_l3_pmu.c static int qcom_l3_cache__event_add(struct perf_event *event, int flags) event 549 drivers/perf/qcom_l3_pmu.c struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); event 550 drivers/perf/qcom_l3_pmu.c struct hw_perf_event *hwc = &event->hw; event 551 drivers/perf/qcom_l3_pmu.c int order = event_uses_long_counter(event) ? 1 : 0; event 564 drivers/perf/qcom_l3_pmu.c l3pmu->events[idx] = event; event 567 drivers/perf/qcom_l3_pmu.c qcom_l3_cache__event_start(event, 0); event 570 drivers/perf/qcom_l3_pmu.c perf_event_update_userpage(event); event 575 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__event_del(struct perf_event *event, int flags) event 577 drivers/perf/qcom_l3_pmu.c struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); event 578 drivers/perf/qcom_l3_pmu.c struct hw_perf_event *hwc = &event->hw; event 579 drivers/perf/qcom_l3_pmu.c int order = event_uses_long_counter(event) ? 
1 : 0; event 582 drivers/perf/qcom_l3_pmu.c qcom_l3_cache__event_stop(event, flags | PERF_EF_UPDATE); event 587 drivers/perf/qcom_l3_pmu.c perf_event_update_userpage(event); event 590 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__event_read(struct perf_event *event) event 592 drivers/perf/qcom_l3_pmu.c const struct l3cache_event_ops *ops = l3cache_event_get_ops(event); event 594 drivers/perf/qcom_l3_pmu.c ops->update(event); event 628 drivers/perf/qcom_l3_pmu.c L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"), event 82 drivers/perf/thunderx2_pmu.c void (*init_cntr_base)(struct perf_event *event, event 84 drivers/perf/thunderx2_pmu.c void (*stop_event)(struct perf_event *event); event 85 drivers/perf/thunderx2_pmu.c void (*start_event)(struct perf_event *event, int flags); event 95 drivers/perf/thunderx2_pmu.c PMU_FORMAT_ATTR(event, "config:0-4"); event 244 drivers/perf/thunderx2_pmu.c static void init_cntr_base_l3c(struct perf_event *event, event 247 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 251 drivers/perf/thunderx2_pmu.c + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event)); event 253 drivers/perf/thunderx2_pmu.c + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event)); event 256 drivers/perf/thunderx2_pmu.c static void init_cntr_base_dmc(struct perf_event *event, event 259 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 265 drivers/perf/thunderx2_pmu.c + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event)); event 268 drivers/perf/thunderx2_pmu.c static void uncore_start_event_l3c(struct perf_event *event, int flags) event 271 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 274 drivers/perf/thunderx2_pmu.c val = GET_EVENTID(event) << 3; event 280 drivers/perf/thunderx2_pmu.c static inline void uncore_stop_event_l3c(struct perf_event *event) event 282 drivers/perf/thunderx2_pmu.c reg_writel(0, event->hw.config_base); event 285 drivers/perf/thunderx2_pmu.c static void uncore_start_event_dmc(struct perf_event *event, int flags) event 288 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 289 drivers/perf/thunderx2_pmu.c int idx = GET_COUNTERID(event); event 290 drivers/perf/thunderx2_pmu.c int event_id = GET_EVENTID(event); event 303 drivers/perf/thunderx2_pmu.c static void uncore_stop_event_dmc(struct perf_event *event) event 306 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 307 drivers/perf/thunderx2_pmu.c int idx = GET_COUNTERID(event); event 315 drivers/perf/thunderx2_pmu.c static void tx2_uncore_event_update(struct perf_event *event) event 318 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 323 drivers/perf/thunderx2_pmu.c tx2_pmu = pmu_to_tx2_pmu(event->pmu); event 335 drivers/perf/thunderx2_pmu.c GET_EVENTID(event) == DMC_EVENT_DATA_TRANSFERS) event 342 drivers/perf/thunderx2_pmu.c local64_add(delta * prorate_factor, &event->count); event 367 drivers/perf/thunderx2_pmu.c struct perf_event *event, int *counters) event 369 drivers/perf/thunderx2_pmu.c if (is_software_event(event)) event 372 drivers/perf/thunderx2_pmu.c if (event->pmu != pmu) event 383 drivers/perf/thunderx2_pmu.c static bool tx2_uncore_validate_event_group(struct perf_event *event) event 385 drivers/perf/thunderx2_pmu.c struct perf_event *sibling, *leader = event->group_leader; event 388 drivers/perf/thunderx2_pmu.c if (event->group_leader == event) event 391 drivers/perf/thunderx2_pmu.c if (!tx2_uncore_validate_event(event->pmu, leader, &counters)) event 395 
drivers/perf/thunderx2_pmu.c if (!tx2_uncore_validate_event(event->pmu, sibling, &counters)) event 399 drivers/perf/thunderx2_pmu.c if (!tx2_uncore_validate_event(event->pmu, event, &counters)) event 410 drivers/perf/thunderx2_pmu.c static int tx2_uncore_event_init(struct perf_event *event) event 412 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 416 drivers/perf/thunderx2_pmu.c if (event->attr.type != event->pmu->type) event 424 drivers/perf/thunderx2_pmu.c if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) event 427 drivers/perf/thunderx2_pmu.c if (event->cpu < 0) event 430 drivers/perf/thunderx2_pmu.c tx2_pmu = pmu_to_tx2_pmu(event->pmu); event 433 drivers/perf/thunderx2_pmu.c event->cpu = tx2_pmu->cpu; event 435 drivers/perf/thunderx2_pmu.c if (event->attr.config >= tx2_pmu->max_events) event 439 drivers/perf/thunderx2_pmu.c hwc->config = event->attr.config; event 442 drivers/perf/thunderx2_pmu.c if (!tx2_uncore_validate_event_group(event)) event 448 drivers/perf/thunderx2_pmu.c static void tx2_uncore_event_start(struct perf_event *event, int flags) event 450 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 454 drivers/perf/thunderx2_pmu.c tx2_pmu = pmu_to_tx2_pmu(event->pmu); event 456 drivers/perf/thunderx2_pmu.c tx2_pmu->start_event(event, flags); event 457 drivers/perf/thunderx2_pmu.c perf_event_update_userpage(event); event 468 drivers/perf/thunderx2_pmu.c static void tx2_uncore_event_stop(struct perf_event *event, int flags) event 470 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 476 drivers/perf/thunderx2_pmu.c tx2_pmu = pmu_to_tx2_pmu(event->pmu); event 477 drivers/perf/thunderx2_pmu.c tx2_pmu->stop_event(event); event 481 drivers/perf/thunderx2_pmu.c tx2_uncore_event_update(event); event 486 drivers/perf/thunderx2_pmu.c static int tx2_uncore_event_add(struct perf_event *event, int flags) event 488 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 491 drivers/perf/thunderx2_pmu.c tx2_pmu = pmu_to_tx2_pmu(event->pmu); event 498 drivers/perf/thunderx2_pmu.c tx2_pmu->events[hwc->idx] = event; event 500 drivers/perf/thunderx2_pmu.c tx2_pmu->init_cntr_base(event, tx2_pmu); event 504 drivers/perf/thunderx2_pmu.c tx2_uncore_event_start(event, flags); event 509 drivers/perf/thunderx2_pmu.c static void tx2_uncore_event_del(struct perf_event *event, int flags) event 511 drivers/perf/thunderx2_pmu.c struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu); event 512 drivers/perf/thunderx2_pmu.c struct hw_perf_event *hwc = &event->hw; event 514 drivers/perf/thunderx2_pmu.c tx2_uncore_event_stop(event, PERF_EF_UPDATE); event 517 drivers/perf/thunderx2_pmu.c free_counter(tx2_pmu, GET_COUNTERID(event)); event 519 drivers/perf/thunderx2_pmu.c perf_event_update_userpage(event); event 524 drivers/perf/thunderx2_pmu.c static void tx2_uncore_event_read(struct perf_event *event) event 526 drivers/perf/thunderx2_pmu.c tx2_uncore_event_update(event); event 541 drivers/perf/thunderx2_pmu.c struct perf_event *event = tx2_pmu->events[idx]; event 543 drivers/perf/thunderx2_pmu.c tx2_uncore_event_update(event); event 890 drivers/perf/xgene_pmu.c static int xgene_perf_event_init(struct perf_event *event) event 892 drivers/perf/xgene_pmu.c struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); event 893 drivers/perf/xgene_pmu.c struct hw_perf_event *hw = &event->hw; event 897 drivers/perf/xgene_pmu.c if (event->attr.type != event->pmu->type) event 905 drivers/perf/xgene_pmu.c 
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) event 908 drivers/perf/xgene_pmu.c if (event->cpu < 0) event 919 drivers/perf/xgene_pmu.c event->cpu = cpumask_first(&pmu_dev->parent->cpu); event 921 drivers/perf/xgene_pmu.c hw->config = event->attr.config; event 928 drivers/perf/xgene_pmu.c hw->config_base = event->attr.config1; event 934 drivers/perf/xgene_pmu.c if (event->group_leader->pmu != event->pmu && event 935 drivers/perf/xgene_pmu.c !is_software_event(event->group_leader)) event 938 drivers/perf/xgene_pmu.c for_each_sibling_event(sibling, event->group_leader) { event 939 drivers/perf/xgene_pmu.c if (sibling->pmu != event->pmu && event 947 drivers/perf/xgene_pmu.c static void xgene_perf_enable_event(struct perf_event *event) event 949 drivers/perf/xgene_pmu.c struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); event 952 drivers/perf/xgene_pmu.c xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event), event 953 drivers/perf/xgene_pmu.c GET_EVENTID(event)); event 954 drivers/perf/xgene_pmu.c xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event))); event 957 drivers/perf/xgene_pmu.c ~((u32)GET_AGENT1ID(event))); event 959 drivers/perf/xgene_pmu.c xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event)); event 960 drivers/perf/xgene_pmu.c xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event)); event 963 drivers/perf/xgene_pmu.c static void xgene_perf_disable_event(struct perf_event *event) event 965 drivers/perf/xgene_pmu.c struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); event 968 drivers/perf/xgene_pmu.c xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event)); event 969 drivers/perf/xgene_pmu.c xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event)); event 972 drivers/perf/xgene_pmu.c static void xgene_perf_event_set_period(struct perf_event *event) event 974 drivers/perf/xgene_pmu.c struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); event 976 drivers/perf/xgene_pmu.c struct hw_perf_event *hw = &event->hw; event 990 drivers/perf/xgene_pmu.c static void xgene_perf_event_update(struct perf_event *event) event 992 drivers/perf/xgene_pmu.c struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); event 994 drivers/perf/xgene_pmu.c struct hw_perf_event *hw = &event->hw; event 999 drivers/perf/xgene_pmu.c new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event)); event 1007 drivers/perf/xgene_pmu.c local64_add(delta, &event->count); event 1010 drivers/perf/xgene_pmu.c static void xgene_perf_read(struct perf_event *event) event 1012 drivers/perf/xgene_pmu.c xgene_perf_event_update(event); event 1015 drivers/perf/xgene_pmu.c static void xgene_perf_start(struct perf_event *event, int flags) event 1017 drivers/perf/xgene_pmu.c struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); event 1019 drivers/perf/xgene_pmu.c struct hw_perf_event *hw = &event->hw; event 1027 drivers/perf/xgene_pmu.c xgene_perf_event_set_period(event); event 1032 drivers/perf/xgene_pmu.c xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event), event 1036 drivers/perf/xgene_pmu.c xgene_perf_enable_event(event); event 1037 drivers/perf/xgene_pmu.c perf_event_update_userpage(event); event 1040 drivers/perf/xgene_pmu.c static void xgene_perf_stop(struct perf_event *event, int flags) event 1042 drivers/perf/xgene_pmu.c struct hw_perf_event *hw = &event->hw; event 1047 drivers/perf/xgene_pmu.c xgene_perf_disable_event(event); event 1054 drivers/perf/xgene_pmu.c xgene_perf_read(event); event 1058 drivers/perf/xgene_pmu.c static int 
xgene_perf_add(struct perf_event *event, int flags) event 1060 drivers/perf/xgene_pmu.c struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); event 1061 drivers/perf/xgene_pmu.c struct hw_perf_event *hw = &event->hw; event 1071 drivers/perf/xgene_pmu.c pmu_dev->pmu_counter_event[hw->idx] = event; event 1074 drivers/perf/xgene_pmu.c xgene_perf_start(event, PERF_EF_RELOAD); event 1079 drivers/perf/xgene_pmu.c static void xgene_perf_del(struct perf_event *event, int flags) event 1081 drivers/perf/xgene_pmu.c struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); event 1082 drivers/perf/xgene_pmu.c struct hw_perf_event *hw = &event->hw; event 1084 drivers/perf/xgene_pmu.c xgene_perf_stop(event, PERF_EF_UPDATE); event 1087 drivers/perf/xgene_pmu.c clear_avail_cntr(pmu_dev, GET_CNTR(event)); event 1089 drivers/perf/xgene_pmu.c perf_event_update_userpage(event); event 1218 drivers/perf/xgene_pmu.c struct perf_event *event = pmu_dev->pmu_counter_event[idx]; event 1222 drivers/perf/xgene_pmu.c if (!event || !overflowed) event 1224 drivers/perf/xgene_pmu.c xgene_perf_event_update(event); event 1225 drivers/perf/xgene_pmu.c xgene_perf_event_set_period(event); event 91 drivers/phy/qualcomm/phy-qcom-usb-hs.c qcom_usb_hs_phy_vbus_notifier(struct notifier_block *nb, unsigned long event, event 99 drivers/phy/qualcomm/phy-qcom-usb-hs.c if (event) event 969 drivers/phy/rockchip/phy-rockchip-inno-usb2.c unsigned long event, void *ptr) event 148 drivers/pinctrl/bcm/pinctrl-nsp-gpio.c unsigned int event, level; event 151 drivers/pinctrl/bcm/pinctrl-nsp-gpio.c event = readl(chip->base + NSP_GPIO_EVENT_INT_MASK) & event 156 drivers/pinctrl/bcm/pinctrl-nsp-gpio.c int_bits = level | event; event 48 drivers/platform/chrome/chromeos_tbmc.c static void chromeos_tbmc_notify(struct acpi_device *adev, u32 event) event 51 drivers/platform/chrome/chromeos_tbmc.c switch (event) { event 56 drivers/platform/chrome/chromeos_tbmc.c dev_err(&adev->dev, "Unexpected event: 0x%08X\n", event); event 100 drivers/platform/chrome/cros_ec_chardev.c struct ec_event *event; event 102 drivers/platform/chrome/cros_ec_chardev.c int total_size = sizeof(*event) + ec_dev->event_size; event 108 drivers/platform/chrome/cros_ec_chardev.c event = kzalloc(total_size, GFP_KERNEL); event 109 drivers/platform/chrome/cros_ec_chardev.c if (!event) event 112 drivers/platform/chrome/cros_ec_chardev.c event->size = ec_dev->event_size; event 113 drivers/platform/chrome/cros_ec_chardev.c event->event_type = ec_dev->event_data.event_type; event 114 drivers/platform/chrome/cros_ec_chardev.c memcpy(event->data, &ec_dev->event_data.data, ec_dev->event_size); event 117 drivers/platform/chrome/cros_ec_chardev.c list_add_tail(&event->node, &priv->events); event 128 drivers/platform/chrome/cros_ec_chardev.c struct ec_event *event; event 133 drivers/platform/chrome/cros_ec_chardev.c event = ERR_PTR(-EWOULDBLOCK); event 138 drivers/platform/chrome/cros_ec_chardev.c event = NULL; event 145 drivers/platform/chrome/cros_ec_chardev.c event = ERR_PTR(err); event 149 drivers/platform/chrome/cros_ec_chardev.c event = list_first_entry(&priv->events, struct ec_event, node); event 150 drivers/platform/chrome/cros_ec_chardev.c list_del(&event->node); event 151 drivers/platform/chrome/cros_ec_chardev.c priv->event_len -= sizeof(*event) + event->size; event 155 drivers/platform/chrome/cros_ec_chardev.c return event; event 212 drivers/platform/chrome/cros_ec_chardev.c struct ec_event *event; event 214 drivers/platform/chrome/cros_ec_chardev.c event = 
cros_ec_chardev_fetch_event(priv, length != 0, event 216 drivers/platform/chrome/cros_ec_chardev.c if (IS_ERR(event)) event 217 drivers/platform/chrome/cros_ec_chardev.c return PTR_ERR(event); event 226 drivers/platform/chrome/cros_ec_chardev.c count = min(length, event->size + 1); event 227 drivers/platform/chrome/cros_ec_chardev.c ret = copy_to_user(buffer, &event->event_type, count); event 228 drivers/platform/chrome/cros_ec_chardev.c kfree(event); event 258 drivers/platform/chrome/cros_ec_chardev.c struct ec_event *event, *e; event 263 drivers/platform/chrome/cros_ec_chardev.c list_for_each_entry_safe(event, e, &priv->events, node) { event 264 drivers/platform/chrome/cros_ec_chardev.c list_del(&event->node); event 265 drivers/platform/chrome/cros_ec_chardev.c kfree(event); event 82 drivers/platform/chrome/wilco_ec/event.c u16 event[0]; event 164 drivers/platform/chrome/wilco_ec/event.c struct ec_event *event; event 166 drivers/platform/chrome/wilco_ec/event.c while ((event = event_queue_pop(q)) != NULL) event 167 drivers/platform/chrome/wilco_ec/event.c kfree(event); event 216 drivers/platform/chrome/wilco_ec/event.c struct ec_event *event, *queue_event, *old_event; event 221 drivers/platform/chrome/wilco_ec/event.c event = (struct ec_event *)(buf + offset); event 223 drivers/platform/chrome/wilco_ec/event.c num_words = ec_event_num_words(event); event 224 drivers/platform/chrome/wilco_ec/event.c event_size = ec_event_size(event); event 242 drivers/platform/chrome/wilco_ec/event.c queue_event = kmemdup(event, event_size, GFP_KERNEL); event 357 drivers/platform/chrome/wilco_ec/event.c struct ec_event *event; event 382 drivers/platform/chrome/wilco_ec/event.c event = event_queue_pop(dev_data->events); event 384 drivers/platform/chrome/wilco_ec/event.c n_bytes_written = ec_event_size(event); event 385 drivers/platform/chrome/wilco_ec/event.c if (copy_to_user(buf, event, n_bytes_written)) event 387 drivers/platform/chrome/wilco_ec/event.c kfree(event); event 21 drivers/platform/x86/acer-wireless.c static void acer_wireless_notify(struct acpi_device *adev, u32 event) event 25 drivers/platform/x86/acer-wireless.c dev_dbg(&adev->dev, "event=%#x\n", event); event 26 drivers/platform/x86/acer-wireless.c if (event != 0x80) { event 27 drivers/platform/x86/acer-wireless.c dev_notice(&adev->dev, "Unknown SMKB event: %#x\n", event); event 1474 drivers/platform/x86/asus-laptop.c static void asus_input_notify(struct asus_laptop *asus, int event) event 1478 drivers/platform/x86/asus-laptop.c if (!sparse_keymap_report_event(asus->inputdev, event, 1, true)) event 1479 drivers/platform/x86/asus-laptop.c pr_info("Unknown key %x pressed\n", event); event 1525 drivers/platform/x86/asus-laptop.c static void asus_acpi_notify(struct acpi_device *device, u32 event) event 1531 drivers/platform/x86/asus-laptop.c count = asus->event_count[event % 128]++; event 1533 drivers/platform/x86/asus-laptop.c dev_name(&asus->device->dev), event, event 1536 drivers/platform/x86/asus-laptop.c if (event >= ATKD_BRNUP_MIN && event <= ATKD_BRNUP_MAX) event 1537 drivers/platform/x86/asus-laptop.c event = ATKD_BRNUP; event 1538 drivers/platform/x86/asus-laptop.c else if (event >= ATKD_BRNDOWN_MIN && event 1539 drivers/platform/x86/asus-laptop.c event <= ATKD_BRNDOWN_MAX) event 1540 drivers/platform/x86/asus-laptop.c event = ATKD_BRNDOWN; event 1543 drivers/platform/x86/asus-laptop.c if (event == ATKD_BRNDOWN || event == ATKD_BRNUP) { event 1552 drivers/platform/x86/asus-laptop.c if (asus->pega_accel_poll && event == 0xEA) { event 
1558 drivers/platform/x86/asus-laptop.c asus_input_notify(asus, event); event 111 drivers/platform/x86/asus-wireless.c static void asus_wireless_notify(struct acpi_device *adev, u32 event) event 115 drivers/platform/x86/asus-wireless.c dev_dbg(&adev->dev, "event=%#x\n", event); event 116 drivers/platform/x86/asus-wireless.c if (event != 0x88) { event 117 drivers/platform/x86/asus-wireless.c dev_notice(&adev->dev, "Unknown ASHS event: %#x\n", event); event 836 drivers/platform/x86/asus-wmi.c static void asus_rfkill_notify(acpi_handle handle, u32 event, void *data) event 840 drivers/platform/x86/asus-wmi.c if (event != ACPI_NOTIFY_BUS_CHECK) event 183 drivers/platform/x86/classmate-laptop.c static void cmpc_accel_handler_v4(struct acpi_device *dev, u32 event) event 185 drivers/platform/x86/classmate-laptop.c if (event == 0x81) { event 526 drivers/platform/x86/classmate-laptop.c static void cmpc_accel_handler(struct acpi_device *dev, u32 event) event 528 drivers/platform/x86/classmate-laptop.c if (event == 0x81) { event 696 drivers/platform/x86/classmate-laptop.c static void cmpc_tablet_handler(struct acpi_device *dev, u32 event) event 701 drivers/platform/x86/classmate-laptop.c if (event == 0x81) { event 1029 drivers/platform/x86/classmate-laptop.c static void cmpc_keys_handler(struct acpi_device *dev, u32 event) event 1034 drivers/platform/x86/classmate-laptop.c if ((event & 0x0F) < ARRAY_SIZE(cmpc_keys_codes)) event 1035 drivers/platform/x86/classmate-laptop.c code = cmpc_keys_codes[event & 0x0F]; event 1037 drivers/platform/x86/classmate-laptop.c input_report_key(inputdev, code, !(event & 0x10)); event 210 drivers/platform/x86/dell-rbtn.c static void rbtn_notify(struct acpi_device *device, u32 event); event 450 drivers/platform/x86/dell-rbtn.c static void rbtn_notify(struct acpi_device *device, u32 event) event 463 drivers/platform/x86/dell-rbtn.c if (event != 0x80) { event 465 drivers/platform/x86/dell-rbtn.c event); event 475 drivers/platform/x86/dell-rbtn.c atomic_notifier_call_chain(&rbtn_chain_head, event, device); event 28 drivers/platform/x86/dell-wmi-aio.c u16 event[]; event 61 drivers/platform/x86/dell-wmi-aio.c struct dell_wmi_event *event = (struct dell_wmi_event *)buffer; event 63 drivers/platform/x86/dell-wmi-aio.c if (event == NULL || length < 6) event 66 drivers/platform/x86/dell-wmi-aio.c if ((event->type == 0 || event->type == 0xf) && event 67 drivers/platform/x86/dell-wmi-aio.c event->length >= 2) event 77 drivers/platform/x86/dell-wmi-aio.c struct dell_wmi_event *event; event 100 drivers/platform/x86/dell-wmi-aio.c event = (struct dell_wmi_event *) event 102 drivers/platform/x86/dell-wmi-aio.c scancode = event->event[0]; event 645 drivers/platform/x86/eeepc-laptop.c static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) event 649 drivers/platform/x86/eeepc-laptop.c if (event != ACPI_NOTIFY_BUS_CHECK) event 1201 drivers/platform/x86/eeepc-laptop.c static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event) event 1205 drivers/platform/x86/eeepc-laptop.c if (!sparse_keymap_report_event(eeepc->inputdev, event, 1, true)) event 1206 drivers/platform/x86/eeepc-laptop.c pr_info("Unknown key %x pressed\n", event); event 1209 drivers/platform/x86/eeepc-laptop.c static void eeepc_acpi_notify(struct acpi_device *device, u32 event) event 1215 drivers/platform/x86/eeepc-laptop.c if (event > ACPI_MAX_SYS_NOTIFY) event 1217 drivers/platform/x86/eeepc-laptop.c count = eeepc->event_count[event % 128]++; event 1219 drivers/platform/x86/eeepc-laptop.c 
dev_name(&device->dev), event, event 1223 drivers/platform/x86/eeepc-laptop.c if (event < NOTIFY_BRN_MIN || event > NOTIFY_BRN_MAX) { event 1224 drivers/platform/x86/eeepc-laptop.c eeepc_input_notify(eeepc, event); event 1236 drivers/platform/x86/eeepc-laptop.c new_brightness = event - NOTIFY_BRN_MIN; event 1239 drivers/platform/x86/eeepc-laptop.c event = NOTIFY_BRN_MIN; /* brightness down */ event 1241 drivers/platform/x86/eeepc-laptop.c event = NOTIFY_BRN_MAX; /* brightness up */ event 1248 drivers/platform/x86/eeepc-laptop.c eeepc_input_notify(eeepc, event); event 418 drivers/platform/x86/fujitsu-laptop.c static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event) event 423 drivers/platform/x86/fujitsu-laptop.c if (event != ACPI_FUJITSU_NOTIFY_CODE) { event 425 drivers/platform/x86/fujitsu-laptop.c event); event 894 drivers/platform/x86/fujitsu-laptop.c static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event) event 901 drivers/platform/x86/fujitsu-laptop.c if (event != ACPI_FUJITSU_NOTIFY_CODE) { event 903 drivers/platform/x86/fujitsu-laptop.c event); event 59 drivers/platform/x86/hp-wireless.c static void hpwl_notify(struct acpi_device *acpi_dev, u32 event) event 61 drivers/platform/x86/hp-wireless.c if (event != 0x80) { event 62 drivers/platform/x86/hp-wireless.c pr_info("Received unknown event (0x%x)\n", event); event 883 drivers/platform/x86/ideapad-laptop.c static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) event 341 drivers/platform/x86/intel-hid.c static void notify_handler(acpi_handle handle, u32 event, void *context) event 354 drivers/platform/x86/intel-hid.c if (event == 0xce) event 358 drivers/platform/x86/intel-hid.c if (event == 0xc0 || !priv->array) event 361 drivers/platform/x86/intel-hid.c if (!sparse_keymap_entry_from_scancode(priv->array, event)) { event 362 drivers/platform/x86/intel-hid.c dev_info(&device->dev, "unknown event 0x%x\n", event); event 379 drivers/platform/x86/intel-hid.c if (event == 0xce) { event 385 drivers/platform/x86/intel-hid.c if (event == 0xcf) { event 393 drivers/platform/x86/intel-hid.c if (event != 0xc0) { event 395 drivers/platform/x86/intel-hid.c !sparse_keymap_report_event(priv->array, event, 1, true)) event 396 drivers/platform/x86/intel-hid.c dev_dbg(&device->dev, "unknown event 0x%x\n", event); event 74 drivers/platform/x86/intel-vbtn.c static void notify_handler(acpi_handle handle, u32 event, void *context) event 78 drivers/platform/x86/intel-vbtn.c unsigned int val = !(event & 1); /* Even=press, Odd=release */ event 83 drivers/platform/x86/intel-vbtn.c ke = sparse_keymap_entry_from_scancode(priv->input_dev, event); event 94 drivers/platform/x86/intel-vbtn.c event, event 106 drivers/platform/x86/intel-vbtn.c ke_rel = sparse_keymap_entry_from_scancode(priv->input_dev, event | 1); event 109 drivers/platform/x86/intel-vbtn.c if (sparse_keymap_report_event(priv->input_dev, event, val, autorelease)) event 113 drivers/platform/x86/intel-vbtn.c dev_dbg(&device->dev, "unknown event index 0x%x\n", event); event 883 drivers/platform/x86/intel_telemetry_debugfs.c unsigned long event, void *ptr) event 885 drivers/platform/x86/intel_telemetry_debugfs.c switch (event) { event 262 drivers/platform/x86/lg-laptop.c static void acpi_notify(struct acpi_device *device, u32 event) event 266 drivers/platform/x86/lg-laptop.c acpi_handle_debug(device->handle, "notify: %d\n", event); event 165 drivers/platform/x86/panasonic-laptop.c static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 
event); event 465 drivers/platform/x86/panasonic-laptop.c static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event) event 469 drivers/platform/x86/panasonic-laptop.c switch (event) { event 374 drivers/platform/x86/sony-laptop.c static void sony_laptop_report_input_event(u8 event) event 381 drivers/platform/x86/sony-laptop.c if (event == SONYPI_EVENT_FNKEY_RELEASED || event 382 drivers/platform/x86/sony-laptop.c event == SONYPI_EVENT_ANYBUTTON_RELEASED) { event 388 drivers/platform/x86/sony-laptop.c switch (event) { event 409 drivers/platform/x86/sony-laptop.c if (event >= ARRAY_SIZE(sony_laptop_input_index)) { event 410 drivers/platform/x86/sony-laptop.c dprintk("sony_laptop_report_input_event, event not known: %d\n", event); event 413 drivers/platform/x86/sony-laptop.c if ((scancode = sony_laptop_input_index[event]) != -1) { event 436 drivers/platform/x86/sony-laptop.c dprintk("unknown input event %.2x\n", event); event 1081 drivers/platform/x86/sony-laptop.c u8 event; event 1148 drivers/platform/x86/sony-laptop.c static int sony_nc_hotkeys_decode(u32 event, unsigned int handle) event 1156 drivers/platform/x86/sony-laptop.c event); event 1169 drivers/platform/x86/sony-laptop.c ret = key_event->event; event 1176 drivers/platform/x86/sony-laptop.c event, result, handle); event 1189 drivers/platform/x86/sony-laptop.c static void sony_nc_notify(struct acpi_device *device, u32 event) event 1191 drivers/platform/x86/sony-laptop.c u32 real_ev = event; event 1195 drivers/platform/x86/sony-laptop.c dprintk("sony_nc_notify, event: 0x%.2x\n", event); event 1197 drivers/platform/x86/sony-laptop.c if (event >= 0x90) { event 1201 drivers/platform/x86/sony-laptop.c unsigned int offset = event - 0x90; event 1205 drivers/platform/x86/sony-laptop.c event); event 1216 drivers/platform/x86/sony-laptop.c ret = sony_nc_hotkeys_decode(event, handle); event 1265 drivers/platform/x86/sony-laptop.c event, handle); event 3395 drivers/platform/x86/sony-laptop.c u8 event; event 4279 drivers/platform/x86/sony-laptop.c static void sonypi_compat_report_event(u8 event) event 4281 drivers/platform/x86/sony-laptop.c kfifo_in_locked(&sonypi_compat.fifo, (unsigned char *)&event, event 4282 drivers/platform/x86/sony-laptop.c sizeof(event), &sonypi_compat.fifo_lock); event 4327 drivers/platform/x86/sony-laptop.c static void sonypi_compat_report_event(u8 event) { } event 4620 drivers/platform/x86/sony-laptop.c for (j = 0; dev->event_types[i].events[j].event; j++) { event 4623 drivers/platform/x86/sony-laptop.c dev->event_types[i].events[j].event; event 76 drivers/platform/x86/surfacepro3_button.c static void surface_button_notify(struct acpi_device *device, u32 event) event 83 drivers/platform/x86/surfacepro3_button.c switch (event) { event 117 drivers/platform/x86/surfacepro3_button.c "Unsupported event [0x%x]\n", event); event 764 drivers/platform/x86/thinkpad_acpi.c static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data) event 774 drivers/platform/x86/thinkpad_acpi.c ibm->acpi->notify(ibm, event); event 4113 drivers/platform/x86/thinkpad_acpi.c static void hotkey_notify(struct ibm_struct *ibm, u32 event) event 4120 drivers/platform/x86/thinkpad_acpi.c if (event != 0x80) { event 4121 drivers/platform/x86/thinkpad_acpi.c pr_err("unknown HKEY notification event %d\n", event); event 4126 drivers/platform/x86/thinkpad_acpi.c event, 0); event 4212 drivers/platform/x86/thinkpad_acpi.c event, hkey); event 136 drivers/platform/x86/topstar-laptop.c static void topstar_input_notify(struct 
topstar_laptop *topstar, int event) event 138 drivers/platform/x86/topstar-laptop.c if (!sparse_keymap_report_event(topstar->input, event, 1, true)) event 139 drivers/platform/x86/topstar-laptop.c pr_info("unknown event = 0x%02x\n", event); event 235 drivers/platform/x86/topstar-laptop.c static void topstar_acpi_notify(struct acpi_device *device, u32 event) event 242 drivers/platform/x86/topstar-laptop.c if (event == 0x83 || event == 0x84) { event 243 drivers/platform/x86/topstar-laptop.c dup = &dup_evnt[event - 0x83]; event 251 drivers/platform/x86/topstar-laptop.c topstar_input_notify(topstar, event); event 3206 drivers/platform/x86/toshiba_acpi.c static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) event 3210 drivers/platform/x86/toshiba_acpi.c switch (event) { event 3225 drivers/platform/x86/toshiba_acpi.c pr_info("Dock event received %x\n", event); event 3235 drivers/platform/x86/toshiba_acpi.c pr_info("SATA power event received %x\n", event); event 3255 drivers/platform/x86/toshiba_acpi.c pr_info("Unknown event received %x\n", event); event 3261 drivers/platform/x86/toshiba_acpi.c event, (event == 0x80) ? event 40 drivers/platform/x86/toshiba_bluetooth.c static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event); event 207 drivers/platform/x86/toshiba_bluetooth.c static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event) event 132 drivers/platform/x86/toshiba_haps.c static void toshiba_haps_notify(struct acpi_device *device, u32 event) event 134 drivers/platform/x86/toshiba_haps.c pr_debug("Received event: 0x%x", event); event 138 drivers/platform/x86/toshiba_haps.c event, 0); event 604 drivers/platform/x86/wmi.c acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out) event 614 drivers/platform/x86/wmi.c params[0].integer.value = event; event 620 drivers/platform/x86/wmi.c (gblock->notify_id == event)) event 1273 drivers/platform/x86/wmi.c static void acpi_wmi_notify_handler(acpi_handle handle, u32 event, event 1285 drivers/platform/x86/wmi.c (block->notify_id == event)) event 1309 drivers/platform/x86/wmi.c params[0].integer.value = event; event 1326 drivers/platform/x86/wmi.c wblock->handler(event, wblock->handler_data); event 1337 drivers/platform/x86/wmi.c event, 0); event 61 drivers/platform/x86/xo15-ebook.c static void ebook_switch_notify(struct acpi_device *device, u32 event) event 63 drivers/platform/x86/xo15-ebook.c switch (event) { event 70 drivers/platform/x86/xo15-ebook.c "Unsupported event [0x%x]\n", event)); event 393 drivers/pnp/card.c if (link->pm_state.event == state.event) event 403 drivers/pnp/card.c if (link->pm_state.event == PM_EVENT_ON) event 149 drivers/pnp/pnpacpi/core.c power_state = (state.event == PM_EVENT_ON) ? 
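The platform/x86 entries above (hp-wireless, intel-hid, intel-vbtn, surfacepro3_button, topstar-laptop) all share one shape: an ACPI notify callback receives a raw u32 event code, translates it through a sparse keymap when possible, and logs anything it does not recognise. A minimal sketch of that pattern follows, assuming a hypothetical driver with an invented demo_priv structure and keymap; only sparse_keymap_report_event() and acpi_driver_data() are real kernel APIs here, and the fragment is kernel-style code, not a complete module.

/*
 * Hedged sketch of the notify pattern indexed above: the ACPI core hands
 * the driver a raw u32 event code, which is either translated through a
 * sparse keymap or merely logged.  demo_priv and demo_acpi_notify are
 * hypothetical names; this would be installed as the driver's ACPI
 * notify callback.
 */
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

struct demo_priv {
	struct input_dev *input;	/* input device carrying the sparse keymap */
};

static void demo_acpi_notify(struct acpi_device *device, u32 event)
{
	struct demo_priv *priv = acpi_driver_data(device);

	/* Report the scancode; autorelease emits the matching key-up for us. */
	if (sparse_keymap_report_event(priv->input, event, 1, true))
		return;

	/* Codes not in the keymap are only logged, as the drivers above do. */
	dev_dbg(&device->dev, "unknown event 0x%x\n", event);
}

The autorelease flag asks the sparse-keymap helper to synthesise the key-release after the press, which is why the drivers listed above pass "1, true" for momentary buttons.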
event 101 drivers/power/avs/rockchip-io-domain.c unsigned long event, event 122 drivers/power/avs/rockchip-io-domain.c if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) { event 126 drivers/power/avs/rockchip-io-domain.c } else if (event & (REGULATOR_EVENT_VOLTAGE_CHANGE | event 138 drivers/power/avs/rockchip-io-domain.c if (event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) event 143 drivers/power/avs/rockchip-io-domain.c if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) event 1677 drivers/power/supply/ab8500_charger.c unsigned long event, void *data) event 3157 drivers/power/supply/ab8500_charger.c unsigned long event, void *power) event 3167 drivers/power/supply/ab8500_charger.c if (event != USB_EVENT_VBUS) { event 645 drivers/power/supply/axp288_charger.c unsigned long event, void *param) event 676 drivers/power/supply/axp288_charger.c unsigned long event, void *param) event 461 drivers/power/supply/charger-manager.c static void uevent_notify(struct charger_manager *cm, const char *event) event 469 drivers/power/supply/charger-manager.c if (!strncmp(env_str, event, UEVENT_BUF_SIZE)) event 471 drivers/power/supply/charger-manager.c strncpy(env_str_save, event, UEVENT_BUF_SIZE); event 475 drivers/power/supply/charger-manager.c if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE)) event 477 drivers/power/supply/charger-manager.c strncpy(env_str_save, event, UEVENT_BUF_SIZE); event 481 drivers/power/supply/charger-manager.c if (event == NULL) { event 494 drivers/power/supply/charger-manager.c if (!strncmp(env_str, event, UEVENT_BUF_SIZE)) event 498 drivers/power/supply/charger-manager.c strncpy(env_str, event, UEVENT_BUF_SIZE); event 501 drivers/power/supply/charger-manager.c dev_info(cm->dev, "%s\n", event); event 1160 drivers/power/supply/charger-manager.c unsigned long event, void *ptr) event 1169 drivers/power/supply/charger-manager.c cable->attached = event; event 398 drivers/power/supply/da9030_battery.c static int da9030_battery_event(struct notifier_block *nb, unsigned long event, event 404 drivers/power/supply/da9030_battery.c switch (event) { event 420 drivers/power/supply/lp8788-charger.c enum lp8788_charger_event event = lp8788_is_charger_detected(pchg); event 422 drivers/power/supply/lp8788-charger.c pdata->charger_event(pchg->lp, event); event 231 drivers/power/supply/pda_power.c unsigned long event, void *unused) event 233 drivers/power/supply/pda_power.c switch (event) { event 1401 drivers/power/supply/rt9455_charger.c unsigned long event, void *power) event 1426 drivers/power/supply/rt9455_charger.c dev_dbg(dev, "Received USB event %lu\n", event); event 1427 drivers/power/supply/rt9455_charger.c switch (event) { event 148 drivers/power/supply/twl4030_charger.c unsigned long event; event 653 drivers/power/supply/twl4030_charger.c switch (bci->event) { event 677 drivers/power/supply/twl4030_charger.c bci->event = val; event 69 drivers/pps/clients/pps-gpio.c static void pps_gpio_echo(struct pps_device *pps, int event, void *data) event 74 drivers/pps/clients/pps-gpio.c switch (event) { event 41 drivers/pps/kapi.c static void pps_echo_client_default(struct pps_device *pps, int event, event 45 drivers/pps/kapi.c event & PPS_CAPTUREASSERT ? "assert" : "", event 46 drivers/pps/kapi.c event & PPS_CAPTURECLEAR ?
"clear" : ""); event 159 drivers/pps/kapi.c void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, event 167 drivers/pps/kapi.c BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); event 178 drivers/pps/kapi.c pps->info.echo(pps, event, data); event 182 drivers/pps/kapi.c if (event & pps->params.mode & PPS_CAPTUREASSERT) { event 196 drivers/pps/kapi.c if (event & pps->params.mode & PPS_CAPTURECLEAR) { event 211 drivers/pps/kapi.c pps_kc_event(pps, ts, event); event 100 drivers/pps/kc.c int event) event 106 drivers/pps/kc.c if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode) event 20 drivers/pps/kc.h struct pps_event_time *ts, int event); event 29 drivers/pps/kc.h struct pps_event_time *ts, int event) {} event 412 drivers/ps3/ps3-sys-manager.c } event; event 414 drivers/ps3/ps3-sys-manager.c BUILD_BUG_ON(sizeof(event) != 16); event 416 drivers/ps3/ps3-sys-manager.c result = ps3_vuart_read(dev, &event, sizeof(event)); event 419 drivers/ps3/ps3-sys-manager.c if (event.version != 1) { event 421 drivers/ps3/ps3-sys-manager.c __func__, __LINE__, event.version); event 425 drivers/ps3/ps3-sys-manager.c switch (event.type) { event 429 drivers/ps3/ps3-sys-manager.c (event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft" event 442 drivers/ps3/ps3-sys-manager.c __func__, __LINE__, event.value); event 447 drivers/ps3/ps3-sys-manager.c (event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft" event 460 drivers/ps3/ps3-sys-manager.c __func__, __LINE__, event.value); event 464 drivers/ps3/ps3-sys-manager.c __func__, __LINE__, event.value); event 465 drivers/ps3/ps3-sys-manager.c pr_info("PS3 Thermal Alert Zone %u\n", event.value); event 469 drivers/ps3/ps3-sys-manager.c __func__, __LINE__, event.value); event 473 drivers/ps3/ps3-sys-manager.c __func__, __LINE__, event.type); event 212 drivers/ps3/ps3av.c int event; event 249 drivers/ps3/ps3av.c event = ps3av_parse_event_packet(recv_buf); event 251 drivers/ps3/ps3av.c } while (event); event 404 drivers/ptp/ptp_chardev.c struct ptp_extts_event *event; event 431 drivers/ptp/ptp_chardev.c event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL); event 432 drivers/ptp/ptp_chardev.c if (!event) { event 445 drivers/ptp/ptp_chardev.c event[i] = queue->buf[queue->head]; event 456 drivers/ptp/ptp_chardev.c if (copy_to_user(buf, event, cnt)) event 459 drivers/ptp/ptp_chardev.c kfree(event); event 312 drivers/ptp/ptp_clock.c void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event) event 316 drivers/ptp/ptp_clock.c switch (event->type) { event 322 drivers/ptp/ptp_clock.c enqueue_external_timestamp(&ptp->tsevq, event); event 332 drivers/ptp/ptp_clock.c pps_event(ptp->pps_source, &event->pps_times, event 76 drivers/ptp/ptp_ixp46x.c struct ptp_clock_event event; event 79 drivers/ptp/ptp_ixp46x.c val = __raw_readl(&regs->event); event 86 drivers/ptp/ptp_ixp46x.c event.type = PTP_CLOCK_EXTTS; event 87 drivers/ptp/ptp_ixp46x.c event.index = 0; event 88 drivers/ptp/ptp_ixp46x.c event.timestamp = ((u64) hi) << 32; event 89 drivers/ptp/ptp_ixp46x.c event.timestamp |= lo; event 90 drivers/ptp/ptp_ixp46x.c event.timestamp <<= TICKS_NS_SHIFT; event 91 drivers/ptp/ptp_ixp46x.c ptp_clock_event(ixp_clock->ptp_clock, &event); event 100 drivers/ptp/ptp_ixp46x.c event.type = PTP_CLOCK_EXTTS; event 101 drivers/ptp/ptp_ixp46x.c event.index = 1; event 102 drivers/ptp/ptp_ixp46x.c event.timestamp = ((u64) hi) << 32; event 103 drivers/ptp/ptp_ixp46x.c event.timestamp |= lo; event 104 drivers/ptp/ptp_ixp46x.c event.timestamp <<= TICKS_NS_SHIFT; event 105
drivers/ptp/ptp_ixp46x.c ptp_clock_event(ixp_clock->ptp_clock, &event); event 113 drivers/ptp/ptp_ixp46x.c __raw_writel(ack, &regs->event); event 304 drivers/ptp/ptp_ixp46x.c __raw_writel(TTIPEND, &ixp_clock.regs->event); event 44 drivers/ptp/ptp_pch.c u32 event; event 353 drivers/ptp/ptp_pch.c struct ptp_clock_event event; event 356 drivers/ptp/ptp_pch.c val = ioread32(&regs->event); event 363 drivers/ptp/ptp_pch.c event.type = PTP_CLOCK_EXTTS; event 364 drivers/ptp/ptp_pch.c event.index = 0; event 365 drivers/ptp/ptp_pch.c event.timestamp = ((u64) hi) << 32; event 366 drivers/ptp/ptp_pch.c event.timestamp |= lo; event 367 drivers/ptp/ptp_pch.c event.timestamp <<= TICKS_NS_SHIFT; event 368 drivers/ptp/ptp_pch.c ptp_clock_event(pch_dev->ptp_clock, &event); event 377 drivers/ptp/ptp_pch.c event.type = PTP_CLOCK_EXTTS; event 378 drivers/ptp/ptp_pch.c event.index = 1; event 379 drivers/ptp/ptp_pch.c event.timestamp = ((u64) hi) << 32; event 380 drivers/ptp/ptp_pch.c event.timestamp |= lo; event 381 drivers/ptp/ptp_pch.c event.timestamp <<= TICKS_NS_SHIFT; event 382 drivers/ptp/ptp_pch.c ptp_clock_event(pch_dev->ptp_clock, &event); event 390 drivers/ptp/ptp_pch.c iowrite32(ack, &regs->event); event 642 drivers/ptp/ptp_pch.c iowrite32(PCH_TSE_TTIPEND, &chip->regs->event); event 81 drivers/ptp/ptp_qoriq.c struct ptp_clock_event event; event 101 drivers/ptp/ptp_qoriq.c event.type = PTP_CLOCK_EXTTS; event 102 drivers/ptp/ptp_qoriq.c event.index = index; event 109 drivers/ptp/ptp_qoriq.c event.timestamp = ((u64) hi) << 32; event 110 drivers/ptp/ptp_qoriq.c event.timestamp |= lo; event 111 drivers/ptp/ptp_qoriq.c ptp_clock_event(ptp_qoriq->clock, &event); event 128 drivers/ptp/ptp_qoriq.c struct ptp_clock_event event; event 154 drivers/ptp/ptp_qoriq.c event.type = PTP_CLOCK_ALARM; event 155 drivers/ptp/ptp_qoriq.c event.index = 0; event 156 drivers/ptp/ptp_qoriq.c event.timestamp = ptp_qoriq->alarm_value; event 157 drivers/ptp/ptp_qoriq.c ptp_clock_event(ptp_qoriq->clock, &event); event 179 drivers/ptp/ptp_qoriq.c event.type = PTP_CLOCK_PPS; event 180 drivers/ptp/ptp_qoriq.c ptp_clock_event(ptp_qoriq->clock, &event); event 67 drivers/ptp/ptp_sysfs.c struct ptp_extts_event event; event 72 drivers/ptp/ptp_sysfs.c memset(&event, 0, sizeof(event)); event 80 drivers/ptp/ptp_sysfs.c event = queue->buf[queue->head]; event 89 drivers/ptp/ptp_sysfs.c event.index, event.t.sec, event.t.nsec); event 283 drivers/pwm/pwm-lpc18xx-sct.c unsigned long event; event 285 drivers/pwm/pwm-lpc18xx-sct.c event = find_first_zero_bit(&lpc18xx_pwm->event_map, event 288 drivers/pwm/pwm-lpc18xx-sct.c if (event >= LPC18XX_PWM_EVENT_MAX) { event 294 drivers/pwm/pwm-lpc18xx-sct.c set_bit(event, &lpc18xx_pwm->event_map); event 295 drivers/pwm/pwm-lpc18xx-sct.c lpc18xx_data->duty_event = event; event 218 drivers/pwm/pwm-sifive.c unsigned long event, void *data) event 224 drivers/pwm/pwm-sifive.c if (event == POST_RATE_CHANGE) event 1375 drivers/rapidio/devices/rio_mport_cdev.c struct rio_event *event) event 1379 drivers/rapidio/devices/rio_mport_cdev.c if (!(priv->event_mask & event->header)) event 1383 drivers/rapidio/devices/rio_mport_cdev.c overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event) event 1384 drivers/rapidio/devices/rio_mport_cdev.c || kfifo_in(&priv->event_fifo, (unsigned char *)event, event 1385 drivers/rapidio/devices/rio_mport_cdev.c sizeof(*event)) != sizeof(*event); event 1404 drivers/rapidio/devices/rio_mport_cdev.c struct rio_event event; event 1407 drivers/rapidio/devices/rio_mport_cdev.c event.header =
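The ptp_ixp46x, ptp_pch and ptp_qoriq entries above repeat one idiom: an interrupt handler reads a hi/lo capture register pair, packs it into a struct ptp_clock_event of type PTP_CLOCK_EXTTS, and queues it with ptp_clock_event(). A hedged sketch of just that step follows; the helper name and the hi/lo parameters are hypothetical stand-ins for the register reads, while struct ptp_clock_event and ptp_clock_event() are the real <linux/ptp_clock_kernel.h> API shown in the ptp_clock.c entry above.

/*
 * Hedged sketch of the external-timestamp path indexed above.  Some of
 * the drivers additionally scale the raw count (e.g. the TICKS_NS_SHIFT
 * shift in ptp_ixp46x/ptp_pch) before reporting; that device-specific
 * step is omitted here.
 */
#include <linux/ptp_clock_kernel.h>

static void demo_report_extts(struct ptp_clock *clock, int index,
			      u32 hi, u32 lo)
{
	struct ptp_clock_event event;

	event.type = PTP_CLOCK_EXTTS;
	event.index = index;
	event.timestamp = ((u64)hi << 32) | lo;	/* capture register is split hi/lo */

	ptp_clock_event(clock, &event);		/* hand the event to the PTP core */
}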
RIO_DOORBELL; event 1408 drivers/rapidio/devices/rio_mport_cdev.c event.u.doorbell.rioid = src; event 1409 drivers/rapidio/devices/rio_mport_cdev.c event.u.doorbell.payload = info; event 1419 drivers/rapidio/devices/rio_mport_cdev.c rio_mport_add_event(priv, &event); event 1524 drivers/rapidio/devices/rio_mport_cdev.c struct rio_event event; event 1527 drivers/rapidio/devices/rio_mport_cdev.c event.header = RIO_PORTWRITE; event 1528 drivers/rapidio/devices/rio_mport_cdev.c memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE); event 1535 drivers/rapidio/devices/rio_mport_cdev.c rio_mport_add_event(priv, &event); event 2300 drivers/rapidio/devices/rio_mport_cdev.c struct rio_event event; event 2306 drivers/rapidio/devices/rio_mport_cdev.c if (count % sizeof(event)) event 2310 drivers/rapidio/devices/rio_mport_cdev.c while ((count - len) >= (int)sizeof(event)) { event 2311 drivers/rapidio/devices/rio_mport_cdev.c if (copy_from_user(&event, buf, sizeof(event))) event 2314 drivers/rapidio/devices/rio_mport_cdev.c if (event.header != RIO_DOORBELL) event 2318 drivers/rapidio/devices/rio_mport_cdev.c event.u.doorbell.rioid, event 2319 drivers/rapidio/devices/rio_mport_cdev.c event.u.doorbell.payload); event 2323 drivers/rapidio/devices/rio_mport_cdev.c len += sizeof(event); event 2324 drivers/rapidio/devices/rio_mport_cdev.c buf += sizeof(event); event 100 drivers/regulator/core.c unsigned long event, void *data); event 4382 drivers/regulator/core.c unsigned long event, void *data) event 4385 drivers/regulator/core.c return blocking_notifier_call_chain(&rdev->notifier, event, data); event 4598 drivers/regulator/core.c unsigned long event, void *data) event 4602 drivers/regulator/core.c _notifier_call_chain(rdev, event, data); event 354 drivers/regulator/ltc3589.c unsigned int i, irqstat, event; event 359 drivers/regulator/ltc3589.c event = REGULATOR_EVENT_OVER_TEMP; event 363 drivers/regulator/ltc3589.c event, NULL); event 369 drivers/regulator/ltc3589.c event = REGULATOR_EVENT_UNDER_VOLTAGE; event 373 drivers/regulator/ltc3589.c event, NULL); event 271 drivers/regulator/ltc3676.c unsigned int i, irqstat, event; event 278 drivers/regulator/ltc3676.c event = REGULATOR_EVENT_OVER_TEMP; event 282 drivers/regulator/ltc3676.c event, NULL); event 289 drivers/regulator/ltc3676.c event = REGULATOR_EVENT_UNDER_VOLTAGE; event 293 drivers/regulator/ltc3676.c event, NULL); event 183 drivers/remoteproc/qcom_sysmon.c u32 event; event 216 drivers/remoteproc/qcom_sysmon.c event), event 351 drivers/remoteproc/qcom_sysmon.c req.event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN; event 454 drivers/remoteproc/qcom_sysmon.c static int sysmon_notify(struct notifier_block *nb, unsigned long event, event 328 drivers/rtc/rtc-armada38x.c int event = RTC_IRQF | RTC_AF; event 346 drivers/rtc/rtc-armada38x.c event |= RTC_UF; event 348 drivers/rtc/rtc-armada38x.c event |= RTC_PF; event 351 drivers/rtc/rtc-armada38x.c rtc_update_irq(rtc->rtc_dev, 1, event); event 136 drivers/rtc/rtc-imx-sc.c unsigned long event, void *group) event 139 drivers/rtc/rtc-imx-sc.c if (!((event & SC_IRQ_RTC) && (*(u8 *)group == SC_IRQ_GROUP_RTC))) event 293 drivers/rtc/rtc-m48t59.c u8 event; event 296 drivers/rtc/rtc-m48t59.c event = M48T59_READ(M48T59_FLAGS); event 299 drivers/rtc/rtc-m48t59.c if (event & M48T59_FLAGS_AF) { event 3811 drivers/s390/block/dasd.c int dasd_generic_notify(struct ccw_device *cdev, int event) event 3820 drivers/s390/block/dasd.c switch (event) { event 17 drivers/s390/block/scm_drv.c static void scm_notify(struct scm_device 
*scmdev, enum scm_event event) event 21 drivers/s390/block/scm_drv.c switch (event) { event 872 drivers/s390/char/con3215.c unsigned long event, void *data) event 564 drivers/s390/char/con3270.c unsigned long event, void *data) event 1003 drivers/s390/char/sclp.c sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) event 1086 drivers/s390/char/sclp.c static int sclp_undo_suspend(enum sclp_pm_event event) event 1099 drivers/s390/char/sclp.c sclp_pm_event(event, 0); event 1224 drivers/s390/char/sclp.c unsigned long event, void *data) event 264 drivers/s390/char/sclp_con.c unsigned long event, void *data) event 855 drivers/s390/char/sclp_vt220.c unsigned long event, void *data) event 275 drivers/s390/char/vmur.c DECLARE_COMPLETION_ONSTACK(event); event 283 drivers/s390/char/vmur.c urd->io_done = &event; event 293 drivers/s390/char/vmur.c wait_for_completion(&event); event 1015 drivers/s390/cio/css.c unsigned long event, event 1043 drivers/s390/cio/css.c static int css_power_event(struct notifier_block *this, unsigned long event, event 1049 drivers/s390/cio/css.c switch (event) { event 1170 drivers/s390/cio/device.c struct chp_link *link, int event) event 1178 drivers/s390/cio/device.c switch (event) { event 296 drivers/s390/cio/device_fsm.c int ccw_device_notify(struct ccw_device *cdev, int event) event 306 drivers/s390/cio/device_fsm.c event); event 311 drivers/s390/cio/device_fsm.c if (cdev->drv->notify(cdev, event)) event 163 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 182 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 187 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 194 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 200 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 206 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 218 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 224 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 243 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 332 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 353 drivers/s390/cio/vfio_ccw_fsm.c enum vfio_ccw_event event) event 136 drivers/s390/cio/vfio_ccw_private.h int event) event 138 drivers/s390/cio/vfio_ccw_private.h vfio_ccw_jumptable[private->state][event](private, event); event 676 drivers/s390/crypto/ap_bus.c static int ap_power_event(struct notifier_block *this, unsigned long event, event 679 drivers/s390/crypto/ap_bus.c switch (event) { event 438 drivers/s390/crypto/ap_queue.c enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event) event 440 drivers/s390/crypto/ap_queue.c return ap_jumptable[aq->state][event](aq); event 443 drivers/s390/crypto/ap_queue.c enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event) event 447 drivers/s390/crypto/ap_queue.c while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN) event 132 drivers/s390/net/ctcm_fsms.c static void ctcm_action_nop(fsm_instance *fi, int event, void *arg); event 138 drivers/s390/net/ctcm_fsms.c static void chx_txdone(fsm_instance *fi, int event, void *arg); event 139 drivers/s390/net/ctcm_fsms.c static void chx_rx(fsm_instance *fi, int event, void *arg); event 140 drivers/s390/net/ctcm_fsms.c static void chx_rxidle(fsm_instance *fi, int event, void *arg); event 141 drivers/s390/net/ctcm_fsms.c static void chx_firstio(fsm_instance *fi, int event, void *arg); event 142 drivers/s390/net/ctcm_fsms.c static void 
ctcm_chx_setmode(fsm_instance *fi, int event, void *arg); event 143 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_start(fsm_instance *fi, int event, void *arg); event 144 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg); event 145 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg); event 146 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg); event 147 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg); event 148 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg); event 149 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg); event 150 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg); event 151 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg); event 152 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg); event 153 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg); event 154 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg); event 155 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg); event 161 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg); event 162 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg); event 163 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg); event 180 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg); event 183 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); event 229 drivers/s390/net/ctcm_fsms.c static void ctcm_action_nop(fsm_instance *fi, int event, void *arg) event 246 drivers/s390/net/ctcm_fsms.c static void chx_txdone(fsm_instance *fi, int event, void *arg) event 334 drivers/s390/net/ctcm_fsms.c void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg) event 355 drivers/s390/net/ctcm_fsms.c static void chx_rx(fsm_instance *fi, int event, void *arg) event 431 drivers/s390/net/ctcm_fsms.c static void chx_firstio(fsm_instance *fi, int event, void *arg) event 456 drivers/s390/net/ctcm_fsms.c chx_rxidle(fi, event, arg); event 509 drivers/s390/net/ctcm_fsms.c static void chx_rxidle(fsm_instance *fi, int event, void *arg) event 537 drivers/s390/net/ctcm_fsms.c chx_firstio(fi, event, arg); event 548 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) event 565 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ event 571 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_TIMER) /* see above comments */ event 588 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) event 651 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) event 664 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */ event 672 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_STOP) event 678 drivers/s390/net/ctcm_fsms.c if (event != CTC_EVENT_STOP) { event 739 
drivers/s390/net/ctcm_fsms.c static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg) event 752 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg) event 766 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg) event 778 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) event 790 drivers/s390/net/ctcm_fsms.c ((event == CTC_EVENT_UC_RCRESET) || event 791 drivers/s390/net/ctcm_fsms.c (event == CTC_EVENT_UC_RSRESET))) { event 807 drivers/s390/net/ctcm_fsms.c CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], event 827 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg) event 837 drivers/s390/net/ctcm_fsms.c CTCM_FUNTAIL, ch->id, event, dev->name); event 844 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ event 850 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_TIMER) event 869 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) event 875 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_TIMER) { event 880 drivers/s390/net/ctcm_fsms.c ctcm_chx_restart(fi, event, arg); event 888 drivers/s390/net/ctcm_fsms.c ctc_ch_event_names[event], fsm_getstate_str(fi)); event 892 drivers/s390/net/ctcm_fsms.c "error %s\n", ctc_ch_event_names[event]); event 904 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg) event 924 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) event 956 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) event 962 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_TIMER) { event 965 drivers/s390/net/ctcm_fsms.c ctcm_chx_restart(fi, event, arg); event 973 drivers/s390/net/ctcm_fsms.c ctc_ch_event_names[event], fsm_getstate_str(fi)); event 977 drivers/s390/net/ctcm_fsms.c "error %s\n", ctc_ch_event_names[event]); event 988 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) event 1008 drivers/s390/net/ctcm_fsms.c ctcm_chx_restart(fi, event, arg); event 1026 drivers/s390/net/ctcm_fsms.c ctcm_chx_restart(fi, event, arg); event 1030 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */ event 1040 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_TIMER) event 1060 drivers/s390/net/ctcm_fsms.c static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg) event 1210 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) event 1379 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) event 1480 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) event 1506 drivers/s390/net/ctcm_fsms.c ctcmpc_chx_rxidle(fi, event, arg); event 1534 drivers/s390/net/ctcm_fsms.c void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) event 1561 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_START) event 1565 drivers/s390/net/ctcm_fsms.c if (event == CTC_EVENT_START) event 1588 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg) event 1647 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) event 1733 drivers/s390/net/ctcm_fsms.c static void 
ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg) event 1749 drivers/s390/net/ctcm_fsms.c static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) event 2059 drivers/s390/net/ctcm_fsms.c static void dev_action_start(fsm_instance *fi, int event, void *arg) event 2084 drivers/s390/net/ctcm_fsms.c static void dev_action_stop(fsm_instance *fi, int event, void *arg) event 2104 drivers/s390/net/ctcm_fsms.c static void dev_action_restart(fsm_instance *fi, int event, void *arg) event 2119 drivers/s390/net/ctcm_fsms.c dev_action_stop(fi, event, arg); event 2139 drivers/s390/net/ctcm_fsms.c static void dev_action_chup(fsm_instance *fi, int event, void *arg) event 2147 drivers/s390/net/ctcm_fsms.c dev->name, dev->ml_priv, dev_stat, event); event 2151 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_RXUP) event 2157 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_RXUP) { event 2165 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_TXUP) { event 2173 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_RXUP) event 2177 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_TXUP) event 2183 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_RXUP) event 2200 drivers/s390/net/ctcm_fsms.c static void dev_action_chdown(fsm_instance *fi, int event, void *arg) event 2210 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_TXDOWN) event 2216 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_TXDOWN) event 2220 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_RXDOWN) event 2224 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_TXDOWN) event 2230 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_RXDOWN) event 2234 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_TXDOWN) event 2239 drivers/s390/net/ctcm_fsms.c if (event == DEV_EVENT_RXDOWN) event 162 drivers/s390/net/ctcm_fsms.h void fsm_action_nop(fsm_instance *fi, int event, void *arg); event 168 drivers/s390/net/ctcm_fsms.h void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg); event 184 drivers/s390/net/ctcm_fsms.h void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg); event 112 drivers/s390/net/ctcm_mpc.c static void mpc_action_nop(fsm_instance *fsm, int event, void *arg); event 113 drivers/s390/net/ctcm_mpc.c static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg); event 114 drivers/s390/net/ctcm_mpc.c static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg); event 115 drivers/s390/net/ctcm_mpc.c static void mpc_action_timeout(fsm_instance *fi, int event, void *arg); event 117 drivers/s390/net/ctcm_mpc.c static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg); event 118 drivers/s390/net/ctcm_mpc.c static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg); event 119 drivers/s390/net/ctcm_mpc.c static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg); event 120 drivers/s390/net/ctcm_mpc.c static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg); event 121 drivers/s390/net/ctcm_mpc.c static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg); event 122 drivers/s390/net/ctcm_mpc.c static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg); event 829 drivers/s390/net/ctcm_mpc.c static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) event 1340 drivers/s390/net/ctcm_mpc.c static void mpc_action_nop(fsm_instance *fi, int event, void *arg) event 1350 drivers/s390/net/ctcm_mpc.c static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) event 1452 
drivers/s390/net/ctcm_mpc.c static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) event 1488 drivers/s390/net/ctcm_mpc.c void mpc_action_discontact(fsm_instance *fi, int event, void *arg) event 1820 drivers/s390/net/ctcm_mpc.c static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg) event 1829 drivers/s390/net/ctcm_mpc.c static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg) event 1838 drivers/s390/net/ctcm_mpc.c static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg) event 1879 drivers/s390/net/ctcm_mpc.c static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) event 1948 drivers/s390/net/ctcm_mpc.c static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg) event 2011 drivers/s390/net/ctcm_mpc.c static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg) event 236 drivers/s390/net/ctcm_mpc.h void mpc_action_discontact(fsm_instance *fi, int event, void *arg); event 98 drivers/s390/net/fsm.c int e = fi->history[idx].event; event 113 drivers/s390/net/fsm.c fsm_record_history(fsm_instance *fi, int state, int event) event 116 drivers/s390/net/fsm.c fi->history[fi->history_index++].event = event; event 165 drivers/s390/net/fsm.c fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg) event 174 drivers/s390/net/fsm.c this->expire_event = event; event 183 drivers/s390/net/fsm.c fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg) event 193 drivers/s390/net/fsm.c this->expire_event = event; event 57 drivers/s390/net/fsm.h int event; event 127 drivers/s390/net/fsm.h fsm_record_history(fsm_instance *fi, int state, int event); event 144 drivers/s390/net/fsm.h fsm_event(fsm_instance *fi, int event, void *arg) event 150 drivers/s390/net/fsm.h (event >= fi->f->nr_events) ) { event 152 drivers/s390/net/fsm.h fi->name, (long)state,(long)fi->f->nr_states, event, event 159 drivers/s390/net/fsm.h r = fi->f->jumpmatrix[fi->f->nr_states * event + state]; event 164 drivers/s390/net/fsm.h fi->f->event_names[event]); event 167 drivers/s390/net/fsm.h fsm_record_history(fi, state, event); event 169 drivers/s390/net/fsm.h r(fi, event, arg); event 174 drivers/s390/net/fsm.h fi->name, fi->f->event_names[event], event 254 drivers/s390/net/fsm.h extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg); event 264 drivers/s390/net/fsm.h extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg); event 598 drivers/s390/net/netiucv.c static void netiucv_action_nop(fsm_instance *fi, int event, void *arg) event 669 drivers/s390/net/netiucv.c static void conn_action_rx(fsm_instance *fi, int event, void *arg) event 705 drivers/s390/net/netiucv.c static void conn_action_txdone(fsm_instance *fi, int event, void *arg) event 795 drivers/s390/net/netiucv.c static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) event 819 drivers/s390/net/netiucv.c static void conn_action_connreject(fsm_instance *fi, int event, void *arg) event 828 drivers/s390/net/netiucv.c static void conn_action_connack(fsm_instance *fi, int event, void *arg) event 841 drivers/s390/net/netiucv.c static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) event 851 drivers/s390/net/netiucv.c static void conn_action_connsever(fsm_instance *fi, int event, void *arg) event 869 drivers/s390/net/netiucv.c static void conn_action_start(fsm_instance *fi, int event, void *arg) event 952 drivers/s390/net/netiucv.c static void conn_action_stop(fsm_instance *fi, int 
event, void *arg) event 974 drivers/s390/net/netiucv.c static void conn_action_inval(fsm_instance *fi, int event, void *arg) event 1030 drivers/s390/net/netiucv.c static void dev_action_start(fsm_instance *fi, int event, void *arg) event 1049 drivers/s390/net/netiucv.c dev_action_stop(fsm_instance *fi, int event, void *arg) event 1072 drivers/s390/net/netiucv.c dev_action_connup(fsm_instance *fi, int event, void *arg) event 1105 drivers/s390/net/netiucv.c dev_action_conndown(fsm_instance *fi, int event, void *arg) event 2496 drivers/s390/net/qeth_l3_main.c unsigned long event) event 2498 drivers/s390/net/qeth_l3_main.c switch (event) { event 2545 drivers/s390/net/qeth_l3_main.c unsigned long event, void *ptr) event 2565 drivers/s390/net/qeth_l3_main.c return qeth_l3_handle_ip_event(card, &addr, event); event 2574 drivers/s390/net/qeth_l3_main.c unsigned long event, void *ptr) event 2581 drivers/s390/net/qeth_l3_main.c if (event != NETDEV_UP && event != NETDEV_DOWN) event 2595 drivers/s390/net/qeth_l3_main.c if (event == NETDEV_UP) event 241 drivers/s390/scsi/zfcp_ccw.c static int zfcp_ccw_notify(struct ccw_device *cdev, int event) event 248 drivers/s390/scsi/zfcp_ccw.c switch (event) { event 101 drivers/s390/scsi/zfcp_fc.c struct zfcp_fc_event *event = NULL, *tmp = NULL; event 112 drivers/s390/scsi/zfcp_fc.c list_for_each_entry_safe(event, tmp, &tmp_lh, list) { event 114 drivers/s390/scsi/zfcp_fc.c event->code, event->data); event 115 drivers/s390/scsi/zfcp_fc.c list_del(&event->list); event 116 drivers/s390/scsi/zfcp_fc.c kfree(event); event 130 drivers/s390/scsi/zfcp_fc.c struct zfcp_fc_event *event; event 132 drivers/s390/scsi/zfcp_fc.c event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC); event 133 drivers/s390/scsi/zfcp_fc.c if (!event) event 136 drivers/s390/scsi/zfcp_fc.c event->code = event_code; event 137 drivers/s390/scsi/zfcp_fc.c event->data = event_data; event 140 drivers/s390/scsi/zfcp_fc.c list_add_tail(&event->list, &adapter->events.list); event 1343 drivers/s390/virtio/virtio_ccw.c static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event) event 1355 drivers/s390/virtio/virtio_ccw.c switch (event) { event 374 drivers/scsi/3w-9xxx.c TW_Event *event; event 382 drivers/scsi/3w-9xxx.c event = tw_dev->event_queue[tw_dev->error_index]; event 388 drivers/scsi/3w-9xxx.c if (event->retrieved == TW_AEN_NOT_RETRIEVED) event 393 drivers/scsi/3w-9xxx.c memset(event, 0, sizeof(TW_Event)); event 395 drivers/scsi/3w-9xxx.c event->severity = TW_SEV_OUT(header->status_block.severity__reserved); event 398 drivers/scsi/3w-9xxx.c event->time_stamp_sec = local_time; event 399 drivers/scsi/3w-9xxx.c event->aen_code = aen; event 400 drivers/scsi/3w-9xxx.c event->retrieved = TW_AEN_NOT_RETRIEVED; event 401 drivers/scsi/3w-9xxx.c event->sequence_id = tw_dev->error_sequence_id; event 408 drivers/scsi/3w-9xxx.c event->parameter_len = strlen(header->err_specific_desc); event 409 drivers/scsi/3w-9xxx.c memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 
0 : (1 + strlen(error_str)))); event 410 drivers/scsi/3w-9xxx.c if (event->severity != TW_AEN_SEVERITY_DEBUG) event 651 drivers/scsi/3w-9xxx.c TW_Event *event; event 784 drivers/scsi/3w-9xxx.c event = (TW_Event *)tw_ioctl->data_buffer; event 785 drivers/scsi/3w-9xxx.c sequence_id = event->sequence_id; event 813 drivers/scsi/3w-9xxx.c event = (TW_Event *)tw_ioctl->data_buffer; event 814 drivers/scsi/3w-9xxx.c sequence_id = event->sequence_id; event 224 drivers/scsi/3w-sas.c TW_Event *event; event 232 drivers/scsi/3w-sas.c event = tw_dev->event_queue[tw_dev->error_index]; event 239 drivers/scsi/3w-sas.c memset(event, 0, sizeof(TW_Event)); event 241 drivers/scsi/3w-sas.c event->severity = TW_SEV_OUT(header->status_block.severity__reserved); event 244 drivers/scsi/3w-sas.c event->time_stamp_sec = local_time; event 245 drivers/scsi/3w-sas.c event->aen_code = aen; event 246 drivers/scsi/3w-sas.c event->retrieved = TW_AEN_NOT_RETRIEVED; event 247 drivers/scsi/3w-sas.c event->sequence_id = tw_dev->error_sequence_id; event 254 drivers/scsi/3w-sas.c event->parameter_len = strlen(header->err_specific_desc); event 255 drivers/scsi/3w-sas.c memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str)); event 256 drivers/scsi/3w-sas.c if (event->severity != TW_AEN_SEVERITY_DEBUG) event 900 drivers/scsi/aacraid/aacraid.h void (*adapter_notify)(struct aac_dev *dev, u32 event); event 1681 drivers/scsi/aacraid/aacraid.h #define aac_adapter_notify(dev, event) \ event 1682 drivers/scsi/aacraid/aacraid.h (dev)->a_ops.adapter_notify(dev, event) event 266 drivers/scsi/aacraid/rx.c static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event) event 268 drivers/scsi/aacraid/rx.c switch (event) { event 98 drivers/scsi/aacraid/sa.c static void aac_sa_notify_adapter(struct aac_dev *dev, u32 event) event 100 drivers/scsi/aacraid/sa.c switch (event) { event 342 drivers/scsi/aacraid/src.c static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) event 344 drivers/scsi/aacraid/src.c switch (event) { event 9194 drivers/scsi/aic7xxx/aic79xx_core.c struct ahd_tmode_event *event; event 9228 drivers/scsi/aic7xxx/aic79xx_core.c event = &lstate->event_buffer[lstate->event_w_idx]; event 9229 drivers/scsi/aic7xxx/aic79xx_core.c event->initiator_id = initiator_id; event 9230 drivers/scsi/aic7xxx/aic79xx_core.c event->event_type = event_type; event 9231 drivers/scsi/aic7xxx/aic79xx_core.c event->event_arg = event_arg; event 9249 drivers/scsi/aic7xxx/aic79xx_core.c struct ahd_tmode_event *event; event 9251 drivers/scsi/aic7xxx/aic79xx_core.c event = &lstate->event_buffer[lstate->event_r_idx]; event 9254 drivers/scsi/aic7xxx/aic79xx_core.c switch (event->event_type) { event 9260 drivers/scsi/aic7xxx/aic79xx_core.c inot->message_args[0] = event->event_type; event 9261 drivers/scsi/aic7xxx/aic79xx_core.c inot->message_args[1] = event->event_arg; event 9264 drivers/scsi/aic7xxx/aic79xx_core.c inot->initiator_id = event->initiator_id; event 92 drivers/scsi/aic7xxx/aic79xx_osm_pci.c if (mesg.event & PM_EVENT_SLEEP) event 6712 drivers/scsi/aic7xxx/aic7xxx_core.c struct ahc_tmode_event *event; event 6746 drivers/scsi/aic7xxx/aic7xxx_core.c event = &lstate->event_buffer[lstate->event_w_idx]; event 6747 drivers/scsi/aic7xxx/aic7xxx_core.c event->initiator_id = initiator_id; event 6748 drivers/scsi/aic7xxx/aic7xxx_core.c event->event_type = event_type; event 6749 drivers/scsi/aic7xxx/aic7xxx_core.c event->event_arg = event_arg; event 6767 drivers/scsi/aic7xxx/aic7xxx_core.c struct 
ahc_tmode_event *event; event 6769 drivers/scsi/aic7xxx/aic7xxx_core.c event = &lstate->event_buffer[lstate->event_r_idx]; event 6772 drivers/scsi/aic7xxx/aic7xxx_core.c switch (event->event_type) { event 6778 drivers/scsi/aic7xxx/aic7xxx_core.c inot->message_args[0] = event->event_type; event 6779 drivers/scsi/aic7xxx/aic7xxx_core.c inot->message_args[1] = event->event_arg; event 6782 drivers/scsi/aic7xxx/aic7xxx_core.c inot->initiator_id = event->initiator_id; event 137 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c if (mesg.event & PM_EVENT_SLEEP) event 637 drivers/scsi/be2iscsi/be_main.c unsigned char rearm, unsigned char event) event 645 drivers/scsi/be2iscsi/be_main.c if (event) event 685 drivers/scsi/be2iscsi/be_main.h u8 event[1]; event 253 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 255 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 257 drivers/scsi/bfa/bfa_core.c switch (event) { event 263 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 275 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 277 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 279 drivers/scsi/bfa/bfa_core.c switch (event) { event 296 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 308 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 310 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 312 drivers/scsi/bfa/bfa_core.c switch (event) { event 329 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 341 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 343 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 345 drivers/scsi/bfa/bfa_core.c switch (event) { event 362 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 376 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 378 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 380 drivers/scsi/bfa/bfa_core.c switch (event) { event 394 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 407 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 409 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 411 drivers/scsi/bfa/bfa_core.c switch (event) { event 422 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 434 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 436 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 438 drivers/scsi/bfa/bfa_core.c switch (event) { event 444 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 456 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 458 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 460 drivers/scsi/bfa/bfa_core.c switch (event) { event 476 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 488 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 490 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 492 drivers/scsi/bfa/bfa_core.c switch (event) { event 517 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 529 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_cfg_wait(struct 
bfa_iocfc_s *iocfc, enum iocfc_event event) event 531 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 533 drivers/scsi/bfa/bfa_core.c switch (event) { event 562 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 574 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 576 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 578 drivers/scsi/bfa/bfa_core.c switch (event) { event 587 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 603 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 605 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 607 drivers/scsi/bfa/bfa_core.c switch (event) { event 615 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 628 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 630 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 632 drivers/scsi/bfa/bfa_core.c switch (event) { event 645 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 660 drivers/scsi/bfa/bfa_core.c bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) event 662 drivers/scsi/bfa/bfa_core.c bfa_trc(iocfc->bfa, event); event 664 drivers/scsi/bfa/bfa_core.c switch (event) { event 683 drivers/scsi/bfa/bfa_core.c bfa_sm_fault(iocfc->bfa, event); event 179 drivers/scsi/bfa/bfa_cs.h typedef void (*bfa_sm_t)(void *sm, int event); event 188 drivers/scsi/bfa/bfa_cs.h static void oc ## _sm_ ## st(otype * fsm, etype event) event 208 drivers/scsi/bfa/bfa_cs.h typedef void (*bfa_fsm_t)(void *fsm, int event); event 217 drivers/scsi/bfa/bfa_cs.h static void oc ## _sm_ ## st(otype * fsm, etype event); \ event 818 drivers/scsi/bfa/bfa_fc.h struct fc_rscn_event_s event[1]; event 1046 drivers/scsi/bfa/bfa_fcbuild.c rscn->pagelen = sizeof(rscn->event[0]); event 1051 drivers/scsi/bfa/bfa_fcbuild.c rscn->event[0].format = FC_RSCN_FORMAT_PORTID; event 1052 drivers/scsi/bfa/bfa_fcbuild.c rscn->event[0].portid = s_id; event 178 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 180 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 182 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 184 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 186 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 188 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 190 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 192 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 194 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 196 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 198 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 200 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 202 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 204 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 206 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event); event 226 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 228 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 230 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 232 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 234 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 236 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 238 
drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 240 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 242 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 244 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 246 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 248 drivers/scsi/bfa/bfa_fcpim.c enum bfa_ioim_event event); event 267 drivers/scsi/bfa/bfa_fcpim.c enum bfa_tskim_event event); event 269 drivers/scsi/bfa/bfa_fcpim.c enum bfa_tskim_event event); event 271 drivers/scsi/bfa/bfa_fcpim.c enum bfa_tskim_event event); event 273 drivers/scsi/bfa/bfa_fcpim.c enum bfa_tskim_event event); event 275 drivers/scsi/bfa/bfa_fcpim.c enum bfa_tskim_event event); event 277 drivers/scsi/bfa/bfa_fcpim.c enum bfa_tskim_event event); event 279 drivers/scsi/bfa/bfa_fcpim.c enum bfa_tskim_event event); event 508 drivers/scsi/bfa/bfa_fcpim.c bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) event 511 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 513 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 521 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 529 drivers/scsi/bfa/bfa_fcpim.c bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) event 532 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 534 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 552 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 560 drivers/scsi/bfa/bfa_fcpim.c bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) event 563 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 565 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 589 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 595 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event) event 598 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 600 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 624 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 633 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event) event 636 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 638 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 652 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 660 drivers/scsi/bfa/bfa_fcpim.c bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) event 663 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 665 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 694 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 702 drivers/scsi/bfa/bfa_fcpim.c bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) event 705 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 707 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 725 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 734 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event) event 737 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 739 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 762 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 771 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event) event 774 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 776 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 790 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 798 drivers/scsi/bfa/bfa_fcpim.c bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum 
bfa_itnim_event event) event 801 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 803 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 819 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 825 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event) event 828 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 830 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 847 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 855 drivers/scsi/bfa/bfa_fcpim.c bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) event 858 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 860 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 879 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 885 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event) event 888 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 890 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 912 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 920 drivers/scsi/bfa/bfa_fcpim.c bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) event 923 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 925 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 933 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 939 drivers/scsi/bfa/bfa_fcpim.c enum bfa_itnim_event event) event 942 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(itnim->bfa, event); event 944 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 957 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(itnim->bfa, event); event 1505 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 1507 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 1559 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 1567 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 1570 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(ioim->bfa, event); event 1572 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 1606 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 1614 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 1616 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 1687 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 1695 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 1697 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 1739 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 1747 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 1750 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(ioim->bfa, event); event 1752 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 1801 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 1810 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 1813 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(ioim->bfa, event); event 1815 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 1862 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 1870 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 1873 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(ioim->bfa, event); event 1875 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 1906 drivers/scsi/bfa/bfa_fcpim.c 
bfa_sm_fault(ioim->bfa, event); event 1914 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 1917 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(ioim->bfa, event); event 1919 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 1957 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 1965 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 1968 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(ioim->bfa, event); event 1970 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 2007 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 2015 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 2017 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 2031 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 2039 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 2042 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(ioim->bfa, event); event 2044 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 2064 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 2072 drivers/scsi/bfa/bfa_fcpim.c bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) event 2075 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(ioim->bfa, event); event 2077 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 2091 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(ioim->bfa, event); event 3033 drivers/scsi/bfa/bfa_fcpim.c bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) event 3035 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); event 3037 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 3062 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(tskim->bfa, event); event 3071 drivers/scsi/bfa/bfa_fcpim.c bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) event 3073 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); event 3075 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 3098 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(tskim->bfa, event); event 3107 drivers/scsi/bfa/bfa_fcpim.c bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) event 3109 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); event 3111 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 3131 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(tskim->bfa, event); event 3136 drivers/scsi/bfa/bfa_fcpim.c bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) event 3138 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); event 3140 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 3160 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(tskim->bfa, event); event 3168 drivers/scsi/bfa/bfa_fcpim.c bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) event 3170 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); event 3172 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 3195 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(tskim->bfa, event); event 3205 drivers/scsi/bfa/bfa_fcpim.c enum bfa_tskim_event event) event 3207 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); event 3209 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 3226 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(tskim->bfa, event); event 3234 drivers/scsi/bfa/bfa_fcpim.c bfa_tskim_sm_hcb(struct 
bfa_tskim_s *tskim, enum bfa_tskim_event event) event 3236 drivers/scsi/bfa/bfa_fcpim.c bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); event 3238 drivers/scsi/bfa/bfa_fcpim.c switch (event) { event 3252 drivers/scsi/bfa/bfa_fcpim.c bfa_sm_fault(tskim->bfa, event); event 175 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 177 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 179 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 181 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 183 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 185 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 187 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 189 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 191 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 193 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 195 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 197 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 199 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event); event 205 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 208 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 210 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 222 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 231 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 236 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 238 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 269 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 279 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 284 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 286 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 316 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 325 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 328 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 330 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 339 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 379 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 386 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 389 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 391 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 409 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 418 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 421 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 423 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 449 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 458 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 461 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 463 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 475 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 484 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 487 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 489 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 501 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 510 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 513 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 515 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 537 
drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 546 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 551 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 553 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 583 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 592 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 595 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 597 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 607 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 616 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 619 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 627 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 633 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 648 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 651 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 653 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 667 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 676 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 681 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 683 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 704 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 713 drivers/scsi/bfa/bfa_fcs.c enum bfa_fcs_fabric_event event) event 716 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fabric->fcs, event); event 718 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 733 drivers/scsi/bfa/bfa_fcs.c bfa_sm_fault(fabric->fcs, event); event 1368 drivers/scsi/bfa/bfa_fcs.c enum bfa_port_aen_event event) event 1382 drivers/scsi/bfa/bfa_fcs.c BFA_AEN_CAT_PORT, event); event 1489 drivers/scsi/bfa/bfa_fcs.c bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) event 1493 drivers/scsi/bfa/bfa_fcs.c bfa_trc(fcs, event); event 1495 drivers/scsi/bfa/bfa_fcs.c switch (event) { event 822 drivers/scsi/bfa/bfa_fcs.h enum bfa_fcs_fabric_event event); event 824 drivers/scsi/bfa/bfa_fcs.h enum bfa_fcs_fabric_event event); event 826 drivers/scsi/bfa/bfa_fcs.h enum bfa_fcs_fabric_event event); event 34 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_itnim_aen_event event); event 37 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event); event 39 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event); event 41 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event); event 43 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event); event 45 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event); event 47 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event); event 49 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event); event 51 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event); event 53 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event); event 72 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event) event 75 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_trc(itnim->fcs, event); event 77 drivers/scsi/bfa/bfa_fcs_fcpim.c switch (event) { event 97 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_sm_fault(itnim->fcs, event); event 104 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event) event 107 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_trc(itnim->fcs, event); event 109 drivers/scsi/bfa/bfa_fcs_fcpim.c switch (event) { event 133 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_sm_fault(itnim->fcs, event); event 139 
drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event) event 142 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_trc(itnim->fcs, event); event 144 drivers/scsi/bfa/bfa_fcs_fcpim.c switch (event) { event 185 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_sm_fault(itnim->fcs, event); event 191 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event) event 194 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_trc(itnim->fcs, event); event 196 drivers/scsi/bfa/bfa_fcs_fcpim.c switch (event) { event 223 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_sm_fault(itnim->fcs, event); event 229 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event) event 232 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_trc(itnim->fcs, event); event 234 drivers/scsi/bfa/bfa_fcs_fcpim.c switch (event) { event 268 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_sm_fault(itnim->fcs, event); event 274 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event) event 281 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_trc(itnim->fcs, event); event 283 drivers/scsi/bfa/bfa_fcs_fcpim.c switch (event) { event 306 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_sm_fault(itnim->fcs, event); event 312 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event) event 319 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_trc(itnim->fcs, event); event 321 drivers/scsi/bfa/bfa_fcs_fcpim.c switch (event) { event 347 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_sm_fault(itnim->fcs, event); event 353 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event) event 356 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_trc(itnim->fcs, event); event 358 drivers/scsi/bfa/bfa_fcs_fcpim.c switch (event) { event 370 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_sm_fault(itnim->fcs, event); event 381 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_fcs_itnim_event event) event 384 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_trc(itnim->fcs, event); event 386 drivers/scsi/bfa/bfa_fcs_fcpim.c switch (event) { event 409 drivers/scsi/bfa/bfa_fcs_fcpim.c bfa_sm_fault(itnim->fcs, event); event 415 drivers/scsi/bfa/bfa_fcs_fcpim.c enum bfa_itnim_aen_event event) event 437 drivers/scsi/bfa/bfa_fcs_fcpim.c BFA_AEN_CAT_ITNIM, event); event 120 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event); event 122 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event); event 124 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event); event 126 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event); event 128 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event); event 130 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event); event 135 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event) event 138 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 140 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 146 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 152 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event) event 155 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 157 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 180 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 187 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event) event 193 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 195 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 240 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 247 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event) event 253 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); 
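The entries above and below all index the same handler shape: each state of an itnim, ioim, tskim, fabric or lport object is a function that takes the object plus an event enum, traces the event with bfa_trc(), switches on it, and reports anything unhandled through bfa_sm_fault(). Below is a minimal, self-contained sketch of that shape; the enum values, struct fields and the trc()/sm_fault() stand-ins are illustrative placeholders, not the driver's actual definitions.

/*
 * Sketch of the per-state handler pattern indexed here
 * (bfa_itnim_sm_*, bfa_ioim_sm_*, bfa_fcs_lport_sm_*, ...):
 * trace the event, switch on it, fault on anything unexpected.
 * All names below are simplified placeholders.
 */
#include <stdio.h>
#include <stdlib.h>

enum itnim_event {               /* placeholder for enum bfa_itnim_event */
	ITNIM_E_ONLINE  = 1,
	ITNIM_E_OFFLINE = 2,
};

struct itnim {                   /* placeholder for struct bfa_itnim_s */
	const char *name;
};

/* stand-ins for bfa_trc() and bfa_sm_fault() */
#define trc(obj, ev)      printf("%s: trace event %d\n", (obj)->name, (ev))
#define sm_fault(obj, ev) do { \
		fprintf(stderr, "%s: unexpected event %d\n", (obj)->name, (ev)); \
		exit(1); \
	} while (0)

/* one handler per state; every event arrives here while "offline" */
static void itnim_sm_offline(struct itnim *itnim, enum itnim_event event)
{
	trc(itnim, event);

	switch (event) {
	case ITNIM_E_ONLINE:
		printf("%s: going online\n", itnim->name);
		break;
	case ITNIM_E_OFFLINE:
		/* already offline: nothing to do */
		break;
	default:
		sm_fault(itnim, event);
	}
}

int main(void)
{
	struct itnim it = { .name = "itnim0" };

	itnim_sm_offline(&it, ITNIM_E_ONLINE);
	itnim_sm_offline(&it, ITNIM_E_OFFLINE);
	return 0;
}

Routing every event through one handler per state keeps unexpected events loud: the default branch faults instead of silently dropping them, which is why nearly every handler in this listing ends in a bfa_sm_fault() call.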
event 255 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 296 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 302 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event) event 305 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 307 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 320 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 327 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_lport_event event) event 330 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 332 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 341 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 354 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_lport_aen_event event) event 371 drivers/scsi/bfa/bfa_fcs_lport.c BFA_AEN_CAT_LPORT, event); event 1444 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1447 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1449 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1452 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1455 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1457 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1460 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1463 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1465 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1468 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1470 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1473 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event); event 1479 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1484 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1488 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1513 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1519 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1524 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1526 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1538 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1544 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1549 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1551 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1587 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1593 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1598 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1600 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1615 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1624 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1629 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1631 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1643 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1649 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1654 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1656 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1690 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1696 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1701 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1703 
drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1718 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1727 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1732 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1734 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1746 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1752 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1757 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1759 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1791 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1797 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1802 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1804 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1819 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1825 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1830 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 1832 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 1838 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 1846 drivers/scsi/bfa/bfa_fcs_lport.c enum port_fdmi_event event) event 1851 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, event); event 2872 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2874 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2876 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2878 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2880 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2882 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2884 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2886 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2888 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2890 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2892 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event); event 2898 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 2901 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 2903 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 2913 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 2919 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 2922 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 2924 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 2936 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 2942 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 2945 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 2947 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 2987 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 2993 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 2996 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 2998 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3013 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 3019 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 3022 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 3024 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3036 
drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 3042 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 3045 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 3047 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3059 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 3065 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 3068 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 3070 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3099 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 3105 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 3108 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 3110 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3125 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 3245 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 3248 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 3250 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3262 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 3268 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 3271 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 3273 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3300 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 3306 drivers/scsi/bfa/bfa_fcs_lport.c enum port_ms_event event) event 3309 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ms->port->fcs, event); event 3311 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3326 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ms->port->fcs, event); event 3659 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3661 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3663 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3665 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3668 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3670 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3672 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3675 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3677 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3679 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3682 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3684 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3686 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3689 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3691 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3693 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3695 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3698 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3700 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3702 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3705 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3707 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3710 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event); event 3716 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3719 
drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3721 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3731 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3737 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3740 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3742 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3754 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3760 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3763 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3765 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3789 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3795 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3798 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3800 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3815 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3821 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3824 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3826 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3837 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3843 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3846 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3848 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3877 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3883 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3886 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3888 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3900 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3906 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3909 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3911 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3923 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3929 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3932 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3934 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3962 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3968 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3971 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3973 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 3985 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 3991 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 3994 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 3996 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4008 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4014 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4017 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4019 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4042 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4048 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4051 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4053 drivers/scsi/bfa/bfa_fcs_lport.c 
switch (event) { event 4068 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4074 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4077 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4079 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4091 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4097 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4100 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4102 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4126 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4132 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4135 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4137 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4149 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4155 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4158 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4160 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4172 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4178 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4181 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4183 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4229 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4235 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4238 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4240 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4252 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4257 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4260 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4262 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4274 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4280 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4283 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4285 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4313 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4319 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4322 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4324 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4336 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 4342 drivers/scsi/bfa/bfa_fcs_lport.c enum vport_ns_event event) event 4345 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(ns->port->fcs, event); event 4347 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 4365 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(ns->port->fcs, event); event 5247 drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event); event 5250 drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event); event 5252 drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event); event 5254 drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event); event 5256 drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event); event 5263 drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event) event 5265 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 5275 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(scn->port->fcs, event); event 5281 
drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event) event 5283 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 5294 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(scn->port->fcs, event); event 5300 drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event) event 5304 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 5322 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(port->fcs, event); event 5328 drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event) event 5330 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 5342 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(scn->port->fcs, event); event 5348 drivers/scsi/bfa/bfa_fcs_lport.c enum port_scn_event event) event 5350 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 5356 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(scn->port->fcs, event); event 5625 drivers/scsi/bfa/bfa_fcs_lport.c sizeof(u32)) / sizeof(rscn->event[0]); event 5634 drivers/scsi/bfa/bfa_fcs_lport.c rscn_pid = rscn->event[i].portid; event 5636 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(port->fcs, rscn->event[i].format); event 5642 drivers/scsi/bfa/bfa_fcs_lport.c if (rscn->event[j].portid == rscn_pid) { event 5654 drivers/scsi/bfa/bfa_fcs_lport.c switch (rscn->event[i].format) { event 5656 drivers/scsi/bfa/bfa_fcs_lport.c if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) { event 5669 drivers/scsi/bfa/bfa_fcs_lport.c if (rscn->event[i].qualifier == event 5680 drivers/scsi/bfa/bfa_fcs_lport.c rscn->event[i].format, event 6006 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6008 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6010 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6012 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6014 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6016 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6018 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6020 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6022 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6024 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6026 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6028 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6030 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event); event 6051 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6054 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6056 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6063 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6072 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6075 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6077 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6108 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6117 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6120 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6122 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6150 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6160 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6163 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 
6165 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6197 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6206 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6209 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6211 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6231 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6242 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6245 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6247 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6267 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6276 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6279 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6281 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6299 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6309 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6312 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6314 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6325 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6335 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6338 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6340 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6354 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6366 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6369 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6371 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6378 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6388 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6391 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6393 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6407 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6417 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6420 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6422 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6433 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6443 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_fcs_vport_event event) event 6446 drivers/scsi/bfa/bfa_fcs_lport.c bfa_trc(__vport_fcs(vport), event); event 6448 drivers/scsi/bfa/bfa_fcs_lport.c switch (event) { event 6463 drivers/scsi/bfa/bfa_fcs_lport.c bfa_sm_fault(__vport_fcs(vport), event); event 6477 drivers/scsi/bfa/bfa_fcs_lport.c enum bfa_lport_aen_event event) event 6494 drivers/scsi/bfa/bfa_fcs_lport.c BFA_AEN_CAT_LPORT, event); event 83 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 85 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 87 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 89 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 91 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 93 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 95 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 97 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 99 
drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 101 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 103 drivers/scsi/bfa/bfa_fcs_rport.c struct bfa_fcs_rport_s *rport, enum rport_event event); event 105 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 107 drivers/scsi/bfa/bfa_fcs_rport.c *rport, enum rport_event event); event 109 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 111 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 113 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 115 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 117 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 119 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 121 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 123 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 125 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 127 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 129 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 131 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 133 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 135 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 137 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event); event 171 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) event 175 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 177 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 201 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 210 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 214 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 216 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 262 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 271 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 275 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 277 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 328 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 337 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 341 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 343 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 399 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 407 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) event 411 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 413 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 504 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 513 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 517 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 519 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 568 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 579 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 583 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 585 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 620 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 628 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) event 632 
drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 634 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 672 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 682 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 686 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 688 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 722 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 731 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) event 735 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 737 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 781 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 791 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 795 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 797 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 832 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 842 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 846 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 848 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 893 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 903 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 907 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 909 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 934 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 944 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 948 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 950 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 982 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 991 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 995 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 997 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1021 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1031 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1035 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1037 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1058 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1067 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1071 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1073 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1108 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1118 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1122 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1124 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1188 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1198 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1202 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1204 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1266 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1277 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1281 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1283 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1305 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, 
event); event 1314 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1318 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1320 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1346 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1355 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) event 1359 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1361 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1415 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1424 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1428 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1430 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1472 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1481 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1485 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1487 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1536 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1545 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1549 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1551 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1626 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1636 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1640 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1642 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1654 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 1665 drivers/scsi/bfa/bfa_fcs_rport.c enum rport_event event) event 1669 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 1671 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 1684 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 2395 drivers/scsi/bfa/bfa_fcs_rport.c enum bfa_rport_aen_event event, event 2406 drivers/scsi/bfa/bfa_fcs_rport.c if (event == BFA_RPORT_AEN_QOS_PRIO) event 2408 drivers/scsi/bfa/bfa_fcs_rport.c else if (event == BFA_RPORT_AEN_QOS_FLOWID) event 2419 drivers/scsi/bfa/bfa_fcs_rport.c BFA_AEN_CAT_RPORT, event); event 3128 drivers/scsi/bfa/bfa_fcs_rport.c enum rpf_event event); event 3130 drivers/scsi/bfa/bfa_fcs_rport.c enum rpf_event event); event 3132 drivers/scsi/bfa/bfa_fcs_rport.c enum rpf_event event); event 3134 drivers/scsi/bfa/bfa_fcs_rport.c enum rpf_event event); event 3136 drivers/scsi/bfa/bfa_fcs_rport.c enum rpf_event event); event 3138 drivers/scsi/bfa/bfa_fcs_rport.c enum rpf_event event); event 3141 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) event 3148 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 3150 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 3167 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 3172 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) event 3176 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 3178 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 3190 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 3195 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) event 3200 drivers/scsi/bfa/bfa_fcs_rport.c 
bfa_trc(rport->fcs, event); event 3202 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 3236 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 3241 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) event 3246 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 3248 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 3262 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 3267 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) event 3273 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 3275 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 3282 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 3287 drivers/scsi/bfa/bfa_fcs_rport.c bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) event 3293 drivers/scsi/bfa/bfa_fcs_rport.c bfa_trc(rport->fcs, event); event 3295 drivers/scsi/bfa/bfa_fcs_rport.c switch (event) { event 3305 drivers/scsi/bfa/bfa_fcs_rport.c bfa_sm_fault(rport->fcs, event); event 102 drivers/scsi/bfa/bfa_ioc.c enum bfa_ioc_event_e event); event 265 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event) event 267 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 269 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 275 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 291 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) event 293 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 295 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 309 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 325 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) event 327 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 329 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 339 drivers/scsi/bfa/bfa_ioc.c if (event != IOC_E_PFFAILED) event 361 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 377 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) event 379 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 381 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 394 drivers/scsi/bfa/bfa_ioc.c if (event != IOC_E_PFFAILED) event 407 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 424 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) event 426 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 428 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 449 drivers/scsi/bfa/bfa_ioc.c if (event != IOC_E_PFFAILED) event 454 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 472 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) event 474 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 476 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 496 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 510 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) event 512 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 514 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 529 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 544 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event) event 546 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 548 drivers/scsi/bfa/bfa_ioc.c switch (event) { 
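The bfa_ioc.c and bfa_svc.c handlers indexed around this point add the dispatch side of the same pattern: the current state appears to be stored as a function pointer on the object, and events are pushed through it, so a transition is just an assignment of the next handler. The sketch below assumes set-state/send-event helpers in the spirit of the driver's bfa_sm_* macros; the names and signatures are simplified for illustration and are not the driver's real API.

/*
 * Sketch of event dispatch through a current-state function pointer.
 * sm_set_state()/sm_send_event() are simplified stand-ins for the
 * driver's bfa_sm_*-style helpers; all names here are illustrative.
 */
#include <stdio.h>

struct ioc;                                        /* placeholder object  */
typedef void (*sm_t)(struct ioc *ioc, int event);  /* current-state type  */

struct ioc {
	sm_t sm;                  /* the state *is* the handler in use */
};

#define sm_set_state(_obj, _state)  ((_obj)->sm = (_state))
#define sm_send_event(_obj, _event) ((_obj)->sm((_obj), (_event)))

static void ioc_sm_op(struct ioc *ioc, int event)
{
	printf("op: event %d\n", event);
}

static void ioc_sm_uninit(struct ioc *ioc, int event)
{
	printf("uninit: event %d, moving to op\n", event);
	sm_set_state(ioc, ioc_sm_op);      /* transition = pointer update */
}

int main(void)
{
	struct ioc ioc;

	sm_set_state(&ioc, ioc_sm_uninit);
	sm_send_event(&ioc, 1);   /* handled by ioc_sm_uninit, moves to op */
	sm_send_event(&ioc, 2);   /* handled by ioc_sm_op                  */
	return 0;
}

Under that model, callers such as notify callbacks only ever send events; they never inspect or set the state directly, which keeps all legal transitions inside the per-state handlers listed here.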
event 560 drivers/scsi/bfa/bfa_ioc.c if (event != IOC_E_PFFAILED) event 582 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 597 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) event 599 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 601 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 623 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 634 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event) event 636 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 638 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 656 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 678 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 682 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 684 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 693 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 765 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 769 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 771 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 805 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 829 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 833 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 835 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 852 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 869 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 873 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 875 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 897 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 913 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 917 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 919 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 938 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 958 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 962 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 964 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 977 drivers/scsi/bfa/bfa_ioc.c if (event == IOCPF_E_TIMEOUT) event 989 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 1000 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 1004 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 1006 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 1020 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 1035 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 1039 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 1041 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 1060 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 1074 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 1078 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 1080 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 1096 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 1111 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 1115 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 1117 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 1128 
drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 1143 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 1147 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 1149 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 1178 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 1192 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 1196 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 1198 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 1209 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 1230 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 1234 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 1236 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 1269 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 1283 drivers/scsi/bfa/bfa_ioc.c bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) event 1287 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ioc, event); event 1289 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 1295 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(ioc, event); event 1307 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event) event 1314 drivers/scsi/bfa/bfa_ioc.c notify->cbfn(notify->cbarg, event); event 2911 drivers/scsi/bfa/bfa_ioc.c bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) event 2941 drivers/scsi/bfa/bfa_ioc.c BFA_AEN_CAT_IOC, event); event 3333 drivers/scsi/bfa/bfa_ioc.c bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event) event 3337 drivers/scsi/bfa/bfa_ioc.c bfa_trc(ablk->ioc, event); event 3339 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 3678 drivers/scsi/bfa/bfa_ioc.c bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event) event 3682 drivers/scsi/bfa/bfa_ioc.c bfa_trc(sfp, event); event 3686 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 3716 drivers/scsi/bfa/bfa_ioc.c ((u64)rsp->event)); event 3726 drivers/scsi/bfa/bfa_ioc.c switch (rsp->event) { event 3744 drivers/scsi/bfa/bfa_ioc.c bfa_trc(sfp, rsp->event); event 3800 drivers/scsi/bfa/bfa_ioc.c switch (rsp->event) { event 3831 drivers/scsi/bfa/bfa_ioc.c bfa_trc(sfp, rsp->event); event 4193 drivers/scsi/bfa/bfa_ioc.c bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event, event 4209 drivers/scsi/bfa/bfa_ioc.c BFA_AEN_CAT_AUDIT, event); event 4221 drivers/scsi/bfa/bfa_ioc.c bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event) event 4225 drivers/scsi/bfa/bfa_ioc.c bfa_trc(flash, event); event 4226 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 4353 drivers/scsi/bfa/bfa_ioc.c struct bfi_flash_event_s *event; event 4439 drivers/scsi/bfa/bfa_ioc.c status = be32_to_cpu(m.event->status); event 4445 drivers/scsi/bfa/bfa_ioc.c param = be32_to_cpu(m.event->param); event 4716 drivers/scsi/bfa/bfa_ioc.c bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event) event 4720 drivers/scsi/bfa/bfa_ioc.c bfa_trc(diag, event); event 4725 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 5317 drivers/scsi/bfa/bfa_ioc.c bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event) event 5321 drivers/scsi/bfa/bfa_ioc.c bfa_trc(phy, event); event 5323 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 5828 drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event); event 5830 drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event); event 5832 drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event); event 5834 
drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event); event 5836 drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event); event 5838 drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event); event 5840 drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event); event 5851 drivers/scsi/bfa/bfa_ioc.c bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) event 5854 drivers/scsi/bfa/bfa_ioc.c bfa_trc(dconf->bfa, event); event 5856 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 5886 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(dconf->bfa, event); event 5895 drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event) event 5897 drivers/scsi/bfa/bfa_ioc.c bfa_trc(dconf->bfa, event); event 5899 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 5918 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(dconf->bfa, event); event 5926 drivers/scsi/bfa/bfa_ioc.c bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) event 5928 drivers/scsi/bfa/bfa_ioc.c bfa_trc(dconf->bfa, event); event 5930 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 5944 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(dconf->bfa, event); event 5953 drivers/scsi/bfa/bfa_ioc.c bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) event 5955 drivers/scsi/bfa/bfa_ioc.c bfa_trc(dconf->bfa, event); event 5957 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 5981 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(dconf->bfa, event); event 5990 drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event) event 5992 drivers/scsi/bfa/bfa_ioc.c bfa_trc(dconf->bfa, event); event 5994 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 6004 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(dconf->bfa, event); event 6009 drivers/scsi/bfa/bfa_ioc.c bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) event 6011 drivers/scsi/bfa/bfa_ioc.c bfa_trc(dconf->bfa, event); event 6013 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 6031 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(dconf->bfa, event); event 6037 drivers/scsi/bfa/bfa_ioc.c enum bfa_dconf_event event) event 6039 drivers/scsi/bfa/bfa_ioc.c bfa_trc(dconf->bfa, event); event 6041 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 6054 drivers/scsi/bfa/bfa_ioc.c bfa_sm_fault(dconf->bfa, event); event 6193 drivers/scsi/bfa/bfa_ioc.c bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event) event 6197 drivers/scsi/bfa/bfa_ioc.c bfa_trc(fru, event); event 6199 drivers/scsi/bfa/bfa_ioc.c switch (event) { event 927 drivers/scsi/bfa/bfa_ioc.h void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event); event 138 drivers/scsi/bfa/bfa_plog.h enum bfa_plog_eid event, u16 misc, char *log_str); event 140 drivers/scsi/bfa/bfa_plog.h enum bfa_plog_eid event, u16 misc, event 143 drivers/scsi/bfa/bfa_plog.h enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr); event 145 drivers/scsi/bfa/bfa_plog.h enum bfa_plog_eid event, u16 misc, event 409 drivers/scsi/bfa/bfa_port.c bfa_port_notify(void *arg, enum bfa_ioc_event_e event) event 413 drivers/scsi/bfa/bfa_port.c switch (event) { event 796 drivers/scsi/bfa/bfa_port.c bfa_cee_notify(void *arg, enum bfa_ioc_event_e event) event 800 drivers/scsi/bfa/bfa_port.c bfa_trc(cee, event); event 802 drivers/scsi/bfa/bfa_port.c switch (event) { event 49 drivers/scsi/bfa/bfa_port.h void bfa_port_notify(void *arg, enum bfa_ioc_event_e event); event 126 drivers/scsi/bfa/bfa_svc.c static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); event 127 drivers/scsi/bfa/bfa_svc.c static void bfa_lps_sm_login(struct 
bfa_lps_s *lps, enum bfa_lps_event event); event 129 drivers/scsi/bfa/bfa_svc.c event); event 130 drivers/scsi/bfa/bfa_svc.c static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); event 132 drivers/scsi/bfa/bfa_svc.c enum bfa_lps_event event); event 133 drivers/scsi/bfa/bfa_svc.c static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); event 135 drivers/scsi/bfa/bfa_svc.c event); event 147 drivers/scsi/bfa/bfa_svc.c enum bfa_port_linkstate event, bfa_boolean_t trunk); event 149 drivers/scsi/bfa/bfa_svc.c enum bfa_port_linkstate event); event 159 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 161 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 163 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 165 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 167 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 169 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 171 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 173 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 175 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 177 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 179 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 181 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 183 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 185 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 187 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event); event 190 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event); event 192 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event); event 194 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event); event 196 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event); event 198 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event); event 200 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event); event 202 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event); event 240 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 242 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 244 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 246 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 248 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 250 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 252 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 254 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 256 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 258 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 260 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 262 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 264 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event); event 322 drivers/scsi/bfa/bfa_svc.c enum bfa_plog_eid event, event 330 drivers/scsi/bfa/bfa_svc.c lp.eid = event; event 342 drivers/scsi/bfa/bfa_svc.c enum bfa_plog_eid event, event 354 drivers/scsi/bfa/bfa_svc.c lp.eid = event; event 369 drivers/scsi/bfa/bfa_svc.c enum bfa_plog_eid event, event 383 drivers/scsi/bfa/bfa_svc.c bfa_plog_intarr(plog, mid, event, misc, ints, 3); event 389 drivers/scsi/bfa/bfa_svc.c enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr, event 404 drivers/scsi/bfa/bfa_svc.c bfa_plog_intarr(plog, mid, event, 
misc, ints, 4); event 1169 drivers/scsi/bfa/bfa_svc.c bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) event 1172 drivers/scsi/bfa/bfa_svc.c bfa_trc(lps->bfa, event); event 1174 drivers/scsi/bfa/bfa_svc.c switch (event) { event 1219 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(lps->bfa, event); event 1227 drivers/scsi/bfa/bfa_svc.c bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) event 1230 drivers/scsi/bfa/bfa_svc.c bfa_trc(lps->bfa, event); event 1232 drivers/scsi/bfa/bfa_svc.c switch (event) { event 1273 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(lps->bfa, event); event 1281 drivers/scsi/bfa/bfa_svc.c bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) event 1284 drivers/scsi/bfa/bfa_svc.c bfa_trc(lps->bfa, event); event 1286 drivers/scsi/bfa/bfa_svc.c switch (event) { event 1307 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(lps->bfa, event); event 1315 drivers/scsi/bfa/bfa_svc.c bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) event 1318 drivers/scsi/bfa/bfa_svc.c bfa_trc(lps->bfa, event); event 1320 drivers/scsi/bfa/bfa_svc.c switch (event) { event 1356 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(lps->bfa, event); event 1364 drivers/scsi/bfa/bfa_svc.c bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event) event 1367 drivers/scsi/bfa/bfa_svc.c bfa_trc(lps->bfa, event); event 1369 drivers/scsi/bfa/bfa_svc.c switch (event) { event 1398 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(lps->bfa, event); event 1406 drivers/scsi/bfa/bfa_svc.c bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) event 1409 drivers/scsi/bfa/bfa_svc.c bfa_trc(lps->bfa, event); event 1411 drivers/scsi/bfa/bfa_svc.c switch (event) { event 1423 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(lps->bfa, event); event 1431 drivers/scsi/bfa/bfa_svc.c bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) event 1434 drivers/scsi/bfa/bfa_svc.c bfa_trc(lps->bfa, event); event 1436 drivers/scsi/bfa/bfa_svc.c switch (event) { event 1449 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(lps->bfa, event); event 2003 drivers/scsi/bfa/bfa_svc.c bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event) event 2017 drivers/scsi/bfa/bfa_svc.c BFA_AEN_CAT_PORT, event); event 2025 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2027 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2029 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2067 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2073 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2077 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2079 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2131 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2137 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2141 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2143 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2193 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2199 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2201 drivers/scsi/bfa/bfa_svc.c struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; event 2205 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2207 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2285 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2291 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2296 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, 
event); event 2298 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2384 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2390 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2392 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2394 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2435 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2441 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2443 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2445 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2482 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2488 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2492 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2494 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2537 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2543 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2547 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2549 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2594 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2600 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2602 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2604 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2626 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2628 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2630 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2652 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2654 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2656 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2674 drivers/scsi/bfa/bfa_svc.c bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) event 2676 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2678 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2701 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2707 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2709 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2711 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2735 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2741 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_sm_event event) event 2743 drivers/scsi/bfa/bfa_svc.c bfa_trc(fcport->bfa, event); event 2745 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2778 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(fcport->bfa, event); event 2787 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event) event 2789 drivers/scsi/bfa/bfa_svc.c bfa_trc(ln->fcport->bfa, event); event 2791 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2798 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(ln->fcport->bfa, event); event 2807 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event) event 2809 drivers/scsi/bfa/bfa_svc.c bfa_trc(ln->fcport->bfa, event); event 2811 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2821 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(ln->fcport->bfa, event); event 2830 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event) event 2832 drivers/scsi/bfa/bfa_svc.c bfa_trc(ln->fcport->bfa, event); event 2834 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2845 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(ln->fcport->bfa, event); event 2854 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event) event 2856 drivers/scsi/bfa/bfa_svc.c 
bfa_trc(ln->fcport->bfa, event); event 2858 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2865 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(ln->fcport->bfa, event); event 2874 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event) event 2876 drivers/scsi/bfa/bfa_svc.c bfa_trc(ln->fcport->bfa, event); event 2878 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2888 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(ln->fcport->bfa, event); event 2897 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event) event 2899 drivers/scsi/bfa/bfa_svc.c bfa_trc(ln->fcport->bfa, event); event 2901 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2912 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(ln->fcport->bfa, event); event 2921 drivers/scsi/bfa/bfa_svc.c enum bfa_fcport_ln_sm_event event) event 2923 drivers/scsi/bfa/bfa_svc.c bfa_trc(ln->fcport->bfa, event); event 2925 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2936 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(ln->fcport->bfa, event); event 2956 drivers/scsi/bfa/bfa_svc.c bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event, event 2962 drivers/scsi/bfa/bfa_svc.c switch (event) { event 2975 drivers/scsi/bfa/bfa_svc.c bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event) event 2980 drivers/scsi/bfa/bfa_svc.c fcport->event_cbfn(fcport->event_cbarg, event); event 2983 drivers/scsi/bfa/bfa_svc.c ln->ln_event = event; event 3102 drivers/scsi/bfa/bfa_svc.c struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; event 3609 drivers/scsi/bfa/bfa_svc.c if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) event 3612 drivers/scsi/bfa/bfa_svc.c if (i2hmsg.event->link_state.linkstate_rsn == event 3621 drivers/scsi/bfa/bfa_svc.c i2hmsg.event->link_state.qos_attr.qos_bw_op; event 3674 drivers/scsi/bfa/bfa_svc.c enum bfa_port_linkstate event), event 4312 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4315 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4317 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4325 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4330 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4333 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4335 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4357 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4365 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4368 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4370 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4394 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4402 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4405 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4407 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4435 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4443 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4448 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4450 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4503 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4511 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4514 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4516 
drivers/scsi/bfa/bfa_svc.c switch (event) { event 4536 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4541 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4544 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4546 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4566 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4574 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4577 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4579 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4605 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4613 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4616 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4618 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4632 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4637 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4640 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4642 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4657 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4666 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event) event 4669 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4671 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4688 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4697 drivers/scsi/bfa/bfa_svc.c enum bfa_rport_event event) event 4700 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4702 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4724 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 4732 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event) event 4735 drivers/scsi/bfa/bfa_svc.c bfa_trc(rp->bfa, event); event 4737 drivers/scsi/bfa/bfa_svc.c switch (event) { event 4762 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(rp->bfa, event); event 5668 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 5670 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 5672 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 5674 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 5676 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 5678 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 5680 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 5682 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 5684 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 5686 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event); event 6146 drivers/scsi/bfa/bfa_svc.c bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) event 6148 drivers/scsi/bfa/bfa_svc.c bfa_trc(dport->bfa, event); event 6150 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6180 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6186 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event) event 6188 drivers/scsi/bfa/bfa_svc.c bfa_trc(dport->bfa, event); event 6190 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6203 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6208 drivers/scsi/bfa/bfa_svc.c bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) event 6210 drivers/scsi/bfa/bfa_svc.c 
bfa_trc(dport->bfa, event); event 6212 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6237 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6242 drivers/scsi/bfa/bfa_svc.c bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) event 6244 drivers/scsi/bfa/bfa_svc.c bfa_trc(dport->bfa, event); event 6246 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6305 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6309 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6315 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event) event 6317 drivers/scsi/bfa/bfa_svc.c bfa_trc(dport->bfa, event); event 6319 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6336 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6341 drivers/scsi/bfa/bfa_svc.c bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) event 6343 drivers/scsi/bfa/bfa_svc.c bfa_trc(dport->bfa, event); event 6345 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6361 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6367 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event) event 6369 drivers/scsi/bfa/bfa_svc.c bfa_trc(dport->bfa, event); event 6371 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6384 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6389 drivers/scsi/bfa/bfa_svc.c bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) event 6391 drivers/scsi/bfa/bfa_svc.c bfa_trc(dport->bfa, event); event 6393 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6415 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6421 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event) event 6423 drivers/scsi/bfa/bfa_svc.c bfa_trc(dport->bfa, event); event 6425 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6436 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6447 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 6453 drivers/scsi/bfa/bfa_svc.c enum bfa_dport_sm_event event) event 6455 drivers/scsi/bfa/bfa_svc.c bfa_trc(dport->bfa, event); event 6457 drivers/scsi/bfa/bfa_svc.c switch (event) { event 6474 drivers/scsi/bfa/bfa_svc.c bfa_sm_fault(dport->bfa, event); event 484 drivers/scsi/bfa/bfa_svc.h enum bfa_port_linkstate event); event 544 drivers/scsi/bfa/bfa_svc.h enum bfa_port_linkstate event), void *event_cbarg); event 145 drivers/scsi/bfa/bfad.c bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event); event 147 drivers/scsi/bfa/bfad.c bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event); event 149 drivers/scsi/bfa/bfad.c bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event); event 151 drivers/scsi/bfa/bfad.c bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event); event 153 drivers/scsi/bfa/bfad.c bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event); event 155 drivers/scsi/bfa/bfad.c bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event); event 157 drivers/scsi/bfa/bfad.c bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); event 163 drivers/scsi/bfa/bfad.c bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event) event 165 drivers/scsi/bfa/bfad.c bfa_trc(bfad, event); event 167 drivers/scsi/bfa/bfad.c switch (event) { event 185 drivers/scsi/bfa/bfad.c bfa_sm_fault(bfad, event); event 193 drivers/scsi/bfa/bfad.c bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) event 198 drivers/scsi/bfa/bfad.c bfa_trc(bfad, event); event 200 
drivers/scsi/bfa/bfad.c switch (event) { event 264 drivers/scsi/bfa/bfad.c bfa_sm_fault(bfad, event); event 269 drivers/scsi/bfa/bfad.c bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event) event 274 drivers/scsi/bfa/bfad.c bfa_trc(bfad, event); event 276 drivers/scsi/bfa/bfad.c switch (event) { event 303 drivers/scsi/bfa/bfad.c bfa_sm_fault(bfad, event); event 308 drivers/scsi/bfa/bfad.c bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event) event 312 drivers/scsi/bfa/bfad.c bfa_trc(bfad, event); event 314 drivers/scsi/bfa/bfad.c switch (event) { event 334 drivers/scsi/bfa/bfad.c bfa_sm_fault(bfad, event); event 339 drivers/scsi/bfa/bfad.c bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event) event 341 drivers/scsi/bfa/bfad.c bfa_trc(bfad, event); event 343 drivers/scsi/bfa/bfad.c switch (event) { event 350 drivers/scsi/bfa/bfad.c bfa_sm_fault(bfad, event); event 355 drivers/scsi/bfa/bfad.c bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event) event 357 drivers/scsi/bfa/bfad.c bfa_trc(bfad, event); event 359 drivers/scsi/bfa/bfad.c switch (event) { event 366 drivers/scsi/bfa/bfad.c bfa_sm_fault(bfad, event); event 371 drivers/scsi/bfa/bfad.c bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event) event 373 drivers/scsi/bfa/bfad.c bfa_trc(bfad, event); event 375 drivers/scsi/bfa/bfad.c switch (event) { event 386 drivers/scsi/bfa/bfad.c bfa_sm_fault(bfad, event); event 832 drivers/scsi/bfa/bfi.h u8 event; event 326 drivers/scsi/bfa/bfi_ms.h struct bfi_fcport_event_s *event; event 550 drivers/scsi/bnx2fc/bnx2fc.h enum fc_rport_event event); event 859 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_indicate_netevent(void *context, unsigned long event, event 871 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (vlan_id != 0 && event != NETDEV_UNREGISTER) event 874 drivers/scsi/bnx2fc/bnx2fc_fcoe.c switch (event) { event 923 drivers/scsi/bnx2fc/bnx2fc_fcoe.c interface->netdev->name, event); event 441 drivers/scsi/bnx2fc/bnx2fc_tgt.c enum fc_rport_event event) event 452 drivers/scsi/bnx2fc/bnx2fc_tgt.c event, rdata->ids.port_id); event 453 drivers/scsi/bnx2fc/bnx2fc_tgt.c switch (event) { event 2529 drivers/scsi/bnx2i/bnx2i_hwi.c static void bnx2i_indicate_netevent(void *context, unsigned long event, event 2538 drivers/scsi/bnx2i/bnx2i_hwi.c switch (event) { event 1352 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port) event 1358 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c t3dev, cdev, event, port); event 1362 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c switch (event) { event 1018 drivers/scsi/cxlflash/ocxl_hw.c struct cxl_event event; event 1061 drivers/scsi/cxlflash/ocxl_hw.c memset(&event, 0, sizeof(event)); event 1062 drivers/scsi/cxlflash/ocxl_hw.c event.header.process_element = ctx->pe; event 1063 drivers/scsi/cxlflash/ocxl_hw.c event.header.size = sizeof(struct cxl_event_header); event 1066 drivers/scsi/cxlflash/ocxl_hw.c event.header.size += esize; event 1067 drivers/scsi/cxlflash/ocxl_hw.c event.header.type = CXL_EVENT_AFU_INTERRUPT; event 1071 drivers/scsi/cxlflash/ocxl_hw.c event.irq.irq = bit + 1; event 1075 drivers/scsi/cxlflash/ocxl_hw.c event.header.size += sizeof(struct cxl_event_data_storage); event 1076 drivers/scsi/cxlflash/ocxl_hw.c event.header.type = CXL_EVENT_DATA_STORAGE; event 1077 drivers/scsi/cxlflash/ocxl_hw.c event.fault.addr = ctx->fault_addr; event 1078 drivers/scsi/cxlflash/ocxl_hw.c event.fault.dsisr = ctx->fault_dsisr; event 1084 drivers/scsi/cxlflash/ocxl_hw.c if 
(copy_to_user(buf, &event, event.header.size)) { event 1090 drivers/scsi/cxlflash/ocxl_hw.c rc = event.header.size; event 127 drivers/scsi/esp_scsi.c p->event = esp->event; event 177 drivers/scsi/esp_scsi.c esp->event = val; event 195 drivers/scsi/esp_scsi.c p->sreg2, p->ireg, p->select_state, p->event); event 1701 drivers/scsi/esp_scsi.c esp->event, esp->sreg & ESP_STAT_PMASK); event 1702 drivers/scsi/esp_scsi.c switch (esp->event) { event 1995 drivers/scsi/esp_scsi.c if (esp->event == ESP_EVENT_RESET) event 1998 drivers/scsi/esp_scsi.c if (esp->event != ESP_EVENT_FREE_BUS) event 2029 drivers/scsi/esp_scsi.c "Unexpected event %x, resetting\n", esp->event); event 2141 drivers/scsi/esp_scsi.c esp->event != ESP_EVENT_STATUS && event 2142 drivers/scsi/esp_scsi.c esp->event != ESP_EVENT_DATA_DONE) || event 351 drivers/scsi/esp_scsi.h u8 event; event 490 drivers/scsi/esp_scsi.h u8 event; event 105 drivers/scsi/fcoe/fcoe.c ulong event, void *ptr); event 1793 drivers/scsi/fcoe/fcoe.c ulong event, void *ptr) event 1844 drivers/scsi/fcoe/fcoe.c ulong event, void *ptr) event 1868 drivers/scsi/fcoe/fcoe.c switch (event) { event 1903 drivers/scsi/fcoe/fcoe.c "from netdev netlink\n", event); event 2125 drivers/scsi/fcoe/fcoe_ctlr.c enum fc_rport_event event) event 2131 drivers/scsi/fcoe/fcoe_ctlr.c rdata->ids.port_id, event); event 2134 drivers/scsi/fcoe/fcoe_ctlr.c switch (event) { event 31 drivers/scsi/fcoe/fcoe_transport.c ulong event, void *ptr); event 731 drivers/scsi/fcoe/fcoe_transport.c ulong event, void *ptr) event 735 drivers/scsi/fcoe/fcoe_transport.c switch (event) { event 213 drivers/scsi/fnic/fnic.h enum fnic_evt event; event 275 drivers/scsi/fnic/fnic_fcs.c switch (fevt->event) { event 288 drivers/scsi/fnic/fnic_fcs.c "Unknown event 0x%x\n", fevt->event); event 558 drivers/scsi/fnic/fnic_fcs.c fevt->event = ev; event 590 drivers/scsi/gdth.c writeb(0, &dp6_ptr->io.event); event 619 drivers/scsi/gdth.c writeb(0, &dp6_ptr->io.event); event 1050 drivers/scsi/gdth.c writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event); event 3460 drivers/scsi/gdth.c if (evt.event.event_source == ES_TEST) event 3461 drivers/scsi/gdth.c evt.event.event_data.size=sizeof(evt.event.event_data.eu.test); event 3462 drivers/scsi/gdth.c else if (evt.event.event_source == ES_DRIVER) event 3463 drivers/scsi/gdth.c evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver); event 3464 drivers/scsi/gdth.c else if (evt.event.event_source == ES_SYNC) event 3465 drivers/scsi/gdth.c evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync); event 3467 drivers/scsi/gdth.c evt.event.event_data.size=sizeof(evt.event.event_data.eu.async); event 3469 drivers/scsi/gdth.c gdth_store_event(ha, evt.event.event_source, evt.event.event_idx, event 3470 drivers/scsi/gdth.c &evt.event.event_data); event 3475 drivers/scsi/gdth.c evt.handle = gdth_read_event(ha, evt.handle, &evt.event); event 3477 drivers/scsi/gdth.c gdth_readapp_event(ha, evt.erase, &evt.event); event 4250 drivers/scsi/gdth.c static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf) event 4254 drivers/scsi/gdth.c TRACE2(("gdth_halt() event %d\n", (int)event)); event 4255 drivers/scsi/gdth.c if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) event 708 drivers/scsi/gdth.h u8 event; /* release event */ event 729 drivers/scsi/gdth.h u8 event; /* release event */ event 228 drivers/scsi/gdth_ioctl.h gdth_evt_str event; event 590 drivers/scsi/hisi_sas/hisi_sas.h enum hisi_sas_phy_event event); event 880 
drivers/scsi/hisi_sas/hisi_sas_main.c enum hisi_sas_phy_event event) event 884 drivers/scsi/hisi_sas/hisi_sas_main.c if (WARN_ON(event >= HISI_PHYES_NUM)) event 887 drivers/scsi/hisi_sas/hisi_sas_main.c return queue_work(hisi_hba->wq, &phy->works[event]); event 2629 drivers/scsi/ibmvscsi/ibmvfc.c const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); event 2637 drivers/scsi/ibmvscsi/ibmvfc.c switch (be64_to_cpu(crq->event)) { event 2686 drivers/scsi/ibmvscsi/ibmvfc.c if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) event 2688 drivers/scsi/ibmvscsi/ibmvfc.c if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { event 2705 drivers/scsi/ibmvscsi/ibmvfc.c dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event); event 568 drivers/scsi/ibmvscsi/ibmvfc.h volatile __be64 event; event 10801 drivers/scsi/ipr.c static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) event 10807 drivers/scsi/ipr.c if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) event 10815 drivers/scsi/ipr.c (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { event 398 drivers/scsi/ips.c static int ips_halt(struct notifier_block *nb, ulong event, void *buf); event 716 drivers/scsi/ips.c ips_halt(struct notifier_block *nb, ulong event, void *buf) event 722 drivers/scsi/ips.c if ((event != SYS_RESTART) && (event != SYS_HALT) && event 723 drivers/scsi/ips.c (event != SYS_POWER_OFF)) event 255 drivers/scsi/libfc/fc_disc.c static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) event 290 drivers/scsi/libfc/fc_disc.c disc->disc_callback(lport, event); event 502 drivers/scsi/libfc/fc_disc.c enum fc_disc_event event = DISC_EV_NONE; event 525 drivers/scsi/libfc/fc_disc.c event = DISC_EV_FAILED; event 535 drivers/scsi/libfc/fc_disc.c event = DISC_EV_FAILED; event 538 drivers/scsi/libfc/fc_disc.c event = DISC_EV_SUCCESS; event 542 drivers/scsi/libfc/fc_disc.c event = DISC_EV_FAILED; event 550 drivers/scsi/libfc/fc_disc.c event = DISC_EV_FAILED; event 554 drivers/scsi/libfc/fc_disc.c else if (event != DISC_EV_NONE) event 555 drivers/scsi/libfc/fc_disc.c fc_disc_done(disc, event); event 170 drivers/scsi/libfc/fc_lport.c enum fc_rport_event event) event 172 drivers/scsi/libfc/fc_lport.c FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event, event 176 drivers/scsi/libfc/fc_lport.c switch (event) { event 685 drivers/scsi/libfc/fc_lport.c enum fc_disc_event event) event 687 drivers/scsi/libfc/fc_lport.c switch (event) { event 154 drivers/scsi/libfc/fc_rport.c rdata->event = RPORT_EV_NONE; event 262 drivers/scsi/libfc/fc_rport.c enum fc_rport_event event; event 271 drivers/scsi/libfc/fc_rport.c event = rdata->event; event 275 drivers/scsi/libfc/fc_rport.c FC_RPORT_DBG(rdata, "work event %u\n", event); event 277 drivers/scsi/libfc/fc_rport.c switch (event) { event 280 drivers/scsi/libfc/fc_rport.c rdata->event = RPORT_EV_NONE; event 311 drivers/scsi/libfc/fc_rport.c FC_RPORT_DBG(rdata, "callback ev %d\n", event); event 312 drivers/scsi/libfc/fc_rport.c rport_ops->event_callback(lport, rdata, event); event 315 drivers/scsi/libfc/fc_rport.c FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); event 316 drivers/scsi/libfc/fc_rport.c rdata->lld_event_callback(lport, rdata, event); event 337 drivers/scsi/libfc/fc_rport.c FC_RPORT_DBG(rdata, "callback ev %d\n", event); event 338 drivers/scsi/libfc/fc_rport.c rport_ops->event_callback(lport, rdata, event); event 341 drivers/scsi/libfc/fc_rport.c 
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); event 342 drivers/scsi/libfc/fc_rport.c rdata->lld_event_callback(lport, rdata, event); event 365 drivers/scsi/libfc/fc_rport.c rdata->event = RPORT_EV_NONE; event 372 drivers/scsi/libfc/fc_rport.c rdata->event = RPORT_EV_NONE; event 388 drivers/scsi/libfc/fc_rport.c rdata->event = RPORT_EV_NONE; event 473 drivers/scsi/libfc/fc_rport.c enum fc_rport_event event) event 485 drivers/scsi/libfc/fc_rport.c if (rdata->event == RPORT_EV_NONE && event 489 drivers/scsi/libfc/fc_rport.c rdata->event = event; event 551 drivers/scsi/libfc/fc_rport.c if (rdata->event == RPORT_EV_NONE && event 555 drivers/scsi/libfc/fc_rport.c rdata->event = RPORT_EV_READY; event 670 drivers/scsi/libsas/sas_ata.c if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND) event 692 drivers/scsi/libsas/sas_ata.c if (sata->ap->pm_mesg.event == PM_EVENT_ON) event 541 drivers/scsi/libsas/sas_discover.c static void sas_chain_event(int event, unsigned long *pending, event 545 drivers/scsi/libsas/sas_discover.c if (!test_and_set_bit(event, pending)) { event 31 drivers/scsi/libsas/sas_event.c static int sas_queue_event(int event, struct sas_work *work, event 122 drivers/scsi/libsas/sas_event.c sas_port_event_fns[ev->event](work); event 130 drivers/scsi/libsas/sas_event.c sas_phy_event_fns[ev->event](work); event 134 drivers/scsi/libsas/sas_event.c static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event) event 140 drivers/scsi/libsas/sas_event.c BUG_ON(event >= PORT_NUM_EVENTS); event 146 drivers/scsi/libsas/sas_event.c INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event); event 148 drivers/scsi/libsas/sas_event.c ret = sas_queue_event(event, &ev->work, ha); event 155 drivers/scsi/libsas/sas_event.c int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) event 161 drivers/scsi/libsas/sas_event.c BUG_ON(event >= PHY_NUM_EVENTS); event 167 drivers/scsi/libsas/sas_event.c INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event); event 169 drivers/scsi/libsas/sas_event.c ret = sas_queue_event(event, &ev->work, ha); event 596 drivers/scsi/libsas/sas_init.c struct asd_sas_event *event; event 602 drivers/scsi/libsas/sas_init.c event = kmem_cache_zalloc(sas_event_cache, flags); event 603 drivers/scsi/libsas/sas_init.c if (!event) event 618 drivers/scsi/libsas/sas_init.c kmem_cache_free(sas_event_cache, event); event 620 drivers/scsi/libsas/sas_init.c event = NULL; event 624 drivers/scsi/libsas/sas_init.c return event; event 627 drivers/scsi/libsas/sas_init.c void sas_free_event(struct asd_sas_event *event) event 629 drivers/scsi/libsas/sas_init.c struct asd_sas_phy *phy = event->phy; event 631 drivers/scsi/libsas/sas_init.c kmem_cache_free(sas_event_cache, event); event 52 drivers/scsi/libsas/sas_internal.h void sas_free_event(struct asd_sas_event *event); event 80 drivers/scsi/libsas/sas_internal.h int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event); event 3160 drivers/scsi/lpfc/lpfc_els.c } *event; event 3162 drivers/scsi/lpfc/lpfc_els.c uint16_t cmdsize = sizeof(*event); event 3203 drivers/scsi/lpfc/lpfc_els.c event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; event 3205 drivers/scsi/lpfc/lpfc_els.c event->rscn.rscn_cmd = ELS_RSCN; event 3206 drivers/scsi/lpfc/lpfc_els.c event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); event 3207 drivers/scsi/lpfc/lpfc_els.c event->rscn.rscn_plen = cpu_to_be16(cmdsize); event 3211 drivers/scsi/lpfc/lpfc_els.c event->portid.rscn_page_flags = 0; event 3212 drivers/scsi/lpfc/lpfc_els.c 
event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; event 3213 drivers/scsi/lpfc/lpfc_els.c event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; event 3214 drivers/scsi/lpfc/lpfc_els.c event->portid.rscn_fid[2] = nportid & 0x000000FF; event 1760 drivers/scsi/mesh.c switch (mesg.event) { event 1018 drivers/scsi/mpt3sas/mpt3sas_base.c u16 event; event 1023 drivers/scsi/mpt3sas/mpt3sas_base.c event = le16_to_cpu(mpi_reply->Event); event 1025 drivers/scsi/mpt3sas/mpt3sas_base.c switch (event) { event 6437 drivers/scsi/mpt3sas/mpt3sas_base.c _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event) event 6441 drivers/scsi/mpt3sas/mpt3sas_base.c if (event >= 128) event 6444 drivers/scsi/mpt3sas/mpt3sas_base.c desired_event = (1 << (event % 32)); event 6446 drivers/scsi/mpt3sas/mpt3sas_base.c if (event < 32) event 6448 drivers/scsi/mpt3sas/mpt3sas_base.c else if (event < 64) event 6450 drivers/scsi/mpt3sas/mpt3sas_base.c else if (event < 96) event 6452 drivers/scsi/mpt3sas/mpt3sas_base.c else if (event < 128) event 1728 drivers/scsi/mpt3sas/mpt3sas_base.h void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, event 315 drivers/scsi/mpt3sas/mpt3sas_ctl.c _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event) event 320 drivers/scsi/mpt3sas/mpt3sas_ctl.c if (event >= 128 || !event || !ioc->event_log) event 323 drivers/scsi/mpt3sas/mpt3sas_ctl.c desired_event = (1 << (event % 32)); event 326 drivers/scsi/mpt3sas/mpt3sas_ctl.c i = event / 32; event 340 drivers/scsi/mpt3sas/mpt3sas_ctl.c u16 event; event 348 drivers/scsi/mpt3sas/mpt3sas_ctl.c event = le16_to_cpu(mpi_reply->Event); event 350 drivers/scsi/mpt3sas/mpt3sas_ctl.c if (_ctl_check_event_type(ioc, event)) { event 355 drivers/scsi/mpt3sas/mpt3sas_ctl.c event_log[i].event = event; event 369 drivers/scsi/mpt3sas/mpt3sas_ctl.c if (event == MPI2_EVENT_LOG_ENTRY_ADDED || event 221 drivers/scsi/mpt3sas/mpt3sas_ctl.h uint32_t event; event 208 drivers/scsi/mpt3sas/mpt3sas_scsih.c u16 event; event 3186 drivers/scsi/mpt3sas/mpt3sas_scsih.c fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; event 3207 drivers/scsi/mpt3sas/mpt3sas_scsih.c fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES; event 3225 drivers/scsi/mpt3sas/mpt3sas_scsih.c fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE; event 3992 drivers/scsi/mpt3sas/mpt3sas_scsih.c _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event, event 4009 drivers/scsi/mpt3sas/mpt3sas_scsih.c le16_to_cpu(event), smid, ioc->base_cb_idx)); event 4013 drivers/scsi/mpt3sas/mpt3sas_scsih.c ack_request->Event = event; event 4217 drivers/scsi/mpt3sas/mpt3sas_scsih.c if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || event 4288 drivers/scsi/mpt3sas/mpt3sas_scsih.c if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST || event 5117 drivers/scsi/mpt3sas/mpt3sas_scsih.c fw_event->event = MPT3SAS_TURN_ON_PFA_LED; event 9341 drivers/scsi/mpt3sas/mpt3sas_scsih.c switch (fw_event->event) { event 9457 drivers/scsi/mpt3sas/mpt3sas_scsih.c u16 event; event 9473 drivers/scsi/mpt3sas/mpt3sas_scsih.c event = le16_to_cpu(mpi_reply->Event); event 9475 drivers/scsi/mpt3sas/mpt3sas_scsih.c if (event != MPI2_EVENT_LOG_ENTRY_ADDED) event 9476 drivers/scsi/mpt3sas/mpt3sas_scsih.c mpt3sas_trigger_event(ioc, event, 0); event 9478 drivers/scsi/mpt3sas/mpt3sas_scsih.c switch (event) { event 9609 drivers/scsi/mpt3sas/mpt3sas_scsih.c fw_event->event = event; event 216 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, event 
243 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c __func__, event, log_entry_qualifier)); event 255 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c if (event_trigger->EventValue != event) event 257 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c if (event == MPI2_EVENT_LOG_ENTRY_ADDED) { event 279 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c event_data.u.event.EventValue = event; event 280 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c event_data.u.event.LogEntryQualifier = log_entry_qualifier; event 189 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h struct SL_WH_EVENT_TRIGGER_T event; event 1736 drivers/scsi/mvumi.c mvumi_get_event(mu_ev->mhba, mu_ev->event); event 1756 drivers/scsi/mvumi.c mu_ev->event = APICDB1_EVENT_GETEVENT; event 158 drivers/scsi/mvumi.h unsigned int event; event 285 drivers/scsi/myrb.c static void myrb_get_event(struct myrb_hba *cb, unsigned int event) event 304 drivers/scsi/myrb.c mbox->type3E.ev_seq = event; event 310 drivers/scsi/myrb.c event, status); event 312 drivers/scsi/myrb.c else if (ev_buf->seq_num == event) { event 2423 drivers/scsi/myrb.c int event = cb->old_ev_seq; event 2427 drivers/scsi/myrb.c cb->new_ev_seq, event); event 2428 drivers/scsi/myrb.c myrb_get_event(cb, event); event 2429 drivers/scsi/myrb.c cb->old_ev_seq = event + 1; event 2114 drivers/scsi/pm8001/pm8001_hwi.c u32 event = le32_to_cpu(psspPayload->event); event 2122 drivers/scsi/pm8001/pm8001_hwi.c if (event) event 2124 drivers/scsi/pm8001/pm8001_hwi.c pm8001_printk("sas IO status 0x%x\n", event)); event 2131 drivers/scsi/pm8001/pm8001_hwi.c switch (event) { event 2267 drivers/scsi/pm8001/pm8001_hwi.c pm8001_printk("Unknown status 0x%x\n", event)); event 2282 drivers/scsi/pm8001/pm8001_hwi.c t, event, ts->resp, ts->stat)); event 2688 drivers/scsi/pm8001/pm8001_hwi.c u32 event = le32_to_cpu(psataPayload->event); event 2703 drivers/scsi/pm8001/pm8001_hwi.c if (event) event 2705 drivers/scsi/pm8001/pm8001_hwi.c pm8001_printk("SATA EVENT 0x%x\n", event)); event 2708 drivers/scsi/pm8001/pm8001_hwi.c if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { event 2720 drivers/scsi/pm8001/pm8001_hwi.c if (event) event 2722 drivers/scsi/pm8001/pm8001_hwi.c pm8001_printk("sata IO status 0x%x\n", event)); event 2728 drivers/scsi/pm8001/pm8001_hwi.c port_id, dev_id, tag, event)); event 2729 drivers/scsi/pm8001/pm8001_hwi.c switch (event) { event 2876 drivers/scsi/pm8001/pm8001_hwi.c pm8001_printk("Unknown status 0x%x\n", event)); event 2891 drivers/scsi/pm8001/pm8001_hwi.c t, event, ts->resp, ts->stat)); event 356 drivers/scsi/pm8001/pm8001_hwi.h __le32 event; event 369 drivers/scsi/pm8001/pm8001_hwi.h __le32 event; event 1816 drivers/scsi/pm8001/pm80xx_hwi.c u32 event = le32_to_cpu(psspPayload->event); event 1823 drivers/scsi/pm8001/pm80xx_hwi.c if (event) event 1825 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_printk("sas IO status 0x%x\n", event)); event 1831 drivers/scsi/pm8001/pm80xx_hwi.c port_id, tag, event)); event 1832 drivers/scsi/pm8001/pm80xx_hwi.c switch (event) { event 1978 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_printk("Unknown status 0x%x\n", event)); event 1993 drivers/scsi/pm8001/pm80xx_hwi.c t, event, ts->resp, ts->stat)); event 2403 drivers/scsi/pm8001/pm80xx_hwi.c u32 event = le32_to_cpu(psataPayload->event); event 2419 drivers/scsi/pm8001/pm80xx_hwi.c if (event) event 2421 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_printk("SATA EVENT 0x%x\n", event)); event 2424 drivers/scsi/pm8001/pm80xx_hwi.c if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { event 2442 drivers/scsi/pm8001/pm80xx_hwi.c port_id, tag, event)); 
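The pm8001/pm80xx entries indexed just above all follow one recurring shape: the completion path pulls a little-endian `event` word out of the firmware payload (`le32_to_cpu(psataPayload->event)`), traces it, and dispatches on it with a `switch` whose default branch logs an unknown status. The fragment below is a minimal sketch of that decode-and-dispatch step only; the `sample_*` names, the payload layout, and the event codes are illustrative assumptions, not the driver's actual definitions.

```c
#include <linux/types.h>
#include <linux/printk.h>
#include <asm/byteorder.h>

/* Hypothetical completion payload; the real pm80xx layout differs. */
struct sample_sata_event_payload {
	__le32 tag;
	__le32 event;	/* event code, little-endian on the wire */
	__le32 port_id;
};

/* Decode the event word and dispatch, mirroring the switch-with-default
 * pattern seen in the pm8001/pm80xx entries above. */
static void sample_sata_event_handler(const struct sample_sata_event_payload *p)
{
	u32 event = le32_to_cpu(p->event);

	switch (event) {
	case 0x01:	/* placeholder: e.g. transfer aborted */
		pr_debug("sample: transfer aborted, tag %u\n",
			 le32_to_cpu(p->tag));
		break;
	case 0x02:	/* placeholder: e.g. NCQ error */
		pr_debug("sample: NCQ error on port %u\n",
			 le32_to_cpu(p->port_id));
		break;
	default:
		pr_warn("sample: unknown SATA event 0x%x\n", event);
		break;
	}
}
```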
event 2443 drivers/scsi/pm8001/pm80xx_hwi.c switch (event) { event 2607 drivers/scsi/pm8001/pm80xx_hwi.c pm8001_printk("Unknown status 0x%x\n", event)); event 2622 drivers/scsi/pm8001/pm80xx_hwi.c t, event, ts->resp, ts->stat)); event 570 drivers/scsi/pm8001/pm80xx_hwi.h __le32 event; event 593 drivers/scsi/pm8001/pm80xx_hwi.h __le32 event; event 135 drivers/scsi/qedf/qedf.h enum qedf_ioreq_event event; event 188 drivers/scsi/qedf/qedf_els.c if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO && event 189 drivers/scsi/qedf/qedf_els.c rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) event 212 drivers/scsi/qedf/qedf_els.c if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO) event 426 drivers/scsi/qedf/qedf_els.c if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) { event 443 drivers/scsi/qedf/qedf_els.c if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) { event 550 drivers/scsi/qedf/qedf_els.c if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO && event 551 drivers/scsi/qedf/qedf_els.c srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) event 560 drivers/scsi/qedf/qedf_els.c if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) { event 737 drivers/scsi/qedf/qedf_els.c if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) { event 739 drivers/scsi/qedf/qedf_els.c "cqe is NULL or timeout event (0x%x)", io_req->event); event 849 drivers/scsi/qedf/qedf_els.c if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO && event 850 drivers/scsi/qedf/qedf_els.c rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) event 859 drivers/scsi/qedf/qedf_els.c if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) { event 89 drivers/scsi/qedf/qedf_io.c io_req->event = QEDF_IOREQ_EV_ELS_TMO; event 103 drivers/scsi/qedf/qedf_io.c io_req->event = QEDF_IOREQ_EV_ELS_TMO; event 1547 drivers/scsi/qedf/qedf_io.c els_req->event = QEDF_IOREQ_EV_ELS_FLUSH; event 1981 drivers/scsi/qedf/qedf_io.c io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS; event 2001 drivers/scsi/qedf/qedf_io.c io_req->event = QEDF_IOREQ_EV_ABORT_FAILED; event 2249 drivers/scsi/qedf/qedf_io.c io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS; event 2251 drivers/scsi/qedf/qedf_io.c io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED; event 762 drivers/scsi/qedf/qedf_main.c if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS || event 763 drivers/scsi/qedf/qedf_main.c io_req->event == QEDF_IOREQ_EV_ABORT_FAILED || event 764 drivers/scsi/qedf/qedf_main.c io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) { event 1388 drivers/scsi/qedf/qedf_main.c enum fc_rport_event event) event 1399 drivers/scsi/qedf/qedf_main.c "port_id = 0x%x\n", event, rdata->ids.port_id); event 1401 drivers/scsi/qedf/qedf_main.c switch (event) { event 770 drivers/scsi/qla1280.h uint8_t event; event 312 drivers/scsi/qla2xxx/qla_isr.c static char *event[] = event 334 drivers/scsi/qla2xxx/qla_isr.c event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], event 351 drivers/scsi/qla2xxx/qla_isr.c vha->host_no, event[aen & 0xff], timeout); event 365 drivers/scsi/qla2xxx/qla_isr.c vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); event 533 drivers/scsi/scsi_transport_fc.c struct fc_nl_event *event; event 546 drivers/scsi/scsi_transport_fc.c len = FC_NL_MSGALIGN(sizeof(*event) + data_len); event 559 drivers/scsi/scsi_transport_fc.c event = nlmsg_data(nlh); event 561 drivers/scsi/scsi_transport_fc.c INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, event 563 drivers/scsi/scsi_transport_fc.c event->seconds = ktime_get_real_seconds(); event 564 drivers/scsi/scsi_transport_fc.c event->vendor_id = vendor_id; event 565 drivers/scsi/scsi_transport_fc.c event->host_no = shost->host_no; event 566 
drivers/scsi/scsi_transport_fc.c event->event_datalen = data_len; /* bytes */ event 567 drivers/scsi/scsi_transport_fc.c event->event_num = event_number; event 568 drivers/scsi/scsi_transport_fc.c event->event_code = event_code; event 570 drivers/scsi/scsi_transport_fc.c memcpy(&event->event_data, data_buf, data_len); event 2629 drivers/scsi/scsi_transport_iscsi.c enum iscsi_uevent_e event) event 2647 drivers/scsi/scsi_transport_iscsi.c "event %u\n", event); event 2655 drivers/scsi/scsi_transport_iscsi.c ev->type = event; event 2656 drivers/scsi/scsi_transport_iscsi.c switch (event) { event 2671 drivers/scsi/scsi_transport_iscsi.c "%u.\n", event); event 2685 drivers/scsi/scsi_transport_iscsi.c event); event 2688 drivers/scsi/scsi_transport_iscsi.c event, rc); event 3051 drivers/scsi/smartpqi/smartpqi_init.c struct pqi_event *event) event 3060 drivers/scsi/smartpqi/smartpqi_init.c request.event_type = event->event_type; event 3061 drivers/scsi/smartpqi/smartpqi_init.c request.event_id = event->event_id; event 3062 drivers/scsi/smartpqi/smartpqi_init.c request.additional_event_id = event->additional_event_id; event 3133 drivers/scsi/smartpqi/smartpqi_init.c struct pqi_event *event) event 3138 drivers/scsi/smartpqi/smartpqi_init.c event_id = get_unaligned_le16(&event->event_id); event 3147 drivers/scsi/smartpqi/smartpqi_init.c pqi_acknowledge_event(ctrl_info, event); event 3157 drivers/scsi/smartpqi/smartpqi_init.c pqi_acknowledge_event(ctrl_info, event); event 3159 drivers/scsi/smartpqi/smartpqi_init.c le32_to_cpu(event->ofa_bytes_requested)); event 3163 drivers/scsi/smartpqi/smartpqi_init.c pqi_acknowledge_event(ctrl_info, event); event 3166 drivers/scsi/smartpqi/smartpqi_init.c ctrl_info->ctrl_id, event->ofa_cancel_reason); event 3176 drivers/scsi/smartpqi/smartpqi_init.c struct pqi_event *event; event 3187 drivers/scsi/smartpqi/smartpqi_init.c event = ctrl_info->events; event 3189 drivers/scsi/smartpqi/smartpqi_init.c if (event->pending) { event 3190 drivers/scsi/smartpqi/smartpqi_init.c event->pending = false; event 3191 drivers/scsi/smartpqi/smartpqi_init.c if (event->event_type == PQI_EVENT_TYPE_OFA) { event 3193 drivers/scsi/smartpqi/smartpqi_init.c pqi_ofa_process_event(ctrl_info, event); event 3196 drivers/scsi/smartpqi/smartpqi_init.c pqi_acknowledge_event(ctrl_info, event); event 3198 drivers/scsi/smartpqi/smartpqi_init.c event++; event 3274 drivers/scsi/smartpqi/smartpqi_init.c static void pqi_ofa_capture_event_payload(struct pqi_event *event, event 3279 drivers/scsi/smartpqi/smartpqi_init.c event_id = get_unaligned_le16(&event->event_id); event 3281 drivers/scsi/smartpqi/smartpqi_init.c if (event->event_type == PQI_EVENT_TYPE_OFA) { event 3283 drivers/scsi/smartpqi/smartpqi_init.c event->ofa_bytes_requested = event 3286 drivers/scsi/smartpqi/smartpqi_init.c event->ofa_cancel_reason = event 3299 drivers/scsi/smartpqi/smartpqi_init.c struct pqi_event *event; event 3320 drivers/scsi/smartpqi/smartpqi_init.c event = &ctrl_info->events[event_index]; event 3321 drivers/scsi/smartpqi/smartpqi_init.c event->pending = true; event 3322 drivers/scsi/smartpqi/smartpqi_init.c event->event_type = response->event_type; event 3323 drivers/scsi/smartpqi/smartpqi_init.c event->event_id = response->event_id; event 3324 drivers/scsi/smartpqi/smartpqi_init.c event->additional_event_id = event 3326 drivers/scsi/smartpqi/smartpqi_init.c pqi_ofa_capture_event_payload(event, response); event 8039 drivers/scsi/smartpqi/smartpqi_init.c if (state.event == PM_EVENT_FREEZE) event 362 drivers/scsi/stex.c 
static int stex_halt(struct notifier_block *nb, ulong event, void *buf); event 1964 drivers/scsi/stex.c switch (state.event) { event 1996 drivers/scsi/stex.c static int stex_halt(struct notifier_block *nb, unsigned long event, void *buf) event 59 drivers/scsi/virtio_scsi.c struct virtio_scsi_event event; event 238 drivers/scsi/virtio_scsi.c sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); event 278 drivers/scsi/virtio_scsi.c struct virtio_scsi_event *event) event 282 drivers/scsi/virtio_scsi.c unsigned int target = event->lun[1]; event 283 drivers/scsi/virtio_scsi.c unsigned int lun = (event->lun[2] << 8) | event->lun[3]; event 285 drivers/scsi/virtio_scsi.c switch (virtio32_to_cpu(vscsi->vdev, event->reason)) { event 300 drivers/scsi/virtio_scsi.c pr_info("Unsupport virtio scsi event reason %x\n", event->reason); event 305 drivers/scsi/virtio_scsi.c struct virtio_scsi_event *event) event 309 drivers/scsi/virtio_scsi.c unsigned int target = event->lun[1]; event 310 drivers/scsi/virtio_scsi.c unsigned int lun = (event->lun[2] << 8) | event->lun[3]; event 311 drivers/scsi/virtio_scsi.c u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255; event 312 drivers/scsi/virtio_scsi.c u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8; event 364 drivers/scsi/virtio_scsi.c struct virtio_scsi_event *event = &event_node->event; event 366 drivers/scsi/virtio_scsi.c if (event->event & event 368 drivers/scsi/virtio_scsi.c event->event &= ~cpu_to_virtio32(vscsi->vdev, event 374 drivers/scsi/virtio_scsi.c switch (virtio32_to_cpu(vscsi->vdev, event->event)) { event 378 drivers/scsi/virtio_scsi.c virtscsi_handle_transport_reset(vscsi, event); event 381 drivers/scsi/virtio_scsi.c virtscsi_handle_param_change(vscsi, event); event 384 drivers/scsi/virtio_scsi.c pr_err("Unsupport virtio scsi event %x\n", event->event); event 165 drivers/soc/mediatek/mtk-cmdq-helper.c int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event) event 169 drivers/soc/mediatek/mtk-cmdq-helper.c if (event >= CMDQ_MAX_EVENT) event 181 drivers/soc/mediatek/mtk-cmdq-helper.c return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b); event 185 drivers/soc/mediatek/mtk-cmdq-helper.c int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) event 187 drivers/soc/mediatek/mtk-cmdq-helper.c if (event >= CMDQ_MAX_EVENT) event 190 drivers/soc/mediatek/mtk-cmdq-helper.c return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, event 84 drivers/soc/qcom/glink_ssr.c static int qcom_glink_ssr_notify(struct notifier_block *nb, unsigned long event, event 79 drivers/soc/qcom/qcom_aoss.c wait_queue_head_t event; event 153 drivers/soc/qcom/qcom_aoss.c ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ); event 163 drivers/soc/qcom/qcom_aoss.c ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ); event 174 drivers/soc/qcom/qcom_aoss.c ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ); event 203 drivers/soc/qcom/qcom_aoss.c wake_up_interruptible_all(&qmp->event); event 244 drivers/soc/qcom/qcom_aoss.c time_left = wait_event_interruptible_timeout(qmp->event, event 526 drivers/soc/qcom/qcom_aoss.c init_waitqueue_head(&qmp->event); event 1867 drivers/soc/tegra/pmc.c const struct tegra_wake_event *event = &soc->wake_events[i]; event 1872 drivers/soc/tegra/pmc.c if (event->id != fwspec->param[0]) event 1876 drivers/soc/tegra/pmc.c event->id, event 1884 drivers/soc/tegra/pmc.c spec.param[1] = event->irq; event 1894 drivers/soc/tegra/pmc.c if (event->gpio.instance != fwspec->param[0] || 
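Several of the SCSI drivers indexed here (gdth, ips, ipr, stex) register the same kind of reboot notifier: a callback with the `(struct notifier_block *nb, unsigned long event, void *buf)` signature that returns early unless `event` is SYS_RESTART, SYS_HALT, or SYS_POWER_OFF. The module below is a minimal, hypothetical sketch of that pattern; the `sample_*` names and the quiesce hook are placeholders, not code from any of the drivers listed.

```c
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Hypothetical quiesce hook standing in for a driver's shutdown work. */
static void sample_quiesce_controller(void)
{
	pr_info("sample: flushing controller before halt\n");
}

/* Same shape as the gdth/ips/ipr/stex halt notifiers indexed above:
 * ignore every event except restart, halt and power-off. */
static int sample_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	sample_quiesce_controller();
	return NOTIFY_OK;
}

static struct notifier_block sample_halt_notifier = {
	.notifier_call = sample_halt,
};

static int __init sample_init(void)
{
	return register_reboot_notifier(&sample_halt_notifier);
}

static void __exit sample_exit(void)
{
	unregister_reboot_notifier(&sample_halt_notifier);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");
```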
event 1895 drivers/soc/tegra/pmc.c event->gpio.pin != fwspec->param[1]) event 1899 drivers/soc/tegra/pmc.c event->id, event 194 drivers/spi/spi-fsl-cpm.c mpc8xxx_spi_write_reg(®_base->event, events); event 512 drivers/spi/spi-fsl-spi.c mpc8xxx_spi_read_reg(®_base->event)) & event 517 drivers/spi/spi-fsl-spi.c mpc8xxx_spi_write_reg(®_base->event, events); event 537 drivers/spi/spi-fsl-spi.c events = mpc8xxx_spi_read_reg(®_base->event); event 659 drivers/spi/spi-fsl-spi.c mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); event 27 drivers/spi/spi-fsl-spi.h __be32 event; event 73 drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event, event 90 drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c switch (event) { event 1153 drivers/staging/fsl-dpaa2/ethsw/ethsw.c unsigned long event, void *ptr) event 1164 drivers/staging/fsl-dpaa2/ethsw/ethsw.c if (event == NETDEV_CHANGEUPPER) { event 1185 drivers/staging/fsl-dpaa2/ethsw/ethsw.c unsigned long event; event 1199 drivers/staging/fsl-dpaa2/ethsw/ethsw.c switch (switchdev_work->event) { event 1233 drivers/staging/fsl-dpaa2/ethsw/ethsw.c unsigned long event, void *ptr) event 1242 drivers/staging/fsl-dpaa2/ethsw/ethsw.c if (event == SWITCHDEV_PORT_ATTR_SET) event 1251 drivers/staging/fsl-dpaa2/ethsw/ethsw.c switchdev_work->event = event; event 1253 drivers/staging/fsl-dpaa2/ethsw/ethsw.c switch (event) { event 1283 drivers/staging/fsl-dpaa2/ethsw/ethsw.c ethsw_switchdev_port_obj_event(unsigned long event, struct net_device *netdev, event 1288 drivers/staging/fsl-dpaa2/ethsw/ethsw.c switch (event) { event 1303 drivers/staging/fsl-dpaa2/ethsw/ethsw.c unsigned long event, void *ptr) event 1310 drivers/staging/fsl-dpaa2/ethsw/ethsw.c switch (event) { event 1313 drivers/staging/fsl-dpaa2/ethsw/ethsw.c return ethsw_switchdev_port_obj_event(event, dev, ptr); event 30 drivers/staging/greybus/audio_module.c req->jack_attribute, req->event); event 36 drivers/staging/greybus/audio_module.c req->jack_attribute, req->event); event 38 drivers/staging/greybus/audio_module.c if (req->event == GB_AUDIO_JACK_EVENT_REMOVAL) { event 54 drivers/staging/greybus/audio_module.c req->jack_attribute, req->event); event 78 drivers/staging/greybus/audio_module.c req->button_id, req->event); event 84 drivers/staging/greybus/audio_module.c req->button_id, req->event); event 120 drivers/staging/greybus/audio_module.c if (req->event == GB_AUDIO_BUTTON_EVENT_PRESS) event 136 drivers/staging/greybus/audio_module.c le16_to_cpu(req->data_cport), req->event); event 504 drivers/staging/greybus/audio_topology.c struct snd_kcontrol *k, int event) event 512 drivers/staging/greybus/audio_topology.c struct snd_kcontrol *k, int event) event 520 drivers/staging/greybus/audio_topology.c struct snd_kcontrol *k, int event) event 934 drivers/staging/greybus/audio_topology.c struct snd_kcontrol *kcontrol, int event) event 943 drivers/staging/greybus/audio_topology.c dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event); event 963 drivers/staging/greybus/audio_topology.c switch (event) { event 978 drivers/staging/greybus/audio_topology.c event, ret); event 365 drivers/staging/greybus/gpio.c struct gb_gpio_irq_event_request *event; event 377 drivers/staging/greybus/gpio.c if (request->payload_size < sizeof(*event)) { event 379 drivers/staging/greybus/gpio.c request->payload_size, sizeof(*event)); event 383 drivers/staging/greybus/gpio.c event = request->payload; event 384 drivers/staging/greybus/gpio.c if 
(event->which > ggc->line_max) { event 385 drivers/staging/greybus/gpio.c dev_err(dev, "invalid hw irq: %d\n", event->which); event 389 drivers/staging/greybus/gpio.c irq = irq_find_mapping(ggc->chip.irq.domain, event->which); event 1218 drivers/staging/greybus/light.c u8 event; event 1243 drivers/staging/greybus/light.c event = payload->event; event 1245 drivers/staging/greybus/light.c if (event & GB_LIGHTS_LIGHT_CONFIG) { event 996 drivers/staging/greybus/power_supply.c u8 event; event 1026 drivers/staging/greybus/power_supply.c event = payload->event; event 1037 drivers/staging/greybus/power_supply.c if (event & GB_POWER_SUPPLY_UPDATE) { event 161 drivers/staging/greybus/sdio.c static void _gb_queue_event(struct gb_sdio_host *host, u8 event) event 163 drivers/staging/greybus/sdio.c if (event & GB_SDIO_CARD_INSERTED) event 165 drivers/staging/greybus/sdio.c else if (event & GB_SDIO_CARD_REMOVED) event 168 drivers/staging/greybus/sdio.c host->queued_events |= event; event 171 drivers/staging/greybus/sdio.c static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event) event 175 drivers/staging/greybus/sdio.c if (event & GB_SDIO_CARD_INSERTED) { event 184 drivers/staging/greybus/sdio.c if (event & GB_SDIO_CARD_REMOVED) { event 193 drivers/staging/greybus/sdio.c if (event & GB_SDIO_WP) event 212 drivers/staging/greybus/sdio.c u8 event; event 229 drivers/staging/greybus/sdio.c event = payload->event; event 232 drivers/staging/greybus/sdio.c _gb_queue_event(host, event); event 234 drivers/staging/greybus/sdio.c ret = _gb_sdio_process_events(host, event); event 171 drivers/staging/iio/impedance-analyzer/ad5933.c static int ad5933_wait_busy(struct ad5933_state *st, unsigned char event) event 180 drivers/staging/iio/impedance-analyzer/ad5933.c if (val & event) event 2510 drivers/staging/isdn/gigaset/bas-gigaset.c if (message.event != PM_EVENT_ON) event 335 drivers/staging/isdn/gigaset/common.c struct event_t *event = NULL; event 346 drivers/staging/isdn/gigaset/common.c event = cs->events + tail; event 347 drivers/staging/isdn/gigaset/common.c event->type = type; event 348 drivers/staging/isdn/gigaset/common.c event->at_state = at_state; event 349 drivers/staging/isdn/gigaset/common.c event->cid = -1; event 350 drivers/staging/isdn/gigaset/common.c event->ptr = ptr; event 351 drivers/staging/isdn/gigaset/common.c event->arg = arg; event 352 drivers/staging/isdn/gigaset/common.c event->parameter = parameter; event 358 drivers/staging/isdn/gigaset/common.c return event; event 409 drivers/staging/isdn/gigaset/ev-layer.c struct event_t *event; event 421 drivers/staging/isdn/gigaset/ev-layer.c event = cs->events + tail; event 422 drivers/staging/isdn/gigaset/ev-layer.c event->type = type; event 423 drivers/staging/isdn/gigaset/ev-layer.c event->cid = cid; event 424 drivers/staging/isdn/gigaset/ev-layer.c event->ptr = ptr; event 425 drivers/staging/isdn/gigaset/ev-layer.c event->arg = NULL; event 426 drivers/staging/isdn/gigaset/ev-layer.c event->parameter = parameter; event 427 drivers/staging/isdn/gigaset/ev-layer.c event->at_state = NULL; event 329 drivers/staging/ks7010/ks7010_sdio.c if (le16_to_cpu(hdr->event) < HIF_DATA_REQ || event 330 drivers/staging/ks7010/ks7010_sdio.c le16_to_cpu(hdr->event) > HIF_REQ_MAX) { event 331 drivers/staging/ks7010/ks7010_sdio.c netdev_err(priv->net_dev, "unknown event=%04X\n", hdr->event); event 388 drivers/staging/ks7010/ks7010_sdio.c if (le16_to_cpu(hdr->event) < HIF_DATA_REQ || event 389 drivers/staging/ks7010/ks7010_sdio.c le16_to_cpu(hdr->event) > 
HIF_REQ_MAX) { event 390 drivers/staging/ks7010/ks7010_sdio.c netdev_err(priv->net_dev, "unknown event=%04X\n", hdr->event); event 395 drivers/staging/ks7010/ks7010_sdio.c priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event); event 428 drivers/staging/ks7010/ks7010_sdio.c u16 event = 0; event 459 drivers/staging/ks7010/ks7010_sdio.c event = le16_to_cpu(hdr->event); event 466 drivers/staging/ks7010/ks7010_sdio.c if (atomic_read(&priv->psstatus.confirm_wait) && is_hif_conf(event)) { event 1065 drivers/staging/ks7010/ks7010_sdio.c pp->header.event = cpu_to_le16(HIF_STOP_REQ); event 980 drivers/staging/ks7010/ks_hostif.c u16 event; event 982 drivers/staging/ks7010/ks_hostif.c event = get_word(priv); event 983 drivers/staging/ks7010/ks_hostif.c switch (event) { event 1037 drivers/staging/ks7010/ks_hostif.c netdev_err(priv->net_dev, "undefined event[%04X]\n", event); event 1044 drivers/staging/ks7010/ks_hostif.c priv->hostt.buff[priv->hostt.qtail] = event; event 1049 drivers/staging/ks7010/ks_hostif.c static void *hostif_generic_request(size_t size, int event) event 1058 drivers/staging/ks7010/ks_hostif.c p->event = cpu_to_le16(event); event 1208 drivers/staging/ks7010/ks_hostif.c pp->header.event = cpu_to_le16(HIF_DATA_REQ); event 1374 drivers/staging/ks7010/ks_hostif.c void hostif_infrastructure_set_request(struct ks_wlan_private *priv, int event) event 1378 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), event); event 2054 drivers/staging/ks7010/ks_hostif.c static void hostif_sme_execute(struct ks_wlan_private *priv, int event) event 2058 drivers/staging/ks7010/ks_hostif.c switch (event) { event 2124 drivers/staging/ks7010/ks_hostif.c hostif_sme_set_wep(priv, event); event 2131 drivers/staging/ks7010/ks_hostif.c hostif_sme_set_rsn(priv, event); event 2142 drivers/staging/ks7010/ks_hostif.c hostif_sme_set_key(priv, event); event 2230 drivers/staging/ks7010/ks_hostif.c void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event) event 2234 drivers/staging/ks7010/ks_hostif.c priv->sme_i.event_buff[priv->sme_i.qtail] = event; event 64 drivers/staging/ks7010/ks_hostif.h __le16 event; event 570 drivers/staging/ks7010/ks_hostif.h static inline bool is_hif_ind(unsigned short event) event 572 drivers/staging/ks7010/ks_hostif.h return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) && event 573 drivers/staging/ks7010/ks_hostif.h (((event & ~HIF_EVENT_MASK) == 0x0001) || event 574 drivers/staging/ks7010/ks_hostif.h ((event & ~HIF_EVENT_MASK) == 0x0006) || event 575 drivers/staging/ks7010/ks_hostif.h ((event & ~HIF_EVENT_MASK) == 0x000C) || event 576 drivers/staging/ks7010/ks_hostif.h ((event & ~HIF_EVENT_MASK) == 0x0011) || event 577 drivers/staging/ks7010/ks_hostif.h ((event & ~HIF_EVENT_MASK) == 0x0012))); event 580 drivers/staging/ks7010/ks_hostif.h static inline bool is_hif_conf(unsigned short event) event 582 drivers/staging/ks7010/ks_hostif.h return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) && event 583 drivers/staging/ks7010/ks_hostif.h ((event & ~HIF_EVENT_MASK) > 0x0000) && event 584 drivers/staging/ks7010/ks_hostif.h ((event & ~HIF_EVENT_MASK) < 0x0012) && event 585 drivers/staging/ks7010/ks_hostif.h !is_hif_ind(event)); event 596 drivers/staging/ks7010/ks_hostif.h void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event); event 2188 drivers/staging/ks7010/ks_wlan_net.c static void print_hif_event(struct net_device *dev, int event) event 2190 drivers/staging/ks7010/ks_wlan_net.c switch (event) { event 2305 drivers/staging/ks7010/ks_wlan_net.c 
int i, event; event 2309 drivers/staging/ks7010/ks_wlan_net.c event = event 2312 drivers/staging/ks7010/ks_wlan_net.c print_hif_event(dev, event); event 2014 drivers/staging/media/ipu3/ipu3-css.c u32 event, daddr; event 2021 drivers/staging/media/ipu3/ipu3-css.c r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event); event 2025 drivers/staging/media/ipu3/ipu3-css.c evtype = (event & IMGU_ABI_EVTTYPE_EVENT_MASK) >> event 2033 drivers/staging/media/ipu3/ipu3-css.c pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >> event 2035 drivers/staging/media/ipu3/ipu3-css.c pipeid = (event & IMGU_ABI_EVTTYPE_PIPEID_MASK) >> event 2052 drivers/staging/media/ipu3/ipu3-css.c event, queue, pipe, pipeid); event 2091 drivers/staging/media/ipu3/ipu3-css.c pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >> event 2100 drivers/staging/media/ipu3/ipu3-css.c event, pipe); event 2103 drivers/staging/media/ipu3/ipu3-css.c r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event); event 2107 drivers/staging/media/ipu3/ipu3-css.c if ((event & IMGU_ABI_EVTTYPE_EVENT_MASK) >> event 2114 drivers/staging/media/ipu3/ipu3-css.c dev_warn(css->dev, "event: firmware warning 0x%x\n", event); event 2119 drivers/staging/media/ipu3/ipu3-css.c event, event 2120 drivers/staging/media/ipu3/ipu3-css.c (event & IMGU_ABI_EVTTYPE_MODULEID_MASK) >> event 2122 drivers/staging/media/ipu3/ipu3-css.c swab16((event & IMGU_ABI_EVTTYPE_LINENO_MASK) >> event 2126 drivers/staging/media/ipu3/ipu3-css.c dev_warn(css->dev, "received unknown event 0x%x\n", event); event 130 drivers/staging/nvec/nvec_kbd.c idev->event = nvec_kbd_event; event 1226 drivers/staging/qlge/qlge.h u8 event; event 2169 drivers/staging/qlge/qlge_main.c switch (ib_ae_rsp->event) { event 2196 drivers/staging/qlge/qlge_main.c ib_ae_rsp->event); event 69 drivers/staging/rtl8712/rtl871x_event.h #define GEN_EVT_CODE(event) event ## _EVT_ event 73 drivers/staging/rtl8723bs/include/rtw_event.h #define GEN_EVT_CODE(event) event ## _EVT_ event 396 drivers/staging/sm750fb/sm750.c if (mesg.event == pdev->dev.power.power_state.event) event 401 drivers/staging/sm750fb/sm750.c switch (mesg.event) { event 409 drivers/staging/sm750fb/sm750.c if (mesg.event & PM_EVENT_SLEEP) { event 465 drivers/staging/sm750fb/sm750.c if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) { event 503 drivers/staging/sm750fb/sm750.c pdev->dev.power.power_state.event = PM_EVENT_RESUME; event 264 drivers/staging/unisys/visorhba/visorhba_main.c wait_queue_head_t *event, int *result) event 269 drivers/staging/unisys/visorhba/visorhba_main.c simple_idr_get(idrtable, event, lock); event 422 drivers/staging/uwb/est.c u16 type_event_high, event; event 428 drivers/staging/uwb/est.c event = le16_to_cpu(rceb->wEvent); event 429 drivers/staging/uwb/est.c type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; event 434 drivers/staging/uwb/est.c event & 0x00ff, rceb, rceb_size); event 487 drivers/staging/uwb/hwa-rc.c int event = le16_to_cpu(rceb->wEvent); event 493 drivers/staging/uwb/hwa-rc.c switch (event) { event 64 drivers/staging/uwb/lc-dev.c void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event) event 71 drivers/staging/uwb/lc-dev.c handler->cb(handler->data, uwb_dev, event); event 33 drivers/staging/uwb/uwb-internal.h void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event); event 171 drivers/staging/uwb/uwbd.c u16 event; event 174 drivers/staging/uwb/uwbd.c event = le16_to_cpu(evt->notif.rceb->wEvent); event 182 drivers/staging/uwb/uwbd.c if 
(event >= type_table->size) event 184 drivers/staging/uwb/uwbd.c handler = type_table->uwbd_events[event].handler; event 193 drivers/staging/uwb/uwbd.c type, event, context, result); event 207 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c remote_event_signal(struct remote_event *event) event 211 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c event->fired = 1; event 215 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c if (event->armed) event 390 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_create(wait_queue_head_t *wq, struct remote_event *event) event 392 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c event->armed = 0; event 407 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) event 409 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c if (!event->fired) { event 410 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c event->armed = 1; event 412 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c if (wait_event_interruptible(*wq, event->fired)) { event 413 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c event->armed = 0; event 416 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c event->armed = 0; event 420 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c event->fired = 0; event 425 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) event 427 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c event->fired = 1; event 428 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c event->armed = 0; event 433 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_poll(wait_queue_head_t *wq, struct remote_event *event) event 435 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c if (event->fired && event->armed) event 436 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_signal_local(wq, event); event 1228 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c complete(&waiter->event); event 2984 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c init_completion(&bulk_waiter->event); event 3085 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c if (wait_for_completion_interruptible(&bulk_waiter->event)) event 476 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h struct completion event; event 591 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h remote_event_signal(struct remote_event *event); event 130 drivers/target/tcm_fc/tfc_sess.c int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg) event 134 drivers/target/tcm_fc/tfc_sess.c switch (event) { event 264 drivers/thermal/clock_cooling.c unsigned long event, void *data) event 269 drivers/thermal/clock_cooling.c switch (event) { event 198 drivers/thermal/intel/int340x_thermal/int3400_thermal.c u32 event, event 207 drivers/thermal/intel/int340x_thermal/int3400_thermal.c switch (event) { event 23 drivers/thermal/intel/int340x_thermal/int3402_thermal.c static void int3402_notify(acpi_handle handle, u32 event, void *data) event 30 drivers/thermal/intel/int340x_thermal/int3402_thermal.c switch (event) { event 52 drivers/thermal/intel/int340x_thermal/int3403_thermal.c u32 event, void *data) event 64 
drivers/thermal/intel/int340x_thermal/int3403_thermal.c switch (event) { event 77 drivers/thermal/intel/int340x_thermal/int3403_thermal.c dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); event 128 drivers/thermal/intel/int340x_thermal/int3406_thermal.c static void int3406_notify(acpi_handle handle, u32 event, void *data) event 130 drivers/thermal/intel/int340x_thermal/int3406_thermal.c if (event == INT3406_BRIGHTNESS_LIMITS_CHANGED) event 56 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h enum thermal_notify_event event) event 58 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h thermal_zone_device_update(tzone->zone, event); event 325 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c static void proc_thermal_notify(acpi_handle handle, u32 event, void *data) event 332 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c switch (event) { event 339 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c dev_dbg(proc_priv->dev, "Unsupported event [0x%x]\n", event); event 475 drivers/thermal/thermal_core.c enum thermal_notify_event event) event 489 drivers/thermal/thermal_core.c tz->notify_event = event; event 1482 drivers/thermal/thermal_core.c enum events event) event 1530 drivers/thermal/thermal_core.c thermal_event->event = event; event 189 drivers/tty/n_gsm.c wait_queue_head_t event; event 1282 drivers/tty/n_gsm.c wake_up(&gsm->event); event 1330 drivers/tty/n_gsm.c wake_up(&gsm->event); event 1360 drivers/tty/n_gsm.c wait_event(gsm->event, gsm->pending_cmd == NULL); event 1396 drivers/tty/n_gsm.c wait_event(gsm->event, control->done == 1); event 1430 drivers/tty/n_gsm.c wake_up(&dlci->gsm->event); event 1451 drivers/tty/n_gsm.c wake_up(&dlci->gsm->event); event 2040 drivers/tty/n_gsm.c wait_event_interruptible(gsm->event, event 2110 drivers/tty/n_gsm.c init_waitqueue_head(&gsm->event); event 69 drivers/tty/serial/ifx6x60.c unsigned long event, void *data); event 91 drivers/tty/serial/ifx6x60.c unsigned long event, void *data) event 627 drivers/tty/serial/sifive.c unsigned long event, void *data) event 632 drivers/tty/serial/sifive.c if (event == POST_RATE_CHANGE && ssp->clkin_rate != cnd->new_rate) { event 488 drivers/tty/serial/xilinx_uartps.c unsigned long event, void *data) event 501 drivers/tty/serial/xilinx_uartps.c switch (event) { event 440 drivers/tty/vcc.c static void vcc_event(void *arg, int event) event 452 drivers/tty/vcc.c switch (event) { event 455 drivers/tty/vcc.c vio_link_state_change(vio, event); event 465 drivers/tty/vcc.c pr_err("VCC: unexpected LDC event(%d)\n", event); event 472 drivers/tty/vcc.c .event = vcc_event, event 1604 drivers/tty/vt/keyboard.c .event = kbd_event, event 83 drivers/tty/vt/vc_screen.c int event; event 116 drivers/tty/vt/vc_screen.c poll->event = code; event 150 drivers/tty/vt/vc_screen.c poll->event = VT_UPDATE; event 282 drivers/tty/vt/vc_screen.c poll->event = 0; event 644 drivers/tty/vt/vc_screen.c switch (poll->event) { event 94 drivers/tty/vt/vt_ioctl.c struct vt_event event; event 111 drivers/tty/vt/vt_ioctl.c void vt_event_post(unsigned int event, unsigned int old, unsigned int new) event 123 drivers/tty/vt/vt_ioctl.c if (!(ve->event.event & event)) event 125 drivers/tty/vt/vt_ioctl.c ve->event.event = event; event 128 drivers/tty/vt/vt_ioctl.c ve->event.oldev = old + 1; event 129 drivers/tty/vt/vt_ioctl.c ve->event.newev = new + 1; event 189 drivers/tty/vt/vt_ioctl.c static int vt_event_wait_ioctl(struct vt_event __user *event) event 193 
drivers/tty/vt/vt_ioctl.c if (copy_from_user(&vw.event, event, sizeof(struct vt_event))) event 196 drivers/tty/vt/vt_ioctl.c if (vw.event.event & ~VT_MAX_EVENT) event 202 drivers/tty/vt/vt_ioctl.c if (copy_to_user(event, &vw.event, sizeof(struct vt_event))) event 222 drivers/tty/vt/vt_ioctl.c vw.event.event = VT_EVENT_SWITCH; event 232 drivers/tty/vt/vt_ioctl.c } while (vw.event.newev != n); event 259 drivers/uio/uio.c return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); event 261 drivers/uio/uio.c static DEVICE_ATTR_RO(event); event 431 drivers/uio/uio.c atomic_inc(&idev->event); event 487 drivers/uio/uio.c listener->event_count = atomic_read(&idev->event); event 558 drivers/uio/uio.c if (listener->event_count != atomic_read(&idev->event)) event 588 drivers/uio/uio.c event_count = atomic_read(&idev->event); event 940 drivers/uio/uio.c atomic_set(&idev->event, 0); event 269 drivers/usb/chipidea/ci_hdrc_imx.c static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event) event 275 drivers/usb/chipidea/ci_hdrc_imx.c switch (event) { event 79 drivers/usb/chipidea/ci_hdrc_msm.c static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) event 85 drivers/usb/chipidea/ci_hdrc_msm.c switch (event) { event 590 drivers/usb/chipidea/core.c static int ci_cable_notifier(struct notifier_block *nb, unsigned long event, event 596 drivers/usb/chipidea/core.c cbl->connected = event; event 1351 drivers/usb/core/driver.c msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); event 1435 drivers/usb/core/driver.c if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) { event 254 drivers/usb/core/generic.c else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW) event 467 drivers/usb/core/hcd-pci.c static int resume_common(struct device *dev, int event) event 495 drivers/usb/core/hcd-pci.c if (pci_dev->class == CL_EHCI && event != PM_EVENT_AUTO_RESUME) event 500 drivers/usb/core/hcd-pci.c event == PM_EVENT_RESTORE); event 197 drivers/usb/dwc3/debug.h const struct dwc3_event_devt *event) event 199 drivers/usb/dwc3/debug.h enum dwc3_link_state state = event->event_info & DWC3_LINK_STATE_MASK; event 201 drivers/usb/dwc3/debug.h switch (event->type) { event 254 drivers/usb/dwc3/debug.h const struct dwc3_event_depevt *event, u32 ep0state) event 256 drivers/usb/dwc3/debug.h u8 epnum = event->endpoint_number; event 263 drivers/usb/dwc3/debug.h status = event->status; event 265 drivers/usb/dwc3/debug.h switch (event->endpoint_event) { event 280 drivers/usb/dwc3/debug.h event->parameters, event 288 drivers/usb/dwc3/debug.h event->parameters, event 294 drivers/usb/dwc3/debug.h int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status); event 311 drivers/usb/dwc3/debug.h status = event->status; event 316 drivers/usb/dwc3/debug.h event->parameters); event 339 drivers/usb/dwc3/debug.h static inline const char *dwc3_gadget_event_type_string(u8 event) event 341 drivers/usb/dwc3/debug.h switch (event) { event 369 drivers/usb/dwc3/debug.h static inline const char *dwc3_decode_event(char *str, size_t size, u32 event, event 372 drivers/usb/dwc3/debug.h const union dwc3_event evt = (union dwc3_event) event; event 430 drivers/usb/dwc3/drd.c unsigned long event, void *ptr) event 434 drivers/usb/dwc3/drd.c dwc3_set_mode(dwc, event ? 
event 343 drivers/usb/dwc3/dwc3-omap.c unsigned long event, void *ptr) event 347 drivers/usb/dwc3/dwc3-omap.c if (event) event 356 drivers/usb/dwc3/dwc3-omap.c unsigned long event, void *ptr) event 360 drivers/usb/dwc3/dwc3-omap.c if (event) event 121 drivers/usb/dwc3/dwc3-qcom.c unsigned long event, void *ptr) event 126 drivers/usb/dwc3/dwc3-qcom.c dwc3_qcom_vbus_overrride_enable(qcom, event); event 127 drivers/usb/dwc3/dwc3-qcom.c qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST; event 133 drivers/usb/dwc3/dwc3-qcom.c unsigned long event, void *ptr) event 138 drivers/usb/dwc3/dwc3-qcom.c dwc3_qcom_vbus_overrride_enable(qcom, !event); event 139 drivers/usb/dwc3/dwc3-qcom.c qcom->mode = event ? USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL; event 784 drivers/usb/dwc3/ep0.c const struct dwc3_event_depevt *event) event 820 drivers/usb/dwc3/ep0.c const struct dwc3_event_depevt *event) event 831 drivers/usb/dwc3/ep0.c epnum = event->endpoint_number; event 878 drivers/usb/dwc3/ep0.c const struct dwc3_event_depevt *event) event 917 drivers/usb/dwc3/ep0.c const struct dwc3_event_depevt *event) event 919 drivers/usb/dwc3/ep0.c struct dwc3_ep *dep = dwc->eps[event->endpoint_number]; event 927 drivers/usb/dwc3/ep0.c dwc3_ep0_inspect_setup(dwc, event); event 931 drivers/usb/dwc3/ep0.c dwc3_ep0_complete_data(dwc, event); event 935 drivers/usb/dwc3/ep0.c dwc3_ep0_complete_status(dwc, event); event 1038 drivers/usb/dwc3/ep0.c const struct dwc3_event_depevt *event) event 1040 drivers/usb/dwc3/ep0.c struct dwc3_ep *dep = dwc->eps[event->endpoint_number]; event 1064 drivers/usb/dwc3/ep0.c const struct dwc3_event_depevt *event) event 1066 drivers/usb/dwc3/ep0.c switch (event->status) { event 1077 drivers/usb/dwc3/ep0.c if (dwc->ep0_expect_in != event->endpoint_number) { event 1097 drivers/usb/dwc3/ep0.c WARN_ON_ONCE(event->endpoint_number != 1); event 1107 drivers/usb/dwc3/ep0.c dwc3_ep0_do_control_status(dwc, event); event 1113 drivers/usb/dwc3/ep0.c dwc3_ep0_do_control_status(dwc, event); event 1118 drivers/usb/dwc3/ep0.c const struct dwc3_event_depevt *event) event 1120 drivers/usb/dwc3/ep0.c struct dwc3_ep *dep = dwc->eps[event->endpoint_number]; event 1123 drivers/usb/dwc3/ep0.c switch (event->endpoint_event) { event 1125 drivers/usb/dwc3/ep0.c dwc3_ep0_xfer_complete(dwc, event); event 1129 drivers/usb/dwc3/ep0.c dwc3_ep0_xfernotready(dwc, event); event 1137 drivers/usb/dwc3/ep0.c cmd = DEPEVT_PARAMETER_CMD(event->parameters); event 2407 drivers/usb/dwc3/gadget.c const struct dwc3_event_depevt *event, int status, int chain) event 2459 drivers/usb/dwc3/gadget.c if (event->status & DEPEVT_STATUS_SHORT && !chain) event 2470 drivers/usb/dwc3/gadget.c struct dwc3_request *req, const struct dwc3_event_depevt *event, event 2487 drivers/usb/dwc3/gadget.c trb, event, status, true); event 2496 drivers/usb/dwc3/gadget.c struct dwc3_request *req, const struct dwc3_event_depevt *event, event 2502 drivers/usb/dwc3/gadget.c event, status, false); event 2511 drivers/usb/dwc3/gadget.c const struct dwc3_event_depevt *event, event 2517 drivers/usb/dwc3/gadget.c ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event, event 2520 drivers/usb/dwc3/gadget.c ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, event 2524 drivers/usb/dwc3/gadget.c ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, event 2543 drivers/usb/dwc3/gadget.c const struct dwc3_event_depevt *event, int status) event 2551 drivers/usb/dwc3/gadget.c ret = dwc3_gadget_ep_cleanup_completed_request(dep, event, event 2559 
drivers/usb/dwc3/gadget.c const struct dwc3_event_depevt *event) event 2561 drivers/usb/dwc3/gadget.c dep->frame_number = event->parameters; event 2565 drivers/usb/dwc3/gadget.c const struct dwc3_event_depevt *event) event 2571 drivers/usb/dwc3/gadget.c dwc3_gadget_endpoint_frame_from_event(dep, event); event 2573 drivers/usb/dwc3/gadget.c if (event->status & DEPEVT_STATUS_BUSERR) event 2576 drivers/usb/dwc3/gadget.c if (event->status & DEPEVT_STATUS_MISSED_ISOC) { event 2583 drivers/usb/dwc3/gadget.c dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); event 2615 drivers/usb/dwc3/gadget.c const struct dwc3_event_depevt *event) event 2617 drivers/usb/dwc3/gadget.c dwc3_gadget_endpoint_frame_from_event(dep, event); event 2622 drivers/usb/dwc3/gadget.c const struct dwc3_event_depevt *event) event 2625 drivers/usb/dwc3/gadget.c u8 epnum = event->endpoint_number; event 2635 drivers/usb/dwc3/gadget.c if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) event 2640 drivers/usb/dwc3/gadget.c dwc3_ep0_interrupt(dwc, event); event 2644 drivers/usb/dwc3/gadget.c switch (event->endpoint_event) { event 2646 drivers/usb/dwc3/gadget.c dwc3_gadget_endpoint_transfer_in_progress(dep, event); event 2649 drivers/usb/dwc3/gadget.c dwc3_gadget_endpoint_transfer_not_ready(dep, event); event 2652 drivers/usb/dwc3/gadget.c cmd = DEPEVT_PARAMETER_CMD(event->parameters); event 3137 drivers/usb/dwc3/gadget.c const struct dwc3_event_devt *event) event 3139 drivers/usb/dwc3/gadget.c switch (event->type) { event 3157 drivers/usb/dwc3/gadget.c dwc3_gadget_hibernation_interrupt(dwc, event->event_info); event 3160 drivers/usb/dwc3/gadget.c dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); event 3171 drivers/usb/dwc3/gadget.c event->event_info); event 3180 drivers/usb/dwc3/gadget.c dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); event 3185 drivers/usb/dwc3/gadget.c const union dwc3_event *event) event 3187 drivers/usb/dwc3/gadget.c trace_dwc3_event(event->raw, dwc); event 3189 drivers/usb/dwc3/gadget.c if (!event->type.is_devspec) event 3190 drivers/usb/dwc3/gadget.c dwc3_endpoint_interrupt(dwc, &event->depevt); event 3191 drivers/usb/dwc3/gadget.c else if (event->type.type == DWC3_EVENT_TYPE_DEV) event 3192 drivers/usb/dwc3/gadget.c dwc3_gadget_interrupt(dwc, &event->devt); event 3194 drivers/usb/dwc3/gadget.c dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); event 3210 drivers/usb/dwc3/gadget.c union dwc3_event event; event 3212 drivers/usb/dwc3/gadget.c event.raw = *(u32 *) (evt->cache + evt->lpos); event 3214 drivers/usb/dwc3/gadget.c dwc3_process_event_entry(dwc, &event); event 107 drivers/usb/dwc3/gadget.h const struct dwc3_event_depevt *event); event 50 drivers/usb/dwc3/trace.h TP_PROTO(u32 event, struct dwc3 *dwc), event 51 drivers/usb/dwc3/trace.h TP_ARGS(event, dwc), event 53 drivers/usb/dwc3/trace.h __field(u32, event) event 58 drivers/usb/dwc3/trace.h __entry->event = event; event 61 drivers/usb/dwc3/trace.h TP_printk("event (%08x): %s", __entry->event, event 63 drivers/usb/dwc3/trace.h __entry->event, __entry->ep0state)) event 67 drivers/usb/dwc3/trace.h TP_PROTO(u32 event, struct dwc3 *dwc), event 68 drivers/usb/dwc3/trace.h TP_ARGS(event, dwc) event 378 drivers/usb/gadget/function/f_ecm.c struct usb_cdc_notification *event; event 387 drivers/usb/gadget/function/f_ecm.c event = req->buf; event 393 drivers/usb/gadget/function/f_ecm.c event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; event 395 drivers/usb/gadget/function/f_ecm.c event->wValue = cpu_to_le16(1); event 
397 drivers/usb/gadget/function/f_ecm.c event->wValue = cpu_to_le16(0); event 398 drivers/usb/gadget/function/f_ecm.c event->wLength = 0; event 399 drivers/usb/gadget/function/f_ecm.c req->length = sizeof *event; event 407 drivers/usb/gadget/function/f_ecm.c event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE; event 408 drivers/usb/gadget/function/f_ecm.c event->wValue = cpu_to_le16(0); event 409 drivers/usb/gadget/function/f_ecm.c event->wLength = cpu_to_le16(8); event 413 drivers/usb/gadget/function/f_ecm.c data = req->buf + sizeof *event; event 421 drivers/usb/gadget/function/f_ecm.c event->bmRequestType = 0xA1; event 422 drivers/usb/gadget/function/f_ecm.c event->wIndex = cpu_to_le16(ecm->ctrl_id); event 447 drivers/usb/gadget/function/f_ecm.c struct usb_cdc_notification *event = req->buf; event 461 drivers/usb/gadget/function/f_ecm.c event->bNotificationType, req->status); event 545 drivers/usb/gadget/function/f_ncm.c struct usb_cdc_notification *event; event 554 drivers/usb/gadget/function/f_ncm.c event = req->buf; event 560 drivers/usb/gadget/function/f_ncm.c event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; event 562 drivers/usb/gadget/function/f_ncm.c event->wValue = cpu_to_le16(1); event 564 drivers/usb/gadget/function/f_ncm.c event->wValue = cpu_to_le16(0); event 565 drivers/usb/gadget/function/f_ncm.c event->wLength = 0; event 566 drivers/usb/gadget/function/f_ncm.c req->length = sizeof *event; event 574 drivers/usb/gadget/function/f_ncm.c event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE; event 575 drivers/usb/gadget/function/f_ncm.c event->wValue = cpu_to_le16(0); event 576 drivers/usb/gadget/function/f_ncm.c event->wLength = cpu_to_le16(8); event 580 drivers/usb/gadget/function/f_ncm.c data = req->buf + sizeof *event; event 588 drivers/usb/gadget/function/f_ncm.c event->bmRequestType = 0xA1; event 589 drivers/usb/gadget/function/f_ncm.c event->wIndex = cpu_to_le16(ncm->ctrl_id); event 630 drivers/usb/gadget/function/f_ncm.c struct usb_cdc_notification *event = req->buf; event 636 drivers/usb/gadget/function/f_ncm.c event->bNotificationType); event 646 drivers/usb/gadget/function/f_ncm.c event->bNotificationType, req->status); event 118 drivers/usb/gadget/legacy/inode.c struct usb_gadgetfs_event event [N_EVENT]; event 1002 drivers/usb/gadget/legacy/inode.c if (len < sizeof dev->event [0]) { event 1020 drivers/usb/gadget/legacy/inode.c if (dev->event [i].type == GADGETFS_SETUP) { event 1028 drivers/usb/gadget/legacy/inode.c if (copy_to_user (buf, &dev->event, len)) event 1038 drivers/usb/gadget/legacy/inode.c memmove(&dev->event[0], &dev->event[n], event 1079 drivers/usb/gadget/legacy/inode.c struct usb_gadgetfs_event *event; event 1095 drivers/usb/gadget/legacy/inode.c if (dev->event [i].type != type) event 1102 drivers/usb/gadget/legacy/inode.c memmove (&dev->event [i], &dev->event [i + 1], event 1111 drivers/usb/gadget/legacy/inode.c event = &dev->event [dev->ev_next++]; event 1113 drivers/usb/gadget/legacy/inode.c memset (event, 0, sizeof *event); event 1114 drivers/usb/gadget/legacy/inode.c event->type = type; event 1115 drivers/usb/gadget/legacy/inode.c return event; event 1332 drivers/usb/gadget/legacy/inode.c struct usb_gadgetfs_event *event; event 1350 drivers/usb/gadget/legacy/inode.c event = next_event (dev, GADGETFS_CONNECT); event 1351 drivers/usb/gadget/legacy/inode.c event->u.speed = gadget->speed; event 1497 drivers/usb/gadget/legacy/inode.c event = next_event (dev, GADGETFS_SETUP); event 1498 drivers/usb/gadget/legacy/inode.c event->u.setup 
= *ctrl; event 88 drivers/usb/gadget/udc/snps_udc_plat.c unsigned long event, void *ptr) event 92 drivers/usb/gadget/udc/snps_udc_plat.c dev_dbg(udc->dev, "%s: event: %lu\n", __func__, event); event 94 drivers/usb/gadget/udc/snps_udc_plat.c udc->conn_type = event; event 76 drivers/usb/host/ehci-timer.c static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event, event 79 drivers/usb/host/ehci-timer.c ktime_t *timeout = &ehci->hr_timeouts[event]; event 82 drivers/usb/host/ehci-timer.c *timeout = ktime_add(ktime_get(), event_delays_ns[event]); event 83 drivers/usb/host/ehci-timer.c ehci->enabled_hrtimer_events |= (1 << event); event 86 drivers/usb/host/ehci-timer.c if (event < ehci->next_hrtimer_event) { event 87 drivers/usb/host/ehci-timer.c ehci->next_hrtimer_event = event; event 1054 drivers/usb/host/fotg210-hcd.c static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event, event 1057 drivers/usb/host/fotg210-hcd.c ktime_t *timeout = &fotg210->hr_timeouts[event]; event 1060 drivers/usb/host/fotg210-hcd.c *timeout = ktime_add(ktime_get(), event_delays_ns[event]); event 1061 drivers/usb/host/fotg210-hcd.c fotg210->enabled_hrtimer_events |= (1 << event); event 1064 drivers/usb/host/fotg210-hcd.c if (event < fotg210->next_hrtimer_event) { event 1065 drivers/usb/host/fotg210-hcd.c fotg210->next_hrtimer_event = event; event 57 drivers/usb/host/imx21-hcd.c #define DEBUG_LOG_FRAME(imx21, etd, event) \ event 58 drivers/usb/host/imx21-hcd.c (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB) event 60 drivers/usb/host/imx21-hcd.c #define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0) event 1662 drivers/usb/host/isp116x-hcd.c VDBG("%s: state %x\n", __func__, state.event); event 2725 drivers/usb/host/isp1362-hcd.c if (state.event == PM_EVENT_FREEZE) { event 2747 drivers/usb/host/isp1362-hcd.c if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { event 180 drivers/usb/host/ohci-da8xx.c unsigned long event, void *data) event 185 drivers/usb/host/ohci-da8xx.c if (event & REGULATOR_EVENT_OVER_CURRENT) { event 1747 drivers/usb/host/sl811-hcd.c switch (state.event) { event 3132 drivers/usb/host/u132-hcd.c switch (state.event) { event 568 drivers/usb/host/xhci-dbgcap.c dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event) event 590 drivers/usb/host/xhci-dbgcap.c static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event) event 600 drivers/usb/host/xhci-dbgcap.c comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2])); event 601 drivers/usb/host/xhci-dbgcap.c remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2])); event 602 drivers/usb/host/xhci-dbgcap.c ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3])); event 629 drivers/usb/host/xhci-dbgcap.c if (r->trb_dma == event->trans_event.buffer) { event 718 drivers/usb/host/xhci-ring.c union xhci_trb *trb, struct xhci_event_cmd *event) event 1221 drivers/usb/host/xhci-ring.c struct xhci_event_cmd *event, u32 cmd_comp_code) event 1288 drivers/usb/host/xhci-ring.c struct xhci_event_cmd *event) event 1304 drivers/usb/host/xhci-ring.c struct xhci_event_cmd *event) event 1312 drivers/usb/host/xhci-ring.c NEC_FW_MAJOR(le32_to_cpu(event->status)), event 1313 drivers/usb/host/xhci-ring.c NEC_FW_MINOR(le32_to_cpu(event->status))); event 1391 drivers/usb/host/xhci-ring.c struct xhci_event_cmd *event) event 1393 drivers/usb/host/xhci-ring.c int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); event 1401 drivers/usb/host/xhci-ring.c cmd_dma = 
le64_to_cpu(event->cmd_trb); event 1422 drivers/usb/host/xhci-ring.c cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); event 1461 drivers/usb/host/xhci-ring.c xhci_handle_cmd_config_ep(xhci, slot_id, event, event 1473 drivers/usb/host/xhci-ring.c xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event); event 1496 drivers/usb/host/xhci-ring.c xhci_handle_cmd_reset_dev(xhci, slot_id, event); event 1499 drivers/usb/host/xhci-ring.c xhci_handle_cmd_nec_get_fw(xhci, event); event 1523 drivers/usb/host/xhci-ring.c union xhci_trb *event) event 1527 drivers/usb/host/xhci-ring.c trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3])); event 1530 drivers/usb/host/xhci-ring.c handle_cmd_completion(xhci, &event->event_cmd); event 1534 drivers/usb/host/xhci-ring.c union xhci_trb *event) event 1539 drivers/usb/host/xhci-ring.c slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3])); event 1583 drivers/usb/host/xhci-ring.c union xhci_trb *event) event 1596 drivers/usb/host/xhci-ring.c if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) event 1600 drivers/usb/host/xhci-ring.c port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); event 1962 drivers/usb/host/xhci-ring.c struct xhci_transfer_event *event, event 1972 drivers/usb/host/xhci-ring.c slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); event 1974 drivers/usb/host/xhci-ring.c ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; event 1975 drivers/usb/host/xhci-ring.c ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); event 1977 drivers/usb/host/xhci-ring.c trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); event 2034 drivers/usb/host/xhci-ring.c union xhci_trb *ep_trb, struct xhci_transfer_event *event, event 2046 drivers/usb/host/xhci-ring.c slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); event 2048 drivers/usb/host/xhci-ring.c ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; event 2050 drivers/usb/host/xhci-ring.c trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); event 2052 drivers/usb/host/xhci-ring.c remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); event 2129 drivers/usb/host/xhci-ring.c return finish_td(xhci, td, event, ep, status); event 2136 drivers/usb/host/xhci-ring.c union xhci_trb *ep_trb, struct xhci_transfer_event *event, event 2148 drivers/usb/host/xhci-ring.c ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); event 2149 drivers/usb/host/xhci-ring.c trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); event 2154 drivers/usb/host/xhci-ring.c remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); event 2216 drivers/usb/host/xhci-ring.c return finish_td(xhci, td, event, ep, status); event 2220 drivers/usb/host/xhci-ring.c struct xhci_transfer_event *event, event 2228 drivers/usb/host/xhci-ring.c ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); event 2251 drivers/usb/host/xhci-ring.c union xhci_trb *ep_trb, struct xhci_transfer_event *event, event 2261 drivers/usb/host/xhci-ring.c slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); event 2263 drivers/usb/host/xhci-ring.c ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; event 2264 drivers/usb/host/xhci-ring.c ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); event 2265 drivers/usb/host/xhci-ring.c trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); event 2266 drivers/usb/host/xhci-ring.c remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); event 2321 
drivers/usb/host/xhci-ring.c return finish_td(xhci, td, event, ep, status); event 2330 drivers/usb/host/xhci-ring.c struct xhci_transfer_event *event) event 2348 drivers/usb/host/xhci-ring.c slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); event 2349 drivers/usb/host/xhci-ring.c ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; event 2350 drivers/usb/host/xhci-ring.c trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); event 2351 drivers/usb/host/xhci-ring.c ep_trb_dma = le64_to_cpu(event->buffer); event 2404 drivers/usb/host/xhci-ring.c if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) event 2482 drivers/usb/host/xhci-ring.c TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), event 2490 drivers/usb/host/xhci-ring.c TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), event 2547 drivers/usb/host/xhci-ring.c TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), event 2621 drivers/usb/host/xhci-ring.c skip_isoc_td(xhci, td, event, ep, &status); event 2662 drivers/usb/host/xhci-ring.c process_ctrl_td(xhci, td, ep_trb, event, ep, &status); event 2664 drivers/usb/host/xhci-ring.c process_isoc_td(xhci, td, ep_trb, event, ep, &status); event 2666 drivers/usb/host/xhci-ring.c process_bulk_intr_td(xhci, td, ep_trb, event, ep, event 2695 drivers/usb/host/xhci-ring.c lower_32_bits(le64_to_cpu(event->buffer)), event 2696 drivers/usb/host/xhci-ring.c upper_32_bits(le64_to_cpu(event->buffer)), event 2697 drivers/usb/host/xhci-ring.c le32_to_cpu(event->transfer_len), event 2698 drivers/usb/host/xhci-ring.c le32_to_cpu(event->flags)); event 2710 drivers/usb/host/xhci-ring.c union xhci_trb *event; event 2720 drivers/usb/host/xhci-ring.c event = xhci->event_ring->dequeue; event 2722 drivers/usb/host/xhci-ring.c if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != event 2726 drivers/usb/host/xhci-ring.c trace_xhci_handle_event(xhci->event_ring, &event->generic); event 2734 drivers/usb/host/xhci-ring.c switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) { event 2736 drivers/usb/host/xhci-ring.c handle_cmd_completion(xhci, &event->event_cmd); event 2739 drivers/usb/host/xhci-ring.c handle_port_status(xhci, event); event 2743 drivers/usb/host/xhci-ring.c ret = handle_tx_event(xhci, &event->trans_event); event 2748 drivers/usb/host/xhci-ring.c handle_device_notification(xhci, event); event 2751 drivers/usb/host/xhci-ring.c if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= event 2753 drivers/usb/host/xhci-ring.c handle_vendor_event(xhci, event); event 2757 drivers/usb/host/xhci-ring.c le32_to_cpu(event->event_cmd.flags))); event 216 drivers/usb/mtu3/mtu3_dr.c unsigned long event, void *ptr) event 221 drivers/usb/mtu3/mtu3_dr.c otg_sx->id_event = event; event 228 drivers/usb/mtu3/mtu3_dr.c unsigned long event, void *ptr) event 233 drivers/usb/mtu3/mtu3_dr.c otg_sx->vbus_event = event; event 200 drivers/usb/musb/sunxi.c unsigned long event, void *ptr) event 204 drivers/usb/musb/sunxi.c if (event) event 95 drivers/usb/musb/ux500.c unsigned long event, void *unused) event 100 drivers/usb/musb/ux500.c event, usb_otg_state_string(musb->xceiv->otg->state)); event 102 drivers/usb/musb/ux500.c switch (event) { event 316 drivers/usb/phy/phy-ab8500-usb.c enum ux500_musb_vbus_id_status event = 0; event 332 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_RIDB; event 342 drivers/usb/phy/phy-ab8500-usb.c if (event != UX500_MUSB_RIDB) event 343 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_NONE; event 353 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_RIDC; event 366 
drivers/usb/phy/phy-ab8500-usb.c if (event != UX500_MUSB_RIDC) event 367 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_VBUS; event 372 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_RIDA; event 382 drivers/usb/phy/phy-ab8500-usb.c if (event != UX500_MUSB_RIDA) event 383 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_ID; event 385 drivers/usb/phy/phy-ab8500-usb.c event, &ab->vbus_draw); event 390 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_CHARGER; event 392 drivers/usb/phy/phy-ab8500-usb.c event, &ab->vbus_draw); event 406 drivers/usb/phy/phy-ab8500-usb.c enum ux500_musb_vbus_id_status event = 0; event 427 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_RIDB; event 434 drivers/usb/phy/phy-ab8500-usb.c if (event != UX500_MUSB_RIDB) event 435 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_NONE; event 444 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_RIDC; event 459 drivers/usb/phy/phy-ab8500-usb.c if (event != UX500_MUSB_RIDC) event 460 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_VBUS; event 464 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_RIDA; event 474 drivers/usb/phy/phy-ab8500-usb.c if (event != UX500_MUSB_RIDA) event 475 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_ID; event 477 drivers/usb/phy/phy-ab8500-usb.c event, &ab->vbus_draw); event 482 drivers/usb/phy/phy-ab8500-usb.c event = UX500_MUSB_CHARGER; event 484 drivers/usb/phy/phy-ab8500-usb.c event, &ab->vbus_draw); event 546 drivers/usb/phy/phy-ab8500-usb.c enum usb_phy_events event = USB_EVENT_NONE; event 553 drivers/usb/phy/phy-ab8500-usb.c event, &ab->vbus_draw); event 560 drivers/usb/phy/phy-ab8500-usb.c event, &ab->vbus_draw); event 49 drivers/usb/phy/phy-fsl-usb.c .event = 1, event 64 drivers/usb/phy/phy-omap-otg.c unsigned long event, void *ptr) event 68 drivers/usb/phy/phy-omap-otg.c otg_dev->id = event; event 75 drivers/usb/phy/phy-omap-otg.c unsigned long event, void *ptr) event 80 drivers/usb/phy/phy-omap-otg.c otg_dev->vbus = event; event 709 drivers/usb/phy/phy.c void usb_phy_set_event(struct usb_phy *x, unsigned long event) event 711 drivers/usb/phy/phy.c x->last_event = event; event 434 drivers/usb/serial/mxuport.c u8 buf[4], u32 event) event 436 drivers/usb/serial/mxuport.c dev_dbg(&port->dev, "%s - receive event : %04x\n", __func__, event); event 438 drivers/usb/serial/mxuport.c switch (event) { event 223 drivers/usb/serial/whiteheat.h __u8 event; /* indicates what the current event is, event 276 drivers/usb/serial/whiteheat.h __u8 event; /* see whiteheat_status_info.event */ event 1945 drivers/usb/typec/tcpm/tcpm.c struct pd_rx_event *event = container_of(work, event 1947 drivers/usb/typec/tcpm/tcpm.c const struct pd_message *msg = &event->msg; event 1949 drivers/usb/typec/tcpm/tcpm.c struct tcpm_port *port = event->port; event 1994 drivers/usb/typec/tcpm/tcpm.c kfree(event); event 1999 drivers/usb/typec/tcpm/tcpm.c struct pd_rx_event *event; event 2001 drivers/usb/typec/tcpm/tcpm.c event = kzalloc(sizeof(*event), GFP_ATOMIC); event 2002 drivers/usb/typec/tcpm/tcpm.c if (!event) event 2005 drivers/usb/typec/tcpm/tcpm.c INIT_WORK(&event->work, tcpm_pd_rx_handler); event 2006 drivers/usb/typec/tcpm/tcpm.c event->port = port; event 2007 drivers/usb/typec/tcpm/tcpm.c memcpy(&event->msg, msg, sizeof(*msg)); event 2008 drivers/usb/typec/tcpm/tcpm.c queue_work(port->wq, &event->work); event 58 drivers/usb/typec/ucsi/ucsi_acpi.c static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) event 272 drivers/usb/usbip/usbip_common.h unsigned long 
event; event 322 drivers/usb/usbip/usbip_common.h void usbip_event_add(struct usbip_device *ud, unsigned long event); event 22 drivers/usb/usbip/usbip_event.c static void set_event(struct usbip_device *ud, unsigned long event) event 27 drivers/usb/usbip/usbip_event.c ud->event |= event; event 31 drivers/usb/usbip/usbip_event.c static void unset_event(struct usbip_device *ud, unsigned long event) event 36 drivers/usb/usbip/usbip_event.c ud->event &= ~event; event 71 drivers/usb/usbip/usbip_event.c usbip_dbg_eh("pending event %lx\n", ud->event); event 77 drivers/usb/usbip/usbip_event.c if (ud->event & USBIP_EH_SHUTDOWN) { event 83 drivers/usb/usbip/usbip_event.c if (ud->event & USBIP_EH_RESET) { event 89 drivers/usb/usbip/usbip_event.c if (ud->event & USBIP_EH_UNUSABLE) { event 101 drivers/usb/usbip/usbip_event.c ud->event = 0; event 108 drivers/usb/usbip/usbip_event.c unsigned long pending = ud->event & ~USBIP_EH_BYE; event 110 drivers/usb/usbip/usbip_event.c if (!(ud->event & USBIP_EH_BYE)) event 116 drivers/usb/usbip/usbip_event.c wait_event_interruptible(ud->eh_waitq, !(ud->event & ~USBIP_EH_BYE)); event 143 drivers/usb/usbip/usbip_event.c void usbip_event_add(struct usbip_device *ud, unsigned long event) event 148 drivers/usb/usbip/usbip_event.c if (ud->event & USBIP_EH_BYE) event 151 drivers/usb/usbip/usbip_event.c set_event(ud, event); event 180 drivers/usb/usbip/usbip_event.c if (ud->event != 0) event 158 drivers/vhost/scsi.c struct virtio_scsi_event event; event 408 drivers/vhost/scsi.c u32 event, u32 reason) event 425 drivers/vhost/scsi.c evt->event.event = cpu_to_vhost32(vq, event); event 426 drivers/vhost/scsi.c evt->event.reason = cpu_to_vhost32(vq, reason); event 450 drivers/vhost/scsi.c struct virtio_scsi_event *event = &evt->event; event 484 drivers/vhost/scsi.c event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED); event 489 drivers/vhost/scsi.c ret = __copy_to_user(eventp, event, sizeof(*event)); event 1292 drivers/vhost/scsi.c u32 event, event 1297 drivers/vhost/scsi.c evt = vhost_scsi_allocate_evt(vs, event, reason); event 1307 drivers/vhost/scsi.c evt->event.lun[0] = 0x01; event 1308 drivers/vhost/scsi.c evt->event.lun[1] = tpg->tport_tpgt; event 1310 drivers/vhost/scsi.c evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; event 1311 drivers/vhost/scsi.c evt->event.lun[3] = lun->unpacked_lun & 0xFF; event 432 drivers/vhost/vhost.c size_t event __maybe_unused = event 436 drivers/vhost/vhost.c sizeof(*vq->avail->ring) * num + event; event 442 drivers/vhost/vhost.c size_t event __maybe_unused = event 446 drivers/vhost/vhost.c sizeof(*vq->used->ring) * num + event; event 1000 drivers/vhost/vhost.c __virtio16 *event) event 1002 drivers/vhost/vhost.c return vhost_get_avail(vq, *event, vhost_used_event(vq)); event 2441 drivers/vhost/vhost.c __virtio16 event; event 2468 drivers/vhost/vhost.c if (vhost_get_used_event(vq, &event)) { event 2472 drivers/vhost/vhost.c return vring_need_event(vhost16_to_cpu(vq, event), new, old); event 48 drivers/video/backlight/backlight.c unsigned long event, void *data) event 56 drivers/video/backlight/backlight.c if (event != FB_EVENT_BLANK) event 28 drivers/video/backlight/lcd.c unsigned long event, void *data) event 39 drivers/video/backlight/lcd.c if (event == FB_EVENT_BLANK) { event 1101 drivers/video/fbdev/arkfb.c if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) { event 2395 drivers/video/fbdev/aty/aty128fb.c if (state.event == pdev->dev.power.power_state.event) event 2429 drivers/video/fbdev/aty/aty128fb.c if 
(state.event != PM_EVENT_ON) event 2444 drivers/video/fbdev/aty/aty128fb.c if (pdev->dev.power.power_state.event == PM_EVENT_ON) event 2053 drivers/video/fbdev/aty/atyfb_base.c if (state.event == pdev->dev.power.power_state.event) event 2117 drivers/video/fbdev/aty/atyfb_base.c if (pdev->dev.power.power_state.event == PM_EVENT_ON) event 2130 drivers/video/fbdev/aty/atyfb_base.c pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) event 2621 drivers/video/fbdev/aty/radeon_pm.c if (mesg.event == pdev->dev.power.power_state.event) event 2625 drivers/video/fbdev/aty/radeon_pm.c pci_name(pdev), mesg.event); event 2632 drivers/video/fbdev/aty/radeon_pm.c switch (mesg.event) { event 2723 drivers/video/fbdev/aty/radeon_pm.c if (pdev->dev.power.power_state.event == PM_EVENT_ON) event 2733 drivers/video/fbdev/aty/radeon_pm.c pci_name(pdev), pdev->dev.power.power_state.event); event 2739 drivers/video/fbdev/aty/radeon_pm.c if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { event 454 drivers/video/fbdev/chipsfb.c if (state.event == pdev->dev.power.power_state.event) event 456 drivers/video/fbdev/chipsfb.c if (!(state.event & PM_EVENT_SLEEP)) event 960 drivers/video/fbdev/core/fbmem.c struct fb_event event; event 1046 drivers/video/fbdev/core/fbmem.c event.info = info; event 1047 drivers/video/fbdev/core/fbmem.c event.data = &mode; event 1048 drivers/video/fbdev/core/fbmem.c fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event); event 1060 drivers/video/fbdev/core/fbmem.c struct fb_event event; event 1066 drivers/video/fbdev/core/fbmem.c event.info = info; event 1067 drivers/video/fbdev/core/fbmem.c event.data = &blank; event 1073 drivers/video/fbdev/core/fbmem.c fb_notifier_call_chain(FB_EVENT_BLANK, &event); event 1641 drivers/video/fbdev/core/fbmem.c struct fb_event event; event 1642 drivers/video/fbdev/core/fbmem.c event.info = fb_info; event 1643 drivers/video/fbdev/core/fbmem.c fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); event 1709 drivers/video/fbdev/core/fbmem.c struct fb_event event; event 1710 drivers/video/fbdev/core/fbmem.c event.info = fb_info; event 1711 drivers/video/fbdev/core/fbmem.c fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); event 744 drivers/video/fbdev/fb-puv3.c if (dev->dev.power.power_state.event == PM_EVENT_ON) event 749 drivers/video/fbdev/fb-puv3.c if (dev->dev.power.power_state.event == PM_EVENT_SUSPEND) { event 783 drivers/video/fbdev/fb-puv3.c if (mesg.event == dev->dev.power.power_state.event) event 786 drivers/video/fbdev/fb-puv3.c switch (mesg.event) { event 330 drivers/video/fbdev/geode/gxfb_core.c if (state.event == PM_EVENT_SUSPEND) { event 451 drivers/video/fbdev/geode/lxfb_core.c if (state.event == PM_EVENT_SUSPEND) { event 1185 drivers/video/fbdev/i740fb.c if (state.event == PM_EVENT_FREEZE || state.event == PM_EVENT_PRETHAW) event 1569 drivers/video/fbdev/i810/i810_main.c par->cur_state = mesg.event; event 1571 drivers/video/fbdev/i810/i810_main.c switch (mesg.event) { event 1049 drivers/video/fbdev/nvidia/nvidia.c if (mesg.event == PM_EVENT_PRETHAW) event 1050 drivers/video/fbdev/nvidia/nvidia.c mesg.event = PM_EVENT_FREEZE; event 1052 drivers/video/fbdev/nvidia/nvidia.c par->pm_state = mesg.event; event 1054 drivers/video/fbdev/nvidia/nvidia.c if (mesg.event & PM_EVENT_SLEEP) { event 138 drivers/video/fbdev/omap/omapfb.h unsigned long event, event 223 drivers/video/fbdev/omap/omapfb.h unsigned long event); event 981 drivers/video/fbdev/omap/omapfb_main.c void omapfb_notify_clients(struct omapfb_device *fbdev, unsigned long event)
event 990 drivers/video/fbdev/omap/omapfb_main.c blocking_notifier_call_chain(&omapfb_client_list[i], event, event 1423 drivers/video/fbdev/s3fb.c if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) { event 2357 drivers/video/fbdev/savage/savagefb_driver.c if (mesg.event == PM_EVENT_PRETHAW) event 2358 drivers/video/fbdev/savage/savagefb_driver.c mesg.event = PM_EVENT_FREEZE; event 2359 drivers/video/fbdev/savage/savagefb_driver.c par->pm_state = mesg.event; event 2366 drivers/video/fbdev/savage/savagefb_driver.c if (mesg.event == PM_EVENT_FREEZE) event 587 drivers/video/fbdev/via/via-core.c if (state.event != PM_EVENT_SUSPEND) event 831 drivers/video/fbdev/vt8623fb.c if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) { event 76 drivers/video/fbdev/xen-fbfront.c union xenfb_out_event *event) event 83 drivers/video/fbdev/xen-fbfront.c XENFB_OUT_RING_REF(info->page, prod) = *event; event 93 drivers/video/fbdev/xen-fbfront.c union xenfb_out_event event; event 95 drivers/video/fbdev/xen-fbfront.c memset(&event, 0, sizeof(event)); event 96 drivers/video/fbdev/xen-fbfront.c event.type = XENFB_TYPE_UPDATE; event 97 drivers/video/fbdev/xen-fbfront.c event.update.x = x; event 98 drivers/video/fbdev/xen-fbfront.c event.update.y = y; event 99 drivers/video/fbdev/xen-fbfront.c event.update.width = w; event 100 drivers/video/fbdev/xen-fbfront.c event.update.height = h; event 103 drivers/video/fbdev/xen-fbfront.c xenfb_send_event(info, &event); event 108 drivers/video/fbdev/xen-fbfront.c union xenfb_out_event event; event 110 drivers/video/fbdev/xen-fbfront.c memset(&event, 0, sizeof(event)); event 111 drivers/video/fbdev/xen-fbfront.c event.resize = info->resize; event 114 drivers/video/fbdev/xen-fbfront.c xenfb_send_event(info, &event); event 34 drivers/virtio/virtio_input.c struct virtio_input_event *event; event 40 drivers/virtio/virtio_input.c while ((event = virtqueue_get_buf(vi->evt, &len)) != NULL) { event 43 drivers/virtio/virtio_input.c le16_to_cpu(event->type), event 44 drivers/virtio/virtio_input.c le16_to_cpu(event->code), event 45 drivers/virtio/virtio_input.c le32_to_cpu(event->value)); event 47 drivers/virtio/virtio_input.c virtinput_queue_evtbuf(vi, event); event 265 drivers/virtio/virtio_input.c vi->idev->event = virtinput_status; event 106 drivers/virtio/virtio_ring.c bool event; event 607 drivers/virtio/virtio_ring.c if (vq->event) { event 742 drivers/virtio/virtio_ring.c if (!vq->event) event 763 drivers/virtio/virtio_ring.c if (!vq->event) event 796 drivers/virtio/virtio_ring.c if (!vq->event) event 1433 drivers/virtio/virtio_ring.c if (vq->event) { event 1446 drivers/virtio/virtio_ring.c vq->packed.event_flags_shadow = vq->event ? event 1483 drivers/virtio/virtio_ring.c if (vq->event) { event 1505 drivers/virtio/virtio_ring.c vq->packed.event_flags_shadow = vq->event ? 
event 1619 drivers/virtio/virtio_ring.c vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); event 2093 drivers/virtio/virtio_ring.c vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); event 2108 drivers/virtio/virtio_ring.c if (!vq->event) event 250 drivers/watchdog/diag288_wdt.c static int wdt_power_event(struct notifier_block *this, unsigned long event, event 253 drivers/watchdog/diag288_wdt.c switch (event) { event 119 drivers/watchdog/imx_sc_wdt.c unsigned long event, void *group) event 126 drivers/watchdog/imx_sc_wdt.c if (event & SC_IRQ_WDOG && event 86 drivers/xen/cpu_hotplug.c unsigned long event, void *data) event 85 drivers/xen/gntalloc.c int event; /* Port (event channel) to notify */ event 192 drivers/xen/gntalloc.c notify_remote_via_evtchn(gref->notify.event); event 193 drivers/xen/gntalloc.c evtchn_put(gref->notify.event); event 428 drivers/xen/gntalloc.c evtchn_put(gref->notify.event); event 432 drivers/xen/gntalloc.c gref->notify.event = op.event_channel_port; event 48 drivers/xen/gntdev-common.h int event; event 248 drivers/xen/gntdev.c notify_remote_via_evtchn(map->notify.event); event 249 drivers/xen/gntdev.c evtchn_put(map->notify.event); event 808 drivers/xen/gntdev.c out_event = map->notify.event; event 812 drivers/xen/gntdev.c map->notify.event = op.event_channel_port; event 367 drivers/xen/manage.c unsigned long event, event 233 drivers/xen/xen-acpi-cpuhotplug.c u32 event, void *data) event 242 drivers/xen/xen-acpi-cpuhotplug.c switch (event) { event 247 drivers/xen/xen-acpi-cpuhotplug.c (event == ACPI_NOTIFY_BUS_CHECK) ? event 294 drivers/xen/xen-acpi-cpuhotplug.c "Unsupported event [0x%x]\n", event)); event 300 drivers/xen/xen-acpi-cpuhotplug.c (void) acpi_evaluate_ost(handle, event, ost_code, NULL); event 222 drivers/xen/xen-acpi-memhotplug.c static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) event 228 drivers/xen/xen-acpi-memhotplug.c switch (event) { event 234 drivers/xen/xen-acpi-memhotplug.c if (event == ACPI_NOTIFY_DEVICE_CHECK) event 273 drivers/xen/xen-acpi-memhotplug.c "Unsupported event [0x%x]\n", event)); event 278 drivers/xen/xen-acpi-memhotplug.c (void) acpi_evaluate_ost(handle, event, ost_code, NULL); event 97 drivers/xen/xen-acpi-pad.c static void acpi_pad_notify(acpi_handle handle, u32 event, event 100 drivers/xen/xen-acpi-pad.c switch (event) { event 105 drivers/xen/xen-acpi-pad.c pr_warn("Unsupported event [0x%x]\n", event); event 112 drivers/xen/xen-balloon.c unsigned long event, event 102 drivers/xen/xenbus/xenbus.h int xs_watch_msg(struct xs_watch_event *event); event 241 drivers/xen/xenbus/xenbus_probe_backend.c unsigned long event, event 455 drivers/xen/xenbus/xenbus_probe_frontend.c unsigned long event, event 697 drivers/xen/xenbus/xenbus_xs.c int xs_watch_msg(struct xs_watch_event *event) event 699 drivers/xen/xenbus/xenbus_xs.c if (count_strings(event->body, event->len) != 2) { event 700 drivers/xen/xenbus/xenbus_xs.c kfree(event); event 703 drivers/xen/xenbus/xenbus_xs.c event->path = (const char *)event->body; event 704 drivers/xen/xenbus/xenbus_xs.c event->token = (const char *)strchr(event->body, '\0') + 1; event 707 drivers/xen/xenbus/xenbus_xs.c event->handle = find_watch(event->token); event 708 drivers/xen/xenbus/xenbus_xs.c if (event->handle != NULL) { event 710 drivers/xen/xenbus/xenbus_xs.c list_add_tail(&event->list, &watch_events); event 714 drivers/xen/xenbus/xenbus_xs.c kfree(event); event 791 drivers/xen/xenbus/xenbus_xs.c struct xs_watch_event *event, *tmp; event 817 
drivers/xen/xenbus/xenbus_xs.c list_for_each_entry_safe(event, tmp, &watch_events, list) { event 818 drivers/xen/xenbus/xenbus_xs.c if (event->handle != watch) event 820 drivers/xen/xenbus/xenbus_xs.c list_del(&event->list); event 821 drivers/xen/xenbus/xenbus_xs.c kfree(event); event 869 drivers/xen/xenbus/xenbus_xs.c struct xs_watch_event *event; event 889 drivers/xen/xenbus/xenbus_xs.c event = list_entry(ent, struct xs_watch_event, list); event 890 drivers/xen/xenbus/xenbus_xs.c event->handle->callback(event->handle, event->path, event 891 drivers/xen/xenbus/xenbus_xs.c event->token); event 892 drivers/xen/xenbus/xenbus_xs.c kfree(event); event 1093 fs/aio.c struct io_event *ev_page, *event; event 1111 fs/aio.c event = ev_page + pos % AIO_EVENTS_PER_PAGE; event 1113 fs/aio.c *event = iocb->ki_res; event 1175 fs/aio.c struct io_event __user *event, long nr) event 1228 fs/aio.c copy_ret = copy_to_user(event + ret, ev + pos, event 1255 fs/aio.c struct io_event __user *event, long *i) event 1257 fs/aio.c long ret = aio_read_events_ring(ctx, event + *i, nr - *i); event 1272 fs/aio.c struct io_event __user *event, event 1292 fs/aio.c aio_read_events(ctx, min_nr, nr, event, &ret); event 1295 fs/aio.c aio_read_events(ctx, min_nr, nr, event, &ret), event 176 fs/cifs/smbdirect.c struct rdma_cm_id *id, struct rdma_cm_event *event) event 181 fs/cifs/smbdirect.c event->event, event->status); event 183 fs/cifs/smbdirect.c switch (event->event) { event 201 fs/cifs/smbdirect.c log_rdma_event(INFO, "connected event=%d\n", event->event); event 209 fs/cifs/smbdirect.c log_rdma_event(INFO, "connecting failed event=%d\n", event->event); event 238 fs/cifs/smbdirect.c smbd_qp_async_error_upcall(struct ib_event *event, void *context) event 243 fs/cifs/smbdirect.c ib_event_msg(event->event), event->device->name, info); event 245 fs/cifs/smbdirect.c switch (event->event) { event 173 fs/eventpoll.c struct epoll_event event; event 890 fs/eventpoll.c pt->_key = epi->event.events; event 892 fs/eventpoll.c return vfs_poll(epi->ffd.file, pt) & epi->event.events; event 900 fs/eventpoll.c locked) & epi->event.events; event 959 fs/eventpoll.c epi->ffd.fd, epi->event.events, event 960 fs/eventpoll.c (long long)epi->event.data, event 1230 fs/eventpoll.c if (!(epi->event.events & ~EP_PRIVATE_BITS)) event 1239 fs/eventpoll.c if (pollflags && !(pollflags & epi->event.events)) event 1262 fs/eventpoll.c if ((epi->event.events & EPOLLEXCLUSIVE) && event 1266 fs/eventpoll.c if (epi->event.events & EPOLLIN) event 1270 fs/eventpoll.c if (epi->event.events & EPOLLOUT) event 1290 fs/eventpoll.c if (!(epi->event.events & EPOLLEXCLUSIVE)) event 1326 fs/eventpoll.c if (epi->event.events & EPOLLEXCLUSIVE) event 1496 fs/eventpoll.c static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, event 1519 fs/eventpoll.c epi->event = *event; event 1522 fs/eventpoll.c if (epi->event.events & EPOLLWAKEUP) { event 1630 fs/eventpoll.c const struct epoll_event *event) event 1644 fs/eventpoll.c epi->event.events = event->events; /* need barrier below */ event 1645 fs/eventpoll.c epi->event.data = event->data; /* protected by mtx */ event 1646 fs/eventpoll.c if (epi->event.events & EPOLLWAKEUP) { event 1754 fs/eventpoll.c __put_user(epi->event.data, &uevent->data)) { event 1763 fs/eventpoll.c if (epi->event.events & EPOLLONESHOT) event 1764 fs/eventpoll.c epi->event.events &= EP_PRIVATE_BITS; event 1765 fs/eventpoll.c else if (!(epi->event.events & EPOLLET)) { event 2109 fs/eventpoll.c struct epoll_event __user *, event) event 2121 
fs/eventpoll.c copy_from_user(&epds, event, sizeof(struct epoll_event))) event 2236 fs/eventpoll.c if (!(epi->event.events & EPOLLEXCLUSIVE)) { event 307 fs/fscache/internal.h unsigned event) event 309 fs/fscache/internal.h BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS); event 312 fs/fscache/internal.h object->debug_id, object->event_mask, (1 << event)); event 314 fs/fscache/internal.h if (!test_and_set_bit(event, &object->events) && event 315 fs/fscache/internal.h test_bit(event, &object->event_mask)) event 173 fs/fscache/object.c int event = -1; event 196 fs/fscache/object.c event = fls(events & t->events) - 1; event 197 fs/fscache/object.c __clear_bit(event, &object->oob_event_mask); event 198 fs/fscache/object.c clear_bit(event, &object->events); event 211 fs/fscache/object.c event = fls(events & t->events) - 1; event 213 fs/fscache/object.c true, false, event); event 214 fs/fscache/object.c clear_bit(event, &object->events); event 216 fs/fscache/object.c object->debug_id, event, event 233 fs/fscache/object.c trace_fscache_osm(object, state, false, oob, event); event 234 fs/fscache/object.c new_state = state->work(object, event); event 235 fs/fscache/object.c event = -1; event 356 fs/fscache/object.c int event) event 358 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 373 fs/fscache/object.c int event) event 378 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 428 fs/fscache/object.c int event) event 432 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 452 fs/fscache/object.c int event) event 458 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 576 fs/fscache/object.c int event) event 578 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 610 fs/fscache/object.c int event) event 612 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 623 fs/fscache/object.c int event) event 627 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 651 fs/fscache/object.c int event) event 654 fs/fscache/object.c object->debug_id, object->n_ops, object->n_children, event); event 687 fs/fscache/object.c int event) event 689 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 700 fs/fscache/object.c int event) event 707 fs/fscache/object.c _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event); event 867 fs/fscache/object.c static bool fscache_enqueue_dependents(struct fscache_object *object, int event) event 884 fs/fscache/object.c fscache_raise_event(dep, event); event 965 fs/fscache/object.c int event) event 970 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 1035 fs/fscache/object.c int event) event 1041 fs/fscache/object.c s = _fscache_invalidate_object(object, event); event 1061 fs/fscache/object.c int event) event 1063 fs/fscache/object.c _enter("{OBJ%x},%d", object->debug_id, event); event 1125 fs/fscache/object.c int event) event 123 fs/jfs/jfs_logmgr.c #define LCACHE_WAKEUP(event) wake_up(event) event 118 fs/jfs/jfs_txnmgr.c static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event) event 122 fs/jfs/jfs_txnmgr.c add_wait_queue(event, &wait); event 126 fs/jfs/jfs_txnmgr.c remove_wait_queue(event, &wait); event 129 fs/jfs/jfs_txnmgr.c #define TXN_SLEEP(event)\ event 131 fs/jfs/jfs_txnmgr.c TXN_SLEEP_DROP_LOCK(event);\ event 135 fs/jfs/jfs_txnmgr.c #define TXN_WAKEUP(event) wake_up_all(event) event 36 fs/kernfs/file.c atomic_t event; event 165 fs/kernfs/file.c of->event = 
atomic_read(&of->kn->attr.open->event); event 210 fs/kernfs/file.c of->event = atomic_read(&of->kn->attr.open->event); event 576 fs/kernfs/file.c atomic_set(&new_on->event, 1); event 841 fs/kernfs/file.c if (of->event != atomic_read(&on->event)) event 944 fs/kernfs/file.c atomic_inc(&on->event); event 300 fs/lockd/svc.c unsigned long event, void *ptr) event 305 fs/lockd/svc.c if ((event != NETDEV_DOWN) || event 330 fs/lockd/svc.c unsigned long event, void *ptr) event 335 fs/lockd/svc.c if ((event != NETDEV_DOWN) || event 17 fs/mount.h u64 event; event 65 fs/namespace.c static u64 event; event 785 fs/namespace.c ns->event = ++event; event 795 fs/namespace.c if (ns && ns->event != event) { event 796 fs/namespace.c ns->event = event; event 1254 fs/namespace.c if (p->cached_event == p->ns->event) { event 1264 fs/namespace.c p->cached_event = p->ns->event; event 1582 fs/namespace.c event++; event 1623 fs/namespace.c event++; event 165 fs/nfs/blocklayout/rpc_pipefs.c static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, event 182 fs/nfs/blocklayout/rpc_pipefs.c switch (event) { event 425 fs/nfs/dns_resolve.c static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, event 440 fs/nfs/dns_resolve.c switch (event) { event 2135 fs/nfsd/nfs4recover.c rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr) event 2152 fs/nfsd/nfs4recover.c switch (event) { event 428 fs/nfsd/nfssvc.c static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event, event 437 fs/nfsd/nfssvc.c if ((event != NETDEV_DOWN) || event 460 fs/nfsd/nfssvc.c unsigned long event, void *ptr) event 468 fs/nfsd/nfssvc.c if ((event != NETDEV_DOWN) || event 54 fs/notify/fanotify/fanotify.c static int fanotify_merge(struct list_head *list, struct fsnotify_event *event) event 59 fs/notify/fanotify/fanotify.c pr_debug("%s: list=%p event=%p\n", __func__, list, event); event 60 fs/notify/fanotify/fanotify.c new = FANOTIFY_E(event); event 71 fs/notify/fanotify/fanotify.c if (should_merge(test_event, event)) { event 88 fs/notify/fanotify/fanotify.c struct fanotify_perm_event *event, event 93 fs/notify/fanotify/fanotify.c pr_debug("%s: group=%p event=%p\n", __func__, group, event); event 96 fs/notify/fanotify/fanotify.c event->state == FAN_EVENT_ANSWERED); event 101 fs/notify/fanotify/fanotify.c if (event->state == FAN_EVENT_REPORTED) { event 103 fs/notify/fanotify/fanotify.c event->state = FAN_EVENT_CANCELED; event 108 fs/notify/fanotify/fanotify.c if (event->state == FAN_EVENT_INIT) event 109 fs/notify/fanotify/fanotify.c fsnotify_remove_queued_event(group, &event->fae.fse); event 120 fs/notify/fanotify/fanotify.c switch (event->response & ~FAN_AUDIT) { event 130 fs/notify/fanotify/fanotify.c if (event->response & FAN_AUDIT) event 131 fs/notify/fanotify/fanotify.c audit_fanotify(event->response & ~FAN_AUDIT); event 134 fs/notify/fanotify/fanotify.c group, event, ret); event 136 fs/notify/fanotify/fanotify.c fsnotify_destroy_event(group, &event->fae.fse); event 219 fs/notify/fanotify/fanotify.c static int fanotify_encode_fid(struct fanotify_event *event, event 223 fs/notify/fanotify/fanotify.c struct fanotify_fid *fid = &event->fid; event 250 fs/notify/fanotify/fanotify.c event->fh_len = bytes; event 260 fs/notify/fanotify/fanotify.c event->fh_len = 0; event 290 fs/notify/fanotify/fanotify.c struct fanotify_event *event = NULL; event 314 fs/notify/fanotify/fanotify.c event = &pevent->fae; event 319 fs/notify/fanotify/fanotify.c event = 
kmem_cache_alloc(fanotify_event_cachep, gfp); event 320 fs/notify/fanotify/fanotify.c if (!event) event 328 fs/notify/fanotify/fanotify.c fsnotify_init_event(&event->fse, (unsigned long)id); event 329 fs/notify/fanotify/fanotify.c event->mask = mask; event 331 fs/notify/fanotify/fanotify.c event->pid = get_pid(task_pid(current)); event 333 fs/notify/fanotify/fanotify.c event->pid = get_pid(task_tgid(current)); event 334 fs/notify/fanotify/fanotify.c event->fh_len = 0; event 337 fs/notify/fanotify/fanotify.c event->fh_type = fanotify_encode_fid(event, id, gfp, fsid); event 339 fs/notify/fanotify/fanotify.c event->fh_type = FILEID_ROOT; event 340 fs/notify/fanotify/fanotify.c event->path = *((struct path *)data); event 341 fs/notify/fanotify/fanotify.c path_get(&event->path); event 343 fs/notify/fanotify/fanotify.c event->fh_type = FILEID_INVALID; event 344 fs/notify/fanotify/fanotify.c event->path.mnt = NULL; event 345 fs/notify/fanotify/fanotify.c event->path.dentry = NULL; event 349 fs/notify/fanotify/fanotify.c return event; event 392 fs/notify/fanotify/fanotify.c struct fanotify_event *event; event 442 fs/notify/fanotify/fanotify.c event = fanotify_alloc_event(group, inode, mask, data, data_type, event 445 fs/notify/fanotify/fanotify.c if (unlikely(!event)) { event 455 fs/notify/fanotify/fanotify.c fsn_event = &event->fse; event 486 fs/notify/fanotify/fanotify.c struct fanotify_event *event; event 488 fs/notify/fanotify/fanotify.c event = FANOTIFY_E(fsn_event); event 489 fs/notify/fanotify/fanotify.c if (fanotify_event_has_path(event)) event 490 fs/notify/fanotify/fanotify.c path_put(&event->path); event 491 fs/notify/fanotify/fanotify.c else if (fanotify_event_has_ext_fh(event)) event 492 fs/notify/fanotify/fanotify.c kfree(event->fid.ext_fh); event 493 fs/notify/fanotify/fanotify.c put_pid(event->pid); event 494 fs/notify/fanotify/fanotify.c if (fanotify_is_perm_event(event->mask)) { event 499 fs/notify/fanotify/fanotify.c kmem_cache_free(fanotify_event_cachep, event); event 89 fs/notify/fanotify/fanotify.h static inline bool fanotify_event_has_path(struct fanotify_event *event) event 91 fs/notify/fanotify/fanotify.h return event->fh_type == FILEID_ROOT; event 94 fs/notify/fanotify/fanotify.h static inline bool fanotify_event_has_fid(struct fanotify_event *event) event 96 fs/notify/fanotify/fanotify.h return event->fh_type != FILEID_ROOT && event 97 fs/notify/fanotify/fanotify.h event->fh_type != FILEID_INVALID; event 100 fs/notify/fanotify/fanotify.h static inline bool fanotify_event_has_ext_fh(struct fanotify_event *event) event 102 fs/notify/fanotify/fanotify.h return fanotify_event_has_fid(event) && event 103 fs/notify/fanotify/fanotify.h event->fh_len > FANOTIFY_INLINE_FH_LEN; event 106 fs/notify/fanotify/fanotify.h static inline void *fanotify_event_fh(struct fanotify_event *event) event 108 fs/notify/fanotify/fanotify.h return fanotify_fid_fh(&event->fid, event->fh_len); event 54 fs/notify/fanotify/fanotify_user.c static int fanotify_event_info_len(struct fanotify_event *event) event 56 fs/notify/fanotify/fanotify_user.c if (!fanotify_event_has_fid(event)) event 60 fs/notify/fanotify/fanotify_user.c sizeof(struct file_handle) + event->fh_len, event 100 fs/notify/fanotify/fanotify_user.c struct fanotify_event *event, event 106 fs/notify/fanotify/fanotify_user.c pr_debug("%s: group=%p event=%p\n", __func__, group, event); event 118 fs/notify/fanotify/fanotify_user.c if (event->path.dentry && event->path.mnt) event 119 fs/notify/fanotify/fanotify_user.c new_file = 
dentry_open(&event->path, event 146 fs/notify/fanotify/fanotify_user.c struct fanotify_perm_event *event, event 153 fs/notify/fanotify/fanotify_user.c event->response = response; event 154 fs/notify/fanotify/fanotify_user.c if (event->state == FAN_EVENT_CANCELED) event 157 fs/notify/fanotify/fanotify_user.c event->state = FAN_EVENT_ANSWERED; event 160 fs/notify/fanotify/fanotify_user.c fsnotify_destroy_event(group, &event->fae.fse); event 166 fs/notify/fanotify/fanotify_user.c struct fanotify_perm_event *event; event 192 fs/notify/fanotify/fanotify_user.c list_for_each_entry(event, &group->fanotify_data.access_list, event 194 fs/notify/fanotify/fanotify_user.c if (event->fd != fd) event 197 fs/notify/fanotify/fanotify_user.c list_del_init(&event->fae.fse.list); event 198 fs/notify/fanotify/fanotify_user.c finish_permission_event(group, event, response); event 207 fs/notify/fanotify/fanotify_user.c static int copy_fid_to_user(struct fanotify_event *event, char __user *buf) event 212 fs/notify/fanotify/fanotify_user.c size_t fh_len = event->fh_len; event 213 fs/notify/fanotify/fanotify_user.c size_t len = fanotify_event_info_len(event); event 224 fs/notify/fanotify/fanotify_user.c info.fsid = event->fid.fsid; event 230 fs/notify/fanotify/fanotify_user.c handle.handle_type = event->fh_type; event 241 fs/notify/fanotify/fanotify_user.c fh = fanotify_event_fh(event); event 264 fs/notify/fanotify/fanotify_user.c struct fanotify_event *event; event 270 fs/notify/fanotify/fanotify_user.c event = container_of(fsn_event, struct fanotify_event, fse); event 275 fs/notify/fanotify/fanotify_user.c metadata.mask = event->mask & FANOTIFY_OUTGOING_EVENTS; event 276 fs/notify/fanotify/fanotify_user.c metadata.pid = pid_vnr(event->pid); event 278 fs/notify/fanotify/fanotify_user.c if (fanotify_event_has_path(event)) { event 279 fs/notify/fanotify/fanotify_user.c fd = create_fd(group, event, &f); event 282 fs/notify/fanotify/fanotify_user.c } else if (fanotify_event_has_fid(event)) { event 283 fs/notify/fanotify/fanotify_user.c metadata.event_len += fanotify_event_info_len(event); event 298 fs/notify/fanotify/fanotify_user.c if (fanotify_is_perm_event(event->mask)) event 301 fs/notify/fanotify/fanotify_user.c if (fanotify_event_has_path(event)) { event 303 fs/notify/fanotify/fanotify_user.c } else if (fanotify_event_has_fid(event)) { event 304 fs/notify/fanotify/fanotify_user.c ret = copy_fid_to_user(event, buf + FAN_EVENT_METADATA_LEN); event 443 fs/notify/fanotify/fanotify_user.c struct fanotify_perm_event *event; event 459 fs/notify/fanotify/fanotify_user.c event = list_first_entry(&group->fanotify_data.access_list, event 461 fs/notify/fanotify/fanotify_user.c list_del_init(&event->fae.fse.list); event 462 fs/notify/fanotify/fanotify_user.c finish_permission_event(group, event, FAN_ALLOW); event 50 fs/notify/inotify/inotify_fsnotify.c struct fsnotify_event *event) event 55 fs/notify/inotify/inotify_fsnotify.c return event_compare(last_event, event); event 66 fs/notify/inotify/inotify_fsnotify.c struct inotify_event_info *event; event 99 fs/notify/inotify/inotify_fsnotify.c event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); event 102 fs/notify/inotify/inotify_fsnotify.c if (unlikely(!event)) { event 120 fs/notify/inotify/inotify_fsnotify.c fsn_event = &event->fse; event 122 fs/notify/inotify/inotify_fsnotify.c event->mask = mask; event 123 fs/notify/inotify/inotify_fsnotify.c event->wd = i_mark->wd; event 124 fs/notify/inotify/inotify_fsnotify.c event->sync_cookie = cookie; event 125 
fs/notify/inotify/inotify_fsnotify.c event->name_len = len; event 127 fs/notify/inotify/inotify_fsnotify.c strcpy(event->name, file_name->name); event 117 fs/notify/inotify/inotify_user.c struct inotify_event_info *event; event 119 fs/notify/inotify/inotify_user.c event = INOTIFY_E(fsn_event); event 120 fs/notify/inotify/inotify_user.c if (!event->name_len) event 122 fs/notify/inotify/inotify_user.c return roundup(event->name_len + 1, sizeof(struct inotify_event)); event 136 fs/notify/inotify/inotify_user.c struct fsnotify_event *event; event 141 fs/notify/inotify/inotify_user.c event = fsnotify_peek_first_event(group); event 143 fs/notify/inotify/inotify_user.c pr_debug("%s: group=%p event=%p\n", __func__, group, event); event 145 fs/notify/inotify/inotify_user.c event_size += round_event_name_len(event); event 153 fs/notify/inotify/inotify_user.c return event; event 167 fs/notify/inotify/inotify_user.c struct inotify_event_info *event; event 174 fs/notify/inotify/inotify_user.c event = INOTIFY_E(fsn_event); event 175 fs/notify/inotify/inotify_user.c name_len = event->name_len; event 182 fs/notify/inotify/inotify_user.c inotify_event.mask = inotify_mask_to_arg(event->mask); event 183 fs/notify/inotify/inotify_user.c inotify_event.wd = event->wd; event 184 fs/notify/inotify/inotify_user.c inotify_event.cookie = event->sync_cookie; event 199 fs/notify/inotify/inotify_user.c if (copy_to_user(buf, event->name, name_len)) event 58 fs/notify/notification.c struct fsnotify_event *event) event 61 fs/notify/notification.c if (!event || event == group->overflow_event) event 69 fs/notify/notification.c if (!list_empty(&event->list)) { event 71 fs/notify/notification.c WARN_ON(!list_empty(&event->list)); event 74 fs/notify/notification.c group->ops->free_event(event); event 85 fs/notify/notification.c struct fsnotify_event *event, event 92 fs/notify/notification.c pr_debug("%s: group=%p event=%p\n", __func__, group, event); event 101 fs/notify/notification.c if (event == group->overflow_event || event 109 fs/notify/notification.c event = group->overflow_event; event 114 fs/notify/notification.c ret = merge(list, event); event 123 fs/notify/notification.c list_add_tail(&event->list, list); event 132 fs/notify/notification.c struct fsnotify_event *event) event 139 fs/notify/notification.c list_del_init(&event->list); event 149 fs/notify/notification.c struct fsnotify_event *event; event 155 fs/notify/notification.c event = list_first_entry(&group->notification_list, event 157 fs/notify/notification.c fsnotify_remove_queued_event(group, event); event 158 fs/notify/notification.c return event; event 179 fs/notify/notification.c struct fsnotify_event *event; event 183 fs/notify/notification.c event = fsnotify_remove_first_event(group); event 185 fs/notify/notification.c fsnotify_destroy_event(group, event); event 756 fs/ocfs2/cluster/heartbeat.c struct o2hb_node_event *event; event 766 fs/ocfs2/cluster/heartbeat.c event = list_entry(o2hb_node_events.next, event 769 fs/ocfs2/cluster/heartbeat.c list_del_init(&event->hn_item); event 773 fs/ocfs2/cluster/heartbeat.c event->hn_event_type == O2HB_NODE_UP_CB ? 
"UP" : "DOWN", event 774 fs/ocfs2/cluster/heartbeat.c event->hn_node_num); event 776 fs/ocfs2/cluster/heartbeat.c hbcall = hbcall_from_type(event->hn_event_type); event 783 fs/ocfs2/cluster/heartbeat.c o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num); event 792 fs/ocfs2/cluster/heartbeat.c static void o2hb_queue_node_event(struct o2hb_node_event *event, event 801 fs/ocfs2/cluster/heartbeat.c event->hn_event_type = type; event 802 fs/ocfs2/cluster/heartbeat.c event->hn_node = node; event 803 fs/ocfs2/cluster/heartbeat.c event->hn_node_num = node_num; event 808 fs/ocfs2/cluster/heartbeat.c list_add_tail(&event->hn_item, &o2hb_node_events); event 813 fs/ocfs2/cluster/heartbeat.c struct o2hb_node_event event = event 814 fs/ocfs2/cluster/heartbeat.c { .hn_item = LIST_HEAD_INIT(event.hn_item), }; event 832 fs/ocfs2/cluster/heartbeat.c o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node, event 840 fs/ocfs2/cluster/heartbeat.c o2hb_run_event_list(&event); event 892 fs/ocfs2/cluster/heartbeat.c struct o2hb_node_event event = event 893 fs/ocfs2/cluster/heartbeat.c { .hn_item = LIST_HEAD_INIT(event.hn_item), }; event 990 fs/ocfs2/cluster/heartbeat.c o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node, event 1042 fs/ocfs2/cluster/heartbeat.c o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, event 1063 fs/ocfs2/cluster/heartbeat.c o2hb_run_event_list(&event); event 101 fs/ocfs2/dlm/dlmcommon.h wait_queue_head_t event; event 2023 fs/ocfs2/dlm/dlmdomain.c init_waitqueue_head(&dlm->reco.event); event 406 fs/ocfs2/dlm/dlmrecovery.c wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); event 425 fs/ocfs2/dlm/dlmrecovery.c wake_up(&dlm->reco.event); event 210 fs/ocfs2/dlmfs/dlmfs.c __poll_t event = 0; event 218 fs/ocfs2/dlmfs/dlmfs.c event = EPOLLIN | EPOLLRDNORM; event 221 fs/ocfs2/dlmfs/dlmfs.c return event; event 1042 fs/ocfs2/localalloc.c enum ocfs2_la_event event) event 1056 fs/ocfs2/localalloc.c if (event == OCFS2_LA_EVENT_ENOSPC || event 1057 fs/ocfs2/localalloc.c event == OCFS2_LA_EVENT_FRAGMENTED) { event 56 fs/proc/proc_sysctl.c atomic_inc(&poll->event); event 662 fs/proc/proc_sysctl.c unsigned long event; event 674 fs/proc/proc_sysctl.c event = (unsigned long)filp->private_data; event 677 fs/proc/proc_sysctl.c if (event != atomic_read(&table->poll->event)) { event 27 fs/proc_namespace.c int event; event 31 fs/proc_namespace.c event = READ_ONCE(ns->event); event 32 fs/proc_namespace.c if (m->poll_event != event) { event 33 fs/proc_namespace.c m->poll_event = event; event 276 fs/proc_namespace.c m->poll_event = ns->event; event 1995 fs/reiserfs/inode.c inode->i_generation = ++event; event 197 fs/userfaultfd.c msg.event = UFFD_EVENT_PAGEFAULT; event 604 fs/userfaultfd.c if (ewq->msg.event == 0) event 615 fs/userfaultfd.c if (ewq->msg.event == UFFD_EVENT_FORK) { event 666 fs/userfaultfd.c ewq->msg.event = 0; event 727 fs/userfaultfd.c ewq.msg.event = UFFD_EVENT_FORK; event 782 fs/userfaultfd.c ewq.msg.event = UFFD_EVENT_REMAP; event 807 fs/userfaultfd.c ewq.msg.event = UFFD_EVENT_REMOVE; event 864 fs/userfaultfd.c ewq.msg.event = UFFD_EVENT_UNMAP; event 1107 fs/userfaultfd.c if (uwq->msg.event == UFFD_EVENT_FORK) { event 1146 fs/userfaultfd.c if (!ret && msg->event == UFFD_EVENT_FORK) { event 151 include/acpi/acpi_bus.h typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); event 689 include/acpi/acpixf.h acpi_enable_event(u32 event, u32 flags)) event 692 include/acpi/acpixf.h acpi_disable_event(u32 event, u32 flags)) event 693 include/acpi/acpixf.h 
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_clear_event(u32 event)) event 696 include/acpi/acpixf.h acpi_get_event_status(u32 event, event 1070 include/acpi/actypes.h acpi_status (*acpi_table_handler) (u32 event, void *table, void *context); event 137 include/drm/drm_atomic.h struct drm_pending_vblank_event *event; event 137 include/drm/drm_atomic_helper.h struct drm_pending_vblank_event *event, event 143 include/drm/drm_atomic_helper.h struct drm_pending_vblank_event *event, event 363 include/drm/drm_crtc.h struct drm_pending_vblank_event *event; event 566 include/drm/drm_crtc.h struct drm_pending_vblank_event *event, event 585 include/drm/drm_crtc.h struct drm_pending_vblank_event *event, event 116 include/drm/drm_file.h struct drm_event *event; event 76 include/drm/drm_vblank.h } event; event 378 include/linux/acpi.h extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); event 59 include/linux/apm-emulation.h void apm_queue_event(apm_event_t event); event 29 include/linux/arm_sdei.h typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg); event 595 include/linux/avf/virtchnl.h enum virtchnl_event_codes event; event 477 include/linux/bpf.h struct perf_event *event; event 27 include/linux/can/led.h void can_led_event(struct net_device *netdev, enum can_led_event event); event 35 include/linux/can/led.h enum can_led_event event) event 423 include/linux/compat.h int get_compat_sigevent(struct sigevent *event, event 213 include/linux/coresight.h struct perf_event *event, void **pages, event 246 include/linux/coresight.h struct perf_event *event, u32 mode); event 248 include/linux/coresight.h struct perf_event *event); event 217 include/linux/cs5535.h int event, int enable); event 62 include/linux/dmar.h unsigned long event; event 339 include/linux/fscache-cache.h int event); event 124 include/linux/fsnotify_backend.h void (*free_event)(struct fsnotify_event *event); event 408 include/linux/fsnotify_backend.h struct fsnotify_event *event); event 411 include/linux/fsnotify_backend.h struct fsnotify_event *event, event 428 include/linux/fsnotify_backend.h struct fsnotify_event *event); event 501 include/linux/fsnotify_backend.h static inline void fsnotify_init_event(struct fsnotify_event *event, event 504 include/linux/fsnotify_backend.h INIT_LIST_HEAD(&event->list); event 505 include/linux/fsnotify_backend.h event->objectid = objectid; event 603 include/linux/greybus/greybus_protocols.h __u8 event; event 1544 include/linux/greybus/greybus_protocols.h __u8 event; event 1748 include/linux/greybus/greybus_protocols.h __u8 event; event 2144 include/linux/greybus/greybus_protocols.h __u8 event; event 2150 include/linux/greybus/greybus_protocols.h __u8 event; event 2155 include/linux/greybus/greybus_protocols.h __u8 event; event 751 include/linux/hid.h int (*event)(struct hid_device *hdev, struct hid_field *field, event 242 include/linux/hsi/hsi.h int hsi_event(struct hsi_port *port, unsigned long event); event 13 include/linux/i2c-pxa.h void (*event)(void *ptr, i2c_slave_event_t event); event 40 include/linux/i2c.h enum i2c_slave_event event, u8 *val); event 376 include/linux/i2c.h enum i2c_slave_event event, u8 *val) event 378 include/linux/i2c.h return client->slave_cb(client, event, val); event 170 include/linux/inetdevice.h void inet_netconf_notify_devconf(struct net *net, int event, int type, event 182 include/linux/input.h int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); event 306 include/linux/input.h void 
(*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); event 210 include/linux/kernfs.h int event; event 378 include/linux/leds.h enum led_brightness event); event 434 include/linux/leds.h enum led_brightness event) {} event 787 include/linux/memcontrol.h enum memcg_memory_event event) event 789 include/linux/memcontrol.h atomic_long_inc(&memcg->memory_events_local[event]); event 793 include/linux/memcontrol.h atomic_long_inc(&memcg->memory_events[event]); event 805 include/linux/memcontrol.h enum memcg_memory_event event) event 815 include/linux/memcontrol.h memcg_memory_event(memcg, event); event 841 include/linux/memcontrol.h enum memcg_memory_event event) event 846 include/linux/memcontrol.h enum memcg_memory_event event) event 330 include/linux/mfd/abx500.h int abx500_event_registers_startup_state_get(struct device *dev, u8 *event); event 163 include/linux/mfd/arizona/core.h unsigned long event, event 166 include/linux/mfd/arizona/core.h return blocking_notifier_call_chain(&arizona->notifier, event, data); event 37 include/linux/mfd/dln2.h int dln2_register_event_cb(struct platform_device *pdev, u16 event, event 46 include/linux/mfd/dln2.h void dln2_unregister_event_cb(struct platform_device *pdev, u16 event); event 206 include/linux/mfd/lp8788.h enum lp8788_charger_event event); event 39 include/linux/mfd/rave-sp.h static inline unsigned long rave_sp_action_pack(u8 event, u8 value) event 41 include/linux/mfd/rave-sp.h return ((unsigned long)value << 8) | event; event 739 include/linux/mlx4/device.h void (*event) (struct mlx4_cq *, enum mlx4_event); event 766 include/linux/mlx4/device.h void (*event) (struct mlx4_qp *, enum mlx4_event); event 776 include/linux/mlx4/device.h void (*event) (struct mlx4_srq *, enum mlx4_event); event 987 include/linux/mlx4/device.h } event; event 1473 include/linux/mlx4/device.h int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event); event 60 include/linux/mlx4/driver.h void (*event) (struct mlx4_dev *dev, void *context, event 61 include/linux/mlx4/driver.h enum mlx4_dev_event event, unsigned long param); event 51 include/linux/mlx5/cq.h void (*event) (struct mlx5_core_cq *, enum mlx5_event); event 58 include/linux/mlx5/eq.h #define MLX5_NB_INIT(name, handler, event) do { \ event 60 include/linux/mlx5/eq.h (name)->event_type = MLX5_EVENT_TYPE_##event; \ event 478 include/linux/mlx5/qp.h void (*event) (struct mlx5_core_qp *, int); event 70 include/linux/mmu_notifier.h enum mmu_notifier_event event; event 407 include/linux/mmu_notifier.h enum mmu_notifier_event event, event 415 include/linux/mmu_notifier.h range->event = event; event 544 include/linux/mmu_notifier.h #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \ event 31 include/linux/nd.h void (*notify)(struct device *dev, enum nvdimm_event event); event 172 include/linux/nd.h void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event); event 96 include/linux/oprofile.h void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); event 106 include/linux/oprofile.h unsigned long event, int is_kernel); event 112 include/linux/oprofile.h unsigned long event, int is_kernel, event 117 include/linux/oprofile.h void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event); event 183 include/linux/oprofile.h struct ring_buffer_event *event; event 84 include/linux/perf/arm_pmu.h void (*enable)(struct perf_event *event); event 85 
include/linux/perf/arm_pmu.h void (*disable)(struct perf_event *event); event 87 include/linux/perf/arm_pmu.h struct perf_event *event); event 89 include/linux/perf/arm_pmu.h struct perf_event *event); event 92 include/linux/perf/arm_pmu.h u64 (*read_counter)(struct perf_event *event); event 93 include/linux/perf/arm_pmu.h void (*write_counter)(struct perf_event *event, u64 val); event 97 include/linux/perf/arm_pmu.h int (*map_event)(struct perf_event *event); event 98 include/linux/perf/arm_pmu.h int (*filter_match)(struct perf_event *event); event 118 include/linux/perf/arm_pmu.h u64 armpmu_event_update(struct perf_event *event); event 120 include/linux/perf/arm_pmu.h int armpmu_event_set_period(struct perf_event *event); event 122 include/linux/perf/arm_pmu.h int armpmu_map_event(struct perf_event *event, event 301 include/linux/perf_event.h int (*event_init) (struct perf_event *event); event 307 include/linux/perf_event.h void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ event 308 include/linux/perf_event.h void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ event 336 include/linux/perf_event.h int (*add) (struct perf_event *event, int flags); event 337 include/linux/perf_event.h void (*del) (struct perf_event *event, int flags); event 357 include/linux/perf_event.h void (*start) (struct perf_event *event, int flags); event 358 include/linux/perf_event.h void (*stop) (struct perf_event *event, int flags); event 366 include/linux/perf_event.h void (*read) (struct perf_event *event); event 400 include/linux/perf_event.h int (*event_idx) (struct perf_event *event); /*optional */ event 416 include/linux/perf_event.h void *(*setup_aux) (struct perf_event *event, void **pages, event 447 include/linux/perf_event.h void (*addr_filters_sync) (struct perf_event *event); event 457 include/linux/perf_event.h int (*aux_output_match) (struct perf_event *event); event 463 include/linux/perf_event.h int (*filter_match) (struct perf_event *event); /* optional */ event 468 include/linux/perf_event.h int (*check_period) (struct perf_event *event, u64 value); /* optional */ event 567 include/linux/perf_event.h #define for_each_sibling_event(sibling, event) \ event 568 include/linux/perf_event.h if ((event)->group_leader == (event)) \ event 569 include/linux/perf_event.h list_for_each_entry((sibling), &(event)->sibling_list, sibling_list) event 828 include/linux/perf_event.h struct perf_event *event; event 843 include/linux/perf_event.h struct perf_event *event; event 880 include/linux/perf_event.h struct perf_event *event); event 887 include/linux/perf_event.h extern void perf_event_itrace_started(struct perf_event *event); event 904 include/linux/perf_event.h extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event); event 915 include/linux/perf_event.h extern int perf_event_refresh(struct perf_event *event, int refresh); event 916 include/linux/perf_event.h extern void perf_event_update_userpage(struct perf_event *event); event 917 include/linux/perf_event.h extern int perf_event_release_kernel(struct perf_event *event); event 926 include/linux/perf_event.h int perf_event_read_local(struct perf_event *event, u64 *value, event 928 include/linux/perf_event.h extern u64 perf_event_read_value(struct perf_event *event, event 1000 include/linux/perf_event.h struct perf_event *event); event 1003 include/linux/perf_event.h struct perf_event *event, event 1006 include/linux/perf_event.h extern int 
perf_event_overflow(struct perf_event *event, event 1010 include/linux/perf_event.h extern void perf_event_output_forward(struct perf_event *event, event 1013 include/linux/perf_event.h extern void perf_event_output_backward(struct perf_event *event, event 1016 include/linux/perf_event.h extern int perf_event_output(struct perf_event *event, event 1021 include/linux/perf_event.h is_default_overflow_handler(struct perf_event *event) event 1023 include/linux/perf_event.h if (likely(event->overflow_handler == perf_event_output_forward)) event 1025 include/linux/perf_event.h if (unlikely(event->overflow_handler == perf_event_output_backward)) event 1033 include/linux/perf_event.h struct perf_event *event); event 1035 include/linux/perf_event.h perf_event__output_id_sample(struct perf_event *event, event 1040 include/linux/perf_event.h perf_log_lost_samples(struct perf_event *event, u64 lost); event 1042 include/linux/perf_event.h static inline bool event_has_any_exclude_flag(struct perf_event *event) event 1044 include/linux/perf_event.h struct perf_event_attr *attr = &event->attr; event 1051 include/linux/perf_event.h static inline bool is_sampling_event(struct perf_event *event) event 1053 include/linux/perf_event.h return event->attr.sample_period != 0; event 1059 include/linux/perf_event.h static inline int is_software_event(struct perf_event *event) event 1061 include/linux/perf_event.h return event->event_caps & PERF_EV_CAP_SOFTWARE; event 1067 include/linux/perf_event.h static inline int in_software_context(struct perf_event *event) event 1069 include/linux/perf_event.h return event->ctx->pmu->task_ctx_nr == perf_sw_context; event 1195 include/linux/perf_event.h extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); event 1264 include/linux/perf_event.h extern void perf_bp_event(struct perf_event *event, void *data); event 1275 include/linux/perf_event.h static inline bool has_branch_stack(struct perf_event *event) event 1277 include/linux/perf_event.h return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; event 1280 include/linux/perf_event.h static inline bool needs_branch_stack(struct perf_event *event) event 1282 include/linux/perf_event.h return event->attr.branch_sample_type != 0; event 1285 include/linux/perf_event.h static inline bool has_aux(struct perf_event *event) event 1287 include/linux/perf_event.h return event->pmu->setup_aux; event 1290 include/linux/perf_event.h static inline bool is_write_backward(struct perf_event *event) event 1292 include/linux/perf_event.h return !!event->attr.write_backward; event 1295 include/linux/perf_event.h static inline bool has_addr_filter(struct perf_event *event) event 1297 include/linux/perf_event.h return event->pmu->nr_addr_filters; event 1304 include/linux/perf_event.h perf_event_addr_filters(struct perf_event *event) event 1306 include/linux/perf_event.h struct perf_addr_filters_head *ifh = &event->addr_filters; event 1308 include/linux/perf_event.h if (event->parent) event 1309 include/linux/perf_event.h ifh = &event->parent->addr_filters; event 1314 include/linux/perf_event.h extern void perf_event_addr_filters_sync(struct perf_event *event); event 1317 include/linux/perf_event.h struct perf_event *event, unsigned int size); event 1319 include/linux/perf_event.h struct perf_event *event, event 1322 include/linux/perf_event.h struct perf_event *event, event 1332 include/linux/perf_event.h extern u64 perf_swevent_set_period(struct perf_event *event); event 1333 
include/linux/perf_event.h extern void perf_event_enable(struct perf_event *event); event 1334 include/linux/perf_event.h extern void perf_event_disable(struct perf_event *event); event 1335 include/linux/perf_event.h extern void perf_event_disable_local(struct perf_event *event); event 1336 include/linux/perf_event.h extern void perf_event_disable_inatomic(struct perf_event *event); event 1338 include/linux/perf_event.h extern int perf_event_account_interrupt(struct perf_event *event); event 1342 include/linux/perf_event.h struct perf_event *event) { return NULL; } event 1368 include/linux/perf_event.h static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) event 1372 include/linux/perf_event.h static inline int perf_event_read_local(struct perf_event *event, u64 *value, event 1380 include/linux/perf_event.h static inline int perf_event_refresh(struct perf_event *event, int refresh) event 1390 include/linux/perf_event.h perf_bp_event(struct perf_event *event, void *data) { } event 1412 include/linux/perf_event.h static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } event 1413 include/linux/perf_event.h static inline void perf_event_enable(struct perf_event *event) { } event 1414 include/linux/perf_event.h static inline void perf_event_disable(struct perf_event *event) { } event 1417 include/linux/perf_event.h static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } event 51 include/linux/pm.h int event; event 450 include/linux/pm.h #define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, }) event 451 include/linux/pm.h #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) event 452 include/linux/pm.h #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) event 453 include/linux/pm.h #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) event 454 include/linux/pm.h #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) event 455 include/linux/pm.h #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) event 456 include/linux/pm.h #define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) event 457 include/linux/pm.h #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) event 458 include/linux/pm.h #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) event 459 include/linux/pm.h #define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) event 461 include/linux/pm.h { .event = PM_EVENT_USER_SUSPEND, }) event 463 include/linux/pm.h { .event = PM_EVENT_USER_RESUME, }) event 465 include/linux/pm.h { .event = PM_EVENT_REMOTE_RESUME, }) event 467 include/linux/pm.h { .event = PM_EVENT_AUTO_SUSPEND, }) event 469 include/linux/pm.h { .event = PM_EVENT_AUTO_RESUME, }) event 471 include/linux/pm.h #define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) event 29 include/linux/pps_kernel.h int event, void *data); /* PPS echo function */ event 89 include/linux/pps_kernel.h struct pps_event_time *ts, int event, void *data); event 113 include/linux/psi_types.h int event; event 205 include/linux/ptp_clock_kernel.h struct ptp_clock_event *event); event 253 include/linux/ptp_clock_kernel.h struct ptp_clock_event *event) event 37 include/linux/ptrace.h #define PT_EVENT_FLAG(event) (1 << (PT_OPT_FLAG_SHIFT + (event))) event 140 include/linux/ptrace.h static inline bool ptrace_event_enabled(struct task_struct *task, int event) event 142 include/linux/ptrace.h return 
task->ptrace & PT_EVENT_FLAG(event); event 155 include/linux/ptrace.h static inline void ptrace_event(int event, unsigned long message) event 157 include/linux/ptrace.h if (unlikely(ptrace_event_enabled(current, event))) { event 159 include/linux/ptrace.h ptrace_notify((event << 8) | SIGTRAP); event 160 include/linux/ptrace.h } else if (event == PTRACE_EVENT_EXEC) { event 178 include/linux/ptrace.h static inline void ptrace_event_pid(int event, struct pid *pid) event 195 include/linux/ptrace.h ptrace_event(event, message); event 501 include/linux/qed/qed_rdma_if.h enum qed_iwarp_event_type event; event 508 include/linux/qed/qed_rdma_if.h struct qed_iwarp_cm_event_params *event); event 55 include/linux/qed/qede_rdma.h enum qede_rdma_event event; event 498 include/linux/regulator/driver.h unsigned long event, void *data); event 62 include/linux/ring_buffer.h unsigned ring_buffer_event_length(struct ring_buffer_event *event); event 63 include/linux/ring_buffer.h void *ring_buffer_event_data(struct ring_buffer_event *event); event 64 include/linux/ring_buffer.h u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event); event 81 include/linux/ring_buffer.h struct ring_buffer_event *event); event 116 include/linux/ring_buffer.h struct ring_buffer_event *event); event 25 include/linux/rtnetlink.h unsigned change, u32 event, event 243 include/linux/security.h int call_blocking_lsm_notifier(enum lsm_event event, void *data); event 450 include/linux/security.h static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) event 91 include/linux/soc/mediatek/mtk-cmdq.h int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); event 100 include/linux/soc/mediatek/mtk-cmdq.h int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); event 143 include/linux/syscalls.h .event.funcs = &enter_syscall_print_funcs, \ event 159 include/linux/syscalls.h .event.funcs = &exit_syscall_print_funcs, \ event 361 include/linux/syscalls.h struct epoll_event __user *event); event 106 include/linux/sysctl.h atomic_t event; event 112 include/linux/sysctl.h return (void *)(unsigned long)atomic_read(&poll->event); event 116 include/linux/sysctl.h .event = ATOMIC_INIT(0), \ event 334 include/linux/thermal.h enum events event; event 499 include/linux/thermal.h enum thermal_notify_event event) event 549 include/linux/thermal.h enum events event); event 552 include/linux/thermal.h enum events event) event 100 include/linux/tpm_eventlog.h u8 event[0]; event 105 include/linux/tpm_eventlog.h u8 event[0]; event 158 include/linux/tpm_eventlog.h static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, event 175 include/linux/tpm_eventlog.h marker = event; event 177 include/linux/tpm_eventlog.h marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type) event 178 include/linux/tpm_eventlog.h + sizeof(event->count); event 193 include/linux/tpm_eventlog.h event = (struct tcg_pcr_event2_head *)mapping; event 198 include/linux/tpm_eventlog.h count = READ_ONCE(event->count); event 199 include/linux/tpm_eventlog.h event_type = READ_ONCE(event->event_type); event 201 include/linux/tpm_eventlog.h efispecid = (struct tcg_efi_specid_event_head *)event_header->event; event 210 include/linux/tpm_eventlog.h halg_size = sizeof(event->digests[i].alg_id); event 52 include/linux/trace_events.h struct trace_event *event); event 444 include/linux/trace_events.h struct ring_buffer_event *event); event 478 include/linux/trace_events.h int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog 
*prog); event 479 include/linux/trace_events.h void perf_event_detach_bpf_prog(struct perf_event *event); event 480 include/linux/trace_events.h int perf_event_query_prog_array(struct perf_event *event, void __user *info); event 485 include/linux/trace_events.h int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, event 495 include/linux/trace_events.h perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog) event 500 include/linux/trace_events.h static inline void perf_event_detach_bpf_prog(struct perf_event *event) { } event 503 include/linux/trace_events.h perf_event_query_prog_array(struct perf_event *event, void __user *info) event 522 include/linux/trace_events.h static inline int bpf_get_perf_event_info(const struct perf_event *event, event 552 include/linux/trace_events.h int trace_set_clr_event(const char *system, const char *event, int set); event 579 include/linux/trace_events.h extern int perf_trace_init(struct perf_event *event); event 580 include/linux/trace_events.h extern void perf_trace_destroy(struct perf_event *event); event 581 include/linux/trace_events.h extern int perf_trace_add(struct perf_event *event, int flags); event 582 include/linux/trace_events.h extern void perf_trace_del(struct perf_event *event, int flags); event 584 include/linux/trace_events.h extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe); event 585 include/linux/trace_events.h extern void perf_kprobe_destroy(struct perf_event *event); event 586 include/linux/trace_events.h extern int bpf_get_kprobe_info(const struct perf_event *event, event 592 include/linux/trace_events.h extern int perf_uprobe_init(struct perf_event *event, event 594 include/linux/trace_events.h extern void perf_uprobe_destroy(struct perf_event *event); event 595 include/linux/trace_events.h extern int bpf_get_uprobe_info(const struct perf_event *event, event 599 include/linux/trace_events.h extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, event 601 include/linux/trace_events.h extern void ftrace_profile_free_filter(struct perf_event *event); event 407 include/linux/tracepoint.h #define TRACE_EVENT_FLAGS(event, flag) event 409 include/linux/tracepoint.h #define TRACE_EVENT_PERF_PERM(event, expr...) event 545 include/linux/tracepoint.h #define TRACE_EVENT_FLAGS(event, flag) event 547 include/linux/tracepoint.h #define TRACE_EVENT_PERF_PERM(event, expr...) event 73 include/linux/uio_driver.h atomic_t event; event 70 include/linux/usb/chipidea.h int (*notify_event) (struct ci_hdrc *ci, unsigned event); event 227 include/linux/usb/phy.h extern void usb_phy_set_event(struct usb_phy *x, unsigned long event); event 266 include/linux/usb/phy.h static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event) event 97 include/linux/usb/renesas_usbhs.h int (*notifier)(struct notifier_block *nb, unsigned long event, event 46 include/linux/vmstat.h unsigned long event[NR_VM_EVENT_ITEMS]; event 57 include/linux/vmstat.h raw_cpu_inc(vm_event_states.event[item]); event 62 include/linux/vmstat.h this_cpu_inc(vm_event_states.event[item]); event 67 include/linux/vmstat.h raw_cpu_add(vm_event_states.event[item], delta); event 72 include/linux/vmstat.h this_cpu_add(vm_event_states.event[item], delta); event 41 include/linux/vmw_vmci_api.h int vmci_event_subscribe(u32 event, event 616 include/linux/vmw_vmci_defs.h u32 event; /* 4 bytes. 
*/ event 130 include/linux/vt_kern.h void vt_event_post(unsigned int event, unsigned int old, unsigned int new); event 34 include/media/v4l2-event.h struct v4l2_event event; event 91 include/media/v4l2-event.h int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, event 250 include/misc/cxl.h struct cxl_event_afu_driver_reserved *event, event 297 include/net/addrconf.h void inet6_netconf_notify_devconf(struct net *net, int event, int type, event 2240 include/net/bluetooth/hci.h __u16 event; event 2246 include/net/bluetooth/hci.h __u16 event; event 1454 include/net/bluetooth/hci_core.h const void *param, u8 event, u32 timeout); event 1473 include/net/bluetooth/hci_core.h void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event, event 1477 include/net/bluetooth/hci_core.h void hci_sock_dev_event(struct hci_dev *hdev, int event); event 6895 include/net/cfg80211.h enum nl80211_radar_event event, gfp_t gfp); event 39 include/net/dcbnl.h int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, event 41 include/net/dcbnl.h int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, event 29 include/net/garp.h u8 event; event 466 include/net/ip6_fib.h void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, event 205 include/net/ip6_route.h void rt6_disable_ip(struct net_device *dev, unsigned long event); event 206 include/net/ip6_route.h void rt6_sync_down_dev(struct net_device *dev, unsigned long event); event 437 include/net/ip_fib.h int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); event 548 include/net/iw_handler.h char *iwe_stream_add_value(struct iw_request_info *info, char *event, event 3939 include/net/mac80211.h const struct ieee80211_event *event); event 502 include/net/ndisc.h void inet6_ifinfo_notify(int event, struct inet6_dev *idev); event 105 include/net/netfilter/nf_conntrack_ecache.h nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) event 118 include/net/netfilter/nf_conntrack_ecache.h set_bit(event, &e->cache); event 123 include/net/netfilter/nf_conntrack_ecache.h nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, event 132 include/net/netfilter/nf_conntrack_ecache.h return nf_conntrack_eventmask_report(1 << event, ct, portid, report); event 139 include/net/netfilter/nf_conntrack_ecache.h nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) event 147 include/net/netfilter/nf_conntrack_ecache.h return nf_conntrack_eventmask_report(1 << event, ct, 0, 0); event 170 include/net/netfilter/nf_conntrack_ecache.h void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, event 1095 include/net/netfilter/nf_tables.h int event, int family, int report, gfp_t gfp); event 42 include/net/nfc/hci.h int (*event_received)(struct nfc_hci_dev *hdev, u8 pipe, u8 event, event 252 include/net/nfc/hci.h void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, event 270 include/net/nfc/hci.h int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, event 80 include/net/nfc/nci_core.h void (*hci_event_received)(struct nci_dev *ndev, u8 pipe, u8 event, event 301 include/net/nfc/nci_core.h int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, event 36 include/net/phonet/pn_dev.h void phonet_address_notify(int event, struct net_device *dev, u8 addr); event 40 include/net/phonet/pn_dev.h void rtm_phonet_notify(int event, struct net_device *dev, u8 dst); event 400 include/net/sctp/sctp.h struct sctp_ulpevent 
*event = sctp_skb2event(skb); event 405 include/net/sctp/sctp.h atomic_add(event->rmem_len, &sk->sk_rmem_alloc); event 409 include/net/sctp/sctp.h sk_mem_charge(sk, event->rmem_len); event 31 include/net/sctp/stream_interleave.h struct sctp_ulpevent *event); event 143 include/net/sctp/ulpevent.h void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, event 145 include/net/sctp/ulpevent.h void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, event 147 include/net/sctp/ulpevent.h void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, event 150 include/net/sctp/ulpevent.h __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event); event 174 include/net/sctp/ulpevent.h static inline bool sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event, event 179 include/net/sctp/ulpevent.h if (!sctp_ulpevent_is_notification(event)) event 182 include/net/sctp/ulpevent.h sn_type = sctp_ulpevent_get_notification_type(event); event 85 include/net/smc.h void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event); event 1125 include/net/tcp.h static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) event 1130 include/net/tcp.h icsk->icsk_ca_ops->cwnd_event(sk, event); event 292 include/net/xfrm.h u32 event; event 304 include/net/xfrm.h void (*notify)(struct xfrm_state *x, int event); event 267 include/rdma/ib_cm.h enum ib_cm_event_type event; event 314 include/rdma/ib_cm.h const struct ib_cm_event *event); event 489 include/rdma/ib_cm.h int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event); event 716 include/rdma/ib_verbs.h const char *__attribute_const__ ib_event_msg(enum ib_event_type event); event 727 include/rdma/ib_verbs.h enum ib_event_type event; event 2903 include/rdma/ib_verbs.h void ib_dispatch_event(const struct ib_event *event); event 50 include/rdma/iw_cm.h enum iw_cm_event_type event; event 69 include/rdma/iw_cm.h struct iw_cm_event *event); event 80 include/rdma/iw_cm.h struct iw_cm_event *event); event 66 include/rdma/rdma_cm.h const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event); event 108 include/rdma/rdma_cm.h enum rdma_cm_event_type event; event 126 include/rdma/rdma_cm.h struct rdma_cm_event *event); event 310 include/rdma/rdma_cm.h int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event); event 206 include/scsi/libfc.h enum fc_rport_event event; event 284 include/scsi/libsas.h int event; event 296 include/scsi/libsas.h struct asd_sas_phy *phy, int event) event 300 include/scsi/libsas.h ev->event = event; event 417 include/scsi/scsi_transport_iscsi.h enum iscsi_uevent_e event); event 222 include/sound/hdaudio.h void (*unsol_event)(struct hdac_device *dev, unsigned int event); event 69 include/sound/rawmidi.h void (*event)(struct snd_rawmidi_substream *substream); event 71 include/sound/seq_kernel.h int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf, event 73 include/sound/seq_kernel.h int snd_seq_dump_var_event(const struct snd_seq_event *event, event 27 include/sound/seq_virmidi.h struct snd_seq_event event; event 73 include/sound/soc-component.h int (*stream_event)(struct snd_soc_component *component, int event); event 287 include/sound/soc-component.h int event); event 60 include/sound/soc-dapm.h .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ event 64 include/sound/soc-dapm.h .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ event 68 include/sound/soc-dapm.h .num_kcontrols = 0, .reg = 
SND_SOC_NOPM, .event = wevent, \ event 72 include/sound/soc-dapm.h .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ event 141 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 147 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 153 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 159 include/sound/soc-dapm.h .num_kcontrols = wncontrols, .event = wevent, .event_flags = wflags} event 165 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 171 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 178 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags, \ event 184 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags, .subseq = wsubseq} event 192 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 198 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 204 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 209 include/sound/soc-dapm.h .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ event 213 include/sound/soc-dapm.h .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ event 224 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags } event 232 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags } event 240 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 249 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 252 include/sound/soc-dapm.h .reg = SND_SOC_NOPM, .event = dapm_clock_event, \ event 263 include/sound/soc-dapm.h .event = wevent, .event_flags = wflags} event 266 include/sound/soc-dapm.h .reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \ event 273 include/sound/soc-dapm.h .reg = SND_SOC_NOPM, .event = dapm_pinctrl_event, \ event 380 include/sound/soc-dapm.h struct snd_kcontrol *kcontrol, int event); event 382 include/sound/soc-dapm.h struct snd_kcontrol *kcontrol, int event); event 384 include/sound/soc-dapm.h struct snd_kcontrol *kcontrol, int event); event 436 include/sound/soc-dapm.h int event); event 627 include/sound/soc-dapm.h int (*event)(struct snd_soc_dapm_widget*, struct snd_kcontrol *, int); event 168 include/sound/soc-dpcm.h int event); event 104 include/sound/soc-topology.h struct snd_kcontrol *k, int event); event 91 include/sound/timer.h int event, event 116 include/sound/timer.h void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp); event 1101 include/trace/events/afs.h enum afs_flock_event event, int error), event 1103 include/trace/events/afs.h TP_ARGS(vnode, fl, event, error), event 1107 include/trace/events/afs.h __field(enum afs_flock_event, event ) event 1115 include/trace/events/afs.h __entry->event = event; event 1124 include/trace/events/afs.h __print_symbolic(__entry->event, afs_flock_events), event 134 include/trace/events/power.h #define pm_verb_symbolic(event) \ event 135 include/trace/events/power.h __print_symbolic(event, \ event 178 include/trace/events/power.h TP_PROTO(struct device *dev, const char *pm_ops, int event), event 180 include/trace/events/power.h TP_ARGS(dev, pm_ops, event), event 187 include/trace/events/power.h __field(int, event) event 196 include/trace/events/power.h __entry->event = event; event 201 include/trace/events/power.h pm_verb_symbolic(__entry->event)) event 308 include/trace/events/rpcrdma.h struct rdma_cm_event *event event 311 include/trace/events/rpcrdma.h TP_ARGS(r_xprt, event), event 315 
include/trace/events/rpcrdma.h __field(unsigned int, event) event 323 include/trace/events/rpcrdma.h __entry->event = event->event; event 324 include/trace/events/rpcrdma.h __entry->status = event->status; event 331 include/trace/events/rpcrdma.h __entry->r_xprt, rdma_show_cm_event(__entry->event), event 332 include/trace/events/rpcrdma.h __entry->event, __entry->status event 412 include/trace/events/rpcrdma.h const struct ib_event *event event 415 include/trace/events/rpcrdma.h TP_ARGS(r_xprt, event), event 419 include/trace/events/rpcrdma.h __field(unsigned int, event) event 420 include/trace/events/rpcrdma.h __string(name, event->device->name) event 427 include/trace/events/rpcrdma.h __entry->event = event->event; event 428 include/trace/events/rpcrdma.h __assign_str(name, event->device->name); event 435 include/trace/events/rpcrdma.h __get_str(name), rdma_show_ib_event(__entry->event), event 436 include/trace/events/rpcrdma.h __entry->event event 1751 include/trace/events/rpcrdma.h const struct rdma_cm_event *event, event 1755 include/trace/events/rpcrdma.h TP_ARGS(event, sap), event 1758 include/trace/events/rpcrdma.h __field(unsigned int, event) event 1764 include/trace/events/rpcrdma.h __entry->event = event->event; event 1765 include/trace/events/rpcrdma.h __entry->status = event->status; event 1772 include/trace/events/rpcrdma.h rdma_show_cm_event(__entry->event), event 1773 include/trace/events/rpcrdma.h __entry->event, __entry->status event 1779 include/trace/events/rpcrdma.h const struct ib_event *event, event 1783 include/trace/events/rpcrdma.h TP_ARGS(event, sap), event 1786 include/trace/events/rpcrdma.h __field(unsigned int, event) event 1787 include/trace/events/rpcrdma.h __string(device, event->device->name) event 1792 include/trace/events/rpcrdma.h __entry->event = event->event; event 1793 include/trace/events/rpcrdma.h __assign_str(device, event->device->name); event 1800 include/trace/events/rpcrdma.h rdma_show_ib_event(__entry->event), __entry->event event 216 include/trace/trace_events.h #define TRACE_EVENT_FLAGS(event, flag) event 219 include/trace/trace_events.h #define TRACE_EVENT_PERF_PERM(event, expr...) 
event 372 include/trace/trace_events.h struct trace_event *event) \ event 380 include/trace/trace_events.h if (entry->type != event_##call.event.type) { \ event 780 include/trace/trace_events.h .event.funcs = &trace_event_type_funcs_##template, \ event 797 include/trace/trace_events.h .event.funcs = &trace_event_type_funcs_##call, \ event 426 include/uapi/linux/cec.h __u32 event; event 115 include/uapi/linux/iommu.h struct iommu_fault_unrecoverable event; event 1128 include/uapi/linux/soundcard.h #define _CHN_VOICE(dev, event, chn, note, parm) \ event 1132 include/uapi/linux/soundcard.h _seqbuf[_seqbufptr+2] = (event);\ event 1153 include/uapi/linux/soundcard.h #define _CHN_COMMON(dev, event, chn, p1, p2, w14) \ event 1157 include/uapi/linux/soundcard.h _seqbuf[_seqbufptr+2] = (event);\ event 129 include/uapi/linux/tipc.h __u32 event; /* event type */ event 74 include/uapi/linux/userfaultfd.h __u8 event; event 63 include/uapi/linux/virtio_console.h __virtio16 event; /* The kind of control event (see below) */ event 100 include/uapi/linux/virtio_scsi.h __virtio32 event; event 67 include/uapi/linux/vt.h unsigned int event; event 246 include/uapi/rdma/rdma_user_cm.h __u32 event; event 279 include/uapi/rdma/rdma_user_cm.h __u32 event; event 228 include/uapi/sound/asequencer.h int event; /* processed event type */ event 268 include/uapi/sound/asequencer.h struct snd_seq_event *event; /* quoted event */ event 306 include/uapi/sound/asequencer.h struct snd_seq_event event; event 814 include/uapi/sound/asound.h int event; event 649 kernel/bpf/arraymap.c ee->event = perf_file->private_data; event 675 kernel/bpf/arraymap.c struct perf_event *event; event 684 kernel/bpf/arraymap.c event = perf_file->private_data; event 685 kernel/bpf/arraymap.c if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP) event 768 kernel/bpf/devmap.c ulong event, void *ptr) event 774 kernel/bpf/devmap.c switch (event) { event 2769 kernel/bpf/syscall.c const struct perf_event *event; event 2817 kernel/bpf/syscall.c event = perf_get_event(file); event 2818 kernel/bpf/syscall.c if (!IS_ERR(event)) { event 2823 kernel/bpf/syscall.c err = bpf_get_perf_event_info(event, &prog_id, &fd_type, event 267 kernel/compat.c int get_compat_sigevent(struct sigevent *event, event 270 kernel/compat.c memset(event, 0, sizeof(*event)); event 272 kernel/compat.c __get_user(event->sigev_value.sival_int, event 274 kernel/compat.c __get_user(event->sigev_signo, &u_event->sigev_signo) || event 275 kernel/compat.c __get_user(event->sigev_notify, &u_event->sigev_notify) || event 276 kernel/compat.c __get_user(event->sigev_notify_thread_id, event 18 kernel/cpu_pm.c static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls) event 28 kernel/cpu_pm.c ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL, event 174 kernel/events/core.c static bool is_kernel_event(struct perf_event *event) event 176 kernel/events/core.c return READ_ONCE(event->owner) == TASK_TOMBSTONE; event 202 kernel/events/core.c struct perf_event *event; event 210 kernel/events/core.c struct perf_event *event = efs->event; event 211 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 246 kernel/events/core.c efs->func(event, cpuctx, ctx, efs->data); event 253 kernel/events/core.c static void event_function_call(struct perf_event *event, event_f func, void *data) event 255 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 258 kernel/events/core.c .event = event, event 263 kernel/events/core.c 
if (!event->parent) { event 273 kernel/events/core.c cpu_function_call(event->cpu, event_function, &efs); event 298 kernel/events/core.c func(event, NULL, ctx, data); event 306 kernel/events/core.c static void event_function_local(struct perf_event *event, event_f func, void *data) event 308 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 345 kernel/events/core.c func(event, cpuctx, ctx, data); event 572 kernel/events/core.c static u64 perf_event_time(struct perf_event *event); event 586 kernel/events/core.c static inline u64 perf_event_clock(struct perf_event *event) event 588 kernel/events/core.c return event->clock(); event 614 kernel/events/core.c __perf_effective_state(struct perf_event *event) event 616 kernel/events/core.c struct perf_event *leader = event->group_leader; event 621 kernel/events/core.c return event->state; event 625 kernel/events/core.c __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) event 627 kernel/events/core.c enum perf_event_state state = __perf_effective_state(event); event 628 kernel/events/core.c u64 delta = now - event->tstamp; event 630 kernel/events/core.c *enabled = event->total_time_enabled; event 634 kernel/events/core.c *running = event->total_time_running; event 639 kernel/events/core.c static void perf_event_update_time(struct perf_event *event) event 641 kernel/events/core.c u64 now = perf_event_time(event); event 643 kernel/events/core.c __perf_update_times(event, now, &event->total_time_enabled, event 644 kernel/events/core.c &event->total_time_running); event 645 kernel/events/core.c event->tstamp = now; event 657 kernel/events/core.c perf_event_set_state(struct perf_event *event, enum perf_event_state state) event 659 kernel/events/core.c if (event->state == state) event 662 kernel/events/core.c perf_event_update_time(event); event 667 kernel/events/core.c if ((event->state < 0) ^ (state < 0)) event 668 kernel/events/core.c perf_event_update_sibling_time(event); event 670 kernel/events/core.c WRITE_ONCE(event->state, state); event 676 kernel/events/core.c perf_cgroup_match(struct perf_event *event) event 678 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 682 kernel/events/core.c if (!event->cgrp) event 696 kernel/events/core.c event->cgrp->css.cgroup); event 699 kernel/events/core.c static inline void perf_detach_cgroup(struct perf_event *event) event 701 kernel/events/core.c css_put(&event->cgrp->css); event 702 kernel/events/core.c event->cgrp = NULL; event 705 kernel/events/core.c static inline int is_cgroup_event(struct perf_event *event) event 707 kernel/events/core.c return event->cgrp != NULL; event 710 kernel/events/core.c static inline u64 perf_cgroup_event_time(struct perf_event *event) event 714 kernel/events/core.c t = per_cpu_ptr(event->cgrp->info, event->cpu); event 744 kernel/events/core.c static inline void update_cgrp_time_from_event(struct perf_event *event) event 752 kernel/events/core.c if (!is_cgroup_event(event)) event 755 kernel/events/core.c cgrp = perf_cgroup_from_task(current, event->ctx); event 759 kernel/events/core.c if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) event 760 kernel/events/core.c __update_cgrp_time(event->cgrp); event 899 kernel/events/core.c static inline int perf_cgroup_connect(int fd, struct perf_event *event, event 919 kernel/events/core.c event->cgrp = cgrp; event 927 kernel/events/core.c perf_detach_cgroup(event); event 936 kernel/events/core.c perf_cgroup_set_shadow_time(struct perf_event 
*event, u64 now) event 939 kernel/events/core.c t = per_cpu_ptr(event->cgrp->info, event->cpu); event 940 kernel/events/core.c event->shadow_ctx_time = now - t->timestamp; event 948 kernel/events/core.c list_update_cgroup_event(struct perf_event *event, event 954 kernel/events/core.c if (!is_cgroup_event(event)) event 972 kernel/events/core.c if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) event 995 kernel/events/core.c perf_cgroup_match(struct perf_event *event) event 1000 kernel/events/core.c static inline void perf_detach_cgroup(struct perf_event *event) event 1003 kernel/events/core.c static inline int is_cgroup_event(struct perf_event *event) event 1008 kernel/events/core.c static inline void update_cgrp_time_from_event(struct perf_event *event) event 1026 kernel/events/core.c static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, event 1045 kernel/events/core.c perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) event 1049 kernel/events/core.c static inline u64 perf_cgroup_event_time(struct perf_event *event) event 1055 kernel/events/core.c list_update_cgroup_event(struct perf_event *event, event 1270 kernel/events/core.c perf_event_ctx_lock_nested(struct perf_event *event, int nesting) event 1276 kernel/events/core.c ctx = READ_ONCE(event->ctx); event 1284 kernel/events/core.c if (event->ctx != ctx) { event 1294 kernel/events/core.c perf_event_ctx_lock(struct perf_event *event) event 1296 kernel/events/core.c return perf_event_ctx_lock_nested(event, 0); event 1299 kernel/events/core.c static void perf_event_ctx_unlock(struct perf_event *event, event 1325 kernel/events/core.c static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, event 1332 kernel/events/core.c if (event->parent) event 1333 kernel/events/core.c event = event->parent; event 1335 kernel/events/core.c nr = __task_pid_nr_ns(p, type, event->ns); event 1342 kernel/events/core.c static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) event 1344 kernel/events/core.c return perf_event_pid_type(event, p, PIDTYPE_TGID); event 1347 kernel/events/core.c static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) event 1349 kernel/events/core.c return perf_event_pid_type(event, p, PIDTYPE_PID); event 1356 kernel/events/core.c static u64 primary_event_id(struct perf_event *event) event 1358 kernel/events/core.c u64 id = event->id; event 1360 kernel/events/core.c if (event->parent) event 1361 kernel/events/core.c id = event->parent->id; event 1462 kernel/events/core.c static u64 perf_event_time(struct perf_event *event) event 1464 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 1466 kernel/events/core.c if (is_cgroup_event(event)) event 1467 kernel/events/core.c return perf_cgroup_event_time(event); event 1472 kernel/events/core.c static enum event_type_t get_event_type(struct perf_event *event) event 1474 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 1483 kernel/events/core.c if (event->group_leader != event) event 1484 kernel/events/core.c event = event->group_leader; event 1486 kernel/events/core.c event_type = event->attr.pinned ? 
EVENT_PINNED : EVENT_FLEXIBLE; event 1496 kernel/events/core.c static void init_event_group(struct perf_event *event) event 1498 kernel/events/core.c RB_CLEAR_NODE(&event->group_node); event 1499 kernel/events/core.c event->group_index = 0; event 1507 kernel/events/core.c get_event_groups(struct perf_event *event, struct perf_event_context *ctx) event 1509 kernel/events/core.c if (event->attr.pinned) event 1553 kernel/events/core.c struct perf_event *event) event 1559 kernel/events/core.c event->group_index = ++groups->index; event 1568 kernel/events/core.c if (perf_event_groups_less(event, node_event)) event 1574 kernel/events/core.c rb_link_node(&event->group_node, parent, node); event 1575 kernel/events/core.c rb_insert_color(&event->group_node, &groups->tree); event 1582 kernel/events/core.c add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) event 1586 kernel/events/core.c groups = get_event_groups(event, ctx); event 1587 kernel/events/core.c perf_event_groups_insert(groups, event); event 1595 kernel/events/core.c struct perf_event *event) event 1597 kernel/events/core.c WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || event 1600 kernel/events/core.c rb_erase(&event->group_node, &groups->tree); event 1601 kernel/events/core.c init_event_group(event); event 1608 kernel/events/core.c del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) event 1612 kernel/events/core.c groups = get_event_groups(event, ctx); event 1613 kernel/events/core.c perf_event_groups_delete(groups, event); event 1645 kernel/events/core.c perf_event_groups_next(struct perf_event *event) event 1649 kernel/events/core.c next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node); event 1650 kernel/events/core.c if (next && next->cpu == event->cpu) event 1659 kernel/events/core.c #define perf_event_groups_for_each(event, groups) \ event 1660 kernel/events/core.c for (event = rb_entry_safe(rb_first(&((groups)->tree)), \ event 1661 kernel/events/core.c typeof(*event), group_node); event; \ event 1662 kernel/events/core.c event = rb_entry_safe(rb_next(&event->group_node), \ event 1663 kernel/events/core.c typeof(*event), group_node)) event 1670 kernel/events/core.c list_add_event(struct perf_event *event, struct perf_event_context *ctx) event 1674 kernel/events/core.c WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); event 1675 kernel/events/core.c event->attach_state |= PERF_ATTACH_CONTEXT; event 1677 kernel/events/core.c event->tstamp = perf_event_time(event); event 1684 kernel/events/core.c if (event->group_leader == event) { event 1685 kernel/events/core.c event->group_caps = event->event_caps; event 1686 kernel/events/core.c add_event_to_groups(event, ctx); event 1689 kernel/events/core.c list_update_cgroup_event(event, ctx, true); event 1691 kernel/events/core.c list_add_rcu(&event->event_entry, &ctx->event_list); event 1693 kernel/events/core.c if (event->attr.inherit_stat) event 1702 kernel/events/core.c static inline void perf_event__state_init(struct perf_event *event) event 1704 kernel/events/core.c event->state = event->attr.disabled ? 
PERF_EVENT_STATE_OFF : event 1708 kernel/events/core.c static void __perf_event_read_size(struct perf_event *event, int nr_siblings) event 1714 kernel/events/core.c if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) event 1717 kernel/events/core.c if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) event 1720 kernel/events/core.c if (event->attr.read_format & PERF_FORMAT_ID) event 1723 kernel/events/core.c if (event->attr.read_format & PERF_FORMAT_GROUP) { event 1729 kernel/events/core.c event->read_size = size; event 1732 kernel/events/core.c static void __perf_event_header_size(struct perf_event *event, u64 sample_type) event 1750 kernel/events/core.c size += event->read_size; event 1761 kernel/events/core.c event->header_size = size; event 1768 kernel/events/core.c static void perf_event__header_size(struct perf_event *event) event 1770 kernel/events/core.c __perf_event_read_size(event, event 1771 kernel/events/core.c event->group_leader->nr_siblings); event 1772 kernel/events/core.c __perf_event_header_size(event, event->attr.sample_type); event 1775 kernel/events/core.c static void perf_event__id_header_size(struct perf_event *event) event 1778 kernel/events/core.c u64 sample_type = event->attr.sample_type; event 1799 kernel/events/core.c event->id_header_size = size; event 1802 kernel/events/core.c static bool perf_event_validate_size(struct perf_event *event) event 1808 kernel/events/core.c __perf_event_read_size(event, event->group_leader->nr_siblings + 1); event 1809 kernel/events/core.c __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); event 1810 kernel/events/core.c perf_event__id_header_size(event); event 1816 kernel/events/core.c if (event->read_size + event->header_size + event 1817 kernel/events/core.c event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) event 1823 kernel/events/core.c static void perf_group_attach(struct perf_event *event) event 1825 kernel/events/core.c struct perf_event *group_leader = event->group_leader, *pos; event 1827 kernel/events/core.c lockdep_assert_held(&event->ctx->lock); event 1832 kernel/events/core.c if (event->attach_state & PERF_ATTACH_GROUP) event 1835 kernel/events/core.c event->attach_state |= PERF_ATTACH_GROUP; event 1837 kernel/events/core.c if (group_leader == event) event 1840 kernel/events/core.c WARN_ON_ONCE(group_leader->ctx != event->ctx); event 1842 kernel/events/core.c group_leader->group_caps &= event->event_caps; event 1844 kernel/events/core.c list_add_tail(&event->sibling_list, &group_leader->sibling_list); event 1858 kernel/events/core.c list_del_event(struct perf_event *event, struct perf_event_context *ctx) event 1860 kernel/events/core.c WARN_ON_ONCE(event->ctx != ctx); event 1866 kernel/events/core.c if (!(event->attach_state & PERF_ATTACH_CONTEXT)) event 1869 kernel/events/core.c event->attach_state &= ~PERF_ATTACH_CONTEXT; event 1871 kernel/events/core.c list_update_cgroup_event(event, ctx, false); event 1874 kernel/events/core.c if (event->attr.inherit_stat) event 1877 kernel/events/core.c list_del_rcu(&event->event_entry); event 1879 kernel/events/core.c if (event->group_leader == event) event 1880 kernel/events/core.c del_event_from_groups(event, ctx); event 1889 kernel/events/core.c if (event->state > PERF_EVENT_STATE_OFF) event 1890 kernel/events/core.c perf_event_set_state(event, PERF_EVENT_STATE_OFF); event 1896 kernel/events/core.c perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) event 1901 kernel/events/core.c if 
(!event->pmu->aux_output_match) event 1904 kernel/events/core.c return event->pmu->aux_output_match(aux_event); event 1907 kernel/events/core.c static void put_event(struct perf_event *event); event 1908 kernel/events/core.c static void event_sched_out(struct perf_event *event, event 1912 kernel/events/core.c static void perf_put_aux_event(struct perf_event *event) event 1914 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 1921 kernel/events/core.c if (event->aux_event) { event 1922 kernel/events/core.c iter = event->aux_event; event 1923 kernel/events/core.c event->aux_event = NULL; event 1932 kernel/events/core.c for_each_sibling_event(iter, event->group_leader) { event 1933 kernel/events/core.c if (iter->aux_event != event) event 1937 kernel/events/core.c put_event(event); event 1945 kernel/events/core.c perf_event_set_state(event, PERF_EVENT_STATE_ERROR); event 1949 kernel/events/core.c static int perf_get_aux_event(struct perf_event *event, event 1961 kernel/events/core.c if (!perf_aux_output_match(event, group_leader)) event 1973 kernel/events/core.c event->aux_event = group_leader; event 1978 kernel/events/core.c static void perf_group_detach(struct perf_event *event) event 1981 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 1988 kernel/events/core.c if (!(event->attach_state & PERF_ATTACH_GROUP)) event 1991 kernel/events/core.c event->attach_state &= ~PERF_ATTACH_GROUP; event 1993 kernel/events/core.c perf_put_aux_event(event); event 1998 kernel/events/core.c if (event->group_leader != event) { event 1999 kernel/events/core.c list_del_init(&event->sibling_list); event 2000 kernel/events/core.c event->group_leader->nr_siblings--; event 2009 kernel/events/core.c list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { event 2015 kernel/events/core.c sibling->group_caps = event->group_caps; event 2017 kernel/events/core.c if (!RB_EMPTY_NODE(&event->group_node)) { event 2018 kernel/events/core.c add_event_to_groups(sibling, event->ctx); event 2028 kernel/events/core.c WARN_ON_ONCE(sibling->ctx != event->ctx); event 2032 kernel/events/core.c perf_event__header_size(event->group_leader); event 2034 kernel/events/core.c for_each_sibling_event(tmp, event->group_leader) event 2038 kernel/events/core.c static bool is_orphaned_event(struct perf_event *event) event 2040 kernel/events/core.c return event->state == PERF_EVENT_STATE_DEAD; event 2043 kernel/events/core.c static inline int __pmu_filter_match(struct perf_event *event) event 2045 kernel/events/core.c struct pmu *pmu = event->pmu; event 2046 kernel/events/core.c return pmu->filter_match ? 
pmu->filter_match(event) : 1; event 2055 kernel/events/core.c static inline int pmu_filter_match(struct perf_event *event) event 2059 kernel/events/core.c if (!__pmu_filter_match(event)) event 2062 kernel/events/core.c for_each_sibling_event(sibling, event) { event 2071 kernel/events/core.c event_filter_match(struct perf_event *event) event 2073 kernel/events/core.c return (event->cpu == -1 || event->cpu == smp_processor_id()) && event 2074 kernel/events/core.c perf_cgroup_match(event) && pmu_filter_match(event); event 2078 kernel/events/core.c event_sched_out(struct perf_event *event, event 2084 kernel/events/core.c WARN_ON_ONCE(event->ctx != ctx); event 2087 kernel/events/core.c if (event->state != PERF_EVENT_STATE_ACTIVE) event 2095 kernel/events/core.c list_del_init(&event->active_list); event 2097 kernel/events/core.c perf_pmu_disable(event->pmu); event 2099 kernel/events/core.c event->pmu->del(event, 0); event 2100 kernel/events/core.c event->oncpu = -1; event 2102 kernel/events/core.c if (READ_ONCE(event->pending_disable) >= 0) { event 2103 kernel/events/core.c WRITE_ONCE(event->pending_disable, -1); event 2106 kernel/events/core.c perf_event_set_state(event, state); event 2108 kernel/events/core.c if (!is_software_event(event)) event 2112 kernel/events/core.c if (event->attr.freq && event->attr.sample_freq) event 2114 kernel/events/core.c if (event->attr.exclusive || !cpuctx->active_oncpu) event 2117 kernel/events/core.c perf_pmu_enable(event->pmu); event 2125 kernel/events/core.c struct perf_event *event; event 2137 kernel/events/core.c for_each_sibling_event(event, group_event) event 2138 kernel/events/core.c event_sched_out(event, cpuctx, ctx); event 2155 kernel/events/core.c __perf_remove_from_context(struct perf_event *event, event 2167 kernel/events/core.c event_sched_out(event, cpuctx, ctx); event 2169 kernel/events/core.c perf_group_detach(event); event 2170 kernel/events/core.c list_del_event(event, ctx); event 2191 kernel/events/core.c static void perf_remove_from_context(struct perf_event *event, unsigned long flags) event 2193 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 2197 kernel/events/core.c event_function_call(event, __perf_remove_from_context, (void *)flags); event 2205 kernel/events/core.c WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); event 2207 kernel/events/core.c (event->attach_state & PERF_ATTACH_GROUP)) { event 2213 kernel/events/core.c perf_group_detach(event); event 2221 kernel/events/core.c static void __perf_event_disable(struct perf_event *event, event 2226 kernel/events/core.c if (event->state < PERF_EVENT_STATE_INACTIVE) event 2231 kernel/events/core.c update_cgrp_time_from_event(event); event 2234 kernel/events/core.c if (event == event->group_leader) event 2235 kernel/events/core.c group_sched_out(event, cpuctx, ctx); event 2237 kernel/events/core.c event_sched_out(event, cpuctx, ctx); event 2239 kernel/events/core.c perf_event_set_state(event, PERF_EVENT_STATE_OFF); event 2256 kernel/events/core.c static void _perf_event_disable(struct perf_event *event) event 2258 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 2261 kernel/events/core.c if (event->state <= PERF_EVENT_STATE_OFF) { event 2267 kernel/events/core.c event_function_call(event, __perf_event_disable, NULL); event 2270 kernel/events/core.c void perf_event_disable_local(struct perf_event *event) event 2272 kernel/events/core.c event_function_local(event, __perf_event_disable, NULL); event 2279 kernel/events/core.c void 
perf_event_disable(struct perf_event *event) event 2283 kernel/events/core.c ctx = perf_event_ctx_lock(event); event 2284 kernel/events/core.c _perf_event_disable(event); event 2285 kernel/events/core.c perf_event_ctx_unlock(event, ctx); event 2289 kernel/events/core.c void perf_event_disable_inatomic(struct perf_event *event) event 2291 kernel/events/core.c WRITE_ONCE(event->pending_disable, smp_processor_id()); event 2293 kernel/events/core.c irq_work_queue(&event->pending); event 2296 kernel/events/core.c static void perf_set_shadow_time(struct perf_event *event, event 2324 kernel/events/core.c if (is_cgroup_event(event)) event 2325 kernel/events/core.c perf_cgroup_set_shadow_time(event, event->tstamp); event 2327 kernel/events/core.c event->shadow_ctx_time = event->tstamp - ctx->timestamp; event 2332 kernel/events/core.c static void perf_log_throttle(struct perf_event *event, int enable); event 2333 kernel/events/core.c static void perf_log_itrace_start(struct perf_event *event); event 2336 kernel/events/core.c event_sched_in(struct perf_event *event, event 2344 kernel/events/core.c if (event->state <= PERF_EVENT_STATE_OFF) event 2347 kernel/events/core.c WRITE_ONCE(event->oncpu, smp_processor_id()); event 2354 kernel/events/core.c perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); event 2361 kernel/events/core.c if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { event 2362 kernel/events/core.c perf_log_throttle(event, 1); event 2363 kernel/events/core.c event->hw.interrupts = 0; event 2366 kernel/events/core.c perf_pmu_disable(event->pmu); event 2368 kernel/events/core.c perf_set_shadow_time(event, ctx); event 2370 kernel/events/core.c perf_log_itrace_start(event); event 2372 kernel/events/core.c if (event->pmu->add(event, PERF_EF_START)) { event 2373 kernel/events/core.c perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); event 2374 kernel/events/core.c event->oncpu = -1; event 2379 kernel/events/core.c if (!is_software_event(event)) event 2383 kernel/events/core.c if (event->attr.freq && event->attr.sample_freq) event 2386 kernel/events/core.c if (event->attr.exclusive) event 2390 kernel/events/core.c perf_pmu_enable(event->pmu); event 2400 kernel/events/core.c struct perf_event *event, *partial_group = NULL; event 2417 kernel/events/core.c for_each_sibling_event(event, group_event) { event 2418 kernel/events/core.c if (event_sched_in(event, cpuctx, ctx)) { event 2419 kernel/events/core.c partial_group = event; event 2433 kernel/events/core.c for_each_sibling_event(event, group_event) { event 2434 kernel/events/core.c if (event == partial_group) event 2437 kernel/events/core.c event_sched_out(event, cpuctx, ctx); event 2451 kernel/events/core.c static int group_can_go_on(struct perf_event *event, event 2458 kernel/events/core.c if (event->group_caps & PERF_EV_CAP_SOFTWARE) event 2470 kernel/events/core.c if (event->attr.exclusive && cpuctx->active_oncpu) event 2479 kernel/events/core.c static void add_event_to_ctx(struct perf_event *event, event 2482 kernel/events/core.c list_add_event(event, ctx); event 2483 kernel/events/core.c perf_group_attach(event); event 2589 kernel/events/core.c struct perf_event *event = info; event 2590 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 2621 kernel/events/core.c if (is_cgroup_event(event)) { event 2628 kernel/events/core.c event->cgrp->css.cgroup); event 2634 kernel/events/core.c add_event_to_ctx(event, ctx); event 2635 kernel/events/core.c ctx_resched(cpuctx, task_ctx, get_event_type(event)); event 2637 
kernel/events/core.c add_event_to_ctx(event, ctx); event 2646 kernel/events/core.c static bool exclusive_event_installable(struct perf_event *event, event 2656 kernel/events/core.c struct perf_event *event, event 2663 kernel/events/core.c WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); event 2665 kernel/events/core.c if (event->cpu != -1) event 2666 kernel/events/core.c event->cpu = cpu; event 2672 kernel/events/core.c smp_store_release(&event->ctx, ctx); event 2675 kernel/events/core.c cpu_function_call(cpu, __perf_install_in_context, event); event 2717 kernel/events/core.c if (!task_function_call(task, __perf_install_in_context, event)) event 2739 kernel/events/core.c add_event_to_ctx(event, ctx); event 2746 kernel/events/core.c static void __perf_event_enable(struct perf_event *event, event 2751 kernel/events/core.c struct perf_event *leader = event->group_leader; event 2754 kernel/events/core.c if (event->state >= PERF_EVENT_STATE_INACTIVE || event 2755 kernel/events/core.c event->state <= PERF_EVENT_STATE_ERROR) event 2761 kernel/events/core.c perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); event 2766 kernel/events/core.c if (!event_filter_match(event)) { event 2775 kernel/events/core.c if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { event 2784 kernel/events/core.c ctx_resched(cpuctx, task_ctx, get_event_type(event)); event 2796 kernel/events/core.c static void _perf_event_enable(struct perf_event *event) event 2798 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 2801 kernel/events/core.c if (event->state >= PERF_EVENT_STATE_INACTIVE || event 2802 kernel/events/core.c event->state < PERF_EVENT_STATE_ERROR) { event 2814 kernel/events/core.c if (event->state == PERF_EVENT_STATE_ERROR) event 2815 kernel/events/core.c event->state = PERF_EVENT_STATE_OFF; event 2818 kernel/events/core.c event_function_call(event, __perf_event_enable, NULL); event 2824 kernel/events/core.c void perf_event_enable(struct perf_event *event) event 2828 kernel/events/core.c ctx = perf_event_ctx_lock(event); event 2829 kernel/events/core.c _perf_event_enable(event); event 2830 kernel/events/core.c perf_event_ctx_unlock(event, ctx); event 2835 kernel/events/core.c struct perf_event *event; event 2842 kernel/events/core.c struct perf_event *event = sd->event; event 2845 kernel/events/core.c if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) event 2855 kernel/events/core.c if (READ_ONCE(event->oncpu) != smp_processor_id()) event 2858 kernel/events/core.c event->pmu->stop(event, PERF_EF_UPDATE); event 2870 kernel/events/core.c event->pmu->start(event, 0); event 2875 kernel/events/core.c static int perf_event_stop(struct perf_event *event, int restart) event 2878 kernel/events/core.c .event = event, event 2884 kernel/events/core.c if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) event 2895 kernel/events/core.c ret = cpu_function_call(READ_ONCE(event->oncpu), event 2924 kernel/events/core.c void perf_event_addr_filters_sync(struct perf_event *event) event 2926 kernel/events/core.c struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); event 2928 kernel/events/core.c if (!has_addr_filter(event)) event 2932 kernel/events/core.c if (event->addr_filters_gen != event->hw.addr_filters_gen) { event 2933 kernel/events/core.c event->pmu->addr_filters_sync(event); event 2934 kernel/events/core.c event->hw.addr_filters_gen = event->addr_filters_gen; event 2940 kernel/events/core.c static int _perf_event_refresh(struct perf_event *event, int 
refresh) event 2945 kernel/events/core.c if (event->attr.inherit || !is_sampling_event(event)) event 2948 kernel/events/core.c atomic_add(refresh, &event->event_limit); event 2949 kernel/events/core.c _perf_event_enable(event); event 2957 kernel/events/core.c int perf_event_refresh(struct perf_event *event, int refresh) event 2962 kernel/events/core.c ctx = perf_event_ctx_lock(event); event 2963 kernel/events/core.c ret = _perf_event_refresh(event, refresh); event 2964 kernel/events/core.c perf_event_ctx_unlock(event, ctx); event 2985 kernel/events/core.c static int perf_event_modify_attr(struct perf_event *event, event 2988 kernel/events/core.c if (event->attr.type != attr->type) event 2991 kernel/events/core.c switch (event->attr.type) { event 2993 kernel/events/core.c return perf_event_modify_breakpoint(event, attr); event 3004 kernel/events/core.c struct perf_event *event, *tmp; event 3058 kernel/events/core.c list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) event 3059 kernel/events/core.c group_sched_out(event, cpuctx, ctx); event 3063 kernel/events/core.c list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) event 3064 kernel/events/core.c group_sched_out(event, cpuctx, ctx); event 3107 kernel/events/core.c static void __perf_event_sync_stat(struct perf_event *event, event 3112 kernel/events/core.c if (!event->attr.inherit_stat) event 3122 kernel/events/core.c if (event->state == PERF_EVENT_STATE_ACTIVE) event 3123 kernel/events/core.c event->pmu->read(event); event 3125 kernel/events/core.c perf_event_update_time(event); event 3132 kernel/events/core.c value = local64_xchg(&event->count, value); event 3135 kernel/events/core.c swap(event->total_time_enabled, next_event->total_time_enabled); event 3136 kernel/events/core.c swap(event->total_time_running, next_event->total_time_running); event 3141 kernel/events/core.c perf_event_update_userpage(event); event 3148 kernel/events/core.c struct perf_event *event, *next_event; event 3155 kernel/events/core.c event = list_first_entry(&ctx->event_list, event 3161 kernel/events/core.c while (&event->event_entry != &ctx->event_list && event 3164 kernel/events/core.c __perf_event_sync_stat(event, next_event); event 3166 kernel/events/core.c event = list_next_entry(event, event_entry); event 3387 kernel/events/core.c static int pinned_sched_in(struct perf_event *event, void *data) event 3391 kernel/events/core.c if (event->state <= PERF_EVENT_STATE_OFF) event 3394 kernel/events/core.c if (!event_filter_match(event)) event 3397 kernel/events/core.c if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { event 3398 kernel/events/core.c if (!group_sched_in(event, sid->cpuctx, sid->ctx)) event 3399 kernel/events/core.c list_add_tail(&event->active_list, &sid->ctx->pinned_active); event 3406 kernel/events/core.c if (event->state == PERF_EVENT_STATE_INACTIVE) event 3407 kernel/events/core.c perf_event_set_state(event, PERF_EVENT_STATE_ERROR); event 3412 kernel/events/core.c static int flexible_sched_in(struct perf_event *event, void *data) event 3416 kernel/events/core.c if (event->state <= PERF_EVENT_STATE_OFF) event 3419 kernel/events/core.c if (!event_filter_match(event)) event 3422 kernel/events/core.c if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { event 3423 kernel/events/core.c int ret = group_sched_in(event, sid->cpuctx, sid->ctx); event 3429 kernel/events/core.c list_add_tail(&event->active_list, &sid->ctx->flexible_active); event 3594 kernel/events/core.c static u64 
perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) event 3596 kernel/events/core.c u64 frequency = event->attr.sample_freq; event 3670 kernel/events/core.c static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) event 3672 kernel/events/core.c struct hw_perf_event *hwc = &event->hw; event 3676 kernel/events/core.c period = perf_calculate_period(event, nsec, count); event 3690 kernel/events/core.c event->pmu->stop(event, PERF_EF_UPDATE); event 3695 kernel/events/core.c event->pmu->start(event, PERF_EF_RELOAD); event 3707 kernel/events/core.c struct perf_event *event; event 3723 kernel/events/core.c list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { event 3724 kernel/events/core.c if (event->state != PERF_EVENT_STATE_ACTIVE) event 3727 kernel/events/core.c if (!event_filter_match(event)) event 3730 kernel/events/core.c perf_pmu_disable(event->pmu); event 3732 kernel/events/core.c hwc = &event->hw; event 3736 kernel/events/core.c perf_log_throttle(event, 1); event 3737 kernel/events/core.c event->pmu->start(event, 0); event 3740 kernel/events/core.c if (!event->attr.freq || !event->attr.sample_freq) event 3746 kernel/events/core.c event->pmu->stop(event, PERF_EF_UPDATE); event 3748 kernel/events/core.c now = local64_read(&event->count); event 3760 kernel/events/core.c perf_adjust_period(event, period, delta, false); event 3762 kernel/events/core.c event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); event 3764 kernel/events/core.c perf_pmu_enable(event->pmu); event 3774 kernel/events/core.c static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) event 3783 kernel/events/core.c perf_event_groups_delete(&ctx->flexible_groups, event); event 3784 kernel/events/core.c perf_event_groups_insert(&ctx->flexible_groups, event); event 3791 kernel/events/core.c struct perf_event *event; event 3794 kernel/events/core.c event = list_first_entry_or_null(&ctx->flexible_active, event 3798 kernel/events/core.c if (!event) { event 3799 kernel/events/core.c event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), event 3800 kernel/events/core.c typeof(*event), group_node); event 3803 kernel/events/core.c return event; event 3870 kernel/events/core.c static int event_enable_on_exec(struct perf_event *event, event 3873 kernel/events/core.c if (!event->attr.enable_on_exec) event 3876 kernel/events/core.c event->attr.enable_on_exec = 0; event 3877 kernel/events/core.c if (event->state >= PERF_EVENT_STATE_INACTIVE) event 3880 kernel/events/core.c perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); event 3894 kernel/events/core.c struct perf_event *event; event 3906 kernel/events/core.c list_for_each_entry(event, &ctx->event_list, event_entry) { event 3907 kernel/events/core.c enabled |= event_enable_on_exec(event, ctx); event 3908 kernel/events/core.c event_type |= get_event_type(event); event 3930 kernel/events/core.c struct perf_event *event; event 3935 kernel/events/core.c static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) event 3939 kernel/events/core.c if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { event 3958 kernel/events/core.c struct perf_event *sub, *event = data->event; event 3959 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 3961 kernel/events/core.c struct pmu *pmu = event->pmu; event 3976 kernel/events/core.c update_cgrp_time_from_event(event); event 3979 kernel/events/core.c perf_event_update_time(event); event 3981 kernel/events/core.c 
perf_event_update_sibling_time(event); event 3983 kernel/events/core.c if (event->state != PERF_EVENT_STATE_ACTIVE) event 3987 kernel/events/core.c pmu->read(event); event 3994 kernel/events/core.c pmu->read(event); event 3996 kernel/events/core.c for_each_sibling_event(sub, event) { event 4012 kernel/events/core.c static inline u64 perf_event_count(struct perf_event *event) event 4014 kernel/events/core.c return local64_read(&event->count) + atomic64_read(&event->child_count); event 4025 kernel/events/core.c int perf_event_read_local(struct perf_event *event, u64 *value, event 4041 kernel/events/core.c if (event->attr.inherit) { event 4047 kernel/events/core.c if ((event->attach_state & PERF_ATTACH_TASK) && event 4048 kernel/events/core.c event->hw.target != current) { event 4054 kernel/events/core.c if (!(event->attach_state & PERF_ATTACH_TASK) && event 4055 kernel/events/core.c event->cpu != smp_processor_id()) { event 4061 kernel/events/core.c if (event->attr.pinned && event->oncpu != smp_processor_id()) { event 4071 kernel/events/core.c if (event->oncpu == smp_processor_id()) event 4072 kernel/events/core.c event->pmu->read(event); event 4074 kernel/events/core.c *value = local64_read(&event->count); event 4076 kernel/events/core.c u64 now = event->shadow_ctx_time + perf_clock(); event 4079 kernel/events/core.c __perf_update_times(event, now, &__enabled, &__running); event 4091 kernel/events/core.c static int perf_event_read(struct perf_event *event, bool group) event 4093 kernel/events/core.c enum perf_event_state state = READ_ONCE(event->state); event 4112 kernel/events/core.c event_cpu = READ_ONCE(event->oncpu); event 4117 kernel/events/core.c .event = event, event 4123 kernel/events/core.c event_cpu = __perf_event_read_cpu(event, event_cpu); event 4140 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 4144 kernel/events/core.c state = event->state; event 4156 kernel/events/core.c update_cgrp_time_from_event(event); event 4159 kernel/events/core.c perf_event_update_time(event); event 4161 kernel/events/core.c perf_event_update_sibling_time(event); event 4226 kernel/events/core.c struct perf_event *event) event 4233 kernel/events/core.c int cpu = event->cpu; event 4253 kernel/events/core.c if (event->attach_state & PERF_ATTACH_TASK_DATA) { event 4320 kernel/events/core.c static void perf_event_free_filter(struct perf_event *event); event 4321 kernel/events/core.c static void perf_event_free_bpf_prog(struct perf_event *event); event 4325 kernel/events/core.c struct perf_event *event; event 4327 kernel/events/core.c event = container_of(head, struct perf_event, rcu_head); event 4328 kernel/events/core.c if (event->ns) event 4329 kernel/events/core.c put_pid_ns(event->ns); event 4330 kernel/events/core.c perf_event_free_filter(event); event 4331 kernel/events/core.c kfree(event); event 4334 kernel/events/core.c static void ring_buffer_attach(struct perf_event *event, event 4337 kernel/events/core.c static void detach_sb_event(struct perf_event *event) event 4339 kernel/events/core.c struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); event 4342 kernel/events/core.c list_del_rcu(&event->sb_list); event 4346 kernel/events/core.c static bool is_sb_event(struct perf_event *event) event 4348 kernel/events/core.c struct perf_event_attr *attr = &event->attr; event 4350 kernel/events/core.c if (event->parent) event 4353 kernel/events/core.c if (event->attach_state & PERF_ATTACH_TASK) event 4365 kernel/events/core.c static void 
unaccount_pmu_sb_event(struct perf_event *event) event 4367 kernel/events/core.c if (is_sb_event(event)) event 4368 kernel/events/core.c detach_sb_event(event); event 4371 kernel/events/core.c static void unaccount_event_cpu(struct perf_event *event, int cpu) event 4373 kernel/events/core.c if (event->parent) event 4376 kernel/events/core.c if (is_cgroup_event(event)) event 4402 kernel/events/core.c static void unaccount_event(struct perf_event *event) event 4406 kernel/events/core.c if (event->parent) event 4409 kernel/events/core.c if (event->attach_state & PERF_ATTACH_TASK) event 4411 kernel/events/core.c if (event->attr.mmap || event->attr.mmap_data) event 4413 kernel/events/core.c if (event->attr.comm) event 4415 kernel/events/core.c if (event->attr.namespaces) event 4417 kernel/events/core.c if (event->attr.task) event 4419 kernel/events/core.c if (event->attr.freq) event 4421 kernel/events/core.c if (event->attr.context_switch) { event 4425 kernel/events/core.c if (is_cgroup_event(event)) event 4427 kernel/events/core.c if (has_branch_stack(event)) event 4429 kernel/events/core.c if (event->attr.ksymbol) event 4431 kernel/events/core.c if (event->attr.bpf_event) event 4439 kernel/events/core.c unaccount_event_cpu(event, event->cpu); event 4441 kernel/events/core.c unaccount_pmu_sb_event(event); event 4464 kernel/events/core.c static int exclusive_event_init(struct perf_event *event) event 4466 kernel/events/core.c struct pmu *pmu = event->pmu; event 4484 kernel/events/core.c if (event->attach_state & PERF_ATTACH_TASK) { event 4495 kernel/events/core.c static void exclusive_event_destroy(struct perf_event *event) event 4497 kernel/events/core.c struct pmu *pmu = event->pmu; event 4503 kernel/events/core.c if (event->attach_state & PERF_ATTACH_TASK) event 4519 kernel/events/core.c static bool exclusive_event_installable(struct perf_event *event, event 4523 kernel/events/core.c struct pmu *pmu = event->pmu; event 4531 kernel/events/core.c if (exclusive_event_match(iter_event, event)) event 4538 kernel/events/core.c static void perf_addr_filters_splice(struct perf_event *event, event 4541 kernel/events/core.c static void _free_event(struct perf_event *event) event 4543 kernel/events/core.c irq_work_sync(&event->pending); event 4545 kernel/events/core.c unaccount_event(event); event 4547 kernel/events/core.c if (event->rb) { event 4554 kernel/events/core.c mutex_lock(&event->mmap_mutex); event 4555 kernel/events/core.c ring_buffer_attach(event, NULL); event 4556 kernel/events/core.c mutex_unlock(&event->mmap_mutex); event 4559 kernel/events/core.c if (is_cgroup_event(event)) event 4560 kernel/events/core.c perf_detach_cgroup(event); event 4562 kernel/events/core.c if (!event->parent) { event 4563 kernel/events/core.c if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) event 4567 kernel/events/core.c perf_event_free_bpf_prog(event); event 4568 kernel/events/core.c perf_addr_filters_splice(event, NULL); event 4569 kernel/events/core.c kfree(event->addr_filter_ranges); event 4571 kernel/events/core.c if (event->destroy) event 4572 kernel/events/core.c event->destroy(event); event 4578 kernel/events/core.c if (event->hw.target) event 4579 kernel/events/core.c put_task_struct(event->hw.target); event 4585 kernel/events/core.c if (event->ctx) event 4586 kernel/events/core.c put_ctx(event->ctx); event 4588 kernel/events/core.c exclusive_event_destroy(event); event 4589 kernel/events/core.c module_put(event->pmu->module); event 4591 kernel/events/core.c call_rcu(&event->rcu_head, 
free_event_rcu); event 4598 kernel/events/core.c static void free_event(struct perf_event *event) event 4600 kernel/events/core.c if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, event 4602 kernel/events/core.c atomic_long_read(&event->refcount), event)) { event 4607 kernel/events/core.c _free_event(event); event 4613 kernel/events/core.c static void perf_remove_from_owner(struct perf_event *event) event 4624 kernel/events/core.c owner = READ_ONCE(event->owner); event 4652 kernel/events/core.c if (event->owner) { event 4653 kernel/events/core.c list_del_init(&event->owner_entry); event 4654 kernel/events/core.c smp_store_release(&event->owner, NULL); event 4661 kernel/events/core.c static void put_event(struct perf_event *event) event 4663 kernel/events/core.c if (!atomic_long_dec_and_test(&event->refcount)) event 4666 kernel/events/core.c _free_event(event); event 4674 kernel/events/core.c int perf_event_release_kernel(struct perf_event *event) event 4676 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 4685 kernel/events/core.c WARN_ON_ONCE(event->attach_state & event 4690 kernel/events/core.c if (!is_kernel_event(event)) event 4691 kernel/events/core.c perf_remove_from_owner(event); event 4693 kernel/events/core.c ctx = perf_event_ctx_lock(event); event 4695 kernel/events/core.c perf_remove_from_context(event, DETACH_GROUP); event 4709 kernel/events/core.c event->state = PERF_EVENT_STATE_DEAD; event 4712 kernel/events/core.c perf_event_ctx_unlock(event, ctx); event 4715 kernel/events/core.c mutex_lock(&event->child_mutex); event 4716 kernel/events/core.c list_for_each_entry(child, &event->child_list, child_list) { event 4738 kernel/events/core.c mutex_unlock(&event->child_mutex); event 4740 kernel/events/core.c mutex_lock(&event->child_mutex); event 4747 kernel/events/core.c tmp = list_first_entry_or_null(&event->child_list, event 4756 kernel/events/core.c put_event(event); event 4759 kernel/events/core.c mutex_unlock(&event->child_mutex); event 4764 kernel/events/core.c mutex_unlock(&event->child_mutex); event 4781 kernel/events/core.c put_event(event); /* Must be the 'last' reference */ event 4795 kernel/events/core.c static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) event 4803 kernel/events/core.c mutex_lock(&event->child_mutex); event 4805 kernel/events/core.c (void)perf_event_read(event, false); event 4806 kernel/events/core.c total += perf_event_count(event); event 4808 kernel/events/core.c *enabled += event->total_time_enabled + event 4809 kernel/events/core.c atomic64_read(&event->child_total_time_enabled); event 4810 kernel/events/core.c *running += event->total_time_running + event 4811 kernel/events/core.c atomic64_read(&event->child_total_time_running); event 4813 kernel/events/core.c list_for_each_entry(child, &event->child_list, child_list) { event 4819 kernel/events/core.c mutex_unlock(&event->child_mutex); event 4824 kernel/events/core.c u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) event 4829 kernel/events/core.c ctx = perf_event_ctx_lock(event); event 4830 kernel/events/core.c count = __perf_event_read_value(event, enabled, running); event 4831 kernel/events/core.c perf_event_ctx_unlock(event, ctx); event 4884 kernel/events/core.c static int perf_read_group(struct perf_event *event, event 4887 kernel/events/core.c struct perf_event *leader = event->group_leader, *child; event 4894 kernel/events/core.c values = kzalloc(event->read_size, GFP_KERNEL); event 4918 
kernel/events/core.c ret = event->read_size; event 4919 kernel/events/core.c if (copy_to_user(buf, values, event->read_size)) event 4930 kernel/events/core.c static int perf_read_one(struct perf_event *event, event 4937 kernel/events/core.c values[n++] = __perf_event_read_value(event, &enabled, &running); event 4943 kernel/events/core.c values[n++] = primary_event_id(event); event 4951 kernel/events/core.c static bool is_event_hup(struct perf_event *event) event 4955 kernel/events/core.c if (event->state > PERF_EVENT_STATE_EXIT) event 4958 kernel/events/core.c mutex_lock(&event->child_mutex); event 4959 kernel/events/core.c no_children = list_empty(&event->child_list); event 4960 kernel/events/core.c mutex_unlock(&event->child_mutex); event 4968 kernel/events/core.c __perf_read(struct perf_event *event, char __user *buf, size_t count) event 4970 kernel/events/core.c u64 read_format = event->attr.read_format; event 4978 kernel/events/core.c if (event->state == PERF_EVENT_STATE_ERROR) event 4981 kernel/events/core.c if (count < event->read_size) event 4984 kernel/events/core.c WARN_ON_ONCE(event->ctx->parent_ctx); event 4986 kernel/events/core.c ret = perf_read_group(event, read_format, buf); event 4988 kernel/events/core.c ret = perf_read_one(event, read_format, buf); event 4996 kernel/events/core.c struct perf_event *event = file->private_data; event 5000 kernel/events/core.c ctx = perf_event_ctx_lock(event); event 5001 kernel/events/core.c ret = __perf_read(event, buf, count); event 5002 kernel/events/core.c perf_event_ctx_unlock(event, ctx); event 5009 kernel/events/core.c struct perf_event *event = file->private_data; event 5013 kernel/events/core.c poll_wait(file, &event->waitq, wait); event 5015 kernel/events/core.c if (is_event_hup(event)) event 5022 kernel/events/core.c mutex_lock(&event->mmap_mutex); event 5023 kernel/events/core.c rb = event->rb; event 5026 kernel/events/core.c mutex_unlock(&event->mmap_mutex); event 5030 kernel/events/core.c static void _perf_event_reset(struct perf_event *event) event 5032 kernel/events/core.c (void)perf_event_read(event, false); event 5033 kernel/events/core.c local64_set(&event->count, 0); event 5034 kernel/events/core.c perf_event_update_userpage(event); event 5043 kernel/events/core.c static void perf_event_for_each_child(struct perf_event *event, event 5048 kernel/events/core.c WARN_ON_ONCE(event->ctx->parent_ctx); event 5050 kernel/events/core.c mutex_lock(&event->child_mutex); event 5051 kernel/events/core.c func(event); event 5052 kernel/events/core.c list_for_each_entry(child, &event->child_list, child_list) event 5054 kernel/events/core.c mutex_unlock(&event->child_mutex); event 5057 kernel/events/core.c static void perf_event_for_each(struct perf_event *event, event 5060 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 5065 kernel/events/core.c event = event->group_leader; event 5067 kernel/events/core.c perf_event_for_each_child(event, func); event 5068 kernel/events/core.c for_each_sibling_event(sibling, event) event 5072 kernel/events/core.c static void __perf_event_period(struct perf_event *event, event 5080 kernel/events/core.c if (event->attr.freq) { event 5081 kernel/events/core.c event->attr.sample_freq = value; event 5083 kernel/events/core.c event->attr.sample_period = value; event 5084 kernel/events/core.c event->hw.sample_period = value; event 5087 kernel/events/core.c active = (event->state == PERF_EVENT_STATE_ACTIVE); event 5094 kernel/events/core.c if (event->hw.interrupts == MAX_INTERRUPTS) { 
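perf_read_one() above fills its values[] array according to event->attr.read_format: the count, then the enabled and running times, then the event ID. The following is a hedged userspace sketch (not part of this listing) of the matching consumer: it opens a counter through the perf_event_open() syscall, requests those read_format fields, and reads them back in the same order. Struct layout and constant names follow the perf_event_open(2) UAPI; the workload placeholder is illustrative.

/* Hedged userspace sketch: reading back the layout perf_read_one() produces. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Non-group read_format layout: value, time_enabled, time_running, id. */
struct read_one {
	uint64_t value;
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	uint64_t id;		/* PERF_FORMAT_ID */
};

int main(void)
{
	struct perf_event_attr attr;
	struct read_one r;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;	/* easier to run unprivileged */
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING |
			   PERF_FORMAT_ID;

	/* perf_event_open() has no glibc wrapper; call it via syscall(). */
	fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &r, sizeof(r)) == sizeof(r))
		printf("count=%llu enabled=%llu running=%llu id=%llu\n",
		       (unsigned long long)r.value,
		       (unsigned long long)r.time_enabled,
		       (unsigned long long)r.time_running,
		       (unsigned long long)r.id);
	close(fd);
	return 0;
}

Grouped counters go through the perf_read_group() path instead: with PERF_FORMAT_GROUP the buffer starts with nr and the time fields, followed by one { value[, id] } pair per group member.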
event 5095 kernel/events/core.c event->hw.interrupts = 0; event 5096 kernel/events/core.c perf_log_throttle(event, 1); event 5098 kernel/events/core.c event->pmu->stop(event, PERF_EF_UPDATE); event 5101 kernel/events/core.c local64_set(&event->hw.period_left, 0); event 5104 kernel/events/core.c event->pmu->start(event, PERF_EF_RELOAD); event 5109 kernel/events/core.c static int perf_event_check_period(struct perf_event *event, u64 value) event 5111 kernel/events/core.c return event->pmu->check_period(event, value); event 5114 kernel/events/core.c static int perf_event_period(struct perf_event *event, u64 __user *arg) event 5118 kernel/events/core.c if (!is_sampling_event(event)) event 5127 kernel/events/core.c if (event->attr.freq && value > sysctl_perf_event_sample_rate) event 5130 kernel/events/core.c if (perf_event_check_period(event, value)) event 5133 kernel/events/core.c if (!event->attr.freq && (value & (1ULL << 63))) event 5136 kernel/events/core.c event_function_call(event, __perf_event_period, &value); event 5157 kernel/events/core.c static int perf_event_set_output(struct perf_event *event, event 5159 kernel/events/core.c static int perf_event_set_filter(struct perf_event *event, void __user *arg); event 5160 kernel/events/core.c static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); event 5164 kernel/events/core.c static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) event 5181 kernel/events/core.c return _perf_event_refresh(event, arg); event 5184 kernel/events/core.c return perf_event_period(event, (u64 __user *)arg); event 5188 kernel/events/core.c u64 id = primary_event_id(event); event 5205 kernel/events/core.c ret = perf_event_set_output(event, output_event); event 5208 kernel/events/core.c ret = perf_event_set_output(event, NULL); event 5214 kernel/events/core.c return perf_event_set_filter(event, (void __user *)arg); event 5217 kernel/events/core.c return perf_event_set_bpf_prog(event, arg); event 5223 kernel/events/core.c rb = rcu_dereference(event->rb); event 5234 kernel/events/core.c return perf_event_query_prog_array(event, (void __user *)arg); event 5244 kernel/events/core.c return perf_event_modify_attr(event, &new_attr); event 5251 kernel/events/core.c perf_event_for_each(event, func); event 5253 kernel/events/core.c perf_event_for_each_child(event, func); event 5260 kernel/events/core.c struct perf_event *event = file->private_data; event 5264 kernel/events/core.c ctx = perf_event_ctx_lock(event); event 5265 kernel/events/core.c ret = _perf_ioctl(event, cmd, arg); event 5266 kernel/events/core.c perf_event_ctx_unlock(event, ctx); event 5296 kernel/events/core.c struct perf_event *event; event 5299 kernel/events/core.c list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { event 5300 kernel/events/core.c ctx = perf_event_ctx_lock(event); event 5301 kernel/events/core.c perf_event_for_each_child(event, _perf_event_enable); event 5302 kernel/events/core.c perf_event_ctx_unlock(event, ctx); event 5312 kernel/events/core.c struct perf_event *event; event 5315 kernel/events/core.c list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { event 5316 kernel/events/core.c ctx = perf_event_ctx_lock(event); event 5317 kernel/events/core.c perf_event_for_each_child(event, _perf_event_disable); event 5318 kernel/events/core.c perf_event_ctx_unlock(event, ctx); event 5325 kernel/events/core.c static int perf_event_index(struct perf_event *event) event 5327 kernel/events/core.c if (event->hw.state & 
PERF_HES_STOPPED) event 5330 kernel/events/core.c if (event->state != PERF_EVENT_STATE_ACTIVE) event 5333 kernel/events/core.c return event->pmu->event_idx(event); event 5336 kernel/events/core.c static void calc_timer_values(struct perf_event *event, event 5344 kernel/events/core.c ctx_time = event->shadow_ctx_time + *now; event 5345 kernel/events/core.c __perf_update_times(event, ctx_time, enabled, running); event 5348 kernel/events/core.c static void perf_event_init_userpage(struct perf_event *event) event 5354 kernel/events/core.c rb = rcu_dereference(event->rb); event 5371 kernel/events/core.c struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) event 5380 kernel/events/core.c void perf_event_update_userpage(struct perf_event *event) event 5387 kernel/events/core.c rb = rcu_dereference(event->rb); event 5400 kernel/events/core.c calc_timer_values(event, &now, &enabled, &running); event 5410 kernel/events/core.c userpg->index = perf_event_index(event); event 5411 kernel/events/core.c userpg->offset = perf_event_count(event); event 5413 kernel/events/core.c userpg->offset -= local64_read(&event->hw.prev_count); event 5416 kernel/events/core.c atomic64_read(&event->child_total_time_enabled); event 5419 kernel/events/core.c atomic64_read(&event->child_total_time_running); event 5421 kernel/events/core.c arch_perf_update_userpage(event, userpg, now); event 5433 kernel/events/core.c struct perf_event *event = vmf->vma->vm_file->private_data; event 5444 kernel/events/core.c rb = rcu_dereference(event->rb); event 5466 kernel/events/core.c static void ring_buffer_attach(struct perf_event *event, event 5472 kernel/events/core.c if (event->rb) { event 5477 kernel/events/core.c WARN_ON_ONCE(event->rcu_pending); event 5479 kernel/events/core.c old_rb = event->rb; event 5481 kernel/events/core.c list_del_rcu(&event->rb_entry); event 5484 kernel/events/core.c event->rcu_batches = get_state_synchronize_rcu(); event 5485 kernel/events/core.c event->rcu_pending = 1; event 5489 kernel/events/core.c if (event->rcu_pending) { event 5490 kernel/events/core.c cond_synchronize_rcu(event->rcu_batches); event 5491 kernel/events/core.c event->rcu_pending = 0; event 5495 kernel/events/core.c list_add_rcu(&event->rb_entry, &rb->event_list); event 5509 kernel/events/core.c if (has_aux(event)) event 5510 kernel/events/core.c perf_event_stop(event, 0); event 5512 kernel/events/core.c rcu_assign_pointer(event->rb, rb); event 5521 kernel/events/core.c wake_up_all(&event->waitq); event 5525 kernel/events/core.c static void ring_buffer_wakeup(struct perf_event *event) event 5530 kernel/events/core.c rb = rcu_dereference(event->rb); event 5532 kernel/events/core.c list_for_each_entry_rcu(event, &rb->event_list, rb_entry) event 5533 kernel/events/core.c wake_up_all(&event->waitq); event 5538 kernel/events/core.c struct ring_buffer *ring_buffer_get(struct perf_event *event) event 5543 kernel/events/core.c rb = rcu_dereference(event->rb); event 5565 kernel/events/core.c struct perf_event *event = vma->vm_file->private_data; event 5567 kernel/events/core.c atomic_inc(&event->mmap_count); event 5568 kernel/events/core.c atomic_inc(&event->rb->mmap_count); event 5571 kernel/events/core.c atomic_inc(&event->rb->aux_mmap_count); event 5573 kernel/events/core.c if (event->pmu->event_mapped) event 5574 kernel/events/core.c event->pmu->event_mapped(event, vma->vm_mm); event 5577 kernel/events/core.c static void perf_pmu_output_stop(struct perf_event *event); event 5589 kernel/events/core.c struct perf_event 
*event = vma->vm_file->private_data; event 5591 kernel/events/core.c struct ring_buffer *rb = ring_buffer_get(event); event 5596 kernel/events/core.c if (event->pmu->event_unmapped) event 5597 kernel/events/core.c event->pmu->event_unmapped(event, vma->vm_mm); event 5605 kernel/events/core.c atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { event 5612 kernel/events/core.c perf_pmu_output_stop(event); event 5622 kernel/events/core.c mutex_unlock(&event->mmap_mutex); event 5627 kernel/events/core.c if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) event 5630 kernel/events/core.c ring_buffer_attach(event, NULL); event 5631 kernel/events/core.c mutex_unlock(&event->mmap_mutex); event 5644 kernel/events/core.c list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { event 5645 kernel/events/core.c if (!atomic_long_inc_not_zero(&event->refcount)) { event 5654 kernel/events/core.c mutex_lock(&event->mmap_mutex); event 5665 kernel/events/core.c if (event->rb == rb) event 5666 kernel/events/core.c ring_buffer_attach(event, NULL); event 5668 kernel/events/core.c mutex_unlock(&event->mmap_mutex); event 5669 kernel/events/core.c put_event(event); event 5706 kernel/events/core.c struct perf_event *event = file->private_data; event 5721 kernel/events/core.c if (event->cpu == -1 && event->attr.inherit) event 5739 kernel/events/core.c if (!event->rb) event 5744 kernel/events/core.c mutex_lock(&event->mmap_mutex); event 5747 kernel/events/core.c rb = event->rb; event 5799 kernel/events/core.c WARN_ON_ONCE(event->ctx->parent_ctx); event 5801 kernel/events/core.c mutex_lock(&event->mmap_mutex); event 5802 kernel/events/core.c if (event->rb) { event 5803 kernel/events/core.c if (event->rb->nr_pages != nr_pages) { event 5808 kernel/events/core.c if (!atomic_inc_not_zero(&event->rb->mmap_count)) { event 5814 kernel/events/core.c mutex_unlock(&event->mmap_mutex); event 5866 kernel/events/core.c WARN_ON(!rb && event->rb); event 5873 kernel/events/core.c event->attr.watermark ? 
event->attr.wakeup_watermark : 0, event 5874 kernel/events/core.c event->cpu, flags); event 5885 kernel/events/core.c ring_buffer_attach(event, rb); event 5887 kernel/events/core.c perf_event_init_userpage(event); event 5888 kernel/events/core.c perf_event_update_userpage(event); event 5890 kernel/events/core.c ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, event 5891 kernel/events/core.c event->attr.aux_watermark, flags); event 5901 kernel/events/core.c atomic_inc(&event->mmap_count); event 5906 kernel/events/core.c mutex_unlock(&event->mmap_mutex); event 5915 kernel/events/core.c if (event->pmu->event_mapped) event 5916 kernel/events/core.c event->pmu->event_mapped(event, vma->vm_mm); event 5924 kernel/events/core.c struct perf_event *event = filp->private_data; event 5928 kernel/events/core.c retval = fasync_helper(fd, filp, on, &event->fasync); event 5955 kernel/events/core.c static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) event 5958 kernel/events/core.c if (event->parent) event 5959 kernel/events/core.c event = event->parent; event 5960 kernel/events/core.c return &event->fasync; event 5963 kernel/events/core.c void perf_event_wakeup(struct perf_event *event) event 5965 kernel/events/core.c ring_buffer_wakeup(event); event 5967 kernel/events/core.c if (event->pending_kill) { event 5968 kernel/events/core.c kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); event 5969 kernel/events/core.c event->pending_kill = 0; event 5973 kernel/events/core.c static void perf_pending_event_disable(struct perf_event *event) event 5975 kernel/events/core.c int cpu = READ_ONCE(event->pending_disable); event 5981 kernel/events/core.c WRITE_ONCE(event->pending_disable, -1); event 5982 kernel/events/core.c perf_event_disable_local(event); event 6006 kernel/events/core.c irq_work_queue_on(&event->pending, cpu); event 6011 kernel/events/core.c struct perf_event *event = container_of(entry, struct perf_event, pending); event 6020 kernel/events/core.c perf_pending_event_disable(event); event 6022 kernel/events/core.c if (event->pending_wakeup) { event 6023 kernel/events/core.c event->pending_wakeup = 0; event 6024 kernel/events/core.c perf_event_wakeup(event); event 6192 kernel/events/core.c struct perf_event *event) event 6194 kernel/events/core.c u64 sample_type = event->attr.sample_type; event 6197 kernel/events/core.c header->size += event->id_header_size; event 6201 kernel/events/core.c data->tid_entry.pid = perf_event_pid(event, current); event 6202 kernel/events/core.c data->tid_entry.tid = perf_event_tid(event, current); event 6206 kernel/events/core.c data->time = perf_event_clock(event); event 6209 kernel/events/core.c data->id = primary_event_id(event); event 6212 kernel/events/core.c data->stream_id = event->id; event 6222 kernel/events/core.c struct perf_event *event) event 6224 kernel/events/core.c if (event->attr.sample_id_all) event 6225 kernel/events/core.c __perf_event_header__init_id(header, data, event); event 6252 kernel/events/core.c void perf_event__output_id_sample(struct perf_event *event, event 6256 kernel/events/core.c if (event->attr.sample_id_all) event 6261 kernel/events/core.c struct perf_event *event, event 6264 kernel/events/core.c u64 read_format = event->attr.read_format; event 6268 kernel/events/core.c values[n++] = perf_event_count(event); event 6271 kernel/events/core.c atomic64_read(&event->child_total_time_enabled); event 6275 kernel/events/core.c atomic64_read(&event->child_total_time_running); event 6278 
kernel/events/core.c values[n++] = primary_event_id(event); event 6284 kernel/events/core.c struct perf_event *event, event 6287 kernel/events/core.c struct perf_event *leader = event->group_leader, *sub; event 6288 kernel/events/core.c u64 read_format = event->attr.read_format; event 6300 kernel/events/core.c if ((leader != event) && event 6313 kernel/events/core.c if ((sub != event) && event 6336 kernel/events/core.c struct perf_event *event) event 6339 kernel/events/core.c u64 read_format = event->attr.read_format; event 6351 kernel/events/core.c calc_timer_values(event, &now, &enabled, &running); event 6353 kernel/events/core.c if (event->attr.read_format & PERF_FORMAT_GROUP) event 6354 kernel/events/core.c perf_output_read_group(handle, event, enabled, running); event 6356 kernel/events/core.c perf_output_read_one(handle, event, enabled, running); event 6362 kernel/events/core.c struct perf_event *event) event 6396 kernel/events/core.c perf_output_read(handle, event); event 6467 kernel/events/core.c u64 mask = event->attr.sample_regs_user; event 6498 kernel/events/core.c u64 mask = event->attr.sample_regs_intr; event 6509 kernel/events/core.c if (!event->attr.watermark) { event 6510 kernel/events/core.c int wakeup_events = event->attr.wakeup_events; event 6562 kernel/events/core.c perf_callchain(struct perf_event *event, struct pt_regs *regs) event 6564 kernel/events/core.c bool kernel = !event->attr.exclude_callchain_kernel; event 6565 kernel/events/core.c bool user = !event->attr.exclude_callchain_user; event 6567 kernel/events/core.c bool crosstask = event->ctx->task && event->ctx->task != current; event 6568 kernel/events/core.c const u32 max_stack = event->attr.sample_max_stack; event 6581 kernel/events/core.c struct perf_event *event, event 6584 kernel/events/core.c u64 sample_type = event->attr.sample_type; event 6587 kernel/events/core.c header->size = sizeof(*header) + event->header_size; event 6592 kernel/events/core.c __perf_event_header__init_id(header, data, event); event 6601 kernel/events/core.c data->callchain = perf_callchain(event, regs); event 6651 kernel/events/core.c u64 mask = event->attr.sample_regs_user; event 6665 kernel/events/core.c u16 stack_size = event->attr.sample_stack_user; event 6690 kernel/events/core.c u64 mask = event->attr.sample_regs_intr; event 6703 kernel/events/core.c __perf_event_output(struct perf_event *event, event 6717 kernel/events/core.c perf_prepare_sample(&header, data, event, regs); event 6719 kernel/events/core.c err = output_begin(&handle, event, header.size); event 6723 kernel/events/core.c perf_output_sample(&handle, &header, data, event); event 6733 kernel/events/core.c perf_event_output_forward(struct perf_event *event, event 6737 kernel/events/core.c __perf_event_output(event, data, regs, perf_output_begin_forward); event 6741 kernel/events/core.c perf_event_output_backward(struct perf_event *event, event 6745 kernel/events/core.c __perf_event_output(event, data, regs, perf_output_begin_backward); event 6749 kernel/events/core.c perf_event_output(struct perf_event *event, event 6753 kernel/events/core.c return __perf_event_output(event, data, regs, perf_output_begin); event 6768 kernel/events/core.c perf_event_read_event(struct perf_event *event, event 6777 kernel/events/core.c .size = sizeof(read_event) + event->read_size, event 6779 kernel/events/core.c .pid = perf_event_pid(event, task), event 6780 kernel/events/core.c .tid = perf_event_tid(event, task), event 6784 kernel/events/core.c 
perf_event_header__init_id(&read_event.header, &sample, event); event 6785 kernel/events/core.c ret = perf_output_begin(&handle, event, read_event.header.size); event 6790 kernel/events/core.c perf_output_read(&handle, event); event 6791 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 6796 kernel/events/core.c typedef void (perf_iterate_f)(struct perf_event *event, void *data); event 6803 kernel/events/core.c struct perf_event *event; event 6805 kernel/events/core.c list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { event 6807 kernel/events/core.c if (event->state < PERF_EVENT_STATE_INACTIVE) event 6809 kernel/events/core.c if (!event_filter_match(event)) event 6813 kernel/events/core.c output(event, data); event 6820 kernel/events/core.c struct perf_event *event; event 6822 kernel/events/core.c list_for_each_entry_rcu(event, &pel->list, sb_list) { event 6828 kernel/events/core.c if (!smp_load_acquire(&event->ctx)) event 6831 kernel/events/core.c if (event->state < PERF_EVENT_STATE_INACTIVE) event 6833 kernel/events/core.c if (!event_filter_match(event)) event 6835 kernel/events/core.c output(event, data); event 6881 kernel/events/core.c static void perf_event_addr_filters_exec(struct perf_event *event, void *data) event 6883 kernel/events/core.c struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); event 6888 kernel/events/core.c if (!has_addr_filter(event)) event 6894 kernel/events/core.c event->addr_filter_ranges[count].start = 0; event 6895 kernel/events/core.c event->addr_filter_ranges[count].size = 0; event 6903 kernel/events/core.c event->addr_filters_gen++; event 6907 kernel/events/core.c perf_event_stop(event, 1); event 6934 kernel/events/core.c static void __perf_event_output_stop(struct perf_event *event, void *data) event 6936 kernel/events/core.c struct perf_event *parent = event->parent; event 6940 kernel/events/core.c .event = event, event 6943 kernel/events/core.c if (!has_aux(event)) event 6947 kernel/events/core.c parent = event; event 6965 kernel/events/core.c struct perf_event *event = info; event 6966 kernel/events/core.c struct pmu *pmu = event->ctx->pmu; event 6969 kernel/events/core.c .rb = event->rb, event 6982 kernel/events/core.c static void perf_pmu_output_stop(struct perf_event *event) event 6989 kernel/events/core.c list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { event 7003 kernel/events/core.c err = cpu_function_call(cpu, __perf_pmu_output_stop, event); event 7033 kernel/events/core.c static int perf_event_task_match(struct perf_event *event) event 7035 kernel/events/core.c return event->attr.comm || event->attr.mmap || event 7036 kernel/events/core.c event->attr.mmap2 || event->attr.mmap_data || event 7037 kernel/events/core.c event->attr.task; event 7040 kernel/events/core.c static void perf_event_task_output(struct perf_event *event, event 7049 kernel/events/core.c if (!perf_event_task_match(event)) event 7052 kernel/events/core.c perf_event_header__init_id(&task_event->event_id.header, &sample, event); event 7054 kernel/events/core.c ret = perf_output_begin(&handle, event, event 7059 kernel/events/core.c task_event->event_id.pid = perf_event_pid(event, task); event 7060 kernel/events/core.c task_event->event_id.tid = perf_event_tid(event, task); event 7063 kernel/events/core.c task_event->event_id.ppid = perf_event_pid(event, event 7065 kernel/events/core.c task_event->event_id.ptid = perf_event_pid(event, event 7068 kernel/events/core.c task_event->event_id.ppid = 
perf_event_pid(event, current); event 7069 kernel/events/core.c task_event->event_id.ptid = perf_event_tid(event, current); event 7072 kernel/events/core.c task_event->event_id.time = perf_event_clock(event); event 7076 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 7139 kernel/events/core.c static int perf_event_comm_match(struct perf_event *event) event 7141 kernel/events/core.c return event->attr.comm; event 7144 kernel/events/core.c static void perf_event_comm_output(struct perf_event *event, event 7153 kernel/events/core.c if (!perf_event_comm_match(event)) event 7156 kernel/events/core.c perf_event_header__init_id(&comm_event->event_id.header, &sample, event); event 7157 kernel/events/core.c ret = perf_output_begin(&handle, event, event 7163 kernel/events/core.c comm_event->event_id.pid = perf_event_pid(event, comm_event->task); event 7164 kernel/events/core.c comm_event->event_id.tid = perf_event_tid(event, comm_event->task); event 7170 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 7238 kernel/events/core.c static int perf_event_namespaces_match(struct perf_event *event) event 7240 kernel/events/core.c return event->attr.namespaces; event 7243 kernel/events/core.c static void perf_event_namespaces_output(struct perf_event *event, event 7252 kernel/events/core.c if (!perf_event_namespaces_match(event)) event 7256 kernel/events/core.c &sample, event); event 7257 kernel/events/core.c ret = perf_output_begin(&handle, event, event 7262 kernel/events/core.c namespaces_event->event_id.pid = perf_event_pid(event, event 7264 kernel/events/core.c namespaces_event->event_id.tid = perf_event_tid(event, event 7269 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 7376 kernel/events/core.c static int perf_event_mmap_match(struct perf_event *event, event 7383 kernel/events/core.c return (!executable && event->attr.mmap_data) || event 7384 kernel/events/core.c (executable && (event->attr.mmap || event->attr.mmap2)); event 7387 kernel/events/core.c static void perf_event_mmap_output(struct perf_event *event, event 7397 kernel/events/core.c if (!perf_event_mmap_match(event, data)) event 7400 kernel/events/core.c if (event->attr.mmap2) { event 7410 kernel/events/core.c perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); event 7411 kernel/events/core.c ret = perf_output_begin(&handle, event, event 7416 kernel/events/core.c mmap_event->event_id.pid = perf_event_pid(event, current); event 7417 kernel/events/core.c mmap_event->event_id.tid = perf_event_tid(event, current); event 7421 kernel/events/core.c if (event->attr.mmap2) { event 7433 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 7606 kernel/events/core.c static void __perf_addr_filters_adjust(struct perf_event *event, void *data) event 7608 kernel/events/core.c struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); event 7614 kernel/events/core.c if (!has_addr_filter(event)) event 7623 kernel/events/core.c &event->addr_filter_ranges[count])) event 7630 kernel/events/core.c event->addr_filters_gen++; event 7634 kernel/events/core.c perf_event_stop(event, 1); event 7698 kernel/events/core.c void perf_event_aux_event(struct perf_event *event, unsigned long head, event 7720 kernel/events/core.c perf_event_header__init_id(&rec.header, &sample, event); event 7721 kernel/events/core.c ret = perf_output_begin(&handle, event, rec.header.size); event 7727 kernel/events/core.c 
perf_event__output_id_sample(event, &handle, &sample); event 7735 kernel/events/core.c void perf_log_lost_samples(struct perf_event *event, u64 lost) event 7753 kernel/events/core.c perf_event_header__init_id(&lost_samples_event.header, &sample, event); event 7755 kernel/events/core.c ret = perf_output_begin(&handle, event, event 7761 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 7780 kernel/events/core.c static int perf_event_switch_match(struct perf_event *event) event 7782 kernel/events/core.c return event->attr.context_switch; event 7785 kernel/events/core.c static void perf_event_switch_output(struct perf_event *event, void *data) event 7792 kernel/events/core.c if (!perf_event_switch_match(event)) event 7796 kernel/events/core.c if (event->ctx->task) { event 7803 kernel/events/core.c perf_event_pid(event, se->next_prev); event 7805 kernel/events/core.c perf_event_tid(event, se->next_prev); event 7808 kernel/events/core.c perf_event_header__init_id(&se->event_id.header, &sample, event); event 7810 kernel/events/core.c ret = perf_output_begin(&handle, event, se->event_id.header.size); event 7814 kernel/events/core.c if (event->ctx->task) event 7819 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 7858 kernel/events/core.c static void perf_log_throttle(struct perf_event *event, int enable) event 7875 kernel/events/core.c .time = perf_event_clock(event), event 7876 kernel/events/core.c .id = primary_event_id(event), event 7877 kernel/events/core.c .stream_id = event->id, event 7883 kernel/events/core.c perf_event_header__init_id(&throttle_event.header, &sample, event); event 7885 kernel/events/core.c ret = perf_output_begin(&handle, event, event 7891 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 7911 kernel/events/core.c static int perf_event_ksymbol_match(struct perf_event *event) event 7913 kernel/events/core.c return event->attr.ksymbol; event 7916 kernel/events/core.c static void perf_event_ksymbol_output(struct perf_event *event, void *data) event 7923 kernel/events/core.c if (!perf_event_ksymbol_match(event)) event 7927 kernel/events/core.c &sample, event); event 7928 kernel/events/core.c ret = perf_output_begin(&handle, event, event 7935 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 8001 kernel/events/core.c static int perf_event_bpf_match(struct perf_event *event) event 8003 kernel/events/core.c return event->attr.bpf_event; event 8006 kernel/events/core.c static void perf_event_bpf_output(struct perf_event *event, void *data) event 8013 kernel/events/core.c if (!perf_event_bpf_match(event)) event 8017 kernel/events/core.c &sample, event); event 8018 kernel/events/core.c ret = perf_output_begin(&handle, event, event 8024 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 8096 kernel/events/core.c void perf_event_itrace_started(struct perf_event *event) event 8098 kernel/events/core.c event->attach_state |= PERF_ATTACH_ITRACE; event 8101 kernel/events/core.c static void perf_log_itrace_start(struct perf_event *event) event 8112 kernel/events/core.c if (event->parent) event 8113 kernel/events/core.c event = event->parent; event 8115 kernel/events/core.c if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || event 8116 kernel/events/core.c event->attach_state & PERF_ATTACH_ITRACE) event 8122 kernel/events/core.c rec.pid = perf_event_pid(event, current); event 8123 kernel/events/core.c rec.tid = 
perf_event_tid(event, current); event 8125 kernel/events/core.c perf_event_header__init_id(&rec.header, &sample, event); event 8126 kernel/events/core.c ret = perf_output_begin(&handle, event, rec.header.size); event 8132 kernel/events/core.c perf_event__output_id_sample(event, &handle, &sample); event 8138 kernel/events/core.c __perf_event_account_interrupt(struct perf_event *event, int throttle) event 8140 kernel/events/core.c struct hw_perf_event *hwc = &event->hw; event 8155 kernel/events/core.c perf_log_throttle(event, 0); event 8160 kernel/events/core.c if (event->attr.freq) { event 8167 kernel/events/core.c perf_adjust_period(event, delta, hwc->last_period, true); event 8173 kernel/events/core.c int perf_event_account_interrupt(struct perf_event *event) event 8175 kernel/events/core.c return __perf_event_account_interrupt(event, 1); event 8182 kernel/events/core.c static int __perf_event_overflow(struct perf_event *event, event 8186 kernel/events/core.c int events = atomic_read(&event->event_limit); event 8193 kernel/events/core.c if (unlikely(!is_sampling_event(event))) event 8196 kernel/events/core.c ret = __perf_event_account_interrupt(event, throttle); event 8203 kernel/events/core.c event->pending_kill = POLL_IN; event 8204 kernel/events/core.c if (events && atomic_dec_and_test(&event->event_limit)) { event 8206 kernel/events/core.c event->pending_kill = POLL_HUP; event 8208 kernel/events/core.c perf_event_disable_inatomic(event); event 8211 kernel/events/core.c READ_ONCE(event->overflow_handler)(event, data, regs); event 8213 kernel/events/core.c if (*perf_event_fasync(event) && event->pending_kill) { event 8214 kernel/events/core.c event->pending_wakeup = 1; event 8215 kernel/events/core.c irq_work_queue(&event->pending); event 8221 kernel/events/core.c int perf_event_overflow(struct perf_event *event, event 8225 kernel/events/core.c return __perf_event_overflow(event, 1, data, regs); event 8250 kernel/events/core.c u64 perf_swevent_set_period(struct perf_event *event) event 8252 kernel/events/core.c struct hw_perf_event *hwc = &event->hw; event 8273 kernel/events/core.c static void perf_swevent_overflow(struct perf_event *event, u64 overflow, event 8277 kernel/events/core.c struct hw_perf_event *hwc = &event->hw; event 8281 kernel/events/core.c overflow = perf_swevent_set_period(event); event 8287 kernel/events/core.c if (__perf_event_overflow(event, throttle, event 8299 kernel/events/core.c static void perf_swevent_event(struct perf_event *event, u64 nr, event 8303 kernel/events/core.c struct hw_perf_event *hwc = &event->hw; event 8305 kernel/events/core.c local64_add(nr, &event->count); event 8310 kernel/events/core.c if (!is_sampling_event(event)) event 8313 kernel/events/core.c if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { event 8315 kernel/events/core.c return perf_swevent_overflow(event, 1, data, regs); event 8317 kernel/events/core.c data->period = event->hw.last_period; event 8319 kernel/events/core.c if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) event 8320 kernel/events/core.c return perf_swevent_overflow(event, 1, data, regs); event 8325 kernel/events/core.c perf_swevent_overflow(event, 0, data, regs); event 8328 kernel/events/core.c static int perf_exclude_event(struct perf_event *event, event 8331 kernel/events/core.c if (event->hw.state & PERF_HES_STOPPED) event 8335 kernel/events/core.c if (event->attr.exclude_user && user_mode(regs)) event 8338 kernel/events/core.c if (event->attr.exclude_kernel && !user_mode(regs)) 
event 8345 kernel/events/core.c static int perf_swevent_match(struct perf_event *event, event 8351 kernel/events/core.c if (event->attr.type != type) event 8354 kernel/events/core.c if (event->attr.config != event_id) event 8357 kernel/events/core.c if (perf_exclude_event(event, regs)) event 8393 kernel/events/core.c find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) event 8396 kernel/events/core.c u32 event_id = event->attr.config; event 8397 kernel/events/core.c u64 type = event->attr.type; event 8405 kernel/events/core.c lockdep_is_held(&event->ctx->lock)); event 8418 kernel/events/core.c struct perf_event *event; event 8426 kernel/events/core.c hlist_for_each_entry_rcu(event, head, hlist_entry) { event 8427 kernel/events/core.c if (perf_swevent_match(event, type, event_id, data, regs)) event 8428 kernel/events/core.c perf_swevent_event(event, nr, data, regs); event 8478 kernel/events/core.c static void perf_swevent_read(struct perf_event *event) event 8482 kernel/events/core.c static int perf_swevent_add(struct perf_event *event, int flags) event 8485 kernel/events/core.c struct hw_perf_event *hwc = &event->hw; event 8488 kernel/events/core.c if (is_sampling_event(event)) { event 8490 kernel/events/core.c perf_swevent_set_period(event); event 8495 kernel/events/core.c head = find_swevent_head(swhash, event); event 8499 kernel/events/core.c hlist_add_head_rcu(&event->hlist_entry, head); event 8500 kernel/events/core.c perf_event_update_userpage(event); event 8505 kernel/events/core.c static void perf_swevent_del(struct perf_event *event, int flags) event 8507 kernel/events/core.c hlist_del_rcu(&event->hlist_entry); event 8510 kernel/events/core.c static void perf_swevent_start(struct perf_event *event, int flags) event 8512 kernel/events/core.c event->hw.state = 0; event 8515 kernel/events/core.c static void perf_swevent_stop(struct perf_event *event, int flags) event 8517 kernel/events/core.c event->hw.state = PERF_HES_STOPPED; event 8609 kernel/events/core.c static void sw_perf_event_destroy(struct perf_event *event) event 8611 kernel/events/core.c u64 event_id = event->attr.config; event 8613 kernel/events/core.c WARN_ON(event->parent); event 8619 kernel/events/core.c static int perf_swevent_init(struct perf_event *event) event 8621 kernel/events/core.c u64 event_id = event->attr.config; event 8623 kernel/events/core.c if (event->attr.type != PERF_TYPE_SOFTWARE) event 8629 kernel/events/core.c if (has_branch_stack(event)) event 8644 kernel/events/core.c if (!event->parent) { event 8652 kernel/events/core.c event->destroy = sw_perf_event_destroy; event 8673 kernel/events/core.c static int perf_tp_filter_match(struct perf_event *event, event 8679 kernel/events/core.c if (event->parent) event 8680 kernel/events/core.c event = event->parent; event 8682 kernel/events/core.c if (likely(!event->filter) || filter_match_preds(event->filter, record)) event 8687 kernel/events/core.c static int perf_tp_event_match(struct perf_event *event, event 8691 kernel/events/core.c if (event->hw.state & PERF_HES_STOPPED) event 8696 kernel/events/core.c if (event->attr.exclude_kernel && !user_mode(regs)) event 8699 kernel/events/core.c if (!perf_tp_filter_match(event, data)) event 8717 kernel/events/core.c perf_tp_event(call->event.type, count, raw_data, size, regs, head, event 8727 kernel/events/core.c struct perf_event *event; event 8741 kernel/events/core.c hlist_for_each_entry_rcu(event, head, hlist_entry) { event 8742 kernel/events/core.c if (perf_tp_event_match(event, &data, 
regs)) event 8743 kernel/events/core.c perf_swevent_event(event, count, &data, regs); event 8759 kernel/events/core.c list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { event 8760 kernel/events/core.c if (event->cpu != smp_processor_id()) event 8762 kernel/events/core.c if (event->attr.type != PERF_TYPE_TRACEPOINT) event 8764 kernel/events/core.c if (event->attr.config != entry->type) event 8766 kernel/events/core.c if (perf_tp_event_match(event, &data, regs)) event 8767 kernel/events/core.c perf_swevent_event(event, count, &data, regs); event 8777 kernel/events/core.c static void tp_perf_event_destroy(struct perf_event *event) event 8779 kernel/events/core.c perf_trace_destroy(event); event 8782 kernel/events/core.c static int perf_tp_event_init(struct perf_event *event) event 8786 kernel/events/core.c if (event->attr.type != PERF_TYPE_TRACEPOINT) event 8792 kernel/events/core.c if (has_branch_stack(event)) event 8795 kernel/events/core.c err = perf_trace_init(event); event 8799 kernel/events/core.c event->destroy = tp_perf_event_destroy; event 8855 kernel/events/core.c static int perf_kprobe_event_init(struct perf_event *event); event 8867 kernel/events/core.c static int perf_kprobe_event_init(struct perf_event *event) event 8872 kernel/events/core.c if (event->attr.type != perf_kprobe.type) event 8881 kernel/events/core.c if (has_branch_stack(event)) event 8884 kernel/events/core.c is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; event 8885 kernel/events/core.c err = perf_kprobe_init(event, is_retprobe); event 8889 kernel/events/core.c event->destroy = perf_kprobe_destroy; event 8914 kernel/events/core.c static int perf_uprobe_event_init(struct perf_event *event); event 8926 kernel/events/core.c static int perf_uprobe_event_init(struct perf_event *event) event 8932 kernel/events/core.c if (event->attr.type != perf_uprobe.type) event 8941 kernel/events/core.c if (has_branch_stack(event)) event 8944 kernel/events/core.c is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; event 8945 kernel/events/core.c ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; event 8946 kernel/events/core.c err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); event 8950 kernel/events/core.c event->destroy = perf_uprobe_destroy; event 8967 kernel/events/core.c static void perf_event_free_filter(struct perf_event *event) event 8969 kernel/events/core.c ftrace_profile_free_filter(event); event 8973 kernel/events/core.c static void bpf_overflow_handler(struct perf_event *event, event 8979 kernel/events/core.c .event = event, event 8988 kernel/events/core.c ret = BPF_PROG_RUN(event->prog, &ctx); event 8996 kernel/events/core.c event->orig_overflow_handler(event, data, regs); event 8999 kernel/events/core.c static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) event 9003 kernel/events/core.c if (event->overflow_handler_context) event 9007 kernel/events/core.c if (event->prog) event 9014 kernel/events/core.c event->prog = prog; event 9015 kernel/events/core.c event->orig_overflow_handler = READ_ONCE(event->overflow_handler); event 9016 kernel/events/core.c WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); event 9020 kernel/events/core.c static void perf_event_free_bpf_handler(struct perf_event *event) event 9022 kernel/events/core.c struct bpf_prog *prog = event->prog; event 9027 kernel/events/core.c WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); event 9028 kernel/events/core.c event->prog = 
NULL; event 9032 kernel/events/core.c static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) event 9036 kernel/events/core.c static void perf_event_free_bpf_handler(struct perf_event *event) event 9045 kernel/events/core.c static inline bool perf_event_is_tracing(struct perf_event *event) event 9047 kernel/events/core.c if (event->pmu == &perf_tracepoint) event 9050 kernel/events/core.c if (event->pmu == &perf_kprobe) event 9054 kernel/events/core.c if (event->pmu == &perf_uprobe) event 9060 kernel/events/core.c static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) event 9066 kernel/events/core.c if (!perf_event_is_tracing(event)) event 9067 kernel/events/core.c return perf_event_set_bpf_handler(event, prog_fd); event 9069 kernel/events/core.c is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; event 9070 kernel/events/core.c is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; event 9071 kernel/events/core.c is_syscall_tp = is_syscall_trace_event(event->tp_event); event 9090 kernel/events/core.c !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) { event 9096 kernel/events/core.c int off = trace_event_get_offsets(event->tp_event); event 9104 kernel/events/core.c ret = perf_event_attach_bpf_prog(event, prog); event 9110 kernel/events/core.c static void perf_event_free_bpf_prog(struct perf_event *event) event 9112 kernel/events/core.c if (!perf_event_is_tracing(event)) { event 9113 kernel/events/core.c perf_event_free_bpf_handler(event); event 9116 kernel/events/core.c perf_event_detach_bpf_prog(event); event 9125 kernel/events/core.c static void perf_event_free_filter(struct perf_event *event) event 9129 kernel/events/core.c static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) event 9134 kernel/events/core.c static void perf_event_free_bpf_prog(struct perf_event *event) event 9156 kernel/events/core.c perf_addr_filter_new(struct perf_event *event, struct list_head *filters) event 9158 kernel/events/core.c int node = cpu_to_node(event->cpu == -1 ? 
0 : event->cpu); event 9185 kernel/events/core.c static void perf_addr_filters_splice(struct perf_event *event, event 9191 kernel/events/core.c if (!has_addr_filter(event)) event 9195 kernel/events/core.c if (event->parent) event 9198 kernel/events/core.c raw_spin_lock_irqsave(&event->addr_filters.lock, flags); event 9200 kernel/events/core.c list_splice_init(&event->addr_filters.list, &list); event 9202 kernel/events/core.c list_splice(head, &event->addr_filters.list); event 9204 kernel/events/core.c raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); event 9233 kernel/events/core.c static void perf_event_addr_filters_apply(struct perf_event *event) event 9235 kernel/events/core.c struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); event 9236 kernel/events/core.c struct task_struct *task = READ_ONCE(event->ctx->task); event 9250 kernel/events/core.c mm = get_task_mm(event->ctx->task); event 9264 kernel/events/core.c event->addr_filter_ranges[count].start = 0; event 9265 kernel/events/core.c event->addr_filter_ranges[count].size = 0; event 9267 kernel/events/core.c perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); event 9269 kernel/events/core.c event->addr_filter_ranges[count].start = filter->offset; event 9270 kernel/events/core.c event->addr_filter_ranges[count].size = filter->size; event 9276 kernel/events/core.c event->addr_filters_gen++; event 9286 kernel/events/core.c perf_event_stop(event, 1); event 9340 kernel/events/core.c perf_event_parse_addr_filter(struct perf_event *event, char *fstr, event 9367 kernel/events/core.c filter = perf_addr_filter_new(event, filters); event 9430 kernel/events/core.c if (kernel && event->attr.exclude_kernel) event 9454 kernel/events/core.c if (!event->ctx->task) event 9472 kernel/events/core.c event->addr_filters.nr_file_filters++; event 9498 kernel/events/core.c perf_event_set_addr_filter(struct perf_event *event, char *filter_str) event 9507 kernel/events/core.c lockdep_assert_held(&event->ctx->mutex); event 9509 kernel/events/core.c if (WARN_ON_ONCE(event->parent)) event 9512 kernel/events/core.c ret = perf_event_parse_addr_filter(event, filter_str, &filters); event 9516 kernel/events/core.c ret = event->pmu->addr_filters_validate(&filters); event 9521 kernel/events/core.c perf_addr_filters_splice(event, &filters); event 9524 kernel/events/core.c perf_event_for_each_child(event, perf_event_addr_filters_apply); event 9532 kernel/events/core.c event->addr_filters.nr_file_filters = 0; event 9537 kernel/events/core.c static int perf_event_set_filter(struct perf_event *event, void __user *arg) event 9547 kernel/events/core.c if (perf_event_is_tracing(event)) { event 9548 kernel/events/core.c struct perf_event_context *ctx = event->ctx; event 9562 kernel/events/core.c ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); event 9566 kernel/events/core.c if (has_addr_filter(event)) event 9567 kernel/events/core.c ret = perf_event_set_addr_filter(event, filter_str); event 9582 kernel/events/core.c struct perf_event *event; event 9585 kernel/events/core.c event = container_of(hrtimer, struct perf_event, hw.hrtimer); event 9587 kernel/events/core.c if (event->state != PERF_EVENT_STATE_ACTIVE) event 9590 kernel/events/core.c event->pmu->read(event); event 9592 kernel/events/core.c perf_sample_data_init(&data, 0, event->hw.last_period); event 9595 kernel/events/core.c if (regs && !perf_exclude_event(event, regs)) { event 9596 kernel/events/core.c if (!(event->attr.exclude_idle && 
is_idle_task(current))) event 9597 kernel/events/core.c if (__perf_event_overflow(event, 1, &data, regs)) event 9601 kernel/events/core.c period = max_t(u64, 10000, event->hw.sample_period); event 9607 kernel/events/core.c static void perf_swevent_start_hrtimer(struct perf_event *event) event 9609 kernel/events/core.c struct hw_perf_event *hwc = &event->hw; event 9612 kernel/events/core.c if (!is_sampling_event(event)) event 9628 kernel/events/core.c static void perf_swevent_cancel_hrtimer(struct perf_event *event) event 9630 kernel/events/core.c struct hw_perf_event *hwc = &event->hw; event 9632 kernel/events/core.c if (is_sampling_event(event)) { event 9640 kernel/events/core.c static void perf_swevent_init_hrtimer(struct perf_event *event) event 9642 kernel/events/core.c struct hw_perf_event *hwc = &event->hw; event 9644 kernel/events/core.c if (!is_sampling_event(event)) event 9654 kernel/events/core.c if (event->attr.freq) { event 9655 kernel/events/core.c long freq = event->attr.sample_freq; event 9657 kernel/events/core.c event->attr.sample_period = NSEC_PER_SEC / freq; event 9658 kernel/events/core.c hwc->sample_period = event->attr.sample_period; event 9661 kernel/events/core.c event->attr.freq = 0; event 9669 kernel/events/core.c static void cpu_clock_event_update(struct perf_event *event) event 9675 kernel/events/core.c prev = local64_xchg(&event->hw.prev_count, now); event 9676 kernel/events/core.c local64_add(now - prev, &event->count); event 9679 kernel/events/core.c static void cpu_clock_event_start(struct perf_event *event, int flags) event 9681 kernel/events/core.c local64_set(&event->hw.prev_count, local_clock()); event 9682 kernel/events/core.c perf_swevent_start_hrtimer(event); event 9685 kernel/events/core.c static void cpu_clock_event_stop(struct perf_event *event, int flags) event 9687 kernel/events/core.c perf_swevent_cancel_hrtimer(event); event 9688 kernel/events/core.c cpu_clock_event_update(event); event 9691 kernel/events/core.c static int cpu_clock_event_add(struct perf_event *event, int flags) event 9694 kernel/events/core.c cpu_clock_event_start(event, flags); event 9695 kernel/events/core.c perf_event_update_userpage(event); event 9700 kernel/events/core.c static void cpu_clock_event_del(struct perf_event *event, int flags) event 9702 kernel/events/core.c cpu_clock_event_stop(event, flags); event 9705 kernel/events/core.c static void cpu_clock_event_read(struct perf_event *event) event 9707 kernel/events/core.c cpu_clock_event_update(event); event 9710 kernel/events/core.c static int cpu_clock_event_init(struct perf_event *event) event 9712 kernel/events/core.c if (event->attr.type != PERF_TYPE_SOFTWARE) event 9715 kernel/events/core.c if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) event 9721 kernel/events/core.c if (has_branch_stack(event)) event 9724 kernel/events/core.c perf_swevent_init_hrtimer(event); event 9746 kernel/events/core.c static void task_clock_event_update(struct perf_event *event, u64 now) event 9751 kernel/events/core.c prev = local64_xchg(&event->hw.prev_count, now); event 9753 kernel/events/core.c local64_add(delta, &event->count); event 9756 kernel/events/core.c static void task_clock_event_start(struct perf_event *event, int flags) event 9758 kernel/events/core.c local64_set(&event->hw.prev_count, event->ctx->time); event 9759 kernel/events/core.c perf_swevent_start_hrtimer(event); event 9762 kernel/events/core.c static void task_clock_event_stop(struct perf_event *event, int flags) event 9764 kernel/events/core.c 
perf_swevent_cancel_hrtimer(event); event 9765 kernel/events/core.c task_clock_event_update(event, event->ctx->time); event 9768 kernel/events/core.c static int task_clock_event_add(struct perf_event *event, int flags) event 9771 kernel/events/core.c task_clock_event_start(event, flags); event 9772 kernel/events/core.c perf_event_update_userpage(event); event 9777 kernel/events/core.c static void task_clock_event_del(struct perf_event *event, int flags) event 9779 kernel/events/core.c task_clock_event_stop(event, PERF_EF_UPDATE); event 9782 kernel/events/core.c static void task_clock_event_read(struct perf_event *event) event 9785 kernel/events/core.c u64 delta = now - event->ctx->timestamp; event 9786 kernel/events/core.c u64 time = event->ctx->time + delta; event 9788 kernel/events/core.c task_clock_event_update(event, time); event 9791 kernel/events/core.c static int task_clock_event_init(struct perf_event *event) event 9793 kernel/events/core.c if (event->attr.type != PERF_TYPE_SOFTWARE) event 9796 kernel/events/core.c if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) event 9802 kernel/events/core.c if (has_branch_stack(event)) event 9805 kernel/events/core.c perf_swevent_init_hrtimer(event); event 9836 kernel/events/core.c static int perf_event_nop_int(struct perf_event *event, u64 value) event 9878 kernel/events/core.c static int perf_event_idx_default(struct perf_event *event) event 10201 kernel/events/core.c static inline bool has_extended_regs(struct perf_event *event) event 10203 kernel/events/core.c return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || event 10204 kernel/events/core.c (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); event 10207 kernel/events/core.c static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) event 10221 kernel/events/core.c if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { event 10226 kernel/events/core.c ctx = perf_event_ctx_lock_nested(event->group_leader, event 10231 kernel/events/core.c event->pmu = pmu; event 10232 kernel/events/core.c ret = pmu->event_init(event); event 10235 kernel/events/core.c perf_event_ctx_unlock(event->group_leader, ctx); event 10239 kernel/events/core.c has_extended_regs(event)) event 10243 kernel/events/core.c event_has_any_exclude_flag(event)) event 10246 kernel/events/core.c if (ret && event->destroy) event 10247 kernel/events/core.c event->destroy(event); event 10256 kernel/events/core.c static struct pmu *perf_init_event(struct perf_event *event) event 10265 kernel/events/core.c if (event->parent && event->parent->pmu) { event 10266 kernel/events/core.c pmu = event->parent->pmu; event 10267 kernel/events/core.c ret = perf_try_init_event(pmu, event); event 10273 kernel/events/core.c pmu = idr_find(&pmu_idr, event->attr.type); event 10276 kernel/events/core.c ret = perf_try_init_event(pmu, event); event 10283 kernel/events/core.c ret = perf_try_init_event(pmu, event); event 10299 kernel/events/core.c static void attach_sb_event(struct perf_event *event) event 10301 kernel/events/core.c struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); event 10304 kernel/events/core.c list_add_rcu(&event->sb_list, &pel->list); event 10315 kernel/events/core.c static void account_pmu_sb_event(struct perf_event *event) event 10317 kernel/events/core.c if (is_sb_event(event)) event 10318 kernel/events/core.c attach_sb_event(event); event 10321 kernel/events/core.c static void account_event_cpu(struct perf_event *event, int cpu) event 10323 
kernel/events/core.c if (event->parent) event 10326 kernel/events/core.c if (is_cgroup_event(event)) event 10351 kernel/events/core.c static void account_event(struct perf_event *event) event 10355 kernel/events/core.c if (event->parent) event 10358 kernel/events/core.c if (event->attach_state & PERF_ATTACH_TASK) event 10360 kernel/events/core.c if (event->attr.mmap || event->attr.mmap_data) event 10362 kernel/events/core.c if (event->attr.comm) event 10364 kernel/events/core.c if (event->attr.namespaces) event 10366 kernel/events/core.c if (event->attr.task) event 10368 kernel/events/core.c if (event->attr.freq) event 10370 kernel/events/core.c if (event->attr.context_switch) { event 10374 kernel/events/core.c if (has_branch_stack(event)) event 10376 kernel/events/core.c if (is_cgroup_event(event)) event 10378 kernel/events/core.c if (event->attr.ksymbol) event 10380 kernel/events/core.c if (event->attr.bpf_event) event 10411 kernel/events/core.c account_event_cpu(event, event->cpu); event 10413 kernel/events/core.c account_pmu_sb_event(event); event 10428 kernel/events/core.c struct perf_event *event; event 10437 kernel/events/core.c event = kzalloc(sizeof(*event), GFP_KERNEL); event 10438 kernel/events/core.c if (!event) event 10446 kernel/events/core.c group_leader = event; event 10448 kernel/events/core.c mutex_init(&event->child_mutex); event 10449 kernel/events/core.c INIT_LIST_HEAD(&event->child_list); event 10451 kernel/events/core.c INIT_LIST_HEAD(&event->event_entry); event 10452 kernel/events/core.c INIT_LIST_HEAD(&event->sibling_list); event 10453 kernel/events/core.c INIT_LIST_HEAD(&event->active_list); event 10454 kernel/events/core.c init_event_group(event); event 10455 kernel/events/core.c INIT_LIST_HEAD(&event->rb_entry); event 10456 kernel/events/core.c INIT_LIST_HEAD(&event->active_entry); event 10457 kernel/events/core.c INIT_LIST_HEAD(&event->addr_filters.list); event 10458 kernel/events/core.c INIT_HLIST_NODE(&event->hlist_entry); event 10461 kernel/events/core.c init_waitqueue_head(&event->waitq); event 10462 kernel/events/core.c event->pending_disable = -1; event 10463 kernel/events/core.c init_irq_work(&event->pending, perf_pending_event); event 10465 kernel/events/core.c mutex_init(&event->mmap_mutex); event 10466 kernel/events/core.c raw_spin_lock_init(&event->addr_filters.lock); event 10468 kernel/events/core.c atomic_long_set(&event->refcount, 1); event 10469 kernel/events/core.c event->cpu = cpu; event 10470 kernel/events/core.c event->attr = *attr; event 10471 kernel/events/core.c event->group_leader = group_leader; event 10472 kernel/events/core.c event->pmu = NULL; event 10473 kernel/events/core.c event->oncpu = -1; event 10475 kernel/events/core.c event->parent = parent_event; event 10477 kernel/events/core.c event->ns = get_pid_ns(task_active_pid_ns(current)); event 10478 kernel/events/core.c event->id = atomic64_inc_return(&perf_event_id); event 10480 kernel/events/core.c event->state = PERF_EVENT_STATE_INACTIVE; event 10483 kernel/events/core.c event->attach_state = PERF_ATTACH_TASK; event 10489 kernel/events/core.c event->hw.target = get_task_struct(task); event 10492 kernel/events/core.c event->clock = &local_clock; event 10494 kernel/events/core.c event->clock = parent_event->clock; event 10507 kernel/events/core.c event->prog = prog; event 10508 kernel/events/core.c event->orig_overflow_handler = event 10515 kernel/events/core.c event->overflow_handler = overflow_handler; event 10516 kernel/events/core.c event->overflow_handler_context = context; 
event 10517 kernel/events/core.c } else if (is_write_backward(event)){ event 10518 kernel/events/core.c event->overflow_handler = perf_event_output_backward; event 10519 kernel/events/core.c event->overflow_handler_context = NULL; event 10521 kernel/events/core.c event->overflow_handler = perf_event_output_forward; event 10522 kernel/events/core.c event->overflow_handler_context = NULL; event 10525 kernel/events/core.c perf_event__state_init(event); event 10529 kernel/events/core.c hwc = &event->hw; event 10544 kernel/events/core.c if (!has_branch_stack(event)) event 10545 kernel/events/core.c event->attr.branch_sample_type = 0; event 10548 kernel/events/core.c err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); event 10553 kernel/events/core.c pmu = perf_init_event(event); event 10568 kernel/events/core.c if (event->attr.aux_output && event 10574 kernel/events/core.c err = exclusive_event_init(event); event 10578 kernel/events/core.c if (has_addr_filter(event)) { event 10579 kernel/events/core.c event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, event 10582 kernel/events/core.c if (!event->addr_filter_ranges) { event 10591 kernel/events/core.c if (event->parent) { event 10592 kernel/events/core.c struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); event 10595 kernel/events/core.c memcpy(event->addr_filter_ranges, event 10596 kernel/events/core.c event->parent->addr_filter_ranges, event 10602 kernel/events/core.c event->addr_filters_gen = 1; event 10605 kernel/events/core.c if (!event->parent) { event 10606 kernel/events/core.c if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { event 10614 kernel/events/core.c account_event(event); event 10616 kernel/events/core.c return event; event 10619 kernel/events/core.c kfree(event->addr_filter_ranges); event 10622 kernel/events/core.c exclusive_event_destroy(event); event 10625 kernel/events/core.c if (event->destroy) event 10626 kernel/events/core.c event->destroy(event); event 10629 kernel/events/core.c if (is_cgroup_event(event)) event 10630 kernel/events/core.c perf_detach_cgroup(event); event 10631 kernel/events/core.c if (event->ns) event 10632 kernel/events/core.c put_pid_ns(event->ns); event 10633 kernel/events/core.c if (event->hw.target) event 10634 kernel/events/core.c put_task_struct(event->hw.target); event 10635 kernel/events/core.c kfree(event); event 10747 kernel/events/core.c perf_event_set_output(struct perf_event *event, struct perf_event *output_event) event 10756 kernel/events/core.c if (event == output_event) event 10762 kernel/events/core.c if (output_event->cpu != event->cpu) event 10768 kernel/events/core.c if (output_event->cpu == -1 && output_event->ctx != event->ctx) event 10774 kernel/events/core.c if (output_event->clock != event->clock) event 10781 kernel/events/core.c if (is_write_backward(output_event) != is_write_backward(event)) event 10787 kernel/events/core.c if (has_aux(event) && has_aux(output_event) && event 10788 kernel/events/core.c event->pmu != output_event->pmu) event 10792 kernel/events/core.c mutex_lock(&event->mmap_mutex); event 10794 kernel/events/core.c if (atomic_read(&event->mmap_count)) event 10804 kernel/events/core.c ring_buffer_attach(event, rb); event 10808 kernel/events/core.c mutex_unlock(&event->mmap_mutex); event 10823 kernel/events/core.c static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) event 10829 kernel/events/core.c event->clock = &ktime_get_mono_fast_ns; event 10834 kernel/events/core.c event->clock = 
&ktime_get_raw_fast_ns; event 10839 kernel/events/core.c event->clock = &ktime_get_real_ns; event 10843 kernel/events/core.c event->clock = &ktime_get_boottime_ns; event 10847 kernel/events/core.c event->clock = &ktime_get_clocktai_ns; event 10854 kernel/events/core.c if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) event 10904 kernel/events/core.c struct perf_event *event, *sibling; event 11017 kernel/events/core.c event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, event 11019 kernel/events/core.c if (IS_ERR(event)) { event 11020 kernel/events/core.c err = PTR_ERR(event); event 11024 kernel/events/core.c if (is_sampling_event(event)) { event 11025 kernel/events/core.c if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { event 11035 kernel/events/core.c pmu = event->pmu; event 11038 kernel/events/core.c err = perf_event_set_clock(event, attr.clockid); event 11044 kernel/events/core.c event->event_caps |= PERF_EV_CAP_SOFTWARE; event 11047 kernel/events/core.c if (is_software_event(event) && event 11058 kernel/events/core.c } else if (!is_software_event(event) && event 11073 kernel/events/core.c ctx = find_get_context(pmu, task, event); event 11093 kernel/events/core.c if (group_leader->clock != event->clock) event 11101 kernel/events/core.c if (group_leader->cpu != event->cpu) event 11127 kernel/events/core.c err = perf_event_set_output(event, output_event); event 11132 kernel/events/core.c event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, event 11187 kernel/events/core.c if (!perf_event_validate_size(event)) { event 11208 kernel/events/core.c if (event->attr.aux_output && !perf_get_aux_event(event, group_leader)) { event 11217 kernel/events/core.c if (!exclusive_event_installable(event, ctx)) { event 11280 kernel/events/core.c perf_event__header_size(event); event 11281 kernel/events/core.c perf_event__id_header_size(event); event 11283 kernel/events/core.c event->owner = current; event 11285 kernel/events/core.c perf_install_in_context(ctx, event, event->cpu); event 11298 kernel/events/core.c list_add_tail(&event->owner_entry, &current->perf_event_list); event 11326 kernel/events/core.c free_event(event); event 11354 kernel/events/core.c struct perf_event *event; event 11364 kernel/events/core.c event = perf_event_alloc(attr, cpu, task, NULL, NULL, event 11366 kernel/events/core.c if (IS_ERR(event)) { event 11367 kernel/events/core.c err = PTR_ERR(event); event 11372 kernel/events/core.c event->owner = TASK_TOMBSTONE; event 11377 kernel/events/core.c ctx = find_get_context(event->pmu, task, event); event 11405 kernel/events/core.c if (!exclusive_event_installable(event, ctx)) { event 11410 kernel/events/core.c perf_install_in_context(ctx, event, event->cpu); event 11414 kernel/events/core.c return event; event 11421 kernel/events/core.c free_event(event); event 11431 kernel/events/core.c struct perf_event *event, *tmp; event 11442 kernel/events/core.c list_for_each_entry_safe(event, tmp, &src_ctx->event_list, event 11444 kernel/events/core.c perf_remove_from_context(event, 0); event 11445 kernel/events/core.c unaccount_event_cpu(event, src_cpu); event 11447 kernel/events/core.c list_add(&event->migrate_entry, &events); event 11463 kernel/events/core.c list_for_each_entry_safe(event, tmp, &events, migrate_entry) { event 11464 kernel/events/core.c if (event->group_leader == event) event 11467 kernel/events/core.c list_del(&event->migrate_entry); event 11468 kernel/events/core.c if (event->state >= PERF_EVENT_STATE_OFF) event 11469 
kernel/events/core.c event->state = PERF_EVENT_STATE_INACTIVE; event 11470 kernel/events/core.c account_event_cpu(event, dst_cpu); event 11471 kernel/events/core.c perf_install_in_context(dst_ctx, event, dst_cpu); event 11479 kernel/events/core.c list_for_each_entry_safe(event, tmp, &events, migrate_entry) { event 11480 kernel/events/core.c list_del(&event->migrate_entry); event 11481 kernel/events/core.c if (event->state >= PERF_EVENT_STATE_OFF) event 11482 kernel/events/core.c event->state = PERF_EVENT_STATE_INACTIVE; event 11483 kernel/events/core.c account_event_cpu(event, dst_cpu); event 11484 kernel/events/core.c perf_install_in_context(dst_ctx, event, dst_cpu); event 11639 kernel/events/core.c struct perf_event *event, *tmp; event 11643 kernel/events/core.c list_for_each_entry_safe(event, tmp, &child->perf_event_list, event 11645 kernel/events/core.c list_del_init(&event->owner_entry); event 11652 kernel/events/core.c smp_store_release(&event->owner, NULL); event 11668 kernel/events/core.c static void perf_free_event(struct perf_event *event, event 11671 kernel/events/core.c struct perf_event *parent = event->parent; event 11677 kernel/events/core.c list_del_init(&event->child_list); event 11683 kernel/events/core.c perf_group_detach(event); event 11684 kernel/events/core.c list_del_event(event, ctx); event 11686 kernel/events/core.c free_event(event); event 11699 kernel/events/core.c struct perf_event *event, *tmp; event 11720 kernel/events/core.c list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) event 11721 kernel/events/core.c perf_free_event(event, ctx); event 11774 kernel/events/core.c const struct perf_event_attr *perf_event_attrs(struct perf_event *event) event 11776 kernel/events/core.c if (!event) event 11779 kernel/events/core.c return &event->attr; event 11950 kernel/events/core.c inherit_task_group(struct perf_event *event, struct task_struct *parent, event 11958 kernel/events/core.c if (!event->attr.inherit) { event 11978 kernel/events/core.c ret = inherit_group(event, parent, parent_ctx, event 11994 kernel/events/core.c struct perf_event *event; event 12028 kernel/events/core.c perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { event 12029 kernel/events/core.c ret = inherit_task_group(event, parent, parent_ctx, event 12044 kernel/events/core.c perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { event 12045 kernel/events/core.c ret = inherit_task_group(event, parent, parent_ctx, event 12149 kernel/events/core.c struct perf_event *event; event 12153 kernel/events/core.c list_for_each_entry(event, &ctx->event_list, event_entry) event 12154 kernel/events/core.c __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); event 595 kernel/events/hw_breakpoint.c static void bp_perf_event_destroy(struct perf_event *event) event 597 kernel/events/hw_breakpoint.c release_bp_slot(event); event 80 kernel/events/internal.h extern void perf_event_wakeup(struct perf_event *event); event 81 kernel/events/internal.h extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, event 84 kernel/events/internal.h extern struct ring_buffer *ring_buffer_get(struct perf_event *event); event 92 kernel/events/internal.h void perf_event_aux_event(struct perf_event *event, unsigned long head, event 24 kernel/events/ring_buffer.c handle->event->pending_wakeup = 1; event 25 kernel/events/ring_buffer.c irq_work_queue(&handle->event->pending); event 150 kernel/events/ring_buffer.c struct perf_event *event, unsigned int size, event 
166 kernel/events/ring_buffer.c if (event->parent) event 167 kernel/events/ring_buffer.c event = event->parent; event 169 kernel/events/ring_buffer.c rb = rcu_dereference(event->rb); event 180 kernel/events/ring_buffer.c handle->event = event; event 185 kernel/events/ring_buffer.c if (event->attr.sample_id_all) event 186 kernel/events/ring_buffer.c size += event->id_header_size; event 245 kernel/events/ring_buffer.c lost_event.id = event->id; event 249 kernel/events/ring_buffer.c &sample_data, event); event 251 kernel/events/ring_buffer.c perf_event__output_id_sample(event, handle, &sample_data); event 266 kernel/events/ring_buffer.c struct perf_event *event, unsigned int size) event 268 kernel/events/ring_buffer.c return __perf_output_begin(handle, event, size, false); event 272 kernel/events/ring_buffer.c struct perf_event *event, unsigned int size) event 274 kernel/events/ring_buffer.c return __perf_output_begin(handle, event, size, true); event 278 kernel/events/ring_buffer.c struct perf_event *event, unsigned int size) event 281 kernel/events/ring_buffer.c return __perf_output_begin(handle, event, size, event 282 kernel/events/ring_buffer.c unlikely(is_write_backward(event))); event 360 kernel/events/ring_buffer.c struct perf_event *event) event 362 kernel/events/ring_buffer.c struct perf_event *output_event = event; event 409 kernel/events/ring_buffer.c handle->event = event; event 431 kernel/events/ring_buffer.c event->pending_disable = smp_processor_id(); event 446 kernel/events/ring_buffer.c handle->event = NULL; event 507 kernel/events/ring_buffer.c perf_event_aux_event(handle->event, aux_head, size, event 516 kernel/events/ring_buffer.c handle->event->pending_disable = smp_processor_id(); event 520 kernel/events/ring_buffer.c handle->event = NULL; event 558 kernel/events/ring_buffer.c if (!handle->event) event 629 kernel/events/ring_buffer.c int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, event 633 kernel/events/ring_buffer.c int node = (event->cpu == -1) ? 
-1 : cpu_to_node(event->cpu); event 636 kernel/events/ring_buffer.c if (!has_aux(event)) event 661 kernel/events/ring_buffer.c rb->free_aux = event->pmu->free_aux; event 682 kernel/events/ring_buffer.c if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) && event 690 kernel/events/ring_buffer.c rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages, event 54 kernel/gcov/base.c static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, event 61 kernel/gcov/base.c if (event != MODULE_STATE_GOING) event 77 kernel/hung_task.c hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr) event 37 kernel/locking/lock_events.h static inline void __lockevent_inc(enum lock_events event, bool cond) event 40 kernel/locking/lock_events.h raw_cpu_inc(lockevents[event]); event 46 kernel/locking/lock_events.h static inline void __lockevent_add(enum lock_events event, int inc) event 48 kernel/locking/lock_events.h raw_cpu_add(lockevents[event], inc); event 538 kernel/sched/psi.c if (cmpxchg(&t->event, 0, 1) == 0) event 1046 kernel/sched/psi.c t->event = 0; event 1183 kernel/sched/psi.c if (cmpxchg(&t->event, 1, 0) == 1) event 405 kernel/time/posix-timers.c static struct pid *good_sigevent(sigevent_t * event) event 410 kernel/time/posix-timers.c switch (event->sigev_notify) { event 412 kernel/time/posix-timers.c pid = find_vpid(event->sigev_notify_thread_id); event 419 kernel/time/posix-timers.c if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX) event 472 kernel/time/posix-timers.c static int do_timer_create(clockid_t which_clock, struct sigevent *event, event 502 kernel/time/posix-timers.c if (event) { event 504 kernel/time/posix-timers.c new_timer->it_pid = get_pid(good_sigevent(event)); event 510 kernel/time/posix-timers.c new_timer->it_sigev_notify = event->sigev_notify; event 511 kernel/time/posix-timers.c new_timer->sigq->info.si_signo = event->sigev_signo; event 512 kernel/time/posix-timers.c new_timer->sigq->info.si_value = event->sigev_value; event 556 kernel/time/posix-timers.c sigevent_t event; event 558 kernel/time/posix-timers.c if (copy_from_user(&event, timer_event_spec, sizeof (event))) event 560 kernel/time/posix-timers.c return do_timer_create(which_clock, &event, created_timer_id); event 571 kernel/time/posix-timers.c sigevent_t event; event 573 kernel/time/posix-timers.c if (get_compat_sigevent(&event, timer_event_spec)) event 575 kernel/time/posix-timers.c return do_timer_create(which_clock, &event, created_timer_id); event 71 kernel/trace/blktrace.c struct ring_buffer_event *event = NULL; event 81 kernel/trace/blktrace.c event = trace_buffer_lock_reserve(buffer, TRACE_BLK, event 84 kernel/trace/blktrace.c if (!event) event 86 kernel/trace/blktrace.c t = ring_buffer_event_data(event); event 108 kernel/trace/blktrace.c trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); event 218 kernel/trace/blktrace.c struct ring_buffer_event *event = NULL; event 254 kernel/trace/blktrace.c event = trace_buffer_lock_reserve(buffer, TRACE_BLK, event 257 kernel/trace/blktrace.c if (!event) event 259 kernel/trace/blktrace.c t = ring_buffer_event_data(event); event 302 kernel/trace/blktrace.c trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); event 1538 kernel/trace/blktrace.c int flags, struct trace_event *event) event 1560 kernel/trace/blktrace.c struct trace_event *event) event 366 kernel/trace/bpf_trace.c return perf_event_read_local(ee->event, value, enabled, running); event 427 kernel/trace/bpf_trace.c struct perf_event 
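[Illustrative sketch, not part of the indexed sources] The kernel/time/posix-timers.c hits above (good_sigevent, do_timer_create) validate a sigevent copied in from user space, rejecting sigev_signo values outside 1..SIGRTMAX. A minimal user-space sketch of such a sigevent follows; the clock, signal number, payload, and interval are assumptions for illustration (link with -lrt on older glibc).

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *uc)
{
    (void)sig; (void)si; (void)uc;
    write(STDOUT_FILENO, "tick\n", 5);   /* si->si_value carries sigev_value */
}

int main(void)
{
    struct itimerspec its = { .it_value.tv_sec = 1, .it_interval.tv_sec = 1 };
    struct sigaction sa;
    struct sigevent sev;
    timer_t timerid;

    memset(&sa, 0, sizeof(sa));
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = handler;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGRTMIN, &sa, NULL);

    memset(&sev, 0, sizeof(sev));
    sev.sigev_notify = SIGEV_SIGNAL;     /* accepted by good_sigevent()       */
    sev.sigev_signo = SIGRTMIN;          /* must satisfy 0 < signo <= SIGRTMAX */
    sev.sigev_value.sival_int = 42;      /* delivered as si_value              */

    if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) < 0) {
        perror("timer_create");
        return 1;
    }
    timer_settime(timerid, 0, &its, NULL);
    pause();
    return 0;
}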
*event; event 438 kernel/trace/bpf_trace.c event = ee->event; event 439 kernel/trace/bpf_trace.c if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || event 440 kernel/trace/bpf_trace.c event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) event 443 kernel/trace/bpf_trace.c if (unlikely(event->oncpu != cpu)) event 446 kernel/trace/bpf_trace.c return perf_event_output(event, sd, regs); event 902 kernel/trace/bpf_trace.c err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, event 1194 kernel/trace/bpf_trace.c int perf_event_attach_bpf_prog(struct perf_event *event, event 1206 kernel/trace/bpf_trace.c (!trace_kprobe_on_func_entry(event->tp_event) || event 1207 kernel/trace/bpf_trace.c !trace_kprobe_error_injectable(event->tp_event))) event 1212 kernel/trace/bpf_trace.c if (event->prog) event 1215 kernel/trace/bpf_trace.c old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); event 1227 kernel/trace/bpf_trace.c event->prog = prog; event 1228 kernel/trace/bpf_trace.c rcu_assign_pointer(event->tp_event->prog_array, new_array); event 1236 kernel/trace/bpf_trace.c void perf_event_detach_bpf_prog(struct perf_event *event) event 1244 kernel/trace/bpf_trace.c if (!event->prog) event 1247 kernel/trace/bpf_trace.c old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); event 1248 kernel/trace/bpf_trace.c ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array); event 1252 kernel/trace/bpf_trace.c bpf_prog_array_delete_safe(old_array, event->prog); event 1254 kernel/trace/bpf_trace.c rcu_assign_pointer(event->tp_event->prog_array, new_array); event 1258 kernel/trace/bpf_trace.c bpf_prog_put(event->prog); event 1259 kernel/trace/bpf_trace.c event->prog = NULL; event 1265 kernel/trace/bpf_trace.c int perf_event_query_prog_array(struct perf_event *event, void __user *info) event 1275 kernel/trace/bpf_trace.c if (event->attr.type != PERF_TYPE_TRACEPOINT) event 1294 kernel/trace/bpf_trace.c progs = bpf_event_rcu_dereference(event->tp_event->prog_array); event 1411 kernel/trace/bpf_trace.c int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, event 1419 kernel/trace/bpf_trace.c prog = event->prog; event 1428 kernel/trace/bpf_trace.c flags = event->tp_event->flags; event 1430 kernel/trace/bpf_trace.c is_syscall_tp = is_syscall_trace_event(event->tp_event); event 1433 kernel/trace/bpf_trace.c *buf = is_tracepoint ? 
event->tp_event->tp->name event 1434 kernel/trace/bpf_trace.c : event->tp_event->name; event 1443 kernel/trace/bpf_trace.c err = bpf_get_kprobe_info(event, fd_type, buf, event 1445 kernel/trace/bpf_trace.c event->attr.type == PERF_TYPE_TRACEPOINT); event 1449 kernel/trace/bpf_trace.c err = bpf_get_uprobe_info(event, fd_type, buf, event 1451 kernel/trace/bpf_trace.c event->attr.type == PERF_TYPE_TRACEPOINT); event 142 kernel/trace/ring_buffer.c #define skip_time_extend(event) \ event 143 kernel/trace/ring_buffer.c ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND)) event 145 kernel/trace/ring_buffer.c #define extended_time(event) \ event 146 kernel/trace/ring_buffer.c (event->type_len >= RINGBUF_TYPE_TIME_EXTEND) event 148 kernel/trace/ring_buffer.c static inline int rb_null_event(struct ring_buffer_event *event) event 150 kernel/trace/ring_buffer.c return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; event 153 kernel/trace/ring_buffer.c static void rb_event_set_padding(struct ring_buffer_event *event) event 156 kernel/trace/ring_buffer.c event->type_len = RINGBUF_TYPE_PADDING; event 157 kernel/trace/ring_buffer.c event->time_delta = 0; event 161 kernel/trace/ring_buffer.c rb_event_data_length(struct ring_buffer_event *event) event 165 kernel/trace/ring_buffer.c if (event->type_len) event 166 kernel/trace/ring_buffer.c length = event->type_len * RB_ALIGNMENT; event 168 kernel/trace/ring_buffer.c length = event->array[0]; event 178 kernel/trace/ring_buffer.c rb_event_length(struct ring_buffer_event *event) event 180 kernel/trace/ring_buffer.c switch (event->type_len) { event 182 kernel/trace/ring_buffer.c if (rb_null_event(event)) event 185 kernel/trace/ring_buffer.c return event->array[0] + RB_EVNT_HDR_SIZE; event 194 kernel/trace/ring_buffer.c return rb_event_data_length(event); event 207 kernel/trace/ring_buffer.c rb_event_ts_length(struct ring_buffer_event *event) event 211 kernel/trace/ring_buffer.c if (extended_time(event)) { event 214 kernel/trace/ring_buffer.c event = skip_time_extend(event); event 216 kernel/trace/ring_buffer.c return len + rb_event_length(event); event 229 kernel/trace/ring_buffer.c unsigned ring_buffer_event_length(struct ring_buffer_event *event) event 233 kernel/trace/ring_buffer.c if (extended_time(event)) event 234 kernel/trace/ring_buffer.c event = skip_time_extend(event); event 236 kernel/trace/ring_buffer.c length = rb_event_length(event); event 237 kernel/trace/ring_buffer.c if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) event 240 kernel/trace/ring_buffer.c if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) event 241 kernel/trace/ring_buffer.c length -= sizeof(event->array[0]); event 248 kernel/trace/ring_buffer.c rb_event_data(struct ring_buffer_event *event) event 250 kernel/trace/ring_buffer.c if (extended_time(event)) event 251 kernel/trace/ring_buffer.c event = skip_time_extend(event); event 252 kernel/trace/ring_buffer.c BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); event 254 kernel/trace/ring_buffer.c if (event->type_len) event 255 kernel/trace/ring_buffer.c return (void *)&event->array[0]; event 257 kernel/trace/ring_buffer.c return (void *)&event->array[1]; event 264 kernel/trace/ring_buffer.c void *ring_buffer_event_data(struct ring_buffer_event *event) event 266 kernel/trace/ring_buffer.c return rb_event_data(event); event 287 kernel/trace/ring_buffer.c u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event) event 291 kernel/trace/ring_buffer.c ts = event->array[0]; event 293 
kernel/trace/ring_buffer.c ts += event->time_delta; event 1941 kernel/trace/ring_buffer.c rb_event_index(struct ring_buffer_event *event) event 1943 kernel/trace/ring_buffer.c unsigned long addr = (unsigned long)event; event 2133 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 2153 kernel/trace/ring_buffer.c event = __rb_page_index(tail_page, tail); event 2180 kernel/trace/ring_buffer.c rb_event_set_padding(event); event 2188 kernel/trace/ring_buffer.c event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; event 2189 kernel/trace/ring_buffer.c event->type_len = RINGBUF_TYPE_PADDING; event 2191 kernel/trace/ring_buffer.c event->time_delta = 1; event 2308 kernel/trace/ring_buffer.c rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) event 2311 kernel/trace/ring_buffer.c event->type_len = RINGBUF_TYPE_TIME_STAMP; event 2313 kernel/trace/ring_buffer.c event->type_len = RINGBUF_TYPE_TIME_EXTEND; event 2316 kernel/trace/ring_buffer.c if (abs || rb_event_index(event)) { event 2317 kernel/trace/ring_buffer.c event->time_delta = delta & TS_MASK; event 2318 kernel/trace/ring_buffer.c event->array[0] = delta >> TS_SHIFT; event 2321 kernel/trace/ring_buffer.c event->time_delta = 0; event 2322 kernel/trace/ring_buffer.c event->array[0] = 0; event 2325 kernel/trace/ring_buffer.c return skip_time_extend(event); event 2329 kernel/trace/ring_buffer.c struct ring_buffer_event *event); event 2344 kernel/trace/ring_buffer.c struct ring_buffer_event *event, event 2351 kernel/trace/ring_buffer.c if (unlikely(!rb_event_is_commit(cpu_buffer, event))) event 2361 kernel/trace/ring_buffer.c event = rb_add_time_stamp(event, info->delta, abs); event 2366 kernel/trace/ring_buffer.c event->time_delta = delta; event 2369 kernel/trace/ring_buffer.c event->type_len = 0; event 2370 kernel/trace/ring_buffer.c event->array[0] = length; event 2372 kernel/trace/ring_buffer.c event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); event 2377 kernel/trace/ring_buffer.c struct ring_buffer_event event; /* Used only for sizeof array */ event 2384 kernel/trace/ring_buffer.c length += sizeof(event.array[0]); event 2416 kernel/trace/ring_buffer.c struct ring_buffer_event *event) event 2423 kernel/trace/ring_buffer.c new_index = rb_event_index(event); event 2424 kernel/trace/ring_buffer.c old_index = new_index + rb_event_ts_length(event); event 2425 kernel/trace/ring_buffer.c addr = (unsigned long)event; event 2433 kernel/trace/ring_buffer.c unsigned long event_length = rb_event_length(event); event 2547 kernel/trace/ring_buffer.c static inline void rb_event_discard(struct ring_buffer_event *event) event 2549 kernel/trace/ring_buffer.c if (extended_time(event)) event 2550 kernel/trace/ring_buffer.c event = skip_time_extend(event); event 2553 kernel/trace/ring_buffer.c event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; event 2554 kernel/trace/ring_buffer.c event->type_len = RINGBUF_TYPE_PADDING; event 2556 kernel/trace/ring_buffer.c if (!event->time_delta) event 2557 kernel/trace/ring_buffer.c event->time_delta = 1; event 2562 kernel/trace/ring_buffer.c struct ring_buffer_event *event) event 2564 kernel/trace/ring_buffer.c unsigned long addr = (unsigned long)event; event 2567 kernel/trace/ring_buffer.c index = rb_event_index(event); event 2576 kernel/trace/ring_buffer.c struct ring_buffer_event *event) event 2584 kernel/trace/ring_buffer.c if (rb_event_is_commit(cpu_buffer, event)) { event 2589 kernel/trace/ring_buffer.c if (!rb_event_index(event)) event 2592 
kernel/trace/ring_buffer.c else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { event 2593 kernel/trace/ring_buffer.c delta = ring_buffer_event_time_stamp(event); event 2595 kernel/trace/ring_buffer.c } else if (event->type_len == RINGBUF_TYPE_TIME_STAMP) { event 2596 kernel/trace/ring_buffer.c delta = ring_buffer_event_time_stamp(event); event 2599 kernel/trace/ring_buffer.c cpu_buffer->write_stamp += event->time_delta; event 2604 kernel/trace/ring_buffer.c struct ring_buffer_event *event) event 2607 kernel/trace/ring_buffer.c rb_update_write_stamp(cpu_buffer, event); event 2779 kernel/trace/ring_buffer.c struct ring_buffer_event *event) event 2786 kernel/trace/ring_buffer.c rb_commit(cpu_buffer, event); event 2819 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 2852 kernel/trace/ring_buffer.c event = __rb_page_index(tail_page, tail); event 2853 kernel/trace/ring_buffer.c rb_update_event(cpu_buffer, event, info); event 2867 kernel/trace/ring_buffer.c return event; event 2875 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 2930 kernel/trace/ring_buffer.c event = __rb_reserve_next(cpu_buffer, &info); event 2932 kernel/trace/ring_buffer.c if (unlikely(PTR_ERR(event) == -EAGAIN)) { event 2938 kernel/trace/ring_buffer.c if (!event) event 2941 kernel/trace/ring_buffer.c return event; event 2967 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 2992 kernel/trace/ring_buffer.c event = rb_reserve_next_event(buffer, cpu_buffer, length); event 2993 kernel/trace/ring_buffer.c if (!event) event 2996 kernel/trace/ring_buffer.c return event; event 3014 kernel/trace/ring_buffer.c struct ring_buffer_event *event) event 3016 kernel/trace/ring_buffer.c unsigned long addr = (unsigned long)event; event 3066 kernel/trace/ring_buffer.c struct ring_buffer_event *event) event 3072 kernel/trace/ring_buffer.c rb_event_discard(event); event 3084 kernel/trace/ring_buffer.c rb_decrement_entry(cpu_buffer, event); event 3085 kernel/trace/ring_buffer.c if (rb_try_to_discard(cpu_buffer, event)) event 3092 kernel/trace/ring_buffer.c rb_update_write_stamp(cpu_buffer, event); event 3121 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 3147 kernel/trace/ring_buffer.c event = rb_reserve_next_event(buffer, cpu_buffer, length); event 3148 kernel/trace/ring_buffer.c if (!event) event 3151 kernel/trace/ring_buffer.c body = rb_event_data(event); event 3155 kernel/trace/ring_buffer.c rb_commit(cpu_buffer, event); event 3613 kernel/trace/ring_buffer.c struct ring_buffer_event *event) event 3617 kernel/trace/ring_buffer.c switch (event->type_len) { event 3622 kernel/trace/ring_buffer.c delta = ring_buffer_event_time_stamp(event); event 3627 kernel/trace/ring_buffer.c delta = ring_buffer_event_time_stamp(event); event 3632 kernel/trace/ring_buffer.c cpu_buffer->read_stamp += event->time_delta; event 3643 kernel/trace/ring_buffer.c struct ring_buffer_event *event) event 3647 kernel/trace/ring_buffer.c switch (event->type_len) { event 3652 kernel/trace/ring_buffer.c delta = ring_buffer_event_time_stamp(event); event 3657 kernel/trace/ring_buffer.c delta = ring_buffer_event_time_stamp(event); event 3662 kernel/trace/ring_buffer.c iter->read_stamp += event->time_delta; event 3808 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 3818 kernel/trace/ring_buffer.c event = rb_reader_event(cpu_buffer); event 3820 kernel/trace/ring_buffer.c if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) event 3823 kernel/trace/ring_buffer.c 
rb_update_read_stamp(cpu_buffer, event); event 3825 kernel/trace/ring_buffer.c length = rb_event_length(event); event 3832 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 3848 kernel/trace/ring_buffer.c event = rb_iter_head_event(iter); event 3850 kernel/trace/ring_buffer.c length = rb_event_length(event); event 3861 kernel/trace/ring_buffer.c rb_update_iter_read_stamp(iter, event); event 3880 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 3900 kernel/trace/ring_buffer.c event = rb_reader_event(cpu_buffer); event 3902 kernel/trace/ring_buffer.c switch (event->type_len) { event 3904 kernel/trace/ring_buffer.c if (rb_null_event(event)) event 3914 kernel/trace/ring_buffer.c return event; event 3923 kernel/trace/ring_buffer.c *ts = ring_buffer_event_time_stamp(event); event 3933 kernel/trace/ring_buffer.c *ts = cpu_buffer->read_stamp + event->time_delta; event 3939 kernel/trace/ring_buffer.c return event; event 3954 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 3995 kernel/trace/ring_buffer.c event = rb_iter_head_event(iter); event 3997 kernel/trace/ring_buffer.c switch (event->type_len) { event 3999 kernel/trace/ring_buffer.c if (rb_null_event(event)) { event 4004 kernel/trace/ring_buffer.c return event; event 4013 kernel/trace/ring_buffer.c *ts = ring_buffer_event_time_stamp(event); event 4023 kernel/trace/ring_buffer.c *ts = iter->read_stamp + event->time_delta; event 4027 kernel/trace/ring_buffer.c return event; event 4084 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 4094 kernel/trace/ring_buffer.c event = rb_buffer_peek(cpu_buffer, ts, lost_events); event 4095 kernel/trace/ring_buffer.c if (event && event->type_len == RINGBUF_TYPE_PADDING) event 4100 kernel/trace/ring_buffer.c if (event && event->type_len == RINGBUF_TYPE_PADDING) event 4103 kernel/trace/ring_buffer.c return event; event 4118 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 4123 kernel/trace/ring_buffer.c event = rb_iter_peek(iter, ts); event 4126 kernel/trace/ring_buffer.c if (event && event->type_len == RINGBUF_TYPE_PADDING) event 4129 kernel/trace/ring_buffer.c return event; event 4148 kernel/trace/ring_buffer.c struct ring_buffer_event *event = NULL; event 4163 kernel/trace/ring_buffer.c event = rb_buffer_peek(cpu_buffer, ts, lost_events); event 4164 kernel/trace/ring_buffer.c if (event) { event 4175 kernel/trace/ring_buffer.c if (event && event->type_len == RINGBUF_TYPE_PADDING) event 4178 kernel/trace/ring_buffer.c return event; event 4310 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 4316 kernel/trace/ring_buffer.c event = rb_iter_peek(iter, ts); event 4317 kernel/trace/ring_buffer.c if (!event) event 4320 kernel/trace/ring_buffer.c if (event->type_len == RINGBUF_TYPE_PADDING) event 4327 kernel/trace/ring_buffer.c return event; event 4704 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 4739 kernel/trace/ring_buffer.c event = rb_reader_event(cpu_buffer); event 4768 kernel/trace/ring_buffer.c size = rb_event_ts_length(event); event 4784 kernel/trace/ring_buffer.c size = rb_event_length(event); event 4796 kernel/trace/ring_buffer.c event = rb_reader_event(cpu_buffer); event 4798 kernel/trace/ring_buffer.c size = rb_event_ts_length(event); event 4962 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 4982 kernel/trace/ring_buffer.c event = ring_buffer_lock_reserve(data->buffer, len); event 4983 kernel/trace/ring_buffer.c if (!event) { event 4994 kernel/trace/ring_buffer.c 
event_len = ring_buffer_event_length(event); event 4999 kernel/trace/ring_buffer.c item = ring_buffer_event_data(event); event 5022 kernel/trace/ring_buffer.c ring_buffer_unlock_commit(data->buffer, event); event 5143 kernel/trace/ring_buffer.c struct ring_buffer_event *event; event 5181 kernel/trace/ring_buffer.c while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { event 5183 kernel/trace/ring_buffer.c item = ring_buffer_event_data(event); event 5184 kernel/trace/ring_buffer.c total_len += ring_buffer_event_length(event); event 87 kernel/trace/ring_buffer_benchmark.c struct ring_buffer_event *event; event 91 kernel/trace/ring_buffer_benchmark.c event = ring_buffer_consume(buffer, cpu, &ts, NULL); event 92 kernel/trace/ring_buffer_benchmark.c if (!event) event 95 kernel/trace/ring_buffer_benchmark.c entry = ring_buffer_event_data(event); event 107 kernel/trace/ring_buffer_benchmark.c struct ring_buffer_event *event; event 133 kernel/trace/ring_buffer_benchmark.c event = (void *)&rpage->data[i]; event 134 kernel/trace/ring_buffer_benchmark.c switch (event->type_len) { event 137 kernel/trace/ring_buffer_benchmark.c if (!event->time_delta) event 139 kernel/trace/ring_buffer_benchmark.c inc = event->array[0] + 4; event 145 kernel/trace/ring_buffer_benchmark.c entry = ring_buffer_event_data(event); event 151 kernel/trace/ring_buffer_benchmark.c if (!event->array[0]) { event 155 kernel/trace/ring_buffer_benchmark.c inc = event->array[0] + 4; event 158 kernel/trace/ring_buffer_benchmark.c entry = ring_buffer_event_data(event); event 164 kernel/trace/ring_buffer_benchmark.c inc = ((event->type_len + 1) * 4); event 249 kernel/trace/ring_buffer_benchmark.c struct ring_buffer_event *event; event 254 kernel/trace/ring_buffer_benchmark.c event = ring_buffer_lock_reserve(buffer, 10); event 255 kernel/trace/ring_buffer_benchmark.c if (!event) { event 259 kernel/trace/ring_buffer_benchmark.c entry = ring_buffer_event_data(event); event 261 kernel/trace/ring_buffer_benchmark.c ring_buffer_unlock_commit(buffer, event); event 327 kernel/trace/trace.c struct ring_buffer_event *event) event 331 kernel/trace/trace.c __trace_event_discard_commit(buffer, event); event 759 kernel/trace/trace.c trace_event_setup(struct ring_buffer_event *event, event 762 kernel/trace/trace.c struct trace_entry *ent = ring_buffer_event_data(event); event 773 kernel/trace/trace.c struct ring_buffer_event *event; event 775 kernel/trace/trace.c event = ring_buffer_lock_reserve(buffer, len); event 776 kernel/trace/trace.c if (event != NULL) event 777 kernel/trace/trace.c trace_event_setup(event, type, flags, pc); event 779 kernel/trace/trace.c return event; event 813 kernel/trace/trace.c __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) event 818 kernel/trace/trace.c if (this_cpu_read(trace_buffered_event) == event) { event 820 kernel/trace/trace.c ring_buffer_write(buffer, event->array[0], &event->array[1]); event 824 kernel/trace/trace.c ring_buffer_unlock_commit(buffer, event); event 835 kernel/trace/trace.c struct ring_buffer_event *event; event 854 kernel/trace/trace.c event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, event 856 kernel/trace/trace.c if (!event) event 859 kernel/trace/trace.c entry = ring_buffer_event_data(event); event 871 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 885 kernel/trace/trace.c struct ring_buffer_event *event; event 902 kernel/trace/trace.c event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, event 904 
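[Illustrative sketch, not part of the indexed sources] Mirroring the ring_buffer_benchmark.c calls listed above (ring_buffer_lock_reserve, ring_buffer_event_data, ring_buffer_unlock_commit, ring_buffer_consume), a minimal kernel-side producer/consumer sketch follows. The buffer is assumed to have been created elsewhere (e.g. via ring_buffer_alloc()), and the int payload is illustrative only.

#include <linux/kernel.h>
#include <linux/ring_buffer.h>
#include <linux/smp.h>

/* Reserve space, fill the payload, then commit the entry. */
static void sketch_write_one(struct ring_buffer *buffer)
{
    struct ring_buffer_event *event;
    int *entry;

    event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
    if (!event)
        return;                          /* buffer disabled or full */
    entry = ring_buffer_event_data(event);
    *entry = smp_processor_id();         /* payload: the writing CPU */
    ring_buffer_unlock_commit(buffer, event);
}

/* Drain one CPU's entries, printing length, payload and timestamp. */
static void sketch_drain_cpu(struct ring_buffer *buffer, int cpu)
{
    struct ring_buffer_event *event;
    u64 ts;

    while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL))) {
        int *entry = ring_buffer_event_data(event);

        pr_info("cpu %d: %u bytes, payload %d, ts %llu\n",
                cpu, ring_buffer_event_length(event), *entry, ts);
    }
}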
kernel/trace/trace.c if (!event) event 907 kernel/trace/trace.c entry = ring_buffer_event_data(event); event 911 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 2391 kernel/trace/trace.c struct ring_buffer_event *event; event 2406 kernel/trace/trace.c event = page_address(page); event 2407 kernel/trace/trace.c memset(event, 0, sizeof(*event)); event 2409 kernel/trace/trace.c per_cpu(trace_buffered_event, cpu) = event; event 2531 kernel/trace/trace.c struct trace_event *event; event 2540 kernel/trace/trace.c if (!event_call || !event_call->event.funcs || event 2541 kernel/trace/trace.c !event_call->event.funcs->trace) event 2544 kernel/trace/trace.c event = &fbuffer->trace_file->event_call->event; event 2549 kernel/trace/trace.c event_call->event.funcs->trace(iter, 0, event); event 2595 kernel/trace/trace.c fbuffer->event, fbuffer->entry, event 2611 kernel/trace/trace.c struct ring_buffer_event *event, event 2615 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 2632 kernel/trace/trace.c struct ring_buffer_event *event) event 2634 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 2639 kernel/trace/trace.c struct ring_buffer_event *event) event 2644 kernel/trace/trace.c entry = ring_buffer_event_data(event); event 2645 kernel/trace/trace.c size = ring_buffer_event_length(event); event 2665 kernel/trace/trace.c static void ftrace_exports(struct ring_buffer_event *event) event 2673 kernel/trace/trace.c trace_process_export(export, event); event 2767 kernel/trace/trace.c struct ring_buffer_event *event; event 2770 kernel/trace/trace.c event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), event 2772 kernel/trace/trace.c if (!event) event 2774 kernel/trace/trace.c entry = ring_buffer_event_data(event); event 2778 kernel/trace/trace.c if (!call_filter_check_discard(call, entry, buffer, event)) { event 2780 kernel/trace/trace.c ftrace_exports(event); event 2781 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 2809 kernel/trace/trace.c struct ring_buffer_event *event; event 2858 kernel/trace/trace.c event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, event 2860 kernel/trace/trace.c if (!event) event 2862 kernel/trace/trace.c entry = ring_buffer_event_data(event); event 2867 kernel/trace/trace.c if (!call_filter_check_discard(call, entry, buffer, event)) event 2868 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 2942 kernel/trace/trace.c struct ring_buffer_event *event; event 2965 kernel/trace/trace.c event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, event 2967 kernel/trace/trace.c if (!event) event 2969 kernel/trace/trace.c entry = ring_buffer_event_data(event); event 2975 kernel/trace/trace.c if (!call_filter_check_discard(call, entry, buffer, event)) event 2976 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 3108 kernel/trace/trace.c struct ring_buffer_event *event; event 3139 kernel/trace/trace.c event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, event 3141 kernel/trace/trace.c if (!event) event 3143 kernel/trace/trace.c entry = ring_buffer_event_data(event); event 3148 kernel/trace/trace.c if (!call_filter_check_discard(call, entry, buffer, event)) { event 3149 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 3170 kernel/trace/trace.c struct ring_buffer_event *event; event 3196 kernel/trace/trace.c event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, event 3198 kernel/trace/trace.c if (!event) event 3200 
kernel/trace/trace.c entry = ring_buffer_event_data(event); event 3204 kernel/trace/trace.c if (!call_filter_check_discard(call, entry, buffer, event)) { event 3205 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 3279 kernel/trace/trace.c struct ring_buffer_event *event; event 3283 kernel/trace/trace.c event = ring_buffer_iter_peek(buf_iter, ts); event 3285 kernel/trace/trace.c event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, event 3288 kernel/trace/trace.c if (event) { event 3289 kernel/trace/trace.c iter->ent_size = ring_buffer_event_length(event); event 3290 kernel/trace/trace.c return ring_buffer_event_data(event); event 3410 kernel/trace/trace.c struct ring_buffer_event *event; event 3428 kernel/trace/trace.c while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { event 3729 kernel/trace/trace.c struct trace_event *event; event 3735 kernel/trace/trace.c event = ftrace_find_event(entry->type); event 3747 kernel/trace/trace.c if (event) event 3748 kernel/trace/trace.c return event->funcs->trace(iter, sym_flags, event); event 3760 kernel/trace/trace.c struct trace_event *event; event 3771 kernel/trace/trace.c event = ftrace_find_event(entry->type); event 3772 kernel/trace/trace.c if (event) event 3773 kernel/trace/trace.c return event->funcs->raw(iter, 0, event); event 3786 kernel/trace/trace.c struct trace_event *event; event 3798 kernel/trace/trace.c event = ftrace_find_event(entry->type); event 3799 kernel/trace/trace.c if (event) { event 3800 kernel/trace/trace.c enum print_line_t ret = event->funcs->hex(iter, 0, event); event 3815 kernel/trace/trace.c struct trace_event *event; event 3827 kernel/trace/trace.c event = ftrace_find_event(entry->type); event 3828 kernel/trace/trace.c return event ? event->funcs->binary(iter, 0, event) : event 6384 kernel/trace/trace.c struct ring_buffer_event *event; event 6416 kernel/trace/trace.c event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, event 6418 kernel/trace/trace.c if (unlikely(!event)) event 6422 kernel/trace/trace.c entry = ring_buffer_event_data(event); event 6437 kernel/trace/trace.c tt = event_triggers_call(tr->trace_marker_file, entry, event); event 6446 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 6465 kernel/trace/trace.c struct ring_buffer_event *event; event 6496 kernel/trace/trace.c event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, event 6498 kernel/trace/trace.c if (!event) event 6502 kernel/trace/trace.c entry = ring_buffer_event_data(event); event 6512 kernel/trace/trace.c __buffer_unlock_commit(buffer, event); event 8830 kernel/trace/trace.c unsigned long event, void *unused) event 714 kernel/trace/trace.h struct ring_buffer_event *event); event 1372 kernel/trace/trace.h struct ring_buffer_event *event); event 1376 kernel/trace/trace.h struct ring_buffer_event *event, event 1382 kernel/trace/trace.h struct ring_buffer_event *event, event 1385 kernel/trace/trace.h trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL); event 1395 kernel/trace/trace.h struct ring_buffer_event *event) event 1397 kernel/trace/trace.h if (this_cpu_read(trace_buffered_event) == event) { event 1402 kernel/trace/trace.h ring_buffer_discard_commit(buffer, event); event 1421 kernel/trace/trace.h struct ring_buffer_event *event, event 1428 kernel/trace/trace.h *tt = event_triggers_call(file, entry, event); event 1433 kernel/trace/trace.h __trace_event_discard_commit(buffer, event); event 1456 kernel/trace/trace.h struct ring_buffer_event *event, event 1461 
kernel/trace/trace.h if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) event 1462 kernel/trace/trace.h trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); event 1487 kernel/trace/trace.h struct ring_buffer_event *event, event 1493 kernel/trace/trace.h if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) event 1494 kernel/trace/trace.h trace_buffer_unlock_commit_regs(file->tr, buffer, event, event 1517 kernel/trace/trace.h typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event); event 1590 kernel/trace/trace.h const char *event); event 1593 kernel/trace/trace.h const char *event); event 36 kernel/trace/trace_branch.c struct ring_buffer_event *event; event 64 kernel/trace/trace_branch.c event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH, event 66 kernel/trace/trace_branch.c if (!event) event 69 kernel/trace/trace_branch.c entry = ring_buffer_event_data(event); event 85 kernel/trace/trace_branch.c if (!call_filter_check_discard(call, entry, buffer, event)) event 86 kernel/trace/trace_branch.c trace_buffer_unlock_commit_nostack(buffer, event); event 141 kernel/trace/trace_branch.c int flags, struct trace_event *event) event 37 kernel/trace/trace_dynevent.c char *system = NULL, *event, *p; event 43 kernel/trace/trace_dynevent.c event = &argv[0][2]; event 45 kernel/trace/trace_dynevent.c event = strchr(argv[0], ':'); event 46 kernel/trace/trace_dynevent.c if (!event) event 48 kernel/trace/trace_dynevent.c event++; event 52 kernel/trace/trace_dynevent.c p = strchr(event, '/'); event 54 kernel/trace/trace_dynevent.c system = event; event 55 kernel/trace/trace_dynevent.c event = p + 1; event 58 kernel/trace/trace_dynevent.c if (event[0] == '\0') event 65 kernel/trace/trace_dynevent.c if (!pos->ops->match(system, event, event 46 kernel/trace/trace_dynevent.h bool (*match)(const char *system, const char *event, event 220 kernel/trace/trace_event_perf.c if (tp_event->event.type == event_id && event 433 kernel/trace/trace_event_perf.c struct perf_event *event; event 441 kernel/trace/trace_event_perf.c event = container_of(ops, struct perf_event, ftrace_ops); event 449 kernel/trace/trace_event_perf.c head.first = &event->hlist_entry; event 471 kernel/trace/trace_event_perf.c static int perf_ftrace_function_register(struct perf_event *event) event 473 kernel/trace/trace_event_perf.c struct ftrace_ops *ops = &event->ftrace_ops; event 482 kernel/trace/trace_event_perf.c static int perf_ftrace_function_unregister(struct perf_event *event) event 484 kernel/trace/trace_event_perf.c struct ftrace_ops *ops = &event->ftrace_ops; event 493 kernel/trace/trace_event_perf.c struct perf_event *event = data; event 507 kernel/trace/trace_event_perf.c event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id(); event 510 kernel/trace/trace_event_perf.c event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids; event 222 kernel/trace/trace_events.c id = register_trace_event(&call->event); event 268 kernel/trace/trace_events.c fbuffer->event = event 270 kernel/trace/trace_events.c event_call->event.type, len, event 272 kernel/trace/trace_events.c if (!fbuffer->event) event 275 kernel/trace/trace_events.c fbuffer->entry = ring_buffer_event_data(fbuffer->event); event 733 kernel/trace/trace_events.c const char *sub, const char *event, int set) event 760 kernel/trace/trace_events.c if (event && strcmp(event, name) != 0) event 780 kernel/trace/trace_events.c const char *sub, const char *event, int set) event 785 kernel/trace/trace_events.c ret 
= __ftrace_set_clr_event_nolock(tr, match, sub, event, set); event 793 kernel/trace/trace_events.c char *event = NULL, *sub = NULL, *match; event 811 kernel/trace/trace_events.c event = buf; event 816 kernel/trace/trace_events.c if (!strlen(event) || strcmp(event, "*") == 0) event 817 kernel/trace/trace_events.c event = NULL; event 820 kernel/trace/trace_events.c ret = __ftrace_set_clr_event(tr, match, sub, event, set); event 842 kernel/trace/trace_events.c int trace_set_clr_event(const char *system, const char *event, int set) event 849 kernel/trace/trace_events.c return __ftrace_set_clr_event(tr, NULL, system, event, set); event 1224 kernel/trace/trace_events.c seq_printf(m, "ID: %d\n", call->event.type); event 1981 kernel/trace/trace_events.c if (call->event.type && call->class->reg) event 1983 kernel/trace/trace_events.c (void *)(long)call->event.type, event 2065 kernel/trace/trace_events.c if (call->event.funcs) event 2066 kernel/trace/trace_events.c __unregister_trace_event(&call->event); event 2374 kernel/trace/trace_events.c #define for_each_event(event, start, end) \ event 2375 kernel/trace/trace_events.c for (event = start; \ event 2376 kernel/trace/trace_events.c (unsigned long)event < (unsigned long)end; \ event 2377 kernel/trace/trace_events.c event++) event 2470 kernel/trace/trace_events.c __find_event_file(struct trace_array *tr, const char *system, const char *event) event 2484 kernel/trace/trace_events.c if (strcmp(event, name) == 0 && event 2493 kernel/trace/trace_events.c find_event_file(struct trace_array *tr, const char *system, const char *event) event 2497 kernel/trace/trace_events.c file = __find_event_file(tr, system, event); event 2700 kernel/trace/trace_events.c const char *event; event 2716 kernel/trace/trace_events.c event = strsep(&param, ":"); event 2721 kernel/trace/trace_events.c file = find_event_file(tr, system, event); event 3361 kernel/trace/trace_events.c struct ring_buffer_event *event; event 3379 kernel/trace/trace_events.c event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file, event 3382 kernel/trace/trace_events.c if (!event) event 3384 kernel/trace/trace_events.c entry = ring_buffer_event_data(event); event 3388 kernel/trace/trace_events.c event_trigger_unlock_commit(&event_trace_file, buffer, event, event 593 kernel/trace/trace_events_filter.c static int filter_pred_LT_##type(struct filter_pred *pred, void *event) \ event 595 kernel/trace/trace_events_filter.c type *addr = (type *)(event + pred->offset); \ event 599 kernel/trace/trace_events_filter.c static int filter_pred_LE_##type(struct filter_pred *pred, void *event) \ event 601 kernel/trace/trace_events_filter.c type *addr = (type *)(event + pred->offset); \ event 605 kernel/trace/trace_events_filter.c static int filter_pred_GT_##type(struct filter_pred *pred, void *event) \ event 607 kernel/trace/trace_events_filter.c type *addr = (type *)(event + pred->offset); \ event 611 kernel/trace/trace_events_filter.c static int filter_pred_GE_##type(struct filter_pred *pred, void *event) \ event 613 kernel/trace/trace_events_filter.c type *addr = (type *)(event + pred->offset); \ event 617 kernel/trace/trace_events_filter.c static int filter_pred_BAND_##type(struct filter_pred *pred, void *event) \ event 619 kernel/trace/trace_events_filter.c type *addr = (type *)(event + pred->offset); \ event 632 kernel/trace/trace_events_filter.c static int filter_pred_##size(struct filter_pred *pred, void *event) \ event 634 kernel/trace/trace_events_filter.c u##size *addr = (u##size *)(event +
pred->offset); \ event 658 kernel/trace/trace_events_filter.c static int filter_pred_string(struct filter_pred *pred, void *event) event 660 kernel/trace/trace_events_filter.c char *addr = (char *)(event + pred->offset); event 671 kernel/trace/trace_events_filter.c static int filter_pred_pchar(struct filter_pred *pred, void *event) event 673 kernel/trace/trace_events_filter.c char **addr = (char **)(event + pred->offset); event 694 kernel/trace/trace_events_filter.c static int filter_pred_strloc(struct filter_pred *pred, void *event) event 696 kernel/trace/trace_events_filter.c u32 str_item = *(u32 *)(event + pred->offset); event 699 kernel/trace/trace_events_filter.c char *addr = (char *)(event + str_loc); event 710 kernel/trace/trace_events_filter.c static int filter_pred_cpu(struct filter_pred *pred, void *event) event 736 kernel/trace/trace_events_filter.c static int filter_pred_comm(struct filter_pred *pred, void *event) event 745 kernel/trace/trace_events_filter.c static int filter_pred_none(struct filter_pred *pred, void *event) event 1893 kernel/trace/trace_events_filter.c void ftrace_profile_free_filter(struct perf_event *event) event 1895 kernel/trace/trace_events_filter.c struct event_filter *filter = event->filter; event 1897 kernel/trace/trace_events_filter.c event->filter = NULL; event 2025 kernel/trace/trace_events_filter.c static int ftrace_function_set_filter(struct perf_event *event, event 2033 kernel/trace/trace_events_filter.c .ops = &event->ftrace_ops, event 2049 kernel/trace/trace_events_filter.c static int ftrace_function_set_filter(struct perf_event *event, event 2056 kernel/trace/trace_events_filter.c int ftrace_profile_set_filter(struct perf_event *event, int event_id, event 2065 kernel/trace/trace_events_filter.c call = event->tp_event; event 2072 kernel/trace/trace_events_filter.c if (event->filter) event 2080 kernel/trace/trace_events_filter.c err = ftrace_function_set_filter(event, filter); event 2082 kernel/trace/trace_events_filter.c event->filter = filter; event 2180 kernel/trace/trace_events_filter.c static int test_pred_visited_fn(struct filter_pred *pred, void *event) event 86 kernel/trace/trace_events_hist.c void *event); event 156 kernel/trace/trace_events_hist.c void *event) event 164 kernel/trace/trace_events_hist.c void *event) event 172 kernel/trace/trace_events_hist.c void *event) event 174 kernel/trace/trace_events_hist.c char *addr = (char *)(event + hist_field->field->offset); event 182 kernel/trace/trace_events_hist.c void *event) event 184 kernel/trace/trace_events_hist.c u32 str_item = *(u32 *)(event + hist_field->field->offset); event 186 kernel/trace/trace_events_hist.c char *addr = (char *)(event + str_loc); event 194 kernel/trace/trace_events_hist.c void *event) event 196 kernel/trace/trace_events_hist.c char **addr = (char **)(event + hist_field->field->offset); event 204 kernel/trace/trace_events_hist.c void *event) event 208 kernel/trace/trace_events_hist.c u64 val = operand->fn(operand, elt, rbe, event); event 216 kernel/trace/trace_events_hist.c void *event) event 221 kernel/trace/trace_events_hist.c u64 val1 = operand1->fn(operand1, elt, rbe, event); event 222 kernel/trace/trace_events_hist.c u64 val2 = operand2->fn(operand2, elt, rbe, event); event 230 kernel/trace/trace_events_hist.c void *event) event 235 kernel/trace/trace_events_hist.c u64 val1 = operand1->fn(operand1, elt, rbe, event); event 236 kernel/trace/trace_events_hist.c u64 val2 = operand2->fn(operand2, elt, rbe, event); event 244 kernel/trace/trace_events_hist.c void 
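[Illustrative sketch, not part of the indexed sources] The trace_events_filter.c and trace_events_hist.c hits above all use the same technique: a predicate or hist field stores only a byte offset plus a comparison value, and is evaluated against the flat binary record ("event") by pointer arithmetic. A self-contained user-space sketch of that technique follows; sample_rec and sample_pred are hypothetical stand-ins, not the kernel's structures.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sample_rec {                 /* stands in for a flattened trace record */
    uint32_t pid;
    uint64_t duration_ns;
    char comm[16];
};

struct sample_pred {
    size_t offset;                  /* byte offset of the field in the record */
    uint64_t val;                   /* constant to compare against            */
    int (*fn)(const struct sample_pred *pred, const void *event);
};

static int pred_u64_gt(const struct sample_pred *pred, const void *event)
{
    uint64_t field;

    /* same idea as "type *addr = (type *)(event + pred->offset);" above */
    memcpy(&field, (const char *)event + pred->offset, sizeof(field));
    return field > pred->val;
}

int main(void)
{
    struct sample_rec rec = { .pid = 42, .duration_ns = 1500, .comm = "demo" };
    struct sample_pred slow = {
        .offset = offsetof(struct sample_rec, duration_ns),
        .val = 1000,
        .fn = pred_u64_gt,
    };

    printf("match: %d\n", slow.fn(&slow, &rec));   /* prints "match: 1" */
    return 0;
}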
*event) event 248 kernel/trace/trace_events_hist.c s64 sval = (s64)operand->fn(operand, elt, rbe, event); event 258 kernel/trace/trace_events_hist.c void *event) \ event 260 kernel/trace/trace_events_hist.c type *addr = (type *)(event + hist_field->field->offset); \ event 382 kernel/trace/trace_events_hist.c static bool synth_event_match(const char *system, const char *event, event 425 kernel/trace/trace_events_hist.c struct synth_event *event = to_synth_event(ev); event 427 kernel/trace/trace_events_hist.c return event->ref != 0; event 430 kernel/trace/trace_events_hist.c static bool synth_event_match(const char *system, const char *event, event 435 kernel/trace/trace_events_hist.c return strcmp(sev->name, event) == 0 && event 485 kernel/trace/trace_events_hist.c char *event; event 650 kernel/trace/trace_events_hist.c struct synth_event *event = call->data; event 656 kernel/trace/trace_events_hist.c for (i = 0, n_u64 = 0; i < event->n_fields; i++) { event 657 kernel/trace/trace_events_hist.c size = event->fields[i]->size; event 658 kernel/trace/trace_events_hist.c is_signed = event->fields[i]->is_signed; event 659 kernel/trace/trace_events_hist.c type = event->fields[i]->type; event 660 kernel/trace/trace_events_hist.c name = event->fields[i]->name; event 666 kernel/trace/trace_events_hist.c if (event->fields[i]->is_string) { event 675 kernel/trace/trace_events_hist.c event->n_u64 = n_u64; event 839 kernel/trace/trace_events_hist.c struct trace_event *event) event 850 kernel/trace/trace_events_hist.c se = container_of(event, struct synth_event, call.event); event 911 kernel/trace/trace_events_hist.c struct synth_event *event; event 915 kernel/trace/trace_events_hist.c event = trace_file->event_call->data; event 920 kernel/trace/trace_events_hist.c fields_size = event->n_u64 * sizeof(u64); event 934 kernel/trace/trace_events_hist.c for (i = 0, n_u64 = 0; i < event->n_fields; i++) { event 936 kernel/trace/trace_events_hist.c if (event->fields[i]->is_string) { event 943 kernel/trace/trace_events_hist.c struct synth_field *field = event->fields[i]; event 980 kernel/trace/trace_events_hist.c static int __set_synth_event_print_fmt(struct synth_event *event, event 991 kernel/trace/trace_events_hist.c for (i = 0; i < event->n_fields; i++) { event 992 kernel/trace/trace_events_hist.c fmt = synth_field_fmt(event->fields[i]->type); event 994 kernel/trace/trace_events_hist.c event->fields[i]->name, fmt, event 995 kernel/trace/trace_events_hist.c i == event->n_fields - 1 ? 
"" : ", "); event 999 kernel/trace/trace_events_hist.c for (i = 0; i < event->n_fields; i++) { event 1001 kernel/trace/trace_events_hist.c ", REC->%s", event->fields[i]->name); event 1012 kernel/trace/trace_events_hist.c struct synth_event *event = call->data; event 1017 kernel/trace/trace_events_hist.c len = __set_synth_event_print_fmt(event, NULL, 0); event 1024 kernel/trace/trace_events_hist.c __set_synth_event_print_fmt(event, print_fmt, len + 1); event 1146 kernel/trace/trace_events_hist.c static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, event 1149 kernel/trace/trace_events_hist.c struct tracepoint *tp = event->tp; event 1173 kernel/trace/trace_events_hist.c struct synth_event *event; event 1178 kernel/trace/trace_events_hist.c event = to_synth_event(pos); event 1179 kernel/trace/trace_events_hist.c if (strcmp(event->name, name) == 0) event 1180 kernel/trace/trace_events_hist.c return event; event 1186 kernel/trace/trace_events_hist.c static int register_synth_event(struct synth_event *event) event 1188 kernel/trace/trace_events_hist.c struct trace_event_call *call = &event->call; event 1191 kernel/trace/trace_events_hist.c event->call.class = &event->class; event 1192 kernel/trace/trace_events_hist.c event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL); event 1193 kernel/trace/trace_events_hist.c if (!event->class.system) { event 1198 kernel/trace/trace_events_hist.c event->tp = alloc_synth_tracepoint(event->name); event 1199 kernel/trace/trace_events_hist.c if (IS_ERR(event->tp)) { event 1200 kernel/trace/trace_events_hist.c ret = PTR_ERR(event->tp); event 1201 kernel/trace/trace_events_hist.c event->tp = NULL; event 1206 kernel/trace/trace_events_hist.c call->event.funcs = &synth_event_funcs; event 1209 kernel/trace/trace_events_hist.c ret = register_trace_event(&call->event); event 1217 kernel/trace/trace_events_hist.c call->data = event; event 1218 kernel/trace/trace_events_hist.c call->tp = event->tp; event 1235 kernel/trace/trace_events_hist.c unregister_trace_event(&call->event); event 1239 kernel/trace/trace_events_hist.c static int unregister_synth_event(struct synth_event *event) event 1241 kernel/trace/trace_events_hist.c struct trace_event_call *call = &event->call; event 1249 kernel/trace/trace_events_hist.c static void free_synth_event(struct synth_event *event) event 1253 kernel/trace/trace_events_hist.c if (!event) event 1256 kernel/trace/trace_events_hist.c for (i = 0; i < event->n_fields; i++) event 1257 kernel/trace/trace_events_hist.c free_synth_field(event->fields[i]); event 1259 kernel/trace/trace_events_hist.c kfree(event->fields); event 1260 kernel/trace/trace_events_hist.c kfree(event->name); event 1261 kernel/trace/trace_events_hist.c kfree(event->class.system); event 1262 kernel/trace/trace_events_hist.c free_synth_tracepoint(event->tp); event 1263 kernel/trace/trace_events_hist.c free_synth_event_print_fmt(&event->call); event 1264 kernel/trace/trace_events_hist.c kfree(event); event 1270 kernel/trace/trace_events_hist.c struct synth_event *event; event 1273 kernel/trace/trace_events_hist.c event = kzalloc(sizeof(*event), GFP_KERNEL); event 1274 kernel/trace/trace_events_hist.c if (!event) { event 1275 kernel/trace/trace_events_hist.c event = ERR_PTR(-ENOMEM); event 1279 kernel/trace/trace_events_hist.c event->name = kstrdup(name, GFP_KERNEL); event 1280 kernel/trace/trace_events_hist.c if (!event->name) { event 1281 kernel/trace/trace_events_hist.c kfree(event); event 1282 kernel/trace/trace_events_hist.c event = 
ERR_PTR(-ENOMEM); event 1286 kernel/trace/trace_events_hist.c event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL); event 1287 kernel/trace/trace_events_hist.c if (!event->fields) { event 1288 kernel/trace/trace_events_hist.c free_synth_event(event); event 1289 kernel/trace/trace_events_hist.c event = ERR_PTR(-ENOMEM); event 1293 kernel/trace/trace_events_hist.c dyn_event_init(&event->devent, &synth_event_ops); event 1296 kernel/trace/trace_events_hist.c event->fields[i] = fields[i]; event 1298 kernel/trace/trace_events_hist.c event->n_fields = n_fields; event 1300 kernel/trace/trace_events_hist.c return event; event 1308 kernel/trace/trace_events_hist.c struct synth_event *event = data->synth_event; event 1310 kernel/trace/trace_events_hist.c trace_synth(event, var_ref_vals, data->var_ref_idx); event 1321 kernel/trace/trace_events_hist.c struct synth_event *event = NULL; event 1336 kernel/trace/trace_events_hist.c event = find_synth_event(name); event 1337 kernel/trace/trace_events_hist.c if (event) { event 1364 kernel/trace/trace_events_hist.c event = alloc_synth_event(name, n_fields, fields); event 1365 kernel/trace/trace_events_hist.c if (IS_ERR(event)) { event 1366 kernel/trace/trace_events_hist.c ret = PTR_ERR(event); event 1367 kernel/trace/trace_events_hist.c event = NULL; event 1370 kernel/trace/trace_events_hist.c ret = register_synth_event(event); event 1372 kernel/trace/trace_events_hist.c dyn_event_add(&event->devent); event 1374 kernel/trace/trace_events_hist.c free_synth_event(event); event 1389 kernel/trace/trace_events_hist.c struct synth_event *event = NULL; event 1395 kernel/trace/trace_events_hist.c event = find_synth_event(name + 1); event 1396 kernel/trace/trace_events_hist.c if (event) { event 1397 kernel/trace/trace_events_hist.c if (event->ref) event 1400 kernel/trace/trace_events_hist.c ret = unregister_synth_event(event); event 1402 kernel/trace/trace_events_hist.c dyn_event_remove(&event->devent); event 1403 kernel/trace/trace_events_hist.c free_synth_event(event); event 1437 kernel/trace/trace_events_hist.c struct synth_event *event = to_synth_event(ev); event 1440 kernel/trace/trace_events_hist.c if (event->ref) event 1443 kernel/trace/trace_events_hist.c ret = unregister_synth_event(event); event 1448 kernel/trace/trace_events_hist.c free_synth_event(event); event 1452 kernel/trace/trace_events_hist.c static int __synth_event_show(struct seq_file *m, struct synth_event *event) event 1457 kernel/trace/trace_events_hist.c seq_printf(m, "%s\t", event->name); event 1459 kernel/trace/trace_events_hist.c for (i = 0; i < event->n_fields; i++) { event 1460 kernel/trace/trace_events_hist.c field = event->fields[i]; event 1464 kernel/trace/trace_events_hist.c i == event->n_fields - 1 ? 
"" : "; "); event 1474 kernel/trace/trace_events_hist.c struct synth_event *event = to_synth_event(ev); event 1476 kernel/trace/trace_events_hist.c seq_printf(m, "s:%s/", event->class.system); event 1478 kernel/trace/trace_events_hist.c return __synth_event_show(m, event); event 1534 kernel/trace/trace_events_hist.c void *event) event 1550 kernel/trace/trace_events_hist.c void *event) event 1881 kernel/trace/trace_events_hist.c char *event_name = data->match_data.event; event 1930 kernel/trace/trace_events_hist.c void *event) event 4141 kernel/trace/trace_events_hist.c kfree(data->match_data.event); event 4179 kernel/trace/trace_events_hist.c static int check_synth_field(struct synth_event *event, event 4185 kernel/trace/trace_events_hist.c if (field_pos >= event->n_fields) event 4188 kernel/trace/trace_events_hist.c field = event->fields[field_pos]; event 4199 kernel/trace/trace_events_hist.c char *system, char *event, char *var) event 4206 kernel/trace/trace_events_hist.c hist_field = find_target_event_var(hist_data, system, event, var); event 4210 kernel/trace/trace_events_hist.c event = data->match_data.event; event 4213 kernel/trace/trace_events_hist.c hist_field = find_event_var(hist_data, system, event, var); event 4225 kernel/trace/trace_events_hist.c char *event, char *var) event 4236 kernel/trace/trace_events_hist.c field_var = create_target_field_var(hist_data, system, event, var); event 4250 kernel/trace/trace_events_hist.c event = data->match_data.event; event 4260 kernel/trace/trace_events_hist.c hist_field = create_field_var_hist(hist_data, system, event, var); event 4280 kernel/trace/trace_events_hist.c struct synth_event *event; event 4291 kernel/trace/trace_events_hist.c event = find_synth_event(synth_event_name); event 4292 kernel/trace/trace_events_hist.c if (!event) { event 4297 kernel/trace/trace_events_hist.c event->ref++; event 4338 kernel/trace/trace_events_hist.c if (check_synth_field(event, hist_field, field_pos) == 0) { event 4366 kernel/trace/trace_events_hist.c if (field_pos != event->n_fields) { event 4367 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name)); event 4372 kernel/trace/trace_events_hist.c data->synth_event = event; event 4376 kernel/trace/trace_events_hist.c event->ref--; event 4476 kernel/trace/trace_events_hist.c data->match_data.event = kstrdup(match_event, GFP_KERNEL); event 4477 kernel/trace/trace_events_hist.c if (!data->match_data.event) { event 5067 kernel/trace/trace_events_hist.c data->match_data.event); event 5119 kernel/trace/trace_events_hist.c if (strcmp(data->match_data.event, event 5120 kernel/trace/trace_events_hist.c data_test->match_data.event) != 0) event 6401 kernel/trace/trace_events_hist.c struct ring_buffer_event *event) event 6419 kernel/trace/trace_events_hist.c struct ring_buffer_event *event) event 6427 kernel/trace/trace_events_hist.c hist_enable_trigger(data, rec, event); event 57 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 70 kernel/trace/trace_events_trigger.c data->ops->func(data, rec, event); event 80 kernel/trace/trace_events_trigger.c data->ops->func(data, rec, event); event 929 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 939 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 955 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 965 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 1063 kernel/trace/trace_events_trigger.c struct 
ring_buffer_event *event) event 1075 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 1083 kernel/trace/trace_events_trigger.c snapshot_trigger(data, rec, event); event 1168 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 1175 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 1183 kernel/trace/trace_events_trigger.c stacktrace_trigger(data, rec, event); event 1246 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 1258 kernel/trace/trace_events_trigger.c struct ring_buffer_event *event) event 1272 kernel/trace/trace_events_trigger.c event_enable_trigger(data, rec, event); event 1357 kernel/trace/trace_events_trigger.c const char *event; event 1376 kernel/trace/trace_events_trigger.c event = strsep(&trigger, ":"); event 1379 kernel/trace/trace_events_trigger.c event_enable_file = find_event_file(tr, system, event); event 186 kernel/trace/trace_export.c .event.type = etype, \ event 103 kernel/trace/trace_functions_graph.c struct ring_buffer_event *event; event 107 kernel/trace/trace_functions_graph.c event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, event 109 kernel/trace/trace_functions_graph.c if (!event) event 111 kernel/trace/trace_functions_graph.c entry = ring_buffer_event_data(event); event 113 kernel/trace/trace_functions_graph.c if (!call_filter_check_discard(call, entry, buffer, event)) event 114 kernel/trace/trace_functions_graph.c trace_buffer_unlock_commit_nostack(buffer, event); event 223 kernel/trace/trace_functions_graph.c struct ring_buffer_event *event; event 227 kernel/trace/trace_functions_graph.c event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, event 229 kernel/trace/trace_functions_graph.c if (!event) event 231 kernel/trace/trace_functions_graph.c entry = ring_buffer_event_data(event); event 233 kernel/trace/trace_functions_graph.c if (!call_filter_check_discard(call, entry, buffer, event)) event 234 kernel/trace/trace_functions_graph.c trace_buffer_unlock_commit_nostack(buffer, event); event 425 kernel/trace/trace_functions_graph.c struct ring_buffer_event *event; event 441 kernel/trace/trace_functions_graph.c event = ring_buffer_iter_peek(ring_iter, NULL); event 449 kernel/trace/trace_functions_graph.c event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu, event 453 kernel/trace/trace_functions_graph.c if (!event) event 456 kernel/trace/trace_functions_graph.c next = ring_buffer_event_data(event); event 976 kernel/trace/trace_functions_graph.c struct trace_event *event; event 1014 kernel/trace/trace_functions_graph.c event = ftrace_find_event(ent->type); event 1015 kernel/trace/trace_functions_graph.c if (!event) event 1018 kernel/trace/trace_functions_graph.c ret = event->funcs->trace(iter, sym_flags, event); event 1107 kernel/trace/trace_functions_graph.c struct trace_event *event) event 108 kernel/trace/trace_hwlat.c struct ring_buffer_event *event; event 116 kernel/trace/trace_hwlat.c event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry), event 118 kernel/trace/trace_hwlat.c if (!event) event 120 kernel/trace/trace_hwlat.c entry = ring_buffer_event_data(event); event 128 kernel/trace/trace_hwlat.c if (!call_filter_check_discard(call, entry, buffer, event)) event 129 kernel/trace/trace_hwlat.c trace_buffer_unlock_commit_nostack(buffer, event); event 42 kernel/trace/trace_kprobe.c static bool trace_kprobe_match(const char *system, const char *event, event 163 kernel/trace/trace_kprobe.c static bool trace_kprobe_match(const 
char *system, const char *event, event 168 kernel/trace/trace_kprobe.c return strcmp(trace_probe_name(&tk->tp), event) == 0 && event 257 kernel/trace/trace_kprobe.c const char *event, event 293 kernel/trace/trace_kprobe.c ret = trace_probe_init(&tk->tp, event, group, false); event 304 kernel/trace/trace_kprobe.c static struct trace_kprobe *find_trace_kprobe(const char *event, event 311 kernel/trace/trace_kprobe.c if (strcmp(trace_probe_name(&tk->tp), event) == 0 && event 560 kernel/trace/trace_kprobe.c struct trace_probe_event *tpe = orig->tp.event; event 740 kernel/trace/trace_kprobe.c const char *event = NULL, *group = KPROBE_EVENT_SYSTEM; event 762 kernel/trace/trace_kprobe.c event = strchr(&argv[0][1], ':'); event 763 kernel/trace/trace_kprobe.c if (event) event 764 kernel/trace/trace_kprobe.c event++; event 771 kernel/trace/trace_kprobe.c if (event) event 772 kernel/trace/trace_kprobe.c len = event - &argv[0][1] - 1; event 823 kernel/trace/trace_kprobe.c if (event) { event 824 kernel/trace/trace_kprobe.c ret = traceprobe_parse_event_name(&event, &group, buf, event 825 kernel/trace/trace_kprobe.c event - argv[0]); event 837 kernel/trace/trace_kprobe.c event = buf; event 841 kernel/trace/trace_kprobe.c tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive, event 1180 kernel/trace/trace_kprobe.c struct ring_buffer_event *event; event 1197 kernel/trace/trace_kprobe.c event = trace_event_buffer_lock_reserve(&buffer, trace_file, event 1198 kernel/trace/trace_kprobe.c call->event.type, event 1200 kernel/trace/trace_kprobe.c if (!event) event 1203 kernel/trace/trace_kprobe.c entry = ring_buffer_event_data(event); event 1207 kernel/trace/trace_kprobe.c event_trigger_unlock_commit_regs(trace_file, buffer, event, event 1228 kernel/trace/trace_kprobe.c struct ring_buffer_event *event; event 1245 kernel/trace/trace_kprobe.c event = trace_event_buffer_lock_reserve(&buffer, trace_file, event 1246 kernel/trace/trace_kprobe.c call->event.type, event 1248 kernel/trace/trace_kprobe.c if (!event) event 1251 kernel/trace/trace_kprobe.c entry = ring_buffer_event_data(event); event 1256 kernel/trace/trace_kprobe.c event_trigger_unlock_commit_regs(trace_file, buffer, event, event 1274 kernel/trace/trace_kprobe.c struct trace_event *event) event 1282 kernel/trace/trace_kprobe.c container_of(event, struct trace_event_call, event)); event 1304 kernel/trace/trace_kprobe.c struct trace_event *event) event 1312 kernel/trace/trace_kprobe.c container_of(event, struct trace_event_call, event)); event 1415 kernel/trace/trace_kprobe.c perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, event 1451 kernel/trace/trace_kprobe.c perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, event 1456 kernel/trace/trace_kprobe.c int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type, event 1460 kernel/trace/trace_kprobe.c const char *pevent = trace_event_name(event->tp_event); event 1461 kernel/trace/trace_kprobe.c const char *group = event->tp_event->class->system; event 1467 kernel/trace/trace_kprobe.c tk = event->tp_event->data; event 1492 kernel/trace/trace_kprobe.c static int kprobe_register(struct trace_event_call *event, event 1499 kernel/trace/trace_kprobe.c return enable_trace_kprobe(event, file); event 1501 kernel/trace/trace_kprobe.c return disable_trace_kprobe(event, file); event 1505 kernel/trace/trace_kprobe.c return enable_trace_kprobe(event, NULL); event 1507 kernel/trace/trace_kprobe.c return disable_trace_kprobe(event, NULL); event 1565 
kernel/trace/trace_kprobe.c call->event.funcs = &kretprobe_funcs; event 1568 kernel/trace/trace_kprobe.c call->event.funcs = &kprobe_funcs; event 1596 kernel/trace/trace_kprobe.c char *event; event 1603 kernel/trace/trace_kprobe.c event = func ? func : "DUMMY_EVENT"; event 1605 kernel/trace/trace_kprobe.c tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func, event 301 kernel/trace/trace_mmiotrace.c struct ring_buffer_event *event; event 305 kernel/trace/trace_mmiotrace.c event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW, event 307 kernel/trace/trace_mmiotrace.c if (!event) { event 311 kernel/trace/trace_mmiotrace.c entry = ring_buffer_event_data(event); event 314 kernel/trace/trace_mmiotrace.c if (!call_filter_check_discard(call, entry, buffer, event)) event 315 kernel/trace/trace_mmiotrace.c trace_buffer_unlock_commit(tr, buffer, event, 0, pc); event 331 kernel/trace/trace_mmiotrace.c struct ring_buffer_event *event; event 335 kernel/trace/trace_mmiotrace.c event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP, event 337 kernel/trace/trace_mmiotrace.c if (!event) { event 341 kernel/trace/trace_mmiotrace.c entry = ring_buffer_event_data(event); event 344 kernel/trace/trace_mmiotrace.c if (!call_filter_check_discard(call, entry, buffer, event)) event 345 kernel/trace/trace_mmiotrace.c trace_buffer_unlock_commit(tr, buffer, event, 0, pc); event 280 kernel/trace/trace_output.c struct trace_event_call *event; event 285 kernel/trace/trace_output.c event = container_of(trace_event, struct trace_event_call, event); event 288 kernel/trace/trace_output.c if (entry->type != event->event.type) { event 294 kernel/trace/trace_output.c trace_seq_printf(s, "%s: ", trace_event_name(event)); event 648 kernel/trace/trace_output.c struct trace_event *event; event 653 kernel/trace/trace_output.c hlist_for_each_entry(event, &event_hash[key], node) { event 654 kernel/trace/trace_output.c if (event->type == type) event 655 kernel/trace/trace_output.c return event; event 716 kernel/trace/trace_output.c int register_trace_event(struct trace_event *event) event 723 kernel/trace/trace_output.c if (WARN_ON(!event)) event 726 kernel/trace/trace_output.c if (WARN_ON(!event->funcs)) event 729 kernel/trace/trace_output.c INIT_LIST_HEAD(&event->list); event 731 kernel/trace/trace_output.c if (!event->type) { event 736 kernel/trace/trace_output.c event->type = trace_search_list(&list); event 737 kernel/trace/trace_output.c if (!event->type) event 742 kernel/trace/trace_output.c event->type = next_event_type++; event 746 kernel/trace/trace_output.c if (WARN_ON(ftrace_find_event(event->type))) event 749 kernel/trace/trace_output.c list_add_tail(&event->list, list); event 751 kernel/trace/trace_output.c } else if (event->type > __TRACE_LAST_TYPE) { event 757 kernel/trace/trace_output.c if (ftrace_find_event(event->type)) event 761 kernel/trace/trace_output.c if (event->funcs->trace == NULL) event 762 kernel/trace/trace_output.c event->funcs->trace = trace_nop_print; event 763 kernel/trace/trace_output.c if (event->funcs->raw == NULL) event 764 kernel/trace/trace_output.c event->funcs->raw = trace_nop_print; event 765 kernel/trace/trace_output.c if (event->funcs->hex == NULL) event 766 kernel/trace/trace_output.c event->funcs->hex = trace_nop_print; event 767 kernel/trace/trace_output.c if (event->funcs->binary == NULL) event 768 kernel/trace/trace_output.c event->funcs->binary = trace_nop_print; event 770 kernel/trace/trace_output.c key = event->type & (EVENT_HASHSIZE - 1); event 772 
kernel/trace/trace_output.c hlist_add_head(&event->node, &event_hash[key]); event 774 kernel/trace/trace_output.c ret = event->type; event 785 kernel/trace/trace_output.c int __unregister_trace_event(struct trace_event *event) event 787 kernel/trace/trace_output.c hlist_del(&event->node); event 788 kernel/trace/trace_output.c list_del(&event->list); event 796 kernel/trace/trace_output.c int unregister_trace_event(struct trace_event *event) event 799 kernel/trace/trace_output.c __unregister_trace_event(event); event 811 kernel/trace/trace_output.c struct trace_event *event) event 820 kernel/trace/trace_output.c struct trace_event *event) event 840 kernel/trace/trace_output.c struct trace_event *event) event 854 kernel/trace/trace_output.c struct trace_event *event) event 868 kernel/trace/trace_output.c struct trace_event *event) event 921 kernel/trace/trace_output.c struct trace_event *event) event 927 kernel/trace/trace_output.c int flags, struct trace_event *event) event 955 kernel/trace/trace_output.c struct trace_event *event) event 961 kernel/trace/trace_output.c struct trace_event *event) event 991 kernel/trace/trace_output.c struct trace_event *event) event 997 kernel/trace/trace_output.c struct trace_event *event) event 1003 kernel/trace/trace_output.c int flags, struct trace_event *event) event 1048 kernel/trace/trace_output.c int flags, struct trace_event *event) event 1084 kernel/trace/trace_output.c int flags, struct trace_event *event) event 1138 kernel/trace/trace_output.c struct trace_event *event) event 1173 kernel/trace/trace_output.c struct trace_event *event) event 1203 kernel/trace/trace_output.c struct trace_event *event) event 1221 kernel/trace/trace_output.c struct trace_event *event) event 1247 kernel/trace/trace_output.c struct trace_event *event) event 1265 kernel/trace/trace_output.c struct trace_event *event) event 1290 kernel/trace/trace_output.c int flags, struct trace_event *event) event 1304 kernel/trace/trace_output.c struct trace_event *event) event 1326 kernel/trace/trace_output.c struct trace_event *event) event 1371 kernel/trace/trace_output.c struct trace_event *event; event 1375 kernel/trace/trace_output.c event = events[i]; event 1377 kernel/trace/trace_output.c ret = register_trace_event(event); event 1380 kernel/trace/trace_output.c event->type); event 27 kernel/trace/trace_output.h int flags, struct trace_event *event); event 32 kernel/trace/trace_output.h extern int __unregister_trace_event(struct trace_event *event); event 232 kernel/trace/trace_probe.c const char *slash, *event = *pevent; event 235 kernel/trace/trace_probe.c slash = strchr(event, '/'); event 237 kernel/trace/trace_probe.c if (slash == event) { event 241 kernel/trace/trace_probe.c if (slash - event + 1 > MAX_EVENT_NAME_LEN) { event 245 kernel/trace/trace_probe.c strlcpy(buf, event, slash - event + 1); event 252 kernel/trace/trace_probe.c offset += slash - event + 1; event 253 kernel/trace/trace_probe.c event = *pevent; event 255 kernel/trace/trace_probe.c len = strlen(event); event 263 kernel/trace/trace_probe.c if (!is_good_name(event)) { event 961 kernel/trace/trace_probe.c trace_probe_event_free(tp->event); event 963 kernel/trace/trace_probe.c tp->event = to->event; event 973 kernel/trace/trace_probe.c trace_probe_event_free(tp->event); event 974 kernel/trace/trace_probe.c tp->event = NULL; event 984 kernel/trace/trace_probe.c if (tp->event) event 988 kernel/trace/trace_probe.c int trace_probe_init(struct trace_probe *tp, const char *event, event 995 kernel/trace/trace_probe.c 
if (!event || !group) event 1001 kernel/trace/trace_probe.c tp->event = kzalloc(size, GFP_KERNEL); event 1002 kernel/trace/trace_probe.c if (!tp->event) event 1005 kernel/trace/trace_probe.c INIT_LIST_HEAD(&tp->event->files); event 1006 kernel/trace/trace_probe.c INIT_LIST_HEAD(&tp->event->class.fields); event 1007 kernel/trace/trace_probe.c INIT_LIST_HEAD(&tp->event->probes); event 1009 kernel/trace/trace_probe.c list_add(&tp->event->probes, &tp->list); event 1012 kernel/trace/trace_probe.c call->class = &tp->event->class; event 1013 kernel/trace/trace_probe.c call->name = kstrdup(event, GFP_KERNEL); event 1019 kernel/trace/trace_probe.c tp->event->class.system = kstrdup(group, GFP_KERNEL); event 1020 kernel/trace/trace_probe.c if (!tp->event->class.system) { event 1037 kernel/trace/trace_probe.c ret = register_trace_event(&call->event); event 1043 kernel/trace/trace_probe.c unregister_trace_event(&call->event); event 1058 kernel/trace/trace_probe.c list_add_tail_rcu(&link->list, &tp->event->files); event 1089 kernel/trace/trace_probe.c if (list_empty(&tp->event->files)) event 244 kernel/trace/trace_probe.h struct trace_probe_event *event; event 258 kernel/trace/trace_probe.h return !!(tp->event->flags & flag); event 264 kernel/trace/trace_probe.h tp->event->flags |= flag; event 270 kernel/trace/trace_probe.h tp->event->flags &= ~flag; event 280 kernel/trace/trace_probe.h return trace_event_name(&tp->event->call); event 285 kernel/trace/trace_probe.h return tp->event->call.class->system; event 291 kernel/trace/trace_probe.h return &tp->event->call; event 310 kernel/trace/trace_probe.h return &tp->event->probes; event 323 kernel/trace/trace_probe.h return trace_remove_event_call(&tp->event->call); event 328 kernel/trace/trace_probe.h return !!list_is_singular(&tp->event->files); event 331 kernel/trace/trace_probe.h int trace_probe_init(struct trace_probe *tp, const char *event, event 347 kernel/trace/trace_probe.h list_for_each_entry(pos, &(tp)->event->files, list) event 349 kernel/trace/trace_probe.h list_for_each_entry_rcu(pos, &(tp)->event->files, list) event 382 kernel/trace/trace_sched_wakeup.c struct ring_buffer_event *event; event 385 kernel/trace/trace_sched_wakeup.c event = trace_buffer_lock_reserve(buffer, TRACE_CTX, event 387 kernel/trace/trace_sched_wakeup.c if (!event) event 389 kernel/trace/trace_sched_wakeup.c entry = ring_buffer_event_data(event); event 398 kernel/trace/trace_sched_wakeup.c if (!call_filter_check_discard(call, entry, buffer, event)) event 399 kernel/trace/trace_sched_wakeup.c trace_buffer_unlock_commit(tr, buffer, event, flags, pc); event 409 kernel/trace/trace_sched_wakeup.c struct ring_buffer_event *event; event 413 kernel/trace/trace_sched_wakeup.c event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, event 415 kernel/trace/trace_sched_wakeup.c if (!event) event 417 kernel/trace/trace_sched_wakeup.c entry = ring_buffer_event_data(event); event 426 kernel/trace/trace_sched_wakeup.c if (!call_filter_check_discard(call, entry, buffer, event)) event 427 kernel/trace/trace_sched_wakeup.c trace_buffer_unlock_commit(tr, buffer, event, flags, pc); event 28 kernel/trace/trace_selftest.c struct ring_buffer_event *event; event 32 kernel/trace/trace_selftest.c while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) { event 33 kernel/trace/trace_selftest.c entry = ring_buffer_event_data(event); event 17 kernel/trace/trace_syscalls.c static int syscall_enter_register(struct trace_event_call *event, event 19 kernel/trace/trace_syscalls.c static int 
syscall_exit_register(struct trace_event_call *event, event 123 kernel/trace/trace_syscalls.c struct trace_event *event) event 139 kernel/trace/trace_syscalls.c if (entry->enter_event->event.type != ent->type) { event 170 kernel/trace/trace_syscalls.c struct trace_event *event) event 187 kernel/trace/trace_syscalls.c if (entry->exit_event->event.type != ent->type) { event 314 kernel/trace/trace_syscalls.c struct ring_buffer_event *event; event 344 kernel/trace/trace_syscalls.c event = trace_buffer_lock_reserve(buffer, event 345 kernel/trace/trace_syscalls.c sys_data->enter_event->event.type, size, irq_flags, pc); event 346 kernel/trace/trace_syscalls.c if (!event) event 349 kernel/trace/trace_syscalls.c entry = ring_buffer_event_data(event); event 354 kernel/trace/trace_syscalls.c event_trigger_unlock_commit(trace_file, buffer, event, entry, event 364 kernel/trace/trace_syscalls.c struct ring_buffer_event *event; event 390 kernel/trace/trace_syscalls.c event = trace_buffer_lock_reserve(buffer, event 391 kernel/trace/trace_syscalls.c sys_data->exit_event->event.type, sizeof(*entry), event 393 kernel/trace/trace_syscalls.c if (!event) event 396 kernel/trace/trace_syscalls.c entry = ring_buffer_event_data(event); event 400 kernel/trace/trace_syscalls.c event_trigger_unlock_commit(trace_file, buffer, event, entry, event 630 kernel/trace/trace_syscalls.c sys_data->enter_event->event.type, 1, regs, event 726 kernel/trace/trace_syscalls.c perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type, event 766 kernel/trace/trace_syscalls.c static int syscall_enter_register(struct trace_event_call *event, event 773 kernel/trace/trace_syscalls.c return reg_event_syscall_enter(file, event); event 775 kernel/trace/trace_syscalls.c unreg_event_syscall_enter(file, event); event 780 kernel/trace/trace_syscalls.c return perf_sysenter_enable(event); event 782 kernel/trace/trace_syscalls.c perf_sysenter_disable(event); event 794 kernel/trace/trace_syscalls.c static int syscall_exit_register(struct trace_event_call *event, event 801 kernel/trace/trace_syscalls.c return reg_event_syscall_exit(file, event); event 803 kernel/trace/trace_syscalls.c unreg_event_syscall_exit(file, event); event 808 kernel/trace/trace_syscalls.c return perf_sysexit_enable(event); event 810 kernel/trace/trace_syscalls.c perf_sysexit_disable(event); event 41 kernel/trace/trace_uprobe.c static bool trace_uprobe_match(const char *system, const char *event, event 312 kernel/trace/trace_uprobe.c static bool trace_uprobe_match(const char *system, const char *event, event 317 kernel/trace/trace_uprobe.c return strcmp(trace_probe_name(&tu->tp), event) == 0 && event 338 kernel/trace/trace_uprobe.c alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) event 347 kernel/trace/trace_uprobe.c ret = trace_probe_init(&tu->tp, event, group, true); event 355 kernel/trace/trace_uprobe.c init_trace_uprobe_filter(tu->tp.event->filter); event 375 kernel/trace/trace_uprobe.c static struct trace_uprobe *find_probe_event(const char *event, const char *group) event 381 kernel/trace/trace_uprobe.c if (strcmp(trace_probe_name(&tu->tp), event) == 0 && event 410 kernel/trace/trace_uprobe.c struct trace_probe_event *tpe = orig->tp.event; event 536 kernel/trace/trace_uprobe.c const char *event = NULL, *group = UPROBE_EVENT_SYSTEM; event 561 kernel/trace/trace_uprobe.c event = &argv[0][2]; event 629 kernel/trace/trace_uprobe.c if (event) { event 630 kernel/trace/trace_uprobe.c ret = traceprobe_parse_event_name(&event, &group, 
buf, event 631 kernel/trace/trace_uprobe.c event - argv[0]); event 649 kernel/trace/trace_uprobe.c event = buf; event 656 kernel/trace/trace_uprobe.c tu = alloc_trace_uprobe(group, event, argc, is_return); event 934 kernel/trace/trace_uprobe.c struct ring_buffer_event *event; event 950 kernel/trace/trace_uprobe.c event = trace_event_buffer_lock_reserve(&buffer, trace_file, event 951 kernel/trace/trace_uprobe.c call->event.type, size, 0, 0); event 952 kernel/trace/trace_uprobe.c if (!event) event 955 kernel/trace/trace_uprobe.c entry = ring_buffer_event_data(event); event 967 kernel/trace/trace_uprobe.c event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0); event 1001 kernel/trace/trace_uprobe.c print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) event 1010 kernel/trace/trace_uprobe.c container_of(event, struct trace_event_call, event)); event 1064 kernel/trace/trace_uprobe.c WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter)); event 1105 kernel/trace/trace_uprobe.c WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter)); event 1188 kernel/trace/trace_uprobe.c struct perf_event *event; event 1193 kernel/trace/trace_uprobe.c list_for_each_entry(event, &filter->perf_events, hw.tp_list) { event 1194 kernel/trace/trace_uprobe.c if (event->hw.target->mm == mm) event 1203 kernel/trace/trace_uprobe.c struct perf_event *event) event 1205 kernel/trace/trace_uprobe.c return __uprobe_perf_filter(filter, event->hw.target->mm); event 1209 kernel/trace/trace_uprobe.c struct perf_event *event) event 1214 kernel/trace/trace_uprobe.c if (event->hw.target) { event 1215 kernel/trace/trace_uprobe.c list_del(&event->hw.tp_list); event 1217 kernel/trace/trace_uprobe.c (event->hw.target->flags & PF_EXITING) || event 1218 kernel/trace/trace_uprobe.c trace_uprobe_filter_event(filter, event); event 1230 kernel/trace/trace_uprobe.c struct perf_event *event) event 1235 kernel/trace/trace_uprobe.c if (event->hw.target) { event 1245 kernel/trace/trace_uprobe.c event->parent || event->attr.enable_on_exec || event 1246 kernel/trace/trace_uprobe.c trace_uprobe_filter_event(filter, event); event 1247 kernel/trace/trace_uprobe.c list_add(&event->hw.tp_list, &filter->perf_events); event 1258 kernel/trace/trace_uprobe.c struct perf_event *event) event 1269 kernel/trace/trace_uprobe.c if (trace_uprobe_filter_remove(tu->tp.event->filter, event)) event 1283 kernel/trace/trace_uprobe.c struct perf_event *event) event 1294 kernel/trace/trace_uprobe.c if (trace_uprobe_filter_add(tu->tp.event->filter, event)) event 1300 kernel/trace/trace_uprobe.c uprobe_perf_close(call, event); event 1316 kernel/trace/trace_uprobe.c filter = tu->tp.event->filter; event 1372 kernel/trace/trace_uprobe.c perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, event 1397 kernel/trace/trace_uprobe.c int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, event 1401 kernel/trace/trace_uprobe.c const char *pevent = trace_event_name(event->tp_event); event 1402 kernel/trace/trace_uprobe.c const char *group = event->tp_event->class->system; event 1408 kernel/trace/trace_uprobe.c tu = event->tp_event->data; event 1421 kernel/trace/trace_uprobe.c trace_uprobe_register(struct trace_event_call *event, enum trace_reg type, event 1428 kernel/trace/trace_uprobe.c return probe_event_enable(event, file, NULL); event 1431 kernel/trace/trace_uprobe.c probe_event_disable(event, file); event 1436 kernel/trace/trace_uprobe.c return probe_event_enable(event, NULL, uprobe_perf_filter); event 1439 
kernel/trace/trace_uprobe.c probe_event_disable(event, NULL); event 1443 kernel/trace/trace_uprobe.c return uprobe_perf_open(event, data); event 1446 kernel/trace/trace_uprobe.c return uprobe_perf_close(event, data); event 1535 kernel/trace/trace_uprobe.c call->event.funcs = &uprobe_funcs; event 110 kernel/watchdog_hld.c static void watchdog_overflow_callback(struct perf_event *event, event 115 kernel/watchdog_hld.c event->hw.interrupts = 0; event 207 kernel/watchdog_hld.c struct perf_event *event = this_cpu_read(watchdog_ev); event 209 kernel/watchdog_hld.c if (event) { event 210 kernel/watchdog_hld.c perf_event_disable(event); event 212 kernel/watchdog_hld.c this_cpu_write(dead_event, event); event 228 kernel/watchdog_hld.c struct perf_event *event = per_cpu(dead_event, cpu); event 234 kernel/watchdog_hld.c if (event) event 235 kernel/watchdog_hld.c perf_event_release_kernel(event); event 253 kernel/watchdog_hld.c struct perf_event *event = per_cpu(watchdog_ev, cpu); event 255 kernel/watchdog_hld.c if (event) event 256 kernel/watchdog_hld.c perf_event_disable(event); event 275 kernel/watchdog_hld.c struct perf_event *event = per_cpu(watchdog_ev, cpu); event 277 kernel/watchdog_hld.c if (event) event 278 kernel/watchdog_hld.c perf_event_enable(event); event 830 mm/memcontrol.c static unsigned long memcg_events(struct mem_cgroup *memcg, int event) event 832 mm/memcontrol.c return atomic_long_read(&memcg->vmevents[event]); event 835 mm/memcontrol.c static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) event 841 mm/memcontrol.c x += per_cpu(memcg->vmstats_local->events[event], cpu); event 4312 mm/memcontrol.c struct mem_cgroup_eventfd_list *event; event 4314 mm/memcontrol.c event = kmalloc(sizeof(*event), GFP_KERNEL); event 4315 mm/memcontrol.c if (!event) event 4320 mm/memcontrol.c event->eventfd = eventfd; event 4321 mm/memcontrol.c list_add(&event->list, &memcg->oom_notify); event 4623 mm/memcontrol.c struct mem_cgroup_event *event = event 4625 mm/memcontrol.c struct mem_cgroup *memcg = event->memcg; event 4627 mm/memcontrol.c remove_wait_queue(event->wqh, &event->wait); event 4629 mm/memcontrol.c event->unregister_event(memcg, event->eventfd); event 4632 mm/memcontrol.c eventfd_signal(event->eventfd, 1); event 4634 mm/memcontrol.c eventfd_ctx_put(event->eventfd); event 4635 mm/memcontrol.c kfree(event); event 4647 mm/memcontrol.c struct mem_cgroup_event *event = event 4649 mm/memcontrol.c struct mem_cgroup *memcg = event->memcg; event 4663 mm/memcontrol.c if (!list_empty(&event->list)) { event 4664 mm/memcontrol.c list_del_init(&event->list); event 4669 mm/memcontrol.c schedule_work(&event->remove); event 4680 mm/memcontrol.c struct mem_cgroup_event *event = event 4683 mm/memcontrol.c event->wqh = wqh; event 4684 mm/memcontrol.c add_wait_queue(wqh, &event->wait); event 4700 mm/memcontrol.c struct mem_cgroup_event *event; event 4721 mm/memcontrol.c event = kzalloc(sizeof(*event), GFP_KERNEL); event 4722 mm/memcontrol.c if (!event) event 4725 mm/memcontrol.c event->memcg = memcg; event 4726 mm/memcontrol.c INIT_LIST_HEAD(&event->list); event 4727 mm/memcontrol.c init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); event 4728 mm/memcontrol.c init_waitqueue_func_entry(&event->wait, memcg_event_wake); event 4729 mm/memcontrol.c INIT_WORK(&event->remove, memcg_event_remove); event 4737 mm/memcontrol.c event->eventfd = eventfd_ctx_fileget(efile.file); event 4738 mm/memcontrol.c if (IS_ERR(event->eventfd)) { event 4739 mm/memcontrol.c ret = 
PTR_ERR(event->eventfd); event 4766 mm/memcontrol.c event->register_event = mem_cgroup_usage_register_event; event 4767 mm/memcontrol.c event->unregister_event = mem_cgroup_usage_unregister_event; event 4769 mm/memcontrol.c event->register_event = mem_cgroup_oom_register_event; event 4770 mm/memcontrol.c event->unregister_event = mem_cgroup_oom_unregister_event; event 4772 mm/memcontrol.c event->register_event = vmpressure_register_event; event 4773 mm/memcontrol.c event->unregister_event = vmpressure_unregister_event; event 4775 mm/memcontrol.c event->register_event = memsw_cgroup_usage_register_event; event 4776 mm/memcontrol.c event->unregister_event = memsw_cgroup_usage_unregister_event; event 4797 mm/memcontrol.c ret = event->register_event(memcg, event->eventfd, buf); event 4801 mm/memcontrol.c vfs_poll(efile.file, &event->pt); event 4804 mm/memcontrol.c list_add(&event->list, &memcg->event_list); event 4817 mm/memcontrol.c eventfd_ctx_put(event->eventfd); event 4821 mm/memcontrol.c kfree(event); event 5252 mm/memcontrol.c struct mem_cgroup_event *event, *tmp; event 5260 mm/memcontrol.c list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { event 5261 mm/memcontrol.c list_del_init(&event->list); event 5262 mm/memcontrol.c schedule_work(&event->remove); event 541 mm/mmu_notifier.c if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA) event 122 mm/vmstat.c ret[i] += this->event[i]; event 151 mm/vmstat.c count_vm_events(i, fold_state->event[i]); event 152 mm/vmstat.c fold_state->event[i] = 0; event 108 net/6lowpan/core.c unsigned long event, void *ptr) event 122 net/6lowpan/core.c switch (event) { event 276 net/802/garp.c enum garp_attr_event event) event 301 net/802/garp.c ga->event = event; event 311 net/802/garp.c struct garp_attr *attr, enum garp_event event) event 315 net/802/garp.c state = garp_applicant_state_table[attr->state][event].state; event 319 net/802/garp.c switch (garp_applicant_state_table[attr->state][event].action) { event 380 net/802/garp.c static void garp_gid_event(struct garp_applicant *app, enum garp_event event) event 389 net/802/garp.c garp_attr_event(app, attr, event); event 430 net/802/garp.c enum garp_event event; event 447 net/802/garp.c switch (ga->event) { event 454 net/802/garp.c event = GARP_EVENT_R_JOIN_EMPTY; event 457 net/802/garp.c event = GARP_EVENT_R_JOIN_IN; event 460 net/802/garp.c event = GARP_EVENT_R_LEAVE_EMPTY; event 463 net/802/garp.c event = GARP_EVENT_R_EMPTY; event 474 net/802/garp.c garp_attr_event(app, attr, event); event 468 net/802/mrp.c struct mrp_attr *attr, enum mrp_event event) event 472 net/802/mrp.c state = mrp_applicant_state_table[attr->state][event]; event 478 net/802/mrp.c if (event == MRP_EVENT_TX) { event 565 net/802/mrp.c static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event) event 574 net/802/mrp.c mrp_attr_event(app, attr, event); event 635 net/802/mrp.c enum mrp_event event; event 645 net/802/mrp.c event = MRP_EVENT_R_NEW; event 648 net/802/mrp.c event = MRP_EVENT_R_JOIN_IN; event 651 net/802/mrp.c event = MRP_EVENT_R_IN; event 654 net/802/mrp.c event = MRP_EVENT_R_JOIN_MT; event 657 net/802/mrp.c event = MRP_EVENT_R_MT; event 660 net/802/mrp.c event = MRP_EVENT_R_LV; event 666 net/802/mrp.c mrp_attr_event(app, attr, event); event 341 net/8021q/vlan.c static int __vlan_device_event(struct net_device *dev, unsigned long event) event 345 net/8021q/vlan.c switch (event) { event 361 net/8021q/vlan.c static int vlan_device_event(struct notifier_block *unused, unsigned long event, 
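The kernel/trace/* entries above (trace_functions_graph.c, trace_hwlat.c, trace_mmiotrace.c, trace_sched_wakeup.c) all drive the ring buffer through the same reserve / fill / filter-check / commit sequence. A minimal sketch of that sequence is below; it assumes it sits alongside those files (so the internal kernel/trace/trace.h helpers are visible), and the TRACE_DEMO type, event_demo call and demo_entry layout are invented placeholders, not taken from the source.

/* Hedged sketch of the reserve/fill/commit pattern used by the tracers
 * listed above. Assumes it is built inside kernel/trace/ so that the
 * internal helpers declared in kernel/trace/trace.h are available. */
struct demo_entry {
	struct trace_entry	ent;	/* header filled by the reserve path */
	u64			value;	/* illustrative payload */
};

static void demo_trace_write(struct trace_array *tr, u64 value)
{
	struct trace_event_call *call = &event_demo;	/* placeholder event class */
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct demo_entry *entry;
	unsigned long flags;
	int pc = preempt_count();

	local_save_flags(flags);

	/* 1. reserve one slot of the tracer's own entry type */
	event = trace_buffer_lock_reserve(buffer, TRACE_DEMO, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;		/* buffer full or tracing disabled */

	/* 2. fill the payload in place */
	entry = ring_buffer_event_data(event);
	entry->value = value;

	/* 3. commit unless an attached event filter discards it */
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}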
event 376 net/8021q/vlan.c int err = __vlan_device_event(dev, event); event 382 net/8021q/vlan.c if ((event == NETDEV_UP) && event 388 net/8021q/vlan.c if (event == NETDEV_DOWN && event 401 net/8021q/vlan.c switch (event) { event 504 net/8021q/vlan.c call_netdevice_notifiers(event, vlandev); event 239 net/9p/trans_rdma.c p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) event 243 net/9p/trans_rdma.c switch (event->event) { event 356 net/9p/trans_rdma.c static void qp_event_handler(struct ib_event *event, void *context) event 359 net/9p/trans_rdma.c event->event, context); event 327 net/appletalk/aarp.c static int aarp_device_event(struct notifier_block *this, unsigned long event, event 336 net/appletalk/aarp.c if (event == NETDEV_DOWN) { event 638 net/appletalk/ddp.c static int ddp_device_event(struct notifier_block *this, unsigned long event, event 646 net/appletalk/ddp.c if (event == NETDEV_DOWN) event 147 net/atm/br2684.c static int atm_dev_event(struct notifier_block *this, unsigned long event, event 157 net/atm/br2684.c pr_debug("event=%ld dev=%p\n", event, atm_dev); event 545 net/atm/clip.c static int clip_device_event(struct notifier_block *this, unsigned long event, event 553 net/atm/clip.c if (event == NETDEV_UNREGISTER) event 560 net/atm/clip.c switch (event) { event 578 net/atm/clip.c static int clip_inet_event(struct notifier_block *this, unsigned long event, event 589 net/atm/clip.c if (event != NETDEV_UP) event 97 net/atm/mpc.c unsigned long event, void *dev); event 1000 net/atm/mpc.c unsigned long event, void *ptr) event 1012 net/atm/mpc.c switch (event) { event 109 net/ax25/af_ax25.c static int ax25_device_event(struct notifier_block *this, unsigned long event, event 121 net/ax25/af_ax25.c switch (event) { event 1001 net/batman-adv/hard-interface.c static int batadv_hard_if_event_softif(unsigned long event, event 1006 net/batman-adv/hard-interface.c switch (event) { event 1021 net/batman-adv/hard-interface.c unsigned long event, void *ptr) event 1029 net/batman-adv/hard-interface.c return batadv_hard_if_event_softif(event, net_dev); event 1032 net/batman-adv/hard-interface.c if (!hard_iface && (event == NETDEV_REGISTER || event 1033 net/batman-adv/hard-interface.c event == NETDEV_POST_TYPE_CHANGE)) event 1039 net/batman-adv/hard-interface.c switch (event) { event 1243 net/bluetooth/6lowpan.c unsigned long event, void *ptr) event 1251 net/bluetooth/6lowpan.c switch (event) { event 5776 net/bluetooth/hci_event.c u8 event, struct sk_buff *skb) event 5792 net/bluetooth/hci_event.c if (event) { event 5793 net/bluetooth/hci_event.c if (hdr->evt != event) event 5833 net/bluetooth/hci_event.c u8 status = 0, event = hdr->evt, req_evt = 0; event 5836 net/bluetooth/hci_event.c if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { event 5841 net/bluetooth/hci_event.c req_evt = event; event 5849 net/bluetooth/hci_event.c if (req_complete_skb || event == HCI_EV_CMD_STATUS || event 5850 net/bluetooth/hci_event.c event == HCI_EV_CMD_COMPLETE) event 5855 net/bluetooth/hci_event.c switch (event) { event 6033 net/bluetooth/hci_event.c BT_DBG("%s event 0x%2.2x", hdev->name, event); event 128 net/bluetooth/hci_request.c const void *param, u8 event, u32 timeout) event 138 net/bluetooth/hci_request.c hci_req_add_ev(&req, opcode, plen, param, event); event 313 net/bluetooth/hci_request.c const void *param, u8 event) event 337 net/bluetooth/hci_request.c bt_cb(skb)->hci.req_event = event; event 46 net/bluetooth/hci_request.h const void *param, u8 event); event 
350 net/bluetooth/hci_sock.c void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event, event 384 net/bluetooth/hci_sock.c put_unaligned_le16(event, skb_put(skb, 2)); event 404 net/bluetooth/hci_sock.c static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) event 412 net/bluetooth/hci_sock.c switch (event) { event 727 net/bluetooth/hci_sock.c void hci_sock_dev_event(struct hci_dev *hdev, int event) event 729 net/bluetooth/hci_sock.c BT_DBG("hdev %s event %d", hdev->name, event); event 735 net/bluetooth/hci_sock.c skb = create_monitor_event(hdev, event); event 743 net/bluetooth/hci_sock.c if (event <= HCI_DEV_DOWN) { event 747 net/bluetooth/hci_sock.c ev.event = event; event 752 net/bluetooth/hci_sock.c if (event == HCI_DEV_UNREG) { event 705 net/bluetooth/hidp/core.c input->event = hidp_input_event; event 62 net/bluetooth/l2cap_core.c struct sk_buff_head *skbs, u8 event); event 2712 net/bluetooth/l2cap_core.c struct sk_buff_head *skbs, u8 event) event 2715 net/bluetooth/l2cap_core.c event); event 2717 net/bluetooth/l2cap_core.c switch (event) { event 2784 net/bluetooth/l2cap_core.c struct sk_buff_head *skbs, u8 event) event 2787 net/bluetooth/l2cap_core.c event); event 2789 net/bluetooth/l2cap_core.c switch (event) { event 2861 net/bluetooth/l2cap_core.c struct sk_buff_head *skbs, u8 event) event 2864 net/bluetooth/l2cap_core.c chan, control, skbs, event, chan->tx_state); event 2868 net/bluetooth/l2cap_core.c l2cap_tx_state_xmit(chan, control, skbs, event); event 2871 net/bluetooth/l2cap_core.c l2cap_tx_state_wait_f(chan, control, skbs, event); event 5997 net/bluetooth/l2cap_core.c u8 event; event 6002 net/bluetooth/l2cap_core.c event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; event 6003 net/bluetooth/l2cap_core.c l2cap_tx(chan, NULL, NULL, event); event 6223 net/bluetooth/l2cap_core.c struct sk_buff *skb, u8 event) event 6229 net/bluetooth/l2cap_core.c event); event 6231 net/bluetooth/l2cap_core.c switch (event) { event 6357 net/bluetooth/l2cap_core.c struct sk_buff *skb, u8 event) event 6364 net/bluetooth/l2cap_core.c event); event 6366 net/bluetooth/l2cap_core.c switch (event) { event 6514 net/bluetooth/l2cap_core.c struct sk_buff *skb, u8 event) event 6519 net/bluetooth/l2cap_core.c event); event 6544 net/bluetooth/l2cap_core.c if (event == L2CAP_EV_RECV_IFRAME) event 6547 net/bluetooth/l2cap_core.c return l2cap_rx_state_recv(chan, control, NULL, event); event 6552 net/bluetooth/l2cap_core.c struct sk_buff *skb, u8 event) event 6583 net/bluetooth/l2cap_core.c err = l2cap_rx_state_recv(chan, control, skb, event); event 6598 net/bluetooth/l2cap_core.c struct sk_buff *skb, u8 event) event 6603 net/bluetooth/l2cap_core.c control, skb, event, chan->rx_state); event 6608 net/bluetooth/l2cap_core.c err = l2cap_rx_state_recv(chan, control, skb, event); event 6612 net/bluetooth/l2cap_core.c event); event 6615 net/bluetooth/l2cap_core.c err = l2cap_rx_state_wait_p(chan, control, skb, event); event 6618 net/bluetooth/l2cap_core.c err = l2cap_rx_state_wait_f(chan, control, skb, event); event 6674 net/bluetooth/l2cap_core.c u8 event; event 6717 net/bluetooth/l2cap_core.c event = L2CAP_EV_RECV_IFRAME; event 6718 net/bluetooth/l2cap_core.c err = l2cap_rx(chan, control, skb, event); event 6750 net/bluetooth/l2cap_core.c event = rx_func_to_event[control->super]; event 6751 net/bluetooth/l2cap_core.c if (l2cap_rx(chan, control, skb, event)) event 250 net/bluetooth/mgmt.c static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data, event 253 
net/bluetooth/mgmt.c return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, event 257 net/bluetooth/mgmt.c static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data, event 260 net/bluetooth/mgmt.c return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, event 264 net/bluetooth/mgmt.c static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len, event 267 net/bluetooth/mgmt.c return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, event 59 net/bluetooth/mgmt_util.c int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, event 70 net/bluetooth/mgmt_util.c hdr->opcode = cpu_to_le16(event); event 86 net/bluetooth/mgmt_util.c hci_send_monitor_ctrl_event(hdev, event, data, data_len, event 34 net/bluetooth/mgmt_util.h int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, event 28 net/bridge/br.c static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr) event 40 net/bridge/br.c err = br_vlan_bridge_event(dev, event, ptr); event 44 net/bridge/br.c if (event == NETDEV_REGISTER) { event 58 net/bridge/br.c switch (event) { event 127 net/bridge/br.c call_netdevice_notifiers(event, br->dev); event 131 net/bridge/br.c if (event != NETDEV_UNREGISTER) event 132 net/bridge/br.c br_vlan_port_event(p, event); event 135 net/bridge/br.c if (!notified && (event == NETDEV_CHANGEADDR || event == NETDEV_UP || event 136 net/bridge/br.c event == NETDEV_CHANGE || event == NETDEV_DOWN)) event 148 net/bridge/br.c unsigned long event, void *ptr) event 162 net/bridge/br.c switch (event) { event 960 net/bridge/br_netfilter_hooks.c static int brnf_device_event(struct notifier_block *unused, unsigned long event, event 968 net/bridge/br_netfilter_hooks.c if (event != NETDEV_REGISTER || !(dev->priv_flags & IFF_EBRIDGE)) event 373 net/bridge/br_netlink.c u32 pid, u32 seq, int event, unsigned int flags, event 387 net/bridge/br_netlink.c event, dev->name, br->dev->name); event 389 net/bridge/br_netlink.c nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); event 411 net/bridge/br_netlink.c if (event == RTM_NEWLINK && port) { event 466 net/bridge/br_netlink.c void br_ifinfo_notify(int event, const struct net_bridge *br, event 488 net/bridge/br_netlink.c br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event); event 494 net/bridge/br_netlink.c err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev); event 899 net/bridge/br_private.h void br_vlan_port_event(struct net_bridge_port *p, unsigned long event); event 900 net/bridge/br_private.h int br_vlan_bridge_event(struct net_device *dev, unsigned long event, event 1087 net/bridge/br_private.h unsigned long event) event 1092 net/bridge/br_private.h unsigned long event, void *ptr) event 1160 net/bridge/br_private.h void br_ifinfo_notify(int event, const struct net_bridge *br, event 1461 net/bridge/br_vlan.c int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr) event 1468 net/bridge/br_vlan.c switch (event) { event 1495 net/bridge/br_vlan.c void br_vlan_port_event(struct net_bridge_port *p, unsigned long event) event 1500 net/bridge/br_vlan.c switch (event) { event 1477 net/core/drop_monitor.c unsigned long event, void *ptr) event 1483 net/core/drop_monitor.c switch (event) { event 186 net/core/failover.c failover_event(struct notifier_block *this, unsigned long event, void *ptr) event 194 net/core/failover.c switch (event) { event 86 net/core/fib_rules.c static void notify_rule_change(int event, 
struct fib_rule *rule, event 1135 net/core/fib_rules.c static void notify_rule_change(int event, struct fib_rule *rule, event 1148 net/core/fib_rules.c err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops); event 1190 net/core/fib_rules.c static int fib_rules_event(struct notifier_block *this, unsigned long event, event 1199 net/core/fib_rules.c switch (event) { event 272 net/core/netprio_cgroup.c unsigned long event, void *ptr) event 282 net/core/netprio_cgroup.c switch (event) { event 1942 net/core/pktgen.c unsigned long event, void *ptr) event 1954 net/core/pktgen.c switch (event) { event 1458 net/core/rtnetlink.c static u32 rtnl_get_event(unsigned long event) event 1462 net/core/rtnetlink.c switch (event) { event 1591 net/core/rtnetlink.c u32 event, int *new_nsid, int new_ifindex, event 1644 net/core/rtnetlink.c if (event != IFLA_EVENT_NONE) { event 1645 net/core/rtnetlink.c if (nla_put_u32(skb, IFLA_EVENT, event)) event 3491 net/core/rtnetlink.c u32 event, gfp_t flags, int *new_nsid, event 3504 net/core/rtnetlink.c type, 0, 0, change, 0, 0, event, event 3527 net/core/rtnetlink.c unsigned int change, u32 event, event 3535 net/core/rtnetlink.c skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, event 5287 net/core/rtnetlink.c static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) event 5291 net/core/rtnetlink.c switch (event) { event 5305 net/core/rtnetlink.c rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), event 1370 net/dcb/dcbnl.c static int dcbnl_notify(struct net_device *dev, int event, int cmd, event 1382 net/dcb/dcbnl.c skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh); event 1404 net/dcb/dcbnl.c int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, event 1407 net/dcb/dcbnl.c return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE); event 1411 net/dcb/dcbnl.c int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, event 1414 net/dcb/dcbnl.c return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE); event 1852 net/dcb/dcbnl.c struct dcb_app_type event; event 1855 net/dcb/dcbnl.c event.ifindex = dev->ifindex; event 1856 net/dcb/dcbnl.c memcpy(&event.app, new, sizeof(event.app)); event 1858 net/dcb/dcbnl.c event.dcbx = dev->dcbnl_ops->getdcbx(dev); event 1878 net/dcb/dcbnl.c call_dcbevent_notifiers(DCB_APP_EVENT, &event); event 1915 net/dcb/dcbnl.c struct dcb_app_type event; event 1918 net/dcb/dcbnl.c event.ifindex = dev->ifindex; event 1919 net/dcb/dcbnl.c memcpy(&event.app, new, sizeof(event.app)); event 1921 net/dcb/dcbnl.c event.dcbx = dev->dcbnl_ops->getdcbx(dev); event 1934 net/dcb/dcbnl.c call_dcbevent_notifiers(DCB_APP_EVENT, &event); event 1947 net/dcb/dcbnl.c struct dcb_app_type event; event 1950 net/dcb/dcbnl.c event.ifindex = dev->ifindex; event 1951 net/dcb/dcbnl.c memcpy(&event.app, del, sizeof(event.app)); event 1953 net/dcb/dcbnl.c event.dcbx = dev->dcbnl_ops->getdcbx(dev); event 1965 net/dcb/dcbnl.c call_dcbevent_notifiers(DCB_APP_EVENT, &event); event 129 net/dccp/timer.c int event = 0; event 148 net/dccp/timer.c event = icsk->icsk_pending; event 151 net/dccp/timer.c switch (event) { event 2079 net/decnet/af_decnet.c static int dn_device_event(struct notifier_block *this, unsigned long event, event 2087 net/decnet/af_decnet.c switch (event) { event 79 net/decnet/dn_dev.c static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa); event 685 net/decnet/dn_dev.c u32 portid, u32 seq, int event, unsigned int flags) event 691 
net/decnet/dn_dev.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); event 718 net/decnet/dn_dev.c static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa) event 727 net/decnet/dn_dev.c err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0); event 675 net/decnet/dn_fib.c static int dn_fib_dnaddr_event(struct notifier_block *this, unsigned long event, void *ptr) event 679 net/decnet/dn_fib.c switch (event) { event 1546 net/decnet/dn_route.c int event, int nowait, unsigned int flags) event 1553 net/decnet/dn_route.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); event 301 net/decnet/dn_table.c static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, event 308 net/decnet/dn_table.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags); event 382 net/decnet/dn_table.c static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id, event 393 net/decnet/dn_table.c err = dn_fib_dump_info(skb, portid, nlh->nlmsg_seq, event, tb_id, event 1546 net/dsa/slave.c unsigned long event, void *ptr) event 1550 net/dsa/slave.c if (event == NETDEV_CHANGEUPPER) { event 1564 net/dsa/slave.c unsigned long event; event 1577 net/dsa/slave.c switch (switchdev_work->event) { event 1630 net/dsa/slave.c unsigned long event, void *ptr) event 1636 net/dsa/slave.c if (event == SWITCHDEV_PORT_ATTR_SET) { event 1653 net/dsa/slave.c switchdev_work->event = event; event 1655 net/dsa/slave.c switch (event) { event 1676 net/dsa/slave.c unsigned long event, void *ptr) event 1681 net/dsa/slave.c switch (event) { event 298 net/dsa/switch.c unsigned long event, void *info) event 303 net/dsa/switch.c switch (event) { event 342 net/dsa/switch.c event, err); event 18 net/hsr/hsr_main.c static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, event 42 net/hsr/hsr_main.c switch (event) { event 206 net/ieee802154/6lowpan/core.c unsigned long event, void *ptr) event 217 net/ieee802154/6lowpan/core.c switch (event) { event 1239 net/ipv4/arp.c static int arp_netdev_event(struct notifier_block *this, unsigned long event, event 1245 net/ipv4/arp.c switch (event) { event 110 net/ipv4/devinet.c int event; event 194 net/ipv4/devinet.c static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32); event 1521 net/ipv4/devinet.c static int inetdev_event(struct notifier_block *this, unsigned long event, event 1530 net/ipv4/devinet.c if (event == NETDEV_REGISTER) { event 1538 net/ipv4/devinet.c } else if (event == NETDEV_CHANGEMTU) { event 1546 net/ipv4/devinet.c switch (event) { event 1656 net/ipv4/devinet.c nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm), event 1807 net/ipv4/devinet.c .event = RTM_NEWADDR, event 1885 net/ipv4/devinet.c static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, event 1891 net/ipv4/devinet.c .event = event, event 2029 net/ipv4/devinet.c u32 seq, int event, unsigned int flags, event 2036 net/ipv4/devinet.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), event 2087 net/ipv4/devinet.c void inet_netconf_notify_devconf(struct net *net, int event, int type, event 2098 net/ipv4/devinet.c event, 0, type); event 1411 net/ipv4/fib_frontend.c static void fib_disable_ip(struct net_device *dev, unsigned long event, event 1414 net/ipv4/fib_frontend.c if (fib_sync_down_dev(dev, event, force)) event 1421 net/ipv4/fib_frontend.c static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) event 1427 net/ipv4/fib_frontend.c switch 
(event) { event 1443 net/ipv4/fib_frontend.c fib_disable_ip(dev, event, true); event 1452 net/ipv4/fib_frontend.c static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) event 1462 net/ipv4/fib_frontend.c if (event == NETDEV_UNREGISTER) { event 1463 net/ipv4/fib_frontend.c fib_disable_ip(dev, event, true); event 1472 net/ipv4/fib_frontend.c switch (event) { event 1484 net/ipv4/fib_frontend.c fib_disable_ip(dev, event, false); event 1491 net/ipv4/fib_frontend.c fib_sync_down_dev(dev, event, false); event 38 net/ipv4/fib_lookup.h int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id, event 41 net/ipv4/fib_lookup.h void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len, event 503 net/ipv4/fib_semantics.c void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, event 515 net/ipv4/fib_semantics.c err = fib_dump_info(skb, info->portid, seq, event, tb_id, event 1727 net/ipv4/fib_semantics.c int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, event 1735 net/ipv4/fib_semantics.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags); event 1922 net/ipv4/fib_semantics.c int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force) event 1948 net/ipv4/fib_semantics.c switch (event) { event 1962 net/ipv4/fib_semantics.c if (event == NETDEV_UNREGISTER && event 1970 net/ipv4/fib_semantics.c switch (event) { event 1123 net/ipv4/fib_trie.c enum fib_event_type event = FIB_EVENT_ENTRY_ADD; event 1246 net/ipv4/fib_trie.c event = FIB_EVENT_ENTRY_APPEND; event 1270 net/ipv4/fib_trie.c err = call_fib_entry_notifiers(net, event, key, plen, new_fa, extack); event 3048 net/ipv4/igmp.c unsigned long event, void *ptr) event 3053 net/ipv4/igmp.c switch (event) { event 1744 net/ipv4/ipmr.c static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) event 1752 net/ipv4/ipmr.c if (event != NETDEV_UNREGISTER) event 188 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_netdev_event(struct notifier_block *this, unsigned long event, event 198 net/ipv4/netfilter/ipt_CLUSTERIP.c switch (event) { event 216 net/ipv4/nexthop.c int event, u32 portid, u32 seq, unsigned int nlflags) event 224 net/ipv4/nexthop.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags); event 345 net/ipv4/nexthop.c static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info) event 356 net/ipv4/nexthop.c err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags); event 1784 net/ipv4/nexthop.c unsigned long event, void *ptr) event 1789 net/ipv4/nexthop.c switch (event) { event 329 net/ipv4/tcp_bbr.c static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) event 334 net/ipv4/tcp_bbr.c if (event == CA_EVENT_TX_START && tp->app_limited) { event 153 net/ipv4/tcp_cubic.c static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) event 155 net/ipv4/tcp_cubic.c if (event == CA_EVENT_TX_START) { event 576 net/ipv4/tcp_timer.c int event; event 588 net/ipv4/tcp_timer.c event = icsk->icsk_pending; event 590 net/ipv4/tcp_timer.c switch (event) { event 152 net/ipv4/tcp_vegas.c void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event) event 154 net/ipv4/tcp_vegas.c if (event == CA_EVENT_CWND_RESTART || event 155 net/ipv4/tcp_vegas.c event == CA_EVENT_TX_START) event 22 net/ipv4/tcp_vegas.h void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event); event 113 net/ipv4/tcp_veno.c static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event) 
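The tcp_bbr.c, tcp_cubic.c, tcp_vegas.c and tcp_veno.c entries just above all receive enum tcp_ca_event through the optional cwnd_event hook of struct tcp_congestion_ops. A minimal sketch of such a hook follows; the "demo" module is invented, does nothing useful, and borrows the exported Reno helpers for the mandatory callbacks.

#include <linux/module.h>
#include <net/tcp.h>

/* Hedged sketch only: a congestion-control module whose cwnd_event hook
 * reacts to the same CA_EVENT_* values the files above switch on. */
static void demo_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	/* e.g. reset private measurements when transmission (re)starts */
	if (event == CA_EVENT_TX_START || event == CA_EVENT_CWND_RESTART)
		pr_debug("demo: cwnd event %d on socket %p\n", event, sk);
}

static struct tcp_congestion_ops demo_cong __read_mostly = {
	.name		= "demo",
	.owner		= THIS_MODULE,
	/* mandatory hooks: fall back to the exported Reno helpers */
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	/* the optional hook the files above implement */
	.cwnd_event	= demo_cwnd_event,
};

static int __init demo_cong_init(void)
{
	return tcp_register_congestion_control(&demo_cong);
}

static void __exit demo_cong_exit(void)
{
	tcp_unregister_congestion_control(&demo_cong);
}

module_init(demo_cong_init);
module_exit(demo_cong_exit);
MODULE_LICENSE("GPL");

Once registered, a module like this becomes selectable by name through the net.ipv4.tcp_congestion_control sysctl or the per-socket TCP_CONGESTION option, at which point the stack starts delivering tcp_ca_event notifications to the hook.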
event 115 net/ipv4/tcp_veno.c if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START) event 240 net/ipv4/tcp_westwood.c static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) event 245 net/ipv4/tcp_westwood.c switch (event) { event 166 net/ipv6/addrconf.c unsigned long event); event 181 net/ipv6/addrconf.c static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); event 182 net/ipv6/addrconf.c static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); event 184 net/ipv6/addrconf.c static void inet6_prefix_notify(int event, struct inet6_dev *idev, event 515 net/ipv6/addrconf.c u32 seq, int event, unsigned int flags, event 522 net/ipv6/addrconf.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), event 566 net/ipv6/addrconf.c void inet6_netconf_notify_devconf(struct net *net, int event, int type, event 577 net/ipv6/addrconf.c event, 0, type); event 3487 net/ipv6/addrconf.c static int addrconf_notify(struct notifier_block *this, unsigned long event, event 3498 net/ipv6/addrconf.c switch (event) { event 3541 net/ipv6/addrconf.c if (event == NETDEV_UP) { event 3559 net/ipv6/addrconf.c } else if (event == NETDEV_CHANGE) { event 3562 net/ipv6/addrconf.c rt6_sync_down_dev(dev, event); event 3643 net/ipv6/addrconf.c addrconf_ifdown(dev, event != NETDEV_DOWN); event 3664 net/ipv6/addrconf.c addrconf_type_change(dev, event); event 3688 net/ipv6/addrconf.c static void addrconf_type_change(struct net_device *dev, unsigned long event) event 3695 net/ipv6/addrconf.c if (event == NETDEV_POST_TYPE_CHANGE) event 3697 net/ipv6/addrconf.c else if (event == NETDEV_PRE_TYPE_CHANGE) event 3709 net/ipv6/addrconf.c unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN; event 3718 net/ipv6/addrconf.c rt6_disable_ip(dev, event); event 4898 net/ipv6/addrconf.c int event; event 4911 net/ipv6/addrconf.c nlh = nlmsg_put(skb, args->portid, args->seq, args->event, event 4981 net/ipv6/addrconf.c nlh = nlmsg_put(skb, args->portid, args->seq, args->event, event 5013 net/ipv6/addrconf.c nlh = nlmsg_put(skb, args->portid, args->seq, args->event, event 5048 net/ipv6/addrconf.c fillargs->event = RTM_NEWADDR; event 5064 net/ipv6/addrconf.c fillargs->event = RTM_GETMULTICAST; event 5077 net/ipv6/addrconf.c fillargs->event = RTM_GETANYCAST; event 5307 net/ipv6/addrconf.c .event = RTM_NEWADDR, event 5372 net/ipv6/addrconf.c static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) event 5379 net/ipv6/addrconf.c .event = event, event 5777 net/ipv6/addrconf.c u32 portid, u32 seq, int event, unsigned int flags) event 5784 net/ipv6/addrconf.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags); event 5895 net/ipv6/addrconf.c void inet6_ifinfo_notify(int event, struct inet6_dev *idev) event 5905 net/ipv6/addrconf.c err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0); event 5928 net/ipv6/addrconf.c int event, unsigned int flags) event 5934 net/ipv6/addrconf.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags); event 5966 net/ipv6/addrconf.c static void inet6_prefix_notify(int event, struct inet6_dev *idev, event 5977 net/ipv6/addrconf.c err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0); event 5991 net/ipv6/addrconf.c static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) event 5995 net/ipv6/addrconf.c if (event) event 5998 net/ipv6/addrconf.c inet6_ifa_notify(event ? 
: RTM_NEWADDR, ifp); event 6000 net/ipv6/addrconf.c switch (event) { event 6047 net/ipv6/addrconf.c static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) event 6051 net/ipv6/addrconf.c __ipv6_ifa_notify(event, ifp); event 441 net/ipv6/addrlabel.c u32 portid, u32 seq, int event, event 444 net/ipv6/addrlabel.c struct nlmsghdr *nlh = nlmsg_put(skb, portid, seq, event, event 1230 net/ipv6/ip6mr.c unsigned long event, void *ptr) event 1238 net/ipv6/ip6mr.c if (event != NETDEV_UNREGISTER) event 2643 net/ipv6/mcast.c unsigned long event, event 2649 net/ipv6/mcast.c switch (event) { event 1774 net/ipv6/ndisc.c static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) event 1781 net/ipv6/ndisc.c switch (event) { event 4525 net/ipv6/route.c unsigned long event; event 4717 net/ipv6/route.c switch (arg->event) { event 4751 net/ipv6/route.c void rt6_sync_down_dev(struct net_device *dev, unsigned long event) event 4756 net/ipv6/route.c .event = event, event 4767 net/ipv6/route.c void rt6_disable_ip(struct net_device *dev, unsigned long event) event 4769 net/ipv6/route.c rt6_sync_down_dev(dev, event); event 5940 net/ipv6/route.c void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, event 5956 net/ipv6/route.c event, info->portid, seq, nlm_flags); event 6004 net/ipv6/route.c unsigned long event, void *ptr) event 6012 net/ipv6/route.c if (event == NETDEV_REGISTER) { event 6022 net/ipv6/route.c } else if (event == NETDEV_UNREGISTER && event 2347 net/iucv/af_iucv.c unsigned long event, void *ptr) event 2353 net/iucv/af_iucv.c switch (event) { event 2360 net/iucv/af_iucv.c if (event == NETDEV_GOING_DOWN) event 828 net/iucv/iucv.c unsigned long event, void *ptr) event 1438 net/key/af_key.c static inline int event2poltype(int event) event 1440 net/key/af_key.c switch (event) { event 1450 net/key/af_key.c pr_err("pfkey: Unknown policy event %d\n", event); event 1457 net/key/af_key.c static inline int event2keytype(int event) event 1459 net/key/af_key.c switch (event) { event 1469 net/key/af_key.c pr_err("pfkey: Unknown SA event %d\n", event); event 1489 net/key/af_key.c hdr->sadb_msg_type = event2keytype(c->event); event 1527 net/key/af_key.c c.event = XFRM_MSG_NEWSA; event 1529 net/key/af_key.c c.event = XFRM_MSG_UPDSA; event 1569 net/key/af_key.c c.event = XFRM_MSG_DELSA; event 1781 net/key/af_key.c c.event = XFRM_MSG_FLUSHSA; event 2224 net/key/af_key.c if (c->data.byid && c->event == XFRM_MSG_DELPOLICY) event 2227 net/key/af_key.c out_hdr->sadb_msg_type = event2poltype(c->event); event 2334 net/key/af_key.c c.event = XFRM_MSG_UPDPOLICY; event 2336 net/key/af_key.c c.event = XFRM_MSG_NEWPOLICY; event 2418 net/key/af_key.c c.event = XFRM_MSG_DELPOLICY; event 2667 net/key/af_key.c c.event = XFRM_MSG_DELPOLICY; event 2784 net/key/af_key.c c.event = XFRM_MSG_FLUSHPOLICY; event 3060 net/key/af_key.c switch (c->event) { event 3072 net/key/af_key.c pr_err("pfkey: Unknown SA event %d\n", c->event); event 3084 net/key/af_key.c switch (c->event) { event 3096 net/key/af_key.c pr_err("pfkey: Unknown policy event %d\n", c->event); event 785 net/mac80211/driver-ops.h const struct ieee80211_event *event) event 787 net/mac80211/driver-ops.h trace_drv_event_callback(local, sdata, event); event 789 net/mac80211/driver-ops.h local->ops->event_callback(&local->hw, &sdata->vif, event); event 804 net/mac80211/mesh_plink.c enum plink_event event) event 807 net/mac80211/mesh_plink.c u16 reason = (event == CLS_ACPT) ? 
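Across net/core/fib_rules.c, net/ipv4/devinet.c, net/ipv4/nexthop.c, net/ipv6/addrconf.c and net/ipv6/route.c above (and again in the netfilter entries further down), `event` is simply the RTM_*/NFNL message type handed to nlmsg_put() by a fill function, with a small notify wrapper building the skb and multicasting it. A hedged sketch of that two-function convention follows; the demo_* names, the IFLA_UNSPEC placeholder attribute and the RTNLGRP_LINK group are assumptions, not from the source.

#include <linux/rtnetlink.h>
#include <net/netlink.h>

/* Hedged sketch of the fill + notify convention used by the files above:
 * demo_fill_info() emits one message of type 'event' (e.g. RTM_NEWADDR),
 * demo_notify() wraps it in an skb and multicasts it. */
static int demo_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
			  int event, unsigned int flags, u32 value)
{
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
	if (!nlh)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	memset(hdr, 0, sizeof(*hdr));
	hdr->ifi_family = AF_UNSPEC;

	/* placeholder attribute; real users emit their own IFLA_*/RTA_* set */
	if (nla_put_u32(skb, IFLA_UNSPEC, value))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void demo_notify(struct net *net, int event, u32 value)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return;

	err = demo_fill_info(skb, 0, 0, event, 0, value);
	if (err < 0) {
		kfree_skb(skb);
		return;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
}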
event 842 net/mac80211/mesh_plink.c struct sta_info *sta, enum plink_event event) event 850 net/mac80211/mesh_plink.c mplstates[sta->mesh->plink_state], mplevents[event]); event 855 net/mac80211/mesh_plink.c switch (event) { event 874 net/mac80211/mesh_plink.c switch (event) { event 878 net/mac80211/mesh_plink.c mesh_plink_close(sdata, sta, event); event 895 net/mac80211/mesh_plink.c switch (event) { event 899 net/mac80211/mesh_plink.c mesh_plink_close(sdata, sta, event); event 913 net/mac80211/mesh_plink.c switch (event) { event 917 net/mac80211/mesh_plink.c mesh_plink_close(sdata, sta, event); event 929 net/mac80211/mesh_plink.c switch (event) { event 934 net/mac80211/mesh_plink.c mesh_plink_close(sdata, sta, event); event 946 net/mac80211/mesh_plink.c switch (event) { event 1007 net/mac80211/mesh_plink.c enum plink_event event = PLINK_UNDEFINED; event 1016 net/mac80211/mesh_plink.c event = OPN_RJCT; event 1032 net/mac80211/mesh_plink.c event = OPN_ACPT; event 1046 net/mac80211/mesh_plink.c event = OPN_RJCT; event 1049 net/mac80211/mesh_plink.c event = OPN_IGNR; event 1051 net/mac80211/mesh_plink.c event = OPN_ACPT; event 1055 net/mac80211/mesh_plink.c event = CNF_RJCT; event 1059 net/mac80211/mesh_plink.c event = CNF_IGNR; event 1061 net/mac80211/mesh_plink.c event = CNF_ACPT; event 1074 net/mac80211/mesh_plink.c event = CLS_ACPT; event 1076 net/mac80211/mesh_plink.c event = CLS_IGNR; event 1078 net/mac80211/mesh_plink.c event = CLS_IGNR; event 1080 net/mac80211/mesh_plink.c event = CLS_ACPT; event 1088 net/mac80211/mesh_plink.c return event; event 1099 net/mac80211/mesh_plink.c enum plink_event event; event 1155 net/mac80211/mesh_plink.c event = mesh_plink_get_event(sdata, sta, elems, ftype, llid, plid); event 1157 net/mac80211/mesh_plink.c if (event == OPN_ACPT) { event 1166 net/mac80211/mesh_plink.c } else if (!sta && event == OPN_RJCT) { event 1171 net/mac80211/mesh_plink.c } else if (!sta || event == PLINK_UNDEFINED) { event 1176 net/mac80211/mesh_plink.c if (event == CNF_ACPT) { event 1184 net/mac80211/mesh_plink.c changed |= mesh_plink_fsm(sdata, sta, event); event 2656 net/mac80211/mlme.c struct ieee80211_event event = { event 2667 net/mac80211/mlme.c drv_event_callback(sdata->local, sdata, &event); event 2888 net/mac80211/mlme.c struct ieee80211_event event = { event 2928 net/mac80211/mlme.c event.u.mlme.status = MLME_DENIED; event 2929 net/mac80211/mlme.c event.u.mlme.reason = status_code; event 2930 net/mac80211/mlme.c drv_event_callback(sdata->local, sdata, &event); event 2956 net/mac80211/mlme.c event.u.mlme.status = MLME_SUCCESS; event 2957 net/mac80211/mlme.c drv_event_callback(sdata->local, sdata, &event); event 3533 net/mac80211/mlme.c struct ieee80211_event event = { event 3593 net/mac80211/mlme.c event.u.mlme.status = MLME_DENIED; event 3594 net/mac80211/mlme.c event.u.mlme.reason = status_code; event 3595 net/mac80211/mlme.c drv_event_callback(sdata->local, sdata, &event); event 3603 net/mac80211/mlme.c event.u.mlme.status = MLME_SUCCESS; event 3604 net/mac80211/mlme.c drv_event_callback(sdata->local, sdata, &event); event 3721 net/mac80211/mlme.c struct ieee80211_event event = { event 3732 net/mac80211/mlme.c event.u.rssi.data = RSSI_EVENT_HIGH; event 3733 net/mac80211/mlme.c drv_event_callback(local, sdata, &event); event 3738 net/mac80211/mlme.c event.u.rssi.data = RSSI_EVENT_LOW; event 3739 net/mac80211/mlme.c drv_event_callback(local, sdata, &event); event 4349 net/mac80211/mlme.c struct ieee80211_event event = { event 4360 net/mac80211/mlme.c 
drv_event_callback(sdata->local, sdata, &event); event 4370 net/mac80211/mlme.c struct ieee80211_event event = { event 4378 net/mac80211/mlme.c drv_event_callback(sdata->local, sdata, &event); event 2905 net/mac80211/rx.c struct ieee80211_event event = { event 2929 net/mac80211/rx.c event.u.ba.tid = tid; event 2930 net/mac80211/rx.c event.u.ba.ssn = start_seq_num; event 2931 net/mac80211/rx.c event.u.ba.sta = &rx->sta->sta; event 2944 net/mac80211/rx.c drv_event_callback(rx->local, rx->sdata, &event); event 3733 net/mac80211/rx.c struct ieee80211_event event = { event 3738 net/mac80211/rx.c drv_event_callback(rx.local, rx.sdata, &event); event 73 net/mpls/af_mpls.c static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt, event 542 net/mpls/af_mpls.c int event = new ? RTM_NEWROUTE : RTM_DELROUTE; event 547 net/mpls/af_mpls.c rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags); event 1132 net/mpls/af_mpls.c u32 portid, u32 seq, int event, event 1139 net/mpls/af_mpls.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), event 1181 net/mpls/af_mpls.c static void mpls_netconf_notify_devconf(struct net *net, int event, event 1191 net/mpls/af_mpls.c err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type); event 1494 net/mpls/af_mpls.c static void mpls_ifdown(struct net_device *dev, int event) event 1516 net/mpls/af_mpls.c switch (event) { event 1525 net/mpls/af_mpls.c if (event == NETDEV_UNREGISTER) event 1540 net/mpls/af_mpls.c if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn) event 1580 net/mpls/af_mpls.c static int mpls_dev_notify(struct notifier_block *this, unsigned long event, event 1587 net/mpls/af_mpls.c if (event == NETDEV_REGISTER) { event 1609 net/mpls/af_mpls.c switch (event) { event 1611 net/mpls/af_mpls.c mpls_ifdown(dev, event); event 1625 net/mpls/af_mpls.c mpls_ifdown(dev, event); event 1628 net/mpls/af_mpls.c mpls_ifdown(dev, event); event 1963 net/mpls/af_mpls.c static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, event 1970 net/mpls/af_mpls.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags); event 2251 net/mpls/af_mpls.c static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt, event 2263 net/mpls/af_mpls.c err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags); event 1644 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, event 1654 net/netfilter/ipvs/ip_vs_ctl.c if (event != NETDEV_DOWN || !ipvs) event 383 net/netfilter/ipvs/ip_vs_proto_sctp.c int event, next_state; event 421 net/netfilter/ipvs/ip_vs_proto_sctp.c event = (chunk_type < sizeof(sctp_events)) ? event 434 net/netfilter/ipvs/ip_vs_proto_sctp.c next_state = sctp_states[direction][event][cp->state]; event 230 net/netfilter/nf_conntrack_ecache.c void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, event 248 net/netfilter/nf_conntrack_ecache.c if (e->expmask & (1 << event)) { event 254 net/netfilter/nf_conntrack_ecache.c notify->fcn(1 << event, &item); event 517 net/netfilter/nf_conntrack_netlink.c unsigned int flags = portid ? NLM_F_MULTI : 0, event; event 519 net/netfilter/nf_conntrack_netlink.c event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW); event 520 net/netfilter/nf_conntrack_netlink.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 2204 net/netfilter/nf_conntrack_netlink.c unsigned int flags = portid ? 
NLM_F_MULTI : 0, event; event 2206 net/netfilter/nf_conntrack_netlink.c event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, event 2208 net/netfilter/nf_conntrack_netlink.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 2288 net/netfilter/nf_conntrack_netlink.c unsigned int flags = portid ? NLM_F_MULTI : 0, event; event 2291 net/netfilter/nf_conntrack_netlink.c event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS); event 2292 net/netfilter/nf_conntrack_netlink.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 2800 net/netfilter/nf_conntrack_netlink.c int event, const struct nf_conntrack_expect *exp) event 2806 net/netfilter/nf_conntrack_netlink.c event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event); event 2807 net/netfilter/nf_conntrack_netlink.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 3435 net/netfilter/nf_conntrack_netlink.c unsigned int flags = portid ? NLM_F_MULTI : 0, event; event 3437 net/netfilter/nf_conntrack_netlink.c event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, event 3439 net/netfilter/nf_conntrack_netlink.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 76 net/netfilter/nf_nat_masquerade.c unsigned long event, event 82 net/netfilter/nf_nat_masquerade.c if (event == NETDEV_DOWN) { event 110 net/netfilter/nf_nat_masquerade.c unsigned long event, event 124 net/netfilter/nf_nat_masquerade.c if (event == NETDEV_DOWN) event 232 net/netfilter/nf_nat_masquerade.c unsigned long event, void *ptr) event 239 net/netfilter/nf_nat_masquerade.c if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16) event 596 net/netfilter/nf_tables_api.c u32 portid, u32 seq, int event, u32 flags, event 602 net/netfilter/nf_tables_api.c event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); event 603 net/netfilter/nf_tables_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); event 627 net/netfilter/nf_tables_api.c static void nf_tables_table_notify(const struct nft_ctx *ctx, int event) event 641 net/netfilter/nf_tables_api.c event, 0, ctx->family, ctx->table); event 1258 net/netfilter/nf_tables_api.c u32 portid, u32 seq, int event, u32 flags, event 1265 net/netfilter/nf_tables_api.c event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); event 1266 net/netfilter/nf_tables_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); event 1330 net/netfilter/nf_tables_api.c static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) event 1344 net/netfilter/nf_tables_api.c event, 0, ctx->family, ctx->table, event 2341 net/netfilter/nf_tables_api.c u32 portid, u32 seq, int event, event 2352 net/netfilter/nf_tables_api.c u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); event 2371 net/netfilter/nf_tables_api.c if (event != NFT_MSG_DELRULE && prule) { event 2403 net/netfilter/nf_tables_api.c const struct nft_rule *rule, int event) event 2417 net/netfilter/nf_tables_api.c event, 0, ctx->family, ctx->table, event 3298 net/netfilter/nf_tables_api.c const struct nft_set *set, u16 event, u16 flags) event 3306 net/netfilter/nf_tables_api.c event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); event 3307 net/netfilter/nf_tables_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), event 3376 net/netfilter/nf_tables_api.c const struct nft_set *set, int event, event 3391 net/netfilter/nf_tables_api.c err = nf_tables_fill_set(skb, ctx, set, event, 0); event 3887 net/netfilter/nf_tables_api.c struct nft_set_binding *binding, bool 
event) event 3893 net/netfilter/nf_tables_api.c if (event) event 4114 net/netfilter/nf_tables_api.c int event; event 4139 net/netfilter/nf_tables_api.c event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM); event 4143 net/netfilter/nf_tables_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), event 4205 net/netfilter/nf_tables_api.c u32 portid, int event, u16 flags, event 4214 net/netfilter/nf_tables_api.c event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); event 4215 net/netfilter/nf_tables_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), event 4379 net/netfilter/nf_tables_api.c int event, u16 flags) event 4393 net/netfilter/nf_tables_api.c err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags, event 5311 net/netfilter/nf_tables_api.c u32 portid, u32 seq, int event, u32 flags, event 5318 net/netfilter/nf_tables_api.c event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); event 5319 net/netfilter/nf_tables_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); event 5567 net/netfilter/nf_tables_api.c struct nft_object *obj, u32 portid, u32 seq, int event, event 5581 net/netfilter/nf_tables_api.c err = nf_tables_fill_obj_info(skb, net, portid, seq, event, 0, family, event 5596 net/netfilter/nf_tables_api.c struct nft_object *obj, int event) event 5598 net/netfilter/nf_tables_api.c nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event, event 5984 net/netfilter/nf_tables_api.c u32 portid, u32 seq, int event, event 5993 net/netfilter/nf_tables_api.c event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); event 5994 net/netfilter/nf_tables_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); event 6186 net/netfilter/nf_tables_api.c int event) event 6200 net/netfilter/nf_tables_api.c ctx->seq, event, 0, event 6229 net/netfilter/nf_tables_api.c int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN); event 6231 net/netfilter/nf_tables_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0); event 6253 net/netfilter/nf_tables_api.c static void nft_flowtable_event(unsigned long event, struct net_device *dev, event 6269 net/netfilter/nf_tables_api.c unsigned long event, void *ptr) event 6276 net/netfilter/nf_tables_api.c if (event != NETDEV_UNREGISTER) event 6283 net/netfilter/nf_tables_api.c nft_flowtable_event(event, dev, flowtable); event 6296 net/netfilter/nf_tables_api.c int event) event 462 net/netfilter/nf_tables_offload.c unsigned long event, void *ptr) event 468 net/netfilter/nf_tables_offload.c if (event != NETDEV_UNREGISTER) event 190 net/netfilter/nf_tables_trace.c u16 event; event 221 net/netfilter/nf_tables_trace.c event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_TRACE); event 222 net/netfilter/nf_tables_trace.c nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct nfgenmsg), 0); event 132 net/netfilter/nfnetlink_acct.c int event, struct nf_acct *acct) event 140 net/netfilter/nfnetlink_acct.c event = nfnl_msg_type(NFNL_SUBSYS_ACCT, event); event 141 net/netfilter/nfnetlink_acct.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 526 net/netfilter/nfnetlink_cthelper.c int event, struct nf_conntrack_helper *helper) event 533 net/netfilter/nfnetlink_cthelper.c event = nfnl_msg_type(NFNL_SUBSYS_CTHELPER, event); event 534 net/netfilter/nfnetlink_cthelper.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 160 net/netfilter/nfnetlink_cttimeout.c int event, struct ctnl_timeout *timeout) event 169 
net/netfilter/nfnetlink_cttimeout.c event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event); event 170 net/netfilter/nfnetlink_cttimeout.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 380 net/netfilter/nfnetlink_cttimeout.c u32 seq, u32 type, int event, u16 l3num, event 390 net/netfilter/nfnetlink_cttimeout.c event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event); event 391 net/netfilter/nfnetlink_cttimeout.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 819 net/netfilter/nfnetlink_log.c unsigned long event, void *ptr) event 824 net/netfilter/nfnetlink_log.c if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { event 945 net/netfilter/nfnetlink_queue.c unsigned long event, void *ptr) event 950 net/netfilter/nfnetlink_queue.c if (event == NETDEV_DOWN) event 975 net/netfilter/nfnetlink_queue.c unsigned long event, void *ptr) event 980 net/netfilter/nfnetlink_queue.c if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { event 286 net/netfilter/nft_chain_filter.c static void nft_netdev_event(unsigned long event, struct net_device *dev, event 291 net/netfilter/nft_chain_filter.c switch (event) { event 315 net/netfilter/nft_chain_filter.c unsigned long event, void *ptr) event 324 net/netfilter/nft_chain_filter.c if (event != NETDEV_UNREGISTER && event 325 net/netfilter/nft_chain_filter.c event != NETDEV_CHANGENAME) event 340 net/netfilter/nft_chain_filter.c nft_netdev_event(event, dev, &ctx); event 571 net/netfilter/nft_compat.c int event, u16 family, const char *name, event 578 net/netfilter/nft_compat.c event = nfnl_msg_type(NFNL_SUBSYS_NFT_COMPAT, event); event 579 net/netfilter/nft_compat.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); event 238 net/netfilter/nft_flow_offload.c unsigned long event, void *ptr) event 242 net/netfilter/nft_flow_offload.c if (event != NETDEV_DOWN) event 60 net/netfilter/xt_TEE.c static int tee_netdev_event(struct notifier_block *this, unsigned long event, event 70 net/netfilter/xt_TEE.c switch (event) { event 699 net/netlabel/netlabel_unlabeled.c unsigned long event, void *ptr) event 708 net/netlabel/netlabel_unlabeled.c if (event == NETDEV_DOWN) { event 89 net/netlink/genetlink.c static int genl_ctrl_event(int event, const struct genl_family *family, event 924 net/netlink/genetlink.c static int genl_ctrl_event(int event, const struct genl_family *family, event 934 net/netlink/genetlink.c switch (event) { event 938 net/netlink/genetlink.c msg = ctrl_build_family_msg(family, 0, 0, event); event 943 net/netlink/genetlink.c msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event); event 93 net/netrom/af_netrom.c static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr) event 100 net/netrom/af_netrom.c if (event != NETDEV_DOWN) event 91 net/nfc/hci/command.c int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, event 96 net/nfc/hci/command.c pr_debug("%d to gate %d\n", event, gate); event 102 net/nfc/hci/command.c return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event, event 381 net/nfc/hci/core.c void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, event 388 net/nfc/hci/core.c pr_err("Discarded event %x to invalid pipe %x\n", event, pipe); event 394 net/nfc/hci/core.c pr_err("Discarded event %x to unopened pipe %x\n", event, pipe); event 399 net/nfc/hci/core.c r = hdev->ops->event_received(hdev, pipe, event, skb); event 404 net/nfc/hci/core.c switch (event) { event 429 
net/nfc/hci/core.c pr_info("Discarded unknown event %x to gate %x\n", event, gate); event 208 net/nfc/nci/hci.c int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, event 217 net/nfc/nci/hci.c NCI_HCP_HEADER(NCI_HCI_HCP_EVENT, event), event 276 net/nfc/nci/hci.c u8 event, struct sk_buff *skb) event 279 net/nfc/nci/hci.c ndev->ops->hci_event_received(ndev, pipe, event, skb); event 1817 net/nfc/netlink.c unsigned long event, void *ptr) event 1822 net/nfc/netlink.c if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC) event 59 net/openvswitch/dp_notify.c static int dp_device_event(struct notifier_block *unused, unsigned long event, event 72 net/openvswitch/dp_notify.c if (event == NETDEV_UNREGISTER) { event 23 net/phonet/pn_netlink.c u32 portid, u32 seq, int event); event 25 net/phonet/pn_netlink.c void phonet_address_notify(int event, struct net_device *dev, u8 addr) event 34 net/phonet/pn_netlink.c err = fill_addr(skb, dev, addr, 0, 0, event); event 96 net/phonet/pn_netlink.c u32 portid, u32 seq, int event) event 101 net/phonet/pn_netlink.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), 0); event 161 net/phonet/pn_netlink.c u32 portid, u32 seq, int event) event 166 net/phonet/pn_netlink.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), 0); event 191 net/phonet/pn_netlink.c void rtm_phonet_notify(int event, struct net_device *dev, u8 dst) event 200 net/phonet/pn_netlink.c err = fill_route(skb, dev, dst, 0, 0, event); event 380 net/rds/ib.h struct rdma_cm_event *event, bool isv6); event 383 net/rds/ib.h struct rdma_cm_event *event); event 98 net/rds/ib_cm.c void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event) event 109 net/rds/ib_cm.c dp = event->param.conn.private_data; event 111 net/rds/ib_cm.c if (event->param.conn.private_data_len >= event 123 net/rds/ib_cm.c } else if (event->param.conn.private_data_len >= event 266 net/rds/ib_cm.c static void rds_ib_cq_event_handler(struct ib_event *event, void *data) event 269 net/rds/ib_cm.c event->event, ib_event_msg(event->event), data); event 387 net/rds/ib_cm.c static void rds_ib_qp_event_handler(struct ib_event *event, void *data) event 392 net/rds/ib_cm.c rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event, event 393 net/rds/ib_cm.c ib_event_msg(event->event)); event 395 net/rds/ib_cm.c switch (event->event) { event 401 net/rds/ib_cm.c event->event, ib_event_msg(event->event), event 632 net/rds/ib_cm.c static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6) event 634 net/rds/ib_cm.c const union rds_ib_conn_priv *dp = event->param.conn.private_data; event 651 net/rds/ib_cm.c if (!event->param.conn.private_data_len) { event 670 net/rds/ib_cm.c if (event->param.conn.private_data_len < data_len || major == 0) event 722 net/rds/ib_cm.c struct rdma_cm_event *event, bool isv6) event 742 net/rds/ib_cm.c version = rds_ib_protocol_compatible(event, isv6); event 748 net/rds/ib_cm.c dp = event->param.conn.private_data; event 852 net/rds/ib_cm.c event->param.conn.responder_resources, event 853 net/rds/ib_cm.c event->param.conn.initiator_depth, isv6); event 50 net/rds/rdma_transport.c struct rdma_cm_event *event, event 61 net/rds/rdma_transport.c event->event, rdma_event_msg(event->event)); event 77 net/rds/rdma_transport.c if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) event 83 net/rds/rdma_transport.c switch (event->event) { event 85 net/rds/rdma_transport.c ret = trans->cm_handle_connect(cm_id, event, isv6); event 115 net/rds/rdma_transport.c 
trans->cm_connect_complete(conn, event); event 121 net/rds/rdma_transport.c err = (int *)rdma_consumer_reject_data(cm_id, event, &len); event 134 net/rds/rdma_transport.c rdma_reject_msg(cm_id, event->status)); event 166 net/rds/rdma_transport.c event->event, rdma_event_msg(event->event)); event 174 net/rds/rdma_transport.c rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event, event 175 net/rds/rdma_transport.c rdma_event_msg(event->event), ret); event 181 net/rds/rdma_transport.c struct rdma_cm_event *event) event 183 net/rds/rdma_transport.c return rds_rdma_cm_event_handler_cmn(cm_id, event, false); event 188 net/rds/rdma_transport.c struct rdma_cm_event *event) event 190 net/rds/rdma_transport.c return rds_rdma_cm_event_handler_cmn(cm_id, event, true); event 22 net/rds/rdma_transport.h struct rdma_cm_event *event); event 24 net/rds/rdma_transport.h struct rdma_cm_event *event); event 566 net/rds/rds.h struct rdma_cm_event *event, bool isv6); event 569 net/rds/rds.h struct rdma_cm_event *event); event 314 net/rfkill/core.c if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) event 310 net/rfkill/input.c .event = rfkill_event, event 182 net/rose/af_rose.c unsigned long event, void *ptr) event 189 net/rose/af_rose.c if (event != NETDEV_DOWN) event 1039 net/sched/act_api.c u32 portid, u32 seq, u16 flags, int event, int bind, event 1047 net/sched/act_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags); event 1074 net/sched/act_api.c struct tc_action *actions[], int event, event 1082 net/sched/act_api.c if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, event 1279 net/sched/act_api.c u32 portid, int event, struct netlink_ext_ack *extack) event 1292 net/sched/act_api.c if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { event 1312 net/sched/act_api.c if (event == RTM_GETACTION) event 1313 net/sched/act_api.c ret = tcf_get_notify(net, portid, n, actions, event, extack); event 382 net/sched/act_mirred.c unsigned long event, void *ptr) event 388 net/sched/act_mirred.c if (event == NETDEV_UNREGISTER) { event 457 net/sched/cls_api.c u32 seq, u16 flags, int event, bool unicast); event 1784 net/sched/cls_api.c u32 portid, u32 seq, u16 flags, int event, event 1791 net/sched/cls_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); event 1829 net/sched/cls_api.c u32 parent, void *fh, int event, bool unicast, event 1841 net/sched/cls_api.c n->nlmsg_seq, n->nlmsg_flags, event, event 1902 net/sched/cls_api.c struct tcf_chain *chain, int event, event 1910 net/sched/cls_api.c q, parent, NULL, event, false, rtnl_held); event 2609 net/sched/cls_api.c u32 portid, u32 seq, u16 flags, int event) event 2620 net/sched/cls_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); event 2656 net/sched/cls_api.c u32 seq, u16 flags, int event, bool unicast) event 2670 net/sched/cls_api.c seq, flags, event) <= 0) { event 868 net/sched/sch_api.c u32 portid, u32 seq, u16 flags, int event) event 881 net/sched/sch_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); event 1787 net/sched/sch_api.c u32 portid, u32 seq, u16 flags, int event) event 1796 net/sched/sch_api.c nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); event 1833 net/sched/sch_api.c unsigned long cl, int event) event 1843 net/sched/sch_api.c if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) { event 331 net/sched/sch_cbs.c static int cbs_dev_notifier(struct notifier_block *nb, unsigned long event, event 341 
net/sched/sch_cbs.c if (event != NETDEV_UP && event != NETDEV_CHANGE) event 917 net/sched/sch_htb.c s64 event = q->near_ev_cache[level]; event 919 net/sched/sch_htb.c if (q->now >= event) { event 920 net/sched/sch_htb.c event = htb_do_events(q, level, start_at); event 921 net/sched/sch_htb.c if (!event) event 922 net/sched/sch_htb.c event = q->now + NSEC_PER_SEC; event 923 net/sched/sch_htb.c q->near_ev_cache[level] = event; event 926 net/sched/sch_htb.c if (next_event > event) event 927 net/sched/sch_htb.c next_event = event; event 1068 net/sched/sch_taprio.c static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event, event 1078 net/sched/sch_taprio.c if (event != NETDEV_UP && event != NETDEV_CHANGE) event 785 net/sctp/associola.c struct sctp_ulpevent *event; event 845 net/sctp/associola.c event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, event 847 net/sctp/associola.c if (event) event 848 net/sctp/associola.c asoc->stream.si->enqueue_event(&asoc->ulpq, event); event 805 net/sctp/ipv6.c static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, event 816 net/sctp/ipv6.c asoc = event->asoc; event 892 net/sctp/protocol.c static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname, event 900 net/sctp/protocol.c asoc = event->asoc; event 590 net/sctp/sm_sideeffect.c struct sctp_ulpevent *event; event 592 net/sctp/sm_sideeffect.c event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC, event 596 net/sctp/sm_sideeffect.c if (event) event 598 net/sctp/sm_sideeffect.c SCTP_ULPEVENT(event)); event 616 net/sctp/sm_sideeffect.c struct sctp_ulpevent *event; event 623 net/sctp/sm_sideeffect.c event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, event 627 net/sctp/sm_sideeffect.c event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, event 630 net/sctp/sm_sideeffect.c if (event) event 632 net/sctp/sm_sideeffect.c SCTP_ULPEVENT(event)); event 5435 net/sctp/sm_statefuns.c struct sctp_ulpevent *event; event 5437 net/sctp/sm_statefuns.c event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC); event 5438 net/sctp/sm_statefuns.c if (!event) event 5441 net/sctp/sm_statefuns.c sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event)); event 2094 net/sctp/socket.c struct sctp_ulpevent *event = NULL; event 2128 net/sctp/socket.c event = sctp_skb2event(skb); event 2133 net/sctp/socket.c if (event->chunk && event->chunk->head_skb) event 2134 net/sctp/socket.c head_skb = event->chunk->head_skb; event 2138 net/sctp/socket.c if (sctp_ulpevent_is_notification(event)) { event 2140 net/sctp/socket.c sp->pf->event_msgname(event, msg->msg_name, addr_len); event 2147 net/sctp/socket.c sctp_ulpevent_read_nxtinfo(event, msg, sk); event 2150 net/sctp/socket.c sctp_ulpevent_read_rcvinfo(event, msg); event 2153 net/sctp/socket.c sctp_ulpevent_read_sndrcvinfo(event, msg); event 2172 net/sctp/socket.c if (!sctp_ulpevent_is_notification(event)) event 2173 net/sctp/socket.c sctp_assoc_rwnd_increase(event->asoc, copied); event 2175 net/sctp/socket.c } else if ((event->msg_flags & MSG_NOTIFICATION) || event 2176 net/sctp/socket.c (event->msg_flags & MSG_EOR)) event 2192 net/sctp/socket.c sctp_ulpevent_free(event); event 2250 net/sctp/socket.c struct sctp_ulpevent *event; event 2254 net/sctp/socket.c event = sctp_ulpevent_make_sender_dry_event(asoc, event 2256 net/sctp/socket.c if (!event) event 2259 net/sctp/socket.c asoc->stream.si->enqueue_event(&asoc->ulpq, event); event 4437 net/sctp/socket.c struct sctp_ulpevent *event; event 
4443 net/sctp/socket.c event = sctp_ulpevent_make_sender_dry_event(asoc, event 4445 net/sctp/socket.c if (!event) event 4448 net/sctp/socket.c asoc->stream.si->enqueue_event(&asoc->ulpq, event); event 9027 net/sctp/socket.c struct sctp_ulpevent *event = sctp_skb2event(skb); event 9029 net/sctp/socket.c atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); event 9034 net/sctp/socket.c sk_mem_uncharge(sk, event->rmem_len); event 9366 net/sctp/socket.c struct sctp_ulpevent *event; event 9416 net/sctp/socket.c event = sctp_skb2event(skb); event 9417 net/sctp/socket.c if (event->asoc == assoc) { event 9445 net/sctp/socket.c event = sctp_skb2event(skb); event 9446 net/sctp/socket.c if (event->asoc == assoc) { event 125 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 132 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); event 138 net/sctp/stream_interleave.c if (event->stream == cevent->stream && event 139 net/sctp/stream_interleave.c event->mid == cevent->mid && event 141 net/sctp/stream_interleave.c (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) && event 142 net/sctp/stream_interleave.c event->fsn > cevent->fsn))) { event 143 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); event 147 net/sctp/stream_interleave.c if ((event->stream == cevent->stream && event 148 net/sctp/stream_interleave.c MID_lt(cevent->mid, event->mid)) || event 149 net/sctp/stream_interleave.c event->stream > cevent->stream) { event 150 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); event 158 net/sctp/stream_interleave.c if (event->stream < cevent->stream || event 159 net/sctp/stream_interleave.c (event->stream == cevent->stream && event 160 net/sctp/stream_interleave.c MID_lt(event->mid, cevent->mid))) { event 164 net/sctp/stream_interleave.c if (event->stream == cevent->stream && event 165 net/sctp/stream_interleave.c event->mid == cevent->mid && event 167 net/sctp/stream_interleave.c (event->msg_flags & SCTP_DATA_FIRST_FRAG || event 168 net/sctp/stream_interleave.c event->fsn < cevent->fsn)) { event 175 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); event 177 net/sctp/stream_interleave.c __skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event)); event 182 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 192 net/sctp/stream_interleave.c sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); event 197 net/sctp/stream_interleave.c if (cevent->stream < event->stream) event 200 net/sctp/stream_interleave.c if (cevent->stream > event->stream || event 260 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 273 net/sctp/stream_interleave.c sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); event 278 net/sctp/stream_interleave.c if (cevent->stream < event->stream) event 280 net/sctp/stream_interleave.c if (cevent->stream > event->stream) event 283 net/sctp/stream_interleave.c if (MID_lt(cevent->mid, event->mid)) event 285 net/sctp/stream_interleave.c if (MID_lt(event->mid, cevent->mid)) event 351 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 356 net/sctp/stream_interleave.c if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { event 357 net/sctp/stream_interleave.c event->msg_flags |= MSG_EOR; event 358 net/sctp/stream_interleave.c return event; event 361 net/sctp/stream_interleave.c sctp_intl_store_reasm(ulpq, event); event 363 net/sctp/stream_interleave.c sin = sctp_stream_in(&ulpq->asoc->stream, 
event->stream); event 364 net/sctp/stream_interleave.c if (sin->pd_mode && event->mid == sin->mid && event 365 net/sctp/stream_interleave.c event->fsn == sin->fsn) event 366 net/sctp/stream_interleave.c retval = sctp_intl_retrieve_partial(ulpq, event); event 369 net/sctp/stream_interleave.c retval = sctp_intl_retrieve_reassembled(ulpq, event); event 375 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 382 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); event 387 net/sctp/stream_interleave.c if (event->stream == cevent->stream && event 388 net/sctp/stream_interleave.c MID_lt(cevent->mid, event->mid)) { event 389 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); event 393 net/sctp/stream_interleave.c if (event->stream > cevent->stream) { event 394 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); event 402 net/sctp/stream_interleave.c if (cevent->stream > event->stream) { event 406 net/sctp/stream_interleave.c if (cevent->stream == event->stream && event 407 net/sctp/stream_interleave.c MID_lt(event->mid, cevent->mid)) { event 414 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); event 416 net/sctp/stream_interleave.c __skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event)); event 420 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 425 net/sctp/stream_interleave.c __u16 sid = event->stream; event 428 net/sctp/stream_interleave.c event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev; event 451 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 457 net/sctp/stream_interleave.c sid = event->stream; event 459 net/sctp/stream_interleave.c if (event->mid != sctp_mid_peek(stream, in, sid)) { event 460 net/sctp/stream_interleave.c sctp_intl_store_ordered(ulpq, event); event 466 net/sctp/stream_interleave.c sctp_intl_retrieve_ordered(ulpq, event); event 468 net/sctp/stream_interleave.c return event; event 476 net/sctp/stream_interleave.c struct sctp_ulpevent *event; event 480 net/sctp/stream_interleave.c event = sctp_skb2event(skb); event 484 net/sctp/stream_interleave.c !sctp_ulpevent_is_notification(event))) event 487 net/sctp/stream_interleave.c if (!sctp_ulpevent_is_notification(event)) { event 492 net/sctp/stream_interleave.c if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe)) event 512 net/sctp/stream_interleave.c sctp_ulpevent_free(event); event 518 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 525 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event)); event 531 net/sctp/stream_interleave.c if (event->stream == cevent->stream && event 532 net/sctp/stream_interleave.c event->mid == cevent->mid && event 534 net/sctp/stream_interleave.c (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) && event 535 net/sctp/stream_interleave.c event->fsn > cevent->fsn))) { event 536 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event)); event 540 net/sctp/stream_interleave.c if ((event->stream == cevent->stream && event 541 net/sctp/stream_interleave.c MID_lt(cevent->mid, event->mid)) || event 542 net/sctp/stream_interleave.c event->stream > cevent->stream) { event 543 net/sctp/stream_interleave.c __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event)); event 550 net/sctp/stream_interleave.c if (event->stream < cevent->stream || event 551 net/sctp/stream_interleave.c (event->stream == cevent->stream && event 552 
net/sctp/stream_interleave.c MID_lt(event->mid, cevent->mid))) event 555 net/sctp/stream_interleave.c if (event->stream == cevent->stream && event 556 net/sctp/stream_interleave.c event->mid == cevent->mid && event 558 net/sctp/stream_interleave.c (event->msg_flags & SCTP_DATA_FIRST_FRAG || event 559 net/sctp/stream_interleave.c event->fsn < cevent->fsn)) event 563 net/sctp/stream_interleave.c __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event)); event 568 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 578 net/sctp/stream_interleave.c sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); event 583 net/sctp/stream_interleave.c if (cevent->stream < event->stream) event 585 net/sctp/stream_interleave.c if (cevent->stream > event->stream) event 649 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 662 net/sctp/stream_interleave.c sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); event 667 net/sctp/stream_interleave.c if (cevent->stream < event->stream) event 669 net/sctp/stream_interleave.c if (cevent->stream > event->stream) event 672 net/sctp/stream_interleave.c if (MID_lt(cevent->mid, event->mid)) event 674 net/sctp/stream_interleave.c if (MID_lt(event->mid, cevent->mid)) event 741 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 746 net/sctp/stream_interleave.c if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { event 747 net/sctp/stream_interleave.c event->msg_flags |= MSG_EOR; event 748 net/sctp/stream_interleave.c return event; event 751 net/sctp/stream_interleave.c sctp_intl_store_reasm_uo(ulpq, event); event 753 net/sctp/stream_interleave.c sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); event 754 net/sctp/stream_interleave.c if (sin->pd_mode_uo && event->mid == sin->mid_uo && event 755 net/sctp/stream_interleave.c event->fsn == sin->fsn_uo) event 756 net/sctp/stream_interleave.c retval = sctp_intl_retrieve_partial_uo(ulpq, event); event 759 net/sctp/stream_interleave.c retval = sctp_intl_retrieve_reassembled_uo(ulpq, event); event 831 net/sctp/stream_interleave.c struct sctp_ulpevent *event; event 835 net/sctp/stream_interleave.c event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); event 836 net/sctp/stream_interleave.c if (!event) event 839 net/sctp/stream_interleave.c event->mid = ntohl(chunk->subh.idata_hdr->mid); event 840 net/sctp/stream_interleave.c if (event->msg_flags & SCTP_DATA_FIRST_FRAG) event 841 net/sctp/stream_interleave.c event->ppid = chunk->subh.idata_hdr->ppid; event 843 net/sctp/stream_interleave.c event->fsn = ntohl(chunk->subh.idata_hdr->fsn); event 845 net/sctp/stream_interleave.c if (!(event->msg_flags & SCTP_DATA_UNORDERED)) { event 846 net/sctp/stream_interleave.c event = sctp_intl_reasm(ulpq, event); event 847 net/sctp/stream_interleave.c if (event) { event 849 net/sctp/stream_interleave.c __skb_queue_tail(&temp, sctp_event2skb(event)); event 851 net/sctp/stream_interleave.c if (event->msg_flags & MSG_EOR) event 852 net/sctp/stream_interleave.c event = sctp_intl_order(ulpq, event); event 855 net/sctp/stream_interleave.c event = sctp_intl_reasm_uo(ulpq, event); event 856 net/sctp/stream_interleave.c if (event) { event 858 net/sctp/stream_interleave.c __skb_queue_tail(&temp, sctp_event2skb(event)); event 862 net/sctp/stream_interleave.c if (event) { event 863 net/sctp/stream_interleave.c event_eor = (event->msg_flags & MSG_EOR) ? 
1 : 0; event 937 net/sctp/stream_interleave.c struct sctp_ulpevent *event; event 942 net/sctp/stream_interleave.c event = sctp_intl_retrieve_first(ulpq); event 943 net/sctp/stream_interleave.c if (event) { event 945 net/sctp/stream_interleave.c __skb_queue_tail(&temp, sctp_event2skb(event)); event 948 net/sctp/stream_interleave.c } while (event); event 953 net/sctp/stream_interleave.c event = sctp_intl_retrieve_first_uo(ulpq); event 954 net/sctp/stream_interleave.c if (event) { event 956 net/sctp/stream_interleave.c __skb_queue_tail(&temp, sctp_event2skb(event)); event 959 net/sctp/stream_interleave.c } while (event); event 1016 net/sctp/stream_interleave.c struct sctp_ulpevent *cevent, *event = NULL; event 1039 net/sctp/stream_interleave.c if (!event) event 1040 net/sctp/stream_interleave.c event = sctp_skb2event(pos); event 1045 net/sctp/stream_interleave.c if (!event && pos != (struct sk_buff *)lobby) { event 1054 net/sctp/stream_interleave.c event = sctp_skb2event(pos); event 1058 net/sctp/stream_interleave.c if (event) { event 1059 net/sctp/stream_interleave.c sctp_intl_retrieve_ordered(ulpq, event); event 1221 net/sctp/stream_interleave.c struct sctp_ulpevent *event = sctp_skb2event(pos); event 1222 net/sctp/stream_interleave.c __u32 tsn = event->tsn; event 1226 net/sctp/stream_interleave.c sctp_ulpevent_free(event); event 1231 net/sctp/stream_interleave.c struct sctp_ulpevent *event = sctp_skb2event(pos); event 1232 net/sctp/stream_interleave.c __u32 tsn = event->tsn; event 1236 net/sctp/stream_interleave.c sctp_ulpevent_free(event); event 1299 net/sctp/stream_interleave.c static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) event 1304 net/sctp/stream_interleave.c __skb_queue_tail(&temp, sctp_event2skb(event)); event 1328 net/sctp/stream_interleave.c struct sctp_ulpevent *event) event 1333 net/sctp/stream_interleave.c __skb_queue_tail(&temp, sctp_event2skb(event)); event 31 net/sctp/ulpevent.c static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, event 33 net/sctp/ulpevent.c static void sctp_ulpevent_release_data(struct sctp_ulpevent *event); event 34 net/sctp/ulpevent.c static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event); event 38 net/sctp/ulpevent.c static void sctp_ulpevent_init(struct sctp_ulpevent *event, event 42 net/sctp/ulpevent.c memset(event, 0, sizeof(struct sctp_ulpevent)); event 43 net/sctp/ulpevent.c event->msg_flags = msg_flags; event 44 net/sctp/ulpevent.c event->rmem_len = len; event 51 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 58 net/sctp/ulpevent.c event = sctp_skb2event(skb); event 59 net/sctp/ulpevent.c sctp_ulpevent_init(event, msg_flags, skb->truesize); event 61 net/sctp/ulpevent.c return event; event 68 net/sctp/ulpevent.c int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event) event 70 net/sctp/ulpevent.c return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION); event 76 net/sctp/ulpevent.c static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event, event 79 net/sctp/ulpevent.c struct sctp_chunk *chunk = event->chunk; event 86 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 87 net/sctp/ulpevent.c event->asoc = (struct sctp_association *)asoc; event 88 net/sctp/ulpevent.c atomic_add(event->rmem_len, &event->asoc->rmem_alloc); event 95 net/sctp/ulpevent.c static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) event 97 net/sctp/ulpevent.c struct sctp_association *asoc = event->asoc; event 99 net/sctp/ulpevent.c 
atomic_sub(event->rmem_len, &asoc->rmem_alloc); event 119 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 137 net/sctp/ulpevent.c event = sctp_skb2event(skb); event 138 net/sctp/ulpevent.c sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); event 148 net/sctp/ulpevent.c event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), event 150 net/sctp/ulpevent.c if (!event) event 153 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 224 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 227 net/sctp/ulpevent.c return event; event 246 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 250 net/sctp/ulpevent.c event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change), event 252 net/sctp/ulpevent.c if (!event) event 255 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 315 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 333 net/sctp/ulpevent.c return event; event 360 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 384 net/sctp/ulpevent.c event = sctp_skb2event(skb); event 385 net/sctp/ulpevent.c sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); event 398 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 401 net/sctp/ulpevent.c return event; event 415 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 435 net/sctp/ulpevent.c event = sctp_skb2event(skb); event 436 net/sctp/ulpevent.c sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); event 506 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 508 net/sctp/ulpevent.c return event; event 523 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 527 net/sctp/ulpevent.c event = sctp_ulpevent_new(sizeof(struct sctp_shutdown_event), event 529 net/sctp/ulpevent.c if (!event) event 532 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 568 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 571 net/sctp/ulpevent.c return event; event 585 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 589 net/sctp/ulpevent.c event = sctp_ulpevent_new(sizeof(struct sctp_adaptation_event), event 591 net/sctp/ulpevent.c if (!event) event 594 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 601 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 604 net/sctp/ulpevent.c return event; event 621 net/sctp/ulpevent.c struct sctp_ulpevent *event = NULL; event 673 net/sctp/ulpevent.c event = sctp_skb2event(skb); event 679 net/sctp/ulpevent.c sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); event 685 net/sctp/ulpevent.c event->chunk = chunk; event 687 net/sctp/ulpevent.c sctp_ulpevent_receive_data(event, asoc); event 689 net/sctp/ulpevent.c event->stream = ntohs(chunk->subh.data_hdr->stream); event 691 net/sctp/ulpevent.c event->flags |= SCTP_UNORDERED; event 692 net/sctp/ulpevent.c event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); event 694 net/sctp/ulpevent.c event->tsn = ntohl(chunk->subh.data_hdr->tsn); event 695 net/sctp/ulpevent.c event->msg_flags |= chunk->chunk_hdr->flags; event 697 net/sctp/ulpevent.c return event; event 718 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 722 net/sctp/ulpevent.c event = sctp_ulpevent_new(sizeof(struct sctp_pdapi_event), event 724 net/sctp/ulpevent.c if (!event) event 727 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 759 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 762 net/sctp/ulpevent.c return event; event 771 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 775 net/sctp/ulpevent.c event = sctp_ulpevent_new(sizeof(struct 
sctp_authkey_event), event 777 net/sctp/ulpevent.c if (!event) event 780 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 794 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 797 net/sctp/ulpevent.c return event; event 809 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 813 net/sctp/ulpevent.c event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event), event 815 net/sctp/ulpevent.c if (!event) event 818 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 824 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 827 net/sctp/ulpevent.c return event; event 835 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 840 net/sctp/ulpevent.c event = sctp_ulpevent_new(length, MSG_NOTIFICATION, gfp); event 841 net/sctp/ulpevent.c if (!event) event 844 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 850 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 856 net/sctp/ulpevent.c return event; event 864 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 867 net/sctp/ulpevent.c event = sctp_ulpevent_new(sizeof(struct sctp_assoc_reset_event), event 869 net/sctp/ulpevent.c if (!event) event 872 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 878 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 883 net/sctp/ulpevent.c return event; event 891 net/sctp/ulpevent.c struct sctp_ulpevent *event; event 894 net/sctp/ulpevent.c event = sctp_ulpevent_new(sizeof(struct sctp_stream_change_event), event 896 net/sctp/ulpevent.c if (!event) event 899 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 905 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 910 net/sctp/ulpevent.c return event; event 916 net/sctp/ulpevent.c __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event) event 921 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 929 net/sctp/ulpevent.c void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, event 934 net/sctp/ulpevent.c if (sctp_ulpevent_is_notification(event)) event 938 net/sctp/ulpevent.c sinfo.sinfo_stream = event->stream; event 939 net/sctp/ulpevent.c sinfo.sinfo_ssn = event->ssn; event 940 net/sctp/ulpevent.c sinfo.sinfo_ppid = event->ppid; event 941 net/sctp/ulpevent.c sinfo.sinfo_flags = event->flags; event 942 net/sctp/ulpevent.c sinfo.sinfo_tsn = event->tsn; event 943 net/sctp/ulpevent.c sinfo.sinfo_cumtsn = event->cumtsn; event 944 net/sctp/ulpevent.c sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); event 946 net/sctp/ulpevent.c sinfo.sinfo_context = event->asoc->default_rcv_context; event 957 net/sctp/ulpevent.c void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, event 962 net/sctp/ulpevent.c if (sctp_ulpevent_is_notification(event)) event 966 net/sctp/ulpevent.c rinfo.rcv_sid = event->stream; event 967 net/sctp/ulpevent.c rinfo.rcv_ssn = event->ssn; event 968 net/sctp/ulpevent.c rinfo.rcv_ppid = event->ppid; event 969 net/sctp/ulpevent.c rinfo.rcv_flags = event->flags; event 970 net/sctp/ulpevent.c rinfo.rcv_tsn = event->tsn; event 971 net/sctp/ulpevent.c rinfo.rcv_cumtsn = event->cumtsn; event 972 net/sctp/ulpevent.c rinfo.rcv_assoc_id = sctp_assoc2id(event->asoc); event 973 net/sctp/ulpevent.c rinfo.rcv_context = event->asoc->default_rcv_context; event 982 net/sctp/ulpevent.c static void __sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, event 989 net/sctp/ulpevent.c nxtinfo.nxt_sid = event->stream; event 990 net/sctp/ulpevent.c nxtinfo.nxt_ppid = event->ppid; event 991 net/sctp/ulpevent.c nxtinfo.nxt_flags = 
event->flags; event 992 net/sctp/ulpevent.c if (sctp_ulpevent_is_notification(event)) event 995 net/sctp/ulpevent.c nxtinfo.nxt_assoc_id = sctp_assoc2id(event->asoc); event 1001 net/sctp/ulpevent.c void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, event 1020 net/sctp/ulpevent.c static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, event 1025 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 1027 net/sctp/ulpevent.c sctp_ulpevent_set_owner(event, asoc); event 1046 net/sctp/ulpevent.c static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) event 1058 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 1074 net/sctp/ulpevent.c sctp_assoc_rwnd_increase(event->asoc, len); event 1075 net/sctp/ulpevent.c sctp_chunk_put(event->chunk); event 1076 net/sctp/ulpevent.c sctp_ulpevent_release_owner(event); event 1079 net/sctp/ulpevent.c static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) event 1083 net/sctp/ulpevent.c skb = sctp_event2skb(event); event 1098 net/sctp/ulpevent.c sctp_chunk_put(event->chunk); event 1099 net/sctp/ulpevent.c sctp_ulpevent_release_owner(event); event 1106 net/sctp/ulpevent.c void sctp_ulpevent_free(struct sctp_ulpevent *event) event 1108 net/sctp/ulpevent.c if (sctp_ulpevent_is_notification(event)) event 1109 net/sctp/ulpevent.c sctp_ulpevent_release_owner(event); event 1111 net/sctp/ulpevent.c sctp_ulpevent_release_data(event); event 1113 net/sctp/ulpevent.c kfree_skb(sctp_event2skb(event)); event 1123 net/sctp/ulpevent.c struct sctp_ulpevent *event = sctp_skb2event(skb); event 1125 net/sctp/ulpevent.c if (!sctp_ulpevent_is_notification(event)) event 1128 net/sctp/ulpevent.c sctp_ulpevent_free(event); event 60 net/sctp/ulpqueue.c struct sctp_ulpevent *event; event 63 net/sctp/ulpqueue.c event = sctp_skb2event(skb); event 64 net/sctp/ulpqueue.c sctp_ulpevent_free(event); event 68 net/sctp/ulpqueue.c event = sctp_skb2event(skb); event 69 net/sctp/ulpqueue.c sctp_ulpevent_free(event); event 73 net/sctp/ulpqueue.c event = sctp_skb2event(skb); event 74 net/sctp/ulpqueue.c sctp_ulpevent_free(event); event 89 net/sctp/ulpqueue.c struct sctp_ulpevent *event; event 93 net/sctp/ulpqueue.c event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); event 94 net/sctp/ulpqueue.c if (!event) event 97 net/sctp/ulpqueue.c event->ssn = ntohs(chunk->subh.data_hdr->ssn); event 98 net/sctp/ulpqueue.c event->ppid = chunk->subh.data_hdr->ppid; event 101 net/sctp/ulpqueue.c event = sctp_ulpq_reasm(ulpq, event); event 104 net/sctp/ulpqueue.c if (event) { event 107 net/sctp/ulpqueue.c __skb_queue_tail(&temp, sctp_event2skb(event)); event 109 net/sctp/ulpqueue.c if (event->msg_flags & MSG_EOR) event 110 net/sctp/ulpqueue.c event = sctp_ulpq_order(ulpq, event); event 116 net/sctp/ulpqueue.c if (event) { event 117 net/sctp/ulpqueue.c event_eor = (event->msg_flags & MSG_EOR) ? 
1 : 0; event 149 net/sctp/ulpqueue.c struct sctp_ulpevent *event; event 152 net/sctp/ulpqueue.c event = sctp_skb2event(skb); event 153 net/sctp/ulpqueue.c if (event->asoc == asoc) { event 186 net/sctp/ulpqueue.c struct sctp_ulpevent *event; event 192 net/sctp/ulpqueue.c event = sctp_skb2event(skb); event 199 net/sctp/ulpqueue.c !sctp_ulpevent_is_notification(event))) event 202 net/sctp/ulpqueue.c if (!sctp_ulpevent_is_notification(event)) { event 207 net/sctp/ulpqueue.c if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe)) event 224 net/sctp/ulpqueue.c if ((event->msg_flags & MSG_NOTIFICATION) || event 226 net/sctp/ulpqueue.c (event->msg_flags & SCTP_DATA_FRAG_MASK))) event 229 net/sctp/ulpqueue.c clear_pd = event->msg_flags & MSG_EOR; event 265 net/sctp/ulpqueue.c sctp_ulpevent_free(event); event 274 net/sctp/ulpqueue.c struct sctp_ulpevent *event) event 280 net/sctp/ulpqueue.c tsn = event->tsn; event 285 net/sctp/ulpqueue.c __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); event 293 net/sctp/ulpqueue.c __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); event 307 net/sctp/ulpqueue.c __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event)); event 325 net/sctp/ulpqueue.c struct sctp_ulpevent *event; event 389 net/sctp/ulpqueue.c event = sctp_skb2event(f_frag); event 392 net/sctp/ulpqueue.c return event; event 579 net/sctp/ulpqueue.c struct sctp_ulpevent *event) event 584 net/sctp/ulpqueue.c if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { event 585 net/sctp/ulpqueue.c event->msg_flags |= MSG_EOR; event 586 net/sctp/ulpqueue.c return event; event 589 net/sctp/ulpqueue.c sctp_ulpq_store_reasm(ulpq, event); event 598 net/sctp/ulpqueue.c ctsn = event->tsn; event 689 net/sctp/ulpqueue.c struct sctp_ulpevent *event; event 696 net/sctp/ulpqueue.c event = sctp_skb2event(pos); event 697 net/sctp/ulpqueue.c tsn = event->tsn; event 706 net/sctp/ulpqueue.c sctp_ulpevent_free(event); event 719 net/sctp/ulpqueue.c struct sctp_ulpevent *event = NULL; event 724 net/sctp/ulpqueue.c while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) { event 728 net/sctp/ulpqueue.c __skb_queue_tail(&temp, sctp_event2skb(event)); event 731 net/sctp/ulpqueue.c if (event->msg_flags & MSG_EOR) event 732 net/sctp/ulpqueue.c event = sctp_ulpq_order(ulpq, event); event 737 net/sctp/ulpqueue.c if (event) event 747 net/sctp/ulpqueue.c struct sctp_ulpevent *event) event 755 net/sctp/ulpqueue.c sid = event->stream; event 758 net/sctp/ulpqueue.c event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev; event 789 net/sctp/ulpqueue.c struct sctp_ulpevent *event) event 798 net/sctp/ulpqueue.c __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); event 802 net/sctp/ulpqueue.c sid = event->stream; event 803 net/sctp/ulpqueue.c ssn = event->ssn; event 809 net/sctp/ulpqueue.c __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); event 814 net/sctp/ulpqueue.c __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); event 834 net/sctp/ulpqueue.c __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event)); event 838 net/sctp/ulpqueue.c struct sctp_ulpevent *event) event 844 net/sctp/ulpqueue.c if (event->msg_flags & SCTP_DATA_UNORDERED) event 845 net/sctp/ulpqueue.c return event; event 848 net/sctp/ulpqueue.c sid = event->stream; event 849 net/sctp/ulpqueue.c ssn = event->ssn; event 857 net/sctp/ulpqueue.c sctp_ulpq_store_ordered(ulpq, event); event 867 net/sctp/ulpqueue.c sctp_ulpq_retrieve_ordered(ulpq, event); event 869 net/sctp/ulpqueue.c return event; event 879 net/sctp/ulpqueue.c 
struct sctp_ulpevent *event; event 889 net/sctp/ulpqueue.c event = NULL; event 908 net/sctp/ulpqueue.c if (!event) event 910 net/sctp/ulpqueue.c event = sctp_skb2event(pos); event 919 net/sctp/ulpqueue.c if (event == NULL && pos != (struct sk_buff *)lobby) { event 928 net/sctp/ulpqueue.c event = sctp_skb2event(pos); event 935 net/sctp/ulpqueue.c if (event) { event 937 net/sctp/ulpqueue.c sctp_ulpq_retrieve_ordered(ulpq, event); event 971 net/sctp/ulpqueue.c struct sctp_ulpevent *event; event 977 net/sctp/ulpqueue.c event = sctp_skb2event(skb); event 978 net/sctp/ulpqueue.c tsn = event->tsn; event 1001 net/sctp/ulpqueue.c sctp_ulpevent_free(event); event 1029 net/sctp/ulpqueue.c struct sctp_ulpevent *event; event 1061 net/sctp/ulpqueue.c event = sctp_ulpq_retrieve_first(ulpq); event 1063 net/sctp/ulpqueue.c if (event) { event 1067 net/sctp/ulpqueue.c __skb_queue_tail(&temp, sctp_event2skb(event)); event 259 net/smc/smc_ib.c switch (ibevent->event) { event 304 net/smc/smc_ib.c switch (ibevent->event) { event 184 net/smc/smc_ism.c struct smcd_event event; event 206 net/smc/smc_ism.c ev_info.info = wrk->event.info; event 207 net/smc/smc_ism.c switch (wrk->event.code) { event 209 net/smc/smc_ism.c smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id); event 215 net/smc/smc_ism.c wrk->event.tok, event 245 net/smc/smc_ism.c switch (wrk->event.type) { event 247 net/smc/smc_ism.c smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK); event 341 net/smc/smc_ism.c void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event) event 351 net/smc/smc_ism.c wrk->event = *event; event 653 net/smc/smc_pnet.c unsigned long event, void *ptr) event 657 net/smc/smc_pnet.c switch (event) { event 164 net/sunrpc/clnt.c static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event) event 169 net/sunrpc/clnt.c switch (event) { event 184 net/sunrpc/clnt.c static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event, event 189 net/sunrpc/clnt.c switch (event) { event 201 net/sunrpc/clnt.c printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); event 207 net/sunrpc/clnt.c static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, event 213 net/sunrpc/clnt.c if (!rpc_clnt_skip_event(clnt, event)) event 214 net/sunrpc/clnt.c error = __rpc_clnt_handle_event(clnt, event, sb); event 221 net/sunrpc/clnt.c static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) event 228 net/sunrpc/clnt.c if (rpc_clnt_skip_event(clnt, event)) event 237 net/sunrpc/clnt.c static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, event 244 net/sunrpc/clnt.c while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) { event 245 net/sunrpc/clnt.c error = __rpc_pipefs_event(clnt, event, sb); event 102 net/sunrpc/xprtrdma/svc_rdma_transport.c static void qp_event_handler(struct ib_event *event, void *context) event 106 net/sunrpc/xprtrdma/svc_rdma_transport.c trace_svcrdma_qp_error(event, (struct sockaddr *)&xprt->xpt_remote); event 107 net/sunrpc/xprtrdma/svc_rdma_transport.c switch (event->event) { event 240 net/sunrpc/xprtrdma/svc_rdma_transport.c struct rdma_cm_event *event) event 244 net/sunrpc/xprtrdma/svc_rdma_transport.c trace_svcrdma_cm_event(event, sap); event 246 net/sunrpc/xprtrdma/svc_rdma_transport.c switch (event->event) { event 250 net/sunrpc/xprtrdma/svc_rdma_transport.c rdma_event_msg(event->event), event->event); event 251 net/sunrpc/xprtrdma/svc_rdma_transport.c handle_connect_req(cma_id, &event->param.conn); 
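The net/rds/rdma_transport.c and net/sunrpc/xprtrdma entries above all dispatch on the same rdma_cm callback shape, switching on event->event and logging via rdma_event_msg(). A minimal sketch of that shape follows; the handler name my_cm_event_handler is hypothetical, while the callback signature, RDMA_CM_EVENT_* codes and helpers are the ones from <rdma/rdma_cm.h>.

	#include <linux/printk.h>
	#include <rdma/rdma_cm.h>

	/* Sketch only: per-connection state would normally live in cm_id->context. */
	static int my_cm_event_handler(struct rdma_cm_id *cm_id,
				       struct rdma_cm_event *event)
	{
		switch (event->event) {
		case RDMA_CM_EVENT_ADDR_RESOLVED:
			/* address resolved: continue with route resolution */
			return rdma_resolve_route(cm_id, 5000);
		case RDMA_CM_EVENT_ESTABLISHED:
			/* connection is usable from here on */
			break;
		case RDMA_CM_EVENT_DISCONNECTED:
		case RDMA_CM_EVENT_DEVICE_REMOVAL:
			pr_info("cm event %s: tearing down\n",
				rdma_event_msg(event->event));
			break;
		default:
			break;
		}
		return 0;	/* a non-zero return asks rdma_cm to destroy cm_id */
	}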
event 257 net/sunrpc/xprtrdma/svc_rdma_transport.c rdma_event_msg(event->event), event->event); event 265 net/sunrpc/xprtrdma/svc_rdma_transport.c struct rdma_cm_event *event) event 271 net/sunrpc/xprtrdma/svc_rdma_transport.c trace_svcrdma_cm_event(event, sap); event 273 net/sunrpc/xprtrdma/svc_rdma_transport.c switch (event->event) { event 292 net/sunrpc/xprtrdma/svc_rdma_transport.c rdma_event_msg(event->event), event->event); event 300 net/sunrpc/xprtrdma/svc_rdma_transport.c rdma_event_msg(event->event), event->event); event 116 net/sunrpc/xprtrdma/verbs.c rpcrdma_qp_event_handler(struct ib_event *event, void *context) event 122 net/sunrpc/xprtrdma/verbs.c trace_xprtrdma_qp_event(r_xprt, event); event 218 net/sunrpc/xprtrdma/verbs.c rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) event 227 net/sunrpc/xprtrdma/verbs.c trace_xprtrdma_cm_event(r_xprt, event); event 228 net/sunrpc/xprtrdma/verbs.c switch (event->event) { event 260 net/sunrpc/xprtrdma/verbs.c rpcrdma_update_connect_private(r_xprt, &event->param.conn); event 272 net/sunrpc/xprtrdma/verbs.c rdma_reject_msg(id, event->status)); event 274 net/sunrpc/xprtrdma/verbs.c if (event->status == IB_CM_REJ_STALE_CONN) event 289 net/sunrpc/xprtrdma/verbs.c ia->ri_id->device->name, rdma_event_msg(event->event)); event 663 net/tipc/group.c u32 event, u16 seqno, event 671 net/tipc/group.c evt.event = event; event 688 net/tipc/group.c msg_set_grp_evt(hdr, event); event 859 net/tipc/group.c int event = evt->event; event 874 net/tipc/group.c switch (event) { event 43 net/tipc/subscr.c u32 event, u32 port, u32 node) event 49 net/tipc/subscr.c tipc_evt_write(evt, event, event); event 54 net/tipc/subscr.c tipc_topsrv_queue_evt(sub->net, sub->conid, event, evt); event 77 net/tipc/subscr.c u32 event, u32 port, u32 node, event 99 net/tipc/subscr.c event, port, node); event 78 net/tipc/subscr.h u32 event, u32 port, u32 node, event 313 net/tipc/topsrv.c u32 event, struct tipc_event *evt) event 329 net/tipc/topsrv.c e->inactive = (event == TIPC_SUBSCR_TIMEOUT); event 48 net/tipc/topsrv.h u32 event, struct tipc_event *evt); event 1214 net/tls/tls_device.c static int tls_dev_event(struct notifier_block *this, unsigned long event, event 1223 net/tls/tls_device.c switch (event) { event 428 net/vmw_vsock/virtio_transport.c struct virtio_vsock_event *event) event 435 net/vmw_vsock/virtio_transport.c sg_init_one(&sg, event, sizeof(*event)); event 437 net/vmw_vsock/virtio_transport.c return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL); event 446 net/vmw_vsock/virtio_transport.c struct virtio_vsock_event *event = &vsock->event_list[i]; event 448 net/vmw_vsock/virtio_transport.c virtio_vsock_event_fill_one(vsock, event); event 475 net/vmw_vsock/virtio_transport.c struct virtio_vsock_event *event) event 477 net/vmw_vsock/virtio_transport.c switch (le32_to_cpu(event->id)) { event 499 net/vmw_vsock/virtio_transport.c struct virtio_vsock_event *event; event 503 net/vmw_vsock/virtio_transport.c while ((event = virtqueue_get_buf(vq, &len)) != NULL) { event 504 net/vmw_vsock/virtio_transport.c if (len == sizeof(*event)) event 505 net/vmw_vsock/virtio_transport.c virtio_vsock_event_handle(vsock, event); event 507 net/vmw_vsock/virtio_transport.c virtio_vsock_event_fill_one(vsock, event); event 870 net/wireless/mlme.c enum nl80211_radar_event event, gfp_t gfp) event 877 net/wireless/mlme.c trace_cfg80211_cac_event(netdev, event); event 879 net/wireless/mlme.c if (WARN_ON(!wdev->cac_started && event != NL80211_RADAR_CAC_STARTED)) event 
885 net/wireless/mlme.c switch (event) { event 907 net/wireless/mlme.c nl80211_radar_notify(rdev, chandef, event, netdev, gfp); event 16327 net/wireless/nl80211.c enum nl80211_radar_event event, event 16356 net/wireless/nl80211.c if (nla_put_u32(msg, NL80211_ATTR_RADAR_EVENT, event)) event 120 net/wireless/nl80211.h enum nl80211_radar_event event, event 3917 net/wireless/reg.c enum nl80211_radar_event event) event 3939 net/wireless/reg.c if (event == NL80211_RADAR_DETECTED || event 3940 net/wireless/reg.c event == NL80211_RADAR_CAC_FINISHED) { event 3945 net/wireless/reg.c nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL); event 171 net/wireless/reg.h enum nl80211_radar_event event); event 458 net/wireless/wext-core.c struct iw_event *event; /* Mallocated whole event */ event 572 net/wireless/wext-core.c event = nla_data(nla); event 575 net/wireless/wext-core.c memset(event, 0, hdr_len); event 576 net/wireless/wext-core.c event->len = event_len; event 577 net/wireless/wext-core.c event->cmd = cmd; event 578 net/wireless/wext-core.c memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); event 580 net/wireless/wext-core.c memcpy(((char *) event) + hdr_len, extra, extra_len); event 1167 net/wireless/wext-core.c char *iwe_stream_add_value(struct iw_request_info *info, char *event, event 1182 net/wireless/wext-core.c iwe->len = value - event; event 1183 net/wireless/wext-core.c memcpy(event, (char *) iwe, lcp_len); event 93 net/wireless/wext-sme.c bool event = true; event 102 net/wireless/wext-sme.c event = false; event 104 net/wireless/wext-sme.c WLAN_REASON_DEAUTH_LEAVING, event); event 169 net/wireless/wext-sme.c bool event = true; event 178 net/wireless/wext-sme.c event = false; event 180 net/wireless/wext-sme.c WLAN_REASON_DEAUTH_LEAVING, event); event 221 net/x25/af_x25.c static int x25_device_event(struct notifier_block *this, unsigned long event, event 235 net/x25/af_x25.c switch (event) { event 379 net/xfrm/xfrm_device.c static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) event 383 net/xfrm/xfrm_device.c switch (event) { event 38 net/xfrm/xfrm_replay.c static void xfrm_replay_notify(struct xfrm_state *x, int event) event 51 net/xfrm/xfrm_replay.c switch (event) { event 57 net/xfrm/xfrm_replay.c event = XFRM_REPLAY_TIMEOUT; event 75 net/xfrm/xfrm_replay.c c.event = XFRM_MSG_NEWAE; event 76 net/xfrm/xfrm_replay.c c.data.aevent = event; event 277 net/xfrm/xfrm_replay.c static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) event 293 net/xfrm/xfrm_replay.c switch (event) { event 300 net/xfrm/xfrm_replay.c event = XFRM_REPLAY_TIMEOUT; event 319 net/xfrm/xfrm_replay.c c.event = XFRM_MSG_NEWAE; event 320 net/xfrm/xfrm_replay.c c.data.aevent = event; event 328 net/xfrm/xfrm_replay.c static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) event 345 net/xfrm/xfrm_replay.c switch (event) { event 367 net/xfrm/xfrm_replay.c event = XFRM_REPLAY_TIMEOUT; event 385 net/xfrm/xfrm_replay.c c.event = XFRM_MSG_NEWAE; event 386 net/xfrm/xfrm_replay.c c.data.aevent = event; event 2150 net/xfrm/xfrm_state.c c.event = XFRM_MSG_EXPIRE; event 2198 net/xfrm/xfrm_state.c c.event = XFRM_MSG_POLEXPIRE; event 708 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; event 778 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; event 1680 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; event 1925 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; event 1951 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; event 2120 net/xfrm/xfrm_user.c c.event 
= nlh->nlmsg_type; event 2151 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; event 2841 net/xfrm/xfrm_user.c if (c->event == XFRM_MSG_DELSA) { event 2852 net/xfrm/xfrm_user.c nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); event 2858 net/xfrm/xfrm_user.c if (c->event == XFRM_MSG_DELSA) { event 2891 net/xfrm/xfrm_user.c switch (c->event) { event 2904 net/xfrm/xfrm_user.c c->event); event 3109 net/xfrm/xfrm_user.c if (c->event == XFRM_MSG_DELPOLICY) { event 3121 net/xfrm/xfrm_user.c nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); event 3127 net/xfrm/xfrm_user.c if (c->event == XFRM_MSG_DELPOLICY) { event 3197 net/xfrm/xfrm_user.c switch (c->event) { event 3208 net/xfrm/xfrm_user.c c->event); event 45 samples/bpf/bpf_load.c static int populate_prog_array(const char *event, int prog_fd) event 47 samples/bpf/bpf_load.c int ind = atoi(event), err; event 76 samples/bpf/bpf_load.c static int load_and_attach(const char *event, struct bpf_insn *prog, int size) event 78 samples/bpf/bpf_load.c bool is_socket = strncmp(event, "socket", 6) == 0; event 79 samples/bpf/bpf_load.c bool is_kprobe = strncmp(event, "kprobe/", 7) == 0; event 80 samples/bpf/bpf_load.c bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0; event 81 samples/bpf/bpf_load.c bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0; event 82 samples/bpf/bpf_load.c bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0; event 83 samples/bpf/bpf_load.c bool is_xdp = strncmp(event, "xdp", 3) == 0; event 84 samples/bpf/bpf_load.c bool is_perf_event = strncmp(event, "perf_event", 10) == 0; event 85 samples/bpf/bpf_load.c bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0; event 86 samples/bpf/bpf_load.c bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0; event 87 samples/bpf/bpf_load.c bool is_sockops = strncmp(event, "sockops", 7) == 0; event 88 samples/bpf/bpf_load.c bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0; event 89 samples/bpf/bpf_load.c bool is_sk_msg = strncmp(event, "sk_msg", 6) == 0; event 124 samples/bpf/bpf_load.c printf("Unknown event '%s'\n", event); event 145 samples/bpf/bpf_load.c event += 6; event 147 samples/bpf/bpf_load.c event += 7; event 148 samples/bpf/bpf_load.c if (*event != '/') event 150 samples/bpf/bpf_load.c event++; event 151 samples/bpf/bpf_load.c if (!isdigit(*event)) { event 155 samples/bpf/bpf_load.c return populate_prog_array(event, fd); event 159 samples/bpf/bpf_load.c efd = bpf_raw_tracepoint_open(event + 15, fd); event 161 samples/bpf/bpf_load.c printf("tracepoint %s %s\n", event + 15, strerror(errno)); event 173 samples/bpf/bpf_load.c event += 7; event 175 samples/bpf/bpf_load.c event += 10; event 177 samples/bpf/bpf_load.c if (*event == 0) { event 182 samples/bpf/bpf_load.c if (isdigit(*event)) event 183 samples/bpf/bpf_load.c return populate_prog_array(event, fd); event 186 samples/bpf/bpf_load.c if (strncmp(event, "sys_", 4) == 0) { event 188 samples/bpf/bpf_load.c is_kprobe ? 'p' : 'r', event, event); event 198 samples/bpf/bpf_load.c is_kprobe ? 
'p' : 'r', event, event); event 202 samples/bpf/bpf_load.c event, strerror(errno)); event 210 samples/bpf/bpf_load.c strcat(buf, event); event 213 samples/bpf/bpf_load.c event += 11; event 215 samples/bpf/bpf_load.c if (*event == 0) { event 221 samples/bpf/bpf_load.c strcat(buf, event); event 227 samples/bpf/bpf_load.c printf("failed to open event %s\n", event); event 233 samples/bpf/bpf_load.c printf("read from '%s' failed '%s'\n", event, strerror(errno)); event 456 samples/uhid/uhid-example.c ret = event(fd); event 24 scripts/dtc/yamltree.c #define yaml_emitter_emit_or_die(emitter, event) ( \ event 26 scripts/dtc/yamltree.c if (!yaml_emitter_emit(emitter, event)) \ event 34 scripts/dtc/yamltree.c yaml_event_t event; event 48 scripts/dtc/yamltree.c yaml_sequence_start_event_initialize(&event, NULL, event 50 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 81 scripts/dtc/yamltree.c yaml_scalar_event_initialize(&event, NULL, event 85 scripts/dtc/yamltree.c yaml_scalar_event_initialize(&event, NULL, event 88 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 91 scripts/dtc/yamltree.c yaml_sequence_end_event_initialize(&event); event 92 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 97 scripts/dtc/yamltree.c yaml_event_t event; event 106 scripts/dtc/yamltree.c yaml_scalar_event_initialize(&event, NULL, event 109 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 114 scripts/dtc/yamltree.c yaml_event_t event; event 119 scripts/dtc/yamltree.c yaml_scalar_event_initialize(&event, NULL, event 122 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 126 scripts/dtc/yamltree.c yaml_scalar_event_initialize(&event, NULL, event 130 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 137 scripts/dtc/yamltree.c yaml_sequence_start_event_initialize(&event, NULL, event 139 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 171 scripts/dtc/yamltree.c yaml_sequence_end_event_initialize(&event); event 172 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 180 scripts/dtc/yamltree.c yaml_event_t event; event 185 scripts/dtc/yamltree.c yaml_mapping_start_event_initialize(&event, NULL, event 187 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 194 scripts/dtc/yamltree.c yaml_scalar_event_initialize(&event, NULL, event 197 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 201 scripts/dtc/yamltree.c yaml_mapping_end_event_initialize(&event); event 202 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(emitter, &event); event 208 scripts/dtc/yamltree.c yaml_event_t event; event 212 scripts/dtc/yamltree.c yaml_stream_start_event_initialize(&event, YAML_UTF8_ENCODING); event 213 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(&emitter, &event); event 215 scripts/dtc/yamltree.c yaml_document_start_event_initialize(&event, NULL, NULL, NULL, 0); event 216 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(&emitter, &event); event 218 scripts/dtc/yamltree.c yaml_sequence_start_event_initialize(&event, NULL, (yaml_char_t *)YAML_SEQ_TAG, 1, YAML_ANY_SEQUENCE_STYLE); event 219 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(&emitter, &event); event 223 scripts/dtc/yamltree.c yaml_sequence_end_event_initialize(&event); event 224 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(&emitter, &event); event 226 scripts/dtc/yamltree.c yaml_document_end_event_initialize(&event, 0); event 227 scripts/dtc/yamltree.c 
yaml_emitter_emit_or_die(&emitter, &event); event 229 scripts/dtc/yamltree.c yaml_stream_end_event_initialize(&event); event 230 scripts/dtc/yamltree.c yaml_emitter_emit_or_die(&emitter, &event); event 414 scripts/kconfig/gconf.c gboolean on_window1_delete_event(GtkWidget * widget, GdkEvent * event, event 880 scripts/kconfig/gconf.c GdkEventButton * event, gpointer user_data) event 890 scripts/kconfig/gconf.c gint tx = (gint) event->x; event 891 scripts/kconfig/gconf.c gint ty = (gint) event->y; event 907 scripts/kconfig/gconf.c if (event->type == GDK_2BUTTON_PRESS) { event 937 scripts/kconfig/gconf.c GdkEventKey * event, gpointer user_data) event 950 scripts/kconfig/gconf.c if (event->keyval == GDK_space) { event 957 scripts/kconfig/gconf.c if (event->keyval == GDK_KP_Enter) { event 965 scripts/kconfig/gconf.c if (!strcasecmp(event->string, "n")) event 967 scripts/kconfig/gconf.c else if (!strcasecmp(event->string, "m")) event 969 scripts/kconfig/gconf.c else if (!strcasecmp(event->string, "y")) event 998 scripts/kconfig/gconf.c GdkEventButton * event, gpointer user_data) event 1006 scripts/kconfig/gconf.c gint tx = (gint) event->x; event 1007 scripts/kconfig/gconf.c gint ty = (gint) event->y; event 1018 scripts/kconfig/gconf.c if (event->type == GDK_2BUTTON_PRESS) { event 163 security/integrity/ima/ima.h int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event, event 348 security/integrity/ima/ima_policy.c int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event, event 351 security/integrity/ima/ima_policy.c if (event != LSM_POLICY_CHANGE) event 463 security/security.c int call_blocking_lsm_notifier(enum lsm_event event, void *data) event 466 security/security.c event, data); event 77 security/selinux/avc.c int (*callback) (u32 event); event 795 security/selinux/avc.c int __init avc_add_callback(int (*callback)(u32 event), u32 events) event 829 security/selinux/avc.c u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid, event 899 security/selinux/avc.c switch (event) { event 184 security/selinux/hooks.c static int selinux_netcache_avc_callback(u32 event) event 186 security/selinux/hooks.c if (event == AVC_CALLBACK_RESET) { event 195 security/selinux/hooks.c static int selinux_lsm_notifier_avc_callback(u32 event) event 197 security/selinux/hooks.c if (event == AVC_CALLBACK_RESET) { event 181 security/selinux/include/avc.h int avc_add_callback(int (*callback)(u32 event), u32 events); event 251 security/selinux/netif.c unsigned long event, void *ptr) event 255 security/selinux/netif.c if (event == NETDEV_DOWN) event 3514 security/selinux/ss/services.c static int aurule_avc_callback(u32 event) event 3518 security/selinux/ss/services.c if (event == AVC_CALLBACK_RESET && aurule_callback) event 560 sound/core/pcm_native.c int event) event 564 sound/core/pcm_native.c snd_timer_notify(substream->timer, event, event 96 sound/core/rawmidi.c if (runtime->event) event 97 sound/core/rawmidi.c runtime->event(runtime->substream); event 122 sound/core/rawmidi.c runtime->event = NULL; event 946 sound/core/rawmidi.c if (runtime->event) event 154 sound/core/seq/oss/seq_oss_rw.c struct snd_seq_event event; event 161 sound/core/seq/oss/seq_oss_rw.c memset(&event, 0, sizeof(event)); event 163 sound/core/seq/oss/seq_oss_rw.c event.type = SNDRV_SEQ_EVENT_NOTEOFF; event 164 sound/core/seq/oss/seq_oss_rw.c snd_seq_oss_fill_addr(dp, &event, dp->addr.client, dp->addr.port); event 166 sound/core/seq/oss/seq_oss_rw.c if (snd_seq_oss_process_event(dp, rec, &event)) event 169 
sound/core/seq/oss/seq_oss_rw.c event.time.tick = snd_seq_oss_timer_cur_tick(dp->timer); event 171 sound/core/seq/oss/seq_oss_rw.c snd_seq_oss_dispatch(dp, &event, 0, 0); event 173 sound/core/seq/oss/seq_oss_rw.c rc = snd_seq_kernel_client_enqueue(dp->cseq, &event, opt, event 67 sound/core/seq/seq_clientmgr.c struct snd_seq_event *event, event 70 sound/core/seq/seq_clientmgr.c struct snd_seq_event *event, event 442 sound/core/seq/seq_clientmgr.c if (snd_seq_ev_is_variable(&cell->event)) { event 444 sound/core/seq/seq_clientmgr.c tmpev = cell->event; event 452 sound/core/seq/seq_clientmgr.c err = snd_seq_expand_var_event(&cell->event, count, event 461 sound/core/seq/seq_clientmgr.c if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) { event 499 sound/core/seq/seq_clientmgr.c static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event, event 504 sound/core/seq/seq_clientmgr.c dest = snd_seq_client_use_ptr(event->dest.client); event 510 sound/core/seq/seq_clientmgr.c ! test_bit(event->type, dest->event_filter)) event 534 sound/core/seq/seq_clientmgr.c struct snd_seq_event *event, event 553 sound/core/seq/seq_clientmgr.c bounce_ev.dest.port = event->source.port; event 554 sound/core/seq/seq_clientmgr.c bounce_ev.data.quote.origin = event->dest; event 555 sound/core/seq/seq_clientmgr.c bounce_ev.data.quote.event = event; event 572 sound/core/seq/seq_clientmgr.c static int update_timestamp_of_queue(struct snd_seq_event *event, event 580 sound/core/seq/seq_clientmgr.c event->queue = queue; event 581 sound/core/seq/seq_clientmgr.c event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK; event 583 sound/core/seq/seq_clientmgr.c event->time.time = snd_seq_timer_get_cur_time(q->timer, true); event 584 sound/core/seq/seq_clientmgr.c event->flags |= SNDRV_SEQ_TIME_STAMP_REAL; event 586 sound/core/seq/seq_clientmgr.c event->time.tick = snd_seq_timer_get_cur_tick(q->timer); event 587 sound/core/seq/seq_clientmgr.c event->flags |= SNDRV_SEQ_TIME_STAMP_TICK; event 602 sound/core/seq/seq_clientmgr.c struct snd_seq_event *event, event 610 sound/core/seq/seq_clientmgr.c direct = snd_seq_ev_is_direct(event); event 612 sound/core/seq/seq_clientmgr.c dest = get_event_dest_client(event, filter); event 615 sound/core/seq/seq_clientmgr.c dest_port = snd_seq_port_use_ptr(dest, event->dest.port); event 626 sound/core/seq/seq_clientmgr.c update_timestamp_of_queue(event, dest_port->time_queue, event 632 sound/core/seq/seq_clientmgr.c result = snd_seq_fifo_event_in(dest->data.user.fifo, event); event 638 sound/core/seq/seq_clientmgr.c result = dest_port->event_input(event, direct, event 653 sound/core/seq/seq_clientmgr.c result = bounce_error_event(client, event, result, atomic, hop); event 663 sound/core/seq/seq_clientmgr.c struct snd_seq_event *event, event 672 sound/core/seq/seq_clientmgr.c src_port = snd_seq_port_use_ptr(client, event->source.port); event 676 sound/core/seq/seq_clientmgr.c event_saved = *event; event 688 sound/core/seq/seq_clientmgr.c event->dest = subs->info.dest; event 691 sound/core/seq/seq_clientmgr.c update_timestamp_of_queue(event, subs->info.queue, event 693 sound/core/seq/seq_clientmgr.c err = snd_seq_deliver_single_event(client, event, event 703 sound/core/seq/seq_clientmgr.c *event = event_saved; event 709 sound/core/seq/seq_clientmgr.c *event = event_saved; /* restore */ event 720 sound/core/seq/seq_clientmgr.c struct snd_seq_event *event, event 727 sound/core/seq/seq_clientmgr.c dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST); event 733 
sound/core/seq/seq_clientmgr.c event->dest.port = port->addr.port; event 735 sound/core/seq/seq_clientmgr.c err = snd_seq_deliver_single_event(NULL, event, event 748 sound/core/seq/seq_clientmgr.c event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */ event 757 sound/core/seq/seq_clientmgr.c struct snd_seq_event *event, int atomic, int hop) event 763 sound/core/seq/seq_clientmgr.c addr = event->dest; /* save */ event 769 sound/core/seq/seq_clientmgr.c event->dest.client = dest; event 770 sound/core/seq/seq_clientmgr.c event->dest.port = addr.port; event 772 sound/core/seq/seq_clientmgr.c err = port_broadcast_event(client, event, atomic, hop); event 775 sound/core/seq/seq_clientmgr.c err = snd_seq_deliver_single_event(NULL, event, event 786 sound/core/seq/seq_clientmgr.c event->dest = addr; /* restore */ event 792 sound/core/seq/seq_clientmgr.c static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event, event 809 sound/core/seq/seq_clientmgr.c static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event, event 817 sound/core/seq/seq_clientmgr.c event->source.client, event->source.port, event 818 sound/core/seq/seq_clientmgr.c event->dest.client, event->dest.port); event 822 sound/core/seq/seq_clientmgr.c if (snd_seq_ev_is_variable(event) && event 823 sound/core/seq/seq_clientmgr.c snd_BUG_ON(atomic && (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR))) event 826 sound/core/seq/seq_clientmgr.c if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS || event 827 sound/core/seq/seq_clientmgr.c event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) event 828 sound/core/seq/seq_clientmgr.c result = deliver_to_subscribers(client, event, atomic, hop); event 830 sound/core/seq/seq_clientmgr.c else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST || event 831 sound/core/seq/seq_clientmgr.c event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST) event 832 sound/core/seq/seq_clientmgr.c result = broadcast_event(client, event, atomic, hop); event 833 sound/core/seq/seq_clientmgr.c else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS) event 834 sound/core/seq/seq_clientmgr.c result = multicast_event(client, event, atomic, hop); event 835 sound/core/seq/seq_clientmgr.c else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST) event 836 sound/core/seq/seq_clientmgr.c result = port_broadcast_event(client, event, atomic, hop); event 839 sound/core/seq/seq_clientmgr.c result = snd_seq_deliver_single_event(client, event, 0, atomic, hop); event 862 sound/core/seq/seq_clientmgr.c client = snd_seq_client_use_ptr(cell->event.source.client); event 868 sound/core/seq/seq_clientmgr.c if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) { event 876 sound/core/seq/seq_clientmgr.c tmpev = cell->event; event 885 sound/core/seq/seq_clientmgr.c ev = &cell->event; event 913 sound/core/seq/seq_clientmgr.c result = snd_seq_deliver_event(client, &cell->event, atomic, hop); event 927 sound/core/seq/seq_clientmgr.c struct snd_seq_event *event, event 936 sound/core/seq/seq_clientmgr.c if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { event 937 sound/core/seq/seq_clientmgr.c event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; event 938 sound/core/seq/seq_clientmgr.c event->queue = SNDRV_SEQ_QUEUE_DIRECT; event 941 sound/core/seq/seq_clientmgr.c if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) { event 942 sound/core/seq/seq_clientmgr.c event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST; event 943 sound/core/seq/seq_clientmgr.c event->queue = SNDRV_SEQ_QUEUE_DIRECT; event 946 
sound/core/seq/seq_clientmgr.c if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { event 948 sound/core/seq/seq_clientmgr.c struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port); event 955 sound/core/seq/seq_clientmgr.c if (snd_seq_ev_is_direct(event)) { event 956 sound/core/seq/seq_clientmgr.c if (event->type == SNDRV_SEQ_EVENT_NOTE) event 958 sound/core/seq/seq_clientmgr.c return snd_seq_deliver_event(client, event, atomic, hop); event 962 sound/core/seq/seq_clientmgr.c if (snd_seq_queue_is_used(event->queue, client->number) <= 0) event 968 sound/core/seq/seq_clientmgr.c err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, event 1025 sound/core/seq/seq_clientmgr.c struct snd_seq_event event; event 1051 sound/core/seq/seq_clientmgr.c len = sizeof(event); event 1052 sound/core/seq/seq_clientmgr.c if (copy_from_user(&event, buf, len)) { event 1056 sound/core/seq/seq_clientmgr.c event.source.client = client->number; /* fill in client number */ event 1058 sound/core/seq/seq_clientmgr.c if (check_event_type_and_length(&event)) { event 1064 sound/core/seq/seq_clientmgr.c if (event.type == SNDRV_SEQ_EVENT_NONE) event 1066 sound/core/seq/seq_clientmgr.c else if (snd_seq_ev_is_reserved(&event)) { event 1071 sound/core/seq/seq_clientmgr.c if (snd_seq_ev_is_variable(&event)) { event 1072 sound/core/seq/seq_clientmgr.c int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK; event 1079 sound/core/seq/seq_clientmgr.c event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR; event 1080 sound/core/seq/seq_clientmgr.c event.data.ext.ptr = (char __force *)buf event 1085 sound/core/seq/seq_clientmgr.c if (client->convert32 && snd_seq_ev_is_varusr(&event)) { event 1086 sound/core/seq/seq_clientmgr.c void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]); event 1087 sound/core/seq/seq_clientmgr.c event.data.ext.ptr = ptr; event 1093 sound/core/seq/seq_clientmgr.c err = snd_seq_client_enqueue_event(client, &event, file, event 1448 sound/core/seq/seq_clientmgr.c struct snd_seq_event event; event 1450 sound/core/seq/seq_clientmgr.c memset(&event, 0, sizeof(event)); event 1451 sound/core/seq/seq_clientmgr.c event.type = evtype; event 1452 sound/core/seq/seq_clientmgr.c event.data.connect.dest = info->dest; event 1453 sound/core/seq/seq_clientmgr.c event.data.connect.sender = info->sender; event 1455 sound/core/seq/seq_clientmgr.c return snd_seq_system_notify(client, port, &event); /* non-atomic */ event 102 sound/core/seq/seq_fifo.c struct snd_seq_event *event) event 112 sound/core/seq/seq_fifo.c err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */ event 35 sound/core/seq/seq_fifo.h int snd_seq_fifo_event_in(struct snd_seq_fifo *f, struct snd_seq_event *event); event 58 sound/core/seq/seq_memory.c static int get_var_len(const struct snd_seq_event *event) event 60 sound/core/seq/seq_memory.c if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) event 63 sound/core/seq/seq_memory.c return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK; event 66 sound/core/seq/seq_memory.c int snd_seq_dump_var_event(const struct snd_seq_event *event, event 72 sound/core/seq/seq_memory.c if ((len = get_var_len(event)) <= 0) event 75 sound/core/seq/seq_memory.c if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) { event 77 sound/core/seq/seq_memory.c char __user *curptr = (char __force __user *)event->data.ext.ptr; event 92 sound/core/seq/seq_memory.c if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) event 93 
sound/core/seq/seq_memory.c return func(private_data, event->data.ext.ptr, len); event 95 sound/core/seq/seq_memory.c cell = (struct snd_seq_event_cell *)event->data.ext.ptr; event 100 sound/core/seq/seq_memory.c err = func(private_data, &cell->event, size); event 130 sound/core/seq/seq_memory.c int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf, event 136 sound/core/seq/seq_memory.c if ((len = get_var_len(event)) < 0) event 144 sound/core/seq/seq_memory.c if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) { event 147 sound/core/seq/seq_memory.c if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len)) event 151 sound/core/seq/seq_memory.c err = snd_seq_dump_var_event(event, event 184 sound/core/seq/seq_memory.c if (snd_seq_ev_is_variable(&cell->event)) { event 185 sound/core/seq/seq_memory.c if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) { event 187 sound/core/seq/seq_memory.c curp = cell->event.data.ext.ptr; event 279 sound/core/seq/seq_memory.c int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, event 291 sound/core/seq/seq_memory.c if (snd_seq_ev_is_variable(event)) { event 292 sound/core/seq/seq_memory.c extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK; event 303 sound/core/seq/seq_memory.c cell->event = *event; event 306 sound/core/seq/seq_memory.c if (snd_seq_ev_is_variable(event)) { event 308 sound/core/seq/seq_memory.c int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED; event 309 sound/core/seq/seq_memory.c int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR; event 313 sound/core/seq/seq_memory.c cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED; event 314 sound/core/seq/seq_memory.c cell->event.data.ext.ptr = NULL; event 316 sound/core/seq/seq_memory.c src = (struct snd_seq_event_cell *)event->data.ext.ptr; event 317 sound/core/seq/seq_memory.c buf = (char *)event->data.ext.ptr; event 328 sound/core/seq/seq_memory.c if (cell->event.data.ext.ptr == NULL) event 329 sound/core/seq/seq_memory.c cell->event.data.ext.ptr = tmp; event 335 sound/core/seq/seq_memory.c tmp->event = src->event; event 338 sound/core/seq/seq_memory.c if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) { event 343 sound/core/seq/seq_memory.c memcpy(&tmp->event, buf, size); event 16 sound/core/seq/seq_memory.h struct snd_seq_event event; event 53 sound/core/seq/seq_memory.h int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, event 186 sound/core/seq/seq_midi.c runtime->event = snd_midi_input_event; event 49 sound/core/seq/seq_midi_event.c int event; event 89 sound/core/seq/seq_midi_event.c int event; event 184 sound/core/seq/seq_midi_event.c ev->type = status_event[ST_SPECIAL + c - 0xf0].event; event 215 sound/core/seq/seq_midi_event.c ev->type = status_event[dev->type].event; event 299 sound/core/seq/seq_midi_event.c if (ev->type == status_event[type].event) event 303 sound/core/seq/seq_midi_event.c if (ev->type == extra_event[type].event) event 143 sound/core/seq/seq_prioq.c prior = (cell->event.flags & SNDRV_SEQ_PRIORITY_MASK); event 151 sound/core/seq/seq_prioq.c if (compare_timestamp(&cell->event, &f->tail->event)) { event 170 sound/core/seq/seq_prioq.c int rel = compare_timestamp_rel(&cell->event, &cur->event); event 225 sound/core/seq/seq_prioq.c if (cell && current_time && !event_is_ready(&cell->event, current_time)) event 255 sound/core/seq/seq_prioq.c if (cell->event.source.client == client || event 256 sound/core/seq/seq_prioq.c cell->event.dest.client == 
client) event 260 sound/core/seq/seq_prioq.c switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) { event 262 sound/core/seq/seq_prioq.c if (cell->event.time.tick) event 266 sound/core/seq/seq_prioq.c if (cell->event.time.time.tv_sec || event 267 sound/core/seq/seq_prioq.c cell->event.time.time.tv_nsec) event 309 sound/core/seq/seq_prioq.c cell->event.type, event 310 sound/core/seq/seq_prioq.c cell->event.source.client, event 311 sound/core/seq/seq_prioq.c cell->event.dest.client, event 398 sound/core/seq/seq_prioq.c if (cell->event.source.client == client && event 399 sound/core/seq/seq_prioq.c prioq_remove_match(info, &cell->event)) { event 296 sound/core/seq/seq_queue.c dest = cell->event.queue; /* destination queue */ event 301 sound/core/seq/seq_queue.c if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) { event 302 sound/core/seq/seq_queue.c switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) { event 304 sound/core/seq/seq_queue.c cell->event.time.tick += q->timer->tick.cur_tick; event 308 sound/core/seq/seq_queue.c snd_seq_inc_real_time(&cell->event.time.time, event 312 sound/core/seq/seq_queue.c cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK; event 313 sound/core/seq/seq_queue.c cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS; event 316 sound/core/seq/seq_queue.c switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) { event 147 sound/core/seq/seq_virmidi.c &vmidi->event)) event 149 sound/core/seq/seq_virmidi.c if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) { event 151 sound/core/seq/seq_virmidi.c &vmidi->event, event 153 sound/core/seq/seq_virmidi.c vmidi->event.type = SNDRV_SEQ_EVENT_NONE; event 224 sound/core/seq/seq_virmidi.c snd_virmidi_init_event(vmidi, &vmidi->event); event 468 sound/core/timer.c static void snd_timer_notify1(struct snd_timer_instance *ti, int event) event 479 sound/core/timer.c if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START || event 480 sound/core/timer.c event > SNDRV_TIMER_EVENT_PAUSE)) event 483 sound/core/timer.c (event == SNDRV_TIMER_EVENT_START || event 484 sound/core/timer.c event == SNDRV_TIMER_EVENT_CONTINUE)) event 487 sound/core/timer.c ti->ccallback(ti, event, &tstamp, resolution); event 496 sound/core/timer.c ts->ccallback(ts, event + 100, &tstamp, resolution); event 1045 sound/core/timer.c void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp) event 1055 sound/core/timer.c if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART || event 1056 sound/core/timer.c event > SNDRV_TIMER_EVENT_MRESUME)) event 1059 sound/core/timer.c if (event == SNDRV_TIMER_EVENT_MSTART || event 1060 sound/core/timer.c event == SNDRV_TIMER_EVENT_MCONTINUE || event 1061 sound/core/timer.c event == SNDRV_TIMER_EVENT_MRESUME) event 1065 sound/core/timer.c ti->ccallback(ti, event, tstamp, resolution); event 1068 sound/core/timer.c ts->ccallback(ts, event, tstamp, resolution); event 1337 sound/core/timer.c int event, event 1345 sound/core/timer.c if (event >= SNDRV_TIMER_EVENT_START && event 1346 sound/core/timer.c event <= SNDRV_TIMER_EVENT_PAUSE) event 1348 sound/core/timer.c if ((tu->filter & (1 << event)) == 0 || !tu->tread) event 1351 sound/core/timer.c r1.event = event; event 1394 sound/core/timer.c r1.event = SNDRV_TIMER_EVENT_RESOLUTION; event 1408 sound/core/timer.c if (r->event == SNDRV_TIMER_EVENT_TICK) { event 1415 sound/core/timer.c r1.event = SNDRV_TIMER_EVENT_TICK; event 1861 sound/core/timer.c tread.event = SNDRV_TIMER_EVENT_EARLY; event 94 sound/drivers/pcsp/pcsp_input.c input_dev->event = 
pcspkr_input_event; event 24 sound/firewire/bebob/bebob_hwdep.c union snd_firewire_event event; event 38 sound/firewire/bebob/bebob_hwdep.c memset(&event, 0, sizeof(event)); event 40 sound/firewire/bebob/bebob_hwdep.c event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; event 41 sound/firewire/bebob/bebob_hwdep.c event.lock_status.status = (bebob->dev_lock_count > 0); event 44 sound/firewire/bebob/bebob_hwdep.c count = min_t(long, count, sizeof(event.lock_status)); event 49 sound/firewire/bebob/bebob_hwdep.c if (copy_to_user(buf, &event, count)) event 16 sound/firewire/dice/dice-hwdep.c union snd_firewire_event event; event 30 sound/firewire/dice/dice-hwdep.c memset(&event, 0, sizeof(event)); event 32 sound/firewire/dice/dice-hwdep.c event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; event 33 sound/firewire/dice/dice-hwdep.c event.lock_status.status = dice->dev_lock_count > 0; event 36 sound/firewire/dice/dice-hwdep.c count = min_t(long, count, sizeof(event.lock_status)); event 38 sound/firewire/dice/dice-hwdep.c event.dice_notification.type = event 40 sound/firewire/dice/dice-hwdep.c event.dice_notification.notification = dice->notification_bits; event 43 sound/firewire/dice/dice-hwdep.c count = min_t(long, count, sizeof(event.dice_notification)); event 48 sound/firewire/dice/dice-hwdep.c if (copy_to_user(buf, &event, count)) event 24 sound/firewire/digi00x/digi00x-hwdep.c union snd_firewire_event event; event 38 sound/firewire/digi00x/digi00x-hwdep.c memset(&event, 0, sizeof(event)); event 40 sound/firewire/digi00x/digi00x-hwdep.c event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; event 41 sound/firewire/digi00x/digi00x-hwdep.c event.lock_status.status = (dg00x->dev_lock_count > 0); event 44 sound/firewire/digi00x/digi00x-hwdep.c count = min_t(long, count, sizeof(event.lock_status)); event 46 sound/firewire/digi00x/digi00x-hwdep.c event.digi00x_message.type = event 48 sound/firewire/digi00x/digi00x-hwdep.c event.digi00x_message.message = dg00x->msg; event 51 sound/firewire/digi00x/digi00x-hwdep.c count = min_t(long, count, sizeof(event.digi00x_message)); event 56 sound/firewire/digi00x/digi00x-hwdep.c if (copy_to_user(buf, &event, count)) event 23 sound/firewire/fireface/ff-hwdep.c union snd_firewire_event event; event 37 sound/firewire/fireface/ff-hwdep.c memset(&event, 0, sizeof(event)); event 39 sound/firewire/fireface/ff-hwdep.c event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; event 40 sound/firewire/fireface/ff-hwdep.c event.lock_status.status = (ff->dev_lock_count > 0); event 43 sound/firewire/fireface/ff-hwdep.c count = min_t(long, count, sizeof(event.lock_status)); event 48 sound/firewire/fireface/ff-hwdep.c if (copy_to_user(buf, &event, count)) event 101 sound/firewire/fireworks/fireworks_hwdep.c union snd_firewire_event event = { event 107 sound/firewire/fireworks/fireworks_hwdep.c event.lock_status.status = (efw->dev_lock_count > 0); event 112 sound/firewire/fireworks/fireworks_hwdep.c count = min_t(long, count, sizeof(event.lock_status)); event 114 sound/firewire/fireworks/fireworks_hwdep.c if (copy_to_user(buf, &event, count)) event 24 sound/firewire/motu/motu-hwdep.c union snd_firewire_event event; event 38 sound/firewire/motu/motu-hwdep.c memset(&event, 0, sizeof(event)); event 40 sound/firewire/motu/motu-hwdep.c event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; event 41 sound/firewire/motu/motu-hwdep.c event.lock_status.status = (motu->dev_lock_count > 0); event 44 sound/firewire/motu/motu-hwdep.c count = min_t(long, count, 
sizeof(event.lock_status)); event 46 sound/firewire/motu/motu-hwdep.c event.motu_notification.type = SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION; event 47 sound/firewire/motu/motu-hwdep.c event.motu_notification.message = motu->msg; event 50 sound/firewire/motu/motu-hwdep.c count = min_t(long, count, sizeof(event.motu_notification)); event 55 sound/firewire/motu/motu-hwdep.c if (copy_to_user(buf, &event, count)) event 23 sound/firewire/oxfw/oxfw-hwdep.c union snd_firewire_event event; event 37 sound/firewire/oxfw/oxfw-hwdep.c memset(&event, 0, sizeof(event)); event 39 sound/firewire/oxfw/oxfw-hwdep.c event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; event 40 sound/firewire/oxfw/oxfw-hwdep.c event.lock_status.status = (oxfw->dev_lock_count > 0); event 43 sound/firewire/oxfw/oxfw-hwdep.c count = min_t(long, count, sizeof(event.lock_status)); event 48 sound/firewire/oxfw/oxfw-hwdep.c if (copy_to_user(buf, &event, count)) event 21 sound/firewire/tascam/tascam-hwdep.c struct snd_firewire_event_lock_status event = { event 25 sound/firewire/tascam/tascam-hwdep.c event.status = (tscm->dev_lock_count > 0); event 27 sound/firewire/tascam/tascam-hwdep.c count = min_t(long, count, sizeof(event)); event 31 sound/firewire/tascam/tascam-hwdep.c if (copy_to_user(buf, &event, count)) event 930 sound/pci/asihpi/hpi_internal.h struct hpi_async_event event; event 1135 sound/pci/cmipci.c int event; event 1140 sound/pci/cmipci.c event = SNDRV_CTL_EVENT_MASK_INFO; event 1143 sound/pci/cmipci.c event |= SNDRV_CTL_EVENT_MASK_VALUE; event 1146 sound/pci/cmipci.c snd_ctl_notify(cm->card, event, &ctl->id); event 1171 sound/pci/cmipci.c int event; event 1176 sound/pci/cmipci.c event = SNDRV_CTL_EVENT_MASK_INFO; event 1180 sound/pci/cmipci.c event |= SNDRV_CTL_EVENT_MASK_VALUE; event 1182 sound/pci/cmipci.c snd_ctl_notify(cm->card, event, &ctl->id); event 1962 sound/pci/es1968.c u32 event; event 1964 sound/pci/es1968.c if (!(event = inb(chip->io_port + 0x1A))) event 1969 sound/pci/es1968.c if (event & ESM_HWVOL_IRQ) event 1975 sound/pci/es1968.c if ((event & ESM_MPU401_IRQ) && chip->rmidi) { event 1979 sound/pci/es1968.c if (event & ESM_SOUND_IRQ) { event 243 sound/pci/hda/hda_beep.c input_dev->event = snd_hda_beep_event; event 561 sound/pci/hda/hda_jack.c struct hda_jack_tbl *event; event 564 sound/pci/hda/hda_jack.c event = snd_hda_jack_tbl_get_from_tag(codec, tag); event 565 sound/pci/hda/hda_jack.c if (!event) event 567 sound/pci/hda/hda_jack.c event->jack_dirty = 1; event 569 sound/pci/hda/hda_jack.c call_jack_callback(codec, res, event); event 845 sound/pci/hda/patch_realtek.c ((codec)->core.dev.power.power_state.event == PM_EVENT_RESUME) event 847 sound/pci/hda/patch_realtek.c ((codec)->core.dev.power.power_state.event == PM_EVENT_RESTORE) event 3643 sound/pci/hda/patch_realtek.c codec->core.dev.power.power_state.event == PM_EVENT_RESTORE) { event 4305 sound/pci/hda/patch_realtek.c struct hda_jack_callback *event) event 490 sound/pci/hda/patch_sigmatel.c struct hda_jack_callback *event) event 498 sound/pci/hda/patch_sigmatel.c !!(data & (1 << event->private_data))); event 232 sound/ppc/beep.c input_dev->event = snd_pmac_beep_event; event 380 sound/soc/codecs/88pm860x-codec.c struct snd_kcontrol *kcontrol, int event) event 397 sound/soc/codecs/88pm860x-codec.c struct snd_kcontrol *kcontrol, int event) event 407 sound/soc/codecs/88pm860x-codec.c switch (event) { event 549 sound/soc/codecs/adau1373.c struct snd_kcontrol *kcontrol, int event) event 556 sound/soc/codecs/adau1373.c if 
(SND_SOC_DAPM_EVENT_ON(event)) event 564 sound/soc/codecs/adau1373.c if (SND_SOC_DAPM_EVENT_ON(event)) event 255 sound/soc/codecs/adau1761.c struct snd_kcontrol *kcontrol, int event) event 174 sound/soc/codecs/adau1781.c struct snd_kcontrol *kcontrol, int event) event 73 sound/soc/codecs/adau17x1.c struct snd_kcontrol *kcontrol, int event) event 78 sound/soc/codecs/adau17x1.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 92 sound/soc/codecs/adau17x1.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 103 sound/soc/codecs/adau17x1.c struct snd_kcontrol *kcontrol, int event) event 23 sound/soc/codecs/adau7002.c struct snd_kcontrol *kcontrol, int event) event 30 sound/soc/codecs/adau7002.c switch (event) { event 175 sound/soc/codecs/ak4642.c struct snd_kcontrol *kcontrol, int event) event 179 sound/soc/codecs/ak4642.c switch (event) { event 159 sound/soc/codecs/ak4671.c struct snd_kcontrol *kcontrol, int event) event 163 sound/soc/codecs/ak4671.c switch (event) { event 51 sound/soc/codecs/alc5623.c struct snd_kcontrol *kcontrol, int event) event 61 sound/soc/codecs/alc5623.c switch (event) { event 114 sound/soc/codecs/alc5632.c struct snd_kcontrol *kcontrol, int event) event 124 sound/soc/codecs/alc5632.c switch (event) { event 82 sound/soc/codecs/arizona.c int event) event 88 sound/soc/codecs/arizona.c switch (event) { event 111 sound/soc/codecs/arizona.c return arizona_out_ev(w, kcontrol, event); event 907 sound/soc/codecs/arizona.c int event) event 918 sound/soc/codecs/arizona.c switch (event) { event 954 sound/soc/codecs/arizona.c int event) event 960 sound/soc/codecs/arizona.c switch (event) { event 1072 sound/soc/codecs/arizona.c int event) event 1080 sound/soc/codecs/arizona.c switch (event) { event 1089 sound/soc/codecs/arizona.c return arizona_out_ev(w, kcontrol, event); event 1105 sound/soc/codecs/arizona.c return arizona_out_ev(w, kcontrol, event); event 1197 sound/soc/codecs/arizona.c struct snd_kcontrol *kcontrol, int event) event 1205 sound/soc/codecs/arizona.c switch (event) { event 1239 sound/soc/codecs/arizona.c int event) event 1244 sound/soc/codecs/arizona.c switch (event) { event 1325 sound/soc/codecs/arizona.c struct snd_kcontrol *kcontrol, int event) event 1352 sound/soc/codecs/arizona.c switch (event) { event 258 sound/soc/codecs/arizona.h int event); event 260 sound/soc/codecs/arizona.h int event); event 262 sound/soc/codecs/arizona.h int event); event 264 sound/soc/codecs/arizona.h int event); event 272 sound/soc/codecs/arizona.h int event); event 300 sound/soc/codecs/arizona.h struct snd_kcontrol *kcontrol, int event); event 257 sound/soc/codecs/cpcap.c struct snd_kcontrol *kcontrol, int event) event 267 sound/soc/codecs/cpcap.c switch (event) { event 311 sound/soc/codecs/cros_ec_codec.c struct snd_kcontrol *kcontrol, int event) event 316 sound/soc/codecs/cros_ec_codec.c switch (event) { event 198 sound/soc/codecs/cs35l33.c struct snd_kcontrol *kcontrol, int event) event 203 sound/soc/codecs/cs35l33.c switch (event) { event 218 sound/soc/codecs/cs35l33.c dev_err(component->dev, "Invalid event = 0x%x\n", event); event 226 sound/soc/codecs/cs35l33.c struct snd_kcontrol *kcontrol, int event) event 232 sound/soc/codecs/cs35l33.c switch (event) { event 259 sound/soc/codecs/cs35l33.c dev_err(component->dev, "Invalid event = 0x%x\n", event); event 267 sound/soc/codecs/cs35l33.c struct snd_kcontrol *kcontrol, int event) event 275 sound/soc/codecs/cs35l33.c switch (event) { event 296 sound/soc/codecs/cs35l33.c dev_err(component->dev, "Invalid event = 0x%x\n", event); event 234 
sound/soc/codecs/cs35l34.c struct snd_kcontrol *kcontrol, int event) event 240 sound/soc/codecs/cs35l34.c switch (event) { event 263 sound/soc/codecs/cs35l34.c pr_err("Invalid event = 0x%x\n", event); event 339 sound/soc/codecs/cs35l34.c struct snd_kcontrol *kcontrol, int event) event 344 sound/soc/codecs/cs35l34.c switch (event) { event 360 sound/soc/codecs/cs35l34.c pr_err("Invalid event = 0x%x\n", event); event 379 sound/soc/codecs/cs35l34.c struct snd_kcontrol *kcontrol, int event) event 386 sound/soc/codecs/cs35l34.c switch (event) { event 417 sound/soc/codecs/cs35l34.c pr_err("Invalid event = 0x%x\n", event); event 191 sound/soc/codecs/cs35l35.c struct snd_kcontrol *kcontrol, int event) event 197 sound/soc/codecs/cs35l35.c switch (event) { event 230 sound/soc/codecs/cs35l35.c dev_err(component->dev, "Invalid event = 0x%x\n", event); event 237 sound/soc/codecs/cs35l35.c struct snd_kcontrol *kcontrol, int event) event 244 sound/soc/codecs/cs35l35.c switch (event) { event 300 sound/soc/codecs/cs35l35.c dev_err(component->dev, "Invalid event = 0x%x\n", event); event 504 sound/soc/codecs/cs35l36.c struct snd_kcontrol *kcontrol, int event) event 512 sound/soc/codecs/cs35l36.c switch (event) { event 548 sound/soc/codecs/cs35l36.c dev_dbg(component->dev, "Invalid event = 0x%x\n", event); event 556 sound/soc/codecs/cs35l36.c struct snd_kcontrol *kcontrol, int event) event 563 sound/soc/codecs/cs35l36.c switch (event) { event 579 sound/soc/codecs/cs35l36.c dev_dbg(component->dev, "Invalid event = 0x%x\n", event); event 465 sound/soc/codecs/cs42l42.c struct snd_kcontrol *kcontrol, int event) event 469 sound/soc/codecs/cs42l42.c if (event & SND_SOC_DAPM_POST_PMU) { event 481 sound/soc/codecs/cs42l42.c } else if (event & SND_SOC_DAPM_PRE_PMD) { event 493 sound/soc/codecs/cs42l42.c dev_err(component->dev, "Invalid event 0x%x\n", event); event 163 sound/soc/codecs/cs42l51.c struct snd_kcontrol *kcontrol, int event) event 167 sound/soc/codecs/cs42l51.c switch (event) { event 997 sound/soc/codecs/cs42l52.c cs42l52->beep->event = cs42l52_beep_event; event 1061 sound/soc/codecs/cs42l56.c cs42l56->beep->event = cs42l56_beep_event; event 481 sound/soc/codecs/cs42l73.c struct snd_kcontrol *kcontrol, int event) event 485 sound/soc/codecs/cs42l73.c switch (event) { event 491 sound/soc/codecs/cs42l73.c pr_err("Invalid event = 0x%x\n", event); event 497 sound/soc/codecs/cs42l73.c struct snd_kcontrol *kcontrol, int event) event 501 sound/soc/codecs/cs42l73.c switch (event) { event 508 sound/soc/codecs/cs42l73.c pr_err("Invalid event = 0x%x\n", event); event 515 sound/soc/codecs/cs42l73.c struct snd_kcontrol *kcontrol, int event) event 519 sound/soc/codecs/cs42l73.c switch (event) { event 526 sound/soc/codecs/cs42l73.c pr_err("Invalid event = 0x%x\n", event); event 1142 sound/soc/codecs/cs43130.c struct snd_kcontrol *kcontrol, int event) event 1147 sound/soc/codecs/cs43130.c switch (event) { event 1192 sound/soc/codecs/cs43130.c dev_err(component->dev, "Invalid event = 0x%x\n", event); event 1199 sound/soc/codecs/cs43130.c struct snd_kcontrol *kcontrol, int event) event 1204 sound/soc/codecs/cs43130.c switch (event) { event 1249 sound/soc/codecs/cs43130.c dev_err(component->dev, "Invalid event = 0x%x\n", event); event 1268 sound/soc/codecs/cs43130.c struct snd_kcontrol *kcontrol, int event) event 1273 sound/soc/codecs/cs43130.c switch (event) { event 1325 sound/soc/codecs/cs43130.c dev_err(component->dev, "Invalid DAC event = 0x%x\n", event); event 1348 sound/soc/codecs/cs43130.c struct snd_kcontrol *kcontrol, 
int event) event 1353 sound/soc/codecs/cs43130.c switch (event) { event 1363 sound/soc/codecs/cs43130.c dev_err(component->dev, "Invalid HPIN event = 0x%x\n", event); event 69 sound/soc/codecs/cs47l15.c int event) event 86 sound/soc/codecs/cs47l15.c switch (event) { event 96 sound/soc/codecs/cs47l15.c return wm_adsp_early_event(w, kcontrol, event); event 60 sound/soc/codecs/cs47l24.c struct snd_kcontrol *kcontrol, int event) event 77 sound/soc/codecs/cs47l24.c return wm_adsp_early_event(w, kcontrol, event); event 87 sound/soc/codecs/cs47l35.c int event) event 107 sound/soc/codecs/cs47l35.c switch (event) { event 117 sound/soc/codecs/cs47l35.c return wm_adsp_early_event(w, kcontrol, event); event 184 sound/soc/codecs/cs47l35.c struct snd_kcontrol *kcontrol, int event) event 188 sound/soc/codecs/cs47l35.c switch (event) { event 191 sound/soc/codecs/cs47l35.c return madera_hp_ev(w, kcontrol, event); event 193 sound/soc/codecs/cs47l35.c ret = madera_hp_ev(w, kcontrol, event); event 200 sound/soc/codecs/cs47l35.c ret = madera_hp_ev(w, kcontrol, event); event 111 sound/soc/codecs/cs47l85.c int event) event 131 sound/soc/codecs/cs47l85.c switch (event) { event 141 sound/soc/codecs/cs47l85.c return wm_adsp_early_event(w, kcontrol, event); event 246 sound/soc/codecs/cs47l85.c struct snd_kcontrol *kcontrol, int event) event 250 sound/soc/codecs/cs47l85.c switch (event) { event 253 sound/soc/codecs/cs47l85.c return madera_hp_ev(w, kcontrol, event); event 255 sound/soc/codecs/cs47l85.c ret = madera_hp_ev(w, kcontrol, event); event 262 sound/soc/codecs/cs47l85.c ret = madera_hp_ev(w, kcontrol, event); event 111 sound/soc/codecs/cs47l90.c int event) event 128 sound/soc/codecs/cs47l90.c switch (event) { event 138 sound/soc/codecs/cs47l90.c return wm_adsp_early_event(w, kcontrol, event); event 136 sound/soc/codecs/cs47l92.c int event) event 153 sound/soc/codecs/cs47l92.c switch (event) { event 163 sound/soc/codecs/cs47l92.c return wm_adsp_early_event(w, kcontrol, event); event 869 sound/soc/codecs/cx2072x.c struct snd_kcontrol *kcontrol, int event) event 874 sound/soc/codecs/cx2072x.c switch (event) { event 1163 sound/soc/codecs/cx2072x.c .subseq = wsubseq, .event = wevent, .event_flags = wflags} event 1170 sound/soc/codecs/cx2072x.c .event = wevent, .event_flags = wflags} event 1177 sound/soc/codecs/cx2072x.c .event = wevent, .event_flags = wflags} event 1184 sound/soc/codecs/cx2072x.c .event = wevent, .event_flags = wflags} event 732 sound/soc/codecs/da7213.c struct snd_kcontrol *kcontrol, int event) event 740 sound/soc/codecs/da7213.c switch (event) { event 1346 sound/soc/codecs/da7218.c struct snd_kcontrol *kcontrol, int event) event 1369 sound/soc/codecs/da7218.c switch (event) { event 1395 sound/soc/codecs/da7218.c struct snd_kcontrol *kcontrol, int event) event 1403 sound/soc/codecs/da7218.c switch (event) { event 1478 sound/soc/codecs/da7218.c struct snd_kcontrol *kcontrol, int event) event 1490 sound/soc/codecs/da7218.c switch (event) { event 1505 sound/soc/codecs/da7218.c struct snd_kcontrol *kcontrol, int event) event 1509 sound/soc/codecs/da7218.c switch (event) { event 768 sound/soc/codecs/da7219.c struct snd_kcontrol *kcontrol, int event) event 773 sound/soc/codecs/da7219.c switch (event) { event 792 sound/soc/codecs/da7219.c struct snd_kcontrol *kcontrol, int event) event 801 sound/soc/codecs/da7219.c switch (event) { event 868 sound/soc/codecs/da7219.c struct snd_kcontrol *kcontrol, int event) event 870 sound/soc/codecs/da7219.c switch (event) { event 883 sound/soc/codecs/da7219.c struct 
snd_kcontrol *kcontrol, int event) event 901 sound/soc/codecs/da7219.c switch (event) { event 921 sound/soc/codecs/da7219.c struct snd_kcontrol *kcontrol, int event) event 926 sound/soc/codecs/da7219.c switch (event) { event 607 sound/soc/codecs/da732x.c struct snd_kcontrol *kcontrol, int event) event 611 sound/soc/codecs/da732x.c switch (event) { event 661 sound/soc/codecs/da732x.c struct snd_kcontrol *kcontrol, int event) event 665 sound/soc/codecs/da732x.c switch (event) { event 55 sound/soc/codecs/dmic.c struct snd_kcontrol *kcontrol, int event) { event 59 sound/soc/codecs/dmic.c switch (event) { event 687 sound/soc/codecs/hdac_hdmi.c int (*event)(struct snd_soc_dapm_widget *, event 701 sound/soc/codecs/hdac_hdmi.c w->event = event; event 769 sound/soc/codecs/hdac_hdmi.c struct snd_kcontrol *kc, int event) event 776 sound/soc/codecs/hdac_hdmi.c __func__, w->name, event); event 786 sound/soc/codecs/hdac_hdmi.c switch (event) { event 814 sound/soc/codecs/hdac_hdmi.c struct snd_kcontrol *kc, int event) event 822 sound/soc/codecs/hdac_hdmi.c __func__, w->name, event); event 828 sound/soc/codecs/hdac_hdmi.c switch (event) { event 869 sound/soc/codecs/hdac_hdmi.c struct snd_kcontrol *kc, int event) event 876 sound/soc/codecs/hdac_hdmi.c __func__, w->name, event); event 192 sound/soc/codecs/jz4725b.c int event) event 199 sound/soc/codecs/jz4725b.c switch (event) { event 191 sound/soc/codecs/madera.c struct snd_kcontrol *kcontrol, int event) event 222 sound/soc/codecs/madera.c struct snd_kcontrol *kcontrol, int event) event 230 sound/soc/codecs/madera.c switch (event) { event 451 sound/soc/codecs/madera.c int event) event 468 sound/soc/codecs/madera.c switch (event) { event 2221 sound/soc/codecs/madera.c int event) event 2233 sound/soc/codecs/madera.c switch (event) { event 2269 sound/soc/codecs/madera.c struct snd_kcontrol *kcontrol, int event) event 2289 sound/soc/codecs/madera.c switch (event) { event 2369 sound/soc/codecs/madera.c struct snd_kcontrol *kcontrol, int event) event 2379 sound/soc/codecs/madera.c switch (event) { event 2388 sound/soc/codecs/madera.c return madera_out_ev(w, kcontrol, event); event 2416 sound/soc/codecs/madera.c return madera_out_ev(w, kcontrol, event); event 2421 sound/soc/codecs/madera.c int event) event 2426 sound/soc/codecs/madera.c switch (event) { event 385 sound/soc/codecs/madera.h struct snd_kcontrol *kcontrol, int event); event 387 sound/soc/codecs/madera.h struct snd_kcontrol *kcontrol, int event); event 389 sound/soc/codecs/madera.h struct snd_kcontrol *kcontrol, int event); event 391 sound/soc/codecs/madera.h struct snd_kcontrol *kcontrol, int event); event 393 sound/soc/codecs/madera.h struct snd_kcontrol *kcontrol, int event); event 395 sound/soc/codecs/madera.h struct snd_kcontrol *kcontrol, int event); event 398 sound/soc/codecs/madera.h int event); event 26 sound/soc/codecs/max9759.c struct snd_kcontrol *control, int event) event 31 sound/soc/codecs/max9759.c if (SND_SOC_DAPM_EVENT_ON(event)) event 617 sound/soc/codecs/max98088.c struct snd_kcontrol *kcontrol, int event) event 622 sound/soc/codecs/max98088.c switch (event) { event 647 sound/soc/codecs/max98088.c int event, int line, u8 channel) event 667 sound/soc/codecs/max98088.c switch (event) { event 688 sound/soc/codecs/max98088.c struct snd_kcontrol *k, int event) event 690 sound/soc/codecs/max98088.c return max98088_line_pga(w, event, LINE_INA, 1); event 694 sound/soc/codecs/max98088.c struct snd_kcontrol *k, int event) event 696 sound/soc/codecs/max98088.c return max98088_line_pga(w, 
event, LINE_INA, 2); event 700 sound/soc/codecs/max98088.c struct snd_kcontrol *k, int event) event 702 sound/soc/codecs/max98088.c return max98088_line_pga(w, event, LINE_INB, 1); event 706 sound/soc/codecs/max98088.c struct snd_kcontrol *k, int event) event 708 sound/soc/codecs/max98088.c return max98088_line_pga(w, event, LINE_INB, 2); event 728 sound/soc/codecs/max98090.c struct snd_kcontrol *kcontrol, int event) event 748 sound/soc/codecs/max98090.c switch (event) { event 775 sound/soc/codecs/max98090.c struct snd_kcontrol *kcontrol, int event) event 780 sound/soc/codecs/max98090.c if (event & SND_SOC_DAPM_POST_PMU) event 2479 sound/soc/codecs/max98090.c enum snd_soc_dapm_type event, int subseq) event 596 sound/soc/codecs/max98095.c struct snd_kcontrol *kcontrol, int event) event 601 sound/soc/codecs/max98095.c switch (event) { event 626 sound/soc/codecs/max98095.c int event, u8 channel) event 637 sound/soc/codecs/max98095.c switch (event) { event 658 sound/soc/codecs/max98095.c struct snd_kcontrol *k, int event) event 660 sound/soc/codecs/max98095.c return max98095_line_pga(w, event, 1); event 664 sound/soc/codecs/max98095.c struct snd_kcontrol *k, int event) event 666 sound/soc/codecs/max98095.c return max98095_line_pga(w, event, 2); event 674 sound/soc/codecs/max98095.c struct snd_kcontrol *kcontrol, int event) event 678 sound/soc/codecs/max98095.c switch (event) { event 403 sound/soc/codecs/max98373.c struct snd_kcontrol *kcontrol, int event) event 408 sound/soc/codecs/max98373.c switch (event) { event 105 sound/soc/codecs/max98504.c struct snd_kcontrol *kcontrol, int event) event 110 sound/soc/codecs/max98504.c switch (event) { event 38 sound/soc/codecs/max9860.c unsigned long event, void *data) event 42 sound/soc/codecs/max9860.c if (event & REGULATOR_EVENT_DISABLE) { event 98 sound/soc/codecs/max98925.c struct snd_kcontrol *kcontrol, int event) event 103 sound/soc/codecs/max98925.c switch (event) { event 497 sound/soc/codecs/max98927.c struct snd_kcontrol *kcontrol, int event) event 502 sound/soc/codecs/max98927.c switch (event) { event 373 sound/soc/codecs/msm8916-wcd-analog.c *component, int event, event 376 sound/soc/codecs/msm8916-wcd-analog.c switch (event) { event 388 sound/soc/codecs/msm8916-wcd-analog.c *component, int event, event 392 sound/soc/codecs/msm8916-wcd-analog.c switch (event) { event 413 sound/soc/codecs/msm8916-wcd-analog.c *kcontrol, int event) event 418 sound/soc/codecs/msm8916-wcd-analog.c return pm8916_wcd_analog_enable_micbias_ext(component, event, w->reg, event 425 sound/soc/codecs/msm8916-wcd-analog.c *kcontrol, int event) event 430 sound/soc/codecs/msm8916-wcd-analog.c return pm8916_wcd_analog_enable_micbias_ext(component, event, w->reg, event 438 sound/soc/codecs/msm8916-wcd-analog.c *kcontrol, int event) event 443 sound/soc/codecs/msm8916-wcd-analog.c switch (event) { event 451 sound/soc/codecs/msm8916-wcd-analog.c return pm8916_wcd_analog_enable_micbias_int(component, event, w->reg, event 555 sound/soc/codecs/msm8916-wcd-analog.c *kcontrol, int event) event 560 sound/soc/codecs/msm8916-wcd-analog.c switch (event) { event 574 sound/soc/codecs/msm8916-wcd-analog.c return pm8916_wcd_analog_enable_micbias_int(component, event, w->reg, event 580 sound/soc/codecs/msm8916-wcd-analog.c int event) event 591 sound/soc/codecs/msm8916-wcd-analog.c switch (event) { event 653 sound/soc/codecs/msm8916-wcd-analog.c int event) event 657 sound/soc/codecs/msm8916-wcd-analog.c switch (event) { event 357 sound/soc/codecs/msm8916-wcd-digital.c struct snd_kcontrol 
*kcontrol, int event) event 363 sound/soc/codecs/msm8916-wcd-digital.c switch (event) { event 578 sound/soc/codecs/msm8916-wcd-digital.c int event) event 582 sound/soc/codecs/msm8916-wcd-digital.c switch (event) { event 601 sound/soc/codecs/msm8916-wcd-digital.c int event) event 612 sound/soc/codecs/msm8916-wcd-digital.c switch (event) { event 664 sound/soc/codecs/msm8916-wcd-digital.c int event) event 682 sound/soc/codecs/msm8916-wcd-digital.c switch (event) { event 580 sound/soc/codecs/mt6351.c int event) event 584 sound/soc/codecs/mt6351.c switch (event) { event 624 sound/soc/codecs/mt6351.c int event) event 628 sound/soc/codecs/mt6351.c switch (event) { event 648 sound/soc/codecs/mt6351.c int event) event 652 sound/soc/codecs/mt6351.c switch (event) { event 668 sound/soc/codecs/mt6351.c int event) event 674 sound/soc/codecs/mt6351.c __func__, event, priv->dl_rate); event 676 sound/soc/codecs/mt6351.c switch (event) { event 713 sound/soc/codecs/mt6351.c int event) event 720 sound/soc/codecs/mt6351.c __func__, event, priv->hp_en_counter); event 722 sound/soc/codecs/mt6351.c switch (event) { event 841 sound/soc/codecs/mt6351.c int event) event 847 sound/soc/codecs/mt6351.c __func__, event, priv->ul_rate); event 849 sound/soc/codecs/mt6351.c switch (event) { event 904 sound/soc/codecs/mt6351.c int event) event 908 sound/soc/codecs/mt6351.c switch (event) { event 927 sound/soc/codecs/mt6351.c int event) event 931 sound/soc/codecs/mt6351.c switch (event) { event 957 sound/soc/codecs/mt6351.c int event) event 961 sound/soc/codecs/mt6351.c switch (event) { event 987 sound/soc/codecs/mt6351.c int event) event 991 sound/soc/codecs/mt6351.c switch (event) { event 1015 sound/soc/codecs/mt6351.c int event) event 1019 sound/soc/codecs/mt6351.c switch (event) { event 1043 sound/soc/codecs/mt6351.c int event) event 1047 sound/soc/codecs/mt6351.c switch (event) { event 751 sound/soc/codecs/mt6358.c int event) event 756 sound/soc/codecs/mt6358.c dev_dbg(priv->dev, "%s(), event = 0x%x\n", __func__, event); event 758 sound/soc/codecs/mt6358.c switch (event) { event 774 sound/soc/codecs/mt6358.c int event) event 779 sound/soc/codecs/mt6358.c dev_dbg(priv->dev, "%s(), event = 0x%x\n", __func__, event); event 781 sound/soc/codecs/mt6358.c switch (event) { event 813 sound/soc/codecs/mt6358.c int event) event 819 sound/soc/codecs/mt6358.c __func__, event, priv->dl_rate); event 821 sound/soc/codecs/mt6358.c switch (event) { event 1245 sound/soc/codecs/mt6358.c int event) event 1254 sound/soc/codecs/mt6358.c event, event 1258 sound/soc/codecs/mt6358.c switch (event) { event 1303 sound/soc/codecs/mt6358.c int event) event 1310 sound/soc/codecs/mt6358.c event, event 1313 sound/soc/codecs/mt6358.c switch (event) { event 1431 sound/soc/codecs/mt6358.c int event) event 1437 sound/soc/codecs/mt6358.c __func__, event, priv->ul_rate); event 1439 sound/soc/codecs/mt6358.c switch (event) { event 1455 sound/soc/codecs/mt6358.c int event) event 1461 sound/soc/codecs/mt6358.c __func__, event); event 1463 sound/soc/codecs/mt6358.c switch (event) { event 1783 sound/soc/codecs/mt6358.c int event) event 1790 sound/soc/codecs/mt6358.c __func__, event, mux); event 1792 sound/soc/codecs/mt6358.c switch (event) { event 1829 sound/soc/codecs/mt6358.c int event) event 1836 sound/soc/codecs/mt6358.c __func__, event, mux); event 1845 sound/soc/codecs/mt6358.c int event) event 1852 sound/soc/codecs/mt6358.c __func__, event, mux); event 1861 sound/soc/codecs/mt6358.c int event) event 1868 sound/soc/codecs/mt6358.c __func__, event, mux); 
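The sound/soc/codecs entries that dominate this stretch of the index (msm8916-wcd-digital, mt6351 and mt6358 above, and the nau/pcm/rt parts that follow) are almost all ASoC DAPM widget event callbacks: functions taking (struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) and switching on the SND_SOC_DAPM_* power-sequencing phases. A minimal sketch of that callback shape follows; example_out_event, EXAMPLE_PWR_REG and EXAMPLE_PWR_MASK are hypothetical, not taken from any driver listed here.

#include <sound/soc.h>

#define EXAMPLE_PWR_REG  0x10          /* hypothetical codec register */
#define EXAMPLE_PWR_MASK BIT(0)        /* hypothetical power-enable bit */

static int example_out_event(struct snd_soc_dapm_widget *w,
                             struct snd_kcontrol *kcontrol, int event)
{
        struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);

        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                /* widget has just been powered up */
                snd_soc_component_update_bits(component, EXAMPLE_PWR_REG,
                                              EXAMPLE_PWR_MASK, EXAMPLE_PWR_MASK);
                break;
        case SND_SOC_DAPM_PRE_PMD:
                /* widget is about to be powered down */
                snd_soc_component_update_bits(component, EXAMPLE_PWR_REG,
                                              EXAMPLE_PWR_MASK, 0);
                break;
        default:
                break;
        }
        return 0;
}

/* Hooked up through the usual _E() widget macros, e.g.:
 *      SND_SOC_DAPM_PGA_E("Example PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
 *                         example_out_event,
 *                         SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 */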
event 1877 sound/soc/codecs/mt6358.c int event) event 1884 sound/soc/codecs/mt6358.c __func__, event, mux); event 1893 sound/soc/codecs/mt6358.c int event) event 1895 sound/soc/codecs/mt6358.c switch (event) { event 234 sound/soc/codecs/nau8540.c struct snd_kcontrol *k, int event) event 239 sound/soc/codecs/nau8540.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 246 sound/soc/codecs/nau8540.c } else if (SND_SOC_DAPM_EVENT_OFF(event)) { event 256 sound/soc/codecs/nau8540.c struct snd_kcontrol *k, int event) event 261 sound/soc/codecs/nau8540.c if (SND_SOC_DAPM_EVENT_OFF(event)) { event 421 sound/soc/codecs/nau8824.c struct snd_kcontrol *kcontrol, int event) event 426 sound/soc/codecs/nau8824.c switch (event) { event 444 sound/soc/codecs/nau8824.c struct snd_kcontrol *kcontrol, int event) event 449 sound/soc/codecs/nau8824.c switch (event) { event 468 sound/soc/codecs/nau8824.c struct snd_kcontrol *kcontrol, int event) event 473 sound/soc/codecs/nau8824.c switch (event) { event 494 sound/soc/codecs/nau8824.c struct snd_kcontrol *k, int event) event 502 sound/soc/codecs/nau8824.c if (SND_SOC_DAPM_EVENT_OFF(event)) { event 553 sound/soc/codecs/nau8824.c struct snd_kcontrol *k, int event) event 898 sound/soc/codecs/nau8824.c int adc_value, event = 0, event_mask = 0; event 910 sound/soc/codecs/nau8824.c event |= SND_JACK_HEADPHONE; event 916 sound/soc/codecs/nau8824.c event |= SND_JACK_HEADSET; event 919 sound/soc/codecs/nau8824.c snd_soc_jack_report(nau8824->jack, event, event_mask); event 976 sound/soc/codecs/nau8824.c int active_irq, clear_irq = 0, event = 0, event_mask = 0; event 1002 sound/soc/codecs/nau8824.c event |= button_pressed; event 1003 sound/soc/codecs/nau8824.c dev_dbg(nau8824->dev, "button %x pressed\n", event); event 1034 sound/soc/codecs/nau8824.c snd_soc_jack_report(nau8824->jack, event, event_mask); event 913 sound/soc/codecs/nau8825.c struct snd_kcontrol *kcontrol, int event) event 918 sound/soc/codecs/nau8825.c switch (event) { event 937 sound/soc/codecs/nau8825.c struct snd_kcontrol *kcontrol, int event) event 942 sound/soc/codecs/nau8825.c switch (event) { event 961 sound/soc/codecs/nau8825.c struct snd_kcontrol *kcontrol, int event) event 966 sound/soc/codecs/nau8825.c switch (event) { event 1675 sound/soc/codecs/nau8825.c int active_irq, clear_irq = 0, event = 0, event_mask = 0; event 1700 sound/soc/codecs/nau8825.c event |= nau8825->button_pressed; event 1708 sound/soc/codecs/nau8825.c event |= nau8825_jack_insert(nau8825); event 1754 sound/soc/codecs/nau8825.c nau8825->xtalk_event = event; event 1795 sound/soc/codecs/nau8825.c snd_soc_jack_report(nau8825->jack, event, event_mask); event 29 sound/soc/codecs/pcm3008.c int event) event 35 sound/soc/codecs/pcm3008.c SND_SOC_DAPM_EVENT_ON(event)); event 42 sound/soc/codecs/pcm3008.c int event) event 48 sound/soc/codecs/pcm3008.c SND_SOC_DAPM_EVENT_ON(event)); event 60 sound/soc/codecs/pcm512x.c unsigned long event, void *data) \ event 64 sound/soc/codecs/pcm512x.c if (event & REGULATOR_EVENT_DISABLE) { \ event 1382 sound/soc/codecs/rt1011.c struct snd_kcontrol *kcontrol, int event) event 1387 sound/soc/codecs/rt1011.c switch (event) { event 437 sound/soc/codecs/rt1305.c struct snd_kcontrol *kcontrol, int event) event 442 sound/soc/codecs/rt1305.c switch (event) { event 299 sound/soc/codecs/rt1308.c struct snd_kcontrol *kcontrol, int event) event 304 sound/soc/codecs/rt1308.c switch (event) { event 433 sound/soc/codecs/rt286.c struct snd_kcontrol *kcontrol, int event) event 437 sound/soc/codecs/rt286.c switch (event) { event 
455 sound/soc/codecs/rt286.c struct snd_kcontrol *kcontrol, int event) event 459 sound/soc/codecs/rt286.c switch (event) { event 474 sound/soc/codecs/rt286.c struct snd_kcontrol *kcontrol, int event) event 478 sound/soc/codecs/rt286.c switch (event) { event 493 sound/soc/codecs/rt286.c struct snd_kcontrol *kcontrol, int event) event 497 sound/soc/codecs/rt286.c switch (event) { event 457 sound/soc/codecs/rt298.c struct snd_kcontrol *kcontrol, int event) event 461 sound/soc/codecs/rt298.c switch (event) { event 479 sound/soc/codecs/rt298.c struct snd_kcontrol *kcontrol, int event) event 483 sound/soc/codecs/rt298.c switch (event) { event 498 sound/soc/codecs/rt298.c struct snd_kcontrol *kcontrol, int event) event 505 sound/soc/codecs/rt298.c switch (event) { event 544 sound/soc/codecs/rt298.c struct snd_kcontrol *kcontrol, int event) event 548 sound/soc/codecs/rt298.c switch (event) { event 516 sound/soc/codecs/rt5514.c struct snd_kcontrol *kcontrol, int event) event 464 sound/soc/codecs/rt5616.c struct snd_kcontrol *kcontrol, int event) event 468 sound/soc/codecs/rt5616.c switch (event) { event 488 sound/soc/codecs/rt5616.c struct snd_kcontrol *kcontrol, int event) event 492 sound/soc/codecs/rt5616.c switch (event) { event 554 sound/soc/codecs/rt5616.c struct snd_kcontrol *kcontrol, int event) event 558 sound/soc/codecs/rt5616.c switch (event) { event 624 sound/soc/codecs/rt5616.c struct snd_kcontrol *kcontrol, int event) event 628 sound/soc/codecs/rt5616.c switch (event) { event 652 sound/soc/codecs/rt5616.c struct snd_kcontrol *kcontrol, int event) event 656 sound/soc/codecs/rt5616.c switch (event) { event 675 sound/soc/codecs/rt5616.c struct snd_kcontrol *kcontrol, int event) event 679 sound/soc/codecs/rt5616.c switch (event) { event 560 sound/soc/codecs/rt5631.c struct snd_kcontrol *kcontrol, int event) event 565 sound/soc/codecs/rt5631.c switch (event) { event 594 sound/soc/codecs/rt5631.c struct snd_kcontrol *kcontrol, int event) event 460 sound/soc/codecs/rt5640.c struct snd_kcontrol *kcontrol, int event) event 969 sound/soc/codecs/rt5640.c struct snd_kcontrol *kcontrol, int event) event 974 sound/soc/codecs/rt5640.c switch (event) { event 993 sound/soc/codecs/rt5640.c struct snd_kcontrol *kcontrol, int event) event 997 sound/soc/codecs/rt5640.c switch (event) { event 1022 sound/soc/codecs/rt5640.c struct snd_kcontrol *kcontrol, int event) event 1026 sound/soc/codecs/rt5640.c switch (event) { event 1038 sound/soc/codecs/rt5640.c struct snd_kcontrol *kcontrol, int event) event 1043 sound/soc/codecs/rt5640.c switch (event) { event 846 sound/soc/codecs/rt5645.c struct snd_kcontrol *kcontrol, int event) event 1737 sound/soc/codecs/rt5645.c struct snd_kcontrol *kcontrol, int event) event 1742 sound/soc/codecs/rt5645.c switch (event) { event 1803 sound/soc/codecs/rt5645.c struct snd_kcontrol *kcontrol, int event) event 1807 sound/soc/codecs/rt5645.c switch (event) { event 1836 sound/soc/codecs/rt5645.c struct snd_kcontrol *kcontrol, int event) event 1840 sound/soc/codecs/rt5645.c switch (event) { event 1866 sound/soc/codecs/rt5645.c struct snd_kcontrol *kcontrol, int event) event 1870 sound/soc/codecs/rt5645.c switch (event) { event 1889 sound/soc/codecs/rt5645.c struct snd_kcontrol *k, int event) event 1894 sound/soc/codecs/rt5645.c switch (event) { event 1910 sound/soc/codecs/rt5645.c struct snd_kcontrol *k, int event) event 1914 sound/soc/codecs/rt5645.c switch (event) { event 1935 sound/soc/codecs/rt5645.c struct snd_kcontrol *k, int event) event 1939 sound/soc/codecs/rt5645.c switch 
(event) { event 377 sound/soc/codecs/rt5651.c struct snd_kcontrol *kcontrol, int event) event 682 sound/soc/codecs/rt5651.c struct snd_kcontrol *kcontrol, int event) event 687 sound/soc/codecs/rt5651.c switch (event) { event 720 sound/soc/codecs/rt5651.c struct snd_kcontrol *kcontrol, int event) event 725 sound/soc/codecs/rt5651.c switch (event) { event 764 sound/soc/codecs/rt5651.c struct snd_kcontrol *kcontrol, int event) event 770 sound/soc/codecs/rt5651.c switch (event) { event 785 sound/soc/codecs/rt5651.c struct snd_kcontrol *kcontrol, int event) event 789 sound/soc/codecs/rt5651.c switch (event) { event 808 sound/soc/codecs/rt5651.c struct snd_kcontrol *kcontrol, int event) event 812 sound/soc/codecs/rt5651.c switch (event) { event 831 sound/soc/codecs/rt5651.c struct snd_kcontrol *kcontrol, int event) event 835 sound/soc/codecs/rt5651.c switch (event) { event 1603 sound/soc/codecs/rt5659.c struct snd_kcontrol *kcontrol, int event) event 1623 sound/soc/codecs/rt5659.c struct snd_kcontrol *kcontrol, int event) event 1627 sound/soc/codecs/rt5659.c switch (event) { event 1648 sound/soc/codecs/rt5659.c struct snd_kcontrol *kcontrol, int event) event 1653 sound/soc/codecs/rt5659.c switch (event) { event 1674 sound/soc/codecs/rt5659.c struct snd_kcontrol *kcontrol, int event) event 1678 sound/soc/codecs/rt5659.c switch (event) { event 2382 sound/soc/codecs/rt5659.c struct snd_kcontrol *kcontrol, int event) event 2386 sound/soc/codecs/rt5659.c switch (event) { event 2414 sound/soc/codecs/rt5659.c struct snd_kcontrol *kcontrol, int event) event 2418 sound/soc/codecs/rt5659.c switch (event) { event 2436 sound/soc/codecs/rt5659.c struct snd_kcontrol *kcontrol, int event) event 2440 sound/soc/codecs/rt5659.c switch (event) { event 2458 sound/soc/codecs/rt5659.c struct snd_kcontrol *kcontrol, int event) event 2460 sound/soc/codecs/rt5659.c switch (event) { event 352 sound/soc/codecs/rt5660.c struct snd_kcontrol *kcontrol, int event) event 539 sound/soc/codecs/rt5660.c struct snd_kcontrol *kcontrol, int event) event 543 sound/soc/codecs/rt5660.c switch (event) { event 2319 sound/soc/codecs/rt5663.c struct snd_kcontrol *kcontrol, int event) event 2324 sound/soc/codecs/rt5663.c switch (event) { event 2377 sound/soc/codecs/rt5663.c struct snd_kcontrol *kcontrol, int event) event 2382 sound/soc/codecs/rt5663.c switch (event) { event 2407 sound/soc/codecs/rt5663.c struct snd_kcontrol *kcontrol, int event) event 2411 sound/soc/codecs/rt5663.c switch (event) { event 2431 sound/soc/codecs/rt5663.c struct snd_kcontrol *kcontrol, int event) event 2435 sound/soc/codecs/rt5663.c switch (event) { event 1477 sound/soc/codecs/rt5665.c struct snd_kcontrol *kcontrol, int event) event 1497 sound/soc/codecs/rt5665.c struct snd_kcontrol *kcontrol, int event) event 1501 sound/soc/codecs/rt5665.c switch (event) { event 2472 sound/soc/codecs/rt5665.c struct snd_kcontrol *kcontrol, int event) event 2476 sound/soc/codecs/rt5665.c switch (event) { event 2504 sound/soc/codecs/rt5665.c struct snd_kcontrol *kcontrol, int event) event 2508 sound/soc/codecs/rt5665.c switch (event) { event 2530 sound/soc/codecs/rt5665.c struct snd_kcontrol *kcontrol, int event) event 2534 sound/soc/codecs/rt5665.c switch (event) { event 2554 sound/soc/codecs/rt5665.c struct snd_kcontrol *kcontrol, int event) event 2556 sound/soc/codecs/rt5665.c switch (event) { event 2570 sound/soc/codecs/rt5665.c struct snd_kcontrol *kcontrol, int event) event 2574 sound/soc/codecs/rt5665.c switch (event) { event 2628 sound/soc/codecs/rt5665.c struct 
snd_kcontrol *kcontrol, int event) event 2657 sound/soc/codecs/rt5665.c switch (event) { event 1169 sound/soc/codecs/rt5668.c struct snd_kcontrol *kcontrol, int event) event 1186 sound/soc/codecs/rt5668.c struct snd_kcontrol *kcontrol, int event) event 1418 sound/soc/codecs/rt5668.c struct snd_kcontrol *kcontrol, int event) event 1423 sound/soc/codecs/rt5668.c switch (event) { event 1451 sound/soc/codecs/rt5668.c struct snd_kcontrol *kcontrol, int event) event 1453 sound/soc/codecs/rt5668.c switch (event) { event 1467 sound/soc/codecs/rt5668.c struct snd_kcontrol *kcontrol, int event) event 1472 sound/soc/codecs/rt5668.c switch (event) { event 697 sound/soc/codecs/rt5670.c struct snd_kcontrol *kcontrol, int event) event 1365 sound/soc/codecs/rt5670.c struct snd_kcontrol *kcontrol, int event) event 1370 sound/soc/codecs/rt5670.c switch (event) { event 1401 sound/soc/codecs/rt5670.c struct snd_kcontrol *kcontrol, int event) event 1406 sound/soc/codecs/rt5670.c switch (event) { event 1451 sound/soc/codecs/rt5670.c struct snd_kcontrol *kcontrol, int event) event 1455 sound/soc/codecs/rt5670.c switch (event) { event 1474 sound/soc/codecs/rt5670.c struct snd_kcontrol *kcontrol, int event) event 1478 sound/soc/codecs/rt5670.c switch (event) { event 915 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2397 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2402 sound/soc/codecs/rt5677.c switch (event) { event 2421 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2426 sound/soc/codecs/rt5677.c switch (event) { event 2445 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2450 sound/soc/codecs/rt5677.c switch (event) { event 2467 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2472 sound/soc/codecs/rt5677.c switch (event) { event 2489 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2494 sound/soc/codecs/rt5677.c switch (event) { event 2516 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2522 sound/soc/codecs/rt5677.c switch (event) { event 2539 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2545 sound/soc/codecs/rt5677.c switch (event) { event 2562 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2567 sound/soc/codecs/rt5677.c switch (event) { event 2587 sound/soc/codecs/rt5677.c struct snd_kcontrol *kcontrol, int event) event 2589 sound/soc/codecs/rt5677.c switch (event) { event 1195 sound/soc/codecs/rt5682.c struct snd_kcontrol *kcontrol, int event) event 1212 sound/soc/codecs/rt5682.c struct snd_kcontrol *kcontrol, int event) event 1455 sound/soc/codecs/rt5682.c struct snd_kcontrol *kcontrol, int event) event 1460 sound/soc/codecs/rt5682.c switch (event) { event 1477 sound/soc/codecs/rt5682.c struct snd_kcontrol *kcontrol, int event) event 1482 sound/soc/codecs/rt5682.c switch (event) { event 1512 sound/soc/codecs/rt5682.c struct snd_kcontrol *kcontrol, int event) event 1514 sound/soc/codecs/rt5682.c switch (event) { event 1528 sound/soc/codecs/rt5682.c struct snd_kcontrol *kcontrol, int event) event 1533 sound/soc/codecs/rt5682.c switch (event) { event 267 sound/soc/codecs/sgtl5000.c struct snd_kcontrol *kcontrol, int event) event 272 sound/soc/codecs/sgtl5000.c switch (event) { event 289 sound/soc/codecs/sgtl5000.c int event, int event_source) event 310 sound/soc/codecs/sgtl5000.c switch (event) { event 341 sound/soc/codecs/sgtl5000.c struct snd_kcontrol *kcontrol, int 
event) event 346 sound/soc/codecs/sgtl5000.c return vag_and_mute_control(component, event, HP_POWER_EVENT); event 354 sound/soc/codecs/sgtl5000.c struct snd_kcontrol *kcontrol, int event) event 359 sound/soc/codecs/sgtl5000.c return vag_and_mute_control(component, event, ADC_POWER_EVENT); event 363 sound/soc/codecs/sgtl5000.c struct snd_kcontrol *kcontrol, int event) event 368 sound/soc/codecs/sgtl5000.c return vag_and_mute_control(component, event, DAC_POWER_EVENT); event 19 sound/soc/codecs/simple-amplifier.c struct snd_kcontrol *control, int event) event 25 sound/soc/codecs/simple-amplifier.c switch (event) { event 93 sound/soc/codecs/sirf-audio-codec.c struct snd_kcontrol *kcontrol, int event) event 95 sound/soc/codecs/sirf-audio-codec.c switch (event) { event 118 sound/soc/codecs/sirf-audio-codec.c struct snd_kcontrol *kcontrol, int event) event 124 sound/soc/codecs/sirf-audio-codec.c switch (event) { event 141 sound/soc/codecs/sirf-audio-codec.c struct snd_kcontrol *kcontrol, int event) event 147 sound/soc/codecs/sirf-audio-codec.c switch (event) { event 20 sound/soc/codecs/ssm2305.c struct snd_kcontrol *kctrl, int event) event 26 sound/soc/codecs/ssm2305.c SND_SOC_DAPM_EVENT_ON(event)); event 117 sound/soc/codecs/ssm2602.c struct snd_kcontrol *kcontrol, int event) event 81 sound/soc/codecs/tas2552.c struct snd_kcontrol *kcontrol, int event) event 85 sound/soc/codecs/tas2552.c switch (event) { event 364 sound/soc/codecs/tas5720.c struct snd_kcontrol *kcontrol, int event) event 370 sound/soc/codecs/tas5720.c if (event & SND_SOC_DAPM_POST_PMU) { event 393 sound/soc/codecs/tas5720.c } else if (event & SND_SOC_DAPM_PRE_PMD) { event 73 sound/soc/codecs/tas6424.c struct snd_kcontrol *kcontrol, int event) event 78 sound/soc/codecs/tas6424.c dev_dbg(component->dev, "%s() event=0x%0x\n", __func__, event); event 80 sound/soc/codecs/tas6424.c if (event & SND_SOC_DAPM_POST_PMU) { event 90 sound/soc/codecs/tas6424.c } else if (event & SND_SOC_DAPM_PRE_PMD) { event 353 sound/soc/codecs/tlv320aic31xx.c struct snd_kcontrol *kcontrol, int event) event 389 sound/soc/codecs/tlv320aic31xx.c switch (event) { event 397 sound/soc/codecs/tlv320aic31xx.c event, w->name); event 449 sound/soc/codecs/tlv320aic31xx.c struct snd_kcontrol *kcontrol, int event) event 454 sound/soc/codecs/tlv320aic31xx.c switch (event) { event 1123 sound/soc/codecs/tlv320aic31xx.c unsigned long event, void *data) event 1129 sound/soc/codecs/tlv320aic31xx.c if (event & REGULATOR_EVENT_DISABLE) { event 54 sound/soc/codecs/tlv320aic32x4.c struct snd_kcontrol *kcontrol, int event) event 58 sound/soc/codecs/tlv320aic32x4.c switch (event) { event 213 sound/soc/codecs/tlv320aic3x.c struct snd_kcontrol *kcontrol, int event) event 218 sound/soc/codecs/tlv320aic3x.c switch (event) { event 1365 sound/soc/codecs/tlv320aic3x.c unsigned long event, void *data) event 1371 sound/soc/codecs/tlv320aic3x.c if (event & REGULATOR_EVENT_DISABLE) { event 412 sound/soc/codecs/tlv320dac33.c struct snd_kcontrol *kcontrol, int event) event 417 sound/soc/codecs/tlv320dac33.c switch (event) { event 94 sound/soc/codecs/tpa6130a2.c struct snd_kcontrol *kctrl, int event) event 99 sound/soc/codecs/tpa6130a2.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 354 sound/soc/codecs/tscs42xx.c struct snd_kcontrol *kcontrol, int event) event 361 sound/soc/codecs/tscs42xx.c struct snd_kcontrol *kcontrol, int event) event 368 sound/soc/codecs/tscs42xx.c struct snd_kcontrol *kcontrol, int event) event 374 sound/soc/codecs/tscs42xx.c if (SND_SOC_DAPM_EVENT_ON(event)) event 383 
sound/soc/codecs/tscs42xx.c struct snd_kcontrol *kcontrol, int event) event 714 sound/soc/codecs/tscs454.c struct snd_kcontrol *kcontrol, int event) event 732 sound/soc/codecs/tscs454.c if (event == SND_SOC_DAPM_POST_PMU) event 554 sound/soc/codecs/twl4030.c struct snd_kcontrol *kcontrol, int event) \ event 559 sound/soc/codecs/twl4030.c switch (event) { \ event 609 sound/soc/codecs/twl4030.c struct snd_kcontrol *kcontrol, int event) event 613 sound/soc/codecs/twl4030.c switch (event) { event 625 sound/soc/codecs/twl4030.c struct snd_kcontrol *kcontrol, int event) event 629 sound/soc/codecs/twl4030.c switch (event) { event 641 sound/soc/codecs/twl4030.c struct snd_kcontrol *kcontrol, int event) event 650 sound/soc/codecs/twl4030.c struct snd_kcontrol *kcontrol, int event) event 654 sound/soc/codecs/twl4030.c switch (event) { event 666 sound/soc/codecs/twl4030.c struct snd_kcontrol *kcontrol, int event) event 672 sound/soc/codecs/twl4030.c switch (event) { event 755 sound/soc/codecs/twl4030.c struct snd_kcontrol *kcontrol, int event) event 760 sound/soc/codecs/twl4030.c switch (event) { event 780 sound/soc/codecs/twl4030.c struct snd_kcontrol *kcontrol, int event) event 785 sound/soc/codecs/twl4030.c switch (event) { event 805 sound/soc/codecs/twl4030.c struct snd_kcontrol *kcontrol, int event) event 223 sound/soc/codecs/twl6040.c struct snd_kcontrol *kcontrol, int event) event 235 sound/soc/codecs/twl6040.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 250 sound/soc/codecs/twl6040.c struct snd_kcontrol *kcontrol, int event) event 256 sound/soc/codecs/twl6040.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 45 sound/soc/codecs/wcd-clsh-v2.h enum wcd_clsh_event event, event 2574 sound/soc/codecs/wcd9335.c int event) event 2590 sound/soc/codecs/wcd9335.c switch (event) { event 2612 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 2614 sound/soc/codecs/wcd9335.c return __wcd9335_codec_enable_micbias(w, event); event 2653 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 2657 sound/soc/codecs/wcd9335.c switch (event) { event 2723 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 2771 sound/soc/codecs/wcd9335.c switch (event) { event 2910 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 2957 sound/soc/codecs/wcd9335.c switch (event) { event 3020 sound/soc/codecs/wcd9335.c int event) event 3026 sound/soc/codecs/wcd9335.c switch (event) { event 3040 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 3081 sound/soc/codecs/wcd9335.c switch (event) { event 3150 sound/soc/codecs/wcd9335.c u16 prim_int_reg, int event) event 3164 sound/soc/codecs/wcd9335.c if (hd2_enable_reg && SND_SOC_DAPM_EVENT_ON(event)) { event 3176 sound/soc/codecs/wcd9335.c if (hd2_enable_reg && SND_SOC_DAPM_EVENT_OFF(event)) { event 3191 sound/soc/codecs/wcd9335.c u16 reg, int event) event 3197 sound/soc/codecs/wcd9335.c switch (event) { event 3204 sound/soc/codecs/wcd9335.c wcd9335_codec_hd2_control(comp, prim_int_reg, event); event 3229 sound/soc/codecs/wcd9335.c wcd9335_codec_hd2_control(comp, prim_int_reg, event); event 3238 sound/soc/codecs/wcd9335.c int interp_n, int event) event 3255 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 3273 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_OFF(event)) { event 3299 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 3340 sound/soc/codecs/wcd9335.c switch (event) { event 3343 sound/soc/codecs/wcd9335.c wcd9335_codec_enable_prim_interpolator(comp, reg, 
event); event 3346 sound/soc/codecs/wcd9335.c wcd9335_config_compander(comp, w->shift, event); event 3352 sound/soc/codecs/wcd9335.c wcd9335_config_compander(comp, w->shift, event); event 3353 sound/soc/codecs/wcd9335.c wcd9335_codec_enable_prim_interpolator(comp, reg, event); event 3397 sound/soc/codecs/wcd9335.c int event) event 3399 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 3418 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_OFF(event)) { event 3434 sound/soc/codecs/wcd9335.c int event) event 3436 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 3465 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_OFF(event)) { event 3492 sound/soc/codecs/wcd9335.c int event) event 3494 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 3507 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_OFF(event)) { event 3518 sound/soc/codecs/wcd9335.c int event, int mode) event 3522 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_lp_config(component, event); event 3525 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_lohifi_config(component, event); event 3528 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_hifi_config(component, event); event 3535 sound/soc/codecs/wcd9335.c int event) event 3542 sound/soc/codecs/wcd9335.c switch (event) { event 3557 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_mode_config(comp, event, hph_mode); event 3571 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_mode_config(comp, event, hph_mode); event 3584 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 3589 sound/soc/codecs/wcd9335.c switch (event) { event 3604 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 3609 sound/soc/codecs/wcd9335.c switch (event) { event 3625 sound/soc/codecs/wcd9335.c int mode, int event) event 3629 sound/soc/codecs/wcd9335.c switch (event) { event 3650 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 3677 sound/soc/codecs/wcd9335.c if (SND_SOC_DAPM_EVENT_OFF(event)) event 3686 sound/soc/codecs/wcd9335.c int event) event 3693 sound/soc/codecs/wcd9335.c switch (event) { event 3713 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_mode_config(comp, event, hph_mode); event 3722 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_mode_config(comp, event, hph_mode); event 3735 sound/soc/codecs/wcd9335.c int event) event 3741 sound/soc/codecs/wcd9335.c switch (event) { event 3751 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_post_pa_config(wcd, hph_mode, event); event 3768 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_post_pa_config(wcd, hph_mode, event); event 3783 sound/soc/codecs/wcd9335.c int event) event 3809 sound/soc/codecs/wcd9335.c switch (event) { event 3854 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 3859 sound/soc/codecs/wcd9335.c switch (event) { event 3884 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 3890 sound/soc/codecs/wcd9335.c switch (event) { event 3899 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_post_pa_config(wcd, hph_mode, event); event 3916 sound/soc/codecs/wcd9335.c wcd9335_codec_hph_post_pa_config(wcd, hph_mode, event); event 3930 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 3934 sound/soc/codecs/wcd9335.c switch (event) { event 4309 sound/soc/codecs/wcd9335.c struct snd_kcontrol *kc, int event) event 4313 sound/soc/codecs/wcd9335.c switch (event) { event 675 sound/soc/codecs/wm2000.c struct snd_kcontrol *kcontrol, int event) event 683 sound/soc/codecs/wm2000.c if (SND_SOC_DAPM_EVENT_ON(event)) event 686 
sound/soc/codecs/wm2000.c if (SND_SOC_DAPM_EVENT_OFF(event)) event 734 sound/soc/codecs/wm5100.c enum snd_soc_dapm_type event, int subseq) event 773 sound/soc/codecs/wm5100.c int event) event 838 sound/soc/codecs/wm5100.c int event) event 581 sound/soc/codecs/wm5102.c struct snd_kcontrol *kcontrol, int event) event 600 sound/soc/codecs/wm5102.c switch (event) { event 611 sound/soc/codecs/wm5102.c return arizona_clk_ev(w, kcontrol, event); event 616 sound/soc/codecs/wm5102.c return arizona_dvfs_sysclk_ev(w, kcontrol, event); event 620 sound/soc/codecs/wm5102.c struct snd_kcontrol *kcontrol, int event) event 627 sound/soc/codecs/wm5102.c switch (event) { event 661 sound/soc/codecs/wm5102.c return wm_adsp_early_event(w, kcontrol, event); event 159 sound/soc/codecs/wm5110.c struct snd_kcontrol *kcontrol, int event) event 178 sound/soc/codecs/wm5110.c switch (event) { event 187 sound/soc/codecs/wm5110.c return arizona_clk_ev(w, kcontrol, event); event 196 sound/soc/codecs/wm5110.c struct snd_kcontrol *kcontrol, int event) event 213 sound/soc/codecs/wm5110.c return wm_adsp_early_event(w, kcontrol, event); event 364 sound/soc/codecs/wm5110.c struct snd_kcontrol *kcontrol, int event) event 373 sound/soc/codecs/wm5110.c switch (event) { event 386 sound/soc/codecs/wm5110.c return arizona_hp_ev(w, kcontrol, event); event 504 sound/soc/codecs/wm5110.c struct snd_kcontrol *kcontrol, int event) event 520 sound/soc/codecs/wm5110.c switch (event) { event 563 sound/soc/codecs/wm5110.c struct snd_kcontrol *kcontrol, int event) event 572 sound/soc/codecs/wm5110.c wm5110_in_analog_ev(w, kcontrol, event); event 579 sound/soc/codecs/wm5110.c return arizona_in_ev(w, kcontrol, event); event 254 sound/soc/codecs/wm8350.c struct snd_kcontrol *kcontrol, int event) event 275 sound/soc/codecs/wm8350.c switch (event) { event 320 sound/soc/codecs/wm8400.c struct snd_kcontrol * kcontrol, int event) event 89 sound/soc/codecs/wm8770.c struct snd_kcontrol *kcontrol, int event); event 91 sound/soc/codecs/wm8770.c struct snd_kcontrol *kcontrol, int event); event 100 sound/soc/codecs/wm8770.c unsigned long event, void *data) \ event 104 sound/soc/codecs/wm8770.c if (event & REGULATOR_EVENT_DISABLE) { \ event 306 sound/soc/codecs/wm8770.c struct snd_kcontrol *kcontrol, int event) event 310 sound/soc/codecs/wm8770.c switch (event) { event 323 sound/soc/codecs/wm8770.c struct snd_kcontrol *kcontrol, int event) event 327 sound/soc/codecs/wm8770.c switch (event) { event 75 sound/soc/codecs/wm8804.c struct snd_kcontrol *kcontrol, int event); event 84 sound/soc/codecs/wm8804.c unsigned long event, void *data) \ event 88 sound/soc/codecs/wm8804.c if (event & REGULATOR_EVENT_DISABLE) { \ event 135 sound/soc/codecs/wm8804.c struct snd_kcontrol *kcontrol, int event) event 140 sound/soc/codecs/wm8804.c switch (event) { event 222 sound/soc/codecs/wm8900.c struct snd_kcontrol *kcontrol, int event) event 227 sound/soc/codecs/wm8900.c switch (event) { event 279 sound/soc/codecs/wm8900.c WARN(1, "Invalid event %d\n", event); event 259 sound/soc/codecs/wm8903.c struct snd_kcontrol *kcontrol, int event) event 261 sound/soc/codecs/wm8903.c WARN_ON(event != SND_SOC_DAPM_POST_PMU); event 268 sound/soc/codecs/wm8903.c struct snd_kcontrol *kcontrol, int event) event 273 sound/soc/codecs/wm8903.c switch (event) { event 290 sound/soc/codecs/wm8903.c enum snd_soc_dapm_type event, int subseq) event 642 sound/soc/codecs/wm8904.c struct snd_kcontrol *kcontrol, int event) event 644 sound/soc/codecs/wm8904.c if (WARN_ON(event != SND_SOC_DAPM_POST_PMU)) event 
654 sound/soc/codecs/wm8904.c struct snd_kcontrol *kcontrol, int event) event 659 sound/soc/codecs/wm8904.c switch (event) { event 692 sound/soc/codecs/wm8904.c struct snd_kcontrol *kcontrol, int event) event 731 sound/soc/codecs/wm8904.c switch (event) { event 331 sound/soc/codecs/wm8955.c struct snd_kcontrol *kcontrol, int event) event 344 sound/soc/codecs/wm8955.c switch (event) { event 416 sound/soc/codecs/wm8958-dsp2.c struct snd_kcontrol *kcontrol, int event) event 421 sound/soc/codecs/wm8958-dsp2.c switch (event) { event 192 sound/soc/codecs/wm8961.c struct snd_kcontrol *kcontrol, int event) event 201 sound/soc/codecs/wm8961.c if (event & SND_SOC_DAPM_POST_PMU) { event 251 sound/soc/codecs/wm8961.c if (event & SND_SOC_DAPM_PRE_PMD) { event 284 sound/soc/codecs/wm8961.c struct snd_kcontrol *kcontrol, int event) event 290 sound/soc/codecs/wm8961.c if (event & SND_SOC_DAPM_POST_PMU) { event 300 sound/soc/codecs/wm8961.c if (event & SND_SOC_DAPM_PRE_PMD) { event 93 sound/soc/codecs/wm8962.c unsigned long event, void *data) \ event 97 sound/soc/codecs/wm8962.c if (event & REGULATOR_EVENT_DISABLE) { \ event 1844 sound/soc/codecs/wm8962.c struct snd_kcontrol *kcontrol, int event) event 1846 sound/soc/codecs/wm8962.c switch (event) { event 1852 sound/soc/codecs/wm8962.c WARN(1, "Invalid event %d\n", event); event 1860 sound/soc/codecs/wm8962.c struct snd_kcontrol *kcontrol, int event) event 1868 sound/soc/codecs/wm8962.c switch (event) { event 1944 sound/soc/codecs/wm8962.c WARN(1, "Invalid event %d\n", event); event 1954 sound/soc/codecs/wm8962.c struct snd_kcontrol *kcontrol, int event) event 1977 sound/soc/codecs/wm8962.c switch (event) { event 1981 sound/soc/codecs/wm8962.c WARN(1, "Invalid event %d\n", event); event 1987 sound/soc/codecs/wm8962.c struct snd_kcontrol *kcontrol, int event) event 1992 sound/soc/codecs/wm8962.c switch (event) { event 2004 sound/soc/codecs/wm8962.c WARN(1, "Invalid event %d\n", event); event 3253 sound/soc/codecs/wm8962.c wm8962->beep->event = wm8962_beep_event; event 242 sound/soc/codecs/wm8988.c struct snd_kcontrol *kcontrol, int event) event 371 sound/soc/codecs/wm8990.c struct snd_kcontrol *kcontrol, int event) event 358 sound/soc/codecs/wm8991.c struct snd_kcontrol *kcontrol, int event) event 807 sound/soc/codecs/wm8993.c struct snd_kcontrol *kcontrol, int event) event 811 sound/soc/codecs/wm8993.c switch (event) { event 810 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 815 sound/soc/codecs/wm8994.c switch (event) { event 986 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 990 sound/soc/codecs/wm8994.c switch (event) { event 1042 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 1062 sound/soc/codecs/wm8994.c switch (event) { event 1140 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 1148 sound/soc/codecs/wm8994.c switch (event) { event 1225 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 1230 sound/soc/codecs/wm8994.c switch (event) { event 1243 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 1248 sound/soc/codecs/wm8994.c switch (event) { event 1261 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 1266 sound/soc/codecs/wm8994.c switch (event) { event 1288 sound/soc/codecs/wm8994.c wm8958_aif_ev(w, kcontrol, event); event 1294 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 1299 sound/soc/codecs/wm8994.c switch (event) { event 1322 
sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 1324 sound/soc/codecs/wm8994.c late_enable_ev(w, kcontrol, event); event 1329 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 1331 sound/soc/codecs/wm8994.c late_enable_ev(w, kcontrol, event); event 1336 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 1377 sound/soc/codecs/wm8994.c struct snd_kcontrol *kcontrol, int event) event 52 sound/soc/codecs/wm8994.h struct snd_kcontrol *kcontrol, int event); event 395 sound/soc/codecs/wm8995.c unsigned long event, void *data) \ event 399 sound/soc/codecs/wm8995.c if (event & REGULATOR_EVENT_DISABLE) { \ event 559 sound/soc/codecs/wm8995.c struct snd_kcontrol *kcontrol, int event) event 563 sound/soc/codecs/wm8995.c switch (event) { event 608 sound/soc/codecs/wm8995.c struct snd_kcontrol *kcontrol, int event) event 615 sound/soc/codecs/wm8995.c switch (event) { event 758 sound/soc/codecs/wm8995.c struct snd_kcontrol *kcontrol, int event) event 762 sound/soc/codecs/wm8995.c switch (event) { event 103 sound/soc/codecs/wm8996.c unsigned long event, void *data) \ event 107 sound/soc/codecs/wm8996.c if (event & REGULATOR_EVENT_DISABLE) { \ event 597 sound/soc/codecs/wm8996.c struct snd_kcontrol *kcontrol, int event) event 602 sound/soc/codecs/wm8996.c switch (event) { event 610 sound/soc/codecs/wm8996.c WARN(1, "Invalid event %d\n", event); event 618 sound/soc/codecs/wm8996.c struct snd_kcontrol *kcontrol, int event) event 620 sound/soc/codecs/wm8996.c switch (event) { event 625 sound/soc/codecs/wm8996.c WARN(1, "Invalid event %d\n", event); event 632 sound/soc/codecs/wm8996.c struct snd_kcontrol *kcontrol, int event) event 638 sound/soc/codecs/wm8996.c switch (event) { event 646 sound/soc/codecs/wm8996.c WARN(1, "Invalid event %d\n", event); event 686 sound/soc/codecs/wm8996.c enum snd_soc_dapm_type event, int subseq) event 756 sound/soc/codecs/wm8996.c struct snd_kcontrol *kcontrol, int event) event 761 sound/soc/codecs/wm8996.c switch (event) { event 766 sound/soc/codecs/wm8996.c WARN(1, "Invalid event %d\n", event); event 82 sound/soc/codecs/wm8997.c struct snd_kcontrol *kcontrol, int event) event 99 sound/soc/codecs/wm8997.c switch (event) { event 110 sound/soc/codecs/wm8997.c return arizona_clk_ev(w, kcontrol, event); event 115 sound/soc/codecs/wm8997.c return arizona_dvfs_sysclk_ev(w, kcontrol, event); event 39 sound/soc/codecs/wm8998.c int event) event 44 sound/soc/codecs/wm8998.c switch (event) { event 730 sound/soc/codecs/wm9081.c struct snd_kcontrol *kcontrol, int event) event 749 sound/soc/codecs/wm9081.c switch (event) { event 239 sound/soc/codecs/wm9090.c struct snd_kcontrol *kcontrol, int event) event 244 sound/soc/codecs/wm9090.c switch (event) { event 193 sound/soc/codecs/wm9713.c struct snd_kcontrol *kcontrol, int event) event 197 sound/soc/codecs/wm9713.c if (WARN_ON(event != SND_SOC_DAPM_PRE_PMD)) event 1368 sound/soc/codecs/wm_adsp.c unsigned int event) event 1380 sound/soc/codecs/wm_adsp.c ret = wm_coeff_write_acked_control(ctl, event); event 1384 sound/soc/codecs/wm_adsp.c event, ctl->alg_region.alg, ret); event 2700 sound/soc/codecs/wm_adsp.c int event) event 2713 sound/soc/codecs/wm_adsp.c switch (event) { event 3072 sound/soc/codecs/wm_adsp.c struct snd_kcontrol *kcontrol, int event) event 3079 sound/soc/codecs/wm_adsp.c switch (event) { event 3127 sound/soc/codecs/wm_adsp.c struct snd_kcontrol *kcontrol, int event) event 3134 sound/soc/codecs/wm_adsp.c switch (event) { event 148 
sound/soc/codecs/wm_adsp.h .reg = SND_SOC_NOPM, .shift = num, .event = event_fn, \ event 152 sound/soc/codecs/wm_adsp.h .reg = SND_SOC_NOPM, .shift = num, .event = wm_adsp_event, \ event 169 sound/soc/codecs/wm_adsp.h struct snd_kcontrol *kcontrol, int event); event 172 sound/soc/codecs/wm_adsp.h struct snd_kcontrol *kcontrol, int event); event 179 sound/soc/codecs/wm_adsp.h struct snd_kcontrol *kcontrol, int event); event 496 sound/soc/codecs/wm_hubs.c struct snd_kcontrol *kcontrol, int event) event 501 sound/soc/codecs/wm_hubs.c switch (event) { event 538 sound/soc/codecs/wm_hubs.c struct snd_kcontrol *kcontrol, int event) event 543 sound/soc/codecs/wm_hubs.c switch (event) { event 590 sound/soc/codecs/wm_hubs.c struct snd_kcontrol *control, int event) event 595 sound/soc/codecs/wm_hubs.c switch (event) { event 607 sound/soc/codecs/wm_hubs.c WARN(1, "Invalid event %d\n", event); event 615 sound/soc/codecs/wm_hubs.c struct snd_kcontrol *control, int event) event 639 sound/soc/codecs/wm_hubs.c *flag = SND_SOC_DAPM_EVENT_ON(event); event 645 sound/soc/codecs/wm_hubs.c struct snd_kcontrol *kcontrol, int event) event 55 sound/soc/codecs/zx_aud96p22.c struct snd_kcontrol *kcontrol, int event) event 61 sound/soc/codecs/zx_aud96p22.c if (event != SND_SOC_DAPM_POST_PMU) event 72 sound/soc/codecs/zx_aud96p22.c struct snd_kcontrol *kcontrol, int event) event 78 sound/soc/codecs/zx_aud96p22.c if (event != SND_SOC_DAPM_POST_PMU) event 29 sound/soc/generic/audio-graph-card.c int event) event 34 sound/soc/generic/audio-graph-card.c switch (event) { event 518 sound/soc/intel/atom/sst-atom-controls.c struct snd_kcontrol *k, int event) event 520 sound/soc/intel/atom/sst-atom-controls.c if (SND_SOC_DAPM_EVENT_ON(event)) event 605 sound/soc/intel/atom/sst-atom-controls.c struct snd_kcontrol *k, int event) event 629 sound/soc/intel/atom/sst-atom-controls.c switch (event) { event 645 sound/soc/intel/atom/sst-atom-controls.c if (SND_SOC_DAPM_EVENT_ON(event) || event 646 sound/soc/intel/atom/sst-atom-controls.c event == SND_SOC_DAPM_POST_REG) event 960 sound/soc/intel/atom/sst-atom-controls.c struct snd_kcontrol *k, int event) event 968 sound/soc/intel/atom/sst-atom-controls.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 980 sound/soc/intel/atom/sst-atom-controls.c struct snd_kcontrol *k, int event) event 992 sound/soc/intel/atom/sst-atom-controls.c if (SND_SOC_DAPM_EVENT_ON(event)) event 1011 sound/soc/intel/atom/sst-atom-controls.c if (SND_SOC_DAPM_EVENT_ON(event)) event 1017 sound/soc/intel/atom/sst-atom-controls.c struct snd_kcontrol *k, int event) event 1026 sound/soc/intel/atom/sst-atom-controls.c if (SND_SOC_DAPM_EVENT_ON(event)) event 1049 sound/soc/intel/atom/sst-atom-controls.c if (SND_SOC_DAPM_EVENT_ON(event)) event 592 sound/soc/intel/atom/sst-atom-controls.h .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \ event 600 sound/soc/intel/atom/sst-atom-controls.h .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \ event 608 sound/soc/intel/atom/sst-atom-controls.h .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \ event 616 sound/soc/intel/atom/sst-atom-controls.h .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \ event 624 sound/soc/intel/atom/sst-atom-controls.h .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \ event 633 sound/soc/intel/atom/sst-atom-controls.h .event = wevent, .event_flags = wflags, \ event 641 sound/soc/intel/atom/sst-atom-controls.h .event = 
wevent, .event_flags = wflags, \ event 649 sound/soc/intel/atom/sst-atom-controls.h .event = wevent, .event_flags = wflags, \ event 655 sound/soc/intel/atom/sst-atom-controls.h #define SST_PATH_INPUT(name, task_id, loc_id, event) \ event 656 sound/soc/intel/atom/sst-atom-controls.h SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD) event 658 sound/soc/intel/atom/sst-atom-controls.h #define SST_PATH_LINKED_INPUT(name, task_id, loc_id, linked_wname, event) \ event 659 sound/soc/intel/atom/sst-atom-controls.h SST_LINKED_PATH(name, task_id, loc_id, linked_wname, event, \ event 662 sound/soc/intel/atom/sst-atom-controls.h #define SST_PATH_OUTPUT(name, task_id, loc_id, event) \ event 663 sound/soc/intel/atom/sst-atom-controls.h SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD) event 665 sound/soc/intel/atom/sst-atom-controls.h #define SST_PATH_LINKED_OUTPUT(name, task_id, loc_id, linked_wname, event) \ event 666 sound/soc/intel/atom/sst-atom-controls.h SST_LINKED_PATH(name, task_id, loc_id, linked_wname, event, \ event 669 sound/soc/intel/atom/sst-atom-controls.h #define SST_PATH_MEDIA_LOOP_OUTPUT(name, task_id, loc_id, format, event) \ event 670 sound/soc/intel/atom/sst-atom-controls.h SST_PATH_MEDIA_LOOP(name, task_id, loc_id, format, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD) event 676 sound/soc/intel/atom/sst-atom-controls.h .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD | \ event 31 sound/soc/intel/boards/bdw-rt5677.c struct snd_kcontrol *k, int event) event 37 sound/soc/intel/boards/bdw-rt5677.c if (SND_SOC_DAPM_EVENT_ON(event)) event 41 sound/soc/intel/boards/bdw-rt5677.c SND_SOC_DAPM_EVENT_ON(event)); event 55 sound/soc/intel/boards/bxt_da7219_max98357a.c struct snd_kcontrol *k, int event) event 68 sound/soc/intel/boards/bxt_da7219_max98357a.c if (SND_SOC_DAPM_EVENT_OFF(event)) { event 73 sound/soc/intel/boards/bxt_da7219_max98357a.c } else if(SND_SOC_DAPM_EVENT_ON(event)) { event 75 sound/soc/intel/boards/bytcht_es8316.c struct snd_kcontrol *kcontrol, int event) event 80 sound/soc/intel/boards/bytcht_es8316.c if (SND_SOC_DAPM_EVENT_ON(event)) event 222 sound/soc/intel/boards/bytcr_rt5640.c struct snd_kcontrol *k, int event) event 240 sound/soc/intel/boards/bytcr_rt5640.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 172 sound/soc/intel/boards/bytcr_rt5651.c struct snd_kcontrol *k, int event) event 189 sound/soc/intel/boards/bytcr_rt5651.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 222 sound/soc/intel/boards/bytcr_rt5651.c struct snd_kcontrol *kcontrol, int event) event 227 sound/soc/intel/boards/bytcr_rt5651.c if (SND_SOC_DAPM_EVENT_ON(event)) event 43 sound/soc/intel/boards/cht_bsw_max98090_ti.c struct snd_kcontrol *k, int event) event 61 sound/soc/intel/boards/cht_bsw_max98090_ti.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 130 sound/soc/intel/boards/cht_bsw_max98090_ti.c unsigned long event, void *data) event 135 sound/soc/intel/boards/cht_bsw_max98090_ti.c if (event & SND_JACK_MICROPHONE) { event 68 sound/soc/intel/boards/cht_bsw_rt5645.c struct snd_kcontrol *k, int event) event 85 sound/soc/intel/boards/cht_bsw_rt5645.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 49 sound/soc/intel/boards/cht_bsw_rt5672.c struct snd_kcontrol *k, int event) event 63 sound/soc/intel/boards/cht_bsw_rt5672.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 54 sound/soc/intel/boards/kbl_da7219_max98357a.c struct snd_kcontrol *k, int event) event 67 sound/soc/intel/boards/kbl_da7219_max98357a.c if 
(SND_SOC_DAPM_EVENT_OFF(event)) { event 72 sound/soc/intel/boards/kbl_da7219_max98357a.c } else if (SND_SOC_DAPM_EVENT_ON(event)) { event 65 sound/soc/intel/boards/kbl_da7219_max98927.c struct snd_kcontrol *k, int event) event 86 sound/soc/intel/boards/kbl_da7219_max98927.c if (SND_SOC_DAPM_EVENT_OFF(event)) { event 91 sound/soc/intel/boards/kbl_da7219_max98927.c } else if (SND_SOC_DAPM_EVENT_ON(event)) { event 93 sound/soc/intel/boards/kbl_rt5660.c struct snd_kcontrol *k, int event) event 99 sound/soc/intel/boards/kbl_rt5660.c !(SND_SOC_DAPM_EVENT_ON(event))); event 70 sound/soc/intel/boards/kbl_rt5663_max98927.c struct snd_kcontrol *k, int event) event 82 sound/soc/intel/boards/kbl_rt5663_max98927.c switch (event) { event 50 sound/soc/intel/boards/skl_nau88l25_max98357a.c struct snd_kcontrol *k, int event) event 63 sound/soc/intel/boards/skl_nau88l25_max98357a.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 60 sound/soc/intel/boards/skl_nau88l25_ssm4567.c struct snd_kcontrol *k, int event) event 73 sound/soc/intel/boards/skl_nau88l25_ssm4567.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 1271 sound/soc/intel/skylake/skl-topology.c struct snd_kcontrol *k, int event) event 1276 sound/soc/intel/skylake/skl-topology.c switch (event) { event 1300 sound/soc/intel/skylake/skl-topology.c struct snd_kcontrol *k, int event) event 1306 sound/soc/intel/skylake/skl-topology.c switch (event) { event 132 sound/soc/mediatek/mt6797/mt6797-dai-adda.c int event) event 138 sound/soc/mediatek/mt6797/mt6797-dai-adda.c __func__, w->name, event); event 140 sound/soc/mediatek/mt6797/mt6797-dai-adda.c switch (event) { event 144 sound/soc/mediatek/mt8183/mt8183-dai-adda.c int event) event 151 sound/soc/mediatek/mt8183/mt8183-dai-adda.c __func__, w->name, event); event 153 sound/soc/mediatek/mt8183/mt8183-dai-adda.c switch (event) { event 273 sound/soc/mediatek/mt8183/mt8183-dai-i2s.c int event) event 279 sound/soc/mediatek/mt8183/mt8183-dai-i2s.c __func__, w->name, event); event 281 sound/soc/mediatek/mt8183/mt8183-dai-i2s.c switch (event) { event 303 sound/soc/mediatek/mt8183/mt8183-dai-i2s.c int event) event 310 sound/soc/mediatek/mt8183/mt8183-dai-i2s.c __func__, w->name, event); event 319 sound/soc/mediatek/mt8183/mt8183-dai-i2s.c switch (event) { event 248 sound/soc/mediatek/mt8183/mt8183-dai-tdm.c int event) event 256 sound/soc/mediatek/mt8183/mt8183-dai-tdm.c __func__, w->name, event); event 258 sound/soc/mediatek/mt8183/mt8183-dai-tdm.c switch (event) { event 274 sound/soc/mediatek/mt8183/mt8183-dai-tdm.c int event) event 282 sound/soc/mediatek/mt8183/mt8183-dai-tdm.c __func__, w->name, event); event 284 sound/soc/mediatek/mt8183/mt8183-dai-tdm.c switch (event) { event 227 sound/soc/meson/axg-tdm-formatter.c int event) event 233 sound/soc/meson/axg-tdm-formatter.c switch (event) { event 243 sound/soc/meson/axg-tdm-formatter.c dev_err(c->dev, "Unexpected event %d\n", event); event 43 sound/soc/meson/axg-tdm-formatter.h int event); event 200 sound/soc/pxa/corgi.c struct snd_kcontrol *k, int event) event 202 sound/soc/pxa/corgi.c gpio_set_value(CORGI_GPIO_APM_ON, SND_SOC_DAPM_EVENT_ON(event)); event 207 sound/soc/pxa/corgi.c struct snd_kcontrol *k, int event) event 209 sound/soc/pxa/corgi.c gpio_set_value(CORGI_GPIO_MIC_BIAS, SND_SOC_DAPM_EVENT_ON(event)); event 34 sound/soc/pxa/e740_wm9705.c struct snd_kcontrol *kcontrol, int event) event 36 sound/soc/pxa/e740_wm9705.c if (event & SND_SOC_DAPM_PRE_PMU) event 38 sound/soc/pxa/e740_wm9705.c else if (event & SND_SOC_DAPM_POST_PMD) event 47 sound/soc/pxa/e740_wm9705.c 
struct snd_kcontrol *kcontrol, int event) event 49 sound/soc/pxa/e740_wm9705.c if (event & SND_SOC_DAPM_PRE_PMU) event 51 sound/soc/pxa/e740_wm9705.c else if (event & SND_SOC_DAPM_POST_PMD) event 22 sound/soc/pxa/e750_wm9705.c struct snd_kcontrol *kcontrol, int event) event 24 sound/soc/pxa/e750_wm9705.c if (event & SND_SOC_DAPM_PRE_PMU) event 26 sound/soc/pxa/e750_wm9705.c else if (event & SND_SOC_DAPM_POST_PMD) event 33 sound/soc/pxa/e750_wm9705.c struct snd_kcontrol *kcontrol, int event) event 35 sound/soc/pxa/e750_wm9705.c if (event & SND_SOC_DAPM_PRE_PMU) event 37 sound/soc/pxa/e750_wm9705.c else if (event & SND_SOC_DAPM_POST_PMD) event 21 sound/soc/pxa/e800_wm9712.c struct snd_kcontrol *kcontrol, int event) event 23 sound/soc/pxa/e800_wm9712.c if (event & SND_SOC_DAPM_PRE_PMU) event 25 sound/soc/pxa/e800_wm9712.c else if (event & SND_SOC_DAPM_POST_PMD) event 32 sound/soc/pxa/e800_wm9712.c struct snd_kcontrol *kcontrol, int event) event 34 sound/soc/pxa/e800_wm9712.c if (event & SND_SOC_DAPM_PRE_PMU) event 36 sound/soc/pxa/e800_wm9712.c else if (event & SND_SOC_DAPM_POST_PMD) event 82 sound/soc/pxa/hx4700.c struct snd_kcontrol *k, int event) event 84 sound/soc/pxa/hx4700.c gpio_set_value(GPIO107_HX4700_SPK_nSD, !!SND_SOC_DAPM_EVENT_ON(event)); event 89 sound/soc/pxa/hx4700.c struct snd_kcontrol *k, int event) event 91 sound/soc/pxa/hx4700.c gpio_set_value(GPIO92_HX4700_HP_DRIVER, !!SND_SOC_DAPM_EVENT_ON(event)); event 228 sound/soc/pxa/magician.c struct snd_kcontrol *k, int event) event 230 sound/soc/pxa/magician.c gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, SND_SOC_DAPM_EVENT_ON(event)); event 235 sound/soc/pxa/magician.c struct snd_kcontrol *k, int event) event 237 sound/soc/pxa/magician.c gpio_set_value(EGPIO_MAGICIAN_EP_POWER, SND_SOC_DAPM_EVENT_ON(event)); event 242 sound/soc/pxa/magician.c struct snd_kcontrol *k, int event) event 244 sound/soc/pxa/magician.c gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, SND_SOC_DAPM_EVENT_ON(event)); event 69 sound/soc/pxa/mioa701_wm9713.c struct snd_kcontrol *kctl, int event) event 77 sound/soc/pxa/mioa701_wm9713.c return rear_amp_power(component, SND_SOC_DAPM_EVENT_ON(event)); event 174 sound/soc/pxa/poodle.c struct snd_kcontrol *k, int event) event 176 sound/soc/pxa/poodle.c if (SND_SOC_DAPM_EVENT_ON(event)) event 200 sound/soc/pxa/spitz.c struct snd_kcontrol *k, int event) event 202 sound/soc/pxa/spitz.c gpio_set_value_cansleep(spitz_mic_gpio, SND_SOC_DAPM_EVENT_ON(event)); event 129 sound/soc/pxa/tosa.c struct snd_kcontrol *k, int event) event 131 sound/soc/pxa/tosa.c gpio_set_value(TOSA_GPIO_L_MUTE, SND_SOC_DAPM_EVENT_ON(event) ? 
1 : 0); event 34 sound/soc/rockchip/rk3288_hdmi_analog.c struct snd_kcontrol *k, int event) event 42 sound/soc/rockchip/rk3288_hdmi_analog.c SND_SOC_DAPM_EVENT_ON(event)); event 63 sound/soc/rockchip/rockchip_max98090.c static int rk_jack_event(struct notifier_block *nb, unsigned long event, event 69 sound/soc/rockchip/rockchip_max98090.c if (event & SND_JACK_MICROPHONE) { event 123 sound/soc/samsung/h1940_uda1380.c struct snd_kcontrol *kcontrol, int event) event 125 sound/soc/samsung/h1940_uda1380.c if (SND_SOC_DAPM_EVENT_ON(event)) event 176 sound/soc/samsung/littlemill.c struct snd_kcontrol *kcontrol, int event) event 186 sound/soc/samsung/littlemill.c switch (event) { event 189 sound/soc/samsung/neo1973_wm8753.c struct snd_kcontrol *k, int event) event 191 sound/soc/samsung/neo1973_wm8753.c gpio_set_value(S3C2410_GPJ(1), SND_SOC_DAPM_EVENT_OFF(event)); event 32 sound/soc/samsung/rx1950_uda1380.c struct snd_kcontrol *kcontrol, int event); event 138 sound/soc/samsung/rx1950_uda1380.c struct snd_kcontrol *kcontrol, int event) event 140 sound/soc/samsung/rx1950_uda1380.c if (SND_SOC_DAPM_EVENT_ON(event)) event 104 sound/soc/samsung/smartq_wm8987.c int event) event 108 sound/soc/samsung/smartq_wm8987.c gpiod_set_value(gpio, SND_SOC_DAPM_EVENT_OFF(event)); event 262 sound/soc/samsung/tm2_wm5110.c struct snd_kcontrol *kcontrol, int event) event 267 sound/soc/samsung/tm2_wm5110.c switch (event) { event 37 sound/soc/sh/siu.h __u32 event; /* SPB program starting conditions */ event 166 sound/soc/sh/siu_dai.c ydef[7] = fw->spbpar[idx].event; event 186 sound/soc/sh/siu_dai.c ydef[6] = fw->spbpar[idx].event; event 23 sound/soc/sirf/sirf-audio.c struct snd_kcontrol *ctrl, int event) event 28 sound/soc/sirf/sirf-audio.c int on = !SND_SOC_DAPM_EVENT_OFF(event); event 36 sound/soc/sirf/sirf-audio.c struct snd_kcontrol *ctrl, int event) event 41 sound/soc/sirf/sirf-audio.c int on = !SND_SOC_DAPM_EVENT_OFF(event); event 63 sound/soc/soc-component.c int event) event 66 sound/soc/soc-component.c return component->driver->stream_event(component, event); event 1337 sound/soc/soc-dapm.c struct snd_kcontrol *kcontrol, int event) event 1343 sound/soc/soc-dapm.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 1371 sound/soc/soc-dapm.c struct snd_kcontrol *kcontrol, int event) event 1380 sound/soc/soc-dapm.c if (SND_SOC_DAPM_EVENT_ON(event)) event 1396 sound/soc/soc-dapm.c struct snd_kcontrol *kcontrol, int event) event 1403 sound/soc/soc-dapm.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 1521 sound/soc/soc-dapm.c struct snd_soc_dapm_widget *w, int event) event 1526 sound/soc/soc-dapm.c switch (event) { event 1552 sound/soc/soc-dapm.c WARN(1, "Unknown event %d\n", event); event 1559 sound/soc/soc-dapm.c if (w->event && (w->event_flags & event)) { event 1563 sound/soc/soc-dapm.c trace_snd_soc_dapm_widget_event_start(w, event); event 1564 sound/soc/soc-dapm.c ret = w->event(w, NULL, event); event 1565 sound/soc/soc-dapm.c trace_snd_soc_dapm_widget_event_done(w, event); event 1632 sound/soc/soc-dapm.c struct list_head *list, int event, bool power_up) event 1678 sound/soc/soc-dapm.c if (!w->event) event 1682 sound/soc/soc-dapm.c if (event == SND_SOC_DAPM_STREAM_START) event 1683 sound/soc/soc-dapm.c ret = w->event(w, event 1685 sound/soc/soc-dapm.c else if (event == SND_SOC_DAPM_STREAM_STOP) event 1686 sound/soc/soc-dapm.c ret = w->event(w, event 1691 sound/soc/soc-dapm.c if (!w->event) event 1695 sound/soc/soc-dapm.c if (event == SND_SOC_DAPM_STREAM_START) event 1696 sound/soc/soc-dapm.c ret = w->event(w, event 1698 
sound/soc/soc-dapm.c else if (event == SND_SOC_DAPM_STREAM_STOP) event 1699 sound/soc/soc-dapm.c ret = w->event(w, event 1750 sound/soc/soc-dapm.c if (w->event && (w->event_flags & SND_SOC_DAPM_PRE_REG)) { event 1751 sound/soc/soc-dapm.c ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG); event 1779 sound/soc/soc-dapm.c if (w->event && (w->event_flags & SND_SOC_DAPM_POST_REG)) { event 1780 sound/soc/soc-dapm.c ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG); event 1952 sound/soc/soc-dapm.c static int dapm_power_widgets(struct snd_soc_card *card, int event) event 2058 sound/soc/soc-dapm.c dapm_seq_run(card, &down_list, event, false); event 2063 sound/soc/soc-dapm.c dapm_seq_run(card, &up_list, event, true); event 2080 sound/soc/soc-dapm.c ret = snd_soc_component_stream_event(d->component, event); event 3916 sound/soc/soc-dapm.c struct snd_kcontrol *kcontrol, int event) event 3927 sound/soc/soc-dapm.c switch (event) { event 3992 sound/soc/soc-dapm.c WARN(1, "Unknown event %d\n", event); event 4142 sound/soc/soc-dapm.c template.event = snd_soc_dai_link_event; event 4377 sound/soc/soc-dapm.c int event) event 4398 sound/soc/soc-dapm.c switch (event) { event 4434 sound/soc/soc-dapm.c int event) event 4439 sound/soc/soc-dapm.c soc_dapm_dai_stream_event(rtd->cpu_dai, stream, event); event 4441 sound/soc/soc-dapm.c soc_dapm_dai_stream_event(codec_dai, stream, event); event 4443 sound/soc/soc-dapm.c dapm_power_widgets(rtd->card, event); event 4458 sound/soc/soc-dapm.c int event) event 4463 sound/soc/soc-dapm.c soc_dapm_stream_event(rtd, stream, event); event 155 sound/soc/soc-pcm.c int event) event 164 sound/soc/soc-pcm.c be->dai_link->name, event, dir); event 166 sound/soc/soc-pcm.c if ((event == SND_SOC_DAPM_STREAM_STOP) && event 170 sound/soc/soc-pcm.c snd_soc_dapm_stream_event(be, dir, event); event 173 sound/soc/soc-pcm.c snd_soc_dapm_stream_event(fe, dir, event); event 666 sound/soc/soc-topology.c w->event = NULL; event 672 sound/soc/soc-topology.c w->event = events[i].event_handler; event 135 sound/soc/sof/topology.c struct snd_kcontrol *k, int event) event 147 sound/soc/sof/topology.c event, w->name); event 150 sound/soc/sof/topology.c switch (event) { event 1326 sound/soc/sunxi/sun4i-codec.c struct snd_kcontrol *k, int event) event 1331 sound/soc/sunxi/sun4i-codec.c !!SND_SOC_DAPM_EVENT_ON(event)); event 1333 sound/soc/sunxi/sun4i-codec.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 345 sound/soc/sunxi/sun8i-codec-analog.c struct snd_kcontrol *k, int event) event 349 sound/soc/sunxi/sun8i-codec-analog.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 359 sound/soc/sunxi/sun8i-codec-analog.c } else if (SND_SOC_DAPM_EVENT_OFF(event)) { event 70 sound/soc/tegra/tegra_rt5677.c struct snd_kcontrol *k, int event) event 80 sound/soc/tegra/tegra_rt5677.c SND_SOC_DAPM_EVENT_ON(event)); event 115 sound/soc/tegra/tegra_wm8903.c struct snd_kcontrol *k, int event) event 125 sound/soc/tegra/tegra_wm8903.c SND_SOC_DAPM_EVENT_ON(event)); event 131 sound/soc/tegra/tegra_wm8903.c struct snd_kcontrol *k, int event) event 141 sound/soc/tegra/tegra_wm8903.c !SND_SOC_DAPM_EVENT_ON(event)); event 30 sound/soc/ti/ams-delta.c struct snd_kcontrol *k, int event) event 32 sound/soc/ti/ams-delta.c gpiod_set_value_cansleep(handset_mute, !SND_SOC_DAPM_EVENT_ON(event)); event 37 sound/soc/ti/ams-delta.c struct snd_kcontrol *k, int event) event 39 sound/soc/ti/ams-delta.c gpiod_set_value_cansleep(handsfree_mute, !SND_SOC_DAPM_EVENT_ON(event)); event 187 sound/soc/ti/n810.c struct snd_kcontrol *k, int event) event 
189 sound/soc/ti/n810.c if (SND_SOC_DAPM_EVENT_ON(event)) event 198 sound/soc/ti/n810.c struct snd_kcontrol *k, int event) event 200 sound/soc/ti/n810.c if (SND_SOC_DAPM_EVENT_ON(event)) event 66 sound/soc/ti/omap3pandora.c struct snd_kcontrol *k, int event) event 74 sound/soc/ti/omap3pandora.c if (SND_SOC_DAPM_EVENT_ON(event)) { event 92 sound/soc/ti/omap3pandora.c struct snd_kcontrol *k, int event) event 94 sound/soc/ti/omap3pandora.c if (SND_SOC_DAPM_EVENT_ON(event)) event 141 sound/soc/ti/rx51.c struct snd_kcontrol *k, int event) event 148 sound/soc/ti/rx51.c !!SND_SOC_DAPM_EVENT_ON(event)); event 1884 sound/sparc/cs4231.c static void snd_cs4231_ebus_play_callback(struct ebus_dma_info *p, int event, event 1893 sound/sparc/cs4231.c int event, void *cookie) event 30 sound/synth/emux/emux_oss.c int cmd, unsigned char *event, int atomic, int hop); event 32 sound/synth/emux/emux_oss.c int cmd, unsigned char *event, int atomic, int hop); event 321 sound/synth/emux/emux_oss.c unsigned char *event, int atomic, int hop) event 329 sound/synth/emux/emux_oss.c voice = event[3]; event 335 sound/synth/emux/emux_oss.c p1 = *(unsigned short *) &event[4]; event 336 sound/synth/emux/emux_oss.c p2 = *(short *) &event[6]; event 387 sound/synth/emux/emux_oss.c port->drum_flags = *(unsigned int*)&event[4]; event 416 sound/synth/emux/emux_oss.c unsigned char *event, int atomic, int hop) event 427 sound/synth/emux/emux_oss.c voice = event[3]; event 433 sound/synth/emux/emux_oss.c p1 = *(unsigned short *) &event[4]; event 434 sound/synth/emux/emux_oss.c plong = *(int*) &event[4]; event 114 sound/xen/xen_snd_front_evtchnl.c struct xensnd_evt *event; event 116 sound/xen/xen_snd_front_evtchnl.c event = &XENSND_IN_RING_REF(page, cons); event 117 sound/xen/xen_snd_front_evtchnl.c if (unlikely(event->id != channel->evt_id++)) event 120 sound/xen/xen_snd_front_evtchnl.c switch (event->type) { event 123 sound/xen/xen_snd_front_evtchnl.c event->op.cur_pos.position); event 66 tools/bpf/bpftool/map_perf_ring.c print_bpf_output(void *private_data, int cpu, struct perf_event_header *event) event 68 tools/bpf/bpftool/map_perf_ring.c struct perf_event_sample *e = container_of(event, event 71 tools/bpf/bpftool/map_perf_ring.c struct perf_event_lost *lost = container_of(event, event 79 tools/gpio/gpio-event-mon.c struct gpioevent_data event; event 81 tools/gpio/gpio-event-mon.c ret = read(req.fd, &event, sizeof(event)); event 94 tools/gpio/gpio-event-mon.c if (ret != sizeof(event)) { event 99 tools/gpio/gpio-event-mon.c fprintf(stdout, "GPIO EVENT %llu: ", event.timestamp); event 100 tools/gpio/gpio-event-mon.c switch (event.id) { event 124 tools/iio/iio_event_monitor.c static bool event_is_known(struct iio_event_data *event) event 126 tools/iio/iio_event_monitor.c enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id); event 127 tools/iio/iio_event_monitor.c enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id); event 128 tools/iio/iio_event_monitor.c enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id); event 129 tools/iio/iio_event_monitor.c enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id); event 244 tools/iio/iio_event_monitor.c static void print_event(struct iio_event_data *event) event 246 tools/iio/iio_event_monitor.c enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id); event 247 tools/iio/iio_event_monitor.c enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id); event 248 tools/iio/iio_event_monitor.c enum iio_event_type 
ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id); event 249 tools/iio/iio_event_monitor.c enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id); event 250 tools/iio/iio_event_monitor.c int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event->id); event 251 tools/iio/iio_event_monitor.c int chan2 = IIO_EVENT_CODE_EXTRACT_CHAN2(event->id); event 252 tools/iio/iio_event_monitor.c bool diff = IIO_EVENT_CODE_EXTRACT_DIFF(event->id); event 254 tools/iio/iio_event_monitor.c if (!event_is_known(event)) { event 256 tools/iio/iio_event_monitor.c event->timestamp, event->id); event 261 tools/iio/iio_event_monitor.c printf("Event: time: %lld, type: %s", event->timestamp, event 283 tools/iio/iio_event_monitor.c struct iio_event_data event; event 341 tools/iio/iio_event_monitor.c ret = read(event_fd, &event, sizeof(event)); event 353 tools/iio/iio_event_monitor.c if (ret != sizeof(event)) { event 359 tools/iio/iio_event_monitor.c print_event(&event); event 814 tools/include/uapi/sound/asound.h int event; event 86 tools/lib/traceevent/event-parse-local.h void tep_free_event(struct tep_event *event); event 45 tools/lib/traceevent/event-parse.c #define do_warning_event(event, fmt, ...) \ event 50 tools/lib/traceevent/event-parse.c if (event) \ event 51 tools/lib/traceevent/event-parse.c warning("[%s:%s] " fmt, event->system, \ event 52 tools/lib/traceevent/event-parse.c event->name, ##__VA_ARGS__); \ event 99 tools/lib/traceevent/event-parse.c struct tep_event *event, struct tep_print_arg *arg); event 817 tools/lib/traceevent/event-parse.c static int add_event(struct tep_handle *tep, struct tep_event *event) event 820 tools/lib/traceevent/event-parse.c struct tep_event **events = realloc(tep->events, sizeof(event) * event 828 tools/lib/traceevent/event-parse.c if (tep->events[i]->id > event->id) event 834 tools/lib/traceevent/event-parse.c sizeof(event) * (tep->nr_events - i)); event 836 tools/lib/traceevent/event-parse.c tep->events[i] = event; event 839 tools/lib/traceevent/event-parse.c event->tep = tep; event 1428 tools/lib/traceevent/event-parse.c static int event_read_fields(struct tep_event *event, struct tep_format_field **fields) event 1456 tools/lib/traceevent/event-parse.c if (event->flags & TEP_EVENT_FL_ISFTRACE && event 1475 tools/lib/traceevent/event-parse.c field->event = event; event 1486 tools/lib/traceevent/event-parse.c (event->flags & TEP_EVENT_FL_ISFTRACE && event 1515 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: no type found", __func__); event 1562 tools/lib/traceevent/event-parse.c do_warning_event(event, "failed to find token"); event 1691 tools/lib/traceevent/event-parse.c field->elementsize = event->tep ? 
event 1692 tools/lib/traceevent/event-parse.c event->tep->long_size : event 1715 tools/lib/traceevent/event-parse.c static int event_read_format(struct tep_event *event) event 1730 tools/lib/traceevent/event-parse.c ret = event_read_fields(event, &event->format.common_fields); event 1733 tools/lib/traceevent/event-parse.c event->format.nr_common = ret; event 1735 tools/lib/traceevent/event-parse.c ret = event_read_fields(event, &event->format.fields); event 1738 tools/lib/traceevent/event-parse.c event->format.nr_fields = ret; event 1748 tools/lib/traceevent/event-parse.c process_arg_token(struct tep_event *event, struct tep_print_arg *arg, event 1752 tools/lib/traceevent/event-parse.c process_arg(struct tep_event *event, struct tep_print_arg *arg, char **tok) event 1760 tools/lib/traceevent/event-parse.c return process_arg_token(event, arg, tok, type); event 1764 tools/lib/traceevent/event-parse.c process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok); event 1771 tools/lib/traceevent/event-parse.c process_field_arg(struct tep_event *event, struct tep_print_arg *arg, char **tok) event 1775 tools/lib/traceevent/event-parse.c type = process_arg(event, arg, tok); event 1778 tools/lib/traceevent/event-parse.c type = process_op(event, arg, tok); event 1785 tools/lib/traceevent/event-parse.c process_cond(struct tep_event *event, struct tep_print_arg *top, char **tok) event 1796 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", __func__); event 1808 tools/lib/traceevent/event-parse.c type = process_arg(event, left, &token); event 1816 tools/lib/traceevent/event-parse.c type = process_op(event, left, &token); event 1825 tools/lib/traceevent/event-parse.c type = process_arg(event, right, &token); event 1841 tools/lib/traceevent/event-parse.c process_array(struct tep_event *event, struct tep_print_arg *top, char **tok) event 1849 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", __func__); event 1856 tools/lib/traceevent/event-parse.c type = process_arg(event, arg, &token); event 1943 tools/lib/traceevent/event-parse.c process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok) event 1955 tools/lib/traceevent/event-parse.c do_warning_event(event, "bad op token %s", token); event 1965 tools/lib/traceevent/event-parse.c do_warning_event(event, "bad op token %s", token); event 1986 tools/lib/traceevent/event-parse.c type = process_arg(event, right, tok); event 2003 tools/lib/traceevent/event-parse.c type = process_cond(event, arg, tok); event 2037 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 2052 tools/lib/traceevent/event-parse.c do_warning_event(event, "bad pointer type"); event 2073 tools/lib/traceevent/event-parse.c type = process_arg_token(event, right, tok, type); event 2112 tools/lib/traceevent/event-parse.c type = process_array(event, arg, tok); event 2115 tools/lib/traceevent/event-parse.c do_warning_event(event, "unknown op '%s'", token); event 2116 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 2128 tools/lib/traceevent/event-parse.c return process_op(event, arg, tok); event 2130 tools/lib/traceevent/event-parse.c return process_op(event, right, tok); event 2136 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", __func__); event 2144 tools/lib/traceevent/event-parse.c process_entry(struct tep_event *event __maybe_unused, struct tep_print_arg *arg, event 2162 tools/lib/traceevent/event-parse.c 
arg->field.field = tep_find_any_field(event, arg->field.name); event 2166 tools/lib/traceevent/event-parse.c arg->field.field = tep_find_any_field(event, arg->field.name); event 2183 tools/lib/traceevent/event-parse.c static int alloc_and_process_delim(struct tep_event *event, char *next_token, event 2193 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", __func__); event 2198 tools/lib/traceevent/event-parse.c type = process_arg(event, field, &token); event 2518 tools/lib/traceevent/event-parse.c process_fields(struct tep_event *event, struct tep_print_flag_sym **list, char **tok) event 2537 tools/lib/traceevent/event-parse.c type = process_arg(event, arg, &token); event 2540 tools/lib/traceevent/event-parse.c type = process_op(event, arg, &token); event 2565 tools/lib/traceevent/event-parse.c type = process_arg(event, arg, &token); event 2599 tools/lib/traceevent/event-parse.c process_flags(struct tep_event *event, struct tep_print_arg *arg, char **tok) event 2610 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", __func__); event 2614 tools/lib/traceevent/event-parse.c type = process_field_arg(event, field, &token); event 2618 tools/lib/traceevent/event-parse.c type = process_op(event, field, &token); event 2635 tools/lib/traceevent/event-parse.c type = process_fields(event, &arg->flags.flags, &token); event 2652 tools/lib/traceevent/event-parse.c process_symbols(struct tep_event *event, struct tep_print_arg *arg, char **tok) event 2663 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", __func__); event 2667 tools/lib/traceevent/event-parse.c type = process_field_arg(event, field, &token); event 2674 tools/lib/traceevent/event-parse.c type = process_fields(event, &arg->symbol.symbols, &token); event 2691 tools/lib/traceevent/event-parse.c process_hex_common(struct tep_event *event, struct tep_print_arg *arg, event 2697 tools/lib/traceevent/event-parse.c if (alloc_and_process_delim(event, ",", &arg->hex.field)) event 2700 tools/lib/traceevent/event-parse.c if (alloc_and_process_delim(event, ")", &arg->hex.size)) event 2714 tools/lib/traceevent/event-parse.c process_hex(struct tep_event *event, struct tep_print_arg *arg, char **tok) event 2716 tools/lib/traceevent/event-parse.c return process_hex_common(event, arg, tok, TEP_PRINT_HEX); event 2720 tools/lib/traceevent/event-parse.c process_hex_str(struct tep_event *event, struct tep_print_arg *arg, event 2723 tools/lib/traceevent/event-parse.c return process_hex_common(event, arg, tok, TEP_PRINT_HEX_STR); event 2727 tools/lib/traceevent/event-parse.c process_int_array(struct tep_event *event, struct tep_print_arg *arg, char **tok) event 2732 tools/lib/traceevent/event-parse.c if (alloc_and_process_delim(event, ",", &arg->int_array.field)) event 2735 tools/lib/traceevent/event-parse.c if (alloc_and_process_delim(event, ",", &arg->int_array.count)) event 2738 tools/lib/traceevent/event-parse.c if (alloc_and_process_delim(event, ")", &arg->int_array.el_size)) event 2755 tools/lib/traceevent/event-parse.c process_dynamic_array(struct tep_event *event, struct tep_print_arg *arg, char **tok) event 2775 tools/lib/traceevent/event-parse.c field = tep_find_field(event, token); event 2794 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", __func__); event 2799 tools/lib/traceevent/event-parse.c type = process_arg(event, arg, &token); event 2819 tools/lib/traceevent/event-parse.c process_dynamic_array_len(struct 
tep_event *event, struct tep_print_arg *arg, event 2832 tools/lib/traceevent/event-parse.c field = tep_find_field(event, token); event 2855 tools/lib/traceevent/event-parse.c process_paren(struct tep_event *event, struct tep_print_arg *arg, char **tok) event 2861 tools/lib/traceevent/event-parse.c type = process_arg(event, arg, &token); event 2867 tools/lib/traceevent/event-parse.c type = process_op(event, arg, &token); event 2889 tools/lib/traceevent/event-parse.c do_warning_event(event, "previous needed to be TEP_PRINT_ATOM"); event 2895 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", event 2903 tools/lib/traceevent/event-parse.c type = process_arg_token(event, item_arg, &token, type); event 2918 tools/lib/traceevent/event-parse.c process_str(struct tep_event *event __maybe_unused, struct tep_print_arg *arg, event 2947 tools/lib/traceevent/event-parse.c process_bitmask(struct tep_event *event __maybe_unused, struct tep_print_arg *arg, event 3008 tools/lib/traceevent/event-parse.c process_func_handler(struct tep_event *event, struct tep_function_handler *func, event 3026 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", event 3031 tools/lib/traceevent/event-parse.c type = process_arg(event, farg, &token); event 3034 tools/lib/traceevent/event-parse.c do_warning_event(event, event 3037 tools/lib/traceevent/event-parse.c event->name, i + 1); event 3042 tools/lib/traceevent/event-parse.c do_warning_event(event, event 3044 tools/lib/traceevent/event-parse.c func->name, func->nr_args, event->name); event 3066 tools/lib/traceevent/event-parse.c process_function(struct tep_event *event, struct tep_print_arg *arg, event 3074 tools/lib/traceevent/event-parse.c return process_flags(event, arg, tok); event 3079 tools/lib/traceevent/event-parse.c return process_symbols(event, arg, tok); event 3083 tools/lib/traceevent/event-parse.c return process_hex(event, arg, tok); event 3087 tools/lib/traceevent/event-parse.c return process_hex_str(event, arg, tok); event 3091 tools/lib/traceevent/event-parse.c return process_int_array(event, arg, tok); event 3095 tools/lib/traceevent/event-parse.c return process_str(event, arg, tok); event 3099 tools/lib/traceevent/event-parse.c return process_bitmask(event, arg, tok); event 3103 tools/lib/traceevent/event-parse.c return process_dynamic_array(event, arg, tok); event 3107 tools/lib/traceevent/event-parse.c return process_dynamic_array_len(event, arg, tok); event 3110 tools/lib/traceevent/event-parse.c func = find_func_handler(event->tep, token); event 3113 tools/lib/traceevent/event-parse.c return process_func_handler(event, func, arg, tok); event 3116 tools/lib/traceevent/event-parse.c do_warning_event(event, "function %s not defined", token); event 3122 tools/lib/traceevent/event-parse.c process_arg_token(struct tep_event *event, struct tep_print_arg *arg, event 3134 tools/lib/traceevent/event-parse.c type = process_entry(event, arg, &token); event 3149 tools/lib/traceevent/event-parse.c type = process_function(event, arg, atom, &token); event 3183 tools/lib/traceevent/event-parse.c type = process_paren(event, arg, &token); event 3191 tools/lib/traceevent/event-parse.c type = process_op(event, arg, &token); event 3202 tools/lib/traceevent/event-parse.c do_warning_event(event, "unexpected type %d", type); event 3210 tools/lib/traceevent/event-parse.c static int event_read_print_args(struct tep_event *event, struct tep_print_arg **list) event 3225 tools/lib/traceevent/event-parse.c 
do_warning_event(event, "%s: not enough memory!", event 3230 tools/lib/traceevent/event-parse.c type = process_arg(event, arg, &token); event 3242 tools/lib/traceevent/event-parse.c type = process_op(event, arg, &token); event 3268 tools/lib/traceevent/event-parse.c static int event_read_print(struct tep_event *event) event 3287 tools/lib/traceevent/event-parse.c event->print_fmt.format = token; event 3288 tools/lib/traceevent/event-parse.c event->print_fmt.args = NULL; event 3300 tools/lib/traceevent/event-parse.c if (asprintf(&cat, "%s%s", event->print_fmt.format, token) < 0) event 3303 tools/lib/traceevent/event-parse.c free_token(event->print_fmt.format); event 3304 tools/lib/traceevent/event-parse.c event->print_fmt.format = NULL; event 3314 tools/lib/traceevent/event-parse.c ret = event_read_print_args(event, &event->print_fmt.args); event 3334 tools/lib/traceevent/event-parse.c tep_find_common_field(struct tep_event *event, const char *name) event 3338 tools/lib/traceevent/event-parse.c for (format = event->format.common_fields; event 3356 tools/lib/traceevent/event-parse.c tep_find_field(struct tep_event *event, const char *name) event 3360 tools/lib/traceevent/event-parse.c for (format = event->format.fields; event 3379 tools/lib/traceevent/event-parse.c tep_find_any_field(struct tep_event *event, const char *name) event 3383 tools/lib/traceevent/event-parse.c format = tep_find_common_field(event, name); event 3386 tools/lib/traceevent/event-parse.c return tep_find_field(event, name); event 3440 tools/lib/traceevent/event-parse.c *value = tep_read_number(field->event->tep, event 3451 tools/lib/traceevent/event-parse.c struct tep_event *event; event 3463 tools/lib/traceevent/event-parse.c event = tep->events[0]; event 3464 tools/lib/traceevent/event-parse.c field = tep_find_common_field(event, type); event 3574 tools/lib/traceevent/event-parse.c struct tep_event *event = NULL; event 3583 tools/lib/traceevent/event-parse.c event = tep->events[i]; event 3584 tools/lib/traceevent/event-parse.c if (strcmp(event->name, name) == 0) { event 3587 tools/lib/traceevent/event-parse.c if (strcmp(event->system, sys) == 0) event 3592 tools/lib/traceevent/event-parse.c event = NULL; event 3594 tools/lib/traceevent/event-parse.c tep->last_event = event; event 3595 tools/lib/traceevent/event-parse.c return event; event 3599 tools/lib/traceevent/event-parse.c eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg *arg) event 3601 tools/lib/traceevent/event-parse.c struct tep_handle *tep = event->tep; event 3617 tools/lib/traceevent/event-parse.c arg->field.field = tep_find_any_field(event, arg->field.name); event 3633 tools/lib/traceevent/event-parse.c val = eval_num_arg(data, size, event, arg->typecast.item); event 3642 tools/lib/traceevent/event-parse.c val = process_defined_func(&s, data, size, event, arg); event 3652 tools/lib/traceevent/event-parse.c right = eval_num_arg(data, size, event, arg->op.right); event 3683 tools/lib/traceevent/event-parse.c tep_find_any_field(event, larg->field.name); event 3702 tools/lib/traceevent/event-parse.c left = eval_num_arg(data, size, event, arg->op.left); event 3705 tools/lib/traceevent/event-parse.c val = eval_num_arg(data, size, event, arg->op.left); event 3707 tools/lib/traceevent/event-parse.c val = eval_num_arg(data, size, event, arg->op.right); event 3711 tools/lib/traceevent/event-parse.c left = eval_num_arg(data, size, event, arg->op.left); event 3712 tools/lib/traceevent/event-parse.c right = eval_num_arg(data, size, 
event, arg->op.right); event 3826 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: unknown op '%s'", __func__, arg->op.op); event 3830 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: field %s not found", event 3939 tools/lib/traceevent/event-parse.c struct tep_event *event, const char *format, event 3942 tools/lib/traceevent/event-parse.c struct tep_handle *tep = event->tep; event 3963 tools/lib/traceevent/event-parse.c field = tep_find_any_field(event, arg->field.name); event 4008 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: not enough memory!", event 4018 tools/lib/traceevent/event-parse.c val = eval_num_arg(data, size, event, arg->flags.field); event 4041 tools/lib/traceevent/event-parse.c val = eval_num_arg(data, size, event, arg->symbol.field); event 4064 tools/lib/traceevent/event-parse.c field = tep_find_any_field(event, str); event 4071 tools/lib/traceevent/event-parse.c len = eval_num_arg(data, size, event, arg->hex.size); event 4095 tools/lib/traceevent/event-parse.c field = tep_find_any_field(event, str); event 4102 tools/lib/traceevent/event-parse.c len = eval_num_arg(data, size, event, arg->int_array.count); event 4103 tools/lib/traceevent/event-parse.c el_size = eval_num_arg(data, size, event, event 4135 tools/lib/traceevent/event-parse.c f = tep_find_any_field(event, arg->string.string); event 4153 tools/lib/traceevent/event-parse.c f = tep_find_any_field(event, arg->bitmask.bitmask); event 4169 tools/lib/traceevent/event-parse.c val = eval_num_arg(data, size, event, arg->op.left); event 4171 tools/lib/traceevent/event-parse.c print_str_arg(s, data, size, event, event 4174 tools/lib/traceevent/event-parse.c print_str_arg(s, data, size, event, event 4178 tools/lib/traceevent/event-parse.c process_defined_func(s, data, size, event, arg); event 4188 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: field %s not found", event 4194 tools/lib/traceevent/event-parse.c struct tep_event *event, struct tep_print_arg *arg) event 4226 tools/lib/traceevent/event-parse.c args[i] = eval_num_arg(data, size, event, farg); event 4230 tools/lib/traceevent/event-parse.c print_str_arg(&str, data, size, event, "%s", -1, farg); event 4234 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s(%d): malloc str", event 4242 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s(%d): malloc str", event 4255 tools/lib/traceevent/event-parse.c do_warning_event(event, "Unexpected end of arguments\n"); event 4289 tools/lib/traceevent/event-parse.c static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event *event) event 4291 tools/lib/traceevent/event-parse.c struct tep_handle *tep = event->tep; event 4303 tools/lib/traceevent/event-parse.c field = tep_find_field(event, "buf"); event 4305 tools/lib/traceevent/event-parse.c do_warning_event(event, "can't find buffer field for binary printk"); event 4308 tools/lib/traceevent/event-parse.c ip_field = tep_find_field(event, "ip"); event 4310 tools/lib/traceevent/event-parse.c do_warning_event(event, "can't find ip field for binary printk"); event 4324 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s(%d): not enough memory!", event 4426 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s(%d): not enough memory!", event 4450 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s(%d): not enough memory!", event 4477 tools/lib/traceevent/event-parse.c struct tep_event *event) event 4479 
tools/lib/traceevent/event-parse.c struct tep_handle *tep = event->tep; event 4488 tools/lib/traceevent/event-parse.c field = tep_find_field(event, "fmt"); event 4490 tools/lib/traceevent/event-parse.c do_warning_event(event, "can't find format field for binary printk"); event 4512 tools/lib/traceevent/event-parse.c struct tep_event *event, struct tep_print_arg *arg) event 4518 tools/lib/traceevent/event-parse.c process_defined_func(s, data, size, event, arg); event 4532 tools/lib/traceevent/event-parse.c tep_find_any_field(event, arg->field.name); event 4534 tools/lib/traceevent/event-parse.c do_warning_event(event, "%s: field %s not found", event 4665 tools/lib/traceevent/event-parse.c void *data, int size, struct tep_event *event, event 4671 tools/lib/traceevent/event-parse.c process_defined_func(s, data, size, event, arg); event 4682 tools/lib/traceevent/event-parse.c tep_find_any_field(event, arg->field.name); event 4702 tools/lib/traceevent/event-parse.c void *data, int size, struct tep_event *event, event 4717 tools/lib/traceevent/event-parse.c process_defined_func(s, data, size, event, arg); event 4728 tools/lib/traceevent/event-parse.c tep_find_any_field(event, arg->field.name); event 4752 tools/lib/traceevent/event-parse.c void *data, int size, struct tep_event *event, event 4775 tools/lib/traceevent/event-parse.c process_defined_func(s, data, size, event, arg); event 4786 tools/lib/traceevent/event-parse.c tep_find_any_field(event, arg->field.name); event 4834 tools/lib/traceevent/event-parse.c void *data, int size, struct tep_event *event, event 4850 tools/lib/traceevent/event-parse.c rc += print_ipv4_arg(s, ptr, i, data, size, event, arg); event 4853 tools/lib/traceevent/event-parse.c rc += print_ipv6_arg(s, ptr, i, data, size, event, arg); event 4856 tools/lib/traceevent/event-parse.c rc += print_ipsa_arg(s, ptr, i, data, size, event, arg); event 4880 tools/lib/traceevent/event-parse.c struct tep_handle *tep = field->event->tep; event 4941 tools/lib/traceevent/event-parse.c int size __maybe_unused, struct tep_event *event) event 4945 tools/lib/traceevent/event-parse.c field = event->format.fields; event 4953 tools/lib/traceevent/event-parse.c static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event) event 4955 tools/lib/traceevent/event-parse.c struct tep_handle *tep = event->tep; event 4956 tools/lib/traceevent/event-parse.c struct tep_print_fmt *print_fmt = &event->print_fmt; event 4972 tools/lib/traceevent/event-parse.c if (event->flags & TEP_EVENT_FL_FAILED) { event 4974 tools/lib/traceevent/event-parse.c tep_print_fields(s, data, size, event); event 4978 tools/lib/traceevent/event-parse.c if (event->flags & TEP_EVENT_FL_ISBPRINT) { event 4979 tools/lib/traceevent/event-parse.c bprint_fmt = get_bprint_format(data, size, event); event 4980 tools/lib/traceevent/event-parse.c args = make_bprint_args(bprint_fmt, data, size, event); event 5032 tools/lib/traceevent/event-parse.c do_warning_event(event, "no argument match"); event 5033 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 5036 tools/lib/traceevent/event-parse.c len_arg = eval_num_arg(data, size, event, arg); event 5065 tools/lib/traceevent/event-parse.c print_mac_arg(s, *ptr, data, size, event, arg); event 5071 tools/lib/traceevent/event-parse.c n = print_ip_arg(s, ptr, data, size, event, arg); event 5086 tools/lib/traceevent/event-parse.c do_warning_event(event, "no argument match"); event 5087 tools/lib/traceevent/event-parse.c event->flags |= 
TEP_EVENT_FL_FAILED; event 5096 tools/lib/traceevent/event-parse.c do_warning_event(event, "bad format!"); event 5097 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 5104 tools/lib/traceevent/event-parse.c val = eval_num_arg(data, size, event, arg); event 5162 tools/lib/traceevent/event-parse.c do_warning_event(event, "bad count (%d)", ls); event 5163 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 5168 tools/lib/traceevent/event-parse.c do_warning_event(event, "no matching argument"); event 5169 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 5178 tools/lib/traceevent/event-parse.c do_warning_event(event, "bad format!"); event 5179 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 5189 tools/lib/traceevent/event-parse.c print_str_arg(&p, data, size, event, event 5204 tools/lib/traceevent/event-parse.c if (event->flags & TEP_EVENT_FL_FAILED) { event 5465 tools/lib/traceevent/event-parse.c struct tep_event *event, struct tep_record *record) event 5469 tools/lib/traceevent/event-parse.c if (raw || (event->flags & TEP_EVENT_FL_PRINTRAW)) event 5470 tools/lib/traceevent/event-parse.c tep_print_fields(s, record->data, record->size, event); event 5473 tools/lib/traceevent/event-parse.c if (event->handler && !(event->flags & TEP_EVENT_FL_NOHANDLE)) event 5474 tools/lib/traceevent/event-parse.c print_pretty = event->handler(s, record, event, event 5475 tools/lib/traceevent/event-parse.c event->context); event 5478 tools/lib/traceevent/event-parse.c pretty_print(s, record->data, record->size, event); event 5515 tools/lib/traceevent/event-parse.c char *format, struct tep_event *event, event 5554 tools/lib/traceevent/event-parse.c struct tep_record *record, struct tep_event *event, event 5567 tools/lib/traceevent/event-parse.c print_event_info(s, type->format, true, event, record); event 5569 tools/lib/traceevent/event-parse.c print_event_info(s, type->format, false, event, record); event 5571 tools/lib/traceevent/event-parse.c trace_seq_printf(s, type->format, event->name); event 5579 tools/lib/traceevent/event-parse.c struct tep_record *record, struct tep_event *event, event 5592 tools/lib/traceevent/event-parse.c return print_event_time(tep, s, type->format, event, record); event 5661 tools/lib/traceevent/event-parse.c struct tep_event *event; event 5666 tools/lib/traceevent/event-parse.c event = tep_find_event_by_record(tep, record); event 5681 tools/lib/traceevent/event-parse.c print_string(tep, s, record, event, event 5685 tools/lib/traceevent/event-parse.c print_int(tep, s, record, event, event 5898 tools/lib/traceevent/event-parse.c struct tep_format_field **tep_event_common_fields(struct tep_event *event) event 5900 tools/lib/traceevent/event-parse.c return get_event_fields("common", event->name, event 5901 tools/lib/traceevent/event-parse.c event->format.nr_common, event 5902 tools/lib/traceevent/event-parse.c event->format.common_fields); event 5912 tools/lib/traceevent/event-parse.c struct tep_format_field **tep_event_fields(struct tep_event *event) event 5914 tools/lib/traceevent/event-parse.c return get_event_fields("event", event->name, event 5915 tools/lib/traceevent/event-parse.c event->format.nr_fields, event 5916 tools/lib/traceevent/event-parse.c event->format.fields); event 6154 tools/lib/traceevent/event-parse.c static int event_matches(struct tep_event *event, event 6158 tools/lib/traceevent/event-parse.c if (id >= 0 && id != event->id) event 6161 
tools/lib/traceevent/event-parse.c if (event_name && (strcmp(event_name, event->name) != 0)) event 6164 tools/lib/traceevent/event-parse.c if (sys_name && (strcmp(sys_name, event->system) != 0)) event 6177 tools/lib/traceevent/event-parse.c static int find_event_handle(struct tep_handle *tep, struct tep_event *event) event 6184 tools/lib/traceevent/event-parse.c if (event_matches(event, handle->id, event 6194 tools/lib/traceevent/event-parse.c event->id, event->system, event->name); event 6196 tools/lib/traceevent/event-parse.c event->handler = handle->func; event 6197 tools/lib/traceevent/event-parse.c event->context = handle->context; event 6222 tools/lib/traceevent/event-parse.c struct tep_event *event; event 6227 tools/lib/traceevent/event-parse.c *eventp = event = alloc_event(); event 6228 tools/lib/traceevent/event-parse.c if (!event) event 6231 tools/lib/traceevent/event-parse.c event->name = event_read_name(); event 6232 tools/lib/traceevent/event-parse.c if (!event->name) { event 6239 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_ISFTRACE; event 6241 tools/lib/traceevent/event-parse.c if (strcmp(event->name, "bprint") == 0) event 6242 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_ISBPRINT; event 6245 tools/lib/traceevent/event-parse.c event->id = event_read_id(); event 6246 tools/lib/traceevent/event-parse.c if (event->id < 0) { event 6255 tools/lib/traceevent/event-parse.c event->system = strdup(sys); event 6256 tools/lib/traceevent/event-parse.c if (!event->system) { event 6262 tools/lib/traceevent/event-parse.c event->tep = tep; event 6264 tools/lib/traceevent/event-parse.c ret = event_read_format(event); event 6274 tools/lib/traceevent/event-parse.c if (tep && find_event_handle(tep, event)) event 6277 tools/lib/traceevent/event-parse.c ret = event_read_print(event); event 6285 tools/lib/traceevent/event-parse.c if (!ret && (event->flags & TEP_EVENT_FL_ISFTRACE)) { event 6290 tools/lib/traceevent/event-parse.c list = &event->print_fmt.args; event 6291 tools/lib/traceevent/event-parse.c for (field = event->format.fields; field; field = field->next) { event 6294 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 6300 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 6314 tools/lib/traceevent/event-parse.c event->flags |= TEP_EVENT_FL_FAILED; event 6318 tools/lib/traceevent/event-parse.c free(event->system); event 6319 tools/lib/traceevent/event-parse.c free(event->name); event 6320 tools/lib/traceevent/event-parse.c free(event); event 6332 tools/lib/traceevent/event-parse.c struct tep_event *event = *eventp; event 6334 tools/lib/traceevent/event-parse.c if (event == NULL) event 6337 tools/lib/traceevent/event-parse.c if (tep && add_event(tep, event)) { event 6343 tools/lib/traceevent/event-parse.c if (PRINT_ARGS && event->print_fmt.args) event 6344 tools/lib/traceevent/event-parse.c print_args(event->print_fmt.args); event 6349 tools/lib/traceevent/event-parse.c tep_free_event(event); event 6393 tools/lib/traceevent/event-parse.c struct tep_event *event = NULL; event 6394 tools/lib/traceevent/event-parse.c return __parse_event(tep, &event, buf, size, sys); event 6430 tools/lib/traceevent/event-parse.c void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event, event 6439 tools/lib/traceevent/event-parse.c if (!event) event 6442 tools/lib/traceevent/event-parse.c field = tep_find_field(event, name); event 6456 tools/lib/traceevent/event-parse.c offset = tep_read_number(event->tep, 
event 6477 tools/lib/traceevent/event-parse.c int tep_get_field_val(struct trace_seq *s, struct tep_event *event, event 6483 tools/lib/traceevent/event-parse.c if (!event) event 6486 tools/lib/traceevent/event-parse.c field = tep_find_field(event, name); event 6502 tools/lib/traceevent/event-parse.c int tep_get_common_field_val(struct trace_seq *s, struct tep_event *event, event 6508 tools/lib/traceevent/event-parse.c if (!event) event 6511 tools/lib/traceevent/event-parse.c field = tep_find_common_field(event, name); event 6527 tools/lib/traceevent/event-parse.c int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event, event 6533 tools/lib/traceevent/event-parse.c if (!event) event 6536 tools/lib/traceevent/event-parse.c field = tep_find_any_field(event, name); event 6554 tools/lib/traceevent/event-parse.c struct tep_event *event, const char *name, event 6557 tools/lib/traceevent/event-parse.c struct tep_format_field *field = tep_find_field(event, name); event 6587 tools/lib/traceevent/event-parse.c struct tep_event *event, const char *name, event 6590 tools/lib/traceevent/event-parse.c struct tep_format_field *field = tep_find_field(event, name); event 6591 tools/lib/traceevent/event-parse.c struct tep_handle *tep = event->tep; event 6751 tools/lib/traceevent/event-parse.c struct tep_event *event; event 6755 tools/lib/traceevent/event-parse.c event = tep_find_event(tep, id); event 6756 tools/lib/traceevent/event-parse.c if (!event) event 6758 tools/lib/traceevent/event-parse.c if (event_name && (strcmp(event_name, event->name) != 0)) event 6760 tools/lib/traceevent/event-parse.c if (sys_name && (strcmp(sys_name, event->system) != 0)) event 6763 tools/lib/traceevent/event-parse.c event = tep_find_event_by_name(tep, sys_name, event_name); event 6764 tools/lib/traceevent/event-parse.c if (!event) event 6767 tools/lib/traceevent/event-parse.c return event; event 6797 tools/lib/traceevent/event-parse.c struct tep_event *event; event 6800 tools/lib/traceevent/event-parse.c event = search_event(tep, id, sys_name, event_name); event 6801 tools/lib/traceevent/event-parse.c if (event == NULL) event 6805 tools/lib/traceevent/event-parse.c event->id, event->system, event->name); event 6807 tools/lib/traceevent/event-parse.c event->handler = func; event 6808 tools/lib/traceevent/event-parse.c event->context = context; event 6881 tools/lib/traceevent/event-parse.c struct tep_event *event; event 6885 tools/lib/traceevent/event-parse.c event = search_event(tep, id, sys_name, event_name); event 6886 tools/lib/traceevent/event-parse.c if (event == NULL) event 6889 tools/lib/traceevent/event-parse.c if (event->handler == func && event->context == context) { event 6891 tools/lib/traceevent/event-parse.c event->id, event->system, event->name); event 6893 tools/lib/traceevent/event-parse.c event->handler = NULL; event 6894 tools/lib/traceevent/event-parse.c event->context = NULL; event 6968 tools/lib/traceevent/event-parse.c void tep_free_event(struct tep_event *event) event 6970 tools/lib/traceevent/event-parse.c free(event->name); event 6971 tools/lib/traceevent/event-parse.c free(event->system); event 6973 tools/lib/traceevent/event-parse.c free_formats(&event->format); event 6975 tools/lib/traceevent/event-parse.c free(event->print_fmt.format); event 6976 tools/lib/traceevent/event-parse.c free_args(event->print_fmt.args); event 6978 tools/lib/traceevent/event-parse.c free(event); event 64 tools/lib/traceevent/event-parse.h struct tep_event *event, event 146 
tools/lib/traceevent/event-parse.h struct tep_event *event; event 469 tools/lib/traceevent/event-parse.h void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event, event 473 tools/lib/traceevent/event-parse.h int tep_get_field_val(struct trace_seq *s, struct tep_event *event, event 476 tools/lib/traceevent/event-parse.h int tep_get_common_field_val(struct trace_seq *s, struct tep_event *event, event 479 tools/lib/traceevent/event-parse.h int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event, event 484 tools/lib/traceevent/event-parse.h struct tep_event *event, const char *name, event 488 tools/lib/traceevent/event-parse.h struct tep_event *event, const char *name, event 509 tools/lib/traceevent/event-parse.h struct tep_format_field *tep_find_common_field(struct tep_event *event, const char *name); event 510 tools/lib/traceevent/event-parse.h struct tep_format_field *tep_find_field(struct tep_event *event, const char *name); event 511 tools/lib/traceevent/event-parse.h struct tep_format_field *tep_find_any_field(struct tep_event *event, const char *name); event 542 tools/lib/traceevent/event-parse.h int size __maybe_unused, struct tep_event *event); event 549 tools/lib/traceevent/event-parse.h struct tep_format_field **tep_event_common_fields(struct tep_event *event); event 550 tools/lib/traceevent/event-parse.h struct tep_format_field **tep_event_fields(struct tep_event *event); event 707 tools/lib/traceevent/event-parse.h struct tep_event *event; event 30 tools/lib/traceevent/parse-filter.c struct tep_event *event; event 157 tools/lib/traceevent/parse-filter.c filter_type->event = tep_find_event(filter->tep, id); event 232 tools/lib/traceevent/parse-filter.c struct tep_event *event) event 242 tools/lib/traceevent/parse-filter.c list->event = event; event 246 tools/lib/traceevent/parse-filter.c static int event_match(struct tep_event *event, event 250 tools/lib/traceevent/parse-filter.c return !regexec(sreg, event->system, 0, NULL, 0) && event 251 tools/lib/traceevent/parse-filter.c !regexec(ereg, event->name, 0, NULL, 0); event 254 tools/lib/traceevent/parse-filter.c return !regexec(ereg, event->system, 0, NULL, 0) || event 255 tools/lib/traceevent/parse-filter.c !regexec(ereg, event->name, 0, NULL, 0); event 262 tools/lib/traceevent/parse-filter.c struct tep_event *event; event 303 tools/lib/traceevent/parse-filter.c event = tep->events[i]; event 304 tools/lib/traceevent/parse-filter.c if (event_match(event, sys_name ? 
&sreg : NULL, &ereg)) { event 306 tools/lib/traceevent/parse-filter.c if (add_event(events, event) < 0) { event 327 tools/lib/traceevent/parse-filter.c struct event_list *event; event 330 tools/lib/traceevent/parse-filter.c event = events; event 332 tools/lib/traceevent/parse-filter.c free(event); event 337 tools/lib/traceevent/parse-filter.c create_arg_item(struct tep_event *event, const char *token, event 372 tools/lib/traceevent/parse-filter.c field = tep_find_any_field(event, token); event 943 tools/lib/traceevent/parse-filter.c process_filter(struct tep_event *event, struct tep_filter_arg **parg, event 967 tools/lib/traceevent/parse-filter.c ret = create_arg_item(event, token, type, &arg, error_str); event 1012 tools/lib/traceevent/parse-filter.c ret = process_filter(event, &arg, error_str, 0); event 1108 tools/lib/traceevent/parse-filter.c ret = process_filter(event, &arg, error_str, 1); event 1183 tools/lib/traceevent/parse-filter.c process_event(struct tep_event *event, const char *filter_str, event 1190 tools/lib/traceevent/parse-filter.c ret = process_filter(event, parg, error_str, 0); event 1208 tools/lib/traceevent/parse-filter.c filter_event(struct tep_event_filter *filter, struct tep_event *event, event 1216 tools/lib/traceevent/parse-filter.c ret = process_event(event, filter_str, &arg, error_str); event 1230 tools/lib/traceevent/parse-filter.c filter_type = add_filter_type(filter, event->id); event 1263 tools/lib/traceevent/parse-filter.c struct event_list *event; event 1332 tools/lib/traceevent/parse-filter.c for (event = events; event; event = event->next) { event 1333 tools/lib/traceevent/parse-filter.c ret = filter_event(filter, event->event, filter_start, event 1341 tools/lib/traceevent/parse-filter.c test = tep_filter_make_string(filter, event->event->id); event 1343 tools/lib/traceevent/parse-filter.c printf(" '%s: %s'\n", event->event->name, test); event 1459 tools/lib/traceevent/parse-filter.c struct tep_event *event; event 1465 tools/lib/traceevent/parse-filter.c sys = filter_type->event->system; event 1466 tools/lib/traceevent/parse-filter.c name = filter_type->event->name; event 1467 tools/lib/traceevent/parse-filter.c event = tep_find_event_by_name(filter->tep, sys, name); event 1468 tools/lib/traceevent/parse-filter.c if (!event) event 1489 tools/lib/traceevent/parse-filter.c filter_type = add_filter_type(filter, event->id); event 1502 tools/lib/traceevent/parse-filter.c filter_event(filter, event, str, NULL); event 1529 tools/lib/traceevent/parse-filter.c static int test_filter(struct tep_event *event, struct tep_filter_arg *arg, event 1533 tools/lib/traceevent/parse-filter.c get_comm(struct tep_event *event, struct tep_record *record) event 1538 tools/lib/traceevent/parse-filter.c pid = tep_data_pid(event->tep, record); event 1539 tools/lib/traceevent/parse-filter.c comm = tep_data_comm_from_pid(event->tep, pid); event 1544 tools/lib/traceevent/parse-filter.c get_value(struct tep_event *event, event 1553 tools/lib/traceevent/parse-filter.c name = get_comm(event, record); event 1580 tools/lib/traceevent/parse-filter.c get_arg_value(struct tep_event *event, struct tep_filter_arg *arg, event 1584 tools/lib/traceevent/parse-filter.c get_exp_value(struct tep_event *event, struct tep_filter_arg *arg, event 1589 tools/lib/traceevent/parse-filter.c lval = get_arg_value(event, arg->exp.left, record, err); event 1590 tools/lib/traceevent/parse-filter.c rval = get_arg_value(event, arg->exp.right, record, err); event 1639 tools/lib/traceevent/parse-filter.c 
get_arg_value(struct tep_event *event, struct tep_filter_arg *arg, event 1644 tools/lib/traceevent/parse-filter.c return get_value(event, arg->field.field, record); event 1654 tools/lib/traceevent/parse-filter.c return get_exp_value(event, arg, record, err); event 1663 tools/lib/traceevent/parse-filter.c static int test_num(struct tep_event *event, struct tep_filter_arg *arg, event 1668 tools/lib/traceevent/parse-filter.c lval = get_arg_value(event, arg->num.left, record, err); event 1669 tools/lib/traceevent/parse-filter.c rval = get_arg_value(event, arg->num.right, record, err); event 1706 tools/lib/traceevent/parse-filter.c struct tep_event *event; event 1736 tools/lib/traceevent/parse-filter.c event = arg->str.field->event; event 1737 tools/lib/traceevent/parse-filter.c tep = event->tep; event 1738 tools/lib/traceevent/parse-filter.c addr = get_value(event, arg->str.field, record); event 1754 tools/lib/traceevent/parse-filter.c static int test_str(struct tep_event *event, struct tep_filter_arg *arg, event 1760 tools/lib/traceevent/parse-filter.c val = get_comm(event, record); event 1785 tools/lib/traceevent/parse-filter.c static int test_op(struct tep_event *event, struct tep_filter_arg *arg, event 1790 tools/lib/traceevent/parse-filter.c return test_filter(event, arg->op.left, record, err) && event 1791 tools/lib/traceevent/parse-filter.c test_filter(event, arg->op.right, record, err); event 1794 tools/lib/traceevent/parse-filter.c return test_filter(event, arg->op.left, record, err) || event 1795 tools/lib/traceevent/parse-filter.c test_filter(event, arg->op.right, record, err); event 1798 tools/lib/traceevent/parse-filter.c return !test_filter(event, arg->op.right, record, err); event 1807 tools/lib/traceevent/parse-filter.c static int test_filter(struct tep_event *event, struct tep_filter_arg *arg, event 1823 tools/lib/traceevent/parse-filter.c return test_op(event, arg, record, err); event 1826 tools/lib/traceevent/parse-filter.c return test_num(event, arg, record, err); event 1829 tools/lib/traceevent/parse-filter.c return test_str(event, arg, record, err); event 1838 tools/lib/traceevent/parse-filter.c return !!get_arg_value(event, arg, record, err); event 1899 tools/lib/traceevent/parse-filter.c ret = test_filter(filter_type->event, filter_type->filter, record, &err); event 127 tools/lib/traceevent/plugins/plugin_function.c struct tep_event *event, void *context) event 129 tools/lib/traceevent/plugins/plugin_function.c struct tep_handle *tep = event->tep; event 136 tools/lib/traceevent/plugins/plugin_function.c if (tep_get_field_val(s, event, "ip", record, &function, 1)) event 141 tools/lib/traceevent/plugins/plugin_function.c if (tep_get_field_val(s, event, "parent_ip", record, &pfunction, 1)) event 30 tools/lib/traceevent/plugins/plugin_hrtimer.c struct tep_event *event, void *context) event 34 tools/lib/traceevent/plugins/plugin_hrtimer.c if (tep_print_num_field(s, "0x%llx", event, "timer", event 36 tools/lib/traceevent/plugins/plugin_hrtimer.c tep_print_num_field(s, "0x%llx", event, "hrtimer", event 41 tools/lib/traceevent/plugins/plugin_hrtimer.c tep_print_num_field(s, "%llu", event, "now", record, 1); event 43 tools/lib/traceevent/plugins/plugin_hrtimer.c tep_print_func_field(s, " function=%s", event, "function", event 50 tools/lib/traceevent/plugins/plugin_hrtimer.c struct tep_event *event, void *context) event 54 tools/lib/traceevent/plugins/plugin_hrtimer.c if (tep_print_num_field(s, "0x%llx", event, "timer", event 56 tools/lib/traceevent/plugins/plugin_hrtimer.c 
tep_print_num_field(s, "0x%llx", event, "hrtimer", event 59 tools/lib/traceevent/plugins/plugin_hrtimer.c tep_print_func_field(s, " function=%s", event, "function", event 63 tools/lib/traceevent/plugins/plugin_hrtimer.c tep_print_num_field(s, "%llu", event, "expires", record, 1); event 66 tools/lib/traceevent/plugins/plugin_hrtimer.c tep_print_num_field(s, "%llu", event, "softexpires", record, 1); event 28 tools/lib/traceevent/plugins/plugin_kmem.c struct tep_event *event, void *context) event 35 tools/lib/traceevent/plugins/plugin_kmem.c field = tep_find_field(event, "call_site"); event 42 tools/lib/traceevent/plugins/plugin_kmem.c func = tep_find_function(event->tep, val); event 46 tools/lib/traceevent/plugins/plugin_kmem.c addr = tep_find_function_address(event->tep, val); event 252 tools/lib/traceevent/plugins/plugin_kvm.c struct tep_event *event, const char *field) event 258 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, field, record, &val, 1) < 0) event 261 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "isa", record, &isa, 0) < 0) event 273 tools/lib/traceevent/plugins/plugin_kvm.c struct tep_event *event, void *context) event 277 tools/lib/traceevent/plugins/plugin_kvm.c if (print_exit_reason(s, record, event, "exit_reason") < 0) event 280 tools/lib/traceevent/plugins/plugin_kvm.c tep_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1); event 282 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "info1", record, &info1, 0) >= 0 event 283 tools/lib/traceevent/plugins/plugin_kvm.c && tep_get_field_val(s, event, "info2", record, &info2, 0) >= 0) event 296 tools/lib/traceevent/plugins/plugin_kvm.c struct tep_event *event, void *context) event 303 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "rip", record, &rip, 1) < 0) event 306 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "csbase", record, &csbase, 1) < 0) event 309 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "len", record, &len, 1) < 0) event 312 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "flags", record, &flags, 1) < 0) event 315 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "failed", record, &failed, 1) < 0) event 318 tools/lib/traceevent/plugins/plugin_kvm.c insn = tep_get_field_raw(s, event, "insn", record, &llen, 1); event 335 tools/lib/traceevent/plugins/plugin_kvm.c struct tep_event *event, void *context) event 337 tools/lib/traceevent/plugins/plugin_kvm.c if (print_exit_reason(s, record, event, "exit_code") < 0) event 340 tools/lib/traceevent/plugins/plugin_kvm.c tep_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1); event 341 tools/lib/traceevent/plugins/plugin_kvm.c tep_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1); event 342 tools/lib/traceevent/plugins/plugin_kvm.c tep_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1); event 343 tools/lib/traceevent/plugins/plugin_kvm.c tep_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1); event 349 tools/lib/traceevent/plugins/plugin_kvm.c struct tep_event *event, void *context) event 351 tools/lib/traceevent/plugins/plugin_kvm.c tep_print_num_field(s, "rip %llx ", event, "rip", record, 1); event 353 tools/lib/traceevent/plugins/plugin_kvm.c return kvm_nested_vmexit_inject_handler(s, record, event, context); event 375 tools/lib/traceevent/plugins/plugin_kvm.c struct 
tep_event *event, void *context) event 383 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "role", record, &val, 1) < 0) event 392 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_is_file_bigendian(event->tep) == event 393 tools/lib/traceevent/plugins/plugin_kvm.c tep_is_local_bigendian(event->tep)) { event 410 tools/lib/traceevent/plugins/plugin_kvm.c tep_print_num_field(s, " root %u ", event, event 413 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "unsync", record, &val, 1) < 0) event 422 tools/lib/traceevent/plugins/plugin_kvm.c struct tep_event *event, void *context) event 426 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "created", record, &val, 1) < 0) event 431 tools/lib/traceevent/plugins/plugin_kvm.c if (tep_get_field_val(s, event, "gfn", record, &val, 1) < 0) event 435 tools/lib/traceevent/plugins/plugin_kvm.c return kvm_mmu_print_role(s, record, event, context); event 29 tools/lib/traceevent/plugins/plugin_mac80211.c static void print_string(struct trace_seq *s, struct tep_event *event, event 32 tools/lib/traceevent/plugins/plugin_mac80211.c struct tep_format_field *f = tep_find_field(event, name); event 57 tools/lib/traceevent/plugins/plugin_mac80211.c #define SF(fn) tep_print_num_field(s, fn ":%d", event, fn, record, 0) event 58 tools/lib/traceevent/plugins/plugin_mac80211.c #define SFX(fn) tep_print_num_field(s, fn ":%#x", event, fn, record, 0) event 63 tools/lib/traceevent/plugins/plugin_mac80211.c struct tep_event *event, void *context) event 67 tools/lib/traceevent/plugins/plugin_mac80211.c print_string(s, event, "wiphy_name", data); event 69 tools/lib/traceevent/plugins/plugin_mac80211.c print_string(s, event, "vif_name", data); event 70 tools/lib/traceevent/plugins/plugin_mac80211.c tep_print_num_field(s, "(%d)", event, "vif_type", record, 1); event 65 tools/lib/traceevent/plugins/plugin_sched_switch.c tep_register_comm(field->event->tep, comm, pid); event 70 tools/lib/traceevent/plugins/plugin_sched_switch.c struct tep_event *event, void *context) event 75 tools/lib/traceevent/plugins/plugin_sched_switch.c if (tep_get_field_val(s, event, "pid", record, &val, 1)) event 78 tools/lib/traceevent/plugins/plugin_sched_switch.c field = tep_find_any_field(event, "comm"); event 85 tools/lib/traceevent/plugins/plugin_sched_switch.c if (tep_get_field_val(s, event, "prio", record, &val, 0) == 0) event 88 tools/lib/traceevent/plugins/plugin_sched_switch.c if (tep_get_field_val(s, event, "success", record, &val, 1) == 0) event 91 tools/lib/traceevent/plugins/plugin_sched_switch.c if (tep_get_field_val(s, event, "target_cpu", record, &val, 0) == 0) event 99 tools/lib/traceevent/plugins/plugin_sched_switch.c struct tep_event *event, void *context) event 104 tools/lib/traceevent/plugins/plugin_sched_switch.c if (tep_get_field_val(s, event, "prev_pid", record, &val, 1)) event 107 tools/lib/traceevent/plugins/plugin_sched_switch.c field = tep_find_any_field(event, "prev_comm"); event 114 tools/lib/traceevent/plugins/plugin_sched_switch.c if (tep_get_field_val(s, event, "prev_prio", record, &val, 0) == 0) event 117 tools/lib/traceevent/plugins/plugin_sched_switch.c if (tep_get_field_val(s, event, "prev_state", record, &val, 0) == 0) event 122 tools/lib/traceevent/plugins/plugin_sched_switch.c if (tep_get_field_val(s, event, "next_pid", record, &val, 1)) event 125 tools/lib/traceevent/plugins/plugin_sched_switch.c field = tep_find_any_field(event, "next_comm"); event 132 
tools/lib/traceevent/plugins/plugin_sched_switch.c if (tep_get_field_val(s, event, "next_prio", record, &val, 0) == 0) event 190 tools/perf/arch/powerpc/util/kvm-stat.c bool event = false; event 194 tools/perf/arch/powerpc/util/kvm-stat.c OPT_BOOLEAN('e', "event", &event, NULL), event 206 tools/perf/arch/powerpc/util/kvm-stat.c if (!event) { event 47 tools/perf/arch/x86/tests/intel-cqm.c void *event; event 108 tools/perf/arch/x86/tests/intel-cqm.c event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0); event 109 tools/perf/arch/x86/tests/intel-cqm.c if (event == (void *)(-1)) { event 118 tools/perf/arch/x86/tests/intel-cqm.c munmap(event, mmap_len); event 66 tools/perf/arch/x86/tests/perf-time-to-tsc.c union perf_event *event; event 123 tools/perf/arch/x86/tests/perf-time-to-tsc.c while ((event = perf_mmap__read_event(md)) != NULL) { event 126 tools/perf/arch/x86/tests/perf-time-to-tsc.c if (event->header.type != PERF_RECORD_COMM || event 127 tools/perf/arch/x86/tests/perf-time-to-tsc.c (pid_t)event->comm.pid != getpid() || event 128 tools/perf/arch/x86/tests/perf-time-to-tsc.c (pid_t)event->comm.tid != getpid()) event 131 tools/perf/arch/x86/tests/perf-time-to-tsc.c if (strcmp(event->comm.comm, comm1) == 0) { event 132 tools/perf/arch/x86/tests/perf-time-to-tsc.c CHECK__(perf_evsel__parse_sample(evsel, event, event 136 tools/perf/arch/x86/tests/perf-time-to-tsc.c if (strcmp(event->comm.comm, comm2) == 0) { event 137 tools/perf/arch/x86/tests/perf-time-to-tsc.c CHECK__(perf_evsel__parse_sample(evsel, event, event 23 tools/perf/arch/x86/util/event.c union perf_event *event = zalloc(sizeof(event->mmap) + event 26 tools/perf/arch/x86/util/event.c if (!event) { event 41 tools/perf/arch/x86/util/event.c size = sizeof(event->mmap) - sizeof(event->mmap.filename) + event 45 tools/perf/arch/x86/util/event.c memset(event, 0, size); event 47 tools/perf/arch/x86/util/event.c event->mmap.header.type = PERF_RECORD_MMAP; event 54 tools/perf/arch/x86/util/event.c event->header.misc = PERF_RECORD_MISC_KERNEL; event 56 tools/perf/arch/x86/util/event.c event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; event 58 tools/perf/arch/x86/util/event.c event->mmap.header.size = size; event 60 tools/perf/arch/x86/util/event.c event->mmap.start = pos->start; event 61 tools/perf/arch/x86/util/event.c event->mmap.len = pos->end - pos->start; event 62 tools/perf/arch/x86/util/event.c event->mmap.pgoff = pos->pgoff; event 63 tools/perf/arch/x86/util/event.c event->mmap.pid = machine->pid; event 65 tools/perf/arch/x86/util/event.c strlcpy(event->mmap.filename, kmap->name, PATH_MAX); event 67 tools/perf/arch/x86/util/event.c if (perf_tool__process_synth_event(tool, event, machine, event 74 tools/perf/arch/x86/util/event.c free(event); event 58 tools/perf/arch/x86/util/tsc.c union perf_event event = { event 79 tools/perf/arch/x86/util/tsc.c event.time_conv.time_mult = tc.time_mult; event 80 tools/perf/arch/x86/util/tsc.c event.time_conv.time_shift = tc.time_shift; event 81 tools/perf/arch/x86/util/tsc.c event.time_conv.time_zero = tc.time_zero; event 83 tools/perf/arch/x86/util/tsc.c return process(tool, &event, NULL, machine); event 262 tools/perf/builtin-annotate.c union perf_event *event, event 273 tools/perf/builtin-annotate.c event->header.type); event 292 tools/perf/builtin-annotate.c union perf_event *event) event 294 tools/perf/builtin-annotate.c if (event->feat.feat_id < HEADER_LAST_FEATURE) event 295 tools/perf/builtin-annotate.c return perf_event__process_feature(session, event); event 257 
tools/perf/builtin-c2c.c union perf_event *event, event 272 tools/perf/builtin-c2c.c event->header.type); event 382 tools/perf/builtin-diff.c union perf_event *event, event 399 tools/perf/builtin-diff.c event->header.type); event 53 tools/perf/builtin-inject.c union perf_event event[0]; event 69 tools/perf/builtin-inject.c union perf_event *event) event 74 tools/perf/builtin-inject.c return output_bytes(inject, event, event->header.size); event 78 tools/perf/builtin-inject.c union perf_event *event, event 81 tools/perf/builtin-inject.c return perf_event__repipe_synth(tool, event); event 86 tools/perf/builtin-inject.c union perf_event *event __maybe_unused, event 94 tools/perf/builtin-inject.c union perf_event *event) event 96 tools/perf/builtin-inject.c return perf_event__repipe_synth(session->tool, event); event 100 tools/perf/builtin-inject.c union perf_event *event, event 107 tools/perf/builtin-inject.c ret = perf_event__process_attr(tool, event, pevlist); event 114 tools/perf/builtin-inject.c return perf_event__repipe_synth(tool, event); event 139 tools/perf/builtin-inject.c union perf_event *event) event 155 tools/perf/builtin-inject.c event, offset); event 161 tools/perf/builtin-inject.c ret = output_bytes(inject, event, event->header.size); event 165 tools/perf/builtin-inject.c event->auxtrace.size); event 167 tools/perf/builtin-inject.c ret = output_bytes(inject, event, event 168 tools/perf/builtin-inject.c event->header.size + event->auxtrace.size); event 173 tools/perf/builtin-inject.c return event->auxtrace.size; event 180 tools/perf/builtin-inject.c union perf_event *event __maybe_unused) event 189 tools/perf/builtin-inject.c union perf_event *event, event 193 tools/perf/builtin-inject.c return perf_event__repipe_synth(tool, event); event 197 tools/perf/builtin-inject.c union perf_event *event __maybe_unused, event 205 tools/perf/builtin-inject.c union perf_event *event __maybe_unused, event 218 tools/perf/builtin-inject.c union perf_event *event, event 224 tools/perf/builtin-inject.c union perf_event *event, event 231 tools/perf/builtin-inject.c return f(tool, event, sample, evsel, machine); event 234 tools/perf/builtin-inject.c build_id__mark_dso_hit(tool, event, sample, evsel, machine); event 236 tools/perf/builtin-inject.c return perf_event__repipe_synth(tool, event); event 240 tools/perf/builtin-inject.c union perf_event *event, event 246 tools/perf/builtin-inject.c err = perf_event__process_mmap(tool, event, sample, machine); event 247 tools/perf/builtin-inject.c perf_event__repipe(tool, event, sample, machine); event 254 tools/perf/builtin-inject.c union perf_event *event, event 266 tools/perf/builtin-inject.c event->mmap.filename, sample->pid, &n); event 273 tools/perf/builtin-inject.c return perf_event__repipe_mmap(tool, event, sample, machine); event 278 tools/perf/builtin-inject.c union perf_event *event, event 284 tools/perf/builtin-inject.c err = perf_event__process_mmap2(tool, event, sample, machine); event 285 tools/perf/builtin-inject.c perf_event__repipe(tool, event, sample, machine); event 292 tools/perf/builtin-inject.c union perf_event *event, event 304 tools/perf/builtin-inject.c event->mmap2.filename, sample->pid, &n); event 311 tools/perf/builtin-inject.c return perf_event__repipe_mmap2(tool, event, sample, machine); event 316 tools/perf/builtin-inject.c union perf_event *event, event 322 tools/perf/builtin-inject.c err = perf_event__process_fork(tool, event, sample, machine); event 323 tools/perf/builtin-inject.c perf_event__repipe(tool, event, sample, 
machine); event 329 tools/perf/builtin-inject.c union perf_event *event, event 335 tools/perf/builtin-inject.c err = perf_event__process_comm(tool, event, sample, machine); event 336 tools/perf/builtin-inject.c perf_event__repipe(tool, event, sample, machine); event 342 tools/perf/builtin-inject.c union perf_event *event, event 346 tools/perf/builtin-inject.c int err = perf_event__process_namespaces(tool, event, sample, machine); event 348 tools/perf/builtin-inject.c perf_event__repipe(tool, event, sample, machine); event 354 tools/perf/builtin-inject.c union perf_event *event, event 360 tools/perf/builtin-inject.c err = perf_event__process_exit(tool, event, sample, machine); event 361 tools/perf/builtin-inject.c perf_event__repipe(tool, event, sample, machine); event 367 tools/perf/builtin-inject.c union perf_event *event) event 371 tools/perf/builtin-inject.c perf_event__repipe_synth(session->tool, event); event 372 tools/perf/builtin-inject.c err = perf_event__process_tracing_data(session, event); event 378 tools/perf/builtin-inject.c union perf_event *event) event 382 tools/perf/builtin-inject.c perf_event__repipe_synth(session->tool, event); event 383 tools/perf/builtin-inject.c err = perf_event__process_id_index(session, event); event 427 tools/perf/builtin-inject.c union perf_event *event, event 438 tools/perf/builtin-inject.c event->header.type); event 463 tools/perf/builtin-inject.c perf_event__repipe(tool, event, sample, machine); event 468 tools/perf/builtin-inject.c union perf_event *event __maybe_unused, event 488 tools/perf/builtin-inject.c union perf_event *event, event 496 tools/perf/builtin-inject.c perf_inject__sched_process_exit(tool, event, sample, evsel, machine); event 498 tools/perf/builtin-inject.c ent = malloc(event->header.size + sizeof(struct event_entry)); event 506 tools/perf/builtin-inject.c memcpy(&ent->event, event, event->header.size); event 512 tools/perf/builtin-inject.c union perf_event *event __maybe_unused, event 530 tools/perf/builtin-inject.c event_sw = &ent->event[0]; event 562 tools/perf/builtin-inject.c union perf_event *event __maybe_unused, event 942 tools/perf/builtin-kmem.c union perf_event *event, event 953 tools/perf/builtin-kmem.c event->header.type); event 160 tools/perf/builtin-kvm.c struct kvm_event *event; event 166 tools/perf/builtin-kvm.c list_for_each_entry(event, head, hash_entry) { event 168 tools/perf/builtin-kvm.c event->total.time = 0; event 169 tools/perf/builtin-kvm.c init_stats(&event->total.stats); event 171 tools/perf/builtin-kvm.c for (j = 0; j < event->max_vcpu; ++j) { event 172 tools/perf/builtin-kvm.c event->vcpu[j].time = 0; event 173 tools/perf/builtin-kvm.c init_stats(&event->vcpu[j].stats); event 185 tools/perf/builtin-kvm.c static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) event 187 tools/perf/builtin-kvm.c int old_max_vcpu = event->max_vcpu; event 190 tools/perf/builtin-kvm.c if (vcpu_id < event->max_vcpu) event 193 tools/perf/builtin-kvm.c while (event->max_vcpu <= vcpu_id) event 194 tools/perf/builtin-kvm.c event->max_vcpu += DEFAULT_VCPU_NUM; event 196 tools/perf/builtin-kvm.c prev = event->vcpu; event 197 tools/perf/builtin-kvm.c event->vcpu = realloc(event->vcpu, event 198 tools/perf/builtin-kvm.c event->max_vcpu * sizeof(*event->vcpu)); event 199 tools/perf/builtin-kvm.c if (!event->vcpu) { event 205 tools/perf/builtin-kvm.c memset(event->vcpu + old_max_vcpu, 0, event 206 tools/perf/builtin-kvm.c (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); event 212 tools/perf/builtin-kvm.c 
struct kvm_event *event; event 214 tools/perf/builtin-kvm.c event = zalloc(sizeof(*event)); event 215 tools/perf/builtin-kvm.c if (!event) { event 220 tools/perf/builtin-kvm.c event->key = *key; event 221 tools/perf/builtin-kvm.c init_stats(&event->total.stats); event 222 tools/perf/builtin-kvm.c return event; event 228 tools/perf/builtin-kvm.c struct kvm_event *event; event 234 tools/perf/builtin-kvm.c list_for_each_entry(event, head, hash_entry) { event 235 tools/perf/builtin-kvm.c if (event->key.key == key->key && event->key.info == key->info) event 236 tools/perf/builtin-kvm.c return event; event 239 tools/perf/builtin-kvm.c event = kvm_alloc_init_event(key); event 240 tools/perf/builtin-kvm.c if (!event) event 243 tools/perf/builtin-kvm.c list_add(&event->hash_entry, head); event 244 tools/perf/builtin-kvm.c return event; event 251 tools/perf/builtin-kvm.c struct kvm_event *event = NULL; event 254 tools/perf/builtin-kvm.c event = find_create_kvm_event(kvm, key); event 256 tools/perf/builtin-kvm.c vcpu_record->last_event = event; event 268 tools/perf/builtin-kvm.c static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event) event 270 tools/perf/builtin-kvm.c struct kvm_event_stats *kvm_stats = &event->total; event 273 tools/perf/builtin-kvm.c kvm_stats = &event->vcpu[vcpu_id]; event 279 tools/perf/builtin-kvm.c static bool update_kvm_event(struct kvm_event *event, int vcpu_id, event 283 tools/perf/builtin-kvm.c kvm_update_event_stats(&event->total, time_diff); event 287 tools/perf/builtin-kvm.c if (!kvm_event_expand(event, vcpu_id)) event 290 tools/perf/builtin-kvm.c kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); event 321 tools/perf/builtin-kvm.c struct kvm_event *event = NULL; event 324 tools/perf/builtin-kvm.c event = find_create_kvm_event(kvm, key); event 326 tools/perf/builtin-kvm.c vcpu_record->last_event = event; event 331 tools/perf/builtin-kvm.c static bool skip_event(const char *event) event 336 tools/perf/builtin-kvm.c if (!strcmp(event, *skip_events)) event 347 tools/perf/builtin-kvm.c struct kvm_event *event; event 356 tools/perf/builtin-kvm.c event = vcpu_record->last_event; event 369 tools/perf/builtin-kvm.c if (!event && key->key == INVALID_KEY) event 372 tools/perf/builtin-kvm.c if (!event) event 373 tools/perf/builtin-kvm.c event = find_create_kvm_event(kvm, key); event 375 tools/perf/builtin-kvm.c if (!event) event 392 tools/perf/builtin-kvm.c kvm->events_ops->decode_key(kvm, &event->key, decode); event 400 tools/perf/builtin-kvm.c return update_kvm_event(event, vcpu, time_diff); event 457 tools/perf/builtin-kvm.c static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \ event 460 tools/perf/builtin-kvm.c return event->total.field; \ event 462 tools/perf/builtin-kvm.c if (vcpu >= event->max_vcpu) \ event 465 tools/perf/builtin-kvm.c return event->vcpu[vcpu].field; \ event 507 tools/perf/builtin-kvm.c static void insert_to_result(struct rb_root *result, struct kvm_event *event, event 518 tools/perf/builtin-kvm.c if (bigger(event, p, vcpu)) event 524 tools/perf/builtin-kvm.c rb_link_node(&event->rb, parent, rb); event 525 tools/perf/builtin-kvm.c rb_insert_color(&event->rb, result); event 529 tools/perf/builtin-kvm.c update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event) event 533 tools/perf/builtin-kvm.c kvm->total_count += get_event_count(event, vcpu); event 534 tools/perf/builtin-kvm.c kvm->total_time += get_event_time(event, vcpu); event 537 tools/perf/builtin-kvm.c static bool event_is_valid(struct kvm_event *event, 
int vcpu) event 539 tools/perf/builtin-kvm.c return !!get_event_count(event, vcpu); event 546 tools/perf/builtin-kvm.c struct kvm_event *event; event 549 tools/perf/builtin-kvm.c list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) { event 550 tools/perf/builtin-kvm.c if (event_is_valid(event, vcpu)) { event 551 tools/perf/builtin-kvm.c update_total_count(kvm, event); event 552 tools/perf/builtin-kvm.c insert_to_result(&kvm->result, event, event 609 tools/perf/builtin-kvm.c struct kvm_event *event; event 629 tools/perf/builtin-kvm.c while ((event = pop_from_result(&kvm->result))) { event 632 tools/perf/builtin-kvm.c ecount = get_event_count(event, vcpu); event 633 tools/perf/builtin-kvm.c etime = get_event_time(event, vcpu); event 634 tools/perf/builtin-kvm.c max = get_event_max(event, vcpu); event 635 tools/perf/builtin-kvm.c min = get_event_min(event, vcpu); event 637 tools/perf/builtin-kvm.c kvm->events_ops->decode_key(kvm, &event->key, decode); event 645 tools/perf/builtin-kvm.c kvm_event_rel_stddev(vcpu, event)); event 658 tools/perf/builtin-kvm.c union perf_event *event __maybe_unused, event 679 tools/perf/builtin-kvm.c union perf_event *event, event 695 tools/perf/builtin-kvm.c event->header.type); event 754 tools/perf/builtin-kvm.c union perf_event *event; event 766 tools/perf/builtin-kvm.c while ((event = perf_mmap__read_event(md)) != NULL) { event 767 tools/perf/builtin-kvm.c err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp); event 774 tools/perf/builtin-kvm.c err = perf_session__queue_event(kvm->session, event, timestamp, 0); event 814 tools/perf/builtin-lock.c union perf_event *event, event 825 tools/perf/builtin-lock.c event->header.type); event 154 tools/perf/builtin-mem.c union perf_event *event, event 164 tools/perf/builtin-mem.c event->header.type); event 234 tools/perf/builtin-mem.c union perf_event *event, event 239 tools/perf/builtin-mem.c return dump_raw_samples(tool, event, sample, machine); event 339 tools/perf/builtin-probe.c const char *event = NULL, *group = NULL; event 369 tools/perf/builtin-probe.c show_perf_probe_event(tev->group, tev->event, pev, event 373 tools/perf/builtin-probe.c event = tev->event; event 379 tools/perf/builtin-probe.c if (event) { event 382 tools/perf/builtin-probe.c pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", group, event); event 478 tools/perf/builtin-record.c union perf_event *event, event 483 tools/perf/builtin-record.c return record__write(rec, NULL, event, event->header.size); event 532 tools/perf/builtin-record.c union perf_event *event, void *data1, event 549 tools/perf/builtin-record.c event, file_offset); event 559 tools/perf/builtin-record.c record__write(rec, map, event, event->header.size); event 822 tools/perf/builtin-record.c union perf_event *event, event 838 tools/perf/builtin-record.c return build_id__mark_dso_hit(tool, event, sample, evsel, machine); event 917 tools/perf/builtin-record.c struct perf_record_compressed *event = record; event 918 tools/perf/builtin-record.c size_t size = sizeof(*event); event 921 tools/perf/builtin-record.c event->header.size += increment; event 925 tools/perf/builtin-record.c event->header.type = PERF_RECORD_COMPRESSED; event 926 tools/perf/builtin-record.c event->header.size = size; event 1493 tools/perf/builtin-record.c union perf_event *event; event 1496 tools/perf/builtin-record.c event = malloc(sizeof(event->comm) + machine->id_hdr_size); event 1497 tools/perf/builtin-record.c if (event == NULL) { event 1508 tools/perf/builtin-record.c tgid =
perf_event__synthesize_comm(tool, event, event 1512 tools/perf/builtin-record.c free(event); event 1517 tools/perf/builtin-record.c event = malloc(sizeof(event->namespaces) + event 1520 tools/perf/builtin-record.c if (event == NULL) { event 1528 tools/perf/builtin-record.c perf_event__synthesize_namespaces(tool, event, event 1532 tools/perf/builtin-record.c free(event); event 213 tools/perf/builtin-report.c union perf_event *event) event 217 tools/perf/builtin-report.c if (event->feat.feat_id < HEADER_LAST_FEATURE) event 218 tools/perf/builtin-report.c return perf_event__process_feature(session, event); event 220 tools/perf/builtin-report.c if (event->feat.feat_id != HEADER_LAST_FEATURE) { event 222 tools/perf/builtin-report.c event->feat.feat_id); event 236 tools/perf/builtin-report.c union perf_event *event, event 261 tools/perf/builtin-report.c event->header.type); event 306 tools/perf/builtin-report.c union perf_event *event, event 316 tools/perf/builtin-report.c event->read.pid, event->read.tid, event 319 tools/perf/builtin-report.c event->read.value); event 152 tools/perf/builtin-sched.c int (*fork_event)(struct perf_sched *sched, union perf_event *event, event 357 tools/perf/builtin-sched.c struct sched_atom *event = zalloc(sizeof(*event)); event 361 tools/perf/builtin-sched.c event->timestamp = timestamp; event 362 tools/perf/builtin-sched.c event->nr = idx; event 369 tools/perf/builtin-sched.c task->atoms[idx] = event; event 371 tools/perf/builtin-sched.c return event; event 385 tools/perf/builtin-sched.c struct sched_atom *event, *curr_event = last_event(task); event 397 tools/perf/builtin-sched.c event = get_new_event(task, timestamp); event 399 tools/perf/builtin-sched.c event->type = SCHED_EVENT_RUN; event 400 tools/perf/builtin-sched.c event->duration = duration; event 408 tools/perf/builtin-sched.c struct sched_atom *event, *wakee_event; event 410 tools/perf/builtin-sched.c event = get_new_event(task, timestamp); event 411 tools/perf/builtin-sched.c event->type = SCHED_EVENT_WAKEUP; event 412 tools/perf/builtin-sched.c event->wakee = wakee; event 427 tools/perf/builtin-sched.c event->wait_sem = wakee_event->wait_sem; event 435 tools/perf/builtin-sched.c struct sched_atom *event = get_new_event(task, timestamp); event 437 tools/perf/builtin-sched.c event->type = SCHED_EVENT_SLEEP; event 875 tools/perf/builtin-sched.c union perf_event *event, event 880 tools/perf/builtin-sched.c child = machine__findnew_thread(machine, event->fork.pid, event 881 tools/perf/builtin-sched.c event->fork.tid); event 882 tools/perf/builtin-sched.c parent = machine__findnew_thread(machine, event->fork.ppid, event 883 tools/perf/builtin-sched.c event->fork.ptid); event 1703 tools/perf/builtin-sched.c union perf_event *event, event 1710 tools/perf/builtin-sched.c perf_event__process_fork(tool, event, sample, machine); event 1714 tools/perf/builtin-sched.c return sched->tp_handler->fork_event(sched, event, machine); event 1738 tools/perf/builtin-sched.c union perf_event *event __maybe_unused, event 1754 tools/perf/builtin-sched.c union perf_event *event, event 1762 tools/perf/builtin-sched.c err = perf_event__process_comm(tool, event, sample, machine); event 2397 tools/perf/builtin-sched.c union perf_event *event __maybe_unused, event 2481 tools/perf/builtin-sched.c union perf_event *event __maybe_unused, event 2509 tools/perf/builtin-sched.c union perf_event *event, event 2526 tools/perf/builtin-sched.c event->header.type); event 2635 tools/perf/builtin-sched.c union perf_event *event, event 2640 
tools/perf/builtin-sched.c return timehist_sched_change_event(tool, event, evsel, sample, machine); event 2644 tools/perf/builtin-sched.c union perf_event *event, event 2652 tools/perf/builtin-sched.c printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu); event 2905 tools/perf/builtin-sched.c union perf_event *event, event 2911 tools/perf/builtin-sched.c union perf_event *event, event 2926 tools/perf/builtin-sched.c err = f(tool, event, evsel, sample, machine); event 1993 tools/perf/builtin-script.c union perf_event *event, event 2019 tools/perf/builtin-script.c event->header.type); event 2030 tools/perf/builtin-script.c scripting_ops->process_event(event, sample, evsel, &al); event 2039 tools/perf/builtin-script.c static int process_attr(struct perf_tool *tool, union perf_event *event, event 2048 tools/perf/builtin-script.c err = perf_event__process_attr(tool, event, pevlist); event 2086 tools/perf/builtin-script.c union perf_event *event, event 2096 tools/perf/builtin-script.c thread = machine__findnew_thread(machine, event->comm.pid, event->comm.tid); event 2102 tools/perf/builtin-script.c if (perf_event__process_comm(tool, event, sample, machine) < 0) event 2108 tools/perf/builtin-script.c sample->tid = event->comm.tid; event 2109 tools/perf/builtin-script.c sample->pid = event->comm.pid; event 2114 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 2123 tools/perf/builtin-script.c union perf_event *event, event 2133 tools/perf/builtin-script.c thread = machine__findnew_thread(machine, event->namespaces.pid, event 2134 tools/perf/builtin-script.c event->namespaces.tid); event 2140 tools/perf/builtin-script.c if (perf_event__process_namespaces(tool, event, sample, machine) < 0) event 2146 tools/perf/builtin-script.c sample->tid = event->namespaces.tid; event 2147 tools/perf/builtin-script.c sample->pid = event->namespaces.pid; event 2152 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 2161 tools/perf/builtin-script.c union perf_event *event, event 2170 tools/perf/builtin-script.c if (perf_event__process_fork(tool, event, sample, machine) < 0) event 2173 tools/perf/builtin-script.c thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid); event 2181 tools/perf/builtin-script.c sample->time = event->fork.time; event 2182 tools/perf/builtin-script.c sample->tid = event->fork.tid; event 2183 tools/perf/builtin-script.c sample->pid = event->fork.pid; event 2188 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 2195 tools/perf/builtin-script.c union perf_event *event, event 2205 tools/perf/builtin-script.c thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid); event 2214 tools/perf/builtin-script.c sample->tid = event->fork.tid; event 2215 tools/perf/builtin-script.c sample->pid = event->fork.pid; event 2220 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 2223 tools/perf/builtin-script.c if (perf_event__process_exit(tool, event, sample, machine) < 0) event 2231 tools/perf/builtin-script.c union perf_event *event, event 2240 tools/perf/builtin-script.c if (perf_event__process_mmap(tool, event, sample, machine) < 0) event 2243 tools/perf/builtin-script.c thread = machine__findnew_thread(machine, event->mmap.pid, event->mmap.tid); event 2252 tools/perf/builtin-script.c sample->tid = event->mmap.tid; event 2253 tools/perf/builtin-script.c sample->pid = event->mmap.pid; event 2258 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 
2265 tools/perf/builtin-script.c union perf_event *event, event 2274 tools/perf/builtin-script.c if (perf_event__process_mmap2(tool, event, sample, machine) < 0) event 2277 tools/perf/builtin-script.c thread = machine__findnew_thread(machine, event->mmap2.pid, event->mmap2.tid); event 2286 tools/perf/builtin-script.c sample->tid = event->mmap2.tid; event 2287 tools/perf/builtin-script.c sample->pid = event->mmap2.pid; event 2292 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 2299 tools/perf/builtin-script.c union perf_event *event, event 2308 tools/perf/builtin-script.c if (perf_event__process_switch(tool, event, sample, machine) < 0) event 2312 tools/perf/builtin-script.c scripting_ops->process_switch(event, sample, machine); event 2327 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 2335 tools/perf/builtin-script.c union perf_event *event, event 2352 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 2360 tools/perf/builtin-script.c union perf_event *event, event 2364 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 2370 tools/perf/builtin-script.c union perf_event *event, event 2379 tools/perf/builtin-script.c if (machine__process_ksymbol(machine, event, sample) < 0) event 2383 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 2395 tools/perf/builtin-script.c event->header.type, stdout); event 2396 tools/perf/builtin-script.c perf_event__fprintf(event, stdout); event 3255 tools/perf/builtin-script.c union perf_event *event) event 3257 tools/perf/builtin-script.c struct perf_record_stat_round *round = &event->stat_round; event 3270 tools/perf/builtin-script.c union perf_event *event) event 3272 tools/perf/builtin-script.c perf_event__read_stat_config(&stat_config, &event->stat_config); event 3297 tools/perf/builtin-script.c union perf_event *event) event 3307 tools/perf/builtin-script.c script->threads = thread_map__new_event(&event->thread_map); event 3316 tools/perf/builtin-script.c union perf_event *event) event 3326 tools/perf/builtin-script.c script->cpus = cpu_map__new_data(&event->cpu_map.data); event 3334 tools/perf/builtin-script.c union perf_event *event) event 3336 tools/perf/builtin-script.c if (event->feat.feat_id < HEADER_LAST_FEATURE) event 3337 tools/perf/builtin-script.c return perf_event__process_feature(session, event); event 3343 tools/perf/builtin-script.c union perf_event *event) event 3347 tools/perf/builtin-script.c int ret = perf_event__process_auxtrace_info(session, event); event 215 tools/perf/builtin-stat.c union perf_event *event, event 219 tools/perf/builtin-stat.c if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { event 224 tools/perf/builtin-stat.c perf_stat.bytes_written += event->header.size; event 1454 tools/perf/builtin-stat.c union perf_event *event) event 1456 tools/perf/builtin-stat.c struct perf_record_stat_round *stat_round = &event->stat_round; event 1480 tools/perf/builtin-stat.c union perf_event *event) event 1485 tools/perf/builtin-stat.c perf_event__read_stat_config(&stat_config, &event->stat_config); event 1523 tools/perf/builtin-stat.c union perf_event *event) event 1533 tools/perf/builtin-stat.c st->threads = thread_map__new_event(&event->thread_map); event 1542 tools/perf/builtin-stat.c union perf_event *event) event 1553 tools/perf/builtin-stat.c cpus = cpu_map__new_data(&event->cpu_map.data); event 314 tools/perf/builtin-timechart.c union perf_event *event, event 319 tools/perf/builtin-timechart.c 
pid_set_comm(tchart, event->comm.tid, event->comm.comm); event 324 tools/perf/builtin-timechart.c union perf_event *event, event 329 tools/perf/builtin-timechart.c pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time); event 334 tools/perf/builtin-timechart.c union perf_event *event, event 339 tools/perf/builtin-timechart.c pid_exit(tchart, event->fork.pid, event->fork.time); event 473 tools/perf/builtin-timechart.c static const char *cat_backtrace(union perf_event *event, event 496 tools/perf/builtin-timechart.c event->header.type); event 553 tools/perf/builtin-timechart.c union perf_event *event, event 570 tools/perf/builtin-timechart.c cat_backtrace(event, sample, machine)); event 732 tools/perf/builtin-top.c const union perf_event *event, event 761 tools/perf/builtin-top.c if (event->header.misc & PERF_RECORD_MISC_EXACT_IP) event 840 tools/perf/builtin-top.c perf_top__process_lost(struct perf_top *top, union perf_event *event, event 845 tools/perf/builtin-top.c top->lost += event->lost.lost; event 846 tools/perf/builtin-top.c top->lost_total += event->lost.lost; event 847 tools/perf/builtin-top.c hists->stats.total_lost += event->lost.lost; event 852 tools/perf/builtin-top.c union perf_event *event, event 857 tools/perf/builtin-top.c top->lost += event->lost_samples.lost; event 858 tools/perf/builtin-top.c top->lost_total += event->lost_samples.lost; event 859 tools/perf/builtin-top.c hists->stats.total_lost_samples += event->lost_samples.lost; event 869 tools/perf/builtin-top.c union perf_event *event; event 875 tools/perf/builtin-top.c while ((event = perf_mmap__read_event(md)) != NULL) { event 878 tools/perf/builtin-top.c ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp); event 882 tools/perf/builtin-top.c ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0); event 1114 tools/perf/builtin-top.c union perf_event *event = qevent->event; event 1117 tools/perf/builtin-top.c if (event->header.type != PERF_RECORD_SAMPLE) event 1130 tools/perf/builtin-top.c union perf_event *event = qevent->event; event 1142 tools/perf/builtin-top.c ret = perf_evlist__parse_sample(evlist, event, &sample); event 1151 tools/perf/builtin-top.c if (event->header.type == PERF_RECORD_SAMPLE) { event 1183 tools/perf/builtin-top.c if (event->header.type == PERF_RECORD_SAMPLE) event 1189 tools/perf/builtin-top.c if (event->header.type == PERF_RECORD_SAMPLE) { event 1190 tools/perf/builtin-top.c perf_event__process_sample(&top->tool, event, evsel, event 1192 tools/perf/builtin-top.c } else if (event->header.type == PERF_RECORD_LOST) { event 1193 tools/perf/builtin-top.c perf_top__process_lost(top, event, evsel); event 1194 tools/perf/builtin-top.c } else if (event->header.type == PERF_RECORD_LOST_SAMPLES) { event 1195 tools/perf/builtin-top.c perf_top__process_lost_samples(top, event, evsel); event 1196 tools/perf/builtin-top.c } else if (event->header.type < PERF_RECORD_MAX) { event 1197 tools/perf/builtin-top.c hists__inc_nr_events(evsel__hists(evsel), event->header.type); event 1198 tools/perf/builtin-top.c machine__process_event(machine, event, &sample); event 1358 tools/perf/builtin-trace.c union perf_event *event, struct perf_sample *sample) event 1362 tools/perf/builtin-trace.c switch (event->header.type) { event 1365 tools/perf/builtin-trace.c "LOST %" PRIu64 " events!\n", event->lost.lost); event 1366 tools/perf/builtin-trace.c ret = machine__process_lost_event(machine, event, sample); event 1369 tools/perf/builtin-trace.c ret = 
machine__process_event(machine, event, sample); event 1377 tools/perf/builtin-trace.c union perf_event *event, event 1382 tools/perf/builtin-trace.c return trace__process_event(trace, machine, event, sample); event 1791 tools/perf/builtin-trace.c union perf_event *event, event 1945 tools/perf/builtin-trace.c union perf_event *event __maybe_unused, event 2093 tools/perf/builtin-trace.c union perf_event *event __maybe_unused, event 2227 tools/perf/builtin-trace.c union perf_event *event __maybe_unused, event 2288 tools/perf/builtin-trace.c union perf_event *event __maybe_unused, event 2350 tools/perf/builtin-trace.c union perf_event *event __maybe_unused, event 2452 tools/perf/builtin-trace.c union perf_event *event __maybe_unused, event 2543 tools/perf/builtin-trace.c union perf_event *event, event 2562 tools/perf/builtin-trace.c handler(trace, evsel, event, sample); event 2679 tools/perf/builtin-trace.c static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) event 2681 tools/perf/builtin-trace.c const u32 type = event->header.type; event 2685 tools/perf/builtin-trace.c trace__process_event(trace, trace->host, event, sample); event 2707 tools/perf/builtin-trace.c handler(trace, evsel, event, sample); event 3208 tools/perf/builtin-trace.c static int __trace__deliver_event(struct trace *trace, union perf_event *event) event 3214 tools/perf/builtin-trace.c err = perf_evlist__parse_sample(evlist, event, &sample); event 3218 tools/perf/builtin-trace.c trace__handle_event(trace, event, &sample); event 3240 tools/perf/builtin-trace.c static int trace__deliver_event(struct trace *trace, union perf_event *event) event 3245 tools/perf/builtin-trace.c return __trace__deliver_event(trace, event); event 3247 tools/perf/builtin-trace.c err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last); event 3251 tools/perf/builtin-trace.c err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0); event 3259 tools/perf/builtin-trace.c struct ordered_event *event) event 3263 tools/perf/builtin-trace.c return __trace__deliver_event(trace, event->event); event 3449 tools/perf/builtin-trace.c union perf_event *event; event 3456 tools/perf/builtin-trace.c while ((event = perf_mmap__read_event(md)) != NULL) { event 3459 tools/perf/builtin-trace.c err = trace__deliver_event(trace, event); event 196 tools/perf/pmu-events/jevents.c char **event, jsmntok_t *val) event 204 tools/perf/pmu-events/jevents.c addfield(map, event, ",", f->kernel, &newval); event 321 tools/perf/pmu-events/jevents.c static int print_events_table_entry(void *data, char *name, char *event, event 339 tools/perf/pmu-events/jevents.c if (event) event 340 tools/perf/pmu-events/jevents.c fprintf(outfp, "\t.event = \"%s\",\n", event); event 365 tools/perf/pmu-events/jevents.c char *event; event 392 tools/perf/pmu-events/jevents.c op(event); \ event 416 tools/perf/pmu-events/jevents.c static int save_arch_std_events(void *data, char *name, char *event, event 451 tools/perf/pmu-events/jevents.c const char *event; event 465 tools/perf/pmu-events/jevents.c static char *real_event(const char *name, char *event) event 474 tools/perf/pmu-events/jevents.c return (char *)fixed[i].event; event 475 tools/perf/pmu-events/jevents.c return event; event 479 tools/perf/pmu-events/jevents.c try_fixup(const char *fn, char *arch_std, char **event, char **desc, event 489 tools/perf/pmu-events/jevents.c if (!eventcode && es->event) { event 491 tools/perf/pmu-events/jevents.c free(*event); 
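The tools/perf/pmu-events/jevents.c entries above show the generator printing table entries of the form .event = "...", and the pmu-events.h entries that follow declare the matching const char *event field. A minimal, self-contained sketch of that table shape is given below; it is an assumption-laden analog rather than the real table: demo_pmu_event, demo_events and find_event are hypothetical names, the encodings are purely illustrative, and the real struct pmu_event carries additional fields (desc, topic, unit and so on).

#include <stdio.h>
#include <strings.h>

struct demo_pmu_event {            /* trimmed analog of struct pmu_event */
	const char *name;          /* event name as the user writes it */
	const char *event;         /* encoded terms, e.g. "event=0xc0" */
};

/* Roughly what a generated ".event = \"...\"" table entry looks like. */
static const struct demo_pmu_event demo_events[] = {
	{ .name = "cpu_clk_unhalted.thread", .event = "event=0x3c" },
	{ .name = "inst_retired.any",        .event = "event=0xc0" },
	{ .name = NULL, .event = NULL },   /* sentinel */
};

/* Look an event name up and return its encoded term string, or NULL. */
static const char *find_event(const char *name)
{
	const struct demo_pmu_event *pe;

	for (pe = demo_events; pe->name; pe++) {
		if (!strcasecmp(pe->name, name))
			return pe->event;
	}
	return NULL;
}

int main(void)
{
	const char *terms = find_event("inst_retired.any");

	printf("%s\n", terms ? terms : "not found");
	return 0;
}

In the real tool, perf's PMU code performs a comparable walk over the generated tables to turn a symbolic event name into its term string before the event is opened.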
event 492 tools/perf/pmu-events/jevents.c *event = NULL; event 506 tools/perf/pmu-events/jevents.c int (*func)(void *data, char *name, char *event, char *desc, event 529 tools/perf/pmu-events/jevents.c char *event = NULL, *desc = NULL, *name = NULL; event 560 tools/perf/pmu-events/jevents.c if (match_field(map, field, nz, &event, val)) { event 641 tools/perf/pmu-events/jevents.c addfield(map, &event, ",", buf, NULL); event 647 tools/perf/pmu-events/jevents.c addfield(map, &event, ",", filter, NULL); event 649 tools/perf/pmu-events/jevents.c addfield(map, &event, ",", msr->pname, msrval); event 658 tools/perf/pmu-events/jevents.c err = try_fixup(fn, arch_std, &event, &desc, &name, event 665 tools/perf/pmu-events/jevents.c err = func(data, name, real_event(name, event), desc, long_desc, event 668 tools/perf/pmu-events/jevents.c free(event); event 6 tools/perf/pmu-events/jevents.h int (*func)(void *data, char *name, char *event, char *desc, event 10 tools/perf/pmu-events/pmu-events.h const char *event; event 38 tools/perf/tests/backward-ring-buffer.c union perf_event *event; event 41 tools/perf/tests/backward-ring-buffer.c while ((event = perf_mmap__read_event(map)) != NULL) { event 42 tools/perf/tests/backward-ring-buffer.c const u32 type = event->header.type; event 183 tools/perf/tests/bpf.c union perf_event *event; event 190 tools/perf/tests/bpf.c while ((event = perf_mmap__read_event(md)) != NULL) { event 191 tools/perf/tests/bpf.c const u32 type = event->header.type; event 374 tools/perf/tests/code-reading.c union perf_event *event, struct state *state) event 380 tools/perf/tests/code-reading.c if (perf_evlist__parse_sample(evlist, event, &sample)) { event 397 tools/perf/tests/code-reading.c union perf_event *event, struct state *state) event 399 tools/perf/tests/code-reading.c if (event->header.type == PERF_RECORD_SAMPLE) event 400 tools/perf/tests/code-reading.c return process_sample_event(machine, evlist, event, state); event 402 tools/perf/tests/code-reading.c if (event->header.type == PERF_RECORD_THROTTLE || event 403 tools/perf/tests/code-reading.c event->header.type == PERF_RECORD_UNTHROTTLE) event 406 tools/perf/tests/code-reading.c if (event->header.type < PERF_RECORD_MAX) { event 409 tools/perf/tests/code-reading.c ret = machine__process_event(machine, event, NULL); event 412 tools/perf/tests/code-reading.c event->header.type); event 422 tools/perf/tests/code-reading.c union perf_event *event; event 431 tools/perf/tests/code-reading.c while ((event = perf_mmap__read_event(md)) != NULL) { event 432 tools/perf/tests/code-reading.c ret = process_event(machine, evlist, event, state); event 508 tools/perf/tests/code-reading.c const char *event = excl_kernel ? "cycles:u" : "cycles"; event 522 tools/perf/tests/code-reading.c return event; event 524 tools/perf/tests/code-reading.c return event; event 528 tools/perf/tests/code-reading.c event = excl_kernel ? 
"cpu-clock:u" : "cpu-clock"; event 531 tools/perf/tests/code-reading.c return event; event 15 tools/perf/tests/cpumap.c union perf_event *event, event 19 tools/perf/tests/cpumap.c struct perf_record_cpu_map *map_event = &event->cpu_map; event 49 tools/perf/tests/cpumap.c union perf_event *event, event 53 tools/perf/tests/cpumap.c struct perf_record_cpu_map *map_event = &event->cpu_map; event 28 tools/perf/tests/dwarf-unwind.c union perf_event *event, event 32 tools/perf/tests/dwarf-unwind.c return machine__process_mmap2_event(machine, event, sample); event 37 tools/perf/tests/dwarf-unwind.c union perf_event event; event 40 tools/perf/tests/dwarf-unwind.c return perf_event__synthesize_mmap_events(NULL, &event, pid, pid, event 16 tools/perf/tests/event_update.c union perf_event *event, event 20 tools/perf/tests/event_update.c struct perf_record_event_update *ev = (struct perf_record_event_update *)event; event 29 tools/perf/tests/event_update.c union perf_event *event, event 33 tools/perf/tests/event_update.c struct perf_record_event_update *ev = (struct perf_record_event_update *)event; event 50 tools/perf/tests/event_update.c union perf_event *event, event 55 tools/perf/tests/event_update.c struct perf_record_event_update *ev = (struct perf_record_event_update *)event; event 64 tools/perf/tests/event_update.c union perf_event *event, event 68 tools/perf/tests/event_update.c struct perf_record_event_update *ev = (struct perf_record_event_update *)event; event 34 tools/perf/tests/keep-tracking.c union perf_event *event; event 43 tools/perf/tests/keep-tracking.c while ((event = perf_mmap__read_event(md)) != NULL) { event 44 tools/perf/tests/keep-tracking.c if (event->header.type == PERF_RECORD_COMM && event 45 tools/perf/tests/keep-tracking.c (pid_t)event->comm.pid == getpid() && event 46 tools/perf/tests/keep-tracking.c (pid_t)event->comm.tid == getpid() && event 47 tools/perf/tests/keep-tracking.c strcmp(event->comm.comm, comm) == 0) event 34 tools/perf/tests/mmap-basic.c union perf_event *event; event 119 tools/perf/tests/mmap-basic.c while ((event = perf_mmap__read_event(md)) != NULL) { event 122 tools/perf/tests/mmap-basic.c if (event->header.type != PERF_RECORD_SAMPLE) { event 124 tools/perf/tests/mmap-basic.c perf_event__name(event->header.type)); event 128 tools/perf/tests/mmap-basic.c err = perf_evlist__parse_sample(evlist, event, &sample); event 91 tools/perf/tests/openat-syscall-tp-fields.c union perf_event *event; event 98 tools/perf/tests/openat-syscall-tp-fields.c while ((event = perf_mmap__read_event(md)) != NULL) { event 99 tools/perf/tests/openat-syscall-tp-fields.c const u32 type = event->header.type; event 110 tools/perf/tests/openat-syscall-tp-fields.c err = perf_evsel__parse_sample(evsel, event, &sample); event 12 tools/perf/tests/parse-no-sample-id-all.c static int process_event(struct evlist **pevlist, union perf_event *event) event 16 tools/perf/tests/parse-no-sample-id-all.c if (event->header.type == PERF_RECORD_HEADER_ATTR) { event 17 tools/perf/tests/parse-no-sample-id-all.c if (perf_event__process_attr(NULL, event, pevlist)) { event 24 tools/perf/tests/parse-no-sample-id-all.c if (event->header.type >= PERF_RECORD_USER_TYPE_START) event 30 tools/perf/tests/parse-no-sample-id-all.c if (perf_evlist__parse_sample(*pevlist, event, &sample)) { event 169 tools/perf/tests/perf-record.c union perf_event *event; event 176 tools/perf/tests/perf-record.c while ((event = perf_mmap__read_event(md)) != NULL) { event 177 tools/perf/tests/perf-record.c const u32 type = 
event->header.type; event 184 tools/perf/tests/perf-record.c err = perf_evlist__parse_sample(evlist, event, &sample); event 187 tools/perf/tests/perf-record.c perf_event__fprintf(event, stderr); event 194 tools/perf/tests/perf-record.c perf_event__fprintf(event, stderr); event 228 tools/perf/tests/perf-record.c (pid_t)event->comm.pid != evlist->workload.pid) { event 236 tools/perf/tests/perf-record.c event->comm.pid != event->comm.tid) { event 243 tools/perf/tests/perf-record.c if (strcmp(event->comm.comm, cmd)) { event 251 tools/perf/tests/perf-record.c mmap_filename = event->mmap.filename; event 254 tools/perf/tests/perf-record.c mmap_filename = event->mmap2.filename; event 167 tools/perf/tests/sample-parsing.c union perf_event *event; event 246 tools/perf/tests/sample-parsing.c event = malloc(bufsz); event 247 tools/perf/tests/sample-parsing.c if (!event) { event 252 tools/perf/tests/sample-parsing.c memset(event, 0xff, bufsz); event 253 tools/perf/tests/sample-parsing.c event->header.type = PERF_RECORD_SAMPLE; event 254 tools/perf/tests/sample-parsing.c event->header.misc = 0; event 255 tools/perf/tests/sample-parsing.c event->header.size = sz; event 257 tools/perf/tests/sample-parsing.c err = perf_event__synthesize_sample(event, sample_type, read_format, event 267 tools/perf/tests/sample-parsing.c if (*(i - 1 + (u8 *)event) != 0xff) event 278 tools/perf/tests/sample-parsing.c err = perf_evsel__parse_sample(&evsel, event, &sample_out); event 293 tools/perf/tests/sample-parsing.c free(event); event 60 tools/perf/tests/sdt.c const char *group, const char *event) event 70 tools/perf/tests/sdt.c if (!probe_cache__find_by_name(cache, group, event)) { event 71 tools/perf/tests/sdt.c pr_debug("Failed to find %s:%s in the cache\n", group, event); event 25 tools/perf/tests/stat.c union perf_event *event, event 29 tools/perf/tests/stat.c struct perf_record_stat_config *config = &event->stat_config; event 65 tools/perf/tests/stat.c union perf_event *event, event 69 tools/perf/tests/stat.c struct perf_record_stat *st = &event->stat; event 95 tools/perf/tests/stat.c union perf_event *event, event 99 tools/perf/tests/stat.c struct perf_record_stat_round *stat_round = &event->stat_round; event 33 tools/perf/tests/sw-clock.c union perf_event *event; event 105 tools/perf/tests/sw-clock.c while ((event = perf_mmap__read_event(md)) != NULL) { event 108 tools/perf/tests/sw-clock.c if (event->header.type != PERF_RECORD_SAMPLE) event 111 tools/perf/tests/sw-clock.c err = perf_evlist__parse_sample(evlist, event, &sample); event 71 tools/perf/tests/switch-tracking.c union perf_event *event, const char *comm, int nr) event 73 tools/perf/tests/switch-tracking.c if (event->header.type == PERF_RECORD_COMM && event 74 tools/perf/tests/switch-tracking.c (pid_t)event->comm.pid == getpid() && event 75 tools/perf/tests/switch-tracking.c (pid_t)event->comm.tid == getpid() && event 76 tools/perf/tests/switch-tracking.c strcmp(event->comm.comm, comm) == 0) { event 82 tools/perf/tests/switch-tracking.c pr_debug3("comm event: %s nr: %d\n", event->comm.comm, nr); event 122 tools/perf/tests/switch-tracking.c union perf_event *event, event 130 tools/perf/tests/switch-tracking.c if (perf_evlist__parse_sample(evlist, event, &sample)) { event 171 tools/perf/tests/switch-tracking.c static int process_event(struct evlist *evlist, union perf_event *event, event 174 tools/perf/tests/switch-tracking.c if (event->header.type == PERF_RECORD_SAMPLE) event 175 tools/perf/tests/switch-tracking.c return process_sample_event(evlist, event, 
switch_tracking); event 177 tools/perf/tests/switch-tracking.c if (event->header.type == PERF_RECORD_COMM) { event 180 tools/perf/tests/switch-tracking.c err = check_comm(switch_tracking, event, "Test COMM 1", 0); event 184 tools/perf/tests/switch-tracking.c err = check_comm(switch_tracking, event, "Test COMM 2", 1); event 188 tools/perf/tests/switch-tracking.c err = check_comm(switch_tracking, event, "Test COMM 3", 2); event 192 tools/perf/tests/switch-tracking.c err = check_comm(switch_tracking, event, "Test COMM 4", 3); event 207 tools/perf/tests/switch-tracking.c union perf_event *event; event 212 tools/perf/tests/switch-tracking.c union perf_event *event) event 222 tools/perf/tests/switch-tracking.c node->event = event; event 225 tools/perf/tests/switch-tracking.c if (perf_evlist__parse_sample(evlist, event, &sample)) { event 263 tools/perf/tests/switch-tracking.c union perf_event *event; event 275 tools/perf/tests/switch-tracking.c while ((event = perf_mmap__read_event(md)) != NULL) { event 277 tools/perf/tests/switch-tracking.c ret = add_event(evlist, &events, event); event 299 tools/perf/tests/switch-tracking.c ret = process_event(evlist, events_array[pos].event, event 44 tools/perf/tests/task-exit.c union perf_event *event; event 125 tools/perf/tests/task-exit.c while ((event = perf_mmap__read_event(md)) != NULL) { event 126 tools/perf/tests/task-exit.c if (event->header.type == PERF_RECORD_EXIT) event 63 tools/perf/tests/thread-map.c union perf_event *event, event 67 tools/perf/tests/thread-map.c struct perf_record_thread_map *map = &event->thread_map; event 74 tools/perf/tests/thread-map.c threads = thread_map__new_event(&event->thread_map); event 96 tools/perf/util/arm-spe.c union perf_event *event __maybe_unused, event 104 tools/perf/util/arm-spe.c union perf_event *event, event 122 tools/perf/util/arm-spe.c err = auxtrace_queues__add_event(&spe->queues, session, event, event 191 tools/perf/util/arm-spe.c int arm_spe_process_auxtrace_info(union perf_event *event, event 194 tools/perf/util/arm-spe.c struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; event 27 tools/perf/util/arm-spe.h int arm_spe_process_auxtrace_info(union perf_event *event, event 359 tools/perf/util/auxtrace.c union perf_event *event, off_t data_offset, event 364 tools/perf/util/auxtrace.c .tid = event->auxtrace.tid, event 365 tools/perf/util/auxtrace.c .cpu = event->auxtrace.cpu, event 367 tools/perf/util/auxtrace.c .offset = event->auxtrace.offset, event 368 tools/perf/util/auxtrace.c .reference = event->auxtrace.reference, event 369 tools/perf/util/auxtrace.c .size = event->auxtrace.size, event 371 tools/perf/util/auxtrace.c unsigned int idx = event->auxtrace.idx; event 381 tools/perf/util/auxtrace.c union perf_event *event; event 386 tools/perf/util/auxtrace.c PERF_SAMPLE_MAX_SIZE, &event, NULL); event 390 tools/perf/util/auxtrace.c if (event->header.type == PERF_RECORD_AUXTRACE) { event 391 tools/perf/util/auxtrace.c if (event->header.size < sizeof(struct perf_record_auxtrace) || event 392 tools/perf/util/auxtrace.c event->header.size != sz) { event 396 tools/perf/util/auxtrace.c file_offset += event->header.size; event 397 tools/perf/util/auxtrace.c err = auxtrace_queues__add_event(queues, session, event, event 658 tools/perf/util/auxtrace.c union perf_event *event, off_t file_offset) event 669 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].sz = event->header.size; event 918 tools/perf/util/auxtrace.c union perf_event *event) event 920 tools/perf/util/auxtrace.c enum 
auxtrace_type type = event->auxtrace_info.type; event 927 tools/perf/util/auxtrace.c return intel_pt_process_auxtrace_info(event, session); event 929 tools/perf/util/auxtrace.c return intel_bts_process_auxtrace_info(event, session); event 931 tools/perf/util/auxtrace.c return arm_spe_process_auxtrace_info(event, session); event 933 tools/perf/util/auxtrace.c return cs_etm__process_auxtrace_info(event, session); event 935 tools/perf/util/auxtrace.c return s390_cpumsf_process_auxtrace_info(event, session); event 943 tools/perf/util/auxtrace.c union perf_event *event) event 949 tools/perf/util/auxtrace.c event->auxtrace.size, event->auxtrace.offset, event 950 tools/perf/util/auxtrace.c event->auxtrace.reference, event->auxtrace.idx, event 951 tools/perf/util/auxtrace.c event->auxtrace.tid, event->auxtrace.cpu); event 954 tools/perf/util/auxtrace.c return event->auxtrace.size; event 956 tools/perf/util/auxtrace.c if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE) event 959 tools/perf/util/auxtrace.c err = session->auxtrace->process_auxtrace_event(session, event, session->tool); event 963 tools/perf/util/auxtrace.c return event->auxtrace.size; event 1172 tools/perf/util/auxtrace.c size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp) event 1174 tools/perf/util/auxtrace.c struct perf_record_auxtrace_error *e = &event->auxtrace_error; event 1200 tools/perf/util/auxtrace.c union perf_event *event) event 1202 tools/perf/util/auxtrace.c struct perf_record_auxtrace_error *e = &event->auxtrace_error; event 1222 tools/perf/util/auxtrace.c union perf_event *event) event 1227 tools/perf/util/auxtrace.c perf_event__fprintf_auxtrace_error(event, stdout); event 2202 tools/perf/util/auxtrace.c int auxtrace__process_event(struct perf_session *session, union perf_event *event, event 2208 tools/perf/util/auxtrace.c return session->auxtrace->process_event(session, event, sample, tool); event 150 tools/perf/util/auxtrace.h union perf_event *event, event 154 tools/perf/util/auxtrace.h union perf_event *event, event 449 tools/perf/util/auxtrace.h union perf_event *event, void *data1, event 463 tools/perf/util/auxtrace.h union perf_event *event, off_t data_offset, event 517 tools/perf/util/auxtrace.h int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event, event 529 tools/perf/util/auxtrace.h union perf_event *event); event 531 tools/perf/util/auxtrace.h union perf_event *event); event 533 tools/perf/util/auxtrace.h union perf_event *event); event 539 tools/perf/util/auxtrace.h size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp); event 541 tools/perf/util/auxtrace.h union perf_event *event); event 550 tools/perf/util/auxtrace.h int auxtrace__process_event(struct perf_session *session, union perf_event *event, event 619 tools/perf/util/auxtrace.h union perf_event *event event 652 tools/perf/util/auxtrace.h union perf_event *event __maybe_unused, event 34 tools/perf/util/bpf-event.c union perf_event *event, event 40 tools/perf/util/bpf-event.c int id = event->bpf.id; event 69 tools/perf/util/bpf-event.c int machine__process_bpf(struct machine *machine, union perf_event *event, event 73 tools/perf/util/bpf-event.c perf_event__fprintf_bpf(event, stdout); event 75 tools/perf/util/bpf-event.c switch (event->bpf.type) { event 77 tools/perf/util/bpf-event.c return machine__process_bpf_event_load(machine, event, sample); event 87 tools/perf/util/bpf-event.c pr_debug("unexpected bpf event type of %d\n", event->bpf.type); event 161 
tools/perf/util/bpf-event.c union perf_event *event, event 164 tools/perf/util/bpf-event.c struct perf_record_ksymbol *ksymbol_event = &event->ksymbol; event 165 tools/perf/util/bpf-event.c struct perf_record_bpf_event *bpf_event = &event->bpf; event 249 tools/perf/util/bpf-event.c memset((void *)event + event->header.size, 0, machine->id_hdr_size); event 250 tools/perf/util/bpf-event.c event->header.size += machine->id_hdr_size; event 251 tools/perf/util/bpf-event.c err = perf_tool__process_synth_event(tool, event, event 267 tools/perf/util/bpf-event.c memset((void *)event + event->header.size, 0, machine->id_hdr_size); event 268 tools/perf/util/bpf-event.c event->header.size += machine->id_hdr_size; event 285 tools/perf/util/bpf-event.c err = perf_tool__process_synth_event(tool, event, event 300 tools/perf/util/bpf-event.c union perf_event *event; event 305 tools/perf/util/bpf-event.c event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size); event 306 tools/perf/util/bpf-event.c if (!event) event 331 tools/perf/util/bpf-event.c event, opts); event 340 tools/perf/util/bpf-event.c free(event); event 395 tools/perf/util/bpf-event.c static int bpf_event__sb_cb(union perf_event *event, void *data) event 399 tools/perf/util/bpf-event.c if (event->header.type != PERF_RECORD_BPF_EVENT) event 402 tools/perf/util/bpf-event.c switch (event->bpf.type) { event 404 tools/perf/util/bpf-event.c perf_env__add_bpf_info(env, event->bpf.id); event 414 tools/perf/util/bpf-event.c pr_debug("unexpected bpf event type of %d\n", event->bpf.type); event 34 tools/perf/util/bpf-event.h int machine__process_bpf(struct machine *machine, union perf_event *event, event 43 tools/perf/util/bpf-event.h union perf_event *event __maybe_unused, event 372 tools/perf/util/bpf-loader.c if (!pev->event) { event 713 tools/perf/util/bpf-loader.c "%s:%s", tev->group, tev->event); event 794 tools/perf/util/bpf-loader.c err = (*func)(tev->group, tev->event, fd, obj, arg); event 48 tools/perf/util/bpf-loader.h typedef int (*bpf_prog_iter_callback_t)(const char *group, const char *event, event 40 tools/perf/util/build-id.c union perf_event *event, event 51 tools/perf/util/build-id.c event->header.type); event 63 tools/perf/util/build-id.c union perf_event *event, event 69 tools/perf/util/build-id.c event->fork.pid, event 70 tools/perf/util/build-id.c event->fork.tid); event 72 tools/perf/util/build-id.c dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, event 73 tools/perf/util/build-id.c event->fork.ppid, event->fork.ptid); event 26 tools/perf/util/build-id.h int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, event 992 tools/perf/util/cs-etm.c static int cs_etm__inject_event(union perf_event *event, event 995 tools/perf/util/cs-etm.c event->header.size = perf_event__sample_event_size(sample, type, 0); event 996 tools/perf/util/cs-etm.c return perf_event__synthesize_sample(event, type, 0, sample); event 1118 tools/perf/util/cs-etm.c union perf_event *event = tidq->event_buf; event 1121 tools/perf/util/cs-etm.c event->sample.header.type = PERF_RECORD_SAMPLE; event 1122 tools/perf/util/cs-etm.c event->sample.header.misc = cs_etm__cpu_mode(etmq, addr); event 1123 tools/perf/util/cs-etm.c event->sample.header.size = sizeof(struct perf_event_header); event 1133 tools/perf/util/cs-etm.c sample.cpumode = event->sample.header.misc; event 1143 tools/perf/util/cs-etm.c ret = cs_etm__inject_event(event, &sample, event 1149 tools/perf/util/cs-etm.c ret = 
perf_session__deliver_synth_event(etm->session, event, &sample); event 1172 tools/perf/util/cs-etm.c union perf_event *event = tidq->event_buf; event 1181 tools/perf/util/cs-etm.c event->sample.header.type = PERF_RECORD_SAMPLE; event 1182 tools/perf/util/cs-etm.c event->sample.header.misc = cs_etm__cpu_mode(etmq, ip); event 1183 tools/perf/util/cs-etm.c event->sample.header.size = sizeof(struct perf_event_header); event 1194 tools/perf/util/cs-etm.c sample.cpumode = event->sample.header.misc; event 1214 tools/perf/util/cs-etm.c ret = cs_etm__inject_event(event, &sample, event 1220 tools/perf/util/cs-etm.c ret = perf_session__deliver_synth_event(etm->session, event, &sample); event 1236 tools/perf/util/cs-etm.c union perf_event *event, event 1244 tools/perf/util/cs-etm.c event, NULL); event 2190 tools/perf/util/cs-etm.c union perf_event *event) event 2202 tools/perf/util/cs-etm.c event->itrace_start.pid, event 2203 tools/perf/util/cs-etm.c event->itrace_start.tid); event 2213 tools/perf/util/cs-etm.c union perf_event *event) event 2216 tools/perf/util/cs-etm.c bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; event 2238 tools/perf/util/cs-etm.c event->context_switch.next_prev_pid, event 2239 tools/perf/util/cs-etm.c event->context_switch.next_prev_tid); event 2249 tools/perf/util/cs-etm.c union perf_event *event, event 2279 tools/perf/util/cs-etm.c event->header.type == PERF_RECORD_EXIT) event 2281 tools/perf/util/cs-etm.c event->fork.tid); event 2283 tools/perf/util/cs-etm.c if (event->header.type == PERF_RECORD_ITRACE_START) event 2284 tools/perf/util/cs-etm.c return cs_etm__process_itrace_start(etm, event); event 2285 tools/perf/util/cs-etm.c else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) event 2286 tools/perf/util/cs-etm.c return cs_etm__process_switch_cpu_wide(etm, event); event 2289 tools/perf/util/cs-etm.c event->header.type == PERF_RECORD_AUX) event 2296 tools/perf/util/cs-etm.c union perf_event *event, event 2318 tools/perf/util/cs-etm.c event, data_offset, &buffer); event 2397 tools/perf/util/cs-etm.c int cs_etm__process_auxtrace_info(union perf_event *event, event 2400 tools/perf/util/cs-etm.c struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; event 176 tools/perf/util/cs-etm.h int cs_etm__process_auxtrace_info(union perf_event *event, event 188 tools/perf/util/cs-etm.h cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused, event 96 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event, event 123 tools/perf/util/data-convert-bt.c ret = bt_ctf_event_set_payload(event, name, field); event 138 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event, \ event 143 tools/perf/util/data-convert-bt.c return value_set(type, event, name, (u64) val); \ event 156 tools/perf/util/data-convert-bt.c value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event, event 175 tools/perf/util/data-convert-bt.c ret = bt_ctf_event_set_payload(event, name, field); event 288 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event, event 313 tools/perf/util/data-convert-bt.c tmp_val = tep_read_number(fmtf->event->tep, event 357 tools/perf/util/data-convert-bt.c fmtf->event->tep, event 373 tools/perf/util/data-convert-bt.c ret = bt_ctf_event_set_payload(event, name, field); event 382 tools/perf/util/data-convert-bt.c ret = bt_ctf_event_set_payload(event, name, array_field); event 398 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event, event 406 tools/perf/util/data-convert-bt.c ret = add_tracepoint_field_value(cw, 
event_class, event, sample, event 416 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event, event 424 tools/perf/util/data-convert-bt.c ret = add_tracepoint_fields_values(cw, event_class, event, event 427 tools/perf/util/data-convert-bt.c ret = add_tracepoint_fields_values(cw, event_class, event, event 435 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event, event 462 tools/perf/util/data-convert-bt.c ret = bt_ctf_event_set_payload(event, "raw_len", len_field); event 496 tools/perf/util/data-convert-bt.c ret = bt_ctf_event_set_payload(event, "raw_data", seq_field); event 513 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event, event 536 tools/perf/util/data-convert-bt.c ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field); event 571 tools/perf/util/data-convert-bt.c ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field); event 587 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event, event 606 tools/perf/util/data-convert-bt.c ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip); event 612 tools/perf/util/data-convert-bt.c ret = value_set_s32(cw, event, "perf_tid", sample->tid); event 616 tools/perf/util/data-convert-bt.c ret = value_set_s32(cw, event, "perf_pid", sample->pid); event 623 tools/perf/util/data-convert-bt.c ret = value_set_u64(cw, event, "perf_id", sample->id); event 629 tools/perf/util/data-convert-bt.c ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id); event 635 tools/perf/util/data-convert-bt.c ret = value_set_u64(cw, event, "perf_period", sample->period); event 641 tools/perf/util/data-convert-bt.c ret = value_set_u64(cw, event, "perf_weight", sample->weight); event 647 tools/perf/util/data-convert-bt.c ret = value_set_u64(cw, event, "perf_data_src", event 654 tools/perf/util/data-convert-bt.c ret = value_set_u64(cw, event, "perf_transaction", event 797 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event; event 812 tools/perf/util/data-convert-bt.c event = bt_ctf_event_create(event_class); event 813 tools/perf/util/data-convert-bt.c if (!event) { event 820 tools/perf/util/data-convert-bt.c ret = add_generic_values(cw, event, evsel, sample); event 825 tools/perf/util/data-convert-bt.c ret = add_tracepoint_values(cw, event_class, event, event 833 tools/perf/util/data-convert-bt.c event, sample->callchain); event 839 tools/perf/util/data-convert-bt.c ret = add_bpf_output_values(event_class, event, sample); event 850 tools/perf/util/data-convert-bt.c bt_ctf_stream_append_event(cs->stream, event); event 853 tools/perf/util/data-convert-bt.c bt_ctf_event_put(event); event 859 tools/perf/util/data-convert-bt.c ret = value_set_##_type(cw, event, #_field, _event->_name._field);\ event 873 tools/perf/util/data-convert-bt.c struct bt_ctf_event *event; \ event 879 tools/perf/util/data-convert-bt.c event = bt_ctf_event_create(event_class); \ event 880 tools/perf/util/data-convert-bt.c if (!event) { \ event 893 tools/perf/util/data-convert-bt.c bt_ctf_stream_append_event(cs->stream, event); \ event 895 tools/perf/util/data-convert-bt.c bt_ctf_event_put(event); \ event 344 tools/perf/util/db-export.c int db_export__sample(struct db_export *dbe, union perf_event *event, event 350 tools/perf/util/db-export.c .event = event, event 550 tools/perf/util/db-export.c int db_export__switch(struct db_export *dbe, union perf_event *event, event 553 tools/perf/util/db-export.c bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; event 555 tools/perf/util/db-export.c (event->header.misc & 
PERF_RECORD_MISC_SWITCH_OUT_PREEMPT); event 575 tools/perf/util/db-export.c if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { event 576 tools/perf/util/db-export.c pid_t pid = event->context_switch.next_prev_pid; event 577 tools/perf/util/db-export.c pid_t tid = event->context_switch.next_prev_tid; event 26 tools/perf/util/db-export.h union perf_event *event; event 98 tools/perf/util/db-export.h int db_export__sample(struct db_export *dbe, union perf_event *event, event 107 tools/perf/util/db-export.h int db_export__switch(struct db_export *dbe, union perf_event *event, event 119 tools/perf/util/debug.c union perf_event *event = (union perf_event *)extra; event 127 tools/perf/util/debug.c event->header.size); event 164 tools/perf/util/debug.c void trace_event(union perf_event *event) event 166 tools/perf/util/debug.c unsigned char *raw_event = (void *)event; event 171 tools/perf/util/debug.c print_binary(raw_event, event->header.size, 16, event 172 tools/perf/util/debug.c trace_event_printer, event); event 44 tools/perf/util/debug.h void trace_event(union perf_event *event); event 121 tools/perf/util/event.c struct perf_record_stat_config *event) event 125 tools/perf/util/event.c for (i = 0; i < event->nr; i++) { event 127 tools/perf/util/event.c switch (event->data[i].tag) { event 130 tools/perf/util/event.c config->__val = event->data[i].val; \ event 139 tools/perf/util/event.c event->data[i].tag); event 144 tools/perf/util/event.c size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) event 148 tools/perf/util/event.c if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC) event 153 tools/perf/util/event.c return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid); event 156 tools/perf/util/event.c size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp) event 162 tools/perf/util/event.c ns_link_info = event->namespaces.link_info; event 163 tools/perf/util/event.c nr_namespaces = event->namespaces.nr_namespaces; event 166 tools/perf/util/event.c event->namespaces.pid, event 167 tools/perf/util/event.c event->namespaces.tid, event 184 tools/perf/util/event.c union perf_event *event, event 188 tools/perf/util/event.c return machine__process_comm_event(machine, event, sample); event 192 tools/perf/util/event.c union perf_event *event, event 196 tools/perf/util/event.c return machine__process_namespaces_event(machine, event, sample); event 200 tools/perf/util/event.c union perf_event *event, event 204 tools/perf/util/event.c return machine__process_lost_event(machine, event, sample); event 208 tools/perf/util/event.c union perf_event *event, event 212 tools/perf/util/event.c return machine__process_aux_event(machine, event); event 216 tools/perf/util/event.c union perf_event *event, event 220 tools/perf/util/event.c return machine__process_itrace_start_event(machine, event); event 224 tools/perf/util/event.c union perf_event *event, event 228 tools/perf/util/event.c return machine__process_lost_samples_event(machine, event, sample); event 232 tools/perf/util/event.c union perf_event *event, event 236 tools/perf/util/event.c return machine__process_switch_event(machine, event); event 240 tools/perf/util/event.c union perf_event *event, event 244 tools/perf/util/event.c return machine__process_ksymbol(machine, event, sample); event 248 tools/perf/util/event.c union perf_event *event, event 252 tools/perf/util/event.c return machine__process_bpf(machine, event, sample); event 255 tools/perf/util/event.c size_t 
perf_event__fprintf_mmap(union perf_event *event, FILE *fp) event 258 tools/perf/util/event.c event->mmap.pid, event->mmap.tid, event->mmap.start, event 259 tools/perf/util/event.c event->mmap.len, event->mmap.pgoff, event 260 tools/perf/util/event.c (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x', event 261 tools/perf/util/event.c event->mmap.filename); event 264 tools/perf/util/event.c size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) event 268 tools/perf/util/event.c event->mmap2.pid, event->mmap2.tid, event->mmap2.start, event 269 tools/perf/util/event.c event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, event 270 tools/perf/util/event.c event->mmap2.min, event->mmap2.ino, event 271 tools/perf/util/event.c event->mmap2.ino_generation, event 272 tools/perf/util/event.c (event->mmap2.prot & PROT_READ) ? 'r' : '-', event 273 tools/perf/util/event.c (event->mmap2.prot & PROT_WRITE) ? 'w' : '-', event 274 tools/perf/util/event.c (event->mmap2.prot & PROT_EXEC) ? 'x' : '-', event 275 tools/perf/util/event.c (event->mmap2.flags & MAP_SHARED) ? 's' : 'p', event 276 tools/perf/util/event.c event->mmap2.filename); event 279 tools/perf/util/event.c size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp) event 281 tools/perf/util/event.c struct perf_thread_map *threads = thread_map__new_event(&event->thread_map); event 295 tools/perf/util/event.c size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp) event 297 tools/perf/util/event.c struct perf_cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data); event 312 tools/perf/util/event.c union perf_event *event, event 316 tools/perf/util/event.c return machine__process_mmap_event(machine, event, sample); event 320 tools/perf/util/event.c union perf_event *event, event 324 tools/perf/util/event.c return machine__process_mmap2_event(machine, event, sample); event 327 tools/perf/util/event.c size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) event 330 tools/perf/util/event.c event->fork.pid, event->fork.tid, event 331 tools/perf/util/event.c event->fork.ppid, event->fork.ptid); event 335 tools/perf/util/event.c union perf_event *event, event 339 tools/perf/util/event.c return machine__process_fork_event(machine, event, sample); event 343 tools/perf/util/event.c union perf_event *event, event 347 tools/perf/util/event.c return machine__process_exit_event(machine, event, sample); event 350 tools/perf/util/event.c size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp) event 353 tools/perf/util/event.c event->aux.aux_offset, event->aux.aux_size, event 354 tools/perf/util/event.c event->aux.flags, event 355 tools/perf/util/event.c event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "", event 356 tools/perf/util/event.c event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "", event 357 tools/perf/util/event.c event->aux.flags & PERF_AUX_FLAG_PARTIAL ? "P" : ""); event 360 tools/perf/util/event.c size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp) event 363 tools/perf/util/event.c event->itrace_start.pid, event->itrace_start.tid); event 366 tools/perf/util/event.c size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) event 368 tools/perf/util/event.c bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; event 370 tools/perf/util/event.c !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ? 
event 373 tools/perf/util/event.c if (event->header.type == PERF_RECORD_SWITCH) event 378 tools/perf/util/event.c event->context_switch.next_prev_pid, event 379 tools/perf/util/event.c event->context_switch.next_prev_tid); event 382 tools/perf/util/event.c static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp) event 384 tools/perf/util/event.c return fprintf(fp, " lost %" PRI_lu64 "\n", event->lost.lost); event 387 tools/perf/util/event.c size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp) event 390 tools/perf/util/event.c event->ksymbol.addr, event->ksymbol.len, event 391 tools/perf/util/event.c event->ksymbol.ksym_type, event 392 tools/perf/util/event.c event->ksymbol.flags, event->ksymbol.name); event 395 tools/perf/util/event.c size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp) event 398 tools/perf/util/event.c event->bpf.type, event->bpf.flags, event->bpf.id); event 401 tools/perf/util/event.c size_t perf_event__fprintf(union perf_event *event, FILE *fp) event 404 tools/perf/util/event.c perf_event__name(event->header.type)); event 406 tools/perf/util/event.c switch (event->header.type) { event 408 tools/perf/util/event.c ret += perf_event__fprintf_comm(event, fp); event 412 tools/perf/util/event.c ret += perf_event__fprintf_task(event, fp); event 415 tools/perf/util/event.c ret += perf_event__fprintf_mmap(event, fp); event 418 tools/perf/util/event.c ret += perf_event__fprintf_namespaces(event, fp); event 421 tools/perf/util/event.c ret += perf_event__fprintf_mmap2(event, fp); event 424 tools/perf/util/event.c ret += perf_event__fprintf_aux(event, fp); event 427 tools/perf/util/event.c ret += perf_event__fprintf_itrace_start(event, fp); event 431 tools/perf/util/event.c ret += perf_event__fprintf_switch(event, fp); event 434 tools/perf/util/event.c ret += perf_event__fprintf_lost(event, fp); event 437 tools/perf/util/event.c ret += perf_event__fprintf_ksymbol(event, fp); event 440 tools/perf/util/event.c ret += perf_event__fprintf_bpf(event, fp); event 450 tools/perf/util/event.c union perf_event *event, event 454 tools/perf/util/event.c return machine__process_event(machine, event, sample); event 288 tools/perf/util/event.h struct perf_record_stat_config *event); event 291 tools/perf/util/event.h union perf_event *event, event 295 tools/perf/util/event.h union perf_event *event, event 299 tools/perf/util/event.h union perf_event *event, event 303 tools/perf/util/event.h union perf_event *event, event 307 tools/perf/util/event.h union perf_event *event, event 311 tools/perf/util/event.h union perf_event *event, event 315 tools/perf/util/event.h union perf_event *event, event 319 tools/perf/util/event.h union perf_event *event, event 323 tools/perf/util/event.h union perf_event *event, event 327 tools/perf/util/event.h union perf_event *event, event 331 tools/perf/util/event.h union perf_event *event, event 335 tools/perf/util/event.h union perf_event *event, event 339 tools/perf/util/event.h union perf_event *event, event 343 tools/perf/util/event.h union perf_event *event, event 363 tools/perf/util/event.h size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); event 364 tools/perf/util/event.h size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp); event 365 tools/perf/util/event.h size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp); event 366 tools/perf/util/event.h size_t perf_event__fprintf_task(union perf_event *event, FILE *fp); event 367 tools/perf/util/event.h size_t 
perf_event__fprintf_aux(union perf_event *event, FILE *fp); event 368 tools/perf/util/event.h size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp); event 369 tools/perf/util/event.h size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp); event 370 tools/perf/util/event.h size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp); event 371 tools/perf/util/event.h size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp); event 372 tools/perf/util/event.h size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp); event 373 tools/perf/util/event.h size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp); event 374 tools/perf/util/event.h size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp); event 375 tools/perf/util/event.h size_t perf_event__fprintf(union perf_event *event, FILE *fp); event 491 tools/perf/util/evlist.c union perf_event *event, u64 *id) event 493 tools/perf/util/evlist.c const __u64 *array = event->sample.array; event 496 tools/perf/util/evlist.c n = (event->header.size - sizeof(event->header)) >> 3; event 498 tools/perf/util/evlist.c if (event->header.type == PERF_RECORD_SAMPLE) { event 512 tools/perf/util/evlist.c union perf_event *event) event 524 tools/perf/util/evlist.c event->header.type != PERF_RECORD_SAMPLE) event 527 tools/perf/util/evlist.c if (perf_evlist__event2id(evlist, event, &id)) event 1403 tools/perf/util/evlist.c int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event, event 1406 tools/perf/util/evlist.c struct evsel *evsel = perf_evlist__event2evsel(evlist, event); event 1410 tools/perf/util/evlist.c return perf_evsel__parse_sample(evsel, event, sample); event 1414 tools/perf/util/evlist.c union perf_event *event, event 1417 tools/perf/util/evlist.c struct evsel *evsel = perf_evlist__event2evsel(evlist, event); event 1421 tools/perf/util/evlist.c return perf_evsel__parse_sample_timestamp(evsel, event, timestamp); event 1730 tools/perf/util/evlist.c union perf_event *event; event 1734 tools/perf/util/evlist.c while ((event = perf_mmap__read_event(map)) != NULL) { event 1735 tools/perf/util/evlist.c struct evsel *evsel = perf_evlist__event2evsel(evlist, event); event 1738 tools/perf/util/evlist.c evsel->side_band.cb(event, evsel->side_band.data); event 69 tools/perf/util/evlist.h union perf_event *event, event 218 tools/perf/util/evlist.h int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event, event 222 tools/perf/util/evlist.h union perf_event *event, event 331 tools/perf/util/evlist.h union perf_event *event); event 1844 tools/perf/util/evsel.c const union perf_event *event, event 1848 tools/perf/util/evsel.c const __u64 *array = event->sample.array; event 1852 tools/perf/util/evsel.c array += ((event->header.size - event 1853 tools/perf/util/evsel.c sizeof(event->header)) / sizeof(u64)) - 1; event 1920 tools/perf/util/evsel.c perf_event__check_size(union perf_event *event, unsigned int sample_size) event 1927 tools/perf/util/evsel.c if (sample_size + sizeof(event->header) > event->header.size) event 1933 tools/perf/util/evsel.c int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event, event 1939 tools/perf/util/evsel.c u16 max_size = event->header.size; event 1940 tools/perf/util/evsel.c const void *endp = (void *)event + max_size; event 1953 tools/perf/util/evsel.c data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; event 1954 tools/perf/util/evsel.c data->misc = 
event->header.misc; event 1958 tools/perf/util/evsel.c if (event->header.type != PERF_RECORD_SAMPLE) { event 1961 tools/perf/util/evsel.c return perf_evsel__parse_id_sample(evsel, event, data); event 1964 tools/perf/util/evsel.c array = event->sample.array; event 1966 tools/perf/util/evsel.c if (perf_event__check_size(event, evsel->sample_size)) event 2152 tools/perf/util/evsel.c - (char *) event); event 2213 tools/perf/util/evsel.c union perf_event *event, event 2222 tools/perf/util/evsel.c if (event->header.type != PERF_RECORD_SAMPLE) { event 2229 tools/perf/util/evsel.c if (perf_evsel__parse_id_sample(evsel, event, &data)) event 2236 tools/perf/util/evsel.c array = event->sample.array; event 2238 tools/perf/util/evsel.c if (perf_event__check_size(event, evsel->sample_size)) event 21 tools/perf/util/evsel.h typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data); event 300 tools/perf/util/evsel.h int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event, event 304 tools/perf/util/evsel.h union perf_event *event, event 2114 tools/perf/util/header.c struct evsel *event) event 2118 tools/perf/util/header.c if (!event->name) event 2121 tools/perf/util/header.c evsel = perf_evlist__find_by_index(evlist, event->idx); event 2128 tools/perf/util/header.c evsel->name = strdup(event->name); event 3487 tools/perf/util/header.c struct tep_event *event; event 3499 tools/perf/util/header.c event = tep_find_event(pevent, evsel->core.attr.config); event 3500 tools/perf/util/header.c if (event == NULL) { event 3506 tools/perf/util/header.c snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); event 3512 tools/perf/util/header.c evsel->tp_format = event; event 3639 tools/perf/util/header.c union perf_event *event) event 3643 tools/perf/util/header.c struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event; event 3660 tools/perf/util/header.c ff.size = event->header.size - sizeof(*fe); event 3680 tools/perf/util/header.c size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) event 3682 tools/perf/util/header.c struct perf_record_event_update *ev = &event->event_update; event 3720 tools/perf/util/header.c union perf_event *event, event 3733 tools/perf/util/header.c evsel = evsel__new(&event->attr.attr); event 3739 tools/perf/util/header.c ids = event->header.size; event 3740 tools/perf/util/header.c ids -= (void *)&event->attr.id - (void *)event; event 3751 tools/perf/util/header.c perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]); event 3758 tools/perf/util/header.c union perf_event *event, event 3761 tools/perf/util/header.c struct perf_record_event_update *ev = &event->event_update; event 3804 tools/perf/util/header.c union perf_event *event) event 3806 tools/perf/util/header.c ssize_t size_read, padding, size = event->tracing_data.size; event 3843 tools/perf/util/header.c union perf_event *event) event 3845 tools/perf/util/header.c __event_process_build_id(&event->build_id, event 3846 tools/perf/util/header.c event->build_id.filename, event 139 tools/perf/util/header.h union perf_event *event); event 140 tools/perf/util/header.h int perf_event__process_attr(struct perf_tool *tool, union perf_event *event, event 143 tools/perf/util/header.h union perf_event *event, event 145 tools/perf/util/header.h size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp); event 147 tools/perf/util/header.h union perf_event *event); event 149 tools/perf/util/header.h union perf_event *event); 
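The util/event.c, util/evlist.c and util/mmap.c entries indexed above outline the usual record-consumption path in these tools: pull a record out of a mapped ring with perf_mmap__read_event(), pretty-print it with perf_event__fprintf(), and, for PERF_RECORD_SAMPLE, decode the body with perf_evlist__parse_sample(). The sketch below is not taken from any of the indexed files; it merely strings those indexed calls together. dump_ring() and its arguments are made-up names, the include paths assume a tools/perf build environment, and ring-buffer consume/advance handling is deliberately left out.

/*
 * Hypothetical helper: drain one already-mapped ring buffer and print every
 * record, using only calls that appear in the index above.  dump_ring() is
 * an assumption for illustration; consuming/advancing the ring so the kernel
 * can reuse the space is intentionally omitted here.
 */
#include <stdio.h>

#include "util/event.h"		/* union perf_event, perf_event__fprintf() */
#include "util/evlist.h"	/* struct evlist, perf_evlist__parse_sample() */
#include "util/mmap.h"		/* struct mmap, perf_mmap__read_event() */

static void dump_ring(struct evlist *evlist, struct mmap *md)
{
	union perf_event *event;

	while ((event = perf_mmap__read_event(md)) != NULL) {
		struct perf_sample sample;

		/* One line per record: "PERF_RECORD_MMAP2 ...", etc. */
		perf_event__fprintf(event, stdout);

		/* Samples carry a body laid out by the evsel's sample_type. */
		if (event->header.type == PERF_RECORD_SAMPLE &&
		    !perf_evlist__parse_sample(evlist, event, &sample))
			printf("  ip %#llx pid %u tid %u\n",
			       (unsigned long long)sample.ip,
			       sample.pid, sample.tid);
	}
}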
event 133 tools/perf/util/intel-bts.c union perf_event event; event 136 tools/perf/util/intel-bts.c auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, event 140 tools/perf/util/intel-bts.c err = perf_session__deliver_synth_event(bts->session, &event, NULL); event 277 tools/perf/util/intel-bts.c union perf_event event; event 297 tools/perf/util/intel-bts.c event.sample.header.type = PERF_RECORD_SAMPLE; event 298 tools/perf/util/intel-bts.c event.sample.header.misc = sample.cpumode; event 299 tools/perf/util/intel-bts.c event.sample.header.size = sizeof(struct perf_event_header); event 302 tools/perf/util/intel-bts.c event.sample.header.size = bts->branches_event_size; event 303 tools/perf/util/intel-bts.c ret = perf_event__synthesize_sample(&event, event 310 tools/perf/util/intel-bts.c ret = perf_session__deliver_synth_event(bts->session, &event, &sample); event 347 tools/perf/util/intel-bts.c union perf_event event; event 350 tools/perf/util/intel-bts.c auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, event 354 tools/perf/util/intel-bts.c err = perf_session__deliver_synth_event(bts->session, &event, NULL); event 592 tools/perf/util/intel-bts.c union perf_event *event, event 621 tools/perf/util/intel-bts.c if (event->header.type == PERF_RECORD_EXIT) { event 622 tools/perf/util/intel-bts.c err = intel_bts_process_tid_exit(bts, event->fork.tid); event 627 tools/perf/util/intel-bts.c if (event->header.type == PERF_RECORD_AUX && event 628 tools/perf/util/intel-bts.c (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && event 636 tools/perf/util/intel-bts.c union perf_event *event, event 659 tools/perf/util/intel-bts.c err = auxtrace_queues__add_event(&bts->queues, session, event, event 737 tools/perf/util/intel-bts.c union perf_event *event, event 745 tools/perf/util/intel-bts.c event, NULL); event 848 tools/perf/util/intel-bts.c int intel_bts_process_auxtrace_info(union perf_event *event, event 851 tools/perf/util/intel-bts.c struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; event 31 tools/perf/util/intel-bts.h int intel_bts_process_auxtrace_info(union perf_event *event, event 226 tools/perf/util/intel-pt.c static void intel_pt_log_event(union perf_event *event) event 233 tools/perf/util/intel-pt.c perf_event__fprintf(event, f); event 1203 tools/perf/util/intel-pt.c union perf_event *event, event 1206 tools/perf/util/intel-pt.c event->sample.header.type = PERF_RECORD_SAMPLE; event 1207 tools/perf/util/intel-pt.c event->sample.header.size = sizeof(struct perf_event_header); event 1218 tools/perf/util/intel-pt.c union perf_event *event, event 1221 tools/perf/util/intel-pt.c intel_pt_prep_a_sample(ptq, event, sample); event 1232 tools/perf/util/intel-pt.c event->sample.header.misc = sample->cpumode; event 1235 tools/perf/util/intel-pt.c static int intel_pt_inject_event(union perf_event *event, event 1238 tools/perf/util/intel-pt.c event->header.size = perf_event__sample_event_size(sample, type, 0); event 1239 tools/perf/util/intel-pt.c return perf_event__synthesize_sample(event, type, 0, sample); event 1243 tools/perf/util/intel-pt.c union perf_event *event, event 1249 tools/perf/util/intel-pt.c return intel_pt_inject_event(event, sample, type); event 1253 tools/perf/util/intel-pt.c union perf_event *event, event 1258 tools/perf/util/intel-pt.c ret = intel_pt_opt_inject(pt, event, sample, type); event 1262 tools/perf/util/intel-pt.c ret = perf_session__deliver_synth_event(pt->session, event, sample); event 1272 
tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1285 tools/perf/util/intel-pt.c intel_pt_prep_b_sample(pt, ptq, event, &sample); event 1312 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_b_event(pt, event, &sample, event 1318 tools/perf/util/intel-pt.c union perf_event *event, event 1321 tools/perf/util/intel-pt.c intel_pt_prep_b_sample(pt, ptq, event, sample); event 1338 tools/perf/util/intel-pt.c union perf_event *event, event 1344 tools/perf/util/intel-pt.c ret = intel_pt_deliver_synth_b_event(pt, event, sample, type); event 1355 tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1361 tools/perf/util/intel-pt.c intel_pt_prep_sample(pt, ptq, event, &sample); event 1376 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_event(pt, ptq, event, &sample, event 1383 tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1389 tools/perf/util/intel-pt.c intel_pt_prep_sample(pt, ptq, event, &sample); event 1394 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_event(pt, ptq, event, &sample, event 1400 tools/perf/util/intel-pt.c union perf_event *event, event 1403 tools/perf/util/intel-pt.c intel_pt_prep_sample(pt, ptq, event, sample); event 1416 tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1423 tools/perf/util/intel-pt.c intel_pt_prep_p_sample(pt, ptq, event, &sample); event 1435 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_event(pt, ptq, event, &sample, event 1442 tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1452 tools/perf/util/intel-pt.c intel_pt_prep_p_sample(pt, ptq, event, &sample); event 1465 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_event(pt, ptq, event, &sample, event 1472 tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1479 tools/perf/util/intel-pt.c intel_pt_prep_p_sample(pt, ptq, event, &sample); event 1490 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_event(pt, ptq, event, &sample, event 1497 tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1504 tools/perf/util/intel-pt.c intel_pt_prep_p_sample(pt, ptq, event, &sample); event 1515 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_event(pt, ptq, event, &sample, event 1522 tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1529 tools/perf/util/intel-pt.c intel_pt_prep_p_sample(pt, ptq, event, &sample); event 1540 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_event(pt, ptq, event, &sample, event 1547 tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1554 tools/perf/util/intel-pt.c intel_pt_prep_p_sample(pt, ptq, event, &sample); event 1565 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_event(pt, ptq, event, &sample, event 1704 tools/perf/util/intel-pt.c union perf_event *event = ptq->event_buf; event 1714 tools/perf/util/intel-pt.c intel_pt_prep_a_sample(ptq, event, &sample); event 1735 tools/perf/util/intel-pt.c event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP; event 1821 tools/perf/util/intel-pt.c return intel_pt_deliver_synth_event(pt, ptq, event, &sample, sample_type); event 1827 tools/perf/util/intel-pt.c union perf_event event; event 1833 tools/perf/util/intel-pt.c auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, event 1836 tools/perf/util/intel-pt.c err = perf_session__deliver_synth_event(pt->session, &event, NULL); event 2465 tools/perf/util/intel-pt.c static int 
intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, event 2468 tools/perf/util/intel-pt.c bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; event 2477 tools/perf/util/intel-pt.c if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) { event 2481 tools/perf/util/intel-pt.c pid = event->context_switch.next_prev_pid; event 2482 tools/perf/util/intel-pt.c tid = event->context_switch.next_prev_tid; event 2507 tools/perf/util/intel-pt.c union perf_event *event, event 2514 tools/perf/util/intel-pt.c sample->cpu, event->itrace_start.pid, event 2515 tools/perf/util/intel-pt.c event->itrace_start.tid, sample->time, event 2519 tools/perf/util/intel-pt.c event->itrace_start.pid, event 2520 tools/perf/util/intel-pt.c event->itrace_start.tid); event 2524 tools/perf/util/intel-pt.c union perf_event *event, event 2553 tools/perf/util/intel-pt.c if (event->header.type == PERF_RECORD_EXIT) { event 2555 tools/perf/util/intel-pt.c event->fork.tid, event 2564 tools/perf/util/intel-pt.c if (event->header.type == PERF_RECORD_AUX && event 2565 tools/perf/util/intel-pt.c (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && event 2572 tools/perf/util/intel-pt.c if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE) event 2574 tools/perf/util/intel-pt.c else if (event->header.type == PERF_RECORD_ITRACE_START) event 2575 tools/perf/util/intel-pt.c err = intel_pt_process_itrace_start(pt, event, sample); event 2576 tools/perf/util/intel-pt.c else if (event->header.type == PERF_RECORD_SWITCH || event 2577 tools/perf/util/intel-pt.c event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) event 2578 tools/perf/util/intel-pt.c err = intel_pt_context_switch(pt, event, sample); event 2581 tools/perf/util/intel-pt.c event->header.type, sample->cpu, sample->time, timestamp); event 2582 tools/perf/util/intel-pt.c intel_pt_log_event(event); event 2641 tools/perf/util/intel-pt.c union perf_event *event, event 2661 tools/perf/util/intel-pt.c err = auxtrace_queues__add_event(&pt->queues, session, event, event 2685 tools/perf/util/intel-pt.c union perf_event *event, event 2692 tools/perf/util/intel-pt.c return perf_session__deliver_synth_event(intel_pt_synth->session, event, event 3073 tools/perf/util/intel-pt.c int intel_pt_process_auxtrace_info(union perf_event *event, event 3076 tools/perf/util/intel-pt.c struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; event 42 tools/perf/util/intel-pt.h int intel_pt_process_auxtrace_info(union perf_event *event, event 357 tools/perf/util/jitdump.c jit_inject_event(struct jit_buf_desc *jd, union perf_event *event) event 361 tools/perf/util/jitdump.c size = perf_data__write(jd->output, event, event->header.size); event 389 tools/perf/util/jitdump.c union perf_event *event; event 416 tools/perf/util/jitdump.c event = calloc(1, sizeof(*event) + idr_size); event 417 tools/perf/util/jitdump.c if (!event) event 420 tools/perf/util/jitdump.c filename = event->mmap2.filename; event 446 tools/perf/util/jitdump.c free(event); event 452 tools/perf/util/jitdump.c event->mmap2.header.type = PERF_RECORD_MMAP2; event 453 tools/perf/util/jitdump.c event->mmap2.header.misc = PERF_RECORD_MISC_USER; event 454 tools/perf/util/jitdump.c event->mmap2.header.size = (sizeof(event->mmap2) - event 455 tools/perf/util/jitdump.c (sizeof(event->mmap2.filename) - size) + idr_size); event 457 tools/perf/util/jitdump.c event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET; event 458 tools/perf/util/jitdump.c event->mmap2.start = addr; event 459 tools/perf/util/jitdump.c event->mmap2.len 
= usize ? ALIGN_8(csize) + usize : csize; event 460 tools/perf/util/jitdump.c event->mmap2.pid = pid; event 461 tools/perf/util/jitdump.c event->mmap2.tid = tid; event 462 tools/perf/util/jitdump.c event->mmap2.ino = st.st_ino; event 463 tools/perf/util/jitdump.c event->mmap2.maj = major(st.st_dev); event 464 tools/perf/util/jitdump.c event->mmap2.min = minor(st.st_dev); event 465 tools/perf/util/jitdump.c event->mmap2.prot = st.st_mode; event 466 tools/perf/util/jitdump.c event->mmap2.flags = MAP_SHARED; event 467 tools/perf/util/jitdump.c event->mmap2.ino_generation = 1; event 469 tools/perf/util/jitdump.c id = (void *)((unsigned long)event + event->mmap.header.size - idr_size); event 488 tools/perf/util/jitdump.c ret = perf_event__process_mmap2(tool, event, &sample, jd->machine); event 492 tools/perf/util/jitdump.c ret = jit_inject_event(jd, event); event 497 tools/perf/util/jitdump.c build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine); event 505 tools/perf/util/jitdump.c union perf_event *event; event 527 tools/perf/util/jitdump.c event = calloc(1, sizeof(*event) + 16); event 528 tools/perf/util/jitdump.c if (!event) event 531 tools/perf/util/jitdump.c filename = event->mmap2.filename; event 544 tools/perf/util/jitdump.c event->mmap2.header.type = PERF_RECORD_MMAP2; event 545 tools/perf/util/jitdump.c event->mmap2.header.misc = PERF_RECORD_MISC_USER; event 546 tools/perf/util/jitdump.c event->mmap2.header.size = (sizeof(event->mmap2) - event 547 tools/perf/util/jitdump.c (sizeof(event->mmap2.filename) - size) + idr_size); event 548 tools/perf/util/jitdump.c event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET; event 549 tools/perf/util/jitdump.c event->mmap2.start = jr->move.new_code_addr; event 550 tools/perf/util/jitdump.c event->mmap2.len = usize ? 
ALIGN_8(jr->move.code_size) + usize event 552 tools/perf/util/jitdump.c event->mmap2.pid = pid; event 553 tools/perf/util/jitdump.c event->mmap2.tid = tid; event 554 tools/perf/util/jitdump.c event->mmap2.ino = st.st_ino; event 555 tools/perf/util/jitdump.c event->mmap2.maj = major(st.st_dev); event 556 tools/perf/util/jitdump.c event->mmap2.min = minor(st.st_dev); event 557 tools/perf/util/jitdump.c event->mmap2.prot = st.st_mode; event 558 tools/perf/util/jitdump.c event->mmap2.flags = MAP_SHARED; event 559 tools/perf/util/jitdump.c event->mmap2.ino_generation = 1; event 561 tools/perf/util/jitdump.c id = (void *)((unsigned long)event + event->mmap.header.size - idr_size); event 580 tools/perf/util/jitdump.c ret = perf_event__process_mmap2(tool, event, &sample, jd->machine); event 584 tools/perf/util/jitdump.c ret = jit_inject_event(jd, event); event 586 tools/perf/util/jitdump.c build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine); event 596 tools/perf/util/machine.c int machine__process_comm_event(struct machine *machine, union perf_event *event, event 600 tools/perf/util/machine.c event->comm.pid, event 601 tools/perf/util/machine.c event->comm.tid); event 602 tools/perf/util/machine.c bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; event 609 tools/perf/util/machine.c perf_event__fprintf_comm(event, stdout); event 612 tools/perf/util/machine.c __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { event 623 tools/perf/util/machine.c union perf_event *event, event 627 tools/perf/util/machine.c event->namespaces.pid, event 628 tools/perf/util/machine.c event->namespaces.tid); event 631 tools/perf/util/machine.c WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES, event 635 tools/perf/util/machine.c WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES, event 640 tools/perf/util/machine.c perf_event__fprintf_namespaces(event, stdout); event 643 tools/perf/util/machine.c thread__set_namespaces(thread, sample->time, &event->namespaces)) { event 654 tools/perf/util/machine.c union perf_event *event, struct perf_sample *sample __maybe_unused) event 657 tools/perf/util/machine.c event->lost.id, event->lost.lost); event 662 tools/perf/util/machine.c union perf_event *event, struct perf_sample *sample) event 665 tools/perf/util/machine.c sample->id, event->lost_samples.lost); event 694 tools/perf/util/machine.c union perf_event *event) event 697 tools/perf/util/machine.c perf_event__fprintf_aux(event, stdout); event 702 tools/perf/util/machine.c union perf_event *event) event 705 tools/perf/util/machine.c perf_event__fprintf_itrace_start(event, stdout); event 710 tools/perf/util/machine.c union perf_event *event) event 713 tools/perf/util/machine.c perf_event__fprintf_switch(event, stdout); event 718 tools/perf/util/machine.c union perf_event *event, event 724 tools/perf/util/machine.c map = map_groups__find(&machine->kmaps, event->ksymbol.addr); event 726 tools/perf/util/machine.c map = dso__new_map(event->ksymbol.name); event 730 tools/perf/util/machine.c map->start = event->ksymbol.addr; event 731 tools/perf/util/machine.c map->end = map->start + event->ksymbol.len; event 736 tools/perf/util/machine.c event->ksymbol.len, event 737 tools/perf/util/machine.c 0, 0, event->ksymbol.name); event 745 tools/perf/util/machine.c union perf_event *event, event 750 tools/perf/util/machine.c map = map_groups__find(&machine->kmaps, event->ksymbol.addr); event 758 tools/perf/util/machine.c union perf_event *event, event 762 tools/perf/util/machine.c 
perf_event__fprintf_ksymbol(event, stdout); event 764 tools/perf/util/machine.c if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER) event 765 tools/perf/util/machine.c return machine__process_ksymbol_unregister(machine, event, event 767 tools/perf/util/machine.c return machine__process_ksymbol_register(machine, event, sample); event 1512 tools/perf/util/machine.c union perf_event *event) event 1515 tools/perf/util/machine.c is_entry_trampoline(event->mmap.filename); event 1519 tools/perf/util/machine.c union perf_event *event) event 1524 tools/perf/util/machine.c .start = event->mmap.start, event 1525 tools/perf/util/machine.c .end = event->mmap.start + event->mmap.len, event 1526 tools/perf/util/machine.c .pgoff = event->mmap.pgoff, event 1532 tools/perf/util/machine.c strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN); event 1538 tools/perf/util/machine.c union perf_event *event) event 1553 tools/perf/util/machine.c is_kernel_mmap = memcmp(event->mmap.filename, event 1556 tools/perf/util/machine.c if (event->mmap.filename[0] == '/' || event 1557 tools/perf/util/machine.c (!is_kernel_mmap && event->mmap.filename[0] == '[')) { event 1558 tools/perf/util/machine.c map = machine__findnew_module_map(machine, event->mmap.start, event 1559 tools/perf/util/machine.c event->mmap.filename); event 1563 tools/perf/util/machine.c map->end = map->start + event->mmap.len; event 1565 tools/perf/util/machine.c const char *symbol_name = (event->mmap.filename + event 1620 tools/perf/util/machine.c machine__update_kernel_mmap(machine, event->mmap.start, event 1621 tools/perf/util/machine.c event->mmap.start + event->mmap.len); event 1628 tools/perf/util/machine.c if (event->mmap.pgoff != 0) { event 1631 tools/perf/util/machine.c event->mmap.pgoff); event 1640 tools/perf/util/machine.c } else if (perf_event__is_extra_kernel_mmap(machine, event)) { event 1641 tools/perf/util/machine.c return machine__process_extra_kernel_map(machine, event); event 1649 tools/perf/util/machine.c union perf_event *event, event 1657 tools/perf/util/machine.c perf_event__fprintf_mmap2(event, stdout); event 1661 tools/perf/util/machine.c ret = machine__process_kernel_mmap_event(machine, event); event 1667 tools/perf/util/machine.c thread = machine__findnew_thread(machine, event->mmap2.pid, event 1668 tools/perf/util/machine.c event->mmap2.tid); event 1672 tools/perf/util/machine.c map = map__new(machine, event->mmap2.start, event 1673 tools/perf/util/machine.c event->mmap2.len, event->mmap2.pgoff, event 1674 tools/perf/util/machine.c event->mmap2.maj, event 1675 tools/perf/util/machine.c event->mmap2.min, event->mmap2.ino, event 1676 tools/perf/util/machine.c event->mmap2.ino_generation, event 1677 tools/perf/util/machine.c event->mmap2.prot, event 1678 tools/perf/util/machine.c event->mmap2.flags, event 1679 tools/perf/util/machine.c event->mmap2.filename, thread); event 1701 tools/perf/util/machine.c int machine__process_mmap_event(struct machine *machine, union perf_event *event, event 1710 tools/perf/util/machine.c perf_event__fprintf_mmap(event, stdout); event 1714 tools/perf/util/machine.c ret = machine__process_kernel_mmap_event(machine, event); event 1720 tools/perf/util/machine.c thread = machine__findnew_thread(machine, event->mmap.pid, event 1721 tools/perf/util/machine.c event->mmap.tid); event 1725 tools/perf/util/machine.c if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA)) event 1728 tools/perf/util/machine.c map = map__new(machine, event->mmap.start, event 1729 tools/perf/util/machine.c 
event->mmap.len, event->mmap.pgoff, event 1731 tools/perf/util/machine.c event->mmap.filename, event 1792 tools/perf/util/machine.c int machine__process_fork_event(struct machine *machine, union perf_event *event, event 1796 tools/perf/util/machine.c event->fork.pid, event 1797 tools/perf/util/machine.c event->fork.tid); event 1799 tools/perf/util/machine.c event->fork.ppid, event 1800 tools/perf/util/machine.c event->fork.ptid); event 1805 tools/perf/util/machine.c perf_event__fprintf_task(event, stdout); event 1813 tools/perf/util/machine.c if (parent->pid_ != (pid_t)event->fork.ppid) { event 1818 tools/perf/util/machine.c parent = machine__findnew_thread(machine, event->fork.ppid, event 1819 tools/perf/util/machine.c event->fork.ptid); event 1828 tools/perf/util/machine.c thread = machine__findnew_thread(machine, event->fork.pid, event 1829 tools/perf/util/machine.c event->fork.tid); event 1844 tools/perf/util/machine.c if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC) event 1858 tools/perf/util/machine.c int machine__process_exit_event(struct machine *machine, union perf_event *event, event 1862 tools/perf/util/machine.c event->fork.pid, event 1863 tools/perf/util/machine.c event->fork.tid); event 1866 tools/perf/util/machine.c perf_event__fprintf_task(event, stdout); event 1876 tools/perf/util/machine.c int machine__process_event(struct machine *machine, union perf_event *event, event 1881 tools/perf/util/machine.c switch (event->header.type) { event 1883 tools/perf/util/machine.c ret = machine__process_comm_event(machine, event, sample); break; event 1885 tools/perf/util/machine.c ret = machine__process_mmap_event(machine, event, sample); break; event 1887 tools/perf/util/machine.c ret = machine__process_namespaces_event(machine, event, sample); break; event 1889 tools/perf/util/machine.c ret = machine__process_mmap2_event(machine, event, sample); break; event 1891 tools/perf/util/machine.c ret = machine__process_fork_event(machine, event, sample); break; event 1893 tools/perf/util/machine.c ret = machine__process_exit_event(machine, event, sample); break; event 1895 tools/perf/util/machine.c ret = machine__process_lost_event(machine, event, sample); break; event 1897 tools/perf/util/machine.c ret = machine__process_aux_event(machine, event); break; event 1899 tools/perf/util/machine.c ret = machine__process_itrace_start_event(machine, event); break; event 1901 tools/perf/util/machine.c ret = machine__process_lost_samples_event(machine, event, sample); break; event 1904 tools/perf/util/machine.c ret = machine__process_switch_event(machine, event); break; event 1906 tools/perf/util/machine.c ret = machine__process_ksymbol(machine, event, sample); break; event 1908 tools/perf/util/machine.c ret = machine__process_bpf(machine, event, sample); break; event 111 tools/perf/util/machine.h int machine__process_comm_event(struct machine *machine, union perf_event *event, event 113 tools/perf/util/machine.h int machine__process_exit_event(struct machine *machine, union perf_event *event, event 115 tools/perf/util/machine.h int machine__process_fork_event(struct machine *machine, union perf_event *event, event 117 tools/perf/util/machine.h int machine__process_lost_event(struct machine *machine, union perf_event *event, event 119 tools/perf/util/machine.h int machine__process_lost_samples_event(struct machine *machine, union perf_event *event, event 122 tools/perf/util/machine.h union perf_event *event); event 124 tools/perf/util/machine.h union perf_event *event); event 126 
tools/perf/util/machine.h union perf_event *event); event 128 tools/perf/util/machine.h union perf_event *event, event 130 tools/perf/util/machine.h int machine__process_mmap_event(struct machine *machine, union perf_event *event, event 132 tools/perf/util/machine.h int machine__process_mmap2_event(struct machine *machine, union perf_event *event, event 135 tools/perf/util/machine.h union perf_event *event, event 137 tools/perf/util/machine.h int machine__process_event(struct machine *machine, union perf_event *event, event 36 tools/perf/util/mmap.c union perf_event *event = NULL; event 39 tools/perf/util/mmap.c if (diff >= (int)sizeof(event->header)) { event 42 tools/perf/util/mmap.c event = (union perf_event *)&data[*startp & map->core.mask]; event 43 tools/perf/util/mmap.c size = event->header.size; event 45 tools/perf/util/mmap.c if (size < sizeof(event->header) || diff < (int)size) event 54 tools/perf/util/mmap.c unsigned int len = min(sizeof(*event), size), cpy; event 65 tools/perf/util/mmap.c event = (union perf_event *)map->core.event_copy; event 71 tools/perf/util/mmap.c return event; event 88 tools/perf/util/mmap.c union perf_event *event; event 100 tools/perf/util/mmap.c event = perf_mmap__read(map, &map->core.start, map->core.end); event 105 tools/perf/util/mmap.c return event; event 40 tools/perf/util/namespaces.c struct namespaces *namespaces__new(struct perf_record_namespaces *event) event 43 tools/perf/util/namespaces.c u64 link_info_size = ((event ? event->nr_namespaces : NR_NAMESPACES) * event 52 tools/perf/util/namespaces.c if (event) event 53 tools/perf/util/namespaces.c memcpy(namespaces->link_info, event->link_info, link_info_size); event 28 tools/perf/util/namespaces.h struct namespaces *namespaces__new(struct perf_record_namespaces *event); event 65 tools/perf/util/ordered-events.c union perf_event *event) event 70 tools/perf/util/ordered-events.c new_event = memdup(event, event->header.size); event 72 tools/perf/util/ordered-events.c oe->cur_alloc_size += event->header.size; event 79 tools/perf/util/ordered-events.c union perf_event *event) event 81 tools/perf/util/ordered-events.c return oe->copy_on_queue ? 
__dup_event(oe, event) : event; event 84 tools/perf/util/ordered-events.c static void __free_dup_event(struct ordered_events *oe, union perf_event *event) event 86 tools/perf/util/ordered-events.c if (event) { event 87 tools/perf/util/ordered-events.c oe->cur_alloc_size -= event->header.size; event 88 tools/perf/util/ordered-events.c free(event); event 92 tools/perf/util/ordered-events.c static void free_dup_event(struct ordered_events *oe, union perf_event *event) event 95 tools/perf/util/ordered-events.c __free_dup_event(oe, event); event 100 tools/perf/util/ordered-events.c union perf_event *event) event 107 tools/perf/util/ordered-events.c new_event = dup_event(oe, event); event 144 tools/perf/util/ordered-events.c new = &oe->buffer->event[oe->buffer_idx]; event 161 tools/perf/util/ordered-events.c new = &oe->buffer->event[0]; event 167 tools/perf/util/ordered-events.c new->event = new_event; event 173 tools/perf/util/ordered-events.c union perf_event *event) event 177 tools/perf/util/ordered-events.c new = alloc_event(oe, event); event 186 tools/perf/util/ordered-events.c void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event) event 188 tools/perf/util/ordered-events.c list_move(&event->list, &oe->cache); event 190 tools/perf/util/ordered-events.c free_dup_event(oe, event->event); event 191 tools/perf/util/ordered-events.c event->event = NULL; event 194 tools/perf/util/ordered-events.c int ordered_events__queue(struct ordered_events *oe, union perf_event *event, event 210 tools/perf/util/ordered-events.c oevent = ordered_events__new_event(oe, timestamp, event); event 213 tools/perf/util/ordered-events.c oevent = ordered_events__new_event(oe, timestamp, event); event 351 tools/perf/util/ordered-events.c struct ordered_event *event; event 356 tools/perf/util/ordered-events.c event = list_first_entry(&oe->events, struct ordered_event, list); event 357 tools/perf/util/ordered-events.c return event->timestamp; event 380 tools/perf/util/ordered-events.c __free_dup_event(oe, buffer->event[i].event); event 12 tools/perf/util/ordered-events.h union perf_event *event; event 28 tools/perf/util/ordered-events.h struct ordered_event *event); event 32 tools/perf/util/ordered-events.h struct ordered_event event[0]; event 55 tools/perf/util/ordered-events.h int ordered_events__queue(struct ordered_events *oe, union perf_event *event, event 57 tools/perf/util/ordered-events.h void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event); event 635 tools/perf/util/parse-events.c static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj, event 655 tools/perf/util/parse-events.c group, event, fd); event 658 tools/perf/util/parse-events.c event, parse_state->error, event 664 tools/perf/util/parse-events.c group, event); event 671 tools/perf/util/parse-events.c pr_debug("adding %s:%s\n", group, event); event 675 tools/perf/util/parse-events.c group, event, pos); event 1246 tools/perf/util/parse-events.c const char *sys, const char *event, event 1259 tools/perf/util/parse-events.c return add_tracepoint_multi_sys(list, idx, sys, event, event 1262 tools/perf/util/parse-events.c return add_tracepoint_event(list, idx, sys, event, event 1976 tools/perf/util/parse-events.c const char *event) event 1980 tools/perf/util/parse-events.c char *buf = (char *) event; event 1986 tools/perf/util/parse-events.c int len_event = strlen(event); event 2009 tools/perf/util/parse-events.c strncpy(buf, event + cut, max_len); event 2399 
tools/perf/util/parse-events.c !strglobmatch(ent->pev.event, event_glob)) event 2402 tools/perf/util/parse-events.c ent->pev.event, nd->s); event 147 tools/perf/util/parse-events.h const char *sys, const char *event, event 221 tools/perf/util/parse-events.h const char *event); event 91 tools/perf/util/parse-events.y %type <head> event event 108 tools/perf/util/parse-events.y char *event; event 136 tools/perf/util/parse-events.y groups ',' event event 139 tools/perf/util/parse-events.y struct list_head *event = $3; event 141 tools/perf/util/parse-events.y parse_events_update_lists(event, list); event 147 tools/perf/util/parse-events.y event event 180 tools/perf/util/parse-events.y events ',' event event 182 tools/perf/util/parse-events.y struct list_head *event = $3; event 185 tools/perf/util/parse-events.y parse_events_update_lists(event, list); event 189 tools/perf/util/parse-events.y event event 191 tools/perf/util/parse-events.y event: event_mod event 443 tools/perf/util/parse-events.y tracepoint.event = $5; event 786 tools/perf/util/pmu.c (char *)pe->desc, (char *)pe->event, event 1322 tools/perf/util/probe-event.c pev->event = strdup_esc(*arg); event 1323 tools/perf/util/probe-event.c if (pev->event == NULL) event 1326 tools/perf/util/probe-event.c if (!pev->sdt && !is_c_func_name(pev->event)) { event 1327 tools/perf/util/probe-event.c zfree(&pev->event); event 1382 tools/perf/util/probe-event.c if (asprintf(&pev->point.function, "%%%s", pev->event) < 0) event 1767 tools/perf/util/probe-event.c tev->event = strdup(fmt3_str); event 1768 tools/perf/util/probe-event.c if (tev->group == NULL || tev->event == NULL) { event 1772 tools/perf/util/probe-event.c pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr); event 1947 tools/perf/util/probe-event.c if (pev->event) event 1949 tools/perf/util/probe-event.c pev->event) < 0) event 2057 tools/perf/util/probe-event.c tev->group, tev->event) < 0) event 2178 tools/perf/util/probe-event.c pev->event = strdup(tev->event); event 2180 tools/perf/util/probe-event.c if (pev->event == NULL || pev->group == NULL) event 2217 tools/perf/util/probe-event.c zfree(&pev->event); event 2288 tools/perf/util/probe-event.c dst->event = strdup_or_goto(src->event, out_err); event 2316 tools/perf/util/probe-event.c zfree(&tev->event); event 2446 tools/perf/util/probe-event.c static int perf_probe_event__sprintf(const char *group, const char *event, event 2454 tools/perf/util/probe-event.c if (asprintf(&buf, "%s:%s", group, event) < 0) event 2488 tools/perf/util/probe-event.c int show_perf_probe_event(const char *group, const char *event, event 2495 tools/perf/util/probe-event.c ret = perf_probe_event__sprintf(group, event, pev, module, &buf); event 2513 tools/perf/util/probe-event.c if (strfilter__compare(filter, tev->event)) event 2517 tools/perf/util/probe-event.c if (e_snprintf(tmp, 128, "%s:%s", tev->group, tev->event) < 0) event 2547 tools/perf/util/probe-event.c ret = show_perf_probe_event(pev.group, pev.event, event 2670 tools/perf/util/probe-event.c tev->group, tev->event); event 2694 tools/perf/util/probe-event.c const char *event, *group; event 2699 tools/perf/util/probe-event.c if (pev->event && !pev->sdt) event 2700 tools/perf/util/probe-event.c event = pev->event; event 2701 tools/perf/util/probe-event.c else if (tev->event) event 2702 tools/perf/util/probe-event.c event = tev->event; event 2708 tools/perf/util/probe-event.c event = pev->point.function; event 2710 tools/perf/util/probe-event.c event = tev->point.realname; event 2720 
tools/perf/util/probe-event.c ret = get_new_event_name(buf, 64, event, namelist, event 2725 tools/perf/util/probe-event.c event = buf; event 2727 tools/perf/util/probe-event.c tev->event = strdup(event); event 2729 tools/perf/util/probe-event.c if (tev->event == NULL || tev->group == NULL) event 2733 tools/perf/util/probe-event.c strlist__add(namelist, event); event 3098 tools/perf/util/probe-event.c if (pev->event) { event 3099 tools/perf/util/probe-event.c tev->event = strdup(pev->event); event 3100 tools/perf/util/probe-event.c if (!tev->event) event 3189 tools/perf/util/probe-event.c if (!entry->pev.event || !entry->pev.group) event 3192 tools/perf/util/probe-event.c strglobmatch(entry->pev.event, pev->event)) { event 53 tools/perf/util/probe-event.h char *event; /* Event name */ event 91 tools/perf/util/probe-event.h char *event; /* Event name */ event 169 tools/perf/util/probe-event.h int show_perf_probe_event(const char *group, const char *event, event 203 tools/perf/util/probe-file.c tev.event); event 207 tools/perf/util/probe-file.c ret = strlist__add(sl, tev.event); event 606 tools/perf/util/probe-file.c if (entry->pev.event && event 607 tools/perf/util/probe-file.c streql(entry->pev.event, pev->event) && event 615 tools/perf/util/probe-file.c if ((pev->event && event 617 tools/perf/util/probe-file.c streql(entry->pev.event, pev->event))) || event 630 tools/perf/util/probe-file.c const char *group, const char *event) event 637 tools/perf/util/probe-file.c streql(entry->pev.event, event)) event 849 tools/perf/util/probe-file.c entry->pev.event = strdup(note->name); event 941 tools/perf/util/probe-file.c if (entry->pev.event) { event 942 tools/perf/util/probe-file.c snprintf(buf, 128, "%s:%s", entry->pev.group, entry->pev.event); event 68 tools/perf/util/probe-file.h const char *group, const char *event); event 97 tools/perf/util/python.c offsetof(struct pyrf_event, event) + offsetof(struct type, member), \ event 109 tools/perf/util/python.c union perf_event event; event 146 tools/perf/util/python.c pevent->event.mmap.pid, pevent->event.mmap.tid, event 147 tools/perf/util/python.c pevent->event.mmap.start, pevent->event.mmap.len, event 148 tools/perf/util/python.c pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) { event 184 tools/perf/util/python.c pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit", event 185 tools/perf/util/python.c pevent->event.fork.pid, event 186 tools/perf/util/python.c pevent->event.fork.ppid, event 187 tools/perf/util/python.c pevent->event.fork.tid, event 188 tools/perf/util/python.c pevent->event.fork.ptid, event 189 tools/perf/util/python.c pevent->event.fork.time); event 216 tools/perf/util/python.c pevent->event.comm.pid, event 217 tools/perf/util/python.c pevent->event.comm.tid, event 218 tools/perf/util/python.c pevent->event.comm.comm); event 244 tools/perf/util/python.c struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1); event 248 tools/perf/util/python.c pevent->event.header.type == PERF_RECORD_THROTTLE ? 
"" : "un", event 278 tools/perf/util/python.c pevent->event.lost.id, pevent->event.lost.lost) < 0) { event 309 tools/perf/util/python.c pevent->event.read.pid, event 310 tools/perf/util/python.c pevent->event.read.tid); event 357 tools/perf/util/python.c struct tep_handle *pevent = field->event->tep; event 455 tools/perf/util/python.c pevent->event.context_switch.next_prev_pid, event 456 tools/perf/util/python.c pevent->event.context_switch.next_prev_tid, event 457 tools/perf/util/python.c !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) { event 529 tools/perf/util/python.c static PyObject *pyrf_event__new(union perf_event *event) event 534 tools/perf/util/python.c if ((event->header.type < PERF_RECORD_MMAP || event 535 tools/perf/util/python.c event->header.type > PERF_RECORD_SAMPLE) && event 536 tools/perf/util/python.c !(event->header.type == PERF_RECORD_SWITCH || event 537 tools/perf/util/python.c event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)) event 540 tools/perf/util/python.c ptype = pyrf_event__type[event->header.type]; event 543 tools/perf/util/python.c memcpy(&pevent->event, event, event->header.size); event 1011 tools/perf/util/python.c union perf_event *event; event 1028 tools/perf/util/python.c event = perf_mmap__read_event(md); event 1029 tools/perf/util/python.c if (event != NULL) { event 1030 tools/perf/util/python.c PyObject *pyevent = pyrf_event__new(event); event 1037 tools/perf/util/python.c evsel = perf_evlist__event2evsel(evlist, event); event 1045 tools/perf/util/python.c err = perf_evsel__parse_sample(evsel, event, &pevent->sample); event 514 tools/perf/util/s390-cpumsf.c union perf_event event; event 516 tools/perf/util/s390-cpumsf.c memset(&event, 0, sizeof(event)); event 531 tools/perf/util/s390-cpumsf.c event.sample.header.type = PERF_RECORD_SAMPLE; event 532 tools/perf/util/s390-cpumsf.c event.sample.header.misc = sample.cpumode; event 533 tools/perf/util/s390-cpumsf.c event.sample.header.size = sizeof(struct perf_event_header); event 538 tools/perf/util/s390-cpumsf.c if (perf_session__deliver_synth_event(sfq->sf->session, &event, event 889 tools/perf/util/s390-cpumsf.c union perf_event event; event 893 tools/perf/util/s390-cpumsf.c auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, event 896 tools/perf/util/s390-cpumsf.c err = perf_session__deliver_synth_event(sf->session, &event, NULL); event 912 tools/perf/util/s390-cpumsf.c union perf_event *event, event 932 tools/perf/util/s390-cpumsf.c if (event->header.type == PERF_RECORD_SAMPLE && event 935 tools/perf/util/s390-cpumsf.c ev_bc000 = perf_evlist__event2evsel(session->evlist, event); event 942 tools/perf/util/s390-cpumsf.c if (event->header.type == PERF_RECORD_AUX && event 943 tools/perf/util/s390-cpumsf.c event->aux.flags & PERF_AUX_FLAG_TRUNCATED) event 961 tools/perf/util/s390-cpumsf.c union perf_event *event __maybe_unused, event 984 tools/perf/util/s390-cpumsf.c err = auxtrace_queues__add_event(&sf->queues, session, event, event 1108 tools/perf/util/s390-cpumsf.c int s390_cpumsf_process_auxtrace_info(union perf_event *event, event 1111 tools/perf/util/s390-cpumsf.c struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; event 19 tools/perf/util/s390-cpumsf.h int s390_cpumsf_process_auxtrace_info(union perf_event *event, event 145 tools/perf/util/s390-sample-raw.c for (; evp->name || evp->event || evp->desc; ++evp) { event 146 tools/perf/util/s390-sample-raw.c if (evp->name == NULL || evp->event == NULL) event 148 
tools/perf/util/s390-sample-raw.c rc = sscanf(evp->event, "event=%x", &event_nr); event 200 tools/perf/util/s390-sample-raw.c void perf_evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, event 205 tools/perf/util/s390-sample-raw.c if (event->header.type != PERF_RECORD_SAMPLE) event 208 tools/perf/util/s390-sample-raw.c ev_bc000 = perf_evlist__event2evsel(evlist, event); event 10 tools/perf/util/sample-raw.h union perf_event *event, event 194 tools/perf/util/scripting-engines/trace-event-perl.c static void define_event_symbols(struct tep_event *event, event 214 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->flags.field); event 219 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->symbol.field); event 226 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->hex.field); event 227 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->hex.size); event 230 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->int_array.field); event 231 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->int_array.count); event 232 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->int_array.el_size); event 241 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->typecast.item); event 246 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->op.left); event 247 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->op.right); event 257 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, ev_name, args->next); event 343 tools/perf/util/scripting-engines/trace-event-perl.c struct tep_event *event = evsel->tp_format; event 359 tools/perf/util/scripting-engines/trace-event-perl.c if (!event) { event 364 tools/perf/util/scripting-engines/trace-event-perl.c pid = raw_field_value(event, "common_pid", data); event 366 tools/perf/util/scripting-engines/trace-event-perl.c sprintf(handler, "%s::%s", event->system, event->name); event 368 tools/perf/util/scripting-engines/trace-event-perl.c if (!test_and_set_bit(event->id, events_defined)) event 369 tools/perf/util/scripting-engines/trace-event-perl.c define_event_symbols(event, handler, event->print_fmt.args); event 392 tools/perf/util/scripting-engines/trace-event-perl.c for (field = event->format.fields; field; field = field->next) { event 402 tools/perf/util/scripting-engines/trace-event-perl.c val = read_size(event, data + field->offset, event 432 tools/perf/util/scripting-engines/trace-event-perl.c static void perl_process_event_generic(union perf_event *event, event 444 tools/perf/util/scripting-engines/trace-event-perl.c XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size))); event 456 tools/perf/util/scripting-engines/trace-event-perl.c static void perl_process_event(union perf_event *event, event 462 tools/perf/util/scripting-engines/trace-event-perl.c perl_process_event_generic(event, sample, evsel); event 544 tools/perf/util/scripting-engines/trace-event-perl.c struct tep_event *event = NULL; event 611 tools/perf/util/scripting-engines/trace-event-perl.c event = all_events[i]; event 612 tools/perf/util/scripting-engines/trace-event-perl.c fprintf(ofp, "sub %s::%s\n{\n", 
event->system, event->name); event 627 tools/perf/util/scripting-engines/trace-event-perl.c for (f = event->format.fields; f; f = f->next) { event 646 tools/perf/util/scripting-engines/trace-event-perl.c for (f = event->format.fields; f; f = f->next) { event 670 tools/perf/util/scripting-engines/trace-event-perl.c for (f = event->format.fields; f; f = f->next) { event 683 tools/perf/util/scripting-engines/trace-event-perl.c fprintf(ofp, "%s::%s\", ", event->system, event 684 tools/perf/util/scripting-engines/trace-event-perl.c event->name); event 693 tools/perf/util/scripting-engines/trace-event-perl.c fprintf(ofp, "%s::%s\", ", event->system, event 694 tools/perf/util/scripting-engines/trace-event-perl.c event->name); event 271 tools/perf/util/scripting-engines/trace-event-python.c static void define_event_symbols(struct tep_event *event, event 291 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->flags.field); event 298 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->symbol.field); event 305 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->hex.field); event 306 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->hex.size); event 309 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->int_array.field); event 310 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->int_array.count); event 311 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->int_array.el_size); event 316 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->typecast.item); event 321 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->op.left); event 322 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->op.right); event 336 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, ev_name, args->next); event 339 tools/perf/util/scripting-engines/trace-event-python.c static PyObject *get_field_numeric_entry(struct tep_event *event, event 358 tools/perf/util/scripting-engines/trace-event-python.c val = read_size(event, data + field->offset + i * item_size, event 796 tools/perf/util/scripting-engines/trace-event-python.c struct tep_event *event = evsel->tp_format; event 810 tools/perf/util/scripting-engines/trace-event-python.c if (!event) { event 816 tools/perf/util/scripting-engines/trace-event-python.c pid = raw_field_value(event, "common_pid", data); event 818 tools/perf/util/scripting-engines/trace-event-python.c sprintf(handler_name, "%s__%s", event->system, event->name); event 820 tools/perf/util/scripting-engines/trace-event-python.c if (!test_and_set_bit(event->id, events_defined)) event 821 tools/perf/util/scripting-engines/trace-event-python.c define_event_symbols(event, handler_name, event->print_fmt.args); event 869 tools/perf/util/scripting-engines/trace-event-python.c for (field = event->format.fields; field; field = field->next) { event 891 tools/perf/util/scripting-engines/trace-event-python.c obj = get_field_numeric_entry(event, field, data); event 1312 tools/perf/util/scripting-engines/trace-event-python.c static void python_process_event(union perf_event *event, event 1326 
tools/perf/util/scripting-engines/trace-event-python.c db_export__sample(&tables->dbe, event, sample, evsel, al); event 1332 tools/perf/util/scripting-engines/trace-event-python.c static void python_process_switch(union perf_event *event, event 1339 tools/perf/util/scripting-engines/trace-event-python.c db_export__switch(&tables->dbe, event, sample, machine); event 1692 tools/perf/util/scripting-engines/trace-event-python.c struct tep_event *event = NULL; event 1743 tools/perf/util/scripting-engines/trace-event-python.c event = all_events[i]; event 1744 tools/perf/util/scripting-engines/trace-event-python.c fprintf(ofp, "def %s__%s(", event->system, event->name); event 1757 tools/perf/util/scripting-engines/trace-event-python.c for (f = event->format.fields; f; f = f->next) { event 1782 tools/perf/util/scripting-engines/trace-event-python.c for (f = event->format.fields; f; f = f->next) { event 1807 tools/perf/util/scripting-engines/trace-event-python.c for (f = event->format.fields; f; f = f->next) { event 1820 tools/perf/util/scripting-engines/trace-event-python.c fprintf(ofp, "%s__%s\", ", event->system, event 1821 tools/perf/util/scripting-engines/trace-event-python.c event->name); event 1830 tools/perf/util/scripting-engines/trace-event-python.c fprintf(ofp, "%s__%s\", ", event->system, event 1831 tools/perf/util/scripting-engines/trace-event-python.c event->name); event 40 tools/perf/util/session.c union perf_event *event, u64 file_offset) event 70 tools/perf/util/session.c src = (void *)event + sizeof(struct perf_record_compressed); event 71 tools/perf/util/session.c src_size = event->pack.header.size - sizeof(struct perf_record_compressed); event 100 tools/perf/util/session.c union perf_event *event, event 178 tools/perf/util/session.c struct ordered_event *event) event 183 tools/perf/util/session.c return perf_session__deliver_event(session, event->event, event 184 tools/perf/util/session.c session->tool, event->file_offset); event 306 tools/perf/util/session.c union perf_event *event event 314 tools/perf/util/session.c union perf_event *event __maybe_unused, event 323 tools/perf/util/session.c union perf_event *event __maybe_unused, event 328 tools/perf/util/session.c perf_event__fprintf_event_update(event, stdout); event 335 tools/perf/util/session.c union perf_event *event __maybe_unused, event 345 tools/perf/util/session.c union perf_event *event __maybe_unused, event 354 tools/perf/util/session.c union perf_event *event __maybe_unused, event 362 tools/perf/util/session.c union perf_event *event, event 381 tools/perf/util/session.c union perf_event *event) event 385 tools/perf/util/session.c skipn(perf_data__fd(session->data), event->auxtrace.size); event 386 tools/perf/util/session.c return event->auxtrace.size; event 390 tools/perf/util/session.c union perf_event *event __maybe_unused) event 399 tools/perf/util/session.c union perf_event *event __maybe_unused) event 402 tools/perf/util/session.c perf_event__fprintf_thread_map(event, stdout); event 410 tools/perf/util/session.c union perf_event *event __maybe_unused) event 413 tools/perf/util/session.c perf_event__fprintf_cpu_map(event, stdout); event 421 tools/perf/util/session.c union perf_event *event __maybe_unused) event 424 tools/perf/util/session.c perf_event__fprintf_stat_config(event, stdout); event 431 tools/perf/util/session.c union perf_event *event) event 434 tools/perf/util/session.c perf_event__fprintf_stat(event, stdout); event 441 tools/perf/util/session.c union perf_event *event) event 444 
tools/perf/util/session.c perf_event__fprintf_stat_round(event, stdout); event 451 tools/perf/util/session.c union perf_event *event __maybe_unused, event 534 tools/perf/util/session.c static void swap_sample_id_all(union perf_event *event, void *data) event 536 tools/perf/util/session.c void *end = (void *) event + event->header.size; event 543 tools/perf/util/session.c static void perf_event__all64_swap(union perf_event *event, event 546 tools/perf/util/session.c struct perf_event_header *hdr = &event->header; event 547 tools/perf/util/session.c mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); event 550 tools/perf/util/session.c static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) event 552 tools/perf/util/session.c event->comm.pid = bswap_32(event->comm.pid); event 553 tools/perf/util/session.c event->comm.tid = bswap_32(event->comm.tid); event 556 tools/perf/util/session.c void *data = &event->comm.comm; event 559 tools/perf/util/session.c swap_sample_id_all(event, data); event 563 tools/perf/util/session.c static void perf_event__mmap_swap(union perf_event *event, event 566 tools/perf/util/session.c event->mmap.pid = bswap_32(event->mmap.pid); event 567 tools/perf/util/session.c event->mmap.tid = bswap_32(event->mmap.tid); event 568 tools/perf/util/session.c event->mmap.start = bswap_64(event->mmap.start); event 569 tools/perf/util/session.c event->mmap.len = bswap_64(event->mmap.len); event 570 tools/perf/util/session.c event->mmap.pgoff = bswap_64(event->mmap.pgoff); event 573 tools/perf/util/session.c void *data = &event->mmap.filename; event 576 tools/perf/util/session.c swap_sample_id_all(event, data); event 580 tools/perf/util/session.c static void perf_event__mmap2_swap(union perf_event *event, event 583 tools/perf/util/session.c event->mmap2.pid = bswap_32(event->mmap2.pid); event 584 tools/perf/util/session.c event->mmap2.tid = bswap_32(event->mmap2.tid); event 585 tools/perf/util/session.c event->mmap2.start = bswap_64(event->mmap2.start); event 586 tools/perf/util/session.c event->mmap2.len = bswap_64(event->mmap2.len); event 587 tools/perf/util/session.c event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); event 588 tools/perf/util/session.c event->mmap2.maj = bswap_32(event->mmap2.maj); event 589 tools/perf/util/session.c event->mmap2.min = bswap_32(event->mmap2.min); event 590 tools/perf/util/session.c event->mmap2.ino = bswap_64(event->mmap2.ino); event 593 tools/perf/util/session.c void *data = &event->mmap2.filename; event 596 tools/perf/util/session.c swap_sample_id_all(event, data); event 599 tools/perf/util/session.c static void perf_event__task_swap(union perf_event *event, bool sample_id_all) event 601 tools/perf/util/session.c event->fork.pid = bswap_32(event->fork.pid); event 602 tools/perf/util/session.c event->fork.tid = bswap_32(event->fork.tid); event 603 tools/perf/util/session.c event->fork.ppid = bswap_32(event->fork.ppid); event 604 tools/perf/util/session.c event->fork.ptid = bswap_32(event->fork.ptid); event 605 tools/perf/util/session.c event->fork.time = bswap_64(event->fork.time); event 608 tools/perf/util/session.c swap_sample_id_all(event, &event->fork + 1); event 611 tools/perf/util/session.c static void perf_event__read_swap(union perf_event *event, bool sample_id_all) event 613 tools/perf/util/session.c event->read.pid = bswap_32(event->read.pid); event 614 tools/perf/util/session.c event->read.tid = bswap_32(event->read.tid); event 615 tools/perf/util/session.c event->read.value = bswap_64(event->read.value); 
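
The util/session.c entries around here are the endian-swap handlers perf applies when a perf.data file was recorded on a machine of the opposite byte order: each handler bswap_32()s or bswap_64()s the fixed-width fields of one record type and, when sample_id_all is set, forwards the trailing sample-id block to swap_sample_id_all(). A minimal standalone sketch of that per-field pattern follows; demo_record and demo_record_swap are invented for illustration and are not part of perf.

  /* Illustrative only: swap every fixed-width field of a small demo record,
   * mirroring the shape of the perf_event__*_swap handlers listed here. */
  #include <byteswap.h>
  #include <stdint.h>

  struct demo_record {
          uint32_t pid, tid;      /* 32-bit fields use bswap_32() */
          uint64_t time;          /* 64-bit fields use bswap_64() */
  };

  static void demo_record_swap(struct demo_record *rec)
  {
          rec->pid  = bswap_32(rec->pid);
          rec->tid  = bswap_32(rec->tid);
          rec->time = bswap_64(rec->time);
  }
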
event 616 tools/perf/util/session.c event->read.time_enabled = bswap_64(event->read.time_enabled); event 617 tools/perf/util/session.c event->read.time_running = bswap_64(event->read.time_running); event 618 tools/perf/util/session.c event->read.id = bswap_64(event->read.id); event 621 tools/perf/util/session.c swap_sample_id_all(event, &event->read + 1); event 624 tools/perf/util/session.c static void perf_event__aux_swap(union perf_event *event, bool sample_id_all) event 626 tools/perf/util/session.c event->aux.aux_offset = bswap_64(event->aux.aux_offset); event 627 tools/perf/util/session.c event->aux.aux_size = bswap_64(event->aux.aux_size); event 628 tools/perf/util/session.c event->aux.flags = bswap_64(event->aux.flags); event 631 tools/perf/util/session.c swap_sample_id_all(event, &event->aux + 1); event 634 tools/perf/util/session.c static void perf_event__itrace_start_swap(union perf_event *event, event 637 tools/perf/util/session.c event->itrace_start.pid = bswap_32(event->itrace_start.pid); event 638 tools/perf/util/session.c event->itrace_start.tid = bswap_32(event->itrace_start.tid); event 641 tools/perf/util/session.c swap_sample_id_all(event, &event->itrace_start + 1); event 644 tools/perf/util/session.c static void perf_event__switch_swap(union perf_event *event, bool sample_id_all) event 646 tools/perf/util/session.c if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { event 647 tools/perf/util/session.c event->context_switch.next_prev_pid = event 648 tools/perf/util/session.c bswap_32(event->context_switch.next_prev_pid); event 649 tools/perf/util/session.c event->context_switch.next_prev_tid = event 650 tools/perf/util/session.c bswap_32(event->context_switch.next_prev_tid); event 654 tools/perf/util/session.c swap_sample_id_all(event, &event->context_switch + 1); event 657 tools/perf/util/session.c static void perf_event__throttle_swap(union perf_event *event, event 660 tools/perf/util/session.c event->throttle.time = bswap_64(event->throttle.time); event 661 tools/perf/util/session.c event->throttle.id = bswap_64(event->throttle.id); event 662 tools/perf/util/session.c event->throttle.stream_id = bswap_64(event->throttle.stream_id); event 665 tools/perf/util/session.c swap_sample_id_all(event, &event->throttle + 1); event 668 tools/perf/util/session.c static void perf_event__namespaces_swap(union perf_event *event, event 673 tools/perf/util/session.c event->namespaces.pid = bswap_32(event->namespaces.pid); event 674 tools/perf/util/session.c event->namespaces.tid = bswap_32(event->namespaces.tid); event 675 tools/perf/util/session.c event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces); event 677 tools/perf/util/session.c for (i = 0; i < event->namespaces.nr_namespaces; i++) { event 678 tools/perf/util/session.c struct perf_ns_link_info *ns = &event->namespaces.link_info[i]; event 685 tools/perf/util/session.c swap_sample_id_all(event, &event->namespaces.link_info[i]); event 765 tools/perf/util/session.c static void perf_event__hdr_attr_swap(union perf_event *event, event 770 tools/perf/util/session.c perf_event__attr_swap(&event->attr.attr); event 772 tools/perf/util/session.c size = event->header.size; event 773 tools/perf/util/session.c size -= (void *)&event->attr.id - (void *)event; event 774 tools/perf/util/session.c mem_bswap_64(event->attr.id, size); event 777 tools/perf/util/session.c static void perf_event__event_update_swap(union perf_event *event, event 780 tools/perf/util/session.c event->event_update.type = 
bswap_64(event->event_update.type); event 781 tools/perf/util/session.c event->event_update.id = bswap_64(event->event_update.id); event 784 tools/perf/util/session.c static void perf_event__event_type_swap(union perf_event *event, event 787 tools/perf/util/session.c event->event_type.event_type.event_id = event 788 tools/perf/util/session.c bswap_64(event->event_type.event_type.event_id); event 791 tools/perf/util/session.c static void perf_event__tracing_data_swap(union perf_event *event, event 794 tools/perf/util/session.c event->tracing_data.size = bswap_32(event->tracing_data.size); event 797 tools/perf/util/session.c static void perf_event__auxtrace_info_swap(union perf_event *event, event 802 tools/perf/util/session.c event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); event 804 tools/perf/util/session.c size = event->header.size; event 805 tools/perf/util/session.c size -= (void *)&event->auxtrace_info.priv - (void *)event; event 806 tools/perf/util/session.c mem_bswap_64(event->auxtrace_info.priv, size); event 809 tools/perf/util/session.c static void perf_event__auxtrace_swap(union perf_event *event, event 812 tools/perf/util/session.c event->auxtrace.size = bswap_64(event->auxtrace.size); event 813 tools/perf/util/session.c event->auxtrace.offset = bswap_64(event->auxtrace.offset); event 814 tools/perf/util/session.c event->auxtrace.reference = bswap_64(event->auxtrace.reference); event 815 tools/perf/util/session.c event->auxtrace.idx = bswap_32(event->auxtrace.idx); event 816 tools/perf/util/session.c event->auxtrace.tid = bswap_32(event->auxtrace.tid); event 817 tools/perf/util/session.c event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); event 820 tools/perf/util/session.c static void perf_event__auxtrace_error_swap(union perf_event *event, event 823 tools/perf/util/session.c event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); event 824 tools/perf/util/session.c event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); event 825 tools/perf/util/session.c event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); event 826 tools/perf/util/session.c event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); event 827 tools/perf/util/session.c event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); event 828 tools/perf/util/session.c event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt); event 829 tools/perf/util/session.c event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); event 830 tools/perf/util/session.c if (event->auxtrace_error.fmt) event 831 tools/perf/util/session.c event->auxtrace_error.time = bswap_64(event->auxtrace_error.time); event 834 tools/perf/util/session.c static void perf_event__thread_map_swap(union perf_event *event, event 839 tools/perf/util/session.c event->thread_map.nr = bswap_64(event->thread_map.nr); event 841 tools/perf/util/session.c for (i = 0; i < event->thread_map.nr; i++) event 842 tools/perf/util/session.c event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid); event 845 tools/perf/util/session.c static void perf_event__cpu_map_swap(union perf_event *event, event 848 tools/perf/util/session.c struct perf_record_cpu_map_data *data = &event->cpu_map.data; event 881 tools/perf/util/session.c static void perf_event__stat_config_swap(union perf_event *event, event 886 tools/perf/util/session.c size = event->stat_config.nr * sizeof(event->stat_config.data[0]); event 888 tools/perf/util/session.c mem_bswap_64(&event->stat_config.nr, size); event 
891 tools/perf/util/session.c static void perf_event__stat_swap(union perf_event *event, event 894 tools/perf/util/session.c event->stat.id = bswap_64(event->stat.id); event 895 tools/perf/util/session.c event->stat.thread = bswap_32(event->stat.thread); event 896 tools/perf/util/session.c event->stat.cpu = bswap_32(event->stat.cpu); event 897 tools/perf/util/session.c event->stat.val = bswap_64(event->stat.val); event 898 tools/perf/util/session.c event->stat.ena = bswap_64(event->stat.ena); event 899 tools/perf/util/session.c event->stat.run = bswap_64(event->stat.run); event 902 tools/perf/util/session.c static void perf_event__stat_round_swap(union perf_event *event, event 905 tools/perf/util/session.c event->stat_round.type = bswap_64(event->stat_round.type); event 906 tools/perf/util/session.c event->stat_round.time = bswap_64(event->stat_round.time); event 909 tools/perf/util/session.c typedef void (*perf_event__swap_op)(union perf_event *event, event 987 tools/perf/util/session.c union perf_event *event __maybe_unused, event 995 tools/perf/util/session.c int perf_session__queue_event(struct perf_session *s, union perf_event *event, event 998 tools/perf/util/session.c return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset); event 1152 tools/perf/util/session.c union perf_event *event, event 1157 tools/perf/util/session.c if (event->header.type != PERF_RECORD_SAMPLE && event 1200 tools/perf/util/session.c static void dump_event(struct evlist *evlist, union perf_event *event, event 1207 tools/perf/util/session.c file_offset, event->header.size, event->header.type); event 1209 tools/perf/util/session.c trace_event(event); event 1210 tools/perf/util/session.c if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw) event 1211 tools/perf/util/session.c evlist->trace_event_sample_raw(evlist, event, sample); event 1214 tools/perf/util/session.c perf_evlist__print_tstamp(evlist, event, sample); event 1217 tools/perf/util/session.c event->header.size, perf_event__name(event->header.type)); event 1220 tools/perf/util/session.c static void dump_sample(struct evsel *evsel, union perf_event *event, event 1229 tools/perf/util/session.c event->header.misc, sample->pid, sample->tid, sample->ip, event 1265 tools/perf/util/session.c static void dump_read(struct evsel *evsel, union perf_event *event) event 1267 tools/perf/util/session.c struct perf_record_read *read_event = &event->read; event 1273 tools/perf/util/session.c printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid, event 1275 tools/perf/util/session.c event->read.value); event 1293 tools/perf/util/session.c union perf_event *event, event 1303 tools/perf/util/session.c if (event->header.type == PERF_RECORD_MMAP event 1304 tools/perf/util/session.c || event->header.type == PERF_RECORD_MMAP2) event 1305 tools/perf/util/session.c pid = event->mmap.pid; event 1320 tools/perf/util/session.c union perf_event *event, event 1347 tools/perf/util/session.c return tool->sample(tool, event, sample, evsel, machine); event 1352 tools/perf/util/session.c union perf_event *event, event 1360 tools/perf/util/session.c ret = deliver_sample_value(evlist, tool, event, sample, event 1373 tools/perf/util/session.c union perf_event *event, event 1384 tools/perf/util/session.c return tool->sample(tool, event, sample, evsel, machine); event 1388 tools/perf/util/session.c return deliver_sample_group(evlist, tool, event, sample, event 1391 tools/perf/util/session.c return deliver_sample_value(evlist, 
tool, event, sample, event 1397 tools/perf/util/session.c union perf_event *event, event 1404 tools/perf/util/session.c dump_event(evlist, event, file_offset, sample); event 1408 tools/perf/util/session.c machine = machines__find_for_cpumode(machines, event, sample); event 1410 tools/perf/util/session.c switch (event->header.type) { event 1416 tools/perf/util/session.c dump_sample(evsel, event, sample); event 1421 tools/perf/util/session.c return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); event 1423 tools/perf/util/session.c return tool->mmap(tool, event, sample, machine); event 1425 tools/perf/util/session.c if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) event 1427 tools/perf/util/session.c return tool->mmap2(tool, event, sample, machine); event 1429 tools/perf/util/session.c return tool->comm(tool, event, sample, machine); event 1431 tools/perf/util/session.c return tool->namespaces(tool, event, sample, machine); event 1433 tools/perf/util/session.c return tool->fork(tool, event, sample, machine); event 1435 tools/perf/util/session.c return tool->exit(tool, event, sample, machine); event 1438 tools/perf/util/session.c evlist->stats.total_lost += event->lost.lost; event 1439 tools/perf/util/session.c return tool->lost(tool, event, sample, machine); event 1442 tools/perf/util/session.c evlist->stats.total_lost_samples += event->lost_samples.lost; event 1443 tools/perf/util/session.c return tool->lost_samples(tool, event, sample, machine); event 1445 tools/perf/util/session.c dump_read(evsel, event); event 1446 tools/perf/util/session.c return tool->read(tool, event, sample, evsel, machine); event 1448 tools/perf/util/session.c return tool->throttle(tool, event, sample, machine); event 1450 tools/perf/util/session.c return tool->unthrottle(tool, event, sample, machine); event 1453 tools/perf/util/session.c if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) event 1455 tools/perf/util/session.c if (event->aux.flags & PERF_AUX_FLAG_PARTIAL) event 1458 tools/perf/util/session.c return tool->aux(tool, event, sample, machine); event 1460 tools/perf/util/session.c return tool->itrace_start(tool, event, sample, machine); event 1463 tools/perf/util/session.c return tool->context_switch(tool, event, sample, machine); event 1465 tools/perf/util/session.c return tool->ksymbol(tool, event, sample, machine); event 1467 tools/perf/util/session.c return tool->bpf(tool, event, sample, machine); event 1475 tools/perf/util/session.c union perf_event *event, event 1482 tools/perf/util/session.c ret = perf_evlist__parse_sample(session->evlist, event, &sample); event 1488 tools/perf/util/session.c ret = auxtrace__process_event(session, event, &sample, tool); event 1495 tools/perf/util/session.c event, &sample, tool, file_offset); event 1499 tools/perf/util/session.c union perf_event *event, event 1508 tools/perf/util/session.c if (event->header.type != PERF_RECORD_COMPRESSED || event 1510 tools/perf/util/session.c dump_event(session->evlist, event, file_offset, &sample); event 1513 tools/perf/util/session.c switch (event->header.type) { event 1515 tools/perf/util/session.c err = tool->attr(tool, event, &session->evlist); event 1522 tools/perf/util/session.c return tool->event_update(tool, event, &session->evlist); event 1532 tools/perf/util/session.c return tool->tracing_data(session, event); event 1534 tools/perf/util/session.c return tool->build_id(session, event); event 1536 tools/perf/util/session.c return tool->finished_round(tool, event, oe); event 1538 
tools/perf/util/session.c return tool->id_index(session, event); event 1540 tools/perf/util/session.c return tool->auxtrace_info(session, event); event 1543 tools/perf/util/session.c lseek(fd, file_offset + event->header.size, SEEK_SET); event 1544 tools/perf/util/session.c return tool->auxtrace(session, event); event 1546 tools/perf/util/session.c perf_session__auxtrace_error_inc(session, event); event 1547 tools/perf/util/session.c return tool->auxtrace_error(session, event); event 1549 tools/perf/util/session.c return tool->thread_map(session, event); event 1551 tools/perf/util/session.c return tool->cpu_map(session, event); event 1553 tools/perf/util/session.c return tool->stat_config(session, event); event 1555 tools/perf/util/session.c return tool->stat(session, event); event 1557 tools/perf/util/session.c return tool->stat_round(session, event); event 1559 tools/perf/util/session.c session->time_conv = event->time_conv; event 1560 tools/perf/util/session.c return tool->time_conv(session, event); event 1562 tools/perf/util/session.c return tool->feature(session, event); event 1564 tools/perf/util/session.c err = tool->compressed(session, event, file_offset); event 1566 tools/perf/util/session.c dump_event(session->evlist, event, file_offset, &sample); event 1574 tools/perf/util/session.c union perf_event *event, event 1580 tools/perf/util/session.c events_stats__inc(&evlist->stats, event->header.type); event 1582 tools/perf/util/session.c if (event->header.type >= PERF_RECORD_USER_TYPE_START) event 1583 tools/perf/util/session.c return perf_session__process_user_event(session, event, 0); event 1585 tools/perf/util/session.c return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); event 1588 tools/perf/util/session.c static void event_swap(union perf_event *event, bool sample_id_all) event 1592 tools/perf/util/session.c swap = perf_event__swap_ops[event->header.type]; event 1594 tools/perf/util/session.c swap(event, sample_id_all); event 1602 tools/perf/util/session.c union perf_event *event; event 1607 tools/perf/util/session.c event = file_offset - session->one_mmap_offset + event 1625 tools/perf/util/session.c event = (union perf_event *)buf; event 1628 tools/perf/util/session.c perf_event_header__bswap(&event->header); event 1630 tools/perf/util/session.c if (event->header.size < hdr_sz || event->header.size > buf_sz) event 1633 tools/perf/util/session.c rest = event->header.size - hdr_sz; event 1639 tools/perf/util/session.c event_swap(event, perf_evlist__sample_id_all(session->evlist)); event 1643 tools/perf/util/session.c if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && event 1644 tools/perf/util/session.c perf_evlist__parse_sample(session->evlist, event, sample)) event 1647 tools/perf/util/session.c *event_ptr = event; event 1653 tools/perf/util/session.c union perf_event *event, u64 file_offset) event 1660 tools/perf/util/session.c event_swap(event, perf_evlist__sample_id_all(evlist)); event 1662 tools/perf/util/session.c if (event->header.type >= PERF_RECORD_HEADER_MAX) event 1665 tools/perf/util/session.c events_stats__inc(&evlist->stats, event->header.type); event 1667 tools/perf/util/session.c if (event->header.type >= PERF_RECORD_USER_TYPE_START) event 1668 tools/perf/util/session.c return perf_session__process_user_event(session, event, file_offset); event 1673 tools/perf/util/session.c ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp); event 1677 tools/perf/util/session.c ret = perf_session__queue_event(session,
event, timestamp, file_offset); event 1682 tools/perf/util/session.c return perf_session__deliver_event(session, event, tool, file_offset); event 1857 tools/perf/util/session.c union perf_event *event; event 1875 tools/perf/util/session.c event = buf; event 1876 tools/perf/util/session.c err = readn(fd, event, sizeof(struct perf_event_header)); event 1886 tools/perf/util/session.c perf_event_header__bswap(&event->header); event 1888 tools/perf/util/session.c size = event->header.size; event 1902 tools/perf/util/session.c event = buf; event 1904 tools/perf/util/session.c p = event; event 1920 tools/perf/util/session.c if ((skip = perf_session__process_event(session, event, head)) < 0) { event 1922 tools/perf/util/session.c head, event->header.size, event->header.type); event 1960 tools/perf/util/session.c union perf_event *event; event 1966 tools/perf/util/session.c if (head + sizeof(event->header) > mmap_size) event 1969 tools/perf/util/session.c event = (union perf_event *)(buf + head); event 1971 tools/perf/util/session.c perf_event_header__bswap(&event->header); event 1973 tools/perf/util/session.c if (head + event->header.size <= mmap_size) event 1974 tools/perf/util/session.c return event; event 1978 tools/perf/util/session.c perf_event_header__bswap(&event->header); event 1981 tools/perf/util/session.c " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size); event 2008 tools/perf/util/session.c union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data, event 2011 tools/perf/util/session.c if (!event) event 2014 tools/perf/util/session.c size = event->header.size; event 2017 tools/perf/util/session.c (skip = perf_session__process_event(session, event, file_pos)) < 0) { event 2019 tools/perf/util/session.c decomp->file_pos + decomp->head, event->header.size, event->header.type); event 2047 tools/perf/util/session.c union perf_event *event, event 2066 tools/perf/util/session.c union perf_event *event; event 2109 tools/perf/util/session.c event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap); event 2110 tools/perf/util/session.c if (IS_ERR(event)) event 2111 tools/perf/util/session.c return PTR_ERR(event); event 2113 tools/perf/util/session.c if (!event) { event 2125 tools/perf/util/session.c size = event->header.size; event 2130 tools/perf/util/session.c (skip = rd->process(session, event, file_pos)) < 0) { event 2132 tools/perf/util/session.c file_offset + head, event->header.size, event 2133 tools/perf/util/session.c event->header.type, strerror(-skip)); event 2161 tools/perf/util/session.c union perf_event *event, event 2164 tools/perf/util/session.c return perf_session__process_event(session, event, file_offset); event 2398 tools/perf/util/session.c union perf_event *event) event 2401 tools/perf/util/session.c struct perf_record_id_index *ie = &event->id_index; event 70 tools/perf/util/session.h int perf_session__queue_event(struct perf_session *s, union perf_event *event, event 135 tools/perf/util/session.h union perf_event *event, event 139 tools/perf/util/session.h union perf_event *event); event 2289 tools/perf/util/sort.c static int parse_field_name(char *str, char **event, char **field, char **opt) event 2307 tools/perf/util/sort.c *event = event_name; event 404 tools/perf/util/stat.c union perf_event *event) event 407 tools/perf/util/stat.c struct perf_record_stat *st = &event->stat; event 425 tools/perf/util/stat.c size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp) event 427 
tools/perf/util/stat.c struct perf_record_stat *st = (struct perf_record_stat *)event; event 438 tools/perf/util/stat.c size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp) event 440 tools/perf/util/stat.c struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event; event 449 tools/perf/util/stat.c size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp) event 454 tools/perf/util/stat.c perf_event__read_stat_config(&sc, &event->stat_config); event 206 tools/perf/util/stat.h union perf_event *event); event 208 tools/perf/util/stat.h size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp); event 209 tools/perf/util/stat.h size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp); event 210 tools/perf/util/stat.h size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp); event 49 tools/perf/util/synthetic-events.c union perf_event *event, event 60 tools/perf/util/synthetic-events.c .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK, event 63 tools/perf/util/synthetic-events.c return process(tool, event, &synth_sample, machine); event 138 tools/perf/util/synthetic-events.c static int perf_event__prepare_comm(union perf_event *event, pid_t pid, event 146 tools/perf/util/synthetic-events.c memset(&event->comm, 0, sizeof(event->comm)); event 149 tools/perf/util/synthetic-events.c if (perf_event__get_comm_ids(pid, event->comm.comm, event 150 tools/perf/util/synthetic-events.c sizeof(event->comm.comm), event 161 tools/perf/util/synthetic-events.c event->comm.pid = *tgid; event 162 tools/perf/util/synthetic-events.c event->comm.header.type = PERF_RECORD_COMM; event 164 tools/perf/util/synthetic-events.c size = strlen(event->comm.comm) + 1; event 166 tools/perf/util/synthetic-events.c memset(event->comm.comm + size, 0, machine->id_hdr_size); event 167 tools/perf/util/synthetic-events.c event->comm.header.size = (sizeof(event->comm) - event 168 tools/perf/util/synthetic-events.c (sizeof(event->comm.comm) - size) + event 170 tools/perf/util/synthetic-events.c event->comm.tid = pid; event 176 tools/perf/util/synthetic-events.c union perf_event *event, pid_t pid, event 182 tools/perf/util/synthetic-events.c if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) event 185 tools/perf/util/synthetic-events.c if (perf_tool__process_synth_event(tool, event, machine, process) != 0) event 205 tools/perf/util/synthetic-events.c union perf_event *event, event 216 tools/perf/util/synthetic-events.c memset(&event->namespaces, 0, (sizeof(event->namespaces) + event 220 tools/perf/util/synthetic-events.c event->namespaces.pid = tgid; event 221 tools/perf/util/synthetic-events.c event->namespaces.tid = pid; event 223 tools/perf/util/synthetic-events.c event->namespaces.nr_namespaces = NR_NAMESPACES; event 225 tools/perf/util/synthetic-events.c ns_link_info = event->namespaces.link_info; event 227 tools/perf/util/synthetic-events.c for (idx = 0; idx < event->namespaces.nr_namespaces; idx++) event 231 tools/perf/util/synthetic-events.c event->namespaces.header.type = PERF_RECORD_NAMESPACES; event 233 tools/perf/util/synthetic-events.c event->namespaces.header.size = (sizeof(event->namespaces) + event 237 tools/perf/util/synthetic-events.c if (perf_tool__process_synth_event(tool, event, machine, process) != 0) event 244 tools/perf/util/synthetic-events.c union perf_event *event, event 249 tools/perf/util/synthetic-events.c memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); event 257 
tools/perf/util/synthetic-events.c event->fork.ppid = ppid; event 258 tools/perf/util/synthetic-events.c event->fork.ptid = ppid; event 260 tools/perf/util/synthetic-events.c event->fork.ppid = tgid; event 261 tools/perf/util/synthetic-events.c event->fork.ptid = tgid; event 263 tools/perf/util/synthetic-events.c event->fork.pid = tgid; event 264 tools/perf/util/synthetic-events.c event->fork.tid = pid; event 265 tools/perf/util/synthetic-events.c event->fork.header.type = PERF_RECORD_FORK; event 266 tools/perf/util/synthetic-events.c event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC; event 268 tools/perf/util/synthetic-events.c event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); event 270 tools/perf/util/synthetic-events.c if (perf_tool__process_synth_event(tool, event, machine, process) != 0) event 277 tools/perf/util/synthetic-events.c union perf_event *event, event 307 tools/perf/util/synthetic-events.c event->header.type = PERF_RECORD_MMAP2; event 336 tools/perf/util/synthetic-events.c &event->mmap2.start, &event->mmap2.len, prot, event 337 tools/perf/util/synthetic-events.c &event->mmap2.pgoff, &event->mmap2.maj, event 338 tools/perf/util/synthetic-events.c &event->mmap2.min, event 347 tools/perf/util/synthetic-events.c event->mmap2.ino = (u64)ino; event 353 tools/perf/util/synthetic-events.c event->header.misc = PERF_RECORD_MISC_USER; event 355 tools/perf/util/synthetic-events.c event->header.misc = PERF_RECORD_MISC_GUEST_USER; event 358 tools/perf/util/synthetic-events.c event->mmap2.prot = 0; event 359 tools/perf/util/synthetic-events.c event->mmap2.flags = 0; event 361 tools/perf/util/synthetic-events.c event->mmap2.prot |= PROT_READ; event 363 tools/perf/util/synthetic-events.c event->mmap2.prot |= PROT_WRITE; event 365 tools/perf/util/synthetic-events.c event->mmap2.prot |= PROT_EXEC; event 368 tools/perf/util/synthetic-events.c event->mmap2.flags |= MAP_SHARED; event 370 tools/perf/util/synthetic-events.c event->mmap2.flags |= MAP_PRIVATE; event 376 tools/perf/util/synthetic-events.c event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; event 381 tools/perf/util/synthetic-events.c event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; event 389 tools/perf/util/synthetic-events.c event->mmap2.flags |= MAP_HUGETLB; event 393 tools/perf/util/synthetic-events.c memcpy(event->mmap2.filename, execname, size); event 395 tools/perf/util/synthetic-events.c event->mmap2.len -= event->mmap.start; event 396 tools/perf/util/synthetic-events.c event->mmap2.header.size = (sizeof(event->mmap2) - event 397 tools/perf/util/synthetic-events.c (sizeof(event->mmap2.filename) - size)); event 398 tools/perf/util/synthetic-events.c memset(event->mmap2.filename + size, 0, machine->id_hdr_size); event 399 tools/perf/util/synthetic-events.c event->mmap2.header.size += machine->id_hdr_size; event 400 tools/perf/util/synthetic-events.c event->mmap2.pid = tgid; event 401 tools/perf/util/synthetic-events.c event->mmap2.tid = pid; event 403 tools/perf/util/synthetic-events.c if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { event 422 tools/perf/util/synthetic-events.c union perf_event *event = zalloc((sizeof(event->mmap) + event 424 tools/perf/util/synthetic-events.c if (event == NULL) { event 430 tools/perf/util/synthetic-events.c event->header.type = PERF_RECORD_MMAP; event 437 tools/perf/util/synthetic-events.c event->header.misc = PERF_RECORD_MISC_KERNEL; event 439 tools/perf/util/synthetic-events.c event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 
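
The util/synthetic-events.c entries listed here all follow the same synthesize-and-deliver shape: build a record in memory (zalloc/memset, set header.type, header.misc and header.size, copy the payload), then hand it to the caller's process callback via perf_tool__process_synth_event() and free it. A self-contained sketch of that shape, using made-up demo_* types and a made-up callback rather than the real perf structures, might look like this:

  #include <stdint.h>
  #include <stdlib.h>
  #include <string.h>

  struct demo_header { uint32_t type, size; };
  struct demo_mmap   { struct demo_header header; uint64_t start, len; char filename[64]; };

  typedef int (*demo_handler_t)(struct demo_mmap *ev);

  /* Build one synthetic "mmap-like" record and deliver it to the consumer. */
  static int demo_synthesize_mmap(uint64_t start, uint64_t len,
                                  const char *name, demo_handler_t process)
  {
          struct demo_mmap *ev = calloc(1, sizeof(*ev));
          int err;

          if (!ev)
                  return -1;

          ev->header.type = 1;                 /* stand-in for PERF_RECORD_MMAP */
          ev->header.size = sizeof(*ev);
          ev->start = start;
          ev->len = len;
          strncpy(ev->filename, name, sizeof(ev->filename) - 1);

          err = process(ev);                   /* caller-supplied delivery hook */
          free(ev);
          return err;
  }
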
event 448 tools/perf/util/synthetic-events.c event->mmap.header.type = PERF_RECORD_MMAP; event 449 tools/perf/util/synthetic-events.c event->mmap.header.size = (sizeof(event->mmap) - event 450 tools/perf/util/synthetic-events.c (sizeof(event->mmap.filename) - size)); event 451 tools/perf/util/synthetic-events.c memset(event->mmap.filename + size, 0, machine->id_hdr_size); event 452 tools/perf/util/synthetic-events.c event->mmap.header.size += machine->id_hdr_size; event 453 tools/perf/util/synthetic-events.c event->mmap.start = pos->start; event 454 tools/perf/util/synthetic-events.c event->mmap.len = pos->end - pos->start; event 455 tools/perf/util/synthetic-events.c event->mmap.pid = machine->pid; event 457 tools/perf/util/synthetic-events.c memcpy(event->mmap.filename, pos->dso->long_name, event 459 tools/perf/util/synthetic-events.c if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { event 465 tools/perf/util/synthetic-events.c free(event); event 821 tools/perf/util/synthetic-events.c union perf_event *event; event 835 tools/perf/util/synthetic-events.c event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); event 836 tools/perf/util/synthetic-events.c if (event == NULL) { event 847 tools/perf/util/synthetic-events.c event->header.misc = PERF_RECORD_MISC_KERNEL; event 849 tools/perf/util/synthetic-events.c event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; event 852 tools/perf/util/synthetic-events.c size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), event 855 tools/perf/util/synthetic-events.c event->mmap.header.type = PERF_RECORD_MMAP; event 856 tools/perf/util/synthetic-events.c event->mmap.header.size = (sizeof(event->mmap) - event 857 tools/perf/util/synthetic-events.c (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); event 858 tools/perf/util/synthetic-events.c event->mmap.pgoff = kmap->ref_reloc_sym->addr; event 859 tools/perf/util/synthetic-events.c event->mmap.start = map->start; event 860 tools/perf/util/synthetic-events.c event->mmap.len = map->end - event->mmap.start; event 861 tools/perf/util/synthetic-events.c event->mmap.pid = machine->pid; event 863 tools/perf/util/synthetic-events.c err = perf_tool__process_synth_event(tool, event, machine, process); event 864 tools/perf/util/synthetic-events.c free(event); event 887 tools/perf/util/synthetic-events.c union perf_event *event; event 890 tools/perf/util/synthetic-events.c size = sizeof(event->thread_map); event 891 tools/perf/util/synthetic-events.c size += threads->nr * sizeof(event->thread_map.entries[0]); event 893 tools/perf/util/synthetic-events.c event = zalloc(size); event 894 tools/perf/util/synthetic-events.c if (!event) event 897 tools/perf/util/synthetic-events.c event->header.type = PERF_RECORD_THREAD_MAP; event 898 tools/perf/util/synthetic-events.c event->header.size = size; event 899 tools/perf/util/synthetic-events.c event->thread_map.nr = threads->nr; event 902 tools/perf/util/synthetic-events.c struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i]; event 912 tools/perf/util/synthetic-events.c err = process(tool, event, NULL, machine); event 914 tools/perf/util/synthetic-events.c free(event); event 1016 tools/perf/util/synthetic-events.c struct perf_record_cpu_map *event; event 1020 tools/perf/util/synthetic-events.c event = cpu_map_data__alloc(map, &size, &type, &max); event 1021 tools/perf/util/synthetic-events.c if (!event) event 1024 tools/perf/util/synthetic-events.c event->header.type = PERF_RECORD_CPU_MAP; event 1025 
tools/perf/util/synthetic-events.c event->header.size = size; event 1026 tools/perf/util/synthetic-events.c event->data.type = type; event 1028 tools/perf/util/synthetic-events.c cpu_map_data__synthesize(&event->data, map, type, max); event 1029 tools/perf/util/synthetic-events.c return event; event 1037 tools/perf/util/synthetic-events.c struct perf_record_cpu_map *event; event 1040 tools/perf/util/synthetic-events.c event = cpu_map_event__new(map); event 1041 tools/perf/util/synthetic-events.c if (!event) event 1044 tools/perf/util/synthetic-events.c err = process(tool, (union perf_event *) event, NULL, machine); event 1046 tools/perf/util/synthetic-events.c free(event); event 1055 tools/perf/util/synthetic-events.c struct perf_record_stat_config *event; event 1058 tools/perf/util/synthetic-events.c size = sizeof(*event); event 1059 tools/perf/util/synthetic-events.c size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0])); event 1061 tools/perf/util/synthetic-events.c event = zalloc(size); event 1062 tools/perf/util/synthetic-events.c if (!event) event 1065 tools/perf/util/synthetic-events.c event->header.type = PERF_RECORD_STAT_CONFIG; event 1066 tools/perf/util/synthetic-events.c event->header.size = size; event 1067 tools/perf/util/synthetic-events.c event->nr = PERF_STAT_CONFIG_TERM__MAX; event 1070 tools/perf/util/synthetic-events.c event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \ event 1071 tools/perf/util/synthetic-events.c event->data[i].val = __val; \ event 1082 tools/perf/util/synthetic-events.c err = process(tool, (union perf_event *) event, NULL, machine); event 1084 tools/perf/util/synthetic-events.c free(event); event 1094 tools/perf/util/synthetic-events.c struct perf_record_stat event; event 1096 tools/perf/util/synthetic-events.c event.header.type = PERF_RECORD_STAT; event 1097 tools/perf/util/synthetic-events.c event.header.size = sizeof(event); event 1098 tools/perf/util/synthetic-events.c event.header.misc = 0; event 1100 tools/perf/util/synthetic-events.c event.id = id; event 1101 tools/perf/util/synthetic-events.c event.cpu = cpu; event 1102 tools/perf/util/synthetic-events.c event.thread = thread; event 1103 tools/perf/util/synthetic-events.c event.val = count->val; event 1104 tools/perf/util/synthetic-events.c event.ena = count->ena; event 1105 tools/perf/util/synthetic-events.c event.run = count->run; event 1107 tools/perf/util/synthetic-events.c return process(tool, (union perf_event *) &event, NULL, machine); event 1115 tools/perf/util/synthetic-events.c struct perf_record_stat_round event; event 1117 tools/perf/util/synthetic-events.c event.header.type = PERF_RECORD_STAT_ROUND; event 1118 tools/perf/util/synthetic-events.c event.header.size = sizeof(event); event 1119 tools/perf/util/synthetic-events.c event.header.misc = 0; event 1121 tools/perf/util/synthetic-events.c event.time = evtime; event 1122 tools/perf/util/synthetic-events.c event.type = type; event 1124 tools/perf/util/synthetic-events.c return process(tool, (union perf_event *) &event, NULL, machine); event 1234 tools/perf/util/synthetic-events.c int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, event 1245 tools/perf/util/synthetic-events.c array = event->sample.array; event 29 tools/perf/util/synthetic-events.h typedef int (*perf_event__handler_t)(struct perf_tool *tool, union perf_event *event, event 45 tools/perf/util/synthetic-events.h int perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, 
perf_event__handler_t process, struct machine *machine, bool mmap_data); event 47 tools/perf/util/synthetic-events.h int perf_event__synthesize_namespaces(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine); event 48 tools/perf/util/synthetic-events.h int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, const struct perf_sample *sample); event 58 tools/perf/util/synthetic-events.h pid_t perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine); event 60 tools/perf/util/synthetic-events.h int perf_tool__process_synth_event(struct perf_tool *tool, union perf_event *event, struct machine *machine, perf_event__handler_t process); event 172 tools/perf/util/thread.c struct perf_record_namespaces *event) event 176 tools/perf/util/thread.c new = namespaces__new(event); event 196 tools/perf/util/thread.c struct perf_record_namespaces *event) event 201 tools/perf/util/thread.c ret = __thread__set_namespaces(thread, timestamp, event); event 77 tools/perf/util/thread.h struct perf_record_namespaces *event); event 372 tools/perf/util/thread_map.c struct perf_record_thread_map *event) event 376 tools/perf/util/thread_map.c threads->nr = (int) event->nr; event 378 tools/perf/util/thread_map.c for (i = 0; i < event->nr; i++) { event 379 tools/perf/util/thread_map.c perf_thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid); event 380 tools/perf/util/thread_map.c threads->map[i].comm = strndup(event->entries[i].comm, 16); event 386 tools/perf/util/thread_map.c struct perf_thread_map *thread_map__new_event(struct perf_record_thread_map *event) event 390 tools/perf/util/thread_map.c threads = thread_map__alloc(event->nr); event 392 tools/perf/util/thread_map.c thread_map__copy_event(threads, event); event 19 tools/perf/util/thread_map.h struct perf_thread_map *thread_map__new_event(struct perf_record_thread_map *event); event 18 tools/perf/util/tool.h typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event, event 22 tools/perf/util/tool.h typedef int (*event_op)(struct perf_tool *tool, union perf_event *event, event 26 tools/perf/util/tool.h union perf_event *event, event 29 tools/perf/util/tool.h typedef int (*event_op2)(struct perf_session *session, union perf_event *event); event 30 tools/perf/util/tool.h typedef s64 (*event_op3)(struct perf_session *session, union perf_event *event); event 31 tools/perf/util/tool.h typedef int (*event_op4)(struct perf_session *session, union perf_event *event, u64 data); event 33 tools/perf/util/tool.h typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event, event 19 tools/perf/util/trace-event-parse.c struct tep_event *event; event 24 tools/perf/util/trace-event-parse.c event = tep_get_first_event(pevent); event 25 tools/perf/util/trace-event-parse.c if (!event) event 28 tools/perf/util/trace-event-parse.c field = tep_find_common_field(event, type); event 81 tools/perf/util/trace-event-parse.c raw_field_value(struct tep_event *event, const char *name, void *data) event 86 tools/perf/util/trace-event-parse.c field = tep_find_any_field(event, name); event 95 tools/perf/util/trace-event-parse.c unsigned long long read_size(struct tep_event *event, void *ptr, int size) event 97 tools/perf/util/trace-event-parse.c return tep_read_number(event->tep, ptr, size); event 100 tools/perf/util/trace-event-parse.c void event_format__fprintf(struct 
tep_event *event, event 112 tools/perf/util/trace-event-parse.c tep_print_event(event->tep, &s, &record, "%s", TEP_PRINT_INFO); event 117 tools/perf/util/trace-event-parse.c void event_format__print(struct tep_event *event, event 120 tools/perf/util/trace-event-parse.c return event_format__fprintf(event, cpu, data, size, stdout); event 29 tools/perf/util/trace-event-scripting.c static void process_event_unsupported(union perf_event *event __maybe_unused, event 79 tools/perf/util/trace-event.c struct tep_event *event = NULL; event 95 tools/perf/util/trace-event.c tep_parse_format(pevent, &event, data, size, sys); event 98 tools/perf/util/trace-event.c return event; event 31 tools/perf/util/trace-event.h void event_format__fprintf(struct tep_event *event, event 34 tools/perf/util/trace-event.h void event_format__print(struct tep_event *event, event 42 tools/perf/util/trace-event.h raw_field_value(struct tep_event *event, const char *name, void *data); event 50 tools/perf/util/trace-event.h unsigned long long read_size(struct tep_event *event, void *ptr, int size); event 77 tools/perf/util/trace-event.h void (*process_event) (union perf_event *event, event 81 tools/perf/util/trace-event.h void (*process_switch)(union perf_event *event, event 164 tools/testing/selftests/bpf/progs/pyperf.h Event* event = bpf_map_lookup_elem(&eventmap, &zero); event 165 tools/testing/selftests/bpf/progs/pyperf.h if (!event) event 168 tools/testing/selftests/bpf/progs/pyperf.h event->pid = pid; event 170 tools/testing/selftests/bpf/progs/pyperf.h event->tid = (pid_t)pid_tgid; event 171 tools/testing/selftests/bpf/progs/pyperf.h bpf_get_current_comm(&event->comm, sizeof(event->comm)); event 173 tools/testing/selftests/bpf/progs/pyperf.h event->user_stack_id = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK); event 174 tools/testing/selftests/bpf/progs/pyperf.h event->kernel_stack_id = bpf_get_stackid(ctx, &stackmap, 0); event 186 tools/testing/selftests/bpf/progs/pyperf.h event->thread_current = thread_state == thread_state_current; event 196 tools/testing/selftests/bpf/progs/pyperf.h event->pthread_match = pthread_created == pthread_self; event 198 tools/testing/selftests/bpf/progs/pyperf.h event->pthread_match = 1; event 201 tools/testing/selftests/bpf/progs/pyperf.h if (event->pthread_match || !pidData->use_tls) { event 232 tools/testing/selftests/bpf/progs/pyperf.h event->stack[i] = *symbol_id; event 233 tools/testing/selftests/bpf/progs/pyperf.h event->stack_len = i + 1; event 237 tools/testing/selftests/bpf/progs/pyperf.h event->stack_complete = frame_ptr == NULL; event 239 tools/testing/selftests/bpf/progs/pyperf.h event->stack_complete = 1; event 246 tools/testing/selftests/bpf/progs/pyperf.h event->has_meta = 0; event 247 tools/testing/selftests/bpf/progs/pyperf.h bpf_perf_event_output(ctx, &perfmap, 0, event, offsetof(Event, metadata)); event 147 tools/testing/selftests/bpf/progs/test_tcp_estats.c struct tcp_estats_event event; event 163 tools/testing/selftests/bpf/progs/test_tcp_estats.c static __always_inline void tcp_estats_ev_init(struct tcp_estats_event *event, event 166 tools/testing/selftests/bpf/progs/test_tcp_estats.c event->magic = TCP_ESTATS_MAGIC; event 167 tools/testing/selftests/bpf/progs/test_tcp_estats.c event->ts = bpf_ktime_get_ns(); event 168 tools/testing/selftests/bpf/progs/test_tcp_estats.c event->event_type = type; event 228 tools/testing/selftests/bpf/progs/test_tcp_estats.c struct tcp_estats_event *event, event 232 tools/testing/selftests/bpf/progs/test_tcp_estats.c 
tcp_estats_ev_init(event, type); event 243 tools/testing/selftests/bpf/progs/test_tcp_estats.c tcp_estats_init(sk, &ev.event, &ev.conn_id, type); event 31 tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c static inline void update_event_map(int event) event 40 tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c g.event_map |= (1 << event); event 45 tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c g.event_map |= (1 << event); event 114 tools/testing/selftests/bpf/test_lirc_mode2_user.c struct input_event event; event 120 tools/testing/selftests/bpf/test_lirc_mode2_user.c ret = read(inputfd, &event, sizeof(event)); event 121 tools/testing/selftests/bpf/test_lirc_mode2_user.c if (ret != sizeof(event)) { event 126 tools/testing/selftests/bpf/test_lirc_mode2_user.c if (event.type == EV_MSC && event.code == MSC_SCAN && event 127 tools/testing/selftests/bpf/test_lirc_mode2_user.c event.value == 0xdead) { event 143 tools/testing/selftests/bpf/test_lirc_mode2_user.c ret = read(inputfd, &event, sizeof(event)); event 144 tools/testing/selftests/bpf/test_lirc_mode2_user.c if (ret != sizeof(event)) { event 149 tools/testing/selftests/bpf/test_lirc_mode2_user.c if (event.type == EV_REL && event.code == REL_Y && event 150 tools/testing/selftests/bpf/test_lirc_mode2_user.c event.value == 1 ) { event 132 tools/testing/selftests/breakpoints/step_after_suspend_test.c struct sigevent event = {}; event 361 tools/testing/selftests/pidfd/pidfd_test.c struct epoll_event event, events[MAX_EVENTS]; event 368 tools/testing/selftests/pidfd/pidfd_test.c event.events = EPOLLIN; event 369 tools/testing/selftests/pidfd/pidfd_test.c event.data.fd = pidfd; event 371 tools/testing/selftests/pidfd/pidfd_test.c if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, pidfd, &event)) { event 19 tools/testing/selftests/powerpc/pmu/count_instructions.c static void setup_event(struct event *e, u64 config, char *name) event 29 tools/testing/selftests/powerpc/pmu/count_instructions.c static int do_count_loop(struct event *events, u64 instructions, event 74 tools/testing/selftests/powerpc/pmu/count_instructions.c static u64 determine_overhead(struct event *events) event 96 tools/testing/selftests/powerpc/pmu/count_instructions.c struct event events[2]; event 64 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c struct event event; event 68 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c event_init_named(&event, 0x1001e, "cycles"); event 69 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c event_leader_ebb_init(&event); event 71 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c event.attr.exclude_kernel = 1; event 72 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c event.attr.exclude_hv = 1; event 73 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c event.attr.exclude_idle = 1; event 75 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c FAIL_IF(event_open(&event)); event 79 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c FAIL_IF(ebb_event_enable(&event)); event 98 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c event_close(&event); event 21 tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c struct event event; event 25 tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c event_init_named(&event, 0x1001e, "cycles"); event 26 tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c event_leader_ebb_init(&event); event 28 
tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c FAIL_IF(event_open(&event));
event 33 tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c FAIL_IF(ebb_event_enable(&event));
event 41 tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c event_close(&event);
event 22 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c static int setup_cpu_event(struct event *event, int cpu)
event 24 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
event 26 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c event->attr.pinned = 1;
event 28 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c event->attr.exclude_kernel = 1;
event 29 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c event->attr.exclude_hv = 1;
event 30 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c event->attr.exclude_idle = 1;
event 33 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c FAIL_IF(event_open_with_cpu(event, cpu));
event 34 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c FAIL_IF(event_enable(event));
event 42 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c struct event event;
event 62 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c rc = setup_cpu_event(&event, cpu);
event 80 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c FAIL_IF(event_disable(&event));
event 81 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c FAIL_IF(event_read(&event));
event 83 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c event_report(&event);
event 86 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c FAIL_IF(event.result.value == 0);
event 87 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c FAIL_IF(event.result.enabled != event.result.running);
event 22 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c static int setup_cpu_event(struct event *event, int cpu)
event 24 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
event 26 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c event->attr.exclude_kernel = 1;
event 27 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c event->attr.exclude_hv = 1;
event 28 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c event->attr.exclude_idle = 1;
event 31 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c FAIL_IF(event_open_with_cpu(event, cpu));
event 32 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c FAIL_IF(event_enable(event));
event 40 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c struct event event;
event 60 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c rc = setup_cpu_event(&event, cpu);
event 78 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c FAIL_IF(event_disable(&event));
event 79 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c FAIL_IF(event_read(&event));
event 81 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c event_report(&event);
event 17 tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c struct event event;
event 21 tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c event_init_named(&event, 0x1001e, "cycles");
event 22 tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c event_leader_ebb_init(&event);
event 24 tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c event.attr.exclude_kernel = 1;
event 25 tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c event.attr.exclude_hv = 1;
event 26 tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c event.attr.exclude_idle = 1;
event 28 tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c FAIL_IF(event_open(&event));
event 33 tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c FAIL_IF(ebb_event_enable(&event));
event 49 tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c event_close(&event);
event 55 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c struct event event;
event 61 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c event_init_named(&event, 0x1001e, "cycles");
event 62 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c event_leader_ebb_init(&event);
event 64 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c event.attr.exclude_kernel = 1;
event 65 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c event.attr.exclude_hv = 1;
event 66 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c event.attr.exclude_idle = 1;
event 68 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c FAIL_IF(event_open(&event));
event 72 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c FAIL_IF(ebb_event_enable(&event));
event 108 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c event_close(&event);
event 24 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c struct event event;
event 31 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c event_init_named(&event, 0x1001e, "cycles");
event 32 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c event_leader_ebb_init(&event);
event 34 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c event.attr.exclude_kernel = 1;
event 35 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c event.attr.exclude_hv = 1;
event 36 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c event.attr.exclude_idle = 1;
event 38 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c FAIL_IF(event_open(&event));
event 44 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c FAIL_IF(ebb_event_enable(&event));
event 78 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c event_close(&event);
event 275 tools/testing/selftests/powerpc/pmu/ebb/ebb.c int ebb_event_enable(struct event *e)
event 331 tools/testing/selftests/powerpc/pmu/ebb/ebb.c void event_ebb_init(struct event *e)
event 336 tools/testing/selftests/powerpc/pmu/ebb/ebb.c void event_bhrb_init(struct event *e, unsigned ifm)
event 341 tools/testing/selftests/powerpc/pmu/ebb/ebb.c void event_leader_ebb_init(struct event *e)
event 351 tools/testing/selftests/powerpc/pmu/ebb/ebb.c struct event event;
event 356 tools/testing/selftests/powerpc/pmu/ebb/ebb.c event_init_named(&event, 0x1001e, "cycles");
event 357 tools/testing/selftests/powerpc/pmu/ebb/ebb.c event_leader_ebb_init(&event);
event 359 tools/testing/selftests/powerpc/pmu/ebb/ebb.c event.attr.exclude_kernel = 1;
event 360 tools/testing/selftests/powerpc/pmu/ebb/ebb.c event.attr.exclude_hv = 1;
event 361 tools/testing/selftests/powerpc/pmu/ebb/ebb.c event.attr.exclude_idle = 1;
event 363 tools/testing/selftests/powerpc/pmu/ebb/ebb.c FAIL_IF(event_open(&event));
event 369 tools/testing/selftests/powerpc/pmu/ebb/ebb.c FAIL_IF(event_enable(&event));
event 371 tools/testing/selftests/powerpc/pmu/ebb/ebb.c if (event_read(&event)) {
event 403 tools/testing/selftests/powerpc/pmu/ebb/ebb.c event_close(&event);
event 47 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_leader_ebb_init(struct event *e);
event 48 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_ebb_init(struct event *e);
event 49 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_bhrb_init(struct event *e, unsigned ifm);
event 52 tools/testing/selftests/powerpc/pmu/ebb/ebb.h int ebb_event_enable(struct event *e);
event 58 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_ebb_init(struct event *e);
event 59 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_leader_ebb_init(struct event *e);
event 47 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c struct event event;
event 65 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c event_init_named(&event, 0x1001e, "cycles");
event 66 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c event_leader_ebb_init(&event);
event 68 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c event.attr.exclude_kernel = 1;
event 69 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c event.attr.exclude_hv = 1;
event 70 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c event.attr.exclude_idle = 1;
event 72 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c FAIL_IF(event_open_with_pid(&event, pid));
event 73 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c FAIL_IF(ebb_event_enable(&event));
event 80 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c event_close(&event);
event 54 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c struct event event;
event 73 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c event_init_named(&event, 0x1001e, "cycles");
event 74 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c event_leader_ebb_init(&event);
event 76 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c event.attr.exclude_kernel = 1;
event 77 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c event.attr.exclude_hv = 1;
event 78 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c event.attr.exclude_idle = 1;
event 80 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c FAIL_IF(event_open_with_pid(&event, pid));
event 81 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c FAIL_IF(ebb_event_enable(&event));
event 86 tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c event_close(&event);
event 22 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c static int setup_cpu_event(struct event *event, int cpu)
event 24 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
event 26 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c event->attr.exclude_kernel = 1;
event 27 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c event->attr.exclude_hv = 1;
event 28 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c event->attr.exclude_idle = 1;
event 31 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c FAIL_IF(event_open_with_cpu(event, cpu));
event 32 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c FAIL_IF(event_enable(event));
event 40 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c struct event event;
event 63 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c rc = setup_cpu_event(&event, cpu);
event 74 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c FAIL_IF(event_disable(&event));
event 75 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c FAIL_IF(event_read(&event));
event 77 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c event_report(&event);
event 80 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c FAIL_IF(event.result.enabled >= event.result.running);
event 17 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c struct event event, leader;
event 21 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x1001e);
event 22 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_leader_ebb_init(&event);
event 24 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open(&event));
event 25 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_close(&event);
event 28 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x001e); /* CYCLES - no PMC specified */
event 29 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_leader_ebb_init(&event);
event 31 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open(&event) == 0);
event 34 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x2001e);
event 35 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_leader_ebb_init(&event);
event 36 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event.attr.exclusive = 0;
event 38 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open(&event) == 0);
event 41 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x3001e);
event 42 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_leader_ebb_init(&event);
event 43 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event.attr.freq = 1;
event 45 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open(&event) == 0);
event 48 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x4001e);
event 49 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_leader_ebb_init(&event);
event 50 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event.attr.sample_period = 1;
event 52 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open(&event) == 0);
event 55 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x1001e);
event 56 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_leader_ebb_init(&event);
event 57 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event.attr.enable_on_exec = 1;
event 59 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open(&event) == 0);
event 62 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x1001e);
event 63 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_leader_ebb_init(&event);
event 64 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event.attr.inherit = 1;
event 66 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open(&event) == 0);
event 73 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x20002);
event 74 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_ebb_init(&event);
event 77 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open_with_group(&event, leader.fd));
event 79 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_close(&event);
event 86 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x20002);
event 89 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open_with_group(&event, leader.fd) == 0);
event 100 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x20002);
event 101 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_ebb_init(&event);
event 104 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open_with_group(&event, leader.fd) == 0);
event 121 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_init(&event, 0x1001e);
event 122 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c event_leader_ebb_init(&event);
event 125 tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c FAIL_IF(event_open_with_cpu(&event, 0) == 0);
event 24 tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c static struct event event;
event 36 tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c FAIL_IF(event_read(&event));
event 48 tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c event_init_named(&event, 0x1001e, "cycles");
event 49 tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c event_leader_ebb_init(&event);
event 51 tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c FAIL_IF(event_open(&event));
event 57 tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c FAIL_IF(ebb_event_enable(&event));
event 72 tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c event_close(&event);
event 25 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c static int do_count_loop(struct event *event, uint64_t instructions,
event 45 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c event->result.value = ebb_state.stats.pmc_count[4-1];
event 47 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c difference = event->result.value - expected;
event 48 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c percentage = (double)difference / event->result.value * 100;
event 53 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c printf("Actual %llu\n", event->result.value);
event 63 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c if (difference / event->result.value)
event 70 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c static uint64_t determine_overhead(struct event *event)
event 75 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c do_count_loop(event, 0, 0, false);
event 76 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c overhead = event->result.value;
event 79 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c do_count_loop(event, 0, 0, false);
event 80 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c current = event->result.value;
event 111 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c struct event event;
event 116 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c event_init_named(&event, 0x400FA, "PM_RUN_INST_CMPL");
event 117 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c event_leader_ebb_init(&event);
event 118 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c event.attr.exclude_kernel = 1;
event 119 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c event.attr.exclude_hv = 1;
event 120 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c event.attr.exclude_idle = 1;
event 122 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c FAIL_IF(event_open(&event));
event 123 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c FAIL_IF(ebb_event_enable(&event));
event 131 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c overhead = determine_overhead(&event);
event 135 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c FAIL_IF(do_count_loop(&event, 0x100000, overhead, true));
event 138 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c FAIL_IF(do_count_loop(&event, 0xa00000, overhead, true));
event 141 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c FAIL_IF(do_count_loop(&event, 0x6400000, overhead, true));
event 144 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c FAIL_IF(do_count_loop(&event, 0x40000000, overhead, true));
event 147 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c FAIL_IF(do_count_loop(&event, 0x400000000, overhead, true));
event 150 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c FAIL_IF(do_count_loop(&event, 0x1000000000, overhead, true));
event 153 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c FAIL_IF(do_count_loop(&event, 0x2000000000, overhead, true));
event 156 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c event_close(&event);
event 24 tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c struct event event;
event 29 tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c event_init_named(&event, 0x40002, "instructions");
event 30 tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c event_leader_ebb_init(&event);
event 32 tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c event.attr.exclude_kernel = 1;
event 33 tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c event.attr.exclude_hv = 1;
event 34 tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c event.attr.exclude_idle = 1;
event 36 tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c FAIL_IF(event_open(&event));
event 41 tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c FAIL_IF(ebb_event_enable(&event));
event 84 tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c event_close(&event);
event 18 tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c struct event events[6];
event 32 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c struct event event;
event 39 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c event_init_named(&event, 0x1001e, "cycles");
event 40 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c event_leader_ebb_init(&event);
event 42 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c event.attr.exclude_kernel = 1;
event 43 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c event.attr.exclude_hv = 1;
event 44 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c event.attr.exclude_idle = 1;
event 46 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c FAIL_IF(event_open(&event));
event 52 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c FAIL_IF(ebb_event_enable(&event));
event 68 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c event_close(&event);
event 18 tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c struct event event;
event 24 tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c event_init_named(&event, 0x1001e, "cycles");
event 25 tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c event_leader_ebb_init(&event);
event 27 tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c event.attr.exclude_kernel = 1;
event 28 tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c event.attr.exclude_hv = 1;
event 29 tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c event.attr.exclude_idle = 1;
event 31 tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c FAIL_IF(event_open(&event));
event 32 tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c FAIL_IF(ebb_event_enable(&event));
event 51 tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c event_close(&event);
event 59 tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c struct event event;
event 63 tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c event_init_named(&event, 0x1001e, "cycles");
event 64 tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c event_leader_ebb_init(&event);
event 66 tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c event.attr.exclude_kernel = 1;
event 67 tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c event.attr.exclude_hv = 1;
event 68 tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c event.attr.exclude_idle = 1;
event 70 tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c FAIL_IF(event_open(&event));
event 75 tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c FAIL_IF(ebb_event_enable(&event));
event 92 tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c event_close(&event);
event 50 tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c struct event event;
event 55 tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c event_init(&event, 0x2001e);
event 56 tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c event_leader_ebb_init(&event);
event 58 tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c event.attr.exclude_kernel = 1;
event 59 tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c event.attr.exclude_hv = 1;
event 60 tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c event.attr.exclude_idle = 1;
event 62 tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c FAIL_IF(event_open(&event));
event 67 tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c FAIL_IF(ebb_event_enable(&event));
event 85 tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c event_close(&event);
event 22 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c static int setup_child_event(struct event *event, pid_t child_pid)
event 24 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
event 26 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c event->attr.pinned = 1;
event 28 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c event->attr.exclude_kernel = 1;
event 29 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c event->attr.exclude_hv = 1;
event 30 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c event->attr.exclude_idle = 1;
event 32 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c FAIL_IF(event_open_with_pid(event, child_pid));
event 33 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c FAIL_IF(event_enable(event));
event 41 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c struct event event;
event 57 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c rc = setup_child_event(&event, pid);
event 74 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c FAIL_IF(event_disable(&event));
event 75 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c FAIL_IF(event_read(&event));
event 77 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c event_report(&event);
event 79 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c FAIL_IF(event.result.value == 0);
event 84 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c FAIL_IF(event.result.enabled == 0);
event 85 tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c FAIL_IF(event.result.running == 0);
event 22 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c static int setup_child_event(struct event *event, pid_t child_pid)
event 24 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
event 26 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c event->attr.exclude_kernel = 1;
event 27 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c event->attr.exclude_hv = 1;
event 28 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c event->attr.exclude_idle = 1;
event 30 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c FAIL_IF(event_open_with_pid(event, child_pid));
event 31 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c FAIL_IF(event_enable(event));
event 39 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c struct event event;
event 55 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c rc = setup_child_event(&event, pid);
event 72 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c FAIL_IF(event_disable(&event));
event 73 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c FAIL_IF(event_read(&event));
event 75 tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c event_report(&event);
event 23 tools/testing/selftests/powerpc/pmu/event.c void event_init_opts(struct event *e, u64 config, int type, char *name)
event 37 tools/testing/selftests/powerpc/pmu/event.c void event_init_named(struct event *e, u64 config, char *name)
event 42 tools/testing/selftests/powerpc/pmu/event.c void event_init(struct event *e, u64 config)
event 52 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd)
event 63 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_group(struct event *e, int group_fd)
event 68 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_pid(struct event *e, pid_t pid)
event 73 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_cpu(struct event *e, int cpu)
event 78 tools/testing/selftests/powerpc/pmu/event.c int event_open(struct event *e)
event 83 tools/testing/selftests/powerpc/pmu/event.c void event_close(struct event *e)
event 88 tools/testing/selftests/powerpc/pmu/event.c int event_enable(struct event *e)
event 93 tools/testing/selftests/powerpc/pmu/event.c int event_disable(struct event *e)
event 98 tools/testing/selftests/powerpc/pmu/event.c int event_reset(struct event *e)
event 103 tools/testing/selftests/powerpc/pmu/event.c int event_read(struct event *e)
event 116 tools/testing/selftests/powerpc/pmu/event.c void event_report_justified(struct event *e, int name_width, int result_width)
event 128 tools/testing/selftests/powerpc/pmu/event.c void event_report(struct event *e)
event 27 tools/testing/selftests/powerpc/pmu/event.h void event_init(struct event *e, u64 config);
event 28 tools/testing/selftests/powerpc/pmu/event.h void event_init_named(struct event *e, u64 config, char *name);
event 29 tools/testing/selftests/powerpc/pmu/event.h void event_init_opts(struct event *e, u64 config, int type, char *name);
event 30 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd);
event 31 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_group(struct event *e, int group_fd);
event 32 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_pid(struct event *e, pid_t pid);
event 33 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_cpu(struct event *e, int cpu);
event 34 tools/testing/selftests/powerpc/pmu/event.h int event_open(struct event *e);
event 35 tools/testing/selftests/powerpc/pmu/event.h void event_close(struct event *e);
event 36 tools/testing/selftests/powerpc/pmu/event.h int event_enable(struct event *e);
event 37 tools/testing/selftests/powerpc/pmu/event.h int event_disable(struct event *e);
event 38 tools/testing/selftests/powerpc/pmu/event.h int event_reset(struct event *e);
event 39 tools/testing/selftests/powerpc/pmu/event.h int event_read(struct event *e);
event 40 tools/testing/selftests/powerpc/pmu/event.h void event_report_justified(struct event *e, int name_width, int result_width);
event 41 tools/testing/selftests/powerpc/pmu/event.h void event_report(struct event *e);
event 19 tools/testing/selftests/powerpc/pmu/l3_bank_test.c struct event event;
event 26 tools/testing/selftests/powerpc/pmu/l3_bank_test.c event_init(&event, 0x84918F);
event 28 tools/testing/selftests/powerpc/pmu/l3_bank_test.c FAIL_IF(event_open(&event));
event 33 tools/testing/selftests/powerpc/pmu/l3_bank_test.c event_read(&event);
event 34 tools/testing/selftests/powerpc/pmu/l3_bank_test.c event_report(&event);
event 36 tools/testing/selftests/powerpc/pmu/l3_bank_test.c FAIL_IF(event.result.running == 0);
event 37 tools/testing/selftests/powerpc/pmu/l3_bank_test.c FAIL_IF(event.result.enabled == 0);
event 39 tools/testing/selftests/powerpc/pmu/l3_bank_test.c event_close(&event);
event 25 tools/testing/selftests/powerpc/pmu/per_event_excludes.c struct event *e, events[4];
event 147 tools/testing/selftests/ptp/testptp.c struct ptp_extts_event event;
event 354 tools/testing/selftests/ptp/testptp.c cnt = read(fd, &event, sizeof(event));
event 355 tools/testing/selftests/ptp/testptp.c if (cnt != sizeof(event)) {
event 359 tools/testing/selftests/ptp/testptp.c printf("event index %u at %lld.%09u\n", event.index,
event 360 tools/testing/selftests/ptp/testptp.c event.t.sec, event.t.nsec);
event 475 tools/testing/selftests/vm/userfaultfd.c if (msg->event != UFFD_EVENT_PAGEFAULT)
event 477 tools/testing/selftests/vm/userfaultfd.c msg->event), exit(1);
event 521 tools/testing/selftests/vm/userfaultfd.c switch (msg.event) {
event 524 tools/testing/selftests/vm/userfaultfd.c msg.event), exit(1);
event 153 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c static void display_event(struct usb_functionfs_event *event)
event 164 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c switch (event->type) {
event 172 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c printf("Event %s\n", names[event->type]);
event 179 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c struct usb_functionfs_event event;
event 181 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c ret = read(ep0, &event, sizeof(event));
event 186 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c display_event(&event);
event 187 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c switch (event.type) {
event 189 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c if (event.u.setup.bRequestType & USB_DIR_IN)
event 141 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c static void display_event(struct usb_functionfs_event *event)
event 152 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c switch (event->type) {
event 160 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c printf("Event %s\n", names[event->type]);
event 166 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c struct usb_functionfs_event event;
event 176 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c ret = read(ep0, &event, sizeof(event));
event 181 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c display_event(&event);
event 182 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c switch (event.type) {
event 184 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c if (event.u.setup.bRequestType & USB_DIR_IN)
event 600 tools/usb/ffs-test.c const struct usb_functionfs_event *event = buf;
event 605 tools/usb/ffs-test.c for (n = nbytes / sizeof *event; n; --n, ++event)
event 606 tools/usb/ffs-test.c switch (event->type) {
event 614 tools/usb/ffs-test.c printf("Event %s\n", names[event->type]);
event 615 tools/usb/ffs-test.c if (event->type == FUNCTIONFS_SETUP)
event 616 tools/usb/ffs-test.c handle_setup(&event->u.setup);
event 620 tools/usb/ffs-test.c printf("Event %03u (unknown)\n", event->type);
event 19 tools/virtio/ringtest/ring.c static inline bool need_event(unsigned short event,
event 23 tools/virtio/ringtest/ring.c return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
event 57 tools/virtio/ringtest/ring.c struct event *event;
event 87 tools/virtio/ringtest/ring.c event = calloc(1, sizeof(*event));
event 88 tools/virtio/ringtest/ring.c if (!event) {
event 180 tools/virtio/ringtest/ring.c event->call_index = guest.last_used_idx;
event 194 tools/virtio/ringtest/ring.c need = need_event(event->kick_index,
event 213 tools/virtio/ringtest/ring.c event->kick_index = host.used_idx;
event 262 tools/virtio/ringtest/ring.c need = need_event(event->call_index,
event 568 virt/kvm/arm/pmu.c struct perf_event *event;
event 615 virt/kvm/arm/pmu.c event = perf_event_create_kernel_counter(&attr, -1, current,
event 625 virt/kvm/arm/pmu.c event = perf_event_create_kernel_counter(&attr, -1, current,
event 629 virt/kvm/arm/pmu.c if (IS_ERR(event)) {
event 631 virt/kvm/arm/pmu.c PTR_ERR(event));
event 635 virt/kvm/arm/pmu.c pmc->perf_event = event;
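Taken together, the powerpc PMU selftest entries above trace one recurring skeleton: an event is initialised (event_init or event_init_named), optionally marked as an EBB group leader (event_leader_ebb_init), restricted with the exclude_* attributes, opened via one of the event_open* helpers, enabled (event_enable or ebb_event_enable), then read, reported, and closed. The sketch below only reassembles that skeleton from the indexed lines; it assumes it is built inside tools/testing/selftests/powerpc, where the event.h/ebb.h helpers and the FAIL_IF harness macro live, and it elides the EBB handler setup the real tests perform between enabling and closing the event.

    /*
     * Minimal sketch of the EBB selftest event pattern, assembled from the
     * index entries above.  Assumes the powerpc selftest tree provides
     * "ebb.h" (which declares event_*, ebb_event_enable) and the FAIL_IF
     * macro from the selftest utils; not a standalone program.
     */
    #include "ebb.h"

    static int cycles_ebb_sketch(void)
    {
        struct event event;

        /* 0x1001e is the raw "cycles" encoding used throughout the EBB tests */
        event_init_named(&event, 0x1001e, "cycles");
        event_leader_ebb_init(&event);

        /* EBB events count user state only */
        event.attr.exclude_kernel = 1;
        event.attr.exclude_hv = 1;
        event.attr.exclude_idle = 1;

        FAIL_IF(event_open(&event));
        FAIL_IF(ebb_event_enable(&event));

        /* ... install the EBB handler and run the workload here ... */

        event_close(&event);

        return 0;
    }

The cpu- and task-scoped variants in the listing differ only in the open and enable calls (event_open_with_cpu/event_open_with_pid plus event_enable instead of ebb_event_enable) and in reading back event.result via event_read and event_report.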