num_events 326 arch/arm/kernel/perf_event_v6.c for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
num_events 504 arch/arm/kernel/perf_event_v6.c cpu_pmu->num_events = 3;
num_events 555 arch/arm/kernel/perf_event_v6.c cpu_pmu->num_events = 3;
num_events 657 arch/arm/kernel/perf_event_v7.c (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
num_events 973 arch/arm/kernel/perf_event_v7.c for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
num_events 1052 arch/arm/kernel/perf_event_v7.c for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
num_events 1096 arch/arm/kernel/perf_event_v7.c u32 idx, nb_cnt = cpu_pmu->num_events, val;
num_events 1197 arch/arm/kernel/perf_event_v7.c &arm_pmu->num_events, 1);
num_events 1559 arch/arm/kernel/perf_event_v7.c u32 idx, nb_cnt = cpu_pmu->num_events;
num_events 1892 arch/arm/kernel/perf_event_v7.c u32 idx, nb_cnt = cpu_pmu->num_events;
num_events 173 arch/arm/kernel/perf_event_xscale.c for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
num_events 383 arch/arm/kernel/perf_event_xscale.c cpu_pmu->num_events = 3;
num_events 519 arch/arm/kernel/perf_event_xscale.c for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
num_events 752 arch/arm/kernel/perf_event_xscale.c cpu_pmu->num_events = 5;
num_events 345 arch/arm64/kernel/perf_event.c (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
num_events 718 arch/arm64/kernel/perf_event.c for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
num_events 761 arch/arm64/kernel/perf_event.c for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx ++) {
num_events 777 arch/arm64/kernel/perf_event.c for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
num_events 877 arch/arm64/kernel/perf_event.c u32 idx, nb_cnt = cpu_pmu->num_events;
num_events 979 arch/arm64/kernel/perf_event.c cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
num_events 983 arch/arm64/kernel/perf_event.c cpu_pmu->num_events += 1;
num_events 82 arch/nds32/include/asm/pmu.h int num_events;
num_events 250 arch/nds32/kernel/perf_event_cpu.c for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
num_events 289 arch/nds32/kernel/perf_event_cpu.c return ((idx >= 0) && (idx < cpu_pmu->num_events));
num_events 670 arch/nds32/kernel/perf_event_cpu.c cpu_pmu->num_events = nds32_read_num_pfm_events();
num_events 699 arch/nds32/kernel/perf_event_cpu.c nds32_pmu->num_events);
num_events 1062 arch/nds32/kernel/perf_event_cpu.c nds32_pmu->name, nds32_pmu->num_events);
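
The arm64 hits at perf_event.c:979 and :983 show where num_events comes from in these PMU drivers: the PMCR.N field gives the number of programmable counters, and the dedicated cycle counter is added on top. A minimal standalone sketch of that derivation, where read_pmcr() is a hypothetical stand-in for armv8pmu_pmcr_read() and the shift/mask values mirror the ARMV8_PMU_PMCR_N_* constants:

    #include <stdio.h>
    #include <stdint.h>

    #define PMCR_N_SHIFT 11    /* stands in for ARMV8_PMU_PMCR_N_SHIFT */
    #define PMCR_N_MASK  0x1f  /* PMCR.N is a 5-bit field */

    /* Hypothetical stand-in for armv8pmu_pmcr_read(); the real driver
     * reads the PMCR system register. Pretend PMCR.N == 6 here. */
    static uint32_t read_pmcr(void)
    {
        return 6u << PMCR_N_SHIFT;
    }

    int main(void)
    {
        /* N programmable counters plus one for the cycle counter,
         * mirroring "cpu_pmu->num_events += 1" in the listing. */
        unsigned int num_events = ((read_pmcr() >> PMCR_N_SHIFT) & PMCR_N_MASK) + 1;

        printf("num_events = %u\n", num_events); /* prints 7 */
        return 0;
    }
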
num_events 1780 arch/powerpc/perf/core-book3s.c static atomic_t num_events;
num_events 1789 arch/powerpc/perf/core-book3s.c if (!atomic_add_unless(&num_events, -1, 1)) {
num_events 1791 arch/powerpc/perf/core-book3s.c if (atomic_dec_return(&num_events) == 0)
num_events 1988 arch/powerpc/perf/core-book3s.c if (!atomic_inc_not_zero(&num_events)) {
num_events 1990 arch/powerpc/perf/core-book3s.c if (atomic_read(&num_events) == 0 &&
num_events 1994 arch/powerpc/perf/core-book3s.c atomic_inc(&num_events);
num_events 30 arch/powerpc/perf/core-fsl-emb.c static atomic_t num_events;
num_events 222 arch/powerpc/perf/core-fsl-emb.c if (atomic_read(&num_events)) {
num_events 444 arch/powerpc/perf/core-fsl-emb.c if (!atomic_add_unless(&num_events, -1, 1)) {
num_events 446 arch/powerpc/perf/core-fsl-emb.c if (atomic_dec_return(&num_events) == 0)
num_events 569 arch/powerpc/perf/core-fsl-emb.c if (!atomic_inc_not_zero(&num_events)) {
num_events 571 arch/powerpc/perf/core-fsl-emb.c if (atomic_read(&num_events) == 0 &&
num_events 575 arch/powerpc/perf/core-fsl-emb.c atomic_inc(&num_events);
num_events 83 arch/powerpc/perf/e500-pmu.c static int num_events = 128;
num_events 91 arch/powerpc/perf/e500-pmu.c if (event_low >= num_events)
num_events 125 arch/powerpc/perf/e500-pmu.c num_events = 256;
num_events 85 arch/powerpc/perf/e6500-pmu.c static int num_events = 512;
num_events 91 arch/powerpc/perf/e6500-pmu.c if (event_low >= num_events ||
num_events 166 arch/s390/kernel/perf_cpum_cf.c static atomic_t num_events = ATOMIC_INIT(0);
num_events 173 arch/s390/kernel/perf_cpum_cf.c if (!atomic_add_unless(&num_events, -1, 1)) {
num_events 175 arch/s390/kernel/perf_cpum_cf.c if (atomic_dec_return(&num_events) == 0)
num_events 275 arch/s390/kernel/perf_cpum_cf.c if (!atomic_inc_not_zero(&num_events)) {
num_events 277 arch/s390/kernel/perf_cpum_cf.c if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
num_events 280 arch/s390/kernel/perf_cpum_cf.c atomic_inc(&num_events);
num_events 531 arch/s390/kernel/perf_cpum_sf.c static atomic_t num_events;
num_events 599 arch/s390/kernel/perf_cpum_sf.c if (!atomic_add_unless(&num_events, -1, 1)) {
num_events 601 arch/s390/kernel/perf_cpum_sf.c if (atomic_dec_return(&num_events) == 0)
num_events 776 arch/s390/kernel/perf_cpum_sf.c if (!atomic_inc_not_zero(&num_events)) {
num_events 778 arch/s390/kernel/perf_cpum_sf.c if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
num_events 781 arch/s390/kernel/perf_cpum_sf.c atomic_inc(&num_events);
num_events 2087 arch/s390/kernel/perf_cpum_sf.c if (!atomic_read(&num_events))
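
The powerpc core-book3s.c, core-fsl-emb.c and s390 cpum_cf.c/cpum_sf.c hits above (and the sh ones below) all repeat one reservation idiom: an atomic_t num_events counts live events, the PMU hardware is claimed on the 0 to 1 transition and released when the count drops back to zero, with a mutex serializing both slow paths. A user-space sketch of that idiom, spelling out atomic_inc_not_zero()/atomic_add_unless() as the CAS loops the kernel provides, and using reserve_pmc_hardware()/release_pmc_hardware() as stand-ins for the per-arch hooks:

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static atomic_int num_events;
    static pthread_mutex_t pmc_reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int reserve_pmc_hardware(void) { puts("claim PMU"); return 0; }
    static void release_pmc_hardware(void) { puts("release PMU"); }

    /* atomic_inc_not_zero(): increment unless the counter is zero. */
    static int inc_not_zero(atomic_int *v)
    {
        int old = atomic_load(v);
        while (old != 0)
            if (atomic_compare_exchange_weak(v, &old, old + 1))
                return 1;
        return 0;
    }

    /* atomic_add_unless(v, a, u): add a unless the counter equals u. */
    static int add_unless(atomic_int *v, int a, int u)
    {
        int old = atomic_load(v);
        while (old != u)
            if (atomic_compare_exchange_weak(v, &old, old + a))
                return 1;
        return 0;
    }

    static int event_init(void)
    {
        int err = 0;

        if (!inc_not_zero(&num_events)) {   /* possible 0 -> 1 transition */
            pthread_mutex_lock(&pmc_reserve_mutex);
            if (atomic_load(&num_events) == 0 && reserve_pmc_hardware())
                err = -1;                   /* -EBUSY in the kernel */
            else
                atomic_fetch_add(&num_events, 1);
            pthread_mutex_unlock(&pmc_reserve_mutex);
        }
        return err;
    }

    static void event_destroy(void)
    {
        if (!add_unless(&num_events, -1, 1)) {  /* we may be the last one */
            pthread_mutex_lock(&pmc_reserve_mutex);
            if (atomic_fetch_sub(&num_events, 1) == 1)
                release_pmc_hardware();
            pthread_mutex_unlock(&pmc_reserve_mutex);
        }
    }

    int main(void)
    {
        event_init();    /* claims the PMU */
        event_init();    /* refcount only */
        event_destroy();
        event_destroy(); /* releases the PMU */
        return 0;
    }

The re-check of the counter under the mutex is what closes the race where two events hit the 0 to 1 transition at once.
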
num_events 31 arch/sh/include/asm/hw_breakpoint.h unsigned int num_events;
num_events 11 arch/sh/include/asm/perf_event.h unsigned int num_events;
num_events 227 arch/sh/kernel/cpu/sh4/perf_event.c for (i = 0; i < sh7750_pmu.num_events; i++)
num_events 235 arch/sh/kernel/cpu/sh4/perf_event.c for (i = 0; i < sh7750_pmu.num_events; i++)
num_events 241 arch/sh/kernel/cpu/sh4/perf_event.c .num_events = 2,
num_events 261 arch/sh/kernel/cpu/sh4a/perf_event.c for (i = 0; i < sh4a_pmu.num_events; i++)
num_events 269 arch/sh/kernel/cpu/sh4a/perf_event.c for (i = 0; i < sh4a_pmu.num_events; i++)
num_events 275 arch/sh/kernel/cpu/sh4a/perf_event.c .num_events = 2,
num_events 48 arch/sh/kernel/cpu/sh4a/ubc.c for (i = 0; i < sh4a_ubc.num_events; i++)
num_events 58 arch/sh/kernel/cpu/sh4a/ubc.c for (i = 0; i < sh4a_ubc.num_events; i++)
num_events 68 arch/sh/kernel/cpu/sh4a/ubc.c for (i = 0; i < sh4a_ubc.num_events; i++)
num_events 87 arch/sh/kernel/cpu/sh4a/ubc.c .num_events = 2,
num_events 114 arch/sh/kernel/cpu/sh4a/ubc.c for (i = 0; i < sh4a_ubc.num_events; i++) {
num_events 35 arch/sh/kernel/hw_breakpoint.c static struct sh_ubc ubc_dummy = { .num_events = 0 };
num_events 52 arch/sh/kernel/hw_breakpoint.c for (i = 0; i < sh_ubc->num_events; i++) {
num_events 61 arch/sh/kernel/hw_breakpoint.c if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
num_events 84 arch/sh/kernel/hw_breakpoint.c for (i = 0; i < sh_ubc->num_events; i++) {
num_events 93 arch/sh/kernel/hw_breakpoint.c if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
num_events 267 arch/sh/kernel/hw_breakpoint.c for (i = 0; i < sh_ubc->num_events; i++) {
num_events 297 arch/sh/kernel/hw_breakpoint.c for (i = 0; i < sh_ubc->num_events; i++) {
num_events 405 arch/sh/kernel/hw_breakpoint.c WARN_ON(ubc->num_events > HBP_NUM);
num_events 39 arch/sh/kernel/perf_event.c static atomic_t num_events;
num_events 74 arch/sh/kernel/perf_event.c return sh_pmu->num_events;
num_events 83 arch/sh/kernel/perf_event.c if (!atomic_add_unless(&num_events, -1, 1)) {
num_events 85 arch/sh/kernel/perf_event.c if (atomic_dec_return(&num_events) == 0)
num_events 136 arch/sh/kernel/perf_event.c if (!atomic_inc_not_zero(&num_events)) {
num_events 138 arch/sh/kernel/perf_event.c if (atomic_read(&num_events) == 0 &&
num_events 142 arch/sh/kernel/perf_event.c atomic_inc(&num_events);
num_events 272 arch/sh/kernel/perf_event.c idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
num_events 273 arch/sh/kernel/perf_event.c if (idx == sh_pmu->num_events)
num_events 375 arch/sh/kernel/perf_event.c WARN_ON(_pmu->num_events > MAX_HWEVENTS);
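
The sh perf_event.c hits at lines 272-273 show the other recurring role of num_events in these PMU drivers: it bounds counter allocation, where a free counter index is the first clear bit in a used_mask bitmap and an index equal to num_events means every counter is busy (the drivers/perf/arm_pmu.c entries further down use the same used_mask). A small sketch of that allocation step, open-coding find_first_zero_bit() over a word-sized mask:

    #include <stdio.h>

    #define NUM_EVENTS 6         /* illustrative counter count */

    static unsigned long used_mask;

    static int alloc_counter(void)
    {
        int idx;

        /* Open-coded find_first_zero_bit(used_mask, NUM_EVENTS). */
        for (idx = 0; idx < NUM_EVENTS; idx++)
            if (!(used_mask & (1UL << idx)))
                break;

        if (idx == NUM_EVENTS)
            return -1;           /* all counters in use (-EAGAIN) */

        used_mask |= 1UL << idx; /* claim the slot */
        return idx;
    }

    int main(void)
    {
        printf("%d %d\n", alloc_counter(), alloc_counter()); /* 0 1 */
        return 0;
    }
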
num_events 36 drivers/devfreq/event/exynos-ppmu.c unsigned int num_events;
num_events 519 drivers/devfreq/event/exynos-ppmu.c info->num_events = count;
num_events 668 drivers/devfreq/event/exynos-ppmu.c size = sizeof(struct devfreq_event_dev *) * info->num_events;
num_events 676 drivers/devfreq/event/exynos-ppmu.c for (i = 0; i < info->num_events; i++) {
num_events 420 drivers/dma/imx-sdma.c int num_events;
num_events 455 drivers/dma/imx-sdma.c .num_events = 32,
num_events 476 drivers/dma/imx-sdma.c .num_events = 48,
num_events 482 drivers/dma/imx-sdma.c .num_events = 48,
num_events 500 drivers/dma/imx-sdma.c .num_events = 48,
num_events 520 drivers/dma/imx-sdma.c .num_events = 48,
num_events 539 drivers/dma/imx-sdma.c .num_events = 48,
num_events 557 drivers/dma/imx-sdma.c .num_events = 48,
num_events 563 drivers/dma/imx-sdma.c .num_events = 48,
num_events 1633 drivers/dma/imx-sdma.c if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
num_events 1639 drivers/dma/imx-sdma.c if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
num_events 1899 drivers/dma/imx-sdma.c for (i = 0; i < sdma->drvdata->num_events; i++)
num_events 283 drivers/dma/pl330.c unsigned int num_events:6;
num_events 1663 drivers/dma/pl330.c if (pl330->pcfg.num_events < 32
num_events 1664 drivers/dma/pl330.c && val & ~((1 << pl330->pcfg.num_events) - 1)) {
num_events 1672 drivers/dma/pl330.c for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
num_events 1735 drivers/dma/pl330.c for (ev = 0; ev < pl330->pcfg.num_events; ev++)
num_events 1788 drivers/dma/pl330.c if (ev >= 0 && ev < pl330->pcfg.num_events
num_events 1847 drivers/dma/pl330.c pl330->pcfg.num_events = val;
num_events 1949 drivers/dma/pl330.c if (pl330->pcfg.num_events == 0) {
num_events 1964 drivers/dma/pl330.c for (i = 0; i < pl330->pcfg.num_events; i++)
num_events 3172 drivers/dma/pl330.c pcfg->num_peri, pcfg->num_events);
num_events 1099 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c err = kfd_wait_on_events(p, args->num_events,
num_events 527 drivers/gpu/drm/amd/amdkfd/kfd_events.c static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
num_events 532 drivers/gpu/drm/amd/amdkfd/kfd_events.c event_waiters = kmalloc_array(num_events,
num_events 536 drivers/gpu/drm/amd/amdkfd/kfd_events.c for (i = 0; (event_waiters) && (i < num_events) ; i++) {
num_events 581 drivers/gpu/drm/amd/amdkfd/kfd_events.c static uint32_t test_event_condition(bool all, uint32_t num_events,
num_events 587 drivers/gpu/drm/amd/amdkfd/kfd_events.c for (i = 0; i < num_events; i++) {
num_events 599 drivers/gpu/drm/amd/amdkfd/kfd_events.c return activated_count == num_events ?
num_events 607 drivers/gpu/drm/amd/amdkfd/kfd_events.c static int copy_signaled_event_data(uint32_t num_events,
num_events 617 drivers/gpu/drm/amd/amdkfd/kfd_events.c for (i = 0; i < num_events; i++) {
num_events 653 drivers/gpu/drm/amd/amdkfd/kfd_events.c static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
num_events 657 drivers/gpu/drm/amd/amdkfd/kfd_events.c for (i = 0; i < num_events; i++)
num_events 666 drivers/gpu/drm/amd/amdkfd/kfd_events.c uint32_t num_events, void __user *data,
num_events 678 drivers/gpu/drm/amd/amdkfd/kfd_events.c event_waiters = alloc_event_waiters(num_events);
num_events 686 drivers/gpu/drm/amd/amdkfd/kfd_events.c for (i = 0; i < num_events; i++) {
num_events 702 drivers/gpu/drm/amd/amdkfd/kfd_events.c *wait_result = test_event_condition(all, num_events, event_waiters);
num_events 704 drivers/gpu/drm/amd/amdkfd/kfd_events.c ret = copy_signaled_event_data(num_events,
num_events 715 drivers/gpu/drm/amd/amdkfd/kfd_events.c for (i = 0; i < num_events; i++)
num_events 750 drivers/gpu/drm/amd/amdkfd/kfd_events.c *wait_result = test_event_condition(all, num_events,
num_events 766 drivers/gpu/drm/amd/amdkfd/kfd_events.c ret = copy_signaled_event_data(num_events,
num_events 771 drivers/gpu/drm/amd/amdkfd/kfd_events.c free_waiters(num_events, event_waiters);
num_events 1009 drivers/gpu/drm/amd/amdkfd/kfd_priv.h uint32_t num_events, void __user *data,
num_events 182 drivers/iio/accel/mma9553.c int num_events;
num_events 225 drivers/iio/accel/mma9553.c data->num_events = MMA9553_EVENTS_INFO_SIZE;
num_events 226 drivers/iio/accel/mma9553.c for (i = 0; i < data->num_events; i++) {
num_events 239 drivers/iio/accel/mma9553.c for (i = 0; i < data->num_events; i++)
num_events 254 drivers/iio/accel/mma9553.c for (i = 0; i < data->num_events; i++)
num_events 1848 drivers/infiniband/hw/mlx5/devx.c static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
num_events 1853 drivers/infiniband/hw/mlx5/devx.c for (i = 0; i < num_events; i++) {
num_events 1868 drivers/infiniband/hw/mlx5/devx.c int num_events, u16 *event_type_num_list,
num_events 1883 drivers/infiniband/hw/mlx5/devx.c return is_valid_events_legacy(num_events, event_type_num_list,
num_events 1887 drivers/infiniband/hw/mlx5/devx.c for (i = 0; i < num_events; i++) {
num_events 1933 drivers/infiniband/hw/mlx5/devx.c int num_events;
num_events 1979 drivers/infiniband/hw/mlx5/devx.c num_events = uverbs_attr_ptr_get_array_size(
num_events 1983 drivers/infiniband/hw/mlx5/devx.c if (num_events < 0)
num_events 1984 drivers/infiniband/hw/mlx5/devx.c return num_events;
num_events 1986 drivers/infiniband/hw/mlx5/devx.c if (num_events > MAX_NUM_EVENTS)
num_events 1992 drivers/infiniband/hw/mlx5/devx.c if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
num_events 2001 drivers/infiniband/hw/mlx5/devx.c for (i = 0; i < num_events; i++) {
num_events 404 drivers/net/wireless/intel/iwlwifi/dvm/main.c u32 start_idx, u32 num_events,
num_events 430 drivers/net/wireless/intel/iwlwifi/dvm/main.c if (WARN_ON(num_events > capacity - start_idx))
num_events 431 drivers/net/wireless/intel/iwlwifi/dvm/main.c num_events = capacity - start_idx;
num_events 437 drivers/net/wireless/intel/iwlwifi/dvm/main.c for (i = 0; i < num_events; i++) {
num_events 1697 drivers/net/wireless/intel/iwlwifi/dvm/main.c u32 num_events, u32 mode,
num_events 1709 drivers/net/wireless/intel/iwlwifi/dvm/main.c if (num_events == 0)
num_events 1737 drivers/net/wireless/intel/iwlwifi/dvm/main.c for (i = 0; i < num_events; i++) {
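
In the driver entries above, num_events mostly serves as a validation bound: imx-sdma.c:1633/1639, pl330.c:1788 and e500-pmu.c:91 all reject a caller-supplied event id unless it falls below the probed count. A minimal sketch of that check, with an illustrative driver-data struct rather than the real ones:

    #include <stdio.h>

    /* Illustrative stand-in for the per-SoC driver data that carries
     * num_events (cf. the drvdata initializers in imx-sdma.c above). */
    struct drvdata {
        int num_events;
    };

    static int check_event_id(const struct drvdata *dd, unsigned int event_id)
    {
        if (event_id >= (unsigned int)dd->num_events)
            return -22;          /* -EINVAL */
        return 0;
    }

    int main(void)
    {
        struct drvdata d = { .num_events = 48 };

        /* id 7 is in range; id 48 is one past the last valid event. */
        printf("%d %d\n", check_event_id(&d, 7), check_event_id(&d, 48));
        return 0;
    }
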
num_events 451 drivers/perf/arm_pmu.c int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
num_events 529 drivers/perf/arm_pmu.c max_events = __oprofile_cpu_pmu->num_events;
num_events 666 drivers/perf/arm_pmu.c for (idx = 0; idx < armpmu->num_events; idx++) {
num_events 705 drivers/perf/arm_pmu.c int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
num_events 874 drivers/perf/arm_pmu.c pmu->name, pmu->num_events);
num_events 3294 drivers/scsi/smartpqi/smartpqi_init.c unsigned int num_events;
num_events 3303 drivers/scsi/smartpqi/smartpqi_init.c num_events = 0;
num_events 3311 drivers/scsi/smartpqi/smartpqi_init.c num_events++;
num_events 3333 drivers/scsi/smartpqi/smartpqi_init.c if (num_events) {
num_events 3339 drivers/scsi/smartpqi/smartpqi_init.c return num_events;
num_events 99 include/linux/perf/arm_pmu.h int num_events;
num_events 72 include/net/bluetooth/mgmt.h __le16 num_events;
num_events 195 include/sound/soc-topology.h const struct snd_soc_tplg_widget_events *events, int num_events,
num_events 290 include/uapi/linux/kfd_ioctl.h __u32 num_events; /* to KFD */
num_events 304 net/bluetooth/mgmt.c u16 num_commands, num_events;
num_events 312 net/bluetooth/mgmt.c num_events = ARRAY_SIZE(mgmt_events);
num_events 315 net/bluetooth/mgmt.c num_events = ARRAY_SIZE(mgmt_untrusted_events);
num_events 318 net/bluetooth/mgmt.c rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
num_events 325 net/bluetooth/mgmt.c rp->num_events = cpu_to_le16(num_events);
num_events 333 net/bluetooth/mgmt.c for (i = 0; i < num_events; i++, opcode++)
num_events 341 net/bluetooth/mgmt.c for (i = 0; i < num_events; i++, opcode++)
num_events 662 sound/soc/soc-topology.c int num_events, u16 event_type)
num_events 668 sound/soc/soc-topology.c for (i = 0; i < num_events; i++) {
num_events 63 tools/perf/util/intel-bts.c unsigned long num_events;
num_events 281 tools/perf/util/intel-bts.c bts->num_events++ <= bts->synth_opts.initial_skip)
num_events 120 tools/perf/util/intel-pt.c unsigned long num_events;
num_events 1188 tools/perf/util/intel-pt.c pt->num_events++ < pt->synth_opts.initial_skip;
num_events 1199 tools/perf/util/intel-pt.c pt->num_events + 4 < pt->synth_opts.initial_skip;
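
Finally, the net/bluetooth/mgmt.c hits (318, 325, 333) use num_events to size a variable-length reply: the header carries the command and event counts, and the payload is both opcode lists back to back. A simplified host-endian sketch of that layout (the kernel converts the fields with cpu_to_le16()):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified read-commands reply: two counts, then
     * num_commands + num_events 16-bit opcodes back to back. */
    struct rp_hdr {
        uint16_t num_commands;
        uint16_t num_events;
        uint16_t opcodes[];
    };

    int main(void)
    {
        const uint16_t cmds[] = { 0x0001, 0x0002 };
        const uint16_t evts[] = { 0x0001, 0x0002, 0x0003 };
        size_t nc = 2, ne = 3, i;

        /* Mirrors: rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16)); */
        size_t rp_size = sizeof(struct rp_hdr) + (nc + ne) * sizeof(uint16_t);
        struct rp_hdr *rp = calloc(1, rp_size);

        if (!rp)
            return 1;
        rp->num_commands = (uint16_t)nc;
        rp->num_events = (uint16_t)ne;
        for (i = 0; i < nc; i++)
            rp->opcodes[i] = cmds[i];
        for (i = 0; i < ne; i++)
            rp->opcodes[nc + i] = evts[i];

        printf("reply is %zu bytes\n", rp_size);
        free(rp);
        return 0;
    }
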