cpuc              391 arch/alpha/kernel/perf_event.c static void maybe_change_configuration(struct cpu_hw_events *cpuc)
cpuc              395 arch/alpha/kernel/perf_event.c 	if (cpuc->n_added == 0)
cpuc              399 arch/alpha/kernel/perf_event.c 	for (j = 0; j < cpuc->n_events; j++) {
cpuc              400 arch/alpha/kernel/perf_event.c 		struct perf_event *pe = cpuc->event[j];
cpuc              402 arch/alpha/kernel/perf_event.c 		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
cpuc              403 arch/alpha/kernel/perf_event.c 			cpuc->current_idx[j] != pe->hw.idx) {
cpuc              404 arch/alpha/kernel/perf_event.c 			alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
cpuc              405 arch/alpha/kernel/perf_event.c 			cpuc->current_idx[j] = PMC_NO_INDEX;
cpuc              410 arch/alpha/kernel/perf_event.c 	cpuc->idx_mask = 0;
cpuc              411 arch/alpha/kernel/perf_event.c 	for (j = 0; j < cpuc->n_events; j++) {
cpuc              412 arch/alpha/kernel/perf_event.c 		struct perf_event *pe = cpuc->event[j];
cpuc              416 arch/alpha/kernel/perf_event.c 		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
cpuc              418 arch/alpha/kernel/perf_event.c 			cpuc->current_idx[j] = idx;
cpuc              422 arch/alpha/kernel/perf_event.c 			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
cpuc              424 arch/alpha/kernel/perf_event.c 	cpuc->config = cpuc->event[0]->hw.config_base;
cpuc              435 arch/alpha/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              456 arch/alpha/kernel/perf_event.c 	n0 = cpuc->n_events;
cpuc              458 arch/alpha/kernel/perf_event.c 		cpuc->event[n0] = event;
cpuc              459 arch/alpha/kernel/perf_event.c 		cpuc->evtype[n0] = event->hw.event_base;
cpuc              460 arch/alpha/kernel/perf_event.c 		cpuc->current_idx[n0] = PMC_NO_INDEX;
cpuc              462 arch/alpha/kernel/perf_event.c 		if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
cpuc              463 arch/alpha/kernel/perf_event.c 			cpuc->n_events++;
cpuc              464 arch/alpha/kernel/perf_event.c 			cpuc->n_added++;
cpuc              487 arch/alpha/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              495 arch/alpha/kernel/perf_event.c 	for (j = 0; j < cpuc->n_events; j++) {
cpuc              496 arch/alpha/kernel/perf_event.c 		if (event == cpuc->event[j]) {
cpuc              497 arch/alpha/kernel/perf_event.c 			int idx = cpuc->current_idx[j];
cpuc              502 arch/alpha/kernel/perf_event.c 			while (++j < cpuc->n_events) {
cpuc              503 arch/alpha/kernel/perf_event.c 				cpuc->event[j - 1] = cpuc->event[j];
cpuc              504 arch/alpha/kernel/perf_event.c 				cpuc->evtype[j - 1] = cpuc->evtype[j];
cpuc              505 arch/alpha/kernel/perf_event.c 				cpuc->current_idx[j - 1] =
cpuc              506 arch/alpha/kernel/perf_event.c 					cpuc->current_idx[j];
cpuc              513 arch/alpha/kernel/perf_event.c 			cpuc->idx_mask &= ~(1UL<<idx);
cpuc              514 arch/alpha/kernel/perf_event.c 			cpuc->n_events--;
cpuc              535 arch/alpha/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              538 arch/alpha/kernel/perf_event.c 		cpuc->idx_mask &= ~(1UL<<hwc->idx);
cpuc              547 arch/alpha/kernel/perf_event.c 	if (cpuc->enabled)
cpuc              555 arch/alpha/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              567 arch/alpha/kernel/perf_event.c 	cpuc->idx_mask |= 1UL<<hwc->idx;
cpuc              568 arch/alpha/kernel/perf_event.c 	if (cpuc->enabled)
cpuc              722 arch/alpha/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              724 arch/alpha/kernel/perf_event.c 	if (cpuc->enabled)
cpuc              727 arch/alpha/kernel/perf_event.c 	cpuc->enabled = 1;
cpuc              730 arch/alpha/kernel/perf_event.c 	if (cpuc->n_events > 0) {
cpuc              732 arch/alpha/kernel/perf_event.c 		maybe_change_configuration(cpuc);
cpuc              736 arch/alpha/kernel/perf_event.c 		wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
cpuc              737 arch/alpha/kernel/perf_event.c 		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
cpuc              748 arch/alpha/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              750 arch/alpha/kernel/perf_event.c 	if (!cpuc->enabled)
cpuc              753 arch/alpha/kernel/perf_event.c 	cpuc->enabled = 0;
cpuc              754 arch/alpha/kernel/perf_event.c 	cpuc->n_added = 0;
cpuc              756 arch/alpha/kernel/perf_event.c 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
cpuc              807 arch/alpha/kernel/perf_event.c 	struct cpu_hw_events *cpuc;
cpuc              814 arch/alpha/kernel/perf_event.c 	cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              821 arch/alpha/kernel/perf_event.c 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
cpuc              828 arch/alpha/kernel/perf_event.c 		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
cpuc              834 arch/alpha/kernel/perf_event.c 	for (j = 0; j < cpuc->n_events; j++) {
cpuc              835 arch/alpha/kernel/perf_event.c 		if (cpuc->current_idx[j] == idx)
cpuc              839 arch/alpha/kernel/perf_event.c 	if (unlikely(j == cpuc->n_events)) {
cpuc              841 arch/alpha/kernel/perf_event.c 		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
cpuc              845 arch/alpha/kernel/perf_event.c 	event = cpuc->event[j];
cpuc              851 arch/alpha/kernel/perf_event.c 		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
cpuc              867 arch/alpha/kernel/perf_event.c 	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
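
The alpha block above (arch/alpha/kernel/perf_event.c) shows a deferred-reconfiguration pattern: adding an event only bumps n_events/n_added, and the actual counter placement happens later in pmu_enable() via maybe_change_configuration(), which rebuilds idx_mask from scratch. A minimal user-space sketch of that idea follows; struct hw_state, NO_INDEX and pick_free_counter() are illustrative stand-ins, not the kernel's names.

    #include <stdio.h>

    #define NCOUNTERS 3
    #define NO_INDEX  (-1)

    struct hw_state {
        int n_events, n_added;
        int current_idx[NCOUNTERS];
        unsigned long idx_mask;
    };

    /* lowest counter whose bit is still clear in mask */
    static int pick_free_counter(unsigned long mask)
    {
        for (int i = 0; i < NCOUNTERS; i++)
            if (!(mask & (1UL << i)))
                return i;
        return NO_INDEX;
    }

    static void maybe_change_configuration(struct hw_state *st)
    {
        if (st->n_added == 0)          /* nothing new: keep assignments */
            return;
        st->idx_mask = 0;
        for (int j = 0; j < st->n_events; j++) {
            if (st->current_idx[j] == NO_INDEX)
                st->current_idx[j] = pick_free_counter(st->idx_mask);
            if (st->current_idx[j] != NO_INDEX)
                st->idx_mask |= 1UL << st->current_idx[j];
        }
        st->n_added = 0;
    }

    int main(void)
    {
        struct hw_state st = { .n_events = 2, .n_added = 1,
                               .current_idx = { 0, NO_INDEX } };
        maybe_change_configuration(&st);
        printf("idx_mask=%#lx\n", st.idx_mask);   /* prints 0x3 */
        return 0;
    }
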
cpuc              310 arch/arm/kernel/perf_event_v6.c 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
cpuc              327 arch/arm/kernel/perf_event_v6.c 		struct perf_event *event = cpuc->events[idx];
cpuc              388 arch/arm/kernel/perf_event_v6.c armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
cpuc              394 arch/arm/kernel/perf_event_v6.c 		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
cpuc              403 arch/arm/kernel/perf_event_v6.c 		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
cpuc              406 arch/arm/kernel/perf_event_v6.c 		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
cpuc              414 arch/arm/kernel/perf_event_v6.c static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
cpuc              417 arch/arm/kernel/perf_event_v6.c 	clear_bit(event->hw.idx, cpuc->used_mask);
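
The armv6 lines show the used_mask convention shared by most of the arm PMU code: a counter index belongs to whoever wins test_and_set_bit(), and clear_event_idx() simply drops the bit. A compilable sketch, using C11 atomics in place of the kernel's bit ops (counter numbers invented):

    #include <stdatomic.h>
    #include <stdio.h>

    #define CYCLE_COUNTER 0
    #define COUNTER0      1
    #define COUNTER1      2

    static atomic_ulong used_mask;

    static int test_and_set_bit(int bit, atomic_ulong *mask)
    {
        return (atomic_fetch_or(mask, 1UL << bit) >> bit) & 1;
    }

    static void clear_bit(int bit, atomic_ulong *mask)
    {
        atomic_fetch_and(mask, ~(1UL << bit));
    }

    static int get_event_idx(int is_cycle_event)
    {
        if (is_cycle_event)   /* the cycle event has exactly one home */
            return test_and_set_bit(CYCLE_COUNTER, &used_mask) ? -1 : CYCLE_COUNTER;
        if (!test_and_set_bit(COUNTER1, &used_mask))
            return COUNTER1;
        if (!test_and_set_bit(COUNTER0, &used_mask))
            return COUNTER0;
        return -1;            /* all busy; the kernel returns -EAGAIN */
    }

    int main(void)
    {
        int idx = get_event_idx(0);
        printf("got idx %d\n", idx);
        clear_bit(idx, &used_mask);   /* clear_event_idx() equivalent */
        return 0;
    }
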
cpuc              953 arch/arm/kernel/perf_event_v7.c 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
cpuc              974 arch/arm/kernel/perf_event_v7.c 		struct perf_event *event = cpuc->events[idx];
cpuc             1032 arch/arm/kernel/perf_event_v7.c static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
cpuc             1042 arch/arm/kernel/perf_event_v7.c 		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
cpuc             1053 arch/arm/kernel/perf_event_v7.c 		if (!test_and_set_bit(idx, cpuc->used_mask))
cpuc             1061 arch/arm/kernel/perf_event_v7.c static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
cpuc             1064 arch/arm/kernel/perf_event_v7.c 	clear_bit(event->hw.idx, cpuc->used_mask);
cpuc             1606 arch/arm/kernel/perf_event_v7.c static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
cpuc             1626 arch/arm/kernel/perf_event_v7.c 		if (test_and_set_bit(bit, cpuc->used_mask))
cpuc             1630 arch/arm/kernel/perf_event_v7.c 	idx = armv7pmu_get_event_idx(cpuc, event);
cpuc             1632 arch/arm/kernel/perf_event_v7.c 		clear_bit(bit, cpuc->used_mask);
cpuc             1637 arch/arm/kernel/perf_event_v7.c static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
cpuc             1647 arch/arm/kernel/perf_event_v7.c 	armv7pmu_clear_event_idx(cpuc, event);
cpuc             1650 arch/arm/kernel/perf_event_v7.c 		clear_bit(bit, cpuc->used_mask);
cpuc             1939 arch/arm/kernel/perf_event_v7.c static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
cpuc             1956 arch/arm/kernel/perf_event_v7.c 		if (test_and_set_bit(bit, cpuc->used_mask))
cpuc             1960 arch/arm/kernel/perf_event_v7.c 	idx = armv7pmu_get_event_idx(cpuc, event);
cpuc             1962 arch/arm/kernel/perf_event_v7.c 		clear_bit(bit, cpuc->used_mask);
cpuc             1967 arch/arm/kernel/perf_event_v7.c static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
cpuc             1977 arch/arm/kernel/perf_event_v7.c 	armv7pmu_clear_event_idx(cpuc, event);
cpuc             1980 arch/arm/kernel/perf_event_v7.c 		clear_bit(bit, cpuc->used_mask);
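
The krait/scorpion variants above layer a second reservation on top of the generic armv7 allocator: a shared "region" bit is claimed first, the generic allocation runs next, and the region bit is rolled back if that allocation fails. A single-threaded sketch of the claim-then-rollback shape (bit numbers and helpers invented):

    #include <stdio.h>

    static unsigned long used_mask;   /* single-threaded stand-in */

    static int test_and_set(int bit)
    {
        int old = (used_mask >> bit) & 1;
        used_mask |= 1UL << bit;
        return old;
    }

    static void clear(int bit) { used_mask &= ~(1UL << bit); }

    /* generic armv7-style scan over four plain counters */
    static int generic_get_idx(void)
    {
        for (int i = 0; i < 4; i++)
            if (!test_and_set(i))
                return i;
        return -1;
    }

    static int krait_style_get_idx(int region_bit)
    {
        if (test_and_set(region_bit))
            return -1;                /* shared region already claimed */
        int idx = generic_get_idx();
        if (idx < 0)
            clear(region_bit);        /* roll the reservation back */
        return idx;
    }

    int main(void)
    {
        printf("idx=%d\n", krait_style_get_idx(8));
        return 0;
    }
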
cpuc              149 arch/arm/kernel/perf_event_xscale.c 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
cpuc              174 arch/arm/kernel/perf_event_xscale.c 		struct perf_event *event = cpuc->events[idx];
cpuc              275 arch/arm/kernel/perf_event_xscale.c xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
cpuc              280 arch/arm/kernel/perf_event_xscale.c 		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
cpuc              285 arch/arm/kernel/perf_event_xscale.c 		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
cpuc              288 arch/arm/kernel/perf_event_xscale.c 		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
cpuc              295 arch/arm/kernel/perf_event_xscale.c static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
cpuc              298 arch/arm/kernel/perf_event_xscale.c 	clear_bit(event->hw.idx, cpuc->used_mask);
cpuc              501 arch/arm/kernel/perf_event_xscale.c 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
cpuc              520 arch/arm/kernel/perf_event_xscale.c 		struct perf_event *event = cpuc->events[idx];
cpuc              649 arch/arm/kernel/perf_event_xscale.c xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
cpuc              652 arch/arm/kernel/perf_event_xscale.c 	int idx = xscale1pmu_get_event_idx(cpuc, event);
cpuc              656 arch/arm/kernel/perf_event_xscale.c 	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
cpuc              658 arch/arm/kernel/perf_event_xscale.c 	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
cpuc              693 arch/arm64/kernel/perf_event.c 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
cpuc              719 arch/arm64/kernel/perf_event.c 		struct perf_event *event = cpuc->events[idx];
cpuc              756 arch/arm64/kernel/perf_event.c static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
cpuc              762 arch/arm64/kernel/perf_event.c 		if (!test_and_set_bit(idx, cpuc->used_mask))
cpuc              768 arch/arm64/kernel/perf_event.c static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
cpuc              778 arch/arm64/kernel/perf_event.c 		if (!test_and_set_bit(idx, cpuc->used_mask)) {
cpuc              780 arch/arm64/kernel/perf_event.c 			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
cpuc              783 arch/arm64/kernel/perf_event.c 			clear_bit(idx, cpuc->used_mask);
cpuc              789 arch/arm64/kernel/perf_event.c static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
cpuc              798 arch/arm64/kernel/perf_event.c 		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
cpuc              806 arch/arm64/kernel/perf_event.c 		return	armv8pmu_get_chain_idx(cpuc, cpu_pmu);
cpuc              808 arch/arm64/kernel/perf_event.c 		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
cpuc              811 arch/arm64/kernel/perf_event.c static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
cpuc              816 arch/arm64/kernel/perf_event.c 	clear_bit(idx, cpuc->used_mask);
cpuc              818 arch/arm64/kernel/perf_event.c 		clear_bit(idx - 1, cpuc->used_mask);
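
The arm64 chain allocator above needs two adjacent counters for a 64-bit chained event, and backs out the odd counter when its even partner turns out to be busy. A sketch, assuming a small counter bank and plain (non-atomic) bit ops:

    #include <stdio.h>

    #define NUM_COUNTERS 6

    static unsigned long used_mask;

    static int test_and_set(int bit)
    {
        int old = (used_mask >> bit) & 1;
        used_mask |= 1UL << bit;
        return old;
    }

    static void clear(int bit) { used_mask &= ~(1UL << bit); }

    /* a chained event occupies an odd idx plus its even partner idx-1 */
    static int get_chain_idx(void)
    {
        for (int idx = 1; idx < NUM_COUNTERS; idx += 2) {
            if (!test_and_set(idx)) {
                if (!test_and_set(idx - 1))
                    return idx;
                clear(idx);           /* partner busy: back out */
            }
        }
        return -1;
    }

    static void clear_chain_idx(int idx)
    {
        clear(idx);
        clear(idx - 1);
    }

    int main(void)
    {
        int idx = get_chain_idx();
        printf("chain at idx %d\n", idx);
        clear_chain_idx(idx);
        return 0;
    }
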
cpuc             1106 arch/csky/kernel/perf_event.c 	struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events);
cpuc             1124 arch/csky/kernel/perf_event.c 		struct perf_event *event = cpuc->events[idx];
cpuc              285 arch/mips/kernel/perf_event_mipsxx.c static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
cpuc              308 arch/mips/kernel/perf_event_mipsxx.c 			!test_and_set_bit(i, cpuc->used_mask))
cpuc              318 arch/mips/kernel/perf_event_mipsxx.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              323 arch/mips/kernel/perf_event_mipsxx.c 	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
cpuc              330 arch/mips/kernel/perf_event_mipsxx.c 		cpuc->saved_ctrl[idx] |=
cpuc              335 arch/mips/kernel/perf_event_mipsxx.c 		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
cpuc              348 arch/mips/kernel/perf_event_mipsxx.c 		cpuc->saved_ctrl[idx] |= ctrl;
cpuc              358 arch/mips/kernel/perf_event_mipsxx.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              364 arch/mips/kernel/perf_event_mipsxx.c 	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
cpuc              366 arch/mips/kernel/perf_event_mipsxx.c 	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
cpuc              458 arch/mips/kernel/perf_event_mipsxx.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              466 arch/mips/kernel/perf_event_mipsxx.c 	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
cpuc              478 arch/mips/kernel/perf_event_mipsxx.c 	cpuc->events[idx] = event;
cpuc              494 arch/mips/kernel/perf_event_mipsxx.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              501 arch/mips/kernel/perf_event_mipsxx.c 	cpuc->events[idx] = NULL;
cpuc              502 arch/mips/kernel/perf_event_mipsxx.c 	clear_bit(idx, cpuc->used_mask);
cpuc              733 arch/mips/kernel/perf_event_mipsxx.c static void handle_associated_event(struct cpu_hw_events *cpuc,
cpuc              737 arch/mips/kernel/perf_event_mipsxx.c 	struct perf_event *event = cpuc->events[idx];
cpuc             1356 arch/mips/kernel/perf_event_mipsxx.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1363 arch/mips/kernel/perf_event_mipsxx.c 		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
cpuc             1364 arch/mips/kernel/perf_event_mipsxx.c 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
cpuc             1372 arch/mips/kernel/perf_event_mipsxx.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1377 arch/mips/kernel/perf_event_mipsxx.c 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
cpuc             1383 arch/mips/kernel/perf_event_mipsxx.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1409 arch/mips/kernel/perf_event_mipsxx.c 		if (!test_bit(n, cpuc->used_mask))
cpuc             1416 arch/mips/kernel/perf_event_mipsxx.c 		handle_associated_event(cpuc, n, &data, regs);
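
The mips lines illustrate the saved_ctrl idea: the live control word for each counter is stashed per index, so pause_local_counters()/resume_local_counters() can disable the whole PMU and later restore it bit-for-bit. A user-space sketch with a fake register file standing in for mipsxx_pmu_read_control()/mipsxx_pmu_write_control():

    #include <stdio.h>

    #define NCTRS 4
    #define CTRL_ENABLE 0x1u

    static unsigned int ctrl_reg[NCTRS];   /* fake hardware registers */
    static unsigned int saved_ctrl[NCTRS];

    static void pause_all(void)
    {
        for (int i = 0; i < NCTRS; i++) {
            saved_ctrl[i] = ctrl_reg[i];             /* stash live word */
            ctrl_reg[i] = saved_ctrl[i] & ~CTRL_ENABLE;
        }
    }

    static void resume_all(void)
    {
        for (int i = 0; i < NCTRS; i++)
            ctrl_reg[i] = saved_ctrl[i];             /* restore verbatim */
    }

    int main(void)
    {
        ctrl_reg[0] = CTRL_ENABLE | 0x40;
        pause_all();
        printf("paused:  %#x\n", ctrl_reg[0]);   /* 0x40 */
        resume_all();
        printf("resumed: %#x\n", ctrl_reg[0]);   /* 0x41 */
        return 0;
    }
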
cpuc              230 arch/nds32/kernel/perf_event_cpu.c 	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
cpuc              251 arch/nds32/kernel/perf_event_cpu.c 		struct perf_event *event = cpuc->events[idx];
cpuc              551 arch/nds32/kernel/perf_event_cpu.c static int nds32_pmu_get_event_idx(struct pmu_hw_events *cpuc,
cpuc              574 arch/nds32/kernel/perf_event_cpu.c 		if (!test_and_set_bit(idx, cpuc->used_mask))
cpuc              576 arch/nds32/kernel/perf_event_cpu.c 		if (!test_and_set_bit(NDS32_IDX_COUNTER0, cpuc->used_mask))
cpuc              578 arch/nds32/kernel/perf_event_cpu.c 		if (!test_and_set_bit(NDS32_IDX_COUNTER1, cpuc->used_mask))
cpuc              581 arch/nds32/kernel/perf_event_cpu.c 		if (!test_and_set_bit(idx, cpuc->used_mask))
cpuc              583 arch/nds32/kernel/perf_event_cpu.c 		else if (!test_and_set_bit(NDS32_IDX_COUNTER1, cpuc->used_mask))
cpuc              586 arch/nds32/kernel/perf_event_cpu.c 			 (NDS32_IDX_CYCLE_COUNTER, cpuc->used_mask))
cpuc              589 arch/nds32/kernel/perf_event_cpu.c 		if (!test_and_set_bit(idx, cpuc->used_mask))
cpuc              299 arch/riscv/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              302 arch/riscv/kernel/perf_event.c 	if (cpuc->n_events == riscv_pmu->num_counters)
cpuc              314 arch/riscv/kernel/perf_event.c 	cpuc->events[hwc->idx] = event;
cpuc              315 arch/riscv/kernel/perf_event.c 	cpuc->n_events++;
cpuc              330 arch/riscv/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              333 arch/riscv/kernel/perf_event.c 	cpuc->events[hwc->idx] = NULL;
cpuc              334 arch/riscv/kernel/perf_event.c 	cpuc->n_events--;
cpuc              219 arch/sh/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              225 arch/sh/kernel/perf_event.c 		cpuc->events[idx] = NULL;
cpuc              237 arch/sh/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              247 arch/sh/kernel/perf_event.c 	cpuc->events[idx] = event;
cpuc              254 arch/sh/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              257 arch/sh/kernel/perf_event.c 	__clear_bit(event->hw.idx, cpuc->used_mask);
cpuc              264 arch/sh/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              271 arch/sh/kernel/perf_event.c 	if (__test_and_set_bit(idx, cpuc->used_mask)) {
cpuc              272 arch/sh/kernel/perf_event.c 		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
cpuc              276 arch/sh/kernel/perf_event.c 		__set_bit(idx, cpuc->used_mask);
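
The sh code can use non-atomic __set_bit/__clear_bit because used_mask is strictly per-CPU; it also retries the event's previous index before scanning for a free one. Sketch (plain C, invented sizes):

    #include <stdio.h>

    #define NUM_EVENTS 8

    static unsigned long used_mask;

    static int find_first_zero(unsigned long mask, int limit)
    {
        for (int i = 0; i < limit; i++)
            if (!(mask & (1UL << i)))
                return i;
        return limit;
    }

    static int add_event(int prev_idx)
    {
        int idx = prev_idx;

        if (used_mask & (1UL << idx)) {   /* previous slot taken */
            idx = find_first_zero(used_mask, NUM_EVENTS);
            if (idx == NUM_EVENTS)
                return -1;                /* -EAGAIN in the kernel */
        }
        used_mask |= 1UL << idx;
        return idx;
    }

    int main(void)
    {
        used_mask = 0x1;
        printf("reassigned to idx %d\n", add_event(0));   /* 1 */
        return 0;
    }
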
cpuc              827 arch/sparc/kernel/perf_event.c static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
cpuc              835 arch/sparc/kernel/perf_event.c 	enc = perf_event_get_enc(cpuc->events[idx]);
cpuc              837 arch/sparc/kernel/perf_event.c 	val = cpuc->pcr[pcr_index];
cpuc              840 arch/sparc/kernel/perf_event.c 	cpuc->pcr[pcr_index] = val;
cpuc              842 arch/sparc/kernel/perf_event.c 	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
cpuc              845 arch/sparc/kernel/perf_event.c static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
cpuc              855 arch/sparc/kernel/perf_event.c 	val = cpuc->pcr[pcr_index];
cpuc              858 arch/sparc/kernel/perf_event.c 	cpuc->pcr[pcr_index] = val;
cpuc              860 arch/sparc/kernel/perf_event.c 	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
cpuc              923 arch/sparc/kernel/perf_event.c static void read_in_all_counters(struct cpu_hw_events *cpuc)
cpuc              927 arch/sparc/kernel/perf_event.c 	for (i = 0; i < cpuc->n_events; i++) {
cpuc              928 arch/sparc/kernel/perf_event.c 		struct perf_event *cp = cpuc->event[i];
cpuc              930 arch/sparc/kernel/perf_event.c 		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
cpuc              931 arch/sparc/kernel/perf_event.c 		    cpuc->current_idx[i] != cp->hw.idx) {
cpuc              933 arch/sparc/kernel/perf_event.c 						cpuc->current_idx[i]);
cpuc              934 arch/sparc/kernel/perf_event.c 			cpuc->current_idx[i] = PIC_NO_INDEX;
cpuc              947 arch/sparc/kernel/perf_event.c static void calculate_single_pcr(struct cpu_hw_events *cpuc)
cpuc              951 arch/sparc/kernel/perf_event.c 	if (!cpuc->n_added)
cpuc              955 arch/sparc/kernel/perf_event.c 	for (i = 0; i < cpuc->n_events; i++) {
cpuc              956 arch/sparc/kernel/perf_event.c 		struct perf_event *cp = cpuc->event[i];
cpuc              961 arch/sparc/kernel/perf_event.c 		if (cpuc->current_idx[i] != PIC_NO_INDEX)
cpuc              965 arch/sparc/kernel/perf_event.c 		cpuc->current_idx[i] = idx;
cpuc              967 arch/sparc/kernel/perf_event.c 		enc = perf_event_get_enc(cpuc->events[i]);
cpuc              968 arch/sparc/kernel/perf_event.c 		cpuc->pcr[0] &= ~mask_for_index(idx);
cpuc              970 arch/sparc/kernel/perf_event.c 			cpuc->pcr[0] |= nop_for_index(idx);
cpuc              972 arch/sparc/kernel/perf_event.c 			cpuc->pcr[0] |= event_encoding(enc, idx);
cpuc              977 arch/sparc/kernel/perf_event.c 	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
cpuc              983 arch/sparc/kernel/perf_event.c static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
cpuc              987 arch/sparc/kernel/perf_event.c 	if (!cpuc->n_added)
cpuc              990 arch/sparc/kernel/perf_event.c 	for (i = 0; i < cpuc->n_events; i++) {
cpuc              991 arch/sparc/kernel/perf_event.c 		struct perf_event *cp = cpuc->event[i];
cpuc              995 arch/sparc/kernel/perf_event.c 		if (cpuc->current_idx[i] != PIC_NO_INDEX)
cpuc              998 arch/sparc/kernel/perf_event.c 		cpuc->current_idx[i] = idx;
cpuc             1006 arch/sparc/kernel/perf_event.c 	for (i = 0; i < cpuc->n_events; i++) {
cpuc             1007 arch/sparc/kernel/perf_event.c 		struct perf_event *cp = cpuc->event[i];
cpuc             1010 arch/sparc/kernel/perf_event.c 		cpuc->pcr[idx] |= cp->hw.config_base;
cpuc             1017 arch/sparc/kernel/perf_event.c static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
cpuc             1019 arch/sparc/kernel/perf_event.c 	if (cpuc->n_added)
cpuc             1020 arch/sparc/kernel/perf_event.c 		read_in_all_counters(cpuc);
cpuc             1023 arch/sparc/kernel/perf_event.c 		calculate_single_pcr(cpuc);
cpuc             1025 arch/sparc/kernel/perf_event.c 		calculate_multiple_pcrs(cpuc);
cpuc             1031 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1034 arch/sparc/kernel/perf_event.c 	if (cpuc->enabled)
cpuc             1037 arch/sparc/kernel/perf_event.c 	cpuc->enabled = 1;
cpuc             1040 arch/sparc/kernel/perf_event.c 	if (cpuc->n_events)
cpuc             1041 arch/sparc/kernel/perf_event.c 		update_pcrs_for_enable(cpuc);
cpuc             1044 arch/sparc/kernel/perf_event.c 		pcr_ops->write_pcr(i, cpuc->pcr[i]);
cpuc             1049 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1052 arch/sparc/kernel/perf_event.c 	if (!cpuc->enabled)
cpuc             1055 arch/sparc/kernel/perf_event.c 	cpuc->enabled = 0;
cpuc             1056 arch/sparc/kernel/perf_event.c 	cpuc->n_added = 0;
cpuc             1059 arch/sparc/kernel/perf_event.c 		u64 val = cpuc->pcr[i];
cpuc             1063 arch/sparc/kernel/perf_event.c 		cpuc->pcr[i] = val;
cpuc             1064 arch/sparc/kernel/perf_event.c 		pcr_ops->write_pcr(i, cpuc->pcr[i]);
cpuc             1068 arch/sparc/kernel/perf_event.c static int active_event_index(struct cpu_hw_events *cpuc,
cpuc             1073 arch/sparc/kernel/perf_event.c 	for (i = 0; i < cpuc->n_events; i++) {
cpuc             1074 arch/sparc/kernel/perf_event.c 		if (cpuc->event[i] == event)
cpuc             1077 arch/sparc/kernel/perf_event.c 	BUG_ON(i == cpuc->n_events);
cpuc             1078 arch/sparc/kernel/perf_event.c 	return cpuc->current_idx[i];
cpuc             1083 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1084 arch/sparc/kernel/perf_event.c 	int idx = active_event_index(cpuc, event);
cpuc             1093 arch/sparc/kernel/perf_event.c 	sparc_pmu_enable_event(cpuc, &event->hw, idx);
cpuc             1100 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1101 arch/sparc/kernel/perf_event.c 	int idx = active_event_index(cpuc, event);
cpuc             1104 arch/sparc/kernel/perf_event.c 		sparc_pmu_disable_event(cpuc, &event->hw, idx);
cpuc             1116 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1122 arch/sparc/kernel/perf_event.c 	for (i = 0; i < cpuc->n_events; i++) {
cpuc             1123 arch/sparc/kernel/perf_event.c 		if (event == cpuc->event[i]) {
cpuc             1132 arch/sparc/kernel/perf_event.c 			while (++i < cpuc->n_events) {
cpuc             1133 arch/sparc/kernel/perf_event.c 				cpuc->event[i - 1] = cpuc->event[i];
cpuc             1134 arch/sparc/kernel/perf_event.c 				cpuc->events[i - 1] = cpuc->events[i];
cpuc             1135 arch/sparc/kernel/perf_event.c 				cpuc->current_idx[i - 1] =
cpuc             1136 arch/sparc/kernel/perf_event.c 					cpuc->current_idx[i];
cpuc             1141 arch/sparc/kernel/perf_event.c 			cpuc->n_events--;
cpuc             1151 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1152 arch/sparc/kernel/perf_event.c 	int idx = active_event_index(cpuc, event);
cpuc             1163 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1168 arch/sparc/kernel/perf_event.c 		cpuc->pcr[i] = pcr_ops->read_pcr(i);
cpuc             1374 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1380 arch/sparc/kernel/perf_event.c 	n0 = cpuc->n_events;
cpuc             1384 arch/sparc/kernel/perf_event.c 	cpuc->event[n0] = event;
cpuc             1385 arch/sparc/kernel/perf_event.c 	cpuc->events[n0] = event->hw.event_base;
cpuc             1386 arch/sparc/kernel/perf_event.c 	cpuc->current_idx[n0] = PIC_NO_INDEX;
cpuc             1397 arch/sparc/kernel/perf_event.c 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
cpuc             1400 arch/sparc/kernel/perf_event.c 	if (check_excludes(cpuc->event, n0, 1))
cpuc             1402 arch/sparc/kernel/perf_event.c 	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
cpuc             1406 arch/sparc/kernel/perf_event.c 	cpuc->n_events++;
cpuc             1407 arch/sparc/kernel/perf_event.c 	cpuc->n_added++;
cpuc             1552 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1558 arch/sparc/kernel/perf_event.c 	WARN_ON_ONCE(!cpuc->txn_flags);	/* no txn in flight */
cpuc             1560 arch/sparc/kernel/perf_event.c 	if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
cpuc             1561 arch/sparc/kernel/perf_event.c 		cpuc->txn_flags = 0;
cpuc             1565 arch/sparc/kernel/perf_event.c 	n = cpuc->n_events;
cpuc             1566 arch/sparc/kernel/perf_event.c 	if (check_excludes(cpuc->event, 0, n))
cpuc             1568 arch/sparc/kernel/perf_event.c 	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
cpuc             1571 arch/sparc/kernel/perf_event.c 	cpuc->txn_flags = 0;
cpuc             1618 arch/sparc/kernel/perf_event.c 	struct cpu_hw_events *cpuc;
cpuc             1639 arch/sparc/kernel/perf_event.c 	cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1650 arch/sparc/kernel/perf_event.c 		pcr_ops->write_pcr(0, cpuc->pcr[0]);
cpuc             1652 arch/sparc/kernel/perf_event.c 	for (i = 0; i < cpuc->n_events; i++) {
cpuc             1653 arch/sparc/kernel/perf_event.c 		struct perf_event *event = cpuc->event[i];
cpuc             1654 arch/sparc/kernel/perf_event.c 		int idx = cpuc->current_idx[i];
cpuc             1660 arch/sparc/kernel/perf_event.c 			pcr_ops->write_pcr(idx, cpuc->pcr[idx]);
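
The sparc lines follow the same lazy model as alpha: the pcr images are only recomputed when n_added is set, and an event keeps its counter while current_idx still matches. A rough sketch of a single-PCR recomputation; the field widths of mask_for_index()/encoding() are invented and the placement policy is deliberately naive:

    #include <stdio.h>

    #define NO_INDEX (-1)

    struct state {
        int n_events, n_added;
        int current_idx[4];
        unsigned long enc[4];
        unsigned long pcr;
    };

    static unsigned long mask_for_index(int idx) { return 0xffUL << (idx * 8); }
    static unsigned long encoding(unsigned long enc, int idx) { return enc << (idx * 8); }

    static void calculate_pcr(struct state *st)
    {
        if (!st->n_added)
            return;                           /* nothing changed: keep pcr */
        for (int i = 0; i < st->n_events; i++) {
            int idx = st->current_idx[i];
            if (idx == NO_INDEX)
                st->current_idx[i] = idx = i; /* naive placement */
            st->pcr &= ~mask_for_index(idx);
            st->pcr |= encoding(st->enc[i], idx);
        }
        st->n_added = 0;
    }

    int main(void)
    {
        struct state st = { .n_events = 1, .n_added = 1,
                            .current_idx = { NO_INDEX }, .enc = { 0x2b } };
        calculate_pcr(&st);
        printf("pcr=%#lx\n", st.pcr);
        return 0;
    }
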
cpuc              347 arch/x86/events/amd/core.c static inline int amd_has_nb(struct cpu_hw_events *cpuc)
cpuc              349 arch/x86/events/amd/core.c 	struct amd_nb *nb = cpuc->amd_nb;
cpuc              375 arch/x86/events/amd/core.c static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
cpuc              378 arch/x86/events/amd/core.c 	struct amd_nb *nb = cpuc->amd_nb;
cpuc              432 arch/x86/events/amd/core.c __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
cpuc              436 arch/x86/events/amd/core.c 	struct amd_nb *nb = cpuc->amd_nb;
cpuc              443 arch/x86/events/amd/core.c 	if (cpuc->is_fake)
cpuc              508 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpuc              510 arch/x86/events/amd/core.c 	WARN_ON_ONCE(cpuc->amd_nb);
cpuc              515 arch/x86/events/amd/core.c 	cpuc->amd_nb = amd_alloc_nb(cpu);
cpuc              516 arch/x86/events/amd/core.c 	if (!cpuc->amd_nb)
cpuc              524 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpuc              525 arch/x86/events/amd/core.c 	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
cpuc              529 arch/x86/events/amd/core.c 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
cpuc              543 arch/x86/events/amd/core.c 			*onln = cpuc->amd_nb;
cpuc              544 arch/x86/events/amd/core.c 			cpuc->amd_nb = nb;
cpuc              549 arch/x86/events/amd/core.c 	cpuc->amd_nb->nb_id = nb_id;
cpuc              550 arch/x86/events/amd/core.c 	cpuc->amd_nb->refcnt++;
cpuc              603 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              623 arch/x86/events/amd/core.c 		if (!test_bit(idx, cpuc->active_mask))
cpuc              667 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              675 arch/x86/events/amd/core.c 	active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
cpuc              698 arch/x86/events/amd/core.c amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc              704 arch/x86/events/amd/core.c 	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
cpuc              707 arch/x86/events/amd/core.c 	return __amd_get_nb_event_constraints(cpuc, event, NULL);
cpuc              710 arch/x86/events/amd/core.c static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
cpuc              713 arch/x86/events/amd/core.c 	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
cpuc              714 arch/x86/events/amd/core.c 		__amd_put_nb_event_constraints(cpuc, event);
cpuc              807 arch/x86/events/amd/core.c amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
cpuc              882 arch/x86/events/amd/core.c amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
cpuc             1019 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1021 arch/x86/events/amd/core.c 	cpuc->perf_ctr_virt_mask = 0;
cpuc             1031 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1039 arch/x86/events/amd/core.c 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
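
The amd/core.c lines show cross-CPU sharing of the northbridge constraint structure: each CPU preallocates an amd_nb, but at cpu_starting time it adopts a sibling's structure with the same nb_id, bumps refcnt, and queues its own copy for freeing (kfree_on_online in the kernel). A sketch with a flat per-CPU array and an immediate free():

    #include <stdio.h>
    #include <stdlib.h>

    struct amd_nb { int nb_id; int refcnt; };

    #define NCPUS 4
    static struct amd_nb *per_cpu_nb[NCPUS];

    static void cpu_starting(int cpu, int nb_id, struct amd_nb *prealloc)
    {
        for (int i = 0; i < NCPUS; i++) {
            struct amd_nb *nb = per_cpu_nb[i];
            if (i != cpu && nb && nb->nb_id == nb_id) {
                free(prealloc);       /* kernel defers via kfree_on_online */
                nb->refcnt++;
                per_cpu_nb[cpu] = nb; /* share the sibling's structure */
                return;
            }
        }
        prealloc->nb_id = nb_id;      /* first CPU on this northbridge */
        prealloc->refcnt = 1;
        per_cpu_nb[cpu] = prealloc;
    }

    int main(void)
    {
        cpu_starting(0, 7, calloc(1, sizeof(struct amd_nb)));
        cpu_starting(1, 7, calloc(1, sizeof(struct amd_nb)));
        printf("refcnt=%d shared=%d\n", per_cpu_nb[0]->refcnt,
               per_cpu_nb[0] == per_cpu_nb[1]);   /* refcnt=2 shared=1 */
        return 0;
    }
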
cpuc              616 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              622 arch/x86/events/core.c 		if (!test_bit(idx, cpuc->active_mask))
cpuc              647 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              652 arch/x86/events/core.c 	if (!cpuc->enabled)
cpuc              655 arch/x86/events/core.c 	cpuc->n_added = 0;
cpuc              656 arch/x86/events/core.c 	cpuc->enabled = 0;
cpuc              664 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              668 arch/x86/events/core.c 		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
cpuc              670 arch/x86/events/core.c 		if (!test_bit(idx, cpuc->active_mask))
cpuc              871 arch/x86/events/core.c int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
cpuc              888 arch/x86/events/core.c 	n0 = cpuc->n_events;
cpuc              889 arch/x86/events/core.c 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
cpuc              890 arch/x86/events/core.c 		n0 -= cpuc->n_txn;
cpuc              893 arch/x86/events/core.c 		x86_pmu.start_scheduling(cpuc);
cpuc              896 arch/x86/events/core.c 		c = cpuc->event_constraint[i];
cpuc              910 arch/x86/events/core.c 			c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
cpuc              911 arch/x86/events/core.c 			cpuc->event_constraint[i] = c;
cpuc              922 arch/x86/events/core.c 		hwc = &cpuc->event_list[i]->hw;
cpuc              923 arch/x86/events/core.c 		c = cpuc->event_constraint[i];
cpuc              956 arch/x86/events/core.c 		if (is_ht_workaround_enabled() && !cpuc->is_fake &&
cpuc              957 arch/x86/events/core.c 		    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
cpuc              960 arch/x86/events/core.c 		unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
cpuc              976 arch/x86/events/core.c 			e = cpuc->event_list[i];
cpuc              978 arch/x86/events/core.c 				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
cpuc              982 arch/x86/events/core.c 			e = cpuc->event_list[i];
cpuc              988 arch/x86/events/core.c 				x86_pmu.put_event_constraints(cpuc, e);
cpuc              990 arch/x86/events/core.c 			cpuc->event_constraint[i] = NULL;
cpuc              995 arch/x86/events/core.c 		x86_pmu.stop_scheduling(cpuc);
cpuc             1004 arch/x86/events/core.c static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
cpuc             1012 arch/x86/events/core.c 	n = cpuc->n_events;
cpuc             1013 arch/x86/events/core.c 	if (!cpuc->n_events)
cpuc             1014 arch/x86/events/core.c 		cpuc->pebs_output = 0;
cpuc             1016 arch/x86/events/core.c 	if (!cpuc->is_fake && leader->attr.precise_ip) {
cpuc             1028 arch/x86/events/core.c 		if (cpuc->pebs_output &&
cpuc             1029 arch/x86/events/core.c 		    cpuc->pebs_output != is_pebs_pt(leader) + 1)
cpuc             1032 arch/x86/events/core.c 		cpuc->pebs_output = is_pebs_pt(leader) + 1;
cpuc             1038 arch/x86/events/core.c 		cpuc->event_list[n] = leader;
cpuc             1052 arch/x86/events/core.c 		cpuc->event_list[n] = event;
cpuc             1059 arch/x86/events/core.c 				struct cpu_hw_events *cpuc, int i)
cpuc             1063 arch/x86/events/core.c 	hwc->idx = cpuc->assign[i];
cpuc             1065 arch/x86/events/core.c 	hwc->last_tag = ++cpuc->tags[i];
cpuc             1103 arch/x86/events/core.c 					struct cpu_hw_events *cpuc,
cpuc             1106 arch/x86/events/core.c 	return hwc->idx == cpuc->assign[i] &&
cpuc             1108 arch/x86/events/core.c 		hwc->last_tag == cpuc->tags[i];
cpuc             1115 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1118 arch/x86/events/core.c 	int i, added = cpuc->n_added;
cpuc             1123 arch/x86/events/core.c 	if (cpuc->enabled)
cpuc             1126 arch/x86/events/core.c 	if (cpuc->n_added) {
cpuc             1127 arch/x86/events/core.c 		int n_running = cpuc->n_events - cpuc->n_added;
cpuc             1135 arch/x86/events/core.c 			event = cpuc->event_list[i];
cpuc             1145 arch/x86/events/core.c 			    match_prev_assignment(hwc, cpuc, i))
cpuc             1161 arch/x86/events/core.c 		for (i = 0; i < cpuc->n_events; i++) {
cpuc             1162 arch/x86/events/core.c 			event = cpuc->event_list[i];
cpuc             1165 arch/x86/events/core.c 			if (!match_prev_assignment(hwc, cpuc, i))
cpuc             1166 arch/x86/events/core.c 				x86_assign_hw_event(event, cpuc, i);
cpuc             1175 arch/x86/events/core.c 		cpuc->n_added = 0;
cpuc             1179 arch/x86/events/core.c 	cpuc->enabled = 1;
cpuc             1269 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1276 arch/x86/events/core.c 	n0 = cpuc->n_events;
cpuc             1277 arch/x86/events/core.c 	ret = n = collect_events(cpuc, event, false);
cpuc             1293 arch/x86/events/core.c 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
cpuc             1296 arch/x86/events/core.c 	ret = x86_pmu.schedule_events(cpuc, n, assign);
cpuc             1303 arch/x86/events/core.c 	memcpy(cpuc->assign, assign, n*sizeof(int));
cpuc             1310 arch/x86/events/core.c 	cpuc->n_events = n;
cpuc             1311 arch/x86/events/core.c 	cpuc->n_added += n - n0;
cpuc             1312 arch/x86/events/core.c 	cpuc->n_txn += n - n0;
cpuc             1329 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1345 arch/x86/events/core.c 	cpuc->events[idx] = event;
cpuc             1346 arch/x86/events/core.c 	__set_bit(idx, cpuc->active_mask);
cpuc             1347 arch/x86/events/core.c 	__set_bit(idx, cpuc->running);
cpuc             1356 arch/x86/events/core.c 	struct cpu_hw_events *cpuc;
cpuc             1366 arch/x86/events/core.c 	cpuc = &per_cpu(cpu_hw_events, cpu);
cpuc             1388 arch/x86/events/core.c 	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
cpuc             1414 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1417 arch/x86/events/core.c 	if (test_bit(hwc->idx, cpuc->active_mask)) {
cpuc             1419 arch/x86/events/core.c 		__clear_bit(hwc->idx, cpuc->active_mask);
cpuc             1420 arch/x86/events/core.c 		cpuc->events[hwc->idx] = NULL;
cpuc             1437 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1448 arch/x86/events/core.c 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
cpuc             1456 arch/x86/events/core.c 	for (i = 0; i < cpuc->n_events; i++) {
cpuc             1457 arch/x86/events/core.c 		if (event == cpuc->event_list[i])
cpuc             1461 arch/x86/events/core.c 	if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
cpuc             1465 arch/x86/events/core.c 	if (i >= cpuc->n_events - cpuc->n_added)
cpuc             1466 arch/x86/events/core.c 		--cpuc->n_added;
cpuc             1469 arch/x86/events/core.c 		x86_pmu.put_event_constraints(cpuc, event);
cpuc             1472 arch/x86/events/core.c 	while (++i < cpuc->n_events) {
cpuc             1473 arch/x86/events/core.c 		cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc             1474 arch/x86/events/core.c 		cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
cpuc             1476 arch/x86/events/core.c 	cpuc->event_constraint[i-1] = NULL;
cpuc             1477 arch/x86/events/core.c 	--cpuc->n_events;
cpuc             1494 arch/x86/events/core.c 	struct cpu_hw_events *cpuc;
cpuc             1499 arch/x86/events/core.c 	cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1512 arch/x86/events/core.c 		if (!test_bit(idx, cpuc->active_mask))
cpuc             1515 arch/x86/events/core.c 		event = cpuc->events[idx];
cpuc             1580 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpuc             1584 arch/x86/events/core.c 		cpuc->kfree_on_online[i] = NULL;
cpuc             1599 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpuc             1603 arch/x86/events/core.c 		kfree(cpuc->kfree_on_online[i]);
cpuc             1604 arch/x86/events/core.c 		cpuc->kfree_on_online[i] = NULL;
cpuc             1897 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1899 arch/x86/events/core.c 	WARN_ON_ONCE(cpuc->txn_flags);		/* txn already in flight */
cpuc             1901 arch/x86/events/core.c 	cpuc->txn_flags = txn_flags;
cpuc             1917 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1919 arch/x86/events/core.c 	WARN_ON_ONCE(!cpuc->txn_flags);	/* no txn in flight */
cpuc             1921 arch/x86/events/core.c 	txn_flags = cpuc->txn_flags;
cpuc             1922 arch/x86/events/core.c 	cpuc->txn_flags = 0;
cpuc             1944 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1948 arch/x86/events/core.c 	WARN_ON_ONCE(!cpuc->txn_flags);	/* no txn in flight */
cpuc             1950 arch/x86/events/core.c 	if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
cpuc             1951 arch/x86/events/core.c 		cpuc->txn_flags = 0;
cpuc             1955 arch/x86/events/core.c 	n = cpuc->n_events;
cpuc             1960 arch/x86/events/core.c 	ret = x86_pmu.schedule_events(cpuc, n, assign);
cpuc             1968 arch/x86/events/core.c 	memcpy(cpuc->assign, assign, n*sizeof(int));
cpuc             1970 arch/x86/events/core.c 	cpuc->txn_flags = 0;
cpuc             1982 arch/x86/events/core.c static void free_fake_cpuc(struct cpu_hw_events *cpuc)
cpuc             1984 arch/x86/events/core.c 	intel_cpuc_finish(cpuc);
cpuc             1985 arch/x86/events/core.c 	kfree(cpuc);
cpuc             1990 arch/x86/events/core.c 	struct cpu_hw_events *cpuc;
cpuc             1993 arch/x86/events/core.c 	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
cpuc             1994 arch/x86/events/core.c 	if (!cpuc)
cpuc             1996 arch/x86/events/core.c 	cpuc->is_fake = 1;
cpuc             1998 arch/x86/events/core.c 	if (intel_cpuc_prepare(cpuc, cpu))
cpuc             2001 arch/x86/events/core.c 	return cpuc;
cpuc             2003 arch/x86/events/core.c 	free_fake_cpuc(cpuc);
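
A recurring trick in events/core.c above is match_prev_assignment(): after rescheduling, an event is left running on its counter only if index, cached config and tag all still agree with the new assignment. A simplified mirror of that check (the kernel compares against cpuc->assign[] and cpuc->tags[]; the fields here are flattened into one struct):

    #include <stdbool.h>
    #include <stdio.h>

    struct hw_perf_event { int idx; unsigned long last_cfg, last_tag; };
    struct sched_state   { int assign; unsigned long cfg, tag; };

    static bool match_prev_assignment(const struct hw_perf_event *hwc,
                                      const struct sched_state *ss)
    {
        /* all three must match, or the counter gets reprogrammed */
        return hwc->idx == ss->assign &&
               hwc->last_cfg == ss->cfg &&
               hwc->last_tag == ss->tag;
    }

    int main(void)
    {
        struct hw_perf_event hwc = { .idx = 2, .last_cfg = 0xc0, .last_tag = 5 };
        struct sched_state ss    = { .assign = 2, .cfg = 0xc0, .tag = 5 };
        printf("reuse counter: %d\n", match_prev_assignment(&hwc, &ss));
        return 0;
    }
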
cpuc              256 arch/x86/events/intel/bts.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              267 arch/x86/events/intel/bts.c 	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
cpuc              268 arch/x86/events/intel/bts.c 	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
cpuc              269 arch/x86/events/intel/bts.c 	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;
cpuc              301 arch/x86/events/intel/bts.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              326 arch/x86/events/intel/bts.c 		cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
cpuc              327 arch/x86/events/intel/bts.c 		cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
cpuc              328 arch/x86/events/intel/bts.c 		cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
cpuc              329 arch/x86/events/intel/bts.c 		cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
cpuc              516 arch/x86/events/intel/bts.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              521 arch/x86/events/intel/bts.c 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
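
The bts.c lines save and restore the debug-store BTS fields around tracing: ds_back keeps the originals while the driver points the hardware at its own AUX buffer. A trimmed stand-in for the real struct debug_store:

    #include <stdio.h>
    #include <stdint.h>

    /* trimmed stand-in for the real struct debug_store */
    struct debug_store {
        uint64_t bts_buffer_base;
        uint64_t bts_index;
        uint64_t bts_absolute_maximum;
    };

    struct ds_backup {
        uint64_t bts_buffer_base;
        uint64_t bts_absolute_maximum;
    };

    static void bts_start(struct debug_store *ds, struct ds_backup *back,
                          uint64_t aux_base, uint64_t aux_max)
    {
        back->bts_buffer_base      = ds->bts_buffer_base;
        back->bts_absolute_maximum = ds->bts_absolute_maximum;
        ds->bts_buffer_base      = aux_base;   /* point hardware at AUX */
        ds->bts_index            = aux_base;
        ds->bts_absolute_maximum = aux_max;
    }

    static void bts_stop(struct debug_store *ds, const struct ds_backup *back)
    {
        ds->bts_buffer_base      = back->bts_buffer_base;
        ds->bts_index            = back->bts_buffer_base;
        ds->bts_absolute_maximum = back->bts_absolute_maximum;
    }

    int main(void)
    {
        struct debug_store ds = { .bts_buffer_base = 0x1000 };
        struct ds_backup back;
        bts_start(&ds, &back, 0x8000, 0x9000);
        bts_stop(&ds, &back);
        printf("restored base=%#llx\n", (unsigned long long)ds.bts_buffer_base);
        return 0;
    }
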
cpuc             1951 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1955 arch/x86/events/intel/core.c 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
cpuc             1969 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1974 arch/x86/events/intel/core.c 			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
cpuc             1976 arch/x86/events/intel/core.c 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
cpuc             1978 arch/x86/events/intel/core.c 			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
cpuc             2008 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             2041 arch/x86/events/intel/core.c 		event = cpuc->events[i];
cpuc             2055 arch/x86/events/intel/core.c 		event = cpuc->events[i];
cpuc             2073 arch/x86/events/intel/core.c static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
cpuc             2077 arch/x86/events/intel/core.c 	if (cpuc->tfa_shadow != val) {
cpuc             2078 arch/x86/events/intel/core.c 		cpuc->tfa_shadow = val;
cpuc             2083 arch/x86/events/intel/core.c static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
cpuc             2089 arch/x86/events/intel/core.c 		intel_set_tfa(cpuc, true);
cpuc             2094 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             2100 arch/x86/events/intel/core.c 	if (!test_bit(3, cpuc->active_mask))
cpuc             2101 arch/x86/events/intel/core.c 		intel_set_tfa(cpuc, false);
cpuc             2152 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             2160 arch/x86/events/intel/core.c 	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
cpuc             2161 arch/x86/events/intel/core.c 	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
cpuc             2162 arch/x86/events/intel/core.c 	cpuc->intel_cp_status &= ~(1ull << hwc->idx);
cpuc             2234 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             2245 arch/x86/events/intel/core.c 		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
cpuc             2247 arch/x86/events/intel/core.c 		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
cpuc             2250 arch/x86/events/intel/core.c 		cpuc->intel_cp_status |= (1ull << hwc->idx);
cpuc             2333 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             2369 arch/x86/events/intel/core.c 		status &= ~cpuc->pebs_enabled;
cpuc             2371 arch/x86/events/intel/core.c 		status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
cpuc             2399 arch/x86/events/intel/core.c 	status |= cpuc->intel_cp_status;
cpuc             2402 arch/x86/events/intel/core.c 		struct perf_event *event = cpuc->events[bit];
cpuc             2406 arch/x86/events/intel/core.c 		if (!test_bit(bit, cpuc->active_mask))
cpuc             2415 arch/x86/events/intel/core.c 			data.br_stack = &cpuc->lbr_stack;
cpuc             2446 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             2450 arch/x86/events/intel/core.c 	int pmu_enabled = cpuc->enabled;
cpuc             2454 arch/x86/events/intel/core.c 	cpuc->enabled = 0;
cpuc             2455 arch/x86/events/intel/core.c 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
cpuc             2509 arch/x86/events/intel/core.c 	cpuc->enabled = pmu_enabled;
cpuc             2519 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc;
cpuc             2525 arch/x86/events/intel/core.c 	cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             2531 arch/x86/events/intel/core.c 	pmu_enabled = cpuc->enabled;
cpuc             2539 arch/x86/events/intel/core.c 	cpuc->enabled = 0;
cpuc             2574 arch/x86/events/intel/core.c 	cpuc->enabled = pmu_enabled;
cpuc             2640 arch/x86/events/intel/core.c __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
cpuc             2654 arch/x86/events/intel/core.c 	if (reg->alloc && !cpuc->is_fake)
cpuc             2658 arch/x86/events/intel/core.c 	era = &cpuc->shared_regs->regs[idx];
cpuc             2677 arch/x86/events/intel/core.c 		if (!cpuc->is_fake) {
cpuc             2715 arch/x86/events/intel/core.c __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
cpuc             2728 arch/x86/events/intel/core.c 	if (!reg->alloc || cpuc->is_fake)
cpuc             2731 arch/x86/events/intel/core.c 	era = &cpuc->shared_regs->regs[reg->idx];
cpuc             2741 arch/x86/events/intel/core.c intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
cpuc             2749 arch/x86/events/intel/core.c 		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
cpuc             2755 arch/x86/events/intel/core.c 		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
cpuc             2757 arch/x86/events/intel/core.c 			__intel_shared_reg_put_constraints(cpuc, xreg);
cpuc             2765 arch/x86/events/intel/core.c x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc             2783 arch/x86/events/intel/core.c __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc             2792 arch/x86/events/intel/core.c 	c = intel_shared_regs_constraints(cpuc, event);
cpuc             2800 arch/x86/events/intel/core.c 	return x86_get_event_constraints(cpuc, idx, event);
cpuc             2804 arch/x86/events/intel/core.c intel_start_scheduling(struct cpu_hw_events *cpuc)
cpuc             2806 arch/x86/events/intel/core.c 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
cpuc             2808 arch/x86/events/intel/core.c 	int tid = cpuc->excl_thread_id;
cpuc             2813 arch/x86/events/intel/core.c 	if (cpuc->is_fake || !is_ht_workaround_enabled())
cpuc             2833 arch/x86/events/intel/core.c static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
cpuc             2835 arch/x86/events/intel/core.c 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
cpuc             2836 arch/x86/events/intel/core.c 	struct event_constraint *c = cpuc->event_constraint[idx];
cpuc             2838 arch/x86/events/intel/core.c 	int tid = cpuc->excl_thread_id;
cpuc             2840 arch/x86/events/intel/core.c 	if (cpuc->is_fake || !is_ht_workaround_enabled())
cpuc             2860 arch/x86/events/intel/core.c intel_stop_scheduling(struct cpu_hw_events *cpuc)
cpuc             2862 arch/x86/events/intel/core.c 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
cpuc             2864 arch/x86/events/intel/core.c 	int tid = cpuc->excl_thread_id;
cpuc             2869 arch/x86/events/intel/core.c 	if (cpuc->is_fake || !is_ht_workaround_enabled())
cpuc             2887 arch/x86/events/intel/core.c dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
cpuc             2889 arch/x86/events/intel/core.c 	WARN_ON_ONCE(!cpuc->constraint_list);
cpuc             2897 arch/x86/events/intel/core.c 		cx = &cpuc->constraint_list[idx];
cpuc             2916 arch/x86/events/intel/core.c intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
cpuc             2919 arch/x86/events/intel/core.c 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
cpuc             2921 arch/x86/events/intel/core.c 	int tid = cpuc->excl_thread_id;
cpuc             2928 arch/x86/events/intel/core.c 	if (cpuc->is_fake || !is_ht_workaround_enabled())
cpuc             2945 arch/x86/events/intel/core.c 	c = dyn_constraint(cpuc, c, idx);
cpuc             2966 arch/x86/events/intel/core.c 		if (!cpuc->n_excl++)
cpuc             3016 arch/x86/events/intel/core.c intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc             3021 arch/x86/events/intel/core.c 	c1 = cpuc->event_constraint[idx];
cpuc             3028 arch/x86/events/intel/core.c 	c2 = __intel_get_event_constraints(cpuc, idx, event);
cpuc             3036 arch/x86/events/intel/core.c 	if (cpuc->excl_cntrs)
cpuc             3037 arch/x86/events/intel/core.c 		return intel_get_excl_constraints(cpuc, event, idx, c2);
cpuc             3042 arch/x86/events/intel/core.c static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
cpuc             3046 arch/x86/events/intel/core.c 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
cpuc             3047 arch/x86/events/intel/core.c 	int tid = cpuc->excl_thread_id;
cpuc             3053 arch/x86/events/intel/core.c 	if (cpuc->is_fake)
cpuc             3061 arch/x86/events/intel/core.c 		if (!--cpuc->n_excl)
cpuc             3088 arch/x86/events/intel/core.c intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
cpuc             3095 arch/x86/events/intel/core.c 		__intel_shared_reg_put_constraints(cpuc, reg);
cpuc             3099 arch/x86/events/intel/core.c 		__intel_shared_reg_put_constraints(cpuc, reg);
cpuc             3102 arch/x86/events/intel/core.c static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
cpuc             3105 arch/x86/events/intel/core.c 	intel_put_shared_regs_event_constraints(cpuc, event);
cpuc             3112 arch/x86/events/intel/core.c 	if (cpuc->excl_cntrs)
cpuc             3113 arch/x86/events/intel/core.c 		intel_put_excl_constraints(cpuc, event);
cpuc             3337 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             3338 arch/x86/events/intel/core.c 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
cpuc             3341 arch/x86/events/intel/core.c 	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
cpuc             3342 arch/x86/events/intel/core.c 	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
cpuc             3344 arch/x86/events/intel/core.c 		arr[0].guest &= ~cpuc->pebs_enabled;
cpuc             3346 arch/x86/events/intel/core.c 		arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
cpuc             3359 arch/x86/events/intel/core.c 		arr[1].host = cpuc->pebs_enabled;
cpuc             3369 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             3370 arch/x86/events/intel/core.c 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
cpuc             3374 arch/x86/events/intel/core.c 		struct perf_event *event = cpuc->events[idx];
cpuc             3379 arch/x86/events/intel/core.c 		if (!test_bit(idx, cpuc->active_mask))
cpuc             3403 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             3407 arch/x86/events/intel/core.c 		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
cpuc             3409 arch/x86/events/intel/core.c 		if (!test_bit(idx, cpuc->active_mask) ||
cpuc             3410 arch/x86/events/intel/core.c 				cpuc->events[idx]->attr.exclude_host)
cpuc             3467 arch/x86/events/intel/core.c hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc             3472 arch/x86/events/intel/core.c 	c = intel_get_event_constraints(cpuc, idx, event);
cpuc             3485 arch/x86/events/intel/core.c icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc             3496 arch/x86/events/intel/core.c 	return hsw_get_event_constraints(cpuc, idx, event);
cpuc             3500 arch/x86/events/intel/core.c glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc             3509 arch/x86/events/intel/core.c 	c = intel_get_event_constraints(cpuc, idx, event);
cpuc             3515 arch/x86/events/intel/core.c tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc             3532 arch/x86/events/intel/core.c 	c = intel_get_event_constraints(cpuc, idx, event);
cpuc             3540 arch/x86/events/intel/core.c tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc             3543 arch/x86/events/intel/core.c 	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
cpuc             3549 arch/x86/events/intel/core.c 		c = dyn_constraint(cpuc, c, idx);
cpuc             3648 arch/x86/events/intel/core.c int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
cpuc             3650 arch/x86/events/intel/core.c 	cpuc->pebs_record_size = x86_pmu.pebs_record_size;
cpuc             3653 arch/x86/events/intel/core.c 		cpuc->shared_regs = allocate_shared_regs(cpu);
cpuc             3654 arch/x86/events/intel/core.c 		if (!cpuc->shared_regs)
cpuc             3661 arch/x86/events/intel/core.c 		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
cpuc             3662 arch/x86/events/intel/core.c 		if (!cpuc->constraint_list)
cpuc             3667 arch/x86/events/intel/core.c 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
cpuc             3668 arch/x86/events/intel/core.c 		if (!cpuc->excl_cntrs)
cpuc             3671 arch/x86/events/intel/core.c 		cpuc->excl_thread_id = 0;
cpuc             3677 arch/x86/events/intel/core.c 	kfree(cpuc->constraint_list);
cpuc             3678 arch/x86/events/intel/core.c 	cpuc->constraint_list = NULL;
cpuc             3681 arch/x86/events/intel/core.c 	kfree(cpuc->shared_regs);
cpuc             3682 arch/x86/events/intel/core.c 	cpuc->shared_regs = NULL;
cpuc             3708 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpuc             3718 arch/x86/events/intel/core.c 	cpuc->lbr_sel = NULL;
cpuc             3721 arch/x86/events/intel/core.c 		WARN_ON_ONCE(cpuc->tfa_shadow);
cpuc             3722 arch/x86/events/intel/core.c 		cpuc->tfa_shadow = ~0ULL;
cpuc             3723 arch/x86/events/intel/core.c 		intel_set_tfa(cpuc, false);
cpuc             3732 arch/x86/events/intel/core.c 	if (!cpuc->shared_regs)
cpuc             3741 arch/x86/events/intel/core.c 				cpuc->kfree_on_online[0] = cpuc->shared_regs;
cpuc             3742 arch/x86/events/intel/core.c 				cpuc->shared_regs = pc;
cpuc             3746 arch/x86/events/intel/core.c 		cpuc->shared_regs->core_id = core_id;
cpuc             3747 arch/x86/events/intel/core.c 		cpuc->shared_regs->refcnt++;
cpuc             3751 arch/x86/events/intel/core.c 		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
cpuc             3761 arch/x86/events/intel/core.c 				cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
cpuc             3762 arch/x86/events/intel/core.c 				cpuc->excl_cntrs = c;
cpuc             3764 arch/x86/events/intel/core.c 					cpuc->excl_thread_id = 1;
cpuc             3768 arch/x86/events/intel/core.c 		cpuc->excl_cntrs->core_id = core_id;
cpuc             3769 arch/x86/events/intel/core.c 		cpuc->excl_cntrs->refcnt++;
cpuc             3773 arch/x86/events/intel/core.c static void free_excl_cntrs(struct cpu_hw_events *cpuc)
cpuc             3777 arch/x86/events/intel/core.c 	c = cpuc->excl_cntrs;
cpuc             3781 arch/x86/events/intel/core.c 		cpuc->excl_cntrs = NULL;
cpuc             3784 arch/x86/events/intel/core.c 	kfree(cpuc->constraint_list);
cpuc             3785 arch/x86/events/intel/core.c 	cpuc->constraint_list = NULL;
cpuc             3796 arch/x86/events/intel/core.c void intel_cpuc_finish(struct cpu_hw_events *cpuc)
cpuc             3800 arch/x86/events/intel/core.c 	pc = cpuc->shared_regs;
cpuc             3804 arch/x86/events/intel/core.c 		cpuc->shared_regs = NULL;
cpuc             3807 arch/x86/events/intel/core.c 	free_excl_cntrs(cpuc);
cpuc             4342 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             4348 arch/x86/events/intel/core.c 	if (test_bit(3, cpuc->active_mask))
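
The intel/core.c entries above end with the per-CPU lifecycle pair intel_cpuc_prepare()/intel_cpuc_finish(), which allocate shared_regs, constraint_list and excl_cntrs and unwind the allocations in reverse order on failure. Below is a minimal user-space sketch of that allocate-or-unwind pattern; struct cpuc_model and the fixed allocation sizes are placeholders for this sketch, not the kernel types.

#include <stdlib.h>

/* Simplified stand-in for struct cpu_hw_events; the field names mirror
 * the listing above but the types are placeholders. */
struct cpuc_model {
	void *shared_regs;
	void *constraint_list;
	void *excl_cntrs;
};

/* Allocate all per-CPU buffers, unwinding everything already allocated
 * on the first failure, in the style of intel_cpuc_prepare(). */
static int cpuc_prepare(struct cpuc_model *cpuc)
{
	cpuc->shared_regs = calloc(1, 64);
	if (!cpuc->shared_regs)
		goto err;

	cpuc->constraint_list = calloc(1, 64);
	if (!cpuc->constraint_list)
		goto err_shared;

	cpuc->excl_cntrs = calloc(1, 64);
	if (!cpuc->excl_cntrs)
		goto err_constraint;

	return 0;

err_constraint:
	free(cpuc->constraint_list);
	cpuc->constraint_list = NULL;
err_shared:
	free(cpuc->shared_regs);
	cpuc->shared_regs = NULL;
err:
	return -1;	/* the kernel returns -ENOMEM here */
}

/* Mirror of intel_cpuc_finish(): release everything and clear the
 * pointers so a second call is harmless. */
static void cpuc_finish(struct cpuc_model *cpuc)
{
	free(cpuc->excl_cntrs);
	cpuc->excl_cntrs = NULL;
	free(cpuc->constraint_list);
	cpuc->constraint_list = NULL;
	free(cpuc->shared_regs);
	cpuc->shared_regs = NULL;
}

int main(void)
{
	struct cpuc_model cpuc = { 0 };

	if (cpuc_prepare(&cpuc) == 0)
		cpuc_finish(&cpuc);
	return 0;
}
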
cpuc              565 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              568 arch/x86/events/intel/ds.c 	if (!cpuc->ds)
cpuc              582 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              583 arch/x86/events/intel/ds.c 	struct debug_store *ds = cpuc->ds;
cpuc              589 arch/x86/events/intel/ds.c 	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
cpuc              903 arch/x86/events/intel/ds.c static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
cpuc              905 arch/x86/events/intel/ds.c 	if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
cpuc              908 arch/x86/events/intel/ds.c 	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
cpuc              913 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              915 arch/x86/events/intel/ds.c 	if (!sched_in && pebs_needs_sched_cb(cpuc))
cpuc              919 arch/x86/events/intel/ds.c static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
cpuc              921 arch/x86/events/intel/ds.c 	struct debug_store *ds = cpuc->ds;
cpuc              925 arch/x86/events/intel/ds.c 	if (cpuc->n_pebs_via_pt)
cpuc              933 arch/x86/events/intel/ds.c 	if (cpuc->n_pebs == cpuc->n_large_pebs) {
cpuc              935 arch/x86/events/intel/ds.c 			reserved * cpuc->pebs_record_size;
cpuc              937 arch/x86/events/intel/ds.c 		threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
cpuc              945 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              946 arch/x86/events/intel/ds.c 	u64 pebs_data_cfg = cpuc->pebs_data_cfg;
cpuc              958 arch/x86/events/intel/ds.c 	cpuc->pebs_record_size = sz;
cpuc             1012 arch/x86/events/intel/ds.c pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
cpuc             1021 arch/x86/events/intel/ds.c 	bool update = cpuc->n_pebs == 1;
cpuc             1023 arch/x86/events/intel/ds.c 	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
cpuc             1040 arch/x86/events/intel/ds.c 		if (cpuc->n_pebs == 1) {
cpuc             1041 arch/x86/events/intel/ds.c 			cpuc->pebs_data_cfg = 0;
cpuc             1042 arch/x86/events/intel/ds.c 			cpuc->pebs_record_size = sizeof(struct pebs_basic);
cpuc             1048 arch/x86/events/intel/ds.c 		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
cpuc             1049 arch/x86/events/intel/ds.c 			cpuc->pebs_data_cfg |= pebs_data_cfg;
cpuc             1056 arch/x86/events/intel/ds.c 		pebs_update_threshold(cpuc);
cpuc             1061 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1063 arch/x86/events/intel/ds.c 	bool needed_cb = pebs_needs_sched_cb(cpuc);
cpuc             1065 arch/x86/events/intel/ds.c 	cpuc->n_pebs++;
cpuc             1067 arch/x86/events/intel/ds.c 		cpuc->n_large_pebs++;
cpuc             1069 arch/x86/events/intel/ds.c 		cpuc->n_pebs_via_pt++;
cpuc             1071 arch/x86/events/intel/ds.c 	pebs_update_state(needed_cb, cpuc, event, true);
cpuc             1076 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1081 arch/x86/events/intel/ds.c 	if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
cpuc             1082 arch/x86/events/intel/ds.c 		cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
cpuc             1087 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1089 arch/x86/events/intel/ds.c 	struct debug_store *ds = cpuc->ds;
cpuc             1095 arch/x86/events/intel/ds.c 		cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;
cpuc             1097 arch/x86/events/intel/ds.c 	cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
cpuc             1104 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1106 arch/x86/events/intel/ds.c 	struct debug_store *ds = cpuc->ds;
cpuc             1110 arch/x86/events/intel/ds.c 	cpuc->pebs_enabled |= 1ULL << hwc->idx;
cpuc             1113 arch/x86/events/intel/ds.c 		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
cpuc             1115 arch/x86/events/intel/ds.c 		cpuc->pebs_enabled |= 1ULL << 63;
cpuc             1119 arch/x86/events/intel/ds.c 		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
cpuc             1120 arch/x86/events/intel/ds.c 			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
cpuc             1121 arch/x86/events/intel/ds.c 			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
cpuc             1145 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1147 arch/x86/events/intel/ds.c 	bool needed_cb = pebs_needs_sched_cb(cpuc);
cpuc             1149 arch/x86/events/intel/ds.c 	cpuc->n_pebs--;
cpuc             1151 arch/x86/events/intel/ds.c 		cpuc->n_large_pebs--;
cpuc             1153 arch/x86/events/intel/ds.c 		cpuc->n_pebs_via_pt--;
cpuc             1155 arch/x86/events/intel/ds.c 	pebs_update_state(needed_cb, cpuc, event, false);
cpuc             1160 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1163 arch/x86/events/intel/ds.c 	if (cpuc->n_pebs == cpuc->n_large_pebs &&
cpuc             1164 arch/x86/events/intel/ds.c 	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
cpuc             1167 arch/x86/events/intel/ds.c 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
cpuc             1171 arch/x86/events/intel/ds.c 		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
cpuc             1173 arch/x86/events/intel/ds.c 		cpuc->pebs_enabled &= ~(1ULL << 63);
cpuc             1177 arch/x86/events/intel/ds.c 	if (cpuc->enabled)
cpuc             1178 arch/x86/events/intel/ds.c 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
cpuc             1185 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1187 arch/x86/events/intel/ds.c 	if (cpuc->pebs_enabled)
cpuc             1188 arch/x86/events/intel/ds.c 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
cpuc             1193 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1195 arch/x86/events/intel/ds.c 	if (cpuc->pebs_enabled)
cpuc             1201 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1202 arch/x86/events/intel/ds.c 	unsigned long from = cpuc->lbr_entries[0].from;
cpuc             1203 arch/x86/events/intel/ds.c 	unsigned long old_to, to = cpuc->lbr_entries[0].to;
cpuc             1218 arch/x86/events/intel/ds.c 	if (!cpuc->lbr_stack.nr || !from || !to)
cpuc             1348 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1478 arch/x86/events/intel/ds.c 		data->br_stack = &cpuc->lbr_stack;
cpuc             1513 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1604 arch/x86/events/intel/ds.c 			data->br_stack = &cpuc->lbr_stack;
cpuc             1618 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1632 arch/x86/events/intel/ds.c 	for (at = base; at < top; at += cpuc->pebs_record_size) {
cpuc             1644 arch/x86/events/intel/ds.c 			pebs_status = status & cpuc->pebs_enabled;
cpuc             1733 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1754 arch/x86/events/intel/ds.c 		at += cpuc->pebs_record_size;
cpuc             1774 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1775 arch/x86/events/intel/ds.c 	struct debug_store *ds = cpuc->ds;
cpuc             1776 arch/x86/events/intel/ds.c 	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
cpuc             1791 arch/x86/events/intel/ds.c 	if (!test_bit(0, cpuc->active_mask))
cpuc             1810 arch/x86/events/intel/ds.c static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
cpuc             1822 arch/x86/events/intel/ds.c 	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
cpuc             1823 arch/x86/events/intel/ds.c 		event = cpuc->events[bit];
cpuc             1831 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1832 arch/x86/events/intel/ds.c 	struct debug_store *ds = cpuc->ds;
cpuc             1856 arch/x86/events/intel/ds.c 		intel_pmu_pebs_event_update_no_drain(cpuc, size);
cpuc             1864 arch/x86/events/intel/ds.c 		pebs_status = p->status & cpuc->pebs_enabled;
cpuc             1883 arch/x86/events/intel/ds.c 		if (!pebs_status && cpuc->pebs_enabled &&
cpuc             1884 arch/x86/events/intel/ds.c 			!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
cpuc             1885 arch/x86/events/intel/ds.c 			pebs_status = cpuc->pebs_enabled;
cpuc             1920 arch/x86/events/intel/ds.c 		event = cpuc->events[bit];
cpuc             1946 arch/x86/events/intel/ds.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1947 arch/x86/events/intel/ds.c 	struct debug_store *ds = cpuc->ds;
cpuc             1966 arch/x86/events/intel/ds.c 		intel_pmu_pebs_event_update_no_drain(cpuc, size);
cpuc             1970 arch/x86/events/intel/ds.c 	for (at = base; at < top; at += cpuc->pebs_record_size) {
cpuc             1973 arch/x86/events/intel/ds.c 		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
cpuc             1984 arch/x86/events/intel/ds.c 		event = cpuc->events[bit];
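
The ds.c entries revolve around three per-CPU reference counts (n_pebs, n_large_pebs, n_pebs_via_pt) that intel_pmu_pebs_add()/intel_pmu_pebs_del() adjust and pebs_needs_sched_cb() inspects: no sched callback is needed when every PEBS event is routed through PT, otherwise it is needed exactly when all PEBS events use the large buffer. A compact, runnable model of that accounting follows; struct pebs_counts and the bool flags are stand-ins for the event-flag tests in the real code.

#include <assert.h>
#include <stdbool.h>

struct pebs_counts {
	int n_pebs;		/* all PEBS events on this CPU	   */
	int n_large_pebs;	/* events using the large buffer   */
	int n_pebs_via_pt;	/* events routed to Intel PT	   */
};

/* Mirrors pebs_needs_sched_cb(): no callback when everything goes via
 * PT; otherwise needed iff every PEBS event is a large-PEBS event. */
static bool needs_sched_cb(const struct pebs_counts *c)
{
	if (c->n_pebs == c->n_pebs_via_pt)
		return false;
	return c->n_pebs && (c->n_pebs == c->n_large_pebs);
}

/* Simplified add/del in the style of intel_pmu_pebs_add()/del(). */
static void pebs_add(struct pebs_counts *c, bool large, bool via_pt)
{
	c->n_pebs++;
	if (large)
		c->n_large_pebs++;
	if (via_pt)
		c->n_pebs_via_pt++;
}

static void pebs_del(struct pebs_counts *c, bool large, bool via_pt)
{
	c->n_pebs--;
	if (large)
		c->n_large_pebs--;
	if (via_pt)
		c->n_pebs_via_pt--;
}

int main(void)
{
	struct pebs_counts c = { 0 };

	pebs_add(&c, true, false);
	assert(needs_sched_cb(&c));	/* one large-PEBS event	       */
	pebs_add(&c, false, false);
	assert(!needs_sched_cb(&c));	/* mixed sizes: no callback    */
	pebs_del(&c, false, false);
	pebs_del(&c, true, false);
	assert(!needs_sched_cb(&c));	/* no events left	       */
	return 0;
}
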
cpuc              216 arch/x86/events/intel/knc.c 	struct cpu_hw_events *cpuc;
cpuc              221 arch/x86/events/intel/knc.c 	cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              243 arch/x86/events/intel/knc.c 		struct perf_event *event = cpuc->events[bit];
cpuc              247 arch/x86/events/intel/knc.c 		if (!test_bit(bit, cpuc->active_mask))
cpuc              268 arch/x86/events/intel/knc.c 	if (cpuc->enabled)
cpuc              146 arch/x86/events/intel/lbr.c static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
cpuc              155 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              169 arch/x86/events/intel/lbr.c 	if (cpuc->lbr_sel)
cpuc              170 arch/x86/events/intel/lbr.c 		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
cpuc              171 arch/x86/events/intel/lbr.c 	if (!pmi && cpuc->lbr_sel)
cpuc              219 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              229 arch/x86/events/intel/lbr.c 	cpuc->last_task_ctx = NULL;
cpuc              230 arch/x86/events/intel/lbr.c 	cpuc->last_log_id = 0;
cpuc              342 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              359 arch/x86/events/intel/lbr.c 	if ((task_ctx == cpuc->last_task_ctx) &&
cpuc              360 arch/x86/events/intel/lbr.c 	    (task_ctx->log_id == cpuc->last_log_id) &&
cpuc              390 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              416 arch/x86/events/intel/lbr.c 	cpuc->last_task_ctx = task_ctx;
cpuc              417 arch/x86/events/intel/lbr.c 	cpuc->last_log_id = ++task_ctx->log_id;
cpuc              422 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              425 arch/x86/events/intel/lbr.c 	if (!cpuc->lbr_users)
cpuc              459 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              465 arch/x86/events/intel/lbr.c 	cpuc->br_sel = event->hw.branch_reg.reg;
cpuc              467 arch/x86/events/intel/lbr.c 	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
cpuc              492 arch/x86/events/intel/lbr.c 		cpuc->lbr_pebs_users++;
cpuc              494 arch/x86/events/intel/lbr.c 	if (!cpuc->lbr_users++ && !event->total_time_running)
cpuc              500 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              506 arch/x86/events/intel/lbr.c 	if (branch_user_callstack(cpuc->br_sel) &&
cpuc              513 arch/x86/events/intel/lbr.c 		cpuc->lbr_pebs_users--;
cpuc              514 arch/x86/events/intel/lbr.c 	cpuc->lbr_users--;
cpuc              515 arch/x86/events/intel/lbr.c 	WARN_ON_ONCE(cpuc->lbr_users < 0);
cpuc              516 arch/x86/events/intel/lbr.c 	WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
cpuc              522 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              524 arch/x86/events/intel/lbr.c 	if (cpuc->lbr_users)
cpuc              530 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              532 arch/x86/events/intel/lbr.c 	if (cpuc->lbr_users)
cpuc              536 arch/x86/events/intel/lbr.c static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
cpuc              554 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
cpuc              555 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
cpuc              556 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].mispred	= 0;
cpuc              557 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].predicted	= 0;
cpuc              558 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].in_tx	= 0;
cpuc              559 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].abort	= 0;
cpuc              560 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].cycles	= 0;
cpuc              561 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].type	= 0;
cpuc              562 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].reserved	= 0;
cpuc              564 arch/x86/events/intel/lbr.c 	cpuc->lbr_stack.nr = i;
cpuc              572 arch/x86/events/intel/lbr.c static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
cpuc              582 arch/x86/events/intel/lbr.c 	if (cpuc->lbr_sel) {
cpuc              583 arch/x86/events/intel/lbr.c 		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
cpuc              584 arch/x86/events/intel/lbr.c 		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
cpuc              648 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].from	 = from;
cpuc              649 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].to	 = to;
cpuc              650 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].mispred	 = mis;
cpuc              651 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].predicted = pred;
cpuc              652 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].in_tx	 = in_tx;
cpuc              653 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].abort	 = abort;
cpuc              654 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].cycles	 = cycles;
cpuc              655 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].type	 = 0;
cpuc              656 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].reserved	 = 0;
cpuc              659 arch/x86/events/intel/lbr.c 	cpuc->lbr_stack.nr = out;
cpuc              664 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              672 arch/x86/events/intel/lbr.c 	if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users)
cpuc              676 arch/x86/events/intel/lbr.c 		intel_pmu_lbr_read_32(cpuc);
cpuc              678 arch/x86/events/intel/lbr.c 		intel_pmu_lbr_read_64(cpuc);
cpuc              680 arch/x86/events/intel/lbr.c 	intel_pmu_lbr_filter(cpuc);
cpuc             1042 arch/x86/events/intel/lbr.c intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
cpuc             1045 arch/x86/events/intel/lbr.c 	int br_sel = cpuc->br_sel;
cpuc             1054 arch/x86/events/intel/lbr.c 	for (i = 0; i < cpuc->lbr_stack.nr; i++) {
cpuc             1056 arch/x86/events/intel/lbr.c 		from = cpuc->lbr_entries[i].from;
cpuc             1057 arch/x86/events/intel/lbr.c 		to = cpuc->lbr_entries[i].to;
cpuc             1059 arch/x86/events/intel/lbr.c 		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
cpuc             1061 arch/x86/events/intel/lbr.c 			if (cpuc->lbr_entries[i].in_tx)
cpuc             1069 arch/x86/events/intel/lbr.c 			cpuc->lbr_entries[i].from = 0;
cpuc             1074 arch/x86/events/intel/lbr.c 			cpuc->lbr_entries[i].type = common_branch_type(type);
cpuc             1081 arch/x86/events/intel/lbr.c 	for (i = 0; i < cpuc->lbr_stack.nr; ) {
cpuc             1082 arch/x86/events/intel/lbr.c 		if (!cpuc->lbr_entries[i].from) {
cpuc             1084 arch/x86/events/intel/lbr.c 			while (++j < cpuc->lbr_stack.nr)
cpuc             1085 arch/x86/events/intel/lbr.c 				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
cpuc             1086 arch/x86/events/intel/lbr.c 			cpuc->lbr_stack.nr--;
cpuc             1087 arch/x86/events/intel/lbr.c 			if (!cpuc->lbr_entries[i].from)
cpuc             1096 arch/x86/events/intel/lbr.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1099 arch/x86/events/intel/lbr.c 	cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
cpuc             1102 arch/x86/events/intel/lbr.c 		struct perf_branch_entry *e = &cpuc->lbr_entries[i];
cpuc             1113 arch/x86/events/intel/lbr.c 	intel_pmu_lbr_filter(cpuc);
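
The intel_pmu_lbr_filter() entries above show a mark-then-compact idiom: records the branch filter rejects get their from field zeroed, then a second pass squeezes the zeroed slots out of cpuc->lbr_entries[] by shifting the tail down and shrinking lbr_stack.nr, re-checking the current slot because the shifted-in record may itself be invalid. A standalone sketch of that compaction, with struct entry standing in for perf_branch_entry:

#include <assert.h>
#include <stddef.h>

struct entry {
	unsigned long from;	/* 0 marks a filtered-out record */
	unsigned long to;
};

/* Squeeze out invalidated records, preserving the order of the rest.
 * After each shift the loop deliberately does not advance i, so the
 * entry just moved into slot i is checked on the next iteration. */
static size_t compact(struct entry *e, size_t nr)
{
	size_t i, j;

	for (i = 0; i < nr; ) {
		if (!e[i].from) {
			j = i;
			while (++j < nr)
				e[j - 1] = e[j];
			nr--;
			continue;
		}
		i++;
	}
	return nr;
}

int main(void)
{
	struct entry e[8] = {
		{ 0x100, 0x200 }, { 0, 0 }, { 0, 0 }, { 0x300, 0x400 },
	};
	size_t nr = compact(e, 4);

	assert(nr == 2);
	assert(e[0].from == 0x100 && e[1].from == 0x300);
	return 0;
}
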
cpuc              918 arch/x86/events/intel/p4.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              922 arch/x86/events/intel/p4.c 		struct perf_event *event = cpuc->events[idx];
cpuc              923 arch/x86/events/intel/p4.c 		if (!test_bit(idx, cpuc->active_mask))
cpuc              987 arch/x86/events/intel/p4.c 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc              991 arch/x86/events/intel/p4.c 		struct perf_event *event = cpuc->events[idx];
cpuc              992 arch/x86/events/intel/p4.c 		if (!test_bit(idx, cpuc->active_mask))
cpuc             1001 arch/x86/events/intel/p4.c 	struct cpu_hw_events *cpuc;
cpuc             1007 arch/x86/events/intel/p4.c 	cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc             1012 arch/x86/events/intel/p4.c 		if (!test_bit(idx, cpuc->active_mask)) {
cpuc             1014 arch/x86/events/intel/p4.c 			if (__test_and_clear_bit(idx, cpuc->running))
cpuc             1019 arch/x86/events/intel/p4.c 		event = cpuc->events[idx];
cpuc             1206 arch/x86/events/intel/p4.c static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
cpuc             1223 arch/x86/events/intel/p4.c 		hwc = &cpuc->event_list[i]->hw;
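
The p4.c interrupt-handler entries walk every counter index, and a counter that is no longer in active_mask still bumps the handled count if its bit in cpuc->running can be cleared, so an overflow from a freshly stopped counter is not reported as a spurious NMI. The sketch below models that walk with plain bitmasks (the kernel uses the bitmap helpers test_bit()/__test_and_clear_bit() over unsigned long arrays) and omits the actual per-counter overflow check for brevity; struct pmu_state and NUM_COUNTERS are placeholders.

#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 4

struct pmu_state {
	unsigned long active_mask;	/* counters with a live event	 */
	unsigned long running;		/* counters started at some point */
	const char *events[NUM_COUNTERS];
};

static bool test_and_clear(unsigned long *mask, int idx)
{
	bool was_set = *mask & (1UL << idx);

	*mask &= ~(1UL << idx);
	return was_set;
}

/* Loose model of the walk in p4_pmu_handle_irq(): inactive counters
 * that were recently running are still counted as handled. */
static int handle_overflow(struct pmu_state *st)
{
	int idx, handled = 0;

	for (idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!(st->active_mask & (1UL << idx))) {
			if (test_and_clear(&st->running, idx))
				handled++;
			continue;
		}
		/* active counter: process st->events[idx] here */
		printf("counter %d -> %s\n", idx, st->events[idx]);
		handled++;
	}
	return handled;
}

int main(void)
{
	struct pmu_state st = {
		.active_mask = 1UL << 0,
		.running     = (1UL << 0) | (1UL << 2),
		.events	     = { "cycles", NULL, NULL, NULL },
	};

	printf("handled=%d\n", handle_overflow(&st));	/* handled=2 */
	return 0;
}
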
cpuc              581 arch/x86/events/perf_event.h 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
cpuc              600 arch/x86/events/perf_event.h 			(*get_event_constraints)(struct cpu_hw_events *cpuc,
cpuc              604 arch/x86/events/perf_event.h 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
cpuc              607 arch/x86/events/perf_event.h 	void		(*start_scheduling)(struct cpu_hw_events *cpuc);
cpuc              609 arch/x86/events/perf_event.h 	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
cpuc              611 arch/x86/events/perf_event.h 	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);
cpuc              849 arch/x86/events/perf_event.h int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
cpuc              949 arch/x86/events/perf_event.h x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
cpuc              952 arch/x86/events/perf_event.h extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
cpuc              953 arch/x86/events/perf_event.h extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
cpuc             1093 arch/x86/events/perf_event.h static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
cpuc             1098 arch/x86/events/perf_event.h static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
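
The perf_event.h entries show the usual header idiom behind intel_cpuc_prepare()/intel_cpuc_finish(): real extern declarations in one build configuration and static inline no-op stubs in the other, so call sites compile unconditionally without #ifdef clutter. A minimal reconstruction follows; CONFIG_FEATURE_X is a placeholder for the actual Kconfig guard used in the header.

/* Header idiom: one set of real declarations, or inline stubs. */
struct cpu_hw_events;	/* opaque at this point in the sketch */

#ifdef CONFIG_FEATURE_X

extern int  intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

#else  /* !CONFIG_FEATURE_X */

static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
	return 0;	/* nothing to allocate in this configuration */
}

static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}

#endif

int main(void)
{
	/* With the guard undefined, the stubs above are used. */
	intel_cpuc_finish(0);
	return intel_cpuc_prepare(0, 0);
}
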
cpuc              253 kernel/rcu/srcutree.c 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
cpuc              255 kernel/rcu/srcutree.c 		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
cpuc              270 kernel/rcu/srcutree.c 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
cpuc              272 kernel/rcu/srcutree.c 		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
cpuc              339 kernel/rcu/srcutree.c 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
cpuc              341 kernel/rcu/srcutree.c 		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
cpuc              342 kernel/rcu/srcutree.c 		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
cpuc              343 kernel/rcu/srcutree.c 		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
cpuc              344 kernel/rcu/srcutree.c 		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
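
The srcutree.c entries use cpuc differently from the rest of the listing: there it is a struct srcu_data pointer, and the loops sum per-CPU srcu_lock_count[]/srcu_unlock_count[] to decide whether readers remain. A runnable model of those sums follows; NR_CPUS, the static sda[] array and the READ_ONCE_UL() macro are simplifications of the kernel's per-CPU and READ_ONCE() machinery.

#include <stdio.h>

#define NR_CPUS 4

/* Per-CPU SRCU counters, two grace-period phases (idx 0/1). */
struct srcu_data_model {
	unsigned long srcu_lock_count[2];
	unsigned long srcu_unlock_count[2];
};

static struct srcu_data_model sda[NR_CPUS];

/* Stand-in for READ_ONCE(): a single volatile load. */
#define READ_ONCE_UL(x) (*(volatile unsigned long *)&(x))

/* Mirrors the lock-count loop: total lock count for one phase. */
static unsigned long readers_lock_idx(int idx)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += READ_ONCE_UL(sda[cpu].srcu_lock_count[idx]);
	return sum;
}

/* Mirrors the combined check: locks minus unlocks over both phases;
 * a nonzero result means readers may still be present. */
static unsigned long readers_active(void)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		sum += READ_ONCE_UL(sda[cpu].srcu_lock_count[0]);
		sum += READ_ONCE_UL(sda[cpu].srcu_lock_count[1]);
		sum -= READ_ONCE_UL(sda[cpu].srcu_unlock_count[0]);
		sum -= READ_ONCE_UL(sda[cpu].srcu_unlock_count[1]);
	}
	return sum;
}

int main(void)
{
	sda[0].srcu_lock_count[0] = 2;
	sda[1].srcu_unlock_count[0] = 1;

	printf("locks(idx0)=%lu active=%lu\n",
	       readers_lock_idx(0), readers_active());
	return 0;
}
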