perf_ibs          190 arch/x86/events/amd/ibs.c static struct perf_ibs perf_ibs_fetch;
perf_ibs          191 arch/x86/events/amd/ibs.c static struct perf_ibs perf_ibs_op;
perf_ibs          193 arch/x86/events/amd/ibs.c static struct perf_ibs *get_ibs_pmu(int type)
perf_ibs          259 arch/x86/events/amd/ibs.c 	struct perf_ibs *perf_ibs;
perf_ibs          263 arch/x86/events/amd/ibs.c 	perf_ibs = get_ibs_pmu(event->attr.type);
perf_ibs          264 arch/x86/events/amd/ibs.c 	if (perf_ibs) {
perf_ibs          267 arch/x86/events/amd/ibs.c 		perf_ibs = &perf_ibs_op;
perf_ibs          273 arch/x86/events/amd/ibs.c 	if (event->pmu != &perf_ibs->pmu)
perf_ibs          276 arch/x86/events/amd/ibs.c 	if (config & ~perf_ibs->config_mask)
perf_ibs          280 arch/x86/events/amd/ibs.c 		if (config & perf_ibs->cnt_mask)
perf_ibs          294 arch/x86/events/amd/ibs.c 		max_cnt = config & perf_ibs->cnt_mask;
perf_ibs          295 arch/x86/events/amd/ibs.c 		config &= ~perf_ibs->cnt_mask;
perf_ibs          310 arch/x86/events/amd/ibs.c 	hwc->config_base = perf_ibs->msr;
perf_ibs          316 arch/x86/events/amd/ibs.c static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
perf_ibs          322 arch/x86/events/amd/ibs.c 	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
perf_ibs          347 arch/x86/events/amd/ibs.c perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
perf_ibs          350 arch/x86/events/amd/ibs.c 	u64 count = perf_ibs->get_count(*config);
perf_ibs          359 arch/x86/events/amd/ibs.c 		count = perf_ibs->get_count(*config);
perf_ibs          363 arch/x86/events/amd/ibs.c static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
perf_ibs          366 arch/x86/events/amd/ibs.c 	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
perf_ibs          376 arch/x86/events/amd/ibs.c static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
perf_ibs          379 arch/x86/events/amd/ibs.c 	config &= ~perf_ibs->cnt_mask;
perf_ibs          382 arch/x86/events/amd/ibs.c 	config &= ~perf_ibs->enable_mask;
perf_ibs          395 arch/x86/events/amd/ibs.c 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
perf_ibs          396 arch/x86/events/amd/ibs.c 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
perf_ibs          405 arch/x86/events/amd/ibs.c 	perf_ibs_set_period(perf_ibs, hwc, &period);
perf_ibs          412 arch/x86/events/amd/ibs.c 	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
perf_ibs          420 arch/x86/events/amd/ibs.c 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
perf_ibs          421 arch/x86/events/amd/ibs.c 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
perf_ibs          443 arch/x86/events/amd/ibs.c 		perf_ibs_disable_event(perf_ibs, hwc, config);
perf_ibs          465 arch/x86/events/amd/ibs.c 	config &= ~perf_ibs->valid_mask;
perf_ibs          467 arch/x86/events/amd/ibs.c 	perf_ibs_event_update(perf_ibs, event, &config);
perf_ibs          473 arch/x86/events/amd/ibs.c 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
perf_ibs          474 arch/x86/events/amd/ibs.c 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
perf_ibs          491 arch/x86/events/amd/ibs.c 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
perf_ibs          492 arch/x86/events/amd/ibs.c 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
perf_ibs          519 arch/x86/events/amd/ibs.c static struct perf_ibs perf_ibs_fetch = {
perf_ibs          544 arch/x86/events/amd/ibs.c static struct perf_ibs perf_ibs_op = {
perf_ibs          569 arch/x86/events/amd/ibs.c static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
perf_ibs          571 arch/x86/events/amd/ibs.c 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
perf_ibs          603 arch/x86/events/amd/ibs.c 	if (!(*buf++ & perf_ibs->valid_mask))
perf_ibs          607 arch/x86/events/amd/ibs.c 	perf_ibs_event_update(perf_ibs, event, config);
perf_ibs          609 arch/x86/events/amd/ibs.c 	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
perf_ibs          615 arch/x86/events/amd/ibs.c 	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
perf_ibs          617 arch/x86/events/amd/ibs.c 		offset_max = perf_ibs->offset_max;
perf_ibs          625 arch/x86/events/amd/ibs.c 		offset = find_next_bit(perf_ibs->offset_mask,
perf_ibs          626 arch/x86/events/amd/ibs.c 				       perf_ibs->offset_max,
perf_ibs          675 arch/x86/events/amd/ibs.c 		perf_ibs_enable_event(perf_ibs, hwc, period);
perf_ibs          701 arch/x86/events/amd/ibs.c static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
perf_ibs          710 arch/x86/events/amd/ibs.c 	perf_ibs->pcpu = pcpu;
perf_ibs          713 arch/x86/events/amd/ibs.c 	if (perf_ibs->format_attrs[0]) {
perf_ibs          714 arch/x86/events/amd/ibs.c 		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
perf_ibs          715 arch/x86/events/amd/ibs.c 		perf_ibs->format_group.name	= "format";
perf_ibs          716 arch/x86/events/amd/ibs.c 		perf_ibs->format_group.attrs	= perf_ibs->format_attrs;
perf_ibs          718 arch/x86/events/amd/ibs.c 		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
perf_ibs          719 arch/x86/events/amd/ibs.c 		perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
perf_ibs          720 arch/x86/events/amd/ibs.c 		perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
perf_ibs          723 arch/x86/events/amd/ibs.c 	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
perf_ibs          725 arch/x86/events/amd/ibs.c 		perf_ibs->pcpu = NULL;
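The references above never show the definition of struct perf_ibs itself, only the fields the driver touches. For orientation, a minimal sketch of the descriptor those references imply follows; the field names and the line numbers in the comments come straight from the listing, while the types, ordering, and array sizes are assumptions rather than the kernel's actual definition in arch/x86/events/amd/ibs.c.

	#include <linux/types.h>	/* u64 */
	#include <linux/sysfs.h>	/* struct attribute, struct attribute_group */
	#include <linux/percpu.h>	/* __percpu */
	#include <linux/perf_event.h>	/* struct pmu */

	struct cpu_perf_ibs;			/* per-CPU state, kept opaque here */

	/* One instance per IBS flavor: perf_ibs_fetch (line 519) and perf_ibs_op (line 544). */
	struct perf_ibs {
		struct pmu			pmu;		/* registered via perf_pmu_register() (line 723) */
		unsigned int			msr;		/* control MSR, stored in hwc->config_base (line 310) */
		u64				config_mask;	/* legal raw config bits (line 276) */
		u64				cnt_mask;	/* period field inside the control MSR (lines 280, 294-295, 379) */
		u64				enable_mask;	/* enable bit OR'd in by perf_ibs_enable_event() (line 366) */
		u64				valid_mask;	/* sample-valid bit, cleared at line 465 and tested at line 603 */
		u64				max_period;	/* cap passed to perf_event_set_period() (line 322) */
		unsigned long			offset_mask[1];	/* bitmap of sample MSR offsets walked with find_next_bit() (line 625) */
		int				offset_max;	/* upper bound for that offset scan (lines 617, 626) */
		struct cpu_perf_ibs __percpu	*pcpu;		/* allocated in perf_ibs_pmu_init() (line 710) */
		struct attribute		**format_attrs;	/* optional sysfs format attributes (lines 713, 716) */
		struct attribute_group		format_group;	/* filled in at lines 714-716 */
		const struct attribute_group	*attr_groups[2];/* NULL-terminated, wired to the pmu at lines 719-720 */
		u64				(*get_count)(u64 config);	/* extract the sampled count (lines 350, 359) */
	};

Because perf_ibs_fetch and perf_ibs_op (lines 190-191, 519, 544) are two instances of this one descriptor, every callback in the listing recovers its flavor either through get_ibs_pmu() (line 263) or through container_of(event->pmu, struct perf_ibs, pmu) (lines 395, 420, 473, 491).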
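Building on that sketch, the tail of the listing (lines 701-725) outlines perf_ibs_pmu_init(): allocate the per-CPU state, optionally expose a sysfs "format" attribute group, and register the PMU. The reconstruction below only fills the gaps between the lines shown; the allocation call, the error path, and the members of struct cpu_perf_ibs are assumptions, not verified against the file.

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/percpu.h>
	#include <linux/perf_event.h>
	#include <linux/string.h>

	/*
	 * Per-CPU bookkeeping; the listing only shows it reached through
	 * this_cpu_ptr(perf_ibs->pcpu) (lines 396, 421, 474, 492, 571), so
	 * these members are purely illustrative.
	 */
	struct cpu_perf_ibs {
		struct perf_event	*event;
		unsigned long		state;
	};

	static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
	{
		struct cpu_perf_ibs __percpu *pcpu;
		int ret;

		pcpu = alloc_percpu(struct cpu_perf_ibs);	/* assumed allocator */
		if (!pcpu)
			return -ENOMEM;

		perf_ibs->pcpu = pcpu;				/* line 710 */

		/* Export a "format" group only if this flavor defines attributes (line 713). */
		if (perf_ibs->format_attrs[0]) {
			memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
			perf_ibs->format_group.name	= "format";
			perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

			memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
			perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
			perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
		}

		ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
		if (ret) {
			perf_ibs->pcpu = NULL;			/* line 725; cleanup path assumed */
			free_percpu(pcpu);
		}

		return ret;
	}

Each of the two static instances would be passed through this once at driver init, e.g. perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch") and perf_ibs_pmu_init(&perf_ibs_op, "ibs_op"); those PMU names are an assumption based on the two declarations at lines 190-191, not something the listing shows.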