bts               190 arch/x86/entry/calling.h 	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
bts               183 arch/x86/events/intel/bts.c static void bts_update(struct bts_ctx *bts)
bts               187 arch/x86/events/intel/bts.c 	struct bts_buffer *buf = perf_get_aux(&bts->handle);
bts               201 arch/x86/events/intel/bts.c 			perf_aux_output_flag(&bts->handle,
bts               228 arch/x86/events/intel/bts.c 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
bts               229 arch/x86/events/intel/bts.c 	struct bts_buffer *buf = perf_get_aux(&bts->handle);
bts               248 arch/x86/events/intel/bts.c 	WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);
bts               257 arch/x86/events/intel/bts.c 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
bts               260 arch/x86/events/intel/bts.c 	buf = perf_aux_output_begin(&bts->handle, event);
bts               264 arch/x86/events/intel/bts.c 	if (bts_buffer_reset(buf, &bts->handle))
bts               267 arch/x86/events/intel/bts.c 	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
bts               268 arch/x86/events/intel/bts.c 	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
bts               269 arch/x86/events/intel/bts.c 	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;
bts               279 arch/x86/events/intel/bts.c 	perf_aux_output_end(&bts->handle, 0);
bts               287 arch/x86/events/intel/bts.c 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
bts               290 arch/x86/events/intel/bts.c 	WRITE_ONCE(bts->state, state);
bts               302 arch/x86/events/intel/bts.c 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
bts               304 arch/x86/events/intel/bts.c 	int state = READ_ONCE(bts->state);
bts               310 arch/x86/events/intel/bts.c 		buf = perf_get_aux(&bts->handle);
bts               315 arch/x86/events/intel/bts.c 		bts_update(bts);
bts               319 arch/x86/events/intel/bts.c 				bts->handle.head =
bts               322 arch/x86/events/intel/bts.c 			perf_aux_output_end(&bts->handle,
bts               326 arch/x86/events/intel/bts.c 		cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
bts               327 arch/x86/events/intel/bts.c 		cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
bts               328 arch/x86/events/intel/bts.c 		cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
bts               329 arch/x86/events/intel/bts.c 		cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
bts               335 arch/x86/events/intel/bts.c 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
bts               336 arch/x86/events/intel/bts.c 	int state = READ_ONCE(bts->state);
bts               349 arch/x86/events/intel/bts.c 	if (bts->handle.event)
bts               350 arch/x86/events/intel/bts.c 		__bts_event_start(bts->handle.event);
bts               355 arch/x86/events/intel/bts.c 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
bts               361 arch/x86/events/intel/bts.c 	if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
bts               364 arch/x86/events/intel/bts.c 	if (bts->handle.event)
bts               365 arch/x86/events/intel/bts.c 		__bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
bts               447 arch/x86/events/intel/bts.c 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
bts               448 arch/x86/events/intel/bts.c 	struct perf_event *event = bts->handle.event;
bts               464 arch/x86/events/intel/bts.c 	if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
bts               467 arch/x86/events/intel/bts.c 	buf = perf_get_aux(&bts->handle);
bts               480 arch/x86/events/intel/bts.c 	bts_update(bts);
bts               486 arch/x86/events/intel/bts.c 	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0));
bts               488 arch/x86/events/intel/bts.c 	buf = perf_aux_output_begin(&bts->handle, event);
bts               490 arch/x86/events/intel/bts.c 		err = bts_buffer_reset(buf, &bts->handle);
bts               493 arch/x86/events/intel/bts.c 		WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
bts               501 arch/x86/events/intel/bts.c 			perf_aux_output_end(&bts->handle, 0);
bts               515 arch/x86/events/intel/bts.c 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
bts               524 arch/x86/events/intel/bts.c 	if (bts->handle.event)
bts               582 arch/x86/events/intel/bts.c 	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
bts              2448 arch/x86/events/intel/core.c 	bool bts = false;
bts              2456 arch/x86/events/intel/core.c 		bts = true;
bts              2507 arch/x86/events/intel/core.c 	if (bts)
bts               397 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts)
bts               424 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts)
bts               452 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts && !x86_pmu.pebs)
bts               481 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts && !x86_pmu.pebs)
bts               484 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts)
bts               520 arch/x86/events/intel/ds.c 		if (x86_pmu.bts && !bts_err)
bts              2009 arch/x86/events/intel/ds.c 	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
bts              2098 arch/x86/events/intel/ds.c 	if (!x86_pmu.bts && !x86_pmu.pebs)
bts               654 arch/x86/events/perf_event.h 	unsigned int	bts			:1,
bts                60 arch/x86/include/asm/bitops.h 		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
bts                68 arch/x86/include/asm/bitops.h 	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
bts               138 arch/x86/include/asm/bitops.h 	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
bts               152 arch/x86/include/asm/bitops.h 	asm(__ASM_SIZE(bts) " %2,%1"
bts                34 arch/x86/include/asm/sync_bitops.h 	asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
bts                85 arch/x86/include/asm/sync_bitops.h 	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
bts              1026 arch/x86/kvm/emulate.c FASTOP2W(bts);
bts                43 drivers/gpu/drm/lima/lima_vm.c 		vm->bts[pbe].cpu[bte] = 0;
bts                57 drivers/gpu/drm/lima/lima_vm.c 		if (!vm->bts[pbe].cpu) {
bts                62 drivers/gpu/drm/lima/lima_vm.c 			vm->bts[pbe].cpu = dma_alloc_wc(
bts                64 drivers/gpu/drm/lima/lima_vm.c 				&vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
bts                65 drivers/gpu/drm/lima/lima_vm.c 			if (!vm->bts[pbe].cpu) {
bts                71 drivers/gpu/drm/lima/lima_vm.c 			pts = vm->bts[pbe].dma;
bts                79 drivers/gpu/drm/lima/lima_vm.c 		vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
bts               244 drivers/gpu/drm/lima/lima_vm.c 		if (vm->bts[i].cpu)
bts               246 drivers/gpu/drm/lima/lima_vm.c 				    vm->bts[i].cpu, vm->bts[i].dma);
bts               265 drivers/gpu/drm/lima/lima_vm.c 		if (!vm->bts[i].cpu)
bts               268 drivers/gpu/drm/lima/lima_vm.c 		pt = vm->bts[i].cpu;
bts                38 drivers/gpu/drm/lima/lima_vm.h 	struct lima_vm_page bts[LIMA_VM_NUM_BT];
bts               622 fs/btrfs/send.c 	struct btrfs_timespec bts;
bts               623 fs/btrfs/send.c 	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
bts               624 fs/btrfs/send.c 	return tlv_put(sctx, attr, &bts, sizeof(bts));
bts                67 tools/perf/util/intel-bts.c 	struct intel_bts	*bts;
bts                86 tools/perf/util/intel-bts.c static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
bts               124 tools/perf/util/intel-bts.c static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
bts               128 tools/perf/util/intel-bts.c 	intel_bts_dump(bts, buf, len);
bts               131 tools/perf/util/intel-bts.c static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
bts               140 tools/perf/util/intel-bts.c 	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
bts               148 tools/perf/util/intel-bts.c static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
bts               157 tools/perf/util/intel-bts.c 	btsq->bts = bts;
bts               166 tools/perf/util/intel-bts.c static int intel_bts_setup_queue(struct intel_bts *bts,
bts               176 tools/perf/util/intel-bts.c 		btsq = intel_bts_alloc_queue(bts, queue_nr);
bts               186 tools/perf/util/intel-bts.c 	if (bts->sampling_mode)
bts               196 tools/perf/util/intel-bts.c 		ret = auxtrace_heap__add(&bts->heap, queue_nr,
bts               206 tools/perf/util/intel-bts.c static int intel_bts_setup_queues(struct intel_bts *bts)
bts               211 tools/perf/util/intel-bts.c 	for (i = 0; i < bts->queues.nr_queues; i++) {
bts               212 tools/perf/util/intel-bts.c 		ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
bts               220 tools/perf/util/intel-bts.c static inline int intel_bts_update_queues(struct intel_bts *bts)
bts               222 tools/perf/util/intel-bts.c 	if (bts->queues.new_data) {
bts               223 tools/perf/util/intel-bts.c 		bts->queues.new_data = false;
bts               224 tools/perf/util/intel-bts.c 		return intel_bts_setup_queues(bts);
bts               265 tools/perf/util/intel-bts.c static inline u8 intel_bts_cpumode(struct intel_bts *bts, uint64_t ip)
bts               267 tools/perf/util/intel-bts.c 	return machine__kernel_ip(bts->machine, ip) ?
bts               276 tools/perf/util/intel-bts.c 	struct intel_bts *bts = btsq->bts;
bts               280 tools/perf/util/intel-bts.c 	if (bts->synth_opts.initial_skip &&
bts               281 tools/perf/util/intel-bts.c 	    bts->num_events++ <= bts->synth_opts.initial_skip)
bts               285 tools/perf/util/intel-bts.c 	sample.cpumode = intel_bts_cpumode(bts, sample.ip);
bts               289 tools/perf/util/intel-bts.c 	sample.id = btsq->bts->branches_id;
bts               290 tools/perf/util/intel-bts.c 	sample.stream_id = btsq->bts->branches_id;
bts               301 tools/perf/util/intel-bts.c 	if (bts->synth_opts.inject) {
bts               302 tools/perf/util/intel-bts.c 		event.sample.header.size = bts->branches_event_size;
bts               304 tools/perf/util/intel-bts.c 						    bts->branches_sample_type,
bts               310 tools/perf/util/intel-bts.c 	ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
bts               320 tools/perf/util/intel-bts.c 	struct machine *machine = btsq->bts->machine;
bts               344 tools/perf/util/intel-bts.c static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
bts               354 tools/perf/util/intel-bts.c 	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
bts               383 tools/perf/util/intel-bts.c 			if (!btsq->bts->synth_opts.errors)
bts               385 tools/perf/util/intel-bts.c 			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
bts               392 tools/perf/util/intel-bts.c 		if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
bts               393 tools/perf/util/intel-bts.c 		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
bts               412 tools/perf/util/intel-bts.c 	u32 filter = btsq->bts->branches_filter;
bts               423 tools/perf/util/intel-bts.c 	if (!btsq->bts->sample_branches)
bts               430 tools/perf/util/intel-bts.c 		if (btsq->bts->synth_opts.thread_stack)
bts               456 tools/perf/util/intel-bts.c 		thread = machine__find_thread(btsq->bts->machine, -1,
bts               461 tools/perf/util/intel-bts.c 		thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
bts               465 tools/perf/util/intel-bts.c 	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];
bts               471 tools/perf/util/intel-bts.c 		if (!btsq->bts->sampling_mode)
bts               484 tools/perf/util/intel-bts.c 		int fd = perf_data__fd(btsq->bts->session->data);
bts               493 tools/perf/util/intel-bts.c 	if (btsq->bts->snapshot_mode && !buffer->consecutive &&
bts               499 tools/perf/util/intel-bts.c 	if (!btsq->bts->synth_opts.callchain &&
bts               500 tools/perf/util/intel-bts.c 	    !btsq->bts->synth_opts.thread_stack && thread &&
bts               501 tools/perf/util/intel-bts.c 	    (!old_buffer || btsq->bts->sampling_mode ||
bts               502 tools/perf/util/intel-bts.c 	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
bts               514 tools/perf/util/intel-bts.c 		if (!btsq->bts->sampling_mode)
bts               537 tools/perf/util/intel-bts.c static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
bts               539 tools/perf/util/intel-bts.c 	struct auxtrace_queues *queues = &bts->queues;
bts               543 tools/perf/util/intel-bts.c 		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
bts               552 tools/perf/util/intel-bts.c static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
bts               561 tools/perf/util/intel-bts.c 		if (!bts->heap.heap_cnt)
bts               564 tools/perf/util/intel-bts.c 		if (bts->heap.heap_array[0].ordinal > timestamp)
bts               567 tools/perf/util/intel-bts.c 		queue_nr = bts->heap.heap_array[0].queue_nr;
bts               568 tools/perf/util/intel-bts.c 		queue = &bts->queues.queue_array[queue_nr];
bts               571 tools/perf/util/intel-bts.c 		auxtrace_heap__pop(&bts->heap);
bts               575 tools/perf/util/intel-bts.c 			auxtrace_heap__add(&bts->heap, queue_nr, ts);
bts               580 tools/perf/util/intel-bts.c 			ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
bts               596 tools/perf/util/intel-bts.c 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
bts               610 tools/perf/util/intel-bts.c 		timestamp = perf_time_to_tsc(sample->time, &bts->tc);
bts               614 tools/perf/util/intel-bts.c 	err = intel_bts_update_queues(bts);
bts               618 tools/perf/util/intel-bts.c 	err = intel_bts_process_queues(bts, timestamp);
bts               622 tools/perf/util/intel-bts.c 		err = intel_bts_process_tid_exit(bts, event->fork.tid);
bts               629 tools/perf/util/intel-bts.c 	    bts->synth_opts.errors)
bts               630 tools/perf/util/intel-bts.c 		err = intel_bts_lost(bts, sample);
bts               639 tools/perf/util/intel-bts.c 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
bts               642 tools/perf/util/intel-bts.c 	if (bts->sampling_mode)
bts               645 tools/perf/util/intel-bts.c 	if (!bts->data_queued) {
bts               659 tools/perf/util/intel-bts.c 		err = auxtrace_queues__add_event(&bts->queues, session, event,
bts               667 tools/perf/util/intel-bts.c 				intel_bts_dump_event(bts, buffer->data,
bts               680 tools/perf/util/intel-bts.c 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
bts               684 tools/perf/util/intel-bts.c 	if (dump_trace || bts->sampling_mode)
bts               690 tools/perf/util/intel-bts.c 	ret = intel_bts_update_queues(bts);
bts               694 tools/perf/util/intel-bts.c 	return intel_bts_process_queues(bts, MAX_TIMESTAMP);
bts               708 tools/perf/util/intel-bts.c 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
bts               710 tools/perf/util/intel-bts.c 	struct auxtrace_queues *queues = &bts->queues;
bts               722 tools/perf/util/intel-bts.c 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
bts               725 tools/perf/util/intel-bts.c 	auxtrace_heap__free(&bts->heap);
bts               728 tools/perf/util/intel-bts.c 	free(bts);
bts               760 tools/perf/util/intel-bts.c static int intel_bts_synth_events(struct intel_bts *bts,
bts               771 tools/perf/util/intel-bts.c 		if (evsel->core.attr.type == bts->pmu_type && evsel->core.ids) {
bts               802 tools/perf/util/intel-bts.c 	if (bts->synth_opts.branches) {
bts               814 tools/perf/util/intel-bts.c 		bts->sample_branches = true;
bts               815 tools/perf/util/intel-bts.c 		bts->branches_sample_type = attr.sample_type;
bts               816 tools/perf/util/intel-bts.c 		bts->branches_id = id;
bts               821 tools/perf/util/intel-bts.c 		bts->branches_event_size = sizeof(struct perf_record_sample) +
bts               853 tools/perf/util/intel-bts.c 	struct intel_bts *bts;
bts               860 tools/perf/util/intel-bts.c 	bts = zalloc(sizeof(struct intel_bts));
bts               861 tools/perf/util/intel-bts.c 	if (!bts)
bts               864 tools/perf/util/intel-bts.c 	err = auxtrace_queues__init(&bts->queues);
bts               868 tools/perf/util/intel-bts.c 	bts->session = session;
bts               869 tools/perf/util/intel-bts.c 	bts->machine = &session->machines.host; /* No kvm support */
bts               870 tools/perf/util/intel-bts.c 	bts->auxtrace_type = auxtrace_info->type;
bts               871 tools/perf/util/intel-bts.c 	bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
bts               872 tools/perf/util/intel-bts.c 	bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
bts               873 tools/perf/util/intel-bts.c 	bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
bts               874 tools/perf/util/intel-bts.c 	bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
bts               875 tools/perf/util/intel-bts.c 	bts->cap_user_time_zero =
bts               877 tools/perf/util/intel-bts.c 	bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];
bts               879 tools/perf/util/intel-bts.c 	bts->sampling_mode = false;
bts               881 tools/perf/util/intel-bts.c 	bts->auxtrace.process_event = intel_bts_process_event;
bts               882 tools/perf/util/intel-bts.c 	bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
bts               883 tools/perf/util/intel-bts.c 	bts->auxtrace.flush_events = intel_bts_flush;
bts               884 tools/perf/util/intel-bts.c 	bts->auxtrace.free_events = intel_bts_free_events;
bts               885 tools/perf/util/intel-bts.c 	bts->auxtrace.free = intel_bts_free;
bts               886 tools/perf/util/intel-bts.c 	session->auxtrace = &bts->auxtrace;
bts               895 tools/perf/util/intel-bts.c 		bts->synth_opts = *session->itrace_synth_opts;
bts               897 tools/perf/util/intel-bts.c 		itrace_synth_opts__set_default(&bts->synth_opts,
bts               899 tools/perf/util/intel-bts.c 		bts->synth_opts.thread_stack =
bts               903 tools/perf/util/intel-bts.c 	if (bts->synth_opts.calls)
bts               904 tools/perf/util/intel-bts.c 		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
bts               906 tools/perf/util/intel-bts.c 	if (bts->synth_opts.returns)
bts               907 tools/perf/util/intel-bts.c 		bts->branches_filter |= PERF_IP_FLAG_RETURN |
bts               910 tools/perf/util/intel-bts.c 	err = intel_bts_synth_events(bts, session);
bts               914 tools/perf/util/intel-bts.c 	err = auxtrace_queues__process_index(&bts->queues, session);
bts               918 tools/perf/util/intel-bts.c 	if (bts->queues.populated)
bts               919 tools/perf/util/intel-bts.c 		bts->data_queued = true;
bts               924 tools/perf/util/intel-bts.c 	auxtrace_queues__free(&bts->queues);
bts               927 tools/perf/util/intel-bts.c 	free(bts);
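The hits above fall into two unrelated uses of the name: the arch/x86/include/asm/bitops.h and sync_bitops.h lines use `bts` as the x86 bit-test-and-set instruction behind set_bit()/test_and_set_bit(), while the arch/x86/events and tools/perf/util/intel-bts.c lines refer to Intel Branch Trace Store (BTS). As a rough, user-space-only sketch of the first pattern (not kernel code; it assumes a GCC/clang toolchain with flag-output asm constraints such as "=@ccc"), the following mirrors the lock-prefixed `bts` sequence quoted from bitops.h:

/* Illustrative sketch only: user-space analogue of the kernel's
 * lock-prefixed "bts" bitop quoted above. The instruction sets bit
 * `nr` in memory and reports the bit's previous value via the carry
 * flag, which the flag-output constraint "=@ccc" captures. */
#include <stdio.h>

static inline int test_and_set_bit_sketch(long nr, volatile unsigned long *addr)
{
	unsigned char oldbit;

	asm volatile("lock; btsq %2, %1"
		     : "=@ccc" (oldbit), "+m" (*addr)	/* carry flag, bitmap word */
		     : "Ir" (nr)			/* bit index */
		     : "memory");
	return oldbit;
}

int main(void)
{
	unsigned long word = 0;

	/* First call finds the bit clear, second finds it already set. */
	printf("first set  -> old bit %d, word 0x%lx\n",
	       test_and_set_bit_sketch(3, &word), word);
	printf("second set -> old bit %d, word 0x%lx\n",
	       test_and_set_bit_sketch(3, &word), word);
	return 0;
}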