/linux-4.4.14/drivers/isdn/mISDN/ |
H A D | dsp_audio.c | 105 static unsigned char linear2ulaw(short sample) linear2ulaw() argument 127 /* Get the sample into sign-magnitude. */ linear2ulaw() 128 sign = (sample >> 8) & 0x80; /* set aside the sign */ linear2ulaw() 130 sample = -sample; /* get magnitude */ linear2ulaw() 133 sample = sample + BIAS; linear2ulaw() 134 exponent = exp_lut[(sample >> 7) & 0xFF]; linear2ulaw() 135 mantissa = (sample >> (exponent + 3)) & 0x0F; linear2ulaw() 180 * the seven bit sample is the number of every second alaw-sample ordered by 187 * generate table for conversion law from/to 7-bit alaw-like sample * 210 /* spl is the source: the law-sample (converted to alaw) */ dsp_audio_generate_seven() 214 /* find the 7-bit-sample */ dsp_audio_generate_seven() 242 s32 sample; dsp_audio_generate_mix_table() local 248 sample = dsp_audio_law_to_s32[i]; dsp_audio_generate_mix_table() 249 sample += dsp_audio_law_to_s32[j]; dsp_audio_generate_mix_table() 250 if (sample > 32767) dsp_audio_generate_mix_table() 251 sample = 32767; dsp_audio_generate_mix_table() 252 if (sample < -32768) dsp_audio_generate_mix_table() 253 sample = -32768; dsp_audio_generate_mix_table() 255 dsp_audio_s16_to_law[sample & 0xffff]; dsp_audio_generate_mix_table() 306 register s32 sample; dsp_audio_generate_volume_changes() local 329 sample = dsp_audio_law_to_s32[i] * num[0] / denum[0]; dsp_audio_generate_volume_changes() 330 if (sample < -32768) dsp_audio_generate_volume_changes() 331 sample = -32768; dsp_audio_generate_volume_changes() 332 else if (sample > 32767) dsp_audio_generate_volume_changes() 333 sample = 32767; dsp_audio_generate_volume_changes() 334 dsp_audio_increase1[i] = dsp_audio_s16_to_law[sample & 0xffff]; dsp_audio_generate_volume_changes() 335 sample = dsp_audio_law_to_s32[i] * num[1] / denum[1]; dsp_audio_generate_volume_changes() 336 if (sample < -32768) dsp_audio_generate_volume_changes() 337 sample = -32768; dsp_audio_generate_volume_changes() 338 else if (sample > 32767) dsp_audio_generate_volume_changes() 339 sample = 32767; dsp_audio_generate_volume_changes() 340 dsp_audio_increase2[i] = dsp_audio_s16_to_law[sample & 0xffff]; dsp_audio_generate_volume_changes() 341 sample = dsp_audio_law_to_s32[i] * num[2] / denum[2]; dsp_audio_generate_volume_changes() 342 if (sample < -32768) dsp_audio_generate_volume_changes() 343 sample = -32768; dsp_audio_generate_volume_changes() 344 else if (sample > 32767) dsp_audio_generate_volume_changes() 345 sample = 32767; dsp_audio_generate_volume_changes() 346 dsp_audio_increase3[i] = dsp_audio_s16_to_law[sample & 0xffff]; dsp_audio_generate_volume_changes() 347 sample = dsp_audio_law_to_s32[i] * num[3] / denum[3]; dsp_audio_generate_volume_changes() 348 if (sample < -32768) dsp_audio_generate_volume_changes() 349 sample = -32768; dsp_audio_generate_volume_changes() 350 else if (sample > 32767) dsp_audio_generate_volume_changes() 351 sample = 32767; dsp_audio_generate_volume_changes() 352 dsp_audio_increase4[i] = dsp_audio_s16_to_law[sample & 0xffff]; dsp_audio_generate_volume_changes() 353 sample = dsp_audio_law_to_s32[i] * num[4] / denum[4]; dsp_audio_generate_volume_changes() 354 if (sample < -32768) dsp_audio_generate_volume_changes() 355 sample = -32768; dsp_audio_generate_volume_changes() 356 else if (sample > 32767) dsp_audio_generate_volume_changes() 357 sample = 32767; dsp_audio_generate_volume_changes() 358 dsp_audio_increase5[i] = dsp_audio_s16_to_law[sample & 0xffff]; dsp_audio_generate_volume_changes() 359 sample = dsp_audio_law_to_s32[i] * num[5] / denum[5]; 
dsp_audio_generate_volume_changes() 360 if (sample < -32768) dsp_audio_generate_volume_changes() 361 sample = -32768; dsp_audio_generate_volume_changes() 362 else if (sample > 32767) dsp_audio_generate_volume_changes() 363 sample = 32767; dsp_audio_generate_volume_changes() 364 dsp_audio_increase6[i] = dsp_audio_s16_to_law[sample & 0xffff]; dsp_audio_generate_volume_changes() 365 sample = dsp_audio_law_to_s32[i] * num[6] / denum[6]; dsp_audio_generate_volume_changes() 366 if (sample < -32768) dsp_audio_generate_volume_changes() 367 sample = -32768; dsp_audio_generate_volume_changes() 368 else if (sample > 32767) dsp_audio_generate_volume_changes() 369 sample = 32767; dsp_audio_generate_volume_changes() 370 dsp_audio_increase7[i] = dsp_audio_s16_to_law[sample & 0xffff]; dsp_audio_generate_volume_changes() 371 sample = dsp_audio_law_to_s32[i] * num[7] / denum[7]; dsp_audio_generate_volume_changes() 372 if (sample < -32768) dsp_audio_generate_volume_changes() 373 sample = -32768; dsp_audio_generate_volume_changes() 374 else if (sample > 32767) dsp_audio_generate_volume_changes() 375 sample = 32767; dsp_audio_generate_volume_changes() 376 dsp_audio_increase8[i] = dsp_audio_s16_to_law[sample & 0xffff]; dsp_audio_generate_volume_changes()
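The dsp_audio.c fragments above combine two recurring ideas: a classic linear-to-u-law encoder (take the sign, add a bias, find the segment via exp_lut[], keep a 4-bit mantissa) and table generators that do their arithmetic in 32 bits, clamp to the signed 16-bit range, and convert the result back to a law byte. Below is a minimal, self-contained sketch of both steps; the segment search is written as a loop instead of the kernel's 256-entry exp_lut[], and CLIP/BIAS are the usual G.711 constants, so treat it as an illustration rather than the kernel code.

#include <stdint.h>

#define BIAS 0x84	/* u-law bias added before the segment search */
#define CLIP 32635	/* keep sample + BIAS inside 15 bits */

/* Loop equivalent of exp_lut[(sample >> 7) & 0xFF]: the segment is the
 * index of the highest set bit of the biased magnitude's top byte. */
static int ulaw_segment(int magnitude)
{
	int seg = 0;
	int top = (magnitude >> 7) & 0xFF;

	while (top >>= 1)
		seg++;
	return seg;
}

/* Linear signed 16-bit PCM to u-law, following the steps visible in
 * linear2ulaw() above. */
static unsigned char linear2ulaw_sketch(int16_t pcm)
{
	int sample = pcm;
	int sign, exponent, mantissa;

	sign = (sample >> 8) & 0x80;		/* set aside the sign */
	if (sign)
		sample = -sample;		/* get the magnitude */
	if (sample > CLIP)
		sample = CLIP;
	sample += BIAS;
	exponent = ulaw_segment(sample);
	mantissa = (sample >> (exponent + 3)) & 0x0F;
	return (unsigned char)~(sign | (exponent << 4) | mantissa);
}

/* The mix and volume tables all end with the same saturation step
 * before indexing dsp_audio_s16_to_law[]. */
static int16_t clamp_s16(int32_t sample)
{
	if (sample < -32768)
		return -32768;
	if (sample > 32767)
		return 32767;
	return (int16_t)sample;
}

dsp_audio_generate_mix_table() is then, in effect, clamp_s16(dsp_audio_law_to_s32[i] + dsp_audio_law_to_s32[j]) fed back through dsp_audio_s16_to_law[] for every (i, j) pair, and the eight dsp_audio_increaseN[] tables repeat the same pattern with a num[]/denum[] scale factor instead of a second sample.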
|
H A D | dsp_biquad.h |
   53  static inline int16_t biquad2(struct biquad2_state *bq, int16_t sample)  biquad2() argument
   58  z0 = sample * bq->gain + bq->z1 * bq->a1 + bq->z2 * bq->a2;  biquad2()
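Only two lines of dsp_biquad.h are matched, but line 58 is the heart of a fixed-point two-pole (biquad) recursion: the new intermediate value is the input scaled by a gain plus the two delayed values scaled by the feedback coefficients. The sketch below fills in a plausible state struct and Q15 scaling purely for illustration; the kernel's actual struct layout, feed-forward path and shift amounts are not visible in this listing.

#include <stdint.h>

/* Hypothetical state layout; only the recursion on line 58 is taken
 * from the listing. */
struct biquad2_state_sketch {
	int32_t gain;		/* Q15 input gain */
	int32_t a1, a2;		/* Q15 feedback coefficients */
	int32_t z1, z2;		/* delayed values, kept at sample scale */
};

/* One filter step: accumulate in 32 bits (sample scale << 15), then
 * renormalise and age the delay line. */
static inline int16_t biquad2_sketch(struct biquad2_state_sketch *bq,
				     int16_t sample)
{
	int32_t z0 = sample * bq->gain + bq->z1 * bq->a1 + bq->z2 * bq->a2;

	bq->z2 = bq->z1;
	bq->z1 = z0 >> 15;	/* assumed Q15 renormalisation */
	return (int16_t)(z0 >> 15);
}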
|
H A D | dsp_cmx.c | 1313 register s32 sample; local 1492 sample = dsp_audio_law_to_s32[p[t]] + 1495 if (sample < -32768) 1496 sample = -32768; 1497 else if (sample > 32767) 1498 sample = 32767; 1499 *d++ = dsp_audio_s16_to_law[sample & 0xffff]; 1522 sample = dsp_audio_law_to_s32[p[t]] + *c++ - 1524 if (sample < -32768) 1525 sample = -32768; 1526 else if (sample > 32767) 1527 sample = 32767; 1528 *d++ = dsp_audio_s16_to_law[sample & 0xffff]; 1534 sample = *c++ - dsp_audio_law_to_s32[q[r]]; 1535 if (sample < -32768) 1536 sample = -32768; 1537 else if (sample > 32767) 1538 sample = 32767; 1539 *d++ = dsp_audio_s16_to_law[sample & 0xffff]; 1550 sample = dsp_audio_law_to_s32[p[t]] + *c++; 1551 if (sample < -32768) 1552 sample = -32768; 1553 else if (sample > 32767) 1554 sample = 32767; 1555 *d++ = dsp_audio_s16_to_law[sample & 0xffff]; 1561 sample = *c++; 1562 if (sample < -32768) 1563 sample = -32768; 1564 else if (sample > 32767) 1565 sample = 32767; 1566 *d++ = dsp_audio_s16_to_law[sample & 0xffff]; 1625 static u16 dsp_count; /* last sample count */ 1626 static int dsp_count_valid; /* if we have last sample count */
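The dsp_cmx.c matches around lines 1492-1566 show the conference mixing rule: a 32-bit running sum of all members (read through *c++) is combined with the member's own transmit data p[] and, crucially, the member's last contribution q[r] is subtracted again so nobody hears an echo of themselves; the result is saturated to s16 and mapped back to a law byte. A hedged sketch of that per-sample step, with law_to_s32[]/s16_to_law[] standing in for the kernel's dsp_audio_* tables:

#include <stdint.h>

/* Stand-ins for dsp_audio_law_to_s32[] and dsp_audio_s16_to_law[]. */
extern const int32_t law_to_s32[256];
extern const uint8_t s16_to_law[65536];

/* One conference member, one sample: add the member's own TX data to the
 * conference sum, remove the sample this member already contributed,
 * then saturate and convert back to a law byte. */
static uint8_t cmx_mix_sample(uint8_t tx_law, int32_t conf_sum, uint8_t own_law)
{
	int32_t sample = law_to_s32[tx_law] + conf_sum - law_to_s32[own_law];

	if (sample < -32768)
		sample = -32768;
	else if (sample > 32767)
		sample = 32767;
	return s16_to_law[sample & 0xffff];
}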
|
H A D | l1oip_codec.c |
    6  * -> compression by reducing the number of sample resolution to 4
   34  Each sample is converted to a-LAW with only 16 steps of level resolution.
  259  * The result size must be the number of sample in packet. (2 * input data)
  324  int i1, i2, c, sample;  l1oip_4bit_alloc() local
  357  sample = _4bit_to_ulaw[i1];  l1oip_4bit_alloc()
  359  sample = _4bit_to_alaw[i1];  l1oip_4bit_alloc()
  362  table_dec[(i1 << 4) | i2] |= (sample << 8);  l1oip_4bit_alloc()
  363  table_dec[(i2 << 4) | i1] |= sample;  l1oip_4bit_alloc()
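l1oip_codec.c compresses audio to 4 bits per sample and decodes two samples per input byte through a 256-entry table of 16-bit values: the upper nibble's reconstructed law byte is packed into the high 8 bits and the lower nibble's into the low 8 bits, matching lines 362-363 above. A small sketch of building and using such a table, assuming a caller-provided 16-entry expansion table (the kernel's _4bit_to_ulaw/_4bit_to_alaw equivalents):

#include <stdint.h>

/* Build a decode table: the index is an input byte holding two 4-bit
 * codes, the value packs the two reconstructed 8-bit law samples. */
static void build_4bit_dec_table(uint16_t table_dec[256],
				 const uint8_t nibble_to_law[16])
{
	int hi, lo;

	for (hi = 0; hi < 16; hi++)
		for (lo = 0; lo < 16; lo++)
			table_dec[(hi << 4) | lo] =
				(uint16_t)(nibble_to_law[hi] << 8) |
				nibble_to_law[lo];
}

/* Decoding doubles the data, as the comment at line 259 notes:
 * one input byte yields two law bytes. */
static void decode_4bit(const uint16_t table_dec[256],
			const uint8_t *src, uint8_t *dst, int len)
{
	while (len--) {
		uint16_t pair = table_dec[*src++];

		*dst++ = pair >> 8;
		*dst++ = pair & 0xff;
	}
}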
|
H A D | dsp_dtmf.c |
   98  * calculate the coefficients of the given sample and decode *
  101  /* the given sample is decoded. if the sample is not long enough for a
  113  * spl and len - the sample
|
H A D | dsp_tones.c |
   42  /* the last sample+1 is in phase with the first sample. the error is low */
  368  * count - the sample from the beginning of the pattern (phase)
  371  * return - the sk_buff with the sample
  391  count = tone->count; /* gives current sample */  dsp_tone_copy()
  393  /* copy sample */  dsp_tone_copy()
  395  /* find sample to start with */  dsp_tone_copy()
  438  dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)  dsp_tone_hw_message() argument
  444  (len) ? HFC_SPL_LOOP_ON : HFC_SPL_LOOP_OFF, len, sample,  dsp_tone_hw_message()
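The dsp_tones.c matches outline how a tone is streamed: the fixed pattern is designed so that its last sample+1 is in phase with its first, and a per-tone count tracks the phase so each copy starts where the previous one stopped and wraps at the pattern end. A minimal sketch of that wraparound copy, with hypothetical names (the kernel's dsp_tone_copy() also deals with sk_buffs and pattern selection, which is omitted here):

#include <stdint.h>

/* Copy 'len' law bytes of a repeating tone pattern into 'dst', starting
 * at phase '*count' and wrapping at 'pattern_len'; the updated phase is
 * written back so the next call continues seamlessly. */
static void tone_copy_sketch(uint8_t *dst, int len,
			     const uint8_t *pattern, int pattern_len,
			     int *count)
{
	int phase = *count % pattern_len;	/* find sample to start with */

	while (len--) {
		*dst++ = pattern[phase++];
		if (phase == pattern_len)
			phase = 0;		/* in phase with the first sample */
	}
	*count = phase;
}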
|
/linux-4.4.14/tools/perf/arch/arm/tests/ |
H A D | dwarf-unwind.c |
   11  static int sample_ustack(struct perf_sample *sample,  sample_ustack() argument
   14  struct stack_dump *stack = &sample->user_stack;  sample_ustack()
   21  pr_debug("failed to allocate sample uregs data\n");  sample_ustack()
   43  int test__arch_unwind_sample(struct perf_sample *sample,  test__arch_unwind_sample() argument
   46  struct regs_dump *regs = &sample->user_regs;  test__arch_unwind_sample()
   51  pr_debug("failed to allocate sample uregs data\n");  test__arch_unwind_sample()
   60  return sample_ustack(sample, thread, buf);  test__arch_unwind_sample()
|
/linux-4.4.14/tools/perf/arch/arm64/tests/ |
H A D | dwarf-unwind.c |
   11  static int sample_ustack(struct perf_sample *sample,  sample_ustack() argument
   14  struct stack_dump *stack = &sample->user_stack;  sample_ustack()
   21  pr_debug("failed to allocate sample uregs data\n");  sample_ustack()
   43  int test__arch_unwind_sample(struct perf_sample *sample,  test__arch_unwind_sample() argument
   46  struct regs_dump *regs = &sample->user_regs;  test__arch_unwind_sample()
   51  pr_debug("failed to allocate sample uregs data\n");  test__arch_unwind_sample()
   60  return sample_ustack(sample, thread, buf);  test__arch_unwind_sample()
|
/linux-4.4.14/tools/perf/arch/x86/tests/ |
H A D | dwarf-unwind.c |
   12  static int sample_ustack(struct perf_sample *sample,  sample_ustack() argument
   15  struct stack_dump *stack = &sample->user_stack;  sample_ustack()
   22  pr_debug("failed to allocate sample uregs data\n");  sample_ustack()
   44  int test__arch_unwind_sample(struct perf_sample *sample,  test__arch_unwind_sample() argument
   47  struct regs_dump *regs = &sample->user_regs;  test__arch_unwind_sample()
   52  pr_debug("failed to allocate sample uregs data\n");  test__arch_unwind_sample()
   61  return sample_ustack(sample, thread, buf);  test__arch_unwind_sample()
|
H A D | perf-time-to-tsc.c |
  111  struct perf_sample sample;  test__perf_time_to_tsc() local
  120  &sample));  test__perf_time_to_tsc()
  121  comm1_time = sample.time;  test__perf_time_to_tsc()
  125  &sample));  test__perf_time_to_tsc()
  126  comm2_time = sample.time;  test__perf_time_to_tsc()
|
/linux-4.4.14/tools/perf/ |
H A D | builtin-timechart.c | 271 struct cpu_sample *sample; pid_put_sample() local 283 sample = zalloc(sizeof(*sample)); pid_put_sample() 284 assert(sample != NULL); pid_put_sample() 285 sample->start_time = start; pid_put_sample() 286 sample->end_time = end; pid_put_sample() 287 sample->type = type; pid_put_sample() 288 sample->next = c->samples; pid_put_sample() 289 sample->cpu = cpu; pid_put_sample() 290 sample->backtrace = backtrace; pid_put_sample() 291 c->samples = sample; pid_put_sample() 293 if (sample->type == TYPE_RUNNING && end > start && start > 0) { pid_put_sample() 313 struct perf_sample *sample __maybe_unused, process_comm_event() 323 struct perf_sample *sample __maybe_unused, process_fork_event() 333 struct perf_sample *sample __maybe_unused, process_exit_event() 472 struct perf_sample *sample, cat_backtrace() 481 struct ip_callchain *chain = sample->callchain; cat_backtrace() 492 if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { cat_backtrace() 551 struct perf_sample *sample, 556 struct perf_sample *sample, process_sample_event() 563 if (!tchart->first_time || tchart->first_time > sample->time) process_sample_event() 564 tchart->first_time = sample->time; process_sample_event() 565 if (tchart->last_time < sample->time) process_sample_event() 566 tchart->last_time = sample->time; process_sample_event() 571 return f(tchart, evsel, sample, process_sample_event() 572 cat_backtrace(event, sample, machine)); process_sample_event() 581 struct perf_sample *sample, process_sample_cpu_idle() 584 u32 state = perf_evsel__intval(evsel, sample, "state"); process_sample_cpu_idle() 585 u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id"); process_sample_cpu_idle() 588 c_state_end(tchart, cpu_id, sample->time); process_sample_cpu_idle() 590 c_state_start(cpu_id, sample->time, state); process_sample_cpu_idle() 597 struct perf_sample *sample, process_sample_cpu_frequency() 600 u32 state = perf_evsel__intval(evsel, sample, "state"); process_sample_cpu_frequency() 601 u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id"); process_sample_cpu_frequency() 603 p_state_change(tchart, cpu_id, sample->time, state); process_sample_cpu_frequency() 610 struct perf_sample *sample, process_sample_sched_wakeup() 613 u8 flags = perf_evsel__intval(evsel, sample, "common_flags"); process_sample_sched_wakeup() 614 int waker = perf_evsel__intval(evsel, sample, "common_pid"); process_sample_sched_wakeup() 615 int wakee = perf_evsel__intval(evsel, sample, "pid"); process_sample_sched_wakeup() 617 sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace); process_sample_sched_wakeup() 624 struct perf_sample *sample, process_sample_sched_switch() 627 int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"); process_sample_sched_switch() 628 int next_pid = perf_evsel__intval(evsel, sample, "next_pid"); process_sample_sched_switch() 629 u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); process_sample_sched_switch() 631 sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid, process_sample_sched_switch() 640 struct perf_sample *sample, process_sample_power_start() 643 u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id"); process_sample_power_start() 644 u64 value = perf_evsel__intval(evsel, sample, "value"); process_sample_power_start() 646 c_state_start(cpu_id, sample->time, value); process_sample_power_start() 653 struct perf_sample *sample, process_sample_power_end() 656 c_state_end(tchart, sample->cpu, sample->time); 
process_sample_power_end() 663 struct perf_sample *sample, process_sample_power_frequency() 666 u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id"); process_sample_power_frequency() 667 u64 value = perf_evsel__intval(evsel, sample, "value"); process_sample_power_frequency() 669 p_state_change(tchart, cpu_id, sample->time, value); process_sample_power_frequency() 675 * After the last sample we need to wrap up the current C/P state 725 struct io_sample *sample; pid_begin_io_sample() local 750 sample = zalloc(sizeof(*sample)); pid_begin_io_sample() 751 if (!sample) pid_begin_io_sample() 753 sample->start_time = start; pid_begin_io_sample() 754 sample->type = type; pid_begin_io_sample() 755 sample->fd = fd; pid_begin_io_sample() 756 sample->next = c->io_samples; pid_begin_io_sample() 757 c->io_samples = sample; pid_begin_io_sample() 770 struct io_sample *sample, *prev; pid_end_io_sample() local 777 sample = c->io_samples; pid_end_io_sample() 779 if (!sample) /* skip partially captured events */ pid_end_io_sample() 782 if (sample->end_time) { pid_end_io_sample() 788 if (sample->type != type) { pid_end_io_sample() 793 sample->end_time = end; pid_end_io_sample() 794 prev = sample->next; pid_end_io_sample() 798 if (sample->end_time - sample->start_time < tchart->min_time) pid_end_io_sample() 799 sample->end_time = sample->start_time + tchart->min_time; pid_end_io_sample() 800 if (prev && sample->start_time < prev->end_time) { pid_end_io_sample() 802 sample->start_time = prev->end_time; pid_end_io_sample() 804 prev->end_time = sample->start_time; pid_end_io_sample() 808 sample->err = ret; pid_end_io_sample() 817 sample->bytes = ret; pid_end_io_sample() 822 prev->type == sample->type && pid_end_io_sample() 823 prev->err == sample->err && pid_end_io_sample() 824 prev->fd == sample->fd && pid_end_io_sample() 825 prev->end_time + tchart->merge_dist >= sample->start_time) { pid_end_io_sample() 827 sample->bytes += prev->bytes; pid_end_io_sample() 828 sample->merges += prev->merges + 1; pid_end_io_sample() 830 sample->start_time = prev->start_time; pid_end_io_sample() 831 sample->next = prev->next; pid_end_io_sample() 834 if (!sample->err && sample->bytes > c->max_bytes) pid_end_io_sample() 835 c->max_bytes = sample->bytes; pid_end_io_sample() 846 struct perf_sample *sample) process_enter_read() 848 long fd = perf_evsel__intval(evsel, sample, "fd"); process_enter_read() 849 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ, process_enter_read() 850 sample->time, fd); process_enter_read() 856 struct perf_sample *sample) process_exit_read() 858 long ret = perf_evsel__intval(evsel, sample, "ret"); process_exit_read() 859 return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ, process_exit_read() 860 sample->time, ret); process_exit_read() 866 struct perf_sample *sample) process_enter_write() 868 long fd = perf_evsel__intval(evsel, sample, "fd"); process_enter_write() 869 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE, process_enter_write() 870 sample->time, fd); process_enter_write() 876 struct perf_sample *sample) process_exit_write() 878 long ret = perf_evsel__intval(evsel, sample, "ret"); process_exit_write() 879 return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE, process_exit_write() 880 sample->time, ret); process_exit_write() 886 struct perf_sample *sample) process_enter_sync() 888 long fd = perf_evsel__intval(evsel, sample, "fd"); process_enter_sync() 889 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC, process_enter_sync() 890 sample->time, fd); 
process_enter_sync() 896 struct perf_sample *sample) process_exit_sync() 898 long ret = perf_evsel__intval(evsel, sample, "ret"); process_exit_sync() 899 return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC, process_exit_sync() 900 sample->time, ret); process_exit_sync() 906 struct perf_sample *sample) process_enter_tx() 908 long fd = perf_evsel__intval(evsel, sample, "fd"); process_enter_tx() 909 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX, process_enter_tx() 910 sample->time, fd); process_enter_tx() 916 struct perf_sample *sample) process_exit_tx() 918 long ret = perf_evsel__intval(evsel, sample, "ret"); process_exit_tx() 919 return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX, process_exit_tx() 920 sample->time, ret); process_exit_tx() 926 struct perf_sample *sample) process_enter_rx() 928 long fd = perf_evsel__intval(evsel, sample, "fd"); process_enter_rx() 929 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX, process_enter_rx() 930 sample->time, fd); process_enter_rx() 936 struct perf_sample *sample) process_exit_rx() 938 long ret = perf_evsel__intval(evsel, sample, "ret"); process_exit_rx() 939 return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX, process_exit_rx() 940 sample->time, ret); process_exit_rx() 946 struct perf_sample *sample) process_enter_poll() 948 long fd = perf_evsel__intval(evsel, sample, "fd"); process_enter_poll() 949 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL, process_enter_poll() 950 sample->time, fd); process_enter_poll() 956 struct perf_sample *sample) process_exit_poll() 958 long ret = perf_evsel__intval(evsel, sample, "ret"); process_exit_poll() 959 return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL, process_exit_poll() 960 sample->time, ret); process_exit_poll() 1109 struct cpu_sample *sample; draw_cpu_usage() local 1114 sample = c->samples; draw_cpu_usage() 1115 while (sample) { draw_cpu_usage() 1116 if (sample->type == TYPE_RUNNING) { draw_cpu_usage() 1117 svg_process(sample->cpu, draw_cpu_usage() 1118 sample->start_time, draw_cpu_usage() 1119 sample->end_time, draw_cpu_usage() 1122 sample->backtrace); draw_cpu_usage() 1125 sample = sample->next; draw_cpu_usage() 1140 struct io_sample *sample; draw_io_bars() local 1154 sample = c->io_samples; draw_io_bars() 1155 for (sample = c->io_samples; sample; sample = sample->next) { draw_io_bars() 1156 double h = (double)sample->bytes / c->max_bytes; draw_io_bars() 1159 sample->err == -EAGAIN) draw_io_bars() 1162 if (sample->err) draw_io_bars() 1165 if (sample->type == IOTYPE_SYNC) draw_io_bars() 1167 sample->start_time, draw_io_bars() 1168 sample->end_time, draw_io_bars() 1170 sample->err ? "error" : "sync", draw_io_bars() 1171 sample->fd, draw_io_bars() 1172 sample->err, draw_io_bars() 1173 sample->merges); draw_io_bars() 1174 else if (sample->type == IOTYPE_POLL) draw_io_bars() 1176 sample->start_time, draw_io_bars() 1177 sample->end_time, draw_io_bars() 1179 sample->err ? "error" : "poll", draw_io_bars() 1180 sample->fd, draw_io_bars() 1181 sample->err, draw_io_bars() 1182 sample->merges); draw_io_bars() 1183 else if (sample->type == IOTYPE_READ) draw_io_bars() 1185 sample->start_time, draw_io_bars() 1186 sample->end_time, draw_io_bars() 1188 sample->err ? "error" : "disk", draw_io_bars() 1189 sample->fd, draw_io_bars() 1190 sample->err, draw_io_bars() 1191 sample->merges); draw_io_bars() 1192 else if (sample->type == IOTYPE_WRITE) draw_io_bars() 1194 sample->start_time, draw_io_bars() 1195 sample->end_time, draw_io_bars() 1197 sample->err ? 
"error" : "disk", draw_io_bars() 1198 sample->fd, draw_io_bars() 1199 sample->err, draw_io_bars() 1200 sample->merges); draw_io_bars() 1201 else if (sample->type == IOTYPE_RX) draw_io_bars() 1203 sample->start_time, draw_io_bars() 1204 sample->end_time, draw_io_bars() 1206 sample->err ? "error" : "net", draw_io_bars() 1207 sample->fd, draw_io_bars() 1208 sample->err, draw_io_bars() 1209 sample->merges); draw_io_bars() 1210 else if (sample->type == IOTYPE_TX) draw_io_bars() 1212 sample->start_time, draw_io_bars() 1213 sample->end_time, draw_io_bars() 1215 sample->err ? "error" : "net", draw_io_bars() 1216 sample->fd, draw_io_bars() 1217 sample->err, draw_io_bars() 1218 sample->merges); draw_io_bars() 1252 struct cpu_sample *sample; draw_process_bars() local 1268 sample = c->samples; draw_process_bars() 1269 while (sample) { draw_process_bars() 1270 if (sample->type == TYPE_RUNNING) draw_process_bars() 1271 svg_running(Y, sample->cpu, draw_process_bars() 1272 sample->start_time, draw_process_bars() 1273 sample->end_time, draw_process_bars() 1274 sample->backtrace); draw_process_bars() 1275 if (sample->type == TYPE_BLOCKED) draw_process_bars() 1276 svg_blocked(Y, sample->cpu, draw_process_bars() 1277 sample->start_time, draw_process_bars() 1278 sample->end_time, draw_process_bars() 1279 sample->backtrace); draw_process_bars() 1280 if (sample->type == TYPE_WAITING) draw_process_bars() 1281 svg_waiting(Y, sample->cpu, draw_process_bars() 1282 sample->start_time, draw_process_bars() 1283 sample->end_time, draw_process_bars() 1284 sample->backtrace); draw_process_bars() 1285 sample = sample->next; draw_process_bars() 1927 .sample = process_sample_event, cmd_timechart() 471 cat_backtrace(union perf_event *event, struct perf_sample *sample, struct machine *machine) cat_backtrace() argument 554 process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument 579 process_sample_cpu_idle(struct timechart *tchart __maybe_unused, struct perf_evsel *evsel, struct perf_sample *sample, const char *backtrace __maybe_unused) process_sample_cpu_idle() argument 595 process_sample_cpu_frequency(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample, const char *backtrace __maybe_unused) process_sample_cpu_frequency() argument 608 process_sample_sched_wakeup(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample, const char *backtrace) process_sample_sched_wakeup() argument 622 process_sample_sched_switch(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample, const char *backtrace) process_sample_sched_switch() argument 638 process_sample_power_start(struct timechart *tchart __maybe_unused, struct perf_evsel *evsel, struct perf_sample *sample, const char *backtrace __maybe_unused) process_sample_power_start() argument 651 process_sample_power_end(struct timechart *tchart, struct perf_evsel *evsel __maybe_unused, struct perf_sample *sample, const char *backtrace __maybe_unused) process_sample_power_end() argument 661 process_sample_power_frequency(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample, const char *backtrace __maybe_unused) process_sample_power_frequency() argument 844 process_enter_read(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_enter_read() argument 854 process_exit_read(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample 
*sample) process_exit_read() argument 864 process_enter_write(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_enter_write() argument 874 process_exit_write(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_exit_write() argument 884 process_enter_sync(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_enter_sync() argument 894 process_exit_sync(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_exit_sync() argument 904 process_enter_tx(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_enter_tx() argument 914 process_exit_tx(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_exit_tx() argument 924 process_enter_rx(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_enter_rx() argument 934 process_exit_rx(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_exit_rx() argument 944 process_enter_poll(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_enter_poll() argument 954 process_exit_poll(struct timechart *tchart, struct perf_evsel *evsel, struct perf_sample *sample) process_exit_poll() argument
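builtin-timechart.c keeps per-process samples in singly linked lists (newest first, as pid_put_sample() and pid_begin_io_sample() show) and, when an I/O sample ends, pid_end_io_sample() may merge it with the previous one: both must describe the same fd with the same type and error status, and the gap between them must be within merge_dist; the surviving sample then accumulates the byte count and a merge counter and absorbs the older start time. A simplified sketch of that merge rule, with a pared-down io_sample struct:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Pared-down version of the timechart io_sample node. */
struct io_sample_sketch {
	struct io_sample_sketch *next;	/* newest-first list */
	uint64_t start_time, end_time;
	uint64_t bytes;
	int type, err, fd;
	unsigned int merges;
};

/* Merge 'cur' with the sample it chains to when they are close enough
 * and describe the same kind of I/O on the same fd. */
static void maybe_merge_io_sample(struct io_sample_sketch *cur,
				  uint64_t merge_dist)
{
	struct io_sample_sketch *prev = cur->next;

	if (prev && prev->type == cur->type &&
	    prev->err == cur->err && prev->fd == cur->fd &&
	    prev->end_time + merge_dist >= cur->start_time) {
		cur->bytes += prev->bytes;
		cur->merges += prev->merges + 1;
		cur->start_time = prev->start_time;
		cur->next = prev->next;
		free(prev);	/* assumption: nodes were heap allocated */
	}
}

The listing also shows the related clean-up steps: a sample shorter than min_time is stretched to min_time, and an overlap with the previous sample is resolved by shifting whichever boundary is cheaper.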
|
H A D | builtin-script.c | 226 pr_err("Display of symbols requested but neither sample IP nor " perf_evsel__check_attr() 227 "sample address\nis selected. Hence, no addresses to convert " perf_evsel__check_attr() 237 pr_err("Display of DSO requested but neither sample IP nor " perf_evsel__check_attr() 238 "sample address\nis selected. Hence, no addresses to convert " perf_evsel__check_attr() 243 pr_err("Display of source line number requested but sample IP is not\n" perf_evsel__check_attr() 371 struct perf_sample *sample, print_sample_iregs() 375 struct regs_dump *regs = &sample->intr_regs; print_sample_iregs() 388 static void print_sample_start(struct perf_sample *sample, print_sample_start() argument 407 printf("%5d/%-5d ", sample->pid, sample->tid); print_sample_start() 409 printf("%5d ", sample->pid); print_sample_start() 411 printf("%5d ", sample->tid); print_sample_start() 415 printf("%3d ", sample->cpu); print_sample_start() 417 printf("[%03d] ", sample->cpu); print_sample_start() 421 nsecs = sample->time; print_sample_start() 442 struct perf_sample *sample, print_sample_brstack() 446 struct branch_stack *br = sample->branch_stack; print_sample_brstack() 464 struct perf_sample *sample, print_sample_brstacksym() 468 struct branch_stack *br = sample->branch_stack; print_sample_brstacksym() 504 struct perf_sample *sample, print_sample_addr() 510 printf("%16" PRIx64, sample->addr); print_sample_addr() 515 perf_event__preprocess_sample_addr(event, sample, thread, &al); print_sample_addr() 533 struct perf_sample *sample, print_sample_bts() 545 if (symbol_conf.use_callchain && sample->callchain) { print_sample_bts() 554 perf_evsel__print_ip(evsel, sample, al, print_opts, print_sample_bts() 563 print_sample_addr(event, sample, thread, attr); print_sample_bts() 591 static void process_event(union perf_event *event, struct perf_sample *sample, process_event() argument 600 print_sample_start(sample, thread, evsel); process_event() 603 printf("%10" PRIu64 " ", sample->period); process_event() 611 print_sample_flags(sample->flags); process_event() 614 print_sample_bts(event, sample, evsel, thread, al); process_event() 619 event_format__print(evsel->tp_format, sample->cpu, process_event() 620 sample->raw_data, sample->raw_size); process_event() 622 print_sample_addr(event, sample, thread, attr); process_event() 630 perf_evsel__print_ip(evsel, sample, al, process_event() 636 print_sample_iregs(event, sample, thread, attr); process_event() 639 print_sample_brstack(event, sample, thread, attr); process_event() 641 print_sample_brstacksym(event, sample, thread, attr); process_event() 701 struct perf_sample *sample, process_sample_event() 708 if (sample->time < last_timestamp) { process_sample_event() 711 sample->time); process_sample_event() 714 last_timestamp = sample->time; process_sample_event() 718 if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { process_sample_event() 727 if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) process_sample_event() 730 scripting_ops->process_event(event, sample, evsel, &al); process_sample_event() 777 struct perf_sample *sample, process_comm_event() 783 struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id); process_comm_event() 792 if (perf_event__process_comm(tool, event, sample, machine) < 0) process_comm_event() 796 sample->cpu = 0; process_comm_event() 797 sample->time = 0; process_comm_event() 798 sample->tid = event->comm.tid; process_comm_event() 799 sample->pid = event->comm.pid; process_comm_event() 801 
print_sample_start(sample, thread, evsel); process_comm_event() 811 struct perf_sample *sample, process_fork_event() 817 struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id); process_fork_event() 819 if (perf_event__process_fork(tool, event, sample, machine) < 0) process_fork_event() 829 sample->cpu = 0; process_fork_event() 830 sample->time = event->fork.time; process_fork_event() 831 sample->tid = event->fork.tid; process_fork_event() 832 sample->pid = event->fork.pid; process_fork_event() 834 print_sample_start(sample, thread, evsel); process_fork_event() 842 struct perf_sample *sample, process_exit_event() 849 struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id); process_exit_event() 858 sample->cpu = 0; process_exit_event() 859 sample->time = 0; process_exit_event() 860 sample->tid = event->fork.tid; process_exit_event() 861 sample->pid = event->fork.pid; process_exit_event() 863 print_sample_start(sample, thread, evsel); process_exit_event() 866 if (perf_event__process_exit(tool, event, sample, machine) < 0) process_exit_event() 875 struct perf_sample *sample, process_mmap_event() 881 struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id); process_mmap_event() 883 if (perf_event__process_mmap(tool, event, sample, machine) < 0) process_mmap_event() 893 sample->cpu = 0; process_mmap_event() 894 sample->time = 0; process_mmap_event() 895 sample->tid = event->mmap.tid; process_mmap_event() 896 sample->pid = event->mmap.pid; process_mmap_event() 898 print_sample_start(sample, thread, evsel); process_mmap_event() 906 struct perf_sample *sample, process_mmap2_event() 912 struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id); process_mmap2_event() 914 if (perf_event__process_mmap2(tool, event, sample, machine) < 0) process_mmap2_event() 924 sample->cpu = 0; process_mmap2_event() 925 sample->time = 0; process_mmap2_event() 926 sample->tid = event->mmap2.tid; process_mmap2_event() 927 sample->pid = event->mmap2.pid; process_mmap2_event() 929 print_sample_start(sample, thread, evsel); process_mmap2_event() 937 struct perf_sample *sample, process_switch_event() 943 struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id); process_switch_event() 945 if (perf_event__process_switch(tool, event, sample, machine) < 0) process_switch_event() 948 thread = machine__findnew_thread(machine, sample->pid, process_switch_event() 949 sample->tid); process_switch_event() 955 print_sample_start(sample, thread, evsel); process_switch_event() 1713 .sample = process_sample_event, cmd_script() 370 print_sample_iregs(union perf_event *event __maybe_unused, struct perf_sample *sample, struct thread *thread __maybe_unused, struct perf_event_attr *attr) print_sample_iregs() argument 441 print_sample_brstack(union perf_event *event __maybe_unused, struct perf_sample *sample, struct thread *thread __maybe_unused, struct perf_event_attr *attr __maybe_unused) print_sample_brstack() argument 463 print_sample_brstacksym(union perf_event *event __maybe_unused, struct perf_sample *sample, struct thread *thread __maybe_unused, struct perf_event_attr *attr __maybe_unused) print_sample_brstacksym() argument 503 print_sample_addr(union perf_event *event, struct perf_sample *sample, struct thread *thread, struct perf_event_attr *attr) print_sample_addr() argument 532 print_sample_bts(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct thread *thread, struct addr_location *al) 
print_sample_bts() argument 699 process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument 775 process_comm_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) process_comm_event() argument 809 process_fork_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) process_fork_event() argument 840 process_exit_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) process_exit_event() argument 873 process_mmap_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) process_mmap_event() argument 904 process_mmap2_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) process_mmap2_event() argument 935 process_switch_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) process_switch_event() argument
|
H A D | builtin-inject.c | 175 struct perf_sample *sample __maybe_unused, perf_event__repipe() 183 struct perf_sample *sample __maybe_unused, perf_event__drop() 191 struct perf_sample *sample, perf_event__drop_aux() 197 inject->aux_id = sample->id; perf_event__drop_aux() 204 struct perf_sample *sample, 210 struct perf_sample *sample, perf_event__repipe_sample() 216 return f(tool, event, sample, evsel, machine); perf_event__repipe_sample() 219 build_id__mark_dso_hit(tool, event, sample, evsel, machine); perf_event__repipe_sample() 226 struct perf_sample *sample, perf_event__repipe_mmap() 231 err = perf_event__process_mmap(tool, event, sample, machine); perf_event__repipe_mmap() 232 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_mmap() 239 struct perf_sample *sample, perf_event__repipe_mmap2() 244 err = perf_event__process_mmap2(tool, event, sample, machine); perf_event__repipe_mmap2() 245 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_mmap2() 252 struct perf_sample *sample, perf_event__repipe_fork() 257 err = perf_event__process_fork(tool, event, sample, machine); perf_event__repipe_fork() 258 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_fork() 265 struct perf_sample *sample, perf_event__repipe_comm() 270 err = perf_event__process_comm(tool, event, sample, machine); perf_event__repipe_comm() 271 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_comm() 278 struct perf_sample *sample, perf_event__repipe_exit() 283 err = perf_event__process_exit(tool, event, sample, machine); perf_event__repipe_exit() 284 perf_event__repipe(tool, event, sample, machine); perf_event__repipe_exit() 353 struct perf_sample *sample, perf_event__inject_buildid() 363 thread = machine__findnew_thread(machine, sample->pid, sample->tid); perf_event__inject_buildid() 370 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, &al); perf_event__inject_buildid() 393 perf_event__repipe(tool, event, sample, machine); perf_event__inject_buildid() 399 struct perf_sample *sample, perf_inject__sched_process_exit() 407 if (sample->tid == ent->tid) { perf_inject__sched_process_exit() 419 struct perf_sample *sample, perf_inject__sched_switch() 426 perf_inject__sched_process_exit(tool, event, sample, evsel, machine); perf_inject__sched_switch() 435 ent->tid = sample->tid; perf_inject__sched_switch() 443 struct perf_sample *sample, perf_inject__sched_stat() 451 u32 pid = perf_evsel__intval(evsel, sample, "pid"); perf_inject__sched_stat() 463 sample_sw.period = sample->period; perf_inject__sched_stat() 464 sample_sw.time = sample->time; perf_inject__sched_stat() 494 struct perf_sample *sample __maybe_unused, drop_sample() 524 * and it has a compatible sample type. 
585 inject->tool.sample = perf_event__inject_buildid; __cmd_inject() 671 .sample = perf_event__repipe_sample, cmd_inject() 189 perf_event__drop_aux(struct perf_tool *tool, union perf_event *event __maybe_unused, struct perf_sample *sample, struct machine *machine __maybe_unused) perf_event__drop_aux() argument 208 perf_event__repipe_sample(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_event__repipe_sample() argument 224 perf_event__repipe_mmap(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_mmap() argument 237 perf_event__repipe_mmap2(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_mmap2() argument 250 perf_event__repipe_fork(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_fork() argument 263 perf_event__repipe_comm(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_comm() argument 276 perf_event__repipe_exit(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__repipe_exit() argument 351 perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __maybe_unused, struct machine *machine) perf_event__inject_buildid() argument 397 perf_inject__sched_process_exit(struct perf_tool *tool, union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel __maybe_unused, struct machine *machine __maybe_unused) perf_inject__sched_process_exit() argument 417 perf_inject__sched_switch(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_inject__sched_switch() argument 441 perf_inject__sched_stat(struct perf_tool *tool, union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_inject__sched_stat() argument
|
H A D | builtin-lock.c | 347 struct perf_sample *sample); 350 struct perf_sample *sample); 353 struct perf_sample *sample); 356 struct perf_sample *sample); 396 struct perf_sample *sample) report_lock_acquire_event() 402 const char *name = perf_evsel__strval(evsel, sample, "name"); report_lock_acquire_event() 403 u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); report_lock_acquire_event() 404 int flag = perf_evsel__intval(evsel, sample, "flag"); report_lock_acquire_event() 414 ts = thread_stat_findnew(sample->tid); report_lock_acquire_event() 462 seq->prev_event_time = sample->time; report_lock_acquire_event() 468 struct perf_sample *sample) report_lock_acquired_event() 475 const char *name = perf_evsel__strval(evsel, sample, "name"); report_lock_acquired_event() 476 u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); report_lock_acquired_event() 486 ts = thread_stat_findnew(sample->tid); report_lock_acquired_event() 501 contended_term = sample->time - seq->prev_event_time; report_lock_acquired_event() 525 seq->prev_event_time = sample->time; report_lock_acquired_event() 531 struct perf_sample *sample) report_lock_contended_event() 537 const char *name = perf_evsel__strval(evsel, sample, "name"); report_lock_contended_event() 538 u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); report_lock_contended_event() 548 ts = thread_stat_findnew(sample->tid); report_lock_contended_event() 580 seq->prev_event_time = sample->time; report_lock_contended_event() 586 struct perf_sample *sample) report_lock_release_event() 592 const char *name = perf_evsel__strval(evsel, sample, "name"); report_lock_release_event() 593 u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); report_lock_release_event() 603 ts = thread_stat_findnew(sample->tid); report_lock_release_event() 656 struct perf_sample *sample) perf_evsel__process_lock_acquire() 659 return trace_handler->acquire_event(evsel, sample); perf_evsel__process_lock_acquire() 664 struct perf_sample *sample) perf_evsel__process_lock_acquired() 667 return trace_handler->acquired_event(evsel, sample); perf_evsel__process_lock_acquired() 672 struct perf_sample *sample) perf_evsel__process_lock_contended() 675 return trace_handler->contended_event(evsel, sample); perf_evsel__process_lock_contended() 680 struct perf_sample *sample) perf_evsel__process_lock_release() 683 return trace_handler->release_event(evsel, sample); perf_evsel__process_lock_release() 806 struct perf_sample *sample); 810 struct perf_sample *sample, process_sample_event() 815 struct thread *thread = machine__findnew_thread(machine, sample->pid, process_sample_event() 816 sample->tid); process_sample_event() 826 err = f(evsel, sample); process_sample_event() 859 .sample = process_sample_event, __cmd_report() 395 report_lock_acquire_event(struct perf_evsel *evsel, struct perf_sample *sample) report_lock_acquire_event() argument 467 report_lock_acquired_event(struct perf_evsel *evsel, struct perf_sample *sample) report_lock_acquired_event() argument 530 report_lock_contended_event(struct perf_evsel *evsel, struct perf_sample *sample) report_lock_contended_event() argument 585 report_lock_release_event(struct perf_evsel *evsel, struct perf_sample *sample) report_lock_release_event() argument 655 perf_evsel__process_lock_acquire(struct perf_evsel *evsel, struct perf_sample *sample) perf_evsel__process_lock_acquire() argument 663 perf_evsel__process_lock_acquired(struct perf_evsel *evsel, struct perf_sample *sample) perf_evsel__process_lock_acquired() argument 
671 perf_evsel__process_lock_contended(struct perf_evsel *evsel, struct perf_sample *sample) perf_evsel__process_lock_contended() argument 679 perf_evsel__process_lock_release(struct perf_evsel *evsel, struct perf_sample *sample) perf_evsel__process_lock_release() argument 808 process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument
|
H A D | builtin-kvm.c | 37 struct perf_sample *sample, exit_event_get_key() 41 key->key = perf_evsel__intval(evsel, sample, KVM_EXIT_REASON); exit_event_get_key() 50 struct perf_sample *sample, struct event_key *key) exit_event_begin() 53 exit_event_get_key(evsel, sample, key); exit_event_begin() 66 struct perf_sample *sample __maybe_unused, exit_event_end() 266 struct perf_sample *sample, is_child_event() 278 child_ops->get_key(evsel, sample, key); is_child_event() 289 struct perf_sample *sample __maybe_unused) handle_child_event() 315 struct perf_sample *sample) handle_end_event() 352 if (sample->time < time_begin) { handle_end_event() 357 time_diff = sample->time - time_begin; handle_end_event() 365 sample->time, sample->pid, vcpu_record->vcpu_id, handle_end_event() 376 struct perf_sample *sample) per_vcpu_record() 388 vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, VCPU_ID); per_vcpu_record() 398 struct perf_sample *sample) handle_kvm_event() 404 vcpu_record = per_vcpu_record(thread, evsel, sample); handle_kvm_event() 413 if (kvm->events_ops->is_begin_event(evsel, sample, &key)) handle_kvm_event() 414 return handle_begin_event(kvm, vcpu_record, &key, sample->time); handle_kvm_event() 416 if (is_child_event(kvm, evsel, sample, &key)) handle_kvm_event() 417 return handle_child_event(kvm, vcpu_record, &key, sample); handle_kvm_event() 419 if (kvm->events_ops->is_end_event(evsel, sample, &key)) handle_kvm_event() 420 return handle_end_event(kvm, vcpu_record, &key, sample); handle_kvm_event() 456 DEF_SORT_NAME_KEY(sample, count), 628 struct perf_sample *sample __maybe_unused, process_lost_event() 639 struct perf_sample *sample) skip_sample() 641 if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL) skip_sample() 649 struct perf_sample *sample, process_sample_event() 658 if (skip_sample(kvm, sample)) process_sample_event() 661 thread = machine__findnew_thread(machine, sample->pid, sample->tid); process_sample_event() 668 if (!handle_kvm_event(kvm, thread, evsel, sample)) process_sample_event() 722 struct perf_sample sample; perf_kvm__mmap_read_idx() local 728 err = perf_evlist__parse_sample(kvm->evlist, event, &sample); perf_kvm__mmap_read_idx() 731 pr_err("Failed to parse sample\n"); perf_kvm__mmap_read_idx() 735 err = perf_session__queue_event(kvm->session, event, &sample, 0); perf_kvm__mmap_read_idx() 743 pr_err("Failed to enqueue sample: %d\n", err); perf_kvm__mmap_read_idx() 747 /* save time stamp of our first sample for this mmap */ perf_kvm__mmap_read_idx() 749 *mmap_time = sample.time; perf_kvm__mmap_read_idx() 999 /* make sure these are *not*; want as small a sample as possible */ evlist__for_each() 1044 .sample = process_sample_event, read_events() 1211 "key for sorting: sample(sort by samples number)" kvm_events_report() 1313 "key for sorting: sample(sort by samples number)" kvm_events_live() 1333 kvm->tool.sample = process_sample_event; kvm_events_live() 1443 .sort_key = "sample", kvm_cmd_stat() 36 exit_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) exit_event_get_key() argument 49 exit_event_begin(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) exit_event_begin() argument 264 is_child_event(struct perf_kvm_stat *kvm, struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) is_child_event() argument 312 handle_end_event(struct perf_kvm_stat *kvm, struct vcpu_event_record *vcpu_record, struct event_key *key, struct perf_sample *sample) handle_end_event() argument 
374 per_vcpu_record(struct thread *thread, struct perf_evsel *evsel, struct perf_sample *sample) per_vcpu_record() argument 395 handle_kvm_event(struct perf_kvm_stat *kvm, struct thread *thread, struct perf_evsel *evsel, struct perf_sample *sample) handle_kvm_event() argument 638 skip_sample(struct perf_kvm_stat *kvm, struct perf_sample *sample) skip_sample() argument 647 process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument
|
H A D | builtin-mem.c |
   63  struct perf_sample *sample,  dump_raw_samples()
   70  if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {  dump_raw_samples()
   92  sample->pid,  dump_raw_samples()
   94  sample->tid,  dump_raw_samples()
   96  sample->ip,  dump_raw_samples()
   98  sample->addr,  dump_raw_samples()
  100  sample->weight,  dump_raw_samples()
  102  sample->data_src,  dump_raw_samples()
  113  struct perf_sample *sample,  process_sample_event()
  117  return dump_raw_samples(tool, event, sample, machine);  process_sample_event()
  261  .sample = process_sample_event,  cmd_mem()
   61  dump_raw_samples(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine)  dump_raw_samples() argument
  111  process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __maybe_unused, struct machine *machine)  process_sample_event() argument
|
H A D | builtin-sched.c | 107 struct perf_sample *sample, struct machine *machine); 110 struct perf_sample *sample, struct machine *machine); 113 struct perf_sample *sample, struct machine *machine); 121 struct perf_sample *sample, 695 struct perf_evsel *evsel, struct perf_sample *sample, replay_wakeup_event() 698 const char *comm = perf_evsel__strval(evsel, sample, "comm"); replay_wakeup_event() 699 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); replay_wakeup_event() 705 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); replay_wakeup_event() 708 waker = register_pid(sched, sample->tid, "<unknown>"); replay_wakeup_event() 711 add_sched_event_wakeup(sched, waker, sample->time, wakee); replay_wakeup_event() 717 struct perf_sample *sample, replay_switch_event() 720 const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), replay_switch_event() 721 *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); replay_switch_event() 722 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), replay_switch_event() 723 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); replay_switch_event() 724 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); replay_switch_event() 726 u64 timestamp0, timestamp = sample->time; replay_switch_event() 727 int cpu = sample->cpu; replay_switch_event() 956 struct perf_sample *sample, latency_switch_event() 959 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), latency_switch_event() 960 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); latency_switch_event() 961 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); latency_switch_event() 964 u64 timestamp0, timestamp = sample->time; latency_switch_event() 965 int cpu = sample->cpu, err = -1; latency_switch_event() 1026 struct perf_sample *sample, latency_runtime_event() 1029 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); latency_runtime_event() 1030 const u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); latency_runtime_event() 1033 u64 timestamp = sample->time; latency_runtime_event() 1034 int cpu = sample->cpu, err = -1; latency_runtime_event() 1061 struct perf_sample *sample, latency_wakeup_event() 1064 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); latency_wakeup_event() 1068 u64 timestamp = sample->time; latency_wakeup_event() 1122 struct perf_sample *sample, latency_migrate_task_event() 1125 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); latency_migrate_task_event() 1126 u64 timestamp = sample->time; latency_migrate_task_event() 1331 struct perf_sample *sample, process_sched_wakeup_event() 1337 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); process_sched_wakeup_event() 1343 struct perf_sample *sample, struct machine *machine) map_switch_event() 1345 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); map_switch_event() 1348 u64 timestamp0, timestamp = sample->time; map_switch_event() 1350 int cpu, this_cpu = sample->cpu; map_switch_event() 1430 struct perf_sample *sample, process_sched_switch_event() 1434 int this_cpu = sample->cpu, err = 0; process_sched_switch_event() 1435 u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), process_sched_switch_event() 1436 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); process_sched_switch_event() 1448 err = sched->tp_handler->switch_event(sched, evsel, sample, machine); process_sched_switch_event() 1456 struct perf_sample *sample, 
process_sched_runtime_event() 1462 return sched->tp_handler->runtime_event(sched, evsel, sample, machine); process_sched_runtime_event() 1469 struct perf_sample *sample, perf_sched__process_fork_event() 1475 perf_event__process_fork(tool, event, sample, machine); perf_sched__process_fork_event() 1486 struct perf_sample *sample, process_sched_migrate_task_event() 1492 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); process_sched_migrate_task_event() 1499 struct perf_sample *sample, 1504 struct perf_sample *sample, perf_sched__process_tracepoint_sample() 1512 err = f(tool, evsel, sample, machine); perf_sched__process_tracepoint_sample() 1785 .sample = perf_sched__process_tracepoint_sample, cmd_sched() 694 replay_wakeup_event(struct perf_sched *sched, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine __maybe_unused) replay_wakeup_event() argument 715 replay_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine __maybe_unused) replay_switch_event() argument 954 latency_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_switch_event() argument 1024 latency_runtime_event(struct perf_sched *sched, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_runtime_event() argument 1059 latency_wakeup_event(struct perf_sched *sched, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_wakeup_event() argument 1120 latency_migrate_task_event(struct perf_sched *sched, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_migrate_task_event() argument 1329 process_sched_wakeup_event(struct perf_tool *tool, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) process_sched_wakeup_event() argument 1342 map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) map_switch_event() argument 1428 process_sched_switch_event(struct perf_tool *tool, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) process_sched_switch_event() argument 1454 process_sched_runtime_event(struct perf_tool *tool, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) process_sched_runtime_event() argument 1467 perf_sched__process_fork_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_sched__process_fork_event() argument 1484 process_sched_migrate_task_event(struct perf_tool *tool, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) process_sched_migrate_task_event() argument 1502 perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_sched__process_tracepoint_sample() argument
|
H A D | builtin-trace.c | 113 u64 (*integer)(struct tp_field *field, struct perf_sample *sample); 114 void *(*pointer)(struct tp_field *field, struct perf_sample *sample); 119 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \ 122 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 132 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \ 135 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 169 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample) tp_field__ptr() argument 171 return sample->raw_data + field->offset; tp_field__ptr() 265 #define perf_evsel__sc_tp_uint(evsel, name, sample) \ 267 fields->name.integer(&fields->name, sample); }) 269 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \ 271 fields->name.pointer(&fields->name, sample); }) 1611 union perf_event *event, struct perf_sample *sample) trace__process_event() 1619 ret = machine__process_lost_event(machine, event, sample); trace__process_event() 1621 ret = machine__process_event(machine, event, sample); trace__process_event() 1630 struct perf_sample *sample, trace__tool_process() 1634 return trace__process_event(trace, machine, event, sample); trace__tool_process() 1863 struct perf_sample *sample); 1909 int id, struct perf_sample *sample) thread__update_stats() 1928 if (ttrace->entry_time && sample->time > ttrace->entry_time) thread__update_stats() 1929 duration = sample->time - ttrace->entry_time; thread__update_stats() 1934 static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample) trace__printf_interrupted_entry() argument 1948 duration = sample->time - ttrace->entry_time; trace__printf_interrupted_entry() 1950 printed = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output); trace__printf_interrupted_entry() 1959 struct perf_sample *sample) trace__sys_enter() 1965 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; trace__sys_enter() 1972 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace__sys_enter() 1977 args = perf_evsel__sc_tp_ptr(evsel, args, sample); trace__sys_enter() 1986 trace__printf_interrupted_entry(trace, sample); trace__sys_enter() 1988 ttrace->entry_time = sample->time; trace__sys_enter() 1997 trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output); trace__sys_enter() 2018 struct perf_sample *sample) trace__sys_exit() 2023 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; trace__sys_exit() 2030 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace__sys_exit() 2036 thread__update_stats(ttrace, id, sample); trace__sys_exit() 2038 ret = perf_evsel__sc_tp_uint(evsel, ret, sample); trace__sys_exit() 2046 ttrace->exit_time = sample->time; trace__sys_exit() 2049 duration = sample->time - ttrace->entry_time; trace__sys_exit() 2058 trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output); trace__sys_exit() 2095 struct perf_sample *sample) trace__vfs_getname() 2097 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace__vfs_getname() 2102 const char *filename = perf_evsel__rawptr(evsel, sample, "pathname"); trace__vfs_getname() 2152 struct perf_sample *sample) trace__sched_stat_runtime() 2154 u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); trace__sched_stat_runtime() 2157 sample->pid, trace__sched_stat_runtime() 2158 sample->tid); trace__sched_stat_runtime() 2172 
perf_evsel__strval(evsel, sample, "comm"), trace__sched_stat_runtime() 2173 (pid_t)perf_evsel__intval(evsel, sample, "pid"), trace__sched_stat_runtime() 2175 perf_evsel__intval(evsel, sample, "vruntime")); trace__sched_stat_runtime() 2182 struct perf_sample *sample) trace__event_handler() 2184 trace__printf_interrupted_entry(trace, sample); trace__event_handler() 2185 trace__fprintf_tstamp(trace, sample->time, trace->output); trace__event_handler() 2193 event_format__fprintf(evsel->tp_format, sample->cpu, trace__event_handler() 2194 sample->raw_data, sample->raw_size, trace__event_handler() 2202 static void print_location(FILE *f, struct perf_sample *sample, print_location() argument 2216 fprintf(f, "0x%" PRIx64, sample->addr); print_location() 2222 struct perf_sample *sample) trace__pgfault() 2231 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace__pgfault() 2245 sample->ip, &al); trace__pgfault() 2247 trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output); trace__pgfault() 2253 print_location(trace->output, sample, &al, false, true); trace__pgfault() 2258 sample->addr, &al); trace__pgfault() 2262 MAP__FUNCTION, sample->addr, &al); trace__pgfault() 2270 print_location(trace->output, sample, &al, true, false); trace__pgfault() 2280 static bool skip_sample(struct trace *trace, struct perf_sample *sample) skip_sample() argument 2282 if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) || skip_sample() 2283 (trace->tid_list && intlist__find(trace->tid_list, sample->tid))) skip_sample() 2294 struct perf_sample *sample, trace__process_sample() 2303 if (skip_sample(trace, sample)) trace__process_sample() 2307 trace->base_time = sample->time; trace__process_sample() 2311 handler(trace, evsel, event, sample); trace__process_sample() 2440 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) trace__handle_event() argument 2446 trace->base_time = sample->time; trace__handle_event() 2449 trace__process_event(trace, trace->host, event, sample); trace__handle_event() 2453 evsel = perf_evlist__id2evsel(trace->evlist, sample->id); trace__handle_event() 2455 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); trace__handle_event() 2460 sample->raw_data == NULL) { trace__handle_event() 2461 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", trace__handle_event() 2462 perf_evsel__name(evsel), sample->tid, trace__handle_event() 2463 sample->cpu, sample->raw_size); trace__handle_event() 2466 handler(trace, evsel, event, sample); trace__handle_event() 2636 struct perf_sample sample; trace__run() local 2640 err = perf_evlist__parse_sample(evlist, event, &sample); trace__run() 2642 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); trace__run() 2646 trace__handle_event(trace, event, &sample); trace__run() 2748 trace->tool.sample = trace__process_sample; trace__replay() 1610 trace__process_event(struct trace *trace, struct machine *machine, union perf_event *event, struct perf_sample *sample) trace__process_event() argument 1628 trace__tool_process(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) trace__tool_process() argument 1908 thread__update_stats(struct thread_trace *ttrace, int id, struct perf_sample *sample) thread__update_stats() argument 1957 trace__sys_enter(struct trace *trace, struct perf_evsel *evsel, union perf_event *event 
__maybe_unused, struct perf_sample *sample) trace__sys_enter() argument 2016 trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, union perf_event *event __maybe_unused, struct perf_sample *sample) trace__sys_exit() argument 2093 trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel, union perf_event *event __maybe_unused, struct perf_sample *sample) trace__vfs_getname() argument 2150 trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel, union perf_event *event __maybe_unused, struct perf_sample *sample) trace__sched_stat_runtime() argument 2180 trace__event_handler(struct trace *trace, struct perf_evsel *evsel, union perf_event *event __maybe_unused, struct perf_sample *sample) trace__event_handler() argument 2219 trace__pgfault(struct trace *trace, struct perf_evsel *evsel, union perf_event *event, struct perf_sample *sample) trace__pgfault() argument 2292 trace__process_sample(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine __maybe_unused) trace__process_sample() argument
|
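The builtin-trace.c hits above read syscall tracepoint fields by memcpy'ing from sample->raw_data at a previously recorded byte offset and width (tp_field__u##bits, perf_evsel__sc_tp_uint/ptr). A minimal standalone sketch of that accessor pattern follows; it is not perf's API, the struct and function names (raw_sample, field_desc, field_u64) are invented for illustration, and it assumes a little-endian host and a known payload layout.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* hypothetical stand-ins for perf's tp_field / perf_sample */
    struct raw_sample { const void *raw_data; };
    struct field_desc { unsigned offset; unsigned size; };

    /* copy an unsigned integer of the recorded width out of the raw payload */
    static uint64_t field_u64(const struct field_desc *f, const struct raw_sample *s)
    {
        uint64_t v = 0;
        memcpy(&v, (const char *)s->raw_data + f->offset, f->size); /* little-endian host assumed */
        return v;
    }

    int main(void)
    {
        /* fake 16-byte tracepoint payload: u32 id at offset 0, u64 arg at offset 8 */
        unsigned char payload[16] = { 0 };
        payload[0] = 42;           /* id = 42 */
        payload[8] = 0x10;         /* arg = 0x10 */

        struct raw_sample s = { .raw_data = payload };
        struct field_desc id  = { .offset = 0, .size = 4 };
        struct field_desc arg = { .offset = 8, .size = 8 };

        printf("id=%llu arg=%llu\n",
               (unsigned long long)field_u64(&id, &s),
               (unsigned long long)field_u64(&arg, &s));
        return 0;
    }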
H A D | builtin-kmem.c | 154 struct perf_sample *sample) perf_evsel__process_alloc_event() 156 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"), perf_evsel__process_alloc_event() 157 call_site = perf_evsel__intval(evsel, sample, "call_site"); perf_evsel__process_alloc_event() 158 int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"), perf_evsel__process_alloc_event() 159 bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc"); perf_evsel__process_alloc_event() 161 if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) || perf_evsel__process_alloc_event() 173 struct perf_sample *sample) perf_evsel__process_alloc_node_event() 175 int ret = perf_evsel__process_alloc_event(evsel, sample); perf_evsel__process_alloc_node_event() 178 int node1 = cpu__get_node(sample->cpu), perf_evsel__process_alloc_node_event() 179 node2 = perf_evsel__intval(evsel, sample, "node"); perf_evsel__process_alloc_node_event() 217 struct perf_sample *sample) perf_evsel__process_free_event() 219 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"); perf_evsel__process_free_event() 226 if ((short)sample->cpu != s_alloc->alloc_cpu) { perf_evsel__process_free_event() 366 static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample) find_callsite() argument 377 al.thread = machine__findnew_thread(machine, sample->pid, sample->tid); find_callsite() 378 sample__resolve_callchain(sample, NULL, evsel, &al, 16); find_callsite() 407 pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip); find_callsite() 408 return sample->ip; find_callsite() 707 static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample, parse_gfp_flags() argument 711 .cpu = sample->cpu, parse_gfp_flags() 712 .data = sample->raw_data, parse_gfp_flags() 713 .size = sample->raw_size, parse_gfp_flags() 759 struct perf_sample *sample) perf_evsel__process_page_alloc_event() 762 unsigned int order = perf_evsel__intval(evsel, sample, "order"); perf_evsel__process_page_alloc_event() 763 unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags"); perf_evsel__process_page_alloc_event() 764 unsigned int migrate_type = perf_evsel__intval(evsel, sample, perf_evsel__process_page_alloc_event() 776 page = perf_evsel__intval(evsel, sample, "pfn"); perf_evsel__process_page_alloc_event() 778 page = perf_evsel__intval(evsel, sample, "page"); perf_evsel__process_page_alloc_event() 790 if (parse_gfp_flags(evsel, sample, gfp_flags) < 0) perf_evsel__process_page_alloc_event() 793 callsite = find_callsite(evsel, sample); perf_evsel__process_page_alloc_event() 832 struct perf_sample *sample) perf_evsel__process_page_free_event() 835 unsigned int order = perf_evsel__intval(evsel, sample, "order"); perf_evsel__process_page_free_event() 843 page = perf_evsel__intval(evsel, sample, "pfn"); perf_evsel__process_page_free_event() 845 page = perf_evsel__intval(evsel, sample, "page"); perf_evsel__process_page_free_event() 901 struct perf_sample *sample); 905 struct perf_sample *sample, process_sample_event() 910 struct thread *thread = machine__findnew_thread(machine, sample->pid, process_sample_event() 911 sample->tid); process_sample_event() 923 err = f(evsel, sample); process_sample_event() 932 .sample = process_sample_event, 153 perf_evsel__process_alloc_event(struct perf_evsel *evsel, struct perf_sample *sample) perf_evsel__process_alloc_event() argument 172 perf_evsel__process_alloc_node_event(struct perf_evsel *evsel, struct perf_sample *sample) perf_evsel__process_alloc_node_event() 
argument 216 perf_evsel__process_free_event(struct perf_evsel *evsel, struct perf_sample *sample) perf_evsel__process_free_event() argument 758 perf_evsel__process_page_alloc_event(struct perf_evsel *evsel, struct perf_sample *sample) perf_evsel__process_page_alloc_event() argument 831 perf_evsel__process_page_free_event(struct perf_evsel *evsel, struct perf_sample *sample) perf_evsel__process_page_free_event() argument 903 process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument
|
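The builtin-kmem.c hits pull ptr, call_site, bytes_req and bytes_alloc out of each allocation tracepoint and fold them into per-callsite statistics. Below is a hedged sketch of that accounting step only, using a hypothetical fixed-size table instead of perf's rbtree; the waste figure (bytes_alloc - bytes_req) illustrates the kind of internal-fragmentation number such a tool can derive.

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical per-callsite accumulator, not the perf-kmem data structure */
    struct alloc_stat { uint64_t call_site, req, alloc, hits; };

    #define NSTATS 64
    static struct alloc_stat stats[NSTATS];
    static int nstats;

    static void account_alloc(uint64_t call_site, uint64_t bytes_req, uint64_t bytes_alloc)
    {
        for (int i = 0; i < nstats; i++) {
            if (stats[i].call_site == call_site) {
                stats[i].req   += bytes_req;
                stats[i].alloc += bytes_alloc;
                stats[i].hits++;
                return;
            }
        }
        if (nstats < NSTATS)
            stats[nstats++] = (struct alloc_stat){ call_site, bytes_req, bytes_alloc, 1 };
    }

    int main(void)
    {
        account_alloc(0xffffffff811234, 24, 32);    /* e.g. kmalloc rounding 24 up to 32 */
        account_alloc(0xffffffff811234, 100, 128);
        for (int i = 0; i < nstats; i++)
            printf("site %#llx: hits=%llu req=%llu alloc=%llu waste=%llu\n",
                   (unsigned long long)stats[i].call_site,
                   (unsigned long long)stats[i].hits,
                   (unsigned long long)stats[i].req,
                   (unsigned long long)stats[i].alloc,
                   (unsigned long long)(stats[i].alloc - stats[i].req));
        return 0;
    }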
H A D | builtin-top.c | 406 fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0); perf_top__print_mapped_keys() 699 hist__account_cycles(iter->sample->branch_stack, al, iter->sample, hist_iter__top_callback() 707 struct perf_sample *sample, perf_event__process_sample() 720 if (!intlist__has_entry(seen, sample->pid)) { perf_event__process_sample() 722 sample->pid); perf_event__process_sample() 723 intlist__add(seen, sample->pid); perf_event__process_sample() 737 if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) perf_event__process_sample() 790 .sample = sample, perf_event__process_sample() 813 struct perf_sample sample; perf_top__mmap_read_idx() local 822 ret = perf_evlist__parse_sample(top->evlist, event, &sample); perf_top__mmap_read_idx() 824 pr_err("Can't parse sample, err = %d\n", ret); perf_top__mmap_read_idx() 828 evsel = perf_evlist__id2evsel(session->evlist, sample.id); perf_top__mmap_read_idx() 852 sample.pid); perf_top__mmap_read_idx() 871 &sample, machine); perf_top__mmap_read_idx() 874 machine__process_event(machine, event, &sample); perf_top__mmap_read_idx() 1124 OPT_U64('c', "count", &opts->user_interval, "event period to sample"), cmd_top() 1170 "output field(s): overhead, period, sample plus all of sort keys"), cmd_top() 1216 "branch any", "sample any taken branches", cmd_top() 704 perf_event__process_sample(struct perf_tool *tool, const union perf_event *event, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) perf_event__process_sample() argument
|
H A D | builtin-annotate.c | 50 struct perf_sample *sample __maybe_unused, perf_evsel__add_sample() 86 struct perf_sample *sample, process_sample_event() 94 if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { process_sample_event() 100 if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap)) process_sample_event() 103 if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) { process_sample_event() 285 .sample = process_sample_event, cmd_annotate() 84 process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument
|
H A D | builtin-evlist.c | 46 OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"), cmd_evlist()
|
H A D | builtin-record.c | 69 struct perf_sample *sample __maybe_unused, process_synthesized_event() 335 struct perf_sample *sample, process_sample_event() 343 return build_id__mark_dso_hit(tool, event, sample, evsel, machine); process_sample_event() 1004 .sample = process_sample_event, 1042 "collect raw sample records from all opened counters"), 1047 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"), 1070 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"), 1073 "Record the sample timestamps"), 1074 OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"), 1076 "don't sample"), 1090 "branch any", "sample any taken branches", 1097 "sample by weight (on special events only)"), 1099 "sample transaction flags (special events only)"), 1103 "sample selected machine registers on interrupt," 333 process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument
|
H A D | builtin-report.c | 107 hist__account_cycles(iter->sample->branch_stack, al, iter->sample, hist_iter__report_callback() 140 struct perf_sample *sample, process_sample_event() 148 .sample = sample, process_sample_event() 154 if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { process_sample_event() 163 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) process_sample_event() 171 if (!sample->branch_stack) process_sample_event() 195 struct perf_sample *sample __maybe_unused, process_read_event() 649 .sample = process_sample_event, cmd_report() 701 "output field(s): overhead, period, sample plus all of sort keys"), cmd_report() 703 "Show sample percentage for different cpu modes"), cmd_report() 705 "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN), cmd_report() 138 process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) process_sample_event() argument
|
/linux-4.4.14/samples/trace_events/ |
H A D | Makefile | 10 # Here trace-events-sample.c does the CREATE_TRACE_POINTS. 12 CFLAGS_trace-events-sample.o := -I$(src) 14 obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace-events-sample.o
|
H A D | trace-events-sample.c | 11 #include "trace-events-sample.h" 91 simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn"); foo_bar_reg() 108 simple_tsk = kthread_run(simple_thread, NULL, "event-sample"); trace_event_init() 129 MODULE_DESCRIPTION("trace-events-sample");
|
H A D | trace-events-sample.h | 7 * In this case, it would look for sample-trace.h 13 * This file is called trace-events-sample.h but we want the system 14 * to be called "sample-trace". Therefore we must define the name of this 17 * #define TRACE_INCLUDE_FILE trace-events-sample 25 #define TRACE_SYSTEM sample-trace 499 * CFLAGS_trace-events-sample.o := -I$(src) 506 * CFLAGS_trace-events-sample.o := -I$(PWD) 522 #define TRACE_INCLUDE_FILE trace-events-sample
|
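The trace-events-sample entries revolve around including one header more than once: once for declarations and once, with CREATE_TRACE_POINTS defined, to emit the definitions. The user-space sketch below mimics only that double-expansion idea with an X-macro; it is not the kernel's TRACE_EVENT machinery, and every name in it (MY_EVENTS, DECLARE, DEFINE, trace_foo_bar) is invented.

    #include <stdio.h>

    /* one list of events, expanded twice with different per-event macros */
    #define MY_EVENTS(X) \
        X(foo_bar)       \
        X(foo_baz)

    /* pass 1: declarations only (the "normal include" role) */
    #define DECLARE(name) void trace_##name(int v);
    MY_EVENTS(DECLARE)
    #undef DECLARE

    /* pass 2: definitions (the "CREATE_TRACE_POINTS" role) */
    #define DEFINE(name) void trace_##name(int v) { printf(#name ": %d\n", v); }
    MY_EVENTS(DEFINE)
    #undef DEFINE

    int main(void)
    {
        trace_foo_bar(1);
        trace_foo_baz(2);
        return 0;
    }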
/linux-4.4.14/tools/perf/arch/x86/util/ |
H A D | kvm-stat.c | 19 static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample, mmio_event_get_key() argument 22 key->key = perf_evsel__intval(evsel, sample, "gpa"); mmio_event_get_key() 23 key->info = perf_evsel__intval(evsel, sample, "type"); mmio_event_get_key() 31 struct perf_sample *sample, struct event_key *key) mmio_event_begin() 39 perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) { mmio_event_begin() 40 mmio_event_get_key(evsel, sample, key); mmio_event_begin() 47 static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample, mmio_event_end() argument 56 perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) { mmio_event_end() 57 mmio_event_get_key(evsel, sample, key); mmio_event_end() 82 struct perf_sample *sample, ioport_event_get_key() 85 key->key = perf_evsel__intval(evsel, sample, "port"); ioport_event_get_key() 86 key->info = perf_evsel__intval(evsel, sample, "rw"); ioport_event_get_key() 90 struct perf_sample *sample, ioport_event_begin() 94 ioport_event_get_key(evsel, sample, key); ioport_event_begin() 102 struct perf_sample *sample __maybe_unused, ioport_event_end() 30 mmio_event_begin(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) mmio_event_begin() argument 81 ioport_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) ioport_event_get_key() argument 89 ioport_event_begin(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) ioport_event_begin() argument
|
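The x86 kvm-stat hits pair an mmio/ioport "begin" event with a later "end" event carrying the same key (gpa plus type, or port plus rw) in order to time in-kernel emulation. A minimal sketch of that begin/end pairing, with invented names and a single pending slot per vcpu rather than perf's real bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical per-vcpu pairing state (keys and timestamps are made up) */
    struct pending { int valid; uint64_t key; uint64_t t_begin; };

    static void event_begin(struct pending *p, uint64_t key, uint64_t ts)
    {
        p->valid = 1;
        p->key = key;
        p->t_begin = ts;
    }

    /* returns the duration if this end matches the pending begin, 0 otherwise */
    static uint64_t event_end(struct pending *p, uint64_t key, uint64_t ts)
    {
        if (!p->valid || p->key != key)
            return 0;
        p->valid = 0;
        return ts - p->t_begin;
    }

    int main(void)
    {
        struct pending vcpu0 = { 0 };

        event_begin(&vcpu0, 0xc0000000 /* fake gpa */, 1000);
        printf("mmio access took %llu ticks\n",
               (unsigned long long)event_end(&vcpu0, 0xc0000000, 1450));
        return 0;
    }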
H A D | unwind-libdw.c | 8 struct regs_dump *user_regs = &ui->sample->user_regs; libdw__arch_set_initial_registers()
|
/linux-4.4.14/include/linux/platform_data/ |
H A D | ad7791.h | 8 * @unipolar: If set to true sample in unipolar mode, if set to false sample in bipolar mode.
|
H A D | ad7793.h | 92 * @unipolar: If set to true sample in unipolar mode, if set to false sample in bipolar mode.
|
H A D | ad7266.h | 25 * enum ad7266_mode - AD7266 sample mode
|
/linux-4.4.14/tools/perf/arch/x86/include/ |
H A D | arch-tests.h | 13 int test__arch_unwind_sample(struct perf_sample *sample,
|
/linux-4.4.14/tools/perf/util/ |
H A D | unwind-libdw.h | 13 struct perf_sample *sample; member in struct:unwind_info
|
H A D | tool.h | 18 struct perf_sample *sample, 22 struct perf_sample *sample, struct machine *machine); 38 event_sample sample, member in struct:perf_tool
|
H A D | kvm-stat.h | 47 struct perf_sample *sample, 54 struct perf_sample *sample, 57 struct perf_sample *sample, struct event_key *key); 111 struct perf_sample *sample, 114 struct perf_sample *sample, 117 struct perf_sample *sample,
|
H A D | thread-stack.c | 69 * @ref: external reference (e.g. db_id of sample) 537 struct perf_sample *sample, thread_stack__bottom() 546 if (sample->ip) { thread_stack__bottom() 547 ip = sample->ip; thread_stack__bottom() 549 } else if (sample->addr) { thread_stack__bottom() 550 ip = sample->addr; thread_stack__bottom() 561 return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp, thread_stack__bottom() 567 struct perf_sample *sample, thread_stack__no_call_return() 576 if (sample->ip >= ks && sample->addr < ks) { thread_stack__no_call_return() 580 sample->time, ref, thread_stack__no_call_return() 589 to_al->sym, sample->addr, thread_stack__no_call_return() 593 return thread_stack__push_cp(ts, 0, sample->time, ref, thread_stack__no_call_return() 596 } else if (thread_stack__in_kernel(ts) && sample->ip < ks) { thread_stack__no_call_return() 600 sample->time, ref, thread_stack__no_call_return() 613 cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip, thread_stack__no_call_return() 618 err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp, thread_stack__no_call_return() 623 return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref, thread_stack__no_call_return() 650 struct perf_sample *sample, u64 ref) thread_stack__trace_end() 665 ret_addr = sample->ip + sample->insn_len; thread_stack__trace_end() 667 return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp, thread_stack__trace_end() 672 struct perf_sample *sample, thread_stack__process() 708 err = thread_stack__bottom(thread, ts, sample, from_al, to_al, thread_stack__process() 715 ts->last_time = sample->time; thread_stack__process() 717 if (sample->flags & PERF_IP_FLAG_CALL) { thread_stack__process() 722 if (!sample->ip || !sample->addr) thread_stack__process() 725 ret_addr = sample->ip + sample->insn_len; thread_stack__process() 726 if (ret_addr == sample->addr) thread_stack__process() 730 to_al->sym, sample->addr, thread_stack__process() 734 err = thread_stack__push_cp(ts, ret_addr, sample->time, ref, thread_stack__process() 736 } else if (sample->flags & PERF_IP_FLAG_RETURN) { thread_stack__process() 737 if (!sample->ip || !sample->addr) thread_stack__process() 740 err = thread_stack__pop_cp(thread, ts, sample->addr, thread_stack__process() 741 sample->time, ref, from_al->sym); thread_stack__process() 745 err = thread_stack__no_call_return(thread, ts, sample, thread_stack__process() 748 } else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) { thread_stack__process() 749 err = thread_stack__trace_begin(thread, ts, sample->time, ref); thread_stack__process() 750 } else if (sample->flags & PERF_IP_FLAG_TRACE_END) { thread_stack__process() 751 err = thread_stack__trace_end(ts, sample, ref); thread_stack__process() 536 thread_stack__bottom(struct thread *thread, struct thread_stack *ts, struct perf_sample *sample, struct addr_location *from_al, struct addr_location *to_al, u64 ref) thread_stack__bottom() argument 565 thread_stack__no_call_return(struct thread *thread, struct thread_stack *ts, struct perf_sample *sample, struct addr_location *from_al, struct addr_location *to_al, u64 ref) thread_stack__no_call_return() argument 649 thread_stack__trace_end(struct thread_stack *ts, struct perf_sample *sample, u64 ref) thread_stack__trace_end() argument 671 thread_stack__process(struct thread *thread, struct comm *comm, struct perf_sample *sample, struct addr_location *from_al, struct addr_location *to_al, u64 ref, struct call_return_processor *crp) thread_stack__process() argument
|
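thread-stack.c maintains a shadow call stack from branch samples: a CALL-flagged sample pushes the return address (sample->ip + sample->insn_len), and a RETURN-flagged sample pops entries until the saved address matches sample->addr. The sketch below reproduces just that push/pop logic with invented names and a fixed-depth array; it is an illustration, not the perf implementation.

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical shadow call stack driven by branch samples */
    #define DEPTH 64
    static uint64_t stack[DEPTH];
    static int sp;

    static void on_call(uint64_t ip, uint64_t insn_len)
    {
        if (sp < DEPTH)
            stack[sp++] = ip + insn_len;   /* address the callee will return to */
    }

    static void on_return(uint64_t target)
    {
        /* pop until we find the frame whose saved return address matches */
        while (sp > 0) {
            if (stack[--sp] == target)
                return;
        }
        /* no match: a return whose call was never seen */
    }

    int main(void)
    {
        on_call(0x401000, 5);              /* call at 0x401000, next insn at 0x401005 */
        on_call(0x402000, 5);
        on_return(0x402005);               /* inner return */
        on_return(0x401005);               /* outer return */
        printf("depth after returns: %d\n", sp);
        return 0;
    }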
H A D | session.c | 23 struct perf_sample *sample, 100 struct perf_sample sample; ordered_events__deliver_event() local 103 int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample); ordered_events__deliver_event() 106 pr_err("Can't parse sample, err = %d\n", ret); ordered_events__deliver_event() 110 return perf_session__deliver_event(session, event->event, &sample, ordered_events__deliver_event() 210 struct perf_sample *sample __maybe_unused, process_event_sample_stub() 220 struct perf_sample *sample __maybe_unused, process_event_stub() 301 if (tool->sample == NULL) perf_tool__fill_defaults() 302 tool->sample = process_event_sample_stub; perf_tool__fill_defaults() 698 struct perf_sample *sample, u64 file_offset) perf_session__queue_event() 700 return ordered_events__queue(&s->ordered_events, event, sample, file_offset); perf_session__queue_event() 703 static void callchain__lbr_callstack_printf(struct perf_sample *sample) callchain__lbr_callstack_printf() argument 705 struct ip_callchain *callchain = sample->callchain; callchain__lbr_callstack_printf() 706 struct branch_stack *lbr_stack = sample->branch_stack; callchain__lbr_callstack_printf() 751 struct perf_sample *sample) callchain__printf() 754 struct ip_callchain *callchain = sample->callchain; callchain__printf() 757 callchain__lbr_callstack_printf(sample); callchain__printf() 766 static void branch_stack__printf(struct perf_sample *sample) branch_stack__printf() argument 770 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); branch_stack__printf() 772 for (i = 0; i < sample->branch_stack->nr; i++) { branch_stack__printf() 773 struct branch_entry *e = &sample->branch_stack->entries[i]; branch_stack__printf() 824 static void regs_user__printf(struct perf_sample *sample) regs_user__printf() argument 826 struct regs_dump *user_regs = &sample->user_regs; regs_user__printf() 832 static void regs_intr__printf(struct perf_sample *sample) regs_intr__printf() argument 834 struct regs_dump *intr_regs = &sample->intr_regs; regs_intr__printf() 848 struct perf_sample *sample) perf_evlist__print_tstamp() 859 printf("%u ", sample->cpu); perf_evlist__print_tstamp() 862 printf("%" PRIu64 " ", sample->time); perf_evlist__print_tstamp() 865 static void sample_read__printf(struct perf_sample *sample, u64 read_format) sample_read__printf() argument 871 sample->read.time_enabled); sample_read__printf() 875 sample->read.time_running); sample_read__printf() 880 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); sample_read__printf() 882 for (i = 0; i < sample->read.group.nr; i++) { sample_read__printf() 885 value = &sample->read.group.values[i]; sample_read__printf() 892 sample->read.one.id, sample->read.one.value); sample_read__printf() 896 u64 file_offset, struct perf_sample *sample) dump_event() 906 if (sample) dump_event() 907 perf_evlist__print_tstamp(evlist, event, sample); dump_event() 914 struct perf_sample *sample) dump_sample() 922 event->header.misc, sample->pid, sample->tid, sample->ip, dump_sample() 923 sample->period, sample->addr); dump_sample() 928 callchain__printf(evsel, sample); dump_sample() 931 branch_stack__printf(sample); dump_sample() 934 regs_user__printf(sample); dump_sample() 937 regs_intr__printf(sample); dump_sample() 940 stack_user__printf(&sample->user_stack); dump_sample() 943 printf("... weight: %" PRIu64 "\n", sample->weight); dump_sample() 946 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); dump_sample() 949 printf("... 
transaction: %" PRIx64 "\n", sample->transaction); dump_sample() 952 sample_read__printf(sample, evsel->attr.read_format); dump_sample() 957 struct perf_sample *sample) machines__find_for_cpumode() 971 pid = sample->pid; machines__find_for_cpumode() 985 struct perf_sample *sample, deliver_sample_value() 992 sample->id = v->id; deliver_sample_value() 993 sample->period = v->value - sid->period; deliver_sample_value() 1002 return tool->sample(tool, event, sample, sid->evsel, machine); deliver_sample_value() 1008 struct perf_sample *sample, deliver_sample_group() 1014 for (i = 0; i < sample->read.group.nr; i++) { deliver_sample_group() 1015 ret = deliver_sample_value(evlist, tool, event, sample, deliver_sample_group() 1016 &sample->read.group.values[i], deliver_sample_group() 1029 struct perf_sample *sample, perf_evlist__deliver_sample() 1037 /* Standard sample delievery. */ perf_evlist__deliver_sample() 1039 return tool->sample(tool, event, sample, evsel, machine); perf_evlist__deliver_sample() 1043 return deliver_sample_group(evlist, tool, event, sample, perf_evlist__deliver_sample() 1046 return deliver_sample_value(evlist, tool, event, sample, perf_evlist__deliver_sample() 1047 &sample->read.one, machine); perf_evlist__deliver_sample() 1053 struct perf_sample *sample, machines__deliver_event() 1059 dump_event(evlist, event, file_offset, sample); machines__deliver_event() 1061 evsel = perf_evlist__id2evsel(evlist, sample->id); machines__deliver_event() 1063 machine = machines__find_for_cpumode(machines, event, sample); machines__deliver_event() 1071 dump_sample(evsel, event, sample); machines__deliver_event() 1076 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); machines__deliver_event() 1078 return tool->mmap(tool, event, sample, machine); machines__deliver_event() 1082 return tool->mmap2(tool, event, sample, machine); machines__deliver_event() 1084 return tool->comm(tool, event, sample, machine); machines__deliver_event() 1086 return tool->fork(tool, event, sample, machine); machines__deliver_event() 1088 return tool->exit(tool, event, sample, machine); machines__deliver_event() 1092 return tool->lost(tool, event, sample, machine); machines__deliver_event() 1096 return tool->lost_samples(tool, event, sample, machine); machines__deliver_event() 1098 return tool->read(tool, event, sample, evsel, machine); machines__deliver_event() 1100 return tool->throttle(tool, event, sample, machine); machines__deliver_event() 1102 return tool->unthrottle(tool, event, sample, machine); machines__deliver_event() 1107 return tool->aux(tool, event, sample, machine); machines__deliver_event() 1109 return tool->itrace_start(tool, event, sample, machine); machines__deliver_event() 1112 return tool->context_switch(tool, event, sample, machine); machines__deliver_event() 1121 struct perf_sample *sample, perf_session__deliver_event() 1127 ret = auxtrace__process_event(session, event, sample, tool); perf_session__deliver_event() 1134 event, sample, tool, file_offset); perf_session__deliver_event() 1189 struct perf_sample *sample) perf_session__deliver_synth_event() 1199 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); perf_session__deliver_synth_event() 1214 struct perf_sample *sample) perf_session__peek_event() 1257 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && perf_session__peek_event() 1258 perf_evlist__parse_sample(session->evlist, event, sample)) perf_session__peek_event() 1271 struct perf_sample sample; 
perf_session__process_event() local 1286 * For all kernel events we get the sample data perf_session__process_event() 1288 ret = perf_evlist__parse_sample(evlist, event, &sample); perf_session__process_event() 1293 ret = perf_session__queue_event(session, event, &sample, file_offset); perf_session__process_event() 1298 return perf_session__deliver_event(session, event, &sample, tool, perf_session__process_event() 1701 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); perf_session__has_traces() 1785 void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample, perf_evsel__print_ip() argument 1798 if (symbol_conf.use_callchain && sample->callchain) { perf_evsel__print_ip() 1802 sample, NULL, NULL, perf_evsel__print_ip() 1862 printf("%16" PRIx64, sample->ip); perf_evsel__print_ip() 697 perf_session__queue_event(struct perf_session *s, union perf_event *event, struct perf_sample *sample, u64 file_offset) perf_session__queue_event() argument 750 callchain__printf(struct perf_evsel *evsel, struct perf_sample *sample) callchain__printf() argument 846 perf_evlist__print_tstamp(struct perf_evlist *evlist, union perf_event *event, struct perf_sample *sample) perf_evlist__print_tstamp() argument 895 dump_event(struct perf_evlist *evlist, union perf_event *event, u64 file_offset, struct perf_sample *sample) dump_event() argument 913 dump_sample(struct perf_evsel *evsel, union perf_event *event, struct perf_sample *sample) dump_sample() argument 955 machines__find_for_cpumode(struct machines *machines, union perf_event *event, struct perf_sample *sample) machines__find_for_cpumode() argument 982 deliver_sample_value(struct perf_evlist *evlist, struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct sample_read_value *v, struct machine *machine) deliver_sample_value() argument 1005 deliver_sample_group(struct perf_evlist *evlist, struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) deliver_sample_group() argument 1026 perf_evlist__deliver_sample(struct perf_evlist *evlist, struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) perf_evlist__deliver_sample() argument 1050 machines__deliver_event(struct machines *machines, struct perf_evlist *evlist, union perf_event *event, struct perf_sample *sample, struct perf_tool *tool, u64 file_offset) machines__deliver_event() argument 1119 perf_session__deliver_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample, struct perf_tool *tool, u64 file_offset) perf_session__deliver_event() argument 1187 perf_session__deliver_synth_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample) perf_session__deliver_synth_event() argument 1211 perf_session__peek_event(struct perf_session *session, off_t file_offset, void *buf, size_t buf_sz, union perf_event **event_ptr, struct perf_sample *sample) perf_session__peek_event() argument
|
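session.c's machines__deliver_event() is essentially a switch on event->header.type that routes each record to the matching perf_tool callback (sample, mmap, comm, fork, exit, and so on). A stripped-down sketch of that dispatch shape, with hypothetical event kinds and handlers in place of the real tool callbacks:

    #include <stdio.h>

    /* hypothetical event kinds and a dispatcher in the spirit of
     * machines__deliver_event() above (all names are made up) */
    enum ev_type { EV_SAMPLE, EV_COMM, EV_EXIT };

    struct ev { enum ev_type type; int pid; };

    static int handle_sample(const struct ev *e) { printf("sample pid=%d\n", e->pid); return 0; }
    static int handle_comm(const struct ev *e)   { printf("comm   pid=%d\n", e->pid); return 0; }
    static int handle_exit(const struct ev *e)   { printf("exit   pid=%d\n", e->pid); return 0; }

    static int deliver(const struct ev *e)
    {
        switch (e->type) {
        case EV_SAMPLE: return handle_sample(e);
        case EV_COMM:   return handle_comm(e);
        case EV_EXIT:   return handle_exit(e);
        default:        return -1;   /* unknown event type */
        }
    }

    int main(void)
    {
        struct ev evs[] = { { EV_COMM, 7 }, { EV_SAMPLE, 7 }, { EV_EXIT, 7 } };
        for (unsigned i = 0; i < sizeof(evs) / sizeof(evs[0]); i++)
            deliver(&evs[i]);
        return 0;
    }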
H A D | intel-pt.c | 954 struct perf_sample *sample, u64 type, intel_pt_inject_event() 957 event->header.size = perf_event__sample_event_size(sample, type, 0); intel_pt_inject_event() 958 return perf_event__synthesize_sample(event, type, 0, sample, swapped); intel_pt_inject_event() 966 struct perf_sample sample = { .ip = 0, }; intel_pt_synth_branch_sample() local 975 event->sample.header.type = PERF_RECORD_SAMPLE; intel_pt_synth_branch_sample() 976 event->sample.header.misc = PERF_RECORD_MISC_USER; intel_pt_synth_branch_sample() 977 event->sample.header.size = sizeof(struct perf_event_header); intel_pt_synth_branch_sample() 980 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); intel_pt_synth_branch_sample() 982 sample.ip = ptq->state->from_ip; intel_pt_synth_branch_sample() 983 sample.pid = ptq->pid; intel_pt_synth_branch_sample() 984 sample.tid = ptq->tid; intel_pt_synth_branch_sample() 985 sample.addr = ptq->state->to_ip; intel_pt_synth_branch_sample() 986 sample.id = ptq->pt->branches_id; intel_pt_synth_branch_sample() 987 sample.stream_id = ptq->pt->branches_id; intel_pt_synth_branch_sample() 988 sample.period = 1; intel_pt_synth_branch_sample() 989 sample.cpu = ptq->cpu; intel_pt_synth_branch_sample() 990 sample.flags = ptq->flags; intel_pt_synth_branch_sample() 991 sample.insn_len = ptq->insn_len; intel_pt_synth_branch_sample() 1001 .from = sample.ip, intel_pt_synth_branch_sample() 1002 .to = sample.addr, intel_pt_synth_branch_sample() 1005 sample.branch_stack = (struct branch_stack *)&dummy_bs; intel_pt_synth_branch_sample() 1009 ret = intel_pt_inject_event(event, &sample, intel_pt_synth_branch_sample() 1016 ret = perf_session__deliver_synth_event(pt->session, event, &sample); intel_pt_synth_branch_sample() 1029 struct perf_sample sample = { .ip = 0, }; intel_pt_synth_instruction_sample() local 1031 event->sample.header.type = PERF_RECORD_SAMPLE; intel_pt_synth_instruction_sample() 1032 event->sample.header.misc = PERF_RECORD_MISC_USER; intel_pt_synth_instruction_sample() 1033 event->sample.header.size = sizeof(struct perf_event_header); intel_pt_synth_instruction_sample() 1036 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); intel_pt_synth_instruction_sample() 1038 sample.ip = ptq->state->from_ip; intel_pt_synth_instruction_sample() 1039 sample.pid = ptq->pid; intel_pt_synth_instruction_sample() 1040 sample.tid = ptq->tid; intel_pt_synth_instruction_sample() 1041 sample.addr = ptq->state->to_ip; intel_pt_synth_instruction_sample() 1042 sample.id = ptq->pt->instructions_id; intel_pt_synth_instruction_sample() 1043 sample.stream_id = ptq->pt->instructions_id; intel_pt_synth_instruction_sample() 1044 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt; intel_pt_synth_instruction_sample() 1045 sample.cpu = ptq->cpu; intel_pt_synth_instruction_sample() 1046 sample.flags = ptq->flags; intel_pt_synth_instruction_sample() 1047 sample.insn_len = ptq->insn_len; intel_pt_synth_instruction_sample() 1053 pt->synth_opts.callchain_sz, sample.ip); intel_pt_synth_instruction_sample() 1054 sample.callchain = ptq->chain; intel_pt_synth_instruction_sample() 1059 sample.branch_stack = ptq->last_branch; intel_pt_synth_instruction_sample() 1063 ret = intel_pt_inject_event(event, &sample, intel_pt_synth_instruction_sample() 1070 ret = perf_session__deliver_synth_event(pt->session, event, &sample); intel_pt_synth_instruction_sample() 1086 struct perf_sample sample = { .ip = 0, }; intel_pt_synth_transaction_sample() local 1088 event->sample.header.type = PERF_RECORD_SAMPLE; 
intel_pt_synth_transaction_sample() 1089 event->sample.header.misc = PERF_RECORD_MISC_USER; intel_pt_synth_transaction_sample() 1090 event->sample.header.size = sizeof(struct perf_event_header); intel_pt_synth_transaction_sample() 1093 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); intel_pt_synth_transaction_sample() 1095 sample.ip = ptq->state->from_ip; intel_pt_synth_transaction_sample() 1096 sample.pid = ptq->pid; intel_pt_synth_transaction_sample() 1097 sample.tid = ptq->tid; intel_pt_synth_transaction_sample() 1098 sample.addr = ptq->state->to_ip; intel_pt_synth_transaction_sample() 1099 sample.id = ptq->pt->transactions_id; intel_pt_synth_transaction_sample() 1100 sample.stream_id = ptq->pt->transactions_id; intel_pt_synth_transaction_sample() 1101 sample.period = 1; intel_pt_synth_transaction_sample() 1102 sample.cpu = ptq->cpu; intel_pt_synth_transaction_sample() 1103 sample.flags = ptq->flags; intel_pt_synth_transaction_sample() 1104 sample.insn_len = ptq->insn_len; intel_pt_synth_transaction_sample() 1108 pt->synth_opts.callchain_sz, sample.ip); intel_pt_synth_transaction_sample() 1109 sample.callchain = ptq->chain; intel_pt_synth_transaction_sample() 1114 sample.branch_stack = ptq->last_branch; intel_pt_synth_transaction_sample() 1118 ret = intel_pt_inject_event(event, &sample, intel_pt_synth_transaction_sample() 1125 ret = perf_session__deliver_synth_event(pt->session, event, &sample); intel_pt_synth_transaction_sample() 1475 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample) intel_pt_lost() argument 1477 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu, intel_pt_lost() 1478 sample->pid, sample->tid, 0); intel_pt_lost() 1555 struct perf_sample *sample) intel_pt_process_switch() 1561 evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id); intel_pt_process_switch() 1565 tid = perf_evsel__intval(evsel, sample, "next_pid"); intel_pt_process_switch() 1566 cpu = sample->cpu; intel_pt_process_switch() 1569 cpu, tid, sample->time, perf_time_to_tsc(sample->time, intel_pt_process_switch() 1572 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); intel_pt_process_switch() 1580 struct perf_sample *sample) intel_pt_context_switch() 1586 cpu = sample->cpu; intel_pt_context_switch() 1600 pid = sample->pid; intel_pt_context_switch() 1601 tid = sample->tid; intel_pt_context_switch() 1610 cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time, intel_pt_context_switch() 1613 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); intel_pt_context_switch() 1622 struct perf_sample *sample) intel_pt_process_itrace_start() 1628 sample->cpu, event->itrace_start.pid, intel_pt_process_itrace_start() 1629 event->itrace_start.tid, sample->time, intel_pt_process_itrace_start() 1630 perf_time_to_tsc(sample->time, &pt->tc)); intel_pt_process_itrace_start() 1632 return machine__set_current_tid(pt->machine, sample->cpu, intel_pt_process_itrace_start() 1639 struct perf_sample *sample, intel_pt_process_event() 1655 if (sample->time && sample->time != (u64)-1) intel_pt_process_event() 1656 timestamp = perf_time_to_tsc(sample->time, &pt->tc); intel_pt_process_event() 1670 sample->time); intel_pt_process_event() 1681 err = intel_pt_lost(pt, sample); intel_pt_process_event() 1687 err = intel_pt_process_switch(pt, sample); intel_pt_process_event() 1689 err = intel_pt_process_itrace_start(pt, event, sample); intel_pt_process_event() 1692 err = intel_pt_context_switch(pt, event, sample); intel_pt_process_event() 1696 sample->cpu, sample->time, timestamp); 
intel_pt_process_event() 1800 struct perf_sample *sample __maybe_unused, intel_pt_event_synth() 1880 pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 1901 pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 1928 pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 953 intel_pt_inject_event(union perf_event *event, struct perf_sample *sample, u64 type, bool swapped) intel_pt_inject_event() argument 1554 intel_pt_process_switch(struct intel_pt *pt, struct perf_sample *sample) intel_pt_process_switch() argument 1579 intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, struct perf_sample *sample) intel_pt_context_switch() argument 1620 intel_pt_process_itrace_start(struct intel_pt *pt, union perf_event *event, struct perf_sample *sample) intel_pt_process_itrace_start() argument 1637 intel_pt_process_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample, struct perf_tool *tool) intel_pt_process_event() argument
|
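intel-pt.c synthesizes PERF_RECORD_SAMPLE records from decoded trace: it fills a perf_sample with the branch source as ip, the target as addr, pid/tid/cpu/time from the queue, a period of 1, and hands the result to perf_session__deliver_synth_event(). The sketch below mirrors only that fill-and-deliver step, with invented types and a plain callback standing in for the session:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical decoder state and synthesized sample record */
    struct decoder_state { uint64_t from_ip, to_ip, timestamp; };
    struct synth_sample  { uint64_t ip, addr, time, period; int pid, tid, cpu; };

    typedef int (*deliver_fn)(const struct synth_sample *);

    static int synth_branch(const struct decoder_state *st, int pid, int tid, int cpu,
                            deliver_fn deliver)
    {
        struct synth_sample s = {
            .ip     = st->from_ip,    /* branch source */
            .addr   = st->to_ip,      /* branch target */
            .time   = st->timestamp,
            .period = 1,              /* one branch per synthesized sample */
            .pid    = pid,
            .tid    = tid,
            .cpu    = cpu,
        };
        return deliver(&s);
    }

    static int print_sample(const struct synth_sample *s)
    {
        printf("branch %#llx -> %#llx @%llu (pid %d)\n",
               (unsigned long long)s->ip, (unsigned long long)s->addr,
               (unsigned long long)s->time, s->pid);
        return 0;
    }

    int main(void)
    {
        struct decoder_state st = { .from_ip = 0x401000, .to_ip = 0x402000, .timestamp = 12345 };
        return synth_branch(&st, 1000, 1000, 0, print_sample);
    }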
H A D | event.h | 86 /* perf sample has 16 bits size limit */ 369 struct sample_event sample; member in union:perf_event 390 struct perf_sample *sample, 412 struct perf_sample *sample, 416 struct perf_sample *sample, 420 struct perf_sample *sample, 424 struct perf_sample *sample, 428 struct perf_sample *sample, 432 struct perf_sample *sample, 436 struct perf_sample *sample, 440 struct perf_sample *sample, 444 struct perf_sample *sample, 448 struct perf_sample *sample, 452 struct perf_sample *sample, 460 struct perf_sample *sample); 469 struct perf_sample *sample, 475 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, 479 const struct perf_sample *sample,
|
H A D | intel-bts.c | 136 static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample) intel_bts_lost() argument 142 INTEL_BTS_ERR_LOST, sample->cpu, sample->pid, intel_bts_lost() 143 sample->tid, 0, "Lost trace data"); intel_bts_lost() 276 struct perf_sample sample = { .ip = 0, }; intel_bts_synth_branch_sample() local 278 event.sample.header.type = PERF_RECORD_SAMPLE; intel_bts_synth_branch_sample() 279 event.sample.header.misc = PERF_RECORD_MISC_USER; intel_bts_synth_branch_sample() 280 event.sample.header.size = sizeof(struct perf_event_header); intel_bts_synth_branch_sample() 282 sample.ip = le64_to_cpu(branch->from); intel_bts_synth_branch_sample() 283 sample.pid = btsq->pid; intel_bts_synth_branch_sample() 284 sample.tid = btsq->tid; intel_bts_synth_branch_sample() 285 sample.addr = le64_to_cpu(branch->to); intel_bts_synth_branch_sample() 286 sample.id = btsq->bts->branches_id; intel_bts_synth_branch_sample() 287 sample.stream_id = btsq->bts->branches_id; intel_bts_synth_branch_sample() 288 sample.period = 1; intel_bts_synth_branch_sample() 289 sample.cpu = btsq->cpu; intel_bts_synth_branch_sample() 290 sample.flags = btsq->sample_flags; intel_bts_synth_branch_sample() 291 sample.insn_len = btsq->intel_pt_insn.length; intel_bts_synth_branch_sample() 294 event.sample.header.size = bts->branches_event_size; intel_bts_synth_branch_sample() 297 0, &sample, intel_bts_synth_branch_sample() 303 ret = perf_session__deliver_synth_event(bts->session, &event, &sample); intel_bts_synth_branch_sample() 597 struct perf_sample *sample, intel_bts_process_event() 613 if (sample->time && sample->time != (u64)-1) intel_bts_process_event() 614 timestamp = perf_time_to_tsc(sample->time, &bts->tc); intel_bts_process_event() 634 err = intel_bts_lost(bts, sample); intel_bts_process_event() 742 struct perf_sample *sample __maybe_unused, intel_bts_event_synth() 810 pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 822 * We only use sample types from PERF_SAMPLE_MASK so we can use 595 intel_bts_process_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample, struct perf_tool *tool) intel_bts_process_event() argument
|
H A D | machine.h | 84 struct perf_sample *sample); 86 struct perf_sample *sample); 88 struct perf_sample *sample); 90 struct perf_sample *sample); 92 struct perf_sample *sample); 100 struct perf_sample *sample); 102 struct perf_sample *sample); 104 struct perf_sample *sample); 140 struct branch_info *sample__resolve_bstack(struct perf_sample *sample, 142 struct mem_info *sample__resolve_mem(struct perf_sample *sample, 146 struct perf_sample *sample,
|
H A D | evsel.c | 105 * @sample_type: sample type 108 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct 138 * @sample_type: sample type 141 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if 142 * sample_id_all is used there is an id sample appended to non-sample events. 1510 struct perf_sample *sample) perf_evsel__parse_id_sample() 1513 const u64 *array = event->sample.array; perf_evsel__parse_id_sample() 1521 sample->id = *array; perf_evsel__parse_id_sample() 1533 sample->cpu = u.val32[0]; perf_evsel__parse_id_sample() 1538 sample->stream_id = *array; perf_evsel__parse_id_sample() 1543 sample->id = *array; perf_evsel__parse_id_sample() 1548 sample->time = *array; perf_evsel__parse_id_sample() 1561 sample->pid = u.val32[0]; perf_evsel__parse_id_sample() 1562 sample->tid = u.val32[1]; perf_evsel__parse_id_sample() 1612 array = event->sample.array; perf_evsel__parse_sample() 1851 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, perf_event__sample_event_size() argument 1891 sz = sample->read.group.nr * perf_event__sample_event_size() 1900 sz = (sample->callchain->nr + 1) * sizeof(u64); perf_event__sample_event_size() 1906 result += sample->raw_size; perf_event__sample_event_size() 1910 sz = sample->branch_stack->nr * sizeof(struct branch_entry); perf_event__sample_event_size() 1916 if (sample->user_regs.abi) { perf_event__sample_event_size() 1918 sz = hweight_long(sample->user_regs.mask) * sizeof(u64); perf_event__sample_event_size() 1926 sz = sample->user_stack.size; perf_event__sample_event_size() 1944 if (sample->intr_regs.abi) { perf_event__sample_event_size() 1946 sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); perf_event__sample_event_size() 1958 const struct perf_sample *sample, perf_event__synthesize_sample() 1969 array = event->sample.array; perf_event__synthesize_sample() 1972 *array = sample->id; perf_event__synthesize_sample() 1977 *array = sample->ip; perf_event__synthesize_sample() 1982 u.val32[0] = sample->pid; perf_event__synthesize_sample() 1983 u.val32[1] = sample->tid; perf_event__synthesize_sample() 1998 *array = sample->time; perf_event__synthesize_sample() 2003 *array = sample->addr; perf_event__synthesize_sample() 2008 *array = sample->id; perf_event__synthesize_sample() 2013 *array = sample->stream_id; perf_event__synthesize_sample() 2018 u.val32[0] = sample->cpu; perf_event__synthesize_sample() 2031 *array = sample->period; perf_event__synthesize_sample() 2037 *array = sample->read.group.nr; perf_event__synthesize_sample() 2039 *array = sample->read.one.value; perf_event__synthesize_sample() 2043 *array = sample->read.time_enabled; perf_event__synthesize_sample() 2048 *array = sample->read.time_running; perf_event__synthesize_sample() 2054 sz = sample->read.group.nr * perf_event__synthesize_sample() 2056 memcpy(array, sample->read.group.values, sz); perf_event__synthesize_sample() 2059 *array = sample->read.one.id; perf_event__synthesize_sample() 2065 sz = (sample->callchain->nr + 1) * sizeof(u64); perf_event__synthesize_sample() 2066 memcpy(array, sample->callchain, sz); perf_event__synthesize_sample() 2071 u.val32[0] = sample->raw_size; perf_event__synthesize_sample() 2084 memcpy(array, sample->raw_data, sample->raw_size); perf_event__synthesize_sample() 2085 array = (void *)array + sample->raw_size; perf_event__synthesize_sample() 2089 sz = sample->branch_stack->nr * sizeof(struct branch_entry); perf_event__synthesize_sample() 2091 memcpy(array, sample->branch_stack, sz); 
perf_event__synthesize_sample() 2096 if (sample->user_regs.abi) { perf_event__synthesize_sample() 2097 *array++ = sample->user_regs.abi; perf_event__synthesize_sample() 2098 sz = hweight_long(sample->user_regs.mask) * sizeof(u64); perf_event__synthesize_sample() 2099 memcpy(array, sample->user_regs.regs, sz); perf_event__synthesize_sample() 2107 sz = sample->user_stack.size; perf_event__synthesize_sample() 2110 memcpy(array, sample->user_stack.data, sz); perf_event__synthesize_sample() 2117 *array = sample->weight; perf_event__synthesize_sample() 2122 *array = sample->data_src; perf_event__synthesize_sample() 2127 *array = sample->transaction; perf_event__synthesize_sample() 2132 if (sample->intr_regs.abi) { perf_event__synthesize_sample() 2133 *array++ = sample->intr_regs.abi; perf_event__synthesize_sample() 2134 sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); perf_event__synthesize_sample() 2135 memcpy(array, sample->intr_regs.regs, sz); perf_event__synthesize_sample() 2150 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, perf_evsel__rawptr() argument 2162 offset = *(int *)(sample->raw_data + field->offset); perf_evsel__rawptr() 2166 return sample->raw_data + offset; perf_evsel__rawptr() 2169 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, perf_evsel__intval() argument 2179 ptr = sample->raw_data + field->offset; perf_evsel__intval() 1508 perf_evsel__parse_id_sample(const struct perf_evsel *evsel, const union perf_event *event, struct perf_sample *sample) perf_evsel__parse_id_sample() argument 1956 perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, const struct perf_sample *sample, bool swapped) perf_event__synthesize_sample() argument
|
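evsel.c sizes and serializes sample records by walking the sample_type bitmask in one fixed order, so perf_event__sample_event_size() and perf_event__synthesize_sample() stay in lockstep. A small sketch of that bitmask-driven layout, with made-up field bits rather than the real PERF_SAMPLE_* values:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical field bits, laid out in one fixed order like sample_type */
    #define F_IP    (1u << 0)
    #define F_TID   (1u << 1)
    #define F_TIME  (1u << 2)
    #define F_ADDR  (1u << 3)

    struct rec { uint64_t ip, tid, time, addr; };

    /* size of the serialized array for a given field mask */
    static size_t rec_size(uint32_t mask)
    {
        size_t sz = 0;
        if (mask & F_IP)   sz += sizeof(uint64_t);
        if (mask & F_TID)  sz += sizeof(uint64_t);
        if (mask & F_TIME) sz += sizeof(uint64_t);
        if (mask & F_ADDR) sz += sizeof(uint64_t);
        return sz;
    }

    /* write the selected fields in the same fixed order used for sizing */
    static size_t rec_write(const struct rec *r, uint32_t mask, uint64_t *out)
    {
        uint64_t *p = out;
        if (mask & F_IP)   *p++ = r->ip;
        if (mask & F_TID)  *p++ = r->tid;
        if (mask & F_TIME) *p++ = r->time;
        if (mask & F_ADDR) *p++ = r->addr;
        return (size_t)(p - out) * sizeof(uint64_t);
    }

    int main(void)
    {
        struct rec r = { .ip = 0x400123, .tid = 42, .time = 999, .addr = 0 };
        uint32_t mask = F_IP | F_TIME;
        uint64_t buf[4];

        size_t n = rec_write(&r, mask, buf);
        printf("sized %zu, wrote %zu bytes\n", rec_size(mask), n);
        return 0;
    }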
H A D | session.h | 56 struct perf_sample *sample); 61 struct perf_sample *sample, u64 file_offset); 106 void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample, 130 struct perf_sample *sample);
|
H A D | hist.c | 510 struct perf_sample *sample = iter->sample; iter_prepare_mem_entry() local 513 mi = sample__resolve_mem(sample, al); iter_prepare_mem_entry() 532 cost = iter->sample->weight; iter_add_single_mem_entry() 566 err = hist_entry__append_callchain(he, iter->sample); iter_finish_mem_entry() 584 struct perf_sample *sample = iter->sample; iter_prepare_branch_entry() local 586 bi = sample__resolve_bstack(sample, al); iter_prepare_branch_entry() 591 iter->total = sample->branch_stack->nr; iter_prepare_branch_entry() 679 struct perf_sample *sample = iter->sample; iter_add_single_normal_entry() local 683 sample->period, sample->weight, iter_add_single_normal_entry() 684 sample->transaction, true); iter_add_single_normal_entry() 698 struct perf_sample *sample = iter->sample; iter_finish_normal_entry() local 707 return hist_entry__append_callchain(he, sample); iter_finish_normal_entry() 739 struct perf_sample *sample = iter->sample; iter_add_single_cumulative_entry() local 745 sample->period, sample->weight, iter_add_single_cumulative_entry() 746 sample->transaction, true); iter_add_single_cumulative_entry() 753 hist_entry__append_callchain(he, sample); iter_add_single_cumulative_entry() 784 struct perf_sample *sample = iter->sample; iter_add_next_cumulative_entry() local 819 sample->period, sample->weight, iter_add_next_cumulative_entry() 820 sample->transaction, false); iter_add_next_cumulative_entry() 828 callchain_append(he->callchain, &cursor, sample->period); iter_add_next_cumulative_entry() 879 err = sample__resolve_callchain(iter->sample, &iter->parent, hist_entry_iter__add() 1488 struct perf_sample *sample, bool nonany_branch_mode) hist__account_cycles() 1496 bi = sample__resolve_bstack(sample, al); hist__account_cycles() 1487 hist__account_cycles(struct branch_stack *bs, struct addr_location *al, struct perf_sample *sample, bool nonany_branch_mode) hist__account_cycles() argument
|
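hist.c folds each sample's period (and weight/transaction) into a histogram entry keyed by the resolved location, then reports every entry as a share of the total period. The sketch below keeps only that accumulate-and-percentage step, using a flat unsorted table keyed by symbol name instead of perf's sorted hists:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* hypothetical histogram bucket keyed by symbol name */
    struct bucket { const char *sym; uint64_t period; uint64_t nr; };

    #define NBUCKETS 32
    static struct bucket hist[NBUCKETS];
    static int nhist;
    static uint64_t total_period;

    static void hist_add(const char *sym, uint64_t period)
    {
        total_period += period;
        for (int i = 0; i < nhist; i++) {
            if (strcmp(hist[i].sym, sym) == 0) {
                hist[i].period += period;
                hist[i].nr++;
                return;
            }
        }
        if (nhist < NBUCKETS)
            hist[nhist++] = (struct bucket){ sym, period, 1 };
    }

    int main(void)
    {
        hist_add("do_syscall_64", 4000);
        hist_add("copy_user_enhanced_fast_string", 1000);
        hist_add("do_syscall_64", 5000);

        for (int i = 0; i < nhist; i++)
            printf("%6.2f%%  %s\n",
                   100.0 * hist[i].period / total_period, hist[i].sym);
        return 0;
    }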
H A D | machine.c | 465 struct perf_sample *sample) machine__process_comm_event() 480 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { machine__process_comm_event() 491 union perf_event *event, struct perf_sample *sample __maybe_unused) machine__process_lost_event() 499 union perf_event *event, struct perf_sample *sample) machine__process_lost_samples_event() 502 sample->id, event->lost_samples.lost); machine__process_lost_samples_event() 1259 struct perf_sample *sample __maybe_unused) machine__process_mmap2_event() 1313 struct perf_sample *sample __maybe_unused) machine__process_mmap_event() 1390 struct perf_sample *sample) machine__process_fork_event() 1428 thread__fork(thread, parent, sample->time) < 0) { machine__process_fork_event() 1439 struct perf_sample *sample __maybe_unused) machine__process_exit_event() 1457 struct perf_sample *sample) machine__process_event() 1463 ret = machine__process_comm_event(machine, event, sample); break; machine__process_event() 1465 ret = machine__process_mmap_event(machine, event, sample); break; machine__process_event() 1467 ret = machine__process_mmap2_event(machine, event, sample); break; machine__process_event() 1469 ret = machine__process_fork_event(machine, event, sample); break; machine__process_event() 1471 ret = machine__process_exit_event(machine, event, sample); break; machine__process_event() 1473 ret = machine__process_lost_event(machine, event, sample); break; machine__process_event() 1479 ret = machine__process_lost_samples_event(machine, event, sample); break; machine__process_event() 1532 * Check there as a fallback option before dropping the sample. ip__resolve_data() 1543 struct mem_info *sample__resolve_mem(struct perf_sample *sample, sample__resolve_mem() argument 1551 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip); sample__resolve_mem() 1552 ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr); sample__resolve_mem() 1553 mi->data_src.val = sample->data_src; sample__resolve_mem() 1615 struct branch_info *sample__resolve_bstack(struct perf_sample *sample, sample__resolve_bstack() argument 1619 const struct branch_stack *bs = sample->branch_stack; sample__resolve_bstack() 1675 * Recolve LBR callstack chain sample 1682 struct perf_sample *sample, resolve_lbr_callchain_sample() 1687 struct ip_callchain *chain = sample->callchain; resolve_lbr_callchain_sample() 1700 struct branch_stack *lbr_stack = sample->branch_stack; resolve_lbr_callchain_sample() 1748 struct perf_sample *sample, thread__resolve_callchain_sample() 1753 struct branch_stack *branch = sample->branch_stack; thread__resolve_callchain_sample() 1754 struct ip_callchain *chain = sample->callchain; thread__resolve_callchain_sample() 1764 err = resolve_lbr_callchain_sample(thread, sample, parent, thread__resolve_callchain_sample() 1779 * more context for a sample than just the callers. 
thread__resolve_callchain_sample() 1872 struct perf_sample *sample, thread__resolve_callchain() 1878 sample, parent, thread__resolve_callchain() 1889 if ((!sample->user_regs.regs) || thread__resolve_callchain() 1890 (!sample->user_stack.size)) thread__resolve_callchain() 1894 thread, sample, max_stack); thread__resolve_callchain() 464 machine__process_comm_event(struct machine *machine, union perf_event *event, struct perf_sample *sample) machine__process_comm_event() argument 498 machine__process_lost_samples_event(struct machine *machine __maybe_unused, union perf_event *event, struct perf_sample *sample) machine__process_lost_samples_event() argument 1389 machine__process_fork_event(struct machine *machine, union perf_event *event, struct perf_sample *sample) machine__process_fork_event() argument 1456 machine__process_event(struct machine *machine, union perf_event *event, struct perf_sample *sample) machine__process_event() argument 1681 resolve_lbr_callchain_sample(struct thread *thread, struct perf_sample *sample, struct symbol **parent, struct addr_location *root_al, int max_stack) resolve_lbr_callchain_sample() argument 1746 thread__resolve_callchain_sample(struct thread *thread, struct perf_evsel *evsel, struct perf_sample *sample, struct symbol **parent, struct addr_location *root_al, int max_stack) thread__resolve_callchain_sample() argument 1870 thread__resolve_callchain(struct thread *thread, struct perf_evsel *evsel, struct perf_sample *sample, struct symbol **parent, struct addr_location *root_al, int max_stack) thread__resolve_callchain() argument
|
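machine.c and its callers resolve raw sample addresses (ip, addr, branch from/to) by looking them up in the thread's address-space maps before any symbol or histogram work happens. A toy version of that range lookup, with a hard-coded table standing in for the real thread maps:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical address-range table used to turn a raw ip into "object+offset" */
    struct map { uint64_t start, end; const char *name; };

    static const struct map maps[] = {
        { 0x400000,         0x500000,         "/bin/prog" },
        { 0x7f0000000000,   0x7f0000200000,   "libc.so.6" },
    };

    static const struct map *find_map(uint64_t ip)
    {
        for (unsigned i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
            if (ip >= maps[i].start && ip < maps[i].end)
                return &maps[i];
        return NULL;   /* unresolved: no known mapping covers this address */
    }

    int main(void)
    {
        uint64_t ip = 0x40123f;
        const struct map *m = find_map(ip);

        if (m)
            printf("%#llx => %s+%#llx\n", (unsigned long long)ip,
                   m->name, (unsigned long long)(ip - m->start));
        else
            printf("%#llx => [unknown]\n", (unsigned long long)ip);
        return 0;
    }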
H A D | event.c | 716 struct perf_sample *sample, perf_event__process_comm() 719 return machine__process_comm_event(machine, event, sample); perf_event__process_comm() 724 struct perf_sample *sample, perf_event__process_lost() 727 return machine__process_lost_event(machine, event, sample); perf_event__process_lost() 732 struct perf_sample *sample __maybe_unused, perf_event__process_aux() 740 struct perf_sample *sample __maybe_unused, perf_event__process_itrace_start() 748 struct perf_sample *sample, perf_event__process_lost_samples() 751 return machine__process_lost_samples_event(machine, event, sample); perf_event__process_lost_samples() 756 struct perf_sample *sample __maybe_unused, perf_event__process_switch() 788 struct perf_sample *sample, perf_event__process_mmap() 791 return machine__process_mmap_event(machine, event, sample); perf_event__process_mmap() 796 struct perf_sample *sample, perf_event__process_mmap2() 799 return machine__process_mmap2_event(machine, event, sample); perf_event__process_mmap2() 811 struct perf_sample *sample, perf_event__process_fork() 814 return machine__process_fork_event(machine, event, sample); perf_event__process_fork() 819 struct perf_sample *sample, perf_event__process_exit() 822 return machine__process_exit_event(machine, event, sample); perf_event__process_exit() 892 struct perf_sample *sample, perf_event__process() 895 return machine__process_event(machine, event, sample); perf_event__process() 993 struct perf_sample *sample) perf_event__preprocess_sample() 996 struct thread *thread = machine__findnew_thread(machine, sample->pid, perf_event__preprocess_sample() 997 sample->tid); perf_event__preprocess_sample() 1014 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al); perf_event__preprocess_sample() 1023 al->cpu = sample->cpu; perf_event__preprocess_sample() 1091 struct perf_sample *sample, perf_event__preprocess_sample_addr() 1097 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al); perf_event__preprocess_sample_addr() 1100 sample->addr, al); perf_event__preprocess_sample_addr() 1102 al->cpu = sample->cpu; perf_event__preprocess_sample_addr() 714 perf_event__process_comm(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_comm() argument 722 perf_event__process_lost(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_lost() argument 746 perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_lost_samples() argument 786 perf_event__process_mmap(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_mmap() argument 794 perf_event__process_mmap2(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_mmap2() argument 809 perf_event__process_fork(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_fork() argument 817 perf_event__process_exit(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) perf_event__process_exit() argument 890 perf_event__process(struct perf_tool *tool __maybe_unused, union perf_event *event, struct 
perf_sample *sample, struct machine *machine) perf_event__process() argument 990 perf_event__preprocess_sample(const union perf_event *event, struct machine *machine, struct addr_location *al, struct perf_sample *sample) perf_event__preprocess_sample() argument 1090 perf_event__preprocess_sample_addr(union perf_event *event, struct perf_sample *sample, struct thread *thread, struct addr_location *al) perf_event__preprocess_sample_addr() argument
|
H A D | evsel.h | 72 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of 75 * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all 76 * is used there is an id sample appended to non-sample events 242 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, 244 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, 248 struct perf_sample *sample, perf_evsel__strval() 251 return perf_evsel__rawptr(evsel, sample, name); perf_evsel__strval() 308 struct perf_sample *sample); 247 perf_evsel__strval(struct perf_evsel *evsel, struct perf_sample *sample, const char *name) perf_evsel__strval() argument
|
H A D | data-convert-bt.c | 209 struct perf_sample *sample, add_tracepoint_field_value() 216 void *data = sample->raw_data; add_tracepoint_field_value() 321 struct perf_sample *sample) add_tracepoint_fields_values() 327 ret = add_tracepoint_field_value(cw, event_class, event, sample, add_tracepoint_fields_values() 339 struct perf_sample *sample) add_tracepoint_values() 346 common_fields, sample); add_tracepoint_values() 349 fields, sample); add_tracepoint_values() 357 struct perf_sample *sample) add_generic_values() 375 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip); add_generic_values() 381 ret = value_set_s32(cw, event, "perf_tid", sample->tid); add_generic_values() 385 ret = value_set_s32(cw, event, "perf_pid", sample->pid); add_generic_values() 392 ret = value_set_u64(cw, event, "perf_id", sample->id); add_generic_values() 398 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id); add_generic_values() 404 ret = value_set_u64(cw, event, "perf_period", sample->period); add_generic_values() 410 ret = value_set_u64(cw, event, "perf_weight", sample->weight); add_generic_values() 417 sample->data_src); add_generic_values() 424 sample->transaction); add_generic_values() 525 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample, get_sample_cpu() argument 531 cpu = sample->cpu; get_sample_cpu() 557 struct perf_sample *sample, process_sample_event() 578 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count); process_sample_event() 586 bt_ctf_clock_set_time(cw->clock, sample->time); process_sample_event() 588 ret = add_generic_values(cw, event, evsel, sample); process_sample_event() 594 evsel, sample); process_sample_event() 599 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel)); process_sample_event() 1114 .sample = process_sample_event, bt_convert__perf2ctf() 206 add_tracepoint_field_value(struct ctf_writer *cw, struct bt_ctf_event_class *event_class, struct bt_ctf_event *event, struct perf_sample *sample, struct format_field *fmtf) add_tracepoint_field_value() argument 317 add_tracepoint_fields_values(struct ctf_writer *cw, struct bt_ctf_event_class *event_class, struct bt_ctf_event *event, struct format_field *fields, struct perf_sample *sample) add_tracepoint_fields_values() argument 335 add_tracepoint_values(struct ctf_writer *cw, struct bt_ctf_event_class *event_class, struct bt_ctf_event *event, struct perf_evsel *evsel, struct perf_sample *sample) add_tracepoint_values() argument 354 add_generic_values(struct ctf_writer *cw, struct bt_ctf_event *event, struct perf_evsel *evsel, struct perf_sample *sample) add_generic_values() argument 555 process_sample_event(struct perf_tool *tool, union perf_event *_event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine __maybe_unused) process_sample_event() argument
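The .sample = process_sample_event hookup at line 1114 above is how a perf-based tool receives fully parsed samples. A minimal sketch of such a handler, assuming only the callback signature visible in this excerpt (__maybe_unused and the struct definitions come from perf's own util headers; the printed fields are purely illustrative):

#include <stdio.h>
#include <inttypes.h>

/* Hedged sketch: a do-almost-nothing perf_tool sample callback. */
static int my_process_sample(struct perf_tool *tool __maybe_unused,
                             union perf_event *event __maybe_unused,
                             struct perf_sample *sample,
                             struct perf_evsel *evsel __maybe_unused,
                             struct machine *machine __maybe_unused)
{
        /* The parser has already filled in ip/pid/tid/time/period etc. */
        printf("ip=%#" PRIx64 " pid=%u time=%" PRIu64 "\n",
               sample->ip, sample->pid, sample->time);
        return 0;
}

static struct perf_tool my_tool = {
        .sample = my_process_sample,   /* wired up like the converter above */
};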
|
H A D | thread-stack.h | 53 * @call_ref: external reference to 'call' sample (e.g. db_id) 54 * @return_ref: external reference to 'return' sample (e.g. db_id) 107 struct perf_sample *sample,
|
H A D | build-id.h | 21 struct perf_sample *sample, struct perf_evsel *evsel,
|
H A D | ordered-events.h | 47 struct perf_sample *sample, u64 file_offset);
|
H A D | db-export.c | 289 struct perf_sample *sample, struct perf_evsel *evsel, db_export__sample() 295 .sample = sample, db_export__sample() 336 perf_event__preprocess_sample_addr(event, sample, thread, &addr_al); db_export__sample() 342 err = thread_stack__process(thread, comm, sample, al, db_export__sample() 288 db_export__sample(struct db_export *dbe, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct addr_location *al) db_export__sample() argument
|
H A D | unwind-libdw.c | 112 struct stack_dump *stack = &ui->sample->user_stack; memory_read() 117 ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP); memory_read() 172 .sample = data, unwind__get_entries()
|
H A D | build-id.c | 26 struct perf_sample *sample, build_id__mark_dso_hit() 32 struct thread *thread = machine__findnew_thread(machine, sample->pid, build_id__mark_dso_hit() 33 sample->tid); build_id__mark_dso_hit() 41 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, &al); build_id__mark_dso_hit() 52 struct perf_sample *sample perf_event__exit_del_thread() 72 .sample = build_id__mark_dso_hit, 24 build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __maybe_unused, struct machine *machine) build_id__mark_dso_hit() argument
|
H A D | db-export.h | 35 struct perf_sample *sample; member in struct:export_sample 98 struct perf_sample *sample, struct perf_evsel *evsel,
|
H A D | unwind-libunwind.c | 88 struct perf_sample *sample; member in struct:unwind_info 443 struct stack_dump *stack = &ui->sample->user_stack; access_mem() 449 if (__write || !stack || !ui->sample->user_regs.regs) { access_mem() 454 ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP); access_mem() 497 if (!ui->sample->user_regs.regs) { access_reg() 506 ret = perf_reg_value(&val, &ui->sample->user_regs, id); access_reg() 645 .sample = data, unwind__get_entries()
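access_mem() above (line 443 onward) services libunwind reads out of the stack snapshot captured with the sample: the stack pointer recovered from user_regs marks where the dump begins, so a target address is translated into an offset into that buffer. A rough standalone sketch of that bounds check (the struct layout is an assumption standing in for the stack_dump used above):

#include <stdint.h>
#include <string.h>

struct stack_dump_sketch { uint64_t size; char *data; };   /* assumed layout */

/* Hedged sketch: read one word of the unwound stack from the sample's dump. */
static int read_from_stack_dump(const struct stack_dump_sketch *stack,
                                uint64_t sp, uint64_t addr, uint64_t *valp)
{
        uint64_t offset;

        if (addr < sp)
                return -1;                      /* below the captured region */
        offset = addr - sp;
        if (offset + sizeof(*valp) > stack->size)
                return -1;                      /* past the end of the dump */
        memcpy(valp, stack->data + offset, sizeof(*valp));
        return 0;
}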
|
H A D | callchain.h | 197 int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent, 200 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample);
|
H A D | trace-event.h | 75 struct perf_sample *sample,
|
/linux-4.4.14/tools/perf/arch/s390/util/ |
H A D | kvm-stat.c | 22 struct perf_sample *sample, event_icpt_insn_get_key() 27 insn = perf_evsel__intval(evsel, sample, "instruction"); event_icpt_insn_get_key() 33 struct perf_sample *sample, event_sigp_get_key() 36 key->key = perf_evsel__intval(evsel, sample, "order_code"); event_sigp_get_key() 41 struct perf_sample *sample, event_diag_get_key() 44 key->key = perf_evsel__intval(evsel, sample, "code"); event_diag_get_key() 49 struct perf_sample *sample, event_icpt_prog_get_key() 52 key->key = perf_evsel__intval(evsel, sample, "code"); event_icpt_prog_get_key() 21 event_icpt_insn_get_key(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) event_icpt_insn_get_key() argument 32 event_sigp_get_key(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) event_sigp_get_key() argument 40 event_diag_get_key(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) event_diag_get_key() argument 48 event_icpt_prog_get_key(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) event_icpt_prog_get_key() argument
|
/linux-4.4.14/drivers/oprofile/ |
H A D | cpu_buffer.c | 130 * This function prepares the cpu buffer to write a sample. 136 * op_cpu_buffer_write_commit() after preparing the sample. In case of 138 * sample. 146 size * sizeof(entry->sample->data[0])); op_cpu_buffer_write_reserve() 149 entry->sample = ring_buffer_event_data(entry->event); op_cpu_buffer_write_reserve() 151 entry->data = entry->sample->data; op_cpu_buffer_write_reserve() 153 return entry->sample; op_cpu_buffer_write_reserve() 169 entry->sample = ring_buffer_event_data(e); op_cpu_buffer_read_entry() 171 / sizeof(entry->sample->data[0]); op_cpu_buffer_read_entry() 172 entry->data = entry->sample->data; op_cpu_buffer_read_entry() 173 return entry->sample; op_cpu_buffer_read_entry() 186 struct op_sample *sample; op_add_code() local 219 sample = op_cpu_buffer_write_reserve(&entry, size); op_add_code() 220 if (!sample) op_add_code() 223 sample->eip = ESCAPE_CODE; op_add_code() 224 sample->event = flags; op_add_code() 239 struct op_sample *sample; op_add_sample() local 241 sample = op_cpu_buffer_write_reserve(&entry, 0); op_add_sample() 242 if (!sample) op_add_sample() 245 sample->eip = pc; op_add_sample() 246 sample->event = event; op_add_sample() 352 * oprofile_write_commit(&entry) to commit the sample. 358 struct op_sample *sample; oprofile_write_reserve() local 368 sample = op_cpu_buffer_write_reserve(entry, size + 2); oprofile_write_reserve() 369 if (!sample) oprofile_write_reserve() 371 sample->eip = ESCAPE_CODE; oprofile_write_reserve() 372 sample->event = 0; /* no flags */ oprofile_write_reserve()
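The comment at line 130 above describes the whole write protocol: reserve room for an op_sample (plus optional payload words), fill it in, then commit. A hedged usage sketch, assuming the driver's cpu_buffer.h and the op_cpu_buffer_write_commit() counterpart named in that comment, and mirroring op_add_sample() above:

/* Hedged sketch: push one pc/event pair through the oprofile cpu buffer. */
static int write_one_sample(unsigned long pc, unsigned long event)
{
        struct op_entry entry;
        struct op_sample *sample;

        sample = op_cpu_buffer_write_reserve(&entry, 0); /* no extra data words */
        if (!sample)
                return -ENOMEM;        /* ring buffer full, the sample is lost */

        sample->eip = pc;
        sample->event = event;

        return op_cpu_buffer_write_commit(&entry);
}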
|
H A D | buffer_sync.c | 244 /* Convert the EIP value of a sample into a persistent dentry/offset 374 * Add a sample to the global event buffer. If possible the 375 * sample is converted into a persistent dentry/offset pair 389 /* add userspace sample */ add_sample() 504 struct op_sample *sample; sync_buffer() local 514 sample = op_cpu_buffer_read_entry(&entry, cpu); sync_buffer() 515 if (!sample) sync_buffer() 518 if (is_code(sample->eip)) { sync_buffer() 519 flags = sample->event; sync_buffer() 548 /* ignore sample */ sync_buffer() 551 if (add_sample(mm, sample, in_kernel)) sync_buffer() 554 /* ignore backtraces if failed to add a sample */ sync_buffer()
|
H A D | cpu_buffer.h | 59 * reset these to invalid values; the next sample collected will
|
/linux-4.4.14/tools/perf/tests/ |
H A D | dwarf-unwind.c | 23 struct perf_sample *sample __maybe_unused, mmap_handler() 73 struct perf_sample sample; unwind_thread() local 77 memset(&sample, 0, sizeof(sample)); unwind_thread() 79 if (test__arch_unwind_sample(&sample, thread)) { unwind_thread() 80 pr_debug("failed to get unwind sample\n"); unwind_thread() 85 &sample, MAX_STACK); unwind_thread() 95 free(sample.user_stack.data); unwind_thread() 96 free(sample.user_regs.regs); unwind_thread()
|
H A D | sample-parsing.c | 172 struct perf_sample sample = { do_test() local 223 sample.read.group.nr = 4; do_test() 224 sample.read.group.values = values; do_test() 226 sample.read.one.value = 0x08789faeb786aa87ULL; do_test() 227 sample.read.one.id = 99; do_test() 230 sz = perf_event__sample_event_size(&sample, sample_type, read_format); do_test() 244 &sample, false); do_test() 271 if (!samples_same(&sample, &sample_out, sample_type, read_format)) { do_test() 286 * test__sample_parsing - test sample parsing. 288 * This function implements a test that synthesizes a sample event, parses it 289 * and then checks that the parsed sample matches the original sample. The test 290 * checks sample format bits separately and together. If the test passes %0 is 302 * Fail the test if it has not been updated when new sample format bits test__sample_parsing() 307 pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n"); test__sample_parsing() 311 /* Test each sample format bit separately */ test__sample_parsing() 336 /* Test all sample format bits together */ test__sample_parsing()
|
H A D | openat-syscall-tp-fields.c | 81 struct perf_sample sample; test__syscall_openat_tp_fields() local 90 err = perf_evsel__parse_sample(evsel, event, &sample); test__syscall_openat_tp_fields() 92 pr_debug("Can't parse sample, err = %d\n", err); test__syscall_openat_tp_fields() 96 tp_flags = perf_evsel__intval(evsel, &sample, "flags"); test__syscall_openat_tp_fields()
|
H A D | parse-no-sample-id-all.c | 14 struct perf_sample sample; process_event() local 30 if (perf_evlist__parse_sample(*pevlist, event, &sample)) { process_event() 62 * sample_id_all bit. Without the sample_id_all bit, non-sample events (such as 63 * mmap events) do not have an id sample appended, and consequently logic
|
H A D | hists_link.c | 13 struct sample { struct 22 static struct sample fake_common_samples[] = { 35 static struct sample fake_samples[][5] = { 67 struct perf_sample sample = { .period = 1, }; add_hist_entries() local 85 sample.pid = fake_common_samples[k].pid; evlist__for_each() 86 sample.tid = fake_common_samples[k].pid; evlist__for_each() 87 sample.ip = fake_common_samples[k].ip; evlist__for_each() 89 &sample) < 0) evlist__for_each() 111 sample.pid = fake_samples[i][k].pid; evlist__for_each() 112 sample.tid = fake_samples[i][k].pid; evlist__for_each() 113 sample.ip = fake_samples[i][k].ip; evlist__for_each() 115 &sample) < 0) evlist__for_each() 139 static int find_sample(struct sample *samples, size_t nr_samples, find_sample() 310 /* process sample events */ test__hists_link()
|
H A D | perf-record.c | 50 struct perf_sample sample; test__PERF_RECORD() local 168 err = perf_evlist__parse_sample(evlist, event, &sample); test__PERF_RECORD() 172 pr_debug("Couldn't parse sample\n"); test__PERF_RECORD() 177 pr_info("%" PRIu64" %d ", sample.time, sample.cpu); test__PERF_RECORD() 181 if (prev_time > sample.time) { test__PERF_RECORD() 183 name, prev_time, sample.time); test__PERF_RECORD() 187 prev_time = sample.time; test__PERF_RECORD() 189 if (sample.cpu != cpu) { test__PERF_RECORD() 191 name, cpu, sample.cpu); test__PERF_RECORD() 195 if ((pid_t)sample.pid != evlist->workload.pid) { test__PERF_RECORD() 197 name, evlist->workload.pid, sample.pid); test__PERF_RECORD() 201 if ((pid_t)sample.tid != evlist->workload.pid) { test__PERF_RECORD() 203 name, evlist->workload.pid, sample.tid); test__PERF_RECORD()
|
H A D | mmap-basic.c | 14 * sample.id field to map back to its respective perf_evsel instance. 103 struct perf_sample sample; test__basic_mmap() local 111 err = perf_evlist__parse_sample(evlist, event, &sample); test__basic_mmap() 113 pr_err("Can't parse sample, err = %d\n", err); test__basic_mmap() 118 evsel = perf_evlist__id2evsel(evlist, sample.id); test__basic_mmap() 121 " doesn't map to an evsel\n", sample.id); test__basic_mmap()
|
H A D | sw-clock.c | 94 struct perf_sample sample; __test__sw_clock_freq() local 99 err = perf_evlist__parse_sample(evlist, event, &sample); __test__sw_clock_freq() 101 pr_debug("Error during parse sample\n"); __test__sw_clock_freq() 105 total_periods += sample.period; __test__sw_clock_freq()
|
H A D | hists_filter.c | 13 struct sample { struct 23 static struct sample fake_samples[] = { 51 struct perf_sample sample = { .period = 100, }; add_hist_entries() local 55 * each evsel will have 10 samples but the 4th sample add_hist_entries() 68 .sample = &sample, evlist__for_each() 79 sample.pid = fake_samples[i].pid; evlist__for_each() 80 sample.tid = fake_samples[i].pid; evlist__for_each() 81 sample.ip = fake_samples[i].ip; evlist__for_each() 84 &sample) < 0) evlist__for_each() 138 /* process sample events */ test__hists_filter() 231 * be counted as a separate entry but the sample count and evlist__for_each()
|
H A D | switch-tracking.c | 117 struct perf_sample sample; process_sample_event() local 122 if (perf_evlist__parse_sample(evlist, event, &sample)) { process_sample_event() 127 evsel = perf_evlist__id2evsel(evlist, sample.id); process_sample_event() 129 next_tid = perf_evsel__intval(evsel, &sample, "next_pid"); process_sample_event() 130 prev_tid = perf_evsel__intval(evsel, &sample, "prev_pid"); process_sample_event() 131 cpu = sample.cpu; process_sample_event() 206 struct perf_sample sample; add_event() local 217 if (perf_evlist__parse_sample(evlist, event, &sample)) { add_event() 222 if (!sample.time) { add_event() 227 node->event_time = sample.time; add_event()
|
H A D | hists_cumulate.c | 13 struct sample { struct 22 static struct sample fake_samples[] = { 80 struct perf_sample sample = { .period = 1000, }; add_hist_entries() local 91 .sample = &sample, add_hist_entries() 100 sample.pid = fake_samples[i].pid; add_hist_entries() 101 sample.tid = fake_samples[i].pid; add_hist_entries() 102 sample.ip = fake_samples[i].ip; add_hist_entries() 103 sample.callchain = (struct ip_callchain *)fake_callchains[i]; add_hist_entries() 106 &sample) < 0) add_hist_entries()
|
H A D | tests.h | 76 int test__arch_unwind_sample(struct perf_sample *sample,
|
H A D | hists_output.c | 13 struct sample { struct 23 static struct sample fake_samples[] = { 50 struct perf_sample sample = { .period = 100, }; add_hist_entries() local 61 .sample = &sample, add_hist_entries() 66 sample.cpu = fake_samples[i].cpu; add_hist_entries() 67 sample.pid = fake_samples[i].pid; add_hist_entries() 68 sample.tid = fake_samples[i].pid; add_hist_entries() 69 sample.ip = fake_samples[i].ip; add_hist_entries() 72 &sample) < 0) add_hist_entries()
|
/linux-4.4.14/drivers/media/pci/cx25821/ |
H A D | cx25821-biffuncs.h | 28 static inline u8 getBit(u32 sample, u8 index) getBit() argument 30 return (u8) ((sample >> index) & 1); getBit() 38 static inline u32 setBitAtPos(u32 sample, u8 bit) setBitAtPos() argument 40 sample |= (1 << bit); setBitAtPos() 41 return sample; setBitAtPos()
|
/linux-4.4.14/arch/s390/kernel/ |
H A D | perf_cpum_sf.c | 28 /* Minimum number of sample-data-block-tables: 30 * A single table contains up to 511 pointers to sample-data-blocks. 34 /* Number of sample-data-blocks per sample-data-block-table (SDBT): 53 * the number of sample-data-block-tables into account. Note that these 75 unsigned long num_sdb; /* Number of sample-data-blocks */ 76 unsigned long num_sdbt; /* Number of sample-data-block-tables */ 77 unsigned long *tail; /* last sample-data-block-table */ 160 /* Allocate and initialize sample-data-block */ alloc_sample_data_block() 167 /* Link SDB into the sample-data-block-table */ alloc_sample_data_block() 176 * Allocates new sample-data-blocks and adds them to the specified sampling 230 /* Allocate a new sample-data-block. realloc_sampling_buffer() 256 * specified number of sample-data-blocks (SDB). For each allocation, 257 * a 4K page is used. The number of sample-data-block-tables (SDBT) 270 /* Allocate the sample-data-block-table origin */ alloc_sampling_buffer() 283 /* Allocate requested number of sample-data-blocks */ alloc_sampling_buffer() 349 /* The sample size depends on the sampling function: The basic-sampling event_sample_size() 372 /* Allocate raw sample buffer allocate_buffers() 374 * The raw sample buffer is used to temporarily store sampling data allocate_buffers() 375 * entries for perf raw sample processing. The buffer size mainly allocate_buffers() 381 * 2. The perf raw sample data must be 8-byte aligned (u64) and allocate_buffers() 385 * 3. Store the raw sample buffer pointer in the perf event allocate_buffers() 401 * 1. Determine the sample data size which depends on the used allocate_buffers() 410 * sample-data-blocks (sdb). allocate_buffers() 412 * 3. Compute the number of sample-data-blocks and ensure a minimum allocate_buffers() 419 * 4. Compute the number of sample-data-block-tables (SDBT) and allocate_buffers() 461 * buffer. Accept up to 5% sample data loss. compute_sfb_extent() 463 * sample-data-blocks. compute_sfb_extent() 489 /* The sample_overflow contains the average number of sample data sfb_account_overflows() 490 * that has been lost because sample-data-blocks were full. sfb_account_overflows() 492 * Calculate the total number of sample data entries that has been sfb_account_overflows() 499 /* Compute number of sample-data-blocks */ sfb_account_overflows() 627 /* Free raw sample buffer */ hw_perf_event_destroy() 652 /* (Re)set to first sample-data-block-table */ hw_reset_registers() 753 * of sample periods. If the specified sample period is __hw_perf_event_init() 758 /* The perf core maintains a maximum sample rate that is __hw_perf_event_init() 767 debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n"); __hw_perf_event_init() 774 /* Initialize sample data overflow accounting */ __hw_perf_event_init() 967 * @sample: Hardware sample data 969 * Use the hardware sample data to create perf event sample. The sample 984 /* Setup perf sample */ perf_push_sample() 992 * field which is unused contains additional sample-data-entry related perf_push_sample() 1024 * sample. 
perf_push_sample() 1048 static int sample_format_is_valid(struct hws_combined_entry *sample, sample_format_is_valid() argument 1055 if (sample->basic.def != 0x0001) sample_format_is_valid() 1062 if (sample->diag.def < 0x8001) sample_format_is_valid() 1067 static int sample_is_consistent(struct hws_combined_entry *sample, sample_is_consistent() argument 1082 if (sample->basic.I || sample->basic.W) sample_is_consistent() 1087 static void reset_sample_slot(struct hws_combined_entry *sample, reset_sample_slot() argument 1091 sample->basic.def = 0; reset_sample_slot() 1093 sample->diag.def = 0; reset_sample_slot() 1097 struct hws_combined_entry *sample) sfr_store_sample() 1100 sfr->basic = sample->basic; sfr_store_sample() 1102 memcpy(&sfr->diag, &sample->diag, sfr->dsdes); sfr_store_sample() 1105 static void debug_sample_entry(struct hws_combined_entry *sample, debug_sample_entry() argument 1112 sample->basic.def, &sample->basic, debug_sample_entry() 1114 ? sample->diag.def : 0xFFFF, debug_sample_entry() 1116 ? &sample->diag : NULL); debug_sample_entry() 1119 /* hw_collect_samples() - Walk through a sample-data-block and collect samples 1124 * Walks through a sample-data-block and collects sampling data entries that are 1143 struct hws_combined_entry *sample; hw_collect_samples() local 1148 /* Prepare and initialize raw sample data */ hw_collect_samples() 1154 sample = (struct hws_combined_entry *) *sdbt; hw_collect_samples() 1155 while ((unsigned long *) sample < (unsigned long *) te) { hw_collect_samples() 1156 /* Check for an empty sample */ hw_collect_samples() 1157 if (!sample->basic.def) hw_collect_samples() 1164 if (sample_format_is_valid(sample, flags)) { hw_collect_samples() 1166 * throttle event delivery. Remaining sample data is hw_collect_samples() 1170 if (sample_is_consistent(sample, flags)) { hw_collect_samples() 1171 /* Deliver sample data to perf */ hw_collect_samples() 1172 sfr_store_sample(sfr, sample); hw_collect_samples() 1179 debug_sample_entry(sample, te, flags); hw_collect_samples() 1186 * This is typically the case for sample-data-blocks hw_collect_samples() 1194 /* Reset sample slot and advance to next sample */ hw_collect_samples() 1195 reset_sample_slot(sample, flags); hw_collect_samples() 1196 sample += sample_size; hw_collect_samples() 1202 * @flush_all: Flag to also flush partially filled sample-data-blocks 1208 * Only full sample-data-blocks are processed. Specify the flash_all flag 1209 * to also walk through partially filled sample-data-blocks. It is ignored 1211 * enforces the processing of full sample-data-blocks only (trailer entries 1228 /* Get the trailer entry of the sample-data-block */ hw_perf_event_update() 1238 /* Check the sample overflow count */ hw_perf_event_update() 1240 /* Account sample overflows and, if a particular limit hw_perf_event_update() 1246 /* Timestamps are valid for full sample-data-blocks only */ hw_perf_event_update() 1252 /* Collect all samples from a single sample-data-block and hw_perf_event_update() 1267 /* Advance to next sample-data-block */ hw_perf_event_update() 1275 /* Stop processing sample-data if all samples of the current hw_perf_event_update() 1276 * sample-data-block were flushed even if it was not full. hw_perf_event_update() 1282 * processing any remaining sample-data-blocks. 
hw_perf_event_update() 1288 /* Account sample overflows in the event hardware structure */ hw_perf_event_update() 1294 "overflow stats: sample=%llu event=%llu\n", hw_perf_event_update() 1491 /* Loss of sample data due to high-priority machine activities */ cpumf_measurement_alert() 1096 sfr_store_sample(struct sf_raw_sample *sfr, struct hws_combined_entry *sample) sfr_store_sample() argument
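The sizing comments above (steps 1 to 4 around line 401) reduce to simple arithmetic: one 4 KB page per sample-data-block, and one sample-data-block-table for every 511 block pointers. A hedged back-of-the-envelope helper using only the constants quoted in the excerpt (trailer-entry overhead and the minimum/maximum clamps applied by the driver are ignored):

/* Hedged sketch: estimate buffer pages for a requested number of samples. */
#define SDB_SIZE        4096UL   /* one 4K page per sample-data-block        */
#define PTRS_PER_SDBT    511UL   /* pointers held by one sample-data-block-table */

static unsigned long sfb_pages_needed(unsigned long num_samples,
                                      unsigned long sample_size)
{
        unsigned long num_sdb, num_sdbt;

        num_sdb  = (num_samples * sample_size + SDB_SIZE - 1) / SDB_SIZE;
        num_sdbt = (num_sdb + PTRS_PER_SDBT - 1) / PTRS_PER_SDBT;

        return num_sdb + num_sdbt;   /* every SDB and SDBT occupies one page */
}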
|
/linux-4.4.14/include/media/ |
H A D | soc_mediabus.h | 20 * sample represents one pixel 24 * @SOC_MBUS_PACKING_EXTEND16: sample width (e.g., 10 bits) has to be extended 29 * @SOC_MBUS_PACKING_EXTEND32: sample width (e.g., 24 bits) has to be extended 43 * enum soc_mbus_order - sample order on the media bus 44 * @SOC_MBUS_ORDER_LE: least significant sample first 45 * @SOC_MBUS_ORDER_BE: most significant sample first 75 * @packing: Type of sample-packing, that has to be used 77 * @bits_per_sample: How many bits the bridge has to sample
|
/linux-4.4.14/drivers/staging/iio/resolver/ |
H A D | ad2s1210.h | 15 unsigned sample; member in struct:ad2s1210_platform_data
|
H A D | ad2s1200.c | 28 /* input pins sample and rdvel are controlled by the driver */ 39 int sample; member in struct:ad2s1200_state 55 gpio_set_value(st->sample, 0); ad2s1200_read_raw() 58 gpio_set_value(st->sample, 1); ad2s1200_read_raw() 79 /* delay (2 * AD2S1200_TSCLK + 20) ns for sample pulse */ ad2s1200_read_raw() 127 st->sample = pins[0]; ad2s1200_probe()
|
H A D | ad2s1210.c | 390 /* read the fault register since last sample */ ad2s1210_show_fault() 413 gpio_set_value(st->pdata->sample, 0); ad2s1210_clear_fault() 416 gpio_set_value(st->pdata->sample, 1); ad2s1210_clear_fault() 420 gpio_set_value(st->pdata->sample, 0); ad2s1210_clear_fault() 421 gpio_set_value(st->pdata->sample, 1); ad2s1210_clear_fault() 477 gpio_set_value(st->pdata->sample, 0); ad2s1210_read_raw() 523 gpio_set_value(st->pdata->sample, 1); ad2s1210_read_raw() 645 { st->pdata->sample, GPIOF_DIR_IN, "sample" }, ad2s1210_setup_gpios() 659 { st->pdata->sample, GPIOF_DIR_IN, "sample" }, ad2s1210_free_gpios()
|
/linux-4.4.14/drivers/input/touchscreen/ |
H A D | wm9705.c | 57 * Set adc sample delay. 70 MODULE_PARM_DESC(delay, "Set adc sample delay."); 104 * ADC sample delay times in uS 160 /* polling mode sample settling delay */ wm9705_phy_init() 169 dev_dbg(wm->dev, "setting adc sample delay to %d u Secs.", wm9705_phy_init() 213 * Read a sample from the WM9705 adc in polling mode. 215 static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample) wm9705_poll_sample() argument 248 dev_dbg(wm->dev, "adc sample timeout"); wm9705_poll_sample() 252 *sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); wm9705_poll_sample() 256 /* check we have correct sample */ wm9705_poll_sample() 257 if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) { wm9705_poll_sample() 258 dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x", wm9705_poll_sample() 260 *sample & WM97XX_ADCSEL_MASK); wm9705_poll_sample() 264 if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) { wm9705_poll_sample()
|
H A D | wm9712.c | 70 * Set adc sample delay. 83 MODULE_PARM_DESC(delay, "Set adc sample delay."); 122 * ADC sample delay times in uS 192 /* polling mode sample settling delay */ wm9712_phy_init() 199 dev_dbg(wm->dev, "setting adc sample delay to %d u Secs.\n", wm9712_phy_init() 253 * Read a sample from the WM9712 adc in polling mode. 255 static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample) wm9712_poll_sample() argument 288 dev_dbg(wm->dev, "adc sample timeout\n"); wm9712_poll_sample() 292 *sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); wm9712_poll_sample() 296 /* check we have correct sample */ wm9712_poll_sample() 297 if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) { wm9712_poll_sample() 298 dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x\n", wm9712_poll_sample() 300 *sample & WM97XX_ADCSEL_MASK); wm9712_poll_sample() 304 if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) { wm9712_poll_sample() 306 *sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); wm9712_poll_sample() 307 if (!(*sample & WM97XX_PEN_DOWN)) { wm9712_poll_sample() 352 dev_dbg(wm->dev, "adc sample timeout\n"); wm9712_poll_coord() 366 /* check we have correct sample */ wm9712_poll_coord()
|
H A D | mc13783_ts.c | 41 unsigned int sample[4]; member in struct:mc13783_ts_priv 81 x0 = priv->sample[0] & 0xfff; mc13783_ts_report_sample() 82 x1 = priv->sample[1] & 0xfff; mc13783_ts_report_sample() 83 x2 = priv->sample[2] & 0xfff; mc13783_ts_report_sample() 84 y0 = priv->sample[3] & 0xfff; mc13783_ts_report_sample() 85 y1 = (priv->sample[0] >> 12) & 0xfff; mc13783_ts_report_sample() 86 y2 = (priv->sample[1] >> 12) & 0xfff; mc13783_ts_report_sample() 87 cr0 = (priv->sample[2] >> 12) & 0xfff; mc13783_ts_report_sample() 88 cr1 = (priv->sample[3] >> 12) & 0xfff; mc13783_ts_report_sample() 131 priv->sample) == 0) mc13783_ts_work()
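mc13783_ts_report_sample() above unpacks three X readings, three Y readings and two contact-resistance values, 12 bits apiece, from the four 32-bit ADC words. The same unpacking as a standalone snippet:

/* Hedged sketch: unpack the 12-bit fields exactly as the excerpt does. */
#include <stdint.h>

struct ts_reading { int x[3], y[3], cr[2]; };

static void unpack_mc13783(const uint32_t sample[4], struct ts_reading *r)
{
        r->x[0]  =  sample[0]        & 0xfff;
        r->x[1]  =  sample[1]        & 0xfff;
        r->x[2]  =  sample[2]        & 0xfff;
        r->y[0]  =  sample[3]        & 0xfff;
        r->y[1]  = (sample[0] >> 12) & 0xfff;
        r->y[2]  = (sample[1] >> 12) & 0xfff;
        r->cr[0] = (sample[2] >> 12) & 0xfff;
        r->cr[1] = (sample[3] >> 12) & 0xfff;
}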
|
H A D | wm9713.c | 70 * Set adc sample delay. 83 MODULE_PARM_DESC(delay, "Set adc sample delay."); 122 * ADC sample delay times in uS 196 /* sample settling delay */ wm9713_phy_init() 200 dev_info(wm->dev, "setting adc sample delay to %d u Secs.", wm9713_phy_init() 258 * Read a sample from the WM9713 adc in polling mode. 260 static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample) wm9713_poll_sample() argument 298 dev_dbg(wm->dev, "adc sample timeout"); wm9713_poll_sample() 302 *sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); wm9713_poll_sample() 306 /* check we have correct sample */ wm9713_poll_sample() 307 if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) { wm9713_poll_sample() 308 dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x", wm9713_poll_sample() 310 *sample & WM97XX_ADCSEL_MASK); wm9713_poll_sample() 314 if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) { wm9713_poll_sample() 363 dev_dbg(wm->dev, "adc sample timeout"); wm9713_poll_coord() 377 /* check we have correct sample */ wm9713_poll_coord()
|
H A D | ads7846.c | 61 #define TS_POLL_DELAY 1 /* ms delay before the first sample */ 64 /* this driver doesn't aim at the peak continuous sample rate */ 65 #define SAMPLE_BITS (8 /*cmd*/ + 16 /*sample*/ + 2 /* before, after */) 298 __be16 sample ____cacheline_aligned; 309 u8 sample[3] ____cacheline_aligned; 346 /* take sample */ ads7846_read12_ser() 352 req->xfer[3].rx_buf = &req->sample; ads7846_read12_ser() 377 status = be16_to_cpu(req->sample); ads7846_read12_ser() 401 req->xfer[0].rx_buf = req->sample; ads7845_read12_ser() 413 status = be16_to_cpu(*((u16 *)&req->sample[1])); ads7845_read12_ser() 438 * ADS7846 could use the low-accuracy two-sample scheme, but can't do the high 639 * the whole sample, repeat it in the next sampling ads7846_debounce_filter() 1020 * The first sample after switching drivers can be low quality; ads7846_setup_spi_msg() 1066 /* ... maybe discard first sample ... */ ads7846_setup_spi_msg() 1099 /* ... maybe discard first sample ... */ ads7846_setup_spi_msg() 1130 /* ... maybe discard first sample ... */ ads7846_setup_spi_msg() 1268 /* don't exceed max specified sample rate */ ads7846_probe() 1270 dev_err(&spi->dev, "f(sample) %d KHz?\n", ads7846_probe() 1411 * Take a first sample, leaving nPENIRQ active and vREF off; avoid ads7846_probe()
|
H A D | lpc32xx_ts.c | 104 * than 4 samples, but it's ok to pop 4 and let the last sample lpc32xx_ts_interrupt() 120 /* Data is only valid if pen is still down in last sample */ lpc32xx_ts_interrupt() 122 /* Use average of 2nd and 3rd sample for position */ lpc32xx_ts_interrupt() 153 /* Set the TSC FIFO depth to 4 samples @ 10-bits per sample (max) */ lpc32xx_setup_tsc() 172 * Set sample rate to about 240Hz per X/Y pair. A single measurement lpc32xx_setup_tsc() 173 * consists of 4 pairs which gives about a 60Hz sample rate based on lpc32xx_setup_tsc()
|
H A D | jornada720_ts.c | 33 int x_data[4]; /* X sample values */ 34 int y_data[4]; /* Y sample values */
|
H A D | ad7877.c | 167 u16 sample ____cacheline_aligned; 230 req->xfer[1].rx_buf = &req->sample; ad7877_read() 237 ret = status ? : req->sample; ad7877_read() 273 int sample; ad7877_read_adc() local 306 req->xfer[3].rx_buf = &req->sample; ad7877_read_adc() 324 sample = req->sample; ad7877_read_adc() 328 return status ? : sample; ad7877_read_adc()
|
/linux-4.4.14/include/uapi/sound/ |
H A D | sfnt_info.h | 56 #define SNDRV_SFNT_PROBE_DATA 8 /* optarg=sample */ 72 unsigned short type; /* sample type */ 77 #define SNDRV_SFNT_PAT_SHARED 0x200 /* sample is shared */ 117 unsigned short sample; /* sample id */ member in struct:soundfont_voice_info 118 int start, end; /* sample offset correction */ 120 short rate_offset; /* sample rate pitch offset */ 121 unsigned short mode; /* sample mode */ 135 unsigned char amplitude; /* sample volume (127 max) */ 139 unsigned short sample_mode; /* sample mode_flag (set by driver) */ 156 * sample wave information 159 /* wave table sample header: 32 bytes */ 162 unsigned short sample; /* sample id */ member in struct:soundfont_sample_info
|
H A D | sb16_csp.h | 36 /* CSP sample width */ 79 int sample_width; /* sample width, look above */ 89 unsigned short acc_width; /* accepted sample width */ 90 unsigned short acc_rates; /* accepted sample rates */ 93 unsigned short run_width; /* current sample width */
|
/linux-4.4.14/drivers/misc/echo/ |
H A D | oslec.h | 76 * oslec_update: Process a sample through a voice echo canceller. 78 * @tx: The transmitted audio sample. 79 * @rx: The received audio sample. 81 * The return value is the clean (echo cancelled) received sample. 88 * @tx: The transmitted audio sample. 90 * The return value is the HP filtered transmit sample, send this to your D/A.
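The two calls documented above form the whole per-sample API: oslec_hpf_tx() conditions each transmit sample, and oslec_update() subtracts its echo from the matching receive sample. A hedged usage sketch for a block of samples (the oslec_state is assumed to have been created elsewhere via the same header):

/* Hedged sketch: run a block of 16-bit samples through the echo canceller. */
static void cancel_block(struct oslec_state *ec,
                         int16_t *tx, int16_t *rx, int len)
{
        int i;

        for (i = 0; i < len; i++) {
                /* Clean up the transmit sample before it reaches the D/A... */
                tx[i] = oslec_hpf_tx(ec, tx[i]);
                /* ...and strip its echo from the corresponding receive sample. */
                rx[i] = oslec_update(ec, tx[i], rx[i]);
        }
}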
|
H A D | fir.h | 41 history sample offsets that are 16 bit aligned - the dual MAC needs 146 static inline int16_t fir16(struct fir16_state_t *fir, int16_t sample) fir16() argument 150 fir->history[fir->curr_pos] = sample; fir16() 151 fir->history[fir->curr_pos + fir->taps] = sample; fir16() 159 fir->history[fir->curr_pos] = sample; fir16() 195 static inline int16_t fir32(struct fir32_state_t *fir, int16_t sample) fir32() argument 202 fir->history[fir->curr_pos] = sample; fir32()
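The doubled-history trick described at line 41 above and used by fir16() (each sample is written at curr_pos and again at curr_pos + taps) lets the convolution read taps contiguous samples without a wrap-around test inside the loop. A standalone sketch of the same idea (tap count, coefficients and the output shift are illustrative, not the kernel's values):

/* Hedged sketch: FIR filter with a doubled history buffer, as in fir16(). */
#include <stdint.h>

#define TAPS 8

struct fir_state {
        int16_t coeffs[TAPS];
        int16_t history[2 * TAPS];    /* every sample stored twice */
        int curr_pos;
};

static int16_t fir_run(struct fir_state *f, int16_t sample)
{
        int32_t acc = 0;
        int i;

        f->history[f->curr_pos] = sample;
        f->history[f->curr_pos + TAPS] = sample;

        /* history[curr_pos .. curr_pos+TAPS-1] is contiguous, no wrap check */
        for (i = 0; i < TAPS; i++)
                acc += (int32_t)f->coeffs[i] * f->history[f->curr_pos + i];

        if (f->curr_pos <= 0)
                f->curr_pos = TAPS;
        f->curr_pos--;

        return (int16_t)(acc >> 15);
}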
|
H A D | echo.h | 106 energy is that we must do this very quickly. Given a reasonably long sample of 116 The echo canceller processes both the transmit and receive streams sample by 117 sample. The processing function is not declared inline. Unfortunately, 118 cancellation requires many operations per sample, so the call overhead is only 183 /* snapshot sample of coeffs used for development */
|
/linux-4.4.14/drivers/gpu/drm/bridge/ |
H A D | dw_hdmi-ahb-audio.c | 168 u32 b, sample = *src++; dw_hdmi_reformat_iec958() local 170 b = (sample & 8) << (28 - 3); dw_hdmi_reformat_iec958() 172 sample >>= 4; dw_hdmi_reformat_iec958() 174 *dst++ = sample | b; dw_hdmi_reformat_iec958() 178 static u32 parity(u32 sample) parity() argument 180 sample ^= sample >> 16; parity() 181 sample ^= sample >> 8; parity() 182 sample ^= sample >> 4; parity() 183 sample ^= sample >> 2; parity() 184 sample ^= sample >> 1; parity() 185 return (sample & 1) << 27; parity() 205 u32 sample = *src++; dw_hdmi_reformat_s24() local 207 sample &= ~0xff000000; dw_hdmi_reformat_s24() 208 sample |= *cs++ << 24; dw_hdmi_reformat_s24() 209 sample |= parity(sample & ~0xf8000000); dw_hdmi_reformat_s24() 211 *dst++ = sample; dw_hdmi_reformat_s24()
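parity() above computes the even-parity bit of an IEC958 subframe by folding the word onto itself with XOR and placing the result in bit 27. For example, 0xb (three bits set, odd) folds down to 1, while 0x3 (two bits set, even) folds to 0. A standalone restatement with those two checks:

/* Hedged sketch: parity bit via XOR folding, as in parity() above. */
#include <stdint.h>
#include <assert.h>

static uint32_t parity_bit27(uint32_t sample)
{
        sample ^= sample >> 16;
        sample ^= sample >> 8;
        sample ^= sample >> 4;
        sample ^= sample >> 2;
        sample ^= sample >> 1;
        return (sample & 1) << 27;      /* set when the input had odd parity */
}

int main(void)
{
        assert(parity_bit27(0x0000000b) == (1u << 27)); /* 0b1011: three bits set */
        assert(parity_bit27(0x00000003) == 0);          /* 0b0011: even parity */
        return 0;
}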
|
/linux-4.4.14/samples/rpmsg/ |
H A D | rpmsg_client_sample.c | 2 * Remote processor messaging - sample client driver 69 dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n"); rpmsg_sample_remove() 73 { .name = "rpmsg-client-sample" }, 99 MODULE_DESCRIPTION("Remote processor messaging sample client driver");
|
/linux-4.4.14/drivers/staging/iio/accel/ |
H A D | adis16203.h | 15 #define ADIS16203_ALM_SMPL1 0x24 /* Alarm 1, sample period */ 16 #define ADIS16203_ALM_SMPL2 0x26 /* Alarm 2, sample period */ 21 #define ADIS16203_SMPL_PRD 0x36 /* Internal sample period (rate) control */
|
H A D | adis16201.h | 24 #define ADIS16201_ALM_SMPL1 0x24 /* Alarm 1, sample period */ 25 #define ADIS16201_ALM_SMPL2 0x26 /* Alarm 2, sample period */ 30 #define ADIS16201_SMPL_PRD 0x36 /* Internal sample period (rate) control */
|
H A D | adis16209.h | 38 /* Alarm 1, sample period */ 40 /* Alarm 2, sample period */ 50 /* Internal sample period (rate) control */
|
H A D | adis16220.h | 85 /* AIN2 sample > ALM_MAG2 */ 87 /* AIN1 sample > ALM_MAG1 */ 89 /* Acceleration sample > ALM_MAGA */
|
H A D | adis16204.h | 29 #define ADIS16204_SMPL_PRD 0x36 /* Internal sample period (rate) control */
|
/linux-4.4.14/drivers/media/rc/ |
H A D | ene_ir.h | 32 #define ENE_FW_SAMPLE_BUFFER 0xF8F0 /* sample buffer */ 33 #define ENE_FW_SAMPLE_SPACE 0x80 /* sample is space */ 64 #define ENE_FW_SMPL_BUF_FAN_PLS 0x8000 /* combined sample is pulse */ 65 #define ENE_FW_SMPL_BUF_FAN_MSK 0x0FFF /* combined sample maximum value */ 66 #define ENE_FW_SAMPLE_PERIOD_FAN 61 /* fan input has fixed sample period */ 126 /* RLC configuration - sample period (1us resolution) + idle mode */ 134 #define ENE_CIRRLC_OUT_PULSE 0x80 /* Transmitted sample is pulse */ 220 int r_pointer; /* pointer to next sample to read */ 221 int w_pointer; /* pointer to next sample hw will write */ 225 unsigned int tx_sample; /* current sample for TX */ 226 bool tx_sample_pulse; /* current sample is pulse */ 233 /* one more sample pending*/
|
H A D | ir-lirc-codec.c | 36 int sample; ir_lirc_decode() local 48 sample = LIRC_SPACE(LIRC_VALUE_MASK); ir_lirc_decode() 53 sample = LIRC_FREQUENCY(ev.carrier); ir_lirc_decode() 54 IR_dprintk(2, "carrier report (freq: %d)\n", sample); ir_lirc_decode() 69 sample = LIRC_TIMEOUT(ev.duration / 1000); ir_lirc_decode() 70 IR_dprintk(2, "timeout report (duration: %d)\n", sample); ir_lirc_decode() 72 /* Normal sample */ ir_lirc_decode() 92 sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) : ir_lirc_decode() 99 (unsigned char *) &sample); ir_lirc_decode()
|
H A D | nuvoton-cir.h | 8 * sample code upon which portions of this driver are based. Indirect 182 /* select sample period as 50us */ 266 /* select a same sample period like cir register */ 378 /* MCE CIR signal length, related on sample period */ 381 * 43ms / 50us (sample period) * 0.85 (inaccuracy) 386 * 26ms / 50us (sample period) * 0.85 (inaccuracy) 392 * 24ms / 50us (sample period) * 0.85 (inaccuracy)
|
H A D | fintek-cir.c | 295 u8 sample; fintek_process_rx_ir_data() local 300 sample = fintek->buf[i]; fintek_process_rx_ir_data() 303 fintek->cmd = sample; fintek_process_rx_ir_data() 318 fintek->rem = fintek_cmdsize(fintek->cmd, sample); fintek_process_rx_ir_data() 327 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); fintek_process_rx_ir_data() 328 rawir.duration = US_TO_NS((sample & BUF_SAMPLE_MASK) fintek_process_rx_ir_data() 356 u8 sample, status; fintek_get_rx_ir_data() local 366 sample = fintek_cir_reg_read(fintek, CIR_RX_DATA); fintek_get_rx_ir_data() 367 fit_dbg("%s: sample: 0x%02x", __func__, sample); fintek_get_rx_ir_data() 369 fintek->buf[fintek->pkts] = sample; fintek_get_rx_ir_data()
|
H A D | ene_ir.c | 192 /* Read properties of hw sample buffer */ ene_rx_setup_hw_buffer() 284 /* Gets address of next sample from HW ring buffer */ ene_rx_get_sample_reg() 406 /* set sample period*/ ene_rx_setup() 453 input with a maximum sample */ ene_rx_setup() 622 /* TX one sample - must be called with dev->hw_lock*/ ene_tx_sample() 626 u32 sample; ene_tx_sample() local 634 /* Grab next TX sample */ ene_tx_sample() 643 dbg("TX: last sample sent by hardware"); ene_tx_sample() 650 sample = dev->tx_buffer[dev->tx_pos++]; ene_tx_sample() 653 dev->tx_sample = DIV_ROUND_CLOSEST(sample, sample_period); ene_tx_sample() 662 dbg("TX: sample %8d (%s)", raw_tx * sample_period, ene_tx_sample() 780 dbg_verbose("next sample to read at: %04x", reg); ene_isr() 790 /* read high part of the sample */ ene_isr() 1042 /* don't allow too short/long sample periods */ ene_probe() 1199 MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");
|
/linux-4.4.14/sound/core/oss/ |
H A D | mulaw.c | 157 unsigned char *dst, u16 sample) cvt_s16_to_native() 159 sample ^= data->flip; cvt_s16_to_native() 161 sample = swab16(sample); cvt_s16_to_native() 164 memcpy(dst + data->native_ofs, (char *)&sample + data->copy_ofs, cvt_s16_to_native() 194 signed short sample = ulaw2linear(*src); mulaw_decode() local 195 cvt_s16_to_native(data, dst, sample); mulaw_decode() 205 u16 sample = 0; cvt_native_to_s16() local 206 memcpy((char *)&sample + data->copy_ofs, src + data->native_ofs, cvt_native_to_s16() 209 sample = swab16(sample); cvt_native_to_s16() 210 sample ^= data->flip; cvt_native_to_s16() 211 return (signed short)sample; cvt_native_to_s16() 240 signed short sample = cvt_native_to_s16(data, src); mulaw_encode() local 241 *dst = linear2ulaw(sample); mulaw_encode() 156 cvt_s16_to_native(struct mulaw_priv *data, unsigned char *dst, u16 sample) cvt_s16_to_native() argument
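mulaw_decode() above expands each mu-law byte with ulaw2linear() and then lets cvt_s16_to_native() apply the destination format's sign flip and byte order. For reference, the textbook G.711 expansion looks like the sketch below; it is a generic restatement, not necessarily bit-identical to the plugin's own conversion:

/* Hedged sketch: classic G.711 mu-law byte -> signed 16-bit PCM expansion. */
#include <stdint.h>

static int16_t ulaw_expand(uint8_t u_val)
{
        int t;

        u_val = ~u_val;                              /* stored complemented  */
        t = ((u_val & 0x0f) << 3) + 0x84;            /* mantissa plus bias   */
        t <<= (u_val & 0x70) >> 4;                   /* apply the exponent   */

        return (u_val & 0x80) ? (0x84 - t) : (t - 0x84); /* remove bias, sign */
}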
|
H A D | pcm_plugin.h | 36 unsigned int first; /* offset to first sample in bits */ 59 int src_width; /* sample width in bits */ 60 int dst_width; /* sample width in bits */
|
/linux-4.4.14/include/linux/ |
H A D | wm97xx.h | 58 #define WM97XX_DELAY(i) ((i << 4) & 0x00f0) /* sample delay times */ 72 #define WM9712_WAIT 0x0200 /* wait until adc is read before next sample */ 75 #define WM9712_MASK_EDGE 0x0080 /* rising/falling edge on pin delays sample */ 76 #define WM9712_MASK_SYNC 0x00c0 /* rising/falling edge on mask initiates sample */ 89 #define WM9705_WAIT 0x0100 /* wait until adc is read before next sample */ 93 #define WM9705_MASK_EDGE 0x0020 /* rising/falling edge on pin delays sample */ 94 #define WM9705_MASK_SYNC 0x0030 /* rising/falling edge on mask initiates sample */ 145 /*---------------- Return codes from sample reading functions ---------------*/ 147 /* More data is available; call the sample gathering function again */ 149 /* The returned sample is valid */ 232 /* read 1 sample */ 233 int (*poll_sample) (struct wm97xx *, int adcsel, int *sample); 262 /* pre and post sample - can be used to minimise any analog noise */
|
H A D | oprofile.h | 94 * Add a sample. This may be called from any context. 99 * Add an extended sample. Use this when the PC is not from the regs, and 109 * Add an hardware sample. 184 struct op_sample *sample; member in struct:op_entry
|
/linux-4.4.14/tools/perf/util/scripting-engines/ |
H A D | trace-event-perl.c | 248 static void perl_process_tracepoint(struct perf_sample *sample, perl_process_tracepoint() argument 258 int cpu = sample->cpu; perl_process_tracepoint() 259 void *data = sample->raw_data; perl_process_tracepoint() 260 unsigned long long nsecs = sample->time; perl_process_tracepoint() 338 struct perf_sample *sample, perl_process_event_generic() 351 XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); perl_process_event_generic() 352 XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); perl_process_event_generic() 362 struct perf_sample *sample, perl_process_event() 366 perl_process_tracepoint(sample, evsel, al->thread); perl_process_event() 367 perl_process_event_generic(event, sample, evsel); perl_process_event() 603 "# $sample:\tstruct perf_sample\tutil/event.h\n" perl_generate_script() 608 "\tmy ($event, $attr, $sample, $raw_data) = @_;\n" perl_generate_script() 612 "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n" perl_generate_script() 616 "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n" perl_generate_script() 337 perl_process_event_generic(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel) perl_process_event_generic() argument 361 perl_process_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct addr_location *al) perl_process_event() argument
|
H A D | trace-event-python.c | 307 static PyObject *python_process_callchain(struct perf_sample *sample, python_process_callchain() argument 317 if (!symbol_conf.use_callchain || !sample->callchain) python_process_callchain() 321 sample, NULL, NULL, python_process_callchain() 383 static void python_process_tracepoint(struct perf_sample *sample, python_process_tracepoint() argument 395 int cpu = sample->cpu; python_process_tracepoint() 396 void *data = sample->raw_data; python_process_tracepoint() 397 unsigned long long nsecs = sample->time; python_process_tracepoint() 432 callchain = python_process_callchain(sample, evsel, al); python_process_tracepoint() 688 tuple_set_u64(t, 8, es->sample->ip); python_export_sample() 689 tuple_set_u64(t, 9, es->sample->time); python_export_sample() 690 tuple_set_s32(t, 10, es->sample->cpu); python_export_sample() 694 tuple_set_u64(t, 14, es->sample->addr); python_export_sample() 695 tuple_set_u64(t, 15, es->sample->period); python_export_sample() 696 tuple_set_u64(t, 16, es->sample->weight); python_export_sample() 697 tuple_set_u64(t, 17, es->sample->transaction); python_export_sample() 698 tuple_set_u64(t, 18, es->sample->data_src); python_export_sample() 699 tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK); python_export_sample() 700 tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX)); python_export_sample() 767 static void python_process_general_event(struct perf_sample *sample, python_process_general_event() argument 802 PyInt_FromLong(sample->pid)); python_process_general_event() 804 PyInt_FromLong(sample->tid)); python_process_general_event() 806 PyInt_FromLong(sample->cpu)); python_process_general_event() 808 PyLong_FromUnsignedLongLong(sample->ip)); python_process_general_event() 810 PyLong_FromUnsignedLongLong(sample->time)); python_process_general_event() 812 PyLong_FromUnsignedLongLong(sample->period)); python_process_general_event() 813 pydict_set_item_string_decref(dict, "sample", dict_sample); python_process_general_event() 816 (const char *)sample->raw_data, sample->raw_size)); python_process_general_event() 829 callchain = python_process_callchain(sample, evsel, al); python_process_general_event() 843 struct perf_sample *sample, python_process_event() 851 python_process_tracepoint(sample, evsel, al); python_process_event() 856 db_export__sample(&tables->dbe, event, sample, evsel, al); python_process_event() 858 python_process_general_event(sample, evsel, al); python_process_event() 946 SET_TABLE_HANDLER(sample); set_table_handlers() 842 python_process_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct addr_location *al) python_process_event() argument
|
/linux-4.4.14/include/sound/ |
H A D | soundfont.h | 40 struct snd_sf_sample *sample; /* Link to sample */ member in struct:snd_sf_zone 64 struct snd_sf_sample *samples; /* The sample headers */ 68 * Type of the sample access callback 91 int sample_counter; /* last allocated time for sample */ 93 int sample_locked; /* locked time for sample */
|
H A D | sb16_csp.h | 65 int acc_width; /* accepted sample width */ 66 int acc_rates; /* accepted sample rates */ 69 int run_width; /* current sample width */
|
H A D | pcm_params.h | 314 * params_format - get the sample format from the hw params 324 * params_subformat - get the sample subformat from the hw params 345 * params_width - get the number of bits of the sample format from the hw params 348 * This function returns the number of bits per sample that the selected sample 357 * params_physical_width - get the storage size of the sample format from the hw params 360 * This functions returns the number of bits per sample that the selected sample
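params_width() and params_physical_width() above distinguish the bits that carry audio from the bits a sample occupies in memory; together with the channel count they give the frame size a buffer has to provide for. A hedged usage sketch (params_channels() is assumed from the same header):

/* Hedged sketch: bytes occupied by one frame for the chosen hw params. */
static size_t frame_bytes(const struct snd_pcm_hw_params *params)
{
        /* storage width, e.g. 32 bits for S24_LE even though only 24 carry data */
        unsigned int phys_bits = params_physical_width(params);

        return (size_t)params_channels(params) * phys_bits / 8;
}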
|
H A D | wm8996.h | 22 * ReTune Mobile configurations are specified with a label, sample
|
H A D | asoundef.h | 39 #define IEC958_AES0_PRO_FREQ_UNLOCKED (1<<5) /* source sample frequency: 0 = locked, 1 = unlocked */ 40 #define IEC958_AES0_PRO_FS (3<<6) /* mask - sample frequency */ 114 #define IEC958_AES2_PRO_SBITS (7<<0) /* mask - sample bits */ 128 #define IEC958_AES3_CON_FS (15<<0) /* mask - sample frequency */ 145 #define IEC958_AES4_CON_WORDLEN (7<<1) /* mask - sample word length */ 152 #define IEC958_AES4_CON_ORIGFS (15<<4) /* mask - original sample frequency */ 195 #define CEA861_AUDIO_INFOFRAME_DB2SF (7<<2) /* mask - sample frequency */ 204 #define CEA861_AUDIO_INFOFRAME_DB2SS (3<<0) /* mask - sample size */
|
H A D | designware_i2s.h | 29 * @data_width: number of bits per sample (8/16/24/32 bit)
|
H A D | wavefront.h | 370 as a sample type. 397 Because most/all of the sample data we pass in via pointers has 399 disk), it would be nice to allow handling of multi-channel sample 410 0 no channel selection (use channel 1, sample is MONO) 420 of sample data just to select one of them needs to find some tools 540 u16 number; /* patch/sample/prog number */ 550 So, a stereo sample (2 channels) of 558 u16 __user *dataptr; /* actual sample data */ 565 actual sample data.
|
H A D | emu10k1.h | 150 #define INTE_SAMPLERATETRACKER 0x00002000 /* Enable sample rate tracker interrupts */ 170 /* NOTE: Each channel takes 1/64th of a sample */ 200 #define HCFG_AUTOMUTE_ASYNC 0x00008000 /* When set, the async sample rate convertors */ 204 #define HCFG_AUTOMUTE_SPDIF 0x00004000 /* When set, the async sample rate convertors */ 213 #define HCFG_I2S_ASRC_ENABLE 0x00000070 /* When set, enables asynchronous sample rate */ 232 /* the same async sample rate tracker (ZVIDEO) */ 237 #define HCFG_AUTOMUTE 0x00000010 /* When set, the async sample rate convertors */ 297 /* of 1024 sample periods should be allowed */ 299 #define TIMER_RATE_MASK 0x000003ff /* Timer interrupt rate in sample periods */ 449 #define CCR_LOOPFLAG 0x00000100 /* Set for a single sample period when a loop occurs */ 592 #define ADCCR 0x42 /* ADC sample rate/stereo control register */ 600 #define A_ADCCR_SAMPLERATE_MASK 0x0000000F /* Audigy sample rate convertor output rate */ 602 #define ADCCR_SAMPLERATE_48 0x00000000 /* 48kHz sample rate */ 603 #define ADCCR_SAMPLERATE_44 0x00000001 /* 44.1kHz sample rate */ 604 #define ADCCR_SAMPLERATE_32 0x00000002 /* 32kHz sample rate */ 605 #define ADCCR_SAMPLERATE_24 0x00000003 /* 24kHz sample rate */ 606 #define ADCCR_SAMPLERATE_22 0x00000004 /* 22.05kHz sample rate */ 607 #define ADCCR_SAMPLERATE_16 0x00000005 /* 16kHz sample rate */ 608 #define ADCCR_SAMPLERATE_11 0x00000006 /* 11.025kHz sample rate */ 609 #define ADCCR_SAMPLERATE_8 0x00000007 /* 8kHz sample rate */ 610 #define A_ADCCR_SAMPLERATE_12 0x00000006 /* 12kHz sample rate */ 611 #define A_ADCCR_SAMPLERATE_11 0x00000007 /* 11.025kHz sample rate */ 612 #define A_ADCCR_SAMPLERATE_8 0x00000008 /* 8kHz sample rate */ 744 #define SPCS_SAMPLERATE_44 0x00000000 /* 44.1kHz sample rate */ 745 #define SPCS_SAMPLERATE_48 0x02000000 /* 48kHz sample rate */ 746 #define SPCS_SAMPLERATE_32 0x03000000 /* 32kHz sample rate */ 796 #define GPSRCS 0x61 /* General Purpose SPDIF sample rate cvt status */ 798 #define ZVSRCS 0x62 /* ZVideo sample rate converter status */ 800 /* Assumes sample lock */ 870 #define A_SPDIF_SAMPLERATE 0x76 /* Set the sample rate of SPDIF output */ 871 #define A_SAMPLE_RATE 0x76 /* Various sample rate settings. */
|
/linux-4.4.14/drivers/cpufreq/ |
H A D | intel_pstate.c | 67 struct sample { struct 115 struct sample sample; member in struct:cpudata 866 struct sample *sample = &cpu->sample; intel_pstate_calc_busy() local 869 core_pct = int_tofp(sample->aperf) * int_tofp(100); intel_pstate_calc_busy() 870 core_pct = div64_u64(core_pct, int_tofp(sample->mperf)); intel_pstate_calc_busy() 872 sample->freq = fp_toint( intel_pstate_calc_busy() 878 sample->core_pct_busy = (int32_t)core_pct; intel_pstate_calc_busy() 898 cpu->last_sample_time = cpu->sample.time; intel_pstate_sample() 899 cpu->sample.time = ktime_get(); intel_pstate_sample() 900 cpu->sample.aperf = aperf; intel_pstate_sample() 901 cpu->sample.mperf = mperf; intel_pstate_sample() 902 cpu->sample.tsc = tsc; intel_pstate_sample() 903 cpu->sample.aperf -= cpu->prev_aperf; intel_pstate_sample() 904 cpu->sample.mperf -= cpu->prev_mperf; intel_pstate_sample() 905 cpu->sample.tsc -= cpu->prev_tsc; intel_pstate_sample() 940 * the last sample period. intel_pstate_get_scaled_busy() 943 * performance to what we requested during the last sample intel_pstate_get_scaled_busy() 947 core_busy = cpu->sample.core_pct_busy; intel_pstate_get_scaled_busy() 955 * is significantly greater (3x) than our sample interval. If it intel_pstate_get_scaled_busy() 960 duration_us = ktime_us_delta(cpu->sample.time, intel_pstate_get_scaled_busy() 977 struct sample *sample; intel_pstate_adjust_busy_pstate() local 989 sample = &cpu->sample; intel_pstate_adjust_busy_pstate() 990 trace_pstate_sample(fp_toint(sample->core_pct_busy), intel_pstate_adjust_busy_pstate() 994 sample->mperf, intel_pstate_adjust_busy_pstate() 995 sample->aperf, intel_pstate_adjust_busy_pstate() 996 sample->tsc, intel_pstate_adjust_busy_pstate() 997 sample->freq); intel_pstate_adjust_busy_pstate() 1090 struct sample *sample; intel_pstate_get() local 1096 sample = &cpu->sample; intel_pstate_get() 1097 return sample->freq; intel_pstate_get()
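intel_pstate_calc_busy() above is plain fixed-point arithmetic: busy percentage is the APERF delta over the MPERF delta, scaled by 100 and kept in a fractional integer format via int_tofp()/fp_toint(). A standalone restatement, assuming an 8-bit fraction (the driver's actual FRAC_BITS and its 64-bit division helper are not visible in this excerpt):

/* Hedged sketch: APERF/MPERF busy percentage in 24.8 fixed point. */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8                                  /* assumed fraction width */
#define int_tofp(X)  ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X)  ((X) >> FRAC_BITS)

static int64_t core_pct_busy(uint64_t aperf_delta, uint64_t mperf_delta)
{
        int64_t core_pct = int_tofp(aperf_delta) * int_tofp(100);

        core_pct /= int_tofp(mperf_delta);           /* result stays in fixed point */
        return core_pct;
}

int main(void)
{
        /* e.g. 750k actual cycles vs 1M reference cycles -> 75% busy */
        printf("%d%%\n", (int)fp_toint(core_pct_busy(750000, 1000000)));
        return 0;
}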
|
/linux-4.4.14/tools/testing/selftests/powerpc/pmu/ebb/ |
H A D | lost_exception_test.c | 44 * We want a low sample period, but we also want to get out of the EBB test_body() 65 /* Change the sample period slightly to try and hit the race */ test_body() 88 /* We vary our sample period so we need extra fudge here */ test_body()
|
H A D | back_to_back_ebbs_test.c | 19 * We do this by counting with a stupidly low sample period, causing us to
|
/linux-4.4.14/arch/s390/include/asm/ |
H A D | perf_event.h | 43 /* Perf pt_regs extension for sample-data-entry indicators */ 45 unsigned char in_guest:1; /* guest sample */ 73 /* Structure for sampling data entries to be passed as perf raw sample data 74 * to user space. Note that raw sample data must be aligned and, thus, might
|
H A D | cpu_mf.h | 22 #define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */ 118 u8 data[]; /* Machine-dependent sample data */ 136 unsigned long long overflow; /* 64 - sample Overflow count */ 268 /* Return pointer to trailer entry of an sample data block */ trailer_entry_ptr() 280 /* Return if the entry in the sample data block table (sdbt)
|
/linux-4.4.14/arch/arm/mach-sa1100/include/mach/ |
H A D | mtd-xip.h | 22 /* we sample OSCR and convert desired delta to usec (1/4 ~= 1000000/3686400) */
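The OS timer behind that comment ticks at 3.6864 MHz, so the exact tick-to-microsecond factor is 1000000/3686400, roughly 0.271; the code trades about 8% of accuracy for a cheap shift by approximating it as 1/4. A one-line sketch of that conversion:

/* Hedged sketch: the cheap OSCR-tick to usec conversion implied by the comment. */
#define OSCR_HZ                 3686400U
#define oscr_ticks_to_usec(t)   ((t) >> 2)  /* /4 ~= *1000000/3686400, ~8% low */
/* e.g. one second's worth of ticks: 3686400 >> 2 = 921600 "usec" vs. the true 1000000 */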
|
H A D | h3xxx.h | 53 #define H3600_GPIO_CLK_SET0 12 /* audio sample rate clock generator */
|
/linux-4.4.14/sound/usb/ |
H A D | format.c | 89 usb_audio_info(chip, "%u:%d : sample bitwidth %d in over sample bytes %d\n", parse_audio_format_i_type() 115 "%u:%d : unsupported sample bitwidth %d in %d bytes\n", parse_audio_format_i_type() 151 * parse the format descriptor and stores the possible sample rates 226 * Helper function to walk the array of sample rate triplets reported by 228 * get to know how many sample rates we have to expect. 249 * for ranges with res == 1, we announce a continuous sample parse_uac2_sample_rate_range() 285 * parse the format descriptor and stores the possible sample rates 303 /* get the number of sample rates first by only fetching 2 bytes */ parse_audio_format_rates_v2() 312 "%s(): unable to retrieve number of sample rates (clock %d)\n", parse_audio_format_rates_v2() 334 "%s(): unable to retrieve sample rate range (clock %d)\n", parse_audio_format_rates_v2() 341 * We just use the return value to know how many sample rates we parse_audio_format_rates_v2() 403 /* gather possible sample rates */ parse_audio_format_i() 404 /* audio class v1 reports possible sample rates as part of the parse_audio_format_i() 466 ret = parse_audio_format_rates_v1(chip, fp, _fmt, 8); /* fmt[8..] sample rates */ parse_audio_format_ii() 510 /* extigy apparently supports sample rates other than 48k snd_usb_parse_audio_format()
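parse_uac2_sample_rate_range() above walks the (min, max, res) triplets returned by the clock source; per the comment at line 249, a res of 1 announces a continuous range, while anything larger describes an arithmetic progression of discrete rates. A hedged sketch of that enumeration (the triplet struct and the res == 0 handling are assumptions, not the driver's exact code):

/* Hedged sketch: count the discrete rates described by UAC2 (min,max,res) triplets. */
struct rate_triplet { unsigned int min, max, res; };

static unsigned int count_uac2_rates(const struct rate_triplet *t, int ntriplets,
                                     int *continuous)
{
        unsigned int nr = 0;
        int i;

        *continuous = 0;
        for (i = 0; i < ntriplets; i++) {
                if (t[i].res == 1) {     /* continuous range, not enumerable */
                        *continuous = 1;
                        continue;
                }
                /* res == 0: guard against division by zero; treated here as a
                 * single rate (assumption, the driver may differ) */
                if (t[i].res == 0 || t[i].max < t[i].min)
                        nr += 1;
                else
                        nr += (t[i].max - t[i].min) / t[i].res + 1;
        }
        return nr;
}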
|
H A D | clock.c | 2 * Clock domain and sample rate management functions 261 * For all kinds of sample rate settings and other device queries, 263 * clock multipliers and sample rate converters may be specified as 308 /* Don't check the sample rate for devices which we know don't set_sample_rate_v1() 404 /* Some devices doesn't respond to sample rate changes while the set_sample_rate_v2()
|
/linux-4.4.14/tools/perf/arch/arm/util/ |
H A D | unwind-libdw.c | 8 struct regs_dump *user_regs = &ui->sample->user_regs; libdw__arch_set_initial_registers()
|
/linux-4.4.14/samples/livepatch/ |
H A D | livepatch-sample.c | 2 * livepatch-sample.c - Kernel Live Patching Sample Module 33 * $ insmod livepatch-sample.ko
|
/linux-4.4.14/arch/m68k/ifpsp060/ |
H A D | fskeleton.S | 61 | simply execute an "rte" as with the sample code below. 75 | The sample routine below simply clears the exception status bit and 94 | The sample routine below simply clears the exception status bit and 113 | The sample routine below simply clears the exception status bit and 132 | The sample routine below simply clears the exception status bit and 151 | The sample routine below simply clears the exception status bit and 170 | The sample routine below simply clears the exception status bit and 188 | The sample routine below clears the exception status bit, clears the NaN 226 | The sample code below enables the FPU, sets the PC field in the exception stack 249 | The sample code below simply executes an "rte".
|
/linux-4.4.14/sound/synth/emux/ |
H A D | soundfont.c | 125 * the actual driver to write sample data to the board or whatever 332 /* probe sample in the current list -- nothing to be loaded */ 338 /* search the specified sample by optarg */ probe_data() 378 * increment sample counter 390 * allocate a new sample list record 408 * delete sample list -- this is an exceptional job. 409 * only the last allocated sample can be deleted. 415 /* only last sample is accepted */ sf_sample_delete() 604 /* look up the sample */ load_info() 605 zone->sample = set_sample(sf, &zone->v); load_info() 658 /* search the specified sample */ 662 struct snd_sf_sample *sample; set_sample() local 664 sample = find_sample(sf, avp->sample); set_sample() 665 if (sample == NULL) set_sample() 668 /* add in the actual sample offsets: set_sample() 670 * from sample pointers. Here we calculate the actual DRAM set_sample() 671 * offset from sample pointers. set_sample() 673 avp->start += sample->v.start; set_sample() 674 avp->end += sample->v.end; set_sample() 675 avp->loopstart += sample->v.loopstart; set_sample() 676 avp->loopend += sample->v.loopend; set_sample() 679 avp->sample_mode = sample->v.mode_flags; set_sample() 681 return sample; set_sample() 684 /* find the sample pointer with the given id in the soundfont */ 694 if (p->v.sample == sample_id) find_sample() 702 * Load sample information, this can include data to be loaded onto 731 if (find_sample(sf, sample_info.sample)) { load_data() 732 /* if shared sample, skip this data */ load_data() 738 /* Allocate a new sample structure */ load_data() 839 * sample pitch offset for the specified sample rate 975 smp->v.sample = sample_id; load_guspatch() 1033 zone->v.sample = sample_id; /* the last sample */ load_guspatch() 1117 zone->sample = set_sample(sf, &zone->v); load_guspatch() 1157 if (! cur->mapped && cur->sample == NULL) { rebuild_presets() 1158 /* try again to search the corresponding sample */ rebuild_presets() 1159 cur->sample = set_sample(sf, &cur->v); rebuild_presets() 1160 if (cur->sample == NULL) rebuild_presets()
|
/linux-4.4.14/drivers/mtd/nand/gpmi-nand/ |
H A D | gpmi-lib.c | 393 * The clock's period affects the sample delay in a number of ways: gpmi_nfc_compute_hardware_timing() 395 * (1) The NFC HAL tells us the maximum clock period the sample delay gpmi_nfc_compute_hardware_timing() 399 * (2) We need to convert from an ideal sample delay, in ns, to a gpmi_nfc_compute_hardware_timing() 400 * "sample delay factor," which the NFC uses. This factor depends on gpmi_nfc_compute_hardware_timing() 409 * SDF is the sample delay factor, which is dimensionless. gpmi_nfc_compute_hardware_timing() 459 * Compute the maximum sample delay the NFC allows, under current gpmi_nfc_compute_hardware_timing() 460 * conditions. If the clock is running too slowly, no sample delay is gpmi_nfc_compute_hardware_timing() 467 * Compute the delay implied by the largest sample delay factor gpmi_nfc_compute_hardware_timing() 475 * Check if the implied sample delay is larger than the NFC gpmi_nfc_compute_hardware_timing() 489 * sample delay. gpmi_nfc_compute_hardware_timing() 495 * The ideal sample delay may be greater than the maximum gpmi_nfc_compute_hardware_timing() 496 * allowed by the NFC. If so, we can trade off sample delay time gpmi_nfc_compute_hardware_timing() 501 * the sample delay until we've satisfied the constraints or gpmi_nfc_compute_hardware_timing() 516 * Compute the sample delay factor that corresponds most closely gpmi_nfc_compute_hardware_timing() 517 * to the ideal sample delay. If the result is too large for the gpmi_nfc_compute_hardware_timing() 521 * sample delay factor. We do this because the form of the gpmi_nfc_compute_hardware_timing() 586 * When control arrives here, the eye is open. The ideal time to sample gpmi_nfc_compute_hardware_timing() 624 * (1) The *ideal* sample delay time must not be negative. If it is, we gpmi_nfc_compute_hardware_timing() 627 * (2) The *ideal* sample delay time must not be greater than that gpmi_nfc_compute_hardware_timing() 633 * (3) The *quantized* sample delay time must not fall either before the gpmi_nfc_compute_hardware_timing() 638 /* Jam a negative ideal sample delay to zero. */ gpmi_nfc_compute_hardware_timing() 643 * Extend the data setup as needed to reduce the ideal sample delay gpmi_nfc_compute_hardware_timing() 657 * Decrease the ideal sample delay by one half cycle, to keep it gpmi_nfc_compute_hardware_timing() 662 /* Jam a negative ideal sample delay to zero. */ gpmi_nfc_compute_hardware_timing() 668 * Compute the sample delay factor that corresponds to the ideal sample gpmi_nfc_compute_hardware_timing() 672 * Notice that we use the ns_to_cycles function to compute the sample gpmi_nfc_compute_hardware_timing() 685 * continuously evaluate whether or not the data sample delay is inside gpmi_nfc_compute_hardware_timing() 699 * While the quantized sample time falls outside the eye, reduce the gpmi_nfc_compute_hardware_timing() 700 * sample delay or extend the data setup to move the sampling point back gpmi_nfc_compute_hardware_timing() 707 * If control arrives here, the quantized sample delay falls gpmi_nfc_compute_hardware_timing() 713 * If control arrives here, the quantized sample delay gpmi_nfc_compute_hardware_timing() 723 * If control arrives here, the quantized sample delay falls gpmi_nfc_compute_hardware_timing() 724 * before the eye opens. Shift the sample point by increasing gpmi_nfc_compute_hardware_timing() 736 * Decrease the ideal sample delay by one half cycle, to keep it gpmi_nfc_compute_hardware_timing() 744 /* Jam a negative ideal sample delay to zero. */ gpmi_nfc_compute_hardware_timing() 749 * We have a new ideal sample delay, so re-compute the quantized gpmi_nfc_compute_hardware_timing() 794 * The GPMI implements a feedback read strobe to sample the read data. 1043 /* If no sample delay is called for, return immediately. */ gpmi_begin()
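The comments above describe turning an ideal sample delay in nanoseconds into the NFC's dimensionless "sample delay factor". A rough standalone sketch of that conversion follows; the 1/8-of-a-clock-period step size, the rounding, and all names are assumptions for illustration only, not the GPMI hardware's actual scaling.

        /* Sketch: quantize an ideal sample delay (ns) into a dimensionless
         * delay factor, assuming the hardware delays in steps of one eighth
         * of a clock period and caps the factor at some maximum.
         */
        #include <stdio.h>

        static unsigned int ns_to_sample_delay_factor(int ideal_delay_ns,
                                                      unsigned int clock_period_ns,
                                                      unsigned int max_factor)
        {
                unsigned int factor;

                if (ideal_delay_ns < 0)         /* jam a negative delay to zero */
                        ideal_delay_ns = 0;

                /* round to the nearest step of clock_period / 8 */
                factor = (ideal_delay_ns * 8 + clock_period_ns / 2) / clock_period_ns;

                if (factor > max_factor)        /* never exceed what the NFC allows */
                        factor = max_factor;

                return factor;
        }

        int main(void)
        {
                /* e.g. a 12 ns ideal delay on a 10 ns clock -> factor of about 10 */
                printf("SDF = %u\n", ns_to_sample_delay_factor(12, 10, 15));
                return 0;
        }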
|
/linux-4.4.14/sound/pci/ |
H A D | ad1889.h | 36 #define AD_DS_WAS 0x08 /* wave channel sample rate */ 37 #define AD_DS_WAS_WAS 0xffff /* sample rate mask */ 39 #define AD_DS_RES 0x0a /* resampler channel sample rate */ 40 #define AD_DS_RES_RES 0xffff /* sample rate mask */ 152 #define AD_AC97_SR0 0x178 /* sample rate 0, 0xbb80 == 48K */ 154 #define AD_AC97_SR1 0x17a /* sample rate 1 */ 160 #define AD_AC97_ACIC_VSRM 0x0008 /* variable sample rate mode */
|
/linux-4.4.14/sound/pci/emu10k1/ |
H A D | emu10k1_patch.c | 35 * allocate a sample block and copy data from userspace 54 "emu: rom font for sample %d\n", sp->v.sample); snd_emu10k1_sample_new() 156 /* add sample pointer */ snd_emu10k1_sample_new() 161 /* loopend -> sample end */ snd_emu10k1_sample_new() 179 /* if no blank loop is attached in the sample, add it */ snd_emu10k1_sample_new() 213 * free a sample block
|
H A D | p16v.h | 50 * Find out how to change capture sample rates. E.g. To record SPDIF at 48000Hz. 85 /* The sample rate of the SPDIF outputs is set by modifying a register in the EMU10K2 PTR register A_SPDIF_SAMPLERATE. 86 * The sample rate is also controlled by the same registers that control the rate of the EMU10K2 sample rate converters. 162 #define CAPTURE_RATE_STATUS 0x17 /* Capture sample rate. Read only */ 164 * [18:16] Channel 0 Detected sample rate. 0 - 44.1khz 170 * [22:20] Channel 1 Detected sample rate. 172 * [26:24] Channel 2 Detected sample rate. 174 * [30:28] Channel 3 Detected sample rate. 178 #define PLAYBACK_LAST_SAMPLE 0x20 /* The sample currently being played. Read only */ 220 /* SRC48 and SRCMULTI sample rate select and output select. */
|
H A D | emu10k1_callback.c | 266 /* check if sample is finished playing (non-looping only) */ lookup_voices() 424 unsigned int val, sample; start_voice() 427 sample = 0x80808080; start_voice() 429 sample = 0; start_voice() 435 snd_emu10k1_ptr_write(hw, CDE, ch, sample); start_voice() 436 snd_emu10k1_ptr_write(hw, CDF, ch, sample); start_voice()
|
H A D | timer.c | 71 .resolution = 20833, /* 1 sample @ 48KHZ = 20.833...us */
|
/linux-4.4.14/drivers/macintosh/ |
H A D | windfarm_pid.h | 12 * for CPU control with 2 input sample types (temp and power) 37 int index; /* index of current sample */ 46 extern s32 wf_pid_run(struct wf_pid_state *st, s32 sample);
|
H A D | rack-meter.c | 39 /* Number of samples in a sample buffer */ 315 u32 sample = 0; rackmeter_calc_sample() local 318 sample >>= 1; rackmeter_calc_sample() 319 sample |= ((rm->ubuf[led] >= 0x80) << 15); rackmeter_calc_sample() 321 return (sample << 17) | (sample >> 15); rackmeter_calc_sample() 357 /* Fill it now. This routine converts the 8 bits depth sample array rackmeter_irq()
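For readability, the bit-packing fragments above are restated as one self-contained function below; the 16-entry buffer, the 0x80 threshold, and the final shift follow the excerpt, while the names and the test vector are made up.

        /* Sketch: threshold each 8-bit level at 0x80, shift it into a 16-bit
         * on/off pattern, then place the pattern as in the excerpt.
         */
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t calc_sample(const uint8_t levels[16])
        {
                uint32_t sample = 0;

                for (int led = 0; led < 16; led++) {
                        sample >>= 1;
                        sample |= ((uint32_t)(levels[led] >= 0x80)) << 15;
                }
                return (sample << 17) | (sample >> 15);
        }

        int main(void)
        {
                uint8_t levels[16] = { 0xff, 0x00, 0x90, 0x10 };  /* rest are 0 */

                printf("0x%08x\n", (unsigned)calc_sample(levels));
                return 0;
        }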
|
/linux-4.4.14/net/dccp/ccids/lib/ |
H A D | packet_history.c | 402 * to compute a sample with given data - calling function should check this. 406 u32 sample = 0, tfrc_rx_hist_sample_rtt() local 412 sample = SUB16(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval, tfrc_rx_hist_sample_rtt() 414 if (sample) tfrc_rx_hist_sample_rtt() 415 sample = 4 / sample * tfrc_rx_hist_sample_rtt() 434 sample = ktime_to_us(net_timedelta(tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp)); tfrc_rx_hist_sample_rtt() 440 if (unlikely(sample > DCCP_SANE_RTT_MAX)) { tfrc_rx_hist_sample_rtt() 441 DCCP_WARN("RTT sample %u too large, using max\n", sample); tfrc_rx_hist_sample_rtt() 442 sample = DCCP_SANE_RTT_MAX; tfrc_rx_hist_sample_rtt() 448 return sample; tfrc_rx_hist_sample_rtt()
|
/linux-4.4.14/sound/isa/wavefront/ |
H A D | wavefront_synth.c | 16 * including patch, sample and program loading and unloading, conversion 19 * sample management as well. 153 { 0x01, "Bad sample number" }, 154 { 0x02, "Out of sample memory" }, 158 { 0x0B, "No sample load request pending" }, 187 { WFC_DOWNLOAD_SAMPLE, "download sample", 190 { WFC_DOWNLOAD_SAMPLE_HEADER, "download sample header", 192 { WFC_UPLOAD_SAMPLE_HEADER, "upload sample header", 13, 2, 0 }, 208 { WFC_DOWNLOAD_SAMPLE_ALIAS, "download sample alias", 210 { WFC_UPLOAD_SAMPLE_ALIAS, "upload sample alias", WF_ALIAS_BYTES, 2, 0}, 211 { WFC_DELETE_SAMPLE, "delete sample", 0, 2, NEEDS_ACK }, 212 { WFC_IDENTIFY_SAMPLE_TYPE, "identify sample type", 5, 2, 0 }, 213 { WFC_UPLOAD_SAMPLE_PARAMS, "upload sample parameters" }, 597 WaveFront: sample, patch and program management. 625 /* check sample status */ wavefront_get_sample_status() 628 snd_printk ("cannot request sample count.\n"); wavefront_get_sample_status() 640 snd_printk(KERN_WARNING "cannot identify sample " wavefront_get_sample_status() 666 snd_printk ("unknown sample type for " wavefront_get_sample_status() 825 /* XXX need to mark SLOT_USED for sample used by wavefront_send_program() 867 and used conventionally to refer to sample sizes, wavefront_send_sample() 886 DPRINT (WF_DEBUG_LOAD_PATCH, "sample %sdownload for slot %d, " wavefront_send_sample() 899 snd_printk ("unspecified sample => %d\n", x); wavefront_send_sample() 906 on the ROM samples should cover just the sample data or wavefront_send_sample() 907 the sample header. For now, it only covers the sample data, wavefront_send_sample() 908 so anyone is free at all times to rewrite sample headers. wavefront_send_sample() 910 My reason for this is that we have the sample headers wavefront_send_sample() 912 can always be reset if needed. The sample data, however, wavefront_send_sample() 925 a copy of the patch/program/sample header data. wavefront_send_sample() 930 snd_printk ("sample slot %d " wavefront_send_sample() 945 "load %d byte sample.\n", wavefront_send_sample() 1002 is always half the size of the sample data in bytes. wavefront_send_sample() 1009 so, build the sample header right here. wavefront_send_sample() 1052 snd_printk ("sample %sdownload refused.\n", wavefront_send_sample() 1094 /* 8 bit sample wavefront_send_sample() 1106 /* 16 bit sample wavefront_send_sample() 1138 snd_printk ("upload sample " wavefront_send_sample() 1142 snd_printk ("upload sample " wavefront_send_sample() 1152 /* Note, label is here because sending the sample header shouldn't wavefront_send_sample() 1221 DPRINT(WF_DEBUG_LOAD_PATCH|WF_DEBUG_DATA, "sample[%d] = %d\n", wavefront_send_multisample() 1277 "during sample loop.\n"); wavefront_fetch_multisample() 1284 "during sample loop.\n"); wavefront_fetch_multisample() 1292 DPRINT (WF_DEBUG_DATA, "msample sample[%d] = %d\n", wavefront_fetch_multisample() 1337 snd_printk ("no free sample slots!\n"); 1384 case WF_ST_SAMPLE: /* sample or sample_header, based on patch->size */ wavefront_load_patch() 1564 snd_printk ("support for sample alias upload " wavefront_synth_control() 1607 "sample aliases still " wavefront_synth_control() 2069 /* SETUPSND.EXE asks for sample memory config here, but since i wavefront_do_reset()
|
/linux-4.4.14/drivers/media/i2c/ |
H A D | tlv320aic23b.c | 79 case 32000: /* set sample rate to 32 kHz */ tlv320aic23b_s_clock_freq() 82 case 44100: /* set sample rate to 44.1 kHz */ tlv320aic23b_s_clock_freq() 85 case 48000: /* set sample rate to 48 kHz */ tlv320aic23b_s_clock_freq() 174 /* set sample rate to 48 kHz */ tlv320aic23b_probe()
|
/linux-4.4.14/drivers/media/usb/msi2500/ |
H A D | msi2500.c | 154 u32 sample; /* for sample rate calc */ member in struct:msi2500_dev 180 * | 00- 03 | sequence number of first sample in that USB packet 186 * signed 8-bit sample 193 * | 00- 03 | sequence number of first sample in that USB packet 228 * for bit shifting sample by given bits, increasing actual sampling resolution. 237 * | 00- 03 | sequence number of first sample in that USB packet 243 * signed 12-bit sample 249 * | 00- 03 | sequence number of first sample in that USB packet 255 * signed 14-bit sample 262 u32 sample[3]; msi2500_convert_stream() local 268 sample[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | msi2500_convert_stream() 270 if (i == 0 && dev->next_sample != sample[0]) { msi2500_convert_stream() 273 sample[0] - dev->next_sample, msi2500_convert_stream() 275 sample[0]); msi2500_convert_stream() 298 dev->next_sample = sample[i] + 504; msi2500_convert_stream() 320 dev->next_sample = sample[i] + 252; msi2500_convert_stream() 330 dev->next_sample = sample[i] + 384; msi2500_convert_stream() 337 dev->next_sample = sample[i] + 504; msi2500_convert_stream() 344 dev->next_sample = sample[i] + 336; msi2500_convert_stream() 351 dev->next_sample = sample[i] + 252; msi2500_convert_stream() 358 /* calculate sample rate and output it in 10 seconds intervals */ msi2500_convert_stream() 363 unsigned int samples = dev->next_sample - dev->sample; msi2500_convert_stream() 366 dev->sample = dev->next_sample; msi2500_convert_stream() 367 dev_dbg(dev->dev, "size=%u samples=%u msecs=%u sample rate=%lu\n", msi2500_convert_stream()
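A small sketch of the sequence-number bookkeeping described above: each USB packet announces the sequence number of its first sample, so lost samples appear as a gap between the expected and received sequence numbers, and the effective sample rate falls out of the sequence delta over elapsed time. Structure and function names here are invented; the 504-samples-per-packet figure is taken from the excerpt.

        #include <stdint.h>
        #include <stdio.h>

        struct stream_state {
                uint32_t next_seq;      /* sequence number expected next */
                uint32_t rate_base;     /* sequence number at last rate report */
        };

        static void handle_packet(struct stream_state *st, uint32_t seq,
                                  unsigned int samples_in_packet)
        {
                if (seq != st->next_seq)        /* gap => dropped samples */
                        printf("lost %u samples\n", (unsigned)(seq - st->next_seq));

                st->next_seq = seq + samples_in_packet;
        }

        static unsigned long report_rate(struct stream_state *st, unsigned int msecs)
        {
                unsigned int samples = st->next_seq - st->rate_base;

                st->rate_base = st->next_seq;
                return (unsigned long)samples * 1000 / msecs;   /* samples per second */
        }

        int main(void)
        {
                struct stream_state st = { 0, 0 };

                handle_packet(&st, 0, 504);
                handle_packet(&st, 504, 504);
                handle_packet(&st, 1512, 504);  /* one packet of 504 samples lost */
                /* pretend the three packets spanned one second */
                printf("rate ~ %lu samples/s\n", report_rate(&st, 1000));
                return 0;
        }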
|
/linux-4.4.14/include/uapi/linux/ |
H A D | sound.h | 16 #define SND_DEV_DSP16 5 /* Like /dev/dsp but 16 bits/sample */
|
/linux-4.4.14/arch/arm/mach-pxa/include/mach/ |
H A D | mtd-xip.h | 22 /* we sample OSCR and convert desired delta to usec (1/4 ~= 1000000/3686400) */
|
/linux-4.4.14/drivers/iio/adc/ |
H A D | ad7266.c | 47 __be16 sample[2]; member in struct:ad7266_state::__anon5122 55 return spi_read(st->spi, &st->data.sample[0], 2); ad7266_wakeup() 61 return spi_read(st->spi, &st->data.sample[0], 1); ad7266_powerdown() 90 ret = spi_read(st->spi, st->data.sample, 4); ad7266_trigger_handler() 143 *val = be16_to_cpu(st->data.sample[address % 2]); ad7266_read_single() 448 st->single_xfer[0].rx_buf = &st->data.sample[0]; ad7266_probe() 452 st->single_xfer[1].rx_buf = st->data.sample; ad7266_probe() 456 st->single_xfer[2].tx_buf = &st->data.sample[0]; ad7266_probe()
|
H A D | ad_sigma_delta.c | 251 unsigned int sample, raw_sample; ad_sigma_delta_single_conversion() local 295 sample = raw_sample >> chan->scan_type.shift; ad_sigma_delta_single_conversion() 296 sample &= (1 << chan->scan_type.realbits) - 1; ad_sigma_delta_single_conversion() 297 *val = sample; ad_sigma_delta_single_conversion()
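The shift-and-mask step excerpted above is restated as a standalone helper purely for clarity; the conversion result sits inside a wider register word, so it is shifted down and masked to the channel's real bit width (parameter names are illustrative).

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t extract_sample(uint32_t raw, unsigned int shift,
                                       unsigned int realbits)
        {
                /* drop the alignment bits, then keep only the real sample bits */
                return (raw >> shift) & ((1u << realbits) - 1);
        }

        int main(void)
        {
                /* a 12-bit result left-aligned in a 16-bit word (shift = 4) */
                printf("%u\n", (unsigned)extract_sample(0xABC0, 4, 12));  /* 2748 */
                return 0;
        }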
|
/linux-4.4.14/drivers/net/wireless/ath/ |
H A D | spectral_common.h | 28 /* FFT sample format given to userspace via debugfs. 31 * other fields after adding another sample type
|
/linux-4.4.14/drivers/isdn/i4l/ |
H A D | isdn_audio.c | 176 #define AMP_BITS 9 /* bits per sample, reduced to avoid overflow */ 242 isdn_audio_linear2ulaw(int sample) isdn_audio_linear2ulaw() argument 268 /* Get the sample into sign-magnitude. */ isdn_audio_linear2ulaw() 269 sign = (sample >> 8) & 0x80; /* set aside the sign */ isdn_audio_linear2ulaw() 271 sample = -sample; /* get magnitude */ isdn_audio_linear2ulaw() 272 if (sample > CLIP) isdn_audio_linear2ulaw() 273 sample = CLIP; /* clip the magnitude */ isdn_audio_linear2ulaw() 276 sample = sample + BIAS; isdn_audio_linear2ulaw() 277 exponent = exp_lut[(sample >> 7) & 0xFF]; isdn_audio_linear2ulaw() 278 mantissa = (sample >> (exponent + 3)) & 0x0F; isdn_audio_linear2ulaw() 448 isdn_audio_goertzel(int *sample, modem_info *info) isdn_audio_goertzel() argument 469 sk = sample[n] + ((cos2pik[k] * sk1) >> 15) - sk2; isdn_audio_goertzel()
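The Goertzel fragment above keeps 2*cos(2*pi*f/fs) as a Q15 fixed-point constant and shifts instead of dividing. A plain floating-point version of the same recurrence is sketched below to make the algorithm readable; the 8 kHz rate and 205-sample block are typical DTMF detector choices, not necessarily this driver's exact parameters.

        #include <math.h>
        #include <stdio.h>

        /* squared magnitude of the DFT bin nearest `freq` */
        static double goertzel_power(const short *x, int n, double freq, double fs)
        {
                double coeff = 2.0 * cos(2.0 * M_PI * freq / fs);
                double s1 = 0.0, s2 = 0.0;

                for (int i = 0; i < n; i++) {
                        double s0 = x[i] + coeff * s1 - s2;     /* the recurrence */
                        s2 = s1;
                        s1 = s0;
                }
                return s1 * s1 + s2 * s2 - coeff * s1 * s2;
        }

        int main(void)
        {
                short x[205];

                for (int i = 0; i < 205; i++)   /* synthesize a 697 Hz DTMF row tone */
                        x[i] = (short)(1000.0 * sin(2.0 * M_PI * 697.0 * i / 8000.0));

                printf("697 Hz power: %g\n", goertzel_power(x, 205, 697.0, 8000.0));
                return 0;       /* link with -lm */
        }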
|
/linux-4.4.14/kernel/time/ |
H A D | posix-cpu-timers.c | 80 * given the current clock sample. 181 unsigned long long *sample) cpu_clock_sample() 187 *sample = prof_ticks(p); cpu_clock_sample() 190 *sample = virt_ticks(p); cpu_clock_sample() 193 *sample = task_sched_runtime(p); cpu_clock_sample() 264 unsigned long long *sample) cpu_clock_sample_group() 273 *sample = cputime_to_expires(cputime.utime + cputime.stime); cpu_clock_sample_group() 277 *sample = cputime_to_expires(cputime.utime); cpu_clock_sample_group() 281 *sample = cputime.sum_exec_runtime; cpu_clock_sample_group() 564 unsigned long long *sample) cpu_timer_sample_group() 573 *sample = cputime_to_expires(cputime.utime + cputime.stime); cpu_timer_sample_group() 576 *sample = cputime_to_expires(cputime.utime); cpu_timer_sample_group() 579 *sample = cputime.sum_exec_runtime; cpu_timer_sample_group() 665 * We need to sample the current value to convert the new posix_cpu_timer_set() 668 * timer, we need a sample to balance the thread expiry posix_cpu_timer_set() 670 * check if it's already passed. In short, we need a sample. posix_cpu_timer_set() 805 * We can't even collect a sample any more. posix_cpu_timer_get() 1056 * Fetch the current sample and update the timer's expiry time. posix_cpu_timer_schedule() 1077 * We can't even collect a sample any more. posix_cpu_timer_schedule() 1109 * @sample: The task_cputime structure to be checked for expiration. 1110 * @expires: Expiration times, against which @sample will be checked. 1112 * Checks @sample against @expires to see if any field of @sample has expired. 1116 static inline int task_cputime_expired(const struct task_cputime *sample, task_cputime_expired() argument 1119 if (expires->utime && sample->utime >= expires->utime) task_cputime_expired() 1121 if (expires->stime && sample->utime + sample->stime >= expires->stime) task_cputime_expired() 1124 sample->sum_exec_runtime >= expires->sum_exec_runtime) task_cputime_expired() 180 cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, unsigned long long *sample) cpu_clock_sample() argument 262 cpu_clock_sample_group(const clockid_t which_clock, struct task_struct *p, unsigned long long *sample) cpu_clock_sample_group() argument 562 cpu_timer_sample_group(const clockid_t which_clock, struct task_struct *p, unsigned long long *sample) cpu_timer_sample_group() argument
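The expiry test excerpted at the end of the entry above, pulled together into readable form; the struct and types below are simplified stand-ins for the kernel's task_cputime, and a zero limit means "no limit", as in the excerpt. Note that the profiling limit is checked against user plus system time.

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        struct cputime_sample {
                uint64_t utime;                 /* user time */
                uint64_t stime;                 /* system time */
                uint64_t sum_exec_runtime;      /* scheduler runtime */
        };

        /* returns true as soon as any non-zero limit has been reached */
        static bool cputime_expired(const struct cputime_sample *sample,
                                    const struct cputime_sample *expires)
        {
                if (expires->utime && sample->utime >= expires->utime)
                        return true;
                if (expires->stime && sample->utime + sample->stime >= expires->stime)
                        return true;
                if (expires->sum_exec_runtime &&
                    sample->sum_exec_runtime >= expires->sum_exec_runtime)
                        return true;
                return false;
        }

        int main(void)
        {
                struct cputime_sample sample = { 40, 70, 0 };
                struct cputime_sample limits = { 0, 100, 0 };   /* only a u+s limit */

                printf("%s\n", cputime_expired(&sample, &limits) ? "expired" : "ok");
                return 0;
        }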
|
/linux-4.4.14/net/ipv4/ |
H A D | tcp_westwood.c | 42 u8 reset_rtt_min; /* Reset RTT min to next RTT sample*/ 87 /* If the filter is empty fill it with the first sample of bandwidth */ westwood_filter() 100 * but all westwood needs is the last sample of srtt. 122 * bandwidth sample westwood_update_window() 132 * 50ms we don't filter but we continue 'building the sample'. westwood_update_window()
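A minimal sketch of the filtering idea referenced above: the first bandwidth sample seeds the estimate, and later samples are blended in with a simple low-pass filter. The 7/8-old plus 1/8-new weighting below is the classic Westwood choice, but treat the exact coefficients as an assumption here.

        #include <stdint.h>
        #include <stdio.h>

        struct bw_filter {
                uint32_t est;           /* smoothed bandwidth estimate */
                int primed;             /* has the filter seen a sample yet? */
        };

        static void bw_filter_update(struct bw_filter *f, uint32_t sample)
        {
                if (!f->primed) {       /* empty filter: take the first sample as-is */
                        f->est = sample;
                        f->primed = 1;
                        return;
                }
                f->est = (7 * f->est + sample) >> 3;
        }

        int main(void)
        {
                struct bw_filter f = { 0, 0 };
                uint32_t samples[] = { 1000, 1200, 800, 1100 };

                for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                        bw_filter_update(&f, samples[i]);
                printf("estimate: %u\n", (unsigned)f.est);
                return 0;
        }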
|
/linux-4.4.14/drivers/iio/imu/ |
H A D | adis16400.h | 66 #define ADIS16400_SMPL_PRD 0x36 /* Internal sample period (rate) control */ 75 #define ADIS16400_ALM_SMPL1 0x44 /* Alarm 1 sample size */ 76 #define ADIS16400_ALM_SMPL2 0x46 /* Alarm 2 sample size */
|
/linux-4.4.14/sound/pci/ctxfi/ |
H A D | ctatc.h | 67 void **srccs; /* SRCs for sample rate conversion */ 80 unsigned int rsr; /* reference sample rate in Hz */ 81 unsigned int msr; /* master sample rate in rsr */
|
/linux-4.4.14/sound/soc/samsung/ |
H A D | h1940_uda1380.c | 102 /* set MCLK division for sample rate */ h1940_hw_params() 108 /* set BCLK division for sample rate */ h1940_hw_params() 114 /* set prescaler division for sample rate */ h1940_hw_params()
|
H A D | rx1950_uda1380.c | 190 /* set MCLK division for sample rate */ rx1950_hw_params() 196 /* set BCLK division for sample rate */ rx1950_hw_params() 202 /* set prescaler division for sample rate */ rx1950_hw_params()
|
H A D | neo1973_wm8753.c | 79 /* set MCLK division for sample rate */ neo1973_hifi_hw_params() 85 /* set codec BCLK division for sample rate */ neo1973_hifi_hw_params() 90 /* set prescaler division for sample rate */ neo1973_hifi_hw_params() 146 /* set codec PCM division for sample rate */ neo1973_voice_hw_params()
|
/linux-4.4.14/kernel/sched/ |
H A D | loadavg.c | 48 * again, being late doesn't lose the delta, just wrecks the sample. 125 * - When we go NO_HZ idle during the window, we can negate our sample 143 * contribution, since we effectively move our sample point to a known 147 * sample, for this cpu (effectively using the idle-delta for this cpu which 204 * If we're still before the sample window, we're done. calc_load_exit_idle() 210 * We woke inside or after the sample window, this means we're already calc_load_exit_idle()
|
/linux-4.4.14/arch/powerpc/oprofile/ |
H A D | op_model_power4.c | 116 * handler. Starting with Power 7+ we only record the sample for power4_reg_setup() 118 * the sample is always recorded. power4_reg_setup() 124 * to zero so the sample will always be power4_reg_setup() 154 * Older CPUs require the MMCRA sample bit to be always set, but newer 392 * save the sample if the SIAR valid bit is power4_handle_interrupt() 394 * always save the sample. power4_handle_interrupt()
|
/linux-4.4.14/drivers/scsi/ |
H A D | t128.h | 32 * equivalent (my sample board had part second sourced from ZILOG). 47 * Note : my sample board *WAS NOT* populated with the SRAM, so this
|
/linux-4.4.14/drivers/spi/ |
H A D | spi-bitbang-txrx.h | 68 /* sample MSB (from slave) on leading edge */ bitbang_txrx_be_cpha0() 101 /* sample MSB (from slave) on trailing edge */ bitbang_txrx_be_cpha1()
|
/linux-4.4.14/include/drm/ |
H A D | i915_component.h | 40 * @sync_audio_rate: set n/cts based on the sample rate 65 * @aud_sample_rate: the array of audio sample rate per port
|
/linux-4.4.14/arch/sparc/include/uapi/asm/ |
H A D | envctrl.h | 20 * Below is sample usage: 73 * Below is a sample application:
|
/linux-4.4.14/sound/pci/echoaudio/ |
H A D | darla24_dsp.c | 131 "set_sample_rate: Error, invalid sample rate %d\n", set_sample_rate() 143 /* Override the sample rate if this card is set to Echo sync. */ set_sample_rate()
|
H A D | echoaudio_dsp.h | 138 Setting the sample rates on Layla24 is somewhat schizophrenic. 145 the future), Layla24 also has "continuous sample rate mode". In this mode, 146 Layla24 can generate any sample rate between 25 and 50 kHz inclusive, or 154 -Set double-speed mode if you want to use sample rates above 50 kHz 170 write the frequency register to change the sample rate. This could be 387 * three bytes per sample; if you had two samples 0x112233 and 0x445566 393 * container. In other words, each sample is a 32-bit signed 501 * Mia sample rate and clock setting constants 649 u32 sample_rate; /* Card sample rate in Hz 0x00c 4 */
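The 24-bit/32-bit container remark above is illustrated below with a generic sign-extension helper. This is not Echoaudio-specific code, just the usual way a packed 24-bit sample is widened into a signed 32-bit value once it is unpacked from its container.

        #include <stdint.h>
        #include <stdio.h>

        static int32_t s24_to_s32(uint32_t s24)
        {
                /* shift the 24-bit value to the top, then arithmetic-shift back;
                 * relies on arithmetic right shift of signed ints (gcc/clang)
                 */
                return (int32_t)(s24 << 8) >> 8;
        }

        int main(void)
        {
                printf("%d\n", (int)s24_to_s32(0x112233));      /*  1122867 */
                printf("%d\n", (int)s24_to_s32(0xFFEEDD));      /*    -4387 */
                return 0;
        }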
|
/linux-4.4.14/drivers/mmc/host/ |
H A D | sunxi-mmc.c | 220 u32 sample; member in struct:sunxi_mmc_clk_delay 669 sclk_dly = host->clk_delays[SDXC_CLK_400K].sample; sunxi_mmc_clk_set_rate() 672 sclk_dly = host->clk_delays[SDXC_CLK_25M].sample; sunxi_mmc_clk_set_rate() 676 sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample; sunxi_mmc_clk_set_rate() 679 sclk_dly = host->clk_delays[SDXC_CLK_50M].sample; sunxi_mmc_clk_set_rate() 902 [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, 903 [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, 904 [SDXC_CLK_50M] = { .output = 90, .sample = 120 }, 905 [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 }, 909 [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, 910 [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, 911 [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, 912 [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, 961 host->clk_sample = devm_clk_get(&pdev->dev, "sample"); sunxi_mmc_resource_request() 963 dev_err(&pdev->dev, "Could not get sample clock\n"); sunxi_mmc_resource_request() 991 dev_err(&pdev->dev, "Enable sample clk err %d\n", ret); sunxi_mmc_resource_request()
|
H A D | dw_mmc-exynos.c | 291 /* Update tuned sample timing */ dw_mci_exynos_set_ios() 381 static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) dw_mci_exynos_set_clksmpl() argument 391 clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample); dw_mci_exynos_set_clksmpl() 403 u8 sample; dw_mci_exynos_move_next_clksmpl() local 411 sample = (clksel + 1) & 0x7; dw_mci_exynos_move_next_clksmpl() 412 clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample); dw_mci_exynos_move_next_clksmpl() 420 return sample; dw_mci_exynos_move_next_clksmpl()
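The tuning step above rotates through the eight possible sample-clock phases; a trivial restatement follows, with the 3-bit field assumption taken from the "& 0x7" in the excerpt (the real register layout is more involved).

        #include <stdint.h>
        #include <stdio.h>

        static uint8_t next_sample_phase(uint8_t phase)
        {
                return (phase + 1) & 0x7;       /* increment modulo 8 */
        }

        int main(void)
        {
                uint8_t phase = 6;

                for (int i = 0; i < 4; i++) {
                        printf("%u ", (unsigned)phase);         /* 6 7 0 1 */
                        phase = next_sample_phase(phase);
                }
                printf("\n");
                return 0;
        }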
|
/linux-4.4.14/drivers/media/pci/cx18/ |
H A D | cx18-av-firmware.c | 159 /* 0xC4000914[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */ cx18_av_loadfw() 166 /* 0xC4000918[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */ cx18_av_loadfw()
|
/linux-4.4.14/drivers/media/dvb-frontends/ |
H A D | atbm8830.h | 46 /* Decoder sample TS data at rising edge of clock */
|
/linux-4.4.14/arch/x86/include/asm/trace/ |
H A D | irq_vectors.h | 79 * 2) generates perf sample
|
/linux-4.4.14/samples/kprobes/ |
H A D | jprobe_example.c | 2 * Here's a sample kernel module showing the use of jprobes to dump
|
/linux-4.4.14/include/linux/spi/ |
H A D | ads7846.h | 44 * per sample */
|
/linux-4.4.14/arch/mips/include/asm/octeon/ |
H A D | cvmx-smix-defs.h | 153 uint64_t sample:4; member in struct:cvmx_smix_clk::cvmx_smix_clk_s 157 uint64_t sample:4; 176 uint64_t sample:4; member in struct:cvmx_smix_clk::cvmx_smix_clk_cn30xx 180 uint64_t sample:4;
|
/linux-4.4.14/arch/mips/mti-sead3/ |
H A D | sead3-time.c | 32 orig = readl(status_reg) & 0x2; /* get original sample */ estimate_cpu_frequency()
|
/linux-4.4.14/sound/isa/sb/ |
H A D | emu8000_patch.c | 119 * write sample word data 143 * Write the sample to EMU8000 memory. This routine is invoked out of 260 /* if no blank loop is attached in the sample, add it */ snd_emu8000_sample_new() 284 * free a sample block
|
/linux-4.4.14/sound/soc/intel/skylake/ |
H A D | skl-nhlt.h | 41 } sample; member in struct:wav_fmt_ext
|
/linux-4.4.14/sound/soc/omap/ |
H A D | omap-mcbsp.h | 28 /* Source clocks for McBSP sample rate generator */
|
/linux-4.4.14/sound/soc/codecs/ |
H A D | wm8727.c | 37 * Note this is a simple chip with no configuration interface, sample rate is
|
H A D | cs4270.c | 160 * @ratio: the ratio of MCLK to the sample rate 188 * never select any sample rates that require divide-by-1.5. 237 * The value of MCLK is used to determine which sample rates are supported 242 * a standard sample rate. If there's a match, then it is added to the list 243 * of supported sample rates. 246 * otherwise the list of supported sample rates will not be available in 250 * theoretically possible sample rates to be enabled. Call it again with a 318 * Specifically, the sample rate and the data format. 353 /* Set the sample rate */ cs4270_hw_params()
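A compact sketch of the rate-discovery logic described above: divide MCLK by each supported ratio and keep the results that land on a standard audio rate. The ratio table and the standard-rate list below are illustrative, not the codec's exact tables.

        #include <stdio.h>

        static const unsigned int ratios[] = { 64, 96, 128, 192, 256, 384, 512, 768, 1024 };
        static const unsigned int std_rates[] = { 8000, 16000, 32000, 44100, 48000, 96000, 192000 };

        static void list_supported_rates(unsigned int mclk)
        {
                for (unsigned i = 0; i < sizeof(ratios) / sizeof(ratios[0]); i++) {
                        unsigned int rate = mclk / ratios[i];

                        for (unsigned j = 0; j < sizeof(std_rates) / sizeof(std_rates[0]); j++)
                                if (rate == std_rates[j])
                                        printf("supported: %u Hz (MCLK/%u)\n",
                                               rate, ratios[i]);
                }
        }

        int main(void)
        {
                list_supported_rates(12288000);  /* e.g. a 12.288 MHz master clock */
                return 0;
        }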
|
/linux-4.4.14/include/linux/iio/ |
H A D | buffer.h | 131 * @data: sample data 132 * @timestamp: timestamp for the sample data 136 * the sample data buffer before pushing it to the device buffers. The sample
|
/linux-4.4.14/drivers/gpu/drm/nouveau/dispnv04/ |
H A D | dac.c | 55 * arbitrary limit to number of sense oscillations tolerated in one sample 61 * arbitrary limit to number of conflicting sample pairs to tolerate at a 196 /* take sample pairs until both samples in the pair agree */ nv04_dac_detect() 242 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); nv17_dac_sample_load() local 311 sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); nv17_dac_sample_load() 313 sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); nv17_dac_sample_load() 332 return sample; nv17_dac_sample_load()
|
/linux-4.4.14/sound/pci/cs46xx/ |
H A D | cs46xx_dsp_scb_types.h | 66 |page|__|__| ___________________________|_________|__page, if !sample-end___________|____| 143 Stream sample pointer & MAC-unit mode for this stream 160 Fractional increment per output sample in the input sample buffer 164 increment per output sample. 282 number of 3 sample triplets to output each 332 hi: Fractional sample accumulator 0.16b 344 hi: Fractional sample correction factor 0.16b 412 16-bit input sample to obtain the 473 16-bit sample msb-aligned (count
|
H A D | dsp_spos.c | 258 /* default SPDIF input sample rate cs46xx_dsp_spos_create() 338 struct dsp_segment_desc *sample) dsp_load_sample() 342 if (!sample) { dsp_load_sample() 344 "dsp_spos: module got no sample segment\n"); dsp_load_sample() 348 doffset = (sample->offset * 4 + DSP_SAMPLE_BYTE_OFFSET); dsp_load_sample() 349 dsize = sample->size * 4; dsp_load_sample() 352 "dsp_spos: downloading sample data to chip (%08x-%08x)\n", dsp_load_sample() 355 if (snd_cs46xx_download (chip,sample->data,doffset,dsize)) { dsp_load_sample() 357 "dsp_spos: failed to sample data to DSP\n"); dsp_load_sample() 390 dev_dbg(chip->card->dev, "dsp_spos: clearing sample area\n"); cs46xx_dsp_load_module() 849 if ((entry = snd_info_create_card_entry(card, "sample", ins->proc_dsp_dir)) != NULL) { cs46xx_dsp_proc_init() 1384 /* input sample converter */ cs46xx_dsp_scb_and_task_init() 1500 sample rate we will use this SRC to adjust it */ cs46xx_dsp_scb_and_task_init() 1584 /* NOTE: The SPDIF input task write the sample in mono cs46xx_dsp_async_init() 1771 /* reset SPDIF input sample buffer pointer */ cs46xx_dsp_enable_spdif_in() 1784 /* set SPDIF input sample rate and unmute cs46xx_dsp_enable_spdif_in() 1978 /* clear parameter, sample and code areas */ cs46xx_dsp_resume() 337 dsp_load_sample(struct snd_cs46xx *chip, struct dsp_segment_desc *sample) dsp_load_sample() argument
|
/linux-4.4.14/drivers/isdn/hardware/eicon/ |
H A D | capidtmf.c | 185 static void capidtmf_goertzel_loop(long *buffer, long *coeffs, short *sample, long count) capidtmf_goertzel_loop() argument 200 q0 = sample[j] - q2 + (c * (q1 >> 16)) + (((dword)(((dword) d) * ((dword)(q1 & 0xffff)))) >> 15); capidtmf_goertzel_loop() 211 q0 = sample[j] - q2 - ((c * (q1 >> 16)) + (((dword)(((dword) d) * ((dword)(q1 & 0xffff)))) >> 15)); capidtmf_goertzel_loop() 226 q0 = sample[j] - q2 + (c * (q1 >> 16)) + (((dword)(((dword)(c >> 1)) * ((dword)(q1 & 0xffff)))) >> 15); capidtmf_goertzel_loop() 237 q0 = sample[j] - q2 - ((c * (q1 >> 16)) + (((dword)(((dword)(c >> 1)) * ((dword)(q1 & 0xffff)))) >> 15)); capidtmf_goertzel_loop()
|
/linux-4.4.14/sound/soc/sh/ |
H A D | ssi.c | 25 * and can be independent from the actual sample bit depth. This is 27 * fixed TDM slot size, regardless of sample resolution. 160 /* DATA WORD LENGTH (DWL): databits in audio sample */ ssi_hw_params() 172 pr_debug("ssi: invalid sample width\n"); ssi_hw_params() 282 ssicr |= CR_SCKP; /* sample data at low clkedge */ ssi_set_fmt()
|
H A D | fsi.c | 141 * A : sample width 16bit setting 142 * B : sample width 24bit setting 164 * period/frame/sample image 175 * ||[ sample ][ sample ]|[ sample ][ sample ]| ... | 185 * | [ sample ] | 186 * | [ sample ] | 187 * | [ sample ] | 188 * | [ sample ] | 211 int fifo_sample_capa; /* sample capacity of FSI FIFO */ 212 int buff_sample_capa; /* sample capacity of ALSA buffer */ 213 int buff_sample_pos; /* sample position of ALSA buffer */ 214 int period_samples; /* sample number / 1 period */ 216 int sample_width; /* sample width */ 1451 * The maximum number of sample data varies depending fsi_fifo_init()
|
/linux-4.4.14/drivers/net/wireless/ath/ath9k/ |
H A D | common-spectral.c | 46 u8 *sample; ath_cmn_max_idx_verify_ht20_fft() local 60 sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1; ath_cmn_max_idx_verify_ht20_fft() 75 if (sample[max_index] != (max_magnitude >> max_exp)) ath_cmn_max_idx_verify_ht20_fft() 85 u8 *sample; ath_cmn_max_idx_verify_ht20_40_fft() local 100 sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1; ath_cmn_max_idx_verify_ht20_40_fft() 124 (sample[upper_max_index] == (upper_mag >> max_exp))) ath_cmn_max_idx_verify_ht20_40_fft() 128 (sample[lower_max_index - dc_pos] == (lower_mag >> max_exp))) ath_cmn_max_idx_verify_ht20_40_fft() 131 if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) || ath_cmn_max_idx_verify_ht20_40_fft() 132 (sample[lower_max_index] != (lower_mag >> max_exp))) ath_cmn_max_idx_verify_ht20_40_fft() 197 * sample and invalid, interpolate it. ath_cmn_process_ht20_fft() 362 * sample and invalid, interpolate it. ath_cmn_process_ht20_40_fft() 566 /* Only a single sample received, no need to look ath_cmn_process_fft() 567 * for the sample's end, do the correction based ath_cmn_process_fft() 581 * the next byte (the first bin of the next sample) ath_cmn_process_fft() 600 * this sample's boundaries, discard any further ath_cmn_process_fft()
|
/linux-4.4.14/net/dccp/ |
H A D | input.c | 427 /* Obtain usec RTT sample from SYN exchange (used by TFRC). */ dccp_rcv_request_sent_state_process() 527 u32 sample = dp->dccps_options_received.dccpor_timestamp_echo; dccp_rcv_respond_partopen_state_process() local 552 /* Obtain usec RTT sample from SYN exchange (used by TFRC). */ dccp_rcv_respond_partopen_state_process() 553 if (likely(sample)) { dccp_rcv_respond_partopen_state_process() 554 long delta = dccp_timestamp() - sample; dccp_rcv_respond_partopen_state_process() 711 * dccp_sample_rtt - Validate and finalise computation of RTT sample 715 * called immediately when the ACK used for the RTT sample arrives. 723 DCCP_WARN("unusable RTT sample %ld, using min\n", delta); dccp_sample_rtt() 727 DCCP_WARN("RTT sample %ld too large, using max\n", delta); dccp_sample_rtt()
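dccp_sample_rtt(), referenced above, validates an RTT sample before it is fed to the congestion-control machinery. The clamping idea is restated below with illustrative bounds; they are not DCCP's actual constants.

        #include <stdio.h>

        #define SANE_RTT_MIN        100         /* 100 us */
        #define SANE_RTT_MAX    3000000         /* 3 s    */

        static unsigned int sanitize_rtt_us(long delta_us)
        {
                if (delta_us < SANE_RTT_MIN) {
                        fprintf(stderr, "unusable RTT sample %ld, using min\n", delta_us);
                        return SANE_RTT_MIN;
                }
                if (delta_us > SANE_RTT_MAX) {
                        fprintf(stderr, "RTT sample %ld too large, using max\n", delta_us);
                        return SANE_RTT_MAX;
                }
                return (unsigned int)delta_us;
        }

        int main(void)
        {
                printf("%u\n", sanitize_rtt_us(25000));  /* a plausible 25 ms sample */
                return 0;
        }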
|
/linux-4.4.14/sound/firewire/dice/ |
H A D | dice-interface.h | 84 * The current sample rate and clock source; read/write. Whether a clock 85 * source or sample rate is supported is device-specific; the internal clock 127 * Status of the sample clock; read-only. 132 /* The actual sample rate; CLOCK_RATE_32000-_192000 or _NONE. */ 185 * Supported sample rates and clock sources; read-only. 357 * Current sample rate (CLOCK_RATE_* >> CLOCK_RATE_SHIFT), _32000-_192000 or
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
H A D | ipath_debug.h | 52 #define __IPATH_TRSAMPLE 0x8 /* generate trace buffer sample entries */ 81 #define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
|
/linux-4.4.14/drivers/media/rc/img-ir/ |
H A D | img-ir-raw.c | 66 * The raw decoders expect to get a final sample even if there are no edges, in 68 * time we use this timer to emit a final sample to satisfy them.
|
/linux-4.4.14/sound/aoa/soundbus/i2sbus/ |
H A D | interface.h | 67 * - external sample frequency interrupt (don't understand) 68 * - external sample frequency
|
/linux-4.4.14/sound/pci/ice1712/ |
H A D | delta.h | 62 #define ICE1712_DELTA_DFS 0x01 /* fast/slow sample rate mode */ 145 #define ICE1712_DELTA_1010LT_WORDCLOCK 0x80 /* sample clock source: 0 = Word Clock Input, 1 = S/PDIF Input ??? */
|
/linux-4.4.14/sound/soc/davinci/ |
H A D | davinci-vcif.c | 63 /* Start the sample generator and enable transmitter/receiver */ davinci_vcif_start() 82 /* Reset transmitter/receiver and sample rate/frame sync generators */ davinci_vcif_stop()
|
/linux-4.4.14/sound/soc/pxa/ |
H A D | raumfeld.c | 72 /* set freq to 0 to enable all possible codec sample rates */ raumfeld_cs4270_startup() 81 /* set freq to 0 to enable all possible codec sample rates */ raumfeld_cs4270_shutdown()
|
/linux-4.4.14/include/linux/iio/adc/ |
H A D | ad_sigma_delta.h | 37 * modify or drop the sample data, it, may be NULL. 39 * if there is just one read-only sample data shift register.
|
/linux-4.4.14/arch/alpha/oprofile/ |
H A D | op_model_ev6.c | 88 /* Record the sample. */ ev6_handle_interrupt()
|
/linux-4.4.14/sound/pci/lx6464es/ |
H A D | lx6464es.h | 94 u32 board_sample_rate; /* sample rate read from
|