trace 174 arch/arc/kernel/stacktrace.c struct stack_trace *trace = arg; trace 176 arch/arc/kernel/stacktrace.c if (trace->skip > 0) trace 177 arch/arc/kernel/stacktrace.c trace->skip--; trace 179 arch/arc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = address; trace 181 arch/arc/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 189 arch/arc/kernel/stacktrace.c struct stack_trace *trace = arg; trace 194 arch/arc/kernel/stacktrace.c if (trace->skip > 0) trace 195 arch/arc/kernel/stacktrace.c trace->skip--; trace 197 arch/arc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = address; trace 199 arch/arc/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 248 arch/arc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 251 arch/arc/kernel/stacktrace.c arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace); trace 254 arch/arc/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 257 arch/arc/kernel/stacktrace.c arc_unwind_core(NULL, NULL, __collect_all, trace); trace 67 arch/arm/kernel/stacktrace.c struct stack_trace *trace; trace 75 arch/arm/kernel/stacktrace.c struct stack_trace *trace = data->trace; trace 86 arch/arm/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr; trace 88 arch/arm/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 96 arch/arm/kernel/stacktrace.c trace->entries[trace->nr_entries++] = regs->ARM_pc; trace 98 arch/arm/kernel/stacktrace.c return trace->nr_entries >= trace->max_entries; trace 103 arch/arm/kernel/stacktrace.c struct stack_trace *trace, unsigned int nosched) trace 108 arch/arm/kernel/stacktrace.c data.trace = trace; trace 109 arch/arm/kernel/stacktrace.c data.skip = trace->skip; trace 138 arch/arm/kernel/stacktrace.c void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) trace 143 arch/arm/kernel/stacktrace.c data.trace = trace; trace 144 arch/arm/kernel/stacktrace.c data.skip = trace->skip; trace 155 arch/arm/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 157 arch/arm/kernel/stacktrace.c __save_stack_trace(tsk, trace, 1); trace 161 arch/arm/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 163 arch/arm/kernel/stacktrace.c __save_stack_trace(current, trace, 0); trace 83 arch/arm/kvm/trace.h #define TRACE_INCLUDE_FILE trace trace 134 arch/arm64/kernel/stacktrace.c struct stack_trace *trace; trace 142 arch/arm64/kernel/stacktrace.c struct stack_trace *trace = data->trace; trace 152 arch/arm64/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr; trace 154 arch/arm64/kernel/stacktrace.c return trace->nr_entries >= trace->max_entries; trace 157 arch/arm64/kernel/stacktrace.c void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) trace 162 arch/arm64/kernel/stacktrace.c data.trace = trace; trace 163 arch/arm64/kernel/stacktrace.c data.skip = trace->skip; trace 172 arch/arm64/kernel/stacktrace.c struct stack_trace *trace, unsigned int nosched) trace 180 arch/arm64/kernel/stacktrace.c data.trace = trace; trace 181 arch/arm64/kernel/stacktrace.c data.skip = trace->skip; trace 201 arch/arm64/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 203 arch/arm64/kernel/stacktrace.c __save_stack_trace(tsk, trace, 1); trace 206 arch/arm64/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 208 
arch/arm64/kernel/stacktrace.c __save_stack_trace(current, trace, 0); trace 35 arch/arm64/kernel/trace-events-emulation.h #define TRACE_INCLUDE_FILE trace-events-emulation trace 213 arch/arm64/kvm/trace.h #define TRACE_INCLUDE_FILE trace trace 9 arch/csky/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 11 arch/csky/kernel/stacktrace.c save_stack_trace_tsk(current, trace); trace 15 arch/csky/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 19 arch/csky/kernel/stacktrace.c int skip = trace->skip; trace 49 arch/csky/kernel/stacktrace.c trace->entries[trace->nr_entries++] = lpp; trace 50 arch/csky/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 22 arch/hexagon/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 27 arch/hexagon/kernel/stacktrace.c int skip = trace->skip; trace 39 arch/hexagon/kernel/stacktrace.c trace->entries[trace->nr_entries++] = frame->rets; trace 40 arch/hexagon/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 15 arch/ia64/kernel/stacktrace.c struct stack_trace *trace = arg; trace 17 arch/ia64/kernel/stacktrace.c int skip = trace->skip; trace 19 arch/ia64/kernel/stacktrace.c trace->nr_entries = 0; trace 25 arch/ia64/kernel/stacktrace.c trace->entries[trace->nr_entries++] = ip; trace 26 arch/ia64/kernel/stacktrace.c if (trace->nr_entries == trace->max_entries) trace 36 arch/ia64/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 38 arch/ia64/kernel/stacktrace.c unw_init_running(ia64_do_save_stack, trace); trace 340 arch/m68k/fpsp040/fpsp.h .set TRACE_VEC,0x2024 | trace trap trace 26 arch/microblaze/include/asm/unwind.h void microblaze_unwind(struct task_struct *task, struct stack_trace *trace); trace 19 arch/microblaze/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 22 arch/microblaze/kernel/stacktrace.c trace->skip += 2; trace 23 arch/microblaze/kernel/stacktrace.c microblaze_unwind(NULL, trace); trace 27 arch/microblaze/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 29 arch/microblaze/kernel/stacktrace.c microblaze_unwind(tsk, trace); trace 157 arch/microblaze/kernel/unwind.c struct stack_trace *trace); trace 165 arch/microblaze/kernel/unwind.c unsigned long fp, struct stack_trace *trace) trace 171 arch/microblaze/kernel/unwind.c unsigned long fp, struct stack_trace *trace) trace 174 arch/microblaze/kernel/unwind.c microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace); trace 191 arch/microblaze/kernel/unwind.c struct stack_trace *trace) trace 221 arch/microblaze/kernel/unwind.c regs->r15, trace); trace 230 arch/microblaze/kernel/unwind.c if (!trace) trace 232 arch/microblaze/kernel/unwind.c unwind_trap(task, pc, fp, trace); trace 238 arch/microblaze/kernel/unwind.c if (trace) { trace 240 arch/microblaze/kernel/unwind.c if (trace->skip > 0) trace 241 arch/microblaze/kernel/unwind.c trace->skip--; trace 243 arch/microblaze/kernel/unwind.c trace->entries[trace->nr_entries++] = pc; trace 245 arch/microblaze/kernel/unwind.c if (trace->nr_entries >= trace->max_entries) trace 286 arch/microblaze/kernel/unwind.c void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) trace 292 arch/microblaze/kernel/unwind.c regs->r15, trace); trace 302 arch/microblaze/kernel/unwind.c cpu_context->r15, trace); trace 317 arch/microblaze/kernel/unwind.c microblaze_unwind_inner(current, pc, fp, 0, trace); trace 
17 arch/mips/kernel/stacktrace.c static void save_raw_context_stack(struct stack_trace *trace, trace 27 arch/mips/kernel/stacktrace.c if (trace->skip > 0) trace 28 arch/mips/kernel/stacktrace.c trace->skip--; trace 30 arch/mips/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr; trace 31 arch/mips/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 37 arch/mips/kernel/stacktrace.c static void save_context_stack(struct stack_trace *trace, trace 50 arch/mips/kernel/stacktrace.c save_raw_context_stack(trace, sp, savesched); trace 55 arch/mips/kernel/stacktrace.c if (trace->skip > 0) trace 56 arch/mips/kernel/stacktrace.c trace->skip--; trace 58 arch/mips/kernel/stacktrace.c trace->entries[trace->nr_entries++] = pc; trace 59 arch/mips/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 65 arch/mips/kernel/stacktrace.c save_raw_context_stack(trace, sp, savesched); trace 72 arch/mips/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 74 arch/mips/kernel/stacktrace.c save_stack_trace_tsk(current, trace); trace 78 arch/mips/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 83 arch/mips/kernel/stacktrace.c WARN_ON(trace->nr_entries || !trace->max_entries); trace 91 arch/mips/kernel/stacktrace.c save_context_stack(trace, tsk, regs, tsk == current); trace 18 arch/mips/kvm/trace.h #define TRACE_INCLUDE_FILE trace trace 9 arch/nds32/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 11 arch/nds32/kernel/stacktrace.c save_stack_trace_tsk(current, trace); trace 15 arch/nds32/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 18 arch/nds32/kernel/stacktrace.c int skip = trace->skip; trace 45 arch/nds32/kernel/stacktrace.c trace->entries[trace->nr_entries++] = lpp; trace 46 arch/nds32/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 17 arch/openrisc/include/asm/unwinder.h void (*trace)(void *data, unsigned long addr, trace 27 arch/openrisc/kernel/stacktrace.c struct stack_trace *trace = data; trace 32 arch/openrisc/kernel/stacktrace.c if (trace->skip > 0) { trace 33 arch/openrisc/kernel/stacktrace.c trace->skip--; trace 37 arch/openrisc/kernel/stacktrace.c if (trace->nr_entries < trace->max_entries) trace 38 arch/openrisc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr; trace 41 arch/openrisc/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 43 arch/openrisc/kernel/stacktrace.c unwind_stack(trace, (unsigned long *) &trace, save_stack_address); trace 50 arch/openrisc/kernel/stacktrace.c struct stack_trace *trace = (struct stack_trace *)data; trace 58 arch/openrisc/kernel/stacktrace.c if (trace->skip > 0) { trace 59 arch/openrisc/kernel/stacktrace.c trace->skip--; trace 63 arch/openrisc/kernel/stacktrace.c if (trace->nr_entries < trace->max_entries) trace 64 arch/openrisc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr; trace 67 arch/openrisc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 76 arch/openrisc/kernel/stacktrace.c unwind_stack(trace, sp, save_stack_address_nosched); trace 81 arch/openrisc/kernel/stacktrace.c save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) trace 83 arch/openrisc/kernel/stacktrace.c unwind_stack(trace, (unsigned long *) regs->sp, trace 61 arch/openrisc/kernel/unwinder.c void (*trace)(void *data, unsigned long addr, int 
reliable)) trace 81 arch/openrisc/kernel/unwinder.c trace(data, frameinfo->ra, reliable); trace 94 arch/openrisc/kernel/unwinder.c void (*trace)(void *data, unsigned long addr, int reliable)) trace 101 arch/openrisc/kernel/unwinder.c trace(data, addr, 0); trace 16 arch/parisc/kernel/stacktrace.c static void dump_trace(struct task_struct *task, struct stack_trace *trace) trace 23 arch/parisc/kernel/stacktrace.c trace->nr_entries = 0; trace 24 arch/parisc/kernel/stacktrace.c while (trace->nr_entries < trace->max_entries) { trace 29 arch/parisc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = info.ip; trace 36 arch/parisc/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 38 arch/parisc/kernel/stacktrace.c dump_trace(current, trace); trace 42 arch/parisc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 44 arch/parisc/kernel/stacktrace.c dump_trace(tsk, trace); trace 241 arch/powerpc/include/asm/trace.h #define TRACE_INCLUDE_FILE trace trace 29 arch/powerpc/kernel/stacktrace.c static void save_context_stack(struct stack_trace *trace, unsigned long sp, trace 43 arch/powerpc/kernel/stacktrace.c if (!trace->skip) trace 44 arch/powerpc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = ip; trace 46 arch/powerpc/kernel/stacktrace.c trace->skip--; trace 49 arch/powerpc/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 56 arch/powerpc/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 62 arch/powerpc/kernel/stacktrace.c save_context_stack(trace, sp, current, 1); trace 66 arch/powerpc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 78 arch/powerpc/kernel/stacktrace.c save_context_stack(trace, sp, tsk, 0); trace 85 arch/powerpc/kernel/stacktrace.c save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) trace 87 arch/powerpc/kernel/stacktrace.c save_context_stack(trace, regs->gpr[1], current, 0); trace 99 arch/powerpc/kernel/stacktrace.c struct stack_trace *trace) trace 195 arch/powerpc/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 197 arch/powerpc/kernel/stacktrace.c if (!trace->skip) trace 198 arch/powerpc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = ip; trace 200 arch/powerpc/kernel/stacktrace.c trace->skip--; trace 206 arch/powerpc/kernel/stacktrace.c struct stack_trace *trace) trace 217 arch/powerpc/kernel/stacktrace.c ret = __save_stack_trace_tsk_reliable(tsk, trace); trace 125 arch/powerpc/kvm/trace.h #define TRACE_INCLUDE_FILE trace trace 112 arch/powerpc/platforms/powernv/vas-trace.h #define TRACE_INCLUDE_FILE vas-trace trace 136 arch/riscv/kernel/stacktrace.c struct stack_trace *trace = arg; trace 140 arch/riscv/kernel/stacktrace.c if (unlikely(trace->skip > 0)) { trace 141 arch/riscv/kernel/stacktrace.c trace->skip--; trace 145 arch/riscv/kernel/stacktrace.c trace->entries[trace->nr_entries++] = pc; trace 146 arch/riscv/kernel/stacktrace.c return (trace->nr_entries >= trace->max_entries); trace 157 arch/riscv/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 159 arch/riscv/kernel/stacktrace.c walk_stackframe(tsk, NULL, save_trace, trace); trace 163 arch/riscv/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 165 arch/riscv/kernel/stacktrace.c save_stack_trace_tsk(NULL, trace); trace 20 arch/s390/include/asm/trace/diag.h #define TRACE_INCLUDE_PATH asm/trace trace 120 
arch/s390/include/asm/trace/zcrypt.h #define TRACE_INCLUDE_PATH asm/trace trace 11 arch/s390/kvm/trace-s390.h #define TRACE_INCLUDE_FILE trace-s390 trace 14 arch/s390/kvm/trace.h #define TRACE_INCLUDE_FILE trace trace 28 arch/sh/kernel/stacktrace.c struct stack_trace *trace = data; trace 33 arch/sh/kernel/stacktrace.c if (trace->skip > 0) { trace 34 arch/sh/kernel/stacktrace.c trace->skip--; trace 38 arch/sh/kernel/stacktrace.c if (trace->nr_entries < trace->max_entries) trace 39 arch/sh/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr; trace 47 arch/sh/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 51 arch/sh/kernel/stacktrace.c unwind_stack(current, NULL, sp, &save_stack_ops, trace); trace 58 arch/sh/kernel/stacktrace.c struct stack_trace *trace = (struct stack_trace *)data; trace 66 arch/sh/kernel/stacktrace.c if (trace->skip > 0) { trace 67 arch/sh/kernel/stacktrace.c trace->skip--; trace 71 arch/sh/kernel/stacktrace.c if (trace->nr_entries < trace->max_entries) trace 72 arch/sh/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr; trace 80 arch/sh/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 84 arch/sh/kernel/stacktrace.c unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace); trace 14 arch/sparc/kernel/stacktrace.c struct stack_trace *trace, trace 55 arch/sparc/kernel/stacktrace.c if (trace->skip > 0) trace 56 arch/sparc/kernel/stacktrace.c trace->skip--; trace 58 arch/sparc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = pc; trace 66 arch/sparc/kernel/stacktrace.c if (trace->nr_entries < trace 67 arch/sparc/kernel/stacktrace.c trace->max_entries) trace 68 arch/sparc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = pc; trace 74 arch/sparc/kernel/stacktrace.c } while (trace->nr_entries < trace->max_entries); trace 77 arch/sparc/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 79 arch/sparc/kernel/stacktrace.c __save_stack_trace(current_thread_info(), trace, false); trace 83 arch/sparc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 87 arch/sparc/kernel/stacktrace.c __save_stack_trace(tp, trace, true); trace 46 arch/um/kernel/stacktrace.c struct stack_trace *trace = data; trace 50 arch/um/kernel/stacktrace.c if (trace->nr_entries >= trace->max_entries) trace 53 arch/um/kernel/stacktrace.c trace->entries[trace->nr_entries++] = address; trace 60 arch/um/kernel/stacktrace.c static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace) trace 62 arch/um/kernel/stacktrace.c dump_trace(tsk, &dump_ops, trace); trace 65 arch/um/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 67 arch/um/kernel/stacktrace.c __save_stack_trace(current, trace); trace 71 arch/um/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 73 arch/um/kernel/stacktrace.c __save_stack_trace(tsk, trace); trace 72 arch/unicore32/kernel/stacktrace.c struct stack_trace *trace; trace 80 arch/unicore32/kernel/stacktrace.c struct stack_trace *trace = data->trace; trace 90 arch/unicore32/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr; trace 92 arch/unicore32/kernel/stacktrace.c return trace->nr_entries >= trace->max_entries; trace 95 arch/unicore32/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 100 arch/unicore32/kernel/stacktrace.c data.trace = 
trace; trace 101 arch/unicore32/kernel/stacktrace.c data.skip = trace->skip; trace 122 arch/unicore32/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 124 arch/unicore32/kernel/stacktrace.c save_stack_trace_tsk(current, trace); trace 6 arch/x86/include/asm/msr-trace.h #define TRACE_INCLUDE_FILE msr-trace trace 295 arch/x86/include/asm/processor.h unsigned short trace; trace 93 arch/x86/include/asm/trace/fpu.h #define TRACE_INCLUDE_PATH asm/trace/ trace 77 arch/x86/include/asm/trace/hyperv.h #define TRACE_INCLUDE_PATH asm/trace/ trace 128 arch/x86/include/asm/trace/mpx.h #define TRACE_INCLUDE_PATH asm/trace/ trace 724 arch/x86/kernel/cpu/cacheinfo.c unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; trace 826 arch/x86/kernel/cpu/cacheinfo.c trace += cache_table[k].size; trace 1504 arch/x86/kvm/trace.h #define TRACE_INCLUDE_FILE trace trace 137 arch/x86/mm/mmio-mod.c struct remap_trace *trace = p->private; trace 149 arch/x86/mm/mmio-mod.c my_trace->phys = addr - trace->probe.addr + trace->phys; trace 150 arch/x86/mm/mmio-mod.c my_trace->map_id = trace->id; trace 225 arch/x86/mm/mmio-mod.c struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL); trace 234 arch/x86/mm/mmio-mod.c if (!trace) { trace 239 arch/x86/mm/mmio-mod.c *trace = (struct remap_trace) { trace 245 arch/x86/mm/mmio-mod.c .private = trace trace 250 arch/x86/mm/mmio-mod.c map.map_id = trace->id; trace 254 arch/x86/mm/mmio-mod.c kfree(trace); trace 259 arch/x86/mm/mmio-mod.c list_add_tail(&trace->list, &trace_list); trace 261 arch/x86/mm/mmio-mod.c register_kmmio_probe(&trace->probe); trace 288 arch/x86/mm/mmio-mod.c struct remap_trace *trace; trace 298 arch/x86/mm/mmio-mod.c list_for_each_entry_safe(trace, tmp, &trace_list, list) { trace 299 arch/x86/mm/mmio-mod.c if ((unsigned long)addr == trace->probe.addr) { trace 301 arch/x86/mm/mmio-mod.c unregister_kmmio_probe(&trace->probe); trace 302 arch/x86/mm/mmio-mod.c list_del(&trace->list); trace 303 arch/x86/mm/mmio-mod.c found_trace = trace; trace 344 arch/x86/mm/mmio-mod.c struct remap_trace *trace; trace 353 arch/x86/mm/mmio-mod.c list_for_each_entry(trace, &trace_list, list) { trace 355 arch/x86/mm/mmio-mod.c trace->probe.addr, trace->probe.len); trace 357 arch/x86/mm/mmio-mod.c unregister_kmmio_probe(&trace->probe); trace 361 arch/x86/mm/mmio-mod.c list_for_each_entry_safe(trace, tmp, &trace_list, list) { trace 362 arch/x86/mm/mmio-mod.c list_del(&trace->list); trace 363 arch/x86/mm/mmio-mod.c kfree(trace); trace 202 arch/xtensa/kernel/stacktrace.c struct stack_trace *trace; trace 209 arch/xtensa/kernel/stacktrace.c struct stack_trace *trace = trace_data->trace; trace 218 arch/xtensa/kernel/stacktrace.c trace->entries[trace->nr_entries++] = frame->pc; trace 219 arch/xtensa/kernel/stacktrace.c return trace->nr_entries >= trace->max_entries; trace 222 arch/xtensa/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) trace 225 arch/xtensa/kernel/stacktrace.c .trace = trace, trace 226 arch/xtensa/kernel/stacktrace.c .skip = trace->skip, trace 232 arch/xtensa/kernel/stacktrace.c void save_stack_trace(struct stack_trace *trace) trace 234 arch/xtensa/kernel/stacktrace.c save_stack_trace_tsk(current, trace); trace 255 drivers/base/regmap/trace.h #define TRACE_INCLUDE_FILE trace trace 266 drivers/clk/mvebu/armada-37xx-periph.c PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6); trace 285 drivers/clk/mvebu/armada-37xx-periph.c REF_CLK_FULL(trace), trace 382 drivers/gpu/drm/i915/gvt/trace.h 
#define TRACE_INCLUDE_FILE trace trace 868 drivers/gpu/drm/i915/i915_trace.h TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace), trace 870 drivers/gpu/drm/i915/i915_trace.h TP_ARGS(write, reg, val, len, trace), trace 872 drivers/gpu/drm/i915/i915_trace.h TP_CONDITION(trace), trace 1132 drivers/gpu/drm/i915/intel_uncore.c trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ trace 1137 drivers/gpu/drm/i915/intel_uncore.c gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ trace 1145 drivers/gpu/drm/i915/intel_uncore.c gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ trace 1178 drivers/gpu/drm/i915/intel_uncore.c trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ trace 1210 drivers/gpu/drm/i915/intel_uncore.c func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ trace 1240 drivers/gpu/drm/i915/intel_uncore.c trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ trace 1247 drivers/gpu/drm/i915/intel_uncore.c gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ trace 1255 drivers/gpu/drm/i915/intel_uncore.c gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ trace 1278 drivers/gpu/drm/i915/intel_uncore.c trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ trace 1289 drivers/gpu/drm/i915/intel_uncore.c gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ trace 1302 drivers/gpu/drm/i915/intel_uncore.c func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ trace 86 drivers/gpu/drm/i915/intel_uncore.h i915_reg_t r, bool trace); trace 88 drivers/gpu/drm/i915/intel_uncore.h i915_reg_t r, bool trace); trace 90 drivers/gpu/drm/i915/intel_uncore.h i915_reg_t r, bool trace); trace 92 drivers/gpu/drm/i915/intel_uncore.h i915_reg_t r, bool trace); trace 95 drivers/gpu/drm/i915/intel_uncore.h i915_reg_t r, u8 val, bool trace); trace 97 drivers/gpu/drm/i915/intel_uncore.h i915_reg_t r, u16 val, bool trace); trace 99 drivers/gpu/drm/i915/intel_uncore.h i915_reg_t r, u32 val, bool trace); trace 29 drivers/gpu/drm/i915/selftests/mock_uncore.c nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { } trace 36 drivers/gpu/drm/i915/selftests/mock_uncore.c nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; } trace 33 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h #define CONN_TRACE(c,f,a...) CONN_MSG((c), trace, f, ##a) trace 55 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h #define OUTP_TRACE(o,f,a...) 
OUTP_MSG((o), trace, f, ##a) trace 928 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c u32 trace = nvkm_rd32(gr->base.engine.subdev.device, 0x40981c); trace 929 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c if (trace & 0x00000040) trace 324 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("auxch read failed with %d\n", ret); trace 337 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("auxch write failed with %d\n", ret); trace 487 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n", trace 504 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n", trace 598 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("RESERVED 0x%02x\t", opcode); trace 612 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("DONE\n"); trace 632 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("IO_RESTRICT_PROG\tR[0x%06x] = " trace 642 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t0x%08x *\n", data); trace 645 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t0x%08x\n", data); trace 650 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("}]\n"); trace 664 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("REPEAT\t0x%02x\n", count); trace 673 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("REPEAT\t0x%02x\n", count); trace 696 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= " trace 706 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t%dkHz *\n", freq); trace 711 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t%dkHz\n", freq); trace 716 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("}]\n"); trace 726 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("END_REPEAT\n"); trace 751 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= " trace 769 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("NOT\n"); trace 784 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("IO_FLAG_CONDITION\t0x%02x\n", cond); trace 805 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("GENERIC_CONDITION\t0x%02x 0x%02x\n", cond, size); trace 855 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)\n", index, or); trace 874 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)\n", index, or); trace 892 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask); trace 909 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask); trace 929 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg); trace 930 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data); trace 937 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t[0x%02x] = 0x%02x\n", iaddr, idata); trace 961 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("IO_RESTRICT_PLL2\t" trace 970 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t%dkHz *\n", freq); trace 973 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t%dkHz\n", freq); trace 977 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("}]\n"); trace 991 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq); trace 1009 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr); trace 1018 
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data); trace 1040 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr); trace 1047 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t[0x%02x] = 0x%02x\n", reg, data); trace 1067 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr); trace 1072 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t0x%02x\n", data[i]); trace 1102 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n", trace 1126 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds); trace 1133 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t[0x%02x] = 0x%02x\n", addr, data); trace 1155 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1); trace 1162 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t\t[0x%02x] = 0x%02x\n", base, data); trace 1184 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data); trace 1202 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr, data); trace 1218 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_CR_GROUP\n"); trace 1225 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t\tC[0x%02x] = 0x%02x\n", addr, data); trace 1244 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry); trace 1269 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("LTIME\t0x%04x\n", msec); trace 1287 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_REG_SEQUENCE\t0x%02x\n", count); trace 1293 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t\tR[0x%06x] = 0x%08x\n", base, data); trace 1313 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n", trace 1332 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n", trace 1350 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("SUB_DIRECT\t0x%04x\n", addr); trace 1375 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("JUMP\t0x%04x\n", offset); trace 1398 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n", trace 1426 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= " trace 1448 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data); trace 1463 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("COMPUTE_MEM\n"); trace 1485 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2); trace 1519 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("CONFIGURE_MEM\n"); trace 1568 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("CONFIGURE_CLK\n"); trace 1602 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("CONFIGURE_PREINIT\n"); trace 1631 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data); trace 1669 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("SUB\t0x%02x\n", index); trace 1696 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("RAM_CONDITION\t" trace 1716 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("NV_REG\tR[0x%06x] &= 0x%08x |= 
0x%08x\n", reg, mask, data); trace 1733 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("MACRO\t0x%02x\n", macro); trace 1739 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t\tR[0x%06x] = 0x%08x\n", addr, data); trace 1753 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("RESUME\n"); trace 1769 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("STRAP_CONDITION\t(R[0x101000] & 0x%08x) == 0x%08x\n", mask, value); trace 1786 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("TIME\t0x%04x\n", usec); trace 1807 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("CONDITION\t0x%02x\n", cond); trace 1824 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("IO_CONDITION\t0x%02x\n", cond); trace 1842 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_REG\tR[0x%06x] = 0x%04x\n", addr, data); trace 1862 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n", trace 1881 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq); trace 1898 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data); trace 1920 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("RAM_RESTRICT_PLL\t0x%02x\n", type); trace 1927 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("%dkHz *\n", freq); trace 1930 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("%dkHz\n", freq); trace 1944 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("RESET_BEGUN\n"); trace 1955 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("RESET_END\n"); trace 1968 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("GPIO\n"); trace 1990 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("RAM_RESTRICT_ZM_REG_GROUP\t" trace 1995 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\tR[0x%06x] = {\n", addr); trace 2000 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t\t0x%08x *\n", data); trace 2003 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t\t0x%08x\n", data); trace 2008 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t}\n"); trace 2024 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg); trace 2041 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr); trace 2046 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\t0x%08x\n", data); trace 2069 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= " trace 2093 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add); trace 2112 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count); trace 2118 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data); trace 2136 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count); trace 2141 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\tAUX[0x%08x] = 0x%02x\n", addr, data); trace 2163 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("I2C_LONG_IF\t" trace 2200 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("GPIO_NE\t"); trace 2214 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c trace("\tFUNC[0x%02x]", func.func); trace 38 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h #define AUX_TRACE(b,f,a...) 
AUX_MSG((b), trace, f, ##a) trace 38 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h #define BUS_TRACE(b,f,a...) BUS_MSG((b), trace, f, ##a) trace 67 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h #define PAD_TRACE(p,f,a...) PAD_MSG((p), trace, f, ##a) trace 67 drivers/gpu/drm/tegra/trace.h #define TRACE_INCLUDE_FILE trace trace 590 drivers/i2c/i2c-core-smbus.c goto trace; trace 600 drivers/i2c/i2c-core-smbus.c trace: trace 144 drivers/lightnvm/pblk-trace.h #define TRACE_INCLUDE_FILE pblk-trace trace 453 drivers/media/pci/ivtv/ivtvfb.c u32 trace; trace 459 drivers/media/pci/ivtv/ivtvfb.c trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16; trace 460 drivers/media/pci/ivtv/ivtvfb.c if (itv->is_out_50hz && trace > 312) trace 461 drivers/media/pci/ivtv/ivtvfb.c trace -= 312; trace 462 drivers/media/pci/ivtv/ivtvfb.c else if (itv->is_out_60hz && trace > 262) trace 463 drivers/media/pci/ivtv/ivtvfb.c trace -= 262; trace 464 drivers/media/pci/ivtv/ivtvfb.c if (trace == 1) trace 467 drivers/media/pci/ivtv/ivtvfb.c vblank.vcount = trace; trace 162 drivers/media/platform/coda/trace.h #define TRACE_INCLUDE_FILE trace trace 1202 drivers/media/usb/pwc/pwc-if.c module_param_named(trace, pwc_trace, int, 0644); trace 1208 drivers/media/usb/pwc/pwc-if.c MODULE_PARM_DESC(trace, "For debugging purposes"); trace 2388 drivers/media/usb/uvc/uvc_driver.c module_param_named(trace, uvc_trace_param, uint, S_IRUGO|S_IWUSR); trace 2389 drivers/media/usb/uvc/uvc_driver.c MODULE_PARM_DESC(trace, "Trace level bitmask"); trace 682 drivers/misc/cxl/cxl.h struct dentry *trace; trace 690 drivers/misc/cxl/trace.h #define TRACE_INCLUDE_FILE trace trace 902 drivers/misc/genwqe/card_utils.c int i, traps, traces, trace, trace_entries, trace_entry, ring; trace 959 drivers/misc/genwqe/card_utils.c for (trace = 0; trace <= traces; trace++) { trace 961 drivers/misc/genwqe/card_utils.c GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace); trace 968 drivers/misc/genwqe/card_utils.c trace_entry < (trace ? 
trace_entries : traps); trace 82 drivers/misc/mei/mei-trace.h #define TRACE_INCLUDE_FILE mei-trace trace 177 drivers/misc/ocxl/trace.h #define TRACE_INCLUDE_FILE trace trace 392 drivers/net/ethernet/alteon/acenic.c static int trace[ACE_MAX_MOD_PARMS]; trace 409 drivers/net/ethernet/alteon/acenic.c module_param_array(trace, int, NULL, 0); trace 416 drivers/net/ethernet/alteon/acenic.c MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level"); trace 1358 drivers/net/ethernet/alteon/acenic.c if (trace[board_idx]) trace 1359 drivers/net/ethernet/alteon/acenic.c writel(trace[board_idx], &regs->TuneTrace); trace 2621 drivers/net/ethernet/alteon/acenic.c ecmd->trace = readl(&regs->TuneTrace); trace 157 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h #define TRACE_INCLUDE_FILE dpaa2-eth-trace trace 454 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c struct tracer_event *tracer_event, u64 *trace) trace 458 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id); trace 459 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost); trace 464 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c urts = MLX5_GET(tracer_timestamp_event, trace, urts); trace 471 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c trace, timestamp7_0); trace 473 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c trace, timestamp39_8); trace 475 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c trace, timestamp52_40); trace 488 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c MLX5_GET(tracer_string_event, trace, timestamp); trace 490 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c MLX5_GET(tracer_string_event, trace, string_param); trace 492 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c MLX5_GET(tracer_string_event, trace, tmsn); trace 494 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c MLX5_GET(tracer_string_event, trace, tdsn); trace 323 drivers/net/ethernet/neterion/vxge/vxge-main.h #define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \ trace 326 drivers/net/ethernet/neterion/vxge/vxge-main.h vdev->vpaths[i].level_trace = trace; \ trace 329 drivers/net/ethernet/neterion/vxge/vxge-main.h vdev->level_trace = trace; \ trace 6860 drivers/net/ethernet/qlogic/qed/qed_debug.c struct mcp_trace *trace; trace 6890 drivers/net/ethernet/qlogic/qed/qed_debug.c trace = (struct mcp_trace *)dump_buf; trace 6891 drivers/net/ethernet/qlogic/qed/qed_debug.c if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size) trace 6894 drivers/net/ethernet/qlogic/qed/qed_debug.c trace_buf = (u8 *)dump_buf + sizeof(*trace); trace 6895 drivers/net/ethernet/qlogic/qed/qed_debug.c offset = trace->trace_oldest; trace 6896 drivers/net/ethernet/qlogic/qed/qed_debug.c data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size); trace 6932 drivers/net/ethernet/qlogic/qed/qed_debug.c trace->size, trace 13368 drivers/net/ethernet/qlogic/qed/qed_hsi.h struct mcp_trace trace; trace 13369 drivers/net/ethernet/qlogic/qed/qed_hsi.h #define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace))) trace 274 drivers/net/fjes/fjes_ethtool.c if (hw->hw_info.trace) trace 275 drivers/net/fjes/fjes_ethtool.c memcpy(buf, hw->hw_info.trace, hw->hw_info.trace_size); trace 331 drivers/net/fjes/fjes_hw.c hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE); trace 349 drivers/net/fjes/fjes_hw.c vfree(hw->hw_info.trace); trace 350 drivers/net/fjes/fjes_hw.c
hw->hw_info.trace = NULL; trace 1189 drivers/net/fjes/fjes_hw.c if (!hw->hw_info.trace) trace 1191 drivers/net/fjes/fjes_hw.c memset(hw->hw_info.trace, 0, FJES_DEBUG_BUFFER_SIZE); trace 1202 drivers/net/fjes/fjes_hw.c addr = ((u8 *)hw->hw_info.trace) + i * FJES_DEBUG_PAGE_SIZE; trace 1255 drivers/net/fjes/fjes_hw.c if (!hw->hw_info.trace) trace 287 drivers/net/fjes/fjes_hw.h struct es_device_trace *trace; trace 790 drivers/net/hippi/rrunner.h u8 trace[3072]; trace 534 drivers/net/wireless/ath/ath10k/trace.h #define TRACE_INCLUDE_FILE trace trace 103 drivers/net/wireless/ath/ath5k/trace.h #define TRACE_INCLUDE_FILE trace trace 330 drivers/net/wireless/ath/ath6kl/trace.h #define TRACE_INCLUDE_FILE trace trace 68 drivers/net/wireless/ath/trace.h #define TRACE_INCLUDE_FILE trace trace 296 drivers/net/wireless/ath/wil6210/trace.h #define TRACE_INCLUDE_FILE trace trace 1860 drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c int trace; trace 1866 drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c if (sscanf(buf, "%d", &trace) != 1) trace 1869 drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c if (trace) { trace 59 drivers/net/wireless/mediatek/mt76/trace.h #define TRACE_INCLUDE_FILE trace trace 390 drivers/net/wireless/mediatek/mt7601u/trace.h #define TRACE_INCLUDE_FILE trace trace 172 drivers/nvme/host/trace.h #define TRACE_INCLUDE_FILE trace trace 138 drivers/nvme/target/trace.h #define TRACE_INCLUDE_FILE trace trace 1523 drivers/pinctrl/tegra/pinctrl-tegra114.c FUNCTION(trace), trace 1688 drivers/pinctrl/tegra/pinctrl-tegra124.c FUNCTION(trace), trace 1952 drivers/pinctrl/tegra/pinctrl-tegra20.c FUNCTION(trace), trace 2083 drivers/pinctrl/tegra/pinctrl-tegra30.c FUNCTION(trace), trace 606 drivers/remoteproc/remoteproc_core.c struct rproc_debug_trace *trace; trace 621 drivers/remoteproc/remoteproc_core.c trace = kzalloc(sizeof(*trace), GFP_KERNEL); trace 622 drivers/remoteproc/remoteproc_core.c if (!trace) trace 626 drivers/remoteproc/remoteproc_core.c trace->trace_mem.len = rsc->len; trace 627 drivers/remoteproc/remoteproc_core.c trace->trace_mem.da = rsc->da; trace 630 drivers/remoteproc/remoteproc_core.c trace->rproc = rproc; trace 636 drivers/remoteproc/remoteproc_core.c trace->tfile = rproc_create_trace_file(name, rproc, trace); trace 637 drivers/remoteproc/remoteproc_core.c if (!trace->tfile) { trace 638 drivers/remoteproc/remoteproc_core.c kfree(trace); trace 642 drivers/remoteproc/remoteproc_core.c list_add_tail(&trace->node, &rproc->traces); trace 1255 drivers/remoteproc/remoteproc_core.c struct rproc_debug_trace *trace, *ttmp; trace 1260 drivers/remoteproc/remoteproc_core.c list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) { trace 1261 drivers/remoteproc/remoteproc_core.c rproc_remove_trace_file(trace->tfile); trace 1263 drivers/remoteproc/remoteproc_core.c list_del(&trace->node); trace 1264 drivers/remoteproc/remoteproc_core.c kfree(trace); trace 43 drivers/remoteproc/remoteproc_debugfs.c struct rproc_mem_entry *trace = &data->trace_mem; trace 48 drivers/remoteproc/remoteproc_debugfs.c va = rproc_da_to_va(data->rproc, trace->da, trace->len); trace 52 drivers/remoteproc/remoteproc_debugfs.c trace->name); trace 55 drivers/remoteproc/remoteproc_debugfs.c len = strnlen(va, trace->len); trace 320 drivers/remoteproc/remoteproc_debugfs.c struct rproc_debug_trace *trace) trace 324 drivers/remoteproc/remoteproc_debugfs.c tfile = debugfs_create_file(name, 0400, rproc->dbg_dir, trace, trace 39 drivers/remoteproc/remoteproc_internal.h struct rproc_debug_trace *trace); trace 364 
drivers/remoteproc/stm32_rproc.c struct rproc_debug_trace *trace; trace 368 drivers/remoteproc/stm32_rproc.c list_for_each_entry(trace, &rproc->traces, node) { trace 372 drivers/remoteproc/stm32_rproc.c if (segment->da == trace->trace_mem.da) { trace 379 drivers/remoteproc/stm32_rproc.c rproc_coredump_add_segment(rproc, trace->trace_mem.da, trace 380 drivers/remoteproc/stm32_rproc.c trace->trace_mem.len); trace 401 drivers/s390/cio/trace.h #define TRACE_INCLUDE_FILE trace trace 396 drivers/s390/net/lcs.c LCS_DBF_TEXT_(4, trace, " %02x%02x%02x", trace 480 drivers/s390/net/lcs.c LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev)); trace 489 drivers/s390/net/lcs.c LCS_DBF_TEXT_(4,trace,"essh%s", trace 504 drivers/s390/net/lcs.c LCS_DBF_TEXT(4,trace,"clearch"); trace 505 drivers/s390/net/lcs.c LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); trace 510 drivers/s390/net/lcs.c LCS_DBF_TEXT_(4, trace, "ecsc%s", trace 531 drivers/s390/net/lcs.c LCS_DBF_TEXT(4,trace,"haltsch"); trace 532 drivers/s390/net/lcs.c LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); trace 538 drivers/s390/net/lcs.c LCS_DBF_TEXT_(4, trace, "ehsc%s", trace 556 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "chstart"); trace 574 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "chhalt"); trace 588 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "_getbuff"); trace 606 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "getbuff"); trace 625 drivers/s390/net/lcs.c LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev)); trace 628 drivers/s390/net/lcs.c LCS_DBF_TEXT_(4, trace, "ersc%s", trace 646 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "rdybits"); trace 666 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "rdybuff"); trace 691 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "prcsbuff"); trace 723 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "relbuff"); trace 740 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "getlncmd"); trace 774 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "getreply"); trace 797 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "notiwait"); trace 826 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "timeout"); trace 853 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "sendcmd"); trace 874 drivers/s390/net/lcs.c LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); trace 889 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "startup"); trace 907 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "shutdown"); trace 921 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "statcb"); trace 931 drivers/s390/net/lcs.c LCS_DBF_TEXT(2,trace, "cmdstat"); trace 951 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "cmdstpln"); trace 967 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "srtlancb"); trace 978 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "cmdstaln"); trace 998 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "cmdsetim"); trace 1009 drivers/s390/net/lcs.c LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); trace 1022 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "cmddelim"); trace 1033 drivers/s390/net/lcs.c LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); trace 1043 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "chkmccb"); trace 1057 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "cmdqipa"); trace 1088 drivers/s390/net/lcs.c LCS_DBF_TEXT(4,trace, "fixipm"); trace 1138 drivers/s390/net/lcs.c LCS_DBF_TEXT(4,trace, "getmac"); trace 1154 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "remmclst"); trace 1180 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "chkmcent"); trace 1204 drivers/s390/net/lcs.c 
LCS_DBF_TEXT(4, trace, "setmclst"); trace 1221 drivers/s390/net/lcs.c LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4); trace 1237 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "regmulti"); trace 1273 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "setmulti"); trace 1291 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "ckirberr"); trace 1292 drivers/s390/net/lcs.c LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); trace 1297 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "ckirberr"); trace 1298 drivers/s390/net/lcs.c LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); trace 1304 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "ckirberr"); trace 1305 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, " rc???"); trace 1323 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "CGENCHK"); trace 1329 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "REVIND"); trace 1334 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "CMDREJ"); trace 1341 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "ZEROSEN"); trace 1344 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "DGENCHK"); trace 1353 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "startrec"); trace 1380 drivers/s390/net/lcs.c LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev)); trace 1381 drivers/s390/net/lcs.c LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat, trace 1383 drivers/s390/net/lcs.c LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl, trace 1454 drivers/s390/net/lcs.c LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev)); trace 1485 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "emittx"); trace 1501 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "txbuffcb"); trace 1528 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "hardxmit"); trace 1590 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "pktxmit"); trace 1604 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "strtauto"); trace 1626 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "startlan"); trace 1715 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "krnthrd"); trace 1730 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "getctrl"); trace 1744 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "noLGWcmd"); trace 1759 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "getskb"); trace 1792 drivers/s390/net/lcs.c LCS_DBF_TEXT(5, trace, "lcsgtpkt"); trace 1795 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "-eiogpkt"); trace 1840 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "netstats"); trace 1855 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "stopdev"); trace 1879 drivers/s390/net/lcs.c LCS_DBF_TEXT(2, trace, "opendev"); trace 2255 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "recover1"); trace 2258 drivers/s390/net/lcs.c LCS_DBF_TEXT(4, trace, "recover2"); trace 2458 drivers/s390/net/lcs.c LCS_DBF_TEXT(0, trace, "cleanup"); trace 677 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 721 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 804 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 824 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 834 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 845 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 857 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 876 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 959 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 965 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); trace 1035 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1055 
drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1077 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1107 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1280 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1286 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1302 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1326 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1360 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 1411 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1424 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1486 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1520 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1532 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1578 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1589 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1600 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1610 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 1622 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1631 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 1643 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1652 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 1664 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1673 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1685 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1694 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 1706 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1715 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 1727 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1736 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 1748 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 5, __func__); trace 1757 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 4, __func__); trace 1805 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1836 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1904 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1928 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 2017 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 2082 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 2147 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 2178 drivers/s390/net/netiucv.c IUCV_DBF_TEXT(trace, 3, __func__); trace 1020 drivers/scsi/esas2r/atioctl.h struct atto_hba_trace trace; trace 914 drivers/scsi/esas2r/esas2r_ioctl.c struct atto_hba_trace *trc = &hi->data.trace; trace 152 drivers/scsi/ibmvscsi/ibmvfc.c entry = &vhost->trace[vhost->trace_index++]; trace 184 drivers/scsi/ibmvscsi/ibmvfc.c struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++]; trace 3038 drivers/scsi/ibmvscsi/ibmvfc.c char *src = (char *)vhost->trace; trace 4580 drivers/scsi/ibmvscsi/ibmvfc.c kfree(vhost->trace); trace 4647 drivers/scsi/ibmvscsi/ibmvfc.c vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES, trace 4650 drivers/scsi/ibmvscsi/ibmvfc.c if (!vhost->trace) trace 4665 drivers/scsi/ibmvscsi/ibmvfc.c kfree(vhost->trace); 
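A different recurring shape in this listing is the driver-private trace ring: the ibmvfc entries just above (and the ipr and imx21-dbg entries further on) allocate a fixed-size array of trace entries and hand out slots through a running index. The following is a self-contained sketch of that pattern only; the struct layout, sizes, names and the power-of-two wrap are illustrative assumptions, not the drivers' actual code.

/*
 * Illustrative fixed-size trace ring: a preallocated array of entries
 * plus a running index that wraps, overwriting the oldest entry.
 */
#include <stdint.h>
#include <string.h>

#define NUM_TRACE_ENTRIES 64	/* assumed power of two so the index wraps with a mask */

struct trace_entry {
	uint64_t time;
	uint32_t event;
	uint32_t data;
};

static struct trace_entry trace_buf[NUM_TRACE_ENTRIES];
static unsigned int trace_index;

static struct trace_entry *next_trace_entry(void)
{
	/* claim the next slot; once full, the oldest entry is overwritten */
	struct trace_entry *entry = &trace_buf[trace_index++ & (NUM_TRACE_ENTRIES - 1)];

	memset(entry, 0, sizeof(*entry));
	return entry;
}

Because the whole ring lives in one contiguous array, a dump path can copy it out in a single pass, which is how the corresponding sysfs/debugfs readers in this listing expose it.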
trace 686 drivers/scsi/ibmvscsi/ibmvfc.h struct ibmvfc_trace_entry *trace; trace 594 drivers/scsi/ipr.c trace_entry = &ioa_cfg->trace[trace_index]; trace 3104 drivers/scsi/ipr.c memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); trace 3467 drivers/scsi/ipr.c ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, trace 9572 drivers/scsi/ipr.c kfree(ioa_cfg->trace); trace 9808 drivers/scsi/ipr.c ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, trace 9812 drivers/scsi/ipr.c if (!ioa_cfg->trace) trace 1511 drivers/scsi/ipr.h struct ipr_trace_entry *trace; trace 1691 drivers/scsi/ipr.h u32 trace[IPR_TRACE_SIZE / sizeof(u32)]; trace 4533 drivers/scsi/scsi_transport_iscsi.c void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *), trace 4542 drivers/scsi/scsi_transport_iscsi.c trace(dev, &vaf); trace 80 drivers/soc/qcom/trace-rpmh.h #define TRACE_INCLUDE_FILE trace-rpmh trace 94 drivers/staging/media/tegra-vde/trace.h #define TRACE_INCLUDE_FILE trace trace 120 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c int trace; trace 2943 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c return instance->trace; trace 2947 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace) trace 2955 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c service->trace = trace; trace 2958 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c instance->trace = (trace != 0); trace 177 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace); trace 23 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level) trace 25 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev))) trace 2314 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c service->trace = 0; trace 3291 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c service->trace = value; trace 264 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h char trace; trace 133 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c int trace; trace 135 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c trace = vchiq_instance_get_trace(instance); trace 136 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c seq_printf(f, "%s\n", trace ? 
"Y" : "N"); trace 139 drivers/tty/serial/icom.c trace(icom_port, "RET_PORT_MEM", 0); trace 178 drivers/tty/serial/icom.c trace(icom_port, "GET_PORT_MEM", trace 188 drivers/tty/serial/icom.c trace(icom_port, "GET_PORT_MEM", trace 198 drivers/tty/serial/icom.c trace(icom_port, "GET_PORT_MEM", trace 216 drivers/tty/serial/icom.c trace(icom_port, "FOD_ADDR", stgAddr); trace 222 drivers/tty/serial/icom.c trace(icom_port, "FOD_ADDR", stgAddr); trace 223 drivers/tty/serial/icom.c trace(icom_port, "FOD_XBUFF", trace 231 drivers/tty/serial/icom.c trace(icom_port, "FOD_XBUFF", trace 244 drivers/tty/serial/icom.c trace(icom_port, "FID_ADDR", stgAddr); trace 253 drivers/tty/serial/icom.c trace(icom_port, "FID_RBUFF", trace 261 drivers/tty/serial/icom.c trace(icom_port, "FID_RBUFF", trace 351 drivers/tty/serial/icom.c trace(icom_port, "CLEAR_INTERRUPTS", 0); trace 507 drivers/tty/serial/icom.c trace(icom_port, "STARTUP", 0); trace 520 drivers/tty/serial/icom.c trace(icom_port, "CABLE_ID", raw_cable_id); trace 580 drivers/tty/serial/icom.c trace(icom_port, "SHUTDOWN", 0); trace 621 drivers/tty/serial/icom.c trace(ICOM_PORT, "WRITE", 0); trace 625 drivers/tty/serial/icom.c trace(ICOM_PORT, "WRITE_FULL", 0); trace 654 drivers/tty/serial/icom.c trace(ICOM_PORT, "WRITE_START", data_count); trace 672 drivers/tty/serial/icom.c trace(icom_port, "CHECK_MODEM", status); trace 699 drivers/tty/serial/icom.c trace(icom_port, "XMIT_COMPLETE", 0); trace 721 drivers/tty/serial/icom.c trace(icom_port, "XMIT_DISABLED", 0); trace 733 drivers/tty/serial/icom.c trace(icom_port, "RCV_COMPLETE", 0); trace 740 drivers/tty/serial/icom.c trace(icom_port, "FID_STATUS", status); trace 743 drivers/tty/serial/icom.c trace(icom_port, "RCV_COUNT", count); trace 745 drivers/tty/serial/icom.c trace(icom_port, "REAL_COUNT", count); trace 765 drivers/tty/serial/icom.c trace(icom_port, "BREAK_DET", 0); trace 789 drivers/tty/serial/icom.c trace(icom_port, "IGNORE_CHAR", 0); trace 798 drivers/tty/serial/icom.c trace(icom_port, "PARITY_ERROR", 0); trace 838 drivers/tty/serial/icom.c trace(icom_port, "INTERRUPT", port_int_reg); trace 944 drivers/tty/serial/icom.c trace(ICOM_PORT, "SET_MODEM", 0); trace 948 drivers/tty/serial/icom.c trace(ICOM_PORT, "RAISE_RTS", 0); trace 951 drivers/tty/serial/icom.c trace(ICOM_PORT, "LOWER_RTS", 0); trace 956 drivers/tty/serial/icom.c trace(ICOM_PORT, "RAISE_DTR", 0); trace 959 drivers/tty/serial/icom.c trace(ICOM_PORT, "LOWER_DTR", 0); trace 971 drivers/tty/serial/icom.c trace(ICOM_PORT, "GET_MODEM", 0); trace 986 drivers/tty/serial/icom.c trace(ICOM_PORT, "STOP", 0); trace 995 drivers/tty/serial/icom.c trace(ICOM_PORT, "START", 0); trace 1010 drivers/tty/serial/icom.c trace(ICOM_PORT, "SEND_XCHAR", ch); trace 1017 drivers/tty/serial/icom.c trace(ICOM_PORT, "QUICK_WRITE", 0); trace 1044 drivers/tty/serial/icom.c trace(ICOM_PORT, "BREAK", 0); trace 1063 drivers/tty/serial/icom.c trace(ICOM_PORT, "STARTUP_ERROR", 0); trace 1074 drivers/tty/serial/icom.c trace(ICOM_PORT, "CLOSE", 0); trace 1100 drivers/tty/serial/icom.c trace(ICOM_PORT, "CHANGE_SPEED", 0); trace 1131 drivers/tty/serial/icom.c trace(ICOM_PORT, "PARENB", 0); trace 1136 drivers/tty/serial/icom.c trace(ICOM_PORT, "PARODD", 0); trace 1248 drivers/tty/serial/icom.c trace(ICOM_PORT, "XR_ENAB", 0); trace 491 drivers/usb/cdns3/trace.h #define TRACE_INCLUDE_FILE trace trace 341 drivers/usb/dwc3/trace.h #define TRACE_INCLUDE_FILE trace trace 34 drivers/usb/gadget/function/f_uvc.c module_param_named(trace, uvc_gadget_trace_param, uint, 0644); trace 35 
drivers/usb/gadget/function/f_uvc.c MODULE_PARM_DESC(trace, "Trace level bitmask"); trace 287 drivers/usb/gadget/udc/trace.h #define TRACE_INCLUDE_FILE trace trace 121 drivers/usb/host/imx21-dbg.c struct debug_isoc_trace *trace = &imx21->isoc_trace[ trace 125 drivers/usb/host/imx21-dbg.c trace->schedule_frame = td->frame; trace 126 drivers/usb/host/imx21-dbg.c trace->submit_frame = frame; trace 127 drivers/usb/host/imx21-dbg.c trace->request_len = td->len; trace 128 drivers/usb/host/imx21-dbg.c trace->td = td; trace 134 drivers/usb/host/imx21-dbg.c struct debug_isoc_trace *trace, *trace_failed; trace 138 drivers/usb/host/imx21-dbg.c trace = imx21->isoc_trace; trace 139 drivers/usb/host/imx21-dbg.c for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) { trace 140 drivers/usb/host/imx21-dbg.c if (trace->td == td) { trace 141 drivers/usb/host/imx21-dbg.c trace->done_frame = frame; trace 142 drivers/usb/host/imx21-dbg.c trace->done_len = len; trace 143 drivers/usb/host/imx21-dbg.c trace->cc = cc; trace 144 drivers/usb/host/imx21-dbg.c trace->td = NULL; trace 156 drivers/usb/host/imx21-dbg.c *trace_failed = *trace; trace 377 drivers/usb/host/imx21-dbg.c const char *name, int index, struct debug_isoc_trace *trace) trace 387 drivers/usb/host/imx21-dbg.c trace->cc, trace 388 drivers/usb/host/imx21-dbg.c trace->schedule_frame, trace->schedule_frame & 0xFFFF, trace 389 drivers/usb/host/imx21-dbg.c trace->submit_frame, trace->submit_frame & 0xFFFF, trace 390 drivers/usb/host/imx21-dbg.c trace->done_frame, trace->done_frame & 0xFFFF, trace 391 drivers/usb/host/imx21-dbg.c trace->request_len, trace 392 drivers/usb/host/imx21-dbg.c trace->done_len); trace 398 drivers/usb/host/imx21-dbg.c struct debug_isoc_trace *trace; trace 404 drivers/usb/host/imx21-dbg.c trace = imx21->isoc_trace_failed; trace 405 drivers/usb/host/imx21-dbg.c for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++) trace 406 drivers/usb/host/imx21-dbg.c debug_isoc_show_one(s, "isoc failed", i, trace); trace 408 drivers/usb/host/imx21-dbg.c trace = imx21->isoc_trace; trace 409 drivers/usb/host/imx21-dbg.c for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) trace 410 drivers/usb/host/imx21-dbg.c debug_isoc_show_one(s, "isoc", i, trace); trace 22 drivers/usb/host/xhci-dbg.c void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), trace 32 drivers/usb/host/xhci-dbg.c trace(&vaf); trace 603 drivers/usb/host/xhci-trace.h #define TRACE_INCLUDE_FILE xhci-trace trace 1974 drivers/usb/host/xhci.h void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), trace 171 drivers/usb/typec/ucsi/trace.h #define TRACE_INCLUDE_FILE trace trace 95 drivers/vfio/pci/trace.h #define TRACE_INCLUDE_FILE trace trace 53 fs/btrfs/ref-verify.c unsigned long trace[MAX_TRACE]; trace 208 fs/btrfs/ref-verify.c ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2); trace 218 fs/btrfs/ref-verify.c stack_trace_print(ra->trace, ra->trace_len, 2); trace 883 fs/cifs/trace.h #define TRACE_INCLUDE_FILE trace trace 1562 fs/locks.c goto trace; trace 1566 fs/locks.c goto trace; trace 1570 fs/locks.c trace: trace 311 fs/nfsd/trace.h #define TRACE_INCLUDE_FILE trace trace 913 fs/xfs/scrub/trace.h #define TRACE_INCLUDE_FILE scrub/trace trace 744 include/linux/ftrace.h extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); trace 860 include/linux/ftrace.h set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); trace 865 include/linux/ftrace.h clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); trace 870 
include/linux/ftrace.h return tsk->trace & TSK_TRACE_FL_TRACE; trace 875 include/linux/ftrace.h set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); trace 880 include/linux/ftrace.h clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); trace 885 include/linux/ftrace.h return tsk->trace & TSK_TRACE_FL_GRAPH; trace 193 include/linux/lockdep.h const struct lock_trace *trace; trace 1201 include/linux/sched.h unsigned long trace; trace 12 include/linux/stacktrace.h void stack_trace_print(const unsigned long *trace, unsigned int nr_entries, trace 70 include/linux/stacktrace.h extern void save_stack_trace(struct stack_trace *trace); trace 72 include/linux/stacktrace.h struct stack_trace *trace); trace 74 include/linux/stacktrace.h struct stack_trace *trace); trace 76 include/linux/stacktrace.h struct stack_trace *trace); trace 77 include/linux/stacktrace.h extern void save_stack_trace_user(struct stack_trace *trace); trace 76 include/linux/trace_events.h struct tracer *trace; trace 1214 include/net/netfilter/nf_tables.h bool trace; trace 84 include/trace/define_trace.h # define __TRACE_INCLUDE(system) <trace/events/system.h> trace 286 include/trace/events/fs_dax.h #include <trace/define_trace.h> trace 15 include/trace/events/iscsi.h void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *), trace 73 include/trace/events/syscalls.h #include <trace/define_trace.h> trace 268 include/trace/events/v4l2.h #include <trace/define_trace.h> trace 69 include/trace/events/vb2.h #include <trace/define_trace.h> trace 365 include/trace/trace_events.h .trace = trace_raw_output_##call, \ trace 391 include/trace/trace_events.h .trace = trace_raw_output_##call, \ trace 55 include/uapi/linux/genwqe/genwqe_card.h #define GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace) (((ring) << 8) | (trace)) trace 350 kernel/bpf/stackmap.c struct perf_callchain_entry *trace; trace 366 kernel/bpf/stackmap.c trace = get_perf_callchain(regs, init_nr, kernel, user, trace 369 kernel/bpf/stackmap.c if (unlikely(!trace)) trace 376 kernel/bpf/stackmap.c trace_nr = trace->nr - init_nr; trace 384 kernel/bpf/stackmap.c ips = trace->ip + skip + init_nr; trace 453 kernel/bpf/stackmap.c struct perf_callchain_entry *trace; trace 474 kernel/bpf/stackmap.c trace = get_perf_callchain(regs, init_nr, kernel, user, trace 476 kernel/bpf/stackmap.c if (unlikely(!trace)) trace 479 kernel/bpf/stackmap.c trace_nr = trace->nr - init_nr; trace 486 kernel/bpf/stackmap.c ips = trace->ip + skip + init_nr; trace 1762 kernel/fork.c int trace, trace 2170 kernel/fork.c ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); trace 2335 kernel/fork.c int trace = 0; trace 2346 kernel/fork.c trace = PTRACE_EVENT_VFORK; trace 2348 kernel/fork.c trace = PTRACE_EVENT_CLONE; trace 2350 kernel/fork.c trace = PTRACE_EVENT_FORK; trace 2352 kernel/fork.c if (likely(!ptrace_event_enabled(current, trace))) trace 2353 kernel/fork.c trace = 0; trace 2356 kernel/fork.c p = copy_process(NULL, trace, NUMA_NO_NODE, args); trace 2383 kernel/fork.c if (unlikely(trace)) trace 2384 kernel/fork.c ptrace_event_pid(trace, pid); trace 482 kernel/locking/lockdep.c struct lock_trace *trace, *t2; trace 490 kernel/locking/lockdep.c trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries); trace 503 kernel/locking/lockdep.c trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); trace 505 kernel/locking/lockdep.c hash = jhash(trace->entries, trace->nr_entries * trace 506 kernel/locking/lockdep.c sizeof(trace->entries[0]), 0); trace 507 kernel/locking/lockdep.c 
trace->hash = hash; trace 510 kernel/locking/lockdep.c if (traces_identical(trace, t2)) trace 513 kernel/locking/lockdep.c nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries; trace 514 kernel/locking/lockdep.c hlist_add_head(&trace->hash_entry, hash_head); trace 516 kernel/locking/lockdep.c return trace; trace 522 kernel/locking/lockdep.c struct lock_trace *trace; trace 527 kernel/locking/lockdep.c hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) { trace 1305 kernel/locking/lockdep.c const struct lock_trace *trace) trace 1319 kernel/locking/lockdep.c entry->trace = trace; trace 1540 kernel/locking/lockdep.c static void print_lock_trace(const struct lock_trace *trace, trace 1543 kernel/locking/lockdep.c stack_trace_print(trace->entries, trace->nr_entries, spaces); trace 1558 kernel/locking/lockdep.c print_lock_trace(target->trace, 6); trace 1661 kernel/locking/lockdep.c this->trace = save_trace(); trace 1662 kernel/locking/lockdep.c if (!this->trace) trace 1789 kernel/locking/lockdep.c struct lock_trace **const trace) trace 1803 kernel/locking/lockdep.c if (!*trace) { trace 1809 kernel/locking/lockdep.c *trace = save_trace(); trace 1958 kernel/locking/lockdep.c print_lock_trace(entry->trace, 2); trace 2085 kernel/locking/lockdep.c prev_root->trace = save_trace(); trace 2086 kernel/locking/lockdep.c if (!prev_root->trace) trace 2092 kernel/locking/lockdep.c next_root->trace = save_trace(); trace 2093 kernel/locking/lockdep.c if (!next_root->trace) trace 2446 kernel/locking/lockdep.c struct lock_trace **const trace) trace 2479 kernel/locking/lockdep.c ret = check_noncircular(next, prev, trace); trace 2521 kernel/locking/lockdep.c if (!*trace) { trace 2522 kernel/locking/lockdep.c *trace = save_trace(); trace 2523 kernel/locking/lockdep.c if (!*trace) trace 2533 kernel/locking/lockdep.c next->acquire_ip, distance, *trace); trace 2540 kernel/locking/lockdep.c next->acquire_ip, distance, *trace); trace 2556 kernel/locking/lockdep.c struct lock_trace *trace = NULL; trace 2585 kernel/locking/lockdep.c &trace); trace 3176 kernel/locking/lockdep.c root->trace = save_trace(); trace 3177 kernel/locking/lockdep.c if (!root->trace) trace 253 kernel/stacktrace.c save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace 259 kernel/stacktrace.c save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) trace 275 kernel/stacktrace.c struct stack_trace trace = { trace 281 kernel/stacktrace.c save_stack_trace(&trace); trace 282 kernel/stacktrace.c return trace.nr_entries; trace 299 kernel/stacktrace.c struct stack_trace trace = { trace 306 kernel/stacktrace.c save_stack_trace_tsk(task, &trace); trace 307 kernel/stacktrace.c return trace.nr_entries; trace 322 kernel/stacktrace.c struct stack_trace trace = { trace 328 kernel/stacktrace.c save_stack_trace_regs(regs, &trace); trace 329 kernel/stacktrace.c return trace.nr_entries; trace 348 kernel/stacktrace.c struct stack_trace trace = { trace 352 kernel/stacktrace.c int ret = save_stack_trace_tsk_reliable(tsk, &trace); trace 354 kernel/stacktrace.c return ret ? 
ret : trace.nr_entries; trace 368 kernel/stacktrace.c struct stack_trace trace = { trace 373 kernel/stacktrace.c save_stack_trace_user(&trace); trace 374 kernel/stacktrace.c return trace.nr_entries; trace 1601 kernel/trace/blktrace.c .trace = blk_trace_event_print, trace 102 kernel/trace/fgraph.c struct ftrace_graph_ent trace; trace 104 kernel/trace/fgraph.c trace.func = func; trace 105 kernel/trace/fgraph.c trace.depth = ++current->curr_ret_depth; trace 111 kernel/trace/fgraph.c if (!ftrace_graph_entry(&trace)) trace 124 kernel/trace/fgraph.c ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, trace 168 kernel/trace/fgraph.c trace->func = current->ret_stack[index].func; trace 169 kernel/trace/fgraph.c trace->calltime = current->ret_stack[index].calltime; trace 170 kernel/trace/fgraph.c trace->overrun = atomic_read(&current->trace_overrun); trace 171 kernel/trace/fgraph.c trace->depth = current->curr_ret_depth--; trace 211 kernel/trace/fgraph.c struct ftrace_graph_ret trace; trace 214 kernel/trace/fgraph.c ftrace_pop_return_trace(&trace, &ret, frame_pointer); trace 215 kernel/trace/fgraph.c trace.rettime = trace_clock_local(); trace 216 kernel/trace/fgraph.c ftrace_graph_return(&trace); trace 330 kernel/trace/fgraph.c int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) trace 420 kernel/trace/fgraph.c static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) trace 422 kernel/trace/fgraph.c if (!ftrace_ops_test(&global_ops, trace->func, NULL)) trace 424 kernel/trace/fgraph.c return __ftrace_graph_entry(trace); trace 453 kernel/trace/ftrace.c static void *function_stat_start(struct tracer_stat *trace) trace 456 kernel/trace/ftrace.c container_of(trace, struct ftrace_profile_stat, stat); trace 796 kernel/trace/ftrace.c static int profile_graph_entry(struct ftrace_graph_ent *trace) trace 800 kernel/trace/ftrace.c function_profile_call(trace->func, 0, NULL, NULL); trace 813 kernel/trace/ftrace.c static void profile_graph_return(struct ftrace_graph_ret *trace) trace 827 kernel/trace/ftrace.c if (!trace->calltime) trace 830 kernel/trace/ftrace.c calltime = trace->rettime - trace->calltime; trace 846 kernel/trace/ftrace.c rec = ftrace_find_profiled_func(stat, trace->func); trace 1845 kernel/trace/trace.c type->flags->trace = type; trace 2541 kernel/trace/trace.c !event_call->event.funcs->trace) trace 2549 kernel/trace/trace.c event_call->event.funcs->trace(iter, 0, event); trace 3458 kernel/trace/trace.c if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) trace 3459 kernel/trace/trace.c *iter->trace = *tr->current_trace; trace 3463 kernel/trace/trace.c if (iter->snapshot && iter->trace->use_max_tr) trace 3508 kernel/trace/trace.c if (iter->snapshot && iter->trace->use_max_tr) trace 3640 kernel/trace/trace.c struct tracer *type = iter->trace; trace 3748 kernel/trace/trace.c return event->funcs->trace(iter, sym_flags, event); trace 3879 kernel/trace/trace.c if (iter->trace && iter->trace->print_line) { trace 3880 kernel/trace/trace.c ret = iter->trace->print_line(iter); trace 4015 kernel/trace/trace.c seq_printf(m, "# tracer: %s\n", iter->trace->name); trace 4021 kernel/trace/trace.c else if (iter->trace && iter->trace->print_header) trace 4022 kernel/trace/trace.c iter->trace->print_header(m); trace 4094 kernel/trace/trace.c iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); trace 4095 kernel/trace/trace.c if (!iter->trace) trace 4098 kernel/trace/trace.c *iter->trace = *tr->current_trace; trace 4118 kernel/trace/trace.c if 
(iter->trace && iter->trace->open) trace 4119 kernel/trace/trace.c iter->trace->open(iter); trace 4160 kernel/trace/trace.c kfree(iter->trace); trace 4223 kernel/trace/trace.c if (iter->trace && iter->trace->close) trace 4224 kernel/trace/trace.c iter->trace->close(iter); trace 4236 kernel/trace/trace.c kfree(iter->trace); trace 4558 kernel/trace/trace.c struct tracer *trace = tracer_flags->trace; trace 4561 kernel/trace/trace.c ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); trace 4575 kernel/trace/trace.c struct tracer *trace = tr->current_trace; trace 4576 kernel/trace/trace.c struct tracer_flags *tracer_flags = trace->flags; trace 4584 kernel/trace/trace.c return __set_tracer_option(tr, trace->flags, opts, neg); trace 5877 kernel/trace/trace.c iter->trace = tr->current_trace; trace 5900 kernel/trace/trace.c if (iter->trace->pipe_open) trace 5901 kernel/trace/trace.c iter->trace->pipe_open(iter); trace 5926 kernel/trace/trace.c if (iter->trace->pipe_close) trace 5927 kernel/trace/trace.c iter->trace->pipe_close(iter); trace 6028 kernel/trace/trace.c if (iter->trace->read) { trace 6029 kernel/trace/trace.c sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); trace 6189 kernel/trace/trace.c if (iter->trace->splice_read) { trace 6190 kernel/trace/trace.c ret = iter->trace->splice_read(iter, filp, trace 6845 kernel/trace/trace.c if (info->iter.trace->use_max_tr) { trace 7225 kernel/trace/trace.c info->iter.trace = tr->current_trace; trace 8902 kernel/trace/trace.c iter->trace = iter->tr->current_trace; trace 8906 kernel/trace/trace.c if (iter->trace && iter->trace->open) trace 8907 kernel/trace/trace.c iter->trace->open(iter); trace 432 kernel/trace/trace.h struct tracer *trace; trace 487 kernel/trace/trace.h int (*selftest)(struct tracer *trace, trace 740 kernel/trace/trace.h void trace_graph_return(struct ftrace_graph_ret *trace); trace 741 kernel/trace/trace.h int trace_graph_entry(struct ftrace_graph_ent *trace); trace 820 kernel/trace/trace.h extern int trace_selftest_startup_function(struct tracer *trace, trace 822 kernel/trace/trace.h extern int trace_selftest_startup_function_graph(struct tracer *trace, trace 824 kernel/trace/trace.h extern int trace_selftest_startup_irqsoff(struct tracer *trace, trace 826 kernel/trace/trace.h extern int trace_selftest_startup_preemptoff(struct tracer *trace, trace 828 kernel/trace/trace.h extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, trace 830 kernel/trace/trace.h extern int trace_selftest_startup_wakeup(struct tracer *trace, trace 832 kernel/trace/trace.h extern int trace_selftest_startup_nop(struct tracer *trace, trace 834 kernel/trace/trace.h extern int trace_selftest_startup_branch(struct tracer *trace, trace 928 kernel/trace/trace.h struct ftrace_graph_ent *trace, trace 931 kernel/trace/trace.h struct ftrace_graph_ret *trace, trace 938 kernel/trace/trace.h static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) trace 940 kernel/trace/trace.h unsigned long addr = trace->func; trace 966 kernel/trace/trace.h trace_recursion_set_depth(trace->depth); trace 985 kernel/trace/trace.h static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace) trace 988 kernel/trace/trace.h trace->depth == trace_recursion_depth()) trace 1015 kernel/trace/trace.h static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) trace 1024 kernel/trace/trace.h static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace) trace 1030 kernel/trace/trace.h static inline bool 
ftrace_graph_ignore_func(struct ftrace_graph_ent *trace) trace 1034 kernel/trace/trace.h ftrace_graph_addr(trace)) || trace 1035 kernel/trace/trace.h (trace->depth < 0) || trace 1036 kernel/trace/trace.h (fgraph_max_depth && trace->depth >= fgraph_max_depth); trace 165 kernel/trace/trace_branch.c .trace = trace_branch_print, trace 317 kernel/trace/trace_branch.c static void *annotated_branch_stat_start(struct tracer_stat *trace) trace 407 kernel/trace/trace_branch.c static void *all_branch_stat_start(struct tracer_stat *trace) trace 648 kernel/trace/trace_events_hist.c struct synth_trace_event trace; trace 649 kernel/trace/trace_events_hist.c int offset = offsetof(typeof(trace), fields); trace 900 kernel/trace/trace_events_hist.c .trace = print_synth_event trace 98 kernel/trace/trace_functions_graph.c struct ftrace_graph_ent *trace, trace 112 kernel/trace/trace_functions_graph.c entry->graph_ent = *trace; trace 127 kernel/trace/trace_functions_graph.c int trace_graph_entry(struct ftrace_graph_ent *trace) trace 147 kernel/trace/trace_functions_graph.c if (ftrace_graph_notrace_addr(trace->func)) { trace 159 kernel/trace/trace_functions_graph.c if (ftrace_graph_ignore_func(trace)) trace 178 kernel/trace/trace_functions_graph.c ret = __trace_graph_entry(tr, trace, flags, pc); trace 218 kernel/trace/trace_functions_graph.c struct ftrace_graph_ret *trace, trace 232 kernel/trace/trace_functions_graph.c entry->ret = *trace; trace 237 kernel/trace/trace_functions_graph.c void trace_graph_return(struct ftrace_graph_ret *trace) trace 246 kernel/trace/trace_functions_graph.c ftrace_graph_addr_finish(trace); trace 259 kernel/trace/trace_functions_graph.c __trace_graph_return(tr, trace, flags, pc); trace 274 kernel/trace/trace_functions_graph.c static void trace_graph_thresh_return(struct ftrace_graph_ret *trace) trace 276 kernel/trace/trace_functions_graph.c ftrace_graph_addr_finish(trace); trace 284 kernel/trace/trace_functions_graph.c (trace->rettime - trace->calltime < tracing_thresh)) trace 287 kernel/trace/trace_functions_graph.c trace_graph_return(trace); trace 901 kernel/trace/trace_functions_graph.c print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, trace 905 kernel/trace/trace_functions_graph.c unsigned long long duration = trace->rettime - trace->calltime; trace 913 kernel/trace/trace_functions_graph.c if (check_irq_return(iter, flags, trace->depth)) trace 927 kernel/trace/trace_functions_graph.c cpu_data->depth = trace->depth - 1; trace 929 kernel/trace/trace_functions_graph.c if (trace->depth < FTRACE_RETFUNC_DEPTH && trace 930 kernel/trace/trace_functions_graph.c !WARN_ON_ONCE(trace->depth < 0)) { trace 931 kernel/trace/trace_functions_graph.c if (cpu_data->enter_funcs[trace->depth] != trace->func) trace 933 kernel/trace/trace_functions_graph.c cpu_data->enter_funcs[trace->depth] = 0; trace 943 kernel/trace/trace_functions_graph.c for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) trace 956 kernel/trace/trace_functions_graph.c trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); trace 961 kernel/trace/trace_functions_graph.c trace->overrun); trace 963 kernel/trace/trace_functions_graph.c print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, trace 1018 kernel/trace/trace_functions_graph.c ret = event->funcs->trace(iter, sym_flags, event); trace 1268 kernel/trace/trace_functions_graph.c .trace = print_graph_function_event, trace 175 kernel/trace/trace_irqsoff.c static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) trace 183 kernel/trace/trace_irqsoff.c 
if (ftrace_graph_ignore_func(trace)) trace 192 kernel/trace/trace_irqsoff.c if (ftrace_graph_notrace_addr(trace->func)) trace 199 kernel/trace/trace_irqsoff.c ret = __trace_graph_entry(tr, trace, flags, pc); trace 205 kernel/trace/trace_irqsoff.c static void irqsoff_graph_return(struct ftrace_graph_ret *trace) trace 212 kernel/trace/trace_irqsoff.c ftrace_graph_addr_finish(trace); trace 218 kernel/trace/trace_irqsoff.c __trace_graph_return(tr, trace, flags, pc); trace 1553 kernel/trace/trace_kprobe.c .trace = print_kretprobe_event trace 1557 kernel/trace/trace_kprobe.c .trace = print_kprobe_event trace 761 kernel/trace/trace_output.c if (event->funcs->trace == NULL) trace 762 kernel/trace/trace_output.c event->funcs->trace = trace_nop_print; trace 882 kernel/trace/trace_output.c .trace = trace_fn_trace, trace 1022 kernel/trace/trace_output.c .trace = trace_ctx_print, trace 1034 kernel/trace/trace_output.c .trace = trace_wake_print, trace 1074 kernel/trace/trace_output.c .trace = trace_stack_print, trace 1127 kernel/trace/trace_output.c .trace = trace_user_stack_print, trace 1191 kernel/trace/trace_output.c .trace = trace_hwlat_print, trace 1235 kernel/trace/trace_output.c .trace = trace_bputs_print, trace 1279 kernel/trace/trace_output.c .trace = trace_bprint_print, trace 1316 kernel/trace/trace_output.c .trace = trace_print_print, trace 1345 kernel/trace/trace_output.c .trace = trace_raw_data, trace 115 kernel/trace/trace_sched_wakeup.c static int wakeup_graph_entry(struct ftrace_graph_ent *trace) trace 122 kernel/trace/trace_sched_wakeup.c if (ftrace_graph_ignore_func(trace)) trace 131 kernel/trace/trace_sched_wakeup.c if (ftrace_graph_notrace_addr(trace->func)) trace 138 kernel/trace/trace_sched_wakeup.c ret = __trace_graph_entry(tr, trace, flags, pc); trace 145 kernel/trace/trace_sched_wakeup.c static void wakeup_graph_return(struct ftrace_graph_ret *trace) trace 152 kernel/trace/trace_sched_wakeup.c ftrace_graph_addr_finish(trace); trace 158 kernel/trace/trace_sched_wakeup.c __trace_graph_return(tr, trace, flags, pc); trace 97 kernel/trace/trace_selftest.c static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) trace 100 kernel/trace/trace_selftest.c trace->name, init_ret); trace 325 kernel/trace/trace_selftest.c static int trace_selftest_startup_dynamic_tracing(struct tracer *trace, trace 355 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); trace 357 kernel/trace/trace_selftest.c warn_failed_init_tracer(trace, ret); trace 393 kernel/trace/trace_selftest.c trace->reset(tr); trace 401 kernel/trace/trace_selftest.c trace->reset(tr); trace 536 kernel/trace/trace_selftest.c # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) trace 653 kernel/trace/trace_selftest.c trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) trace 672 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); trace 674 kernel/trace/trace_selftest.c warn_failed_init_tracer(trace, ret); trace 688 kernel/trace/trace_selftest.c trace->reset(tr); trace 697 kernel/trace/trace_selftest.c ret = trace_selftest_startup_dynamic_tracing(trace, tr, trace 727 kernel/trace/trace_selftest.c static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) trace 741 kernel/trace/trace_selftest.c return trace_graph_entry(trace); trace 754 kernel/trace/trace_selftest.c trace_selftest_startup_function_graph(struct tracer *trace, trace 775 kernel/trace/trace_selftest.c warn_failed_init_tracer(trace, ret); trace 821 kernel/trace/trace_selftest.c 
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) trace 828 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); trace 830 kernel/trace/trace_selftest.c warn_failed_init_tracer(trace, ret); trace 847 kernel/trace/trace_selftest.c trace->stop(tr); trace 854 kernel/trace/trace_selftest.c trace->reset(tr); trace 870 kernel/trace/trace_selftest.c trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) trace 890 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); trace 892 kernel/trace/trace_selftest.c warn_failed_init_tracer(trace, ret); trace 909 kernel/trace/trace_selftest.c trace->stop(tr); trace 916 kernel/trace/trace_selftest.c trace->reset(tr); trace 932 kernel/trace/trace_selftest.c trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr) trace 952 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); trace 954 kernel/trace/trace_selftest.c warn_failed_init_tracer(trace, ret); trace 975 kernel/trace/trace_selftest.c trace->stop(tr); trace 996 kernel/trace/trace_selftest.c trace->start(tr); trace 1005 kernel/trace/trace_selftest.c trace->stop(tr); trace 1024 kernel/trace/trace_selftest.c trace->reset(tr); trace 1033 kernel/trace/trace_selftest.c trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr) trace 1085 kernel/trace/trace_selftest.c trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) trace 1108 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); trace 1110 kernel/trace/trace_selftest.c warn_failed_init_tracer(trace, ret); trace 1144 kernel/trace/trace_selftest.c trace->reset(tr); trace 1163 kernel/trace/trace_selftest.c trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) trace 1169 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); trace 1171 kernel/trace/trace_selftest.c warn_failed_init_tracer(trace, ret); trace 1181 kernel/trace/trace_selftest.c trace->reset(tr); trace 310 kernel/trace/trace_stat.c int register_stat_tracer(struct tracer_stat *trace) trace 315 kernel/trace/trace_stat.c if (!trace) trace 318 kernel/trace/trace_stat.c if (!trace->stat_start || !trace->stat_next || !trace->stat_show) trace 324 kernel/trace/trace_stat.c if (node->ts == trace) trace 334 kernel/trace/trace_stat.c session->ts = trace; trace 353 kernel/trace/trace_stat.c void unregister_stat_tracer(struct tracer_stat *trace) trace 359 kernel/trace/trace_stat.c if (node->ts == trace) { trace 16 kernel/trace/trace_stat.h void *(*stat_start)(struct tracer_stat *trace); trace 31 kernel/trace/trace_stat.h extern int register_stat_tracer(struct tracer_stat *trace); trace 32 kernel/trace/trace_stat.h extern void unregister_stat_tracer(struct tracer_stat *trace); trace 128 kernel/trace/trace_syscalls.c struct syscall_trace_enter *trace; trace 132 kernel/trace/trace_syscalls.c trace = (typeof(trace))ent; trace 133 kernel/trace/trace_syscalls.c syscall = trace->nr; trace 157 kernel/trace/trace_syscalls.c trace->args[i], trace 174 kernel/trace/trace_syscalls.c struct syscall_trace_exit *trace; trace 178 kernel/trace/trace_syscalls.c trace = (typeof(trace))ent; trace 179 kernel/trace/trace_syscalls.c syscall = trace->nr; trace 193 kernel/trace/trace_syscalls.c trace->ret); trace 202 kernel/trace/trace_syscalls.c sizeof(type) != sizeof(trace.field) ? 
\ trace 204 kernel/trace/trace_syscalls.c #type, #name, offsetof(typeof(trace), field), \ trace 205 kernel/trace/trace_syscalls.c sizeof(trace.field), is_signed_type(type) trace 270 kernel/trace/trace_syscalls.c struct syscall_trace_enter trace; trace 274 kernel/trace/trace_syscalls.c int offset = offsetof(typeof(trace), args); trace 294 kernel/trace/trace_syscalls.c struct syscall_trace_exit trace; trace 506 kernel/trace/trace_syscalls.c .trace = print_syscall_enter, trace 510 kernel/trace/trace_syscalls.c .trace = print_syscall_exit, trace 1528 kernel/trace/trace_uprobe.c .trace = print_uprobe_event trace 158 mm/kmemleak.c unsigned long trace[MAX_TRACE]; trace 349 mm/kmemleak.c void *ptr = (void *)object->trace[i]; trace 370 mm/kmemleak.c stack_trace_print(object->trace, object->trace_len, 4); trace 561 mm/kmemleak.c static int __save_stack_trace(unsigned long *trace) trace 563 mm/kmemleak.c return stack_trace_save(trace, MAX_TRACE, 2); trace 618 mm/kmemleak.c object->trace_len = __save_stack_trace(object->trace); trace 1030 mm/kmemleak.c object->trace_len = __save_stack_trace(object->trace); trace 1117 mm/slub.c trace(s, page, object, 1); trace 1197 mm/slub.c trace(s, page, object, 0); trace 5200 mm/slub.c SLAB_ATTR(trace); trace 2231 mm/vmscan.c struct scan_control *sc, bool trace) trace 2267 mm/vmscan.c if (trace) trace 69 net/batman-adv/trace.h #define TRACE_INCLUDE_FILE trace trace 2137 net/ceph/osd_client.c char trace[sizeof(struct ceph_blkin_trace_info)]; trace 83 net/dccp/trace.h #define TRACE_INCLUDE_FILE trace trace 318 net/ieee802154/trace.h #define TRACE_INCLUDE_FILE trace trace 2716 net/mac80211/trace.h #define TRACE_INCLUDE_FILE trace trace 272 net/mac802154/trace.h #define TRACE_INCLUDE_FILE trace trace 30 net/netfilter/nf_tables_core.c if (!info->trace || !pkt->skb->nf_trace) trace 162 net/netfilter/nf_tables_core.c info.trace = false; trace 292 net/netfilter/nf_tables_trace.c info->trace = true; trace 410 net/rds/af_rds.c struct rds_rx_trace_so trace; trace 416 net/rds/af_rds.c if (copy_from_user(&trace, optval, sizeof(trace))) trace 419 net/rds/af_rds.c if (trace.rx_traces > RDS_MSG_RX_DGRAM_TRACE_MAX) trace 422 net/rds/af_rds.c rs->rs_rx_traces = trace.rx_traces; trace 424 net/rds/af_rds.c if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) { trace 428 net/rds/af_rds.c rs->rs_rx_trace[i] = trace.rx_trace_pos[i]; trace 58 net/rxrpc/call_event.c goto trace; trace 73 net/rxrpc/call_event.c goto trace; trace 128 net/rxrpc/call_event.c trace: trace 248 net/sched/sch_generic.c goto trace; trace 271 net/sched/sch_generic.c trace: trace 430 net/tipc/trace.h #define TRACE_INCLUDE_FILE trace trace 3471 net/wireless/trace.h #define TRACE_INCLUDE_FILE trace trace 26 samples/trace_events/trace-events-sample.h #define TRACE_SYSTEM sample-trace trace 523 samples/trace_events/trace-events-sample.h #define TRACE_INCLUDE_FILE trace-events-sample trace 77 sound/firewire/amdtp-stream-trace.h #define TRACE_INCLUDE_FILE amdtp-stream-trace trace 84 sound/firewire/motu/amdtp-motu-trace.h #define TRACE_INCLUDE_FILE amdtp-motu-trace trace 88 sound/hda/trace.h #define TRACE_INCLUDE_FILE trace trace 13 tools/include/linux/stacktrace.h static inline void print_stack_trace(struct stack_trace *trace, int spaces) trace 15 tools/include/linux/stacktrace.h backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1); trace 18 tools/include/linux/stacktrace.h #define save_stack_trace(trace) \ trace 19 tools/include/linux/stacktrace.h ((trace)->nr_entries = \ trace 20 
tools/include/linux/stacktrace.h backtrace((void **)(trace)->entries, (trace)->max_entries)) trace 1194 tools/perf/builtin-trace.c struct trace *trace) trace 1198 tools/perf/builtin-trace.c if (ttrace == NULL || trace->fd_path_disabled) trace 1205 tools/perf/builtin-trace.c if (!trace->live) trace 1207 tools/perf/builtin-trace.c ++trace->stats.proc_getname; trace 1219 tools/perf/builtin-trace.c const char *path = thread__fd_path(arg->thread, fd, arg->trace); trace 1227 tools/perf/builtin-trace.c size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size) trace 1230 tools/perf/builtin-trace.c struct thread *thread = machine__find_thread(trace->host, pid, pid); trace 1233 tools/perf/builtin-trace.c const char *path = thread__fd_path(thread, fd, trace); trace 1290 tools/perf/builtin-trace.c if (!arg->trace->vfs_getname) trace 1297 tools/perf/builtin-trace.c static bool trace__filter_duration(struct trace *trace, double t) trace 1299 tools/perf/builtin-trace.c return t < (trace->duration_filter * NSEC_PER_MSEC); trace 1302 tools/perf/builtin-trace.c static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) trace 1304 tools/perf/builtin-trace.c double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; trace 1315 tools/perf/builtin-trace.c static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) trace 1318 tools/perf/builtin-trace.c return __trace__fprintf_tstamp(trace, tstamp, fp); trace 1332 tools/perf/builtin-trace.c static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp) trace 1336 tools/perf/builtin-trace.c if (trace->multiple_threads) { trace 1337 tools/perf/builtin-trace.c if (trace->show_comm) trace 1345 tools/perf/builtin-trace.c static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, trace 1350 tools/perf/builtin-trace.c if (trace->show_tstamp) trace 1351 tools/perf/builtin-trace.c printed = trace__fprintf_tstamp(trace, tstamp, fp); trace 1352 tools/perf/builtin-trace.c if (trace->show_duration) trace 1354 tools/perf/builtin-trace.c return printed + trace__fprintf_comm_tid(trace, thread, fp); trace 1357 tools/perf/builtin-trace.c static int trace__process_event(struct trace *trace, struct machine *machine, trace 1364 tools/perf/builtin-trace.c color_fprintf(trace->output, PERF_COLOR_RED, trace 1381 tools/perf/builtin-trace.c struct trace *trace = container_of(tool, struct trace, tool); trace 1382 tools/perf/builtin-trace.c return trace__process_event(trace, machine, event, sample); trace 1403 tools/perf/builtin-trace.c static int trace__symbols_init(struct trace *trace, struct evlist *evlist) trace 1410 tools/perf/builtin-trace.c trace->host = machine__new_host(); trace 1411 tools/perf/builtin-trace.c if (trace->host == NULL) trace 1414 tools/perf/builtin-trace.c err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); trace 1418 tools/perf/builtin-trace.c err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, trace 1428 tools/perf/builtin-trace.c static void trace__symbols__exit(struct trace *trace) trace 1430 tools/perf/builtin-trace.c machine__exit(trace->host); trace 1431 tools/perf/builtin-trace.c trace->host = NULL; trace 1500 tools/perf/builtin-trace.c static int trace__read_syscall_info(struct trace *trace, int id) trace 1504 tools/perf/builtin-trace.c const char *name = syscalltbl__name(trace->sctbl, id); trace 1506 tools/perf/builtin-trace.c if (trace->syscalls.table == NULL) 
{ trace 1507 tools/perf/builtin-trace.c trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); trace 1508 tools/perf/builtin-trace.c if (trace->syscalls.table == NULL) trace 1512 tools/perf/builtin-trace.c sc = trace->syscalls.table + id; trace 1562 tools/perf/builtin-trace.c static int trace__validate_ev_qualifier(struct trace *trace) trace 1567 tools/perf/builtin-trace.c size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); trace 1569 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries = malloc(nr_allocated * trace 1570 tools/perf/builtin-trace.c sizeof(trace->ev_qualifier_ids.entries[0])); trace 1572 tools/perf/builtin-trace.c if (trace->ev_qualifier_ids.entries == NULL) { trace 1574 tools/perf/builtin-trace.c trace->output); trace 1579 tools/perf/builtin-trace.c strlist__for_each_entry(pos, trace->ev_qualifier) { trace 1581 tools/perf/builtin-trace.c int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; trace 1584 tools/perf/builtin-trace.c id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); trace 1599 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries[nr_used++] = id; trace 1604 tools/perf/builtin-trace.c id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); trace 1611 tools/perf/builtin-trace.c entries = realloc(trace->ev_qualifier_ids.entries, trace 1612 tools/perf/builtin-trace.c nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); trace 1615 tools/perf/builtin-trace.c fputs("\nError:\t Not enough memory for parsing\n", trace->output); trace 1618 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries = entries; trace 1620 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries[nr_used++] = id; trace 1624 tools/perf/builtin-trace.c trace->ev_qualifier_ids.nr = nr_used; trace 1625 tools/perf/builtin-trace.c qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); trace 1631 tools/perf/builtin-trace.c zfree(&trace->ev_qualifier_ids.entries); trace 1632 tools/perf/builtin-trace.c trace->ev_qualifier_ids.nr = 0; trace 1636 tools/perf/builtin-trace.c static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) trace 1640 tools/perf/builtin-trace.c if (trace->ev_qualifier_ids.nr == 0) trace 1643 tools/perf/builtin-trace.c in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, trace 1644 tools/perf/builtin-trace.c trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; trace 1647 tools/perf/builtin-trace.c return !trace->not_ev_qualifier; trace 1649 tools/perf/builtin-trace.c return trace->not_ev_qualifier; trace 1705 tools/perf/builtin-trace.c struct trace *trace, struct thread *thread) trace 1718 tools/perf/builtin-trace.c .trace = trace, trace 1720 tools/perf/builtin-trace.c .show_string_prefix = trace->show_string_prefix, trace 1752 tools/perf/builtin-trace.c !trace->show_zeros && trace 1762 tools/perf/builtin-trace.c if (trace->show_arg_names) trace 1790 tools/perf/builtin-trace.c typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel, trace 1794 tools/perf/builtin-trace.c static struct syscall *trace__syscall_info(struct trace *trace, trace 1813 tools/perf/builtin-trace.c fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", trace 1821 tools/perf/builtin-trace.c if (id > trace->sctbl->syscalls.max_id) trace 1824 tools/perf/builtin-trace.c if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && trace 1825 tools/perf/builtin-trace.c (err = 
trace__read_syscall_info(trace, id)) != 0) trace 1828 tools/perf/builtin-trace.c if (trace->syscalls.table[id].name == NULL) { trace 1829 tools/perf/builtin-trace.c if (trace->syscalls.table[id].nonexistent) trace 1834 tools/perf/builtin-trace.c return &trace->syscalls.table[id]; trace 1839 tools/perf/builtin-trace.c fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf))); trace 1840 tools/perf/builtin-trace.c if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) trace 1841 tools/perf/builtin-trace.c fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); trace 1842 tools/perf/builtin-trace.c fputs(" information\n", trace->output); trace 1873 tools/perf/builtin-trace.c static int trace__printf_interrupted_entry(struct trace *trace) trace 1879 tools/perf/builtin-trace.c if (trace->failure_only || trace->current == NULL) trace 1882 tools/perf/builtin-trace.c ttrace = thread__priv(trace->current); trace 1887 tools/perf/builtin-trace.c printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); trace 1888 tools/perf/builtin-trace.c printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); trace 1890 tools/perf/builtin-trace.c if (len < trace->args_alignment - 4) trace 1891 tools/perf/builtin-trace.c printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); trace 1893 tools/perf/builtin-trace.c printed += fprintf(trace->output, " ...\n"); trace 1896 tools/perf/builtin-trace.c ++trace->nr_events_printed; trace 1901 tools/perf/builtin-trace.c static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, trace 1906 tools/perf/builtin-trace.c if (trace->print_sample) { trace 1909 tools/perf/builtin-trace.c printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n", trace 1944 tools/perf/builtin-trace.c static int trace__sys_enter(struct trace *trace, struct evsel *evsel, trace 1955 tools/perf/builtin-trace.c struct syscall *sc = trace__syscall_info(trace, evsel, id); trace 1961 tools/perf/builtin-trace.c thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace 1962 tools/perf/builtin-trace.c ttrace = thread__trace(thread, trace->output); trace 1966 tools/perf/builtin-trace.c trace__fprintf_sample(trace, evsel, sample, thread); trace 1976 tools/perf/builtin-trace.c if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) trace 1977 tools/perf/builtin-trace.c trace__printf_interrupted_entry(trace); trace 1988 tools/perf/builtin-trace.c if (evsel != trace->syscalls.events.sys_enter) trace 1989 tools/perf/builtin-trace.c augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); trace 1995 tools/perf/builtin-trace.c args, augmented_args, augmented_args_size, trace, thread); trace 1998 tools/perf/builtin-trace.c if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) { trace 2001 tools/perf/builtin-trace.c trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output); trace 2002 tools/perf/builtin-trace.c printed = fprintf(trace->output, "%s)", ttrace->entry_str); trace 2003 tools/perf/builtin-trace.c if (trace->args_alignment > printed) trace 2004 tools/perf/builtin-trace.c alignment = trace->args_alignment - printed; trace 2005 tools/perf/builtin-trace.c fprintf(trace->output, "%*s= ?\n", alignment, " "); trace 2013 tools/perf/builtin-trace.c if 
(trace->current != thread) { trace 2014 tools/perf/builtin-trace.c thread__put(trace->current); trace 2015 tools/perf/builtin-trace.c trace->current = thread__get(thread); trace 2023 tools/perf/builtin-trace.c static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, trace 2029 tools/perf/builtin-trace.c struct syscall *sc = trace__syscall_info(trace, evsel, id); trace 2037 tools/perf/builtin-trace.c thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace 2038 tools/perf/builtin-trace.c ttrace = thread__trace(thread, trace->output); trace 2047 tools/perf/builtin-trace.c augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); trace 2048 tools/perf/builtin-trace.c syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); trace 2049 tools/perf/builtin-trace.c fprintf(trace->output, "%s", msg); trace 2056 tools/perf/builtin-trace.c static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel, trace 2063 tools/perf/builtin-trace.c trace->max_stack; trace 2066 tools/perf/builtin-trace.c if (machine__resolve(trace->host, &al, sample) < 0) trace 2074 tools/perf/builtin-trace.c static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) trace 2081 tools/perf/builtin-trace.c return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output); trace 2092 tools/perf/builtin-trace.c static int trace__sys_exit(struct trace *trace, struct evsel *evsel, trace 2101 tools/perf/builtin-trace.c int alignment = trace->args_alignment; trace 2102 tools/perf/builtin-trace.c struct syscall *sc = trace__syscall_info(trace, evsel, id); trace 2108 tools/perf/builtin-trace.c thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace 2109 tools/perf/builtin-trace.c ttrace = thread__trace(thread, trace->output); trace 2113 tools/perf/builtin-trace.c trace__fprintf_sample(trace, evsel, sample, thread); trace 2115 tools/perf/builtin-trace.c if (trace->summary) trace 2120 tools/perf/builtin-trace.c if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { trace 2123 tools/perf/builtin-trace.c ++trace->stats.vfs_getname; trace 2128 tools/perf/builtin-trace.c if (trace__filter_duration(trace, duration)) trace 2131 tools/perf/builtin-trace.c } else if (trace->duration_filter) trace 2135 tools/perf/builtin-trace.c callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); trace 2137 tools/perf/builtin-trace.c if (callchain_cursor.nr < trace->min_stack) trace 2143 tools/perf/builtin-trace.c if (trace->summary_only || (ret >= 0 && trace->failure_only)) trace 2146 tools/perf/builtin-trace.c trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output); trace 2149 tools/perf/builtin-trace.c printed = fprintf(trace->output, "%s", ttrace->entry_str); trace 2151 tools/perf/builtin-trace.c printed += fprintf(trace->output, " ... 
["); trace 2152 tools/perf/builtin-trace.c color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); trace 2154 tools/perf/builtin-trace.c printed += fprintf(trace->output, "]: %s()", sc->name); trace 2164 tools/perf/builtin-trace.c fprintf(trace->output, ")%*s= ", alignment, " "); trace 2170 tools/perf/builtin-trace.c fprintf(trace->output, "%ld", ret); trace 2177 tools/perf/builtin-trace.c fprintf(trace->output, "-1 %s (%s)", e, emsg); trace 2180 tools/perf/builtin-trace.c fprintf(trace->output, "0 (Timeout)"); trace 2186 tools/perf/builtin-trace.c .trace = trace, trace 2190 tools/perf/builtin-trace.c fprintf(trace->output, "%s", bf); trace 2192 tools/perf/builtin-trace.c fprintf(trace->output, "%#lx", ret); trace 2194 tools/perf/builtin-trace.c struct thread *child = machine__find_thread(trace->host, ret, ret); trace 2197 tools/perf/builtin-trace.c fprintf(trace->output, "%ld", ret); trace 2199 tools/perf/builtin-trace.c fprintf(trace->output, " (%s)", thread__comm_str(child)); trace 2205 tools/perf/builtin-trace.c fputc('\n', trace->output); trace 2211 tools/perf/builtin-trace.c if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) trace 2215 tools/perf/builtin-trace.c trace__fprintf_callchain(trace, sample); trace 2226 tools/perf/builtin-trace.c static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, trace 2230 tools/perf/builtin-trace.c struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace 2287 tools/perf/builtin-trace.c static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, trace 2293 tools/perf/builtin-trace.c struct thread *thread = machine__findnew_thread(trace->host, trace 2296 tools/perf/builtin-trace.c struct thread_trace *ttrace = thread__trace(thread, trace->output); trace 2302 tools/perf/builtin-trace.c trace->runtime_ms += runtime_ms; trace 2308 tools/perf/builtin-trace.c fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", trace 2341 tools/perf/builtin-trace.c static void bpf_output__fprintf(struct trace *trace, trace 2345 tools/perf/builtin-trace.c bpf_output__printer, NULL, trace->output); trace 2346 tools/perf/builtin-trace.c ++trace->nr_events_printed; trace 2349 tools/perf/builtin-trace.c static int trace__event_handler(struct trace *trace, struct evsel *evsel, trace 2364 tools/perf/builtin-trace.c thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace 2367 tools/perf/builtin-trace.c callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); trace 2369 tools/perf/builtin-trace.c if (callchain_cursor.nr < trace->min_stack) trace 2375 tools/perf/builtin-trace.c trace__printf_interrupted_entry(trace); trace 2376 tools/perf/builtin-trace.c trace__fprintf_tstamp(trace, sample->time, trace->output); trace 2378 tools/perf/builtin-trace.c if (trace->trace_syscalls && trace->show_duration) trace 2379 tools/perf/builtin-trace.c fprintf(trace->output, "( ): "); trace 2382 tools/perf/builtin-trace.c trace__fprintf_comm_tid(trace, thread, trace->output); trace 2384 tools/perf/builtin-trace.c if (evsel == trace->syscalls.events.augmented) { trace 2386 tools/perf/builtin-trace.c struct syscall *sc = trace__syscall_info(trace, evsel, id); trace 2389 tools/perf/builtin-trace.c fprintf(trace->output, "%s(", sc->name); trace 2390 tools/perf/builtin-trace.c trace__fprintf_sys_enter(trace, evsel, sample); trace 2391 tools/perf/builtin-trace.c fputc(')', trace->output); trace 2402 
tools/perf/builtin-trace.c fprintf(trace->output, "%s:", evsel->name); trace 2405 tools/perf/builtin-trace.c bpf_output__fprintf(trace, sample); trace 2408 tools/perf/builtin-trace.c trace__fprintf_sys_enter(trace, evsel, sample)) { trace 2411 tools/perf/builtin-trace.c trace->output); trace 2412 tools/perf/builtin-trace.c ++trace->nr_events_printed; trace 2422 tools/perf/builtin-trace.c fprintf(trace->output, "\n"); trace 2425 tools/perf/builtin-trace.c trace__fprintf_callchain(trace, sample); trace 2450 tools/perf/builtin-trace.c static int trace__pgfault(struct trace *trace, trace 2462 tools/perf/builtin-trace.c thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace 2465 tools/perf/builtin-trace.c callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); trace 2467 tools/perf/builtin-trace.c if (callchain_cursor.nr < trace->min_stack) trace 2473 tools/perf/builtin-trace.c ttrace = thread__trace(thread, trace->output); trace 2482 tools/perf/builtin-trace.c if (trace->summary_only) trace 2487 tools/perf/builtin-trace.c trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); trace 2489 tools/perf/builtin-trace.c fprintf(trace->output, "%sfault [", trace 2493 tools/perf/builtin-trace.c print_location(trace->output, sample, &al, false, true); trace 2495 tools/perf/builtin-trace.c fprintf(trace->output, "] => "); trace 2508 tools/perf/builtin-trace.c print_location(trace->output, sample, &al, true, false); trace 2510 tools/perf/builtin-trace.c fprintf(trace->output, " (%c%c)\n", map_type, al.level); trace 2513 tools/perf/builtin-trace.c trace__fprintf_callchain(trace, sample); trace 2517 tools/perf/builtin-trace.c ++trace->nr_events_printed; trace 2525 tools/perf/builtin-trace.c static void trace__set_base_time(struct trace *trace, trace 2537 tools/perf/builtin-trace.c if (trace->base_time == 0 && !trace->full_time && trace 2539 tools/perf/builtin-trace.c trace->base_time = sample->time; trace 2548 tools/perf/builtin-trace.c struct trace *trace = container_of(tool, struct trace, tool); trace 2554 tools/perf/builtin-trace.c thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); trace 2558 tools/perf/builtin-trace.c trace__set_base_time(trace, evsel, sample); trace 2561 tools/perf/builtin-trace.c ++trace->nr_events; trace 2562 tools/perf/builtin-trace.c handler(trace, evsel, event, sample); trace 2569 tools/perf/builtin-trace.c static int trace__record(struct trace *trace, int argc, const char **argv) trace 2599 tools/perf/builtin-trace.c if (trace->trace_syscalls) { trace 2615 tools/perf/builtin-trace.c if (trace->trace_pgfaults & TRACE_PFMAJ) trace 2619 tools/perf/builtin-trace.c if (trace->trace_pgfaults & TRACE_PFMIN) trace 2629 tools/perf/builtin-trace.c static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); trace 2679 tools/perf/builtin-trace.c static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) trace 2685 tools/perf/builtin-trace.c trace__process_event(trace, trace->host, event, sample); trace 2689 tools/perf/builtin-trace.c evsel = perf_evlist__id2evsel(trace->evlist, sample->id); trace 2691 tools/perf/builtin-trace.c fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); trace 2695 tools/perf/builtin-trace.c if (evswitch__discard(&trace->evswitch, evsel)) trace 2698 tools/perf/builtin-trace.c trace__set_base_time(trace, evsel, sample); trace 2702 tools/perf/builtin-trace.c fprintf(trace->output, 
"%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", trace 2707 tools/perf/builtin-trace.c handler(trace, evsel, event, sample); trace 2710 tools/perf/builtin-trace.c if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX) trace 2714 tools/perf/builtin-trace.c static int trace__add_syscall_newtp(struct trace *trace) trace 2717 tools/perf/builtin-trace.c struct evlist *evlist = trace->evlist; trace 2734 tools/perf/builtin-trace.c perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); trace 2735 tools/perf/builtin-trace.c perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); trace 2740 tools/perf/builtin-trace.c if (callchain_param.enabled && !trace->kernel_syscallchains) { trace 2749 tools/perf/builtin-trace.c trace->syscalls.events.sys_enter = sys_enter; trace 2750 tools/perf/builtin-trace.c trace->syscalls.events.sys_exit = sys_exit; trace 2763 tools/perf/builtin-trace.c static int trace__set_ev_qualifier_tp_filter(struct trace *trace) trace 2767 tools/perf/builtin-trace.c char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, trace 2768 tools/perf/builtin-trace.c trace->ev_qualifier_ids.nr, trace 2769 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries); trace 2774 tools/perf/builtin-trace.c if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter, trace 2776 tools/perf/builtin-trace.c sys_exit = trace->syscalls.events.sys_exit; trace 2789 tools/perf/builtin-trace.c static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) trace 2791 tools/perf/builtin-trace.c if (trace->bpf_obj == NULL) trace 2794 tools/perf/builtin-trace.c return bpf_object__find_program_by_title(trace->bpf_obj, name); trace 2797 tools/perf/builtin-trace.c static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, trace 2805 tools/perf/builtin-trace.c prog = trace__find_bpf_program_by_title(trace, default_prog_name); trace 2810 tools/perf/builtin-trace.c prog = trace__find_bpf_program_by_title(trace, default_prog_name); trace 2817 tools/perf/builtin-trace.c prog = trace__find_bpf_program_by_title(trace, prog_name); trace 2827 tools/perf/builtin-trace.c return trace->syscalls.unaugmented_prog; trace 2830 tools/perf/builtin-trace.c static void trace__init_syscall_bpf_progs(struct trace *trace, int id) trace 2832 tools/perf/builtin-trace.c struct syscall *sc = trace__syscall_info(trace, NULL, id); trace 2837 tools/perf/builtin-trace.c sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter"); trace 2838 tools/perf/builtin-trace.c sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit"); trace 2841 tools/perf/builtin-trace.c static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) trace 2843 tools/perf/builtin-trace.c struct syscall *sc = trace__syscall_info(trace, NULL, id); trace 2844 tools/perf/builtin-trace.c return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog); trace 2847 tools/perf/builtin-trace.c static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) trace 2849 tools/perf/builtin-trace.c struct syscall *sc = trace__syscall_info(trace, NULL, id); trace 2850 tools/perf/builtin-trace.c return sc ? 
bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog); trace 2853 tools/perf/builtin-trace.c static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry) trace 2855 tools/perf/builtin-trace.c struct syscall *sc = trace__syscall_info(trace, NULL, id); trace 2872 tools/perf/builtin-trace.c static int trace__set_ev_qualifier_bpf_filter(struct trace *trace) trace 2874 tools/perf/builtin-trace.c int fd = bpf_map__fd(trace->syscalls.map); trace 2876 tools/perf/builtin-trace.c .enabled = !trace->not_ev_qualifier, trace 2881 tools/perf/builtin-trace.c for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) { trace 2882 tools/perf/builtin-trace.c int key = trace->ev_qualifier_ids.entries[i]; trace 2885 tools/perf/builtin-trace.c trace__init_bpf_map_syscall_args(trace, key, &value); trace 2886 tools/perf/builtin-trace.c trace__init_syscall_bpf_progs(trace, key); trace 2897 tools/perf/builtin-trace.c static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled) trace 2899 tools/perf/builtin-trace.c int fd = bpf_map__fd(trace->syscalls.map); trace 2905 tools/perf/builtin-trace.c for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) { trace 2907 tools/perf/builtin-trace.c trace__init_bpf_map_syscall_args(trace, key, &value); trace 2917 tools/perf/builtin-trace.c static int trace__init_syscalls_bpf_map(struct trace *trace) trace 2921 tools/perf/builtin-trace.c if (trace->ev_qualifier_ids.nr) trace 2922 tools/perf/builtin-trace.c enabled = trace->not_ev_qualifier; trace 2924 tools/perf/builtin-trace.c return __trace__init_syscalls_bpf_map(trace, enabled); trace 2927 tools/perf/builtin-trace.c static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc) trace 2943 tools/perf/builtin-trace.c for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) { trace 2944 tools/perf/builtin-trace.c struct syscall *pair = trace__syscall_info(trace, NULL, id); trace 2949 tools/perf/builtin-trace.c pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog) trace 3001 tools/perf/builtin-trace.c pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? 
trace 3002 tools/perf/builtin-trace.c if (pair_prog == trace->syscalls.unaugmented_prog)
trace 3015 tools/perf/builtin-trace.c static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
trace 3017 tools/perf/builtin-trace.c int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
trace 3018 tools/perf/builtin-trace.c map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
trace 3021 tools/perf/builtin-trace.c for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
trace 3024 tools/perf/builtin-trace.c if (!trace__syscall_enabled(trace, key))
trace 3027 tools/perf/builtin-trace.c trace__init_syscall_bpf_progs(trace, key);
trace 3030 tools/perf/builtin-trace.c prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
trace 3034 tools/perf/builtin-trace.c prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
trace 3068 tools/perf/builtin-trace.c for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
trace 3069 tools/perf/builtin-trace.c struct syscall *sc = trace__syscall_info(trace, NULL, key);
trace 3080 tools/perf/builtin-trace.c if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
trace 3087 tools/perf/builtin-trace.c pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
trace 3107 tools/perf/builtin-trace.c static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
trace 3112 tools/perf/builtin-trace.c static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
trace 3117 tools/perf/builtin-trace.c static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
trace 3123 tools/perf/builtin-trace.c static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
trace 3129 tools/perf/builtin-trace.c static int trace__set_ev_qualifier_filter(struct trace *trace)
trace 3131 tools/perf/builtin-trace.c if (trace->syscalls.map)
trace 3132 tools/perf/builtin-trace.c return trace__set_ev_qualifier_bpf_filter(trace);
trace 3133 tools/perf/builtin-trace.c if (trace->syscalls.events.sys_enter)
trace 3134 tools/perf/builtin-trace.c return trace__set_ev_qualifier_tp_filter(trace);
trace 3156 tools/perf/builtin-trace.c static int trace__set_filter_loop_pids(struct trace *trace)
trace 3162 tools/perf/builtin-trace.c struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
trace 3165 tools/perf/builtin-trace.c struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
trace 3178 tools/perf/builtin-trace.c err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
trace 3179 tools/perf/builtin-trace.c if (!err && trace->filter_pids.map)
trace 3180 tools/perf/builtin-trace.c err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
trace 3185 tools/perf/builtin-trace.c static int trace__set_filter_pids(struct trace *trace)
trace 3194 tools/perf/builtin-trace.c if (trace->filter_pids.nr > 0) {
trace 3195 tools/perf/builtin-trace.c err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
trace 3196 tools/perf/builtin-trace.c trace->filter_pids.entries);
trace 3197 tools/perf/builtin-trace.c if (!err && trace->filter_pids.map) {
trace 3198 tools/perf/builtin-trace.c err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
trace 3199 tools/perf/builtin-trace.c trace->filter_pids.entries);
trace 3201 tools/perf/builtin-trace.c } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
trace 3202 tools/perf/builtin-trace.c err = trace__set_filter_loop_pids(trace);
trace 3208 tools/perf/builtin-trace.c static int __trace__deliver_event(struct trace *trace, union perf_event *event)
trace 3210 tools/perf/builtin-trace.c struct evlist *evlist = trace->evlist;
trace 3216 tools/perf/builtin-trace.c fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
trace 3218 tools/perf/builtin-trace.c trace__handle_event(trace, event, &sample);
trace 3223 tools/perf/builtin-trace.c static int __trace__flush_events(struct trace *trace)
trace 3225 tools/perf/builtin-trace.c u64 first = ordered_events__first_time(&trace->oe.data);
trace 3226 tools/perf/builtin-trace.c u64 flush = trace->oe.last - NSEC_PER_SEC;
trace 3230 tools/perf/builtin-trace.c return ordered_events__flush_time(&trace->oe.data, flush);
trace 3235 tools/perf/builtin-trace.c static int trace__flush_events(struct trace *trace)
trace 3237 tools/perf/builtin-trace.c return !trace->sort_events ? 0 : __trace__flush_events(trace);
trace 3240 tools/perf/builtin-trace.c static int trace__deliver_event(struct trace *trace, union perf_event *event)
trace 3244 tools/perf/builtin-trace.c if (!trace->sort_events)
trace 3245 tools/perf/builtin-trace.c return __trace__deliver_event(trace, event);
trace 3247 tools/perf/builtin-trace.c err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
trace 3251 tools/perf/builtin-trace.c err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
trace 3255 tools/perf/builtin-trace.c return trace__flush_events(trace);
trace 3261 tools/perf/builtin-trace.c struct trace *trace = container_of(oe, struct trace, oe.data);
trace 3263 tools/perf/builtin-trace.c return __trace__deliver_event(trace, event->event);
trace 3266 tools/perf/builtin-trace.c static int trace__run(struct trace *trace, int argc, const char **argv)
trace 3268 tools/perf/builtin-trace.c struct evlist *evlist = trace->evlist;
trace 3275 tools/perf/builtin-trace.c trace->live = true;
trace 3277 tools/perf/builtin-trace.c if (!trace->raw_augmented_syscalls) {
trace 3278 tools/perf/builtin-trace.c if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
trace 3281 tools/perf/builtin-trace.c if (trace->trace_syscalls)
trace 3282 tools/perf/builtin-trace.c trace->vfs_getname = evlist__add_vfs_getname(evlist);
trace 3285 tools/perf/builtin-trace.c if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
trace 3289 tools/perf/builtin-trace.c perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
trace 3293 tools/perf/builtin-trace.c if ((trace->trace_pgfaults & TRACE_PFMIN)) {
trace 3297 tools/perf/builtin-trace.c perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
trace 3301 tools/perf/builtin-trace.c if (trace->sched &&
trace 3331 tools/perf/builtin-trace.c if (trace->cgroup)
trace 3332 tools/perf/builtin-trace.c evlist__set_default_cgroup(trace->evlist, trace->cgroup);
trace 3334 tools/perf/builtin-trace.c err = perf_evlist__create_maps(evlist, &trace->opts.target);
trace 3336 tools/perf/builtin-trace.c fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
trace 3340 tools/perf/builtin-trace.c err = trace__symbols_init(trace, evlist);
trace 3342 tools/perf/builtin-trace.c fprintf(trace->output, "Problems initializing symbol libraries!\n");
trace 3346 tools/perf/builtin-trace.c perf_evlist__config(evlist, &trace->opts, &callchain_param);
trace 3352 tools/perf/builtin-trace.c err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
trace 3355 tools/perf/builtin-trace.c fprintf(trace->output, "Couldn't run the workload!\n");
trace 3374 tools/perf/builtin-trace.c err = trace__set_filter_pids(trace);
trace 3378 tools/perf/builtin-trace.c if (trace->syscalls.map)
trace 3379 tools/perf/builtin-trace.c trace__init_syscalls_bpf_map(trace);
trace 3381 tools/perf/builtin-trace.c if (trace->syscalls.prog_array.sys_enter)
trace 3382 tools/perf/builtin-trace.c trace__init_syscalls_bpf_prog_array_maps(trace);
trace 3384 tools/perf/builtin-trace.c if (trace->ev_qualifier_ids.nr > 0) {
trace 3385 tools/perf/builtin-trace.c err = trace__set_ev_qualifier_filter(trace);
trace 3389 tools/perf/builtin-trace.c if (trace->syscalls.events.sys_exit) {
trace 3391 tools/perf/builtin-trace.c trace->syscalls.events.sys_exit->filter);
trace 3406 tools/perf/builtin-trace.c trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
trace 3412 tools/perf/builtin-trace.c if (trace->dump.map)
trace 3413 tools/perf/builtin-trace.c bpf_map__fprintf(trace->dump.map, trace->output);
trace 3415 tools/perf/builtin-trace.c err = evlist__mmap(evlist, trace->opts.mmap_pages);
trace 3419 tools/perf/builtin-trace.c if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
trace 3425 tools/perf/builtin-trace.c if (trace->opts.initial_delay) {
trace 3426 tools/perf/builtin-trace.c usleep(trace->opts.initial_delay * 1000);
trace 3430 tools/perf/builtin-trace.c trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
trace 3443 tools/perf/builtin-trace.c evsel->core.attr.sample_max_stack = trace->max_stack;
trace 3446 tools/perf/builtin-trace.c before = trace->nr_events;
trace 3457 tools/perf/builtin-trace.c ++trace->nr_events;
trace 3459 tools/perf/builtin-trace.c err = trace__deliver_event(trace, event);
trace 3476 tools/perf/builtin-trace.c if (trace->nr_events == before) {
trace 3485 tools/perf/builtin-trace.c if (trace__flush_events(trace))
trace 3493 tools/perf/builtin-trace.c thread__zput(trace->current);
trace 3497 tools/perf/builtin-trace.c if (trace->sort_events)
trace 3498 tools/perf/builtin-trace.c ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
trace 3501 tools/perf/builtin-trace.c if (trace->summary)
trace 3502 tools/perf/builtin-trace.c trace__fprintf_thread_summary(trace, trace->output);
trace 3504 tools/perf/builtin-trace.c if (trace->show_tool_stats) {
trace 3505 tools/perf/builtin-trace.c fprintf(trace->output, "Stats:\n "
trace 3508 tools/perf/builtin-trace.c trace->stats.vfs_getname,
trace 3509 tools/perf/builtin-trace.c trace->stats.proc_getname);
trace 3514 tools/perf/builtin-trace.c trace__symbols__exit(trace);
trace 3517 tools/perf/builtin-trace.c cgroup__put(trace->cgroup);
trace 3518 tools/perf/builtin-trace.c trace->evlist = NULL;
trace 3519 tools/perf/builtin-trace.c trace->live = false;
trace 3540 tools/perf/builtin-trace.c fprintf(trace->output, "%s\n", errbuf);
trace 3544 tools/perf/builtin-trace.c fprintf(trace->output,
trace 3551 tools/perf/builtin-trace.c fprintf(trace->output, "Not enough memory to run!\n");
trace 3555 tools/perf/builtin-trace.c fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
trace 3559 tools/perf/builtin-trace.c static int trace__replay(struct trace *trace)
trace 3567 tools/perf/builtin-trace.c .force = trace->force,
trace 3573 tools/perf/builtin-trace.c trace->tool.sample = trace__process_sample;
trace 3574 tools/perf/builtin-trace.c trace->tool.mmap = perf_event__process_mmap;
trace 3575 tools/perf/builtin-trace.c trace->tool.mmap2 = perf_event__process_mmap2;
trace 3576 tools/perf/builtin-trace.c trace->tool.comm = perf_event__process_comm;
trace 3577 tools/perf/builtin-trace.c trace->tool.exit = perf_event__process_exit;
trace 3578 tools/perf/builtin-trace.c trace->tool.fork = perf_event__process_fork;
trace 3579 tools/perf/builtin-trace.c trace->tool.attr = perf_event__process_attr;
trace 3580 tools/perf/builtin-trace.c trace->tool.tracing_data = perf_event__process_tracing_data;
trace 3581 tools/perf/builtin-trace.c trace->tool.build_id = perf_event__process_build_id;
trace 3582 tools/perf/builtin-trace.c trace->tool.namespaces = perf_event__process_namespaces;
trace 3584 tools/perf/builtin-trace.c trace->tool.ordered_events = true;
trace 3585 tools/perf/builtin-trace.c trace->tool.ordering_requires_timestamps = true;
trace 3588 tools/perf/builtin-trace.c trace->multiple_threads = true;
trace 3590 tools/perf/builtin-trace.c session = perf_session__new(&data, false, &trace->tool);
trace 3594 tools/perf/builtin-trace.c if (trace->opts.target.pid)
trace 3595 tools/perf/builtin-trace.c symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
trace 3597 tools/perf/builtin-trace.c if (trace->opts.target.tid)
trace 3598 tools/perf/builtin-trace.c symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
trace 3603 tools/perf/builtin-trace.c trace->host = &session->machines.host;
trace 3649 tools/perf/builtin-trace.c else if (trace->summary)
trace 3650 tools/perf/builtin-trace.c trace__fprintf_thread_summary(trace, trace->output);
trace 3682 tools/perf/builtin-trace.c struct trace *trace, FILE *fp)
trace 3710 tools/perf/builtin-trace.c sc = &trace->syscalls.table[syscall_stats_entry->syscall];
trace 3724 tools/perf/builtin-trace.c static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
trace 3733 tools/perf/builtin-trace.c ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
trace 3742 tools/perf/builtin-trace.c if (trace->sched)
trace 3747 tools/perf/builtin-trace.c printed += thread__dump_stats(ttrace, trace, fp);
trace 3764 tools/perf/builtin-trace.c static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
trace 3771 tools/perf/builtin-trace.c DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
trace 3779 tools/perf/builtin-trace.c printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
trace 3789 tools/perf/builtin-trace.c struct trace *trace = opt->value;
trace 3791 tools/perf/builtin-trace.c trace->duration_filter = atof(str);
trace 3800 tools/perf/builtin-trace.c struct trace *trace = opt->value;
trace 3810 tools/perf/builtin-trace.c i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
trace 3811 tools/perf/builtin-trace.c trace->filter_pids.entries = calloc(i, sizeof(pid_t));
trace 3813 tools/perf/builtin-trace.c if (trace->filter_pids.entries == NULL)
trace 3816 tools/perf/builtin-trace.c trace->filter_pids.entries[0] = getpid();
trace 3818 tools/perf/builtin-trace.c for (i = 1; i < trace->filter_pids.nr; ++i)
trace 3819 tools/perf/builtin-trace.c trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
trace 3827 tools/perf/builtin-trace.c static int trace__open_output(struct trace *trace, const char *filename)
trace 3839 tools/perf/builtin-trace.c trace->output = fopen(filename, "w");
trace 3841 tools/perf/builtin-trace.c return trace->output == NULL ? -errno : 0;
trace 3910 tools/perf/builtin-trace.c struct trace *trace = (struct trace *)opt->value;
trace 3923 tools/perf/builtin-trace.c trace->not_ev_qualifier = true;
trace 3931 tools/perf/builtin-trace.c if (syscalltbl__id(trace->sctbl, s) >= 0 ||
trace 3932 tools/perf/builtin-trace.c syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
trace 3968 tools/perf/builtin-trace.c trace->ev_qualifier = strlist__new(lists[1], &slist_config);
trace 3969 tools/perf/builtin-trace.c if (trace->ev_qualifier == NULL) {
trace 3970 tools/perf/builtin-trace.c fputs("Not enough memory to parse event qualifier", trace->output);
trace 3974 tools/perf/builtin-trace.c if (trace__validate_ev_qualifier(trace))
trace 3976 tools/perf/builtin-trace.c trace->trace_syscalls = true;
trace 3982 tools/perf/builtin-trace.c struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
trace 3996 tools/perf/builtin-trace.c struct trace *trace = opt->value;
trace 3998 tools/perf/builtin-trace.c if (!list_empty(&trace->evlist->core.entries))
trace 4001 tools/perf/builtin-trace.c trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
trace 4006 tools/perf/builtin-trace.c static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
trace 4008 tools/perf/builtin-trace.c if (trace->bpf_obj == NULL)
trace 4011 tools/perf/builtin-trace.c return bpf_object__find_map_by_name(trace->bpf_obj, name);
trace 4014 tools/perf/builtin-trace.c static void trace__set_bpf_map_filtered_pids(struct trace *trace)
trace 4016 tools/perf/builtin-trace.c trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
trace 4019 tools/perf/builtin-trace.c static void trace__set_bpf_map_syscalls(struct trace *trace)
trace 4021 tools/perf/builtin-trace.c trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
trace 4022 tools/perf/builtin-trace.c trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
trace 4023 tools/perf/builtin-trace.c trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
trace 4028 tools/perf/builtin-trace.c struct trace *trace = arg;
trace 4032 tools/perf/builtin-trace.c struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
trace 4042 tools/perf/builtin-trace.c trace->show_tstamp = perf_config_bool(var, value);
trace 4044 tools/perf/builtin-trace.c trace->show_duration = perf_config_bool(var, value);
trace 4046 tools/perf/builtin-trace.c trace->show_arg_names = perf_config_bool(var, value);
trace 4047 tools/perf/builtin-trace.c if (!trace->show_arg_names)
trace 4048 tools/perf/builtin-trace.c trace->show_zeros = true;
trace 4051 tools/perf/builtin-trace.c if (!trace->show_arg_names && !new_show_zeros) {
trace 4055 tools/perf/builtin-trace.c trace->show_zeros = new_show_zeros;
trace 4057 tools/perf/builtin-trace.c trace->show_string_prefix = perf_config_bool(var, value);
trace 4059 tools/perf/builtin-trace.c trace->opts.no_inherit = perf_config_bool(var, value);
trace 4063 tools/perf/builtin-trace.c trace->args_alignment = args_alignment;
trace 4078 tools/perf/builtin-trace.c struct trace trace = {
trace 4103 tools/perf/builtin-trace.c OPT_CALLBACK('e', "event", &trace, "event",
trace 4106 tools/perf/builtin-trace.c OPT_BOOLEAN(0, "comm", &trace.show_comm,
trace 4108 tools/perf/builtin-trace.c OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
trace 4109 tools/perf/builtin-trace.c OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
trace 4113 tools/perf/builtin-trace.c OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
trace 4115 tools/perf/builtin-trace.c OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
trace 4117 tools/perf/builtin-trace.c OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
trace 4119 tools/perf/builtin-trace.c OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
trace 4121 tools/perf/builtin-trace.c OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
trace 4123 tools/perf/builtin-trace.c OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
trace 4125 tools/perf/builtin-trace.c OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
trace 4128 tools/perf/builtin-trace.c OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
trace 4130 tools/perf/builtin-trace.c OPT_CALLBACK(0, "duration", &trace, "float",
trace 4136 tools/perf/builtin-trace.c OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
trace 4138 tools/perf/builtin-trace.c OPT_BOOLEAN('T', "time", &trace.full_time,
trace 4140 tools/perf/builtin-trace.c OPT_BOOLEAN(0, "failure", &trace.failure_only,
trace 4142 tools/perf/builtin-trace.c OPT_BOOLEAN('s', "summary", &trace.summary_only,
trace 4144 tools/perf/builtin-trace.c OPT_BOOLEAN('S', "with-summary", &trace.summary,
trace 4146 tools/perf/builtin-trace.c OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
trace 4148 tools/perf/builtin-trace.c OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
trace 4149 tools/perf/builtin-trace.c OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
trace 4150 tools/perf/builtin-trace.c OPT_CALLBACK(0, "call-graph", &trace.opts,
trace 4153 tools/perf/builtin-trace.c OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
trace 4155 tools/perf/builtin-trace.c OPT_ULONG(0, "max-events", &trace.max_events,
trace 4157 tools/perf/builtin-trace.c OPT_UINTEGER(0, "min-stack", &trace.min_stack,
trace 4160 tools/perf/builtin-trace.c OPT_UINTEGER(0, "max-stack", &trace.max_stack,
trace 4164 tools/perf/builtin-trace.c OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
trace 4166 tools/perf/builtin-trace.c OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
trace 4170 tools/perf/builtin-trace.c OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
trace 4172 tools/perf/builtin-trace.c OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
trace 4175 tools/perf/builtin-trace.c OPTS_EVSWITCH(&trace.evswitch),
trace 4188 tools/perf/builtin-trace.c trace.evlist = evlist__new();
trace 4189 tools/perf/builtin-trace.c trace.sctbl = syscalltbl__new();
trace 4191 tools/perf/builtin-trace.c if (trace.evlist == NULL || trace.sctbl == NULL) {
trace 4206 tools/perf/builtin-trace.c err = perf_config(trace__config, &trace);
trace 4213 tools/perf/builtin-trace.c if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
trace 4218 tools/perf/builtin-trace.c evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
trace 4220 tools/perf/builtin-trace.c bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
trace 4226 tools/perf/builtin-trace.c trace.syscalls.events.augmented = evsel;
trace 4228 tools/perf/builtin-trace.c evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
trace 4239 tools/perf/builtin-trace.c trace.bpf_obj = evsel->bpf_obj;
trace 4241 tools/perf/builtin-trace.c trace__set_bpf_map_filtered_pids(&trace);
trace 4242 tools/perf/builtin-trace.c trace__set_bpf_map_syscalls(&trace);
trace 4243 tools/perf/builtin-trace.c trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
trace 4246 tools/perf/builtin-trace.c err = bpf__setup_stdout(trace.evlist);
trace 4248 tools/perf/builtin-trace.c bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
trace 4256 tools/perf/builtin-trace.c trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
trace 4257 tools/perf/builtin-trace.c if (trace.dump.map == NULL) {
trace 4263 tools/perf/builtin-trace.c if (trace.trace_pgfaults) {
trace 4264 tools/perf/builtin-trace.c trace.opts.sample_address = true;
trace 4265 tools/perf/builtin-trace.c trace.opts.sample_time = true;
trace 4268 tools/perf/builtin-trace.c if (trace.opts.mmap_pages == UINT_MAX)
trace 4271 tools/perf/builtin-trace.c if (trace.max_stack == UINT_MAX) {
trace 4272 tools/perf/builtin-trace.c trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
trace 4277 tools/perf/builtin-trace.c if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
trace 4278 tools/perf/builtin-trace.c record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
trace 4284 tools/perf/builtin-trace.c trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
trace 4289 tools/perf/builtin-trace.c if (trace.evlist->core.nr_entries > 0) {
trace 4290 tools/perf/builtin-trace.c evlist__set_evsel_handler(trace.evlist, trace__event_handler);
trace 4291 tools/perf/builtin-trace.c if (evlist__set_syscall_tp_fields(trace.evlist)) {
trace 4297 tools/perf/builtin-trace.c if (trace.sort_events) {
trace 4298 tools/perf/builtin-trace.c ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
trace 4299 tools/perf/builtin-trace.c ordered_events__set_copy_on_queue(&trace.oe.data, true);
trace 4313 tools/perf/builtin-trace.c if (trace.syscalls.events.augmented) {
trace 4314 tools/perf/builtin-trace.c evlist__for_each_entry(trace.evlist, evsel) {
trace 4318 tools/perf/builtin-trace.c trace.raw_augmented_syscalls = true;
trace 4322 tools/perf/builtin-trace.c if (trace.syscalls.events.augmented->priv == NULL &&
trace 4324 tools/perf/builtin-trace.c struct evsel *augmented = trace.syscalls.events.augmented;
trace 4371 tools/perf/builtin-trace.c if (trace.raw_augmented_syscalls)
trace 4372 tools/perf/builtin-trace.c trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
trace 4380 tools/perf/builtin-trace.c return trace__record(&trace, argc-1, &argv[1]);
trace 4383 tools/perf/builtin-trace.c if (trace.summary_only)
trace 4384 tools/perf/builtin-trace.c trace.summary = trace.summary_only;
trace 4386 tools/perf/builtin-trace.c if (!trace.trace_syscalls && !trace.trace_pgfaults &&
trace 4387 tools/perf/builtin-trace.c trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
trace 4388 tools/perf/builtin-trace.c trace.trace_syscalls = true;
trace 4392 tools/perf/builtin-trace.c err = trace__open_output(&trace, output_name);
trace 4399 tools/perf/builtin-trace.c err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
trace 4403 tools/perf/builtin-trace.c err = target__validate(&trace.opts.target);
trace 4405 tools/perf/builtin-trace.c target__strerror(&trace.opts.target, err, bf, sizeof(bf));
trace 4406 tools/perf/builtin-trace.c fprintf(trace.output, "%s", bf);
trace 4410 tools/perf/builtin-trace.c err = target__parse_uid(&trace.opts.target);
trace 4412 tools/perf/builtin-trace.c target__strerror(&trace.opts.target, err, bf, sizeof(bf));
trace 4413 tools/perf/builtin-trace.c fprintf(trace.output, "%s", bf);
trace 4417 tools/perf/builtin-trace.c if (!argc && target__none(&trace.opts.target))
trace 4418 tools/perf/builtin-trace.c trace.opts.target.system_wide = true;
trace 4421 tools/perf/builtin-trace.c err = trace__replay(&trace);
trace 4423 tools/perf/builtin-trace.c err = trace__run(&trace, argc, argv);
trace 4427 tools/perf/builtin-trace.c fclose(trace.output);
trace 32 tools/perf/trace/beauty/beauty.h struct trace;
trace 54 tools/perf/trace/beauty/beauty.h size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size);
trace 102 tools/perf/trace/beauty/beauty.h struct trace *trace;
trace 26 tools/perf/trace/beauty/kcmp.c return pid__scnprintf_fd(arg->trace, pid, fd, bf, size);
trace 6 tools/perf/trace/beauty/pid.c struct trace *trace = arg->trace;
trace 8 tools/perf/trace/beauty/pid.c struct thread *thread = machine__findnew_thread(trace->host, pid, pid);
trace 1847 tools/perf/util/sort.c MK_SORT_ENTRY_CHK(trace)
trace 22 tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c int trace(void *ctx)
trace 38 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
trace 53 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c trace_log_reg(ebb_state.trace, SPRN_SIAR, siar);
trace 56 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c trace_log_reg(ebb_state.trace, SPRN_PMC1, val);
trace 59 tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
trace 37 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
trace 40 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
trace 43 tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c trace_log_string(ebb_state.trace, "frozen");
trace 111 tools/testing/selftests/powerpc/pmu/ebb/ebb.c trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
trace 114 tools/testing/selftests/powerpc/pmu/ebb/ebb.c trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
trace 249 tools/testing/selftests/powerpc/pmu/ebb/ebb.c trace_buffer_print(ebb_state.trace);
trace 266 tools/testing/selftests/powerpc/pmu/ebb/ebb.c trace_log_reg(ebb_state.trace, SPRN_PMC1 + pmc - 1, val);
trace 486 tools/testing/selftests/powerpc/pmu/ebb/ebb.c ebb_state.trace = trace_buffer_allocate(1 * 1024 * 1024);
trace 29 tools/testing/selftests/powerpc/pmu/ebb/ebb.h struct trace_buffer *trace;
trace 1019 tools/testing/selftests/seccomp/seccomp_bpf.c struct sock_fprog trace;
trace 1074 tools/testing/selftests/seccomp/seccomp_bpf.c FILTER_ALLOC(trace);
trace 1085 tools/testing/selftests/seccomp/seccomp_bpf.c FILTER_FREE(trace);
trace 1104 tools/testing/selftests/seccomp/seccomp_bpf.c ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
trace 1130 tools/testing/selftests/seccomp/seccomp_bpf.c ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
trace 1163 tools/testing/selftests/seccomp/seccomp_bpf.c ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
trace 1186 tools/testing/selftests/seccomp/seccomp_bpf.c ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
trace 1213 tools/testing/selftests/seccomp/seccomp_bpf.c ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
trace 1236 tools/testing/selftests/seccomp/seccomp_bpf.c ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
trace 1258 tools/testing/selftests/seccomp/seccomp_bpf.c ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
trace 1280 tools/testing/selftests/seccomp/seccomp_bpf.c ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
trace 1297 tools/testing/selftests/seccomp/seccomp_bpf.c ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
trace 34 tools/vm/slabinfo.c unsigned int sanity_checks, slab_size, store_user, trace;
trace 561 tools/vm/slabinfo.c s->align, s->objs_per_slab, onoff(s->trace),
trace 621 tools/vm/slabinfo.c if (s->trace)
trace 767 tools/vm/slabinfo.c if (tracing && !s->trace) {
trace 773 tools/vm/slabinfo.c if (!tracing && s->trace)
trace 1239 tools/vm/slabinfo.c slab->trace = get_obj("trace");
trace 375 virt/kvm/arm/trace.h #define TRACE_INCLUDE_FILE trace
trace 35 virt/kvm/arm/vgic/trace.h #define TRACE_INCLUDE_FILE trace