This source file includes the following definitions; a brief userspace usage sketch follows the list.
- hist_field_none
- hist_field_counter
- hist_field_string
- hist_field_dynstring
- hist_field_pstring
- hist_field_log2
- hist_field_plus
- hist_field_minus
- hist_field_unary_minus
- is_synth_event
- to_synth_event
- synth_event_is_busy
- synth_event_match
- track_data_free
- track_data_alloc
- errpos
- last_cmd_set
- hist_err
- hist_err_clear
- synth_event_define_fields
- synth_field_signed
- synth_field_is_string
- synth_field_string_size
- synth_field_size
- synth_field_fmt
- print_synth_event_num_val
- print_synth_event
- trace_event_raw_event_synth
- free_synth_event_print_fmt
- __set_synth_event_print_fmt
- set_synth_event_print_fmt
- free_synth_field
- parse_synth_field
- free_synth_tracepoint
- alloc_synth_tracepoint
- trace_synth
- find_synth_event
- register_synth_event
- unregister_synth_event
- free_synth_event
- alloc_synth_event
- action_trace
- __create_synth_event
- create_or_delete_synth_event
- synth_event_create
- synth_event_release
- __synth_event_show
- synth_event_show
- synth_events_seq_show
- synth_events_open
- synth_events_write
- hist_field_timestamp
- hist_field_cpu
- check_field_for_var_ref
- find_var_ref
- find_any_var_ref
- check_var_refs
- find_hist_vars
- field_has_hist_vars
- has_hist_vars
- save_hist_vars
- remove_hist_vars
- find_var_field
- find_var
- find_var_file
- find_file_var
- find_match_var
- find_event_var
- hist_field_var_ref
- resolve_var_refs
- hist_field_name
- select_value_fn
- parse_map_size
- destroy_hist_trigger_attrs
- parse_action
- parse_assignment
- parse_hist_trigger_attrs
- save_comm
- hist_elt_data_free
- hist_trigger_elt_data_free
- hist_trigger_elt_data_alloc
- hist_trigger_elt_data_init
- get_hist_field_flags
- expr_field_str
- expr_str
- contains_operator
- get_hist_field
- __destroy_hist_field
- destroy_hist_field
- create_hist_field
- destroy_hist_fields
- init_var_ref
- find_var_ref_idx
- create_var_ref
- is_var_ref
- field_name_from_var
- local_field_var_ref
- parse_var_ref
- parse_field
- create_alias
- parse_atom
- parse_unary
- check_expr_operands
- parse_expr
- find_trigger_filter
- compatible_keys
- find_compatible_hist
- find_synthetic_field_var
- create_field_var_hist
- find_target_event_var
- __update_field_vars
- update_field_vars
- save_track_data_vars
- create_var
- create_field_var
- create_target_field_var
- check_track_val_max
- check_track_val_changed
- get_track_val
- save_track_val
- save_track_data
- check_track_val
- cond_snapshot_update
- save_track_data_snapshot
- snapshot_action
- track_data_snapshot_print
- cond_snapshot_update
- save_track_data_snapshot
- track_data_snapshot_print
- track_data_print
- ontrack_action
- action_data_destroy
- track_data_destroy
- track_data_create
- parse_action_params
- action_parse
- track_data_parse
- onmatch_destroy
- destroy_field_var
- destroy_field_vars
- save_field_var
- check_synth_field
- trace_action_find_var
- trace_action_create_field_var
- trace_action_create
- action_create
- onmatch_create
- onmatch_parse
- create_hitcount_val
- __create_val_field
- create_val_field
- create_var_field
- create_val_fields
- create_key_field
- create_key_fields
- create_var_fields
- free_var_defs
- parse_var_defs
- create_hist_fields
- is_descending
- create_sort_keys
- destroy_actions
- parse_actions
- create_actions
- print_actions
- print_action_spec
- print_track_data_spec
- print_onmatch_spec
- actions_match
- print_actions_spec
- destroy_field_var_hists
- destroy_hist_data
- create_tracing_map_fields
- create_hist_data
- hist_trigger_elt_update
- add_to_key
- hist_trigger_actions
- event_hist_trigger
- hist_trigger_stacktrace_print
- hist_trigger_print_key
- hist_trigger_entry_print
- print_entries
- hist_trigger_show
- hist_show
- event_hist_open
- hist_field_print
- event_hist_trigger_print
- event_hist_trigger_init
- unregister_field_var_hists
- event_hist_trigger_free
- event_hist_trigger_named_init
- event_hist_trigger_named_free
- event_hist_get_trigger_ops
- hist_clear
- compatible_field
- hist_trigger_match
- hist_register_trigger
- hist_trigger_enable
- have_hist_trigger_match
- hist_trigger_check_refs
- hist_unregister_trigger
- hist_file_check_refs
- hist_unreg_all
- event_hist_trigger_func
- register_trigger_hist_cmd
- hist_enable_trigger
- hist_enable_count_trigger
- hist_enable_get_trigger_ops
- hist_enable_unreg_all
- unregister_trigger_hist_enable_disable_cmds
- register_trigger_hist_enable_disable_cmds
- trace_events_hist_init
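
The definitions above implement the two tracefs interfaces this file provides: synth_events_write() parses lines written to the synthetic_events file to define synthetic events, and event_hist_trigger_func() parses "hist:" strings written to a per-event trigger file to create histogram triggers (keys=, vals=, sort=, onmatch()/onmax()/onchange() actions, and so on). The sketch below is illustrative only and is not part of this source file; the tracefs mount point, the sched_waking/sched_switch events, and the wakeup_latency synthetic event are assumptions taken from the usual wakeup-latency example.

/*
 * Illustrative userspace sketch (not part of this file): defines a
 * wakeup_latency synthetic event and wires it up with hist triggers,
 * assuming tracefs is mounted at /sys/kernel/tracing.
 */
#include <stdio.h>
#include <stdlib.h>

static void write_str(const char *path, const char *s)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fprintf(f, "%s\n", s);
	fclose(f);
}

int main(void)
{
	const char *tracefs = "/sys/kernel/tracing";
	char path[256];

	/* Define a synthetic event; parsed by synth_events_write(). */
	snprintf(path, sizeof(path), "%s/synthetic_events", tracefs);
	write_str(path, "wakeup_latency u64 lat; pid_t pid");

	/* Save a per-pid timestamp variable ts0 on sched_waking. */
	snprintf(path, sizeof(path), "%s/events/sched/sched_waking/trigger", tracefs);
	write_str(path, "hist:keys=pid:ts0=common_timestamp.usecs");

	/* On sched_switch, compute the latency and fire the synthetic event. */
	snprintf(path, sizeof(path), "%s/events/sched/sched_switch/trigger", tracefs);
	write_str(path, "hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:"
			"onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid)");

	return 0;
}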
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_hist - trace event hist triggers
4  *
5  * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20
21 #include "tracing_map.h"
22 #include "trace.h"
23 #include "trace_dynevent.h"
24
25 #define SYNTH_SYSTEM "synthetic"
26 #define SYNTH_FIELDS_MAX 16
27
28 #define STR_VAR_LEN_MAX 32
29
30 #define ERRORS \
31 C(NONE, "No error"), \
32 C(DUPLICATE_VAR, "Variable already defined"), \
33 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
34 C(TOO_MANY_VARS, "Too many variables defined"), \
35 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
36 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
37 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
38 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
39 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
40 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
41 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
42 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
43 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
44 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
45 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
46 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
47 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
48 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
49 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
50 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
51 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
52 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
53 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
54 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
55 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
56 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
57 C(TOO_MANY_PARAMS, "Too many action params"), \
58 C(PARAM_NOT_FOUND, "Couldn't find param"), \
59 C(INVALID_PARAM, "Invalid action param"), \
60 C(ACTION_NOT_FOUND, "No action found"), \
61 C(NO_SAVE_PARAMS, "No params found for save()"), \
62 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
63 C(ACTION_MISMATCH, "Handler doesn't support action"), \
64 C(NO_CLOSING_PAREN, "No closing paren found"), \
65 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
66 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
67 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
68 C(VAR_NOT_FOUND, "Couldn't find variable"), \
69 C(FIELD_NOT_FOUND, "Couldn't find field"),
70
71 #undef C
72 #define C(a, b) HIST_ERR_##a
73
74 enum { ERRORS };
75
76 #undef C
77 #define C(a, b) b
78
79 static const char *err_text[] = { ERRORS };
80
81 struct hist_field;
82
83 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
84 struct tracing_map_elt *elt,
85 struct ring_buffer_event *rbe,
86 void *event);
87
88 #define HIST_FIELD_OPERANDS_MAX 2
89 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
90 #define HIST_ACTIONS_MAX 8
91
92 enum field_op_id {
93 FIELD_OP_NONE,
94 FIELD_OP_PLUS,
95 FIELD_OP_MINUS,
96 FIELD_OP_UNARY_MINUS,
97 };
98
99
100 /*
101  * A hist_var (histogram variable) contains variable information for
102  * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
103  * flag set.  A hist_var has a variable name e.g. ts0, and is
104  * associated with a given histogram trigger, as specified by
105  * hist_data.  The hist_var idx is the unique index assigned to the
106  * variable by the hist trigger's tracing_map, and is what is used
107  * to set and retrieve the variable's value.
108  */
109 struct hist_var {
110 char *name;
111 struct hist_trigger_data *hist_data;
112 unsigned int idx;
113 };
114
115 struct hist_field {
116 struct ftrace_event_field *field;
117 unsigned long flags;
118 hist_field_fn_t fn;
119 unsigned int ref;
120 unsigned int size;
121 unsigned int offset;
122 unsigned int is_signed;
123 const char *type;
124 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
125 struct hist_trigger_data *hist_data;
126
127 /*
128  * Variable fields contain variable-specific info in var.
129  */
130 struct hist_var var;
131 enum field_op_id operator;
132 char *system;
133 char *event_name;
134
135 /*
136  * The name field is used for EXPR and VAR_REF fields.  VAR
137  * fields contain the variable name in var.name.
138  */
139 char *name;
140
141 /*
142  * When a histogram trigger is hit, if it has any references
143  * to variables, the values of those variables are collected
144  * into a var_ref_vals array by resolve_var_refs().  The
145  * current value of each variable is read from the tracing_map
146  * using the hist field's hist_var.idx and entered into the
147  * var_ref_idx'th element of var_ref_vals.
148  */
149 unsigned int var_ref_idx;
150 bool read_once;
151 };
152
153 static u64 hist_field_none(struct hist_field *field,
154 struct tracing_map_elt *elt,
155 struct ring_buffer_event *rbe,
156 void *event)
157 {
158 return 0;
159 }
160
161 static u64 hist_field_counter(struct hist_field *field,
162 struct tracing_map_elt *elt,
163 struct ring_buffer_event *rbe,
164 void *event)
165 {
166 return 1;
167 }
168
169 static u64 hist_field_string(struct hist_field *hist_field,
170 struct tracing_map_elt *elt,
171 struct ring_buffer_event *rbe,
172 void *event)
173 {
174 char *addr = (char *)(event + hist_field->field->offset);
175
176 return (u64)(unsigned long)addr;
177 }
178
179 static u64 hist_field_dynstring(struct hist_field *hist_field,
180 struct tracing_map_elt *elt,
181 struct ring_buffer_event *rbe,
182 void *event)
183 {
184 u32 str_item = *(u32 *)(event + hist_field->field->offset);
185 int str_loc = str_item & 0xffff;
186 char *addr = (char *)(event + str_loc);
187
188 return (u64)(unsigned long)addr;
189 }
190
191 static u64 hist_field_pstring(struct hist_field *hist_field,
192 struct tracing_map_elt *elt,
193 struct ring_buffer_event *rbe,
194 void *event)
195 {
196 char **addr = (char **)(event + hist_field->field->offset);
197
198 return (u64)(unsigned long)*addr;
199 }
200
201 static u64 hist_field_log2(struct hist_field *hist_field,
202 struct tracing_map_elt *elt,
203 struct ring_buffer_event *rbe,
204 void *event)
205 {
206 struct hist_field *operand = hist_field->operands[0];
207
208 u64 val = operand->fn(operand, elt, rbe, event);
209
210 return (u64) ilog2(roundup_pow_of_two(val));
211 }
212
213 static u64 hist_field_plus(struct hist_field *hist_field,
214 struct tracing_map_elt *elt,
215 struct ring_buffer_event *rbe,
216 void *event)
217 {
218 struct hist_field *operand1 = hist_field->operands[0];
219 struct hist_field *operand2 = hist_field->operands[1];
220
221 u64 val1 = operand1->fn(operand1, elt, rbe, event);
222 u64 val2 = operand2->fn(operand2, elt, rbe, event);
223
224 return val1 + val2;
225 }
226
227 static u64 hist_field_minus(struct hist_field *hist_field,
228 struct tracing_map_elt *elt,
229 struct ring_buffer_event *rbe,
230 void *event)
231 {
232 struct hist_field *operand1 = hist_field->operands[0];
233 struct hist_field *operand2 = hist_field->operands[1];
234
235 u64 val1 = operand1->fn(operand1, elt, rbe, event);
236 u64 val2 = operand2->fn(operand2, elt, rbe, event);
237
238 return val1 - val2;
239 }
240
241 static u64 hist_field_unary_minus(struct hist_field *hist_field,
242 struct tracing_map_elt *elt,
243 struct ring_buffer_event *rbe,
244 void *event)
245 {
246 struct hist_field *operand = hist_field->operands[0];
247
248 s64 sval = (s64)operand->fn(operand, elt, rbe, event);
249 u64 val = (u64)-sval;
250
251 return val;
252 }
253
254 #define DEFINE_HIST_FIELD_FN(type) \
255 static u64 hist_field_##type(struct hist_field *hist_field, \
256 struct tracing_map_elt *elt, \
257 struct ring_buffer_event *rbe, \
258 void *event) \
259 { \
260 type *addr = (type *)(event + hist_field->field->offset); \
261 \
262 return (u64)(unsigned long)*addr; \
263 }
264
265 DEFINE_HIST_FIELD_FN(s64);
266 DEFINE_HIST_FIELD_FN(u64);
267 DEFINE_HIST_FIELD_FN(s32);
268 DEFINE_HIST_FIELD_FN(u32);
269 DEFINE_HIST_FIELD_FN(s16);
270 DEFINE_HIST_FIELD_FN(u16);
271 DEFINE_HIST_FIELD_FN(s8);
272 DEFINE_HIST_FIELD_FN(u8);
273
274 #define for_each_hist_field(i, hist_data) \
275 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
276
277 #define for_each_hist_val_field(i, hist_data) \
278 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
279
280 #define for_each_hist_key_field(i, hist_data) \
281 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
282
283 #define HIST_STACKTRACE_DEPTH 16
284 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
285 #define HIST_STACKTRACE_SKIP 5
286
287 #define HITCOUNT_IDX 0
288 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
289
290 enum hist_field_flags {
291 HIST_FIELD_FL_HITCOUNT = 1 << 0,
292 HIST_FIELD_FL_KEY = 1 << 1,
293 HIST_FIELD_FL_STRING = 1 << 2,
294 HIST_FIELD_FL_HEX = 1 << 3,
295 HIST_FIELD_FL_SYM = 1 << 4,
296 HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
297 HIST_FIELD_FL_EXECNAME = 1 << 6,
298 HIST_FIELD_FL_SYSCALL = 1 << 7,
299 HIST_FIELD_FL_STACKTRACE = 1 << 8,
300 HIST_FIELD_FL_LOG2 = 1 << 9,
301 HIST_FIELD_FL_TIMESTAMP = 1 << 10,
302 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11,
303 HIST_FIELD_FL_VAR = 1 << 12,
304 HIST_FIELD_FL_EXPR = 1 << 13,
305 HIST_FIELD_FL_VAR_REF = 1 << 14,
306 HIST_FIELD_FL_CPU = 1 << 15,
307 HIST_FIELD_FL_ALIAS = 1 << 16,
308 };
309
310 struct var_defs {
311 unsigned int n_vars;
312 char *name[TRACING_MAP_VARS_MAX];
313 char *expr[TRACING_MAP_VARS_MAX];
314 };
315
316 struct hist_trigger_attrs {
317 char *keys_str;
318 char *vals_str;
319 char *sort_key_str;
320 char *name;
321 char *clock;
322 bool pause;
323 bool cont;
324 bool clear;
325 bool ts_in_usecs;
326 unsigned int map_bits;
327
328 char *assignment_str[TRACING_MAP_VARS_MAX];
329 unsigned int n_assignments;
330
331 char *action_str[HIST_ACTIONS_MAX];
332 unsigned int n_actions;
333
334 struct var_defs var_defs;
335 };
336
337 struct field_var {
338 struct hist_field *var;
339 struct hist_field *val;
340 };
341
342 struct field_var_hist {
343 struct hist_trigger_data *hist_data;
344 char *cmd;
345 };
346
347 struct hist_trigger_data {
348 struct hist_field *fields[HIST_FIELDS_MAX];
349 unsigned int n_vals;
350 unsigned int n_keys;
351 unsigned int n_fields;
352 unsigned int n_vars;
353 unsigned int key_size;
354 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
355 unsigned int n_sort_keys;
356 struct trace_event_file *event_file;
357 struct hist_trigger_attrs *attrs;
358 struct tracing_map *map;
359 bool enable_timestamps;
360 bool remove;
361 struct hist_field *var_refs[TRACING_MAP_VARS_MAX];
362 unsigned int n_var_refs;
363
364 struct action_data *actions[HIST_ACTIONS_MAX];
365 unsigned int n_actions;
366
367 struct field_var *field_vars[SYNTH_FIELDS_MAX];
368 unsigned int n_field_vars;
369 unsigned int n_field_var_str;
370 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX];
371 unsigned int n_field_var_hists;
372
373 struct field_var *save_vars[SYNTH_FIELDS_MAX];
374 unsigned int n_save_vars;
375 unsigned int n_save_var_str;
376 };
377
378 static int synth_event_create(int argc, const char **argv);
379 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
380 static int synth_event_release(struct dyn_event *ev);
381 static bool synth_event_is_busy(struct dyn_event *ev);
382 static bool synth_event_match(const char *system, const char *event,
383 int argc, const char **argv, struct dyn_event *ev);
384
385 static struct dyn_event_operations synth_event_ops = {
386 .create = synth_event_create,
387 .show = synth_event_show,
388 .is_busy = synth_event_is_busy,
389 .free = synth_event_release,
390 .match = synth_event_match,
391 };
392
393 struct synth_field {
394 char *type;
395 char *name;
396 size_t size;
397 bool is_signed;
398 bool is_string;
399 };
400
401 struct synth_event {
402 struct dyn_event devent;
403 int ref;
404 char *name;
405 struct synth_field **fields;
406 unsigned int n_fields;
407 unsigned int n_u64;
408 struct trace_event_class class;
409 struct trace_event_call call;
410 struct tracepoint *tp;
411 };
412
413 static bool is_synth_event(struct dyn_event *ev)
414 {
415 return ev->ops == &synth_event_ops;
416 }
417
418 static struct synth_event *to_synth_event(struct dyn_event *ev)
419 {
420 return container_of(ev, struct synth_event, devent);
421 }
422
423 static bool synth_event_is_busy(struct dyn_event *ev)
424 {
425 struct synth_event *event = to_synth_event(ev);
426
427 return event->ref != 0;
428 }
429
430 static bool synth_event_match(const char *system, const char *event,
431 int argc, const char **argv, struct dyn_event *ev)
432 {
433 struct synth_event *sev = to_synth_event(ev);
434
435 return strcmp(sev->name, event) == 0 &&
436 (!system || strcmp(system, SYNTH_SYSTEM) == 0);
437 }
438
439 struct action_data;
440
441 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
442 struct tracing_map_elt *elt, void *rec,
443 struct ring_buffer_event *rbe, void *key,
444 struct action_data *data, u64 *var_ref_vals);
445
446 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
447
448 enum handler_id {
449 HANDLER_ONMATCH = 1,
450 HANDLER_ONMAX,
451 HANDLER_ONCHANGE,
452 };
453
454 enum action_id {
455 ACTION_SAVE = 1,
456 ACTION_TRACE,
457 ACTION_SNAPSHOT,
458 };
459
460 struct action_data {
461 enum handler_id handler;
462 enum action_id action;
463 char *action_name;
464 action_fn_t fn;
465
466 unsigned int n_params;
467 char *params[SYNTH_FIELDS_MAX];
468
469 /*
470  * When a histogram trigger is hit, the values of any
471  * references to variables, including variables being passed
472  * as parameters to synthetic events, are collected into a
473  * var_ref_vals array.  This var_ref_idx array is an array of
474  * indices into the var_ref_vals array, one for each synthetic
475  * event param, and is passed to the synthetic event
476  * invocation.
477  */
478 unsigned int var_ref_idx[TRACING_MAP_VARS_MAX];
479 struct synth_event *synth_event;
480 bool use_trace_keyword;
481 char *synth_event_name;
482
483 union {
484 struct {
485 char *event;
486 char *event_system;
487 } match_data;
488
489 struct {
490 /*
491  * var_str contains the $-unstripped variable
492  * name referenced by var_ref, and is used when
493  * printing the action.  Because var_ref
494  * creation is deferred to create_actions(),
495  * var_str is what's available until (and in
496  * case) the var_ref is actually created.
497  */
498 char *var_str;
499
500 /*
501  * var_ref refers to the variable being
502  * tracked e.g. onmax($var).
503  */
504 struct hist_field *var_ref;
505
506 /*
507  * track_var contains the 'invisible' tracking
508  * variable created to keep the current
509  * e.g. max value.
510  */
511 struct hist_field *track_var;
512
513 check_track_val_fn_t check_val;
514 action_fn_t save_data;
515 } track_data;
516 };
517 };
518
519 struct track_data {
520 u64 track_val;
521 bool updated;
522
523 unsigned int key_len;
524 void *key;
525 struct tracing_map_elt elt;
526
527 struct action_data *action_data;
528 struct hist_trigger_data *hist_data;
529 };
530
531 struct hist_elt_data {
532 char *comm;
533 u64 *var_ref_vals;
534 char *field_var_str[SYNTH_FIELDS_MAX];
535 };
536
537 struct snapshot_context {
538 struct tracing_map_elt *elt;
539 void *key;
540 };
541
542 static void track_data_free(struct track_data *track_data)
543 {
544 struct hist_elt_data *elt_data;
545
546 if (!track_data)
547 return;
548
549 kfree(track_data->key);
550
551 elt_data = track_data->elt.private_data;
552 if (elt_data) {
553 kfree(elt_data->comm);
554 kfree(elt_data);
555 }
556
557 kfree(track_data);
558 }
559
560 static struct track_data *track_data_alloc(unsigned int key_len,
561 struct action_data *action_data,
562 struct hist_trigger_data *hist_data)
563 {
564 struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
565 struct hist_elt_data *elt_data;
566
567 if (!data)
568 return ERR_PTR(-ENOMEM);
569
570 data->key = kzalloc(key_len, GFP_KERNEL);
571 if (!data->key) {
572 track_data_free(data);
573 return ERR_PTR(-ENOMEM);
574 }
575
576 data->key_len = key_len;
577 data->action_data = action_data;
578 data->hist_data = hist_data;
579
580 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
581 if (!elt_data) {
582 track_data_free(data);
583 return ERR_PTR(-ENOMEM);
584 }
585 data->elt.private_data = elt_data;
586
587 elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
588 if (!elt_data->comm) {
589 track_data_free(data);
590 return ERR_PTR(-ENOMEM);
591 }
592
593 return data;
594 }
595
596 static char last_cmd[MAX_FILTER_STR_VAL];
597 static char last_cmd_loc[MAX_FILTER_STR_VAL];
598
599 static int errpos(char *str)
600 {
601 return err_pos(last_cmd, str);
602 }
603
604 static void last_cmd_set(struct trace_event_file *file, char *str)
605 {
606 const char *system = NULL, *name = NULL;
607 struct trace_event_call *call;
608
609 if (!str)
610 return;
611
612 strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
613
614 if (file) {
615 call = file->event_call;
616
617 system = call->class->system;
618 if (system) {
619 name = trace_event_name(call);
620 if (!name)
621 system = NULL;
622 }
623 }
624
625 if (system)
626 snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
627 }
628
629 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
630 {
631 tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
632 err_type, err_pos);
633 }
634
635 static void hist_err_clear(void)
636 {
637 last_cmd[0] = '\0';
638 last_cmd_loc[0] = '\0';
639 }
640
641 struct synth_trace_event {
642 struct trace_entry ent;
643 u64 fields[];
644 };
645
646 static int synth_event_define_fields(struct trace_event_call *call)
647 {
648 struct synth_trace_event trace;
649 int offset = offsetof(typeof(trace), fields);
650 struct synth_event *event = call->data;
651 unsigned int i, size, n_u64;
652 char *name, *type;
653 bool is_signed;
654 int ret = 0;
655
656 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
657 size = event->fields[i]->size;
658 is_signed = event->fields[i]->is_signed;
659 type = event->fields[i]->type;
660 name = event->fields[i]->name;
661 ret = trace_define_field(call, type, name, offset, size,
662 is_signed, FILTER_OTHER);
663 if (ret)
664 break;
665
666 if (event->fields[i]->is_string) {
667 offset += STR_VAR_LEN_MAX;
668 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
669 } else {
670 offset += sizeof(u64);
671 n_u64++;
672 }
673 }
674
675 event->n_u64 = n_u64;
676
677 return ret;
678 }
679
680 static bool synth_field_signed(char *type)
681 {
682 if (str_has_prefix(type, "u"))
683 return false;
684 if (strcmp(type, "gfp_t") == 0)
685 return false;
686
687 return true;
688 }
689
690 static int synth_field_is_string(char *type)
691 {
692 if (strstr(type, "char[") != NULL)
693 return true;
694
695 return false;
696 }
697
698 static int synth_field_string_size(char *type)
699 {
700 char buf[4], *end, *start;
701 unsigned int len;
702 int size, err;
703
704 start = strstr(type, "char[");
705 if (start == NULL)
706 return -EINVAL;
707 start += sizeof("char[") - 1;
708
709 end = strchr(type, ']');
710 if (!end || end < start)
711 return -EINVAL;
712
713 len = end - start;
714 if (len > 3)
715 return -EINVAL;
716
717 strncpy(buf, start, len);
718 buf[len] = '\0';
719
720 err = kstrtouint(buf, 0, &size);
721 if (err)
722 return err;
723
724 if (size > STR_VAR_LEN_MAX)
725 return -EINVAL;
726
727 return size;
728 }
729
730 static int synth_field_size(char *type)
731 {
732 int size = 0;
733
734 if (strcmp(type, "s64") == 0)
735 size = sizeof(s64);
736 else if (strcmp(type, "u64") == 0)
737 size = sizeof(u64);
738 else if (strcmp(type, "s32") == 0)
739 size = sizeof(s32);
740 else if (strcmp(type, "u32") == 0)
741 size = sizeof(u32);
742 else if (strcmp(type, "s16") == 0)
743 size = sizeof(s16);
744 else if (strcmp(type, "u16") == 0)
745 size = sizeof(u16);
746 else if (strcmp(type, "s8") == 0)
747 size = sizeof(s8);
748 else if (strcmp(type, "u8") == 0)
749 size = sizeof(u8);
750 else if (strcmp(type, "char") == 0)
751 size = sizeof(char);
752 else if (strcmp(type, "unsigned char") == 0)
753 size = sizeof(unsigned char);
754 else if (strcmp(type, "int") == 0)
755 size = sizeof(int);
756 else if (strcmp(type, "unsigned int") == 0)
757 size = sizeof(unsigned int);
758 else if (strcmp(type, "long") == 0)
759 size = sizeof(long);
760 else if (strcmp(type, "unsigned long") == 0)
761 size = sizeof(unsigned long);
762 else if (strcmp(type, "pid_t") == 0)
763 size = sizeof(pid_t);
764 else if (strcmp(type, "gfp_t") == 0)
765 size = sizeof(gfp_t);
766 else if (synth_field_is_string(type))
767 size = synth_field_string_size(type);
768
769 return size;
770 }
771
772 static const char *synth_field_fmt(char *type)
773 {
774 const char *fmt = "%llu";
775
776 if (strcmp(type, "s64") == 0)
777 fmt = "%lld";
778 else if (strcmp(type, "u64") == 0)
779 fmt = "%llu";
780 else if (strcmp(type, "s32") == 0)
781 fmt = "%d";
782 else if (strcmp(type, "u32") == 0)
783 fmt = "%u";
784 else if (strcmp(type, "s16") == 0)
785 fmt = "%d";
786 else if (strcmp(type, "u16") == 0)
787 fmt = "%u";
788 else if (strcmp(type, "s8") == 0)
789 fmt = "%d";
790 else if (strcmp(type, "u8") == 0)
791 fmt = "%u";
792 else if (strcmp(type, "char") == 0)
793 fmt = "%d";
794 else if (strcmp(type, "unsigned char") == 0)
795 fmt = "%u";
796 else if (strcmp(type, "int") == 0)
797 fmt = "%d";
798 else if (strcmp(type, "unsigned int") == 0)
799 fmt = "%u";
800 else if (strcmp(type, "long") == 0)
801 fmt = "%ld";
802 else if (strcmp(type, "unsigned long") == 0)
803 fmt = "%lu";
804 else if (strcmp(type, "pid_t") == 0)
805 fmt = "%d";
806 else if (strcmp(type, "gfp_t") == 0)
807 fmt = "%x";
808 else if (synth_field_is_string(type))
809 fmt = "%s";
810
811 return fmt;
812 }
813
814 static void print_synth_event_num_val(struct trace_seq *s,
815 char *print_fmt, char *name,
816 int size, u64 val, char *space)
817 {
818 switch (size) {
819 case 1:
820 trace_seq_printf(s, print_fmt, name, (u8)val, space);
821 break;
822
823 case 2:
824 trace_seq_printf(s, print_fmt, name, (u16)val, space);
825 break;
826
827 case 4:
828 trace_seq_printf(s, print_fmt, name, (u32)val, space);
829 break;
830
831 default:
832 trace_seq_printf(s, print_fmt, name, val, space);
833 break;
834 }
835 }
836
837 static enum print_line_t print_synth_event(struct trace_iterator *iter,
838 int flags,
839 struct trace_event *event)
840 {
841 struct trace_array *tr = iter->tr;
842 struct trace_seq *s = &iter->seq;
843 struct synth_trace_event *entry;
844 struct synth_event *se;
845 unsigned int i, n_u64;
846 char print_fmt[32];
847 const char *fmt;
848
849 entry = (struct synth_trace_event *)iter->ent;
850 se = container_of(event, struct synth_event, call.event);
851
852 trace_seq_printf(s, "%s: ", se->name);
853
854 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
855 if (trace_seq_has_overflowed(s))
856 goto end;
857
858 fmt = synth_field_fmt(se->fields[i]->type);
859
860 /* parameter types */
861 if (tr->trace_flags & TRACE_ITER_VERBOSE)
862 trace_seq_printf(s, "%s ", fmt);
863
864 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
865
866 /* parameter values */
867 if (se->fields[i]->is_string) {
868 trace_seq_printf(s, print_fmt, se->fields[i]->name,
869 (char *)&entry->fields[n_u64],
870 i == se->n_fields - 1 ? "" : " ");
871 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
872 } else {
873 struct trace_print_flags __flags[] = {
874 __def_gfpflag_names, {-1, NULL} };
875 char *space = (i == se->n_fields - 1 ? "" : " ");
876
877 print_synth_event_num_val(s, print_fmt,
878 se->fields[i]->name,
879 se->fields[i]->size,
880 entry->fields[n_u64],
881 space);
882
883 if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
884 trace_seq_puts(s, " (");
885 trace_print_flags_seq(s, "|",
886 entry->fields[n_u64],
887 __flags);
888 trace_seq_putc(s, ')');
889 }
890 n_u64++;
891 }
892 }
893 end:
894 trace_seq_putc(s, '\n');
895
896 return trace_handle_return(s);
897 }
898
899 static struct trace_event_functions synth_event_funcs = {
900 .trace = print_synth_event
901 };
902
903 static notrace void trace_event_raw_event_synth(void *__data,
904 u64 *var_ref_vals,
905 unsigned int *var_ref_idx)
906 {
907 struct trace_event_file *trace_file = __data;
908 struct synth_trace_event *entry;
909 struct trace_event_buffer fbuffer;
910 struct ring_buffer *buffer;
911 struct synth_event *event;
912 unsigned int i, n_u64, val_idx;
913 int fields_size = 0;
914
915 event = trace_file->event_call->data;
916
917 if (trace_trigger_soft_disabled(trace_file))
918 return;
919
920 fields_size = event->n_u64 * sizeof(u64);
921
922 /*
923  * Avoid ring buffer recursion detection, as this event
924  * is being performed within another event.
925  */
926 buffer = trace_file->tr->trace_buffer.buffer;
927 ring_buffer_nest_start(buffer);
928
929 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
930 sizeof(*entry) + fields_size);
931 if (!entry)
932 goto out;
933
934 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
935 val_idx = var_ref_idx[i];
936 if (event->fields[i]->is_string) {
937 char *str_val = (char *)(long)var_ref_vals[val_idx];
938 char *str_field = (char *)&entry->fields[n_u64];
939
940 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
941 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
942 } else {
943 struct synth_field *field = event->fields[i];
944 u64 val = var_ref_vals[val_idx];
945
946 switch (field->size) {
947 case 1:
948 *(u8 *)&entry->fields[n_u64] = (u8)val;
949 break;
950
951 case 2:
952 *(u16 *)&entry->fields[n_u64] = (u16)val;
953 break;
954
955 case 4:
956 *(u32 *)&entry->fields[n_u64] = (u32)val;
957 break;
958
959 default:
960 entry->fields[n_u64] = val;
961 break;
962 }
963 n_u64++;
964 }
965 }
966
967 trace_event_buffer_commit(&fbuffer);
968 out:
969 ring_buffer_nest_end(buffer);
970 }
971
972 static void free_synth_event_print_fmt(struct trace_event_call *call)
973 {
974 if (call) {
975 kfree(call->print_fmt);
976 call->print_fmt = NULL;
977 }
978 }
979
980 static int __set_synth_event_print_fmt(struct synth_event *event,
981 char *buf, int len)
982 {
983 const char *fmt;
984 int pos = 0;
985 int i;
986
987 /* When len=0, we just calculate the needed length */
988 #define LEN_OR_ZERO (len ? len - pos : 0)
989
990 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
991 for (i = 0; i < event->n_fields; i++) {
992 fmt = synth_field_fmt(event->fields[i]->type);
993 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
994 event->fields[i]->name, fmt,
995 i == event->n_fields - 1 ? "" : ", ");
996 }
997 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
998
999 for (i = 0; i < event->n_fields; i++) {
1000 pos += snprintf(buf + pos, LEN_OR_ZERO,
1001 ", REC->%s", event->fields[i]->name);
1002 }
1003
1004 #undef LEN_OR_ZERO
1005
1006 /* return the length of print_fmt */
1007 return pos;
1008 }
1009
1010 static int set_synth_event_print_fmt(struct trace_event_call *call)
1011 {
1012 struct synth_event *event = call->data;
1013 char *print_fmt;
1014 int len;
1015
1016 /* First: called with 0 length to calculate the needed length */
1017 len = __set_synth_event_print_fmt(event, NULL, 0);
1018
1019 print_fmt = kmalloc(len + 1, GFP_KERNEL);
1020 if (!print_fmt)
1021 return -ENOMEM;
1022
1023 /* Second: actually write the @print_fmt */
1024 __set_synth_event_print_fmt(event, print_fmt, len + 1);
1025 call->print_fmt = print_fmt;
1026
1027 return 0;
1028 }
1029
1030 static void free_synth_field(struct synth_field *field)
1031 {
1032 kfree(field->type);
1033 kfree(field->name);
1034 kfree(field);
1035 }
1036
1037 static struct synth_field *parse_synth_field(int argc, const char **argv,
1038 int *consumed)
1039 {
1040 struct synth_field *field;
1041 const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
1042 int len, ret = 0;
1043
1044 if (field_type[0] == ';')
1045 field_type++;
1046
1047 if (!strcmp(field_type, "unsigned")) {
1048 if (argc < 3)
1049 return ERR_PTR(-EINVAL);
1050 prefix = "unsigned ";
1051 field_type = argv[1];
1052 field_name = argv[2];
1053 *consumed = 3;
1054 } else {
1055 field_name = argv[1];
1056 *consumed = 2;
1057 }
1058
1059 field = kzalloc(sizeof(*field), GFP_KERNEL);
1060 if (!field)
1061 return ERR_PTR(-ENOMEM);
1062
1063 len = strlen(field_name);
1064 array = strchr(field_name, '[');
1065 if (array)
1066 len -= strlen(array);
1067 else if (field_name[len - 1] == ';')
1068 len--;
1069
1070 field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
1071 if (!field->name) {
1072 ret = -ENOMEM;
1073 goto free;
1074 }
1075
1076 if (field_type[0] == ';')
1077 field_type++;
1078 len = strlen(field_type) + 1;
1079 if (array)
1080 len += strlen(array);
1081 if (prefix)
1082 len += strlen(prefix);
1083
1084 field->type = kzalloc(len, GFP_KERNEL);
1085 if (!field->type) {
1086 ret = -ENOMEM;
1087 goto free;
1088 }
1089 if (prefix)
1090 strcat(field->type, prefix);
1091 strcat(field->type, field_type);
1092 if (array) {
1093 strcat(field->type, array);
1094 if (field->type[len - 1] == ';')
1095 field->type[len - 1] = '\0';
1096 }
1097
1098 field->size = synth_field_size(field->type);
1099 if (!field->size) {
1100 ret = -EINVAL;
1101 goto free;
1102 }
1103
1104 if (synth_field_is_string(field->type))
1105 field->is_string = true;
1106
1107 field->is_signed = synth_field_signed(field->type);
1108
1109 out:
1110 return field;
1111 free:
1112 free_synth_field(field);
1113 field = ERR_PTR(ret);
1114 goto out;
1115 }
1116
1117 static void free_synth_tracepoint(struct tracepoint *tp)
1118 {
1119 if (!tp)
1120 return;
1121
1122 kfree(tp->name);
1123 kfree(tp);
1124 }
1125
1126 static struct tracepoint *alloc_synth_tracepoint(char *name)
1127 {
1128 struct tracepoint *tp;
1129
1130 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
1131 if (!tp)
1132 return ERR_PTR(-ENOMEM);
1133
1134 tp->name = kstrdup(name, GFP_KERNEL);
1135 if (!tp->name) {
1136 kfree(tp);
1137 return ERR_PTR(-ENOMEM);
1138 }
1139
1140 return tp;
1141 }
1142
1143 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
1144 unsigned int *var_ref_idx);
1145
1146 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
1147 unsigned int *var_ref_idx)
1148 {
1149 struct tracepoint *tp = event->tp;
1150
1151 if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
1152 struct tracepoint_func *probe_func_ptr;
1153 synth_probe_func_t probe_func;
1154 void *__data;
1155
1156 if (!(cpu_online(raw_smp_processor_id())))
1157 return;
1158
1159 probe_func_ptr = rcu_dereference_sched((tp)->funcs);
1160 if (probe_func_ptr) {
1161 do {
1162 probe_func = probe_func_ptr->func;
1163 __data = probe_func_ptr->data;
1164 probe_func(__data, var_ref_vals, var_ref_idx);
1165 } while ((++probe_func_ptr)->func);
1166 }
1167 }
1168 }
1169
1170 static struct synth_event *find_synth_event(const char *name)
1171 {
1172 struct dyn_event *pos;
1173 struct synth_event *event;
1174
1175 for_each_dyn_event(pos) {
1176 if (!is_synth_event(pos))
1177 continue;
1178 event = to_synth_event(pos);
1179 if (strcmp(event->name, name) == 0)
1180 return event;
1181 }
1182
1183 return NULL;
1184 }
1185
1186 static int register_synth_event(struct synth_event *event)
1187 {
1188 struct trace_event_call *call = &event->call;
1189 int ret = 0;
1190
1191 event->call.class = &event->class;
1192 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
1193 if (!event->class.system) {
1194 ret = -ENOMEM;
1195 goto out;
1196 }
1197
1198 event->tp = alloc_synth_tracepoint(event->name);
1199 if (IS_ERR(event->tp)) {
1200 ret = PTR_ERR(event->tp);
1201 event->tp = NULL;
1202 goto out;
1203 }
1204
1205 INIT_LIST_HEAD(&call->class->fields);
1206 call->event.funcs = &synth_event_funcs;
1207 call->class->define_fields = synth_event_define_fields;
1208
1209 ret = register_trace_event(&call->event);
1210 if (!ret) {
1211 ret = -ENODEV;
1212 goto out;
1213 }
1214 call->flags = TRACE_EVENT_FL_TRACEPOINT;
1215 call->class->reg = trace_event_reg;
1216 call->class->probe = trace_event_raw_event_synth;
1217 call->data = event;
1218 call->tp = event->tp;
1219
1220 ret = trace_add_event_call(call);
1221 if (ret) {
1222 pr_warn("Failed to register synthetic event: %s\n",
1223 trace_event_name(call));
1224 goto err;
1225 }
1226
1227 ret = set_synth_event_print_fmt(call);
1228 if (ret < 0) {
1229 trace_remove_event_call(call);
1230 goto err;
1231 }
1232 out:
1233 return ret;
1234 err:
1235 unregister_trace_event(&call->event);
1236 goto out;
1237 }
1238
1239 static int unregister_synth_event(struct synth_event *event)
1240 {
1241 struct trace_event_call *call = &event->call;
1242 int ret;
1243
1244 ret = trace_remove_event_call(call);
1245
1246 return ret;
1247 }
1248
1249 static void free_synth_event(struct synth_event *event)
1250 {
1251 unsigned int i;
1252
1253 if (!event)
1254 return;
1255
1256 for (i = 0; i < event->n_fields; i++)
1257 free_synth_field(event->fields[i]);
1258
1259 kfree(event->fields);
1260 kfree(event->name);
1261 kfree(event->class.system);
1262 free_synth_tracepoint(event->tp);
1263 free_synth_event_print_fmt(&event->call);
1264 kfree(event);
1265 }
1266
1267 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
1268 struct synth_field **fields)
1269 {
1270 struct synth_event *event;
1271 unsigned int i;
1272
1273 event = kzalloc(sizeof(*event), GFP_KERNEL);
1274 if (!event) {
1275 event = ERR_PTR(-ENOMEM);
1276 goto out;
1277 }
1278
1279 event->name = kstrdup(name, GFP_KERNEL);
1280 if (!event->name) {
1281 kfree(event);
1282 event = ERR_PTR(-ENOMEM);
1283 goto out;
1284 }
1285
1286 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
1287 if (!event->fields) {
1288 free_synth_event(event);
1289 event = ERR_PTR(-ENOMEM);
1290 goto out;
1291 }
1292
1293 dyn_event_init(&event->devent, &synth_event_ops);
1294
1295 for (i = 0; i < n_fields; i++)
1296 event->fields[i] = fields[i];
1297
1298 event->n_fields = n_fields;
1299 out:
1300 return event;
1301 }
1302
1303 static void action_trace(struct hist_trigger_data *hist_data,
1304 struct tracing_map_elt *elt, void *rec,
1305 struct ring_buffer_event *rbe, void *key,
1306 struct action_data *data, u64 *var_ref_vals)
1307 {
1308 struct synth_event *event = data->synth_event;
1309
1310 trace_synth(event, var_ref_vals, data->var_ref_idx);
1311 }
1312
1313 struct hist_var_data {
1314 struct list_head list;
1315 struct hist_trigger_data *hist_data;
1316 };
1317
1318 static int __create_synth_event(int argc, const char *name, const char **argv)
1319 {
1320 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1321 struct synth_event *event = NULL;
1322 int i, consumed = 0, n_fields = 0, ret = 0;
1323
1324
1325 /*
1326  * Argument syntax:
1327  *  - Add synthetic event: <event_name> field[;field] ...
1328  *  - Remove synthetic event: !<event_name> field[;field] ...
1329  *      where 'field' = type field_name
1330  */
1331 if (name[0] == '\0' || argc < 1)
1332 return -EINVAL;
1333
1334 mutex_lock(&event_mutex);
1335
1336 event = find_synth_event(name);
1337 if (event) {
1338 ret = -EEXIST;
1339 goto out;
1340 }
1341
1342 for (i = 0; i < argc - 1; i++) {
1343 if (strcmp(argv[i], ";") == 0)
1344 continue;
1345 if (n_fields == SYNTH_FIELDS_MAX) {
1346 ret = -EINVAL;
1347 goto err;
1348 }
1349
1350 field = parse_synth_field(argc - i, &argv[i], &consumed);
1351 if (IS_ERR(field)) {
1352 ret = PTR_ERR(field);
1353 goto err;
1354 }
1355 fields[n_fields++] = field;
1356 i += consumed - 1;
1357 }
1358
1359 if (i < argc && strcmp(argv[i], ";") != 0) {
1360 ret = -EINVAL;
1361 goto err;
1362 }
1363
1364 event = alloc_synth_event(name, n_fields, fields);
1365 if (IS_ERR(event)) {
1366 ret = PTR_ERR(event);
1367 event = NULL;
1368 goto err;
1369 }
1370 ret = register_synth_event(event);
1371 if (!ret)
1372 dyn_event_add(&event->devent);
1373 else
1374 free_synth_event(event);
1375 out:
1376 mutex_unlock(&event_mutex);
1377
1378 return ret;
1379 err:
1380 for (i = 0; i < n_fields; i++)
1381 free_synth_field(fields[i]);
1382
1383 goto out;
1384 }
1385
1386 static int create_or_delete_synth_event(int argc, char **argv)
1387 {
1388 const char *name = argv[0];
1389 struct synth_event *event = NULL;
1390 int ret;
1391
1392 /* trace_run_command() ensures argc != 0 */
1393 if (name[0] == '!') {
1394 mutex_lock(&event_mutex);
1395 event = find_synth_event(name + 1);
1396 if (event) {
1397 if (event->ref)
1398 ret = -EBUSY;
1399 else {
1400 ret = unregister_synth_event(event);
1401 if (!ret) {
1402 dyn_event_remove(&event->devent);
1403 free_synth_event(event);
1404 }
1405 }
1406 } else
1407 ret = -ENOENT;
1408 mutex_unlock(&event_mutex);
1409 return ret;
1410 }
1411
1412 ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1413 return ret == -ECANCELED ? -EINVAL : ret;
1414 }
1415
1416 static int synth_event_create(int argc, const char **argv)
1417 {
1418 const char *name = argv[0];
1419 int len;
1420
1421 if (name[0] != 's' || name[1] != ':')
1422 return -ECANCELED;
1423 name += 2;
1424
1425 /* This interface accepts group name prefix */
1426 if (strchr(name, '/')) {
1427 len = str_has_prefix(name, SYNTH_SYSTEM "/");
1428 if (len == 0)
1429 return -EINVAL;
1430 name += len;
1431 }
1432 return __create_synth_event(argc - 1, name, argv + 1);
1433 }
1434
1435 static int synth_event_release(struct dyn_event *ev)
1436 {
1437 struct synth_event *event = to_synth_event(ev);
1438 int ret;
1439
1440 if (event->ref)
1441 return -EBUSY;
1442
1443 ret = unregister_synth_event(event);
1444 if (ret)
1445 return ret;
1446
1447 dyn_event_remove(ev);
1448 free_synth_event(event);
1449 return 0;
1450 }
1451
1452 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
1453 {
1454 struct synth_field *field;
1455 unsigned int i;
1456
1457 seq_printf(m, "%s\t", event->name);
1458
1459 for (i = 0; i < event->n_fields; i++) {
1460 field = event->fields[i];
1461
1462 /* parameter values */
1463 seq_printf(m, "%s %s%s", field->type, field->name,
1464 i == event->n_fields - 1 ? "" : "; ");
1465 }
1466
1467 seq_putc(m, '\n');
1468
1469 return 0;
1470 }
1471
1472 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
1473 {
1474 struct synth_event *event = to_synth_event(ev);
1475
1476 seq_printf(m, "s:%s/", event->class.system);
1477
1478 return __synth_event_show(m, event);
1479 }
1480
1481 static int synth_events_seq_show(struct seq_file *m, void *v)
1482 {
1483 struct dyn_event *ev = v;
1484
1485 if (!is_synth_event(ev))
1486 return 0;
1487
1488 return __synth_event_show(m, to_synth_event(ev));
1489 }
1490
1491 static const struct seq_operations synth_events_seq_op = {
1492 .start = dyn_event_seq_start,
1493 .next = dyn_event_seq_next,
1494 .stop = dyn_event_seq_stop,
1495 .show = synth_events_seq_show,
1496 };
1497
1498 static int synth_events_open(struct inode *inode, struct file *file)
1499 {
1500 int ret;
1501
1502 ret = security_locked_down(LOCKDOWN_TRACEFS);
1503 if (ret)
1504 return ret;
1505
1506 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1507 ret = dyn_events_release_all(&synth_event_ops);
1508 if (ret < 0)
1509 return ret;
1510 }
1511
1512 return seq_open(file, &synth_events_seq_op);
1513 }
1514
1515 static ssize_t synth_events_write(struct file *file,
1516 const char __user *buffer,
1517 size_t count, loff_t *ppos)
1518 {
1519 return trace_parse_run_command(file, buffer, count, ppos,
1520 create_or_delete_synth_event);
1521 }
1522
1523 static const struct file_operations synth_events_fops = {
1524 .open = synth_events_open,
1525 .write = synth_events_write,
1526 .read = seq_read,
1527 .llseek = seq_lseek,
1528 .release = seq_release,
1529 };
1530
1531 static u64 hist_field_timestamp(struct hist_field *hist_field,
1532 struct tracing_map_elt *elt,
1533 struct ring_buffer_event *rbe,
1534 void *event)
1535 {
1536 struct hist_trigger_data *hist_data = hist_field->hist_data;
1537 struct trace_array *tr = hist_data->event_file->tr;
1538
1539 u64 ts = ring_buffer_event_time_stamp(rbe);
1540
1541 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
1542 ts = ns2usecs(ts);
1543
1544 return ts;
1545 }
1546
1547 static u64 hist_field_cpu(struct hist_field *hist_field,
1548 struct tracing_map_elt *elt,
1549 struct ring_buffer_event *rbe,
1550 void *event)
1551 {
1552 int cpu = smp_processor_id();
1553
1554 return cpu;
1555 }
1556
1557 /*
1558  * check_field_for_var_ref - Check if a VAR_REF field references a variable
1559  * @hist_field: The VAR_REF field to check
1560  * @var_data: The hist trigger that owns the variable
1561  * @var_idx: The trigger variable identifier
1562  *
1563  * Check the given VAR_REF field to see whether or not the
1564  * variable is referenced by it.
1565  *
1566  * Return: The VAR_REF field if it does, NULL if it doesn't
1567  */
1568 static struct hist_field *
1569 check_field_for_var_ref(struct hist_field *hist_field,
1570 struct hist_trigger_data *var_data,
1571 unsigned int var_idx)
1572 {
1573 WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
1574
1575 if (hist_field && hist_field->var.idx == var_idx &&
1576 hist_field->var.hist_data == var_data)
1577 return hist_field;
1578
1579 return NULL;
1580 }
1581
1582 /*
1583  * find_var_ref - Check if a trigger has a reference to a trigger variable
1584  * @hist_data: The hist trigger that might have a reference to the variable
1585  * @var_data: The hist trigger that owns the variable
1586  * @var_idx: The trigger variable identifier
1587  *
1588  * Check the list of var_refs[] on the first hist trigger to see
1589  * whether any of them are references to the variable on the second
1590  * trigger.
1591  *
1592  * Return: The VAR_REF field referencing the variable if so, NULL if not
1593  */
1594 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
1595 struct hist_trigger_data *var_data,
1596 unsigned int var_idx)
1597 {
1598 struct hist_field *hist_field;
1599 unsigned int i;
1600
1601 for (i = 0; i < hist_data->n_var_refs; i++) {
1602 hist_field = hist_data->var_refs[i];
1603 if (check_field_for_var_ref(hist_field, var_data, var_idx))
1604 return hist_field;
1605 }
1606
1607 return NULL;
1608 }
1609
1610 /*
1611  * find_any_var_ref - Check if there is a reference to a given trigger variable
1612  * @hist_data: The hist trigger
1613  * @var_idx: The trigger variable identifier
1614  *
1615  * Check to see whether the given variable is currently referenced by
1616  * any trigger other than the one defining it.
1617  *
1618  * The trigger the variable is defined on is explicitly excluded - the
1619  * assumption being that a self-reference doesn't prevent a trigger
1620  * from being removed.
1621  *
1622  * Return: The VAR_REF field referencing the variable if so, NULL if not
1623  */
1624 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
1625 unsigned int var_idx)
1626 {
1627 struct trace_array *tr = hist_data->event_file->tr;
1628 struct hist_field *found = NULL;
1629 struct hist_var_data *var_data;
1630
1631 list_for_each_entry(var_data, &tr->hist_vars, list) {
1632 if (var_data->hist_data == hist_data)
1633 continue;
1634 found = find_var_ref(var_data->hist_data, hist_data, var_idx);
1635 if (found)
1636 break;
1637 }
1638
1639 return found;
1640 }
1641
1642 /*
1643  * check_var_refs - Check if there is a reference to any of trigger's variables
1644  * @hist_data: The hist trigger
1645  *
1646  * A trigger can define one or more variables.  If any one of them is
1647  * currently referenced by any other trigger, this function determines
1648  * that.  Typically used to decide whether or not a trigger can be
1649  * removed - if there are variable references, it can't.
1650  *
1651  * Return: True if there is a variable reference to any of this
1652  * trigger's variables, false otherwise.
1653  */
1654
1655 static bool check_var_refs(struct hist_trigger_data *hist_data)
1656 {
1657 struct hist_field *field;
1658 bool found = false;
1659 int i;
1660
1661 for_each_hist_field(i, hist_data) {
1662 field = hist_data->fields[i];
1663 if (field && field->flags & HIST_FIELD_FL_VAR) {
1664 if (find_any_var_ref(hist_data, field->var.idx)) {
1665 found = true;
1666 break;
1667 }
1668 }
1669 }
1670
1671 return found;
1672 }
1673
1674 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1675 {
1676 struct trace_array *tr = hist_data->event_file->tr;
1677 struct hist_var_data *var_data, *found = NULL;
1678
1679 list_for_each_entry(var_data, &tr->hist_vars, list) {
1680 if (var_data->hist_data == hist_data) {
1681 found = var_data;
1682 break;
1683 }
1684 }
1685
1686 return found;
1687 }
1688
1689 static bool field_has_hist_vars(struct hist_field *hist_field,
1690 unsigned int level)
1691 {
1692 int i;
1693
1694 if (level > 3)
1695 return false;
1696
1697 if (!hist_field)
1698 return false;
1699
1700 if (hist_field->flags & HIST_FIELD_FL_VAR ||
1701 hist_field->flags & HIST_FIELD_FL_VAR_REF)
1702 return true;
1703
1704 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1705 struct hist_field *operand;
1706
1707 operand = hist_field->operands[i];
1708 if (field_has_hist_vars(operand, level + 1))
1709 return true;
1710 }
1711
1712 return false;
1713 }
1714
1715 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1716 {
1717 struct hist_field *hist_field;
1718 int i;
1719
1720 for_each_hist_field(i, hist_data) {
1721 hist_field = hist_data->fields[i];
1722 if (field_has_hist_vars(hist_field, 0))
1723 return true;
1724 }
1725
1726 return false;
1727 }
1728
1729 static int save_hist_vars(struct hist_trigger_data *hist_data)
1730 {
1731 struct trace_array *tr = hist_data->event_file->tr;
1732 struct hist_var_data *var_data;
1733
1734 var_data = find_hist_vars(hist_data);
1735 if (var_data)
1736 return 0;
1737
1738 if (tracing_check_open_get_tr(tr))
1739 return -ENODEV;
1740
1741 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1742 if (!var_data) {
1743 trace_array_put(tr);
1744 return -ENOMEM;
1745 }
1746
1747 var_data->hist_data = hist_data;
1748 list_add(&var_data->list, &tr->hist_vars);
1749
1750 return 0;
1751 }
1752
1753 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1754 {
1755 struct trace_array *tr = hist_data->event_file->tr;
1756 struct hist_var_data *var_data;
1757
1758 var_data = find_hist_vars(hist_data);
1759 if (!var_data)
1760 return;
1761
1762 if (WARN_ON(check_var_refs(hist_data)))
1763 return;
1764
1765 list_del(&var_data->list);
1766
1767 kfree(var_data);
1768
1769 trace_array_put(tr);
1770 }
1771
1772 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1773 const char *var_name)
1774 {
1775 struct hist_field *hist_field, *found = NULL;
1776 int i;
1777
1778 for_each_hist_field(i, hist_data) {
1779 hist_field = hist_data->fields[i];
1780 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1781 strcmp(hist_field->var.name, var_name) == 0) {
1782 found = hist_field;
1783 break;
1784 }
1785 }
1786
1787 return found;
1788 }
1789
1790 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1791 struct trace_event_file *file,
1792 const char *var_name)
1793 {
1794 struct hist_trigger_data *test_data;
1795 struct event_trigger_data *test;
1796 struct hist_field *hist_field;
1797
1798 lockdep_assert_held(&event_mutex);
1799
1800 hist_field = find_var_field(hist_data, var_name);
1801 if (hist_field)
1802 return hist_field;
1803
1804 list_for_each_entry(test, &file->triggers, list) {
1805 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1806 test_data = test->private_data;
1807 hist_field = find_var_field(test_data, var_name);
1808 if (hist_field)
1809 return hist_field;
1810 }
1811 }
1812
1813 return NULL;
1814 }
1815
1816 static struct trace_event_file *find_var_file(struct trace_array *tr,
1817 char *system,
1818 char *event_name,
1819 char *var_name)
1820 {
1821 struct hist_trigger_data *var_hist_data;
1822 struct hist_var_data *var_data;
1823 struct trace_event_file *file, *found = NULL;
1824
1825 if (system)
1826 return find_event_file(tr, system, event_name);
1827
1828 list_for_each_entry(var_data, &tr->hist_vars, list) {
1829 var_hist_data = var_data->hist_data;
1830 file = var_hist_data->event_file;
1831 if (file == found)
1832 continue;
1833
1834 if (find_var_field(var_hist_data, var_name)) {
1835 if (found) {
1836 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
1837 return NULL;
1838 }
1839
1840 found = file;
1841 }
1842 }
1843
1844 return found;
1845 }
1846
1847 static struct hist_field *find_file_var(struct trace_event_file *file,
1848 const char *var_name)
1849 {
1850 struct hist_trigger_data *test_data;
1851 struct event_trigger_data *test;
1852 struct hist_field *hist_field;
1853
1854 lockdep_assert_held(&event_mutex);
1855
1856 list_for_each_entry(test, &file->triggers, list) {
1857 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1858 test_data = test->private_data;
1859 hist_field = find_var_field(test_data, var_name);
1860 if (hist_field)
1861 return hist_field;
1862 }
1863 }
1864
1865 return NULL;
1866 }
1867
1868 static struct hist_field *
1869 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1870 {
1871 struct trace_array *tr = hist_data->event_file->tr;
1872 struct hist_field *hist_field, *found = NULL;
1873 struct trace_event_file *file;
1874 unsigned int i;
1875
1876 for (i = 0; i < hist_data->n_actions; i++) {
1877 struct action_data *data = hist_data->actions[i];
1878
1879 if (data->handler == HANDLER_ONMATCH) {
1880 char *system = data->match_data.event_system;
1881 char *event_name = data->match_data.event;
1882
1883 file = find_var_file(tr, system, event_name, var_name);
1884 if (!file)
1885 continue;
1886 hist_field = find_file_var(file, var_name);
1887 if (hist_field) {
1888 if (found) {
1889 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
1890 errpos(var_name));
1891 return ERR_PTR(-EINVAL);
1892 }
1893
1894 found = hist_field;
1895 }
1896 }
1897 }
1898 return found;
1899 }
1900
1901 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1902 char *system,
1903 char *event_name,
1904 char *var_name)
1905 {
1906 struct trace_array *tr = hist_data->event_file->tr;
1907 struct hist_field *hist_field = NULL;
1908 struct trace_event_file *file;
1909
1910 if (!system || !event_name) {
1911 hist_field = find_match_var(hist_data, var_name);
1912 if (IS_ERR(hist_field))
1913 return NULL;
1914 if (hist_field)
1915 return hist_field;
1916 }
1917
1918 file = find_var_file(tr, system, event_name, var_name);
1919 if (!file)
1920 return NULL;
1921
1922 hist_field = find_file_var(file, var_name);
1923
1924 return hist_field;
1925 }
1926
1927 static u64 hist_field_var_ref(struct hist_field *hist_field,
1928 struct tracing_map_elt *elt,
1929 struct ring_buffer_event *rbe,
1930 void *event)
1931 {
1932 struct hist_elt_data *elt_data;
1933 u64 var_val = 0;
1934
1935 if (WARN_ON_ONCE(!elt))
1936 return var_val;
1937
1938 elt_data = elt->private_data;
1939 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1940
1941 return var_val;
1942 }
1943
1944 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1945 u64 *var_ref_vals, bool self)
1946 {
1947 struct hist_trigger_data *var_data;
1948 struct tracing_map_elt *var_elt;
1949 struct hist_field *hist_field;
1950 unsigned int i, var_idx;
1951 bool resolved = true;
1952 u64 var_val = 0;
1953
1954 for (i = 0; i < hist_data->n_var_refs; i++) {
1955 hist_field = hist_data->var_refs[i];
1956 var_idx = hist_field->var.idx;
1957 var_data = hist_field->var.hist_data;
1958
1959 if (var_data == NULL) {
1960 resolved = false;
1961 break;
1962 }
1963
1964 if ((self && var_data != hist_data) ||
1965 (!self && var_data == hist_data))
1966 continue;
1967
1968 var_elt = tracing_map_lookup(var_data->map, key);
1969 if (!var_elt) {
1970 resolved = false;
1971 break;
1972 }
1973
1974 if (!tracing_map_var_set(var_elt, var_idx)) {
1975 resolved = false;
1976 break;
1977 }
1978
1979 if (self || !hist_field->read_once)
1980 var_val = tracing_map_read_var(var_elt, var_idx);
1981 else
1982 var_val = tracing_map_read_var_once(var_elt, var_idx);
1983
1984 var_ref_vals[i] = var_val;
1985 }
1986
1987 return resolved;
1988 }
1989
1990 static const char *hist_field_name(struct hist_field *field,
1991 unsigned int level)
1992 {
1993 const char *field_name = "";
1994
1995 if (level > 1)
1996 return field_name;
1997
1998 if (field->field)
1999 field_name = field->field->name;
2000 else if (field->flags & HIST_FIELD_FL_LOG2 ||
2001 field->flags & HIST_FIELD_FL_ALIAS)
2002 field_name = hist_field_name(field->operands[0], ++level);
2003 else if (field->flags & HIST_FIELD_FL_CPU)
2004 field_name = "cpu";
2005 else if (field->flags & HIST_FIELD_FL_EXPR ||
2006 field->flags & HIST_FIELD_FL_VAR_REF) {
2007 if (field->system) {
2008 static char full_name[MAX_FILTER_STR_VAL];
2009
2010 strcat(full_name, field->system);
2011 strcat(full_name, ".");
2012 strcat(full_name, field->event_name);
2013 strcat(full_name, ".");
2014 strcat(full_name, field->name);
2015 field_name = full_name;
2016 } else
2017 field_name = field->name;
2018 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
2019 field_name = "common_timestamp";
2020
2021 if (field_name == NULL)
2022 field_name = "";
2023
2024 return field_name;
2025 }
2026
2027 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
2028 {
2029 hist_field_fn_t fn = NULL;
2030
2031 switch (field_size) {
2032 case 8:
2033 if (field_is_signed)
2034 fn = hist_field_s64;
2035 else
2036 fn = hist_field_u64;
2037 break;
2038 case 4:
2039 if (field_is_signed)
2040 fn = hist_field_s32;
2041 else
2042 fn = hist_field_u32;
2043 break;
2044 case 2:
2045 if (field_is_signed)
2046 fn = hist_field_s16;
2047 else
2048 fn = hist_field_u16;
2049 break;
2050 case 1:
2051 if (field_is_signed)
2052 fn = hist_field_s8;
2053 else
2054 fn = hist_field_u8;
2055 break;
2056 }
2057
2058 return fn;
2059 }
2060
2061 static int parse_map_size(char *str)
2062 {
2063 unsigned long size, map_bits;
2064 int ret;
2065
2066 ret = kstrtoul(str, 0, &size);
2067 if (ret)
2068 goto out;
2069
2070 map_bits = ilog2(roundup_pow_of_two(size));
2071 if (map_bits < TRACING_MAP_BITS_MIN ||
2072 map_bits > TRACING_MAP_BITS_MAX)
2073 ret = -EINVAL;
2074 else
2075 ret = map_bits;
2076 out:
2077 return ret;
2078 }
2079
2080 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
2081 {
2082 unsigned int i;
2083
2084 if (!attrs)
2085 return;
2086
2087 for (i = 0; i < attrs->n_assignments; i++)
2088 kfree(attrs->assignment_str[i]);
2089
2090 for (i = 0; i < attrs->n_actions; i++)
2091 kfree(attrs->action_str[i]);
2092
2093 kfree(attrs->name);
2094 kfree(attrs->sort_key_str);
2095 kfree(attrs->keys_str);
2096 kfree(attrs->vals_str);
2097 kfree(attrs->clock);
2098 kfree(attrs);
2099 }
2100
2101 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
2102 {
2103 int ret = -EINVAL;
2104
2105 if (attrs->n_actions >= HIST_ACTIONS_MAX)
2106 return ret;
2107
2108 if ((str_has_prefix(str, "onmatch(")) ||
2109 (str_has_prefix(str, "onmax(")) ||
2110 (str_has_prefix(str, "onchange("))) {
2111 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
2112 if (!attrs->action_str[attrs->n_actions]) {
2113 ret = -ENOMEM;
2114 return ret;
2115 }
2116 attrs->n_actions++;
2117 ret = 0;
2118 }
2119 return ret;
2120 }
2121
2122 static int parse_assignment(struct trace_array *tr,
2123 char *str, struct hist_trigger_attrs *attrs)
2124 {
2125 int len, ret = 0;
2126
2127 if ((len = str_has_prefix(str, "key=")) ||
2128 (len = str_has_prefix(str, "keys="))) {
2129 attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
2130 if (!attrs->keys_str) {
2131 ret = -ENOMEM;
2132 goto out;
2133 }
2134 } else if ((len = str_has_prefix(str, "val=")) ||
2135 (len = str_has_prefix(str, "vals=")) ||
2136 (len = str_has_prefix(str, "values="))) {
2137 attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
2138 if (!attrs->vals_str) {
2139 ret = -ENOMEM;
2140 goto out;
2141 }
2142 } else if ((len = str_has_prefix(str, "sort="))) {
2143 attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
2144 if (!attrs->sort_key_str) {
2145 ret = -ENOMEM;
2146 goto out;
2147 }
2148 } else if (str_has_prefix(str, "name=")) {
2149 attrs->name = kstrdup(str, GFP_KERNEL);
2150 if (!attrs->name) {
2151 ret = -ENOMEM;
2152 goto out;
2153 }
2154 } else if ((len = str_has_prefix(str, "clock="))) {
2155 str += len;
2156
2157 str = strstrip(str);
2158 attrs->clock = kstrdup(str, GFP_KERNEL);
2159 if (!attrs->clock) {
2160 ret = -ENOMEM;
2161 goto out;
2162 }
2163 } else if ((len = str_has_prefix(str, "size="))) {
2164 int map_bits = parse_map_size(str + len);
2165
2166 if (map_bits < 0) {
2167 ret = map_bits;
2168 goto out;
2169 }
2170 attrs->map_bits = map_bits;
2171 } else {
2172 char *assignment;
2173
2174 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
2175 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
2176 ret = -EINVAL;
2177 goto out;
2178 }
2179
2180 assignment = kstrdup(str, GFP_KERNEL);
2181 if (!assignment) {
2182 ret = -ENOMEM;
2183 goto out;
2184 }
2185
2186 attrs->assignment_str[attrs->n_assignments++] = assignment;
2187 }
2188 out:
2189 return ret;
2190 }
2191
2192 static struct hist_trigger_attrs *
2193 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
2194 {
2195 struct hist_trigger_attrs *attrs;
2196 int ret = 0;
2197
2198 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
2199 if (!attrs)
2200 return ERR_PTR(-ENOMEM);
2201
2202 while (trigger_str) {
2203 char *str = strsep(&trigger_str, ":");
2204 char *rhs;
2205
2206 rhs = strchr(str, '=');
2207 if (rhs) {
2208 if (!strlen(++rhs)) {
2209 ret = -EINVAL;
2210 goto free;
2211 }
2212 ret = parse_assignment(tr, str, attrs);
2213 if (ret)
2214 goto free;
2215 } else if (strcmp(str, "pause") == 0)
2216 attrs->pause = true;
2217 else if ((strcmp(str, "cont") == 0) ||
2218 (strcmp(str, "continue") == 0))
2219 attrs->cont = true;
2220 else if (strcmp(str, "clear") == 0)
2221 attrs->clear = true;
2222 else {
2223 ret = parse_action(str, attrs);
2224 if (ret)
2225 goto free;
2226 }
2227 }
2228
2229 if (!attrs->keys_str) {
2230 ret = -EINVAL;
2231 goto free;
2232 }
2233
2234 if (!attrs->clock) {
2235 attrs->clock = kstrdup("global", GFP_KERNEL);
2236 if (!attrs->clock) {
2237 ret = -ENOMEM;
2238 goto free;
2239 }
2240 }
2241
2242 return attrs;
2243 free:
2244 destroy_hist_trigger_attrs(attrs);
2245
2246 return ERR_PTR(ret);
2247 }
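
/*
 * Illustrative example of a trigger attribute string handled above:
 *
 *   keys=next_pid:vals=hitcount:sort=hitcount:size=2048:clock=global
 *
 * Each ':'-separated token is either an assignment ("keys=", "vals=",
 * "sort=", "name=", "clock=", "size=", or a variable definition such as
 * "ts0=common_timestamp") handled by parse_assignment(), one of the
 * keywords "pause", "cont"/"continue" or "clear", or an
 * onmatch()/onmax()/onchange() action string saved for later parsing.
 * A "keys=" assignment is mandatory.
 */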
2248
2249 static inline void save_comm(char *comm, struct task_struct *task)
2250 {
2251 if (!task->pid) {
2252 strcpy(comm, "<idle>");
2253 return;
2254 }
2255
2256 if (WARN_ON_ONCE(task->pid < 0)) {
2257 strcpy(comm, "<XXX>");
2258 return;
2259 }
2260
2261 strncpy(comm, task->comm, TASK_COMM_LEN);
2262 }
2263
2264 static void hist_elt_data_free(struct hist_elt_data *elt_data)
2265 {
2266 unsigned int i;
2267
2268 for (i = 0; i < SYNTH_FIELDS_MAX; i++)
2269 kfree(elt_data->field_var_str[i]);
2270
2271 kfree(elt_data->comm);
2272 kfree(elt_data);
2273 }
2274
2275 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
2276 {
2277 struct hist_elt_data *elt_data = elt->private_data;
2278
2279 hist_elt_data_free(elt_data);
2280 }
2281
2282 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
2283 {
2284 struct hist_trigger_data *hist_data = elt->map->private_data;
2285 unsigned int size = TASK_COMM_LEN;
2286 struct hist_elt_data *elt_data;
2287 struct hist_field *key_field;
2288 unsigned int i, n_str;
2289
2290 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
2291 if (!elt_data)
2292 return -ENOMEM;
2293
2294 for_each_hist_key_field(i, hist_data) {
2295 key_field = hist_data->fields[i];
2296
2297 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
2298 elt_data->comm = kzalloc(size, GFP_KERNEL);
2299 if (!elt_data->comm) {
2300 kfree(elt_data);
2301 return -ENOMEM;
2302 }
2303 break;
2304 }
2305 }
2306
2307 n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;
2308
2309 size = STR_VAR_LEN_MAX;
2310
2311 for (i = 0; i < n_str; i++) {
2312 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
2313 if (!elt_data->field_var_str[i]) {
2314 hist_elt_data_free(elt_data);
2315 return -ENOMEM;
2316 }
2317 }
2318
2319 elt->private_data = elt_data;
2320
2321 return 0;
2322 }
2323
2324 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
2325 {
2326 struct hist_elt_data *elt_data = elt->private_data;
2327
2328 if (elt_data->comm)
2329 save_comm(elt_data->comm, current);
2330 }
2331
2332 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
2333 .elt_alloc = hist_trigger_elt_data_alloc,
2334 .elt_free = hist_trigger_elt_data_free,
2335 .elt_init = hist_trigger_elt_data_init,
2336 };
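
/*
 * The ops above are invoked by the tracing_map code: ->elt_alloc()
 * reserves per-element storage for an execname comm and for any
 * string-typed field/save variables, ->elt_init() records the current
 * task's comm when an element is first claimed, and ->elt_free()
 * releases that storage.
 */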
2337
2338 static const char *get_hist_field_flags(struct hist_field *hist_field)
2339 {
2340 const char *flags_str = NULL;
2341
2342 if (hist_field->flags & HIST_FIELD_FL_HEX)
2343 flags_str = "hex";
2344 else if (hist_field->flags & HIST_FIELD_FL_SYM)
2345 flags_str = "sym";
2346 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
2347 flags_str = "sym-offset";
2348 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
2349 flags_str = "execname";
2350 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
2351 flags_str = "syscall";
2352 else if (hist_field->flags & HIST_FIELD_FL_LOG2)
2353 flags_str = "log2";
2354 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2355 flags_str = "usecs";
2356
2357 return flags_str;
2358 }
2359
2360 static void expr_field_str(struct hist_field *field, char *expr)
2361 {
2362 if (field->flags & HIST_FIELD_FL_VAR_REF)
2363 strcat(expr, "$");
2364
2365 strcat(expr, hist_field_name(field, 0));
2366
2367 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
2368 const char *flags_str = get_hist_field_flags(field);
2369
2370 if (flags_str) {
2371 strcat(expr, ".");
2372 strcat(expr, flags_str);
2373 }
2374 }
2375 }
2376
2377 static char *expr_str(struct hist_field *field, unsigned int level)
2378 {
2379 char *expr;
2380
2381 if (level > 1)
2382 return NULL;
2383
2384 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2385 if (!expr)
2386 return NULL;
2387
2388 if (!field->operands[0]) {
2389 expr_field_str(field, expr);
2390 return expr;
2391 }
2392
2393 if (field->operator == FIELD_OP_UNARY_MINUS) {
2394 char *subexpr;
2395
2396 strcat(expr, "-(");
2397 subexpr = expr_str(field->operands[0], ++level);
2398 if (!subexpr) {
2399 kfree(expr);
2400 return NULL;
2401 }
2402 strcat(expr, subexpr);
2403 strcat(expr, ")");
2404
2405 kfree(subexpr);
2406
2407 return expr;
2408 }
2409
2410 expr_field_str(field->operands[0], expr);
2411
2412 switch (field->operator) {
2413 case FIELD_OP_MINUS:
2414 strcat(expr, "-");
2415 break;
2416 case FIELD_OP_PLUS:
2417 strcat(expr, "+");
2418 break;
2419 default:
2420 kfree(expr);
2421 return NULL;
2422 }
2423
2424 expr_field_str(field->operands[1], expr);
2425
2426 return expr;
2427 }
2428
2429 static int contains_operator(char *str)
2430 {
2431 enum field_op_id field_op = FIELD_OP_NONE;
2432 char *op;
2433
2434 op = strpbrk(str, "+-");
2435 if (!op)
2436 return FIELD_OP_NONE;
2437
2438 switch (*op) {
2439 case '-':
2440 if (*str == '-')
2441 field_op = FIELD_OP_UNARY_MINUS;
2442 else
2443 field_op = FIELD_OP_MINUS;
2444 break;
2445 case '+':
2446 field_op = FIELD_OP_PLUS;
2447 break;
2448 default:
2449 break;
2450 }
2451
2452 return field_op;
2453 }
2454
2455 static void get_hist_field(struct hist_field *hist_field)
2456 {
2457 hist_field->ref++;
2458 }
2459
2460 static void __destroy_hist_field(struct hist_field *hist_field)
2461 {
2462 if (--hist_field->ref > 1)
2463 return;
2464
2465 kfree(hist_field->var.name);
2466 kfree(hist_field->name);
2467 kfree(hist_field->type);
2468
2469 kfree(hist_field->system);
2470 kfree(hist_field->event_name);
2471
2472 kfree(hist_field);
2473 }
2474
2475 static void destroy_hist_field(struct hist_field *hist_field,
2476 unsigned int level)
2477 {
2478 unsigned int i;
2479
2480 if (level > 3)
2481 return;
2482
2483 if (!hist_field)
2484 return;
2485
2486 if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
2487 return;
2488
2489 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
2490 destroy_hist_field(hist_field->operands[i], level + 1);
2491
2492 __destroy_hist_field(hist_field);
2493 }
2494
2495 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
2496 struct ftrace_event_field *field,
2497 unsigned long flags,
2498 char *var_name)
2499 {
2500 struct hist_field *hist_field;
2501
2502 if (field && is_function_field(field))
2503 return NULL;
2504
2505 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2506 if (!hist_field)
2507 return NULL;
2508
2509 hist_field->ref = 1;
2510
2511 hist_field->hist_data = hist_data;
2512
2513 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
2514 goto out;
2515
2516 if (flags & HIST_FIELD_FL_VAR_REF) {
2517 hist_field->fn = hist_field_var_ref;
2518 goto out;
2519 }
2520
2521 if (flags & HIST_FIELD_FL_HITCOUNT) {
2522 hist_field->fn = hist_field_counter;
2523 hist_field->size = sizeof(u64);
2524 hist_field->type = kstrdup("u64", GFP_KERNEL);
2525 if (!hist_field->type)
2526 goto free;
2527 goto out;
2528 }
2529
2530 if (flags & HIST_FIELD_FL_STACKTRACE) {
2531 hist_field->fn = hist_field_none;
2532 goto out;
2533 }
2534
2535 if (flags & HIST_FIELD_FL_LOG2) {
2536 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
2537 hist_field->fn = hist_field_log2;
2538 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
2539 hist_field->size = hist_field->operands[0]->size;
2540 hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
2541 if (!hist_field->type)
2542 goto free;
2543 goto out;
2544 }
2545
2546 if (flags & HIST_FIELD_FL_TIMESTAMP) {
2547 hist_field->fn = hist_field_timestamp;
2548 hist_field->size = sizeof(u64);
2549 hist_field->type = kstrdup("u64", GFP_KERNEL);
2550 if (!hist_field->type)
2551 goto free;
2552 goto out;
2553 }
2554
2555 if (flags & HIST_FIELD_FL_CPU) {
2556 hist_field->fn = hist_field_cpu;
2557 hist_field->size = sizeof(int);
2558 hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
2559 if (!hist_field->type)
2560 goto free;
2561 goto out;
2562 }
2563
2564 if (WARN_ON_ONCE(!field))
2565 goto out;
2566
2567 if (is_string_field(field)) {
2568 flags |= HIST_FIELD_FL_STRING;
2569
2570 hist_field->size = MAX_FILTER_STR_VAL;
2571 hist_field->type = kstrdup(field->type, GFP_KERNEL);
2572 if (!hist_field->type)
2573 goto free;
2574
2575 if (field->filter_type == FILTER_STATIC_STRING)
2576 hist_field->fn = hist_field_string;
2577 else if (field->filter_type == FILTER_DYN_STRING)
2578 hist_field->fn = hist_field_dynstring;
2579 else
2580 hist_field->fn = hist_field_pstring;
2581 } else {
2582 hist_field->size = field->size;
2583 hist_field->is_signed = field->is_signed;
2584 hist_field->type = kstrdup(field->type, GFP_KERNEL);
2585 if (!hist_field->type)
2586 goto free;
2587
2588 hist_field->fn = select_value_fn(field->size,
2589 field->is_signed);
2590 if (!hist_field->fn) {
2591 destroy_hist_field(hist_field, 0);
2592 return NULL;
2593 }
2594 }
2595 out:
2596 hist_field->field = field;
2597 hist_field->flags = flags;
2598
2599 if (var_name) {
2600 hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2601 if (!hist_field->var.name)
2602 goto free;
2603 }
2604
2605 return hist_field;
2606 free:
2607 destroy_hist_field(hist_field, 0);
2608 return NULL;
2609 }
2610
2611 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2612 {
2613 unsigned int i;
2614
2615 for (i = 0; i < HIST_FIELDS_MAX; i++) {
2616 if (hist_data->fields[i]) {
2617 destroy_hist_field(hist_data->fields[i], 0);
2618 hist_data->fields[i] = NULL;
2619 }
2620 }
2621
2622 for (i = 0; i < hist_data->n_var_refs; i++) {
2623 WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
2624 __destroy_hist_field(hist_data->var_refs[i]);
2625 hist_data->var_refs[i] = NULL;
2626 }
2627 }
2628
2629 static int init_var_ref(struct hist_field *ref_field,
2630 struct hist_field *var_field,
2631 char *system, char *event_name)
2632 {
2633 int err = 0;
2634
2635 ref_field->var.idx = var_field->var.idx;
2636 ref_field->var.hist_data = var_field->hist_data;
2637 ref_field->size = var_field->size;
2638 ref_field->is_signed = var_field->is_signed;
2639 ref_field->flags |= var_field->flags &
2640 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2641
2642 if (system) {
2643 ref_field->system = kstrdup(system, GFP_KERNEL);
2644 if (!ref_field->system)
2645 return -ENOMEM;
2646 }
2647
2648 if (event_name) {
2649 ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2650 if (!ref_field->event_name) {
2651 err = -ENOMEM;
2652 goto free;
2653 }
2654 }
2655
2656 if (var_field->var.name) {
2657 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2658 if (!ref_field->name) {
2659 err = -ENOMEM;
2660 goto free;
2661 }
2662 } else if (var_field->name) {
2663 ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2664 if (!ref_field->name) {
2665 err = -ENOMEM;
2666 goto free;
2667 }
2668 }
2669
2670 ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
2671 if (!ref_field->type) {
2672 err = -ENOMEM;
2673 goto free;
2674 }
2675 out:
2676 return err;
2677 free:
2678 kfree(ref_field->system);
2679 kfree(ref_field->event_name);
2680 kfree(ref_field->name);
2681
2682 goto out;
2683 }
2684
2685 static int find_var_ref_idx(struct hist_trigger_data *hist_data,
2686 struct hist_field *var_field)
2687 {
2688 struct hist_field *ref_field;
2689 int i;
2690
2691 for (i = 0; i < hist_data->n_var_refs; i++) {
2692 ref_field = hist_data->var_refs[i];
2693 if (ref_field->var.idx == var_field->var.idx &&
2694 ref_field->var.hist_data == var_field->hist_data)
2695 return i;
2696 }
2697
2698 return -ENOENT;
2699 }
2700
2701 /*
2702  * create_var_ref - Create a variable reference and attach it to trigger
2703  * @hist_data: The trigger that will be referencing the variable
2704  * @var_field: The VAR field to create a reference to
2705  * @system: The optional system string
2706  * @event_name: The optional event_name string
2707  *
2708  * Given a variable hist_field, create a VAR_REF hist_field that
2709  * represents a reference to it.
2710  *
2711  * This function also adds the reference to the trigger that
2712  * now references the variable.
2713  *
2714  * Return: The VAR_REF field if successful, NULL if not
2715  */
2716 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
2717 struct hist_field *var_field,
2718 char *system, char *event_name)
2719 {
2720 unsigned long flags = HIST_FIELD_FL_VAR_REF;
2721 struct hist_field *ref_field;
2722 int i;
2723
2724 /* Check if a reference to this variable already exists */
2725 for (i = 0; i < hist_data->n_var_refs; i++) {
2726 ref_field = hist_data->var_refs[i];
2727 if (ref_field->var.idx == var_field->var.idx &&
2728 ref_field->var.hist_data == var_field->hist_data) {
2729 get_hist_field(ref_field);
2730 return ref_field;
2731 }
2732 }
2733
2734 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2735 if (ref_field) {
2736 if (init_var_ref(ref_field, var_field, system, event_name)) {
2737 destroy_hist_field(ref_field, 0);
2738 return NULL;
2739 }
2740
2741 hist_data->var_refs[hist_data->n_var_refs] = ref_field;
2742 ref_field->var_ref_idx = hist_data->n_var_refs++;
2743 }
2744
2745 return ref_field;
2746 }
2747
2748 static bool is_var_ref(char *var_name)
2749 {
2750 if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
2751 return false;
2752
2753 return true;
2754 }
2755
2756 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2757 char *var_name)
2758 {
2759 char *name, *field;
2760 unsigned int i;
2761
2762 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2763 name = hist_data->attrs->var_defs.name[i];
2764
2765 if (strcmp(var_name, name) == 0) {
2766 field = hist_data->attrs->var_defs.expr[i];
2767 if (contains_operator(field) || is_var_ref(field))
2768 continue;
2769 return field;
2770 }
2771 }
2772
2773 return NULL;
2774 }
2775
2776 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2777 char *system, char *event_name,
2778 char *var_name)
2779 {
2780 struct trace_event_call *call;
2781
2782 if (system && event_name) {
2783 call = hist_data->event_file->event_call;
2784
2785 if (strcmp(system, call->class->system) != 0)
2786 return NULL;
2787
2788 if (strcmp(event_name, trace_event_name(call)) != 0)
2789 return NULL;
2790 }
2791
2792 if (!!system != !!event_name)
2793 return NULL;
2794
2795 if (!is_var_ref(var_name))
2796 return NULL;
2797
2798 var_name++;
2799
2800 return field_name_from_var(hist_data, var_name);
2801 }
2802
2803 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2804 char *system, char *event_name,
2805 char *var_name)
2806 {
2807 struct hist_field *var_field = NULL, *ref_field = NULL;
2808 struct trace_array *tr = hist_data->event_file->tr;
2809
2810 if (!is_var_ref(var_name))
2811 return NULL;
2812
2813 var_name++;
2814
2815 var_field = find_event_var(hist_data, system, event_name, var_name);
2816 if (var_field)
2817 ref_field = create_var_ref(hist_data, var_field,
2818 system, event_name);
2819
2820 if (!ref_field)
2821 hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
2822
2823 return ref_field;
2824 }
2825
2826 static struct ftrace_event_field *
2827 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2828 char *field_str, unsigned long *flags)
2829 {
2830 struct ftrace_event_field *field = NULL;
2831 char *field_name, *modifier, *str;
2832 struct trace_array *tr = file->tr;
2833
2834 modifier = str = kstrdup(field_str, GFP_KERNEL);
2835 if (!modifier)
2836 return ERR_PTR(-ENOMEM);
2837
2838 field_name = strsep(&modifier, ".");
2839 if (modifier) {
2840 if (strcmp(modifier, "hex") == 0)
2841 *flags |= HIST_FIELD_FL_HEX;
2842 else if (strcmp(modifier, "sym") == 0)
2843 *flags |= HIST_FIELD_FL_SYM;
2844 else if (strcmp(modifier, "sym-offset") == 0)
2845 *flags |= HIST_FIELD_FL_SYM_OFFSET;
2846 else if ((strcmp(modifier, "execname") == 0) &&
2847 (strcmp(field_name, "common_pid") == 0))
2848 *flags |= HIST_FIELD_FL_EXECNAME;
2849 else if (strcmp(modifier, "syscall") == 0)
2850 *flags |= HIST_FIELD_FL_SYSCALL;
2851 else if (strcmp(modifier, "log2") == 0)
2852 *flags |= HIST_FIELD_FL_LOG2;
2853 else if (strcmp(modifier, "usecs") == 0)
2854 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2855 else {
2856 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
2857 field = ERR_PTR(-EINVAL);
2858 goto out;
2859 }
2860 }
2861
2862 if (strcmp(field_name, "common_timestamp") == 0) {
2863 *flags |= HIST_FIELD_FL_TIMESTAMP;
2864 hist_data->enable_timestamps = true;
2865 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2866 hist_data->attrs->ts_in_usecs = true;
2867 } else if (strcmp(field_name, "cpu") == 0)
2868 *flags |= HIST_FIELD_FL_CPU;
2869 else {
2870 field = trace_find_event_field(file->event_call, field_name);
2871 if (!field || !field->size) {
2872 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
2873 field = ERR_PTR(-EINVAL);
2874 goto out;
2875 }
2876 }
2877 out:
2878 kfree(str);
2879
2880 return field;
2881 }
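
/*
 * Illustrative field specifications accepted above:
 *   common_timestamp.usecs   - event timestamp in microseconds
 *   common_pid.execname      - pid displayed as the saved task comm
 *   bytes_req.log2           - value bucketed by power of two
 *   call_site.sym-offset     - address shown as symbol+offset
 * (the field names are examples from other events, not requirements
 * of this code)
 */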
2882
2883 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2884 struct hist_field *var_ref,
2885 char *var_name)
2886 {
2887 struct hist_field *alias = NULL;
2888 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2889
2890 alias = create_hist_field(hist_data, NULL, flags, var_name);
2891 if (!alias)
2892 return NULL;
2893
2894 alias->fn = var_ref->fn;
2895 alias->operands[0] = var_ref;
2896
2897 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2898 destroy_hist_field(alias, 0);
2899 return NULL;
2900 }
2901
2902 alias->var_ref_idx = var_ref->var_ref_idx;
2903
2904 return alias;
2905 }
2906
2907 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2908 struct trace_event_file *file, char *str,
2909 unsigned long *flags, char *var_name)
2910 {
2911 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2912 struct ftrace_event_field *field = NULL;
2913 struct hist_field *hist_field = NULL;
2914 int ret = 0;
2915
2916 s = strchr(str, '.');
2917 if (s) {
2918 s = strchr(++s, '.');
2919 if (s) {
2920 ref_system = strsep(&str, ".");
2921 if (!str) {
2922 ret = -EINVAL;
2923 goto out;
2924 }
2925 ref_event = strsep(&str, ".");
2926 if (!str) {
2927 ret = -EINVAL;
2928 goto out;
2929 }
2930 ref_var = str;
2931 }
2932 }
2933
2934 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2935 if (!s) {
2936 hist_field = parse_var_ref(hist_data, ref_system,
2937 ref_event, ref_var);
2938 if (hist_field) {
2939 if (var_name) {
2940 hist_field = create_alias(hist_data, hist_field, var_name);
2941 if (!hist_field) {
2942 ret = -ENOMEM;
2943 goto out;
2944 }
2945 }
2946 return hist_field;
2947 }
2948 } else
2949 str = s;
2950
2951 field = parse_field(hist_data, file, str, flags);
2952 if (IS_ERR(field)) {
2953 ret = PTR_ERR(field);
2954 goto out;
2955 }
2956
2957 hist_field = create_hist_field(hist_data, field, *flags, var_name);
2958 if (!hist_field) {
2959 ret = -ENOMEM;
2960 goto out;
2961 }
2962
2963 return hist_field;
2964 out:
2965 return ERR_PTR(ret);
2966 }
2967
2968 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2969 struct trace_event_file *file,
2970 char *str, unsigned long flags,
2971 char *var_name, unsigned int level);
2972
2973 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2974 struct trace_event_file *file,
2975 char *str, unsigned long flags,
2976 char *var_name, unsigned int level)
2977 {
2978 struct hist_field *operand1, *expr = NULL;
2979 unsigned long operand_flags;
2980 int ret = 0;
2981 char *s;
2982
2983
2984 /* only -(xxx) is supported, i.e. explicit parens are required */
2985 if (level > 3) {
2986 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2987 ret = -EINVAL;
2988 goto free;
2989 }
2990
2991 str++; /* skip the leading '-' */
2992
2993 s = strchr(str, '(');
2994 if (s)
2995 str++;
2996 else {
2997 ret = -EINVAL;
2998 goto free;
2999 }
3000
3001 s = strrchr(str, ')');
3002 if (s)
3003 *s = '\0';
3004 else {
3005 ret = -EINVAL;
3006 goto free;
3007 }
3008
3009 flags |= HIST_FIELD_FL_EXPR;
3010 expr = create_hist_field(hist_data, NULL, flags, var_name);
3011 if (!expr) {
3012 ret = -ENOMEM;
3013 goto free;
3014 }
3015
3016 operand_flags = 0;
3017 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3018 if (IS_ERR(operand1)) {
3019 ret = PTR_ERR(operand1);
3020 goto free;
3021 }
3022
3023 expr->flags |= operand1->flags &
3024 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3025 expr->fn = hist_field_unary_minus;
3026 expr->operands[0] = operand1;
3027 expr->operator = FIELD_OP_UNARY_MINUS;
3028 expr->name = expr_str(expr, 0);
3029 expr->type = kstrdup(operand1->type, GFP_KERNEL);
3030 if (!expr->type) {
3031 ret = -ENOMEM;
3032 goto free;
3033 }
3034
3035 return expr;
3036 free:
3037 destroy_hist_field(expr, 0);
3038 return ERR_PTR(ret);
3039 }
3040
3041 static int check_expr_operands(struct trace_array *tr,
3042 struct hist_field *operand1,
3043 struct hist_field *operand2)
3044 {
3045 unsigned long operand1_flags = operand1->flags;
3046 unsigned long operand2_flags = operand2->flags;
3047
3048 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
3049 (operand1_flags & HIST_FIELD_FL_ALIAS)) {
3050 struct hist_field *var;
3051
3052 var = find_var_field(operand1->var.hist_data, operand1->name);
3053 if (!var)
3054 return -EINVAL;
3055 operand1_flags = var->flags;
3056 }
3057
3058 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
3059 (operand2_flags & HIST_FIELD_FL_ALIAS)) {
3060 struct hist_field *var;
3061
3062 var = find_var_field(operand2->var.hist_data, operand2->name);
3063 if (!var)
3064 return -EINVAL;
3065 operand2_flags = var->flags;
3066 }
3067
3068 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
3069 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
3070 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
3071 return -EINVAL;
3072 }
3073
3074 return 0;
3075 }
3076
3077 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
3078 struct trace_event_file *file,
3079 char *str, unsigned long flags,
3080 char *var_name, unsigned int level)
3081 {
3082 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
3083 unsigned long operand_flags;
3084 int field_op, ret = -EINVAL;
3085 char *sep, *operand1_str;
3086
3087 if (level > 3) {
3088 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
3089 return ERR_PTR(-EINVAL);
3090 }
3091
3092 field_op = contains_operator(str);
3093
3094 if (field_op == FIELD_OP_NONE)
3095 return parse_atom(hist_data, file, str, &flags, var_name);
3096
3097 if (field_op == FIELD_OP_UNARY_MINUS)
3098 return parse_unary(hist_data, file, str, flags, var_name, ++level);
3099
3100 switch (field_op) {
3101 case FIELD_OP_MINUS:
3102 sep = "-";
3103 break;
3104 case FIELD_OP_PLUS:
3105 sep = "+";
3106 break;
3107 default:
3108 goto free;
3109 }
3110
3111 operand1_str = strsep(&str, sep);
3112 if (!operand1_str || !str)
3113 goto free;
3114
3115 operand_flags = 0;
3116 operand1 = parse_atom(hist_data, file, operand1_str,
3117 &operand_flags, NULL);
3118 if (IS_ERR(operand1)) {
3119 ret = PTR_ERR(operand1);
3120 operand1 = NULL;
3121 goto free;
3122 }
3123
3124 /* the rest of the string could be another expression, e.g. b+c in a+b+c */
3125 operand_flags = 0;
3126 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3127 if (IS_ERR(operand2)) {
3128 ret = PTR_ERR(operand2);
3129 operand2 = NULL;
3130 goto free;
3131 }
3132
3133 ret = check_expr_operands(file->tr, operand1, operand2);
3134 if (ret)
3135 goto free;
3136
3137 flags |= HIST_FIELD_FL_EXPR;
3138
3139 flags |= operand1->flags &
3140 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3141
3142 expr = create_hist_field(hist_data, NULL, flags, var_name);
3143 if (!expr) {
3144 ret = -ENOMEM;
3145 goto free;
3146 }
3147
3148 operand1->read_once = true;
3149 operand2->read_once = true;
3150
3151 expr->operands[0] = operand1;
3152 expr->operands[1] = operand2;
3153 expr->operator = field_op;
3154 expr->name = expr_str(expr, 0);
3155 expr->type = kstrdup(operand1->type, GFP_KERNEL);
3156 if (!expr->type) {
3157 ret = -ENOMEM;
3158 goto free;
3159 }
3160
3161 switch (field_op) {
3162 case FIELD_OP_MINUS:
3163 expr->fn = hist_field_minus;
3164 break;
3165 case FIELD_OP_PLUS:
3166 expr->fn = hist_field_plus;
3167 break;
3168 default:
3169 ret = -EINVAL;
3170 goto free;
3171 }
3172
3173 return expr;
3174 free:
3175 destroy_hist_field(operand1, 0);
3176 destroy_hist_field(operand2, 0);
3177 destroy_hist_field(expr, 0);
3178
3179 return ERR_PTR(ret);
3180 }
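
/*
 * Illustrative expressions handled above:
 *   common_timestamp.usecs-$ts0   - FIELD_OP_MINUS between a field atom
 *                                   and a variable reference
 *   -(param1+param2)              - FIELD_OP_UNARY_MINUS, handled by
 *                                   parse_unary(); explicit parens are
 *                                   required after the leading '-'
 * (param1/param2 are example field names.)  Nesting depth is capped:
 * both parse_expr() and parse_unary() give up with
 * HIST_ERR_TOO_MANY_SUBEXPR once level exceeds 3.
 */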
3181
3182 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
3183 struct trace_event_file *file)
3184 {
3185 struct event_trigger_data *test;
3186
3187 lockdep_assert_held(&event_mutex);
3188
3189 list_for_each_entry(test, &file->triggers, list) {
3190 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3191 if (test->private_data == hist_data)
3192 return test->filter_str;
3193 }
3194 }
3195
3196 return NULL;
3197 }
3198
3199 static struct event_command trigger_hist_cmd;
3200 static int event_hist_trigger_func(struct event_command *cmd_ops,
3201 struct trace_event_file *file,
3202 char *glob, char *cmd, char *param);
3203
3204 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
3205 struct hist_trigger_data *hist_data,
3206 unsigned int n_keys)
3207 {
3208 struct hist_field *target_hist_field, *hist_field;
3209 unsigned int n, i, j;
3210
3211 if (hist_data->n_fields - hist_data->n_vals != n_keys)
3212 return false;
3213
3214 i = hist_data->n_vals;
3215 j = target_hist_data->n_vals;
3216
3217 for (n = 0; n < n_keys; n++) {
3218 hist_field = hist_data->fields[i + n];
3219 target_hist_field = target_hist_data->fields[j + n];
3220
3221 if (strcmp(hist_field->type, target_hist_field->type) != 0)
3222 return false;
3223 if (hist_field->size != target_hist_field->size)
3224 return false;
3225 if (hist_field->is_signed != target_hist_field->is_signed)
3226 return false;
3227 }
3228
3229 return true;
3230 }
3231
3232 static struct hist_trigger_data *
3233 find_compatible_hist(struct hist_trigger_data *target_hist_data,
3234 struct trace_event_file *file)
3235 {
3236 struct hist_trigger_data *hist_data;
3237 struct event_trigger_data *test;
3238 unsigned int n_keys;
3239
3240 lockdep_assert_held(&event_mutex);
3241
3242 n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
3243
3244 list_for_each_entry(test, &file->triggers, list) {
3245 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3246 hist_data = test->private_data;
3247
3248 if (compatible_keys(target_hist_data, hist_data, n_keys))
3249 return hist_data;
3250 }
3251 }
3252
3253 return NULL;
3254 }
3255
3256 static struct trace_event_file *event_file(struct trace_array *tr,
3257 char *system, char *event_name)
3258 {
3259 struct trace_event_file *file;
3260
3261 file = __find_event_file(tr, system, event_name);
3262 if (!file)
3263 return ERR_PTR(-EINVAL);
3264
3265 return file;
3266 }
3267
3268 static struct hist_field *
3269 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
3270 char *system, char *event_name, char *field_name)
3271 {
3272 struct hist_field *event_var;
3273 char *synthetic_name;
3274
3275 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3276 if (!synthetic_name)
3277 return ERR_PTR(-ENOMEM);
3278
3279 strcpy(synthetic_name, "synthetic_");
3280 strcat(synthetic_name, field_name);
3281
3282 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
3283
3284 kfree(synthetic_name);
3285
3286 return event_var;
3287 }
3288
3289 /*
3290  * create_field_var_hist - Automatically create a histogram and var for a field
3291  * @target_hist_data: The target hist trigger
3292  * @subsys_name: Optional subsystem name
3293  * @event_name: Optional event name
3294  * @field_name: The name of the field (and the resulting variable)
3295  *
3296  * Hist trigger actions fetch data from variables, not directly from
3297  * events.  However, for convenience, users are allowed to directly
3298  * specify an event field in an action, which will be automatically
3299  * converted into a variable on their behalf.
3300  *
3301  * If a user specifies a field on an event that isn't the event the
3302  * histogram is currently being defined on (the target event
3303  * histogram), the only way that can be accomplished is if a new hist
3304  * trigger is created and the field variable defined on that.
3305  *
3306  * This function creates a new histogram compatible with the target
3307  * event (meaning a histogram with the same keys as the target
3308  * histogram), and creates a variable for the specified field, but
3309  * saves the variable as 'synthetic_<field>' because it can't be
3310  * displayed in the histogram it's created on.
3311  *
3312  * Return: The variable created for the field.
3313  */
3314 static struct hist_field *
3315 create_field_var_hist(struct hist_trigger_data *target_hist_data,
3316 char *subsys_name, char *event_name, char *field_name)
3317 {
3318 struct trace_array *tr = target_hist_data->event_file->tr;
3319 struct hist_field *event_var = ERR_PTR(-EINVAL);
3320 struct hist_trigger_data *hist_data;
3321 unsigned int i, n, first = true;
3322 struct field_var_hist *var_hist;
3323 struct trace_event_file *file;
3324 struct hist_field *key_field;
3325 char *saved_filter;
3326 char *cmd;
3327 int ret;
3328
3329 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
3330 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3331 return ERR_PTR(-EINVAL);
3332 }
3333
3334 file = event_file(tr, subsys_name, event_name);
3335
3336 if (IS_ERR(file)) {
3337 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
3338 ret = PTR_ERR(file);
3339 return ERR_PTR(ret);
3340 }
3341
3342 /*
3343  * Look for a histogram compatible with the target.  The found
3344  * histogram's specification is used to create a new matching
3345  * histogram with our variable on it; target_hist_data is not yet
3346  * a registered histogram, so it can't be used for that.
3347  */
3348 hist_data = find_compatible_hist(target_hist_data, file);
3349 if (!hist_data) {
3350 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
3351 return ERR_PTR(-EINVAL);
3352 }
3353
3354 /* See if a synthetic field variable has already been created */
3355 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3356 event_name, field_name);
3357 if (!IS_ERR_OR_NULL(event_var))
3358 return event_var;
3359
3360 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
3361 if (!var_hist)
3362 return ERR_PTR(-ENOMEM);
3363
3364 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3365 if (!cmd) {
3366 kfree(var_hist);
3367 return ERR_PTR(-ENOMEM);
3368 }
3369
3370 /* Use the same keys as the compatible histogram */
3371 strcat(cmd, "keys=");
3372
3373 for_each_hist_key_field(i, hist_data) {
3374 key_field = hist_data->fields[i];
3375 if (!first)
3376 strcat(cmd, ",");
3377 strcat(cmd, key_field->field->name);
3378 first = false;
3379 }
3380
3381 /* Create the synthetic field variable specification */
3382 strcat(cmd, ":synthetic_");
3383 strcat(cmd, field_name);
3384 strcat(cmd, "=");
3385 strcat(cmd, field_name);
3386
3387 /* Use the same filter as the compatible histogram, if any */
3388 saved_filter = find_trigger_filter(hist_data, file);
3389 if (saved_filter) {
3390 strcat(cmd, " if ");
3391 strcat(cmd, saved_filter);
3392 }
3393
3394 var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
3395 if (!var_hist->cmd) {
3396 kfree(cmd);
3397 kfree(var_hist);
3398 return ERR_PTR(-ENOMEM);
3399 }
3400
3401 /* Save the compatible histogram information */
3402 var_hist->hist_data = hist_data;
3403
3404 /* Create the new histogram with our variable on the other event */
3405 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
3406 "", "hist", cmd);
3407 if (ret) {
3408 kfree(cmd);
3409 kfree(var_hist->cmd);
3410 kfree(var_hist);
3411 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
3412 return ERR_PTR(ret);
3413 }
3414
3415 kfree(cmd);
3416
3417 /* If the synthetic variable still can't be found, something went wrong */
3418 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3419 event_name, field_name);
3420 if (IS_ERR_OR_NULL(event_var)) {
3421 kfree(var_hist->cmd);
3422 kfree(var_hist);
3423 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
3424 return ERR_PTR(-EINVAL);
3425 }
3426
3427 n = target_hist_data->n_field_var_hists;
3428 target_hist_data->field_var_hists[n] = var_hist;
3429 target_hist_data->n_field_var_hists++;
3430
3431 return event_var;
3432 }
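
/*
 * Illustrative result of the command string assembled above: for a
 * compatible histogram keyed on "pid" and a requested field "prio"
 * (example names), cmd becomes
 *
 *   keys=pid:synthetic_prio=prio if <saved filter>
 *
 * and is registered on the other event via event_hist_trigger_func()
 * exactly as if the user had written it.
 */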
3433
3434 static struct hist_field *
3435 find_target_event_var(struct hist_trigger_data *hist_data,
3436 char *subsys_name, char *event_name, char *var_name)
3437 {
3438 struct trace_event_file *file = hist_data->event_file;
3439 struct hist_field *hist_field = NULL;
3440
3441 if (subsys_name) {
3442 struct trace_event_call *call;
3443
3444 if (!event_name)
3445 return NULL;
3446
3447 call = file->event_call;
3448
3449 if (strcmp(subsys_name, call->class->system) != 0)
3450 return NULL;
3451
3452 if (strcmp(event_name, trace_event_name(call)) != 0)
3453 return NULL;
3454 }
3455
3456 hist_field = find_var_field(hist_data, var_name);
3457
3458 return hist_field;
3459 }
3460
3461 static inline void __update_field_vars(struct tracing_map_elt *elt,
3462 struct ring_buffer_event *rbe,
3463 void *rec,
3464 struct field_var **field_vars,
3465 unsigned int n_field_vars,
3466 unsigned int field_var_str_start)
3467 {
3468 struct hist_elt_data *elt_data = elt->private_data;
3469 unsigned int i, j, var_idx;
3470 u64 var_val;
3471
3472 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3473 struct field_var *field_var = field_vars[i];
3474 struct hist_field *var = field_var->var;
3475 struct hist_field *val = field_var->val;
3476
3477 var_val = val->fn(val, elt, rbe, rec);
3478 var_idx = var->var.idx;
3479
3480 if (val->flags & HIST_FIELD_FL_STRING) {
3481 char *str = elt_data->field_var_str[j++];
3482 char *val_str = (char *)(uintptr_t)var_val;
3483
3484 strscpy(str, val_str, STR_VAR_LEN_MAX);
3485 var_val = (u64)(uintptr_t)str;
3486 }
3487 tracing_map_set_var(elt, var_idx, var_val);
3488 }
3489 }
3490
3491 static void update_field_vars(struct hist_trigger_data *hist_data,
3492 struct tracing_map_elt *elt,
3493 struct ring_buffer_event *rbe,
3494 void *rec)
3495 {
3496 __update_field_vars(elt, rbe, rec, hist_data->field_vars,
3497 hist_data->n_field_vars, 0);
3498 }
3499
3500 static void save_track_data_vars(struct hist_trigger_data *hist_data,
3501 struct tracing_map_elt *elt, void *rec,
3502 struct ring_buffer_event *rbe, void *key,
3503 struct action_data *data, u64 *var_ref_vals)
3504 {
3505 __update_field_vars(elt, rbe, rec, hist_data->save_vars,
3506 hist_data->n_save_vars, hist_data->n_field_var_str);
3507 }
3508
3509 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3510 struct trace_event_file *file,
3511 char *name, int size, const char *type)
3512 {
3513 struct hist_field *var;
3514 int idx;
3515
3516 if (find_var(hist_data, file, name) && !hist_data->remove) {
3517 var = ERR_PTR(-EINVAL);
3518 goto out;
3519 }
3520
3521 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3522 if (!var) {
3523 var = ERR_PTR(-ENOMEM);
3524 goto out;
3525 }
3526
3527 idx = tracing_map_add_var(hist_data->map);
3528 if (idx < 0) {
3529 kfree(var);
3530 var = ERR_PTR(-EINVAL);
3531 goto out;
3532 }
3533
3534 var->ref = 1;
3535 var->flags = HIST_FIELD_FL_VAR;
3536 var->var.idx = idx;
3537 var->var.hist_data = var->hist_data = hist_data;
3538 var->size = size;
3539 var->var.name = kstrdup(name, GFP_KERNEL);
3540 var->type = kstrdup(type, GFP_KERNEL);
3541 if (!var->var.name || !var->type) {
3542 kfree(var->var.name);
3543 kfree(var->type);
3544 kfree(var);
3545 var = ERR_PTR(-ENOMEM);
3546 }
3547 out:
3548 return var;
3549 }
3550
3551 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3552 struct trace_event_file *file,
3553 char *field_name)
3554 {
3555 struct hist_field *val = NULL, *var = NULL;
3556 unsigned long flags = HIST_FIELD_FL_VAR;
3557 struct trace_array *tr = file->tr;
3558 struct field_var *field_var;
3559 int ret = 0;
3560
3561 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3562 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3563 ret = -EINVAL;
3564 goto err;
3565 }
3566
3567 val = parse_atom(hist_data, file, field_name, &flags, NULL);
3568 if (IS_ERR(val)) {
3569 hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
3570 ret = PTR_ERR(val);
3571 goto err;
3572 }
3573
3574 var = create_var(hist_data, file, field_name, val->size, val->type);
3575 if (IS_ERR(var)) {
3576 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
3577 kfree(val);
3578 ret = PTR_ERR(var);
3579 goto err;
3580 }
3581
3582 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3583 if (!field_var) {
3584 kfree(val);
3585 kfree(var);
3586 ret = -ENOMEM;
3587 goto err;
3588 }
3589
3590 field_var->var = var;
3591 field_var->val = val;
3592 out:
3593 return field_var;
3594 err:
3595 field_var = ERR_PTR(ret);
3596 goto out;
3597 }
3598
3599 /*
3600  * create_target_field_var - Automatically create a variable for a field
3601  * @target_hist_data: The target hist trigger
3602  * @subsys_name: Optional subsystem name
3603  * @event_name: Optional event name
3604  * @var_name: The name of the field (and the resulting variable)
3605  *
3606  * Hist trigger actions fetch data from variables, not directly from
3607  * events.  However, for convenience, users are allowed to directly
3608  * specify an event field in an action, which will be automatically
3609  * converted into a variable on their behalf.
3610  *
3611  * This function creates a field variable with the name var_name on
3612  * the hist trigger currently being defined on the target event.  If
3613  * subsys_name and event_name are specified, this function simply
3614  * verifies that they do in fact match the target event subsystem
3615  * and event name.
3616  *
3617  * Return: The variable created for the field.
3618  */
3619 static struct field_var *
3620 create_target_field_var(struct hist_trigger_data *target_hist_data,
3621 char *subsys_name, char *event_name, char *var_name)
3622 {
3623 struct trace_event_file *file = target_hist_data->event_file;
3624
3625 if (subsys_name) {
3626 struct trace_event_call *call;
3627
3628 if (!event_name)
3629 return NULL;
3630
3631 call = file->event_call;
3632
3633 if (strcmp(subsys_name, call->class->system) != 0)
3634 return NULL;
3635
3636 if (strcmp(event_name, trace_event_name(call)) != 0)
3637 return NULL;
3638 }
3639
3640 return create_field_var(target_hist_data, file, var_name);
3641 }
3642
3643 static bool check_track_val_max(u64 track_val, u64 var_val)
3644 {
3645 if (var_val <= track_val)
3646 return false;
3647
3648 return true;
3649 }
3650
3651 static bool check_track_val_changed(u64 track_val, u64 var_val)
3652 {
3653 if (var_val == track_val)
3654 return false;
3655
3656 return true;
3657 }
3658
3659 static u64 get_track_val(struct hist_trigger_data *hist_data,
3660 struct tracing_map_elt *elt,
3661 struct action_data *data)
3662 {
3663 unsigned int track_var_idx = data->track_data.track_var->var.idx;
3664 u64 track_val;
3665
3666 track_val = tracing_map_read_var(elt, track_var_idx);
3667
3668 return track_val;
3669 }
3670
3671 static void save_track_val(struct hist_trigger_data *hist_data,
3672 struct tracing_map_elt *elt,
3673 struct action_data *data, u64 var_val)
3674 {
3675 unsigned int track_var_idx = data->track_data.track_var->var.idx;
3676
3677 tracing_map_set_var(elt, track_var_idx, var_val);
3678 }
3679
3680 static void save_track_data(struct hist_trigger_data *hist_data,
3681 struct tracing_map_elt *elt, void *rec,
3682 struct ring_buffer_event *rbe, void *key,
3683 struct action_data *data, u64 *var_ref_vals)
3684 {
3685 if (data->track_data.save_data)
3686 data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3687 }
3688
3689 static bool check_track_val(struct tracing_map_elt *elt,
3690 struct action_data *data,
3691 u64 var_val)
3692 {
3693 struct hist_trigger_data *hist_data;
3694 u64 track_val;
3695
3696 hist_data = data->track_data.track_var->hist_data;
3697 track_val = get_track_val(hist_data, elt, data);
3698
3699 return data->track_data.check_val(track_val, var_val);
3700 }
3701
3702 #ifdef CONFIG_TRACER_SNAPSHOT
3703 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3704 {
3705 /* Update the tracked onmax/onchange data and report whether to snapshot */
3706 struct track_data *track_data = tr->cond_snapshot->cond_data;
3707 struct hist_elt_data *elt_data, *track_elt_data;
3708 struct snapshot_context *context = cond_data;
3709 struct action_data *action;
3710 u64 track_val;
3711
3712 if (!track_data)
3713 return false;
3714
3715 action = track_data->action_data;
3716
3717 track_val = get_track_val(track_data->hist_data, context->elt,
3718 track_data->action_data);
3719
3720 if (!action->track_data.check_val(track_data->track_val, track_val))
3721 return false;
3722
3723 track_data->track_val = track_val;
3724 memcpy(track_data->key, context->key, track_data->key_len);
3725
3726 elt_data = context->elt->private_data;
3727 track_elt_data = track_data->elt.private_data;
3728 if (elt_data->comm)
3729 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
3730
3731 track_data->updated = true;
3732
3733 return true;
3734 }
3735
3736 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3737 struct tracing_map_elt *elt, void *rec,
3738 struct ring_buffer_event *rbe, void *key,
3739 struct action_data *data,
3740 u64 *var_ref_vals)
3741 {
3742 struct trace_event_file *file = hist_data->event_file;
3743 struct snapshot_context context;
3744
3745 context.elt = elt;
3746 context.key = key;
3747
3748 tracing_snapshot_cond(file->tr, &context);
3749 }
3750
3751 static void hist_trigger_print_key(struct seq_file *m,
3752 struct hist_trigger_data *hist_data,
3753 void *key,
3754 struct tracing_map_elt *elt);
3755
3756 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
3757 {
3758 unsigned int i;
3759
3760 if (!hist_data->n_actions)
3761 return NULL;
3762
3763 for (i = 0; i < hist_data->n_actions; i++) {
3764 struct action_data *data = hist_data->actions[i];
3765
3766 if (data->action == ACTION_SNAPSHOT)
3767 return data;
3768 }
3769
3770 return NULL;
3771 }
3772
3773 static void track_data_snapshot_print(struct seq_file *m,
3774 struct hist_trigger_data *hist_data)
3775 {
3776 struct trace_event_file *file = hist_data->event_file;
3777 struct track_data *track_data;
3778 struct action_data *action;
3779
3780 track_data = tracing_cond_snapshot_data(file->tr);
3781 if (!track_data)
3782 return;
3783
3784 if (!track_data->updated)
3785 return;
3786
3787 action = snapshot_action(hist_data);
3788 if (!action)
3789 return;
3790
3791 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n");
3792 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
3793 action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
3794 action->track_data.var_str, track_data->track_val);
3795
3796 seq_puts(m, "\ttriggered by event with key: ");
3797 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
3798 seq_putc(m, '\n');
3799 }
3800 #else
3801 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3802 {
3803 return false;
3804 }
3805 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3806 struct tracing_map_elt *elt, void *rec,
3807 struct ring_buffer_event *rbe, void *key,
3808 struct action_data *data,
3809 u64 *var_ref_vals) {}
3810 static void track_data_snapshot_print(struct seq_file *m,
3811 struct hist_trigger_data *hist_data) {}
3812 #endif
3813
3814 static void track_data_print(struct seq_file *m,
3815 struct hist_trigger_data *hist_data,
3816 struct tracing_map_elt *elt,
3817 struct action_data *data)
3818 {
3819 u64 track_val = get_track_val(hist_data, elt, data);
3820 unsigned int i, save_var_idx;
3821
3822 if (data->handler == HANDLER_ONMAX)
3823 seq_printf(m, "\n\tmax: %10llu", track_val);
3824 else if (data->handler == HANDLER_ONCHANGE)
3825 seq_printf(m, "\n\tchanged: %10llu", track_val);
3826
3827 if (data->action == ACTION_SNAPSHOT)
3828 return;
3829
3830 for (i = 0; i < hist_data->n_save_vars; i++) {
3831 struct hist_field *save_val = hist_data->save_vars[i]->val;
3832 struct hist_field *save_var = hist_data->save_vars[i]->var;
3833 u64 val;
3834
3835 save_var_idx = save_var->var.idx;
3836
3837 val = tracing_map_read_var(elt, save_var_idx);
3838
3839 if (save_val->flags & HIST_FIELD_FL_STRING) {
3840 seq_printf(m, " %s: %-32s", save_var->var.name,
3841 (char *)(uintptr_t)(val));
3842 } else
3843 seq_printf(m, " %s: %10llu", save_var->var.name, val);
3844 }
3845 }
3846
3847 static void ontrack_action(struct hist_trigger_data *hist_data,
3848 struct tracing_map_elt *elt, void *rec,
3849 struct ring_buffer_event *rbe, void *key,
3850 struct action_data *data, u64 *var_ref_vals)
3851 {
3852 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3853
3854 if (check_track_val(elt, data, var_val)) {
3855 save_track_val(hist_data, elt, data, var_val);
3856 save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3857 }
3858 }
3859
3860 static void action_data_destroy(struct action_data *data)
3861 {
3862 unsigned int i;
3863
3864 lockdep_assert_held(&event_mutex);
3865
3866 kfree(data->action_name);
3867
3868 for (i = 0; i < data->n_params; i++)
3869 kfree(data->params[i]);
3870
3871 if (data->synth_event)
3872 data->synth_event->ref--;
3873
3874 kfree(data->synth_event_name);
3875
3876 kfree(data);
3877 }
3878
3879 static void track_data_destroy(struct hist_trigger_data *hist_data,
3880 struct action_data *data)
3881 {
3882 struct trace_event_file *file = hist_data->event_file;
3883
3884 destroy_hist_field(data->track_data.track_var, 0);
3885
3886 if (data->action == ACTION_SNAPSHOT) {
3887 struct track_data *track_data;
3888
3889 track_data = tracing_cond_snapshot_data(file->tr);
3890 if (track_data && track_data->hist_data == hist_data) {
3891 tracing_snapshot_cond_disable(file->tr);
3892 track_data_free(track_data);
3893 }
3894 }
3895
3896 kfree(data->track_data.var_str);
3897
3898 action_data_destroy(data);
3899 }
3900
3901 static int action_create(struct hist_trigger_data *hist_data,
3902 struct action_data *data);
3903
3904 static int track_data_create(struct hist_trigger_data *hist_data,
3905 struct action_data *data)
3906 {
3907 struct hist_field *var_field, *ref_field, *track_var = NULL;
3908 struct trace_event_file *file = hist_data->event_file;
3909 struct trace_array *tr = file->tr;
3910 char *track_data_var_str;
3911 int ret = 0;
3912
3913 track_data_var_str = data->track_data.var_str;
3914 if (track_data_var_str[0] != '$') {
3915 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3916 return -EINVAL;
3917 }
3918 track_data_var_str++;
3919
3920 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3921 if (!var_field) {
3922 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3923 return -EINVAL;
3924 }
3925
3926 ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3927 if (!ref_field)
3928 return -ENOMEM;
3929
3930 data->track_data.var_ref = ref_field;
3931
3932 if (data->handler == HANDLER_ONMAX)
3933 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3934 if (IS_ERR(track_var)) {
3935 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3936 ret = PTR_ERR(track_var);
3937 goto out;
3938 }
3939
3940 if (data->handler == HANDLER_ONCHANGE)
3941 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3942 if (IS_ERR(track_var)) {
3943 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3944 ret = PTR_ERR(track_var);
3945 goto out;
3946 }
3947 data->track_data.track_var = track_var;
3948
3949 ret = action_create(hist_data, data);
3950 out:
3951 return ret;
3952 }
3953
3954 static int parse_action_params(struct trace_array *tr, char *params,
3955 struct action_data *data)
3956 {
3957 char *param, *saved_param;
3958 bool first_param = true;
3959 int ret = 0;
3960
3961 while (params) {
3962 if (data->n_params >= SYNTH_FIELDS_MAX) {
3963 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
3964 goto out;
3965 }
3966
3967 param = strsep(&params, ",");
3968 if (!param) {
3969 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3970 ret = -EINVAL;
3971 goto out;
3972 }
3973
3974 param = strstrip(param);
3975 if (strlen(param) < 2) {
3976 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3977 ret = -EINVAL;
3978 goto out;
3979 }
3980
3981 saved_param = kstrdup(param, GFP_KERNEL);
3982 if (!saved_param) {
3983 ret = -ENOMEM;
3984 goto out;
3985 }
3986
3987 if (first_param && data->use_trace_keyword) {
3988 data->synth_event_name = saved_param;
3989 first_param = false;
3990 continue;
3991 }
3992 first_param = false;
3993
3994 data->params[data->n_params++] = saved_param;
3995 }
3996 out:
3997 return ret;
3998 }
3999
4000 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
4001 enum handler_id handler)
4002 {
4003 char *action_name;
4004 int ret = 0;
4005
4006 strsep(&str, ".");
4007 if (!str) {
4008 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
4009 ret = -EINVAL;
4010 goto out;
4011 }
4012
4013 action_name = strsep(&str, "(");
4014 if (!action_name || !str) {
4015 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
4016 ret = -EINVAL;
4017 goto out;
4018 }
4019
4020 if (str_has_prefix(action_name, "save")) {
4021 char *params = strsep(&str, ")");
4022
4023 if (!params) {
4024 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
4025 ret = -EINVAL;
4026 goto out;
4027 }
4028
4029 ret = parse_action_params(tr, params, data);
4030 if (ret)
4031 goto out;
4032
4033 if (handler == HANDLER_ONMAX)
4034 data->track_data.check_val = check_track_val_max;
4035 else if (handler == HANDLER_ONCHANGE)
4036 data->track_data.check_val = check_track_val_changed;
4037 else {
4038 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
4039 ret = -EINVAL;
4040 goto out;
4041 }
4042
4043 data->track_data.save_data = save_track_data_vars;
4044 data->fn = ontrack_action;
4045 data->action = ACTION_SAVE;
4046 } else if (str_has_prefix(action_name, "snapshot")) {
4047 char *params = strsep(&str, ")");
4048
4049 if (!str) {
4050 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
4051 ret = -EINVAL;
4052 goto out;
4053 }
4054
4055 if (handler == HANDLER_ONMAX)
4056 data->track_data.check_val = check_track_val_max;
4057 else if (handler == HANDLER_ONCHANGE)
4058 data->track_data.check_val = check_track_val_changed;
4059 else {
4060 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
4061 ret = -EINVAL;
4062 goto out;
4063 }
4064
4065 data->track_data.save_data = save_track_data_snapshot;
4066 data->fn = ontrack_action;
4067 data->action = ACTION_SNAPSHOT;
4068 } else {
4069 char *params = strsep(&str, ")");
4070
4071 if (str_has_prefix(action_name, "trace"))
4072 data->use_trace_keyword = true;
4073
4074 if (params) {
4075 ret = parse_action_params(tr, params, data);
4076 if (ret)
4077 goto out;
4078 }
4079
4080 if (handler == HANDLER_ONMAX)
4081 data->track_data.check_val = check_track_val_max;
4082 else if (handler == HANDLER_ONCHANGE)
4083 data->track_data.check_val = check_track_val_changed;
4084
4085 if (handler != HANDLER_ONMATCH) {
4086 data->track_data.save_data = action_trace;
4087 data->fn = ontrack_action;
4088 } else
4089 data->fn = action_trace;
4090
4091 data->action = ACTION_TRACE;
4092 }
4093
4094 data->action_name = kstrdup(action_name, GFP_KERNEL);
4095 if (!data->action_name) {
4096 ret = -ENOMEM;
4097 goto out;
4098 }
4099
4100 data->handler = handler;
4101 out:
4102 return ret;
4103 }
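
/*
 * Illustrative action clauses recognized here (together with the
 * surrounding onmax()/onchange()/onmatch() handlers parsed by the
 * callers):
 *   onmax($wakeup_lat).save(next_comm,prev_pid)        - ACTION_SAVE
 *   onmax($wakeup_lat).snapshot()                       - ACTION_SNAPSHOT
 *   onmatch(sched.sched_switch).wakeup_latency($wakeup_lat,next_pid)
 *                                                       - ACTION_TRACE
 *   onchange($v).trace(wakeup_latency,$v,next_pid)      - ACTION_TRACE via
 *                                                         the "trace" keyword
 * Variable, field and synthetic-event names above are examples only.
 */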
4104
4105 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
4106 char *str, enum handler_id handler)
4107 {
4108 struct action_data *data;
4109 int ret = -EINVAL;
4110 char *var_str;
4111
4112 data = kzalloc(sizeof(*data), GFP_KERNEL);
4113 if (!data)
4114 return ERR_PTR(-ENOMEM);
4115
4116 var_str = strsep(&str, ")");
4117 if (!var_str || !str) {
4118 ret = -EINVAL;
4119 goto free;
4120 }
4121
4122 data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
4123 if (!data->track_data.var_str) {
4124 ret = -ENOMEM;
4125 goto free;
4126 }
4127
4128 ret = action_parse(hist_data->event_file->tr, str, data, handler);
4129 if (ret)
4130 goto free;
4131 out:
4132 return data;
4133 free:
4134 track_data_destroy(hist_data, data);
4135 data = ERR_PTR(ret);
4136 goto out;
4137 }
4138
4139 static void onmatch_destroy(struct action_data *data)
4140 {
4141 kfree(data->match_data.event);
4142 kfree(data->match_data.event_system);
4143
4144 action_data_destroy(data);
4145 }
4146
4147 static void destroy_field_var(struct field_var *field_var)
4148 {
4149 if (!field_var)
4150 return;
4151
4152 destroy_hist_field(field_var->var, 0);
4153 destroy_hist_field(field_var->val, 0);
4154
4155 kfree(field_var);
4156 }
4157
4158 static void destroy_field_vars(struct hist_trigger_data *hist_data)
4159 {
4160 unsigned int i;
4161
4162 for (i = 0; i < hist_data->n_field_vars; i++)
4163 destroy_field_var(hist_data->field_vars[i]);
4164
4165 for (i = 0; i < hist_data->n_save_vars; i++)
4166 destroy_field_var(hist_data->save_vars[i]);
4167 }
4168
4169 static void save_field_var(struct hist_trigger_data *hist_data,
4170 struct field_var *field_var)
4171 {
4172 hist_data->field_vars[hist_data->n_field_vars++] = field_var;
4173
4174 if (field_var->val->flags & HIST_FIELD_FL_STRING)
4175 hist_data->n_field_var_str++;
4176 }
4177
4178
4179 static int check_synth_field(struct synth_event *event,
4180 struct hist_field *hist_field,
4181 unsigned int field_pos)
4182 {
4183 struct synth_field *field;
4184
4185 if (field_pos >= event->n_fields)
4186 return -EINVAL;
4187
4188 field = event->fields[field_pos];
4189
4190 if (strcmp(field->type, hist_field->type) != 0)
4191 return -EINVAL;
4192
4193 return 0;
4194 }
4195
4196 static struct hist_field *
4197 trace_action_find_var(struct hist_trigger_data *hist_data,
4198 struct action_data *data,
4199 char *system, char *event, char *var)
4200 {
4201 struct trace_array *tr = hist_data->event_file->tr;
4202 struct hist_field *hist_field;
4203
4204 var++;
4205
4206 hist_field = find_target_event_var(hist_data, system, event, var);
4207 if (!hist_field) {
4208 if (!system && data->handler == HANDLER_ONMATCH) {
4209 system = data->match_data.event_system;
4210 event = data->match_data.event;
4211 }
4212
4213 hist_field = find_event_var(hist_data, system, event, var);
4214 }
4215
4216 if (!hist_field)
4217 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
4218
4219 return hist_field;
4220 }
4221
4222 static struct hist_field *
4223 trace_action_create_field_var(struct hist_trigger_data *hist_data,
4224 struct action_data *data, char *system,
4225 char *event, char *var)
4226 {
4227 struct hist_field *hist_field = NULL;
4228 struct field_var *field_var;
4229
4230 /*
4231  * First try to create a field var on the target event (the one
4232  * currently being defined).  This will create a variable for
4233  * unqualified fields on the target event, or, if qualified, for
4234  * target fields that have qualified names matching the target.
4235  */
4236 field_var = create_target_field_var(hist_data, system, event, var);
4237
4238 if (field_var && !IS_ERR(field_var)) {
4239 save_field_var(hist_data, field_var);
4240 hist_field = field_var->var;
4241 } else {
4242 field_var = NULL;
4243 /*
4244  * If no explicit system.event was specified, default to looking
4245  * for fields on the onmatch(system.event.xxx) event given to the
4246  * handler.
4247  */
4248 if (!system && data->handler == HANDLER_ONMATCH) {
4249 system = data->match_data.event_system;
4250 event = data->match_data.event;
4251 }
4252
4253 /*
4254  * At this point we're looking at a field on another event.  Because
4255  * we can't modify a hist trigger on another event to add a variable
4256  * for a field, we need to create a new trigger on that event and
4257  * create the variable at the same time, which is what
4258  * create_field_var_hist() below does.
4259  */
4260 hist_field = create_field_var_hist(hist_data, system, event, var);
4261 if (IS_ERR(hist_field))
4262 goto free;
4263 }
4264 out:
4265 return hist_field;
4266 free:
4267 destroy_field_var(field_var);
4268 hist_field = NULL;
4269 goto out;
4270 }
4271
4272 static int trace_action_create(struct hist_trigger_data *hist_data,
4273 struct action_data *data)
4274 {
4275 struct trace_array *tr = hist_data->event_file->tr;
4276 char *event_name, *param, *system = NULL;
4277 struct hist_field *hist_field, *var_ref;
4278 unsigned int i;
4279 unsigned int field_pos = 0;
4280 struct synth_event *event;
4281 char *synth_event_name;
4282 int var_ref_idx, ret = 0;
4283
4284 lockdep_assert_held(&event_mutex);
4285
4286 if (data->use_trace_keyword)
4287 synth_event_name = data->synth_event_name;
4288 else
4289 synth_event_name = data->action_name;
4290
4291 event = find_synth_event(synth_event_name);
4292 if (!event) {
4293 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
4294 return -EINVAL;
4295 }
4296
4297 event->ref++;
4298
4299 for (i = 0; i < data->n_params; i++) {
4300 char *p;
4301
4302 p = param = kstrdup(data->params[i], GFP_KERNEL);
4303 if (!param) {
4304 ret = -ENOMEM;
4305 goto err;
4306 }
4307
4308 system = strsep(&param, ".");
4309 if (!param) {
4310 param = (char *)system;
4311 system = event_name = NULL;
4312 } else {
4313 event_name = strsep(&param, ".");
4314 if (!param) {
4315 kfree(p);
4316 ret = -EINVAL;
4317 goto err;
4318 }
4319 }
4320
4321 if (param[0] == '$')
4322 hist_field = trace_action_find_var(hist_data, data,
4323 system, event_name,
4324 param);
4325 else
4326 hist_field = trace_action_create_field_var(hist_data,
4327 data,
4328 system,
4329 event_name,
4330 param);
4331
4332 if (!hist_field) {
4333 kfree(p);
4334 ret = -EINVAL;
4335 goto err;
4336 }
4337
4338 if (check_synth_field(event, hist_field, field_pos) == 0) {
4339 var_ref = create_var_ref(hist_data, hist_field,
4340 system, event_name);
4341 if (!var_ref) {
4342 kfree(p);
4343 ret = -ENOMEM;
4344 goto err;
4345 }
4346
4347 var_ref_idx = find_var_ref_idx(hist_data, var_ref);
4348 if (WARN_ON(var_ref_idx < 0)) {
4349 ret = var_ref_idx;
4350 goto err;
4351 }
4352
4353 data->var_ref_idx[i] = var_ref_idx;
4354
4355 field_pos++;
4356 kfree(p);
4357 continue;
4358 }
4359
4360 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
4361 kfree(p);
4362 ret = -EINVAL;
4363 goto err;
4364 }
4365
4366 if (field_pos != event->n_fields) {
4367 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
4368 ret = -EINVAL;
4369 goto err;
4370 }
4371
4372 data->synth_event = event;
4373 out:
4374 return ret;
4375 err:
4376 event->ref--;
4377
4378 goto out;
4379 }
4380
4381 static int action_create(struct hist_trigger_data *hist_data,
4382 struct action_data *data)
4383 {
4384 struct trace_event_file *file = hist_data->event_file;
4385 struct trace_array *tr = file->tr;
4386 struct track_data *track_data;
4387 struct field_var *field_var;
4388 unsigned int i;
4389 char *param;
4390 int ret = 0;
4391
4392 if (data->action == ACTION_TRACE)
4393 return trace_action_create(hist_data, data);
4394
4395 if (data->action == ACTION_SNAPSHOT) {
4396 track_data = track_data_alloc(hist_data->key_size, data, hist_data);
4397 if (IS_ERR(track_data)) {
4398 ret = PTR_ERR(track_data);
4399 goto out;
4400 }
4401
4402 ret = tracing_snapshot_cond_enable(file->tr, track_data,
4403 cond_snapshot_update);
4404 if (ret)
4405 track_data_free(track_data);
4406
4407 goto out;
4408 }
4409
4410 if (data->action == ACTION_SAVE) {
4411 if (hist_data->n_save_vars) {
4412 ret = -EEXIST;
4413 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
4414 goto out;
4415 }
4416
4417 for (i = 0; i < data->n_params; i++) {
4418 param = kstrdup(data->params[i], GFP_KERNEL);
4419 if (!param) {
4420 ret = -ENOMEM;
4421 goto out;
4422 }
4423
4424 field_var = create_target_field_var(hist_data, NULL, NULL, param);
4425 if (IS_ERR(field_var)) {
4426 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
4427 errpos(param));
4428 ret = PTR_ERR(field_var);
4429 kfree(param);
4430 goto out;
4431 }
4432
4433 hist_data->save_vars[hist_data->n_save_vars++] = field_var;
4434 if (field_var->val->flags & HIST_FIELD_FL_STRING)
4435 hist_data->n_save_var_str++;
4436 kfree(param);
4437 }
4438 }
4439 out:
4440 return ret;
4441 }
4442
4443 static int onmatch_create(struct hist_trigger_data *hist_data,
4444 struct action_data *data)
4445 {
4446 return action_create(hist_data, data);
4447 }
4448
4449 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4450 {
4451 char *match_event, *match_event_system;
4452 struct action_data *data;
4453 int ret = -EINVAL;
4454
4455 data = kzalloc(sizeof(*data), GFP_KERNEL);
4456 if (!data)
4457 return ERR_PTR(-ENOMEM);
4458
4459 match_event = strsep(&str, ")");
4460 if (!match_event || !str) {
4461 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
4462 goto free;
4463 }
4464
4465 match_event_system = strsep(&match_event, ".");
4466 if (!match_event) {
4467 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
4468 goto free;
4469 }
4470
4471 if (IS_ERR(event_file(tr, match_event_system, match_event))) {
4472 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
4473 goto free;
4474 }
4475
4476 data->match_data.event = kstrdup(match_event, GFP_KERNEL);
4477 if (!data->match_data.event) {
4478 ret = -ENOMEM;
4479 goto free;
4480 }
4481
4482 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
4483 if (!data->match_data.event_system) {
4484 ret = -ENOMEM;
4485 goto free;
4486 }
4487
4488 ret = action_parse(tr, str, data, HANDLER_ONMATCH);
4489 if (ret)
4490 goto free;
4491 out:
4492 return data;
4493 free:
4494 onmatch_destroy(data);
4495 data = ERR_PTR(ret);
4496 goto out;
4497 }
4498
4499 static int create_hitcount_val(struct hist_trigger_data *hist_data)
4500 {
4501 hist_data->fields[HITCOUNT_IDX] =
4502 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
4503 if (!hist_data->fields[HITCOUNT_IDX])
4504 return -ENOMEM;
4505
4506 hist_data->n_vals++;
4507 hist_data->n_fields++;
4508
4509 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
4510 return -EINVAL;
4511
4512 return 0;
4513 }
4514
4515 static int __create_val_field(struct hist_trigger_data *hist_data,
4516 unsigned int val_idx,
4517 struct trace_event_file *file,
4518 char *var_name, char *field_str,
4519 unsigned long flags)
4520 {
4521 struct hist_field *hist_field;
4522 int ret = 0;
4523
4524 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
4525 if (IS_ERR(hist_field)) {
4526 ret = PTR_ERR(hist_field);
4527 goto out;
4528 }
4529
4530 hist_data->fields[val_idx] = hist_field;
4531
4532 ++hist_data->n_vals;
4533 ++hist_data->n_fields;
4534
4535 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4536 ret = -EINVAL;
4537 out:
4538 return ret;
4539 }
4540
4541 static int create_val_field(struct hist_trigger_data *hist_data,
4542 unsigned int val_idx,
4543 struct trace_event_file *file,
4544 char *field_str)
4545 {
4546 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
4547 return -EINVAL;
4548
4549 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
4550 }
4551
4552 static int create_var_field(struct hist_trigger_data *hist_data,
4553 unsigned int val_idx,
4554 struct trace_event_file *file,
4555 char *var_name, char *expr_str)
4556 {
4557 struct trace_array *tr = hist_data->event_file->tr;
4558 unsigned long flags = 0;
4559
4560 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4561 return -EINVAL;
4562
4563 if (find_var(hist_data, file, var_name) && !hist_data->remove) {
4564 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
4565 return -EINVAL;
4566 }
4567
4568 flags |= HIST_FIELD_FL_VAR;
4569 hist_data->n_vars++;
4570 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
4571 return -EINVAL;
4572
4573 return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
4574 }
4575
4576 static int create_val_fields(struct hist_trigger_data *hist_data,
4577 struct trace_event_file *file)
4578 {
4579 char *fields_str, *field_str;
4580 unsigned int i, j = 1;
4581 int ret;
4582
4583 ret = create_hitcount_val(hist_data);
4584 if (ret)
4585 goto out;
4586
4587 fields_str = hist_data->attrs->vals_str;
4588 if (!fields_str)
4589 goto out;
4590
4591 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
4592 j < TRACING_MAP_VALS_MAX; i++) {
4593 field_str = strsep(&fields_str, ",");
4594 if (!field_str)
4595 break;
4596
4597 if (strcmp(field_str, "hitcount") == 0)
4598 continue;
4599
4600 ret = create_val_field(hist_data, j++, file, field_str);
4601 if (ret)
4602 goto out;
4603 }
4604
4605 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
4606 ret = -EINVAL;
4607 out:
4608 return ret;
4609 }
4610
4611 static int create_key_field(struct hist_trigger_data *hist_data,
4612 unsigned int key_idx,
4613 unsigned int key_offset,
4614 struct trace_event_file *file,
4615 char *field_str)
4616 {
4617 struct trace_array *tr = hist_data->event_file->tr;
4618 struct hist_field *hist_field = NULL;
4619 unsigned long flags = 0;
4620 unsigned int key_size;
4621 int ret = 0;
4622
4623 if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
4624 return -EINVAL;
4625
4626 flags |= HIST_FIELD_FL_KEY;
4627
4628 if (strcmp(field_str, "stacktrace") == 0) {
4629 flags |= HIST_FIELD_FL_STACKTRACE;
4630 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
4631 hist_field = create_hist_field(hist_data, NULL, flags, NULL);
4632 } else {
4633 hist_field = parse_expr(hist_data, file, field_str, flags,
4634 NULL, 0);
4635 if (IS_ERR(hist_field)) {
4636 ret = PTR_ERR(hist_field);
4637 goto out;
4638 }
4639
4640 if (field_has_hist_vars(hist_field, 0)) {
4641 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
4642 destroy_hist_field(hist_field, 0);
4643 ret = -EINVAL;
4644 goto out;
4645 }
4646
4647 key_size = hist_field->size;
4648 }
4649
4650 hist_data->fields[key_idx] = hist_field;
4651
4652 key_size = ALIGN(key_size, sizeof(u64));
4653 hist_data->fields[key_idx]->size = key_size;
4654 hist_data->fields[key_idx]->offset = key_offset;
4655
4656 hist_data->key_size += key_size;
4657
4658 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
4659 ret = -EINVAL;
4660 goto out;
4661 }
4662
4663 hist_data->n_keys++;
4664 hist_data->n_fields++;
4665
4666 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
4667 return -EINVAL;
4668
4669 ret = key_size;
4670 out:
4671 return ret;
4672 }
4673
4674 static int create_key_fields(struct hist_trigger_data *hist_data,
4675 struct trace_event_file *file)
4676 {
4677 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
4678 char *fields_str, *field_str;
4679 int ret = -EINVAL;
4680
4681 fields_str = hist_data->attrs->keys_str;
4682 if (!fields_str)
4683 goto out;
4684
4685 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
4686 field_str = strsep(&fields_str, ",");
4687 if (!field_str)
4688 break;
4689 ret = create_key_field(hist_data, i, key_offset,
4690 file, field_str);
4691 if (ret < 0)
4692 goto out;
4693 key_offset += ret;
4694 }
4695 if (fields_str) {
4696 ret = -EINVAL;
4697 goto out;
4698 }
4699 ret = 0;
4700 out:
4701 return ret;
4702 }
4703
4704 static int create_var_fields(struct hist_trigger_data *hist_data,
4705 struct trace_event_file *file)
4706 {
4707 unsigned int i, j = hist_data->n_vals;
4708 int ret = 0;
4709
4710 unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
4711
4712 for (i = 0; i < n_vars; i++) {
4713 char *var_name = hist_data->attrs->var_defs.name[i];
4714 char *expr = hist_data->attrs->var_defs.expr[i];
4715
4716 ret = create_var_field(hist_data, j++, file, var_name, expr);
4717 if (ret)
4718 goto out;
4719 }
4720 out:
4721 return ret;
4722 }
4723
4724 static void free_var_defs(struct hist_trigger_data *hist_data)
4725 {
4726 unsigned int i;
4727
4728 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4729 kfree(hist_data->attrs->var_defs.name[i]);
4730 kfree(hist_data->attrs->var_defs.expr[i]);
4731 }
4732
4733 hist_data->attrs->var_defs.n_vars = 0;
4734 }
4735
4736 static int parse_var_defs(struct hist_trigger_data *hist_data)
4737 {
4738 struct trace_array *tr = hist_data->event_file->tr;
4739 char *s, *str, *var_name, *field_str;
4740 unsigned int i, j, n_vars = 0;
4741 int ret = 0;
4742
4743 for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4744 str = hist_data->attrs->assignment_str[i];
4745 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4746 field_str = strsep(&str, ",");
4747 if (!field_str)
4748 break;
4749
4750 var_name = strsep(&field_str, "=");
4751 if (!var_name || !field_str) {
4752 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
4753 errpos(var_name));
4754 ret = -EINVAL;
4755 goto free;
4756 }
4757
4758 if (n_vars == TRACING_MAP_VARS_MAX) {
4759 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
4760 ret = -EINVAL;
4761 goto free;
4762 }
4763
4764 s = kstrdup(var_name, GFP_KERNEL);
4765 if (!s) {
4766 ret = -ENOMEM;
4767 goto free;
4768 }
4769 hist_data->attrs->var_defs.name[n_vars] = s;
4770
4771 s = kstrdup(field_str, GFP_KERNEL);
4772 if (!s) {
4773 kfree(hist_data->attrs->var_defs.name[n_vars]);
4774 ret = -ENOMEM;
4775 goto free;
4776 }
4777 hist_data->attrs->var_defs.expr[n_vars++] = s;
4778
4779 hist_data->attrs->var_defs.n_vars = n_vars;
4780 }
4781 }
4782
4783 return ret;
4784 free:
4785 free_var_defs(hist_data);
4786
4787 return ret;
4788 }
4789
4790 static int create_hist_fields(struct hist_trigger_data *hist_data,
4791 struct trace_event_file *file)
4792 {
4793 int ret;
4794
4795 ret = parse_var_defs(hist_data);
4796 if (ret)
4797 goto out;
4798
4799 ret = create_val_fields(hist_data, file);
4800 if (ret)
4801 goto out;
4802
4803 ret = create_var_fields(hist_data, file);
4804 if (ret)
4805 goto out;
4806
4807 ret = create_key_fields(hist_data, file);
4808 if (ret)
4809 goto out;
4810 out:
4811 free_var_defs(hist_data);
4812
4813 return ret;
4814 }
4815
4816 static int is_descending(const char *str)
4817 {
4818 if (!str)
4819 return 0;
4820
4821 if (strcmp(str, "descending") == 0)
4822 return 1;
4823
4824 if (strcmp(str, "ascending") == 0)
4825 return 0;
4826
4827 return -EINVAL;
4828 }
4829
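/*
 * Parse the "sort=" specification.  Sort key 0 defaults to hitcount;
 * named sort fields are matched against the non-variable fields, whose
 * indices are counted with variables skipped.
 */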
4830 static int create_sort_keys(struct hist_trigger_data *hist_data)
4831 {
4832 char *fields_str = hist_data->attrs->sort_key_str;
4833 struct tracing_map_sort_key *sort_key;
4834 int descending, ret = 0;
4835 unsigned int i, j, k;
4836
4837 hist_data->n_sort_keys = 1;
4838
4839 if (!fields_str)
4840 goto out;
4841
4842 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4843 struct hist_field *hist_field;
4844 char *field_str, *field_name;
4845 const char *test_name;
4846
4847 sort_key = &hist_data->sort_keys[i];
4848
4849 field_str = strsep(&fields_str, ",");
4850 if (!field_str)
4851 break;
4852
4853 if (!*field_str) {
4854 ret = -EINVAL;
4855 break;
4856 }
4857
4858 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4859 ret = -EINVAL;
4860 break;
4861 }
4862
4863 field_name = strsep(&field_str, ".");
4864 if (!field_name || !*field_name) {
4865 ret = -EINVAL;
4866 break;
4867 }
4868
4869 if (strcmp(field_name, "hitcount") == 0) {
4870 descending = is_descending(field_str);
4871 if (descending < 0) {
4872 ret = descending;
4873 break;
4874 }
4875 sort_key->descending = descending;
4876 continue;
4877 }
4878
4879 for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4880 unsigned int idx;
4881
4882 hist_field = hist_data->fields[j];
4883 if (hist_field->flags & HIST_FIELD_FL_VAR)
4884 continue;
4885
4886 idx = k++;
4887
4888 test_name = hist_field_name(hist_field, 0);
4889
4890 if (strcmp(field_name, test_name) == 0) {
4891 sort_key->field_idx = idx;
4892 descending = is_descending(field_str);
4893 if (descending < 0) {
4894 ret = descending;
4895 goto out;
4896 }
4897 sort_key->descending = descending;
4898 break;
4899 }
4900 }
4901 if (j == hist_data->n_fields) {
4902 ret = -EINVAL;
4903 break;
4904 }
4905 }
4906
4907 hist_data->n_sort_keys = i;
4908 out:
4909 return ret;
4910 }
4911
4912 static void destroy_actions(struct hist_trigger_data *hist_data)
4913 {
4914 unsigned int i;
4915
4916 for (i = 0; i < hist_data->n_actions; i++) {
4917 struct action_data *data = hist_data->actions[i];
4918
4919 if (data->handler == HANDLER_ONMATCH)
4920 onmatch_destroy(data);
4921 else if (data->handler == HANDLER_ONMAX ||
4922 data->handler == HANDLER_ONCHANGE)
4923 track_data_destroy(hist_data, data);
4924 else
4925 kfree(data);
4926 }
4927 }
4928
4929 static int parse_actions(struct hist_trigger_data *hist_data)
4930 {
4931 struct trace_array *tr = hist_data->event_file->tr;
4932 struct action_data *data;
4933 unsigned int i;
4934 int ret = 0;
4935 char *str;
4936 int len;
4937
4938 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4939 str = hist_data->attrs->action_str[i];
4940
4941 if ((len = str_has_prefix(str, "onmatch("))) {
4942 char *action_str = str + len;
4943
4944 data = onmatch_parse(tr, action_str);
4945 if (IS_ERR(data)) {
4946 ret = PTR_ERR(data);
4947 break;
4948 }
4949 } else if ((len = str_has_prefix(str, "onmax("))) {
4950 char *action_str = str + len;
4951
4952 data = track_data_parse(hist_data, action_str,
4953 HANDLER_ONMAX);
4954 if (IS_ERR(data)) {
4955 ret = PTR_ERR(data);
4956 break;
4957 }
4958 } else if ((len = str_has_prefix(str, "onchange("))) {
4959 char *action_str = str + len;
4960
4961 data = track_data_parse(hist_data, action_str,
4962 HANDLER_ONCHANGE);
4963 if (IS_ERR(data)) {
4964 ret = PTR_ERR(data);
4965 break;
4966 }
4967 } else {
4968 ret = -EINVAL;
4969 break;
4970 }
4971
4972 hist_data->actions[hist_data->n_actions++] = data;
4973 }
4974
4975 return ret;
4976 }
4977
4978 static int create_actions(struct hist_trigger_data *hist_data)
4979 {
4980 struct action_data *data;
4981 unsigned int i;
4982 int ret = 0;
4983
4984 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4985 data = hist_data->actions[i];
4986
4987 if (data->handler == HANDLER_ONMATCH) {
4988 ret = onmatch_create(hist_data, data);
4989 if (ret)
4990 break;
4991 } else if (data->handler == HANDLER_ONMAX ||
4992 data->handler == HANDLER_ONCHANGE) {
4993 ret = track_data_create(hist_data, data);
4994 if (ret)
4995 break;
4996 } else {
4997 ret = -EINVAL;
4998 break;
4999 }
5000 }
5001
5002 return ret;
5003 }
5004
5005 static void print_actions(struct seq_file *m,
5006 struct hist_trigger_data *hist_data,
5007 struct tracing_map_elt *elt)
5008 {
5009 unsigned int i;
5010
5011 for (i = 0; i < hist_data->n_actions; i++) {
5012 struct action_data *data = hist_data->actions[i];
5013
5014 if (data->action == ACTION_SNAPSHOT)
5015 continue;
5016
5017 if (data->handler == HANDLER_ONMAX ||
5018 data->handler == HANDLER_ONCHANGE)
5019 track_data_print(m, hist_data, elt, data);
5020 }
5021 }
5022
5023 static void print_action_spec(struct seq_file *m,
5024 struct hist_trigger_data *hist_data,
5025 struct action_data *data)
5026 {
5027 unsigned int i;
5028
5029 if (data->action == ACTION_SAVE) {
5030 for (i = 0; i < hist_data->n_save_vars; i++) {
5031 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
5032 if (i < hist_data->n_save_vars - 1)
5033 seq_puts(m, ",");
5034 }
5035 } else if (data->action == ACTION_TRACE) {
5036 if (data->use_trace_keyword)
5037 seq_printf(m, "%s", data->synth_event_name);
5038 for (i = 0; i < data->n_params; i++) {
5039 if (i || data->use_trace_keyword)
5040 seq_puts(m, ",");
5041 seq_printf(m, "%s", data->params[i]);
5042 }
5043 }
5044 }
5045
5046 static void print_track_data_spec(struct seq_file *m,
5047 struct hist_trigger_data *hist_data,
5048 struct action_data *data)
5049 {
5050 if (data->handler == HANDLER_ONMAX)
5051 seq_puts(m, ":onmax(");
5052 else if (data->handler == HANDLER_ONCHANGE)
5053 seq_puts(m, ":onchange(");
5054 seq_printf(m, "%s", data->track_data.var_str);
5055 seq_printf(m, ").%s(", data->action_name);
5056
5057 print_action_spec(m, hist_data, data);
5058
5059 seq_puts(m, ")");
5060 }
5061
5062 static void print_onmatch_spec(struct seq_file *m,
5063 struct hist_trigger_data *hist_data,
5064 struct action_data *data)
5065 {
5066 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
5067 data->match_data.event);
5068
5069 seq_printf(m, "%s(", data->action_name);
5070
5071 print_action_spec(m, hist_data, data);
5072
5073 seq_puts(m, ")");
5074 }
5075
5076 static bool actions_match(struct hist_trigger_data *hist_data,
5077 struct hist_trigger_data *hist_data_test)
5078 {
5079 unsigned int i, j;
5080
5081 if (hist_data->n_actions != hist_data_test->n_actions)
5082 return false;
5083
5084 for (i = 0; i < hist_data->n_actions; i++) {
5085 struct action_data *data = hist_data->actions[i];
5086 struct action_data *data_test = hist_data_test->actions[i];
5087 char *action_name, *action_name_test;
5088
5089 if (data->handler != data_test->handler)
5090 return false;
5091 if (data->action != data_test->action)
5092 return false;
5093
5094 if (data->n_params != data_test->n_params)
5095 return false;
5096
5097 for (j = 0; j < data->n_params; j++) {
5098 if (strcmp(data->params[j], data_test->params[j]) != 0)
5099 return false;
5100 }
5101
5102 if (data->use_trace_keyword)
5103 action_name = data->synth_event_name;
5104 else
5105 action_name = data->action_name;
5106
5107 if (data_test->use_trace_keyword)
5108 action_name_test = data_test->synth_event_name;
5109 else
5110 action_name_test = data_test->action_name;
5111
5112 if (strcmp(action_name, action_name_test) != 0)
5113 return false;
5114
5115 if (data->handler == HANDLER_ONMATCH) {
5116 if (strcmp(data->match_data.event_system,
5117 data_test->match_data.event_system) != 0)
5118 return false;
5119 if (strcmp(data->match_data.event,
5120 data_test->match_data.event) != 0)
5121 return false;
5122 } else if (data->handler == HANDLER_ONMAX ||
5123 data->handler == HANDLER_ONCHANGE) {
5124 if (strcmp(data->track_data.var_str,
5125 data_test->track_data.var_str) != 0)
5126 return false;
5127 }
5128 }
5129
5130 return true;
5131 }
5132
5133
5134 static void print_actions_spec(struct seq_file *m,
5135 struct hist_trigger_data *hist_data)
5136 {
5137 unsigned int i;
5138
5139 for (i = 0; i < hist_data->n_actions; i++) {
5140 struct action_data *data = hist_data->actions[i];
5141
5142 if (data->handler == HANDLER_ONMATCH)
5143 print_onmatch_spec(m, hist_data, data);
5144 else if (data->handler == HANDLER_ONMAX ||
5145 data->handler == HANDLER_ONCHANGE)
5146 print_track_data_spec(m, hist_data, data);
5147 }
5148 }
5149
5150 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
5151 {
5152 unsigned int i;
5153
5154 for (i = 0; i < hist_data->n_field_var_hists; i++) {
5155 kfree(hist_data->field_var_hists[i]->cmd);
5156 kfree(hist_data->field_var_hists[i]);
5157 }
5158 }
5159
5160 static void destroy_hist_data(struct hist_trigger_data *hist_data)
5161 {
5162 if (!hist_data)
5163 return;
5164
5165 destroy_hist_trigger_attrs(hist_data->attrs);
5166 destroy_hist_fields(hist_data);
5167 tracing_map_destroy(hist_data->map);
5168
5169 destroy_actions(hist_data);
5170 destroy_field_vars(hist_data);
5171 destroy_field_var_hists(hist_data);
5172
5173 kfree(hist_data);
5174 }
5175
5176 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
5177 {
5178 struct tracing_map *map = hist_data->map;
5179 struct ftrace_event_field *field;
5180 struct hist_field *hist_field;
5181 int i, idx = 0;
5182
5183 for_each_hist_field(i, hist_data) {
5184 hist_field = hist_data->fields[i];
5185 if (hist_field->flags & HIST_FIELD_FL_KEY) {
5186 tracing_map_cmp_fn_t cmp_fn;
5187
5188 field = hist_field->field;
5189
5190 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
5191 cmp_fn = tracing_map_cmp_none;
5192 else if (!field)
5193 cmp_fn = tracing_map_cmp_num(hist_field->size,
5194 hist_field->is_signed);
5195 else if (is_string_field(field))
5196 cmp_fn = tracing_map_cmp_string;
5197 else
5198 cmp_fn = tracing_map_cmp_num(field->size,
5199 field->is_signed);
5200 idx = tracing_map_add_key_field(map,
5201 hist_field->offset,
5202 cmp_fn);
5203 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
5204 idx = tracing_map_add_sum_field(map);
5205
5206 if (idx < 0)
5207 return idx;
5208
5209 if (hist_field->flags & HIST_FIELD_FL_VAR) {
5210 idx = tracing_map_add_var(map);
5211 if (idx < 0)
5212 return idx;
5213 hist_field->var.idx = idx;
5214 hist_field->var.hist_data = hist_data;
5215 }
5216 }
5217
5218 return 0;
5219 }
5220
5221 static struct hist_trigger_data *
5222 create_hist_data(unsigned int map_bits,
5223 struct hist_trigger_attrs *attrs,
5224 struct trace_event_file *file,
5225 bool remove)
5226 {
5227 const struct tracing_map_ops *map_ops = NULL;
5228 struct hist_trigger_data *hist_data;
5229 int ret = 0;
5230
5231 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
5232 if (!hist_data)
5233 return ERR_PTR(-ENOMEM);
5234
5235 hist_data->attrs = attrs;
5236 hist_data->remove = remove;
5237 hist_data->event_file = file;
5238
5239 ret = parse_actions(hist_data);
5240 if (ret)
5241 goto free;
5242
5243 ret = create_hist_fields(hist_data, file);
5244 if (ret)
5245 goto free;
5246
5247 ret = create_sort_keys(hist_data);
5248 if (ret)
5249 goto free;
5250
5251 map_ops = &hist_trigger_elt_data_ops;
5252
5253 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
5254 map_ops, hist_data);
5255 if (IS_ERR(hist_data->map)) {
5256 ret = PTR_ERR(hist_data->map);
5257 hist_data->map = NULL;
5258 goto free;
5259 }
5260
5261 ret = create_tracing_map_fields(hist_data);
5262 if (ret)
5263 goto free;
5264 out:
5265 return hist_data;
5266 free:
5267 hist_data->attrs = NULL;
5268
5269 destroy_hist_data(hist_data);
5270
5271 hist_data = ERR_PTR(ret);
5272
5273 goto out;
5274 }
5275
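/*
 * Update the map element for a hit: evaluate each value field, storing
 * variables with tracing_map_set_var() and accumulating the rest as
 * sums, then update key-attached variables and field variables.
 */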
5276 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5277 struct tracing_map_elt *elt, void *rec,
5278 struct ring_buffer_event *rbe,
5279 u64 *var_ref_vals)
5280 {
5281 struct hist_elt_data *elt_data;
5282 struct hist_field *hist_field;
5283 unsigned int i, var_idx;
5284 u64 hist_val;
5285
5286 elt_data = elt->private_data;
5287 elt_data->var_ref_vals = var_ref_vals;
5288
5289 for_each_hist_val_field(i, hist_data) {
5290 hist_field = hist_data->fields[i];
5291 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5292 if (hist_field->flags & HIST_FIELD_FL_VAR) {
5293 var_idx = hist_field->var.idx;
5294 tracing_map_set_var(elt, var_idx, hist_val);
5295 continue;
5296 }
5297 tracing_map_update_sum(elt, i, hist_val);
5298 }
5299
5300 for_each_hist_key_field(i, hist_data) {
5301 hist_field = hist_data->fields[i];
5302 if (hist_field->flags & HIST_FIELD_FL_VAR) {
5303 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5304 var_idx = hist_field->var.idx;
5305 tracing_map_set_var(elt, var_idx, hist_val);
5306 }
5307 }
5308
5309 update_field_vars(hist_data, elt, rbe, rec);
5310 }
5311
5312 static inline void add_to_key(char *compound_key, void *key,
5313 struct hist_field *key_field, void *rec)
5314 {
5315 size_t size = key_field->size;
5316
5317 if (key_field->flags & HIST_FIELD_FL_STRING) {
5318 struct ftrace_event_field *field;
5319
5320 field = key_field->field;
5321 if (field->filter_type == FILTER_DYN_STRING)
5322 size = *(u32 *)(rec + field->offset) >> 16;
5323 else if (field->filter_type == FILTER_PTR_STRING)
5324 size = strlen(key);
5325 else if (field->filter_type == FILTER_STATIC_STRING)
5326 size = field->size;
5327
5328
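/* cap the copy so the compound key stays NUL-terminated */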
5329 if (size > key_field->size - 1)
5330 size = key_field->size - 1;
5331
5332 strncpy(compound_key + key_field->offset, (char *)key, size);
5333 } else
5334 memcpy(compound_key + key_field->offset, key, size);
5335 }
5336
5337 static void
5338 hist_trigger_actions(struct hist_trigger_data *hist_data,
5339 struct tracing_map_elt *elt, void *rec,
5340 struct ring_buffer_event *rbe, void *key,
5341 u64 *var_ref_vals)
5342 {
5343 struct action_data *data;
5344 unsigned int i;
5345
5346 for (i = 0; i < hist_data->n_actions; i++) {
5347 data = hist_data->actions[i];
5348 data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
5349 }
5350 }
5351
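/*
 * Per-event hit path: build the (possibly compound) key, resolve any
 * variable references, insert or update the map element, and run the
 * attached actions if all references resolved.
 */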
5352 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
5353 struct ring_buffer_event *rbe)
5354 {
5355 struct hist_trigger_data *hist_data = data->private_data;
5356 bool use_compound_key = (hist_data->n_keys > 1);
5357 unsigned long entries[HIST_STACKTRACE_DEPTH];
5358 u64 var_ref_vals[TRACING_MAP_VARS_MAX];
5359 char compound_key[HIST_KEY_SIZE_MAX];
5360 struct tracing_map_elt *elt = NULL;
5361 struct hist_field *key_field;
5362 u64 field_contents;
5363 void *key = NULL;
5364 unsigned int i;
5365
5366 memset(compound_key, 0, hist_data->key_size);
5367
5368 for_each_hist_key_field(i, hist_data) {
5369 key_field = hist_data->fields[i];
5370
5371 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5372 memset(entries, 0, HIST_STACKTRACE_SIZE);
5373 stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
5374 HIST_STACKTRACE_SKIP);
5375 key = entries;
5376 } else {
5377 field_contents = key_field->fn(key_field, elt, rbe, rec);
5378 if (key_field->flags & HIST_FIELD_FL_STRING) {
5379 key = (void *)(unsigned long)field_contents;
5380 use_compound_key = true;
5381 } else
5382 key = (void *)&field_contents;
5383 }
5384
5385 if (use_compound_key)
5386 add_to_key(compound_key, key, key_field, rec);
5387 }
5388
5389 if (use_compound_key)
5390 key = compound_key;
5391
5392 if (hist_data->n_var_refs &&
5393 !resolve_var_refs(hist_data, key, var_ref_vals, false))
5394 return;
5395
5396 elt = tracing_map_insert(hist_data->map, key);
5397 if (!elt)
5398 return;
5399
5400 hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
5401
5402 if (resolve_var_refs(hist_data, key, var_ref_vals, true))
5403 hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
5404 }
5405
5406 static void hist_trigger_stacktrace_print(struct seq_file *m,
5407 unsigned long *stacktrace_entries,
5408 unsigned int max_entries)
5409 {
5410 char str[KSYM_SYMBOL_LEN];
5411 unsigned int spaces = 8;
5412 unsigned int i;
5413
5414 for (i = 0; i < max_entries; i++) {
5415 if (!stacktrace_entries[i])
5416 return;
5417
5418 seq_printf(m, "%*c", 1 + spaces, ' ');
5419 sprint_symbol(str, stacktrace_entries[i]);
5420 seq_printf(m, "%s\n", str);
5421 }
5422 }
5423
5424 static void hist_trigger_print_key(struct seq_file *m,
5425 struct hist_trigger_data *hist_data,
5426 void *key,
5427 struct tracing_map_elt *elt)
5428 {
5429 struct hist_field *key_field;
5430 char str[KSYM_SYMBOL_LEN];
5431 bool multiline = false;
5432 const char *field_name;
5433 unsigned int i;
5434 u64 uval;
5435
5436 seq_puts(m, "{ ");
5437
5438 for_each_hist_key_field(i, hist_data) {
5439 key_field = hist_data->fields[i];
5440
5441 if (i > hist_data->n_vals)
5442 seq_puts(m, ", ");
5443
5444 field_name = hist_field_name(key_field, 0);
5445
5446 if (key_field->flags & HIST_FIELD_FL_HEX) {
5447 uval = *(u64 *)(key + key_field->offset);
5448 seq_printf(m, "%s: %llx", field_name, uval);
5449 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
5450 uval = *(u64 *)(key + key_field->offset);
5451 sprint_symbol_no_offset(str, uval);
5452 seq_printf(m, "%s: [%llx] %-45s", field_name,
5453 uval, str);
5454 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
5455 uval = *(u64 *)(key + key_field->offset);
5456 sprint_symbol(str, uval);
5457 seq_printf(m, "%s: [%llx] %-55s", field_name,
5458 uval, str);
5459 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
5460 struct hist_elt_data *elt_data = elt->private_data;
5461 char *comm;
5462
5463 if (WARN_ON_ONCE(!elt_data))
5464 return;
5465
5466 comm = elt_data->comm;
5467
5468 uval = *(u64 *)(key + key_field->offset);
5469 seq_printf(m, "%s: %-16s[%10llu]", field_name,
5470 comm, uval);
5471 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
5472 const char *syscall_name;
5473
5474 uval = *(u64 *)(key + key_field->offset);
5475 syscall_name = get_syscall_name(uval);
5476 if (!syscall_name)
5477 syscall_name = "unknown_syscall";
5478
5479 seq_printf(m, "%s: %-30s[%3llu]", field_name,
5480 syscall_name, uval);
5481 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5482 seq_puts(m, "stacktrace:\n");
5483 hist_trigger_stacktrace_print(m,
5484 key + key_field->offset,
5485 HIST_STACKTRACE_DEPTH);
5486 multiline = true;
5487 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
5488 seq_printf(m, "%s: ~ 2^%-2llu", field_name,
5489 *(u64 *)(key + key_field->offset));
5490 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
5491 seq_printf(m, "%s: %-50s", field_name,
5492 (char *)(key + key_field->offset));
5493 } else {
5494 uval = *(u64 *)(key + key_field->offset);
5495 seq_printf(m, "%s: %10llu", field_name, uval);
5496 }
5497 }
5498
5499 if (!multiline)
5500 seq_puts(m, " ");
5501
5502 seq_puts(m, "}");
5503 }
5504
5505 static void hist_trigger_entry_print(struct seq_file *m,
5506 struct hist_trigger_data *hist_data,
5507 void *key,
5508 struct tracing_map_elt *elt)
5509 {
5510 const char *field_name;
5511 unsigned int i;
5512
5513 hist_trigger_print_key(m, hist_data, key, elt);
5514
5515 seq_printf(m, " hitcount: %10llu",
5516 tracing_map_read_sum(elt, HITCOUNT_IDX));
5517
5518 for (i = 1; i < hist_data->n_vals; i++) {
5519 field_name = hist_field_name(hist_data->fields[i], 0);
5520
5521 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
5522 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
5523 continue;
5524
5525 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
5526 seq_printf(m, " %s: %10llx", field_name,
5527 tracing_map_read_sum(elt, i));
5528 } else {
5529 seq_printf(m, " %s: %10llu", field_name,
5530 tracing_map_read_sum(elt, i));
5531 }
5532 }
5533
5534 print_actions(m, hist_data, elt);
5535
5536 seq_puts(m, "\n");
5537 }
5538
5539 static int print_entries(struct seq_file *m,
5540 struct hist_trigger_data *hist_data)
5541 {
5542 struct tracing_map_sort_entry **sort_entries = NULL;
5543 struct tracing_map *map = hist_data->map;
5544 int i, n_entries;
5545
5546 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
5547 hist_data->n_sort_keys,
5548 &sort_entries);
5549 if (n_entries < 0)
5550 return n_entries;
5551
5552 for (i = 0; i < n_entries; i++)
5553 hist_trigger_entry_print(m, hist_data,
5554 sort_entries[i]->key,
5555 sort_entries[i]->elt);
5556
5557 tracing_map_destroy_sort_entries(sort_entries, n_entries);
5558
5559 return n_entries;
5560 }
5561
5562 static void hist_trigger_show(struct seq_file *m,
5563 struct event_trigger_data *data, int n)
5564 {
5565 struct hist_trigger_data *hist_data;
5566 int n_entries;
5567
5568 if (n > 0)
5569 seq_puts(m, "\n\n");
5570
5571 seq_puts(m, "# event histogram\n#\n# trigger info: ");
5572 data->ops->print(m, data->ops, data);
5573 seq_puts(m, "#\n\n");
5574
5575 hist_data = data->private_data;
5576 n_entries = print_entries(m, hist_data);
5577 if (n_entries < 0)
5578 n_entries = 0;
5579
5580 track_data_snapshot_print(m, hist_data);
5581
5582 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
5583 (u64)atomic64_read(&hist_data->map->hits),
5584 n_entries, (u64)atomic64_read(&hist_data->map->drops));
5585 }
5586
5587 static int hist_show(struct seq_file *m, void *v)
5588 {
5589 struct event_trigger_data *data;
5590 struct trace_event_file *event_file;
5591 int n = 0, ret = 0;
5592
5593 mutex_lock(&event_mutex);
5594
5595 event_file = event_file_data(m->private);
5596 if (unlikely(!event_file)) {
5597 ret = -ENODEV;
5598 goto out_unlock;
5599 }
5600
5601 list_for_each_entry(data, &event_file->triggers, list) {
5602 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5603 hist_trigger_show(m, data, n++);
5604 }
5605
5606 out_unlock:
5607 mutex_unlock(&event_mutex);
5608
5609 return ret;
5610 }
5611
5612 static int event_hist_open(struct inode *inode, struct file *file)
5613 {
5614 int ret;
5615
5616 ret = security_locked_down(LOCKDOWN_TRACEFS);
5617 if (ret)
5618 return ret;
5619
5620 return single_open(file, hist_show, file);
5621 }
5622
5623 const struct file_operations event_hist_fops = {
5624 .open = event_hist_open,
5625 .read = seq_read,
5626 .llseek = seq_lseek,
5627 .release = single_release,
5628 };
5629
5630 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5631 {
5632 const char *field_name = hist_field_name(hist_field, 0);
5633
5634 if (hist_field->var.name)
5635 seq_printf(m, "%s=", hist_field->var.name);
5636
5637 if (hist_field->flags & HIST_FIELD_FL_CPU)
5638 seq_puts(m, "cpu");
5639 else if (field_name) {
5640 if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5641 hist_field->flags & HIST_FIELD_FL_ALIAS)
5642 seq_putc(m, '$');
5643 seq_printf(m, "%s", field_name);
5644 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5645 seq_puts(m, "common_timestamp");
5646
5647 if (hist_field->flags) {
5648 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5649 !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5650 const char *flags = get_hist_field_flags(hist_field);
5651
5652 if (flags)
5653 seq_printf(m, ".%s", flags);
5654 }
5655 }
5656 }
5657
5658 static int event_hist_trigger_print(struct seq_file *m,
5659 struct event_trigger_ops *ops,
5660 struct event_trigger_data *data)
5661 {
5662 struct hist_trigger_data *hist_data = data->private_data;
5663 struct hist_field *field;
5664 bool have_var = false;
5665 unsigned int i;
5666
5667 seq_puts(m, "hist:");
5668
5669 if (data->name)
5670 seq_printf(m, "%s:", data->name);
5671
5672 seq_puts(m, "keys=");
5673
5674 for_each_hist_key_field(i, hist_data) {
5675 field = hist_data->fields[i];
5676
5677 if (i > hist_data->n_vals)
5678 seq_puts(m, ",");
5679
5680 if (field->flags & HIST_FIELD_FL_STACKTRACE)
5681 seq_puts(m, "stacktrace");
5682 else
5683 hist_field_print(m, field);
5684 }
5685
5686 seq_puts(m, ":vals=");
5687
5688 for_each_hist_val_field(i, hist_data) {
5689 field = hist_data->fields[i];
5690 if (field->flags & HIST_FIELD_FL_VAR) {
5691 have_var = true;
5692 continue;
5693 }
5694
5695 if (i == HITCOUNT_IDX)
5696 seq_puts(m, "hitcount");
5697 else {
5698 seq_puts(m, ",");
5699 hist_field_print(m, field);
5700 }
5701 }
5702
5703 if (have_var) {
5704 unsigned int n = 0;
5705
5706 seq_puts(m, ":");
5707
5708 for_each_hist_val_field(i, hist_data) {
5709 field = hist_data->fields[i];
5710
5711 if (field->flags & HIST_FIELD_FL_VAR) {
5712 if (n++)
5713 seq_puts(m, ",");
5714 hist_field_print(m, field);
5715 }
5716 }
5717 }
5718
5719 seq_puts(m, ":sort=");
5720
5721 for (i = 0; i < hist_data->n_sort_keys; i++) {
5722 struct tracing_map_sort_key *sort_key;
5723 unsigned int idx, first_key_idx;
5724
5725
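/* sort key indices skip variable-only fields */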
5726 first_key_idx = hist_data->n_vals - hist_data->n_vars;
5727
5728 sort_key = &hist_data->sort_keys[i];
5729 idx = sort_key->field_idx;
5730
5731 if (WARN_ON(idx >= HIST_FIELDS_MAX))
5732 return -EINVAL;
5733
5734 if (i > 0)
5735 seq_puts(m, ",");
5736
5737 if (idx == HITCOUNT_IDX)
5738 seq_puts(m, "hitcount");
5739 else {
5740 if (idx >= first_key_idx)
5741 idx += hist_data->n_vars;
5742 hist_field_print(m, hist_data->fields[idx]);
5743 }
5744
5745 if (sort_key->descending)
5746 seq_puts(m, ".descending");
5747 }
5748 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
5749 if (hist_data->enable_timestamps)
5750 seq_printf(m, ":clock=%s", hist_data->attrs->clock);
5751
5752 print_actions_spec(m, hist_data);
5753
5754 if (data->filter_str)
5755 seq_printf(m, " if %s", data->filter_str);
5756
5757 if (data->paused)
5758 seq_puts(m, " [paused]");
5759 else
5760 seq_puts(m, " [active]");
5761
5762 seq_putc(m, '\n');
5763
5764 return 0;
5765 }
5766
5767 static int event_hist_trigger_init(struct event_trigger_ops *ops,
5768 struct event_trigger_data *data)
5769 {
5770 struct hist_trigger_data *hist_data = data->private_data;
5771
5772 if (!data->ref && hist_data->attrs->name)
5773 save_named_trigger(hist_data->attrs->name, data);
5774
5775 data->ref++;
5776
5777 return 0;
5778 }
5779
5780 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
5781 {
5782 struct trace_event_file *file;
5783 unsigned int i;
5784 char *cmd;
5785 int ret;
5786
5787 for (i = 0; i < hist_data->n_field_var_hists; i++) {
5788 file = hist_data->field_var_hists[i]->hist_data->event_file;
5789 cmd = hist_data->field_var_hists[i]->cmd;
5790 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
5791 "!hist", "hist", cmd);
5792 }
5793 }
5794
5795 static void event_hist_trigger_free(struct event_trigger_ops *ops,
5796 struct event_trigger_data *data)
5797 {
5798 struct hist_trigger_data *hist_data = data->private_data;
5799
5800 if (WARN_ON_ONCE(data->ref <= 0))
5801 return;
5802
5803 data->ref--;
5804 if (!data->ref) {
5805 if (data->name)
5806 del_named_trigger(data);
5807
5808 trigger_data_free(data);
5809
5810 remove_hist_vars(hist_data);
5811
5812 unregister_field_var_hists(hist_data);
5813
5814 destroy_hist_data(hist_data);
5815 }
5816 }
5817
5818 static struct event_trigger_ops event_hist_trigger_ops = {
5819 .func = event_hist_trigger,
5820 .print = event_hist_trigger_print,
5821 .init = event_hist_trigger_init,
5822 .free = event_hist_trigger_free,
5823 };
5824
5825 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5826 struct event_trigger_data *data)
5827 {
5828 data->ref++;
5829
5830 save_named_trigger(data->named_data->name, data);
5831
5832 event_hist_trigger_init(ops, data->named_data);
5833
5834 return 0;
5835 }
5836
5837 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5838 struct event_trigger_data *data)
5839 {
5840 if (WARN_ON_ONCE(data->ref <= 0))
5841 return;
5842
5843 event_hist_trigger_free(ops, data->named_data);
5844
5845 data->ref--;
5846 if (!data->ref) {
5847 del_named_trigger(data);
5848 trigger_data_free(data);
5849 }
5850 }
5851
5852 static struct event_trigger_ops event_hist_trigger_named_ops = {
5853 .func = event_hist_trigger,
5854 .print = event_hist_trigger_print,
5855 .init = event_hist_trigger_named_init,
5856 .free = event_hist_trigger_named_free,
5857 };
5858
5859 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5860 char *param)
5861 {
5862 return &event_hist_trigger_ops;
5863 }
5864
5865 static void hist_clear(struct event_trigger_data *data)
5866 {
5867 struct hist_trigger_data *hist_data = data->private_data;
5868
5869 if (data->name)
5870 pause_named_trigger(data);
5871
5872 tracepoint_synchronize_unregister();
5873
5874 tracing_map_clear(hist_data->map);
5875
5876 if (data->name)
5877 unpause_named_trigger(data);
5878 }
5879
5880 static bool compatible_field(struct ftrace_event_field *field,
5881 struct ftrace_event_field *test_field)
5882 {
5883 if (field == test_field)
5884 return true;
5885 if (field == NULL || test_field == NULL)
5886 return false;
5887 if (strcmp(field->name, test_field->name) != 0)
5888 return false;
5889 if (strcmp(field->type, test_field->type) != 0)
5890 return false;
5891 if (field->size != test_field->size)
5892 return false;
5893 if (field->is_signed != test_field->is_signed)
5894 return false;
5895
5896 return true;
5897 }
5898
5899 static bool hist_trigger_match(struct event_trigger_data *data,
5900 struct event_trigger_data *data_test,
5901 struct event_trigger_data *named_data,
5902 bool ignore_filter)
5903 {
5904 struct tracing_map_sort_key *sort_key, *sort_key_test;
5905 struct hist_trigger_data *hist_data, *hist_data_test;
5906 struct hist_field *key_field, *key_field_test;
5907 unsigned int i;
5908
5909 if (named_data && (named_data != data_test) &&
5910 (named_data != data_test->named_data))
5911 return false;
5912
5913 if (!named_data && is_named_trigger(data_test))
5914 return false;
5915
5916 hist_data = data->private_data;
5917 hist_data_test = data_test->private_data;
5918
5919 if (hist_data->n_vals != hist_data_test->n_vals ||
5920 hist_data->n_fields != hist_data_test->n_fields ||
5921 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5922 return false;
5923
5924 if (!ignore_filter) {
5925 if ((data->filter_str && !data_test->filter_str) ||
5926 (!data->filter_str && data_test->filter_str))
5927 return false;
5928 }
5929
5930 for_each_hist_field(i, hist_data) {
5931 key_field = hist_data->fields[i];
5932 key_field_test = hist_data_test->fields[i];
5933
5934 if (key_field->flags != key_field_test->flags)
5935 return false;
5936 if (!compatible_field(key_field->field, key_field_test->field))
5937 return false;
5938 if (key_field->offset != key_field_test->offset)
5939 return false;
5940 if (key_field->size != key_field_test->size)
5941 return false;
5942 if (key_field->is_signed != key_field_test->is_signed)
5943 return false;
5944 if (!!key_field->var.name != !!key_field_test->var.name)
5945 return false;
5946 if (key_field->var.name &&
5947 strcmp(key_field->var.name, key_field_test->var.name) != 0)
5948 return false;
5949 }
5950
5951 for (i = 0; i < hist_data->n_sort_keys; i++) {
5952 sort_key = &hist_data->sort_keys[i];
5953 sort_key_test = &hist_data_test->sort_keys[i];
5954
5955 if (sort_key->field_idx != sort_key_test->field_idx ||
5956 sort_key->descending != sort_key_test->descending)
5957 return false;
5958 }
5959
5960 if (!ignore_filter && data->filter_str &&
5961 (strcmp(data->filter_str, data_test->filter_str) != 0))
5962 return false;
5963
5964 if (!actions_match(hist_data, hist_data_test))
5965 return false;
5966
5967 return true;
5968 }
5969
5970 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5971 struct event_trigger_data *data,
5972 struct trace_event_file *file)
5973 {
5974 struct hist_trigger_data *hist_data = data->private_data;
5975 struct event_trigger_data *test, *named_data = NULL;
5976 struct trace_array *tr = file->tr;
5977 int ret = 0;
5978
5979 if (hist_data->attrs->name) {
5980 named_data = find_named_trigger(hist_data->attrs->name);
5981 if (named_data) {
5982 if (!hist_trigger_match(data, named_data, named_data,
5983 true)) {
5984 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
5985 ret = -EINVAL;
5986 goto out;
5987 }
5988 }
5989 }
5990
5991 if (hist_data->attrs->name && !named_data)
5992 goto new;
5993
5994 lockdep_assert_held(&event_mutex);
5995
5996 list_for_each_entry(test, &file->triggers, list) {
5997 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5998 if (!hist_trigger_match(data, test, named_data, false))
5999 continue;
6000 if (hist_data->attrs->pause)
6001 test->paused = true;
6002 else if (hist_data->attrs->cont)
6003 test->paused = false;
6004 else if (hist_data->attrs->clear)
6005 hist_clear(test);
6006 else {
6007 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
6008 ret = -EEXIST;
6009 }
6010 goto out;
6011 }
6012 }
6013 new:
6014 if (hist_data->attrs->cont || hist_data->attrs->clear) {
6015 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
6016 ret = -ENOENT;
6017 goto out;
6018 }
6019
6020 if (hist_data->attrs->pause)
6021 data->paused = true;
6022
6023 if (named_data) {
6024 data->private_data = named_data->private_data;
6025 set_named_trigger_data(data, named_data);
6026 data->ops = &event_hist_trigger_named_ops;
6027 }
6028
6029 if (data->ops->init) {
6030 ret = data->ops->init(data->ops, data);
6031 if (ret < 0)
6032 goto out;
6033 }
6034
6035 if (hist_data->enable_timestamps) {
6036 char *clock = hist_data->attrs->clock;
6037
6038 ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
6039 if (ret) {
6040 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
6041 goto out;
6042 }
6043
6044 tracing_set_time_stamp_abs(file->tr, true);
6045 }
6046
6047 if (named_data)
6048 destroy_hist_data(hist_data);
6049
6050 ret++;
6051 out:
6052 return ret;
6053 }
6054
6055 static int hist_trigger_enable(struct event_trigger_data *data,
6056 struct trace_event_file *file)
6057 {
6058 int ret = 0;
6059
6060 list_add_tail_rcu(&data->list, &file->triggers);
6061
6062 update_cond_flag(file);
6063
6064 if (trace_event_trigger_enable_disable(file, 1) < 0) {
6065 list_del_rcu(&data->list);
6066 update_cond_flag(file);
6067 ret--;
6068 }
6069
6070 return ret;
6071 }
6072
6073 static bool have_hist_trigger_match(struct event_trigger_data *data,
6074 struct trace_event_file *file)
6075 {
6076 struct hist_trigger_data *hist_data = data->private_data;
6077 struct event_trigger_data *test, *named_data = NULL;
6078 bool match = false;
6079
6080 lockdep_assert_held(&event_mutex);
6081
6082 if (hist_data->attrs->name)
6083 named_data = find_named_trigger(hist_data->attrs->name);
6084
6085 list_for_each_entry(test, &file->triggers, list) {
6086 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6087 if (hist_trigger_match(data, test, named_data, false)) {
6088 match = true;
6089 break;
6090 }
6091 }
6092 }
6093
6094 return match;
6095 }
6096
6097 static bool hist_trigger_check_refs(struct event_trigger_data *data,
6098 struct trace_event_file *file)
6099 {
6100 struct hist_trigger_data *hist_data = data->private_data;
6101 struct event_trigger_data *test, *named_data = NULL;
6102
6103 lockdep_assert_held(&event_mutex);
6104
6105 if (hist_data->attrs->name)
6106 named_data = find_named_trigger(hist_data->attrs->name);
6107
6108 list_for_each_entry(test, &file->triggers, list) {
6109 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6110 if (!hist_trigger_match(data, test, named_data, false))
6111 continue;
6112 hist_data = test->private_data;
6113 if (check_var_refs(hist_data))
6114 return true;
6115 break;
6116 }
6117 }
6118
6119 return false;
6120 }
6121
6122 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
6123 struct event_trigger_data *data,
6124 struct trace_event_file *file)
6125 {
6126 struct hist_trigger_data *hist_data = data->private_data;
6127 struct event_trigger_data *test, *named_data = NULL;
6128 bool unregistered = false;
6129
6130 lockdep_assert_held(&event_mutex);
6131
6132 if (hist_data->attrs->name)
6133 named_data = find_named_trigger(hist_data->attrs->name);
6134
6135 list_for_each_entry(test, &file->triggers, list) {
6136 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6137 if (!hist_trigger_match(data, test, named_data, false))
6138 continue;
6139 unregistered = true;
6140 list_del_rcu(&test->list);
6141 trace_event_trigger_enable_disable(file, 0);
6142 update_cond_flag(file);
6143 break;
6144 }
6145 }
6146
6147 if (unregistered && test->ops->free)
6148 test->ops->free(test->ops, test);
6149
6150 if (hist_data->enable_timestamps) {
6151 if (!hist_data->remove || unregistered)
6152 tracing_set_time_stamp_abs(file->tr, false);
6153 }
6154 }
6155
6156 static bool hist_file_check_refs(struct trace_event_file *file)
6157 {
6158 struct hist_trigger_data *hist_data;
6159 struct event_trigger_data *test;
6160
6161 lockdep_assert_held(&event_mutex);
6162
6163 list_for_each_entry(test, &file->triggers, list) {
6164 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6165 hist_data = test->private_data;
6166 if (check_var_refs(hist_data))
6167 return true;
6168 }
6169 }
6170
6171 return false;
6172 }
6173
6174 static void hist_unreg_all(struct trace_event_file *file)
6175 {
6176 struct event_trigger_data *test, *n;
6177 struct hist_trigger_data *hist_data;
6178 struct synth_event *se;
6179 const char *se_name;
6180
6181 lockdep_assert_held(&event_mutex);
6182
6183 if (hist_file_check_refs(file))
6184 return;
6185
6186 list_for_each_entry_safe(test, n, &file->triggers, list) {
6187 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6188 hist_data = test->private_data;
6189 list_del_rcu(&test->list);
6190 trace_event_trigger_enable_disable(file, 0);
6191
6192 se_name = trace_event_name(file->event_call);
6193 se = find_synth_event(se_name);
6194 if (se)
6195 se->ref--;
6196
6197 update_cond_flag(file);
6198 if (hist_data->enable_timestamps)
6199 tracing_set_time_stamp_abs(file->tr, false);
6200 if (test->ops->free)
6201 test->ops->free(test->ops, test);
6202 }
6203 }
6204 }
6205
6206 static int event_hist_trigger_func(struct event_command *cmd_ops,
6207 struct trace_event_file *file,
6208 char *glob, char *cmd, char *param)
6209 {
6210 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
6211 struct event_trigger_data *trigger_data;
6212 struct hist_trigger_attrs *attrs;
6213 struct event_trigger_ops *trigger_ops;
6214 struct hist_trigger_data *hist_data;
6215 struct synth_event *se;
6216 const char *se_name;
6217 bool remove = false;
6218 char *trigger, *p;
6219 int ret = 0;
6220
6221 lockdep_assert_held(&event_mutex);
6222
6223 if (glob && strlen(glob)) {
6224 hist_err_clear();
6225 last_cmd_set(file, param);
6226 }
6227
6228 if (!param)
6229 return -EINVAL;
6230
6231 if (glob[0] == '!')
6232 remove = true;
6233
6234
6235
6236
6237
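/*
 * Split the trigger spec from an optional trailing "if <filter>"
 * clause; "if" only counts as the keyword when surrounded by
 * whitespace, so field names containing "if" are left alone.
 */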
6238 p = trigger = param;
6239 do {
6240 p = strstr(p, "if");
6241 if (!p)
6242 break;
6243 if (p == param)
6244 return -EINVAL;
6245 if (*(p - 1) != ' ' && *(p - 1) != '\t') {
6246 p++;
6247 continue;
6248 }
6249 if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
6250 return -EINVAL;
6251 if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
6252 p++;
6253 continue;
6254 }
6255 break;
6256 } while (p);
6257
6258 if (!p)
6259 param = NULL;
6260 else {
6261 *(p - 1) = '\0';
6262 param = strstrip(p);
6263 trigger = strstrip(trigger);
6264 }
6265
6266 attrs = parse_hist_trigger_attrs(file->tr, trigger);
6267 if (IS_ERR(attrs))
6268 return PTR_ERR(attrs);
6269
6270 if (attrs->map_bits)
6271 hist_trigger_bits = attrs->map_bits;
6272
6273 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
6274 if (IS_ERR(hist_data)) {
6275 destroy_hist_trigger_attrs(attrs);
6276 return PTR_ERR(hist_data);
6277 }
6278
6279 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
6280
6281 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
6282 if (!trigger_data) {
6283 ret = -ENOMEM;
6284 goto out_free;
6285 }
6286
6287 trigger_data->count = -1;
6288 trigger_data->ops = trigger_ops;
6289 trigger_data->cmd_ops = cmd_ops;
6290
6291 INIT_LIST_HEAD(&trigger_data->list);
6292 RCU_INIT_POINTER(trigger_data->filter, NULL);
6293
6294 trigger_data->private_data = hist_data;
6295
6296
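/* anything left after the trigger spec is treated as the filter */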
6297 if (param && cmd_ops->set_filter) {
6298 ret = cmd_ops->set_filter(param, trigger_data, file);
6299 if (ret < 0)
6300 goto out_free;
6301 }
6302
6303 if (remove) {
6304 if (!have_hist_trigger_match(trigger_data, file))
6305 goto out_free;
6306
6307 if (hist_trigger_check_refs(trigger_data, file)) {
6308 ret = -EBUSY;
6309 goto out_free;
6310 }
6311
6312 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6313 se_name = trace_event_name(file->event_call);
6314 se = find_synth_event(se_name);
6315 if (se)
6316 se->ref--;
6317 ret = 0;
6318 goto out_free;
6319 }
6320
6321 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
6322
6323
6324
6325
6326
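/*
 * reg() returns the number of triggers registered; zero means
 * nothing new was registered, which is an error unless this was a
 * pause, continue or clear of an existing trigger.
 */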
6327 if (!ret) {
6328 if (!(attrs->pause || attrs->cont || attrs->clear))
6329 ret = -ENOENT;
6330 goto out_free;
6331 } else if (ret < 0)
6332 goto out_free;
6333
6334 if (get_named_trigger_data(trigger_data))
6335 goto enable;
6336
6337 if (has_hist_vars(hist_data))
6338 save_hist_vars(hist_data);
6339
6340 ret = create_actions(hist_data);
6341 if (ret)
6342 goto out_unreg;
6343
6344 ret = tracing_map_init(hist_data->map);
6345 if (ret)
6346 goto out_unreg;
6347 enable:
6348 ret = hist_trigger_enable(trigger_data, file);
6349 if (ret)
6350 goto out_unreg;
6351
6352 se_name = trace_event_name(file->event_call);
6353 se = find_synth_event(se_name);
6354 if (se)
6355 se->ref++;
6356
6357 ret = 0;
6358 out:
6359 if (ret == 0)
6360 hist_err_clear();
6361
6362 return ret;
6363 out_unreg:
6364 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6365 out_free:
6366 if (cmd_ops->set_filter)
6367 cmd_ops->set_filter(NULL, trigger_data, NULL);
6368
6369 remove_hist_vars(hist_data);
6370
6371 kfree(trigger_data);
6372
6373 destroy_hist_data(hist_data);
6374 goto out;
6375 }
6376
6377 static struct event_command trigger_hist_cmd = {
6378 .name = "hist",
6379 .trigger_type = ETT_EVENT_HIST,
6380 .flags = EVENT_CMD_FL_NEEDS_REC,
6381 .func = event_hist_trigger_func,
6382 .reg = hist_register_trigger,
6383 .unreg = hist_unregister_trigger,
6384 .unreg_all = hist_unreg_all,
6385 .get_trigger_ops = event_hist_get_trigger_ops,
6386 .set_filter = set_trigger_filter,
6387 };
6388
6389 __init int register_trigger_hist_cmd(void)
6390 {
6391 int ret;
6392
6393 ret = register_event_command(&trigger_hist_cmd);
6394 WARN_ON(ret < 0);
6395
6396 return ret;
6397 }
6398
6399 static void
6400 hist_enable_trigger(struct event_trigger_data *data, void *rec,
6401 struct ring_buffer_event *event)
6402 {
6403 struct enable_trigger_data *enable_data = data->private_data;
6404 struct event_trigger_data *test;
6405
6406 list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
6407 lockdep_is_held(&event_mutex)) {
6408 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6409 if (enable_data->enable)
6410 test->paused = false;
6411 else
6412 test->paused = true;
6413 }
6414 }
6415 }
6416
6417 static void
6418 hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
6419 struct ring_buffer_event *event)
6420 {
6421 if (!data->count)
6422 return;
6423
6424 if (data->count != -1)
6425 (data->count)--;
6426
6427 hist_enable_trigger(data, rec, event);
6428 }
6429
6430 static struct event_trigger_ops hist_enable_trigger_ops = {
6431 .func = hist_enable_trigger,
6432 .print = event_enable_trigger_print,
6433 .init = event_trigger_init,
6434 .free = event_enable_trigger_free,
6435 };
6436
6437 static struct event_trigger_ops hist_enable_count_trigger_ops = {
6438 .func = hist_enable_count_trigger,
6439 .print = event_enable_trigger_print,
6440 .init = event_trigger_init,
6441 .free = event_enable_trigger_free,
6442 };
6443
6444 static struct event_trigger_ops hist_disable_trigger_ops = {
6445 .func = hist_enable_trigger,
6446 .print = event_enable_trigger_print,
6447 .init = event_trigger_init,
6448 .free = event_enable_trigger_free,
6449 };
6450
6451 static struct event_trigger_ops hist_disable_count_trigger_ops = {
6452 .func = hist_enable_count_trigger,
6453 .print = event_enable_trigger_print,
6454 .init = event_trigger_init,
6455 .free = event_enable_trigger_free,
6456 };
6457
6458 static struct event_trigger_ops *
6459 hist_enable_get_trigger_ops(char *cmd, char *param)
6460 {
6461 struct event_trigger_ops *ops;
6462 bool enable;
6463
6464 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
6465
6466 if (enable)
6467 ops = param ? &hist_enable_count_trigger_ops :
6468 &hist_enable_trigger_ops;
6469 else
6470 ops = param ? &hist_disable_count_trigger_ops :
6471 &hist_disable_trigger_ops;
6472
6473 return ops;
6474 }
6475
6476 static void hist_enable_unreg_all(struct trace_event_file *file)
6477 {
6478 struct event_trigger_data *test, *n;
6479
6480 list_for_each_entry_safe(test, n, &file->triggers, list) {
6481 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
6482 list_del_rcu(&test->list);
6483 update_cond_flag(file);
6484 trace_event_trigger_enable_disable(file, 0);
6485 if (test->ops->free)
6486 test->ops->free(test->ops, test);
6487 }
6488 }
6489 }
6490
6491 static struct event_command trigger_hist_enable_cmd = {
6492 .name = ENABLE_HIST_STR,
6493 .trigger_type = ETT_HIST_ENABLE,
6494 .func = event_enable_trigger_func,
6495 .reg = event_enable_register_trigger,
6496 .unreg = event_enable_unregister_trigger,
6497 .unreg_all = hist_enable_unreg_all,
6498 .get_trigger_ops = hist_enable_get_trigger_ops,
6499 .set_filter = set_trigger_filter,
6500 };
6501
6502 static struct event_command trigger_hist_disable_cmd = {
6503 .name = DISABLE_HIST_STR,
6504 .trigger_type = ETT_HIST_ENABLE,
6505 .func = event_enable_trigger_func,
6506 .reg = event_enable_register_trigger,
6507 .unreg = event_enable_unregister_trigger,
6508 .unreg_all = hist_enable_unreg_all,
6509 .get_trigger_ops = hist_enable_get_trigger_ops,
6510 .set_filter = set_trigger_filter,
6511 };
6512
6513 static __init void unregister_trigger_hist_enable_disable_cmds(void)
6514 {
6515 unregister_event_command(&trigger_hist_enable_cmd);
6516 unregister_event_command(&trigger_hist_disable_cmd);
6517 }
6518
6519 __init int register_trigger_hist_enable_disable_cmds(void)
6520 {
6521 int ret;
6522
6523 ret = register_event_command(&trigger_hist_enable_cmd);
6524 if (WARN_ON(ret < 0))
6525 return ret;
6526 ret = register_event_command(&trigger_hist_disable_cmd);
6527 if (WARN_ON(ret < 0))
6528 unregister_trigger_hist_enable_disable_cmds();
6529
6530 return ret;
6531 }
6532
6533 static __init int trace_events_hist_init(void)
6534 {
6535 struct dentry *entry = NULL;
6536 struct dentry *d_tracer;
6537 int err = 0;
6538
6539 err = dyn_event_register(&synth_event_ops);
6540 if (err) {
6541 pr_warn("Could not register synth_event_ops\n");
6542 return err;
6543 }
6544
6545 d_tracer = tracing_init_dentry();
6546 if (IS_ERR(d_tracer)) {
6547 err = PTR_ERR(d_tracer);
6548 goto err;
6549 }
6550
6551 entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
6552 NULL, &synth_events_fops);
6553 if (!entry) {
6554 err = -ENODEV;
6555 goto err;
6556 }
6557
6558 return err;
6559 err:
6560 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
6561
6562 return err;
6563 }
6564
6565 fs_initcall(trace_events_hist_init);