This source file includes the following definitions:
- trace_hwlat_sample
- trace_hwlat_callback
- get_sample
- move_to_next_cpu
- kthread_fn
- start_kthread
- stop_kthread
- hwlat_read
- hwlat_width_write
- hwlat_window_write
- init_tracefs
- hwlat_tracer_start
- hwlat_tracer_stop
- hwlat_tracer_init
- hwlat_tracer_reset
- init_hwlat_tracer
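
/*
 * trace_hwlat - hardware latency detector tracer
 *
 * A sampling kthread spins with interrupts disabled for a configurable
 * "width" portion of each sampling "window", polling trace_clock_local()
 * and recording any gap between consecutive reads that exceeds
 * tracing_thresh as a hardware-induced latency (e.g. an SMI). Time spent
 * in NMIs is accounted separately via trace_hwlat_callback().
 */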
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array *hwlat_trace;

#define U64STR_SIZE 22				/* 20 digits max */

#define BANNER "hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW 1000000		/* 1s */
#define DEFAULT_SAMPLE_WIDTH 500000		/* 0.5s */
#define DEFAULT_LAT_THRESHOLD 10		/* 10us */

/* sampling thread */
static struct task_struct *hwlat_kthread;

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* NMI timestamp counters */
static u64 nmi_ts_start;
static u64 nmi_total_ts;
static int nmi_count;
static int nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64 seqnum;			/* unique sequence */
	u64 duration;			/* delta */
	u64 outer_duration;		/* delta (outer loop) */
	u64 nmi_total_ts;		/* total time spent in NMIs */
	struct timespec64 timestamp;	/* wall time */
	int nmi_count;			/* # NMIs during this sample */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64 count;			/* total since reset */

	u64 sample_window;		/* total sampling window (on+off) */
	u64 sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window	= DEFAULT_SAMPLE_WINDOW,
	.sample_width	= DEFAULT_SAMPLE_WIDTH,
};

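/*
 * trace_hwlat_sample - Record a hwlat_sample in the trace ring buffer
 * @sample: The latency measurements to record
 */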
static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->seqnum		= sample->seqnum;
	entry->duration		= sample->duration;
	entry->outer_duration	= sample->outer_duration;
	entry->timestamp	= sample->timestamp;
	entry->nmi_total_ts	= sample->nmi_total_ts;
	entry->nmi_count	= sample->nmi_count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a

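/*
 * trace_hwlat_callback - NMI callback used to record NMI time and count
 * @enter: true if entering an NMI, false if exiting
 *
 * Called from the NMI path (when trace_hwlat_callback_enabled is set) to
 * account the number of NMIs, and the time spent in them, that land on
 * the CPU currently being sampled.
 */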
void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts += time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}
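
/*
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU timestamp counter (or similar),
 * looking for discontinuities that would indicate hardware-induced
 * latencies (such as SMIs). Called with interrupts disabled.
 */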
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	time_type start, t1, t2, last_t2;
	s64 diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;

	do_div(thresh, NSEC_PER_USEC); /* modifies thresh value */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */

	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (diff > outer_sample)
				outer_sample = diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err(BANNER "time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1)); /* current diff */

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		struct hwlat_sample s;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		ktime_get_real_ts64(&s.timestamp);
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		trace_hwlat_sample(&s);

		/* Keep a running maximum ever recorded hardware latency */
		if (sample > tr->max_latency)
			tr->max_latency = sample;
		if (outer_sample > tr->max_latency)
			tr->max_latency = outer_sample;
	}

out:
	return ret;
}

static struct cpumask save_cpumask;
static bool disable_migrate;

static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	int next_cpu;

	if (disable_migrate)
		return;

	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, current->cpus_ptr))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}
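
/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Each iteration migrates the thread to the next allowed CPU, then sleeps
 * for the remainder of the sampling window.
 */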
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}
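
/*
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */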
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	if (WARN_ON(hwlat_kthread))
		return 0;

	/* Just pick the first CPU on first iteration */
	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	sched_setaffinity(kthread->pid, current_mask);

	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}
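
/*
 * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */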
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}
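
/*
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure filesystem entries.
 */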
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
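
/*
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It configures for how many us of the
 * total window we actively sample for hardware-induced latency periods.
 * It is enforced that width is less than the total window size.
 */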
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}
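
/*
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time in us that
 * is considered one sample period, consisting of the active sampling width
 * plus idle time. It is enforced that any value written must be greater
 * than the sample width size, so that sampling remains possible.
 */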
static ssize_t
hwlat_window_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (hwlat_data.sample_width < val)
		hwlat_data.sample_window = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

static const struct file_operations width_fops = {
	.open	= tracing_open_generic,
	.read	= hwlat_read,
	.write	= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open	= tracing_open_generic,
	.read	= hwlat_read,
	.write	= hwlat_window_write,
};
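
/*
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing system, and within
 * that directory the "width" and "window" files to change those values.
 */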
static int init_tracefs(void)
{
	struct dentry *d_tracer;
	struct dentry *top_dir;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_data.sample_window,
						  &window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0644,
						 top_dir,
						 &hwlat_data.sample_width,
						 &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	tracefs_remove_recursive(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);