/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <asm/local.h>

struct rb_page {
        u64             ts;
        local_t         commit;
        char            data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME        10ULL
#define SLEEP_TIME      10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

static int producer_fifo = -1;
static int consumer_fifo = -1;

/* these can go negative (fifo prios default to -1), so they must be int */
module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");

static int read_events;

static int kill_test;

#define KILL_TEST()                             \
        do {                                    \
                if (!kill_test) {               \
                        kill_test = 1;          \
                        WARN_ON(1);             \
                }                               \
        } while (0)

enum event_status {
        EVENT_FOUND,
        EVENT_DROPPED,
};

static enum event_status read_event(int cpu)
{
        struct ring_buffer_event *event;
        int *entry;
        u64 ts;

        event = ring_buffer_consume(buffer, cpu, &ts, NULL);
        if (!event)
                return EVENT_DROPPED;

        entry = ring_buffer_event_data(event);
        if (*entry != cpu) {
                KILL_TEST();
                return EVENT_DROPPED;
        }

        read++;
        return EVENT_FOUND;
}
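/*
 * read_page() decodes a page snapshot by hand, so it has to know the raw
 * event layout: a 4-byte header holding type_len and time_delta, followed
 * by the payload.  type_len values 1..28 encode a data event whose total
 * size is (type_len + 1) * 4 bytes; type_len 0 means an oversized data
 * event whose length is stored in array[0]; PADDING events also keep their
 * length in array[0], and TIME_EXTEND events are a fixed 8 bytes.
 */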
static enum event_status read_page(int cpu)
{
        struct ring_buffer_event *event;
        struct rb_page *rpage;
        unsigned long commit;
        void *bpage;
        int *entry;
        int ret;
        int inc;
        int i;

        bpage = ring_buffer_alloc_read_page(buffer, cpu);
        if (!bpage)
                return EVENT_DROPPED;

        ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
        if (ret >= 0) {
                rpage = bpage;
                /* The commit may have missed event flags set, clear them */
                commit = local_read(&rpage->commit) & 0xfffff;
                for (i = 0; i < commit && !kill_test; i += inc) {

                        if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
                                KILL_TEST();
                                break;
                        }

                        inc = -1;
                        event = (void *)&rpage->data[i];
                        switch (event->type_len) {
                        case RINGBUF_TYPE_PADDING:
                                /* failed writes may be discarded events */
                                if (!event->time_delta)
                                        KILL_TEST();
                                inc = event->array[0] + 4;
                                break;
                        case RINGBUF_TYPE_TIME_EXTEND:
                                inc = 8;
                                break;
                        case 0:
                                entry = ring_buffer_event_data(event);
                                if (*entry != cpu) {
                                        KILL_TEST();
                                        break;
                                }
                                read++;
                                if (!event->array[0]) {
                                        KILL_TEST();
                                        break;
                                }
                                inc = event->array[0] + 4;
                                break;
                        default:
                                entry = ring_buffer_event_data(event);
                                if (*entry != cpu) {
                                        KILL_TEST();
                                        break;
                                }
                                read++;
                                inc = ((event->type_len + 1) * 4);
                        }
                        if (kill_test)
                                break;

                        if (inc <= 0) {
                                KILL_TEST();
                                break;
                        }
                }
        }
        ring_buffer_free_read_page(buffer, bpage);

        if (ret < 0)
                return EVENT_DROPPED;
        return EVENT_FOUND;
}

static void ring_buffer_consumer(void)
{
        /* toggle between reading pages and events */
        read_events ^= 1;

        read = 0;
        while (!reader_finish && !kill_test) {
                int found;

                do {
                        int cpu;

                        found = 0;
                        for_each_online_cpu(cpu) {
                                enum event_status stat;

                                if (read_events)
                                        stat = read_event(cpu);
                                else
                                        stat = read_page(cpu);

                                if (kill_test)
                                        break;
                                if (stat == EVENT_FOUND)
                                        found = 1;
                        }
                } while (found && !kill_test);

                set_current_state(TASK_INTERRUPTIBLE);
                if (reader_finish)
                        break;

                schedule();
        }
        reader_finish = 0;
        complete(&read_done);
}
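/*
 * The writer side exercises the two-step write API: reserve an event slot
 * on the local CPU's buffer, fill in the payload (just the writer's CPU id,
 * which the readers cross-check above), then commit.  A failed reservation
 * is counted as a miss rather than retried.
 */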
static void ring_buffer_producer(void)
{
        ktime_t start_time, end_time, timeout;
        unsigned long long time;
        unsigned long long entries;
        unsigned long long overruns;
        unsigned long missed = 0;
        unsigned long hit = 0;
        unsigned long avg;
        int cnt = 0;

        /*
         * Hammer the buffer for 10 secs (this may
         * make the system stall)
         */
        trace_printk("Starting ring buffer hammer\n");
        start_time = ktime_get();
        timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
        do {
                struct ring_buffer_event *event;
                int *entry;
                int i;

                for (i = 0; i < write_iteration; i++) {
                        event = ring_buffer_lock_reserve(buffer, 10);
                        if (!event) {
                                missed++;
                        } else {
                                hit++;
                                entry = ring_buffer_event_data(event);
                                *entry = smp_processor_id();
                                ring_buffer_unlock_commit(buffer, event);
                        }
                }
                end_time = ktime_get();

                cnt++;
                if (consumer && !(cnt % wakeup_interval))
                        wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
                /*
                 * If we are a non-preempt kernel, the 10 second run will
                 * stop everything while it runs. Instead, we will call
                 * cond_resched and also add any time that was lost by a
                 * reschedule.
                 *
                 * Do a cond resched at the same frequency we would wake up
                 * the reader.
                 */
                if (cnt % wakeup_interval)
                        cond_resched();
#endif

        } while (ktime_before(end_time, timeout) && !kill_test);
        trace_printk("End ring buffer hammer\n");

        if (consumer) {
                /* Init both completions here to avoid races */
                init_completion(&read_start);
                init_completion(&read_done);
                /* the completions must be visible before the finish var */
                smp_wmb();
                reader_finish = 1;
                /* finish var visible before waking up the consumer */
                smp_wmb();
                wake_up_process(consumer);
                wait_for_completion(&read_done);
        }

        time = ktime_us_delta(end_time, start_time);

        entries = ring_buffer_entries(buffer);
        overruns = ring_buffer_overruns(buffer);

        if (kill_test)
                trace_printk("ERROR!\n");

        if (!disable_reader) {
                if (consumer_fifo < 0)
                        trace_printk("Running Consumer at nice: %d\n",
                                     consumer_nice);
                else
                        trace_printk("Running Consumer at SCHED_FIFO %d\n",
                                     consumer_fifo);
        }
        if (producer_fifo < 0)
                trace_printk("Running Producer at nice: %d\n",
                             producer_nice);
        else
                trace_printk("Running Producer at SCHED_FIFO %d\n",
                             producer_fifo);

        /* Let the user know that the test is running at low priority */
        if (producer_fifo < 0 && consumer_fifo < 0 &&
            producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
                trace_printk("WARNING!!! This test is running at lowest priority.\n");

        trace_printk("Time: %lld (usecs)\n", time);
        trace_printk("Overruns: %lld\n", overruns);
        if (disable_reader)
                trace_printk("Read: (reader disabled)\n");
        else
                trace_printk("Read: %ld (by %s)\n", read,
                             read_events ? "events" : "pages");
        trace_printk("Entries: %lld\n", entries);
        trace_printk("Total: %lld\n", entries + overruns + read);
        trace_printk("Missed: %ld\n", missed);
        trace_printk("Hit: %ld\n", hit);

        /* Convert time from usecs to millisecs */
        do_div(time, USEC_PER_MSEC);
        if (time)
                hit /= (long)time;
        else
                trace_printk("TIME IS ZERO??\n");

        trace_printk("Entries per millisec: %ld\n", hit);

        if (hit) {
                /* Calculate the average time in nanosecs */
                avg = NSEC_PER_MSEC / hit;
                trace_printk("%ld ns per entry\n", avg);
        }

        if (missed) {
                if (time)
                        missed /= (long)time;

                trace_printk("Total iterations per millisec: %ld\n",
                             hit + missed);

                /* it is possible that hit + missed will overflow and be zero */
                if (!(hit + missed)) {
                        trace_printk("hit + missed overflowed and totalled zero!\n");
                        hit--; /* make it non zero */
                }

                /* Calculate the average time in nanosecs */
                avg = NSEC_PER_MSEC / (hit + missed);
                trace_printk("%ld ns per entry\n", avg);
        }
}

static void wait_to_die(void)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
}

static int ring_buffer_consumer_thread(void *arg)
{
        while (!kthread_should_stop() && !kill_test) {
                complete(&read_start);

                ring_buffer_consumer();

                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop() || kill_test)
                        break;

                schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (kill_test)
                wait_to_die();

        return 0;
}
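/*
 * The producer thread drives each pass: it resets the buffer, wakes the
 * consumer and waits on read_start so both sides begin together, runs the
 * timed hammer, then sleeps before the next pass.  On a detected failure
 * (kill_test) both threads park in wait_to_die() until module unload, since
 * the exit path still calls kthread_stop() on them.
 */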
static int ring_buffer_producer_thread(void *arg)
{
        init_completion(&read_start);

        while (!kthread_should_stop() && !kill_test) {
                ring_buffer_reset(buffer);

                if (consumer) {
                        smp_wmb();
                        wake_up_process(consumer);
                        wait_for_completion(&read_start);
                }

                ring_buffer_producer();

                trace_printk("Sleeping for 10 secs\n");
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ * SLEEP_TIME);
        }

        if (kill_test)
                wait_to_die();

        return 0;
}

static int __init ring_buffer_benchmark_init(void)
{
        int ret;

        /* make a one meg buffer in overwrite mode */
        buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
        if (!buffer)
                return -ENOMEM;

        if (!disable_reader) {
                consumer = kthread_create(ring_buffer_consumer_thread,
                                          NULL, "rb_consumer");
                ret = PTR_ERR(consumer);
                if (IS_ERR(consumer))
                        goto out_fail;
        }

        producer = kthread_run(ring_buffer_producer_thread,
                               NULL, "rb_producer");
        ret = PTR_ERR(producer);

        if (IS_ERR(producer))
                goto out_kill;

        /*
         * Run them as low-prio background tasks by default:
         */
        if (!disable_reader) {
                if (consumer_fifo >= 0) {
                        struct sched_param param = {
                                .sched_priority = consumer_fifo
                        };
                        sched_setscheduler(consumer, SCHED_FIFO, &param);
                } else
                        set_user_nice(consumer, consumer_nice);
        }

        if (producer_fifo >= 0) {
                struct sched_param param = {
                        .sched_priority = producer_fifo
                };
                sched_setscheduler(producer, SCHED_FIFO, &param);
        } else
                set_user_nice(producer, producer_nice);

        return 0;

 out_kill:
        if (consumer)
                kthread_stop(consumer);

 out_fail:
        ring_buffer_free(buffer);
        return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
        kthread_stop(producer);
        if (consumer)
                kthread_stop(consumer);
        ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");
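/*
 * Usage sketch (assuming the module is built via CONFIG_RING_BUFFER_BENCHMARK
 * as ring_buffer_benchmark.ko):
 *
 *   modprobe ring_buffer_benchmark producer_fifo=10
 *   sleep 30
 *   cat /sys/kernel/debug/tracing/trace
 *
 * All results are reported through trace_printk(), so they appear in the
 * ftrace trace file rather than in dmesg.
 */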