root/kernel/trace/blktrace.c

DEFINITIONS

This source file includes the following definitions:
  1. trace_note
  2. trace_note_tsk
  3. trace_note_time
  4. __trace_note_message
  5. act_log_check
  6. __blk_add_trace
  7. blk_trace_free
  8. get_probe_ref
  9. put_probe_ref
  10. blk_trace_cleanup
  11. __blk_trace_remove
  12. blk_trace_remove
  13. blk_dropped_read
  14. blk_msg_write
  15. blk_subbuf_start_callback
  16. blk_remove_buf_file_callback
  17. blk_create_buf_file_callback
  18. blk_trace_setup_lba
  19. do_blk_trace_setup
  20. __blk_trace_setup
  21. blk_trace_setup
  22. compat_blk_trace_setup
  23. __blk_trace_startstop
  24. blk_trace_startstop
  25. blk_trace_ioctl
  26. blk_trace_shutdown
  27. blk_trace_bio_get_cgid
  28. blk_trace_bio_get_cgid
  29. blk_trace_request_get_cgid
  30. blk_add_trace_rq
  31. blk_add_trace_rq_insert
  32. blk_add_trace_rq_issue
  33. blk_add_trace_rq_requeue
  34. blk_add_trace_rq_complete
  35. blk_add_trace_bio
  36. blk_add_trace_bio_bounce
  37. blk_add_trace_bio_complete
  38. blk_add_trace_bio_backmerge
  39. blk_add_trace_bio_frontmerge
  40. blk_add_trace_bio_queue
  41. blk_add_trace_getrq
  42. blk_add_trace_sleeprq
  43. blk_add_trace_plug
  44. blk_add_trace_unplug
  45. blk_add_trace_split
  46. blk_add_trace_bio_remap
  47. blk_add_trace_rq_remap
  48. blk_add_driver_data
  49. blk_register_tracepoints
  50. blk_unregister_tracepoints
  51. fill_rwbs
  52. te_blk_io_trace
  53. pdu_start
  54. cgid_start
  55. pdu_real_len
  56. t_action
  57. t_bytes
  58. t_sec
  59. t_sector
  60. t_error
  61. get_pdu_int
  62. get_pdu_remap
  63. blk_log_action_classic
  64. blk_log_action
  65. blk_log_dump_pdu
  66. blk_log_generic
  67. blk_log_with_error
  68. blk_log_remap
  69. blk_log_plug
  70. blk_log_unplug
  71. blk_log_split
  72. blk_log_msg
  73. blk_tracer_print_header
  74. blk_tracer_start
  75. blk_tracer_init
  76. blk_tracer_stop
  77. blk_tracer_reset
  78. print_one_line
  79. blk_trace_event_print
  80. blk_trace_synthesize_old_trace
  81. blk_trace_event_print_binary
  82. blk_tracer_print_line
  83. blk_tracer_set_flag
  84. init_blk_tracer
  85. blk_trace_remove_queue
  86. blk_trace_setup_queue
  87. blk_trace_str2mask
  88. blk_trace_mask2str
  89. blk_trace_get_queue
  90. sysfs_blk_trace_attr_show
  91. sysfs_blk_trace_attr_store
  92. blk_trace_init_sysfs
  93. blk_trace_remove_sysfs
  94. blk_fill_rwbs

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
   4  *
   5  */
   6 #include <linux/kernel.h>
   7 #include <linux/blkdev.h>
   8 #include <linux/blktrace_api.h>
   9 #include <linux/percpu.h>
  10 #include <linux/init.h>
  11 #include <linux/mutex.h>
  12 #include <linux/slab.h>
  13 #include <linux/debugfs.h>
  14 #include <linux/export.h>
  15 #include <linux/time.h>
  16 #include <linux/uaccess.h>
  17 #include <linux/list.h>
  18 #include <linux/blk-cgroup.h>
  19 
  20 #include "../../block/blk.h"
  21 
  22 #include <trace/events/block.h>
  23 
  24 #include "trace_output.h"
  25 
  26 #ifdef CONFIG_BLK_DEV_IO_TRACE
  27 
  28 static unsigned int blktrace_seq __read_mostly = 1;
  29 
  30 static struct trace_array *blk_tr;
  31 static bool blk_tracer_enabled __read_mostly;
  32 
  33 static LIST_HEAD(running_trace_list);
  34 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
  35 
  36 /* Select an alternative, minimalistic output instead of the original one */
  37 #define TRACE_BLK_OPT_CLASSIC   0x1
  38 #define TRACE_BLK_OPT_CGROUP    0x2
  39 #define TRACE_BLK_OPT_CGNAME    0x4
  40 
  41 static struct tracer_opt blk_tracer_opts[] = {
  42         /* The minimalistic output is disabled by default */
  43         { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
  44 #ifdef CONFIG_BLK_CGROUP
  45         { TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
  46         { TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
  47 #endif
  48         { }
  49 };
  50 
  51 static struct tracer_flags blk_tracer_flags = {
  52         .val  = 0,
  53         .opts = blk_tracer_opts,
  54 };
  55 
  56 /* Global reference count of probes */
  57 static DEFINE_MUTEX(blk_probe_mutex);
  58 static int blk_probes_ref;
  59 
  60 static void blk_register_tracepoints(void);
  61 static void blk_unregister_tracepoints(void);
  62 
  63 /*
  64  * Send out a notify message.
  65  */
  66 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
  67                        const void *data, size_t len,
  68                        union kernfs_node_id *cgid)
  69 {
  70         struct blk_io_trace *t;
  71         struct ring_buffer_event *event = NULL;
  72         struct ring_buffer *buffer = NULL;
  73         int pc = 0;
  74         int cpu = smp_processor_id();
  75         bool blk_tracer = blk_tracer_enabled;
  76         ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
  77 
  78         if (blk_tracer) {
  79                 buffer = blk_tr->trace_buffer.buffer;
  80                 pc = preempt_count();
  81                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
  82                                                   sizeof(*t) + len + cgid_len,
  83                                                   0, pc);
  84                 if (!event)
  85                         return;
  86                 t = ring_buffer_event_data(event);
  87                 goto record_it;
  88         }
  89 
  90         if (!bt->rchan)
  91                 return;
  92 
  93         t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
  94         if (t) {
  95                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
  96                 t->time = ktime_to_ns(ktime_get());
  97 record_it:
  98                 t->device = bt->dev;
  99                 t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
 100                 t->pid = pid;
 101                 t->cpu = cpu;
 102                 t->pdu_len = len + cgid_len;
 103                 if (cgid)
 104                         memcpy((void *)t + sizeof(*t), cgid, cgid_len);
 105                 memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
 106 
 107                 if (blk_tracer)
 108                         trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
 109         }
 110 }
 111 
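     /*
      * For reference, the record emitted above is laid out as sketched
      * below (the field order matches the memcpy() calls; this diagram is
      * an aid, not a copy of any header):
      *
      *   +---------------------+--------------------------+--------------+
      *   | struct blk_io_trace | cgid (optional,          | payload      |
      *   | fixed-size header   | union kernfs_node_id)    | (len bytes)  |
      *   +---------------------+--------------------------+--------------+
      *
      * t->pdu_len covers cgid + payload, so a reader can walk a buffer of
      * variable-length records by advancing sizeof(*t) + t->pdu_len.
      */
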
 112 /*
 113  * Send out a notify for this process if we haven't done so since a trace
 114  * started
 115  */
 116 static void trace_note_tsk(struct task_struct *tsk)
 117 {
 118         unsigned long flags;
 119         struct blk_trace *bt;
 120 
 121         tsk->btrace_seq = blktrace_seq;
 122         spin_lock_irqsave(&running_trace_lock, flags);
 123         list_for_each_entry(bt, &running_trace_list, running_list) {
 124                 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
 125                            sizeof(tsk->comm), NULL);
 126         }
 127         spin_unlock_irqrestore(&running_trace_lock, flags);
 128 }
 129 
 130 static void trace_note_time(struct blk_trace *bt)
 131 {
 132         struct timespec64 now;
 133         unsigned long flags;
 134         u32 words[2];
 135 
 136         /* need to check user space to see if this breaks in y2038 or y2106 */
 137         ktime_get_real_ts64(&now);
 138         words[0] = (u32)now.tv_sec;
 139         words[1] = now.tv_nsec;
 140 
 141         local_irq_save(flags);
 142         trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
 143         local_irq_restore(flags);
 144 }
 145 
 146 void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
 147         const char *fmt, ...)
 148 {
 149         int n;
 150         va_list args;
 151         unsigned long flags;
 152         char *buf;
 153 
 154         if (unlikely(bt->trace_state != Blktrace_running &&
 155                      !blk_tracer_enabled))
 156                 return;
 157 
 158         /*
 159          * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
 160          * message to the trace.
 161          */
 162         if (!(bt->act_mask & BLK_TC_NOTIFY))
 163                 return;
 164 
 165         local_irq_save(flags);
 166         buf = this_cpu_ptr(bt->msg_data);
 167         va_start(args, fmt);
 168         n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
 169         va_end(args);
 170 
 171         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 172                 blkcg = NULL;
 173 #ifdef CONFIG_BLK_CGROUP
 174         trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
 175                 blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
 176 #else
 177         trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
 178 #endif
 179         local_irq_restore(flags);
 180 }
 181 EXPORT_SYMBOL_GPL(__trace_note_message);
 182 
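     /*
      * Callers normally go through the blk_add_trace_msg() wrapper from
      * <linux/blktrace_api.h> rather than calling this directly. A minimal,
      * hypothetical call site (driver name and reason code are made up):
      *
      *	static void mydrv_note_requeue(struct request_queue *q, unsigned int why)
      *	{
      *		blk_add_trace_msg(q, "mydrv: requeued, reason %u", why);
      *	}
      *
      * The note is dropped unless the trace is running and BLK_TC_NOTIFY
      * is set in the action mask, per the checks above.
      */
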
 183 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 184                          pid_t pid)
 185 {
 186         if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
 187                 return 1;
 188         if (sector && (sector < bt->start_lba || sector > bt->end_lba))
 189                 return 1;
 190         if (bt->pid && pid != bt->pid)
 191                 return 1;
 192 
 193         return 0;
 194 }
 195 
 196 /*
 197  * Data direction bit lookup
 198  */
 199 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 200                                  BLK_TC_ACT(BLK_TC_WRITE) };
 201 
 202 #define BLK_TC_RAHEAD           BLK_TC_AHEAD
 203 #define BLK_TC_PREFLUSH         BLK_TC_FLUSH
 204 
 205 /* The ilog2() calls fall out at compile time because their arguments are constant */
 206 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
 207           (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
 208 
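     /*
      * Worked example of MASK_TC_BIT() (bit positions are illustrative;
      * the real values live in <linux/blk_types.h> and
      * <linux/blktrace_api.h>). Suppose __REQ_SYNC == 3 and
      * BLK_TC_SYNC == 1 << 3. Then:
      *
      *	MASK_TC_BIT(rw, SYNC)
      *	  == (rw & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
      *	  == (rw & (1 << 3)) << (3 + 16 - 3)
      *
      * i.e. the REQ_SYNC bit is relocated to the BLK_TC_SYNC position in
      * the upper 16 category bits of the trace action.
      */
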
 209 /*
 210  * The worker for the various blk_add_trace*() types. Fills out a
 211  * blk_io_trace structure and places it in a per-cpu subbuffer.
 212  */
 213 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 214                      int op, int op_flags, u32 what, int error, int pdu_len,
 215                      void *pdu_data, union kernfs_node_id *cgid)
 216 {
 217         struct task_struct *tsk = current;
 218         struct ring_buffer_event *event = NULL;
 219         struct ring_buffer *buffer = NULL;
 220         struct blk_io_trace *t;
 221         unsigned long flags = 0;
 222         unsigned long *sequence;
 223         pid_t pid;
 224         int cpu, pc = 0;
 225         bool blk_tracer = blk_tracer_enabled;
 226         ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
 227 
 228         if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
 229                 return;
 230 
 231         what |= ddir_act[op_is_write(op) ? WRITE : READ];
 232         what |= MASK_TC_BIT(op_flags, SYNC);
 233         what |= MASK_TC_BIT(op_flags, RAHEAD);
 234         what |= MASK_TC_BIT(op_flags, META);
 235         what |= MASK_TC_BIT(op_flags, PREFLUSH);
 236         what |= MASK_TC_BIT(op_flags, FUA);
 237         if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
 238                 what |= BLK_TC_ACT(BLK_TC_DISCARD);
 239         if (op == REQ_OP_FLUSH)
 240                 what |= BLK_TC_ACT(BLK_TC_FLUSH);
 241         if (cgid)
 242                 what |= __BLK_TA_CGROUP;
 243 
 244         pid = tsk->pid;
 245         if (act_log_check(bt, what, sector, pid))
 246                 return;
 247         cpu = raw_smp_processor_id();
 248 
 249         if (blk_tracer) {
 250                 tracing_record_cmdline(current);
 251 
 252                 buffer = blk_tr->trace_buffer.buffer;
 253                 pc = preempt_count();
 254                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 255                                                   sizeof(*t) + pdu_len + cgid_len,
 256                                                   0, pc);
 257                 if (!event)
 258                         return;
 259                 t = ring_buffer_event_data(event);
 260                 goto record_it;
 261         }
 262 
 263         if (unlikely(tsk->btrace_seq != blktrace_seq))
 264                 trace_note_tsk(tsk);
 265 
 266         /*
 267          * A word about the locking here - we disable interrupts to reserve
 268          * some space in the relay per-cpu buffer, to prevent an irq
 269          * from coming in and stepping on our toes.
 270          */
 271         local_irq_save(flags);
 272         t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
 273         if (t) {
 274                 sequence = per_cpu_ptr(bt->sequence, cpu);
 275 
 276                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
 277                 t->sequence = ++(*sequence);
 278                 t->time = ktime_to_ns(ktime_get());
 279 record_it:
 280                 /*
 281                  * These two are not needed in ftrace as they are in the
 282                  * generic trace_entry, filled by tracing_generic_entry_update,
 283                  * but for the trace_event->bin() synthesizer benefit we do it
 284                  * here too.
 285                  */
 286                 t->cpu = cpu;
 287                 t->pid = pid;
 288 
 289                 t->sector = sector;
 290                 t->bytes = bytes;
 291                 t->action = what;
 292                 t->device = bt->dev;
 293                 t->error = error;
 294                 t->pdu_len = pdu_len + cgid_len;
 295 
 296                 if (cgid_len)
 297                         memcpy((void *)t + sizeof(*t), cgid, cgid_len);
 298                 if (pdu_len)
 299                         memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
 300 
 301                 if (blk_tracer) {
 302                         trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
 303                         return;
 304                 }
 305         }
 306 
 307         local_irq_restore(flags);
 308 }
 309 
 310 static void blk_trace_free(struct blk_trace *bt)
 311 {
 312         debugfs_remove(bt->msg_file);
 313         debugfs_remove(bt->dropped_file);
 314         relay_close(bt->rchan);
 315         debugfs_remove(bt->dir);
 316         free_percpu(bt->sequence);
 317         free_percpu(bt->msg_data);
 318         kfree(bt);
 319 }
 320 
 321 static void get_probe_ref(void)
 322 {
 323         mutex_lock(&blk_probe_mutex);
 324         if (++blk_probes_ref == 1)
 325                 blk_register_tracepoints();
 326         mutex_unlock(&blk_probe_mutex);
 327 }
 328 
 329 static void put_probe_ref(void)
 330 {
 331         mutex_lock(&blk_probe_mutex);
 332         if (!--blk_probes_ref)
 333                 blk_unregister_tracepoints();
 334         mutex_unlock(&blk_probe_mutex);
 335 }
 336 
 337 static void blk_trace_cleanup(struct blk_trace *bt)
 338 {
 339         synchronize_rcu();
 340         blk_trace_free(bt);
 341         put_probe_ref();
 342 }
 343 
 344 static int __blk_trace_remove(struct request_queue *q)
 345 {
 346         struct blk_trace *bt;
 347 
 348         bt = xchg(&q->blk_trace, NULL);
 349         if (!bt)
 350                 return -EINVAL;
 351 
 352         if (bt->trace_state != Blktrace_running)
 353                 blk_trace_cleanup(bt);
 354 
 355         return 0;
 356 }
 357 
 358 int blk_trace_remove(struct request_queue *q)
 359 {
 360         int ret;
 361 
 362         mutex_lock(&q->blk_trace_mutex);
 363         ret = __blk_trace_remove(q);
 364         mutex_unlock(&q->blk_trace_mutex);
 365 
 366         return ret;
 367 }
 368 EXPORT_SYMBOL_GPL(blk_trace_remove);
 369 
 370 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
 371                                 size_t count, loff_t *ppos)
 372 {
 373         struct blk_trace *bt = filp->private_data;
 374         char buf[16];
 375 
 376         snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
 377 
 378         return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
 379 }
 380 
 381 static const struct file_operations blk_dropped_fops = {
 382         .owner =        THIS_MODULE,
 383         .open =         simple_open,
 384         .read =         blk_dropped_read,
 385         .llseek =       default_llseek,
 386 };
 387 
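     /*
      * Usage note: with a trace set up, the current drop count can be read
      * from user space, e.g. (path assumes the default debugfs mount):
      *
      *	cat /sys/kernel/debug/block/<name>/dropped
      */
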
 388 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 389                                 size_t count, loff_t *ppos)
 390 {
 391         char *msg;
 392         struct blk_trace *bt;
 393 
 394         if (count >= BLK_TN_MAX_MSG)
 395                 return -EINVAL;
 396 
 397         msg = memdup_user_nul(buffer, count);
 398         if (IS_ERR(msg))
 399                 return PTR_ERR(msg);
 400 
 401         bt = filp->private_data;
 402         __trace_note_message(bt, NULL, "%s", msg);
 403         kfree(msg);
 404 
 405         return count;
 406 }
 407 
 408 static const struct file_operations blk_msg_fops = {
 409         .owner =        THIS_MODULE,
 410         .open =         simple_open,
 411         .write =        blk_msg_write,
 412         .llseek =       noop_llseek,
 413 };
 414 
 415 /*
 416  * Keep track of how many times we encountered a full subbuffer, to aid
 417  * the user space app in telling how many lost events there were.
 418  */
 419 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 420                                      void *prev_subbuf, size_t prev_padding)
 421 {
 422         struct blk_trace *bt;
 423 
 424         if (!relay_buf_full(buf))
 425                 return 1;
 426 
 427         bt = buf->chan->private_data;
 428         atomic_inc(&bt->dropped);
 429         return 0;
 430 }
 431 
 432 static int blk_remove_buf_file_callback(struct dentry *dentry)
 433 {
 434         debugfs_remove(dentry);
 435 
 436         return 0;
 437 }
 438 
 439 static struct dentry *blk_create_buf_file_callback(const char *filename,
 440                                                    struct dentry *parent,
 441                                                    umode_t mode,
 442                                                    struct rchan_buf *buf,
 443                                                    int *is_global)
 444 {
 445         return debugfs_create_file(filename, mode, parent, buf,
 446                                         &relay_file_operations);
 447 }
 448 
 449 static struct rchan_callbacks blk_relay_callbacks = {
 450         .subbuf_start           = blk_subbuf_start_callback,
 451         .create_buf_file        = blk_create_buf_file_callback,
 452         .remove_buf_file        = blk_remove_buf_file_callback,
 453 };
 454 
 455 static void blk_trace_setup_lba(struct blk_trace *bt,
 456                                 struct block_device *bdev)
 457 {
 458         struct hd_struct *part = NULL;
 459 
 460         if (bdev)
 461                 part = bdev->bd_part;
 462 
 463         if (part) {
 464                 bt->start_lba = part->start_sect;
 465                 bt->end_lba = part->start_sect + part->nr_sects;
 466         } else {
 467                 bt->start_lba = 0;
 468                 bt->end_lba = -1ULL;
 469         }
 470 }
 471 
 472 /*
 473  * Setup everything required to start tracing
 474  */
 475 static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 476                               struct block_device *bdev,
 477                               struct blk_user_trace_setup *buts)
 478 {
 479         struct blk_trace *bt = NULL;
 480         struct dentry *dir = NULL;
 481         int ret;
 482 
 483         if (!buts->buf_size || !buts->buf_nr)
 484                 return -EINVAL;
 485 
 486         if (!blk_debugfs_root)
 487                 return -ENOENT;
 488 
 489         strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
 490         buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
 491 
 492         /*
 493          * some device names include path separators - convert the slashes
 494          * to underscores so the name works as a debugfs directory name
 495          */
 496         strreplace(buts->name, '/', '_');
 497 
 498         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
 499         if (!bt)
 500                 return -ENOMEM;
 501 
 502         ret = -ENOMEM;
 503         bt->sequence = alloc_percpu(unsigned long);
 504         if (!bt->sequence)
 505                 goto err;
 506 
 507         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
 508         if (!bt->msg_data)
 509                 goto err;
 510 
 511         ret = -ENOENT;
 512 
 513         dir = debugfs_lookup(buts->name, blk_debugfs_root);
 514         if (!dir)
 515                 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
 516 
 517         bt->dev = dev;
 518         atomic_set(&bt->dropped, 0);
 519         INIT_LIST_HEAD(&bt->running_list);
 520 
 521         ret = -EIO;
 522         bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
 523                                                &blk_dropped_fops);
 524 
 525         bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
 526 
 527         bt->rchan = relay_open("trace", dir, buts->buf_size,
 528                                 buts->buf_nr, &blk_relay_callbacks, bt);
 529         if (!bt->rchan)
 530                 goto err;
 531 
 532         bt->act_mask = buts->act_mask;
 533         if (!bt->act_mask)
 534                 bt->act_mask = (u16) -1;
 535 
 536         blk_trace_setup_lba(bt, bdev);
 537 
 538         /* overwrite with user settings */
 539         if (buts->start_lba)
 540                 bt->start_lba = buts->start_lba;
 541         if (buts->end_lba)
 542                 bt->end_lba = buts->end_lba;
 543 
 544         bt->pid = buts->pid;
 545         bt->trace_state = Blktrace_setup;
 546 
 547         ret = -EBUSY;
 548         if (cmpxchg(&q->blk_trace, NULL, bt))
 549                 goto err;
 550 
 551         get_probe_ref();
 552 
 553         ret = 0;
 554 err:
 555         if (dir && !bt->dir)
 556                 dput(dir);
 557         if (ret)
 558                 blk_trace_free(bt);
 559         return ret;
 560 }
 561 
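     /*
      * On success, the debugfs layout left behind looks like this (paths
      * assume the default debugfs mount point; the per-cpu trace<N> files
      * are created by relay_open() via blk_create_buf_file_callback()):
      *
      *	/sys/kernel/debug/block/<name>/dropped          (0444, blk_dropped_fops)
      *	/sys/kernel/debug/block/<name>/msg              (0222, blk_msg_fops)
      *	/sys/kernel/debug/block/<name>/trace0..traceN   (relay_file_operations)
      */
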
 562 static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 563                              struct block_device *bdev, char __user *arg)
 564 {
 565         struct blk_user_trace_setup buts;
 566         int ret;
 567 
 568         ret = copy_from_user(&buts, arg, sizeof(buts));
 569         if (ret)
 570                 return -EFAULT;
 571 
 572         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 573         if (ret)
 574                 return ret;
 575 
 576         if (copy_to_user(arg, &buts, sizeof(buts))) {
 577                 __blk_trace_remove(q);
 578                 return -EFAULT;
 579         }
 580         return 0;
 581 }
 582 
 583 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 584                     struct block_device *bdev,
 585                     char __user *arg)
 586 {
 587         int ret;
 588 
 589         mutex_lock(&q->blk_trace_mutex);
 590         ret = __blk_trace_setup(q, name, dev, bdev, arg);
 591         mutex_unlock(&q->blk_trace_mutex);
 592 
 593         return ret;
 594 }
 595 EXPORT_SYMBOL_GPL(blk_trace_setup);
 596 
 597 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 598 static int compat_blk_trace_setup(struct request_queue *q, char *name,
 599                                   dev_t dev, struct block_device *bdev,
 600                                   char __user *arg)
 601 {
 602         struct blk_user_trace_setup buts;
 603         struct compat_blk_user_trace_setup cbuts;
 604         int ret;
 605 
 606         if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 607                 return -EFAULT;
 608 
 609         buts = (struct blk_user_trace_setup) {
 610                 .act_mask = cbuts.act_mask,
 611                 .buf_size = cbuts.buf_size,
 612                 .buf_nr = cbuts.buf_nr,
 613                 .start_lba = cbuts.start_lba,
 614                 .end_lba = cbuts.end_lba,
 615                 .pid = cbuts.pid,
 616         };
 617 
 618         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 619         if (ret)
 620                 return ret;
 621 
 622         if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
 623                 __blk_trace_remove(q);
 624                 return -EFAULT;
 625         }
 626 
 627         return 0;
 628 }
 629 #endif
 630 
 631 static int __blk_trace_startstop(struct request_queue *q, int start)
 632 {
 633         int ret;
 634         struct blk_trace *bt;
 635 
 636         bt = rcu_dereference_protected(q->blk_trace,
 637                                        lockdep_is_held(&q->blk_trace_mutex));
 638         if (bt == NULL)
 639                 return -EINVAL;
 640 
 641         /*
 642          * For starting a trace, we can transition from a setup or stopped
 643          * trace. For stopping a trace, the state must be running.
 644          */
 645         ret = -EINVAL;
 646         if (start) {
 647                 if (bt->trace_state == Blktrace_setup ||
 648                     bt->trace_state == Blktrace_stopped) {
 649                         blktrace_seq++;
 650                         smp_mb();
 651                         bt->trace_state = Blktrace_running;
 652                         spin_lock_irq(&running_trace_lock);
 653                         list_add(&bt->running_list, &running_trace_list);
 654                         spin_unlock_irq(&running_trace_lock);
 655 
 656                         trace_note_time(bt);
 657                         ret = 0;
 658                 }
 659         } else {
 660                 if (bt->trace_state == Blktrace_running) {
 661                         bt->trace_state = Blktrace_stopped;
 662                         spin_lock_irq(&running_trace_lock);
 663                         list_del_init(&bt->running_list);
 664                         spin_unlock_irq(&running_trace_lock);
 665                         relay_flush(bt->rchan);
 666                         ret = 0;
 667                 }
 668         }
 669 
 670         return ret;
 671 }
 672 
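     /*
      * State transitions driven by the function above (Blktrace_setup is
      * assigned in do_blk_trace_setup()):
      *
      *	Blktrace_setup   --start-->  Blktrace_running
      *	Blktrace_stopped --start-->  Blktrace_running
      *	Blktrace_running --stop--->  Blktrace_stopped
      *
      * Any other requested transition fails with -EINVAL.
      */
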
 673 int blk_trace_startstop(struct request_queue *q, int start)
 674 {
 675         int ret;
 676 
 677         mutex_lock(&q->blk_trace_mutex);
 678         ret = __blk_trace_startstop(q, start);
 679         mutex_unlock(&q->blk_trace_mutex);
 680 
 681         return ret;
 682 }
 683 EXPORT_SYMBOL_GPL(blk_trace_startstop);
 684 
 685 /*
 686  * When reading or writing the blktrace sysfs files, the references to the
 687  * opened sysfs or device files should prevent the underlying block device
 688  * from being removed. So no further delete protection is really needed.
 689  */
 690 
 691 /**
 692  * blk_trace_ioctl: - handle the ioctls associated with tracing
 693  * @bdev:       the block device
 694  * @cmd:        the ioctl cmd
 695  * @arg:        the argument data, if any
 696  *
 697  **/
 698 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 699 {
 700         struct request_queue *q;
 701         int ret, start = 0;
 702         char b[BDEVNAME_SIZE];
 703 
 704         q = bdev_get_queue(bdev);
 705         if (!q)
 706                 return -ENXIO;
 707 
 708         mutex_lock(&q->blk_trace_mutex);
 709 
 710         switch (cmd) {
 711         case BLKTRACESETUP:
 712                 bdevname(bdev, b);
 713                 ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 714                 break;
 715 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 716         case BLKTRACESETUP32:
 717                 bdevname(bdev, b);
 718                 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 719                 break;
 720 #endif
 721         case BLKTRACESTART:
 722                 start = 1;
 723                 /* fall through */
 724         case BLKTRACESTOP:
 725                 ret = __blk_trace_startstop(q, start);
 726                 break;
 727         case BLKTRACETEARDOWN:
 728                 ret = __blk_trace_remove(q);
 729                 break;
 730         default:
 731                 ret = -ENOTTY;
 732                 break;
 733         }
 734 
 735         mutex_unlock(&q->blk_trace_mutex);
 736         return ret;
 737 }
 738 
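     /*
      * For context, a user-space consumer drives these ioctls roughly as
      * follows -- a sketch of the sequence used by tools like blktrace(8),
      * not a copy of their source:
      *
      *	struct blk_user_trace_setup buts = {
      *		.buf_size = 512 * 1024,		// bytes per relay sub-buffer
      *		.buf_nr   = 4,			// number of sub-buffers
      *		.act_mask = 0,			// 0 is treated as "all actions"
      *	};
      *	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
      *	ioctl(fd, BLKTRACESETUP, &buts);	// create the debugfs files
      *	ioctl(fd, BLKTRACESTART, 0);		// -> Blktrace_running
      *	// ... consume /sys/kernel/debug/block/sda/trace<cpu> ...
      *	ioctl(fd, BLKTRACESTOP, 0);		// -> Blktrace_stopped
      *	ioctl(fd, BLKTRACETEARDOWN, 0);		// free the blk_trace
      */
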
 739 /**
 740  * blk_trace_shutdown: - stop and cleanup trace structures
 741  * @q:    the request queue associated with the device
 742  *
 743  **/
 744 void blk_trace_shutdown(struct request_queue *q)
 745 {
 746         mutex_lock(&q->blk_trace_mutex);
 747         if (rcu_dereference_protected(q->blk_trace,
 748                                       lockdep_is_held(&q->blk_trace_mutex))) {
 749                 __blk_trace_startstop(q, 0);
 750                 __blk_trace_remove(q);
 751         }
 752 
 753         mutex_unlock(&q->blk_trace_mutex);
 754 }
 755 
 756 #ifdef CONFIG_BLK_CGROUP
 757 static union kernfs_node_id *
 758 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 759 {
 760         struct blk_trace *bt;
 761 
 762         /* We don't use the 'bt' value here except as an optimization... */
 763         bt = rcu_dereference_protected(q->blk_trace, 1);
 764         if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 765                 return NULL;
 766 
 767         if (!bio->bi_blkg)
 768                 return NULL;
 769         return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
 770 }
 771 #else
 772 static union kernfs_node_id *
 773 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 774 {
 775         return NULL;
 776 }
 777 #endif
 778 
 779 static union kernfs_node_id *
 780 blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
 781 {
 782         if (!rq->bio)
 783                 return NULL;
 784         /* Use the first bio */
 785         return blk_trace_bio_get_cgid(q, rq->bio);
 786 }
 787 
 788 /*
 789  * blktrace probes
 790  */
 791 
 792 /**
 793  * blk_add_trace_rq - Add a trace for a request oriented action
 794  * @rq:         the source request
 795  * @error:      return status to log
 796  * @nr_bytes:   number of completed bytes
 797  * @what:       the action
 798  * @cgid:       the cgroup info
 799  *
 800  * Description:
 801  *     Records an action against a request. Will log the bio offset + size.
 802  *
 803  **/
 804 static void blk_add_trace_rq(struct request *rq, int error,
 805                              unsigned int nr_bytes, u32 what,
 806                              union kernfs_node_id *cgid)
 807 {
 808         struct blk_trace *bt;
 809 
 810         rcu_read_lock();
 811         bt = rcu_dereference(rq->q->blk_trace);
 812         if (likely(!bt)) {
 813                 rcu_read_unlock();
 814                 return;
 815         }
 816 
 817         if (blk_rq_is_passthrough(rq))
 818                 what |= BLK_TC_ACT(BLK_TC_PC);
 819         else
 820                 what |= BLK_TC_ACT(BLK_TC_FS);
 821 
 822         __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
 823                         rq->cmd_flags, what, error, 0, NULL, cgid);
 824         rcu_read_unlock();
 825 }
 826 
 827 static void blk_add_trace_rq_insert(void *ignore,
 828                                     struct request_queue *q, struct request *rq)
 829 {
 830         blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
 831                          blk_trace_request_get_cgid(q, rq));
 832 }
 833 
 834 static void blk_add_trace_rq_issue(void *ignore,
 835                                    struct request_queue *q, struct request *rq)
 836 {
 837         blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
 838                          blk_trace_request_get_cgid(q, rq));
 839 }
 840 
 841 static void blk_add_trace_rq_requeue(void *ignore,
 842                                      struct request_queue *q,
 843                                      struct request *rq)
 844 {
 845         blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
 846                          blk_trace_request_get_cgid(q, rq));
 847 }
 848 
 849 static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
 850                         int error, unsigned int nr_bytes)
 851 {
 852         blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
 853                          blk_trace_request_get_cgid(rq->q, rq));
 854 }
 855 
 856 /**
 857  * blk_add_trace_bio - Add a trace for a bio oriented action
 858  * @q:          queue the io is for
 859  * @bio:        the source bio
 860  * @what:       the action
 861  * @error:      error, if any
 862  *
 863  * Description:
 864  *     Records an action against a bio. Will log the bio offset + size.
 865  *
 866  **/
 867 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 868                               u32 what, int error)
 869 {
 870         struct blk_trace *bt;
 871 
 872         rcu_read_lock();
 873         bt = rcu_dereference(q->blk_trace);
 874         if (likely(!bt)) {
 875                 rcu_read_unlock();
 876                 return;
 877         }
 878 
 879         __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 880                         bio_op(bio), bio->bi_opf, what, error, 0, NULL,
 881                         blk_trace_bio_get_cgid(q, bio));
 882         rcu_read_unlock();
 883 }
 884 
 885 static void blk_add_trace_bio_bounce(void *ignore,
 886                                      struct request_queue *q, struct bio *bio)
 887 {
 888         blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 889 }
 890 
 891 static void blk_add_trace_bio_complete(void *ignore,
 892                                        struct request_queue *q, struct bio *bio,
 893                                        int error)
 894 {
 895         blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 896 }
 897 
 898 static void blk_add_trace_bio_backmerge(void *ignore,
 899                                         struct request_queue *q,
 900                                         struct request *rq,
 901                                         struct bio *bio)
 902 {
 903         blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 904 }
 905 
 906 static void blk_add_trace_bio_frontmerge(void *ignore,
 907                                          struct request_queue *q,
 908                                          struct request *rq,
 909                                          struct bio *bio)
 910 {
 911         blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 912 }
 913 
 914 static void blk_add_trace_bio_queue(void *ignore,
 915                                     struct request_queue *q, struct bio *bio)
 916 {
 917         blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 918 }
 919 
 920 static void blk_add_trace_getrq(void *ignore,
 921                                 struct request_queue *q,
 922                                 struct bio *bio, int rw)
 923 {
 924         if (bio)
 925                 blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
 926         else {
 927                 struct blk_trace *bt;
 928 
 929                 rcu_read_lock();
 930                 bt = rcu_dereference(q->blk_trace);
 931                 if (bt)
 932                         __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
 933                                         NULL, NULL);
 934                 rcu_read_unlock();
 935         }
 936 }
 937 
 938 
 939 static void blk_add_trace_sleeprq(void *ignore,
 940                                   struct request_queue *q,
 941                                   struct bio *bio, int rw)
 942 {
 943         if (bio)
 944                 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
 945         else {
 946                 struct blk_trace *bt;
 947 
 948                 rcu_read_lock();
 949                 bt = rcu_dereference(q->blk_trace);
 950                 if (bt)
 951                         __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
 952                                         0, 0, NULL, NULL);
 953                 rcu_read_unlock();
 954         }
 955 }
 956 
 957 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 958 {
 959         struct blk_trace *bt;
 960 
 961         rcu_read_lock();
 962         bt = rcu_dereference(q->blk_trace);
 963         if (bt)
 964                 __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
 965         rcu_read_unlock();
 966 }
 967 
 968 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 969                                     unsigned int depth, bool explicit)
 970 {
 971         struct blk_trace *bt;
 972 
 973         rcu_read_lock();
 974         bt = rcu_dereference(q->blk_trace);
 975         if (bt) {
 976                 __be64 rpdu = cpu_to_be64(depth);
 977                 u32 what;
 978 
 979                 if (explicit)
 980                         what = BLK_TA_UNPLUG_IO;
 981                 else
 982                         what = BLK_TA_UNPLUG_TIMER;
 983 
 984                 __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
 985         }
 986         rcu_read_unlock();
 987 }
 988 
 989 static void blk_add_trace_split(void *ignore,
 990                                 struct request_queue *q, struct bio *bio,
 991                                 unsigned int pdu)
 992 {
 993         struct blk_trace *bt;
 994 
 995         rcu_read_lock();
 996         bt = rcu_dereference(q->blk_trace);
 997         if (bt) {
 998                 __be64 rpdu = cpu_to_be64(pdu);
 999 
1000                 __blk_add_trace(bt, bio->bi_iter.bi_sector,
1001                                 bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
1002                                 BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
1003                                 &rpdu, blk_trace_bio_get_cgid(q, bio));
1004         }
1005         rcu_read_unlock();
1006 }
1007 
1008 /**
1009  * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
1010  * @ignore:     trace callback data parameter (not used)
1011  * @q:          queue the io is for
1012  * @bio:        the source bio
1013  * @dev:        target device
1014  * @from:       source sector
1015  *
1016  * Description:
1017  *     Device mapper or RAID targets sometimes need to split a bio because
1018  *     it spans a stripe (or similar). Add a trace for that action.
1019  *
1020  **/
1021 static void blk_add_trace_bio_remap(void *ignore,
1022                                     struct request_queue *q, struct bio *bio,
1023                                     dev_t dev, sector_t from)
1024 {
1025         struct blk_trace *bt;
1026         struct blk_io_trace_remap r;
1027 
1028         rcu_read_lock();
1029         bt = rcu_dereference(q->blk_trace);
1030         if (likely(!bt)) {
1031                 rcu_read_unlock();
1032                 return;
1033         }
1034 
1035         r.device_from = cpu_to_be32(dev);
1036         r.device_to   = cpu_to_be32(bio_dev(bio));
1037         r.sector_from = cpu_to_be64(from);
1038 
1039         __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1040                         bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
1041                         sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
1042         rcu_read_unlock();
1043 }
1044 
1045 /**
1046  * blk_add_trace_rq_remap - Add a trace for a request-remap operation
1047  * @ignore:     trace callback data parameter (not used)
1048  * @q:          queue the io is for
1049  * @rq:         the source request
1050  * @dev:        target device
1051  * @from:       source sector
1052  *
1053  * Description:
1054  *     Device mapper remaps requests to other devices.
1055  *     Add a trace for that action.
1056  *
1057  **/
1058 static void blk_add_trace_rq_remap(void *ignore,
1059                                    struct request_queue *q,
1060                                    struct request *rq, dev_t dev,
1061                                    sector_t from)
1062 {
1063         struct blk_trace *bt;
1064         struct blk_io_trace_remap r;
1065 
1066         rcu_read_lock();
1067         bt = rcu_dereference(q->blk_trace);
1068         if (likely(!bt)) {
1069                 rcu_read_unlock();
1070                 return;
1071         }
1072 
1073         r.device_from = cpu_to_be32(dev);
1074         r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
1075         r.sector_from = cpu_to_be64(from);
1076 
1077         __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1078                         rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
1079                         sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
1080         rcu_read_unlock();
1081 }
1082 
1083 /**
1084  * blk_add_driver_data - Add binary message with driver-specific data
1085  * @q:          queue the io is for
1086  * @rq:         io request
1087  * @data:       driver-specific data
1088  * @len:        length of driver-specific data
1089  *
1090  * Description:
1091  *     Some drivers might want to write driver-specific data per request.
1092  *
1093  **/
1094 void blk_add_driver_data(struct request_queue *q,
1095                          struct request *rq,
1096                          void *data, size_t len)
1097 {
1098         struct blk_trace *bt;
1099 
1100         rcu_read_lock();
1101         bt = rcu_dereference(q->blk_trace);
1102         if (likely(!bt)) {
1103                 rcu_read_unlock();
1104                 return;
1105         }
1106 
1107         __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
1108                                 BLK_TA_DRV_DATA, 0, len, data,
1109                                 blk_trace_request_get_cgid(q, rq));
1110         rcu_read_unlock();
1111 }
1112 EXPORT_SYMBOL_GPL(blk_add_driver_data);
1113 
1114 static void blk_register_tracepoints(void)
1115 {
1116         int ret;
1117 
1118         ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1119         WARN_ON(ret);
1120         ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1121         WARN_ON(ret);
1122         ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1123         WARN_ON(ret);
1124         ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1125         WARN_ON(ret);
1126         ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1127         WARN_ON(ret);
1128         ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1129         WARN_ON(ret);
1130         ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1131         WARN_ON(ret);
1132         ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1133         WARN_ON(ret);
1134         ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1135         WARN_ON(ret);
1136         ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1137         WARN_ON(ret);
1138         ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1139         WARN_ON(ret);
1140         ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1141         WARN_ON(ret);
1142         ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1143         WARN_ON(ret);
1144         ret = register_trace_block_split(blk_add_trace_split, NULL);
1145         WARN_ON(ret);
1146         ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1147         WARN_ON(ret);
1148         ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1149         WARN_ON(ret);
1150 }
1151 
1152 static void blk_unregister_tracepoints(void)
1153 {
1154         unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1155         unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1156         unregister_trace_block_split(blk_add_trace_split, NULL);
1157         unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1158         unregister_trace_block_plug(blk_add_trace_plug, NULL);
1159         unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1160         unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1161         unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1162         unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1163         unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1164         unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1165         unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1166         unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1167         unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1168         unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1169         unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1170 
1171         tracepoint_synchronize_unregister();
1172 }
1173 
1174 /*
1175  * struct blk_io_trace formatting routines
1176  */
1177 
1178 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1179 {
1180         int i = 0;
1181         int tc = t->action >> BLK_TC_SHIFT;
1182 
1183         if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1184                 rwbs[i++] = 'N';
1185                 goto out;
1186         }
1187 
1188         if (tc & BLK_TC_FLUSH)
1189                 rwbs[i++] = 'F';
1190 
1191         if (tc & BLK_TC_DISCARD)
1192                 rwbs[i++] = 'D';
1193         else if (tc & BLK_TC_WRITE)
1194                 rwbs[i++] = 'W';
1195         else if (t->bytes)
1196                 rwbs[i++] = 'R';
1197         else
1198                 rwbs[i++] = 'N';
1199 
1200         if (tc & BLK_TC_FUA)
1201                 rwbs[i++] = 'F';
1202         if (tc & BLK_TC_AHEAD)
1203                 rwbs[i++] = 'A';
1204         if (tc & BLK_TC_SYNC)
1205                 rwbs[i++] = 'S';
1206         if (tc & BLK_TC_META)
1207                 rwbs[i++] = 'M';
1208 out:
1209         rwbs[i] = '\0';
1210 }
1211 
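     /*
      * A few example encodings produced above:
      *
      *	write + sync		-> "WS"
      *	write + fua		-> "WF"
      *	flush + write		-> "FW"
      *	discard			-> "D"
      *	read + readahead	-> "RA"
      *	notify message		-> "N"
      */
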
1212 static inline
1213 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1214 {
1215         return (const struct blk_io_trace *)ent;
1216 }
1217 
1218 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1219 {
1220         return (void *)(te_blk_io_trace(ent) + 1) +
1221                 (has_cg ? sizeof(union kernfs_node_id) : 0);
1222 }
1223 
1224 static inline const void *cgid_start(const struct trace_entry *ent)
1225 {
1226         return (void *)(te_blk_io_trace(ent) + 1);
1227 }
1228 
1229 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1230 {
1231         return te_blk_io_trace(ent)->pdu_len -
1232                         (has_cg ? sizeof(union kernfs_node_id) : 0);
1233 }
1234 
1235 static inline u32 t_action(const struct trace_entry *ent)
1236 {
1237         return te_blk_io_trace(ent)->action;
1238 }
1239 
1240 static inline u32 t_bytes(const struct trace_entry *ent)
1241 {
1242         return te_blk_io_trace(ent)->bytes;
1243 }
1244 
1245 static inline u32 t_sec(const struct trace_entry *ent)
1246 {
1247         return te_blk_io_trace(ent)->bytes >> 9;
1248 }
1249 
1250 static inline unsigned long long t_sector(const struct trace_entry *ent)
1251 {
1252         return te_blk_io_trace(ent)->sector;
1253 }
1254 
1255 static inline __u16 t_error(const struct trace_entry *ent)
1256 {
1257         return te_blk_io_trace(ent)->error;
1258 }
1259 
1260 static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
1261 {
1262         const __u64 *val = pdu_start(ent, has_cg);
1263         return be64_to_cpu(*val);
1264 }
1265 
1266 static void get_pdu_remap(const struct trace_entry *ent,
1267                           struct blk_io_trace_remap *r, bool has_cg)
1268 {
1269         const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1270         __u64 sector_from = __r->sector_from;
1271 
1272         r->device_from = be32_to_cpu(__r->device_from);
1273         r->device_to   = be32_to_cpu(__r->device_to);
1274         r->sector_from = be64_to_cpu(sector_from);
1275 }
1276 
1277 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1278         bool has_cg);
1279 
1280 static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1281         bool has_cg)
1282 {
1283         char rwbs[RWBS_LEN];
1284         unsigned long long ts  = iter->ts;
1285         unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1286         unsigned secs          = (unsigned long)ts;
1287         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1288 
1289         fill_rwbs(rwbs, t);
1290 
1291         trace_seq_printf(&iter->seq,
1292                          "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1293                          MAJOR(t->device), MINOR(t->device), iter->cpu,
1294                          secs, nsec_rem, iter->ent->pid, act, rwbs);
1295 }
1296 
1297 static void blk_log_action(struct trace_iterator *iter, const char *act,
1298         bool has_cg)
1299 {
1300         char rwbs[RWBS_LEN];
1301         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1302 
1303         fill_rwbs(rwbs, t);
1304         if (has_cg) {
1305                 const union kernfs_node_id *id = cgid_start(iter->ent);
1306 
1307                 if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1308                         char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1309 
1310                         cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1311                                 sizeof(blkcg_name_buf));
1312                         trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1313                                  MAJOR(t->device), MINOR(t->device),
1314                                  blkcg_name_buf, act, rwbs);
1315                 } else
1316                         trace_seq_printf(&iter->seq,
1317                                  "%3d,%-3d %x,%-x %2s %3s ",
1318                                  MAJOR(t->device), MINOR(t->device),
1319                                  id->ino, id->generation, act, rwbs);
1320         } else
1321                 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1322                                  MAJOR(t->device), MINOR(t->device), act, rwbs);
1323 }
1324 
1325 static void blk_log_dump_pdu(struct trace_seq *s,
1326         const struct trace_entry *ent, bool has_cg)
1327 {
1328         const unsigned char *pdu_buf;
1329         int pdu_len;
1330         int i, end;
1331 
1332         pdu_buf = pdu_start(ent, has_cg);
1333         pdu_len = pdu_real_len(ent, has_cg);
1334 
1335         if (!pdu_len)
1336                 return;
1337 
1338         /* find the last zero that needs to be printed */
1339         for (end = pdu_len - 1; end >= 0; end--)
1340                 if (pdu_buf[end])
1341                         break;
1342         end++;
1343 
1344         trace_seq_putc(s, '(');
1345 
1346         for (i = 0; i < pdu_len; i++) {
1347 
1348                 trace_seq_printf(s, "%s%02x",
1349                                  i == 0 ? "" : " ", pdu_buf[i]);
1350 
1351                 /*
1352                  * stop when the rest is just zeroes and indicate so
1353                  * with a ".." appended
1354                  */
1355                 if (i == end && end != pdu_len - 1) {
1356                         trace_seq_puts(s, " ..) ");
1357                         return;
1358                 }
1359         }
1360 
1361         trace_seq_puts(s, ") ");
1362 }
1363 
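     /*
      * Example: a 4-byte PDU of { 0x12, 0x34, 0x00, 0x00 } is rendered as
      * "(12 34 00 ..) " -- the first trailing zero is printed, then the
      * remaining zeroes are elided with "..".
      */
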
1364 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1365 {
1366         char cmd[TASK_COMM_LEN];
1367 
1368         trace_find_cmdline(ent->pid, cmd);
1369 
1370         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1371                 trace_seq_printf(s, "%u ", t_bytes(ent));
1372                 blk_log_dump_pdu(s, ent, has_cg);
1373                 trace_seq_printf(s, "[%s]\n", cmd);
1374         } else {
1375                 if (t_sec(ent))
1376                         trace_seq_printf(s, "%llu + %u [%s]\n",
1377                                                 t_sector(ent), t_sec(ent), cmd);
1378                 else
1379                         trace_seq_printf(s, "[%s]\n", cmd);
1380         }
1381 }
1382 
1383 static void blk_log_with_error(struct trace_seq *s,
1384                               const struct trace_entry *ent, bool has_cg)
1385 {
1386         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1387                 blk_log_dump_pdu(s, ent, has_cg);
1388                 trace_seq_printf(s, "[%d]\n", t_error(ent));
1389         } else {
1390                 if (t_sec(ent))
1391                         trace_seq_printf(s, "%llu + %u [%d]\n",
1392                                          t_sector(ent),
1393                                          t_sec(ent), t_error(ent));
1394                 else
1395                         trace_seq_printf(s, "%llu [%d]\n",
1396                                          t_sector(ent), t_error(ent));
1397         }
1398 }
1399 
1400 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1401 {
1402         struct blk_io_trace_remap r = { .device_from = 0, };
1403 
1404         get_pdu_remap(ent, &r, has_cg);
1405         trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1406                          t_sector(ent), t_sec(ent),
1407                          MAJOR(r.device_from), MINOR(r.device_from),
1408                          (unsigned long long)r.sector_from);
1409 }
1410 
1411 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1412 {
1413         char cmd[TASK_COMM_LEN];
1414 
1415         trace_find_cmdline(ent->pid, cmd);
1416 
1417         trace_seq_printf(s, "[%s]\n", cmd);
1418 }
1419 
1420 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1421 {
1422         char cmd[TASK_COMM_LEN];
1423 
1424         trace_find_cmdline(ent->pid, cmd);
1425 
1426         trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1427 }
1428 
1429 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1430 {
1431         char cmd[TASK_COMM_LEN];
1432 
1433         trace_find_cmdline(ent->pid, cmd);
1434 
1435         trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1436                          get_pdu_int(ent, has_cg), cmd);
1437 }
1438 
1439 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1440                         bool has_cg)
1441 {
1442 
1443         trace_seq_putmem(s, pdu_start(ent, has_cg),
1444                 pdu_real_len(ent, has_cg));
1445         trace_seq_putc(s, '\n');
1446 }
1447 
1448 /*
1449  * struct tracer operations
1450  */
1451 
1452 static void blk_tracer_print_header(struct seq_file *m)
1453 {
1454         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1455                 return;
1456         seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1457                     "#  |     |     |           |   |   |\n");
1458 }
1459 
1460 static void blk_tracer_start(struct trace_array *tr)
1461 {
1462         blk_tracer_enabled = true;
1463 }
1464 
1465 static int blk_tracer_init(struct trace_array *tr)
1466 {
1467         blk_tr = tr;
1468         blk_tracer_start(tr);
1469         return 0;
1470 }
1471 
1472 static void blk_tracer_stop(struct trace_array *tr)
1473 {
1474         blk_tracer_enabled = false;
1475 }
1476 
1477 static void blk_tracer_reset(struct trace_array *tr)
1478 {
1479         blk_tracer_stop(tr);
1480 }
1481 
1482 static const struct {
1483         const char *act[2];
1484         void       (*print)(struct trace_seq *s, const struct trace_entry *ent,
1485                             bool has_cg);
1486 } what2act[] = {
1487         [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
1488         [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
1489         [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
1490         [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
1491         [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
1492         [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
1493         [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
1494         [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
1495         [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
1496         [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
1497         [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1498         [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
1499         [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
1500         [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
1501         [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
1502 };
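
/*
 * A minimal userspace sketch of the decode that print_one_line() below
 * performs against this table; the constants come from the uapi header
 * <linux/blktrace_api.h>, and the sample action word is invented for
 * illustration.
 */
#include <stdio.h>
#include <linux/blktrace_api.h>

int main(void)
{
        unsigned int action = BLK_TC_ACT(BLK_TC_READ) | __BLK_TA_QUEUE;
        /* low BLK_TC_SHIFT bits select the action, minus the cgroup flag */
        unsigned int what = (action & ((1 << BLK_TC_SHIFT) - 1)) &
                            ~__BLK_TA_CGROUP;
        static const char * const act[][2] = {
                [__BLK_TA_QUEUE] = { "Q", "queue" },
                [__BLK_TA_ISSUE] = { "D", "issue" },
        };

        printf("terse \"%s\", verbose \"%s\"\n", act[what][0], act[what][1]);
        return 0;
}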
1503 
1504 static enum print_line_t print_one_line(struct trace_iterator *iter,
1505                                         bool classic)
1506 {
1507         struct trace_array *tr = iter->tr;
1508         struct trace_seq *s = &iter->seq;
1509         const struct blk_io_trace *t;
1510         u16 what;
1511         bool long_act;
1512         blk_log_action_t *log_action;
1513         bool has_cg;
1514 
1515         t          = te_blk_io_trace(iter->ent);
1516         what       = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1517         long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1518         log_action = classic ? &blk_log_action_classic : &blk_log_action;
1519         has_cg     = t->action & __BLK_TA_CGROUP;
1520 
1521         if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1522                 log_action(iter, long_act ? "message" : "m", has_cg);
1523                 blk_log_msg(s, iter->ent, has_cg);
1524                 return trace_handle_return(s);
1525         }
1526 
1527         if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1528                 trace_seq_printf(s, "Unknown action %x\n", what);
1529         else {
1530                 log_action(iter, what2act[what].act[long_act], has_cg);
1531                 what2act[what].print(s, iter->ent, has_cg);
1532         }
1533 
1534         return trace_handle_return(s);
1535 }
1536 
1537 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1538                                                int flags, struct trace_event *event)
1539 {
1540         return print_one_line(iter, false);
1541 }
1542 
1543 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1544 {
1545         struct trace_seq *s = &iter->seq;
1546         struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1547         const int offset = offsetof(struct blk_io_trace, sector);
1548         struct blk_io_trace old = {
1549                 .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1550                 .time     = iter->ts,
1551         };
1552 
1553         trace_seq_putmem(s, &old, offset);
1554         trace_seq_putmem(s, &t->sector,
1555                          sizeof(old) - offset + t->pdu_len);
1556 }
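
/*
 * The record synthesized above is the on-disk blktrace format that
 * blkparse consumes, so a reader can validate it with just the uapi
 * definitions; a sketch:
 */
#include <linux/blktrace_api.h>

static int old_trace_record_ok(const struct blk_io_trace *t)
{
        /* top 24 bits carry the magic, low 8 bits the format version */
        if ((t->magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
                return 0;
        return (t->magic & 0xff) == BLK_IO_TRACE_VERSION;
}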
1557 
1558 static enum print_line_t
1559 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1560                              struct trace_event *event)
1561 {
1562         blk_trace_synthesize_old_trace(iter);
1563 
1564         return trace_handle_return(&iter->seq);
1565 }
1566 
1567 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1568 {
1569         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1570                 return TRACE_TYPE_UNHANDLED;
1571 
1572         return print_one_line(iter, true);
1573 }
1574 
1575 static int
1576 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1577 {
1578         /* don't output context-info for blk_classic output */
1579         if (bit == TRACE_BLK_OPT_CLASSIC) {
1580                 if (set)
1581                         tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1582                 else
1583                         tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1584         }
1585         return 0;
1586 }
1587 
1588 static struct tracer blk_tracer __read_mostly = {
1589         .name           = "blk",
1590         .init           = blk_tracer_init,
1591         .reset          = blk_tracer_reset,
1592         .start          = blk_tracer_start,
1593         .stop           = blk_tracer_stop,
1594         .print_header   = blk_tracer_print_header,
1595         .print_line     = blk_tracer_print_line,
1596         .flags          = &blk_tracer_flags,
1597         .set_flag       = blk_tracer_set_flag,
1598 };
1599 
1600 static struct trace_event_functions trace_blk_event_funcs = {
1601         .trace          = blk_trace_event_print,
1602         .binary         = blk_trace_event_print_binary,
1603 };
1604 
1605 static struct trace_event trace_blk_event = {
1606         .type           = TRACE_BLK,
1607         .funcs          = &trace_blk_event_funcs,
1608 };
1609 
1610 static int __init init_blk_tracer(void)
1611 {
1612         if (!register_trace_event(&trace_blk_event)) {
1613                 pr_warn("Warning: could not register block events\n");
1614                 return 1;
1615         }
1616 
1617         if (register_tracer(&blk_tracer) != 0) {
1618                 pr_warn("Warning: could not register the block tracer\n");
1619                 unregister_trace_event(&trace_blk_event);
1620                 return 1;
1621         }
1622 
1623         return 0;
1624 }
1625 
1626 device_initcall(init_blk_tracer);
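
/*
 * Once the initcall above has run, the tracer is selectable through
 * tracefs like any other; a userspace sketch, assuming the
 * conventional debugfs mount point:
 */
#include <stdio.h>

static int select_blk_tracer(void)
{
        FILE *f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");

        if (!f)
                return -1;
        fputs("blk\n", f);
        return fclose(f);
}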
1627 
1628 static int blk_trace_remove_queue(struct request_queue *q)
1629 {
1630         struct blk_trace *bt;
1631 
1632         bt = xchg(&q->blk_trace, NULL);
1633         if (bt == NULL)
1634                 return -EINVAL;
1635 
1636         put_probe_ref();
1637         synchronize_rcu();
1638         blk_trace_free(bt);
1639         return 0;
1640 }
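
/*
 * The xchg() above pairs with synchronize_rcu() because the tracepoint
 * probes in this file dereference q->blk_trace under rcu_read_lock().
 * A sketch of that reader side; blk_trace_is_active() is a
 * hypothetical helper, not part of this file:
 */
static bool blk_trace_is_active(struct request_queue *q)
{
        struct blk_trace *bt;
        bool active;

        rcu_read_lock();
        bt = rcu_dereference(q->blk_trace);
        /* bt, if non-NULL, cannot be freed inside this critical section */
        active = bt && bt->act_mask;
        rcu_read_unlock();

        return active;
}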
1641 
1642 /*
1643  * Set up everything required to start tracing
1644  */
1645 static int blk_trace_setup_queue(struct request_queue *q,
1646                                  struct block_device *bdev)
1647 {
1648         struct blk_trace *bt = NULL;
1649         int ret = -ENOMEM;
1650 
1651         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1652         if (!bt)
1653                 return -ENOMEM;
1654 
1655         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1656         if (!bt->msg_data)
1657                 goto free_bt;
1658 
1659         bt->dev = bdev->bd_dev;
1660         bt->act_mask = (u16)-1;
1661 
1662         blk_trace_setup_lba(bt, bdev);
1663 
1664         ret = -EBUSY;
1665         if (cmpxchg(&q->blk_trace, NULL, bt))
1666                 goto free_bt;
1667 
1668         get_probe_ref();
1669         return 0;
1670 
1671 free_bt:
1672         blk_trace_free(bt);
1673         return ret;
1674 }
1675 
1676 /*
1677  * sysfs interface to enable and configure tracing
1678  */
1679 
1680 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1681                                          struct device_attribute *attr,
1682                                          char *buf);
1683 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1684                                           struct device_attribute *attr,
1685                                           const char *buf, size_t count);
1686 #define BLK_TRACE_DEVICE_ATTR(_name) \
1687         DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1688                     sysfs_blk_trace_attr_show, \
1689                     sysfs_blk_trace_attr_store)
1690 
1691 static BLK_TRACE_DEVICE_ATTR(enable);
1692 static BLK_TRACE_DEVICE_ATTR(act_mask);
1693 static BLK_TRACE_DEVICE_ATTR(pid);
1694 static BLK_TRACE_DEVICE_ATTR(start_lba);
1695 static BLK_TRACE_DEVICE_ATTR(end_lba);
1696 
1697 static struct attribute *blk_trace_attrs[] = {
1698         &dev_attr_enable.attr,
1699         &dev_attr_act_mask.attr,
1700         &dev_attr_pid.attr,
1701         &dev_attr_start_lba.attr,
1702         &dev_attr_end_lba.attr,
1703         NULL
1704 };
1705 
1706 struct attribute_group blk_trace_attr_group = {
1707         .name  = "trace",
1708         .attrs = blk_trace_attrs,
1709 };
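
/*
 * With the group named "trace", these attributes surface as
 * /sys/block/<disk>/trace/{enable,act_mask,pid,start_lba,end_lba}.
 * A userspace sketch of enabling tracing; the disk name "sda" is an
 * assumption for illustration:
 */
#include <fcntl.h>
#include <unistd.h>

static int blktrace_sysfs_enable(void)
{
        int fd = open("/sys/block/sda/trace/enable", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, "1", 1) != 1) {
                close(fd);
                return -1;
        }
        return close(fd);
}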
1710 
1711 static const struct {
1712         int mask;
1713         const char *str;
1714 } mask_maps[] = {
1715         { BLK_TC_READ,          "read"          },
1716         { BLK_TC_WRITE,         "write"         },
1717         { BLK_TC_FLUSH,         "flush"         },
1718         { BLK_TC_SYNC,          "sync"          },
1719         { BLK_TC_QUEUE,         "queue"         },
1720         { BLK_TC_REQUEUE,       "requeue"       },
1721         { BLK_TC_ISSUE,         "issue"         },
1722         { BLK_TC_COMPLETE,      "complete"      },
1723         { BLK_TC_FS,            "fs"            },
1724         { BLK_TC_PC,            "pc"            },
1725         { BLK_TC_NOTIFY,        "notify"        },
1726         { BLK_TC_AHEAD,         "ahead"         },
1727         { BLK_TC_META,          "meta"          },
1728         { BLK_TC_DISCARD,       "discard"       },
1729         { BLK_TC_DRV_DATA,      "drv_data"      },
1730         { BLK_TC_FUA,           "fua"           },
1731 };
1732 
1733 static int blk_trace_str2mask(const char *str)
1734 {
1735         int i;
1736         int mask = 0;
1737         char *buf, *s, *token;
1738 
1739         buf = kstrdup(str, GFP_KERNEL);
1740         if (buf == NULL)
1741                 return -ENOMEM;
1742         s = strstrip(buf);
1743 
1744         while (1) {
1745                 token = strsep(&s, ",");
1746                 if (token == NULL)
1747                         break;
1748 
1749                 if (*token == '\0')
1750                         continue;
1751 
1752                 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1753                         if (strcasecmp(token, mask_maps[i].str) == 0) {
1754                                 mask |= mask_maps[i].mask;
1755                                 break;
1756                         }
1757                 }
1758                 if (i == ARRAY_SIZE(mask_maps)) {
1759                         mask = -EINVAL;
1760                         break;
1761                 }
1762         }
1763         kfree(buf);
1764 
1765         return mask;
1766 }
1767 
1768 static ssize_t blk_trace_mask2str(char *buf, int mask)
1769 {
1770         int i;
1771         char *p = buf;
1772 
1773         for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1774                 if (mask & mask_maps[i].mask) {
1775                         p += sprintf(p, "%s%s",
1776                                     (p == buf) ? "" : ",", mask_maps[i].str);
1777                 }
1778         }
1779         *p++ = '\n';
1780 
1781         return p - buf;
1782 }
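
/*
 * These two helpers back the act_mask attribute: stores accept either
 * a numeric mask or a comma-separated category list, and reads print
 * the names back. A userspace sketch; blktrace_set_act_mask() and the
 * "sda" path are assumptions for illustration:
 */
#include <stdio.h>

static int blktrace_set_act_mask(const char *categories)
{
        FILE *f = fopen("/sys/block/sda/trace/act_mask", "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", categories);  /* e.g. "read,write,fua" */
        return fclose(f);
}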
1783 
1784 static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1785 {
1786         if (bdev->bd_disk == NULL)
1787                 return NULL;
1788 
1789         return bdev_get_queue(bdev);
1790 }
1791 
1792 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1793                                          struct device_attribute *attr,
1794                                          char *buf)
1795 {
1796         struct hd_struct *p = dev_to_part(dev);
1797         struct request_queue *q;
1798         struct block_device *bdev;
1799         struct blk_trace *bt;
1800         ssize_t ret = -ENXIO;
1801 
1802         bdev = bdget(part_devt(p));
1803         if (bdev == NULL)
1804                 goto out;
1805 
1806         q = blk_trace_get_queue(bdev);
1807         if (q == NULL)
1808                 goto out_bdput;
1809 
1810         mutex_lock(&q->blk_trace_mutex);
1811 
1812         bt = rcu_dereference_protected(q->blk_trace,
1813                                        lockdep_is_held(&q->blk_trace_mutex));
1814         if (attr == &dev_attr_enable) {
1815                 ret = sprintf(buf, "%u\n", !!bt);
1816                 goto out_unlock_bdev;
1817         }
1818 
1819         if (bt == NULL)
1820                 ret = sprintf(buf, "disabled\n");
1821         else if (attr == &dev_attr_act_mask)
1822                 ret = blk_trace_mask2str(buf, bt->act_mask);
1823         else if (attr == &dev_attr_pid)
1824                 ret = sprintf(buf, "%u\n", bt->pid);
1825         else if (attr == &dev_attr_start_lba)
1826                 ret = sprintf(buf, "%llu\n", bt->start_lba);
1827         else if (attr == &dev_attr_end_lba)
1828                 ret = sprintf(buf, "%llu\n", bt->end_lba);
1829 
1830 out_unlock_bdev:
1831         mutex_unlock(&q->blk_trace_mutex);
1832 out_bdput:
1833         bdput(bdev);
1834 out:
1835         return ret;
1836 }
1837 
1838 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1839                                           struct device_attribute *attr,
1840                                           const char *buf, size_t count)
1841 {
1842         struct block_device *bdev;
1843         struct request_queue *q;
1844         struct hd_struct *p;
1845         struct blk_trace *bt;
1846         u64 value;
1847         ssize_t ret = -EINVAL;
1848 
1849         if (count == 0)
1850                 goto out;
1851 
1852         if (attr == &dev_attr_act_mask) {
1853                 if (kstrtoull(buf, 0, &value)) {
1854                         /* Assume it is a list of trace category names */
1855                         ret = blk_trace_str2mask(buf);
1856                         if (ret < 0)
1857                                 goto out;
1858                         value = ret;
1859                 }
1860         } else if (kstrtoull(buf, 0, &value))
1861                 goto out;
1862 
1863         ret = -ENXIO;
1864 
1865         p = dev_to_part(dev);
1866         bdev = bdget(part_devt(p));
1867         if (bdev == NULL)
1868                 goto out;
1869 
1870         q = blk_trace_get_queue(bdev);
1871         if (q == NULL)
1872                 goto out_bdput;
1873 
1874         mutex_lock(&q->blk_trace_mutex);
1875 
1876         bt = rcu_dereference_protected(q->blk_trace,
1877                                        lockdep_is_held(&q->blk_trace_mutex));
1878         if (attr == &dev_attr_enable) {
1879                 if (!!value == !!bt) {
1880                         ret = 0;
1881                         goto out_unlock_bdev;
1882                 }
1883                 if (value)
1884                         ret = blk_trace_setup_queue(q, bdev);
1885                 else
1886                         ret = blk_trace_remove_queue(q);
1887                 goto out_unlock_bdev;
1888         }
1889 
1890         ret = 0;
1891         if (bt == NULL) {
1892                 ret = blk_trace_setup_queue(q, bdev);
1893                 bt = rcu_dereference_protected(q->blk_trace,
1894                                 lockdep_is_held(&q->blk_trace_mutex));
1895         }
1896 
1897         if (ret == 0) {
1898                 if (attr == &dev_attr_act_mask)
1899                         bt->act_mask = value;
1900                 else if (attr == &dev_attr_pid)
1901                         bt->pid = value;
1902                 else if (attr == &dev_attr_start_lba)
1903                         bt->start_lba = value;
1904                 else if (attr == &dev_attr_end_lba)
1905                         bt->end_lba = value;
1906         }
1907 
1908 out_unlock_bdev:
1909         mutex_unlock(&q->blk_trace_mutex);
1910 out_bdput:
1911         bdput(bdev);
1912 out:
1913         return ret ? ret : count;
1914 }
1915 
1916 int blk_trace_init_sysfs(struct device *dev)
1917 {
1918         return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1919 }
1920 
1921 void blk_trace_remove_sysfs(struct device *dev)
1922 {
1923         sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1924 }
1925 
1926 #endif /* CONFIG_BLK_DEV_IO_TRACE */
1927 
1928 #ifdef CONFIG_EVENT_TRACING
1929 
1930 void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
1931 {
1932         int i = 0;
1933 
1934         if (op & REQ_PREFLUSH)
1935                 rwbs[i++] = 'F';
1936 
1937         switch (op & REQ_OP_MASK) {
1938         case REQ_OP_WRITE:
1939         case REQ_OP_WRITE_SAME:
1940                 rwbs[i++] = 'W';
1941                 break;
1942         case REQ_OP_DISCARD:
1943                 rwbs[i++] = 'D';
1944                 break;
1945         case REQ_OP_SECURE_ERASE:
1946                 rwbs[i++] = 'D';
1947                 rwbs[i++] = 'E';
1948                 break;
1949         case REQ_OP_FLUSH:
1950                 rwbs[i++] = 'F';
1951                 break;
1952         case REQ_OP_READ:
1953                 rwbs[i++] = 'R';
1954                 break;
1955         default:
1956                 rwbs[i++] = 'N';
1957         }
1958 
1959         if (op & REQ_FUA)
1960                 rwbs[i++] = 'F';
1961         if (op & REQ_RAHEAD)
1962                 rwbs[i++] = 'A';
1963         if (op & REQ_SYNC)
1964                 rwbs[i++] = 'S';
1965         if (op & REQ_META)
1966                 rwbs[i++] = 'M';
1967 
1968         rwbs[i] = '\0';
1969 }
1970 EXPORT_SYMBOL_GPL(blk_fill_rwbs);
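
/*
 * A worked example of the encoding above (kernel context, sketch
 * only); rwbs_demo() is hypothetical and the REQ_* flags come from
 * <linux/blk_types.h>:
 */
static void rwbs_demo(void)
{
        char rwbs[8];   /* RWBS_LEN in <trace/events/block.h> is 8 */

        blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_META, 4096);
        /* rwbs now reads "WSM": Write, Sync, Meta */
        blk_fill_rwbs(rwbs, REQ_OP_READ | REQ_RAHEAD, 4096);
        /* rwbs now reads "RA": Read, Read-ahead */
}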
1971 
1972 #endif /* CONFIG_EVENT_TRACING */
1973 
