kernel/kcov.c

DEFINITIONS

This source file includes the following definitions:
  1. check_kcov_mode
  2. canonicalize_ip
  3. __sanitizer_cov_trace_pc
  4. write_comp_data
  5. __sanitizer_cov_trace_cmp1
  6. __sanitizer_cov_trace_cmp2
  7. __sanitizer_cov_trace_cmp4
  8. __sanitizer_cov_trace_cmp8
  9. __sanitizer_cov_trace_const_cmp1
  10. __sanitizer_cov_trace_const_cmp2
  11. __sanitizer_cov_trace_const_cmp4
  12. __sanitizer_cov_trace_const_cmp8
  13. __sanitizer_cov_trace_switch
  14. kcov_get
  15. kcov_put
  16. kcov_task_init
  17. kcov_task_exit
  18. kcov_mmap
  19. kcov_open
  20. kcov_close
  21. kcov_fault_in_area
  22. kcov_ioctl_locked
  23. kcov_ioctl
  24. kcov_init

// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <asm/setup.h>

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *      KCOV_TRACE_PC - to trace only the PCs
 *      or
 *      KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is allowed).
 */
struct kcov {
        /*
         * Reference counter. We keep one for:
         *  - opened file descriptor
         *  - task with enabled coverage (we can't unwire it from another task)
         */
        refcount_t              refcount;
        /* The lock protects mode, size, area and t. */
        spinlock_t              lock;
        enum kcov_mode          mode;
        /* Size of arena (in longs for KCOV_MODE_TRACE). */
        unsigned                size;
        /* Coverage buffer shared with user space. */
        void                    *area;
        /* Task for which we collect coverage, or NULL. */
        struct task_struct      *t;
};
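
/*
 * A minimal user-space sketch of the sequence above, along the lines of the
 * example in Documentation/dev-tools/kcov.rst (COVER_SIZE is an arbitrary
 * illustrative buffer size; error handling omitted):
 *
 *      int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *      ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *      unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *                                  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *      ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *      __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *      ... run the syscall under test ...
 *      unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *      for (unsigned long i = 0; i < n; i++)
 *              printf("0x%lx\n", cover[i + 1]);
 *      ioctl(fd, KCOV_DISABLE, 0);
 */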

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
        unsigned int mode;

        /*
         * We are interested in code coverage as a function of syscall inputs,
         * so we ignore code executed in interrupts.
         */
        if (!in_task())
                return false;
        mode = READ_ONCE(t->kcov_mode);
        /*
         * There is some code that runs in interrupts but for which
         * in_interrupt() returns false (e.g. preempt_schedule_irq()).
         * READ_ONCE()/barrier() effectively provides load-acquire wrt
         * interrupts, there are paired barrier()/WRITE_ONCE() in
         * kcov_ioctl_locked().
         */
        barrier();
        return mode == needed_mode;
}

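/*
 * With CONFIG_RANDOMIZE_BASE the recorded PCs would differ from boot to
 * boot; subtracting kaslr_offset() keeps reported coverage stable.
 */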
static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
        ip -= kaslr_offset();
#endif
        return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
        struct task_struct *t;
        unsigned long *area;
        unsigned long ip = canonicalize_ip(_RET_IP_);
        unsigned long pos;

        t = current;
        if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
                return;

        area = t->kcov_area;
        /* The first 64-bit word is the number of subsequent PCs. */
        pos = READ_ONCE(area[0]) + 1;
        if (likely(pos < t->kcov_size)) {
                area[pos] = ip;
                WRITE_ONCE(area[0], pos);
        }
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
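
/*
 * Resulting buffer layout in KCOV_MODE_TRACE_PC, as maintained above:
 *
 *      area[0]         number of PCs stored so far
 *      area[1..N]      the collected PCs, one unsigned long each
 *
 * User space is expected to reset area[0] to 0 before the code under test
 * runs and to read entries 1..N afterwards (see the sketch near struct kcov).
 */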

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
        struct task_struct *t;
        u64 *area;
        u64 count, start_index, end_pos, max_pos;

        t = current;
        if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
                return;

        ip = canonicalize_ip(ip);

        /*
         * We write all comparison arguments and types as u64.
         * The buffer was allocated for t->kcov_size unsigned longs.
         */
        area = (u64 *)t->kcov_area;
        max_pos = t->kcov_size * sizeof(unsigned long);

        count = READ_ONCE(area[0]);

        /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
        start_index = 1 + count * KCOV_WORDS_PER_CMP;
        end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
        if (likely(end_pos <= max_pos)) {
                area[start_index] = type;
                area[start_index + 1] = arg1;
                area[start_index + 2] = arg2;
                area[start_index + 3] = ip;
                WRITE_ONCE(area[0], count + 1);
        }
}
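
/*
 * Resulting buffer layout in KCOV_MODE_TRACE_CMP, as maintained above:
 *
 *      area[0]                 number of records stored so far
 *      area[1 + 4*k + 0]       type of record k: KCOV_CMP_SIZE(log2 of the
 *                              operand size in bytes), OR'ed with
 *                              KCOV_CMP_CONST when one operand is a constant
 *      area[1 + 4*k + 1]       first comparison operand
 *      area[1 + 4*k + 2]       second comparison operand
 *      area[1 + 4*k + 3]       canonicalized return address of the comparison
 */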

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
        write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
        write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
        write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
        write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
        write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
                        _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
        write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
                        _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
        write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
                        _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
        write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
                        _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

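/*
 * The compiler passes each switch statement as a cases[] array laid out as
 * the loop below consumes it:
 *
 *      cases[0]        number of case values
 *      cases[1]        size of the switch operand, in bits (8/16/32/64)
 *      cases[2..]      the case values themselves
 *
 * Each case value is recorded as a "const" comparison against val.
 */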
void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
        u64 i;
        u64 count = cases[0];
        u64 size = cases[1];
        u64 type = KCOV_CMP_CONST;

        switch (size) {
        case 8:
                type |= KCOV_CMP_SIZE(0);
                break;
        case 16:
                type |= KCOV_CMP_SIZE(1);
                break;
        case 32:
                type |= KCOV_CMP_SIZE(2);
                break;
        case 64:
                type |= KCOV_CMP_SIZE(3);
                break;
        default:
                return;
        }
        for (i = 0; i < count; i++)
                write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_get(struct kcov *kcov)
{
        refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
        if (refcount_dec_and_test(&kcov->refcount)) {
                vfree(kcov->area);
                kfree(kcov);
        }
}

void kcov_task_init(struct task_struct *t)
{
        WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
        barrier();
        t->kcov_size = 0;
        t->kcov_area = NULL;
        t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
        struct kcov *kcov;

        kcov = t->kcov;
        if (kcov == NULL)
                return;
        spin_lock(&kcov->lock);
        if (WARN_ON(kcov->t != t)) {
                spin_unlock(&kcov->lock);
                return;
        }
        /* Just to not leave dangling references behind. */
        kcov_task_init(t);
        kcov->t = NULL;
        kcov->mode = KCOV_MODE_INIT;
        spin_unlock(&kcov->lock);
        kcov_put(kcov);
}

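/*
 * Back the mapping with a fresh vmalloc_user() area and insert its pages
 * into the caller's VMA. The same area is later written through
 * t->kcov_area by __sanitizer_cov_trace_pc()/write_comp_data(), so kernel
 * and user space share the coverage buffer without copying.
 */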
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
        int res = 0;
        void *area;
        struct kcov *kcov = vma->vm_file->private_data;
        unsigned long size, off;
        struct page *page;

        area = vmalloc_user(vma->vm_end - vma->vm_start);
        if (!area)
                return -ENOMEM;

        spin_lock(&kcov->lock);
        size = kcov->size * sizeof(unsigned long);
        if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
            vma->vm_end - vma->vm_start != size) {
                res = -EINVAL;
                goto exit;
        }
        if (!kcov->area) {
                kcov->area = area;
                vma->vm_flags |= VM_DONTEXPAND;
                spin_unlock(&kcov->lock);
                for (off = 0; off < size; off += PAGE_SIZE) {
                        page = vmalloc_to_page(kcov->area + off);
                        if (vm_insert_page(vma, vma->vm_start + off, page))
                                WARN_ONCE(1, "vm_insert_page() failed");
                }
                return 0;
        }
exit:
        spin_unlock(&kcov->lock);
        vfree(area);
        return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
        struct kcov *kcov;

        kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
        if (!kcov)
                return -ENOMEM;
        kcov->mode = KCOV_MODE_DISABLED;
        refcount_set(&kcov->refcount, 1);
        spin_lock_init(&kcov->lock);
        filep->private_data = kcov;
        return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
        kcov_put(filep->private_data);
        return 0;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
        unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
        unsigned long *area = kcov->area;
        unsigned long offset;

        for (offset = 0; offset < kcov->size; offset += stride)
                READ_ONCE(area[offset]);
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
                             unsigned long arg)
{
        struct task_struct *t;
        unsigned long size, unused;

        switch (cmd) {
        case KCOV_INIT_TRACE:
                /*
                 * Enable kcov in trace mode and setup buffer size.
                 * Must happen before anything else.
                 */
                if (kcov->mode != KCOV_MODE_DISABLED)
                        return -EBUSY;
                /*
                 * Size must be at least 2 to hold current position and one PC.
                 * Later we allocate size * sizeof(unsigned long) memory,
                 * that must not overflow.
                 */
                size = arg;
                if (size < 2 || size > INT_MAX / sizeof(unsigned long))
                        return -EINVAL;
                kcov->size = size;
                kcov->mode = KCOV_MODE_INIT;
                return 0;
        case KCOV_ENABLE:
                /*
                 * Enable coverage for the current task.
                 * At this point the user must have enabled trace mode and
                 * mmapped the file. Coverage collection is disabled only
                 * at task exit or voluntarily via KCOV_DISABLE. After that
                 * it can be enabled for another task.
                 */
                if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
                        return -EINVAL;
                t = current;
                if (kcov->t != NULL || t->kcov != NULL)
                        return -EBUSY;
                if (arg == KCOV_TRACE_PC)
                        kcov->mode = KCOV_MODE_TRACE_PC;
                else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
                        kcov->mode = KCOV_MODE_TRACE_CMP;
#else
                        return -ENOTSUPP;
#endif
                else
                        return -EINVAL;
                kcov_fault_in_area(kcov);
                /* Cache in task struct for performance. */
                t->kcov_size = kcov->size;
                t->kcov_area = kcov->area;
                /* See comment in check_kcov_mode(). */
                barrier();
                WRITE_ONCE(t->kcov_mode, kcov->mode);
                t->kcov = kcov;
                kcov->t = t;
                /* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
                kcov_get(kcov);
                return 0;
        case KCOV_DISABLE:
                /* Disable coverage for the current task. */
                unused = arg;
                if (unused != 0 || current->kcov != kcov)
                        return -EINVAL;
                t = current;
                if (WARN_ON(kcov->t != t))
                        return -EINVAL;
                kcov_task_init(t);
                kcov->t = NULL;
                kcov->mode = KCOV_MODE_INIT;
                kcov_put(kcov);
                return 0;
        default:
                return -ENOTTY;
        }
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
        struct kcov *kcov;
        int res;

        kcov = filep->private_data;
        spin_lock(&kcov->lock);
        res = kcov_ioctl_locked(kcov, cmd, arg);
        spin_unlock(&kcov->lock);
        return res;
}

static const struct file_operations kcov_fops = {
        .open           = kcov_open,
        .unlocked_ioctl = kcov_ioctl,
        .compat_ioctl   = kcov_ioctl,
        .mmap           = kcov_mmap,
        .release        = kcov_close,
};

static int __init kcov_init(void)
{
        /*
         * The kcov debugfs file won't ever get removed and thus,
         * there is no need to protect it against removal races. The
         * use of debugfs_create_file_unsafe() is actually safe here.
         */
        debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

        return 0;
}

device_initcall(kcov_init);
