root/arch/x86/kernel/stacktrace.c


DEFINITIONS

This source file includes the following definitions:
  1. arch_stack_walk
  2. arch_stack_walk_reliable
  3. copy_stack_frame
  4. arch_stack_walk_user

/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

/*
 * Walk the stack of @task (or the one described by @regs) and hand each
 * return address to @consume_entry, stopping when the unwinder is done or
 * the callback returns false.
 */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                     struct task_struct *task, struct pt_regs *regs)
{
        struct unwind_state state;
        unsigned long addr;

        if (regs && !consume_entry(cookie, regs->ip, false))
                return;

        for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                if (!addr || !consume_entry(cookie, addr, false))
                        break;
        }
}
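
/*
 * Example (not part of this file): a minimal consume_entry callback in the
 * style of the generic code in kernel/stacktrace.c, matching the
 * three-argument stack_trace_consume_fn signature used above.  The names
 * below are illustrative rather than verbatim upstream.
 */
struct stacktrace_cookie {
        unsigned long   *store;         /* output buffer for return addresses */
        unsigned int    size;           /* capacity of the buffer */
        unsigned int    len;            /* entries recorded so far */
};

static bool stack_trace_consume_entry(void *cookie, unsigned long addr,
                                      bool reliable)
{
        struct stacktrace_cookie *c = cookie;

        if (c->len >= c->size)
                return false;           /* buffer full, stop the walk */
        c->store[c->len++] = addr;
        return c->len < c->size;        /* keep walking while space remains */
}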
  31 
/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
                             void *cookie, struct task_struct *task)
{
        struct unwind_state state;
        struct pt_regs *regs;
        unsigned long addr;

        for (unwind_start(&state, task, NULL, NULL);
             !unwind_done(&state) && !unwind_error(&state);
             unwind_next_frame(&state)) {

                regs = unwind_get_entry_regs(&state, NULL);
                if (regs) {
                        /* Success path for user tasks */
                        if (user_mode(regs))
                                return 0;

                        /*
                         * Kernel mode registers on the stack indicate an
                         * in-kernel interrupt or exception (e.g., preemption
                         * or a page fault), which can make frame pointers
                         * unreliable.
                         */
                        if (IS_ENABLED(CONFIG_FRAME_POINTER))
                                return -EINVAL;
                }

                addr = unwind_get_return_address(&state);

                /*
                 * A NULL or invalid return address probably means there's some
                 * generated code which __kernel_text_address() doesn't know
                 * about.
                 */
                if (!addr)
                        return -EINVAL;

                if (!consume_entry(cookie, addr, false))
                        return -EINVAL;
        }

        /* Check for stack corruption */
        if (unwind_error(&state))
                return -EINVAL;

        /*
         * Success path for non-user tasks, i.e. kthreads and idle tasks.  A
         * user task reaching this point never hit user-mode registers, so
         * its trace cannot be trusted.
         */
        if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
                return -EINVAL;

        return 0;
}
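
/*
 * Example (not part of this file): a sketch of how a caller along the lines
 * of stack_trace_save_tsk_reliable() in kernel/stacktrace.c might drive the
 * reliable walk, reusing the illustrative cookie and callback sketched after
 * arch_stack_walk() above.  Errors propagate unchanged; success becomes the
 * number of recorded entries.
 */
static int save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
                             unsigned int size)
{
        struct stacktrace_cookie c = { .store = store, .size = size };
        int ret;

        /*
         * A task without a stack (e.g. one that has already exited) is
         * trivially "reliable": there is nothing to trace.
         */
        if (!try_get_task_stack(tsk))
                return 0;

        ret = arch_stack_walk_reliable(stack_trace_consume_entry, &c, tsk);
        put_task_stack(tsk);
        return ret ? ret : c.len;
}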
  90 
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * With frame pointers, a userspace stack frame starts with the caller's
 * saved frame pointer, immediately followed by the return address; this
 * struct mirrors that layout.
 */
struct stack_frame_user {
        const void __user       *next_fp;
        unsigned long           ret_addr;
};

/*
 * Copy one frame from user memory without taking (and possibly sleeping
 * on) a page fault.  Returns 1 on success, 0 on failure.
 */
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
        int ret;

        if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
                return 0;

        ret = 1;
        pagefault_disable();
        if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
                ret = 0;
        pagefault_enable();

        return ret;
}
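
/*
 * Example (not part of this file): on kernels that provide
 * copy_from_user_nofault() (an assumption; that helper appeared well after
 * this code was written), the pagefault_disable()/__copy_from_user_inatomic()
 * pattern above can be written more compactly.  The function name here is
 * made up for illustration.
 */
static int copy_stack_frame_nofault(const void __user *fp,
                                    struct stack_frame_user *frame)
{
        if (!access_ok(fp, sizeof(*frame)))
                return 0;

        /* copy_from_user_nofault() returns 0 on success, -EFAULT on failure */
        return copy_from_user_nofault(frame, fp, sizeof(*frame)) ? 0 : 1;
}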
 114 
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
                          const struct pt_regs *regs)
{
        const void __user *fp = (const void __user *)regs->bp;

        if (!consume_entry(cookie, regs->ip, false))
                return;

        while (1) {
                struct stack_frame_user frame;

                frame.next_fp = NULL;
                frame.ret_addr = 0;
                if (!copy_stack_frame(fp, &frame))
                        break;
                /* A frame pointer below the entry stack pointer is bogus */
                if ((unsigned long)fp < regs->sp)
                        break;
                if (!frame.ret_addr)
                        break;
                if (!consume_entry(cookie, frame.ret_addr, false))
                        break;
                fp = frame.next_fp;
        }
}
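
/*
 * Example (not part of this file): a sketch of the generic entry point that
 * drives this walker, modeled on stack_trace_save_user() in
 * kernel/stacktrace.c and reusing the illustrative cookie and callback from
 * above.  Kernel threads are skipped because they have no user stack.
 */
static unsigned int save_user_stack(unsigned long *store, unsigned int size)
{
        struct stacktrace_cookie c = { .store = store, .size = size };

        if (current->flags & PF_KTHREAD)
                return 0;

        arch_stack_walk_user(stack_trace_consume_entry, &c,
                             task_pt_regs(current));
        return c.len;
}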
