arch/sh/kernel/unwinder.c

DEFINITIONS

This source file includes the following definitions:
  1. select_unwinder
  2. unwinder_enqueue
  3. unwinder_register
  4. unwind_stack

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009  Matt Fleming
 *
 * Based, in part, on kernel/time/clocksource.c.
 *
 * This file provides arbitration code for stack unwinders.
 *
 * Multiple stack unwinders can be available on a system, usually with
 * the most accurate unwinder being the currently active one.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <linux/atomic.h>

/*
 * This is the most basic stack unwinder an architecture can
 * provide. For architectures without reliable frame pointers, e.g.
 * RISC CPUs, it can be implemented by looking through the stack for
 * addresses that lie within the kernel text section.
 *
 * Other CPUs, e.g. x86, can use their frame pointer register to
 * construct more accurate stack traces.
 */
static struct list_head unwinder_list;
static struct unwinder stack_reader = {
	.name = "stack-reader",
	.dump = stack_reader_dump,
	.rating = 50,
	.list = {
		.next = &unwinder_list,
		.prev = &unwinder_list,
	},
};
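
/*
 * Illustrative sketch only, not part of this file: stack_reader_dump()
 * is defined elsewhere and declared via <asm/unwinder.h>. A minimal
 * dumper of the style described above might scan the stack for kernel
 * text addresses roughly as below, assuming kstack_end(),
 * __kernel_text_address() and an ops->address() callback are available.
 */
#if 0
static void example_stack_reader_dump(struct task_struct *task,
				      struct pt_regs *regs,
				      unsigned long *sp,
				      const struct stacktrace_ops *ops,
				      void *data)
{
	while (!kstack_end(sp)) {
		unsigned long addr = *sp++;

		/*
		 * Anything lying in the kernel text section may be a
		 * return address; report it, flagged unreliable (0)
		 * since this is only a guess.
		 */
		if (__kernel_text_address(addr))
			ops->address(data, addr, 0);
	}
}
#endif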

/*
 * "curr_unwinder" points to the stack unwinder currently in use. This
 * is the unwinder with the highest rating.
 *
 * "unwinder_list" is a linked-list of all available unwinders, sorted
 * by rating.
 *
 * All modifications of "curr_unwinder" and "unwinder_list" must be
 * performed whilst holding "unwinder_lock".
 */
static struct unwinder *curr_unwinder = &stack_reader;

static struct list_head unwinder_list = {
	.next = &stack_reader.list,
	.prev = &stack_reader.list,
};
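
/*
 * Illustrative note: the static initializers above hand-build a
 * two-node circular list, unwinder_list <-> stack_reader.list, so the
 * built-in unwinder is available before any initcalls run. It is
 * equivalent to doing the following at run time, before any other
 * unwinder registers:
 */
#if 0
	INIT_LIST_HEAD(&unwinder_list);
	list_add(&stack_reader.list, &unwinder_list);
#endif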

static DEFINE_SPINLOCK(unwinder_lock);

/**
 * select_unwinder - Select the best registered stack unwinder.
 *
 * Private function. Must hold unwinder_lock when called.
 *
 * Select the stack unwinder with the best rating. This is useful for
 * setting up curr_unwinder.
 */
static struct unwinder *select_unwinder(void)
{
	struct unwinder *best;

	if (list_empty(&unwinder_list))
		return NULL;

	best = list_entry(unwinder_list.next, struct unwinder, list);
	if (best == curr_unwinder)
		return NULL;

	return best;
}
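
/*
 * For example, with the list sorted as [dwarf (150), stack-reader (50)]
 * (a hypothetical higher-rated unwinder having registered), and
 * curr_unwinder still pointing at the stack-reader, select_unwinder()
 * returns the dwarf unwinder; were the dwarf unwinder already current,
 * NULL is returned to signal that no switch is needed.
 */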

/*
 * Enqueue the stack unwinder sorted by rating.
 */
static int unwinder_enqueue(struct unwinder *ops)
{
	struct list_head *tmp, *entry = &unwinder_list;

	list_for_each(tmp, &unwinder_list) {
		struct unwinder *o;

		o = list_entry(tmp, struct unwinder, list);
		if (o == ops)
			return -EBUSY;
		/* Keep track of where to insert */
		if (o->rating >= ops->rating)
			entry = tmp;
	}
	list_add(&ops->list, entry);

	return 0;
}
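
/*
 * Worked example: starting from [stack-reader (50)], enqueueing an
 * unwinder rated 150 finds no entry rated >= 150, so "entry" stays at
 * the list head and list_add() places it first: [150, 50]. Enqueueing
 * one rated 25 walks past both nodes, leaving "entry" at the 50-rated
 * node, so it is inserted last: [150, 50, 25]. The list thus stays
 * sorted highest-rating-first, which is what lets select_unwinder()
 * simply take the head.
 */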

/**
 * unwinder_register - install a new stack unwinder
 * @u: unwinder to be registered
 *
 * Install the new stack unwinder on the unwinder list, which is sorted
 * by rating.
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int unwinder_register(struct unwinder *u)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&unwinder_lock, flags);
	ret = unwinder_enqueue(u);
	if (!ret) {
		struct unwinder *best = select_unwinder();

		/*
		 * select_unwinder() returns NULL when the best-rated
		 * unwinder is already current; only switch when a
		 * better one has turned up, so that registering a
		 * lower-rated unwinder cannot clear curr_unwinder.
		 */
		if (best)
			curr_unwinder = best;
	}
	spin_unlock_irqrestore(&unwinder_lock, flags);

	return ret;
}
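
/*
 * Illustrative sketch only: how an arch unwinder might register itself
 * at boot. The names below (example_dwarf_unwinder, example_dwarf_dump)
 * are made up for illustration; on sh, the DWARF unwinder registers
 * itself in much this way with a rating of 150.
 */
#if 0
static struct unwinder example_dwarf_unwinder = {
	.name	= "example-dwarf-unwinder",
	.dump	= example_dwarf_dump,	/* hypothetical dump callback */
	.rating	= 150,
};

static int __init example_unwinder_init(void)
{
	/*
	 * Outrates the built-in stack-reader (50), so on success
	 * select_unwinder() makes it the current unwinder.
	 */
	return unwinder_register(&example_dwarf_unwinder);
}
early_initcall(example_unwinder_init);
#endif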

/*
 * Set (e.g. by the arch's fault/BUG handling path) when
 * curr_unwinder->dump() has faulted, so that the next call to
 * unwind_stack() can downgrade to a lower-rated unwinder.
 */
int unwinder_faulted = 0;

/*
 * Unwind the call stack and pass information to the stacktrace_ops
 * functions. Also handle the case where we need to switch to a new
 * stack dumper because the current one faulted unexpectedly.
 */
void unwind_stack(struct task_struct *task, struct pt_regs *regs,
		  unsigned long *sp, const struct stacktrace_ops *ops,
		  void *data)
{
	unsigned long flags;

	/*
	 * The problem with unwinders with high ratings is that they are
	 * inherently more complicated than the simple ones with lower
	 * ratings. We are therefore more likely to fault in the
	 * complicated ones, e.g. hitting BUG()s. If we fault in the
	 * code for the current stack unwinder we try to downgrade to
	 * one with a lower rating.
	 *
	 * Hopefully this will give us a semi-reliable stacktrace so we
	 * can diagnose why curr_unwinder->dump() faulted.
	 */
	if (unwinder_faulted) {
		spin_lock_irqsave(&unwinder_lock, flags);

		/* Make sure no one beat us to changing the unwinder */
		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
			list_del(&curr_unwinder->list);
			curr_unwinder = select_unwinder();

			unwinder_faulted = 0;
		}

		spin_unlock_irqrestore(&unwinder_lock, flags);
	}

	curr_unwinder->dump(task, regs, sp, ops, data);
}
EXPORT_SYMBOL_GPL(unwind_stack);
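
/*
 * Illustrative sketch only: a minimal caller. This assumes sh's
 * struct stacktrace_ops provides an address() callback taking
 * (data, address, reliable), as the dumpers above expect; the real
 * callers live in e.g. dumpstack.c and stacktrace.c.
 */
#if 0
static void example_address(void *data, unsigned long addr, int reliable)
{
	printk("%s[<%08lx>] %pS\n", reliable ? "" : "? ", addr, (void *)addr);
}

static const struct stacktrace_ops example_ops = {
	.address = example_address,
};

static void example_show_trace(void)
{
	unsigned long stack;

	/* Unwind the current task, starting near this stack slot. */
	unwind_stack(current, NULL, &stack, &example_ops, NULL);
}
#endif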
