arch/ia64/kernel/crash.c


DEFINITIONS

This source file includes the following definitions.
  1. crash_save_this_cpu
  2. kdump_wait_cpu_freeze
  3. machine_crash_shutdown
  4. machine_kdump_on_init
  5. kdump_cpu_freeze
  6. kdump_init_notifier
  7. machine_crash_setup

// SPDX-License-Identifier: GPL-2.0
/*
 * arch/ia64/kernel/crash.c
 *
 * Architecture specific (ia64) functions for kexec based crash dumps.
 *
 * Created by: Khalid Aziz <khalid.aziz@hp.com>
 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
 * Copyright (C) 2005 Intel Corp        Zou Nan hai <nanhai.zou@intel.com>
 *
 */
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <linux/kexec.h>
#include <linux/elfcore.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/kdebug.h>

#include <asm/mca.h>

int kdump_status[NR_CPUS];
static atomic_t kdump_cpu_frozen;
atomic_t kdump_in_progress;
static int kdump_freeze_monarch;
static int kdump_on_init = 1;
static int kdump_on_fatal_mca = 1;

extern void ia64_dump_cpu_regs(void *);

static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);

void
crash_save_this_cpu(void)
{
	void *buf;
	unsigned long cfm, sof, sol;

	int cpu = smp_processor_id();
	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);

	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
	memset(prstatus, 0, sizeof(*prstatus));
	prstatus->pr_pid = current->pid;

	ia64_dump_cpu_regs(dst);
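	/*
	 * In the ia64 elf_gregset_t layout dst[43] is ar.cfm and dst[46]
	 * is ar.bsp.  CFM encodes the current frame size (sof, bits 0-6)
	 * and the number of locals (sol, bits 7-13); the saved ar.bsp is
	 * then advanced by (sof - sol) register slots, i.e. past the
	 * frame's output registers, with ia64_rse_skip_regs() accounting
	 * for RNaT collection slots in the register backing store.
	 */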
	cfm = dst[43];
	sol = (cfm >> 7) & 0x7f;
	sof = cfm & 0x7f;
	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
			sof - sol);

	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, prstatus,
			sizeof(*prstatus));
	final_note(buf);
}
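
/*
 * Illustrative sketch, not part of this file's build: one way a consumer
 * such as a userspace dump parser might walk the per-cpu note buffer that
 * crash_save_this_cpu() fills.  It assumes the usual ELF note framing
 * written by append_elf_note() and final_note(): an Elf64_Nhdr, then the
 * name and the descriptor each padded to 4 bytes, terminated by a zeroed
 * header.  The function name find_prstatus_note() and the standalone
 * <elf.h> usage are illustrative assumptions, not existing kernel
 * interfaces.
 */
#include <elf.h>	/* Elf64_Nhdr, NT_PRSTATUS (userspace header) */
#include <stddef.h>
#include <string.h>

static const void *find_prstatus_note(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	const unsigned char *end = p + len;

	while (p + sizeof(Elf64_Nhdr) <= end) {
		const Elf64_Nhdr *nhdr = (const Elf64_Nhdr *)p;
		const unsigned char *name = p + sizeof(*nhdr);
		const unsigned char *desc = name + ((nhdr->n_namesz + 3) & ~3u);

		if (nhdr->n_namesz == 0 && nhdr->n_descsz == 0)
			break;		/* terminator written by final_note() */
		if (nhdr->n_type == NT_PRSTATUS &&
		    nhdr->n_namesz >= 5 && !memcmp(name, "CORE", 5))
			return desc;	/* the saved struct elf_prstatus */
		p = desc + ((nhdr->n_descsz + 3) & ~3u);
	}
	return NULL;
}
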
#ifdef CONFIG_SMP
static int
kdump_wait_cpu_freeze(void)
{
	int cpu_num = num_online_cpus() - 1;
	int timeout = 1000;
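	/*
	 * Poll for up to roughly one second (1000 iterations of
	 * udelay(1000)) for every other online cpu to reach
	 * kdump_cpu_freeze().
	 */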
	while (timeout-- > 0) {
		if (atomic_read(&kdump_cpu_frozen) == cpu_num)
			return 0;
		udelay(1000);
	}
	return 1;
}
#endif

void
machine_crash_shutdown(struct pt_regs *pt)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	kexec_disable_iosapic();
#ifdef CONFIG_SMP
	/*
	 * If kdump_on_init is set and an INIT is asserted here, kdump will
	 * be started again via the INIT monarch.
	 */
	local_irq_disable();
	ia64_set_psr_mc();	/* mask MCA/INIT */
	if (atomic_inc_return(&kdump_in_progress) != 1)
		unw_init_running(kdump_cpu_freeze, NULL);

	/*
	 * Now this cpu is ready for kdump.
	 * Stop all the others by IPI or INIT.  They could receive an INIT
	 * from outside and one of them might be the INIT monarch, but the
	 * only thing they have to do is fall into kdump_cpu_freeze().
	 *
	 * If an INIT is asserted here:
	 * - All receivers might be slaves, since some of the cpus could
	 *   already be frozen and INIT might be masked on the monarch.
	 *   In this case, all slaves will be frozen soon, since
	 *   kdump_in_progress will let them into DIE_INIT_SLAVE_LEAVE.
	 * - One might be a monarch, but the INIT rendezvous will fail,
	 *   since at least this cpu already has INIT masked and so never
	 *   joins the rendezvous.  In this case, all slaves and the
	 *   monarch will be frozen soon with no wait, since the INIT
	 *   rendezvous is skipped by kdump_in_progress.
	 */
	kdump_smp_send_stop();
	/* not all cpus respond to the IPI; send an INIT to freeze them */
	if (kdump_wait_cpu_freeze()) {
		kdump_smp_send_init();
		/* wait again; if possible, do not proceed until they freeze */
		kdump_wait_cpu_freeze();
	}
#endif
}

static void
machine_kdump_on_init(void)
{
	crash_save_vmcoreinfo();
	local_irq_disable();
	kexec_disable_iosapic();
	machine_kexec(ia64_kimage);
}

void
kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
{
	int cpuid;

	local_irq_disable();
	cpuid = smp_processor_id();
	crash_save_this_cpu();
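	/*
	 * Point thread.ksp at this task's switch_stack, as located by the
	 * unwinder, so post-mortem tools can unwind the frozen task.  The
	 * 16-byte offset below the switch_stack is the usual ia64 scratch
	 * area that thread.ksp is expected to account for.
	 */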
	current->thread.ksp = (__u64)info->sw - 16;

	ia64_set_psr_mc();	/* mask MCA/INIT and stop reentrance */

	atomic_inc(&kdump_cpu_frozen);
	kdump_status[cpuid] = 1;
	mb();
	for (;;)
		cpu_relax();
}

static int
kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
{
	struct ia64_mca_notify_die *nd;
	struct die_args *args = data;

	if (atomic_read(&kdump_in_progress)) {
		switch (val) {
		case DIE_INIT_MONARCH_LEAVE:
			if (!kdump_freeze_monarch)
				break;
			/* fall through */
		case DIE_INIT_SLAVE_LEAVE:
		case DIE_INIT_MONARCH_ENTER:
		case DIE_MCA_RENDZVOUS_LEAVE:
			unw_init_running(kdump_cpu_freeze, NULL);
			break;
		}
	}

	if (!kdump_on_init && !kdump_on_fatal_mca)
		return NOTIFY_DONE;

	if (!ia64_kimage) {
		if (val == DIE_INIT_MONARCH_LEAVE)
			ia64_mca_printk(KERN_NOTICE
					"%s: kdump not configured\n",
					__func__);
		return NOTIFY_DONE;
	}

	if (val != DIE_INIT_MONARCH_LEAVE &&
	    val != DIE_INIT_MONARCH_PROCESS &&
	    val != DIE_MCA_MONARCH_LEAVE)
		return NOTIFY_DONE;

	nd = (struct ia64_mca_notify_die *)args->err;

	switch (val) {
	case DIE_INIT_MONARCH_PROCESS:
		/* Reason code 1 means machine check rendezvous */
		if (kdump_on_init && (nd->sos->rv_rc != 1)) {
			if (atomic_inc_return(&kdump_in_progress) != 1)
				kdump_freeze_monarch = 1;
		}
		break;
	case DIE_INIT_MONARCH_LEAVE:
		/* Reason code 1 means machine check rendezvous */
		if (kdump_on_init && (nd->sos->rv_rc != 1))
			machine_kdump_on_init();
		break;
	case DIE_MCA_MONARCH_LEAVE:
		/* *(nd->data) indicates whether the MCA is recoverable */
		if (kdump_on_fatal_mca && !(*(nd->data))) {
			if (atomic_inc_return(&kdump_in_progress) == 1)
				machine_kdump_on_init();
			/* A fatal MCA while kdump is already in progress
			 * should never happen. */
		}
		break;
	}
	return NOTIFY_DONE;
}

#ifdef CONFIG_SYSCTL
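/*
 * Exposed (via the "kernel" directory registered below) as
 * /proc/sys/kernel/kdump_on_init and /proc/sys/kernel/kdump_on_fatal_mca,
 * so the INIT and fatal-MCA triggers can be toggled at runtime.
 */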
static struct ctl_table kdump_ctl_table[] = {
	{
		.procname = "kdump_on_init",
		.data = &kdump_on_init,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "kdump_on_fatal_mca",
		.data = &kdump_on_fatal_mca,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{ }
};

static struct ctl_table sys_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = kdump_ctl_table,
	},
	{ }
};
#endif

static int
machine_crash_setup(void)
{
	/* be notified before default_monarch_init_process */
	static struct notifier_block kdump_init_notifier_nb = {
		.notifier_call = kdump_init_notifier,
		.priority = 1,
	};
	int ret;

	ret = register_die_notifier(&kdump_init_notifier_nb);
	if (ret)
		return ret;
#ifdef CONFIG_SYSCTL
	register_sysctl_table(sys_table);
#endif
	return 0;
}

__initcall(machine_crash_setup);
