/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifndef CONFIG_NONSTATIC_KERNEL
void __init reserve_kdump_trampoline(void)
{
        memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
        unsigned int *p = (unsigned int *)addr;

        /* The maximum range of a single instruction branch is the current
         * instruction's address + (32 MB - 4) bytes. For the trampoline we
         * need to branch to current address + 32 MB. So we insert a nop at
         * the trampoline address, then the next instruction (+ 4 bytes)
         * does a branch to (32 MB - 4). The net effect is that when we
         * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
         * two instructions it doesn't require any registers.
         */
        patch_instruction(p, PPC_INST_NOP);
        patch_branch(++p, addr + PHYSICAL_START, 0);
}

void __init setup_kdump_trampoline(void)
{
        unsigned long i;

        DBG(" -> setup_kdump_trampoline()\n");

        /* Patch a two-instruction (8 byte) trampoline at every slot in
         * the exception vector region. */
        for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8)
                create_trampoline(i);

#ifdef CONFIG_PPC_PSERIES
        create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
        create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

        DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_NONSTATIC_KERNEL */
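/*
 * Illustration only (a sketch, not part of the build): assuming
 * PHYSICAL_START is 32 MB (0x2000000), create_trampoline(0x100)
 * patches in:
 *
 *      0x100:  nop
 *      0x104:  b 0x2000100     (relative offset 0x1fffffc = 32 MB - 4)
 *
 * An exception vectoring to 0x100 falls through the nop and lands at
 * 0x100 + 32 MB in the relocated kernel; the branch at 0x104 sits at
 * exactly the maximum forward reach of a single relative branch.
 */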
/* Returns the number of bytes copied, or -EFAULT if the user copy faults. */
static ssize_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
                                 unsigned long offset, int userbuf)
{
        if (userbuf) {
                if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
                        return -EFAULT;
        } else
                memcpy(buf, (vaddr + offset), csize);

        return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *      space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on @pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *      otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                         size_t csize, unsigned long offset, int userbuf)
{
        void *vaddr;
        phys_addr_t paddr;

        if (!csize)
                return 0;

        csize = min_t(size_t, csize, PAGE_SIZE);
        paddr = (phys_addr_t)pfn << PAGE_SHIFT;

        if (memblock_is_region_memory(paddr, csize)) {
                /* The page is covered by the linear mapping, no ioremap needed */
                vaddr = __va(paddr);
                csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
        } else {
                /* The page is outside the linear mapping, set up a
                 * temporary mapping for the duration of the copy. */
                vaddr = __ioremap(paddr, PAGE_SIZE, 0);
                csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
                iounmap(vaddr);
        }

        return csize;
}

#ifdef CONFIG_PPC_RTAS
/*
 * The crashkernel region will almost always overlap the RTAS region, so
 * we have to be careful when shrinking the crashkernel region.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
        unsigned long addr;
        const __be32 *basep, *sizep;
        unsigned int rtas_start = 0, rtas_end = 0;

        basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
        sizep = of_get_property(rtas.dev, "rtas-size", NULL);

        if (basep && sizep) {
                rtas_start = be32_to_cpup(basep);
                rtas_end = rtas_start + be32_to_cpup(sizep);
        }

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                /* Does this page overlap with the RTAS region?
                 * rtas_end is exclusive, so a page starting there is free. */
                if (addr < rtas_end && ((addr + PAGE_SIZE) > rtas_start))
                        continue;

                free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
        }
}
#endif /* CONFIG_PPC_RTAS */
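/*
 * Worked example for the overlap test above (hypothetical values, not
 * from the source): with 64 KB pages and RTAS at [0x1000000, 0x1080000),
 * the page at 0xff0000 ends exactly at rtas_start and is freed, the
 * pages from 0x1000000 through 0x1070000 start below rtas_end and are
 * skipped, and the page at 0x1080000 starts at rtas_end and is freed.
 */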