root/kernel/kexec_core.c

DEFINITIONS

This source file includes the following definitions.
  1. kexec_should_crash
  2. kexec_crash_loaded
  3. sanity_check_segment_list
  4. do_kimage_alloc_init
  5. kimage_is_destination_range
  6. kimage_alloc_pages
  7. kimage_free_pages
  8. kimage_free_page_list
  9. kimage_alloc_normal_control_pages
  10. kimage_alloc_crash_control_pages
  11. kimage_alloc_control_pages
  12. kimage_crash_copy_vmcoreinfo
  13. kimage_add_entry
  14. kimage_set_destination
  15. kimage_add_page
  16. kimage_free_extra_pages
  17. kimage_terminate
  18. kimage_free_entry
  19. kimage_free
  20. kimage_dst_used
  21. kimage_alloc_page
  22. kimage_load_normal_segment
  23. kimage_load_crash_segment
  24. kimage_load_segment
  25. __crash_kexec
  26. crash_kexec
  27. crash_get_memory_size
  28. crash_free_reserved_phys_range
  29. crash_shrink_memory
  30. crash_save_cpu
  31. crash_notes_memory_init
  32. kernel_kexec
  33. arch_kexec_protect_crashkres
  34. arch_kexec_unprotect_crashkres

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * kexec.c - kexec system call core code.
   4  * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
   5  */
   6 
   7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8 
   9 #include <linux/capability.h>
  10 #include <linux/mm.h>
  11 #include <linux/file.h>
  12 #include <linux/slab.h>
  13 #include <linux/fs.h>
  14 #include <linux/kexec.h>
  15 #include <linux/mutex.h>
  16 #include <linux/list.h>
  17 #include <linux/highmem.h>
  18 #include <linux/syscalls.h>
  19 #include <linux/reboot.h>
  20 #include <linux/ioport.h>
  21 #include <linux/hardirq.h>
  22 #include <linux/elf.h>
  23 #include <linux/elfcore.h>
  24 #include <linux/utsname.h>
  25 #include <linux/numa.h>
  26 #include <linux/suspend.h>
  27 #include <linux/device.h>
  28 #include <linux/freezer.h>
  29 #include <linux/pm.h>
  30 #include <linux/cpu.h>
  31 #include <linux/uaccess.h>
  32 #include <linux/io.h>
  33 #include <linux/console.h>
  34 #include <linux/vmalloc.h>
  35 #include <linux/swap.h>
  36 #include <linux/syscore_ops.h>
  37 #include <linux/compiler.h>
  38 #include <linux/hugetlb.h>
  39 #include <linux/frame.h>
  40 
  41 #include <asm/page.h>
  42 #include <asm/sections.h>
  43 
  44 #include <crypto/hash.h>
  45 #include <crypto/sha.h>
  46 #include "kexec_internal.h"
  47 
  48 DEFINE_MUTEX(kexec_mutex);
  49 
  50 /* Per cpu memory for storing cpu states in case of system crash. */
  51 note_buf_t __percpu *crash_notes;
  52 
  53 /* Flag to indicate we are going to kexec a new kernel */
  54 bool kexec_in_progress = false;
  55 
  56 
  57 /* Location of the reserved area for the crash kernel */
  58 struct resource crashk_res = {
  59         .name  = "Crash kernel",
  60         .start = 0,
  61         .end   = 0,
  62         .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  63         .desc  = IORES_DESC_CRASH_KERNEL
  64 };
  65 struct resource crashk_low_res = {
  66         .name  = "Crash kernel",
  67         .start = 0,
  68         .end   = 0,
  69         .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  70         .desc  = IORES_DESC_CRASH_KERNEL
  71 };
  72 
  73 int kexec_should_crash(struct task_struct *p)
  74 {
  75         /*
  76          * If crash_kexec_post_notifiers is enabled, don't run
  77          * crash_kexec() here yet, which must be run after panic
  78          * notifiers in panic().
  79          */
  80         if (crash_kexec_post_notifiers)
  81                 return 0;
  82         /*
   83          * There are 4 panic() calls in the do_exit() path, each of which
  84          * corresponds to each of these 4 conditions.
  85          */
  86         if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
  87                 return 1;
  88         return 0;
  89 }
  90 
  91 int kexec_crash_loaded(void)
  92 {
  93         return !!kexec_crash_image;
  94 }
  95 EXPORT_SYMBOL_GPL(kexec_crash_loaded);
  96 
  97 /*
  98  * When kexec transitions to the new kernel there is a one-to-one
  99  * mapping between physical and virtual addresses.  On processors
 100  * where you can disable the MMU this is trivial, and easy.  For
 101  * others it is still a simple predictable page table to setup.
 102  *
 103  * In that environment kexec copies the new kernel to its final
 104  * resting place.  This means I can only support memory whose
 105  * physical address can fit in an unsigned long.  In particular
 106  * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 107  * If the assembly stub has more restrictive requirements
 108  * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 109  * defined more restrictively in <asm/kexec.h>.
 110  *
 111  * The code for the transition from the current kernel to the
  112  * new kernel is placed in the control_code_buffer, whose size
 113  * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 114  * page of memory is necessary, but some architectures require more.
 115  * Because this memory must be identity mapped in the transition from
 116  * virtual to physical addresses it must live in the range
 117  * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 118  * modifiable.
 119  *
 120  * The assembly stub in the control code buffer is passed a linked list
 121  * of descriptor pages detailing the source pages of the new kernel,
 122  * and the destination addresses of those source pages.  As this data
 123  * structure is not used in the context of the current OS, it must
 124  * be self-contained.
 125  *
 126  * The code has been made to work with highmem pages and will use a
 127  * destination page in its final resting place (if it happens
 128  * to allocate it).  The end product of this is that most of the
 129  * physical address space, and most of RAM can be used.
 130  *
 131  * Future directions include:
 132  *  - allocating a page table with the control code buffer identity
 133  *    mapped, to simplify machine_kexec and make kexec_on_panic more
 134  *    reliable.
 135  */
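
The descriptor list described above is easiest to see in miniature. The sketch
below approximates how a consumer such as an architecture's relocation stub
walks image->head; it is illustrative only, not the actual machine_kexec()
stub, and it assumes the IND_* flag values from <linux/kexec.h>
(IND_DESTINATION 0x1, IND_INDIRECTION 0x2, IND_DONE 0x4, IND_SOURCE 0x8):

/* Editorial sketch, not part of this file. */
static void example_walk_entries(kimage_entry_t *entry)
{
        unsigned long dest = 0;

        for (;;) {
                kimage_entry_t e = *entry++;

                if (e & IND_DESTINATION)        /* set the copy cursor */
                        dest = e & PAGE_MASK;
                else if (e & IND_INDIRECTION)   /* hop to next page of entries */
                        entry = phys_to_virt(e & PAGE_MASK);
                else if (e & IND_SOURCE) {      /* copy one page, advance cursor */
                        copy_page(phys_to_virt(dest),
                                  phys_to_virt(e & PAGE_MASK));
                        dest += PAGE_SIZE;
                } else if (e & IND_DONE)
                        break;                  /* end of the list */
        }
}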
 136 
 137 /*
  138  * KIMAGE_NO_DEST is an impossible destination address, used for
 139  * allocating pages whose destination address we do not care about.
 140  */
 141 #define KIMAGE_NO_DEST (-1UL)
 142 #define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
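
PAGE_COUNT() rounds a byte count up to whole pages. A worked example,
assuming a 4 KiB PAGE_SIZE (PAGE_SHIFT == 12):

/*
 * PAGE_COUNT(0)    == (0    + 4095) >> 12 == 0
 * PAGE_COUNT(1)    == (1    + 4095) >> 12 == 1
 * PAGE_COUNT(4096) == (4096 + 4095) >> 12 == 1
 * PAGE_COUNT(4097) == (4097 + 4095) >> 12 == 2
 */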
 143 
 144 static struct page *kimage_alloc_page(struct kimage *image,
 145                                        gfp_t gfp_mask,
 146                                        unsigned long dest);
 147 
 148 int sanity_check_segment_list(struct kimage *image)
 149 {
 150         int i;
 151         unsigned long nr_segments = image->nr_segments;
 152         unsigned long total_pages = 0;
 153         unsigned long nr_pages = totalram_pages();
 154 
 155         /*
 156          * Verify we have good destination addresses.  The caller is
 157          * responsible for making certain we don't attempt to load
 158          * the new image into invalid or reserved areas of RAM.  This
 159          * just verifies it is an address we can use.
 160          *
  161          * Since the kernel does everything in page-size chunks, ensure
  162          * the destination addresses are page aligned.  Too many
  163          * special cases crop up when we don't do this.  The most
 164          * insidious is getting overlapping destination addresses
 165          * simply because addresses are changed to page size
 166          * granularity.
 167          */
 168         for (i = 0; i < nr_segments; i++) {
 169                 unsigned long mstart, mend;
 170 
 171                 mstart = image->segment[i].mem;
 172                 mend   = mstart + image->segment[i].memsz;
 173                 if (mstart > mend)
 174                         return -EADDRNOTAVAIL;
 175                 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
 176                         return -EADDRNOTAVAIL;
 177                 if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
 178                         return -EADDRNOTAVAIL;
 179         }
 180 
 181         /* Verify our destination addresses do not overlap.
  182          * If we allowed overlapping destination addresses
  183          * through, very weird things could happen with no
  184          * easy explanation as one segment stomps on another.
 185          */
 186         for (i = 0; i < nr_segments; i++) {
 187                 unsigned long mstart, mend;
 188                 unsigned long j;
 189 
 190                 mstart = image->segment[i].mem;
 191                 mend   = mstart + image->segment[i].memsz;
 192                 for (j = 0; j < i; j++) {
 193                         unsigned long pstart, pend;
 194 
 195                         pstart = image->segment[j].mem;
 196                         pend   = pstart + image->segment[j].memsz;
 197                         /* Do the segments overlap ? */
 198                         if ((mend > pstart) && (mstart < pend))
 199                                 return -EINVAL;
 200                 }
 201         }
 202 
  203         /* Ensure our buffer sizes do not exceed our
  204          * memory sizes.  This should always be the case,
 205          * and it is easier to check up front than to be surprised
 206          * later on.
 207          */
 208         for (i = 0; i < nr_segments; i++) {
 209                 if (image->segment[i].bufsz > image->segment[i].memsz)
 210                         return -EINVAL;
 211         }
 212 
 213         /*
 214          * Verify that no more than half of memory will be consumed. If the
 215          * request from userspace is too large, a large amount of time will be
 216          * wasted allocating pages, which can cause a soft lockup.
 217          */
 218         for (i = 0; i < nr_segments; i++) {
 219                 if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
 220                         return -EINVAL;
 221 
 222                 total_pages += PAGE_COUNT(image->segment[i].memsz);
 223         }
 224 
 225         if (total_pages > nr_pages / 2)
 226                 return -EINVAL;
 227 
 228         /*
 229          * Verify we have good destination addresses.  Normally
 230          * the caller is responsible for making certain we don't
 231          * attempt to load the new image into invalid or reserved
 232          * areas of RAM.  But crash kernels are preloaded into a
 233          * reserved area of ram.  We must ensure the addresses
 234          * are in the reserved area otherwise preloading the
 235          * kernel could corrupt things.
 236          */
 237 
 238         if (image->type == KEXEC_TYPE_CRASH) {
 239                 for (i = 0; i < nr_segments; i++) {
 240                         unsigned long mstart, mend;
 241 
 242                         mstart = image->segment[i].mem;
 243                         mend = mstart + image->segment[i].memsz - 1;
 244                         /* Ensure we are within the crash kernel limits */
 245                         if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
 246                             (mend > phys_to_boot_phys(crashk_res.end)))
 247                                 return -EADDRNOTAVAIL;
 248                 }
 249         }
 250 
 251         return 0;
 252 }
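
The overlap test above treats each segment as the half-open range
[mem, mem + memsz). A worked example of the two interesting cases:

/*
 * Segments [0x1000, 0x3000) and [0x3000, 0x5000) merely touch: with
 * mstart == 0x3000, mend == 0x5000, pstart == 0x1000, pend == 0x3000,
 * (mend > pstart) && (mstart < pend) is true && false, so they pass.
 * Segments [0x1000, 0x3000) and [0x2000, 0x4000) share [0x2000, 0x3000):
 * the test is true && true and the load is rejected with -EINVAL.
 */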
 253 
 254 struct kimage *do_kimage_alloc_init(void)
 255 {
 256         struct kimage *image;
 257 
 258         /* Allocate a controlling structure */
 259         image = kzalloc(sizeof(*image), GFP_KERNEL);
 260         if (!image)
 261                 return NULL;
 262 
 263         image->head = 0;
 264         image->entry = &image->head;
 265         image->last_entry = &image->head;
 266         image->control_page = ~0; /* By default this does not apply */
 267         image->type = KEXEC_TYPE_DEFAULT;
 268 
 269         /* Initialize the list of control pages */
 270         INIT_LIST_HEAD(&image->control_pages);
 271 
 272         /* Initialize the list of destination pages */
 273         INIT_LIST_HEAD(&image->dest_pages);
 274 
 275         /* Initialize the list of unusable pages */
 276         INIT_LIST_HEAD(&image->unusable_pages);
 277 
 278         return image;
 279 }
 280 
 281 int kimage_is_destination_range(struct kimage *image,
 282                                         unsigned long start,
 283                                         unsigned long end)
 284 {
 285         unsigned long i;
 286 
 287         for (i = 0; i < image->nr_segments; i++) {
 288                 unsigned long mstart, mend;
 289 
 290                 mstart = image->segment[i].mem;
 291                 mend = mstart + image->segment[i].memsz;
 292                 if ((end > mstart) && (start < mend))
 293                         return 1;
 294         }
 295 
 296         return 0;
 297 }
 298 
 299 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 300 {
 301         struct page *pages;
 302 
 303         if (fatal_signal_pending(current))
 304                 return NULL;
 305         pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
 306         if (pages) {
 307                 unsigned int count, i;
 308 
 309                 pages->mapping = NULL;
 310                 set_page_private(pages, order);
 311                 count = 1 << order;
 312                 for (i = 0; i < count; i++)
 313                         SetPageReserved(pages + i);
 314 
 315                 arch_kexec_post_alloc_pages(page_address(pages), count,
 316                                             gfp_mask);
 317 
 318                 if (gfp_mask & __GFP_ZERO)
 319                         for (i = 0; i < count; i++)
 320                                 clear_highpage(pages + i);
 321         }
 322 
 323         return pages;
 324 }
 325 
 326 static void kimage_free_pages(struct page *page)
 327 {
 328         unsigned int order, count, i;
 329 
 330         order = page_private(page);
 331         count = 1 << order;
 332 
 333         arch_kexec_pre_free_pages(page_address(page), count);
 334 
 335         for (i = 0; i < count; i++)
 336                 ClearPageReserved(page + i);
 337         __free_pages(page, order);
 338 }
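
Note how the allocation order round-trips through the head page's private
field, so the free path never needs a size argument. A minimal sketch of the
pairing (example_order_roundtrip() is hypothetical, not part of this file):

/* Editorial sketch, not part of this file. */
static void example_order_roundtrip(void)
{
        struct page *p = kimage_alloc_pages(GFP_KERNEL, 3); /* 1 << 3 = 8 pages */

        if (p) {
                WARN_ON(page_private(p) != 3);  /* order stashed at alloc time */
                kimage_free_pages(p);           /* recovers the order itself */
        }
}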
 339 
 340 void kimage_free_page_list(struct list_head *list)
 341 {
 342         struct page *page, *next;
 343 
 344         list_for_each_entry_safe(page, next, list, lru) {
 345                 list_del(&page->lru);
 346                 kimage_free_pages(page);
 347         }
 348 }
 349 
 350 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 351                                                         unsigned int order)
 352 {
  353         /* Control pages are special: they are the intermediaries
 354          * that are needed while we copy the rest of the pages
 355          * to their final resting place.  As such they must
 356          * not conflict with either the destination addresses
 357          * or memory the kernel is already using.
 358          *
 359          * The only case where we really need more than one of
  360          * these is for architectures where we cannot disable
 361          * the MMU and must instead generate an identity mapped
 362          * page table for all of the memory.
 363          *
 364          * At worst this runs in O(N) of the image size.
 365          */
 366         struct list_head extra_pages;
 367         struct page *pages;
 368         unsigned int count;
 369 
 370         count = 1 << order;
 371         INIT_LIST_HEAD(&extra_pages);
 372 
 373         /* Loop while I can allocate a page and the page allocated
 374          * is a destination page.
 375          */
 376         do {
 377                 unsigned long pfn, epfn, addr, eaddr;
 378 
 379                 pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
 380                 if (!pages)
 381                         break;
 382                 pfn   = page_to_boot_pfn(pages);
 383                 epfn  = pfn + count;
 384                 addr  = pfn << PAGE_SHIFT;
 385                 eaddr = epfn << PAGE_SHIFT;
 386                 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
 387                               kimage_is_destination_range(image, addr, eaddr)) {
 388                         list_add(&pages->lru, &extra_pages);
 389                         pages = NULL;
 390                 }
 391         } while (!pages);
 392 
 393         if (pages) {
 394                 /* Remember the allocated page... */
 395                 list_add(&pages->lru, &image->control_pages);
 396 
  397                 /* Because the page is already in its destination
  398                  * location, we will never allocate another page at
 399                  * that address.  Therefore kimage_alloc_pages
 400                  * will not return it (again) and we don't need
 401                  * to give it an entry in image->segment[].
 402                  */
 403         }
 404         /* Deal with the destination pages I have inadvertently allocated.
 405          *
 406          * Ideally I would convert multi-page allocations into single
 407          * page allocations, and add everything to image->dest_pages.
 408          *
 409          * For now it is simpler to just free the pages.
 410          */
 411         kimage_free_page_list(&extra_pages);
 412 
 413         return pages;
 414 }
 415 
 416 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 417                                                       unsigned int order)
 418 {
  419         /* Control pages are special: they are the intermediaries
 420          * that are needed while we copy the rest of the pages
 421          * to their final resting place.  As such they must
 422          * not conflict with either the destination addresses
 423          * or memory the kernel is already using.
 424          *
  425          * Control pages are also the only pages we must allocate
 426          * when loading a crash kernel.  All of the other pages
 427          * are specified by the segments and we just memcpy
 428          * into them directly.
 429          *
 430          * The only case where we really need more than one of
  431          * these is for architectures where we cannot disable
 432          * the MMU and must instead generate an identity mapped
 433          * page table for all of the memory.
 434          *
 435          * Given the low demand this implements a very simple
 436          * allocator that finds the first hole of the appropriate
 437          * size in the reserved memory region, and allocates all
 438          * of the memory up to and including the hole.
 439          */
 440         unsigned long hole_start, hole_end, size;
 441         struct page *pages;
 442 
 443         pages = NULL;
 444         size = (1 << order) << PAGE_SHIFT;
 445         hole_start = (image->control_page + (size - 1)) & ~(size - 1);
 446         hole_end   = hole_start + size - 1;
 447         while (hole_end <= crashk_res.end) {
 448                 unsigned long i;
 449 
 450                 cond_resched();
 451 
 452                 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
 453                         break;
 454                 /* See if I overlap any of the segments */
 455                 for (i = 0; i < image->nr_segments; i++) {
 456                         unsigned long mstart, mend;
 457 
 458                         mstart = image->segment[i].mem;
 459                         mend   = mstart + image->segment[i].memsz - 1;
 460                         if ((hole_end >= mstart) && (hole_start <= mend)) {
 461                                 /* Advance the hole to the end of the segment */
 462                                 hole_start = (mend + (size - 1)) & ~(size - 1);
 463                                 hole_end   = hole_start + size - 1;
 464                                 break;
 465                         }
 466                 }
 467                 /* If I don't overlap any segments I have found my hole! */
 468                 if (i == image->nr_segments) {
 469                         pages = pfn_to_page(hole_start >> PAGE_SHIFT);
 470                         image->control_page = hole_end;
 471                         break;
 472                 }
 473         }
 474 
 475         /* Ensure that these pages are decrypted if SME is enabled. */
 476         if (pages)
 477                 arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
 478 
 479         return pages;
 480 }
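
The hole arithmetic rounds the search cursor up to the next multiple of the
(power-of-two) allocation size. A worked example, assuming 4 KiB pages and
order == 1, i.e. size == 0x2000:

/*
 * control_page == 0x12345:
 *   hole_start = (0x12345 + 0x1fff) & ~0x1fff = 0x14000
 *   hole_end   = 0x14000 + 0x2000 - 1         = 0x15fff
 * so the candidate hole is the aligned 8 KiB block [0x14000, 0x16000),
 * and on success image->control_page advances past it.
 */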
 481 
 482 
 483 struct page *kimage_alloc_control_pages(struct kimage *image,
 484                                          unsigned int order)
 485 {
 486         struct page *pages = NULL;
 487 
 488         switch (image->type) {
 489         case KEXEC_TYPE_DEFAULT:
 490                 pages = kimage_alloc_normal_control_pages(image, order);
 491                 break;
 492         case KEXEC_TYPE_CRASH:
 493                 pages = kimage_alloc_crash_control_pages(image, order);
 494                 break;
 495         }
 496 
 497         return pages;
 498 }
 499 
 500 int kimage_crash_copy_vmcoreinfo(struct kimage *image)
 501 {
 502         struct page *vmcoreinfo_page;
 503         void *safecopy;
 504 
 505         if (image->type != KEXEC_TYPE_CRASH)
 506                 return 0;
 507 
 508         /*
  509          * For kdump, allocate one vmcoreinfo safe copy from the
  510          * crash memory.  Since arch_kexec_protect_crashkres() is
  511          * called after the kexec syscall, the copy is naturally
  512          * protected from write (even read) access via the kernel
  513          * direct mapping.  But we still need to access it when a
  514          * crash happens, to generate the vmcoreinfo note, so we
  515          * rely on vmap for that purpose.
 516          */
 517         vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
 518         if (!vmcoreinfo_page) {
 519                 pr_warn("Could not allocate vmcoreinfo buffer\n");
 520                 return -ENOMEM;
 521         }
 522         safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
 523         if (!safecopy) {
 524                 pr_warn("Could not vmap vmcoreinfo buffer\n");
 525                 return -ENOMEM;
 526         }
 527 
 528         image->vmcoreinfo_data_copy = safecopy;
 529         crash_update_vmcoreinfo_safecopy(safecopy);
 530 
 531         return 0;
 532 }
 533 
 534 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 535 {
 536         if (*image->entry != 0)
 537                 image->entry++;
 538 
 539         if (image->entry == image->last_entry) {
 540                 kimage_entry_t *ind_page;
 541                 struct page *page;
 542 
 543                 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
 544                 if (!page)
 545                         return -ENOMEM;
 546 
 547                 ind_page = page_address(page);
 548                 *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
 549                 image->entry = ind_page;
 550                 image->last_entry = ind_page +
 551                                       ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
 552         }
 553         *image->entry = entry;
 554         image->entry++;
 555         *image->entry = 0;
 556 
 557         return 0;
 558 }
 559 
 560 static int kimage_set_destination(struct kimage *image,
 561                                    unsigned long destination)
 562 {
 563         int result;
 564 
 565         destination &= PAGE_MASK;
 566         result = kimage_add_entry(image, destination | IND_DESTINATION);
 567 
 568         return result;
 569 }
 570 
 571 
 572 static int kimage_add_page(struct kimage *image, unsigned long page)
 573 {
 574         int result;
 575 
 576         page &= PAGE_MASK;
 577         result = kimage_add_entry(image, page | IND_SOURCE);
 578 
 579         return result;
 580 }
 581 
 582 
 583 static void kimage_free_extra_pages(struct kimage *image)
 584 {
 585         /* Walk through and free any extra destination pages I may have */
 586         kimage_free_page_list(&image->dest_pages);
 587 
 588         /* Walk through and free any unusable pages I have cached */
 589         kimage_free_page_list(&image->unusable_pages);
 590 
 591 }
 592 void kimage_terminate(struct kimage *image)
 593 {
 594         if (*image->entry != 0)
 595                 image->entry++;
 596 
 597         *image->entry = IND_DONE;
 598 }
 599 
 600 #define for_each_kimage_entry(image, ptr, entry) \
 601         for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
 602                 ptr = (entry & IND_INDIRECTION) ? \
 603                         boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
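
A short illustration of the iterator: it hides the IND_INDIRECTION hops and
stops at IND_DONE, so a walker sees one flat stream of entries. The helper
below is hypothetical and not part of this file:

/* Editorial sketch, not part of this file. */
static unsigned long example_count_source_pages(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        unsigned long count = 0;

        for_each_kimage_entry(image, ptr, entry)
                if (entry & IND_SOURCE)
                        count++;

        return count;
}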
 604 
 605 static void kimage_free_entry(kimage_entry_t entry)
 606 {
 607         struct page *page;
 608 
 609         page = boot_pfn_to_page(entry >> PAGE_SHIFT);
 610         kimage_free_pages(page);
 611 }
 612 
 613 void kimage_free(struct kimage *image)
 614 {
 615         kimage_entry_t *ptr, entry;
 616         kimage_entry_t ind = 0;
 617 
 618         if (!image)
 619                 return;
 620 
 621         if (image->vmcoreinfo_data_copy) {
 622                 crash_update_vmcoreinfo_safecopy(NULL);
 623                 vunmap(image->vmcoreinfo_data_copy);
 624         }
 625 
 626         kimage_free_extra_pages(image);
 627         for_each_kimage_entry(image, ptr, entry) {
 628                 if (entry & IND_INDIRECTION) {
 629                         /* Free the previous indirection page */
 630                         if (ind & IND_INDIRECTION)
 631                                 kimage_free_entry(ind);
 632                         /* Save this indirection page until we are
 633                          * done with it.
 634                          */
 635                         ind = entry;
 636                 } else if (entry & IND_SOURCE)
 637                         kimage_free_entry(entry);
 638         }
 639         /* Free the final indirection page */
 640         if (ind & IND_INDIRECTION)
 641                 kimage_free_entry(ind);
 642 
 643         /* Handle any machine specific cleanup */
 644         machine_kexec_cleanup(image);
 645 
 646         /* Free the kexec control pages... */
 647         kimage_free_page_list(&image->control_pages);
 648 
 649         /*
  650          * Free up any temporary buffers allocated.  This path is taken
  651          * if an error occurred well after buffer allocation.
 652          */
 653         if (image->file_mode)
 654                 kimage_file_post_load_cleanup(image);
 655 
 656         kfree(image);
 657 }
 658 
 659 static kimage_entry_t *kimage_dst_used(struct kimage *image,
 660                                         unsigned long page)
 661 {
 662         kimage_entry_t *ptr, entry;
 663         unsigned long destination = 0;
 664 
 665         for_each_kimage_entry(image, ptr, entry) {
 666                 if (entry & IND_DESTINATION)
 667                         destination = entry & PAGE_MASK;
 668                 else if (entry & IND_SOURCE) {
 669                         if (page == destination)
 670                                 return ptr;
 671                         destination += PAGE_SIZE;
 672                 }
 673         }
 674 
 675         return NULL;
 676 }
 677 
 678 static struct page *kimage_alloc_page(struct kimage *image,
 679                                         gfp_t gfp_mask,
 680                                         unsigned long destination)
 681 {
 682         /*
 683          * Here we implement safeguards to ensure that a source page
 684          * is not copied to its destination page before the data on
 685          * the destination page is no longer useful.
 686          *
 687          * To do this we maintain the invariant that a source page is
 688          * either its own destination page, or it is not a
 689          * destination page at all.
 690          *
 691          * That is slightly stronger than required, but the proof
  692          * that no problems will occur is trivial, and the
  693          * implementation is simple to verify.
 694          *
 695          * When allocating all pages normally this algorithm will run
 696          * in O(N) time, but in the worst case it will run in O(N^2)
 697          * time.   If the runtime is a problem the data structures can
 698          * be fixed.
 699          */
 700         struct page *page;
 701         unsigned long addr;
 702 
 703         /*
 704          * Walk through the list of destination pages, and see if I
 705          * have a match.
 706          */
 707         list_for_each_entry(page, &image->dest_pages, lru) {
 708                 addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 709                 if (addr == destination) {
 710                         list_del(&page->lru);
 711                         return page;
 712                 }
 713         }
 714         page = NULL;
 715         while (1) {
 716                 kimage_entry_t *old;
 717 
 718                 /* Allocate a page, if we run out of memory give up */
 719                 page = kimage_alloc_pages(gfp_mask, 0);
 720                 if (!page)
 721                         return NULL;
  722                 /* If the page cannot be used, file it away */
 723                 if (page_to_boot_pfn(page) >
 724                                 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
 725                         list_add(&page->lru, &image->unusable_pages);
 726                         continue;
 727                 }
 728                 addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 729 
  730                 /* If it is the destination page we want, use it */
 731                 if (addr == destination)
 732                         break;
 733 
  734                 /* If the page is not a destination page, use it */
 735                 if (!kimage_is_destination_range(image, addr,
 736                                                   addr + PAGE_SIZE))
 737                         break;
 738 
 739                 /*
  740                  * I know that the page is someone's destination page.
 741                  * See if there is already a source page for this
 742                  * destination page.  And if so swap the source pages.
 743                  */
 744                 old = kimage_dst_used(image, addr);
 745                 if (old) {
 746                         /* If so move it */
 747                         unsigned long old_addr;
 748                         struct page *old_page;
 749 
 750                         old_addr = *old & PAGE_MASK;
 751                         old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
 752                         copy_highpage(page, old_page);
 753                         *old = addr | (*old & ~PAGE_MASK);
 754 
 755                         /* The old page I have found cannot be a
  756                          * destination page, so return it only if its
  757                          * gfp flags honor the ones passed in.
 758                          */
 759                         if (!(gfp_mask & __GFP_HIGHMEM) &&
 760                             PageHighMem(old_page)) {
 761                                 kimage_free_pages(old_page);
 762                                 continue;
 763                         }
 764                         addr = old_addr;
 765                         page = old_page;
 766                         break;
 767                 }
 768                 /* Place the page on the destination list, to be used later */
 769                 list_add(&page->lru, &image->dest_pages);
 770         }
 771 
 772         return page;
 773 }
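
The swap step is easiest to follow with concrete addresses. An editorial
example (all addresses hypothetical):

/*
 * Suppose destination == 0x5000 and the fresh page lands at 0x7000, but
 * 0x7000 is itself some segment's destination.  kimage_dst_used() finds
 * the IND_SOURCE entry whose page (say at 0x9000) is bound for 0x7000.
 * copy_highpage() moves that data into the fresh page, the entry is
 * rewritten to point at 0x7000 (its data is now already in place), and
 * the 0x9000 page is recycled as the source page for 0x5000.
 */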
 774 
 775 static int kimage_load_normal_segment(struct kimage *image,
 776                                          struct kexec_segment *segment)
 777 {
 778         unsigned long maddr;
 779         size_t ubytes, mbytes;
 780         int result;
 781         unsigned char __user *buf = NULL;
 782         unsigned char *kbuf = NULL;
 783 
 784         result = 0;
 785         if (image->file_mode)
 786                 kbuf = segment->kbuf;
 787         else
 788                 buf = segment->buf;
 789         ubytes = segment->bufsz;
 790         mbytes = segment->memsz;
 791         maddr = segment->mem;
 792 
 793         result = kimage_set_destination(image, maddr);
 794         if (result < 0)
 795                 goto out;
 796 
 797         while (mbytes) {
 798                 struct page *page;
 799                 char *ptr;
 800                 size_t uchunk, mchunk;
 801 
 802                 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
 803                 if (!page) {
 804                         result  = -ENOMEM;
 805                         goto out;
 806                 }
 807                 result = kimage_add_page(image, page_to_boot_pfn(page)
 808                                                                 << PAGE_SHIFT);
 809                 if (result < 0)
 810                         goto out;
 811 
 812                 ptr = kmap(page);
 813                 /* Start with a clear page */
 814                 clear_page(ptr);
 815                 ptr += maddr & ~PAGE_MASK;
 816                 mchunk = min_t(size_t, mbytes,
 817                                 PAGE_SIZE - (maddr & ~PAGE_MASK));
 818                 uchunk = min(ubytes, mchunk);
 819 
 820                 /* For file based kexec, source pages are in kernel memory */
 821                 if (image->file_mode)
 822                         memcpy(ptr, kbuf, uchunk);
 823                 else
 824                         result = copy_from_user(ptr, buf, uchunk);
 825                 kunmap(page);
 826                 if (result) {
 827                         result = -EFAULT;
 828                         goto out;
 829                 }
 830                 ubytes -= uchunk;
 831                 maddr  += mchunk;
 832                 if (image->file_mode)
 833                         kbuf += mchunk;
 834                 else
 835                         buf += mchunk;
 836                 mbytes -= mchunk;
 837 
 838                 cond_resched();
 839         }
 840 out:
 841         return result;
 842 }
 843 
 844 static int kimage_load_crash_segment(struct kimage *image,
 845                                         struct kexec_segment *segment)
 846 {
  847         /* For crash dump kernels we simply copy the data from
  848          * user space to its destination.
 849          * We do things a page at a time for the sake of kmap.
 850          */
 851         unsigned long maddr;
 852         size_t ubytes, mbytes;
 853         int result;
 854         unsigned char __user *buf = NULL;
 855         unsigned char *kbuf = NULL;
 856 
 857         result = 0;
 858         if (image->file_mode)
 859                 kbuf = segment->kbuf;
 860         else
 861                 buf = segment->buf;
 862         ubytes = segment->bufsz;
 863         mbytes = segment->memsz;
 864         maddr = segment->mem;
 865         while (mbytes) {
 866                 struct page *page;
 867                 char *ptr;
 868                 size_t uchunk, mchunk;
 869 
 870                 page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
 871                 if (!page) {
 872                         result  = -ENOMEM;
 873                         goto out;
 874                 }
 875                 arch_kexec_post_alloc_pages(page_address(page), 1, 0);
 876                 ptr = kmap(page);
 877                 ptr += maddr & ~PAGE_MASK;
 878                 mchunk = min_t(size_t, mbytes,
 879                                 PAGE_SIZE - (maddr & ~PAGE_MASK));
 880                 uchunk = min(ubytes, mchunk);
 881                 if (mchunk > uchunk) {
 882                         /* Zero the trailing part of the page */
 883                         memset(ptr + uchunk, 0, mchunk - uchunk);
 884                 }
 885 
 886                 /* For file based kexec, source pages are in kernel memory */
 887                 if (image->file_mode)
 888                         memcpy(ptr, kbuf, uchunk);
 889                 else
 890                         result = copy_from_user(ptr, buf, uchunk);
 891                 kexec_flush_icache_page(page);
 892                 kunmap(page);
 893                 arch_kexec_pre_free_pages(page_address(page), 1);
 894                 if (result) {
 895                         result = -EFAULT;
 896                         goto out;
 897                 }
 898                 ubytes -= uchunk;
 899                 maddr  += mchunk;
 900                 if (image->file_mode)
 901                         kbuf += mchunk;
 902                 else
 903                         buf += mchunk;
 904                 mbytes -= mchunk;
 905 
 906                 cond_resched();
 907         }
 908 out:
 909         return result;
 910 }
 911 
 912 int kimage_load_segment(struct kimage *image,
 913                                 struct kexec_segment *segment)
 914 {
 915         int result = -ENOMEM;
 916 
 917         switch (image->type) {
 918         case KEXEC_TYPE_DEFAULT:
 919                 result = kimage_load_normal_segment(image, segment);
 920                 break;
 921         case KEXEC_TYPE_CRASH:
 922                 result = kimage_load_crash_segment(image, segment);
 923                 break;
 924         }
 925 
 926         return result;
 927 }
 928 
 929 struct kimage *kexec_image;
 930 struct kimage *kexec_crash_image;
 931 int kexec_load_disabled;
 932 
 933 /*
 934  * No panic_cpu check version of crash_kexec().  This function is called
 935  * only when panic_cpu holds the current CPU number; this is the only CPU
 936  * which processes crash_kexec routines.
 937  */
 938 void __noclone __crash_kexec(struct pt_regs *regs)
 939 {
 940         /* Take the kexec_mutex here to prevent sys_kexec_load
 941          * running on one cpu from replacing the crash kernel
 942          * we are using after a panic on a different cpu.
 943          *
 944          * If the crash kernel was not located in a fixed area
 945          * of memory the xchg(&kexec_crash_image) would be
 946          * sufficient.  But since I reuse the memory...
 947          */
 948         if (mutex_trylock(&kexec_mutex)) {
 949                 if (kexec_crash_image) {
 950                         struct pt_regs fixed_regs;
 951 
 952                         crash_setup_regs(&fixed_regs, regs);
 953                         crash_save_vmcoreinfo();
 954                         machine_crash_shutdown(&fixed_regs);
 955                         machine_kexec(kexec_crash_image);
 956                 }
 957                 mutex_unlock(&kexec_mutex);
 958         }
 959 }
 960 STACK_FRAME_NON_STANDARD(__crash_kexec);
 961 
 962 void crash_kexec(struct pt_regs *regs)
 963 {
 964         int old_cpu, this_cpu;
 965 
 966         /*
 967          * Only one CPU is allowed to execute the crash_kexec() code as with
 968          * panic().  Otherwise parallel calls of panic() and crash_kexec()
 969          * may stop each other.  To exclude them, we use panic_cpu here too.
 970          */
 971         this_cpu = raw_smp_processor_id();
 972         old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
 973         if (old_cpu == PANIC_CPU_INVALID) {
 974                 /* This is the 1st CPU which comes here, so go ahead. */
 975                 printk_safe_flush_on_panic();
 976                 __crash_kexec(regs);
 977 
 978                 /*
 979                  * Reset panic_cpu to allow another panic()/crash_kexec()
 980                  * call.
 981                  */
 982                 atomic_set(&panic_cpu, PANIC_CPU_INVALID);
 983         }
 984 }
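
The panic_cpu handshake above is a one-shot gate: exactly one CPU wins the
cmpxchg and runs the work while concurrent callers return immediately. The
same pattern in isolation, as an editorial sketch with hypothetical names:

/* Editorial sketch, not part of this file. */
static atomic_t example_gate = ATOMIC_INIT(PANIC_CPU_INVALID);

static void example_one_cpu_only(void (*fn)(void))
{
        int cpu = raw_smp_processor_id();

        if (atomic_cmpxchg(&example_gate, PANIC_CPU_INVALID, cpu) ==
            PANIC_CPU_INVALID) {
                fn();                   /* the winning CPU does the work */
                atomic_set(&example_gate, PANIC_CPU_INVALID); /* reopen */
        }
        /* losing CPUs fall straight through */
}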
 985 
 986 size_t crash_get_memory_size(void)
 987 {
 988         size_t size = 0;
 989 
 990         mutex_lock(&kexec_mutex);
 991         if (crashk_res.end != crashk_res.start)
 992                 size = resource_size(&crashk_res);
 993         mutex_unlock(&kexec_mutex);
 994         return size;
 995 }
 996 
 997 void __weak crash_free_reserved_phys_range(unsigned long begin,
 998                                            unsigned long end)
 999 {
1000         unsigned long addr;
1001 
1002         for (addr = begin; addr < end; addr += PAGE_SIZE)
1003                 free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
1004 }
1005 
1006 int crash_shrink_memory(unsigned long new_size)
1007 {
1008         int ret = 0;
1009         unsigned long start, end;
1010         unsigned long old_size;
1011         struct resource *ram_res;
1012 
1013         mutex_lock(&kexec_mutex);
1014 
1015         if (kexec_crash_image) {
1016                 ret = -ENOENT;
1017                 goto unlock;
1018         }
1019         start = crashk_res.start;
1020         end = crashk_res.end;
1021         old_size = (end == 0) ? 0 : end - start + 1;
1022         if (new_size >= old_size) {
1023                 ret = (new_size == old_size) ? 0 : -EINVAL;
1024                 goto unlock;
1025         }
1026 
1027         ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1028         if (!ram_res) {
1029                 ret = -ENOMEM;
1030                 goto unlock;
1031         }
1032 
1033         start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1034         end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1035 
1036         crash_free_reserved_phys_range(end, crashk_res.end);
1037 
1038         if ((start == end) && (crashk_res.parent != NULL))
1039                 release_resource(&crashk_res);
1040 
1041         ram_res->start = end;
1042         ram_res->end = crashk_res.end;
1043         ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
1044         ram_res->name = "System RAM";
1045 
1046         crashk_res.end = end - 1;
1047 
1048         insert_resource(&iomem_resource, ram_res);
1049 
1050 unlock:
1051         mutex_unlock(&kexec_mutex);
1052         return ret;
1053 }
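
A worked example of the shrink arithmetic, assuming (hypothetically) that
KEXEC_CRASH_MEM_ALIGN == PAGE_SIZE == 4 KiB and an already aligned
reservation:

/*
 * crashk_res = [0x10000000, 0x17ffffff] (128 MiB), new_size == 64 MiB:
 *   start = roundup(0x10000000, 0x1000)             = 0x10000000
 *   end   = roundup(0x10000000 + 0x4000000, 0x1000) = 0x14000000
 * The tail [0x14000000, 0x17ffffff] is freed, re-inserted as "System RAM",
 * and crashk_res.end becomes 0x13ffffff.
 */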
1054 
1055 void crash_save_cpu(struct pt_regs *regs, int cpu)
1056 {
1057         struct elf_prstatus prstatus;
1058         u32 *buf;
1059 
1060         if ((cpu < 0) || (cpu >= nr_cpu_ids))
1061                 return;
1062 
1063         /* Using ELF notes here is opportunistic.
1064          * I need a well defined structure format
1065          * for the data I pass, and I need tags
1066          * on the data to indicate what information I have
1067          * squirrelled away.  ELF notes happen to provide
1068          * all of that, so there is no need to invent something new.
1069          */
1070         buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1071         if (!buf)
1072                 return;
1073         memset(&prstatus, 0, sizeof(prstatus));
1074         prstatus.pr_pid = current->pid;
1075         elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1076         buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1077                               &prstatus, sizeof(prstatus));
1078         final_note(buf);
1079 }
1080 
1081 static int __init crash_notes_memory_init(void)
1082 {
1083         /* Allocate memory for saving cpu registers. */
1084         size_t size, align;
1085 
1086         /*
1087          * crash_notes could be allocated across 2 vmalloc pages when percpu
 1088          * is vmalloc based.  vmalloc doesn't guarantee that 2 contiguous vmalloc
 1089          * pages are also on 2 contiguous physical pages.  In this case the
1090          * 2nd part of crash_notes in 2nd page could be lost since only the
1091          * starting address and size of crash_notes are exported through sysfs.
1092          * Here round up the size of crash_notes to the nearest power of two
1093          * and pass it to __alloc_percpu as align value. This can make sure
1094          * crash_notes is allocated inside one physical page.
1095          */
1096         size = sizeof(note_buf_t);
1097         align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
1098 
1099         /*
 1100          * Break the build if size is bigger than PAGE_SIZE, since
 1101          * crash_notes would then certainly span 2 pages.
1102          */
1103         BUILD_BUG_ON(size > PAGE_SIZE);
1104 
1105         crash_notes = __alloc_percpu(size, align);
1106         if (!crash_notes) {
1107                 pr_warn("Memory allocation for saving cpu register states failed\n");
1108                 return -ENOMEM;
1109         }
1110         return 0;
1111 }
1112 subsys_initcall(crash_notes_memory_init);
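
The alignment argument can be made concrete. An editorial example with a
hypothetical note size (the real sizeof(note_buf_t) is architecture
dependent):

/*
 * If sizeof(note_buf_t) were 1824 bytes:
 *   align = roundup_pow_of_two(1824) = 2048
 * A 2048-aligned object may start only at page offset 0 or 2048, so an
 * object of at most 2048 bytes always lies within [0, 2048) or
 * [2048, 4096) of a 4 KiB page and can never straddle a page boundary.
 */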
1113 
1114 
1115 /*
1116  * Move into place and start executing a preloaded standalone
 1117  * executable.  If nothing was preloaded, return an error.
1118  */
1119 int kernel_kexec(void)
1120 {
1121         int error = 0;
1122 
1123         if (!mutex_trylock(&kexec_mutex))
1124                 return -EBUSY;
1125         if (!kexec_image) {
1126                 error = -EINVAL;
1127                 goto Unlock;
1128         }
1129 
1130 #ifdef CONFIG_KEXEC_JUMP
1131         if (kexec_image->preserve_context) {
1132                 lock_system_sleep();
1133                 pm_prepare_console();
1134                 error = freeze_processes();
1135                 if (error) {
1136                         error = -EBUSY;
1137                         goto Restore_console;
1138                 }
1139                 suspend_console();
1140                 error = dpm_suspend_start(PMSG_FREEZE);
1141                 if (error)
1142                         goto Resume_console;
1143                 /* At this point, dpm_suspend_start() has been called,
1144                  * but *not* dpm_suspend_end(). We *must* call
1145                  * dpm_suspend_end() now.  Otherwise, drivers for
1146                  * some devices (e.g. interrupt controllers) become
1147                  * desynchronized with the actual state of the
1148                  * hardware at resume time, and evil weirdness ensues.
1149                  */
1150                 error = dpm_suspend_end(PMSG_FREEZE);
1151                 if (error)
1152                         goto Resume_devices;
1153                 error = suspend_disable_secondary_cpus();
1154                 if (error)
1155                         goto Enable_cpus;
1156                 local_irq_disable();
1157                 error = syscore_suspend();
1158                 if (error)
1159                         goto Enable_irqs;
1160         } else
1161 #endif
1162         {
1163                 kexec_in_progress = true;
1164                 kernel_restart_prepare(NULL);
1165                 migrate_to_reboot_cpu();
1166 
1167                 /*
1168                  * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1169                  * no further code needs to use CPU hotplug (which is true in
1170                  * the reboot case). However, the kexec path depends on using
1171                  * CPU hotplug again; so re-enable it here.
1172                  */
1173                 cpu_hotplug_enable();
1174                 pr_emerg("Starting new kernel\n");
1175                 machine_shutdown();
1176         }
1177 
1178         machine_kexec(kexec_image);
1179 
1180 #ifdef CONFIG_KEXEC_JUMP
1181         if (kexec_image->preserve_context) {
1182                 syscore_resume();
1183  Enable_irqs:
1184                 local_irq_enable();
1185  Enable_cpus:
1186                 suspend_enable_secondary_cpus();
1187                 dpm_resume_start(PMSG_RESTORE);
1188  Resume_devices:
1189                 dpm_resume_end(PMSG_RESTORE);
1190  Resume_console:
1191                 resume_console();
1192                 thaw_processes();
1193  Restore_console:
1194                 pm_restore_console();
1195                 unlock_system_sleep();
1196         }
1197 #endif
1198 
1199  Unlock:
1200         mutex_unlock(&kexec_mutex);
1201         return error;
1202 }
1203 
1204 /*
1205  * Protection mechanism for crashkernel reserved memory after
1206  * the kdump kernel is loaded.
1207  *
1208  * Provide an empty default implementation here -- architecture
1209  * code may override this
1210  */
1211 void __weak arch_kexec_protect_crashkres(void)
1212 {}
1213 
1214 void __weak arch_kexec_unprotect_crashkres(void)
1215 {}
