This source file includes the following definitions:
- kexec_should_crash
- kexec_crash_loaded
- sanity_check_segment_list
- do_kimage_alloc_init
- kimage_is_destination_range
- kimage_alloc_pages
- kimage_free_pages
- kimage_free_page_list
- kimage_alloc_normal_control_pages
- kimage_alloc_crash_control_pages
- kimage_alloc_control_pages
- kimage_crash_copy_vmcoreinfo
- kimage_add_entry
- kimage_set_destination
- kimage_add_page
- kimage_free_extra_pages
- kimage_terminate
- kimage_free_entry
- kimage_free
- kimage_dst_used
- kimage_alloc_page
- kimage_load_normal_segment
- kimage_load_crash_segment
- kimage_load_segment
- __crash_kexec
- crash_kexec
- crash_get_memory_size
- crash_free_reserved_phys_range
- crash_shrink_memory
- crash_save_cpu
- crash_notes_memory_init
- kernel_kexec
- arch_kexec_protect_crashkres
- arch_kexec_unprotect_crashkres
/*
 * kexec_core.c - kexec system call core code
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/frame.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
#include "kexec_internal.h"

DEFINE_MUTEX(kexec_mutex);

/* Per-cpu memory for storing cpu states in case of system crash */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;


/* Location of the reserved region for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
        .desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
        .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
        .desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
        /*
         * If crash_kexec_post_notifiers is enabled, don't run
         * crash_kexec() here yet, which must be run after panic
         * notifiers in panic().
         */
        if (crash_kexec_post_notifiers)
                return 0;

        /*
         * There are 4 panic() calls in the do_exit() path, each of which
         * corresponds to one of these conditions.
         */
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
                return 1;
        return 0;
}

int kexec_crash_loaded(void)
{
        return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  The code that
 * performs the switch away from the running kernel is copied into a
 * control code buffer (of size KEXEC_CONTROL_PAGE_SIZE), which must
 * be identity mapped for the transition from virtual to physical
 * addressing.
 *
 * The assembly stub in the control code buffer is passed a linked
 * list of indirection pages describing the source pages of the new
 * kernel and the destination addresses they are to be copied to.
 * Because this data structure is used after the current kernel has
 * been torn down, it must be completely self-contained.
 */

#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long dest);

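/*
 * Validate the destination layout of a kexec image before any pages
 * are allocated for it.
 */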
int sanity_check_segment_list(struct kimage *image)
{
        int i;
        unsigned long nr_segments = image->nr_segments;
        unsigned long total_pages = 0;
        unsigned long nr_pages = totalram_pages();

        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't try to load the new
         * image into invalid or reserved areas of RAM; this just
         * verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.
         */
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if (mstart > mend)
                        return -EADDRNOTAVAIL;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        return -EADDRNOTAVAIL;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        return -EADDRNOTAVAIL;
        }

        /*
         * Verify our destination addresses do not overlap.  If we
         * allowed overlapping destinations we would have overlapping
         * memory regions and failures.
         */
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;

                        pstart = image->segment[j].mem;
                        pend = pstart + image->segment[j].memsz;

                        if ((mend > pstart) && (mstart < pend))
                                return -EINVAL;
                }
        }

        /*
         * Ensure our buffer sizes are strictly less than our memory
         * sizes.  This should always be the case, and it is easier to
         * check up front than to be surprised later on.
         */
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        return -EINVAL;
        }

        /*
         * Verify that no more than half of memory will be consumed.
         * If the request from userspace is too large, a large amount
         * of time will be wasted allocating pages, which can cause a
         * soft lockup.
         */
        for (i = 0; i < nr_segments; i++) {
                if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
                        return -EINVAL;

                total_pages += PAGE_COUNT(image->segment[i].memsz);
        }

        if (total_pages > nr_pages / 2)
                return -EINVAL;

        /*
         * Crash kernels are preloaded into a reserved area of RAM.
         * Make sure every segment lies within that reserved area,
         * otherwise preloading the new kernel could corrupt things.
         */
        if (image->type == KEXEC_TYPE_CRASH) {
                for (i = 0; i < nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend = mstart + image->segment[i].memsz - 1;

                        if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
                            (mend > phys_to_boot_phys(crashk_res.end)))
                                return -EADDRNOTAVAIL;
                }
        }

        return 0;
}

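/*
 * Allocate and minimally initialize a struct kimage: empty entry
 * list and empty page lists, with no control page selected yet.
 */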
struct kimage *do_kimage_alloc_init(void)
{
        struct kimage *image;

        /* Allocate a controlling structure */
        image = kzalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                return NULL;

        image->head = 0;
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unusable_pages);

        return image;
}

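/*
 * Return 1 if the physical range [start, end) overlaps the
 * destination memory of any segment in the image, 0 otherwise.
 */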
int kimage_is_destination_range(struct kimage *image,
                                unsigned long start,
                                unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
                        return 1;
        }

        return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        if (fatal_signal_pending(current))
                return NULL;
        pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
        if (pages) {
                unsigned int count, i;

                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);

                arch_kexec_post_alloc_pages(page_address(pages), count,
                                            gfp_mask);

                if (gfp_mask & __GFP_ZERO)
                        for (i = 0; i < count; i++)
                                clear_highpage(pages + i);
        }

        return pages;
}

static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;

        arch_kexec_pre_free_pages(page_address(page), count);

        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, list, lru) {
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /*
         * Control pages are special, they are the intermediaries that
         * are needed while we copy the rest of the pages to their
         * final resting place.  As such they must not conflict with
         * either the destination addresses or memory the kernel is
         * already using.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /*
         * Loop while I can allocate a page and the page allocated is
         * a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
                if (!pages)
                        break;
                pfn = page_to_boot_pfn(pages);
                epfn = pfn + count;
                addr = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                    kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page */
                list_add(&pages->lru, &image->control_pages);

                /*
                 * Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address.  Therefore kimage_alloc_pages will
                 * not return it (again) and we don't need to give it
                 * an entry in image->segment[].
                 */
        }
        /*
         * Deal with the destination pages I have inadvertently
         * allocated.  For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                     unsigned int order)
{
        /*
         * Control pages are special, they are the intermediaries that
         * are needed while we copy the rest of the pages to their
         * final resting place.  As such they must not conflict with
         * either the destination addresses or memory the kernel is
         * already using.
         *
         * Control pages are also the only pages we must allocate when
         * loading a crash kernel.  All of the other pages are
         * specified by the segments and we just memcpy into them
         * directly.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all of
         * the memory up to and including the hole.
         */
        unsigned long hole_start, hole_end, size;
        struct page *pages;

        pages = NULL;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                unsigned long i;

                cond_resched();

                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
                        break;
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end = hole_start + size - 1;
                                break;
                        }
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        image->control_page = hole_end;
                        break;
                }
        }

        /* Ensure that these pages are decrypted if SME is enabled. */
        if (pages)
                arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

        return pages;
}

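/*
 * Allocate control pages using the allocator that matches the image
 * type: normal page allocation for KEXEC_TYPE_DEFAULT, or a hole in
 * the reserved crash region for KEXEC_TYPE_CRASH.
 */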
struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
        struct page *vmcoreinfo_page;
        void *safecopy;

        if (image->type != KEXEC_TYPE_CRASH)
                return 0;

        /*
         * For kdump, allocate one vmcoreinfo safe copy from the crash
         * memory.  Since arch_kexec_protect_crashkres() is run after
         * the kexec syscall, the copy is naturally protected from
         * write (even read) access under the kernel direct mapping,
         * yet it still has to be updated when a crash happens in
         * order to generate the vmcoreinfo note; hence the vmap.
         */
        vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
        if (!vmcoreinfo_page) {
                pr_warn("Could not allocate vmcoreinfo buffer\n");
                return -ENOMEM;
        }
        safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
        if (!safecopy) {
                pr_warn("Could not vmap vmcoreinfo buffer\n");
                return -ENOMEM;
        }

        image->vmcoreinfo_data_copy = safecopy;
        crash_update_vmcoreinfo_safecopy(safecopy);

        return 0;
}

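/*
 * Append an entry to the image's list of indirection pages,
 * allocating and chaining a new indirection page when the current
 * one is full.
 */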
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}

static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
        int result;

        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);

        return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
        int result;

        page &= PAGE_MASK;
        result = kimage_add_entry(image, page | IND_SOURCE);

        return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION) ? \
                        boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = boot_pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

        if (image->vmcoreinfo_data_copy) {
                crash_update_vmcoreinfo_safecopy(NULL);
                vunmap(image->vmcoreinfo_data_copy);
        }

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /*
                         * Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                } else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages */
        kimage_free_page_list(&image->control_pages);

        /*
         * Free up any temporary buffers allocated.  This might hit if
         * an error occurred much later after buffer allocation.
         */
        if (image->file_mode)
                kimage_file_post_load_cleanup(image);

        kfree(image);
}

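/*
 * Walk the image's entry list and return a pointer to the source
 * entry whose destination address is @page, or NULL if no source
 * page has been assigned to that destination yet.
 */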
static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.
         */
        struct page *page;
        unsigned long addr;

        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_boot_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
                if (page_to_boot_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unusable_pages);
                        continue;
                }
                addr = page_to_boot_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page use it */
                if (!kimage_is_destination_range(image, addr,
                                                  addr + PAGE_SIZE))
                        break;

                /*
                 * I know that the page is someones destination page.
                 * See if there is already a source page for this
                 * destination page.  And if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /*
                         * The old page I have found cannot be a
                         * destination page, so return it if its
                         * gfp_flags honor the ones passed in.
                         */
                        if (!(gfp_mask & __GFP_HIGHMEM) &&
                            PageHighMem(old_page)) {
                                kimage_free_pages(old_page);
                                continue;
                        }
                        addr = old_addr;
                        page = old_page;
                        break;
                }
                /* Place the page on the destination list, to be used later */
                list_add(&page->lru, &image->dest_pages);
        }

        return page;
}

static int kimage_load_normal_segment(struct kimage *image,
                                      struct kexec_segment *segment)
{
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf = NULL;
        unsigned char *kbuf = NULL;

        result = 0;
        if (image->file_mode)
                kbuf = segment->kbuf;
        else
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_boot_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap(page);
                /* Start with a clear page */
                clear_page(ptr);
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                                PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);

                /* For file based kexec, source pages are in kernel memory */
                if (image->file_mode)
                        memcpy(ptr, kbuf, uchunk);
                else
                        result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr += mchunk;
                if (image->file_mode)
                        kbuf += mchunk;
                else
                        buf += mchunk;
                mbytes -= mchunk;

                cond_resched();
        }
out:
        return result;
}

static int kimage_load_crash_segment(struct kimage *image,
                                     struct kexec_segment *segment)
{
        /*
         * For crash dump kernels we simply copy the data from user
         * space to its destination.  We do things a page at a time
         * for the sake of kmap.
         */
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf = NULL;
        unsigned char *kbuf = NULL;

        result = 0;
        if (image->file_mode)
                kbuf = segment->kbuf;
        else
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                arch_kexec_post_alloc_pages(page_address(page), 1, 0);
                ptr = kmap(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                                PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);
                if (mchunk > uchunk) {
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }

                /* For file based kexec, source pages are in kernel memory */
                if (image->file_mode)
                        memcpy(ptr, kbuf, uchunk);
                else
                        result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);
                kunmap(page);
                arch_kexec_pre_free_pages(page_address(page), 1);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr += mchunk;
                if (image->file_mode)
                        kbuf += mchunk;
                else
                        buf += mchunk;
                mbytes -= mchunk;

                cond_resched();
        }
out:
        return result;
}

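/*
 * Load one segment of the image, dispatching to the normal or crash
 * loader depending on the image type.
 */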
int kimage_load_segment(struct kimage *image,
                        struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
                break;
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
                break;
        }

        return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

/*
 * No panic_cpu check version of crash_kexec().  This function is
 * called only when panic_cpu holds the current CPU number; this is
 * the only CPU which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
        /*
         * Take the kexec_mutex here to prevent sys_kexec_load running
         * on one cpu from replacing the crash kernel we are using
         * after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area of
         * memory the xchg(&kexec_crash_image) would be sufficient.
         * But since I know it is not located in a fixed area (once
         * the crash kernel is loaded) this is easier.
         */
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                mutex_unlock(&kexec_mutex);
        }
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
        int old_cpu, this_cpu;

        /*
         * Only one CPU is allowed to execute the crash_kexec() code
         * as with panic().  Otherwise parallel calls of panic() and
         * crash_kexec() may stop each other.  To exclude them, we use
         * panic_cpu here too.
         */
        this_cpu = raw_smp_processor_id();
        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
        if (old_cpu == PANIC_CPU_INVALID) {
                /* This is the 1st CPU which comes here, so go ahead. */
                printk_safe_flush_on_panic();
                __crash_kexec(regs);

                /*
                 * Reset panic_cpu to allow another panic()/crash_kexec()
                 * call.
                 */
                atomic_set(&panic_cpu, PANIC_CPU_INVALID);
        }
}

size_t crash_get_memory_size(void)
{
        size_t size = 0;

        mutex_lock(&kexec_mutex);
        if (crashk_res.end != crashk_res.start)
                size = resource_size(&crashk_res);
        mutex_unlock(&kexec_mutex);
        return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
                                           unsigned long end)
{
        unsigned long addr;

        for (addr = begin; addr < end; addr += PAGE_SIZE)
                free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}

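/*
 * Shrink the reserved crash kernel region to new_size bytes and hand
 * the freed tail back to the system as "System RAM".  Fails with
 * -ENOENT if a crash kernel is already loaded, and with -EINVAL if
 * new_size would grow the region.
 */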
int crash_shrink_memory(unsigned long new_size)
{
        int ret = 0;
        unsigned long start, end;
        unsigned long old_size;
        struct resource *ram_res;

        mutex_lock(&kexec_mutex);

        if (kexec_crash_image) {
                ret = -ENOENT;
                goto unlock;
        }
        start = crashk_res.start;
        end = crashk_res.end;
        old_size = (end == 0) ? 0 : end - start + 1;
        if (new_size >= old_size) {
                ret = (new_size == old_size) ? 0 : -EINVAL;
                goto unlock;
        }

        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
        if (!ram_res) {
                ret = -ENOMEM;
                goto unlock;
        }

        start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
        end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

        crash_free_reserved_phys_range(end, crashk_res.end);

        if ((start == end) && (crashk_res.parent != NULL))
                release_resource(&crashk_res);

        ram_res->start = end;
        ram_res->end = crashk_res.end;
        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
        ram_res->name = "System RAM";

        crashk_res.end = end - 1;

        insert_resource(&iomem_resource, ram_res);

unlock:
        mutex_unlock(&kexec_mutex);
        return ret;
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
        struct elf_prstatus prstatus;
        u32 *buf;

        if ((cpu < 0) || (cpu >= nr_cpu_ids))
                return;

        /*
         * Using ELF notes here is opportunistic.  I need a well
         * defined note format, and a way to determine which cpu a
         * note belongs to; ELF notes happen to provide both.  The
         * per-cpu register state is saved as an NT_PRSTATUS note in
         * the crash_notes buffer of the given cpu.
         */
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        if (!buf)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
        /* Allocate memory for saving cpu registers. */
        size_t size, align;

        /*
         * crash_notes could be allocated across 2 vmalloc pages when
         * percpu is vmalloc based.  vmalloc doesn't guarantee that
         * two subsequent pages are contiguous in physical memory, but
         * the whole crash_notes buffer of each cpu must lie in one
         * physically contiguous page.  So force page alignment, with
         * a size no bigger than a page.
         */
        size = sizeof(note_buf_t);
        align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

        /*
         * Break the build if the size is bigger than PAGE_SIZE, since
         * crash_notes would then definitely span two pages.
         */
        BUILD_BUG_ON(size > PAGE_SIZE);

        crash_notes = __alloc_percpu(size, align);
        if (!crash_notes) {
                pr_warn("Memory allocation for saving cpu register states failed\n");
                return -ENOMEM;
        }
        return 0;
}
subsys_initcall(crash_notes_memory_init);

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
        int error = 0;

        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;
        if (!kexec_image) {
                error = -EINVAL;
                goto Unlock;
        }

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                lock_system_sleep();
                pm_prepare_console();
                error = freeze_processes();
                if (error) {
                        error = -EBUSY;
                        goto Restore_console;
                }
                suspend_console();
                error = dpm_suspend_start(PMSG_FREEZE);
                if (error)
                        goto Resume_console;

                /*
                 * dpm_suspend_start() has been called, but *not*
                 * dpm_suspend_end().  We *must* call dpm_suspend_end()
                 * now.  Otherwise, drivers for some devices (e.g.
                 * interrupt controllers) become desynchronized with
                 * the actual state of the hardware at resume time.
                 */
                error = dpm_suspend_end(PMSG_FREEZE);
                if (error)
                        goto Resume_devices;
                error = suspend_disable_secondary_cpus();
                if (error)
                        goto Enable_cpus;
                local_irq_disable();
                error = syscore_suspend();
                if (error)
                        goto Enable_irqs;
        } else
#endif
        {
                kexec_in_progress = true;
                kernel_restart_prepare(NULL);
                migrate_to_reboot_cpu();

                /*
                 * migrate_to_reboot_cpu() disables CPU hotplug
                 * assuming that no further code needs to use CPU
                 * hotplug (which is true in the reboot case).
                 * However, the kexec path depends on using CPU
                 * hotplug again; so re-enable it here.
                 */
                cpu_hotplug_enable();
                pr_emerg("Starting new kernel\n");
                machine_shutdown();
        }

        machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                syscore_resume();
 Enable_irqs:
                local_irq_enable();
 Enable_cpus:
                suspend_enable_secondary_cpus();
                dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
                dpm_resume_end(PMSG_RESTORE);
 Resume_console:
                resume_console();
                thaw_processes();
 Restore_console:
                pm_restore_console();
                unlock_system_sleep();
        }
#endif

 Unlock:
        mutex_unlock(&kexec_mutex);
        return error;
}

/*
 * Protection mechanism for the crashkernel reserved memory after the
 * kdump kernel is loaded.  Provide empty default implementations
 * here; architecture code may override them.
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}