This source file includes the following definitions.
- register_oldmem_pfn_is_ram
- unregister_oldmem_pfn_is_ram
- pfn_is_ram
- read_from_oldmem
- elfcorehdr_alloc
- elfcorehdr_free
- elfcorehdr_read
- elfcorehdr_read_notes
- remap_oldmem_pfn_range
- copy_oldmem_page_encrypted
- copy_to
- vmcoredd_copy_dumps
- vmcoredd_mmap_dumps
- __read_vmcore
- read_vmcore
- mmap_vmcore_fault
- vmcore_alloc_buf
- remap_oldmem_pfn_checked
- vmcore_remap_oldmem_pfn
- mmap_vmcore
- mmap_vmcore
- get_new_element
- get_vmcore_size
- update_note_header_size_elf64
- get_note_number_and_size_elf64
- copy_notes_elf64
- merge_note_headers_elf64
- update_note_header_size_elf32
- get_note_number_and_size_elf32
- copy_notes_elf32
- merge_note_headers_elf32
- process_ptload_program_headers_elf64
- process_ptload_program_headers_elf32
- set_vmcore_list_offsets
- free_elfcorebuf
- parse_crash_elf64_headers
- parse_crash_elf32_headers
- parse_crash_elf_headers
- vmcoredd_write_header
- vmcoredd_update_program_headers
- vmcoredd_update_size
- vmcore_add_device_dump
- vmcore_free_device_dumps
- vmcore_init
- vmcore_cleanup
// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

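/* Reads a page from the oldmem device from given offset. */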
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);

			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

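/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */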
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
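/*
 * Copy device dump data from the vmcoredd_list into a kernel or user
 * buffer, starting at @start bytes into the concatenated dumps.
 */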
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
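/*
 * Map device dump data from the vmcoredd_list into a user vma, starting
 * at @start bytes into the concatenated dumps.
 */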
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensure that zero-filled data can be
		 * avoided by seeking to the end of the notes.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, mem_encrypt_active());
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

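/* read() handler for /proc/vmcore: copy data out to a user buffer. */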
static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * contiguous user-space mappings.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range() replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

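/*
 * mmap handler for /proc/vmcore: map the ELF headers, the note segment
 * and the old memory regions into one contiguous read-only user mapping.
 */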
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensure that zero-filled data can be
		 * avoided by seeking to the end of the notes.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Map device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Map remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

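/* File operations for /proc/vmcore. */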
static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};

static struct vmcore * __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

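/*
 * Total vmcore size: ELF header, note segment, plus every memory chunk
 * on the vmcore list.
 */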
static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * buffer @notes_buf is equal to sum of sizes of ELF note segments
 * described in the given ELF header of the 1st kernel.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * buffer @notes_buf is equal to sum of sizes of ELF note segments
 * described in the given ELF header of the 1st kernel.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

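/*
 * Build the list of dumpable memory regions from the PT_LOAD program
 * headers and rewrite their file offsets to match the vmcore layout.
 */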
static int __init process_ptload_program_headers_elf64(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

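/* Elf32 counterpart of process_ptload_program_headers_elf64(). */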
static int __init process_ptload_program_headers_elf32(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

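/*
 * Read and validate the Elf64 crash dump headers, merge the note
 * headers and build the list of dumpable memory regions.
 */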
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf64_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

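/* Elf32 counterpart of parse_crash_elf64_headers(). */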
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf32_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

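/* Top-level parser: dispatch on the ELF class of the core header. */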
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Write the new offsets for the vmcore list and update the total
 * vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	/* vfree() tolerates NULL, so no need to check the pointers first */
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

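/* Init function for vmcore module. */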
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;

	/* If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

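/* Cleanup function for vmcore module. */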
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcoredd entries */
	vmcore_free_device_dumps();
}