This source file includes the following definitions:
- mem_region_callback
- map_acpi_tables
- map_acpi_tables
- map_efi_systab
- free_transition_pgtable
- init_transition_pgtable
- alloc_pgt_page
- init_pgtable
- set_idt
- set_gdt
- load_segments
- arch_update_purgatory
- arch_update_purgatory
- machine_kexec_prepare
- machine_kexec_cleanup
- machine_kexec
- arch_crash_save_vmcoreinfo
- arch_kexec_kernel_image_load
- arch_kexec_apply_relocations_add
- kexec_mark_range
- kexec_mark_crashkres
- arch_kexec_protect_crashkres
- arch_kexec_unprotect_crashkres
- arch_kexec_post_alloc_pages
- arch_kexec_pre_free_pages
#define pr_fmt(fmt) "kexec: " fmt

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/vmalloc.h>
#include <linux/efi.h>

#include <asm/init.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io_apic.h>
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#ifdef CONFIG_ACPI
/*
 * Used while adding mappings for ACPI tables.
 * Can be reused when other iomem regions need to be mapped.
 */
struct init_pgtable_data {
        struct x86_mapping_info *info;
        pgd_t *level4p;
};

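/* walk_iomem_res_desc() callback: add one iomem resource region to the identity map. */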
static int mem_region_callback(struct resource *res, void *arg)
{
        struct init_pgtable_data *data = arg;
        unsigned long mstart, mend;

        mstart = res->start;
        mend = mstart + resource_size(res) - 1;

        return kernel_ident_mapping_init(data->info, data->level4p, mstart, mend);
}

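/*
 * Identity-map the ACPI tables (and the ACPI NV-Storage area, which may also
 * hold tables) so the kexec'd kernel can still parse them.
 */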
static int
map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p)
{
        struct init_pgtable_data data;
        unsigned long flags;
        int ret;

        data.info = info;
        data.level4p = level4p;
        flags = IORESOURCE_MEM | IORESOURCE_BUSY;

        ret = walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1,
                                  &data, mem_region_callback);
        if (ret && ret != -EINVAL)
                return ret;

        /* ACPI tables could be located in the ACPI Non-volatile Storage area */
        ret = walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1,
                                  &data, mem_region_callback);
        if (ret && ret != -EINVAL)
                return ret;

        return 0;
}
#else
static int map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p) { return 0; }
#endif

#ifdef CONFIG_KEXEC_FILE
const struct kexec_file_ops * const kexec_file_loaders[] = {
        &kexec_bzImage64_ops,
        NULL
};
#endif

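/*
 * Identity-map the EFI system table so the kexec'd kernel can access it;
 * it is not necessarily covered by pfn_mapped.
 */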
static int
map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p)
{
#ifdef CONFIG_EFI
        unsigned long mstart, mend;

        if (!efi_enabled(EFI_BOOT))
                return 0;

        mstart = (boot_params.efi_info.efi_systab |
                        ((u64)boot_params.efi_info.efi_systab_hi << 32));
        if (!mstart)
                return 0;

        if (efi_enabled(EFI_64BIT))
                mend = mstart + sizeof(efi_system_table_64_t);
        else
                mend = mstart + sizeof(efi_system_table_32_t);

        return kernel_ident_mapping_init(info, level4p, mstart, mend);
#endif
        return 0;
}

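/* Free the page-table pages allocated by init_transition_pgtable(). */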
static void free_transition_pgtable(struct kimage *image)
{
        free_page((unsigned long)image->arch.p4d);
        image->arch.p4d = NULL;
        free_page((unsigned long)image->arch.pud);
        image->arch.pud = NULL;
        free_page((unsigned long)image->arch.pmd);
        image->arch.pmd = NULL;
        free_page((unsigned long)image->arch.pte);
        image->arch.pte = NULL;
}

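/*
 * Map the control page at the virtual address of relocate_kernel(), so that
 * execution can continue at the same address once relocate_kernel() switches
 * to the identity-mapped page table.
 */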
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
        pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
        unsigned long vaddr, paddr;
        int result = -ENOMEM;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = (unsigned long)relocate_kernel;
        paddr = __pa(page_address(image->control_code_page) + PAGE_SIZE);
        pgd += pgd_index(vaddr);
        if (!pgd_present(*pgd)) {
                p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
                if (!p4d)
                        goto err;
                image->arch.p4d = p4d;
                set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
        }
        p4d = p4d_offset(pgd, vaddr);
        if (!p4d_present(*p4d)) {
                pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
                if (!pud)
                        goto err;
                image->arch.pud = pud;
                set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(p4d, vaddr);
        if (!pud_present(*pud)) {
                pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
                if (!pmd)
                        goto err;
                image->arch.pmd = pmd;
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, vaddr);
        if (!pmd_present(*pmd)) {
                pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
                if (!pte)
                        goto err;
                image->arch.pte = pte;
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
        }
        pte = pte_offset_kernel(pmd, vaddr);

        /* With SEV active, the control page must stay mapped encrypted. */
        if (sev_active())
                prot = PAGE_KERNEL_EXEC;

        set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
        return 0;
err:
        return result;
}

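/*
 * x86_mapping_info.alloc_pgt_page callback: hand out a zeroed control page,
 * so the page tables live in memory the new kernel image will not overwrite.
 */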
static void *alloc_pgt_page(void *data)
{
        struct kimage *image = (struct kimage *)data;
        struct page *page;
        void *p = NULL;

        page = kimage_alloc_control_pages(image, 0);
        if (page) {
                p = page_address(page);
                clear_page(p);
        }

        return p;
}

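/*
 * Build the identity-mapped page table used while relocating the new kernel:
 * all RAM the current kernel has mapped, every kexec segment, the EFI system
 * table and the ACPI tables.
 */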
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
        struct x86_mapping_info info = {
                .alloc_pgt_page = alloc_pgt_page,
                .context        = image,
                .page_flag      = __PAGE_KERNEL_LARGE_EXEC,
                .kernpg_flag    = _KERNPG_TABLE_NOENC,
        };
        unsigned long mstart, mend;
        pgd_t *level4p;
        int result;
        int i;

        level4p = (pgd_t *)__va(start_pgtable);
        clear_page(level4p);

        if (sev_active()) {
                info.page_flag   |= _PAGE_ENC;
                info.kernpg_flag |= _PAGE_ENC;
        }

        if (direct_gbpages)
                info.direct_gbpages = true;

        for (i = 0; i < nr_pfn_mapped; i++) {
                mstart = pfn_mapped[i].start << PAGE_SHIFT;
                mend   = pfn_mapped[i].end << PAGE_SHIFT;

                result = kernel_ident_mapping_init(&info,
                                                 level4p, mstart, mend);
                if (result)
                        return result;
        }

        /*
         * A segment's memory range can lie outside 0..max_pfn, for example
         * when jumping back to the original kernel from a kexec'd kernel,
         * or when the first kernel was booted with a restricted memory map
         * and the second kernel is loaded outside that range.
         */
        for (i = 0; i < image->nr_segments; i++) {
                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;

                result = kernel_ident_mapping_init(&info,
                                                 level4p, mstart, mend);

                if (result)
                        return result;
        }

        /*
         * Prepare the EFI system table and ACPI tables for the kexec kernel,
         * since they are not covered by pfn_mapped.
         */
        result = map_efi_systab(&info, level4p);
        if (result)
                return result;

        result = map_acpi_tables(&info, level4p);
        if (result)
                return result;

        return init_transition_pgtable(image, level4p);
}

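/*
 * Helpers for the final transition: load a new IDT/GDT and reload the data
 * segment registers. machine_kexec() uses them to hand over a well-defined
 * state before jumping to the relocation code.
 */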
static void set_idt(void *newidt, u16 limit)
{
        struct desc_ptr curidt;

        /* x86-64 supports unaligned loads & stores */
        curidt.size    = limit;
        curidt.address = (unsigned long)newidt;

        __asm__ __volatile__ (
                "lidtq %0\n"
                : : "m" (curidt)
                );
}

static void set_gdt(void *newgdt, u16 limit)
{
        struct desc_ptr curgdt;

        /* x86-64 supports unaligned loads & stores */
        curgdt.size    = limit;
        curgdt.address = (unsigned long)newgdt;

        __asm__ __volatile__ (
                "lgdtq %0\n"
                : : "m" (curgdt)
                );
}

static void load_segments(void)
{
        __asm__ __volatile__ (
                "\tmovl %0,%%ds\n"
                "\tmovl %0,%%es\n"
                "\tmovl %0,%%ss\n"
                "\tmovl %0,%%fs\n"
                "\tmovl %0,%%gs\n"
                : : "a" (__KERNEL_DS) : "memory"
                );
}

#ifdef CONFIG_KEXEC_FILE
/* Update purgatory as needed after kexec_load/kexec_file_load */
static int arch_update_purgatory(struct kimage *image)
{
        int ret = 0;

        if (!image->file_mode)
                return 0;

        /* Set up copying of the backup region */
        if (image->type == KEXEC_TYPE_CRASH) {
                ret = kexec_purgatory_get_set_symbol(image,
                                "purgatory_backup_dest",
                                &image->arch.backup_load_addr,
                                sizeof(image->arch.backup_load_addr), 0);
                if (ret)
                        return ret;

                ret = kexec_purgatory_get_set_symbol(image,
                                "purgatory_backup_src",
                                &image->arch.backup_src_start,
                                sizeof(image->arch.backup_src_start), 0);
                if (ret)
                        return ret;

                ret = kexec_purgatory_get_set_symbol(image,
                                "purgatory_backup_sz",
                                &image->arch.backup_src_sz,
                                sizeof(image->arch.backup_src_sz), 0);
                if (ret)
                        return ret;
        }

        return ret;
}
#else /* !CONFIG_KEXEC_FILE */
static inline int arch_update_purgatory(struct kimage *image)
{
        return 0;
}
#endif /* CONFIG_KEXEC_FILE */

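/* Arch hook run at image-load time: build the page tables and patch purgatory. */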
int machine_kexec_prepare(struct kimage *image)
{
        unsigned long start_pgtable;
        int result;

        /* Calculate the offsets */
        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

        /* Set up the identity-mapped 64-bit page table */
        result = init_pgtable(image, start_pgtable);
        if (result)
                return result;

        /* Update purgatory as needed */
        result = arch_update_purgatory(image);
        if (result)
                return result;

        return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
        free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
        unsigned long page_list[PAGES_NR];
        void *control_page;
        int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
        if (image->preserve_context)
                save_processor_state();
#endif

        save_ftrace_enabled = __ftrace_enabled_save();

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();
        hw_breakpoint_disable();

        if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
                /*
                 * We need to put APICs in legacy mode so that we can
                 * get timer interrupts in the second kernel. kexec/kdump
                 * paths already have calls to restore_boot_irq_mode() in
                 * one form or another, but the preserve_context path does
                 * not go through them, so do it here.
                 */
                clear_IO_APIC();
                restore_boot_irq_mode();
#endif
        }

        control_page = page_address(image->control_code_page) + PAGE_SIZE;
        memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

        page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
        page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
        page_list[PA_TABLE_PAGE] =
          (unsigned long)__pa(page_address(image->control_code_page));

        if (image->type == KEXEC_TYPE_DEFAULT)
                page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
                                                << PAGE_SHIFT);

        /*
         * The segment registers are funny things: they have both a
         * visible and an invisible part. Whenever the visible part is
         * set to a specific selector, the invisible part is loaded
         * from a table in memory. At no other time is the descriptor
         * table in memory accessed.
         *
         * Take advantage of this here by force-loading the segments
         * before the GDT is zapped with an invalid value.
         */
        load_segments();

        /*
         * The GDT & IDT are now invalid. If you want to load them you
         * must set up your own IDT & GDT.
         */
        set_gdt(phys_to_virt(0), 0);
        set_idt(phys_to_virt(0), 0);

        /* Now call it */
        image->start = relocate_kernel((unsigned long)image->head,
                                       (unsigned long)page_list,
                                       image->start,
                                       image->preserve_context,
                                       sme_active());

#ifdef CONFIG_KEXEC_JUMP
        if (image->preserve_context)
                restore_processor_state();
#endif

        __ftrace_enabled_restore(save_ftrace_enabled);
}

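/* Record x86_64-specific symbols and values in vmcoreinfo for dump tools. */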
void arch_crash_save_vmcoreinfo(void)
{
        u64 sme_mask = sme_me_mask;

        VMCOREINFO_NUMBER(phys_base);
        VMCOREINFO_SYMBOL(init_top_pgt);
        vmcoreinfo_append_str("NUMBER(pgtable_l5_enabled)=%d\n",
                              pgtable_l5_enabled());

#ifdef CONFIG_NUMA
        VMCOREINFO_SYMBOL(node_data);
        VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
                              kaslr_offset());
        VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
        VMCOREINFO_NUMBER(sme_mask);
}

/* arch-dependent functionality related to the kexec file-based syscall */

#ifdef CONFIG_KEXEC_FILE
void *arch_kexec_kernel_image_load(struct kimage *image)
{
        vfree(image->arch.elf_headers);
        image->arch.elf_headers = NULL;

        if (!image->fops || !image->fops->load)
                return ERR_PTR(-ENOEXEC);

        return image->fops->load(image, image->kernel_buf,
                                 image->kernel_buf_len, image->initrd_buf,
                                 image->initrd_buf_len, image->cmdline_buf,
                                 image->cmdline_buf_len);
}

/*
 * Apply purgatory relocations.
 *
 * @pi:         Purgatory to be relocated.
 * @section:    Section the relocations apply to.
 * @relsec:     Section containing RELAs.
 * @symtabsec:  Corresponding symtab.
 */
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
                                     Elf_Shdr *section, const Elf_Shdr *relsec,
                                     const Elf_Shdr *symtabsec)
{
        unsigned int i;
        Elf64_Rela *rel;
        Elf64_Sym *sym;
        void *location;
        unsigned long address, sec_base, value;
        const char *strtab, *name, *shstrtab;
        const Elf_Shdr *sechdrs;

        /* String & section header string table */
        sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
        strtab = (char *)pi->ehdr + sechdrs[symtabsec->sh_link].sh_offset;
        shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;

        rel = (void *)pi->ehdr + relsec->sh_offset;

        pr_debug("Applying relocate section %s to %u\n",
                 shstrtab + relsec->sh_name, relsec->sh_info);

        for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) {

                /*
                 * rel[i].r_offset contains the byte offset from the
                 * beginning of the section to the storage unit affected.
                 *
                 * This is the location to update. It lives in the temporary
                 * buffer where the section is currently loaded; the section
                 * is finally loaded to a different address later, pointed to
                 * by ->sh_addr. kexec takes care of moving it
                 * (kexec_load_segment()).
                 */
                location = pi->purgatory_buf;
                location += section->sh_offset;
                location += rel[i].r_offset;

                /* Final address of the location */
                address = section->sh_addr + rel[i].r_offset;

                /*
                 * rel[i].r_info contains the symbol table index w.r.t. which
                 * the relocation must be made and the type of relocation to
                 * apply. The ELF64_R_SYM() and ELF64_R_TYPE() macros extract
                 * these, respectively.
                 */
                sym = (void *)pi->ehdr + symtabsec->sh_offset;
                sym += ELF64_R_SYM(rel[i].r_info);

                if (sym->st_name)
                        name = strtab + sym->st_name;
                else
                        name = shstrtab + sechdrs[sym->st_shndx].sh_name;

                pr_debug("Symbol: %s info: %02x shndx: %02x value=%llx size: %llx\n",
                         name, sym->st_info, sym->st_shndx, sym->st_value,
                         sym->st_size);

                if (sym->st_shndx == SHN_UNDEF) {
                        pr_err("Undefined symbol: %s\n", name);
                        return -ENOEXEC;
                }

                if (sym->st_shndx == SHN_COMMON) {
                        pr_err("symbol '%s' in common section\n", name);
                        return -ENOEXEC;
                }

                if (sym->st_shndx == SHN_ABS)
                        sec_base = 0;
                else if (sym->st_shndx >= pi->ehdr->e_shnum) {
                        pr_err("Invalid section %d for symbol %s\n",
                               sym->st_shndx, name);
                        return -ENOEXEC;
                } else
                        sec_base = pi->sechdrs[sym->st_shndx].sh_addr;

                value = sym->st_value;
                value += sec_base;
                value += rel[i].r_addend;

                switch (ELF64_R_TYPE(rel[i].r_info)) {
                case R_X86_64_NONE:
                        break;
                case R_X86_64_64:
                        *(u64 *)location = value;
                        break;
                case R_X86_64_32:
                        *(u32 *)location = value;
                        if (value != *(u32 *)location)
                                goto overflow;
                        break;
                case R_X86_64_32S:
                        *(s32 *)location = value;
                        if ((s64)value != *(s32 *)location)
                                goto overflow;
                        break;
                case R_X86_64_PC32:
                case R_X86_64_PLT32:
                        value -= (u64)address;
                        *(u32 *)location = value;
                        break;
                default:
                        pr_err("Unknown rela relocation: %llu\n",
                               ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }
        }
        return 0;

overflow:
        pr_err("Overflow in relocation type %d value 0x%lx\n",
               (int)ELF64_R_TYPE(rel[i].r_info), value);
        return -ENOEXEC;
}
#endif /* CONFIG_KEXEC_FILE */

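/* Set the [start, end] physical range read-only, or read-write again. */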
static int
kexec_mark_range(unsigned long start, unsigned long end, bool protect)
{
        struct page *page;
        unsigned int nr_pages;

        /*
         * For a physical range [start, end]: skip the unassigned crashk
         * resource, whose "end" member is zero.
         */
        if (!end || start > end)
                return 0;

        page = pfn_to_page(start >> PAGE_SHIFT);
        nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
        if (protect)
                return set_pages_ro(page, nr_pages);
        else
                return set_pages_rw(page, nr_pages);
}

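/*
 * Write-protect (or unprotect) the whole crash kernel region, except for the
 * control code page, which crash_kexec() must still be able to use.
 */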
static void kexec_mark_crashkres(bool protect)
{
        unsigned long control;

        kexec_mark_range(crashk_low_res.start, crashk_low_res.end, protect);

        /* Don't touch the control code page used in crash_kexec(). */
        control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
        /* The control code page is located in the 2nd page. */
        kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect);
        control += KEXEC_CONTROL_PAGE_SIZE;
        kexec_mark_range(control, crashk_res.end, protect);
}

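/*
 * kexec core hooks: the crash kernel region is kept read-only while a crash
 * image is loaded, to protect it from accidental corruption.
 */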
void arch_kexec_protect_crashkres(void)
{
        kexec_mark_crashkres(true);
}

void arch_kexec_unprotect_crashkres(void)
{
        kexec_mark_crashkres(false);
}

/*
 * Under SME, the kexec'd kernel's pages must be mapped decrypted, since the
 * new kernel initially accesses them unencrypted. Under SEV, guest memory
 * must remain encrypted, so the pages are left untouched.
 */
int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
{
        /* Under SEV, the pages stay encrypted */
        if (sev_active())
                return 0;

        /*
         * If SME is active we need to be sure that the kexec pages are
         * not encrypted, because when we boot to the new kernel the
         * pages won't be accessed encrypted (initially).
         */
        return set_memory_decrypted((unsigned long)vaddr, pages);
}

void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
{
        /* Under SEV, the pages were never decrypted, so there is nothing to undo */
        if (sev_active())
                return;

        /*
         * If SME is active we need to reset the pages back to an
         * encrypted mapping before freeing them.
         */
        set_memory_encrypted((unsigned long)vaddr, pages);
}