root/arch/x86/platform/efi/efi.c

DEFINITIONS

This source file includes the following definitions:
  1. setup_add_efi_memmap
  2. phys_efi_set_virtual_address_map
  3. efi_find_mirror
  4. do_add_efi_memmap
  5. efi_memblock_x86_reserve_range
  6. efi_memmap_entry_valid
  7. efi_clean_memmap
  8. efi_print_memmap
  9. efi_systab_init
  10. efi_runtime_init32
  11. efi_runtime_init64
  12. efi_runtime_init
  13. efi_init
  14. efi_set_executable
  15. runtime_code_page_mkexec
  16. efi_memory_uc
  17. old_map_region
  18. efi_merge_regions
  19. get_systab_virt_addr
  20. realloc_pages
  21. efi_map_next_entry_reverse
  22. efi_map_next_entry
  23. should_map_region
  24. efi_map_regions
  25. kexec_enter_virtual_mode
  26. __efi_enter_virtual_mode
  27. efi_enter_virtual_mode
  28. arch_parse_efi_cmdline
  29. efi_is_table_address

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Common EFI (Extensible Firmware Interface) support functions
   4  * Based on Extensible Firmware Interface Specification version 1.0
   5  *
   6  * Copyright (C) 1999 VA Linux Systems
   7  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
   8  * Copyright (C) 1999-2002 Hewlett-Packard Co.
   9  *      David Mosberger-Tang <davidm@hpl.hp.com>
  10  *      Stephane Eranian <eranian@hpl.hp.com>
  11  * Copyright (C) 2005-2008 Intel Co.
  12  *      Fenghua Yu <fenghua.yu@intel.com>
  13  *      Bibo Mao <bibo.mao@intel.com>
  14  *      Chandramouli Narayanan <mouli@linux.intel.com>
  15  *      Huang Ying <ying.huang@intel.com>
  16  * Copyright (C) 2013 SuSE Labs
  17  *      Borislav Petkov <bp@suse.de> - runtime services VA mapping
  18  *
  19  * Copied from efi_32.c to eliminate the duplicated code between EFI
  20  * 32/64 support code. --ying 2007-10-26
  21  *
   22  * EFI Runtime Services are not yet implemented, as EFI only
  23  * supports physical mode addressing on SoftSDV. This is to be fixed
  24  * in a future version.  --drummond 1999-07-20
  25  *
  26  * Implemented EFI runtime services and virtual mode calls.  --davidm
  27  *
  28  * Goutham Rao: <goutham.rao@intel.com>
  29  *      Skip non-WB memory and ignore empty memory ranges.
  30  */
  31 
  32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33 
  34 #include <linux/kernel.h>
  35 #include <linux/init.h>
  36 #include <linux/efi.h>
  37 #include <linux/efi-bgrt.h>
  38 #include <linux/export.h>
  39 #include <linux/memblock.h>
  40 #include <linux/slab.h>
  41 #include <linux/spinlock.h>
  42 #include <linux/uaccess.h>
  43 #include <linux/time.h>
  44 #include <linux/io.h>
  45 #include <linux/reboot.h>
  46 #include <linux/bcd.h>
  47 
  48 #include <asm/setup.h>
  49 #include <asm/efi.h>
  50 #include <asm/e820/api.h>
  51 #include <asm/time.h>
  52 #include <asm/set_memory.h>
  53 #include <asm/tlbflush.h>
  54 #include <asm/x86_init.h>
  55 #include <asm/uv/uv.h>
  56 
  57 static struct efi efi_phys __initdata;
  58 static efi_system_table_t efi_systab __initdata;
  59 
  60 static efi_config_table_type_t arch_tables[] __initdata = {
  61 #ifdef CONFIG_X86_UV
  62         {UV_SYSTEM_TABLE_GUID, "UVsystab", &uv_systab_phys},
  63 #endif
  64         {NULL_GUID, NULL, NULL},
  65 };
  66 
  67 static const unsigned long * const efi_tables[] = {
  68         &efi.mps,
  69         &efi.acpi,
  70         &efi.acpi20,
  71         &efi.smbios,
  72         &efi.smbios3,
  73         &efi.boot_info,
  74         &efi.hcdp,
  75         &efi.uga,
  76 #ifdef CONFIG_X86_UV
  77         &uv_systab_phys,
  78 #endif
  79         &efi.fw_vendor,
  80         &efi.runtime,
  81         &efi.config_table,
  82         &efi.esrt,
  83         &efi.properties_table,
  84         &efi.mem_attr_table,
  85 #ifdef CONFIG_EFI_RCI2_TABLE
  86         &rci2_table_phys,
  87 #endif
  88         &efi.tpm_log,
  89         &efi.tpm_final_log,
  90 };
  91 
  92 u64 efi_setup;          /* efi setup_data physical address */
  93 
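      /* "add_efi_memmap" kernel parameter: also feed the EFI memory map into e820. */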
  94 static int add_efi_memmap __initdata;
  95 static int __init setup_add_efi_memmap(char *arg)
  96 {
  97         add_efi_memmap = 1;
  98         return 0;
  99 }
 100 early_param("add_efi_memmap", setup_add_efi_memmap);
 101 
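      /*
       * Call SetVirtualAddressMap() through its physical-address entry point,
       * using the temporary 1:1 mappings set up by efi_call_phys_prolog() and
       * with interrupts disabled around the firmware call.
       */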
 102 static efi_status_t __init phys_efi_set_virtual_address_map(
 103         unsigned long memory_map_size,
 104         unsigned long descriptor_size,
 105         u32 descriptor_version,
 106         efi_memory_desc_t *virtual_map)
 107 {
 108         efi_status_t status;
 109         unsigned long flags;
 110         pgd_t *save_pgd;
 111 
 112         save_pgd = efi_call_phys_prolog();
 113         if (!save_pgd)
 114                 return EFI_ABORTED;
 115 
 116         /* Disable interrupts around EFI calls: */
 117         local_irq_save(flags);
 118         status = efi_call_phys(efi_phys.set_virtual_address_map,
 119                                memory_map_size, descriptor_size,
 120                                descriptor_version, virtual_map);
 121         local_irq_restore(flags);
 122 
 123         efi_call_phys_epilog(save_pgd);
 124 
 125         return status;
 126 }
 127 
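      /*
       * Scan the EFI memory map for EFI_MEMORY_MORE_RELIABLE regions, mark
       * them as mirrored in memblock and report the mirrored vs. total size.
       */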
 128 void __init efi_find_mirror(void)
 129 {
 130         efi_memory_desc_t *md;
 131         u64 mirror_size = 0, total_size = 0;
 132 
 133         for_each_efi_memory_desc(md) {
 134                 unsigned long long start = md->phys_addr;
 135                 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 136 
 137                 total_size += size;
 138                 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
 139                         memblock_mark_mirror(start, size);
 140                         mirror_size += size;
 141                 }
 142         }
 143         if (mirror_size)
 144                 pr_info("Memory: %lldM/%lldM mirrored memory\n",
 145                         mirror_size>>20, total_size>>20);
 146 }
 147 
 148 /*
 149  * Tell the kernel about the EFI memory map.  This might include
 150  * more than the max 128 entries that can fit in the e820 legacy
 151  * (zeropage) memory map.
 152  */
 153 
 154 static void __init do_add_efi_memmap(void)
 155 {
 156         efi_memory_desc_t *md;
 157 
 158         for_each_efi_memory_desc(md) {
 159                 unsigned long long start = md->phys_addr;
 160                 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 161                 int e820_type;
 162 
 163                 switch (md->type) {
 164                 case EFI_LOADER_CODE:
 165                 case EFI_LOADER_DATA:
 166                 case EFI_BOOT_SERVICES_CODE:
 167                 case EFI_BOOT_SERVICES_DATA:
 168                 case EFI_CONVENTIONAL_MEMORY:
 169                         if (md->attribute & EFI_MEMORY_WB)
 170                                 e820_type = E820_TYPE_RAM;
 171                         else
 172                                 e820_type = E820_TYPE_RESERVED;
 173                         break;
 174                 case EFI_ACPI_RECLAIM_MEMORY:
 175                         e820_type = E820_TYPE_ACPI;
 176                         break;
 177                 case EFI_ACPI_MEMORY_NVS:
 178                         e820_type = E820_TYPE_NVS;
 179                         break;
 180                 case EFI_UNUSABLE_MEMORY:
 181                         e820_type = E820_TYPE_UNUSABLE;
 182                         break;
 183                 case EFI_PERSISTENT_MEMORY:
 184                         e820_type = E820_TYPE_PMEM;
 185                         break;
 186                 default:
 187                         /*
 188                          * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
 189                          * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
 190                          * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
 191                          */
 192                         e820_type = E820_TYPE_RESERVED;
 193                         break;
 194                 }
 195                 e820__range_add(start, size, e820_type);
 196         }
 197         e820__update_table(e820_table);
 198 }
 199 
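      /*
       * Pick up the EFI memory map passed in boot_params, register it with
       * the early memmap code and reserve it in memblock so it is not
       * clobbered before it can be remapped. With add_efi_memmap, its
       * entries are also merged into the e820 table.
       */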
 200 int __init efi_memblock_x86_reserve_range(void)
 201 {
 202         struct efi_info *e = &boot_params.efi_info;
 203         struct efi_memory_map_data data;
 204         phys_addr_t pmap;
 205         int rv;
 206 
 207         if (efi_enabled(EFI_PARAVIRT))
 208                 return 0;
 209 
 210 #ifdef CONFIG_X86_32
 211         /* Can't handle data above 4GB at this time */
 212         if (e->efi_memmap_hi) {
 213                 pr_err("Memory map is above 4GB, disabling EFI.\n");
 214                 return -EINVAL;
 215         }
 216         pmap =  e->efi_memmap;
 217 #else
 218         pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
 219 #endif
 220         data.phys_map           = pmap;
 221         data.size               = e->efi_memmap_size;
 222         data.desc_size          = e->efi_memdesc_size;
 223         data.desc_version       = e->efi_memdesc_version;
 224 
 225         rv = efi_memmap_init_early(&data);
 226         if (rv)
 227                 return rv;
 228 
 229         if (add_efi_memmap)
 230                 do_add_efi_memmap();
 231 
 232         WARN(efi.memmap.desc_version != 1,
 233              "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
 234              efi.memmap.desc_version);
 235 
 236         memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
 237 
 238         return 0;
 239 }
 240 
 241 #define OVERFLOW_ADDR_SHIFT     (64 - EFI_PAGE_SHIFT)
 242 #define OVERFLOW_ADDR_MASK      (U64_MAX << OVERFLOW_ADDR_SHIFT)
 243 #define U64_HIGH_BIT            (~(U64_MAX >> 1))
 244 
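      /*
       * Sanity-check one memory map entry: reject zero-sized descriptors and
       * descriptors whose page count makes the end address wrap around 2^64,
       * logging the offending entry.
       */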
 245 static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
 246 {
 247         u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
 248         u64 end_hi = 0;
 249         char buf[64];
 250 
 251         if (md->num_pages == 0) {
 252                 end = 0;
 253         } else if (md->num_pages > EFI_PAGES_MAX ||
 254                    EFI_PAGES_MAX - md->num_pages <
 255                    (md->phys_addr >> EFI_PAGE_SHIFT)) {
 256                 end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
 257                         >> OVERFLOW_ADDR_SHIFT;
 258 
 259                 if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
 260                         end_hi += 1;
 261         } else {
 262                 return true;
 263         }
 264 
 265         pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
 266 
 267         if (end_hi) {
 268                 pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
 269                         i, efi_md_typeattr_format(buf, sizeof(buf), md),
 270                         md->phys_addr, end_hi, end);
 271         } else {
 272                 pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
 273                         i, efi_md_typeattr_format(buf, sizeof(buf), md),
 274                         md->phys_addr, end);
 275         }
 276         return false;
 277 }
 278 
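      /*
       * Compact the EFI memory map in place, dropping entries that fail
       * efi_memmap_entry_valid(), and install the shrunken map.
       */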
 279 static void __init efi_clean_memmap(void)
 280 {
 281         efi_memory_desc_t *out = efi.memmap.map;
 282         const efi_memory_desc_t *in = out;
 283         const efi_memory_desc_t *end = efi.memmap.map_end;
 284         int i, n_removal;
 285 
 286         for (i = n_removal = 0; in < end; i++) {
 287                 if (efi_memmap_entry_valid(in, i)) {
 288                         if (out != in)
 289                                 memcpy(out, in, efi.memmap.desc_size);
 290                         out = (void *)out + efi.memmap.desc_size;
 291                 } else {
 292                         n_removal++;
 293                 }
 294                 in = (void *)in + efi.memmap.desc_size;
 295         }
 296 
 297         if (n_removal > 0) {
 298                 u64 size = efi.memmap.nr_map - n_removal;
 299 
 300                 pr_warn("Removing %d invalid memory map entries.\n", n_removal);
 301                 efi_memmap_install(efi.memmap.phys_map, size);
 302         }
 303 }
 304 
 305 void __init efi_print_memmap(void)
 306 {
 307         efi_memory_desc_t *md;
 308         int i = 0;
 309 
 310         for_each_efi_memory_desc(md) {
 311                 char buf[64];
 312 
 313                 pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
 314                         i++, efi_md_typeattr_format(buf, sizeof(buf), md),
 315                         md->phys_addr,
 316                         md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
 317                         (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
 318         }
 319 }
 320 
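      /*
       * Copy the firmware's 32-bit or 64-bit system table (or the pointers
       * handed over via kexec setup_data) into the generic efi_systab and
       * check its signature and revision.
       */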
 321 static int __init efi_systab_init(void *phys)
 322 {
 323         if (efi_enabled(EFI_64BIT)) {
 324                 efi_system_table_64_t *systab64;
 325                 struct efi_setup_data *data = NULL;
 326                 u64 tmp = 0;
 327 
 328                 if (efi_setup) {
 329                         data = early_memremap(efi_setup, sizeof(*data));
 330                         if (!data)
 331                                 return -ENOMEM;
 332                 }
 333                 systab64 = early_memremap((unsigned long)phys,
 334                                          sizeof(*systab64));
 335                 if (systab64 == NULL) {
 336                         pr_err("Couldn't map the system table!\n");
 337                         if (data)
 338                                 early_memunmap(data, sizeof(*data));
 339                         return -ENOMEM;
 340                 }
 341 
 342                 efi_systab.hdr = systab64->hdr;
 343                 efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
 344                                               systab64->fw_vendor;
 345                 tmp |= data ? data->fw_vendor : systab64->fw_vendor;
 346                 efi_systab.fw_revision = systab64->fw_revision;
 347                 efi_systab.con_in_handle = systab64->con_in_handle;
 348                 tmp |= systab64->con_in_handle;
 349                 efi_systab.con_in = systab64->con_in;
 350                 tmp |= systab64->con_in;
 351                 efi_systab.con_out_handle = systab64->con_out_handle;
 352                 tmp |= systab64->con_out_handle;
 353                 efi_systab.con_out = systab64->con_out;
 354                 tmp |= systab64->con_out;
 355                 efi_systab.stderr_handle = systab64->stderr_handle;
 356                 tmp |= systab64->stderr_handle;
 357                 efi_systab.stderr = systab64->stderr;
 358                 tmp |= systab64->stderr;
 359                 efi_systab.runtime = data ?
 360                                      (void *)(unsigned long)data->runtime :
 361                                      (void *)(unsigned long)systab64->runtime;
 362                 tmp |= data ? data->runtime : systab64->runtime;
 363                 efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
 364                 tmp |= systab64->boottime;
 365                 efi_systab.nr_tables = systab64->nr_tables;
 366                 efi_systab.tables = data ? (unsigned long)data->tables :
 367                                            systab64->tables;
 368                 tmp |= data ? data->tables : systab64->tables;
 369 
 370                 early_memunmap(systab64, sizeof(*systab64));
 371                 if (data)
 372                         early_memunmap(data, sizeof(*data));
 373 #ifdef CONFIG_X86_32
 374                 if (tmp >> 32) {
 375                         pr_err("EFI data located above 4GB, disabling EFI.\n");
 376                         return -EINVAL;
 377                 }
 378 #endif
 379         } else {
 380                 efi_system_table_32_t *systab32;
 381 
 382                 systab32 = early_memremap((unsigned long)phys,
 383                                          sizeof(*systab32));
 384                 if (systab32 == NULL) {
 385                         pr_err("Couldn't map the system table!\n");
 386                         return -ENOMEM;
 387                 }
 388 
 389                 efi_systab.hdr = systab32->hdr;
 390                 efi_systab.fw_vendor = systab32->fw_vendor;
 391                 efi_systab.fw_revision = systab32->fw_revision;
 392                 efi_systab.con_in_handle = systab32->con_in_handle;
 393                 efi_systab.con_in = systab32->con_in;
 394                 efi_systab.con_out_handle = systab32->con_out_handle;
 395                 efi_systab.con_out = systab32->con_out;
 396                 efi_systab.stderr_handle = systab32->stderr_handle;
 397                 efi_systab.stderr = systab32->stderr;
 398                 efi_systab.runtime = (void *)(unsigned long)systab32->runtime;
 399                 efi_systab.boottime = (void *)(unsigned long)systab32->boottime;
 400                 efi_systab.nr_tables = systab32->nr_tables;
 401                 efi_systab.tables = systab32->tables;
 402 
 403                 early_memunmap(systab32, sizeof(*systab32));
 404         }
 405 
 406         efi.systab = &efi_systab;
 407 
 408         /*
 409          * Verify the EFI Table
 410          */
 411         if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
 412                 pr_err("System table signature incorrect!\n");
 413                 return -EINVAL;
 414         }
 415         if ((efi.systab->hdr.revision >> 16) == 0)
 416                 pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
 417                        efi.systab->hdr.revision >> 16,
 418                        efi.systab->hdr.revision & 0xffff);
 419 
 420         return 0;
 421 }
 422 
 423 static int __init efi_runtime_init32(void)
 424 {
 425         efi_runtime_services_32_t *runtime;
 426 
 427         runtime = early_memremap((unsigned long)efi.systab->runtime,
 428                         sizeof(efi_runtime_services_32_t));
 429         if (!runtime) {
 430                 pr_err("Could not map the runtime service table!\n");
 431                 return -ENOMEM;
 432         }
 433 
 434         /*
 435          * We will only need *early* access to the SetVirtualAddressMap
 436          * EFI runtime service. All other runtime services will be called
 437          * via the virtual mapping.
 438          */
 439         efi_phys.set_virtual_address_map =
 440                         (efi_set_virtual_address_map_t *)
 441                         (unsigned long)runtime->set_virtual_address_map;
 442         early_memunmap(runtime, sizeof(efi_runtime_services_32_t));
 443 
 444         return 0;
 445 }
 446 
 447 static int __init efi_runtime_init64(void)
 448 {
 449         efi_runtime_services_64_t *runtime;
 450 
 451         runtime = early_memremap((unsigned long)efi.systab->runtime,
 452                         sizeof(efi_runtime_services_64_t));
 453         if (!runtime) {
 454                 pr_err("Could not map the runtime service table!\n");
 455                 return -ENOMEM;
 456         }
 457 
 458         /*
 459          * We will only need *early* access to the SetVirtualAddressMap
 460          * EFI runtime service. All other runtime services will be called
 461          * via the virtual mapping.
 462          */
 463         efi_phys.set_virtual_address_map =
 464                         (efi_set_virtual_address_map_t *)
 465                         (unsigned long)runtime->set_virtual_address_map;
 466         early_memunmap(runtime, sizeof(efi_runtime_services_64_t));
 467 
 468         return 0;
 469 }
 470 
 471 static int __init efi_runtime_init(void)
 472 {
 473         int rv;
 474 
  475         /*
  476          * Check out the runtime services table. We need to map the
  477          * runtime services table so that we can grab the physical
  478          * address of the SetVirtualAddressMap() EFI runtime function,
  479          * which is needed to switch the firmware into virtual mode.
  480          *
  481          * When EFI_PARAVIRT is in force we cannot map the runtime
  482          * services memory region because we have no direct access to it.
  483          * However, runtime services remain available through proxy
  484          * functions (e.g. the Xen dom0 EFI implementation issues a
  485          * special hypercall which executes the relevant EFI function),
  486          * which is why they are always enabled here.
  487          */
 488 
 489         if (!efi_enabled(EFI_PARAVIRT)) {
 490                 if (efi_enabled(EFI_64BIT))
 491                         rv = efi_runtime_init64();
 492                 else
 493                         rv = efi_runtime_init32();
 494 
 495                 if (rv)
 496                         return rv;
 497         }
 498 
 499         set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 500 
 501         return 0;
 502 }
 503 
 504 void __init efi_init(void)
 505 {
 506         efi_char16_t *c16;
 507         char vendor[100] = "unknown";
 508         int i = 0;
 509 
 510 #ifdef CONFIG_X86_32
 511         if (boot_params.efi_info.efi_systab_hi ||
 512             boot_params.efi_info.efi_memmap_hi) {
 513                 pr_info("Table located above 4GB, disabling EFI.\n");
 514                 return;
 515         }
 516         efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
 517 #else
 518         efi_phys.systab = (efi_system_table_t *)
 519                           (boot_params.efi_info.efi_systab |
 520                           ((__u64)boot_params.efi_info.efi_systab_hi<<32));
 521 #endif
 522 
 523         if (efi_systab_init(efi_phys.systab))
 524                 return;
 525 
 526         efi.config_table = (unsigned long)efi.systab->tables;
 527         efi.fw_vendor    = (unsigned long)efi.systab->fw_vendor;
 528         efi.runtime      = (unsigned long)efi.systab->runtime;
 529 
 530         /*
 531          * Show what we know for posterity
 532          */
 533         c16 = early_memremap_ro(efi.systab->fw_vendor,
 534                                 sizeof(vendor) * sizeof(efi_char16_t));
 535         if (c16) {
 536                 for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
 537                         vendor[i] = c16[i];
 538                 vendor[i] = '\0';
 539                 early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
 540         } else {
 541                 pr_err("Could not map the firmware vendor!\n");
 542         }
 543 
 544         pr_info("EFI v%u.%.02u by %s\n",
 545                 efi.systab->hdr.revision >> 16,
 546                 efi.systab->hdr.revision & 0xffff, vendor);
 547 
 548         if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
 549                 return;
 550 
 551         if (efi_config_init(arch_tables))
 552                 return;
 553 
 554         /*
 555          * Note: We currently don't support runtime services on an EFI
 556          * that doesn't match the kernel 32/64-bit mode.
 557          */
 558 
 559         if (!efi_runtime_supported())
 560                 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
 561         else {
 562                 if (efi_runtime_disabled() || efi_runtime_init()) {
 563                         efi_memmap_unmap();
 564                         return;
 565                 }
 566         }
 567 
 568         efi_clean_memmap();
 569 
 570         if (efi_enabled(EFI_DBG))
 571                 efi_print_memmap();
 572 }
 573 
 574 void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
 575 {
 576         u64 addr, npages;
 577 
 578         addr = md->virt_addr;
 579         npages = md->num_pages;
 580 
 581         memrange_efi_to_native(&addr, &npages);
 582 
 583         if (executable)
 584                 set_memory_x(addr, npages);
 585         else
 586                 set_memory_nx(addr, npages);
 587 }
 588 
 589 void __init runtime_code_page_mkexec(void)
 590 {
 591         efi_memory_desc_t *md;
 592 
 593         /* Make EFI runtime service code area executable */
 594         for_each_efi_memory_desc(md) {
 595                 if (md->type != EFI_RUNTIME_SERVICES_CODE)
 596                         continue;
 597 
 598                 efi_set_executable(md, true);
 599         }
 600 }
 601 
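      /*
       * Mark the given range uncached, rounding the size up to whole
       * EFI pages.
       */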
 602 void __init efi_memory_uc(u64 addr, unsigned long size)
 603 {
 604         unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
 605         u64 npages;
 606 
 607         npages = round_up(size, page_shift) / page_shift;
 608         memrange_efi_to_native(&addr, &npages);
 609         set_memory_uc(addr, npages);
 610 }
 611 
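      /*
       * efi=old_map style mapping: reuse the kernel direct mapping when the
       * region is already mapped (forcing it uncached if it is not WB),
       * otherwise ioremap it, and store the resulting virtual address in the
       * descriptor.
       */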
 612 void __init old_map_region(efi_memory_desc_t *md)
 613 {
 614         u64 start_pfn, end_pfn, end;
 615         unsigned long size;
 616         void *va;
 617 
 618         start_pfn = PFN_DOWN(md->phys_addr);
 619         size      = md->num_pages << PAGE_SHIFT;
 620         end       = md->phys_addr + size;
 621         end_pfn   = PFN_UP(end);
 622 
 623         if (pfn_range_is_mapped(start_pfn, end_pfn)) {
 624                 va = __va(md->phys_addr);
 625 
 626                 if (!(md->attribute & EFI_MEMORY_WB))
 627                         efi_memory_uc((u64)(unsigned long)va, size);
 628         } else
 629                 va = efi_ioremap(md->phys_addr, size,
 630                                  md->type, md->attribute);
 631 
 632         md->virt_addr = (u64) (unsigned long) va;
 633         if (!va)
 634                 pr_err("ioremap of 0x%llX failed!\n",
 635                        (unsigned long long)md->phys_addr);
 636 }
 637 
 638 /* Merge contiguous regions of the same type and attribute */
 639 static void __init efi_merge_regions(void)
 640 {
 641         efi_memory_desc_t *md, *prev_md = NULL;
 642 
 643         for_each_efi_memory_desc(md) {
 644                 u64 prev_size;
 645 
 646                 if (!prev_md) {
 647                         prev_md = md;
 648                         continue;
 649                 }
 650 
 651                 if (prev_md->type != md->type ||
 652                     prev_md->attribute != md->attribute) {
 653                         prev_md = md;
 654                         continue;
 655                 }
 656 
 657                 prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
 658 
 659                 if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
 660                         prev_md->num_pages += md->num_pages;
 661                         md->type = EFI_RESERVED_TYPE;
 662                         md->attribute = 0;
 663                         continue;
 664                 }
 665                 prev_md = md;
 666         }
 667 }
 668 
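      /*
       * If the EFI system table lives inside this descriptor, record its new
       * virtual address in efi.systab.
       */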
 669 static void __init get_systab_virt_addr(efi_memory_desc_t *md)
 670 {
 671         unsigned long size;
 672         u64 end, systab;
 673 
 674         size = md->num_pages << EFI_PAGE_SHIFT;
 675         end = md->phys_addr + size;
 676         systab = (u64)(unsigned long)efi_phys.systab;
 677         if (md->phys_addr <= systab && systab < end) {
 678                 systab += md->virt_addr - md->phys_addr;
 679                 efi.systab = (efi_system_table_t *)(unsigned long)systab;
 680         }
 681 }
 682 
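      /*
       * Grow the new memory map buffer by one order: allocate a buffer twice
       * the old size, copy the old contents across and free the old buffer.
       * The old buffer is freed even if the new allocation fails.
       */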
 683 static void *realloc_pages(void *old_memmap, int old_shift)
 684 {
 685         void *ret;
 686 
 687         ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
 688         if (!ret)
 689                 goto out;
 690 
 691         /*
 692          * A first-time allocation doesn't have anything to copy.
 693          */
 694         if (!old_memmap)
 695                 return ret;
 696 
 697         memcpy(ret, old_memmap, PAGE_SIZE << old_shift);
 698 
 699 out:
 700         free_pages((unsigned long)old_memmap, old_shift);
 701         return ret;
 702 }
 703 
 704 /*
 705  * Iterate the EFI memory map in reverse order because the regions
 706  * will be mapped top-down. The end result is the same as if we had
 707  * mapped things forward, but doesn't require us to change the
 708  * existing implementation of efi_map_region().
 709  */
 710 static inline void *efi_map_next_entry_reverse(void *entry)
 711 {
 712         /* Initial call */
 713         if (!entry)
 714                 return efi.memmap.map_end - efi.memmap.desc_size;
 715 
 716         entry -= efi.memmap.desc_size;
 717         if (entry < efi.memmap.map)
 718                 return NULL;
 719 
 720         return entry;
 721 }
 722 
 723 /*
 724  * efi_map_next_entry - Return the next EFI memory map descriptor
 725  * @entry: Previous EFI memory map descriptor
 726  *
 727  * This is a helper function to iterate over the EFI memory map, which
 728  * we do in different orders depending on the current configuration.
 729  *
 730  * To begin traversing the memory map @entry must be %NULL.
 731  *
 732  * Returns %NULL when we reach the end of the memory map.
 733  */
 734 static void *efi_map_next_entry(void *entry)
 735 {
 736         if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
 737                 /*
 738                  * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
 739                  * config table feature requires us to map all entries
 740                  * in the same order as they appear in the EFI memory
 741                  * map. That is to say, entry N must have a lower
 742                  * virtual address than entry N+1. This is because the
 743                  * firmware toolchain leaves relative references in
 744                  * the code/data sections, which are split and become
 745                  * separate EFI memory regions. Mapping things
 746                  * out-of-order leads to the firmware accessing
 747                  * unmapped addresses.
 748                  *
 749                  * Since we need to map things this way whether or not
 750                  * the kernel actually makes use of
 751                  * EFI_PROPERTIES_TABLE, let's just switch to this
 752                  * scheme by default for 64-bit.
 753                  */
 754                 return efi_map_next_entry_reverse(entry);
 755         }
 756 
 757         /* Initial call */
 758         if (!entry)
 759                 return efi.memmap.map;
 760 
 761         entry += efi.memmap.desc_size;
 762         if (entry >= efi.memmap.map_end)
 763                 return NULL;
 764 
 765         return entry;
 766 }
 767 
 768 static bool should_map_region(efi_memory_desc_t *md)
 769 {
 770         /*
 771          * Runtime regions always require runtime mappings (obviously).
 772          */
 773         if (md->attribute & EFI_MEMORY_RUNTIME)
 774                 return true;
 775 
 776         /*
 777          * 32-bit EFI doesn't suffer from the bug that requires us to
 778          * reserve boot services regions, and mixed mode support
 779          * doesn't exist for 32-bit kernels.
 780          */
 781         if (IS_ENABLED(CONFIG_X86_32))
 782                 return false;
 783 
 784         /*
 785          * Map all of RAM so that we can access arguments in the 1:1
 786          * mapping when making EFI runtime calls.
 787          */
 788         if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_is_native()) {
 789                 if (md->type == EFI_CONVENTIONAL_MEMORY ||
 790                     md->type == EFI_LOADER_DATA ||
 791                     md->type == EFI_LOADER_CODE)
 792                         return true;
 793         }
 794 
 795         /*
 796          * Map boot services regions as a workaround for buggy
 797          * firmware that accesses them even when they shouldn't.
 798          *
 799          * See efi_{reserve,free}_boot_services().
 800          */
 801         if (md->type == EFI_BOOT_SERVICES_CODE ||
 802             md->type == EFI_BOOT_SERVICES_DATA)
 803                 return true;
 804 
 805         return false;
 806 }
 807 
 808 /*
  809  * Map the EFI memory ranges needed at runtime and copy their updated
  810  * descriptors (now carrying virtual addresses) into new_memmap.
 811  */
 812 static void * __init efi_map_regions(int *count, int *pg_shift)
 813 {
 814         void *p, *new_memmap = NULL;
 815         unsigned long left = 0;
 816         unsigned long desc_size;
 817         efi_memory_desc_t *md;
 818 
 819         desc_size = efi.memmap.desc_size;
 820 
 821         p = NULL;
 822         while ((p = efi_map_next_entry(p))) {
 823                 md = p;
 824 
 825                 if (!should_map_region(md))
 826                         continue;
 827 
 828                 efi_map_region(md);
 829                 get_systab_virt_addr(md);
 830 
 831                 if (left < desc_size) {
 832                         new_memmap = realloc_pages(new_memmap, *pg_shift);
 833                         if (!new_memmap)
 834                                 return NULL;
 835 
 836                         left += PAGE_SIZE << *pg_shift;
 837                         (*pg_shift)++;
 838                 }
 839 
 840                 memcpy(new_memmap + (*count * desc_size), md, desc_size);
 841 
 842                 left -= desc_size;
 843                 (*count)++;
 844         }
 845 
 846         return new_memmap;
 847 }
 848 
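      /*
       * Re-enter virtual mode after a kexec. The runtime regions are mapped
       * at the fixed virtual addresses used by the first kernel (passed in
       * via setup_data), since SetVirtualAddressMap() cannot be called a
       * second time.
       */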
 849 static void __init kexec_enter_virtual_mode(void)
 850 {
 851 #ifdef CONFIG_KEXEC_CORE
 852         efi_memory_desc_t *md;
 853         unsigned int num_pages;
 854 
 855         efi.systab = NULL;
 856 
 857         /*
 858          * We don't do virtual mode, since we don't do runtime services, on
 859          * non-native EFI. With efi=old_map, we don't do runtime services in
  860          * the kexec kernel because, during the initial boot, something else
  861          * might have been mapped at these virtual addresses.
 862          */
 863         if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
 864                 efi_memmap_unmap();
 865                 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 866                 return;
 867         }
 868 
 869         if (efi_alloc_page_tables()) {
 870                 pr_err("Failed to allocate EFI page tables\n");
 871                 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 872                 return;
 873         }
 874 
  875         /*
  876          * Map the EFI regions passed in via setup_data. The virt_addr is a
  877          * fixed address that was used in the first kernel of the kexec boot.
  878          */
 879         for_each_efi_memory_desc(md) {
 880                 efi_map_region_fixed(md); /* FIXME: add error handling */
 881                 get_systab_virt_addr(md);
 882         }
 883 
 884         /*
 885          * Unregister the early EFI memmap from efi_init() and install
 886          * the new EFI memory map.
 887          */
 888         efi_memmap_unmap();
 889 
 890         if (efi_memmap_init_late(efi.memmap.phys_map,
 891                                  efi.memmap.desc_size * efi.memmap.nr_map)) {
 892                 pr_err("Failed to remap late EFI memory map\n");
 893                 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 894                 return;
 895         }
 896 
 897         BUG_ON(!efi.systab);
 898 
 899         num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
 900         num_pages >>= PAGE_SHIFT;
 901 
 902         if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
 903                 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 904                 return;
 905         }
 906 
 907         efi_sync_low_kernel_mappings();
 908 
 909         /*
 910          * Now that EFI is in virtual mode, update the function
 911          * pointers in the runtime service table to the new virtual addresses.
 912          *
 913          * Call EFI services through wrapper functions.
 914          */
 915         efi.runtime_version = efi_systab.hdr.revision;
 916 
 917         efi_native_runtime_setup();
 918 
 919         efi.set_virtual_address_map = NULL;
 920 
 921         if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
 922                 runtime_code_page_mkexec();
 923 #endif
 924 }
 925 
 926 /*
 927  * This function will switch the EFI runtime services to virtual mode.
 928  * Essentially, we look through the EFI memmap and map every region that
 929  * has the runtime attribute bit set in its memory descriptor into the
 930  * efi_pgd page table.
 931  *
  932  * The old method, which used to update each memory descriptor with the
  933  * virtual address obtained from ioremap(), is still supported when the
  934  * kernel is booted with efi=old_map on its command line. That old
  935  * method also allowed the runtime services to be called without having
  936  * to thunk back into physical mode for every invocation.
 937  *
 938  * The new method does a pagetable switch in a preemption-safe manner
 939  * so that we're in a different address space when calling a runtime
  940  * function. To pass function arguments, we copy the PUDs of the
  941  * kernel page table into efi_pgd prior to each call.
 942  *
  943  * For a kexec boot specifically, the EFI runtime maps of the previous
  944  * kernel are passed in via setup_data. In that case the runtime ranges
  945  * will be mapped to the same virtual addresses as in the first kernel,
  946  * see kexec_enter_virtual_mode().
 947  */
 948 static void __init __efi_enter_virtual_mode(void)
 949 {
 950         int count = 0, pg_shift = 0;
 951         void *new_memmap = NULL;
 952         efi_status_t status;
 953         unsigned long pa;
 954 
 955         efi.systab = NULL;
 956 
 957         if (efi_alloc_page_tables()) {
 958                 pr_err("Failed to allocate EFI page tables\n");
 959                 goto err;
 960         }
 961 
 962         efi_merge_regions();
 963         new_memmap = efi_map_regions(&count, &pg_shift);
 964         if (!new_memmap) {
 965                 pr_err("Error reallocating memory, EFI runtime non-functional!\n");
 966                 goto err;
 967         }
 968 
 969         pa = __pa(new_memmap);
 970 
 971         /*
 972          * Unregister the early EFI memmap from efi_init() and install
 973          * the new EFI memory map that we are about to pass to the
 974          * firmware via SetVirtualAddressMap().
 975          */
 976         efi_memmap_unmap();
 977 
 978         if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
 979                 pr_err("Failed to remap late EFI memory map\n");
 980                 goto err;
 981         }
 982 
 983         if (efi_enabled(EFI_DBG)) {
 984                 pr_info("EFI runtime memory map:\n");
 985                 efi_print_memmap();
 986         }
 987 
 988         if (WARN_ON(!efi.systab))
 989                 goto err;
 990 
 991         if (efi_setup_page_tables(pa, 1 << pg_shift))
 992                 goto err;
 993 
 994         efi_sync_low_kernel_mappings();
 995 
 996         if (efi_is_native()) {
 997                 status = phys_efi_set_virtual_address_map(
 998                                 efi.memmap.desc_size * count,
 999                                 efi.memmap.desc_size,
1000                                 efi.memmap.desc_version,
1001                                 (efi_memory_desc_t *)pa);
1002         } else {
1003                 status = efi_thunk_set_virtual_address_map(
1004                                 efi_phys.set_virtual_address_map,
1005                                 efi.memmap.desc_size * count,
1006                                 efi.memmap.desc_size,
1007                                 efi.memmap.desc_version,
1008                                 (efi_memory_desc_t *)pa);
1009         }
1010 
1011         if (status != EFI_SUCCESS) {
1012                 pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
1013                        status);
1014                 goto err;
1015         }
1016 
1017         efi_free_boot_services();
1018 
1019         /*
1020          * Now that EFI is in virtual mode, update the function
1021          * pointers in the runtime service table to the new virtual addresses.
1022          *
1023          * Call EFI services through wrapper functions.
1024          */
1025         efi.runtime_version = efi_systab.hdr.revision;
1026 
1027         if (efi_is_native())
1028                 efi_native_runtime_setup();
1029         else
1030                 efi_thunk_runtime_setup();
1031 
1032         efi.set_virtual_address_map = NULL;
1033 
1034         /*
 1035          * Apply more restrictive page table mapping attributes now that
 1036          * SetVirtualAddressMap() has been called and the firmware has
 1037          * performed all necessary relocation fixups for the new virtual addresses.
1038          */
1039         efi_runtime_update_mappings();
1040 
1041         /* clean DUMMY object */
1042         efi_delete_dummy_variable();
1043         return;
1044 
1045 err:
1046         clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
1047 }
1048 
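      /*
       * Switch the EFI runtime services to virtual mode: take the kexec path
       * if EFI setup_data was handed over from a previous kernel, otherwise
       * go through the full SetVirtualAddressMap() sequence.
       */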
1049 void __init efi_enter_virtual_mode(void)
1050 {
1051         if (efi_enabled(EFI_PARAVIRT))
1052                 return;
1053 
1054         if (efi_setup)
1055                 kexec_enter_virtual_mode();
1056         else
1057                 __efi_enter_virtual_mode();
1058 
1059         efi_dump_pagetable();
1060 }
1061 
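      /*
       * Parse the arch-specific "efi=" kernel command line options; only
       * "efi=old_map" is handled here.
       */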
1062 static int __init arch_parse_efi_cmdline(char *str)
1063 {
1064         if (!str) {
1065                 pr_warn("need at least one option\n");
1066                 return -EINVAL;
1067         }
1068 
1069         if (parse_option_str(str, "old_map"))
1070                 set_bit(EFI_OLD_MEMMAP, &efi.flags);
1071 
1072         return 0;
1073 }
1074 early_param("efi", arch_parse_efi_cmdline);
1075 
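      /*
       * Return true if @phys_addr is the physical address of one of the EFI
       * tables listed in efi_tables[].
       */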
1076 bool efi_is_table_address(unsigned long phys_addr)
1077 {
1078         unsigned int i;
1079 
1080         if (phys_addr == EFI_INVALID_TABLE_ADDR)
1081                 return false;
1082 
1083         for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
1084                 if (*(efi_tables[i]) == phys_addr)
1085                         return true;
1086 
1087         return false;
1088 }
