root/arch/arm64/kernel/efi.c

DEFINITIONS

This source file includes the following definitions:
  1. create_mapping_protection
  2. efi_create_mapping
  3. set_permissions
  4. efi_set_mapping_permissions
  5. efi_poweroff_required
  6. efi_handle_corrupted_x18

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 */

#include <linux/efi.h>
#include <linux/init.h>

#include <asm/efi.h>

/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable, everything else can be mapped with the XN bits
 * set. Also take the new (optional) RO/XP bits into account.
 */
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;

	if (type == EFI_MEMORY_MAPPED_IO)
		return PROT_DEVICE_nGnRE;

	if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
		      "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
		/*
		 * If the region is not aligned to the page size of the OS, we
		 * can not use strict permissions, since that would also affect
		 * the mapping attributes of the adjacent regions.
		 */
		return pgprot_val(PAGE_KERNEL_EXEC);

	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return pgprot_val(PAGE_KERNEL_RO);

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return pgprot_val(PAGE_KERNEL_ROX);

	/* RW- */
	if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
	     EFI_MEMORY_XP) ||
	    type != EFI_RUNTIME_SERVICES_CODE)
		return pgprot_val(PAGE_KERNEL);

	/* RWX */
	return pgprot_val(PAGE_KERNEL_EXEC);
}
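
/*
 * Minimal sketch (hypothetical, unused helper) of how two typical runtime
 * regions are classified by create_mapping_protection() above: code marked
 * read-only by the firmware ends up R-X, while data marked non-executable
 * ends up RW- with XN set. The descriptors and addresses are made up for
 * illustration; only the definitions from <linux/efi.h> and the arm64
 * pgprot macros already used in this file are assumed.
 */
static void __init __maybe_unused example_mapping_protection(void)
{
	efi_memory_desc_t code_md = {
		.type      = EFI_RUNTIME_SERVICES_CODE,
		.phys_addr = 0x10000,		/* page aligned */
		.attribute = EFI_MEMORY_RO,
	};
	efi_memory_desc_t data_md = {
		.type      = EFI_RUNTIME_SERVICES_DATA,
		.phys_addr = 0x20000,		/* page aligned */
		.attribute = EFI_MEMORY_XP,
	};

	/* R-X: the RO attribute alone selects PAGE_KERNEL_ROX */
	WARN_ON(create_mapping_protection(&code_md) != pgprot_val(PAGE_KERNEL_ROX));

	/* RW-: anything that is not runtime code gets PAGE_KERNEL (XN set) */
	WARN_ON(create_mapping_protection(&data_md) != pgprot_val(PAGE_KERNEL));
}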

/* we will fill this structure from the stub, so don't put it in .bss */
struct screen_info screen_info __section(.data);

int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	pteval_t prot_val = create_mapping_protection(md);
	bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
				   md->type == EFI_RUNTIME_SERVICES_DATA);

	if (!PAGE_ALIGNED(md->phys_addr) ||
	    !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
		/*
		 * If the end address of this region is not aligned to page
		 * size, the mapping is rounded up, and may end up sharing a
		 * page frame with the next UEFI memory region. If we create
		 * a block entry now, we may need to split it again when mapping
		 * the next region, and support for that is going to be removed
		 * from the MMU routines. So avoid block mappings altogether in
		 * that case.
		 */
		page_mappings_only = true;
	}

	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT,
			   __pgprot(prot_val | PTE_NG), page_mappings_only);
	return 0;
}
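
/*
 * Worked example (hypothetical sizes) for the alignment check above:
 * num_pages is counted in 4 KB EFI pages, so a region of 3 EFI pages spans
 * 3 << EFI_PAGE_SHIFT = 12 KB. That is a multiple of a 4 KB kernel page but
 * not of a 16 KB or 64 KB one, in which case efi_create_mapping() forces
 * page_mappings_only so the trailing partial page never ends up covered by
 * a block mapping shared with the next region.
 */
static void __init __maybe_unused example_region_alignment(void)
{
	efi_memory_desc_t md = {
		.type      = EFI_RUNTIME_SERVICES_DATA,
		.num_pages = 3,			/* 3 * 4 KB = 12 KB */
	};
	u64 size = md.num_pages << EFI_PAGE_SHIFT;

	if (!PAGE_ALIGNED(size))
		pr_debug("12 KB region is not page aligned, using page mappings\n");
}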

static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = READ_ONCE(*ptep);

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
	set_pte(ptep, pte);
	return 0;
}

int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, md);
}
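
/*
 * Usage sketch (hypothetical caller): for a runtime code/data region that
 * was mapped by efi_create_mapping() above, a Memory Attributes table entry
 * carrying RO and XP would be applied roughly like this, so set_permissions()
 * adds PTE_RDONLY and PTE_PXN to every PTE in the range. efi_mm is the EFI
 * page table declared in <linux/efi.h>; the descriptor is assumed to describe
 * runtime services code or data, as required by the BUG_ON() above.
 */
static int __init __maybe_unused example_apply_memattr_entry(efi_memory_desc_t *md)
{
	md->attribute |= EFI_MEMORY_RO | EFI_MEMORY_XP;
	return efi_set_mapping_permissions(&efi_mm, md);
}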

/*
 * UpdateCapsule() depends on the system being shutdown via
 * ResetSystem().
 */
bool efi_poweroff_required(void)
{
	return efi_enabled(EFI_RUNTIME_SERVICES);
}

asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
{
	pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
	return s;
}
