lib/ioremap.c


DEFINITIONS

This source file includes the following definitions:
  1. set_nohugeiomap
  2. ioremap_huge_init
  3. ioremap_p4d_enabled
  4. ioremap_pud_enabled
  5. ioremap_pmd_enabled
  6. ioremap_p4d_enabled
  7. ioremap_pud_enabled
  8. ioremap_pmd_enabled
  9. ioremap_pte_range
  10. ioremap_try_huge_pmd
  11. ioremap_pmd_range
  12. ioremap_try_huge_pud
  13. ioremap_pud_range
  14. ioremap_try_huge_p4d
  15. ioremap_p4d_range
  16. ioremap_page_range

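Before the listing, a minimal sketch of how an architecture's ioremap() implementation typically drives ioremap_page_range(): reserve a region of vmalloc space, then have this file populate the kernel page tables for it. The example_ioremap() helper below is illustrative only and is not part of this file; it assumes page-aligned inputs and omits the offset, caching-type and error-path handling of real implementations.

/*
 * Hypothetical caller sketch (not in this file): reserve a VM_IOREMAP area
 * and map the physical range into it via ioremap_page_range().
 */
static void __iomem *example_ioremap(phys_addr_t phys_addr, size_t size,
                                     pgprot_t prot)
{
        struct vm_struct *area;
        unsigned long vaddr;

        /* Assumes phys_addr and size are already page-aligned. */
        area = get_vm_area_caller(size, VM_IOREMAP,
                                  __builtin_return_address(0));
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)vaddr;
}
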
// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

/* "nohugeiomap" on the kernel command line disables huge I/O mappings. */
static int __init set_nohugeiomap(char *str)
{
        ioremap_huge_disabled = 1;
        return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

/* Cache which huge page-table levels the architecture supports for ioremap. */
void __init ioremap_huge_init(void)
{
        if (!ioremap_huge_disabled) {
                if (arch_ioremap_p4d_supported())
                        ioremap_p4d_capable = 1;
                if (arch_ioremap_pud_supported())
                        ioremap_pud_capable = 1;
                if (arch_ioremap_pmd_supported())
                        ioremap_pmd_capable = 1;
        }
}

static inline int ioremap_p4d_enabled(void)
{
        return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
        return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
        return ioremap_pmd_capable;
}

#else   /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */

/* Populate a PMD's worth of PTEs mapping [addr, end) to phys_addr. */
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pte_t *pte;
        u64 pfn;

        pfn = phys_addr >> PAGE_SHIFT;
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                BUG_ON(!pte_none(*pte));
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}

/*
 * Map the range with a single huge PMD entry when the architecture supports
 * it, the range covers exactly one PMD and both the virtual and physical
 * addresses are PMD-aligned.  Returns nonzero on success, 0 to fall back to
 * PTE mappings.
 */
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pmd_enabled())
                return 0;

        if ((end - addr) != PMD_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, PMD_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, PMD_SIZE))
                return 0;

        if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
                return 0;

        return pmd_set_huge(pmd, phys_addr, prot);
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);

                if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

/* As above, but try to map the range with a single huge PUD entry. */
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pud_enabled())
                return 0;

        if ((end - addr) != PUD_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, PUD_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, PUD_SIZE))
                return 0;

        if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
                return 0;

        return pud_set_huge(pud, phys_addr, prot);
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc(&init_mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);

                if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

/* As above, but try to map the range with a single huge P4D entry. */
static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_p4d_enabled())
                return 0;

        if ((end - addr) != P4D_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, P4D_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, P4D_SIZE))
                return 0;

        if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
                return 0;

        return p4d_set_huge(p4d, phys_addr, prot);
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        p4d_t *p4d;
        unsigned long next;

        p4d = p4d_alloc(&init_mm, pgd, addr);
        if (!p4d)
                return -ENOMEM;
        do {
                next = p4d_addr_end(addr, end);

                if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

/*
 * Create kernel page-table mappings for the virtual range [addr, end)
 * covering phys_addr with the given protection.  Walks from the PGD down,
 * using huge mappings wherever size and alignment allow.
 */
int ioremap_page_range(unsigned long addr,
                       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pgd_t *pgd;
        unsigned long start;
        unsigned long next;
        int err;

        might_sleep();
        BUG_ON(addr >= end);

        start = addr;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
                if (err)
                        break;
        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

        flush_cache_vmap(start, end);

        return err;
}
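
A concrete example of when the huge-mapping fast paths above take effect: on x86-64, where PMD_SIZE is 2 MiB, a request whose range is exactly one PMD in size and whose virtual and physical addresses are both 2 MiB aligned is satisfied by a single pmd_set_huge() entry instead of 512 individual PTEs. The helper below merely restates the size/alignment tests from ioremap_try_huge_pmd() for illustration; it is not part of this file.

/* Illustration only: mirrors the checks made by ioremap_try_huge_pmd(). */
static bool example_huge_pmd_possible(unsigned long addr, unsigned long end,
                                      phys_addr_t phys_addr)
{
        return (end - addr) == PMD_SIZE &&
               IS_ALIGNED(addr, PMD_SIZE) &&
               IS_ALIGNED(phys_addr, PMD_SIZE);
}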
