root/arch/arc/mm/highmem.c

DEFINITIONS

This source file includes the following definitions:
  1. kmap
  2. kmap_atomic
  3. __kunmap_atomic
  4. alloc_kmap_pgtable
  5. kmap_init

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * HIGHMEM API:
 *
 * kmap() provides sleep semantics, hence its mappings are referred to as
 * "permanent maps". It allows mapping LAST_PKMAP pages, using @last_pkmap_nr
 * as the cursor for book-keeping.
 *
 * kmap_atomic() can't sleep (it calls pagefault_disable()), thus it provides
 * short-lived "temporary mappings" which historically were implemented as
 * fixmaps (compile-time addresses etc). Their book-keeping is done per cpu.
 *
 *      Both these facts combined (preemption disabled and per-cpu allocation)
 *      mean the total number of concurrent fixmaps is limited to the max
 *      such allocations in a single control path. Thus KM_TYPE_NR (another
 *      historic relic) is a small-ish number which caps the max percpu fixmaps.
 *
 * ARC HIGHMEM Details
 *
 * - the kernel vaddr space from 0x7000_0000 to 0x8000_0000 (currently used by
 *   vmalloc/modules) is now shared between vmalloc and kmap (non-overlapping
 *   though)
 *
 * - Both fixmap and pkmap use a dedicated page table each, hooked up to the
 *   swapper PGD. This means each only has 1 PGDIR_SIZE worth of kvaddr
 *   mappings, i.e. 2M of kvaddr space for a typical config (8K page and
 *   11:8:13 traversal split)
 *
 * - fixmap needs only a limited number of mappings anyhow, so 2M of kvaddr ==
 *   256 PTE slots shared across NR_CPUS is more than sufficient (generic code
 *   defines KM_TYPE_NR as 20)
 *
 * - pkmap, being preemptible, could in theory do with more than 256 concurrent
 *   mappings. However, the generic pkmap code, map_new_virtual(), doesn't
 *   traverse the PGD and only works with a single page table,
 *   @pkmap_page_table, hence the limit
 */

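/*
 * Index-math sketch (an illustration, not part of the original file):
 * how kmap_atomic() below turns a per-cpu slot into a fixmap offset.
 * The EX_* names are hypothetical stand-ins for KM_TYPE_NR / PAGE_SHIFT,
 * assuming the "typical config" above (8K pages, KM_TYPE_NR == 20).
 */
#define EX_KM_TYPE_NR	20	/* per-cpu fixmap stack depth */
#define EX_PAGE_SHIFT	13	/* 8K pages */

static inline unsigned long ex_fixmap_offset(int cpu, int cpu_idx)
{
	/* mirrors: idx = cpu_idx + KM_TYPE_NR * smp_processor_id() */
	int idx = cpu_idx + EX_KM_TYPE_NR * cpu;

	/* e.g. cpu 1, depth 3 -> idx 23 -> byte offset 23 * 8K == 0x2e000 */
	return (unsigned long)idx << EX_PAGE_SHIFT;
}
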
extern pte_t *pkmap_page_table;
static pte_t *fixmap_page_table;

void *kmap(struct page *page)
{
	BUG_ON(in_interrupt());

	/* lowmem pages are always mapped: return the linear-map address */
	if (!PageHighMem(page))
		return page_address(page);

	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

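/*
 * Usage sketch (illustrative; copy_from_highpage_example() is a
 * hypothetical helper, not part of this file): a sleepable caller pairs
 * kmap() with the generic kunmap() from <linux/highmem.h>.
 */
static void copy_from_highpage_example(struct page *page, void *dst)
{
	void *src = kmap(page);		/* may sleep, hence the BUG_ON above */

	memcpy(dst, src, PAGE_SIZE);
	kunmap(page);			/* drops the pkmap mapping/refcount */
}
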
void *kmap_atomic(struct page *page)
{
	int idx, cpu_idx;
	unsigned long vaddr;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* grab the next free per-cpu slot and compute its fixmap address */
	cpu_idx = kmap_atomic_idx_push();
	idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
	vaddr = FIXMAP_ADDR(idx);

	set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
		   mk_pte(page, kmap_prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

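/*
 * Usage sketch (illustrative; clear_highpage_example() is a hypothetical
 * helper): the generic kunmap_atomic() wrapper ends up in
 * __kunmap_atomic() below, and nothing between the two calls may sleep.
 */
static void clear_highpage_example(struct page *page)
{
	void *kaddr = kmap_atomic(page);	/* preemption/pagefaults off */

	memset(kaddr, 0, PAGE_SIZE);		/* must not sleep here */
	kunmap_atomic(kaddr);
}
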
void __kunmap_atomic(void *kv)
{
	unsigned long kvaddr = (unsigned long)kv;

	if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {

		/*
		 * Because preemption is disabled, this vaddr can be
		 * associated with the most recently allocated index.
		 * But with multiple live kmap_atomic() mappings, it still
		 * relies on callers to unmap in the right (LIFO) order.
		 */
		int cpu_idx = kmap_atomic_idx();
		int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();

		WARN_ON(kvaddr != FIXMAP_ADDR(idx));

		pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
		local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

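/*
 * Ordering sketch (illustrative; copy_highpage_example() is a
 * hypothetical helper): with multiple live kmap_atomic() mappings, the
 * per-cpu index stack requires LIFO unmap order, per the comment above.
 */
static void copy_highpage_example(struct page *dst_pg, struct page *src_pg)
{
	void *dst = kmap_atomic(dst_pg);	/* pushes slot 0 */
	void *src = kmap_atomic(src_pg);	/* pushes slot 1 */

	memcpy(dst, src, PAGE_SIZE);

	kunmap_atomic(src);			/* pop slot 1 first ... */
	kunmap_atomic(dst);			/* ... then slot 0 */
}
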
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
	pgd_t *pgd_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* walk the swapper (kernel) page tables down to the PMD slot */
	pgd_k = pgd_offset_k(kvaddr);
	pud_k = pud_offset(pgd_k, kvaddr);
	pmd_k = pmd_offset(pud_k, kvaddr);

	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pte_k)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/* hook the fresh page table into the swapper PGD */
	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
	return pte_k;
}

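/*
 * Back-of-the-envelope check of the "2M per dedicated page table" claim
 * (illustrative, assuming the 11:8:13 split and the EX_PAGE_SHIFT from
 * the sketch near the top; static_assert as in <linux/build_bug.h>):
 */
#define EX_PTE_BITS	8	/* middle level of the 11:8:13 split */

static_assert((1 << EX_PTE_BITS) == 256, "256 PTE slots per table");
static_assert((1UL << (EX_PTE_BITS + EX_PAGE_SHIFT)) == (2UL << 20),
	      "one PGD slot covers 2M of kvaddr");
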
void __init kmap_init(void)
{
	/* Due to recursive include hell, we can't do this in processor.h */
	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));

	/* pair each size check with the page table it actually constrains */
	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);

	BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
	fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
}
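
/*
 * Layout sketch (illustrative numbers, assuming PAGE_OFFSET 0x8000_0000
 * and a 2M PGDIR_SIZE per the header comment):
 *
 *	FIXMAP_BASE = PAGE_OFFSET - 2M - 2M = 0x7fc0_0000
 *	PKMAP_BASE  = FIXMAP_BASE + 2M      = 0x7fe0_0000
 *
 * The first BUILD_BUG_ON above then requires VMALLOC_END <= 0x7fc0_0000,
 * i.e. vmalloc, fixmap and pkmap tile the space below PAGE_OFFSET
 * without overlap.
 */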
