root/arch/powerpc/mm/highmem.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. kmap_atomic_prot
  2. __kunmap_atomic

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * highmem.c: virtual kernel memory mappings for high memory
   4  *
   5  * PowerPC version, stolen from the i386 version.
   6  *
   7  * Used in CONFIG_HIGHMEM systems for memory pages which
   8  * are not addressable by direct kernel virtual addresses.
   9  *
  10  * Copyright (C) 1999 Gerhard Wichert, Siemens AG
  11  *                    Gerhard.Wichert@pdb.siemens.de
  12  *
  13  *
  14  * Redesigned the x86 32-bit VM architecture to deal with
  15  * up to 16 Terrabyte physical memory. With current x86 CPUs
  16  * we now support up to 64 Gigabytes physical RAM.
  17  *
  18  * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
  19  *
  20  * Reworked for PowerPC by various contributors. Moved from
  21  * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
  22  */
  23 
  24 #include <linux/highmem.h>
  25 #include <linux/module.h>
  26 
  27 /*
  28  * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
  29  * gives a more generic (and caching) interface. But kmap_atomic can
  30  * be used in IRQ contexts, so in some (very limited) cases we need
  31  * it.
  32  */
  33 void *kmap_atomic_prot(struct page *page, pgprot_t prot)
  34 {
  35         unsigned long vaddr;
  36         int idx, type;
  37 
  38         preempt_disable();
  39         pagefault_disable();
  40         if (!PageHighMem(page))
  41                 return page_address(page);
  42 
  43         type = kmap_atomic_idx_push();
  44         idx = type + KM_TYPE_NR*smp_processor_id();
  45         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
  46         WARN_ON(IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !pte_none(*(kmap_pte - idx)));
  47         __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
  48         local_flush_tlb_page(NULL, vaddr);
  49 
  50         return (void*) vaddr;
  51 }
  52 EXPORT_SYMBOL(kmap_atomic_prot);
  53 
  54 void __kunmap_atomic(void *kvaddr)
  55 {
  56         unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
  57 
  58         if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
  59                 pagefault_enable();
  60                 preempt_enable();
  61                 return;
  62         }
  63 
  64         if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
  65                 int type = kmap_atomic_idx();
  66                 unsigned int idx;
  67 
  68                 idx = type + KM_TYPE_NR * smp_processor_id();
  69                 WARN_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
  70 
  71                 /*
  72                  * force other mappings to Oops if they'll try to access
  73                  * this pte without first remap it
  74                  */
  75                 pte_clear(&init_mm, vaddr, kmap_pte-idx);
  76                 local_flush_tlb_page(NULL, vaddr);
  77         }
  78 
  79         kmap_atomic_idx_pop();
  80         pagefault_enable();
  81         preempt_enable();
  82 }
  83 EXPORT_SYMBOL(__kunmap_atomic);

/* [<][>][^][v][top][bottom][index][help] */