root/arch/microblaze/mm/highmem.c


DEFINITIONS

This source file includes the following definitions:
  1. kmap_atomic_prot
  2. __kunmap_atomic

// SPDX-License-Identifier: GPL-2.0
/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *                    Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes of physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 *
 * Reworked for PowerPC by various contributors. Moved from
 * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
 */

#include <linux/export.h>
#include <linux/highmem.h>

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
#include <asm/tlbflush.h>

void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();

        /* Lowmem pages are permanently mapped; just return their address. */
        if (!PageHighMem(page))
                return page_address(page);

        /* Grab a per-CPU fixmap slot and compute its virtual address. */
        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        /* Install the mapping and make sure the local TLB sees it. */
        set_pte_at(&init_mm, vaddr, kmap_pte - idx, mk_pte(page, prot));
        local_flush_tlb_page(NULL, vaddr);

        return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type;
        unsigned int idx;

        /*
         * Addresses below the kmap fixmap area came from page_address()
         * on a lowmem page; there is no fixmap slot to tear down.
         */
        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
                pagefault_enable();
                preempt_enable();
                return;
        }

        type = kmap_atomic_idx();

        idx = type + KM_TYPE_NR * smp_processor_id();
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it.
         */
        pte_clear(&init_mm, vaddr, kmap_pte - idx);
        local_flush_tlb_page(NULL, vaddr);

        kmap_atomic_idx_pop();
        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
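
For context, here is a minimal caller-side sketch of how these entry points are typically reached. The helper copy_buf_to_page() is hypothetical and not part of this file; it assumes the generic kmap_atomic()/kunmap_atomic() wrappers from <linux/highmem.h>, which on this architecture route highmem pages through kmap_atomic_prot() and __kunmap_atomic() above.

/*
 * Hypothetical helper, for illustration only.
 *
 * Copies a small buffer into a page that may live in high memory,
 * using the atomic kmap path, so it is usable in contexts that must
 * not sleep. The mapping window lasts only until kunmap_atomic().
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_buf_to_page(struct page *page, const void *buf, size_t len)
{
        void *vaddr;

        /*
         * kmap_atomic() ends up in kmap_atomic_prot() for highmem pages;
         * for lowmem pages it simply returns page_address(page).
         */
        vaddr = kmap_atomic(page);
        memcpy(vaddr, buf, len);

        /*
         * Must be unmapped on the same CPU, in LIFO order; this calls
         * __kunmap_atomic(), which clears the fixmap pte again.
         */
        kunmap_atomic(vaddr);
}

Note that kmap_atomic() disables pagefaults and preemption for the whole window, so callers should keep the mapped section short and avoid anything that can sleep between the map and unmap calls.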
