root/arch/sparc/include/asm/highmem.h

DEFINITIONS

This source file includes the following definitions:
  1. kmap
  2. kunmap

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *                    Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes of physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/interrupt.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

void kmap_init(void) __init;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM. Currently the simplest way to do this is to align the
 * pkmap region on a pagetable boundary (4MB).
 */
#define LAST_PKMAP 1024
#define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
#define PKMAP_BASE PMD_ALIGN(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))

#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))

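As a worked example (not part of the header): assuming the usual sparc32 page size of 4 KB (PAGE_SHIFT == 12), the 1024 pkmap slots span 4 MB of virtual space, which matches the 4 MB pagetable boundary mentioned in the comment above; PKMAP_NR() and PKMAP_ADDR() simply convert between an address inside that window and its slot index. A minimal user-space sketch of the arithmetic, using a made-up PKMAP_BASE since the real value depends on SRMMU_NOCACHE_VADDR:

#include <assert.h>
#include <stdio.h>

/* Stand-in values for illustration only; this PKMAP_BASE is hypothetical. */
#define PAGE_SHIFT      12
#define LAST_PKMAP      1024
#define PKMAP_SIZE      (LAST_PKMAP << PAGE_SHIFT)
#define PKMAP_BASE      0xfc000000UL
#define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

int main(void)
{
        unsigned long vaddr = PKMAP_ADDR(17);   /* virtual address of slot 17 */

        assert(PKMAP_NR(vaddr) == 17);          /* the two macros invert each other */
        printf("pkmap window: %d slots, %d MB\n",
               LAST_PKMAP, PKMAP_SIZE >> 20);   /* prints: 1024 slots, 4 MB */
        return 0;
}
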
void *kmap_high(struct page *page);
void kunmap_high(struct page *page);

static inline void *kmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}

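A hedged usage sketch (again, not part of this header): a caller maps a possibly-highmem page, uses the returned kernel virtual address, and unmaps it when done. kmap() can sleep while waiting for a free pkmap slot, which is what the BUG_ON(in_interrupt()) checks above enforce. The helper name below is made up for illustration and assumes <linux/highmem.h> and <linux/string.h> are included:

/* Illustrative only: zero a page that may live in highmem. */
static void zero_any_page(struct page *page)
{
        void *vaddr = kmap(page);       /* may sleep; never from interrupt context */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);                   /* release the pkmap slot, if one was used */
}
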
void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);

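The atomic variants are for contexts that must not sleep; the mapping is per-CPU, short-lived, and has to be released in reverse (LIFO) order. In kernels of this vintage callers normally pair kmap_atomic() with the kunmap_atomic() wrapper from <linux/highmem.h>, which ends up in the __kunmap_atomic() declared above. Another hedged sketch, with a hypothetical helper name:

/* Illustrative only: copy one page to another without sleeping. */
static void copy_any_page(struct page *dst, struct page *src)
{
        void *vdst = kmap_atomic(dst);
        void *vsrc = kmap_atomic(src);

        memcpy(vdst, vsrc, PAGE_SIZE);
        kunmap_atomic(vsrc);            /* unmap in reverse order of mapping */
        kunmap_atomic(vdst);
}
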
#define flush_cache_kmaps()     flush_cache_all()

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */
