/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes of physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 *
 * Reworked for PowerPC by various contributors. Moved from
 * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
 */

#include <linux/highmem.h>
#include <linux/module.h>

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * give a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	/* Lowmem pages already have a permanent kernel mapping */
	if (!PageHighMem(page))
		return page_address(page);

	/* Pick this CPU's fixmap slot for the current kmap nesting level */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	__set_pte_at(&init_mm, vaddr, kmap_pte - idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	/* Addresses below the fixmap area came from page_address() on
	 * lowmem pages; there is nothing to tear down for them. */
	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned int idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		local_flush_tlb_page(NULL, vaddr);
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
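
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a hypothetical helper that zeroes a possibly-highmem page from a
 * context that must not sleep. It calls the functions above directly;
 * normal callers would go through the generic kmap_atomic()/
 * kunmap_atomic() wrappers in <linux/highmem.h>, whose exact
 * signatures have varied across kernel versions. Guarded by #if 0 so
 * it is never built; the in-tree clear_highpage() helper already
 * provides equivalent behaviour.
 */
#if 0
static void zero_page_atomic(struct page *page)
{
	/* Disables pagefaults; installs a per-CPU fixmap PTE if the
	 * page lives in highmem. */
	void *vaddr = kmap_atomic_prot(page, PAGE_KERNEL);

	memset(vaddr, 0, PAGE_SIZE);

	/* Tears the temporary mapping down (DEBUG_HIGHMEM clears the
	 * PTE) and re-enables pagefaults. */
	__kunmap_atomic(vaddr);
}
#endif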