arch/microblaze/mm/pgtable.c


DEFINITIONS

This source file includes the following definitions.
  1. __ioremap
  2. ioremap
  3. iounmap
  4. map_page
  5. mapin_ram
  6. get_pteptr
  7. iopa
  8. pte_alloc_one_kernel
  9. __set_fixmap

/*
 *  This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 *  Derived from arch/ppc/mm/pgtable.c:
 *    -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file COPYING in the main directory of this
 *  archive for more details.
 *
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

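/*
 * ioremap_base is the top of the early I/O remapping area (set up during
 * MMU initialisation); ioremap_bot moves down from it as __ioremap()
 * hands out mappings before the vmalloc allocator is usable.
 */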
unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 * (note that the __bss_stop .. __bss_stop window tested below is an
	 * empty range, so no RAM is actually exempted from the check yet)
	 */

	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %ps\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

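/*
 * ioremap()/iounmap() are the public entry points: map a physical device
 * address range uncached into kernel virtual space, and tear that mapping
 * down again.  Illustrative driver usage (the 'res' resource below is only
 * an example, not something defined in this file):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */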
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if ((__force void *)addr > high_memory &&
					(unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);

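/*
 * Set up a single kernel page table entry mapping virtual address 'va'
 * to physical address 'pa' with protection 'flags'.  Returns 0 on
 * success or -ENOMEM if a PTE page could not be allocated.
 */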
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			_tlbie(va);
	}
	return err;
}

/*
 * Map all of low memory into the kernel, starting at virtual address
 * CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/*
			 * On the MicroBlaze, no user access
			 * forces R/W kernel access.
			 */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise.  The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t	*pgd;
	pmd_t	*pmd;
	pte_t	*pte;
	int	retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}

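/*
 * Allocate and zero one kernel page-table page.  Before mem_init() has
 * run the page allocator is not available, so fall back to
 * early_get_page() (hence the __ref annotation on the function).
 */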
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}

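/*
 * Install a mapping for one fixmap slot.  The virtual address is fixed
 * at compile time by the slot index; the caller supplies only the
 * physical address and the protection flags.
 */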
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	BUG_ON(idx >= __end_of_fixed_addresses);

	map_page(address, phys, pgprot_val(flags));
}
