/*
 * Memory subsystem initialization for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <asm/atomic.h>
#include <linux/highmem.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vm_mmu.h>

/*
 * Define a startpg just past the end of the kernel image and a lastpg
 * that corresponds to the end of real or simulated platform memory.
 */
#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))

unsigned long bootmem_lastpg;	/* Should be set by platform code */
unsigned long __phys_offset;	/* physical kernel offset >> 12 */

/* Set as variable to limit PMD copies */
int max_kernel_seg = 0x303;

/* think this should be (page_size-1) the way it's used... */
unsigned long zero_page_mask;

/* indicate pfn's of high memory */
unsigned long highstart_pfn, highend_pfn;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Default cache attribute for newly created page tables */
unsigned long _dflt_cache_att = CACHEDEF;

/*
 * The current "generation" of kernel map, which should not roll
 * over until Hell freezes over.  Actual bound in years needs to be
 * calculated to confirm.
 */
DEFINE_SPINLOCK(kmap_gen_lock);

/* checkpatch says don't init this to 0. */
unsigned long long kmap_generation;

/*
 * mem_init - initializes memory
 *
 * Frees up bootmem
 * Fixes up more stuff for HIGHMEM
 * Calculates and displays memory available/used
 */
void __init mem_init(void)
{
        /* No idea where this is actually declared.  Seems to evade LXR. */
        free_all_bootmem();
        mem_init_print_info(NULL);

        /*
         * To-Do: someone somewhere should wipe out the bootmem map
         * after we're done?
         */

        /*
         * This can be moved to some more virtual-memory-specific
         * initialization hook at some point.  Set the init_mm
         * descriptor's "context" value to point to the initial
         * kernel segment table's physical address.
         */
        init_mm.context.ptbase = __pa(init_mm.pgd);
}

/*
 * free_initmem - frees memory used by stuff declared with __init
 *
 * Todo: free pages between __init_begin and __init_end; possibly
 * some devtree related stuff as well.
 */
void __init_refok free_initmem(void)
{
}

/*
 * free_initrd_mem - frees... initrd memory.
 * @start - start of initrd memory
 * @end - end of initrd memory
 *
 * Apparently has to be passed the address of the initrd memory.
 *
 * Wrapped by #ifdef CONFIG_BLK_DEV_INITRD
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
}

void sync_icache_dcache(pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        __vmcache_idsync(addr, PAGE_SIZE);
}

/*
 * In order to set up page allocator "nodes",
 * somebody has to call free_area_init() for UMA.
 *
 * In this mode, we only have one pg_data_t
 * structure: contig_mem_data.
 */
void __init paging_init(void)
{
        unsigned long zones_sizes[MAX_NR_ZONES] = {0, };

        /*
         * This is not particularly well documented anywhere, but
         * give ZONE_NORMAL all the memory, including the big holes
         * left by the kernel+bootmem_map which are already left as reserved
         * in the bootmem_map; free_area_init should see those bits and
         * adjust accordingly.
         */

        zones_sizes[ZONE_NORMAL] = max_low_pfn;

        free_area_init(zones_sizes);  /* sets up the zonelists and mem_map */

        /*
         * Start of high memory area.  Will probably need something more
         * fancy if we... get more fancy.
         */
        high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
}

#ifndef DMA_RESERVE
#define DMA_RESERVE		(4)
#endif

#define DMA_CHUNKSIZE		(1<<22)
#define DMA_RESERVED_BYTES	(DMA_RESERVE * DMA_CHUNKSIZE)

/*
 * Pick out the memory size.  We look for mem=size,
 * where size is "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        unsigned long size;
        char *endp;

        size = memparse(p, &endp);

        bootmem_lastpg = PFN_DOWN(size);

        return 0;
}
early_param("mem", early_mem);

size_t hexagon_coherent_pool_size = (size_t) (DMA_RESERVE << 22);

void __init setup_arch_memory(void)
{
        int bootmap_size;
        /* XXX Todo: this probably should be cleaned up */
        u32 *segtable = (u32 *) &swapper_pg_dir[0];
        u32 *segtable_end;

        /*
         * Set up boot memory allocator
         *
         * The Gorman book also talks about these functions.
         * This needs to change for highmem setups.
         */

        /* Prior to this, bootmem_lastpg is actually mem size */
        bootmem_lastpg += ARCH_PFN_OFFSET;

        /* Memory size needs to be a multiple of 16M */
        bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
                ~((BIG_KERNEL_PAGE_SIZE) - 1));

        /*
         * Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
         * memory allocation
         */

        max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);
        min_low_pfn = ARCH_PFN_OFFSET;
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmem_startpg,
                                         min_low_pfn, max_low_pfn);

        printk(KERN_INFO "bootmem_startpg:  0x%08lx\n", bootmem_startpg);
        printk(KERN_INFO "bootmem_lastpg:  0x%08lx\n", bootmem_lastpg);
        printk(KERN_INFO "bootmap_size:  %d\n", bootmap_size);
        printk(KERN_INFO "min_low_pfn:  0x%08lx\n", min_low_pfn);
        printk(KERN_INFO "max_low_pfn:  0x%08lx\n", max_low_pfn);
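
        /*
         * Illustrative worked example of the sizing above (an editorial
         * sketch; assumes 4 KB pages, i.e. PAGE_SHIFT == 12, and
         * ARCH_PFN_OFFSET == 0 -- other configurations shift the numbers):
         * booting with "mem=256M" makes early_mem() set bootmem_lastpg to
         * 0x10000 pages.  256 MB is already a multiple of the 16 MB
         * BIG_KERNEL_PAGE_SIZE, so the alignment leaves it unchanged.
         * DMA_RESERVED_BYTES is 4 * 4 MB = 16 MB (0x1000 pages), so
         * max_low_pfn becomes 0xf000 and the top 16 MB of RAM is kept out
         * of the bootmem pool for the uncached coherent DMA pool
         * (hexagon_coherent_pool_size).
         */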

        /*
         * The default VM page tables (will be) populated with
         * VA=PA+PAGE_OFFSET mapping.  We go in and invalidate entries
         * higher than what we have memory for.
         */

        /* this is pointer arithmetic; each entry covers 4MB */
        segtable = segtable + (PAGE_OFFSET >> 22);

        /* this actually only goes to the end of the first gig */
        segtable_end = segtable + (1<<(30-22));

        /*
         * Move forward to the start of empty pages; take into account
         * phys_offset shift.
         */

        segtable += (bootmem_lastpg - ARCH_PFN_OFFSET) >> (22 - PAGE_SHIFT);
        {
                int i;

                for (i = 1; i <= DMA_RESERVE; i++)
                        segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
                                | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
                                | __HEXAGON_C_UNC << 6
                                | __HVM_PDE_S_4MB);
        }

        printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
                segtable_end);
        while (segtable < (segtable_end - 8))
                *(segtable++) = __HVM_PDE_S_INVALID;
        /* stop the pointer at the device I/O 4MB page */

        printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
                segtable);

#if 0
        /* Other half of the early device table from vm_init_segtable. */
        printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
                (unsigned long) _K_init_devicetable - PAGE_OFFSET);
        *segtable = ((u32) (unsigned long) _K_init_devicetable - PAGE_OFFSET) |
                __HVM_PDE_S_4KB;
        printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
#endif

        /*
         * Free all the memory that wasn't taken up by the bootmap, the DMA
         * reserve, or kernel itself.
         */
        free_bootmem(PFN_PHYS(bootmem_startpg) + bootmap_size,
                     PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size -
                     DMA_RESERVED_BYTES);

        /*
         * The bootmem allocator seemingly just lives to feed memory
         * to the paging system
         */
        printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
        paging_init();  /* See Gorman Book, 2.3 */

        /*
         * At this point, the page allocator is kind of initialized, but
         * apparently no pages are available (just like with the bootmem
         * allocator), and need to be freed themselves via mem_init(),
         * which is called by start_kernel() later on in the process
         */
}
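
/*
 * Illustrative summary of the segment table that setup_arch_memory()
 * leaves behind (an editorial sketch; assumes PAGE_OFFSET == 0xc0000000,
 * 4 KB pages, ARCH_PFN_OFFSET == 0 and 256 MB of RAM -- other
 * configurations shift the indices).  Each swapper_pg_dir entry maps
 * 4 MB, so the kernel window starts at entry PAGE_OFFSET >> 22 == 0x300
 * and spans 1 << (30 - 22) == 256 entries, i.e. the last gigabyte of
 * virtual space.  256 MB of RAM is 64 linearly-mapped entries
 * (0x300..0x33f), of which the top DMA_RESERVE == 4 are re-marked
 * uncached for the coherent DMA pool.  Entries above the RAM mapping
 * are invalidated, except for the final 8 (32 MB), which are left for
 * the early device I/O mappings starting at _K_io_map.
 */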