This source file includes the following definitions:
- __ia64_sync_icache_dcache
- arch_sync_dma_for_cpu
- ia64_set_rbs_bot
- ia64_init_addr_space
- free_initmem
- free_initrd_mem
- put_kernel_page
- setup_gate
- gate_vma_init
- get_gate_vma
- in_gate_area_no_mm
- in_gate_area
- ia64_mmu_init
- vmemmap_find_next_valid_pfn
- create_mem_map_page_table
- virtual_memmap_init
- memmap_init
- ia64_pfn_valid
- find_largest_hole
- register_active_ranges
- find_max_min_low_pfn
- nolwsys_setup
- mem_init
- arch_add_memory
- arch_remove_memory
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/dma-noncoherent.h>
#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <linux/swiotlb.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;
EXPORT_SYMBOL(zero_page_memmap_ptr);

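/*
 * Make the i-cache coherent with the d-cache for a page that is about to be
 * mapped executable.  PG_arch_1 records that the page has already been
 * flushed, so the flush is done at most once per page.
 */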
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;

	flush_icache_range(addr, addr + page_size(page));
	set_bit(PG_arch_1, &page->flags);
}
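
/*
 * DMA on ia64 is i-cache coherent, so pages written by a device can be
 * marked PG_arch_1 ("already flushed") to spare a redundant i-cache flush
 * if they are later mapped executable.
 */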
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PHYS_PFN(paddr);

	do {
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
	} while (++pfn <= PHYS_PFN(paddr + size - 1));
}

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
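
/*
 * Architecture-specific address-space setup for a new process: reserve a VM
 * area for the register backing store (RBS) and, unless the personality asks
 * for a readable page at 0 (MMAP_PAGE_ZERO), map a NaT page at address 0.
 */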
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

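	/*
	 * An allocation or insertion failure here is silently ignored: the
	 * process will simply fault (and get SIGSEGV) the first time it
	 * touches its register backing store.
	 */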
	vma = vm_area_alloc(current->mm);
	if (vma) {
		vma_set_anonymous(vma);
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			vm_area_free(vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}
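
	/*
	 * Map a NaT page at virtual address 0 (unless the personality
	 * requests MMAP_PAGE_ZERO) so that speculative loads through NULL
	 * pointers return NaT instead of faulting.
	 */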
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = vm_area_alloc(current->mm);
		if (vma) {
			vma_set_anonymous(vma);
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				vm_area_free(vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
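	/*
	 * Trim the initrd range to whole pages, then hand every page that is
	 * backed by a valid memmap entry back to the page allocator.
	 */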
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}
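
/*
 * Install one (already initialized) page into the kernel page table at the
 * given virtual address, allocating intermediate page-table levels as needed.
 */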
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
out:
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;
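
	/*
	 * Map the gate page twice: once read-only so user space can read the
	 * gate DSO contents, and once with PAGE_GATE so that "epc" can
	 * promote the privilege level; the pages in between are backed by
	 * the zero page.
	 */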
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);

	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	vma_init(&gate_vma, NULL);
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT	0
#else
# define VHPT_ENABLE_BIT	1
#endif
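
	/*
	 * pte_bits is log2 of the PTE size (8 bytes); mapped_space_bits is
	 * how many bits of virtual address space a region's three-level page
	 * table can map.  The checks below make sure the virtually mapped
	 * linear page table (VMLPT) fits without overlapping that space.
	 */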
# define pte_bits		3
# define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)

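	/*
	 * The VMLPT has to cover the entire implemented virtual address
	 * space within a region, so its size is derived from impl_va_bits.
	 */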
# define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
# define POW2(n)		(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	pta = POW2(61) - POW2(vmlpt_bits);

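	/*
	 * Program the PTA register: bit 0 enables the VHPT walker, bits 2-7
	 * hold log2 of the table size, bit 8 selects the short format (0
	 * here), and the upper bits give the base address, placing the
	 * linear page table at the top of the region.
	 */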
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}

		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd)) {
			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pud)
				goto err_alloc;
			pgd_populate(&init_mm, pgd, pud);
		}
		pud = pud_offset(pgd, address);

		if (pud_none(*pud)) {
			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pmd)
				goto err_alloc;
			pud_populate(&init_mm, pud, pmd);
		}
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd)) {
			pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pte)
				goto err_alloc;
			pmd_populate_kernel(&init_mm, pmd, pte);
		}
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte)) {
			void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
							 node);
			if (!page)
				goto err_alloc;
			set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
					     PAGE_KERNEL));
		}
	}
	return 0;

err_alloc:
	panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
	      __func__, PAGE_SIZE, PAGE_SIZE, node);
	return -ENOMEM;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

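	/*
	 * Round map_start down and map_end up to page boundaries of the
	 * virtual memmap so that every struct page sharing a memmap page
	 * with an in-range entry gets initialized as well.
	 */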
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY, NULL);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map) {
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
				 NULL);
	} else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

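	/*
	 * This assumes efi_memmap_walk() visits memory descriptors in
	 * ascending address order, so start - last_end is the size of the
	 * hole preceding the current range.
	 */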
	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}
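
/*
 * The "nolwsys" boot option disables light-weight ("fsys-mode") system
 * calls; mem_init() then redirects every fsyscall table entry to the
 * ordinary system-call handler.
 */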
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

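	/*
	 * Pick the DMA mapping backend: use the hardware IOMMU when one is
	 * detected, otherwise fall back to the swiotlb bounce buffer.
	 */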
#ifdef CONFIG_INTEL_IOMMU
	detect_intel_iommu();
	if (!iommu_detected)
#endif
#ifdef CONFIG_SWIOTLB
		swiotlb_init(1);
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();
	mem_init_print_info(NULL);

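	/*
	 * For system calls that have no light-weight (fsys-mode) handler, or
	 * when "nolwsys" was given, point the fsyscall table at the ordinary
	 * handler and set bit 0 so the fsyscall entry path can tell the two
	 * apart.
	 */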
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif