This source file includes the following definitions.
- pgd_alloc
- __bad_pagetable
- __bad_page
- load_PCB
- switch_to_system_map
- callback_init
- paging_init
- srm_paging_stop
- mem_init
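/*
 * Memory-management initialization for the Alpha port: kernel page
 * table setup, SRM console callback remapping, paging_init() and
 * mem_init().
 */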
#include <linux/pagemap.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/sections.h>

extern void die_if_kernel(char *, struct pt_regs *, long);

static struct pcb_struct original_pcb;

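/*
 * Allocate a page directory for a new mm: copy the kernel entries from
 * the init_mm page directory and install the self-referencing entry in
 * the last slot.
 */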
pgd_t *
pgd_alloc(struct mm_struct *mm)
{
        pgd_t *ret, *init;

        ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        init = pgd_offset(&init_mm, 0UL);
        if (ret) {
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
                memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
                pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

                /* The last entry maps the page directory itself, giving
                   the recursive mapping used for the virtual page table.  */
                pgd_val(ret[PTRS_PER_PGD-1])
                        = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
        }
        return ret;
}

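/*
 * Dummy page-table and page helpers: each hands back a freshly zeroed
 * placeholder (EMPTY_PGT / EMPTY_PGE) for use when a real one cannot
 * be provided.
 */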
pmd_t *
__bad_pagetable(void)
{
        memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
        return (pmd_t *) EMPTY_PGT;
}

pte_t
__bad_page(void)
{
        memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
        return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}

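/*
 * Switch to a new PCB: record the current kernel stack pointer ($30)
 * in it, then make it current via __reload_thread().  Returns the
 * address of the previous PCB.
 */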
static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
        register unsigned long sp __asm__("$30");
        pcb->ksp = sp;
        return __reload_thread(pcb);
}

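/*
 * Switch from the boot/console page tables to the kernel's own
 * swapper_pg_dir, set the virtual page table base, and save the
 * original PCB so the console's mapping can be restored later.
 */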
static inline void
switch_to_system_map(void)
{
        unsigned long newptbr;
        unsigned long original_pcb_ptr;

        /* Initialize the kernel's top-level page table.  The last slot
           maps the page table itself, for use as the virtual page
           table.  */
        memset(swapper_pg_dir, 0, PAGE_SIZE);
        newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
        pgd_val(swapper_pg_dir[1023]) =
                (newptbr << 32) | pgprot_val(PAGE_KERNEL);

        /* Set the virtual page table base if the console has not
           already done so.  */
        if (hwrpb->vptb != 0xfffffffe00000000UL) {
                wrvptptr(0xfffffffe00000000UL);
                hwrpb->vptb = 0xfffffffe00000000UL;
                hwrpb_update_checksum(hwrpb);
        }

        /* Point the kernel PCB at the new page tables and switch to it.  */
        init_thread_info.pcb.ptbr = newptbr;
        init_thread_info.pcb.flags = 1;
        original_pcb_ptr = load_PCB(&init_thread_info.pcb);
        tbia();

        /* Save the contents of the original (console) PCB so that its
           page tables can be restored later.  The returned pointer may
           be a physical address or an already-mapped kernel address;
           convert physical values before dereferencing.  */
        if (original_pcb_ptr < PAGE_OFFSET) {
                original_pcb_ptr = (unsigned long)
                        phys_to_virt(original_pcb_ptr);
        }
        original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}

int callback_init_done;

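/*
 * callback_init - build the mappings needed to keep calling the SRM
 * console (the CRB dispatch/fixup entry points) after the kernel has
 * switched to its own page tables.  Page-table pages are taken from
 * kernel_end; the updated kernel_end is returned.
 */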
void * __init
callback_init(void * kernel_end)
{
        struct crb_struct * crb;
        pgd_t *pgd;
        pmd_t *pmd;
        void *two_pages;

        /* Starting at the HWRPB, locate the console routine block.  */
        crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

        if (alpha_using_srm) {
                /* Ask the console to fix itself up for the new mapping;
                   halt if that fails.  */
                if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
                        __halt();

                /* Relocate the dispatch and fixup procedure descriptors
                   into the new VMALLOC_START-based mapping.  */
                crb->dispatch_va = (struct procdesc_struct *)
                        (VMALLOC_START + (unsigned long)crb->dispatch_va
                         - crb->map[0].va);
                crb->fixup_va = (struct procdesc_struct *)
                        (VMALLOC_START + (unsigned long)crb->fixup_va
                         - crb->map[0].va);
        }

        switch_to_system_map();

        /* Take two pages from the first page-aligned address past
           kernel_end: one becomes a PMD table and one a PTE page, hooked
           into the kernel page tables at VMALLOC_START.  More PTE pages
           are grabbed below if the console map needs them.  */
        two_pages = (void *)
                (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
        kernel_end = two_pages + 2*PAGE_SIZE;
        memset(two_pages, 0, 2*PAGE_SIZE);

        pgd = pgd_offset_k(VMALLOC_START);
        pgd_set(pgd, (pmd_t *)two_pages);
        pmd = pmd_offset(pgd, VMALLOC_START);
        pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

        if (alpha_using_srm) {
                static struct vm_struct console_remap_vm;
                unsigned long nr_pages = 0;
                unsigned long vaddr;
                unsigned long i, j;

                /* Calculate how many pages the console map needs.  */
                for (i = 0; i < crb->map_entries; ++i)
                        nr_pages += crb->map[i].count;

                /* Register the vm area early, before vmalloc is usable.  */
                console_remap_vm.flags = VM_ALLOC;
                console_remap_vm.size = nr_pages << PAGE_SHIFT;
                vm_area_register_early(&console_remap_vm, PAGE_SIZE);

                vaddr = (unsigned long)console_remap_vm.addr;

                /* Set up the page-table entries and record the new
                   virtual addresses back into the console map.  */
                for (i = 0; i < crb->map_entries; ++i) {
                        unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
                        crb->map[i].va = vaddr;
                        for (j = 0; j < crb->map[i].count; ++j) {
                                /* If the mapping crosses into a new PMD
                                   entry, allocate another PTE page from
                                   kernel_end.  */
                                if (pmd != pmd_offset(pgd, vaddr)) {
                                        memset(kernel_end, 0, PAGE_SIZE);
                                        pmd = pmd_offset(pgd, vaddr);
                                        pmd_set(pmd, (pte_t *)kernel_end);
                                        kernel_end += PAGE_SIZE;
                                }
                                set_pte(pte_offset_kernel(pmd, vaddr),
                                        pfn_pte(pfn, PAGE_KERNEL));
                                pfn++;
                                vaddr += PAGE_SIZE;
                        }
                }
        }

        callback_init_done = 1;
        return kernel_end;
}

#ifndef CONFIG_DISCONTIGMEM
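/*
 * paging_init() computes the ZONE_DMA / ZONE_NORMAL sizes, initializes
 * the memory map via free_area_init(), and clears the kernel's ZERO_PGE.
 */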
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, };
        unsigned long dma_pfn, high_pfn;

        dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        high_pfn = max_pfn = max_low_pfn;

        if (dma_pfn >= high_pfn)
                zones_size[ZONE_DMA] = high_pfn;
        else {
                zones_size[ZONE_DMA] = dma_pfn;
                zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
        }

        /* Initialize mem_map[].  */
        free_area_init(zones_size);

        /* Initialize the kernel's ZERO_PGE.  */
        memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
#endif

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
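/*
 * Undo switch_to_system_map(): put the virtual page table base back
 * where the SRM console expects it and reload the PCB that was saved
 * in original_pcb.
 */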
void
srm_paging_stop (void)
{
        /* Move the virtual page table back to where the console's
           VPTB (0x200000000) expects it.  */
        swapper_pg_dir[1] = swapper_pg_dir[1023];
        tbia();
        wrvptptr(0x200000000UL);
        hwrpb->vptb = 0x200000000UL;
        hwrpb_update_checksum(hwrpb);

        /* Reload the page tables that the console originally used.  */
        load_PCB(&original_pcb);
        tbia();
}
#endif

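/*
 * mem_init() sets max_mapnr and high_memory, hands all free memblock
 * memory to the page allocator, and prints the memory summary.
 */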
void __init
mem_init(void)
{
        set_max_mapnr(max_low_pfn);
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        memblock_free_all();
        mem_init_print_info(NULL);
}