This source file includes the following definitions:
- make_lowmem_page_readonly
- make_lowmem_page_readwrite
- xen_page_pinned
- xen_extend_mmu_update
- xen_extend_mmuext_op
- xen_set_pmd_hyper
- xen_set_pmd
- set_pte_mfn
- xen_batched_set_pte
- __xen_set_pte
- xen_set_pte
- xen_set_pte_at
- xen_ptep_modify_prot_start
- xen_ptep_modify_prot_commit
- pte_mfn_to_pfn
- pte_pfn_to_mfn
- xen_pte_val
- xen_pgd_val
- xen_make_pte
- xen_make_pgd
- xen_pmd_val
- xen_set_pud_hyper
- xen_set_pud
- xen_set_pte_atomic
- xen_pte_clear
- xen_pmd_clear
- xen_make_pmd
- xen_pud_val
- xen_make_pud
- xen_get_user_pgd
- __xen_set_p4d_hyper
- xen_set_p4d_hyper
- xen_set_p4d
- xen_p4d_val
- xen_make_p4d
- xen_pmd_walk
- xen_pud_walk
- xen_p4d_walk
- __xen_pgd_walk
- xen_pgd_walk
- xen_pte_lock
- xen_pte_unlock
- xen_do_pin
- xen_pin_page
- __xen_pgd_pin
- xen_pgd_pin
- xen_mm_pin_all
- xen_mark_pinned
- xen_after_bootmem
- xen_unpin_page
- __xen_pgd_unpin
- xen_pgd_unpin
- xen_mm_unpin_all
- xen_activate_mm
- xen_dup_mmap
- drop_mm_ref_this_cpu
- xen_drop_mm_ref
- xen_drop_mm_ref
- xen_exit_mmap
- pin_pagetable_pfn
- xen_cleanhighmap
- xen_free_ro_pages
- xen_cleanmfnmap_free_pgtbl
- xen_cleanmfnmap_pmd
- xen_cleanmfnmap_pud
- xen_cleanmfnmap_p4d
- xen_cleanmfnmap
- xen_pagetable_p2m_free
- xen_pagetable_cleanhighmap
- xen_pagetable_p2m_setup
- xen_pagetable_init
- xen_write_cr2
- xen_flush_tlb
- xen_flush_tlb_one_user
- xen_flush_tlb_others
- xen_read_cr3
- set_current_cr3
- __xen_write_cr3
- xen_write_cr3
- xen_write_cr3_init
- xen_pgd_alloc
- xen_pgd_free
- xen_make_pte_init
- xen_set_pte_init
- xen_alloc_pte_init
- xen_alloc_pmd_init
- xen_release_pte_init
- xen_release_pmd_init
- __pin_pagetable_pfn
- __set_pfn_prot
- xen_alloc_ptpage
- xen_alloc_pte
- xen_alloc_pmd
- xen_release_ptpage
- xen_release_pte
- xen_release_pmd
- xen_alloc_pud
- xen_release_pud
- xen_reserve_top
- __ka
- m2p
- m2v
- set_page_prot_flags
- set_page_prot
- xen_map_identity_early
- xen_setup_machphys_mapping
- convert_pfn_mfn
- check_pt_base
- xen_setup_kernel_pagetable
- xen_read_phys_ulong
- xen_early_virt_to_phys
- xen_relocate_p2m
- xen_write_cr3_init
- xen_find_pt_base
- xen_setup_kernel_pagetable
- xen_reserve_special_pages
- xen_pt_check_e820
- xen_set_fixmap
- xen_post_allocator_init
- xen_leave_lazy_mmu
- xen_init_mmu_ops
- xen_zap_pfn_range
- xen_remap_exchanged_ptes
- xen_exchange_memory
- xen_create_contiguous_region
- xen_destroy_contiguous_region
- xen_flush_tlb_all
- remap_area_pfn_pte_fn
- xen_remap_pfn
- paddr_vmcoreinfo_note
43 #include <linux/sched/mm.h>
44 #include <linux/highmem.h>
45 #include <linux/debugfs.h>
46 #include <linux/bug.h>
47 #include <linux/vmalloc.h>
48 #include <linux/export.h>
49 #include <linux/init.h>
50 #include <linux/gfp.h>
51 #include <linux/memblock.h>
52 #include <linux/seq_file.h>
53 #include <linux/crash_dump.h>
54 #ifdef CONFIG_KEXEC_CORE
55 #include <linux/kexec.h>
56 #endif
57
58 #include <trace/events/xen.h>
59
60 #include <asm/pgtable.h>
61 #include <asm/tlbflush.h>
62 #include <asm/fixmap.h>
63 #include <asm/mmu_context.h>
64 #include <asm/setup.h>
65 #include <asm/paravirt.h>
66 #include <asm/e820/api.h>
67 #include <asm/linkage.h>
68 #include <asm/page.h>
69 #include <asm/init.h>
70 #include <asm/pat.h>
71 #include <asm/smp.h>
72 #include <asm/tlb.h>
73
74 #include <asm/xen/hypercall.h>
75 #include <asm/xen/hypervisor.h>
76
77 #include <xen/xen.h>
78 #include <xen/page.h>
79 #include <xen/interface/xen.h>
80 #include <xen/interface/hvm/hvm_op.h>
81 #include <xen/interface/version.h>
82 #include <xen/interface/memory.h>
83 #include <xen/hvc-console.h>
84
85 #include "multicalls.h"
86 #include "mmu.h"
87 #include "debugfs.h"
88
89 #ifdef CONFIG_X86_32
90 /*
91  * Identity map, in addition to plain kernel map.  This needs to be
92  * large enough to allocate page table pages to allocate the rest.
93  * Each page can map 2MB.
94  */
95 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
96 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
97 #endif
98 #ifdef CONFIG_X86_64
99 /* l3 pud for userspace vsyscall mapping */
100 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
101 #endif
102
103 /*
104  * Protects atomic reservation decrease/increase against concurrent increases.
105  * Also protects non-atomic updates of current_pages and balloon lists.
106  */
107 static DEFINE_SPINLOCK(xen_reservation_lock);
108
109 /*
110  * Note about cr3 (pagetable base) values:
111  *
112  * xen_cr3 contains the current logical cr3 value; it contains the
113  * last set cr3.  This may not be the current effective cr3, because
114  * its update may be being lazily deferred.  However, a vcpu looking
115  * at its own cr3 can use this value knowing that everything will
116  * be self-consistent.
117  *
118  * xen_current_cr3 contains the actual vcpu cr3; it is set once the
119  * hypercall to set the vcpu cr3 is complete (so it may be a little
120  * out of date, but it will never be set early).  If one vcpu is
121  * looking at another vcpu's cr3 value, it should use this variable.
122  */
123 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
124 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
125
126 static phys_addr_t xen_pt_base, xen_pt_size __initdata;
127
128 static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready);
129
130 /*
131  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
132  * redzone above it, so round it up to a PGD boundary.
133  */
134 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
135
136 void make_lowmem_page_readonly(void *vaddr)
137 {
138 pte_t *pte, ptev;
139 unsigned long address = (unsigned long)vaddr;
140 unsigned int level;
141
142 pte = lookup_address(address, &level);
143 if (pte == NULL)
144 return;
145
146 ptev = pte_wrprotect(*pte);
147
148 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
149 BUG();
150 }
151
152 void make_lowmem_page_readwrite(void *vaddr)
153 {
154 pte_t *pte, ptev;
155 unsigned long address = (unsigned long)vaddr;
156 unsigned int level;
157
158 pte = lookup_address(address, &level);
159 if (pte == NULL)
160 return;
161
162 ptev = pte_mkwrite(*pte);
163
164 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
165 BUG();
166 }
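/*
 * Editor's note - illustrative sketch, not part of the original file:
 * a PV guest may not write a page that Xen is using as a pagetable, so
 * permission changes go through HYPERVISOR_update_va_mapping().  A
 * hypothetical caller preparing a page for use as a page table:
 *
 *	void *pt = (void *)get_zeroed_page(GFP_KERNEL);
 *
 *	make_lowmem_page_readonly(pt);	// Xen requires PT pages to be RO
 *	// ... hand the page to Xen, e.g. via an MMUEXT_PIN_* op ...
 *	make_lowmem_page_readwrite(pt);	// once Xen has unpinned it again
 */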
167
168
169 /*
170  * During early boot all page table pages are pinned, but we do not
171  * have struct pages, so return true until struct pages are ready.
172  */
173 static bool xen_page_pinned(void *ptr)
174 {
175 if (static_branch_likely(&xen_struct_pages_ready)) {
176 struct page *page = virt_to_page(ptr);
177
178 return PagePinned(page);
179 }
180 return true;
181 }
182
183 static void xen_extend_mmu_update(const struct mmu_update *update)
184 {
185 struct multicall_space mcs;
186 struct mmu_update *u;
187
188 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
189
190 if (mcs.mc != NULL) {
191 mcs.mc->args[1]++;
192 } else {
193 mcs = __xen_mc_entry(sizeof(*u));
194 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
195 }
196
197 u = mcs.args;
198 *u = *update;
199 }
200
201 static void xen_extend_mmuext_op(const struct mmuext_op *op)
202 {
203 struct multicall_space mcs;
204 struct mmuext_op *u;
205
206 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
207
208 if (mcs.mc != NULL) {
209 mcs.mc->args[1]++;
210 } else {
211 mcs = __xen_mc_entry(sizeof(*u));
212 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
213 }
214
215 u = mcs.args;
216 *u = *op;
217 }
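/*
 * Editor's note - illustrative sketch, not part of the original file:
 * the two xen_extend_*() helpers above append one more argument to the
 * currently open multicall batch, so that many page-table updates
 * collapse into a single __HYPERVISOR_mmu_update (or
 * __HYPERVISOR_mmuext_op) hypercall.  The calling pattern used all
 * through this file is:
 *
 *	struct mmu_update u;
 *
 *	xen_mc_batch();				// open a batch
 *	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
 *	u.val = pte_val_ma(pteval);
 *	xen_extend_mmu_update(&u);		// queue, don't issue yet
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);	// flush now unless lazy
 */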
218
219 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
220 {
221 struct mmu_update u;
222
223 preempt_disable();
224
225 xen_mc_batch();
226
227 /* ptr may be ioremapped for 64-bit pagetable setup */
228 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
229 u.val = pmd_val_ma(val);
230 xen_extend_mmu_update(&u);
231
232 xen_mc_issue(PARAVIRT_LAZY_MMU);
233
234 preempt_enable();
235 }
236
237 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
238 {
239 trace_xen_mmu_set_pmd(ptr, val);
240
241 /* If page is not pinned, we can just update the entry
242    directly */
243 if (!xen_page_pinned(ptr)) {
244 *ptr = val;
245 return;
246 }
247
248 xen_set_pmd_hyper(ptr, val);
249 }
250
251 /*
252  * Associate a virtual page frame with a given physical page frame
253  * and protection flags for that frame.
254  */
255 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
256 {
257 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
258 }
259
260 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
261 {
262 struct mmu_update u;
263
264 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
265 return false;
266
267 xen_mc_batch();
268
269 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
270 u.val = pte_val_ma(pteval);
271 xen_extend_mmu_update(&u);
272
273 xen_mc_issue(PARAVIRT_LAZY_MMU);
274
275 return true;
276 }
277
278 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
279 {
280 if (!xen_batched_set_pte(ptep, pteval)) {
281 /*
282  * Could call native_set_pte() here and trap and
283  * emulate the PTE write, but with 32-bit guests this
284  * needs two traps (one for each of the two 32-bit
285  * words in the PTE) so do one hypercall directly
286  * instead.
287  */
288 struct mmu_update u;
289
290 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
291 u.val = pte_val_ma(pteval);
292 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
293 }
294 }
295
296 static void xen_set_pte(pte_t *ptep, pte_t pteval)
297 {
298 trace_xen_mmu_set_pte(ptep, pteval);
299 __xen_set_pte(ptep, pteval);
300 }
301
302 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
303 pte_t *ptep, pte_t pteval)
304 {
305 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
306 __xen_set_pte(ptep, pteval);
307 }
308
309 pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
310 unsigned long addr, pte_t *ptep)
311 {
312 /* Just return the pte as-is.  We preserve the bits on commit */
313 trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
314 return *ptep;
315 }
316
317 void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
318 pte_t *ptep, pte_t pte)
319 {
320 struct mmu_update u;
321
322 trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
323 xen_mc_batch();
324
325 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
326 u.val = pte_val_ma(pte);
327 xen_extend_mmu_update(&u);
328
329 xen_mc_issue(PARAVIRT_LAZY_MMU);
330 }
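/*
 * Editor's note - illustrative sketch, not part of the original file:
 * the start/commit pair implements the ptep_modify_prot protocol: read
 * the old pte, let generic mm code compute the new one, then write it
 * back with MMU_PT_UPDATE_PRESERVE_AD so that Accessed/Dirty bits the
 * hardware set in between are not lost.  Roughly, as in the generic
 * mprotect path:
 *
 *	old = ptep_modify_prot_start(vma, addr, ptep);
 *	pte = pte_modify(old, newprot);		// generic mm helper
 *	ptep_modify_prot_commit(vma, addr, ptep, old, pte);
 */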
331
332 /* Assume pteval_t is equivalent to all the other *val_t types. */
333 static pteval_t pte_mfn_to_pfn(pteval_t val)
334 {
335 if (val & _PAGE_PRESENT) {
336 unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
337 unsigned long pfn = mfn_to_pfn(mfn);
338
339 pteval_t flags = val & PTE_FLAGS_MASK;
340 if (unlikely(pfn == ~0))
341 val = flags & ~_PAGE_PRESENT;
342 else
343 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
344 }
345
346 return val;
347 }
348
349 static pteval_t pte_pfn_to_mfn(pteval_t val)
350 {
351 if (val & _PAGE_PRESENT) {
352 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
353 pteval_t flags = val & PTE_FLAGS_MASK;
354 unsigned long mfn;
355
356 mfn = __pfn_to_mfn(pfn);
357
358 /*
359  * If there's no mfn for the pfn, then just create an
360  * empty non-present pte.  Unfortunately this loses
361  * information about the original pfn, so
362  * pte_mfn_to_pfn is asymmetric.
363  */
364 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
365 mfn = 0;
366 flags = 0;
367 } else
368 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
369 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
370 }
371
372 return val;
373 }
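/*
 * Editor's note - illustrative sketch, not part of the original file:
 * pte_mfn_to_pfn()/pte_pfn_to_mfn() rewrite only the frame-number bits
 * of a pte value and keep the flag bits.  E.g. with 4 KiB pages
 * (PAGE_SHIFT == 12), if pfn 0x1234 maps to mfn 0x5678:
 *
 *	pseudo-physical pte: (0x1234 << 12) | flags == 0x1234000 | flags
 *	machine pte:         (0x5678 << 12) | flags == 0x5678000 | flags
 *
 * The p2m lookup can fail (e.g. a ballooned-out page), in which case
 * the pte is constructed as non-present instead.
 */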
374
375 __visible pteval_t xen_pte_val(pte_t pte)
376 {
377 pteval_t pteval = pte.pte;
378
379 return pte_mfn_to_pfn(pteval);
380 }
381 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
382
383 __visible pgdval_t xen_pgd_val(pgd_t pgd)
384 {
385 return pte_mfn_to_pfn(pgd.pgd);
386 }
387 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
388
389 __visible pte_t xen_make_pte(pteval_t pte)
390 {
391 pte = pte_pfn_to_mfn(pte);
392
393 return native_make_pte(pte);
394 }
395 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
396
397 __visible pgd_t xen_make_pgd(pgdval_t pgd)
398 {
399 pgd = pte_pfn_to_mfn(pgd);
400 return native_make_pgd(pgd);
401 }
402 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
403
404 __visible pmdval_t xen_pmd_val(pmd_t pmd)
405 {
406 return pte_mfn_to_pfn(pmd.pmd);
407 }
408 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
409
410 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
411 {
412 struct mmu_update u;
413
414 preempt_disable();
415
416 xen_mc_batch();
417
418 /* ptr may be ioremapped for 64-bit pagetable setup */
419 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
420 u.val = pud_val_ma(val);
421 xen_extend_mmu_update(&u);
422
423 xen_mc_issue(PARAVIRT_LAZY_MMU);
424
425 preempt_enable();
426 }
427
428 static void xen_set_pud(pud_t *ptr, pud_t val)
429 {
430 trace_xen_mmu_set_pud(ptr, val);
431
432 /* If page is not pinned, we can just update the entry
433    directly */
434 if (!xen_page_pinned(ptr)) {
435 *ptr = val;
436 return;
437 }
438
439 xen_set_pud_hyper(ptr, val);
440 }
441
442 #ifdef CONFIG_X86_PAE
443 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
444 {
445 trace_xen_mmu_set_pte_atomic(ptep, pte);
446 __xen_set_pte(ptep, pte);
447 }
448
449 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
450 {
451 trace_xen_mmu_pte_clear(mm, addr, ptep);
452 __xen_set_pte(ptep, native_make_pte(0));
453 }
454
455 static void xen_pmd_clear(pmd_t *pmdp)
456 {
457 trace_xen_mmu_pmd_clear(pmdp);
458 set_pmd(pmdp, __pmd(0));
459 }
460 #endif
461
462 __visible pmd_t xen_make_pmd(pmdval_t pmd)
463 {
464 pmd = pte_pfn_to_mfn(pmd);
465 return native_make_pmd(pmd);
466 }
467 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
468
469 #ifdef CONFIG_X86_64
470 __visible pudval_t xen_pud_val(pud_t pud)
471 {
472 return pte_mfn_to_pfn(pud.pud);
473 }
474 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
475
476 __visible pud_t xen_make_pud(pudval_t pud)
477 {
478 pud = pte_pfn_to_mfn(pud);
479
480 return native_make_pud(pud);
481 }
482 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
483
484 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
485 {
486 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
487 unsigned offset = pgd - pgd_page;
488 pgd_t *user_ptr = NULL;
489
490 if (offset < pgd_index(USER_LIMIT)) {
491 struct page *page = virt_to_page(pgd_page);
492 user_ptr = (pgd_t *)page->private;
493 if (user_ptr)
494 user_ptr += offset;
495 }
496
497 return user_ptr;
498 }
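/*
 * Editor's note - illustrative sketch, not part of the original file:
 * 64-bit PV guests keep two pgds per mm - one for kernel mode and one
 * for user mode - because Xen occupies part of the kernel half of the
 * address space.  The user pgd is stashed in page->private of the
 * kernel pgd's page (see xen_pgd_alloc() below), so the lookup is
 * pointer arithmetic only:
 *
 *	pgd_t *kpgd = mm->pgd;
 *	pgd_t *upgd = xen_get_user_pgd(kpgd);	// NULL above USER_LIMIT
 */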
499
500 static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
501 {
502 struct mmu_update u;
503
504 u.ptr = virt_to_machine(ptr).maddr;
505 u.val = p4d_val_ma(val);
506 xen_extend_mmu_update(&u);
507 }
508
509 /*
510  * Raw hypercall-based set_p4d, intended for use in early boot
511  * before there's a page structure.  This implies:
512  *  1. The only existing pagetable is the kernel's
513  *  2. It is always pinned
514  *  3. It has no user pagetable attached to it
515  */
516 static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
517 {
518 preempt_disable();
519
520 xen_mc_batch();
521
522 __xen_set_p4d_hyper(ptr, val);
523
524 xen_mc_issue(PARAVIRT_LAZY_MMU);
525
526 preempt_enable();
527 }
528
529 static void xen_set_p4d(p4d_t *ptr, p4d_t val)
530 {
531 pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
532 pgd_t pgd_val;
533
534 trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
535
536 /* If page is not pinned, we can just update the entry
537    directly */
538 if (!xen_page_pinned(ptr)) {
539 *ptr = val;
540 if (user_ptr) {
541 WARN_ON(xen_page_pinned(user_ptr));
542 pgd_val.pgd = p4d_val_ma(val);
543 *user_ptr = pgd_val;
544 }
545 return;
546 }
547
548 /* If it's pinned, then we can at least batch the kernel and
549    user updates together. */
550 xen_mc_batch();
551
552 __xen_set_p4d_hyper(ptr, val);
553 if (user_ptr)
554 __xen_set_p4d_hyper((p4d_t *)user_ptr, val);
555
556 xen_mc_issue(PARAVIRT_LAZY_MMU);
557 }
558
559 #if CONFIG_PGTABLE_LEVELS >= 5
560 __visible p4dval_t xen_p4d_val(p4d_t p4d)
561 {
562 return pte_mfn_to_pfn(p4d.p4d);
563 }
564 PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);
565
566 __visible p4d_t xen_make_p4d(p4dval_t p4d)
567 {
568 p4d = pte_pfn_to_mfn(p4d);
569
570 return native_make_p4d(p4d);
571 }
572 PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
573 #endif
574 #endif
575
576 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
577 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
578 bool last, unsigned long limit)
579 {
580 int i, nr, flush = 0;
581
582 nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
583 for (i = 0; i < nr; i++) {
584 if (!pmd_none(pmd[i]))
585 flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
586 }
587 return flush;
588 }
589
590 static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
591 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
592 bool last, unsigned long limit)
593 {
594 int i, nr, flush = 0;
595
596 nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
597 for (i = 0; i < nr; i++) {
598 pmd_t *pmd;
599
600 if (pud_none(pud[i]))
601 continue;
602
603 pmd = pmd_offset(&pud[i], 0);
604 if (PTRS_PER_PMD > 1)
605 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
606 flush |= xen_pmd_walk(mm, pmd, func,
607 last && i == nr - 1, limit);
608 }
609 return flush;
610 }
611
612 static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
613 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
614 bool last, unsigned long limit)
615 {
616 int flush = 0;
617 pud_t *pud;
618
619
620 if (p4d_none(*p4d))
621 return flush;
622
623 pud = pud_offset(p4d, 0);
624 if (PTRS_PER_PUD > 1)
625 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
626 flush |= xen_pud_walk(mm, pud, func, last, limit);
627 return flush;
628 }
629
630 /*
631  * (Yet another) pagetable walker.  This one is intended for pinning a
632  * pagetable.  This means that it walks a pagetable and calls the
633  * callback function on each page it finds making up the page table,
634  * at every level.  It walks the entire pagetable, but it only bothers
635  * pinning pte pages which are below limit.  In the normal case this
636  * will be STACK_TOP_MAX, but at boot we need to pin up to
637  * FIXADDR_TOP.
638  *
639  * For 32-bit the important bit is that we don't pin beyond there,
640  * because then we start getting into Xen's ptes.
641  *
642  * For 64-bit, we must skip the Xen hole in the middle of the address
643  * space, just after the big x86-64 virtual hole.
644  */
645 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
646 int (*func)(struct mm_struct *mm, struct page *,
647 enum pt_level),
648 unsigned long limit)
649 {
650 int i, nr, flush = 0;
651 unsigned hole_low = 0, hole_high = 0;
652
653 /* The limit is the last byte to be touched */
654 limit--;
655 BUG_ON(limit >= FIXADDR_TOP);
656
657 #ifdef CONFIG_X86_64
658 /*
659  * 64-bit has a great big hole in the middle of the address
660  * space, which contains the Xen mappings.
661  */
662 hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
663 hole_high = pgd_index(GUARD_HOLE_END_ADDR);
664 #endif
665
666 nr = pgd_index(limit) + 1;
667 for (i = 0; i < nr; i++) {
668 p4d_t *p4d;
669
670 if (i >= hole_low && i < hole_high)
671 continue;
672
673 if (pgd_none(pgd[i]))
674 continue;
675
676 p4d = p4d_offset(&pgd[i], 0);
677 flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
678 }
679
680 /* Do the top level last, so that the callbacks can use it as
681    a cue to do final things like tlb flushes. */
682 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
683
684 return flush;
685 }
686
687 static int xen_pgd_walk(struct mm_struct *mm,
688 int (*func)(struct mm_struct *mm, struct page *,
689 enum pt_level),
690 unsigned long limit)
691 {
692 return __xen_pgd_walk(mm, mm->pgd, func, limit);
693 }
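/*
 * Editor's note - illustrative sketch, not part of the original file:
 * the walker visits every page making up a pagetable and hands each
 * one to the callback, which returns nonzero if a TLB flush is needed
 * afterwards.  A minimal callback, in the style of xen_mark_pinned()
 * further down:
 *
 *	static int mark_cb(struct mm_struct *mm, struct page *page,
 *			   enum pt_level level)
 *	{
 *		SetPagePinned(page);	// hypothetical example callback
 *		return 0;		// no flush required
 *	}
 *
 *	xen_pgd_walk(mm, mark_cb, USER_LIMIT);
 */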
694
695 /* If we're using split pte locks, then take the page's lock and
696    return a pointer to it.  Otherwise return NULL. */
697 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
698 {
699 spinlock_t *ptl = NULL;
700
701 #if USE_SPLIT_PTE_PTLOCKS
702 ptl = ptlock_ptr(page);
703 spin_lock_nest_lock(ptl, &mm->page_table_lock);
704 #endif
705
706 return ptl;
707 }
708
709 static void xen_pte_unlock(void *v)
710 {
711 spinlock_t *ptl = v;
712 spin_unlock(ptl);
713 }
714
715 static void xen_do_pin(unsigned level, unsigned long pfn)
716 {
717 struct mmuext_op op;
718
719 op.cmd = level;
720 op.arg1.mfn = pfn_to_mfn(pfn);
721
722 xen_extend_mmuext_op(&op);
723 }
724
725 static int xen_pin_page(struct mm_struct *mm, struct page *page,
726 enum pt_level level)
727 {
728 unsigned pgfl = TestSetPagePinned(page);
729 int flush;
730
731 if (pgfl)
732 flush = 0;
733 else if (PageHighMem(page))
734 /* kmaps need flushing if we found an unpinned
735    highpage */
736 flush = 1;
737 else {
738 void *pt = lowmem_page_address(page);
739 unsigned long pfn = page_to_pfn(page);
740 struct multicall_space mcs = __xen_mc_entry(0);
741 spinlock_t *ptl;
742
743 flush = 0;
744
745 /*
746  * We need to hold the pagetable lock between the time
747  * we make the pagetable RO and when we actually pin
748  * it.  If we don't, then other users may come in and
749  * attempt to update the pagetable by writing it,
750  * which will fail because the memory is RO but not
751  * pinned, so Xen won't do the trap'n'emulate.
752  *
753  * If we're using split pte locks, we can't hold the
754  * entire pagetable's worth of locks during the
755  * traverse, because we may wrap the preempt count (8
756  * bits).  The solution is to mark RO and pin each PTE
757  * page while holding the lock.  This means the number
758  * of locks we end up holding is never more than a
759  * batch size (~32 entries, at present).
760  *
761  * If we're not using split pte locks, we needn't pin
762  * the PTE pages independently, because we're
763  * protected by the overall pagetable lock.
764  */
765 ptl = NULL;
766 if (level == PT_PTE)
767 ptl = xen_pte_lock(page, mm);
768
769 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
770 pfn_pte(pfn, PAGE_KERNEL_RO),
771 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
772
773 if (ptl) {
774 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
775
776 /* Queue a deferred unlock for when this batch
777    is completed. */
778 xen_mc_callback(xen_pte_unlock, ptl);
779 }
780 }
781
782 return flush;
783 }
784
785 /* This is called just after a mm has been created, but it has not
786    been used yet.  We need to make sure that its pagetable is all
787    read-only, and can be pinned. */
788 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
789 {
790 trace_xen_mmu_pgd_pin(mm, pgd);
791
792 xen_mc_batch();
793
794 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
795 /* re-enable interrupts for flushing */
796 xen_mc_issue(0);
797
798 kmap_flush_unused();
799
800 xen_mc_batch();
801 }
802
803 #ifdef CONFIG_X86_64
804 {
805 pgd_t *user_pgd = xen_get_user_pgd(pgd);
806
807 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
808
809 if (user_pgd) {
810 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
811 xen_do_pin(MMUEXT_PIN_L4_TABLE,
812 PFN_DOWN(__pa(user_pgd)));
813 }
814 }
815 #else
816 #ifdef CONFIG_X86_PAE
817 /* Need to make sure unshared kernel PMD is pinnable */
818 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
819 PT_PMD);
820 #endif
821 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
822 #endif
823 xen_mc_issue(0);
824 }
825
826 static void xen_pgd_pin(struct mm_struct *mm)
827 {
828 __xen_pgd_pin(mm, mm->pgd);
829 }
830
831
832 /*
833  * On save, we need to pin all pagetables to make sure they get their
834  * mfns turned into pfns.  Search the list for any unpinned pgds and
835  * pin them (unpinned pgds are not currently in use, probably because
836  * the process is under construction or destruction).
837  *
838  * Expected to be called in stop_machine() ("equivalent to taking
839  * every spinlock in the system"), so the locking doesn't matter at all.
840  */
841 void xen_mm_pin_all(void)
842 {
843 struct page *page;
844
845 spin_lock(&pgd_lock);
846
847 list_for_each_entry(page, &pgd_list, lru) {
848 if (!PagePinned(page)) {
849 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
850 SetPageSavePinned(page);
851 }
852 }
853
854 spin_unlock(&pgd_lock);
855 }
856
857 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
858 enum pt_level level)
859 {
860 SetPagePinned(page);
861 return 0;
862 }
863
864 /*
865  * The init_mm pagetable is really pinned as soon as it is created, but
866  * that's before we have page structures to store the bits.  So do all
867  * the book-keeping now once struct pages for allocated pages are
868  * initialized.  This happens only after memblock_free_all() is called.
869  */
870 static void __init xen_after_bootmem(void)
871 {
872 static_branch_enable(&xen_struct_pages_ready);
873 #ifdef CONFIG_X86_64
874 SetPagePinned(virt_to_page(level3_user_vsyscall));
875 #endif
876 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
877 }
878
879 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
880 enum pt_level level)
881 {
882 unsigned pgfl = TestClearPagePinned(page);
883
884 if (pgfl && !PageHighMem(page)) {
885 void *pt = lowmem_page_address(page);
886 unsigned long pfn = page_to_pfn(page);
887 spinlock_t *ptl = NULL;
888 struct multicall_space mcs;
889
890 /*
891  * Do the converse to pin_page.  If we're using split
892  * pte locks, we must be holding the lock while the
893  * pte page is unpinned but still RO to prevent
894  * concurrent updates from seeing it in this
895  * partially-pinned state.
896  */
897 if (level == PT_PTE) {
898 ptl = xen_pte_lock(page, mm);
899
900 if (ptl)
901 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
902 }
903
904 mcs = __xen_mc_entry(0);
905
906 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
907 pfn_pte(pfn, PAGE_KERNEL),
908 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
909
910 if (ptl) {
911 /* unlock when batch completed */
912 xen_mc_callback(xen_pte_unlock, ptl);
913 }
914 }
915
916 return 0;
917 }
918
919 /* Release a pagetable's pages back as normal RW */
920 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
921 {
922 trace_xen_mmu_pgd_unpin(mm, pgd);
923
924 xen_mc_batch();
925
926 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
927
928 #ifdef CONFIG_X86_64
929 {
930 pgd_t *user_pgd = xen_get_user_pgd(pgd);
931
932 if (user_pgd) {
933 xen_do_pin(MMUEXT_UNPIN_TABLE,
934 PFN_DOWN(__pa(user_pgd)));
935 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
936 }
937 }
938 #endif
939
940 #ifdef CONFIG_X86_PAE
941 /* Need to make sure unshared kernel PMD is unpinned */
942 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
943 PT_PMD);
944 #endif
945
946 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
947
948 xen_mc_issue(0);
949 }
950
951 static void xen_pgd_unpin(struct mm_struct *mm)
952 {
953 __xen_pgd_unpin(mm, mm->pgd);
954 }
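/*
 * Editor's note - illustrative sketch, not part of the original file:
 * pin/unpin bracket the lifetime of a process pagetable.  Pinning
 * makes every page of the pagetable read-only and registers it with
 * Xen; from then on all updates must go through (batched) hypercalls:
 *
 *	xen_pgd_pin(mm);	// at activate_mm/dup_mmap time (below)
 *	...			// mm in use; updates via mmu_update ops
 *	xen_pgd_unpin(mm);	// at exit_mmap time; pages become RW again
 */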
955
956 /*
957  * On resume, undo any pinning done at save, so that the rest of the
958  * kernel doesn't see any unexpected pinned pagetables.
959  */
960 void xen_mm_unpin_all(void)
961 {
962 struct page *page;
963
964 spin_lock(&pgd_lock);
965
966 list_for_each_entry(page, &pgd_list, lru) {
967 if (PageSavePinned(page)) {
968 BUG_ON(!PagePinned(page));
969 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
970 ClearPageSavePinned(page);
971 }
972 }
973
974 spin_unlock(&pgd_lock);
975 }
976
977 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
978 {
979 spin_lock(&next->page_table_lock);
980 xen_pgd_pin(next);
981 spin_unlock(&next->page_table_lock);
982 }
983
984 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
985 {
986 spin_lock(&mm->page_table_lock);
987 xen_pgd_pin(mm);
988 spin_unlock(&mm->page_table_lock);
989 }
990
991 static void drop_mm_ref_this_cpu(void *info)
992 {
993 struct mm_struct *mm = info;
994
995 if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
996 leave_mm(smp_processor_id());
997
998 /*
999  * If this cpu still has a stale cr3 reference, then make sure
1000  * it has been flushed.
1001  */
1002 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
1003 xen_mc_flush();
1004 }
1005
1006 #ifdef CONFIG_SMP
1007 /*
1008  * Another cpu may still have their %cr3 pointing at the pagetable, so
1009  * we need to repoint it somewhere else before we can unpin it.
1010  */
1011 static void xen_drop_mm_ref(struct mm_struct *mm)
1012 {
1013 cpumask_var_t mask;
1014 unsigned cpu;
1015
1016 drop_mm_ref_this_cpu(mm);
1017
1018 /* Get the "official" set of cpus referring to our pagetable. */
1019 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1020 for_each_online_cpu(cpu) {
1021 if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1022 continue;
1023 smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
1024 }
1025 return;
1026 }
1027
1028 /*
1029  * It's possible that a vcpu may have a stale reference to our
1030  * cr3, because it's in lazy mode and hasn't yet flushed its
1031  * set of pending hypercalls.  In this case, we can look at
1032  * its actual current cr3 value, and force it to flush if
1033  * needed.
1034  */
1035 cpumask_clear(mask);
1036 for_each_online_cpu(cpu) {
1037 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1038 cpumask_set_cpu(cpu, mask);
1039 }
1040
1041 smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
1042 free_cpumask_var(mask);
1043 }
1044 #else
1045 static void xen_drop_mm_ref(struct mm_struct *mm)
1046 {
1047 drop_mm_ref_this_cpu(mm);
1048 }
1049 #endif
1050
1051 /*
1052  * While a process runs, Xen pins its pagetable, which means that the
1053  * hypervisor forces it to be read-only, and it controls all updates
1054  * to it.  This means that all pagetable updates have to go via the
1055  * hypervisor, which is moderately expensive.
1056  *
1057  * Since we're pulling the pagetable down, we switch to use init_mm,
1058  * unpin the old process pagetable and mark it all read-write, which
1059  * allows further operations on it to be simple memory accesses.
1060  *
1061  * The only subtle point is that another CPU may be still using the
1062  * pagetable because of lazy tlb flushing.  This means we need to
1063  * switch all CPUs off this pagetable before we can unpin it.
1064  */
1065 static void xen_exit_mmap(struct mm_struct *mm)
1066 {
1067 get_cpu();
1068 xen_drop_mm_ref(mm);
1069 put_cpu();
1070
1071 spin_lock(&mm->page_table_lock);
1072
1073 /* pgd may not be pinned in the error exit path of execve */
1074 if (xen_page_pinned(mm->pgd))
1075 xen_pgd_unpin(mm);
1076
1077 spin_unlock(&mm->page_table_lock);
1078 }
1079
1080 static void xen_post_allocator_init(void);
1081
1082 static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1083 {
1084 struct mmuext_op op;
1085
1086 op.cmd = cmd;
1087 op.arg1.mfn = pfn_to_mfn(pfn);
1088 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1089 BUG();
1090 }
1091
1092 #ifdef CONFIG_X86_64
1093 static void __init xen_cleanhighmap(unsigned long vaddr,
1094 unsigned long vaddr_end)
1095 {
1096 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1097 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1098
1099 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1100  * We include the PMD passed in on _both_ boundaries. */
1101 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1102 pmd++, vaddr += PMD_SIZE) {
1103 if (pmd_none(*pmd))
1104 continue;
1105 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1106 set_pmd(pmd, __pmd(0));
1107 }
1108 /* In case we did something silly, we should crash in this function
1109  * instead of somewhere later and be confusing. */
1110 xen_mc_flush();
1111 }
1112
1113 /*
1114  * Make a page range writeable and free it.
1115  */
1116 static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1117 {
1118 void *vaddr = __va(paddr);
1119 void *vaddr_end = vaddr + size;
1120
1121 for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1122 make_lowmem_page_readwrite(vaddr);
1123
1124 memblock_free(paddr, size);
1125 }
1126
1127 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
1128 {
1129 unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1130
1131 if (unpin)
1132 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
1133 ClearPagePinned(virt_to_page(__va(pa)));
1134 xen_free_ro_pages(pa, PAGE_SIZE);
1135 }
1136
1137 static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
1138 {
1139 unsigned long pa;
1140 pte_t *pte_tbl;
1141 int i;
1142
1143 if (pmd_large(*pmd)) {
1144 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1145 xen_free_ro_pages(pa, PMD_SIZE);
1146 return;
1147 }
1148
1149 pte_tbl = pte_offset_kernel(pmd, 0);
1150 for (i = 0; i < PTRS_PER_PTE; i++) {
1151 if (pte_none(pte_tbl[i]))
1152 continue;
1153 pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
1154 xen_free_ro_pages(pa, PAGE_SIZE);
1155 }
1156 set_pmd(pmd, __pmd(0));
1157 xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
1158 }
1159
1160 static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
1161 {
1162 unsigned long pa;
1163 pmd_t *pmd_tbl;
1164 int i;
1165
1166 if (pud_large(*pud)) {
1167 pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1168 xen_free_ro_pages(pa, PUD_SIZE);
1169 return;
1170 }
1171
1172 pmd_tbl = pmd_offset(pud, 0);
1173 for (i = 0; i < PTRS_PER_PMD; i++) {
1174 if (pmd_none(pmd_tbl[i]))
1175 continue;
1176 xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
1177 }
1178 set_pud(pud, __pud(0));
1179 xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
1180 }
1181
1182 static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
1183 {
1184 unsigned long pa;
1185 pud_t *pud_tbl;
1186 int i;
1187
1188 if (p4d_large(*p4d)) {
1189 pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
1190 xen_free_ro_pages(pa, P4D_SIZE);
1191 return;
1192 }
1193
1194 pud_tbl = pud_offset(p4d, 0);
1195 for (i = 0; i < PTRS_PER_PUD; i++) {
1196 if (pud_none(pud_tbl[i]))
1197 continue;
1198 xen_cleanmfnmap_pud(pud_tbl + i, unpin);
1199 }
1200 set_p4d(p4d, __p4d(0));
1201 xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
1202 }
1203
1204 /*
1205  * Since it is well isolated we can (and since it is perhaps large we
1206  * should) also free the page tables mapping the initial P->M table.
1207  */
1208 static void __init xen_cleanmfnmap(unsigned long vaddr)
1209 {
1210 pgd_t *pgd;
1211 p4d_t *p4d;
1212 bool unpin;
1213
1214 unpin = (vaddr == 2 * PGDIR_SIZE);
1215 vaddr &= PMD_MASK;
1216 pgd = pgd_offset_k(vaddr);
1217 p4d = p4d_offset(pgd, 0);
1218 if (!p4d_none(*p4d))
1219 xen_cleanmfnmap_p4d(p4d, unpin);
1220 }
1221
1222 static void __init xen_pagetable_p2m_free(void)
1223 {
1224 unsigned long size;
1225 unsigned long addr;
1226
1227 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1228
1229 /* No memory or already called. */
1230 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
1231 return;
1232
1233 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1234 memset((void *)xen_start_info->mfn_list, 0xff, size);
1235
1236 addr = xen_start_info->mfn_list;
1237
1238 /*
1239  * We could be in __ka space.  We roundup to the PMD, which means that
1240  * if anybody at this stage is using the __ka address of xen_start_info
1241  * or xen_start_info->shared_info they are going to crash.  Fortunately
1242  * we have already revectored in xen_setup_kernel_pagetable.
1243  */
1244 size = roundup(size, PMD_SIZE);
1245
1246 if (addr >= __START_KERNEL_map) {
1247 xen_cleanhighmap(addr, addr + size);
1248 size = PAGE_ALIGN(xen_start_info->nr_pages *
1249 sizeof(unsigned long));
1250 memblock_free(__pa(addr), size);
1251 } else {
1252 xen_cleanmfnmap(addr);
1253 }
1254 }
1255
1256 static void __init xen_pagetable_cleanhighmap(void)
1257 {
1258 unsigned long size;
1259 unsigned long addr;
1260
1261 /*
1262  * At this stage cleanup_highmap() has already cleaned the unused
1263  * parts of the __ka space; what is left here are the __ka mappings
1264  * of the initial page tables supplied by the hypervisor
1265  * (pt_base .. pt_base + nr_pt_frames * PAGE_SIZE).  Erase those
1266  * too, rounded up to the PMD, and then revector pt_base to its
1267  * __va() alias so that later code no longer depends on the __ka
1268  * address space.
1269  */
1270 addr = xen_start_info->pt_base;
1271 size = xen_start_info->nr_pt_frames * PAGE_SIZE;
1272
1273 xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
1274 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1275 }
1276 #endif
1277
1278 static void __init xen_pagetable_p2m_setup(void)
1279 {
1280 xen_vmalloc_p2m_tree();
1281
1282 #ifdef CONFIG_X86_64
1283 xen_pagetable_p2m_free();
1284
1285 xen_pagetable_cleanhighmap();
1286 #endif
1287
1288 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1289 }
1290
1291 static void __init xen_pagetable_init(void)
1292 {
1293 paging_init();
1294 xen_post_allocator_init();
1295
1296 xen_pagetable_p2m_setup();
1297
1298 /* Allocate and initialize top and mid mfn levels for p2m structure */
1299 xen_build_mfn_list_list();
1300
1301 /* Remap memory freed due to conflicts with E820 map */
1302 xen_remap_memory();
1303 xen_setup_mfn_list_list();
1304 }

1305 static void xen_write_cr2(unsigned long cr2)
1306 {
1307 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1308 }
1309
1310 static noinline void xen_flush_tlb(void)
1311 {
1312 struct mmuext_op *op;
1313 struct multicall_space mcs;
1314
1315 preempt_disable();
1316
1317 mcs = xen_mc_entry(sizeof(*op));
1318
1319 op = mcs.args;
1320 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1321 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1322
1323 xen_mc_issue(PARAVIRT_LAZY_MMU);
1324
1325 preempt_enable();
1326 }
1327
1328 static void xen_flush_tlb_one_user(unsigned long addr)
1329 {
1330 struct mmuext_op *op;
1331 struct multicall_space mcs;
1332
1333 trace_xen_mmu_flush_tlb_one_user(addr);
1334
1335 preempt_disable();
1336
1337 mcs = xen_mc_entry(sizeof(*op));
1338 op = mcs.args;
1339 op->cmd = MMUEXT_INVLPG_LOCAL;
1340 op->arg1.linear_addr = addr & PAGE_MASK;
1341 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1342
1343 xen_mc_issue(PARAVIRT_LAZY_MMU);
1344
1345 preempt_enable();
1346 }
1347
1348 static void xen_flush_tlb_others(const struct cpumask *cpus,
1349 const struct flush_tlb_info *info)
1350 {
1351 struct {
1352 struct mmuext_op op;
1353 DECLARE_BITMAP(mask, NR_CPUS);
1354 } *args;
1355 struct multicall_space mcs;
1356 const size_t mc_entry_size = sizeof(args->op) +
1357 sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
1358
1359 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
1360
1361 if (cpumask_empty(cpus))
1362 return;
1363
1364 mcs = xen_mc_entry(mc_entry_size);
1365 args = mcs.args;
1366 args->op.arg2.vcpumask = to_cpumask(args->mask);
1367
1368 /* Remove us, and any offline CPUS. */
1369 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1370 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1371
1372 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1373 if (info->end != TLB_FLUSH_ALL &&
1374 (info->end - info->start) <= PAGE_SIZE) {
1375 args->op.cmd = MMUEXT_INVLPG_MULTI;
1376 args->op.arg1.linear_addr = info->start;
1377 }
1378
1379 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1380
1381 xen_mc_issue(PARAVIRT_LAZY_MMU);
1382 }
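/*
 * Editor's note - illustrative sketch, not part of the original file:
 * remote TLB flushes become a single MMUEXT_TLB_FLUSH_MULTI (or
 * MMUEXT_INVLPG_MULTI) hypercall carrying a vcpu bitmap, instead of
 * one IPI per target CPU.  For a single-page flush of a hypothetical
 * address 0xffffc90000001000 on CPUs {1,2} the op is, schematically:
 *
 *	op.cmd = MMUEXT_INVLPG_MULTI;
 *	op.arg1.linear_addr = 0xffffc90000001000;
 *	op.arg2.vcpumask = <bitmap with bits 1 and 2 set>;
 */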
1383
1384 static unsigned long xen_read_cr3(void)
1385 {
1386 return this_cpu_read(xen_cr3);
1387 }
1388
1389 static void set_current_cr3(void *v)
1390 {
1391 this_cpu_write(xen_current_cr3, (unsigned long)v);
1392 }
1393
1394 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1395 {
1396 struct mmuext_op op;
1397 unsigned long mfn;
1398
1399 trace_xen_mmu_write_cr3(kernel, cr3);
1400
1401 if (cr3)
1402 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1403 else
1404 mfn = 0;
1405
1406 WARN_ON(mfn == 0 && kernel);
1407
1408 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1409 op.arg1.mfn = mfn;
1410
1411 xen_extend_mmuext_op(&op);
1412
1413 if (kernel) {
1414 this_cpu_write(xen_cr3, cr3);
1415
1416 /* Update xen_current_cr3 once the batch has actually
1417    been submitted. */
1418 xen_mc_callback(set_current_cr3, (void *)cr3);
1419 }
1420 }

1421 static void xen_write_cr3(unsigned long cr3)
1422 {
1423 BUG_ON(preemptible());
1424
1425 xen_mc_batch();
1426
1427
1428 /* Update while interrupts are disabled */
1429 this_cpu_write(xen_cr3, cr3);
1430
1431 __xen_write_cr3(true, cr3);
1432
1433 #ifdef CONFIG_X86_64
1434 {
1435 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1436 if (user_pgd)
1437 __xen_write_cr3(false, __pa(user_pgd));
1438 else
1439 __xen_write_cr3(false, 0);
1440 }
1441 #endif
1442
1443 xen_mc_issue(PARAVIRT_LAZY_CPU);
1444 }
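/*
 * Editor's note - illustrative sketch, not part of the original file:
 * a context switch on 64-bit PV therefore issues (at most) two
 * MMUEXT_NEW_BASEPTR/MMUEXT_NEW_USER_BASEPTR ops in one batch:
 *
 *	xen_mc_batch();
 *	__xen_write_cr3(true, __pa(next->pgd));		// kernel pgd
 *	__xen_write_cr3(false, __pa(user_pgd));		// user pgd (or 0)
 *	xen_mc_issue(PARAVIRT_LAZY_CPU);	// deferred if lazy CPU mode
 */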
1445
1446 #ifdef CONFIG_X86_64
1447 /*
1448  * At the start of the day - when Xen launches a guest, it has already
1449  * built pagetables for the guest.  We diligently look over them
1450  * in xen_setup_kernel_pagetable and graft them, as appropriate, into
1451  * init_top_pgt and its friends.  Then when we are happy we load
1452  * the new init_top_pgt - and continue on.
1453  *
1454  * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1455  * up the real one based on the E820 map.  At that point, however, we
1456  * are still running on the hypervisor-provided pagetables, so the
1457  * first write of the final cr3 value has to be done while neither a
1458  * user pagetable nor the full pv_ops machinery can be assumed to
1459  * exist.
1460  *
1461  * This early variant therefore only switches the kernel pagetable.
1462  * Once the memory allocator is up, xen_post_allocator_init() points
1463  * pv_ops.mmu.write_cr3 at the full xen_write_cr3(), which also loads
1464  * the user pagetable on 64-bit.
1465  */
1466
1467 static void __init xen_write_cr3_init(unsigned long cr3)
1468 {
1469 BUG_ON(preemptible());
1470
1471 xen_mc_batch();
1472
1473
1474
1475 this_cpu_write(xen_cr3, cr3);
1476
1477 __xen_write_cr3(true, cr3);
1478
1479 xen_mc_issue(PARAVIRT_LAZY_CPU);
1480 }
1481 #endif
1482
1483 static int xen_pgd_alloc(struct mm_struct *mm)
1484 {
1485 pgd_t *pgd = mm->pgd;
1486 int ret = 0;
1487
1488 BUG_ON(PagePinned(virt_to_page(pgd)));
1489
1490 #ifdef CONFIG_X86_64
1491 {
1492 struct page *page = virt_to_page(pgd);
1493 pgd_t *user_pgd;
1494
1495 BUG_ON(page->private != 0);
1496
1497 ret = -ENOMEM;
1498
1499 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1500 page->private = (unsigned long)user_pgd;
1501
1502 if (user_pgd != NULL) {
1503 #ifdef CONFIG_X86_VSYSCALL_EMULATION
1504 user_pgd[pgd_index(VSYSCALL_ADDR)] =
1505 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1506 #endif
1507 ret = 0;
1508 }
1509
1510 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1511 }
1512 #endif
1513 return ret;
1514 }
1515
1516 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1517 {
1518 #ifdef CONFIG_X86_64
1519 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1520
1521 if (user_pgd)
1522 free_page((unsigned long)user_pgd);
1523 #endif
1524 }
1525
1526 /*
1527  * Init-time set_pte while constructing initial pagetables, which
1528  * doesn't allow RO page table pages to be remapped RW.
1529  *
1530  * If there is no MFN for this PFN then this page is initially
1531  * ballooned out so clear the PTE (as in decrease_reservation() in
1532  * drivers/xen/balloon.c).
1533  *
1534  * Many of these PTE updates are done on unpinned and writable pages
1535  * and doing a hypercall for these is unnecessary and expensive.  At
1536  * this point it is not possible to tell if a page is pinned or not,
1537  * so always write the PTE directly and rely on Xen trapping and
1538  * emulating any updates as necessary.
1539  */
1540 __visible pte_t xen_make_pte_init(pteval_t pte)
1541 {
1542 #ifdef CONFIG_X86_64
1543 unsigned long pfn;
1544
1545 /*
1546  * Pages belonging to the initial p2m list mapped outside the default
1547  * address range must be mapped read-only.  This region contains the
1548  * page tables for mapping the p2m list, too, and page tables MUST be
1549  * mapped read-only.
1550  */
1551 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1552 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1553 pfn >= xen_start_info->first_p2m_pfn &&
1554 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1555 pte &= ~_PAGE_RW;
1556 #endif
1557 pte = pte_pfn_to_mfn(pte);
1558 return native_make_pte(pte);
1559 }
1560 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1561
1562 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1563 {
1564 #ifdef CONFIG_X86_32
1565 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1566 if (pte_mfn(pte) != INVALID_P2M_ENTRY
1567 && pte_val_ma(*ptep) & _PAGE_PRESENT)
1568 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1569 pte_val_ma(pte));
1570 #endif
1571 __xen_set_pte(ptep, pte);
1572 }
1573
1574 /* Early in boot, while setting up the initial pagetable, assume
1575    everything is pinned. */
1576 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1577 {
1578 #ifdef CONFIG_FLATMEM
1579 BUG_ON(mem_map);
1580 #endif
1581 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1582 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1583 }
1584
1585 /* Used for pmd and pud */
1586 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1587 {
1588 #ifdef CONFIG_FLATMEM
1589 BUG_ON(mem_map);
1590 #endif
1591 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1592 }
1593
1594 /* Early release_pte assumes that all pts are pinned, since there's
1595    only init_mm and anything attached to that is pinned. */
1596 static void __init xen_release_pte_init(unsigned long pfn)
1597 {
1598 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1599 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1600 }
1601
1602 static void __init xen_release_pmd_init(unsigned long pfn)
1603 {
1604 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1605 }
1606
1607 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1608 {
1609 struct multicall_space mcs;
1610 struct mmuext_op *op;
1611
1612 mcs = __xen_mc_entry(sizeof(*op));
1613 op = mcs.args;
1614 op->cmd = cmd;
1615 op->arg1.mfn = pfn_to_mfn(pfn);
1616
1617 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1618 }
1619
1620 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1621 {
1622 struct multicall_space mcs;
1623 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1624
1625 mcs = __xen_mc_entry(0);
1626 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1627 pfn_pte(pfn, prot), 0);
1628 }
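/*
 * Editor's note - illustrative sketch, not part of the original file:
 * __pin_pagetable_pfn() and __set_pfn_prot() are the batched building
 * blocks for (un)pinning a single pagetable page, as used by
 * xen_alloc_ptpage()/xen_release_ptpage() below:
 *
 *	xen_mc_batch();
 *	__set_pfn_prot(pfn, PAGE_KERNEL_RO);		// must be RO first
 *	__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);	// then pin
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */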
1629
1630 /* This needs to make sure the new pte page is pinned iff it's being
1631    attached to a pinned pagetable. */
1632 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1633 unsigned level)
1634 {
1635 bool pinned = xen_page_pinned(mm->pgd);
1636
1637 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1638
1639 if (pinned) {
1640 struct page *page = pfn_to_page(pfn);
1641
1642 if (static_branch_likely(&xen_struct_pages_ready))
1643 SetPagePinned(page);
1644
1645 if (!PageHighMem(page)) {
1646 xen_mc_batch();
1647
1648 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1649
1650 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1651 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1652
1653 xen_mc_issue(PARAVIRT_LAZY_MMU);
1654 } else {
1655 /* make sure there are no stray mappings of
1656    this page */
1657 kmap_flush_unused();
1658 }
1659 }
1660 }
1661
1662 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1663 {
1664 xen_alloc_ptpage(mm, pfn, PT_PTE);
1665 }
1666
1667 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1668 {
1669 xen_alloc_ptpage(mm, pfn, PT_PMD);
1670 }
1671
1672 /* This should never happen until we're OK to use struct page */
1673 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1674 {
1675 struct page *page = pfn_to_page(pfn);
1676 bool pinned = PagePinned(page);
1677
1678 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1679
1680 if (pinned) {
1681 if (!PageHighMem(page)) {
1682 xen_mc_batch();
1683
1684 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1685 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1686
1687 __set_pfn_prot(pfn, PAGE_KERNEL);
1688
1689 xen_mc_issue(PARAVIRT_LAZY_MMU);
1690 }
1691 ClearPagePinned(page);
1692 }
1693 }
1694
1695 static void xen_release_pte(unsigned long pfn)
1696 {
1697 xen_release_ptpage(pfn, PT_PTE);
1698 }
1699
1700 static void xen_release_pmd(unsigned long pfn)
1701 {
1702 xen_release_ptpage(pfn, PT_PMD);
1703 }
1704
1705 #ifdef CONFIG_X86_64
1706 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1707 {
1708 xen_alloc_ptpage(mm, pfn, PT_PUD);
1709 }
1710
1711 static void xen_release_pud(unsigned long pfn)
1712 {
1713 xen_release_ptpage(pfn, PT_PUD);
1714 }
1715 #endif
1716
1717 void __init xen_reserve_top(void)
1718 {
1719 #ifdef CONFIG_X86_32
1720 unsigned long top = HYPERVISOR_VIRT_START;
1721 struct xen_platform_parameters pp;
1722
1723 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1724 top = pp.virt_start;
1725
1726 reserve_top_address(-top);
1727 #endif
1728 }
1729
1730 /*
1731  * Like __va(), but returns address in the kernel mapping (which is
1732  * all we have until the physical memory mapping has been set up).
1733  */
1734 static void * __init __ka(phys_addr_t paddr)
1735 {
1736 #ifdef CONFIG_X86_64
1737 return (void *)(paddr + __START_KERNEL_map);
1738 #else
1739 return __va(paddr);
1740 #endif
1741 }
1742
1743 /* Convert a machine address to physical address */
1744 static unsigned long __init m2p(phys_addr_t maddr)
1745 {
1746 phys_addr_t paddr;
1747
1748 maddr &= XEN_PTE_MFN_MASK;
1749 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1750
1751 return paddr;
1752 }
1753
1754 /* Convert a machine address to kernel virtual */
1755 static void * __init m2v(phys_addr_t maddr)
1756 {
1757 return __ka(m2p(maddr));
1758 }
1759
1760
1761 static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1762 unsigned long flags)
1763 {
1764 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1765 pte_t pte = pfn_pte(pfn, prot);
1766
1767 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1768 BUG();
1769 }
1770 static void __init set_page_prot(void *addr, pgprot_t prot)
1771 {
1772 return set_page_prot_flags(addr, prot, UVMF_NONE);
1773 }
1774 #ifdef CONFIG_X86_32
1775 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1776 {
1777 unsigned pmdidx, pteidx;
1778 unsigned ident_pte;
1779 unsigned long pfn;
1780
1781 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1782 PAGE_SIZE);
1783
1784 ident_pte = 0;
1785 pfn = 0;
1786 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1787 pte_t *pte_page;
1788
1789 /* Reuse or allocate a page of ptes */
1790 if (pmd_present(pmd[pmdidx]))
1791 pte_page = m2v(pmd[pmdidx].pmd);
1792 else {
1793 /* Check for free pte pages */
1794 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1795 break;
1796
1797 pte_page = &level1_ident_pgt[ident_pte];
1798 ident_pte += PTRS_PER_PTE;
1799
1800 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1801 }
1802
1803 /* Install mappings */
1804 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1805 pte_t pte;
1806
1807 if (pfn > max_pfn_mapped)
1808 max_pfn_mapped = pfn;
1809
1810 if (!pte_none(pte_page[pteidx]))
1811 continue;
1812
1813 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1814 pte_page[pteidx] = pte;
1815 }
1816 }
1817
1818 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1819 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1820
1821 set_page_prot(pmd, PAGE_KERNEL_RO);
1822 }
1823 #endif
1824 void __init xen_setup_machphys_mapping(void)
1825 {
1826 struct xen_machphys_mapping mapping;
1827
1828 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1829 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1830 machine_to_phys_nr = mapping.max_mfn + 1;
1831 } else {
1832 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1833 }
1834 #ifdef CONFIG_X86_32
1835 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1836 < machine_to_phys_mapping);
1837 #endif
1838 }
1839
1840 #ifdef CONFIG_X86_64
1841 static void __init convert_pfn_mfn(void *v)
1842 {
1843 pte_t *pte = v;
1844 int i;
1845
1846 /* All levels are converted the same way, so just treat them
1847    as ptes. */
1848 for (i = 0; i < PTRS_PER_PTE; i++)
1849 pte[i] = xen_make_pte(pte[i].pte);
1850 }
1851 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1852 unsigned long addr)
1853 {
1854 if (*pt_base == PFN_DOWN(__pa(addr))) {
1855 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1856 clear_page((void *)addr);
1857 (*pt_base)++;
1858 }
1859 if (*pt_end == PFN_DOWN(__pa(addr))) {
1860 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1861 clear_page((void *)addr);
1862 (*pt_end)--;
1863 }
1864 }
1865 /*
1866  * Set up the initial kernel pagetable.
1867  *
1868  * We can construct this by grafting the Xen provided pagetable into
1869  * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
1870  * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
1871  * kernel has a physical mapping to start with - but that's enough to
1872  * get __va working.  We need to fill in the rest of the physical
1873  * mapping once some sort of allocator has been set up.
1874  */
1875 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1876 {
1877 pud_t *l3;
1878 pmd_t *l2;
1879 unsigned long addr[3];
1880 unsigned long pt_base, pt_end;
1881 unsigned i;
1882
1883 /* max_pfn_mapped is the last pfn mapped in the initial memory
1884  * mappings.  Considering that on Xen after the kernel mappings we
1885  * have the mappings of some pages that don't exist in pfn space,
1886  * we set max_pfn_mapped to the last real pfn mapped. */
1887 if (xen_start_info->mfn_list < __START_KERNEL_map)
1888 max_pfn_mapped = xen_start_info->first_p2m_pfn;
1889 else
1890 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1891
1892 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1893 pt_end = pt_base + xen_start_info->nr_pt_frames;
1894
1895 /* Zap identity mapping */
1896 init_top_pgt[0] = __pgd(0);
1897
1898 /* Pre-constructed entries are in pfn, so convert to mfn */
1899 /* L4[272] -> level3_ident_pgt  */
1900 /* L4[511] -> level3_kernel_pgt */
1901 convert_pfn_mfn(init_top_pgt);
1902
1903 /* L3_i[0] -> level2_ident_pgt */
1904 convert_pfn_mfn(level3_ident_pgt);
1905 /* L3_k[510] -> level2_kernel_pgt */
1906 /* L3_k[511] -> level2_fixmap_pgt */
1907 convert_pfn_mfn(level3_kernel_pgt);
1908
1909 /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
1910 convert_pfn_mfn(level2_fixmap_pgt);
1911
1912 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1913 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1914 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1915
1916 addr[0] = (unsigned long)pgd;
1917 addr[1] = (unsigned long)l3;
1918 addr[2] = (unsigned long)l2;
1919
1920 /* Graft it onto L4[272][0].  Note that this creates an aliasing
1921  * problem: both L4[272][0] and L4[511][510] now have entries pointing
1922  * to the same L2 (PMD) tables, so modifying it in __va space also
1923  * modifies it in __ka space!  (Modifying the PMD to point at other
1924  * PTEs, or at none, is fine - that's what cleanup_highmap does.) */
1925 copy_page(level2_ident_pgt, l2);
1926 /* Graft it onto L4[511][510] */
1927 copy_page(level2_kernel_pgt, l2);
1928
1929 /*
1930  * Zap execute permission from the ident map.  Due to the sharing of
1931  * L1 entries we need to do this in the L2.
1932  */
1933 if (__supported_pte_mask & _PAGE_NX) {
1934 for (i = 0; i < PTRS_PER_PMD; ++i) {
1935 if (pmd_none(level2_ident_pgt[i]))
1936 continue;
1937 level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
1938 }
1939 }
1940
1941 /* Copy the initial P->M table mappings if necessary. */
1942 i = pgd_index(xen_start_info->mfn_list);
1943 if (i && i < pgd_index(__START_KERNEL_map))
1944 init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1945
1946 /* Make pagetable pieces RO */
1947 set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
1948 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1949 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1950 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1951 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1952 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1953 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1954
1955 for (i = 0; i < FIXMAP_PMD_NUM; i++) {
1956 set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
1957 PAGE_KERNEL_RO);
1958 }
1959
1960 /* Pin down new L4 */
1961 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1962 PFN_DOWN(__pa_symbol(init_top_pgt)));
1963
1964 /* Unpin Xen-provided one */
1965 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1966
1967 /*
1968  * At this stage there can be no user pgd, and no page structure to
1969  * attach it to, so make sure we just set kernel pgd.
1970  */
1971 xen_mc_batch();
1972 __xen_write_cr3(true, __pa(init_top_pgt));
1973 xen_mc_issue(PARAVIRT_LAZY_CPU);
1974 /* We can't easily rip out L3 and L2, as the Xen pagetables are
1975  * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1976  * the initial domain.  For guests using the toolstack, they are in:
1977  * [L4], [L3], [L2], [L1], [L1] order.  So for dom0 we can only
1978  * rip out the [L4] (pgd), but for guests we shave off three pages.
1979  */
1980
1981 for (i = 0; i < ARRAY_SIZE(addr); i++)
1982 check_pt_base(&pt_base, &pt_end, addr[i]);
1983
1984 /* Our (by three pages) smaller Xen pagetable that we are using */
1985 xen_pt_base = PFN_PHYS(pt_base);
1986 xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
1987 memblock_reserve(xen_pt_base, xen_pt_size);
1988
1989 /* Revector the xen_start_info */
1990 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1991 }
1992
1993 /*
1994  * Read a value from a physical address.
1995  */
1996 static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
1997 {
1998 unsigned long *vaddr;
1999 unsigned long val;
2000
2001 vaddr = early_memremap_ro(addr, sizeof(val));
2002 val = *vaddr;
2003 early_memunmap(vaddr, sizeof(val));
2004 return val;
2005 }
2006
2007 /*
2008  * Translate a virtual address to a physical one without relying on
2009  * mapped page tables.  Don't rely on big pages being aligned in
2010  * (guest) physical space!
2011  */
2012 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
2013 {
2014 phys_addr_t pa;
2015 pgd_t pgd;
2016 pud_t pud;
2017 pmd_t pmd;
2018 pte_t pte;
2019
2020 pa = read_cr3_pa();
2021 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
2022 sizeof(pgd)));
2023 if (!pgd_present(pgd))
2024 return 0;
2025
2026 pa = pgd_val(pgd) & PTE_PFN_MASK;
2027 pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
2028 sizeof(pud)));
2029 if (!pud_present(pud))
2030 return 0;
2031 pa = pud_val(pud) & PTE_PFN_MASK;
2032 if (pud_large(pud))
2033 return pa + (vaddr & ~PUD_MASK);
2034
2035 pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
2036 sizeof(pmd)));
2037 if (!pmd_present(pmd))
2038 return 0;
2039 pa = pmd_val(pmd) & PTE_PFN_MASK;
2040 if (pmd_large(pmd))
2041 return pa + (vaddr & ~PMD_MASK);
2042
2043 pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
2044 sizeof(pte)));
2045 if (!pte_present(pte))
2046 return 0;
2047 pa = pte_pfn(pte) << PAGE_SHIFT;
2048
2049 return pa | (vaddr & ~PAGE_MASK);
2050 }
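/*
 * Editor's note - illustrative sketch, not part of the original file:
 * the manual walk above mirrors what the MMU does: at each level the
 * table's physical address plus an index derived from the virtual
 * address selects the next entry, e.g. for the top level:
 *
 *	pa_of_entry = read_cr3_pa() + pgd_index(vaddr) * sizeof(pgd_t);
 *
 * Large (2M/1G) mappings short-circuit the walk; the remaining
 * virtual-address offset bits are added to the frame address instead.
 */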
2051
2052 /*
2053  * Find a new area for the hypervisor supplied p2m list and relocate
2054  * the p2m to this area.
2055  */
2056 void __init xen_relocate_p2m(void)
2057 {
2058 phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
2059 unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
2060 int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
2061 pte_t *pt;
2062 pmd_t *pmd;
2063 pud_t *pud;
2064 pgd_t *pgd;
2065 unsigned long *new_p2m;
2066
2067 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2068 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
2069 n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
2070 n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
2071 n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
2072 n_frames = n_pte + n_pt + n_pmd + n_pud;
2073
2074 new_area = xen_find_free_area(PFN_PHYS(n_frames));
2075 if (!new_area) {
2076 xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2077 BUG();
2078 }
2079
2080 /*
2081  * Set up the page tables for addressing the new p2m list.
2082  * The new list is mapped at virtual address 2 * PGDIR_SIZE so that it
2083  * cannot collide with the hypervisor supplied mapping of the old
2084  * list.  The page table frames themselves come from the start of
2085  * new_area: first the PUD pages, then the PMD pages, then the PTE
2086  * pages, followed by the p2m data itself.
2087  */
2088 pud_phys = new_area;
2089 pmd_phys = pud_phys + PFN_PHYS(n_pud);
2090 pt_phys = pmd_phys + PFN_PHYS(n_pmd);
2091 p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
2092
2093 pgd = __va(read_cr3_pa());
2094 new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
2095 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2096 pud = early_memremap(pud_phys, PAGE_SIZE);
2097 clear_page(pud);
2098 for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
2099 idx_pmd++) {
2100 pmd = early_memremap(pmd_phys, PAGE_SIZE);
2101 clear_page(pmd);
2102 for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
2103 idx_pt++) {
2104 pt = early_memremap(pt_phys, PAGE_SIZE);
2105 clear_page(pt);
2106 for (idx_pte = 0;
2107 idx_pte < min(n_pte, PTRS_PER_PTE);
2108 idx_pte++) {
2109 pt[idx_pte] = pfn_pte(p2m_pfn,
2110 PAGE_KERNEL);
2111 p2m_pfn++;
2112 }
2113 n_pte -= PTRS_PER_PTE;
2114 early_memunmap(pt, PAGE_SIZE);
2115 make_lowmem_page_readonly(__va(pt_phys));
2116 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2117 PFN_DOWN(pt_phys));
2118 pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
2119 pt_phys += PAGE_SIZE;
2120 }
2121 n_pt -= PTRS_PER_PMD;
2122 early_memunmap(pmd, PAGE_SIZE);
2123 make_lowmem_page_readonly(__va(pmd_phys));
2124 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2125 PFN_DOWN(pmd_phys));
2126 pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
2127 pmd_phys += PAGE_SIZE;
2128 }
2129 n_pmd -= PTRS_PER_PUD;
2130 early_memunmap(pud, PAGE_SIZE);
2131 make_lowmem_page_readonly(__va(pud_phys));
2132 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
2133 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
2134 pud_phys += PAGE_SIZE;
2135 }
2136
2137 /* Now copy the old p2m info to the new area. */
2138 memcpy(new_p2m, xen_p2m_addr, size);
2139 xen_p2m_addr = new_p2m;
2140
2141 /* Release the old p2m list and set new list info. */
2142 p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
2143 BUG_ON(!p2m_pfn);
2144 p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
2145
2146 if (xen_start_info->mfn_list < __START_KERNEL_map) {
2147 pfn = xen_start_info->first_p2m_pfn;
2148 pfn_end = xen_start_info->first_p2m_pfn +
2149 xen_start_info->nr_p2m_frames;
2150 set_pgd(pgd + 1, __pgd(0));
2151 } else {
2152 pfn = p2m_pfn;
2153 pfn_end = p2m_pfn_end;
2154 }
2155
2156 memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
2157 while (pfn < pfn_end) {
2158 if (pfn == p2m_pfn) {
2159 pfn = p2m_pfn_end;
2160 continue;
2161 }
2162 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
2163 pfn++;
2164 }
2165
2166 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2167 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2168 xen_start_info->nr_p2m_frames = n_frames;
2169 }
2170
2171 #else
2172 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2173 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2174 RESERVE_BRK(fixup_kernel_pmd, PAGE_SIZE);
2175 RESERVE_BRK(fixup_kernel_pte, PAGE_SIZE);
2176
2177 static void __init xen_write_cr3_init(unsigned long cr3)
2178 {
2179 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2180
2181 BUG_ON(read_cr3_pa() != __pa(initial_page_table));
2182 BUG_ON(cr3 != __pa(swapper_pg_dir));
2183
2184 /*
2185  * We are switching to swapper_pg_dir for the first time (from
2186  * initial_page_table) and therefore need to mark that page
2187  * read-only and then pin it.
2188  *
2189  * Xen disallows sharing of kernel PMDs for PAE
2190  * guests.  Therefore we must copy the kernel PMD from
2191  * initial_page_table into a new kernel PMD to be used in
2192  * swapper_pg_dir.
2193  */
2194 swapper_kernel_pmd =
2195 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2196 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
2197 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2198 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2199 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2200
2201 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2202 xen_write_cr3(cr3);
2203 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2204
2205 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2206 PFN_DOWN(__pa(initial_page_table)));
2207 set_page_prot(initial_page_table, PAGE_KERNEL);
2208 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2209
2210 pv_ops.mmu.write_cr3 = &xen_write_cr3;
2211 }
2212
2213 /*
2214  * For 32 bit domains xen_start_info->pt_base is the pgd address which
2215  * might not be the first page table in the page table pool.  Iterate
2216  * through the initial page tables to find the real page table base.
2217  */
2218 static phys_addr_t __init xen_find_pt_base(pmd_t *pmd)
2219 {
2220 phys_addr_t pt_base, paddr;
2221 unsigned pmdidx;
2222
2223 pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2224
2225 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2226 if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2227 paddr = m2p(pmd[pmdidx].pmd);
2228 pt_base = min(pt_base, paddr);
2229 }
2230
2231 return pt_base;
2232 }
2233
2234 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2235 {
2236 pmd_t *kernel_pmd;
2237
2238 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2239
2240 xen_pt_base = xen_find_pt_base(kernel_pmd);
2241 xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2242
2243 initial_kernel_pmd =
2244 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2245
2246 max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
2247
2248 copy_page(initial_kernel_pmd, kernel_pmd);
2249
2250 xen_map_identity_early(initial_kernel_pmd, max_pfn);
2251
2252 copy_page(initial_page_table, pgd);
2253 initial_page_table[KERNEL_PGD_BOUNDARY] =
2254 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2255
2256 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2257 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2258 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2259
2260 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2261
2262 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2263 PFN_DOWN(__pa(initial_page_table)));
2264 xen_write_cr3(__pa(initial_page_table));
2265
2266 memblock_reserve(xen_pt_base, xen_pt_size);
2267 }
2268 #endif
2269
2270 void __init xen_reserve_special_pages(void)
2271 {
2272 phys_addr_t paddr;
2273
2274 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2275 if (xen_start_info->store_mfn) {
2276 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2277 memblock_reserve(paddr, PAGE_SIZE);
2278 }
2279 if (!xen_initial_domain()) {
2280 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
2281 memblock_reserve(paddr, PAGE_SIZE);
2282 }
2283 }
2284
2285 void __init xen_pt_check_e820(void)
2286 {
2287 if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
2288 xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2289 BUG();
2290 }
2291 }
2292
2293 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2294
2295 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2296 {
2297 pte_t pte;
2298
2299 phys >>= PAGE_SHIFT;
2300
2301 switch (idx) {
2302 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2303 #ifdef CONFIG_X86_32
2304 case FIX_WP_TEST:
2305 # ifdef CONFIG_HIGHMEM
2306 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2307 # endif
2308 #elif defined(CONFIG_X86_VSYSCALL_EMULATION)
2309 case VSYSCALL_PAGE:
2310 #endif
2311 /* All local page mappings */
2312 pte = pfn_pte(phys, prot);
2313 break;
2314
2315 #ifdef CONFIG_X86_LOCAL_APIC
2316 case FIX_APIC_BASE:
2317 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2318 break;
2319 #endif
2320
2321 #ifdef CONFIG_X86_IO_APIC
2322 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2323 /*
2324  * We just don't map the IO APIC - all access is via
2325  * hypercalls.  Keep the address in the pte for reference.
2326  */
2327 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2328 break;
2329 #endif
2330
2331 case FIX_PARAVIRT_BOOTMAP:
2332 /* This is an MFN, but it isn't an IO mapping from the
2333    IO domain */
2334 pte = mfn_pte(phys, prot);
2335 break;
2336
2337 default:
2338 /* By default, set_fixmap is used for hardware mappings */
2339 pte = mfn_pte(phys, prot);
2340 break;
2341 }
2342
2343 __native_set_fixmap(idx, pte);
2344
2345 #ifdef CONFIG_X86_VSYSCALL_EMULATION
2346 /* Replicate changes to map the vsyscall page into the
2347    user pagetable vsyscall mapping. */
2348 if (idx == VSYSCALL_PAGE) {
2349 unsigned long vaddr = __fix_to_virt(idx);
2350 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2351 }
2352 #endif
2353 }
2354
2355 static void __init xen_post_allocator_init(void)
2356 {
2357 pv_ops.mmu.set_pte = xen_set_pte;
2358 pv_ops.mmu.set_pmd = xen_set_pmd;
2359 pv_ops.mmu.set_pud = xen_set_pud;
2360 #ifdef CONFIG_X86_64
2361 pv_ops.mmu.set_p4d = xen_set_p4d;
2362 #endif
2363
2364 /* This will work as long as patching hasn't happened yet
2365    (which it hasn't) */
2366 pv_ops.mmu.alloc_pte = xen_alloc_pte;
2367 pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
2368 pv_ops.mmu.release_pte = xen_release_pte;
2369 pv_ops.mmu.release_pmd = xen_release_pmd;
2370 #ifdef CONFIG_X86_64
2371 pv_ops.mmu.alloc_pud = xen_alloc_pud;
2372 pv_ops.mmu.release_pud = xen_release_pud;
2373 #endif
2374 pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2375
2376 #ifdef CONFIG_X86_64
2377 pv_ops.mmu.write_cr3 = &xen_write_cr3;
2378 #endif
2379 }
2380
2381 static void xen_leave_lazy_mmu(void)
2382 {
2383 preempt_disable();
2384 xen_mc_flush();
2385 paravirt_leave_lazy_mmu();
2386 preempt_enable();
2387 }
2388
2389 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2390 .read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2),
2391 .write_cr2 = xen_write_cr2,
2392
2393 .read_cr3 = xen_read_cr3,
2394 .write_cr3 = xen_write_cr3_init,
2395
2396 .flush_tlb_user = xen_flush_tlb,
2397 .flush_tlb_kernel = xen_flush_tlb,
2398 .flush_tlb_one_user = xen_flush_tlb_one_user,
2399 .flush_tlb_others = xen_flush_tlb_others,
2400 .tlb_remove_table = tlb_remove_table,
2401
2402 .pgd_alloc = xen_pgd_alloc,
2403 .pgd_free = xen_pgd_free,
2404
2405 .alloc_pte = xen_alloc_pte_init,
2406 .release_pte = xen_release_pte_init,
2407 .alloc_pmd = xen_alloc_pmd_init,
2408 .release_pmd = xen_release_pmd_init,
2409
2410 .set_pte = xen_set_pte_init,
2411 .set_pte_at = xen_set_pte_at,
2412 .set_pmd = xen_set_pmd_hyper,
2413
2414 .ptep_modify_prot_start = xen_ptep_modify_prot_start,
2415 .ptep_modify_prot_commit = xen_ptep_modify_prot_commit,
2416
2417 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2418 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2419
2420 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2421 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2422
2423 #ifdef CONFIG_X86_PAE
2424 .set_pte_atomic = xen_set_pte_atomic,
2425 .pte_clear = xen_pte_clear,
2426 .pmd_clear = xen_pmd_clear,
2427 #endif
2428 .set_pud = xen_set_pud_hyper,
2429
2430 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2431 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2432
2433 #ifdef CONFIG_X86_64
2434 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2435 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2436 .set_p4d = xen_set_p4d_hyper,
2437
2438 .alloc_pud = xen_alloc_pmd_init,
2439 .release_pud = xen_release_pmd_init,
2440
2441 #if CONFIG_PGTABLE_LEVELS >= 5
2442 .p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
2443 .make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
2444 #endif
2445 #endif
2446
2447 .activate_mm = xen_activate_mm,
2448 .dup_mmap = xen_dup_mmap,
2449 .exit_mmap = xen_exit_mmap,
2450
2451 .lazy_mode = {
2452 .enter = paravirt_enter_lazy_mmu,
2453 .leave = xen_leave_lazy_mmu,
2454 .flush = paravirt_flush_lazy_mmu,
2455 },
2456
2457 .set_fixmap = xen_set_fixmap,
2458 };
2459
2460 void __init xen_init_mmu_ops(void)
2461 {
2462 x86_init.paging.pagetable_init = xen_pagetable_init;
2463 x86_init.hyper.init_after_bootmem = xen_after_bootmem;
2464
2465 pv_ops.mmu = xen_mmu_ops;
2466
2467 memset(dummy_mapping, 0xff, PAGE_SIZE);
2468 }
2469
2470 /* Protected by xen_reservation_lock. */
2471 #define MAX_CONTIG_ORDER 9 /* 2MB */
2472 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2473
2474 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
2475 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2476 unsigned long *in_frames,
2477 unsigned long *out_frames)
2478 {
2479 int i;
2480 struct multicall_space mcs;
2481
2482 xen_mc_batch();
2483 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2484 mcs = __xen_mc_entry(0);
2485
2486 if (in_frames)
2487 in_frames[i] = virt_to_mfn(vaddr);
2488
2489 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2490 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2491
2492 if (out_frames)
2493 out_frames[i] = virt_to_pfn(vaddr);
2494 }
2495 xen_mc_issue(0);
2496 }
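Each loop iteration above queues one MULTI_update_va_mapping call: it records the page's current machine frame (if requested), clears the PTE, and marks the p2m entry invalid so the frame can be handed back to Xen. A standalone sketch with arrays standing in for the page tables and the p2m:

```c
#include <stdio.h>

#define DEMO_INVALID_P2M_ENTRY (~0UL)
#define DEMO_NPAGES 4

static unsigned long demo_p2m[DEMO_NPAGES] = { 0x100, 0x101, 0x102, 0x103 };
static unsigned long demo_pte[DEMO_NPAGES] = { 1, 1, 1, 1 }; /* "mapped" */

static void demo_zap(unsigned long *in_frames)
{
	for (unsigned int i = 0; i < DEMO_NPAGES; i++) {
		in_frames[i] = demo_p2m[i];           /* virt_to_mfn() */
		demo_pte[i] = 0;                      /* update_va_mapping(VOID_PTE) */
		demo_p2m[i] = DEMO_INVALID_P2M_ENTRY; /* __set_phys_to_machine() */
	}
}

int main(void)
{
	unsigned long frames[DEMO_NPAGES];

	demo_zap(frames);
	printf("first zapped mfn: %#lx\n", frames[0]);
	return 0;
}
```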
2497
2498 /*
2499 * Update the pfn-to-mfn mappings for a virtual address range, either to
2500 * point to an array of mfns, or contiguously from a single starting
2501 * mfn.
2502 */
2503 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2504 unsigned long *mfns,
2505 unsigned long first_mfn)
2506 {
2507 unsigned i, limit;
2508 unsigned long mfn;
2509
2510 xen_mc_batch();
2511
2512 limit = 1u << order;
2513 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2514 struct multicall_space mcs;
2515 unsigned flags;
2516
2517 mcs = __xen_mc_entry(0);
2518 if (mfns)
2519 mfn = mfns[i];
2520 else
2521 mfn = first_mfn + i;
2522
2523 if (i < (limit - 1))
2524 flags = 0;
2525 else {
2526 if (order == 0)
2527 flags = UVMF_INVLPG | UVMF_ALL;
2528 else
2529 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2530 }
2531
2532 MULTI_update_va_mapping(mcs.mc, vaddr,
2533 mfn_pte(mfn, PAGE_KERNEL), flags);
2534
2535 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2536 }
2537
2538 xen_mc_issue(0);
2539 }
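Note the flag handling above: intermediate updates carry no flush, and only the final entry in the batch triggers a TLB flush, using a single-page INVLPG when only one page changed and a full flush otherwise. A standalone sketch of that decision; the DEMO_UVMF_* values are stand-ins for Xen's UVMF_* flags:

```c
#include <stdio.h>

#define DEMO_UVMF_NONE      0
#define DEMO_UVMF_INVLPG    1
#define DEMO_UVMF_TLB_FLUSH 2

static int demo_flags(unsigned int i, unsigned int limit, int order)
{
	if (i < limit - 1)
		return DEMO_UVMF_NONE;      /* no flush mid-batch */
	return order == 0 ? DEMO_UVMF_INVLPG : DEMO_UVMF_TLB_FLUSH;
}

int main(void)
{
	unsigned int order = 2, limit = 1u << order;

	for (unsigned int i = 0; i < limit; i++)
		printf("page %u: flags=%d\n", i, demo_flags(i, limit, order));
	return 0;
}
```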
2540
2541 /*
2542 * Perform the hypercall to exchange a region of our pfns to point to
2543 * memory with the required contiguous alignment.  Takes the pfns as
2544 * input, and populates mfns as output.
2545 *
2546 * Returns a success code indicating whether the hypervisor was able to
2547 * exchange the memory, and will do all the necessary cache flushing.
2548 */
2549 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2550 unsigned long *pfns_in,
2551 unsigned long extents_out,
2552 unsigned int order_out,
2553 unsigned long *mfns_out,
2554 unsigned int address_bits)
2555 {
2556 long rc;
2557 int success;
2558
2559 struct xen_memory_exchange exchange = {
2560 .in = {
2561 .nr_extents = extents_in,
2562 .extent_order = order_in,
2563 .extent_start = pfns_in,
2564 .domid = DOMID_SELF
2565 },
2566 .out = {
2567 .nr_extents = extents_out,
2568 .extent_order = order_out,
2569 .extent_start = mfns_out,
2570 .address_bits = address_bits,
2571 .domid = DOMID_SELF
2572 }
2573 };
2574
2575 BUG_ON(extents_in << order_in != extents_out << order_out);
2576
2577 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2578 success = (exchange.nr_exchanged == extents_in);
2579
2580 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2581 BUG_ON(success && (rc != 0));
2582
2583 return success;
2584 }
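The BUG_ON() above encodes the exchange invariant: both sides of an XENMEM_exchange must cover the same number of base frames, i.e. extents_in << order_in == extents_out << order_out. A standalone check of that arithmetic for the typical "many order-0 extents in, one order-N extent out" case:

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int order = 4;                 /* 16 frames = 64 KiB */
	unsigned long in_extents = 1UL << order, in_order = 0;
	unsigned long out_extents = 1,          out_order = order;

	/* Same total number of 4 KiB frames on both sides. */
	assert((in_extents << in_order) == (out_extents << out_order));

	printf("exchanging %lu single frames for 1 order-%lu extent\n",
	       in_extents, out_order);
	return 0;
}
```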
2585
2586 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2587 unsigned int address_bits,
2588 dma_addr_t *dma_handle)
2589 {
2590 unsigned long *in_frames = discontig_frames, out_frame;
2591 unsigned long flags;
2592 int success;
2593 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
2594
2595 /*
2596 * Exchange the (possibly discontiguous) machine frames backing this
2597 * region for a single machine-contiguous extent addressable within
2598 * the requested address width.
2599 */
2600
2601 if (unlikely(order > MAX_CONTIG_ORDER))
2602 return -ENOMEM;
2603
2604 memset((void *) vstart, 0, PAGE_SIZE << order);
2605
2606 spin_lock_irqsave(&xen_reservation_lock, flags);
2607
2608 /* 1. Zap current PTEs, remembering MFNs. */
2609 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2610
2611 /* 2. Get a new contiguous memory extent. */
2612 out_frame = virt_to_pfn(vstart);
2613 success = xen_exchange_memory(1UL << order, 0, in_frames,
2614 1, order, &out_frame,
2615 address_bits);
2616
2617 /* 3. Map the new extent in place of old pages. */
2618 if (success)
2619 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2620 else
2621 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2622
2623 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2624
2625 *dma_handle = virt_to_machine(vstart).maddr;
2626 return success ? 0 : -ENOMEM;
2627 }
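A hedged usage sketch (error handling trimmed, function and variable names other than the xen_*/page APIs are illustrative): how a PV driver might make an order-2 (16 KiB) buffer machine-contiguous below 4 GiB. Note that xen_create_contiguous_region() zeroes the region, so call it before filling the buffer:

```c
#include <linux/gfp.h>
#include <linux/io.h>
#include <xen/xen-ops.h>

static int demo_make_dma_buffer(void)
{
	unsigned int order = 2;
	dma_addr_t dma;
	unsigned long buf = __get_free_pages(GFP_KERNEL, order);

	if (!buf)
		return -ENOMEM;

	/* Exchange the backing frames for a machine-contiguous extent
	 * addressable with 32 bits; on success @dma holds its machine
	 * address. The region's contents are wiped by the call. */
	if (xen_create_contiguous_region(virt_to_phys((void *)buf), order,
					 32, &dma)) {
		free_pages(buf, order);
		return -ENOMEM;
	}

	/* ... program @dma into the device, fill the buffer ... */

	xen_destroy_contiguous_region(virt_to_phys((void *)buf), order);
	free_pages(buf, order);
	return 0;
}
```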
2628
2629 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2630 {
2631 unsigned long *out_frames = discontig_frames, in_frame;
2632 unsigned long flags;
2633 int success;
2634 unsigned long vstart;
2635
2636 if (unlikely(order > MAX_CONTIG_ORDER))
2637 return;
2638
2639 vstart = (unsigned long)phys_to_virt(pstart);
2640 memset((void *) vstart, 0, PAGE_SIZE << order);
2641
2642 spin_lock_irqsave(&xen_reservation_lock, flags);
2643
2644 /* 1. Find start MFN of contiguous extent. */
2645 in_frame = virt_to_mfn(vstart);
2646
2647 /* 2. Zap current PTEs. */
2648 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2649
2650 /* 3. Do the exchange for non-contiguous MFNs. */
2651 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2652 0, out_frames, 0);
2653
2654 /* 4. Map new pages in place of old pages. */
2655 if (success)
2656 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2657 else
2658 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2659
2660 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2661 }
2662
2663 static noinline void xen_flush_tlb_all(void)
2664 {
2665 struct mmuext_op *op;
2666 struct multicall_space mcs;
2667
2668 preempt_disable();
2669
2670 mcs = xen_mc_entry(sizeof(*op));
2671
2672 op = mcs.args;
2673 op->cmd = MMUEXT_TLB_FLUSH_ALL;
2674 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
2675
2676 xen_mc_issue(PARAVIRT_LAZY_MMU);
2677
2678 preempt_enable();
2679 }
2680
2681 #define REMAP_BATCH_SIZE 16
2682
2683 struct remap_data {
2684 xen_pfn_t *pfn;
2685 bool contiguous;
2686 bool no_translate;
2687 pgprot_t prot;
2688 struct mmu_update *mmu_update;
2689 };
2690
2691 static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
2692 {
2693 struct remap_data *rmd = data;
2694 pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
2695
2696 /*
2697 * If we have a contiguous range, just update the pfn itself,
2698 * else update pointer to be "next pfn".
2699 */
2700 if (rmd->contiguous)
2701 (*rmd->pfn)++;
2702 else
2703 rmd->pfn++;
2704
2705 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2706 rmd->mmu_update->ptr |= rmd->no_translate ?
2707 MMU_PT_UPDATE_NO_TRANSLATE :
2708 MMU_NORMAL_PT_UPDATE;
2709 rmd->mmu_update->val = pte_val_ma(pte);
2710 rmd->mmu_update++;
2711
2712 return 0;
2713 }
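remap_area_pfn_pte_fn() advances through frame numbers in one of two ways: a contiguous mapping increments the single pfn value in place, while a discontiguous one steps the pointer through an array of pfns. A standalone sketch of the two modes:

```c
#include <stdio.h>

static unsigned long demo_next_pfn(unsigned long **pfn, int contiguous)
{
	unsigned long cur = **pfn;

	if (contiguous)
		(**pfn)++;  /* same slot, value advances */
	else
		(*pfn)++;   /* advance to the next array entry */
	return cur;
}

int main(void)
{
	unsigned long one = 100, *p1 = &one;
	unsigned long many[3] = { 7, 42, 9 }, *p2 = many;

	for (int i = 0; i < 3; i++)
		printf("contig: %lu  scattered: %lu\n",
		       demo_next_pfn(&p1, 1), demo_next_pfn(&p2, 0));
	return 0;
}
```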
2714
2715 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
2716 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
2717 unsigned int domid, bool no_translate, struct page **pages)
2718 {
2719 int err = 0;
2720 struct remap_data rmd;
2721 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2722 unsigned long range;
2723 int mapped = 0;
2724
2725 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2726
2727 rmd.pfn = pfn;
2728 rmd.prot = prot;
2729
2730 /*
2731 * We use err_ptr to indicate whether we are doing a contiguous mapping.
2732 */
2733 rmd.contiguous = !err_ptr;
2734 rmd.no_translate = no_translate;
2735
2736 while (nr) {
2737 int index = 0;
2738 int done = 0;
2739 int batch = min(REMAP_BATCH_SIZE, nr);
2740 int batch_left = batch;
2741
2742 range = (unsigned long)batch << PAGE_SHIFT;
2743
2744 rmd.mmu_update = mmu_update;
2745 err = apply_to_page_range(vma->vm_mm, addr, range,
2746 remap_area_pfn_pte_fn, &rmd);
2747 if (err)
2748 goto out;
2749
2750 /*
2751 * We record the error for each page that gives an error, but
2752 * continue mapping until the whole set is done.
2753 */
2754 do {
2755 int i;
2756
2757 err = HYPERVISOR_mmu_update(&mmu_update[index],
2758 batch_left, &done, domid);
2759
2760
2761 /*
2762 * @err_ptr may alias the @pfn buffer, so only clear it after
2763 * each chunk of @pfn has been consumed.
2764 */
2765 if (err_ptr) {
2766 for (i = index; i < index + done; i++)
2767 err_ptr[i] = 0;
2768 }
2769 if (err < 0) {
2770 if (!err_ptr)
2771 goto out;
2772 err_ptr[i] = err;
2773 done++;
2774 } else
2775 mapped += done;
2776 batch_left -= done;
2777 index += done;
2778 } while (batch_left);
2779
2780 nr -= batch;
2781 addr += range;
2782 if (err_ptr)
2783 err_ptr += batch;
2784 cond_resched();
2785 }
2786 out:
2787
2788 xen_flush_tlb_all();
2789
2790 return err < 0 ? err : mapped;
2791 }
2792 EXPORT_SYMBOL_GPL(xen_remap_pfn);
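The do/while above implements resume-after-partial-failure: HYPERVISOR_mmu_update() reports how many entries it completed before an error, the failing entry has its error recorded in err_ptr and is skipped, and the batch resumes right after it. A standalone sketch with demo_batch() standing in for the hypercall:

```c
#include <stdio.h>

/* Processes entries until one is "bad" (negative); reports progress. */
static int demo_batch(const int *vals, int n, int *done)
{
	for (*done = 0; *done < n; (*done)++)
		if (vals[*done] < 0)
			return -1; /* simulated per-entry failure */
	return 0;
}

int main(void)
{
	int vals[] = { 1, 2, -3, 4, 5 };
	int err_ptr[5] = { 0 };
	int index = 0, left = 5;

	do {
		int done, err = demo_batch(&vals[index], left, &done);

		if (err < 0) {
			err_ptr[index + done] = err; /* record failing entry */
			done++;                      /* skip it and resume */
		}
		left -= done;
		index += done;
	} while (left);

	for (int i = 0; i < 5; i++)
		printf("entry %d: err=%d\n", i, err_ptr[i]);
	return 0;
}
```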
2793
2794 #ifdef CONFIG_KEXEC_CORE
2795 phys_addr_t paddr_vmcoreinfo_note(void)
2796 {
2797 if (xen_pv_domain())
2798 return virt_to_machine(vmcoreinfo_note).maddr;
2799 else
2800 return __pa(vmcoreinfo_note);
2801 }
2802 #endif