/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#endif

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

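/*
 * Wrapper type for pointers to code which uses the non-standard
 * calleesave calling convention: only the return register(s) may be
 * clobbered (see CLBR_RET_REG and the PVOP_CALLEE* macros below).
 */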
struct paravirt_callee_save {
	void *func;
};

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
	unsigned int kernel_rpl;
	int shared_kernel_pmd;

#ifdef CONFIG_X86_64
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif
#endif

	const char *name;
};

struct pv_init_ops {
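	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */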
	unsigned (*patch)(u8 type, void *insn_buff,
			  unsigned long addr, unsigned len);
} __no_randomize_layout;

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
#endif

struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
} __no_randomize_layout;

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

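	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */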
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

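	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */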
	void (*usergs_sysret64)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
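	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: These functions callers expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */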
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
#endif
} __no_randomize_layout;

struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 const struct flush_tlb_info *info);

	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hooks for intercepting the creation/use of an mm_struct. */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

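	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */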
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pte updates.  This can be batched in some cases. */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if CONFIG_PGTABLE_LEVELS >= 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

	struct pv_lazy_ops lazy_mode;

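	/* Sometimes the physical address is a pfn, and sometimes its
	   an mfn.  We can tell which is which from the index. */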
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;

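/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */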
struct paravirt_patch_template {
	struct pv_init_ops	init;
	struct pv_time_ops	time;
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;

#define PARAVIRT_PATCH(x) \
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

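/*
 * PARAVIRT_PATCH(x) turns a member of struct paravirt_patch_template
 * (e.g. mmu.read_cr3) into a word index; that index is recorded as the
 * patch-site type and can be converted back into the pv_ops slot to
 * patch.  paravirt_type()/paravirt_clobber() feed the type, the address
 * of the pv_ops member and the clobber mask into the asm templates
 * below as immediate operands.
 */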
#define paravirt_type(op) \
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
	[paravirt_opptr] "i" (&(pv_ops.op))
#define paravirt_clobber(clobber) \
	[paravirt_clobber] "i" (clobber)

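/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */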
#define _paravirt_alt(insn_string, type, clobber) \
	"771:\n\t" insn_string "\n" "772:\n" \
	".pushsection .parainstructions,\"a\"\n" \
	_ASM_ALIGN "\n" \
	_ASM_PTR " 771b\n" \
	" .byte " type "\n" \
	" .byte 772b-771b\n" \
	" .short " clobber "\n" \
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string) \
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Emit a global asm label; used to delimit native instruction sequences. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

unsigned paravirt_patch_ident_64(void *insn_buff, unsigned len);
unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end);

unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

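/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */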
#define PARAVIRT_CALL \
	ANNOTATE_RETPOLINE_SAFE \
	"call *%c[paravirt_opptr];"

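/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  Here the address is a runtime constant, so at the very
 * least we can patch the call to be a simple direct call, or, ideally,
 * patch an inline implementation into the callsite.  (Direct calls are
 * essentially free, because the call and return addresses are
 * completely predictable.)
 *
 * On i386 these macros rely on the register-based regparm(3) calling
 * convention: the first three arguments go in %eax, %edx, %ecx (in
 * that order) and all caller-save registers (eax, edx, ecx) may be
 * clobbered or used for return values.  x86_64 already uses a
 * register-based convention, returning in %rax, with parameters in
 * %rdi, %rsi, %rdx and %rcx, and with %r8-%r11 treated as clobbered.
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that apply_paravirt()
 * in alternative.c can do the appropriate patching under the control
 * of the backend pv_init_ops implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are PVOP_* variants for 0-4 arguments; for each count there is
 * a CALL form for ops that return a value and a VCALL form for void
 * ops, plus CALLEE/VCALLEE forms for ops using the callee-save
 * convention (struct paravirt_callee_save), which only clobber the
 * return register(s).
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type; sizeof() on that type determines whether the value
 * comes back in %eax or %edx:%eax on 32-bit, while 64-bit kernels
 * simply return in %rax.  PVOP_RETMASK() strips undefined high bits
 * for return types narrower than a register.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */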
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS \
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)	"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)	"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)	"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=a" (__eax), "=d" (__edx), \
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS \
	unsigned long __edi = __edi, __esi = __esi, \
		      __edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)	"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)	"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)	"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)	"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi), \
				"=S" (__esi), "=d" (__edx), \
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)pv_ops.op)
#endif

#define PVOP_RETMASK(rettype) \
	({	unsigned long __mask = ~0UL; \
		switch (sizeof(rettype)) { \
		case 1: __mask =       0xffUL; break; \
		case 2: __mask =     0xffffUL; break; \
		case 4: __mask = 0xffffffffUL; break; \
		default: break; \
		} \
		__mask; \
	})
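
/*
 * PVOP_RETMASK(rettype) yields a mask covering sizeof(rettype) bytes;
 * it is used below to strip undefined upper bits from [re]ax when an
 * op returns something narrower than a register.
 */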

#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
		      pre, post, ...) \
	({ \
		rettype __ret; \
		PVOP_CALL_ARGS; \
		PVOP_TEST_NULL(op); \
		/* This is 32-bit specific, but is okay in 64-bit */ \
		/* since this condition will never hold */ \
		if (sizeof(rettype) > sizeof(unsigned long)) { \
			asm volatile(pre \
				     paravirt_alt(PARAVIRT_CALL) \
				     post \
				     : call_clbr, ASM_CALL_CONSTRAINT \
				     : paravirt_type(op), \
				       paravirt_clobber(clbr), \
				       ##__VA_ARGS__ \
				     : "memory", "cc" extra_clbr); \
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else { \
			asm volatile(pre \
				     paravirt_alt(PARAVIRT_CALL) \
				     post \
				     : call_clbr, ASM_CALL_CONSTRAINT \
				     : paravirt_type(op), \
				       paravirt_clobber(clbr), \
				       ##__VA_ARGS__ \
				     : "memory", "cc" extra_clbr); \
			__ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
		} \
		__ret; \
	})

#define __PVOP_CALL(rettype, op, pre, post, ...) \
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
		      PVOP_CALLEE_CLOBBERS, , \
		      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
	({ \
		PVOP_VCALL_ARGS; \
		PVOP_TEST_NULL(op); \
		asm volatile(pre \
			     paravirt_alt(PARAVIRT_CALL) \
			     post \
			     : call_clbr, ASM_CALL_CONSTRAINT \
			     : paravirt_type(op), \
			       paravirt_clobber(clbr), \
			       ##__VA_ARGS__ \
			     : "memory", "cc" extra_clbr); \
	})

#define __PVOP_VCALL(op, pre, post, ...) \
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
		       VEXTRA_CLOBBERS, \
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
	____PVOP_VCALL(op.func, CLBR_RET_REG, \
		       PVOP_VCALLEE_CLOBBERS, , \
		       pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op) \
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op) \
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op) \
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op) \
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1) \
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1) \
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1) \
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1) \
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2) \
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2) \
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2) \
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3) \
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* On 32-bit, the 4th argument must be pushed on the stack around the call. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
	__PVOP_CALL(rettype, op, \
		    "push %[_arg4];", "lea 4(%%esp),%%esp;", \
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
	__PVOP_VCALL(op, \
		     "push %[_arg4];", "lea 4(%%esp),%%esp;", \
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
	__PVOP_CALL(rettype, op, "", "", \
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
	__PVOP_VCALL(op, "", "", \
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
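
/*
 * Illustrative sketch (the real wrappers live in asm/paravirt.h): each
 * op is used through a small type-safe inline, e.g.
 *
 *	static inline unsigned long paravirt_get_debugreg(int reg)
 *	{
 *		return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
 *	}
 *
 *	static inline void write_cr3(unsigned long x)
 *	{
 *		PVOP_VCALL1(mmu.write_cr3, x);
 *	}
 */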

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);

void _paravirt_nop(void);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;	/* original instructions */
	u8 type;	/* type of this instruction */
	u8 len;		/* length of original instruction */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */