pv_ops             18 arch/arm/include/asm/paravirt.h extern struct paravirt_patch_template pv_ops;
pv_ops             22 arch/arm/include/asm/paravirt.h 	return pv_ops.time.steal_clock(cpu);
pv_ops             17 arch/arm/kernel/paravirt.c struct paravirt_patch_template pv_ops;
pv_ops             18 arch/arm/kernel/paravirt.c EXPORT_SYMBOL_GPL(pv_ops);
pv_ops             18 arch/arm64/include/asm/paravirt.h extern struct paravirt_patch_template pv_ops;
pv_ops             22 arch/arm64/include/asm/paravirt.h 	return pv_ops.time.steal_clock(cpu);
pv_ops             17 arch/arm64/kernel/paravirt.c struct paravirt_patch_template pv_ops;
pv_ops             18 arch/arm64/kernel/paravirt.c EXPORT_SYMBOL_GPL(pv_ops);
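
The arm and arm64 hits above show the whole consumer side of the interface: one exported ops table (defined and EXPORT_SYMBOL_GPL'd in kernel/paravirt.c) and an inline wrapper in paravirt.h that always calls through it. Below is a minimal, self-contained userspace sketch of that indirection; it is not kernel code, and the struct and function names are invented for illustration.

/* Userspace model of the pv_ops indirection: a global table of function
 * pointers with a native default, and a wrapper that calls through it. */
#include <stdio.h>
#include <stdint.h>

struct pv_time_ops_sketch {
	uint64_t (*steal_clock)(int cpu);
};

struct pv_ops_sketch {
	struct pv_time_ops_sketch time;
};

/* Default: no hypervisor, so no stolen time to report. */
static uint64_t native_steal_clock(int cpu)
{
	(void)cpu;
	return 0;
}

static struct pv_ops_sketch ops = {
	.time.steal_clock = native_steal_clock,
};

/* Analogue of the inline paravirt_steal_clock() wrapper in paravirt.h. */
static uint64_t paravirt_steal_clock(int cpu)
{
	return ops.time.steal_clock(cpu);
}

int main(void)
{
	printf("steal_clock(0) = %llu\n",
	       (unsigned long long)paravirt_steal_clock(0));
	return 0;
}
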
pv_ops             76 arch/x86/hyperv/hv_spinlock.c 	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
pv_ops             77 arch/x86/hyperv/hv_spinlock.c 	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
pv_ops             78 arch/x86/hyperv/hv_spinlock.c 	pv_ops.lock.wait = hv_qlock_wait;
pv_ops             79 arch/x86/hyperv/hv_spinlock.c 	pv_ops.lock.kick = hv_qlock_kick;
pv_ops             80 arch/x86/hyperv/hv_spinlock.c 	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
pv_ops            236 arch/x86/hyperv/mmu.c 	pv_ops.mmu.flush_tlb_others = hyperv_flush_tlb_others;
pv_ops            237 arch/x86/hyperv/mmu.c 	pv_ops.mmu.tlb_remove_table = tlb_remove_table;
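
The hv_spinlock.c and mmu.c hits show the producer side: at guest init, the Hyper-V support code overwrites individual pv_ops members with its own implementations and leaves everything else at the native defaults. The standalone sketch below models that per-member override pattern; all identifiers in it are invented, not Hyper-V's.

#include <stdio.h>
#include <stddef.h>

struct lock_ops_sketch {
	void (*wait)(int *lock_word, int val);
	void (*kick)(int cpu);
};

struct ops_table_sketch {
	struct lock_ops_sketch lock;
};

static void native_wait(int *lock_word, int val) { (void)lock_word; (void)val; }
static void native_kick(int cpu) { (void)cpu; }

static struct ops_table_sketch ops = {
	.lock = { .wait = native_wait, .kick = native_kick },
};

/* Hypothetical hypervisor-specific implementations. */
static void hv_wait_sketch(int *lock_word, int val)
{
	(void)lock_word; (void)val;
	puts("hv_wait: block this vCPU until kicked");
}

static void hv_kick_sketch(int cpu)
{
	printf("hv_kick: wake vCPU %d\n", cpu);
}

/* Analogue of the init routine: patch only the members we implement. */
static void hv_init_sketch(void)
{
	ops.lock.wait = hv_wait_sketch;
	ops.lock.kick = hv_kick_sketch;
}

int main(void)
{
	hv_init_sketch();
	ops.lock.wait(NULL, 0);
	ops.lock.kick(1);
	return 0;
}
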
pv_ops             42 arch/x86/include/asm/paravirt.h 	pv_ops.cpu.io_delay();
pv_ops             44 arch/x86/include/asm/paravirt.h 	pv_ops.cpu.io_delay();
pv_ops             45 arch/x86/include/asm/paravirt.h 	pv_ops.cpu.io_delay();
pv_ops             46 arch/x86/include/asm/paravirt.h 	pv_ops.cpu.io_delay();
pv_ops            429 arch/x86/include/asm/paravirt.h 		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
pv_ops            448 arch/x86/include/asm/paravirt.h 		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
pv_ops            633 arch/x86/include/asm/paravirt.h 	pv_ops.mmu.set_fixmap(idx, phys, flags);
pv_ops            863 arch/x86/include/asm/paravirt.h 		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)
pv_ops            869 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
pv_ops            876 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
pv_ops            899 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);		\
pv_ops            905 arch/x86/include/asm/paravirt.h 		  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
pv_ops            912 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);	    \
pv_ops            923 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);		\
pv_ops            336 arch/x86/include/asm/paravirt_types.h extern struct paravirt_patch_template pv_ops;
pv_ops            343 arch/x86/include/asm/paravirt_types.h 	[paravirt_opptr] "i" (&(pv_ops.op))
pv_ops            496 arch/x86/include/asm/paravirt_types.h #define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
pv_ops            498 arch/x86/include/asm/paravirt_types.h #define PVOP_TEST_NULL(op)	((void)pv_ops.op)
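
The paravirt_types.h hits show the macro plumbing: a member path such as cpu.io_delay is spliced onto pv_ops inside a macro, either to take its address for the patch-site asm operand ([paravirt_opptr] "i" (&(pv_ops.op))) or to sanity-check it (PVOP_TEST_NULL). The sketch below models only the token splicing and the optional NULL check in plain C; the asm-operand side is not modeled, and the names and the OPS_DEBUG switch are made up.

#include <assert.h>
#include <stdio.h>

struct cpu_ops_sketch { void (*io_delay)(void); };
struct ops_table_sketch { struct cpu_ops_sketch cpu; };

static void native_io_delay(void) { puts("native io_delay"); }

static struct ops_table_sketch ops = { .cpu.io_delay = native_io_delay };

#ifdef OPS_DEBUG
#define OP_TEST_NULL(op)	assert(ops.op != NULL)
#else
#define OP_TEST_NULL(op)	((void)ops.op)
#endif

/* The member path "cpu.io_delay" is pasted into the expression, just as
 * pv_ops.op is in the real PVOP_* macros. */
#define OP_CALL(op)		(OP_TEST_NULL(op), ops.op())

int main(void)
{
	OP_CALL(cpu.io_delay);
	return 0;
}
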
pv_ops            605 arch/x86/kernel/alternative.c 		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
pv_ops           1420 arch/x86/kernel/cpu/common.c 		if (pv_ops.cpu.iret == native_iret)
pv_ops            364 arch/x86/kernel/cpu/mshyperv.c 	pv_ops.time.sched_clock = sched_clock;
pv_ops            135 arch/x86/kernel/cpu/vmware.c 	pv_ops.time.sched_clock = vmware_sched_clock;
pv_ops            142 arch/x86/kernel/cpu/vmware.c 	pv_ops.cpu.io_delay = paravirt_nop;
pv_ops            273 arch/x86/kernel/kvm.c 		pv_ops.cpu.io_delay = kvm_io_delay;
pv_ops            622 arch/x86/kernel/kvm.c 		pv_ops.time.steal_clock = kvm_steal_clock;
pv_ops            628 arch/x86/kernel/kvm.c 		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
pv_ops            629 arch/x86/kernel/kvm.c 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
pv_ops            846 arch/x86/kernel/kvm.c 	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
pv_ops            847 arch/x86/kernel/kvm.c 	pv_ops.lock.queued_spin_unlock =
pv_ops            849 arch/x86/kernel/kvm.c 	pv_ops.lock.wait = kvm_wait;
pv_ops            850 arch/x86/kernel/kvm.c 	pv_ops.lock.kick = kvm_kick_cpu;
pv_ops            853 arch/x86/kernel/kvm.c 		pv_ops.lock.vcpu_is_preempted =
pv_ops            110 arch/x86/kernel/kvmclock.c 	pv_ops.time.sched_clock = kvm_sched_clock_read;
pv_ops             20 arch/x86/kernel/paravirt-spinlocks.c 	return pv_ops.lock.queued_spin_unlock.func ==
pv_ops             32 arch/x86/kernel/paravirt-spinlocks.c 	return pv_ops.lock.vcpu_is_preempted.func ==
pv_ops            123 arch/x86/kernel/paravirt.c 	void *opfunc = *((void **)&pv_ops + type);
pv_ops            296 arch/x86/kernel/paravirt.c struct paravirt_patch_template pv_ops = {
pv_ops            458 arch/x86/kernel/paravirt.c EXPORT_SYMBOL(pv_ops);
pv_ops            250 arch/x86/kernel/tsc.c 	return pv_ops.time.sched_clock == native_sched_clock;
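
Several of the core-kernel hits (cpu/common.c, paravirt-spinlocks.c, tsc.c) are not assignments but identity checks: they compare a pv_ops member against the known native implementation to decide whether a hypervisor has hooked it. Here is a self-contained sketch of that check, under invented names; it mirrors the tsc.c comparison shown above but is not the kernel's code.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long u64;

struct time_ops_sketch { u64 (*sched_clock)(void); };
struct ops_table_sketch { struct time_ops_sketch time; };

static u64 native_sched_clock_sketch(void) { return 0; }
static u64 guest_sched_clock_sketch(void)  { return 42; }

static struct ops_table_sketch ops = {
	.time.sched_clock = native_sched_clock_sketch,
};

/* Analogue of the "is this op still native?" test in tsc.c. */
static bool using_native_sched_clock(void)
{
	return ops.time.sched_clock == native_sched_clock_sketch;
}

int main(void)
{
	printf("native? %d\n", using_native_sched_clock());
	ops.time.sched_clock = guest_sched_clock_sketch; /* e.g. kvmclock init */
	printf("native? %d\n", using_native_sched_clock());
	return 0;
}
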
pv_ops            993 arch/x86/xen/enlighten_pv.c 		pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
pv_ops            994 arch/x86/xen/enlighten_pv.c 		pv_ops.irq.restore_fl =
pv_ops            996 arch/x86/xen/enlighten_pv.c 		pv_ops.irq.irq_disable =
pv_ops            998 arch/x86/xen/enlighten_pv.c 		pv_ops.irq.irq_enable =
pv_ops           1000 arch/x86/xen/enlighten_pv.c 		pv_ops.mmu.read_cr2 =
pv_ops           1171 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
pv_ops           1172 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu.load_gdt = xen_load_gdt_boot;
pv_ops           1177 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
pv_ops           1178 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu.load_gdt = xen_load_gdt;
pv_ops           1203 arch/x86/xen/enlighten_pv.c 	pv_ops.init.patch = paravirt_patch_default;
pv_ops           1204 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu = xen_cpu_ops;
pv_ops           1274 arch/x86/xen/enlighten_pv.c 		pv_ops.mmu.ptep_modify_prot_start =
pv_ops           1276 arch/x86/xen/enlighten_pv.c 		pv_ops.mmu.ptep_modify_prot_commit =
pv_ops            131 arch/x86/xen/irq.c 	pv_ops.irq = xen_irq_ops;
pv_ops             76 arch/x86/xen/mmu_hvm.c 		pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap;
pv_ops           2210 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.write_cr3 = &xen_write_cr3;
pv_ops           2357 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.set_pte = xen_set_pte;
pv_ops           2358 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.set_pmd = xen_set_pmd;
pv_ops           2359 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.set_pud = xen_set_pud;
pv_ops           2361 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.set_p4d = xen_set_p4d;
pv_ops           2366 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.alloc_pte = xen_alloc_pte;
pv_ops           2367 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
pv_ops           2368 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.release_pte = xen_release_pte;
pv_ops           2369 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.release_pmd = xen_release_pmd;
pv_ops           2371 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.alloc_pud = xen_alloc_pud;
pv_ops           2372 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.release_pud = xen_release_pud;
pv_ops           2374 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
pv_ops           2377 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.write_cr3 = &xen_write_cr3;
pv_ops           2465 arch/x86/xen/mmu_pv.c 	pv_ops.mmu = xen_mmu_ops;
pv_ops            130 arch/x86/xen/spinlock.c 	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
pv_ops            131 arch/x86/xen/spinlock.c 	pv_ops.lock.queued_spin_unlock =
pv_ops            133 arch/x86/xen/spinlock.c 	pv_ops.lock.wait = xen_qlock_wait;
pv_ops            134 arch/x86/xen/spinlock.c 	pv_ops.lock.kick = xen_qlock_kick;
pv_ops            135 arch/x86/xen/spinlock.c 	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
pv_ops            522 arch/x86/xen/time.c 	pv_ops.time = xen_time_ops;
pv_ops            564 arch/x86/xen/time.c 	pv_ops.time = xen_time_ops;
pv_ops            178 drivers/xen/time.c 	pv_ops.time.steal_clock = xen_steal_clock;
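
The Xen hits show both override styles side by side: whole sub-structures are replaced in one assignment (pv_ops.time = xen_time_ops, pv_ops.irq = xen_irq_ops, pv_ops.mmu = xen_mmu_ops), and individual members are then re-pointed once later boot stages are reached (the mmu_pv.c set_pte/alloc_pte assignments). The sketch below models that two-step pattern; every identifier in it is invented for illustration and does not correspond to Xen's actual ops structures.

#include <stdio.h>

struct mmu_ops_sketch {
	void (*set_pte)(void);
	void (*exit_mmap)(void);
};
struct ops_table_sketch { struct mmu_ops_sketch mmu; };

static void native_set_pte(void)     { puts("native set_pte"); }
static void native_exit_mmap(void)   { puts("native exit_mmap"); }
static void guest_set_pte_early(void){ puts("guest set_pte (early boot)"); }
static void guest_set_pte(void)      { puts("guest set_pte"); }
static void guest_exit_mmap(void)    { puts("guest exit_mmap"); }

static struct ops_table_sketch ops = {
	.mmu = { .set_pte = native_set_pte, .exit_mmap = native_exit_mmap },
};

static const struct mmu_ops_sketch guest_mmu_ops = {
	.set_pte   = guest_set_pte_early,	/* early-boot variant */
	.exit_mmap = guest_exit_mmap,
};

int main(void)
{
	/* Step 1: replace the whole group in a single struct assignment. */
	ops.mmu = guest_mmu_ops;
	ops.mmu.set_pte();

	/* Step 2: once allocators are up, switch individual members to the
	 * final implementations, as the later mmu_pv.c assignments do. */
	ops.mmu.set_pte = guest_set_pte;
	ops.mmu.set_pte();
	ops.mmu.exit_mmap();
	return 0;
}
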