uindices 28 arch/arm/include/asm/kvm_coproc.h int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
uindices 32 arch/arm/include/asm/kvm_coproc.h int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
uindices 282 arch/arm/include/asm/kvm_host.h int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
uindices 1064 arch/arm/kvm/coproc.c static int copy_vfp_regids(u64 __user *uindices)
uindices 1072 arch/arm/kvm/coproc.c uindices))
uindices 1074 arch/arm/kvm/coproc.c uindices++;
uindices 1078 arch/arm/kvm/coproc.c if (put_user(u32reg | vfp_sysregs[i], uindices))
uindices 1080 arch/arm/kvm/coproc.c uindices++;
uindices 1189 arch/arm/kvm/coproc.c static int copy_vfp_regids(u64 __user *uindices)
uindices 1275 arch/arm/kvm/coproc.c static int write_demux_regids(u64 __user *uindices)
uindices 1284 arch/arm/kvm/coproc.c if (put_user(val | i, uindices))
uindices 1286 arch/arm/kvm/coproc.c uindices++;
uindices 1378 arch/arm/kvm/coproc.c int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices 1385 arch/arm/kvm/coproc.c if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
uindices 1387 arch/arm/kvm/coproc.c uindices++;
uindices 1390 arch/arm/kvm/coproc.c err = walk_cp15(vcpu, uindices);
uindices 1393 arch/arm/kvm/coproc.c uindices += err;
uindices 1395 arch/arm/kvm/coproc.c err = copy_vfp_regids(uindices);
uindices 1398 arch/arm/kvm/coproc.c uindices += err;
uindices 1400 arch/arm/kvm/coproc.c return write_demux_regids(uindices);
uindices 119 arch/arm/kvm/guest.c static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices 121 arch/arm/kvm/guest.c if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
uindices 123 arch/arm/kvm/guest.c uindices++;
uindices 124 arch/arm/kvm/guest.c if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
uindices 126 arch/arm/kvm/guest.c uindices++;
uindices 127 arch/arm/kvm/guest.c if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
uindices 177 arch/arm/kvm/guest.c int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices 184 arch/arm/kvm/guest.c if (put_user(core_reg | i, uindices))
uindices 186 arch/arm/kvm/guest.c uindices++;
uindices 189 arch/arm/kvm/guest.c ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
uindices 192 arch/arm/kvm/guest.c uindices += kvm_arm_get_fw_num_regs(vcpu);
uindices 194 arch/arm/kvm/guest.c ret = copy_timer_indices(vcpu, uindices);
uindices 197 arch/arm/kvm/guest.c uindices += NUM_TIMER_REGS;
uindices 199 arch/arm/kvm/guest.c return kvm_arm_copy_coproc_indices(vcpu, uindices);
uindices 41 arch/arm64/include/asm/kvm_coproc.h int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
uindices 454 arch/arm64/kvm/guest.c u64 __user *uindices)
uindices 484 arch/arm64/kvm/guest.c if (uindices) {
uindices 485 arch/arm64/kvm/guest.c if (put_user(reg, uindices))
uindices 487 arch/arm64/kvm/guest.c uindices++;
uindices 518 arch/arm64/kvm/guest.c static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices 520 arch/arm64/kvm/guest.c if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
uindices 522 arch/arm64/kvm/guest.c uindices++;
uindices 523 arch/arm64/kvm/guest.c if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
uindices 525 arch/arm64/kvm/guest.c uindices++;
uindices 526 arch/arm64/kvm/guest.c if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
uindices 569 arch/arm64/kvm/guest.c u64 __user *uindices)
uindices 587 arch/arm64/kvm/guest.c if (put_user(reg, uindices++))
uindices 594 arch/arm64/kvm/guest.c if (put_user(reg, uindices++))
uindices 601 arch/arm64/kvm/guest.c if (put_user(reg, uindices++))
uindices 607 arch/arm64/kvm/guest.c if (put_user(reg, uindices++))
uindices 638 arch/arm64/kvm/guest.c int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices 642 arch/arm64/kvm/guest.c ret = copy_core_reg_indices(vcpu, uindices);
uindices 645 arch/arm64/kvm/guest.c uindices += ret;
uindices 647 arch/arm64/kvm/guest.c ret = copy_sve_reg_indices(vcpu, uindices);
uindices 650 arch/arm64/kvm/guest.c uindices += ret;
uindices 652 arch/arm64/kvm/guest.c ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
uindices 655 arch/arm64/kvm/guest.c uindices += kvm_arm_get_fw_num_regs(vcpu);
uindices 657 arch/arm64/kvm/guest.c ret = copy_timer_indices(vcpu, uindices);
uindices 660 arch/arm64/kvm/guest.c uindices += NUM_TIMER_REGS;
uindices 662 arch/arm64/kvm/guest.c return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
uindices 2611 arch/arm64/kvm/sys_regs.c static int write_demux_regids(u64 __user *uindices)
uindices 2620 arch/arm64/kvm/sys_regs.c if (put_user(val | i, uindices))
uindices 2622 arch/arm64/kvm/sys_regs.c uindices++;
uindices 2715 arch/arm64/kvm/sys_regs.c int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices 2722 arch/arm64/kvm/sys_regs.c if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
uindices 2724 arch/arm64/kvm/sys_regs.c uindices++;
uindices 2727 arch/arm64/kvm/sys_regs.c err = walk_sys_regs(vcpu, uindices);
uindices 2730 arch/arm64/kvm/sys_regs.c uindices += err;
uindices 2732 arch/arm64/kvm/sys_regs.c return write_demux_regids(uindices);
uindices 48 include/kvm/arm_psci.h int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
uindices 444 virt/kvm/arm/psci.c int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices 446 virt/kvm/arm/psci.c if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++))
uindices 449 virt/kvm/arm/psci.c if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++))
uindices 452 virt/kvm/arm/psci.c if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
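Every hit above is an instance of the same KVM_GET_REG_LIST fill pattern: each helper writes 64-bit register IDs into the userspace array with put_user(), advances uindices past what it wrote, and the top-level kvm_arm_copy_reg_indices() chains the helpers, bumping the pointer by each helper's count before calling the next. As a minimal sketch, here are copy_timer_indices() and the arm64 kvm_arm_copy_reg_indices() reconstructed from the hits at arch/arm/kvm/guest.c:119-127 and arch/arm64/kvm/guest.c:638-662; the -EFAULT returns and the "if (ret < 0)" error-propagation checks fall on lines the listing omits and are assumptions based on the usual put_user() idiom, not text from the listing.

/*
 * Sketch reconstructed from the cross-reference hits; lines not shown in
 * the listing (error paths) are assumed, following the put_user() idiom.
 */
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;		/* assumed error path */
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;
	uindices++;

	return 0;
}

int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)			/* assumed: propagate helper errors */
		return ret;
	uindices += ret;		/* helper returns how many IDs it wrote */

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	/* last helper appends the remaining system-register IDs */
	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

The same advance-by-count convention explains the "uindices += err" hits in coproc.c and sys_regs.c: walk_cp15() and walk_sys_regs() return the number of IDs they wrote, so the caller can append the demux register IDs (write_demux_regids()) immediately after them.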