Lines Matching refs:vc

169 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv() local
178 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) { in kvmppc_core_vcpu_load_hv()
179 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_vcpu_load_hv()
180 if (vc->preempt_tb != TB_NIL) { in kvmppc_core_vcpu_load_hv()
181 vc->stolen_tb += mftb() - vc->preempt_tb; in kvmppc_core_vcpu_load_hv()
182 vc->preempt_tb = TB_NIL; in kvmppc_core_vcpu_load_hv()
184 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_vcpu_load_hv()
197 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv() local
200 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) { in kvmppc_core_vcpu_put_hv()
201 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_vcpu_put_hv()
202 vc->preempt_tb = mftb(); in kvmppc_core_vcpu_put_hv()
203 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_vcpu_put_hv()
231 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat() local
260 spin_lock(&vc->lock); in kvmppc_set_arch_compat()
261 vc->arch_compat = arch_compat; in kvmppc_set_arch_compat()
262 vc->pcr = pcr; in kvmppc_set_arch_compat()
263 spin_unlock(&vc->lock); in kvmppc_set_arch_compat()
542 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) in vcore_stolen_time() argument
547 spin_lock_irqsave(&vc->stoltb_lock, flags); in vcore_stolen_time()
548 p = vc->stolen_tb; in vcore_stolen_time()
549 if (vc->vcore_state != VCORE_INACTIVE && in vcore_stolen_time()
550 vc->preempt_tb != TB_NIL) in vcore_stolen_time()
551 p += now - vc->preempt_tb; in vcore_stolen_time()
552 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in vcore_stolen_time()
557 struct kvmppc_vcore *vc) in kvmppc_create_dtl_entry() argument
568 core_stolen = vcore_stolen_time(vc, now); in kvmppc_create_dtl_entry()
579 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
580 dt->timebase = cpu_to_be64(now + vc->tb_offset); in kvmppc_create_dtl_entry()
984 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr() local
988 spin_lock(&vc->lock); in kvmppc_set_lpcr()
993 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { in kvmppc_set_lpcr()
998 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
1019 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); in kvmppc_set_lpcr()
1020 spin_unlock(&vc->lock); in kvmppc_set_lpcr()
1731 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, in kvmppc_remove_runnable() argument
1740 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
1745 --vc->n_runnable; in kvmppc_remove_runnable()
1795 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_start_thread() local
1801 cpu = vc->pcpu + vcpu->arch.ptid; in kvmppc_start_thread()
1803 tpaca->kvm_hstate.kvm_vcore = vc; in kvmppc_start_thread()
1805 vcpu->cpu = vc->pcpu; in kvmppc_start_thread()
1872 static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc) in kvmppc_start_saving_l2_cache() argument
1876 phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer); in kvmppc_start_saving_l2_cache()
1882 vc->mpp_buffer_is_valid = true; in kvmppc_start_saving_l2_cache()
1885 static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc) in kvmppc_start_restoring_l2_cache() argument
1889 phy_addr = virt_to_phys(vc->mpp_buffer); in kvmppc_start_restoring_l2_cache()
1899 static void prepare_threads(struct kvmppc_vcore *vc) in prepare_threads() argument
1903 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in prepare_threads()
1913 kvmppc_remove_runnable(vc, vcpu); in prepare_threads()
1918 static void post_guest_process(struct kvmppc_vcore *vc) in post_guest_process() argument
1925 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in post_guest_process()
1949 kvmppc_remove_runnable(vc, vcpu); in post_guest_process()
1959 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) in kvmppc_run_core() argument
1969 prepare_threads(vc); in kvmppc_run_core()
1972 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
1978 vc->entry_exit_map = 0; in kvmppc_run_core()
1979 vc->preempt_tb = TB_NIL; in kvmppc_run_core()
1980 vc->in_guest = 0; in kvmppc_run_core()
1981 vc->napping_threads = 0; in kvmppc_run_core()
1982 vc->conferring_threads = 0; in kvmppc_run_core()
1990 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { in kvmppc_run_core()
1991 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in kvmppc_run_core()
1994 kvmppc_remove_runnable(vc, vcpu); in kvmppc_run_core()
2001 vc->pcpu = smp_processor_id(); in kvmppc_run_core()
2002 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_run_core()
2004 kvmppc_create_dtl_entry(vcpu, vc); in kvmppc_run_core()
2009 get_paca()->kvm_hstate.kvm_vcore = vc; in kvmppc_run_core()
2012 vc->vcore_state = VCORE_RUNNING; in kvmppc_run_core()
2015 trace_kvmppc_run_core(vc, 0); in kvmppc_run_core()
2017 spin_unlock(&vc->lock); in kvmppc_run_core()
2021 srcu_idx = srcu_read_lock(&vc->kvm->srcu); in kvmppc_run_core()
2023 if (vc->mpp_buffer_is_valid) in kvmppc_run_core()
2024 kvmppc_start_restoring_l2_cache(vc); in kvmppc_run_core()
2028 spin_lock(&vc->lock); in kvmppc_run_core()
2030 if (vc->mpp_buffer) in kvmppc_run_core()
2031 kvmppc_start_saving_l2_cache(vc); in kvmppc_run_core()
2034 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) in kvmppc_run_core()
2039 kvmppc_release_hwthread(vc->pcpu + i); in kvmppc_run_core()
2041 vc->vcore_state = VCORE_EXITING; in kvmppc_run_core()
2042 spin_unlock(&vc->lock); in kvmppc_run_core()
2044 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); in kvmppc_run_core()
2052 spin_lock(&vc->lock); in kvmppc_run_core()
2053 post_guest_process(vc); in kvmppc_run_core()
2056 vc->vcore_state = VCORE_INACTIVE; in kvmppc_run_core()
2057 trace_kvmppc_run_core(vc, 1); in kvmppc_run_core()
2078 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) in kvmppc_vcore_blocked() argument
2085 prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); in kvmppc_vcore_blocked()
2091 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_vcore_blocked()
2099 finish_wait(&vc->wq, &wait); in kvmppc_vcore_blocked()
2103 vc->vcore_state = VCORE_SLEEPING; in kvmppc_vcore_blocked()
2104 trace_kvmppc_vcore_blocked(vc, 0); in kvmppc_vcore_blocked()
2105 spin_unlock(&vc->lock); in kvmppc_vcore_blocked()
2107 finish_wait(&vc->wq, &wait); in kvmppc_vcore_blocked()
2108 spin_lock(&vc->lock); in kvmppc_vcore_blocked()
2109 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_blocked()
2110 trace_kvmppc_vcore_blocked(vc, 1); in kvmppc_vcore_blocked()
2116 struct kvmppc_vcore *vc; in kvmppc_run_vcpu() local
2129 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
2130 spin_lock(&vc->lock); in kvmppc_run_vcpu()
2134 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
2137 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); in kvmppc_run_vcpu()
2138 ++vc->n_runnable; in kvmppc_run_vcpu()
2146 if (vc->vcore_state == VCORE_RUNNING && !VCORE_IS_EXITING(vc)) { in kvmppc_run_vcpu()
2147 kvmppc_create_dtl_entry(vcpu, vc); in kvmppc_run_vcpu()
2150 } else if (vc->vcore_state == VCORE_SLEEPING) { in kvmppc_run_vcpu()
2151 wake_up(&vc->wq); in kvmppc_run_vcpu()
2158 if (vc->vcore_state != VCORE_INACTIVE) { in kvmppc_run_vcpu()
2159 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
2161 spin_lock(&vc->lock); in kvmppc_run_vcpu()
2164 list_for_each_entry_safe(v, vn, &vc->runnable_threads, in kvmppc_run_vcpu()
2168 kvmppc_remove_runnable(vc, v); in kvmppc_run_vcpu()
2175 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
2178 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { in kvmppc_run_vcpu()
2184 vc->runner = vcpu; in kvmppc_run_vcpu()
2185 if (n_ceded == vc->n_runnable) { in kvmppc_run_vcpu()
2186 kvmppc_vcore_blocked(vc); in kvmppc_run_vcpu()
2188 vc->vcore_state = VCORE_PREEMPT; in kvmppc_run_vcpu()
2190 cond_resched_lock(&vc->lock); in kvmppc_run_vcpu()
2191 vc->vcore_state = VCORE_INACTIVE; in kvmppc_run_vcpu()
2193 kvmppc_run_core(vc); in kvmppc_run_vcpu()
2195 vc->runner = NULL; in kvmppc_run_vcpu()
2199 (vc->vcore_state == VCORE_RUNNING || in kvmppc_run_vcpu()
2200 vc->vcore_state == VCORE_EXITING)) { in kvmppc_run_vcpu()
2201 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
2203 spin_lock(&vc->lock); in kvmppc_run_vcpu()
2207 kvmppc_remove_runnable(vc, vcpu); in kvmppc_run_vcpu()
2213 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { in kvmppc_run_vcpu()
2215 v = list_first_entry(&vc->runnable_threads, in kvmppc_run_vcpu()
2221 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
2422 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr() local
2423 if (!vc) in kvmppc_update_lpcr()
2425 spin_lock(&vc->lock); in kvmppc_update_lpcr()
2426 vc->lpcr = (vc->lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
2427 spin_unlock(&vc->lock); in kvmppc_update_lpcr()
2577 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_free_vcores() local
2578 free_pages((unsigned long)vc->mpp_buffer, in kvmppc_free_vcores()