Lines Matching refs:vc

A cross-reference listing: every line where the identifier `vc` occurs, with its source line number and enclosing function. `vc` is a `struct kvmppc_vcore *`, the per-virtual-core state used by KVM's Book3S HV support on POWER (arch/powerpc/kvm/book3s_hv.c). Reconstruction sketches follow the relevant entries; anything not visible in a fragment is marked as an assumption.
167 static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc) in kvmppc_core_start_stolen() argument
171 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_start_stolen()
172 vc->preempt_tb = mftb(); in kvmppc_core_start_stolen()
173 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_start_stolen()
176 static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc) in kvmppc_core_end_stolen() argument
180 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_end_stolen()
181 if (vc->preempt_tb != TB_NIL) { in kvmppc_core_end_stolen()
182 vc->stolen_tb += mftb() - vc->preempt_tb; in kvmppc_core_end_stolen()
183 vc->preempt_tb = TB_NIL; in kvmppc_core_end_stolen()
185 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_end_stolen()
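
The two helpers above bracket the periods when a virtual core is not running on hardware. A minimal reconstruction from the fragments, where only the `flags` declarations and braces are filled in (`mftb()` reads the POWER timebase; `TB_NIL` is the "no preemption interval open" sentinel):

    static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
    {
            unsigned long flags;

            /* Record the timebase at which this vcore stopped running */
            spin_lock_irqsave(&vc->stoltb_lock, flags);
            vc->preempt_tb = mftb();
            spin_unlock_irqrestore(&vc->stoltb_lock, flags);
    }

    static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
    {
            unsigned long flags;

            /* Fold the just-ended preemption interval into the total */
            spin_lock_irqsave(&vc->stoltb_lock, flags);
            if (vc->preempt_tb != TB_NIL) {
                    vc->stolen_tb += mftb() - vc->preempt_tb;
                    vc->preempt_tb = TB_NIL;
            }
            spin_unlock_irqrestore(&vc->stoltb_lock, flags);
    }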
190 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv() local
199 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_load_hv()
200 kvmppc_core_end_stolen(vc); in kvmppc_core_vcpu_load_hv()
213 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv() local
216 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_put_hv()
217 kvmppc_core_start_stolen(vc); in kvmppc_core_vcpu_put_hv()
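
Load and put are symmetric: when the task driving the vcore (`vc->runner`) is scheduled back in while the vcore is in any non-inactive state, stolen-time accounting stops; when it is scheduled out, accounting starts. A sketch of the pair showing only the vcore-side logic visible in the fragments (the real functions also do per-vCPU busy-time accounting, elided here):

    static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
    {
            struct kvmppc_vcore *vc = vcpu->arch.vcore;

            /* Runner scheduled back in: the vcore is no longer "stolen" */
            if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
                    kvmppc_core_end_stolen(vc);
            /* (per-vCPU busy-time accounting elided) */
    }

    static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
    {
            struct kvmppc_vcore *vc = vcpu->arch.vcore;

            /* Runner scheduled out: time from now on counts as stolen */
            if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
                    kvmppc_core_start_stolen(vc);
            /* (per-vCPU busy-time accounting elided) */
    }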
245 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat() local
274 spin_lock(&vc->lock); in kvmppc_set_arch_compat()
275 vc->arch_compat = arch_compat; in kvmppc_set_arch_compat()
276 vc->pcr = pcr; in kvmppc_set_arch_compat()
277 spin_unlock(&vc->lock); in kvmppc_set_arch_compat()
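
The compatibility-mode update publishes the requested architecture level and the Processor Compatibility Register value derived from it under the vcore lock, so threads entering the guest always observe a consistent pair. The tail of the function, as shown (the PCR computation from `arch_compat` is not in the listing and is elided here too):

    /* Publish arch_compat and its derived PCR atomically with
     * respect to anyone else holding vc->lock. */
    spin_lock(&vc->lock);
    vc->arch_compat = arch_compat;
    vc->pcr = pcr;
    spin_unlock(&vc->lock);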
556 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) in vcore_stolen_time() argument
561 spin_lock_irqsave(&vc->stoltb_lock, flags); in vcore_stolen_time()
562 p = vc->stolen_tb; in vcore_stolen_time()
563 if (vc->vcore_state != VCORE_INACTIVE && in vcore_stolen_time()
564 vc->preempt_tb != TB_NIL) in vcore_stolen_time()
565 p += now - vc->preempt_tb; in vcore_stolen_time()
566 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in vcore_stolen_time()
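
Stolen time is the accumulated total plus, if a preemption interval is still open, the time since it began. Reconstruction (the declarations and the final `return p` are assumptions; everything else appears in the fragments):

    static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
    {
            u64 p;
            unsigned long flags;

            spin_lock_irqsave(&vc->stoltb_lock, flags);
            p = vc->stolen_tb;              /* completed intervals */
            if (vc->vcore_state != VCORE_INACTIVE &&
                vc->preempt_tb != TB_NIL)
                    p += now - vc->preempt_tb;      /* plus the open one */
            spin_unlock_irqrestore(&vc->stoltb_lock, flags);
            return p;
    }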
571 struct kvmppc_vcore *vc) in kvmppc_create_dtl_entry() argument
582 core_stolen = vcore_stolen_time(vc, now); in kvmppc_create_dtl_entry()
593 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
594 dt->timebase = cpu_to_be64(now + vc->tb_offset); in kvmppc_create_dtl_entry()
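
The dispatch trace log entry is shared with the guest, whose layout for this structure is big-endian, hence the `cpu_to_be*` conversions. The logical processor id is the physical thread (`vc->pcpu`) plus the vCPU's thread index within the core (`ptid`), and the timebase is shifted into the guest's timebase domain via `vc->tb_offset`. A sketch of the visible part (the DTL slot pointer and the remaining fields are assumptions):

    static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
                                        struct kvmppc_vcore *vc)
    {
            struct dtl_entry *dt = vcpu->arch.dtl_ptr;  /* slot assumed */
            u64 now = mftb();
            u64 core_stolen = vcore_stolen_time(vc, now);

            /* Guest-visible DTL fields are big-endian */
            dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
            dt->timebase = cpu_to_be64(now + vc->tb_offset);
            /* (stolen-time delta and remaining fields elided) */
    }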
999 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr() local
1003 spin_lock(&vc->lock); in kvmppc_set_lpcr()
1008 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { in kvmppc_set_lpcr()
1013 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
1034 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); in kvmppc_set_lpcr()
1035 spin_unlock(&vc->lock); in kvmppc_set_lpcr()
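
Only the guest-controllable LPCR bits are updated; the masked read-modify-write at 1034 leaves host-owned bits intact. When the ILE (interrupt little-endian) bit flips, the check at 1008 and the vcore test at 1013 suggest a walk over the VM's vCPUs, skipping those on other vcores, to keep each one's interrupt MSR in step. A hedged sketch of that shape (the `intr_msr`/`MSR_LE` update and the mask setup are assumptions based on the LPCR_ILE check shown):

    spin_lock(&vc->lock);
    if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
            struct kvm_vcpu *v;
            int i;

            /* Keep interrupt endianness in step with ILE (assumed) */
            kvm_for_each_vcpu(i, v, vcpu->kvm) {
                    if (v->arch.vcore != vc)
                            continue;
                    if (new_lpcr & LPCR_ILE)
                            v->arch.intr_msr |= MSR_LE;
                    else
                            v->arch.intr_msr &= ~MSR_LE;
            }
    }
    /* Update only the bits covered by mask (mask setup elided) */
    vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
    spin_unlock(&vc->lock);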
1741 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, in kvmppc_remove_runnable() argument
1750 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
1755 --vc->n_runnable; in kvmppc_remove_runnable()
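
Removing a runnable vCPU charges it the vcore stolen time accrued since its last snapshot, takes it off the runnable list, and drops the count. Sketch, assuming `stolen_logged` is the snapshot taken when the vCPU became runnable (consistent with 2574 below); the state change and `list_del` are assumptions:

    static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
                                       struct kvm_vcpu *vcpu)
    {
            u64 now = mftb();

            /* Charge the stolen time accrued since it was logged */
            vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
                    vcpu->arch.stolen_logged;
            vcpu->arch.state = KVMPPC_VCPU_NOT_RUNNABLE;    /* assumed */
            list_del(&vcpu->arch.run_list);                 /* assumed */
            --vc->n_runnable;
    }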
1804 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) in kvmppc_start_thread() argument
1808 struct kvmppc_vcore *mvc = vc->master_vcore; in kvmppc_start_thread()
1810 cpu = vc->pcpu; in kvmppc_start_thread()
1912 static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc) in kvmppc_vcore_preempt() argument
1916 vc->vcore_state = VCORE_PREEMPT; in kvmppc_vcore_preempt()
1917 vc->pcpu = smp_processor_id(); in kvmppc_vcore_preempt()
1918 if (vc->num_threads < threads_per_subcore) { in kvmppc_vcore_preempt()
1920 list_add_tail(&vc->preempt_list, &lp->list); in kvmppc_vcore_preempt()
1925 kvmppc_core_start_stolen(vc); in kvmppc_vcore_preempt()
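
When a vcore is preempted it is marked, remembers the physical CPU it ran on, and, if it does not fill a whole subcore, is queued on that CPU's `preempted_vcores` list so another vcore can later piggyback on the idle threads; stolen-time accounting then begins. Sketch (the per-CPU list type and its lock are assumptions; the state, pcpu, and list_add_tail steps are in the fragments):

    static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
    {
            struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);

            vc->vcore_state = VCORE_PREEMPT;
            vc->pcpu = smp_processor_id();
            if (vc->num_threads < threads_per_subcore) {
                    /* Partially filled subcore: advertise spare threads */
                    spin_lock(&lp->lock);           /* lock assumed */
                    list_add_tail(&vc->preempt_list, &lp->list);
                    spin_unlock(&lp->lock);
            }
            kvmppc_core_start_stolen(vc);
    }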
1928 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc) in kvmppc_vcore_end_preempt() argument
1932 kvmppc_core_end_stolen(vc); in kvmppc_vcore_end_preempt()
1933 if (!list_empty(&vc->preempt_list)) { in kvmppc_vcore_end_preempt()
1934 lp = &per_cpu(preempted_vcores, vc->pcpu); in kvmppc_vcore_end_preempt()
1936 list_del_init(&vc->preempt_list); in kvmppc_vcore_end_preempt()
1939 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_end_preempt()
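
The inverse path: close the stolen-time interval, unhook from the per-CPU preempted list if queued (note the lookup uses `vc->pcpu`, not the current CPU, since this can run elsewhere), then mark the vcore inactive. Sketch with the same per-CPU-lock assumption as above:

    static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
    {
            struct preempted_vcore_list *lp;

            kvmppc_core_end_stolen(vc);
            if (!list_empty(&vc->preempt_list)) {
                    /* Dequeue from the CPU we were preempted on */
                    lp = &per_cpu(preempted_vcores, vc->pcpu);
                    spin_lock(&lp->lock);           /* lock assumed */
                    list_del_init(&vc->preempt_list);
                    spin_unlock(&lp->lock);
            }
            vc->vcore_state = VCORE_INACTIVE;
    }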
1961 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc) in init_core_info() argument
1967 cip->max_subcore_threads = vc->num_threads; in init_core_info()
1968 cip->total_threads = vc->num_threads; in init_core_info()
1969 cip->subcore_threads[0] = vc->num_threads; in init_core_info()
1970 cip->subcore_vm[0] = vc->kvm; in init_core_info()
1973 list_add_tail(&vc->preempt_list, &cip->vcs[0]); in init_core_info()
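
`core_info` describes the physical core being assembled for a run: subcore 0 starts out holding the target vcore and its threads. Sketch (the zeroing, `n_subcores = 1`, and list-head initialisation are assumptions implied by the later subcore logic; note a zeroed `list_head` is not valid, so the INIT_LIST_HEAD loop is needed before any list_add):

    static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
    {
            int sub;

            memset(cip, 0, sizeof(*cip));
            cip->n_subcores = 1;                    /* assumed */
            cip->max_subcore_threads = vc->num_threads;
            cip->total_threads = vc->num_threads;
            cip->subcore_threads[0] = vc->num_threads;
            cip->subcore_vm[0] = vc->kvm;
            for (sub = 0; sub < MAX_SUBCORES; ++sub)    /* bound assumed */
                    INIT_LIST_HEAD(&cip->vcs[sub]);
            list_add_tail(&vc->preempt_list, &cip->vcs[0]);
    }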
1993 static void init_master_vcore(struct kvmppc_vcore *vc) in init_master_vcore() argument
1995 vc->master_vcore = vc; in init_master_vcore()
1996 vc->entry_exit_map = 0; in init_master_vcore()
1997 vc->in_guest = 0; in init_master_vcore()
1998 vc->napping_threads = 0; in init_master_vcore()
1999 vc->conferring_threads = 0; in init_master_vcore()
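
The master vcore is the one whose entry/exit bookkeeping the whole subcore shares. The fragments show the function in full, so only comments are added:

    static void init_master_vcore(struct kvmppc_vcore *vc)
    {
            vc->master_vcore = vc;          /* it is its own master */
            vc->entry_exit_map = 0;         /* no threads in or out yet */
            vc->in_guest = 0;
            vc->napping_threads = 0;
            vc->conferring_threads = 0;
    }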
2013 struct kvmppc_vcore *vc, *vcnext; in can_split_piggybacked_subcores() local
2022 vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore, in can_split_piggybacked_subcores()
2024 if (vc->num_threads > 2) in can_split_piggybacked_subcores()
2039 list_for_each_entry_safe(vc, vcnext, &cip->vcs[sub], preempt_list) { in can_split_piggybacked_subcores()
2041 list_del(&vc->preempt_list); in can_split_piggybacked_subcores()
2042 list_add_tail(&vc->preempt_list, &cip->vcs[new_sub]); in can_split_piggybacked_subcores()
2045 cip->subcore_vm[new_sub] = vc->kvm; in can_split_piggybacked_subcores()
2046 init_master_vcore(vc); in can_split_piggybacked_subcores()
2047 master_vc = vc; in can_split_piggybacked_subcores()
2050 vc->master_vcore = master_vc; in can_split_piggybacked_subcores()
2054 thr += vc->num_threads; in can_split_piggybacked_subcores()
2062 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) in can_dynamic_split() argument
2064 int n_threads = vc->num_threads; in can_dynamic_split()
2075 vc->num_threads <= 2) { in can_dynamic_split()
2092 cip->total_threads += vc->num_threads; in can_dynamic_split()
2093 cip->subcore_threads[sub] = vc->num_threads; in can_dynamic_split()
2094 cip->subcore_vm[sub] = vc->kvm; in can_dynamic_split()
2095 init_master_vcore(vc); in can_dynamic_split()
2096 list_del(&vc->preempt_list); in can_dynamic_split()
2097 list_add_tail(&vc->preempt_list, &cip->vcs[sub]); in can_dynamic_split()
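
When a preempted vcore can become a subcore of its own, the accept path accounts its threads against the core, records its VM, promotes it to master of the new subcore, and moves it from the preempted list onto the core's subcore list. A condensed sketch of that path from the fragments (the subcore index bookkeeping is an assumption):

    /* Accept vc as a new subcore of the core being assembled */
    sub = cip->n_subcores++;                /* index assumed */
    cip->total_threads += vc->num_threads;
    cip->subcore_threads[sub] = vc->num_threads;
    cip->subcore_vm[sub] = vc->kvm;
    init_master_vcore(vc);                  /* vc heads the new subcore */
    list_del(&vc->preempt_list);            /* off the preempted list */
    list_add_tail(&vc->preempt_list, &cip->vcs[sub]);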
2105 struct kvmppc_vcore *vc; in can_piggyback_subcore() local
2108 vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore, in can_piggyback_subcore()
2112 if (pvc->kvm != vc->kvm || in can_piggyback_subcore()
2113 pvc->tb_offset != vc->tb_offset || in can_piggyback_subcore()
2114 pvc->pcr != vc->pcr || in can_piggyback_subcore()
2115 pvc->lpcr != vc->lpcr) in can_piggyback_subcore()
2120 (vc->num_threads > 1 || pvc->num_threads > 1)) in can_piggyback_subcore()
2132 pvc->master_vcore = vc; in can_piggyback_subcore()
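
A preempted vcore (`pvc`) may ride along in an existing subcore only if the hardware-shared state would be identical for both: same VM (hence same partition), same timebase offset, same PCR, and same LPCR, since these apply to the whole subcore while in the guest; per 2120, multi-threaded vcores cannot be mixed either. The check and the success assignment, as shown (the return value is an assumption):

    /* All subcore-wide guest state must match to share threads */
    if (pvc->kvm != vc->kvm ||
        pvc->tb_offset != vc->tb_offset ||
        pvc->pcr != vc->pcr ||
        pvc->lpcr != vc->lpcr)
            return false;                   /* return assumed */
    /* ... capacity and thread-count checks elided ... */

    /* On success the piggybacker defers to the resident vcore */
    pvc->master_vcore = vc;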
2161 static void prepare_threads(struct kvmppc_vcore *vc) in prepare_threads() argument
2165 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in prepare_threads()
2175 kvmppc_remove_runnable(vc, vcpu); in prepare_threads()
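
Before entry, vCPUs that cannot go into the guest are pulled off the runnable list so the run does not stall on them. The loop shape and the remove call are shown; the specific disqualifying conditions and the wake-up target are assumptions:

    static void prepare_threads(struct kvmppc_vcore *vc)
    {
            struct kvm_vcpu *vcpu, *vnext;

            list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
                                     arch.run_list) {
                    if (signal_pending(vcpu->arch.run_task))   /* assumed */
                            vcpu->arch.ret = -EINTR;
                    else if (vcpu->arch.vpa.update_pending ||  /* assumed */
                             vcpu->arch.slb_shadow.update_pending ||
                             vcpu->arch.dtl.update_pending)
                            vcpu->arch.ret = RESUME_GUEST;
                    else
                            continue;       /* this one can enter the guest */
                    kvmppc_remove_runnable(vc, vcpu);
                    wake_up(&vcpu->arch.cpu_run);   /* wq name assumed */
            }
    }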
2211 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) in post_guest_process() argument
2218 spin_lock(&vc->lock); in post_guest_process()
2220 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in post_guest_process()
2245 kvmppc_remove_runnable(vc, vcpu); in post_guest_process()
2249 list_del_init(&vc->preempt_list); in post_guest_process()
2252 kvmppc_vcore_preempt(vc); in post_guest_process()
2253 } else if (vc->runner) { in post_guest_process()
2254 vc->vcore_state = VCORE_PREEMPT; in post_guest_process()
2255 kvmppc_core_start_stolen(vc); in post_guest_process()
2257 vc->vcore_state = VCORE_INACTIVE; in post_guest_process()
2259 if (vc->n_runnable > 0 && vc->runner == NULL) { in post_guest_process()
2261 vcpu = list_first_entry(&vc->runnable_threads, in post_guest_process()
2266 spin_unlock(&vc->lock); in post_guest_process()
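
The tail of post_guest_process decides what state the vcore is left in and, if runnable vCPUs remain but no task owns the runner role, wakes one so it can take over. Sketch of the disposition visible in 2252-2266 (the `still_running` count and the wake-queue name are assumptions):

    if (still_running > 0) {                /* count assumed */
            kvmppc_vcore_preempt(vc);
    } else if (vc->runner) {
            vc->vcore_state = VCORE_PREEMPT;
            kvmppc_core_start_stolen(vc);
    } else {
            vc->vcore_state = VCORE_INACTIVE;
    }
    if (vc->n_runnable > 0 && vc->runner == NULL) {
            /* Hand off the runner role: wake the first runnable vcpu */
            vcpu = list_first_entry(&vc->runnable_threads,
                                    struct kvm_vcpu, arch.run_list);
            wake_up(&vcpu->arch.cpu_run);   /* wq name assumed */
    }
    spin_unlock(&vc->lock);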
2273 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) in kvmppc_run_core() argument
2292 prepare_threads(vc); in kvmppc_run_core()
2295 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
2301 init_master_vcore(vc); in kvmppc_run_core()
2302 vc->preempt_tb = TB_NIL; in kvmppc_run_core()
2310 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { in kvmppc_run_core()
2311 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in kvmppc_run_core()
2314 kvmppc_remove_runnable(vc, vcpu); in kvmppc_run_core()
2324 init_core_info(&core_info, vc); in kvmppc_run_core()
2329 if (vc->num_threads < target_threads) in kvmppc_run_core()
2426 vc->vcore_state = VCORE_RUNNING; in kvmppc_run_core()
2429 trace_kvmppc_run_core(vc, 0); in kvmppc_run_core()
2437 srcu_idx = srcu_read_lock(&vc->kvm->srcu); in kvmppc_run_core()
2441 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); in kvmppc_run_core()
2443 spin_lock(&vc->lock); in kvmppc_run_core()
2445 vc->vcore_state = VCORE_EXITING; in kvmppc_run_core()
2477 spin_unlock(&vc->lock); in kvmppc_run_core()
2486 post_guest_process(pvc, pvc == vc); in kvmppc_run_core()
2488 spin_lock(&vc->lock); in kvmppc_run_core()
2492 vc->vcore_state = VCORE_INACTIVE; in kvmppc_run_core()
2493 trace_kvmppc_run_core(vc, 1); in kvmppc_run_core()
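
Taken together, the kvmppc_run_core fragments trace the vcore scheduling state machine: RUNNING is set before guest entry (with the VM's SRCU read lock held around it, 2437-2441), EXITING while threads drain, and INACTIVE once the run winds down; PREEMPT, PIGGYBACK, and SLEEPING appear in the surrounding functions. An illustrative summary only, with sketch names to avoid clashing with the real definitions; the exact values are assumptions, and only INACTIVE being lowest is implied by the ">= VCORE_SLEEPING" tests at 199/216:

    enum /* sketch of vc->vcore_state */ {
            SK_VCORE_INACTIVE,      /* at rest; also the post-run state */
            SK_VCORE_SLEEPING,      /* all runnable vcpus ceded; on vc->wq */
            SK_VCORE_PREEMPT,       /* runner scheduled out; time stolen */
            SK_VCORE_PIGGYBACK,     /* riding in another vcore's subcore */
            SK_VCORE_RUNNING,       /* threads entering/in the guest */
            SK_VCORE_EXITING,       /* between guest exit and wind-down */
    };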
2500 static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc, in kvmppc_wait_for_exec() argument
2507 spin_unlock(&vc->lock); in kvmppc_wait_for_exec()
2509 spin_lock(&vc->lock); in kvmppc_wait_for_exec()
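
A vCPU whose vcore is being run by another task parks itself here using the classic prepare-to-wait pattern: queue, drop the lock, schedule, retake the lock. Sketch (only the unlock/schedule/lock sequence is in the fragments; the per-vCPU waitqueue and the runnable re-check are assumptions consistent with the wake-ups above):

    static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
                                     struct kvm_vcpu *vcpu, int wait_state)
    {
            DEFINE_WAIT(wait);

            prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
            if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { /* assumed */
                    spin_unlock(&vc->lock);  /* never sleep holding it */
                    schedule();
                    spin_lock(&vc->lock);
            }
            finish_wait(&vcpu->arch.cpu_run, &wait);
    }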
2518 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) in kvmppc_vcore_blocked() argument
2525 prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); in kvmppc_vcore_blocked()
2531 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_vcore_blocked()
2539 finish_wait(&vc->wq, &wait); in kvmppc_vcore_blocked()
2543 vc->vcore_state = VCORE_SLEEPING; in kvmppc_vcore_blocked()
2544 trace_kvmppc_vcore_blocked(vc, 0); in kvmppc_vcore_blocked()
2545 spin_unlock(&vc->lock); in kvmppc_vcore_blocked()
2547 finish_wait(&vc->wq, &wait); in kvmppc_vcore_blocked()
2548 spin_lock(&vc->lock); in kvmppc_vcore_blocked()
2549 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_blocked()
2550 trace_kvmppc_vcore_blocked(vc, 1); in kvmppc_vcore_blocked()
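
When every runnable vCPU has ceded, the whole vcore sleeps on `vc->wq`. Re-scanning the runnable list after prepare_to_wait (2525/2531) closes the race with a vCPU receiving an exception or un-ceding between the caller's decision and the sleep. Reconstruction (the wake-up condition inside the loop and the `do_sleep` plumbing are assumptions; the waits, state changes, and tracepoints are all in the fragments):

    static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
    {
            struct kvm_vcpu *vcpu;
            int do_sleep = 1;
            DEFINE_WAIT(wait);

            prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);

            /* Re-check after queueing: is anyone no longer idle? */
            list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                    if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
                            do_sleep = 0;   /* condition assumed */
                            break;
                    }
            }
            if (!do_sleep) {
                    finish_wait(&vc->wq, &wait);
                    return;
            }

            vc->vcore_state = VCORE_SLEEPING;
            trace_kvmppc_vcore_blocked(vc, 0);
            spin_unlock(&vc->lock);         /* sleep without the lock */
            schedule();
            finish_wait(&vc->wq, &wait);
            spin_lock(&vc->lock);
            vc->vcore_state = VCORE_INACTIVE;
            trace_kvmppc_vcore_blocked(vc, 1);
    }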
2556 struct kvmppc_vcore *vc; in kvmppc_run_vcpu() local
2569 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
2570 spin_lock(&vc->lock); in kvmppc_run_vcpu()
2574 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
2577 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); in kvmppc_run_vcpu()
2578 ++vc->n_runnable; in kvmppc_run_vcpu()
2586 if (vc->vcore_state == VCORE_PIGGYBACK) { in kvmppc_run_vcpu()
2587 struct kvmppc_vcore *mvc = vc->master_vcore; in kvmppc_run_vcpu()
2591 kvmppc_create_dtl_entry(vcpu, vc); in kvmppc_run_vcpu()
2592 kvmppc_start_thread(vcpu, vc); in kvmppc_run_vcpu()
2597 } else if (vc->vcore_state == VCORE_RUNNING && in kvmppc_run_vcpu()
2598 !VCORE_IS_EXITING(vc)) { in kvmppc_run_vcpu()
2599 kvmppc_create_dtl_entry(vcpu, vc); in kvmppc_run_vcpu()
2600 kvmppc_start_thread(vcpu, vc); in kvmppc_run_vcpu()
2602 } else if (vc->vcore_state == VCORE_SLEEPING) { in kvmppc_run_vcpu()
2603 wake_up(&vc->wq); in kvmppc_run_vcpu()
2610 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) in kvmppc_run_vcpu()
2611 kvmppc_vcore_end_preempt(vc); in kvmppc_run_vcpu()
2613 if (vc->vcore_state != VCORE_INACTIVE) { in kvmppc_run_vcpu()
2614 kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE); in kvmppc_run_vcpu()
2617 list_for_each_entry_safe(v, vn, &vc->runnable_threads, in kvmppc_run_vcpu()
2621 kvmppc_remove_runnable(vc, v); in kvmppc_run_vcpu()
2628 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
2631 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { in kvmppc_run_vcpu()
2637 vc->runner = vcpu; in kvmppc_run_vcpu()
2638 if (n_ceded == vc->n_runnable) { in kvmppc_run_vcpu()
2639 kvmppc_vcore_blocked(vc); in kvmppc_run_vcpu()
2641 kvmppc_vcore_preempt(vc); in kvmppc_run_vcpu()
2643 cond_resched_lock(&vc->lock); in kvmppc_run_vcpu()
2644 if (vc->vcore_state == VCORE_PREEMPT) in kvmppc_run_vcpu()
2645 kvmppc_vcore_end_preempt(vc); in kvmppc_run_vcpu()
2647 kvmppc_run_core(vc); in kvmppc_run_vcpu()
2649 vc->runner = NULL; in kvmppc_run_vcpu()
2653 (vc->vcore_state == VCORE_RUNNING || in kvmppc_run_vcpu()
2654 vc->vcore_state == VCORE_EXITING || in kvmppc_run_vcpu()
2655 vc->vcore_state == VCORE_PIGGYBACK)) in kvmppc_run_vcpu()
2656 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); in kvmppc_run_vcpu()
2658 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) in kvmppc_run_vcpu()
2659 kvmppc_vcore_end_preempt(vc); in kvmppc_run_vcpu()
2662 kvmppc_remove_runnable(vc, vcpu); in kvmppc_run_vcpu()
2668 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { in kvmppc_run_vcpu()
2670 v = list_first_entry(&vc->runnable_threads, in kvmppc_run_vcpu()
2676 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
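
The heart of kvmppc_run_vcpu is the runner election visible in 2631-2649: count how many runnable vCPUs have ceded; if all have, block the vcore; if the runner's task should yield, preempt the vcore and reschedule; otherwise run the core. A condensed sketch of that decision (the guard on pending exceptions and the `need_resched()` trigger are assumptions; the three calls and the ceded count are in the fragments):

    struct kvm_vcpu *v;
    int n_ceded = 0;

    list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
            if (!v->arch.pending_exceptions)        /* guard assumed */
                    n_ceded += v->arch.ceded;

    vc->runner = vcpu;
    if (n_ceded == vc->n_runnable) {
            kvmppc_vcore_blocked(vc);       /* all idle: sleep the core */
    } else if (need_resched()) {            /* trigger assumed */
            kvmppc_vcore_preempt(vc);
            cond_resched_lock(&vc->lock);   /* drops vc->lock to yield */
            if (vc->vcore_state == VCORE_PREEMPT)
                    kvmppc_vcore_end_preempt(vc);
    } else {
            kvmppc_run_core(vc);
    }
    vc->runner = NULL;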
2882 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr() local
2883 if (!vc) in kvmppc_update_lpcr()
2885 spin_lock(&vc->lock); in kvmppc_update_lpcr()
2886 vc->lpcr = (vc->lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
2887 spin_unlock(&vc->lock); in kvmppc_update_lpcr()
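
The VM-wide variant applies the same masked read-modify-write to every existing vcore, taking each vcore's lock in turn. Sketch (the NULL-slot skip and the update are in the fragments; the loop bound name is an assumption, and vcores are allocated lazily, hence the NULL check):

    void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
                            unsigned long mask)
    {
            long i;

            for (i = 0; i < KVM_MAX_VCORES; ++i) {  /* bound assumed */
                    struct kvmppc_vcore *vc = kvm->arch.vcores[i];

                    if (!vc)
                            continue;       /* slot never allocated */
                    spin_lock(&vc->lock);
                    vc->lpcr = (vc->lpcr & ~mask) | lpcr;
                    spin_unlock(&vc->lock);
            }
    }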