Lines matching refs:spu — each entry gives the source line number, the matching line, and the enclosing function ("local" and "argument" mark declarations). All references below are to struct spu in the spufs scheduler for the Cell Broadband Engine (the per-node cbe_spu_info SPU lists).
154 node = ctx->spu->node; in spu_update_sched_info()
201 struct spu *spu; in do_notify_spus_active() local
204 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in do_notify_spus_active()
205 if (spu->alloc_state != SPU_FREE) { in do_notify_spus_active()
206 struct spu_context *ctx = spu->ctx; in do_notify_spus_active()
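
The do_notify_spus_active() lines above walk every node's SPU list and skip SPUs whose alloc_state is SPU_FREE, acting only on the context bound to each busy SPU. A minimal userspace model of that walk follows; arrays stand in for the kernel's per-node cbe_list, and the notify step is a placeholder, not the kernel's API:

#include <stdio.h>

#define MAX_NUMNODES  2
#define SPUS_PER_NODE 4

enum { SPU_FREE, SPU_USED };

struct spu_context { int id; };
struct spu { int alloc_state; struct spu_context *ctx; };

static struct spu spus[MAX_NUMNODES][SPUS_PER_NODE];

/* Visit every spu on every node; only busy spus carry a context. */
static void do_notify_spus_active_model(void)
{
        for (int node = 0; node < MAX_NUMNODES; node++)
                for (int i = 0; i < SPUS_PER_NODE; i++) {
                        struct spu *spu = &spus[node][i];

                        if (spu->alloc_state != SPU_FREE)
                                printf("notify ctx %d\n", spu->ctx->id);
                }
}

int main(void)
{
        struct spu_context c = { 7 };

        spus[1][2] = (struct spu){ SPU_USED, &c };
        do_notify_spus_active_model();
        return 0;
}
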
222 static void spu_bind_context(struct spu *spu, struct spu_context *ctx) in spu_bind_context() argument
224 spu_context_trace(spu_bind_context__enter, ctx, spu); in spu_bind_context()
229 atomic_inc(&cbe_spu_info[spu->node].reserved_spus); in spu_bind_context()
231 ctx->stats.slb_flt_base = spu->stats.slb_flt; in spu_bind_context()
232 ctx->stats.class2_intr_base = spu->stats.class2_intr; in spu_bind_context()
234 spu_associate_mm(spu, ctx->owner); in spu_bind_context()
236 spin_lock_irq(&spu->register_lock); in spu_bind_context()
237 spu->ctx = ctx; in spu_bind_context()
238 spu->flags = 0; in spu_bind_context()
239 ctx->spu = spu; in spu_bind_context()
241 spu->pid = current->pid; in spu_bind_context()
242 spu->tgid = current->tgid; in spu_bind_context()
243 spu->ibox_callback = spufs_ibox_callback; in spu_bind_context()
244 spu->wbox_callback = spufs_wbox_callback; in spu_bind_context()
245 spu->stop_callback = spufs_stop_callback; in spu_bind_context()
246 spu->mfc_callback = spufs_mfc_callback; in spu_bind_context()
247 spin_unlock_irq(&spu->register_lock); in spu_bind_context()
251 spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); in spu_bind_context()
252 spu_restore(&ctx->csa, spu); in spu_bind_context()
253 spu->timestamp = jiffies; in spu_bind_context()
254 spu_switch_notify(spu, ctx); in spu_bind_context()
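
The spu_bind_context() excerpt shows a strict ordering: the fault/interrupt baselines are snapshotted and the mm attached first, then the ctx pointer and the ibox/wbox/stop/mfc callbacks are published under spu->register_lock with interrupts off, and only afterwards is the saved context restored and the switch notification sent. A compact userspace sketch of that publication step, with a pthread mutex standing in for spin_lock_irq() and all types simplified (this is a model, not the kernel's code):

#include <pthread.h>

struct spu_context;

struct spu {
        pthread_mutex_t register_lock;          /* models spin_lock_irq() */
        struct spu_context *ctx;
        unsigned long flags, pid, tgid;
        void (*ibox_callback)(struct spu *);
        void (*wbox_callback)(struct spu *);
        unsigned long slb_flt, class2_intr;
};

struct spu_context {
        struct spu *spu;
        unsigned long slb_flt_base, class2_intr_base;
};

static void ibox_cb(struct spu *s) { (void)s; }  /* placeholder handlers */
static void wbox_cb(struct spu *s) { (void)s; }

static void bind_context_model(struct spu *spu, struct spu_context *ctx,
                               unsigned long pid, unsigned long tgid)
{
        /* 1: snapshot baselines so unbind charges only this run */
        ctx->slb_flt_base = spu->slb_flt;
        ctx->class2_intr_base = spu->class2_intr;

        /* 2: publish the binding and the callbacks atomically */
        pthread_mutex_lock(&spu->register_lock);
        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        spu->pid = pid;
        spu->tgid = tgid;
        spu->ibox_callback = ibox_cb;
        spu->wbox_callback = wbox_cb;
        pthread_mutex_unlock(&spu->register_lock);

        /* 3: only now restore saved state and notify (elided here) */
}

int main(void)
{
        struct spu s = { .register_lock = PTHREAD_MUTEX_INITIALIZER };
        struct spu_context c = { 0 };

        bind_context_model(&s, &c, 42, 42);
        return s.ctx == &c ? 0 : 1;
}
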
263 static inline int sched_spu(struct spu *spu) in sched_spu() argument
265 BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex)); in sched_spu()
267 return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED)); in sched_spu()
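
sched_spu() is the schedulability predicate used by the affinity code: a SPU qualifies if it is idle, or if its current context was not created with SPU_CREATE_NOSCHED; the BUG_ON documents that the caller must hold the node's list_mutex. A self-contained restatement (the flag value here is illustrative, not the kernel's):

#include <assert.h>

#define SPU_CREATE_NOSCHED 0x0004       /* illustrative value only */

struct spu_context { unsigned int flags; };
struct spu { struct spu_context *ctx; };

/* Schedulable: idle, or running a context that may be preempted. */
static int sched_spu_model(const struct spu *spu)
{
        return !spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED);
}

int main(void)
{
        struct spu_context nosched = { SPU_CREATE_NOSCHED };
        struct spu idle = { 0 }, pinned = { &nosched };

        assert(sched_spu_model(&idle));
        assert(!sched_spu_model(&pinned));
        return 0;
}
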
304 static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, in aff_ref_location()
307 struct spu *spu; in aff_ref_location() local
333 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in aff_ref_location()
334 if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset in aff_ref_location()
335 && spu->ctx->gang->aff_ref_spu) in aff_ref_location()
336 available_spus -= spu->ctx->gang->contexts; in aff_ref_location()
344 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in aff_ref_location()
345 if ((!mem_aff || spu->has_mem_affinity) && in aff_ref_location()
346 sched_spu(spu)) { in aff_ref_location()
348 return spu; in aff_ref_location()
360 struct spu *tmp; in aff_set_ref_point_location()
380 static struct spu *ctx_location(struct spu *ref, int offset, int node) in ctx_location()
382 struct spu *spu; in ctx_location() local
384 spu = NULL; in ctx_location()
386 list_for_each_entry(spu, ref->aff_list.prev, aff_list) { in ctx_location()
387 BUG_ON(spu->node != node); in ctx_location()
390 if (sched_spu(spu)) in ctx_location()
394 list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) { in ctx_location()
395 BUG_ON(spu->node != node); in ctx_location()
398 if (sched_spu(spu)) in ctx_location()
403 return spu; in ctx_location()
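
ctx_location() walks the gang's affinity list starting from the reference SPU: forward for a non-negative offset (iterating from ref->aff_list.prev so the walk begins at ref itself), backward for a negative one, counting off the offset only across SPUs that pass sched_spu(). A hedged array-based model of that walk; the kernel's aff_list is circular per node, so returning NULL at the array end is a simplification:

#include <stddef.h>
#include <stdio.h>

struct spu { int node; int schedulable; };

/* Walk `offset` positions from the reference slot, counting only
 * schedulable spus along the way. */
static struct spu *ctx_location_model(struct spu *aff, int n,
                                      int ref, int offset)
{
        if (offset >= 0) {
                for (int i = ref; i < n; i++) {
                        if (offset == 0)
                                return &aff[i];
                        if (aff[i].schedulable)
                                offset--;
                }
        } else {
                for (int i = ref; i >= 0; i--) {
                        if (offset == 0)
                                return &aff[i];
                        if (aff[i].schedulable)
                                offset++;
                }
        }
        return NULL;
}

int main(void)
{
        struct spu aff[4] = { {0, 1}, {0, 1}, {0, 1}, {0, 1} };
        struct spu *s = ctx_location_model(aff, 4, 0, 2);

        printf("picked slot %td\n", s ? s - aff : (ptrdiff_t)-1);
        return 0;
}
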
436 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) in spu_unbind_context() argument
440 spu_context_trace(spu_unbind_context__enter, ctx, spu); in spu_unbind_context()
444 if (spu->ctx->flags & SPU_CREATE_NOSCHED) in spu_unbind_context()
445 atomic_dec(&cbe_spu_info[spu->node].reserved_spus); in spu_unbind_context()
455 spu_switch_notify(spu, NULL); in spu_unbind_context()
457 spu_save(&ctx->csa, spu); in spu_unbind_context()
458 spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0); in spu_unbind_context()
460 spin_lock_irq(&spu->register_lock); in spu_unbind_context()
461 spu->timestamp = jiffies; in spu_unbind_context()
463 spu->ibox_callback = NULL; in spu_unbind_context()
464 spu->wbox_callback = NULL; in spu_unbind_context()
465 spu->stop_callback = NULL; in spu_unbind_context()
466 spu->mfc_callback = NULL; in spu_unbind_context()
467 spu->pid = 0; in spu_unbind_context()
468 spu->tgid = 0; in spu_unbind_context()
470 spu->flags = 0; in spu_unbind_context()
471 spu->ctx = NULL; in spu_unbind_context()
472 spin_unlock_irq(&spu->register_lock); in spu_unbind_context()
474 spu_associate_mm(spu, NULL); in spu_unbind_context()
477 (spu->stats.slb_flt - ctx->stats.slb_flt_base); in spu_unbind_context()
479 (spu->stats.class2_intr - ctx->stats.class2_intr_base); in spu_unbind_context()
483 ctx->spu = NULL; in spu_unbind_context()
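
spu_unbind_context() tears down in roughly the reverse order of bind: notify and save the context state, clear the callbacks and pid/tgid under register_lock, detach the mm, and only then fold the per-run deltas into the context's totals, using the baselines spu_bind_context() snapshotted. A minimal model of just that stats handover (simplified types):

#include <assert.h>

struct spu { unsigned long slb_flt, class2_intr; };

struct spu_context {
        unsigned long slb_flt, class2_intr;             /* lifetime totals */
        unsigned long slb_flt_base, class2_intr_base;   /* set at bind */
};

/* Charge the context only for what the spu accumulated since bind. */
static void unbind_stats_model(struct spu *spu, struct spu_context *ctx)
{
        ctx->slb_flt += spu->slb_flt - ctx->slb_flt_base;
        ctx->class2_intr += spu->class2_intr - ctx->class2_intr_base;
}

int main(void)
{
        struct spu s = { .slb_flt = 110, .class2_intr = 15 };
        struct spu_context c = { .slb_flt_base = 100, .class2_intr_base = 10 };

        unbind_stats_model(&s, &c);
        assert(c.slb_flt == 10 && c.class2_intr == 5);
        return 0;
}
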
571 static struct spu *spu_get_idle(struct spu_context *ctx) in spu_get_idle()
573 struct spu *spu, *aff_ref_spu; in spu_get_idle() local
587 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); in spu_get_idle()
588 if (spu && spu->alloc_state == SPU_FREE) in spu_get_idle()
604 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in spu_get_idle()
605 if (spu->alloc_state == SPU_FREE) in spu_get_idle()
616 spu->alloc_state = SPU_USED; in spu_get_idle()
618 spu_context_trace(spu_get_idle__found, ctx, spu); in spu_get_idle()
619 spu_init_channels(spu); in spu_get_idle()
620 return spu; in spu_get_idle()
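
In spu_get_idle(), an affinity placement proposed by ctx_location() is taken only if that slot is still SPU_FREE; otherwise the allocator scans each node for the first free SPU, claims it by setting SPU_USED while the list is still held, and initializes its channels. A model of the fallback scan (array instead of the kernel list, locking elided):

#include <stddef.h>

enum { SPU_FREE, SPU_USED };

struct spu { int alloc_state; };

/* First free spu on the node wins and is claimed immediately,
 * before the scan result can go stale. */
static struct spu *get_idle_model(struct spu *node_spus, int n)
{
        for (int i = 0; i < n; i++)
                if (node_spus[i].alloc_state == SPU_FREE) {
                        node_spus[i].alloc_state = SPU_USED;
                        return &node_spus[i];
                }
        return NULL;
}

int main(void)
{
        struct spu node0[3] = { { SPU_USED }, { SPU_FREE }, { SPU_FREE } };
        struct spu *s = get_idle_model(node0, 3);

        return (s == &node0[1] && s->alloc_state == SPU_USED) ? 0 : 1;
}
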
629 static struct spu *find_victim(struct spu_context *ctx) in find_victim()
632 struct spu *spu; in find_victim() local
652 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in find_victim()
653 struct spu_context *tmp = spu->ctx; in find_victim()
658 victim = spu->ctx; in find_victim()
682 spu = victim->spu; in find_victim()
683 if (!spu || victim->prio <= ctx->prio) { in find_victim()
695 spu_context_trace(__spu_deactivate__unload, ctx, spu); in find_victim()
699 spu_unbind_context(spu, victim); in find_victim()
703 spu->stats.invol_ctx_switch++; in find_victim()
710 return spu; in find_victim()
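
find_victim() runs only when no SPU is idle and the waiter has real-time priority: it scans the running contexts for the lowest-priority one, gives up unless the victim is strictly worse than the new context (victim->prio <= ctx->prio aborts, lower value meaning higher priority), and charges the preempted SPU an involuntary context switch. A sketch of the victim selection under those assumptions (the kernel's NOSCHED and locking checks are elided):

#include <stddef.h>

struct spu_context { int prio; };       /* lower value = higher priority */

/* Pick the lowest-priority running context, but only preempt when it
 * is strictly worse than `prio` (victim->prio <= prio gives up). */
static struct spu_context *find_victim_model(struct spu_context **running,
                                             int n, int prio)
{
        struct spu_context *victim = NULL;

        for (int i = 0; i < n; i++) {
                struct spu_context *tmp = running[i];

                if (tmp && (!victim || tmp->prio > victim->prio))
                        victim = tmp;
        }
        if (victim && victim->prio <= prio)
                return NULL;
        return victim;
}

int main(void)
{
        struct spu_context a = { 120 }, b = { 130 };
        struct spu_context *running[] = { &a, NULL, &b };

        /* b has the numerically largest prio, and 130 > 100: preempt b */
        return find_victim_model(running, 3, 100) == &b ? 0 : 1;
}
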
717 static void __spu_schedule(struct spu *spu, struct spu_context *ctx) in __spu_schedule() argument
719 int node = spu->node; in __spu_schedule()
725 if (spu->ctx == NULL) { in __spu_schedule()
726 spu_bind_context(spu, ctx); in __spu_schedule()
728 spu->alloc_state = SPU_USED; in __spu_schedule()
739 static void spu_schedule(struct spu *spu, struct spu_context *ctx) in spu_schedule() argument
745 __spu_schedule(spu, ctx); in spu_schedule()
762 static void spu_unschedule(struct spu *spu, struct spu_context *ctx, in spu_unschedule() argument
765 int node = spu->node; in spu_unschedule()
770 spu->alloc_state = SPU_FREE; in spu_unschedule()
771 spu_unbind_context(spu, ctx); in spu_unschedule()
773 spu->stats.invol_ctx_switch++; in spu_unschedule()
788 struct spu *spu; in spu_activate() local
796 if (ctx->spu) in spu_activate()
803 spu = spu_get_idle(ctx); in spu_activate()
808 if (!spu && rt_prio(ctx->prio)) in spu_activate()
809 spu = find_victim(ctx); in spu_activate()
810 if (spu) { in spu_activate()
814 __spu_schedule(spu, ctx); in spu_activate()
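
spu_activate() encodes the placement policy visible above: a context that already owns an SPU is left alone; otherwise try spu_get_idle(), and only if that fails and the context has real-time priority (rt_prio(ctx->prio)) fall back to find_victim(). A minimal restatement with stub helpers (the helper names and the rt flag here are stand-ins):

#include <stddef.h>

struct spu;
struct spu_context { struct spu *spu; int rt; };

static struct spu *get_idle(void)     { return NULL; }  /* stub: none idle */
static struct spu *steal_victim(void) { return NULL; }  /* stub: no victim */

/* Placement policy: keep the current spu, else take an idle one,
 * else (RT contexts only) preempt a victim. */
static struct spu *activate_model(struct spu_context *ctx)
{
        struct spu *spu;

        if (ctx->spu)
                return ctx->spu;

        spu = get_idle();
        if (!spu && ctx->rt)
                spu = steal_victim();
        return spu;             /* NULL: caller stays on the runqueue */
}

int main(void)
{
        struct spu_context ctx = { NULL, 1 };

        return activate_model(&ctx) == NULL ? 0 : 1;
}
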
864 struct spu *spu = ctx->spu; in __spu_deactivate() local
867 if (spu) { in __spu_deactivate()
868 new = grab_runnable_context(max_prio, spu->node); in __spu_deactivate()
870 spu_unschedule(spu, ctx, new == NULL); in __spu_deactivate()
876 spu_schedule(spu, new); in __spu_deactivate()
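
__spu_deactivate() never lets a freed SPU sit idle if it can help it: after the unbind, grab_runnable_context(max_prio, spu->node) picks the best waiter below max_prio on the same node and the SPU is handed straight over. A skeletal model of that handoff (the stub runqueue always comes up empty; bind/unbind are elided):

#include <stddef.h>

struct spu { int node; };
struct spu_context { int prio; };

/* Stub for grab_runnable_context(): best waiter below max_prio on
 * this node, NULL when the runqueue is empty. */
static struct spu_context *grab_runnable(int max_prio, int node)
{
        (void)max_prio;
        (void)node;
        return NULL;
}

/* Free the spu, then hand it straight to the next runnable context
 * instead of letting it idle; returns 1 if a handoff happened. */
static int deactivate_model(struct spu *spu, struct spu_context *ctx,
                            int max_prio)
{
        struct spu_context *new_ctx = grab_runnable(max_prio, spu->node);

        (void)ctx;              /* unbind of ctx elided */
        if (new_ctx) {
                /* schedule new_ctx onto the freed spu (elided) */
                return 1;
        }
        return 0;
}

int main(void)
{
        struct spu s = { 0 };
        struct spu_context c = { 120 };

        return deactivate_model(&s, &c, 139) == 0 ? 0 : 1;
}
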
922 struct spu *spu = NULL; in spusched_tick() local
937 spu = ctx->spu; in spusched_tick()
939 spu_context_trace(spusched_tick__preempt, ctx, spu); in spusched_tick()
941 new = grab_runnable_context(ctx->prio + 1, spu->node); in spusched_tick()
943 spu_unschedule(spu, ctx, 0); in spusched_tick()
955 spu_schedule(spu, new); in spusched_tick()
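
In spusched_tick(), an expired time slice alone is not enough to preempt: grab_runnable_context(ctx->prio + 1, spu->node) must find a waiter of equal or better priority, which also yields round-robin among contexts at the same priority level. A model of that decision; the time_slice bookkeeping here is an assumption, since it is not part of the excerpt:

#include <assert.h>

struct spu_context { int prio; int time_slice; };

/* A spent slice preempts only if a waiter of equal or better priority
 * exists; the kernel asks grab_runnable_context(ctx->prio + 1, node),
 * so an equal-priority waiter wins too (round-robin within a level). */
static int tick_should_preempt(struct spu_context *ctx,
                               int have_waiter, int best_waiter_prio)
{
        if (--ctx->time_slice > 0)
                return 0;
        return have_waiter && best_waiter_prio < ctx->prio + 1;
}

int main(void)
{
        struct spu_context ctx = { .prio = 120, .time_slice = 1 };

        assert(tick_should_preempt(&ctx, 1, 120) == 1);
        return 0;
}
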
1008 struct spu *spu; in spusched_thread() local
1018 list_for_each_entry(spu, &cbe_spu_info[node].spus, in spusched_thread()
1020 struct spu_context *ctx = spu->ctx; in spusched_thread()
1042 struct spu *spu; in spuctx_switch_state() local
1052 spu = ctx->spu; in spuctx_switch_state()
1060 if (spu) { in spuctx_switch_state()
1062 spu->stats.times[old_state] += delta; in spuctx_switch_state()
1063 spu->stats.util_state = new_state; in spuctx_switch_state()
1064 spu->stats.tstamp = curtime; in spuctx_switch_state()
1065 node = spu->node; in spuctx_switch_state()
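
spuctx_switch_state() does bucketed time accounting: the interval since the last timestamp is charged to the state being left, on the context and, when one is bound, on the SPU as well, before state and timestamp are advanced. A self-contained model of the SPU-side update (the enum names here are stand-ins, not the kernel's):

#include <assert.h>

enum util_state { UTIL_USER, UTIL_SYSTEM, UTIL_IOWAIT, UTIL_IDLE, UTIL_MAX };

struct spu_stats {
        long long times[UTIL_MAX];      /* time accumulated per state */
        enum util_state util_state;
        long long tstamp;
};

/* Charge the interval since the last transition to the state being
 * left, then advance state and timestamp. */
static void switch_state_model(struct spu_stats *s,
                               enum util_state new_state, long long now)
{
        long long delta = now - s->tstamp;

        s->times[s->util_state] += delta;
        s->util_state = new_state;
        s->tstamp = now;
}

int main(void)
{
        struct spu_stats st = { .util_state = UTIL_USER, .tstamp = 100 };

        switch_state_model(&st, UTIL_IOWAIT, 130);
        assert(st.times[UTIL_USER] == 30 && st.util_state == UTIL_IOWAIT);
        return 0;
}
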
1155 struct spu *spu; in spu_sched_exit() local
1166 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) in spu_sched_exit()
1167 if (spu->alloc_state != SPU_FREE) in spu_sched_exit()
1168 spu->alloc_state = SPU_FREE; in spu_sched_exit()