Lines matching refs:ctx — cross-reference hits for the struct spu_context pointer (ctx) in the SPU scheduler (spufs sched.c). Each entry shows the source line number, the matching line, the enclosing function, and whether ctx is an argument or a local there.

97 void spu_set_timeslice(struct spu_context *ctx)  in spu_set_timeslice()  argument
99 if (ctx->prio < NORMAL_PRIO) in spu_set_timeslice()
100 ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio); in spu_set_timeslice()
102 ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio); in spu_set_timeslice()
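
The three spu_set_timeslice() lines above derive a per-context time slice from the task priority: contexts better than NORMAL_PRIO start from four times the default base before scaling. A minimal standalone sketch of that scaling, with an assumed SCALE_PRIO definition and illustrative constants (not the spufs values):

/*
 * Standalone sketch (not the spufs source): a higher-priority context
 * (lower prio number) gets a longer slice, clamped to a minimum.
 * All constant values below are illustrative assumptions.
 */
#include <stdio.h>

#define MAX_PRIO          140  /* assumed */
#define NORMAL_PRIO       120  /* assumed */
#define MAX_USER_PRIO      40  /* assumed */
#define MIN_SPU_TIMESLICE   1  /* assumed, in scheduler ticks */
#define DEF_SPU_TIMESLICE  10  /* assumed, in scheduler ticks */

static int scale_prio(int base, int prio)
{
	int slice = base * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2);

	return slice > MIN_SPU_TIMESLICE ? slice : MIN_SPU_TIMESLICE;
}

int main(void)
{
	for (int prio = 100; prio < MAX_PRIO; prio += 10) {
		/* below NORMAL_PRIO the base slice is quadrupled, as in
		 * spu_set_timeslice() above */
		int base = prio < NORMAL_PRIO ? DEF_SPU_TIMESLICE * 4
					      : DEF_SPU_TIMESLICE;

		printf("prio %3d -> time_slice %d\n", prio, scale_prio(base, prio));
	}
	return 0;
}
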
108 void __spu_update_sched_info(struct spu_context *ctx) in __spu_update_sched_info() argument
114 BUG_ON(!list_empty(&ctx->rq)); in __spu_update_sched_info()
121 ctx->tid = current->pid; in __spu_update_sched_info()
130 ctx->prio = current->prio; in __spu_update_sched_info()
132 ctx->prio = current->static_prio; in __spu_update_sched_info()
133 ctx->policy = current->policy; in __spu_update_sched_info()
143 cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current)); in __spu_update_sched_info()
146 ctx->last_ran = raw_smp_processor_id(); in __spu_update_sched_info()
149 void spu_update_sched_info(struct spu_context *ctx) in spu_update_sched_info() argument
153 if (ctx->state == SPU_STATE_RUNNABLE) { in spu_update_sched_info()
154 node = ctx->spu->node; in spu_update_sched_info()
160 __spu_update_sched_info(ctx); in spu_update_sched_info()
163 __spu_update_sched_info(ctx); in spu_update_sched_info()
167 static int __node_allowed(struct spu_context *ctx, int node) in __node_allowed() argument
172 if (cpumask_intersects(mask, &ctx->cpus_allowed)) in __node_allowed()
179 static int node_allowed(struct spu_context *ctx, int node) in node_allowed() argument
184 rval = __node_allowed(ctx, node); in node_allowed()
206 struct spu_context *ctx = spu->ctx; in do_notify_spus_active() local
208 &ctx->sched_flags); in do_notify_spus_active()
210 wake_up_all(&ctx->stop_wq); in do_notify_spus_active()
222 static void spu_bind_context(struct spu *spu, struct spu_context *ctx) in spu_bind_context() argument
224 spu_context_trace(spu_bind_context__enter, ctx, spu); in spu_bind_context()
226 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); in spu_bind_context()
228 if (ctx->flags & SPU_CREATE_NOSCHED) in spu_bind_context()
231 ctx->stats.slb_flt_base = spu->stats.slb_flt; in spu_bind_context()
232 ctx->stats.class2_intr_base = spu->stats.class2_intr; in spu_bind_context()
234 spu_associate_mm(spu, ctx->owner); in spu_bind_context()
237 spu->ctx = ctx; in spu_bind_context()
239 ctx->spu = spu; in spu_bind_context()
240 ctx->ops = &spu_hw_ops; in spu_bind_context()
249 spu_unmap_mappings(ctx); in spu_bind_context()
251 spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); in spu_bind_context()
252 spu_restore(&ctx->csa, spu); in spu_bind_context()
254 spu_switch_notify(spu, ctx); in spu_bind_context()
255 ctx->state = SPU_STATE_RUNNABLE; in spu_bind_context()
257 spuctx_switch_state(ctx, SPU_UTIL_USER); in spu_bind_context()
267 return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED)); in sched_spu()
272 struct spu_context *ctx; in aff_merge_remaining_ctxs() local
274 list_for_each_entry(ctx, &gang->aff_list_head, aff_list) { in aff_merge_remaining_ctxs()
275 if (list_empty(&ctx->aff_list)) in aff_merge_remaining_ctxs()
276 list_add(&ctx->aff_list, &gang->aff_list_head); in aff_merge_remaining_ctxs()
283 struct spu_context *ctx; in aff_set_offsets() local
287 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, in aff_set_offsets()
289 if (&ctx->aff_list == &gang->aff_list_head) in aff_set_offsets()
291 ctx->aff_offset = offset--; in aff_set_offsets()
295 list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) { in aff_set_offsets()
296 if (&ctx->aff_list == &gang->aff_list_head) in aff_set_offsets()
298 ctx->aff_offset = offset++; in aff_set_offsets()
304 static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, in aff_ref_location() argument
328 if (!node_allowed(ctx, node)) in aff_ref_location()
334 if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset in aff_ref_location()
335 && spu->ctx->gang->aff_ref_spu) in aff_ref_location()
336 available_spus -= spu->ctx->gang->contexts; in aff_ref_location()
339 if (available_spus < ctx->gang->contexts) { in aff_ref_location()
359 struct spu_context *ctx; in aff_set_ref_point_location() local
369 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, in aff_set_ref_point_location()
371 if (&ctx->aff_list == &gang->aff_list_head) in aff_set_ref_point_location()
373 lowest_offset = ctx->aff_offset; in aff_set_ref_point_location()
410 static int has_affinity(struct spu_context *ctx) in has_affinity() argument
412 struct spu_gang *gang = ctx->gang; in has_affinity()
414 if (list_empty(&ctx->aff_list)) in has_affinity()
417 if (atomic_read(&ctx->gang->aff_sched_count) == 0) in has_affinity()
418 ctx->gang->aff_ref_spu = NULL; in has_affinity()
436 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) in spu_unbind_context() argument
440 spu_context_trace(spu_unbind_context__enter, ctx, spu); in spu_unbind_context()
442 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); in spu_unbind_context()
444 if (spu->ctx->flags & SPU_CREATE_NOSCHED) in spu_unbind_context()
447 if (ctx->gang) in spu_unbind_context()
453 atomic_dec_if_positive(&ctx->gang->aff_sched_count); in spu_unbind_context()
456 spu_unmap_mappings(ctx); in spu_unbind_context()
457 spu_save(&ctx->csa, spu); in spu_unbind_context()
458 spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0); in spu_unbind_context()
462 ctx->state = SPU_STATE_SAVED; in spu_unbind_context()
469 ctx->ops = &spu_backing_ops; in spu_unbind_context()
471 spu->ctx = NULL; in spu_unbind_context()
476 ctx->stats.slb_flt += in spu_unbind_context()
477 (spu->stats.slb_flt - ctx->stats.slb_flt_base); in spu_unbind_context()
478 ctx->stats.class2_intr += in spu_unbind_context()
479 (spu->stats.class2_intr - ctx->stats.class2_intr_base); in spu_unbind_context()
482 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); in spu_unbind_context()
483 ctx->spu = NULL; in spu_unbind_context()
485 if (spu_stopped(ctx, &status)) in spu_unbind_context()
486 wake_up_all(&ctx->stop_wq); in spu_unbind_context()
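
The slb_flt and class2_intr arithmetic in spu_bind_context() (lines 231-232) and spu_unbind_context() (lines 476-479) is a baseline/delta pattern: the physical SPU's counters only ever increase, so the context snapshots them at bind time and charges itself the difference at unbind. A small sketch of that pattern, with illustrative field names:

/* Sketch of the baseline/delta counter pattern visible at bind and unbind.
 * The physical SPU keeps a monotonically increasing fault counter; the
 * context records a snapshot when it is bound so that only faults taken
 * while bound are attributed to it. Types and names here are stand-ins. */
struct phys_counters { unsigned long long slb_flt; };
struct ctx_counters  { unsigned long long slb_flt, slb_flt_base; };

static void bind_snapshot(struct ctx_counters *c, const struct phys_counters *p)
{
	c->slb_flt_base = p->slb_flt;                /* remember the starting point */
}

static void unbind_account(struct ctx_counters *c, const struct phys_counters *p)
{
	c->slb_flt += p->slb_flt - c->slb_flt_base;  /* charge only our share */
}
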
493 static void __spu_add_to_rq(struct spu_context *ctx) in __spu_add_to_rq() argument
508 if (list_empty(&ctx->rq)) { in __spu_add_to_rq()
509 list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); in __spu_add_to_rq()
510 set_bit(ctx->prio, spu_prio->bitmap); in __spu_add_to_rq()
516 static void spu_add_to_rq(struct spu_context *ctx) in spu_add_to_rq() argument
519 __spu_add_to_rq(ctx); in spu_add_to_rq()
523 static void __spu_del_from_rq(struct spu_context *ctx) in __spu_del_from_rq() argument
525 int prio = ctx->prio; in __spu_del_from_rq()
527 if (!list_empty(&ctx->rq)) { in __spu_del_from_rq()
530 list_del_init(&ctx->rq); in __spu_del_from_rq()
537 void spu_del_from_rq(struct spu_context *ctx) in spu_del_from_rq() argument
540 __spu_del_from_rq(ctx); in spu_del_from_rq()
544 static void spu_prio_wait(struct spu_context *ctx) in spu_prio_wait() argument
553 BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED)); in spu_prio_wait()
556 prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); in spu_prio_wait()
558 __spu_add_to_rq(ctx); in spu_prio_wait()
560 mutex_unlock(&ctx->state_mutex); in spu_prio_wait()
562 mutex_lock(&ctx->state_mutex); in spu_prio_wait()
564 __spu_del_from_rq(ctx); in spu_prio_wait()
568 remove_wait_queue(&ctx->stop_wq, &wait); in spu_prio_wait()
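
The spu_prio_wait() lines show the usual kernel sleep sequence: register on ctx->stop_wq, drop the context's state_mutex, schedule away, then re-take the mutex and remove the wait entry. A userspace analogue with pthreads (not the kernel wait-queue API) illustrates why the lock is dropped around the sleep:

/* Userspace analogue of the sleep-until-woken pattern in spu_prio_wait().
 * pthread_cond_wait() atomically releases the mutex while sleeping and
 * re-acquires it on wakeup, which is what the explicit
 * prepare_to_wait/unlock/schedule/lock sequence achieves in the kernel. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  stop_wq     = PTHREAD_COND_INITIALIZER;
static bool spu_available;          /* stands in for "an SPU was freed" */

void prio_wait(void)
{
	pthread_mutex_lock(&state_mutex);
	while (!spu_available)          /* re-check the condition on every wakeup */
		pthread_cond_wait(&stop_wq, &state_mutex);
	spu_available = false;
	pthread_mutex_unlock(&state_mutex);
}

void wake_waiter(void)              /* analogue of waking ctx->stop_wq */
{
	pthread_mutex_lock(&state_mutex);
	spu_available = true;
	pthread_cond_signal(&stop_wq);
	pthread_mutex_unlock(&state_mutex);
}
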
571 static struct spu *spu_get_idle(struct spu_context *ctx) in spu_get_idle() argument
576 spu_context_nospu_trace(spu_get_idle__enter, ctx); in spu_get_idle()
578 if (ctx->gang) { in spu_get_idle()
579 mutex_lock(&ctx->gang->aff_mutex); in spu_get_idle()
580 if (has_affinity(ctx)) { in spu_get_idle()
581 aff_ref_spu = ctx->gang->aff_ref_spu; in spu_get_idle()
582 atomic_inc(&ctx->gang->aff_sched_count); in spu_get_idle()
583 mutex_unlock(&ctx->gang->aff_mutex); in spu_get_idle()
587 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); in spu_get_idle()
592 atomic_dec(&ctx->gang->aff_sched_count); in spu_get_idle()
595 mutex_unlock(&ctx->gang->aff_mutex); in spu_get_idle()
600 if (!node_allowed(ctx, node)) in spu_get_idle()
612 spu_context_nospu_trace(spu_get_idle__not_found, ctx); in spu_get_idle()
618 spu_context_trace(spu_get_idle__found, ctx, spu); in spu_get_idle()
629 static struct spu *find_victim(struct spu_context *ctx) in find_victim() argument
635 spu_context_nospu_trace(spu_find_victim__enter, ctx); in find_victim()
648 if (!node_allowed(ctx, node)) in find_victim()
653 struct spu_context *tmp = spu->ctx; in find_victim()
655 if (tmp && tmp->prio > ctx->prio && in find_victim()
658 victim = spu->ctx; in find_victim()
683 if (!spu || victim->prio <= ctx->prio) { in find_victim()
695 spu_context_trace(__spu_deactivate__unload, ctx, spu); in find_victim()
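
The find_victim() lines implement preemption for realtime contexts that could not get an idle SPU: scan the loaded contexts on allowed nodes and pick the least important one (numerically highest prio) that is still less important than the requester. A standalone sketch of that selection, with made-up types and a single allowed node standing in for the node_allowed() check:

/* Illustrative victim selection, not the spufs structures. */
#include <stddef.h>

struct fake_ctx { int prio; };
struct fake_spu { struct fake_ctx *ctx; int node; };

static struct fake_ctx *pick_victim(struct fake_spu *spus, size_t n,
				    int my_prio, int allowed_node)
{
	struct fake_ctx *victim = NULL;

	for (size_t i = 0; i < n; i++) {
		struct fake_ctx *tmp = spus[i].ctx;

		if (!tmp || spus[i].node != allowed_node)
			continue;
		/* only contexts strictly less important than us qualify;
		 * among those, prefer the least important one */
		if (tmp->prio > my_prio && (!victim || tmp->prio > victim->prio))
			victim = tmp;
	}
	return victim;   /* NULL if nobody is preemptible */
}
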
717 static void __spu_schedule(struct spu *spu, struct spu_context *ctx) in __spu_schedule() argument
722 spu_set_timeslice(ctx); in __spu_schedule()
725 if (spu->ctx == NULL) { in __spu_schedule()
726 spu_bind_context(spu, ctx); in __spu_schedule()
734 wake_up_all(&ctx->run_wq); in __spu_schedule()
736 spu_add_to_rq(ctx); in __spu_schedule()
739 static void spu_schedule(struct spu *spu, struct spu_context *ctx) in spu_schedule() argument
743 mutex_lock(&ctx->state_mutex); in spu_schedule()
744 if (ctx->state == SPU_STATE_SAVED) in spu_schedule()
745 __spu_schedule(spu, ctx); in spu_schedule()
746 spu_release(ctx); in spu_schedule()
762 static void spu_unschedule(struct spu *spu, struct spu_context *ctx, in spu_unschedule() argument
771 spu_unbind_context(spu, ctx); in spu_unschedule()
772 ctx->stats.invol_ctx_switch++; in spu_unschedule()
786 int spu_activate(struct spu_context *ctx, unsigned long flags) in spu_activate() argument
796 if (ctx->spu) in spu_activate()
803 spu = spu_get_idle(ctx); in spu_activate()
808 if (!spu && rt_prio(ctx->prio)) in spu_activate()
809 spu = find_victim(ctx); in spu_activate()
813 runcntl = ctx->ops->runcntl_read(ctx); in spu_activate()
814 __spu_schedule(spu, ctx); in spu_activate()
816 spuctx_switch_state(ctx, SPU_UTIL_USER); in spu_activate()
821 if (ctx->flags & SPU_CREATE_NOSCHED) { in spu_activate()
822 spu_prio_wait(ctx); in spu_activate()
826 spu_add_to_rq(ctx); in spu_activate()
839 struct spu_context *ctx; in grab_runnable_context() local
847 list_for_each_entry(ctx, rq, rq) { in grab_runnable_context()
849 if (__node_allowed(ctx, node)) { in grab_runnable_context()
850 __spu_del_from_rq(ctx); in grab_runnable_context()
856 ctx = NULL; in grab_runnable_context()
859 return ctx; in grab_runnable_context()
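
Taken together, __spu_add_to_rq(), __spu_del_from_rq() and grab_runnable_context() point at a priority-array runqueue: one list per priority level plus a bitmap of non-empty levels, so the best waiter is found with a first-set-bit scan (find_first_bit() in the kernel). A standalone model with assumed names and sizes:

/* Minimal model of a priority-array runqueue with a "non-empty" bitmap,
 * the structure implied by spu_prio->runq[] and spu_prio->bitmap above.
 * Names, sizes and the plain counter (instead of list heads) are
 * assumptions for illustration. */
#define NPRIO 140

struct prio_rq {
	unsigned long long bitmap[(NPRIO + 63) / 64]; /* bit p set => level p non-empty */
	int                count[NPRIO];              /* stand-in for list_head runq[p] */
};

static void rq_add(struct prio_rq *rq, int prio)
{
	rq->count[prio]++;
	rq->bitmap[prio / 64] |= 1ULL << (prio % 64);
}

static void rq_del(struct prio_rq *rq, int prio)
{
	if (rq->count[prio] && --rq->count[prio] == 0)
		rq->bitmap[prio / 64] &= ~(1ULL << (prio % 64));
}

/* Find the best (numerically lowest) priority below 'limit' with waiters;
 * the kernel does this scan with find_first_bit() on the real bitmap. */
static int rq_best(const struct prio_rq *rq, int limit)
{
	for (int p = 0; p < limit; p++)
		if (rq->bitmap[p / 64] & (1ULL << (p % 64)))
			return p;
	return -1;
}
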
862 static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) in __spu_deactivate() argument
864 struct spu *spu = ctx->spu; in __spu_deactivate()
870 spu_unschedule(spu, ctx, new == NULL); in __spu_deactivate()
875 spu_release(ctx); in __spu_deactivate()
879 mutex_lock(&ctx->state_mutex); in __spu_deactivate()
895 void spu_deactivate(struct spu_context *ctx) in spu_deactivate() argument
897 spu_context_nospu_trace(spu_deactivate__enter, ctx); in spu_deactivate()
898 __spu_deactivate(ctx, 1, MAX_PRIO); in spu_deactivate()
909 void spu_yield(struct spu_context *ctx) in spu_yield() argument
911 spu_context_nospu_trace(spu_yield__enter, ctx); in spu_yield()
912 if (!(ctx->flags & SPU_CREATE_NOSCHED)) { in spu_yield()
913 mutex_lock(&ctx->state_mutex); in spu_yield()
914 __spu_deactivate(ctx, 0, MAX_PRIO); in spu_yield()
915 mutex_unlock(&ctx->state_mutex); in spu_yield()
919 static noinline void spusched_tick(struct spu_context *ctx) in spusched_tick() argument
924 if (spu_acquire(ctx)) in spusched_tick()
927 if (ctx->state != SPU_STATE_RUNNABLE) in spusched_tick()
929 if (ctx->flags & SPU_CREATE_NOSCHED) in spusched_tick()
931 if (ctx->policy == SCHED_FIFO) in spusched_tick()
934 if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) in spusched_tick()
937 spu = ctx->spu; in spusched_tick()
939 spu_context_trace(spusched_tick__preempt, ctx, spu); in spusched_tick()
941 new = grab_runnable_context(ctx->prio + 1, spu->node); in spusched_tick()
943 spu_unschedule(spu, ctx, 0); in spusched_tick()
944 if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) in spusched_tick()
945 spu_add_to_rq(ctx); in spusched_tick()
947 spu_context_nospu_trace(spusched_tick__newslice, ctx); in spusched_tick()
948 if (!ctx->time_slice) in spusched_tick()
949 ctx->time_slice++; in spusched_tick()
952 spu_release(ctx); in spusched_tick()
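
The spusched_tick() lines encode the per-tick decision: decrement the slice, keep running if it has not expired and the context is still inside spu_run, otherwise preempt only if someone of equal or better priority is waiting (grab_runnable_context(ctx->prio + 1, ...)), and grant a fresh slice if not. A condensed standalone sketch, with stand-in types and a hypothetical waiter_better_than() helper:

/* Sketch of the per-tick decision, not the spufs code. */
struct tick_ctx {
	int time_slice;
	int prio;
	int still_running;   /* stands in for the SPU_SCHED_SPU_RUN flag */
};

/* Hypothetical helper: is a context with priority value below 'limit'
 * waiting on the runqueue? (the kernel asks grab_runnable_context()) */
extern int waiter_better_than(int limit);

/* Returns 1 if the context should be preempted off its SPU. */
static int tick(struct tick_ctx *c)
{
	if (--c->time_slice > 0 && c->still_running)
		return 0;                 /* slice not used up yet */

	if (waiter_better_than(c->prio + 1))
		return 1;                 /* yield the SPU, go back on the runqueue */

	if (!c->time_slice)               /* nobody better: grant a fresh slice */
		c->time_slice++;
	return 0;
}
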
1020 struct spu_context *ctx = spu->ctx; in spusched_thread() local
1022 if (ctx) { in spusched_thread()
1023 get_spu_context(ctx); in spusched_thread()
1025 spusched_tick(ctx); in spusched_thread()
1027 put_spu_context(ctx); in spusched_thread()
1037 void spuctx_switch_state(struct spu_context *ctx, in spuctx_switch_state() argument
1047 delta = curtime - ctx->stats.tstamp; in spuctx_switch_state()
1049 WARN_ON(!mutex_is_locked(&ctx->state_mutex)); in spuctx_switch_state()
1052 spu = ctx->spu; in spuctx_switch_state()
1053 old_state = ctx->stats.util_state; in spuctx_switch_state()
1054 ctx->stats.util_state = new_state; in spuctx_switch_state()
1055 ctx->stats.tstamp = curtime; in spuctx_switch_state()
1061 ctx->stats.times[old_state] += delta; in spuctx_switch_state()
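
The closing spuctx_switch_state() lines do state-time accounting: the time elapsed since the last transition is charged to the state being left, then the new state and timestamp are recorded. A standalone sketch of the same bookkeeping, with an assumed clock source and illustrative enum values:

/* Illustrative state-time accounting, not the spufs statistics code. */
#include <time.h>

enum util_state { UTIL_USER, UTIL_SYSTEM, UTIL_IDLE_LOADED, UTIL_MAX };

struct util_stats {
	enum util_state state;
	long long       tstamp;            /* ns at the last transition */
	long long       times[UTIL_MAX];   /* total ns spent in each state */
};

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void switch_state(struct util_stats *s, enum util_state new_state)
{
	long long curtime = now_ns();

	s->times[s->state] += curtime - s->tstamp;  /* charge the state we leave */
	s->state  = new_state;
	s->tstamp = curtime;
}
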