Lines matching refs: gpu — drivers/gpu/drm/msm/msm_gpu.c ("..." marks lines elided from the excerpts below)

#ifdef CONFIG_MSM_BUS_SCALING
static void bs_init(struct msm_gpu *gpu)
{
	if (gpu->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif
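
When bus scaling is not configured, the empty stubs above let every caller invoke bs_init()/bs_set()/bs_fini() unconditionally, with no #ifdef at the call sites. A minimal, self-contained sketch of the same compile-out pattern (names are hypothetical, not from the driver):

	#include <stdio.h>

	#ifdef HAVE_BUS_SCALING
	static void bus_vote(int level) { printf("bus vote: %d\n", level); }
	#else
	static void bus_vote(int level) { (void)level; }	/* no-op stub */
	#endif

	int main(void)
	{
		bus_vote(3);	/* callers never need #ifdefs */
		return 0;
	}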

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		...
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		...
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	int i;

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
			clk_prepare(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}

	if (rate_clk && gpu->fast_rate)
		clk_set_rate(rate_clk, gpu->fast_rate);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	int i;

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
			clk_disable(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}

	if (rate_clk && gpu->slow_rate)
		clk_set_rate(rate_clk, gpu->slow_rate);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	return 0;
}
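
enable_clk()/disable_clk() rely on the clk framework's two-phase contract: clk_prepare()/clk_unprepare() may sleep, while clk_enable()/clk_disable() are callable from atomic context (clk_prepare_enable(), used for ebi1_clk below, combines both). A short sketch of that contract with a hypothetical consumer, not from msm_gpu.c:

	#include <linux/clk.h>

	static int example_clk_on(struct clk *core)
	{
		int ret;

		ret = clk_prepare(core);	/* may sleep */
		if (ret)
			return ret;

		ret = clk_enable(core);		/* safe in atomic context */
		if (ret)
			clk_unprepare(core);

		return ret;
	}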

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (gpu->active_cnt++ > 0)
		return 0;

	if (WARN_ON(gpu->active_cnt <= 0))
		return -EINVAL;

	ret = enable_pwrrail(gpu);
	...
	ret = enable_clk(gpu);
	...
	ret = enable_axi(gpu);
	...
	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (--gpu->active_cnt > 0)
		return 0;

	if (WARN_ON(gpu->active_cnt < 0))
		return -EINVAL;

	ret = disable_axi(gpu);
	...
	ret = disable_clk(gpu);
	...
	ret = disable_pwrrail(gpu);
	...
	return 0;
}
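
msm_gpu_pm_resume()/msm_gpu_pm_suspend() nest: only the 0->1 transition powers up and only the 1->0 transition powers down. A runnable userspace model of the active_cnt logic (names are illustrative):

	#include <assert.h>
	#include <stdbool.h>

	static int active_cnt;
	static bool powered;

	static void power_get(void)
	{
		if (active_cnt++ > 0)
			return;		/* already powered: just bump the count */
		powered = true;		/* first user: do the real power-up */
	}

	static void power_put(void)
	{
		if (--active_cnt > 0)
			return;		/* still held by an outer caller */
		powered = false;	/* last user gone: real power-down */
	}

	int main(void)
	{
		power_get();		/* outer user */
		power_get();		/* nested user: no second power-up */
		power_put();
		assert(powered);	/* outer user still holds it */
		power_put();
		assert(!powered && active_cnt == 0);
		return 0;
	}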

static void inactive_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
	struct drm_device *dev = gpu->dev;

	if (gpu->inactive)
		return;

	DBG("%s: inactive!\n", gpu->name);
	mutex_lock(&dev->struct_mutex);
	if (!(msm_gpu_active(gpu) || gpu->inactive)) {
		disable_axi(gpu);
		disable_clk(gpu);
		gpu->inactive = true;
	}
	mutex_unlock(&dev->struct_mutex);
}

static void inactive_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct msm_drm_private *priv = gpu->dev->dev_private;

	queue_work(priv->wq, &gpu->inactive_work);
}

/* cancel inactive timer and make sure we are awake: */
static void inactive_cancel(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	del_timer(&gpu->inactive_timer);
	if (gpu->inactive) {
		enable_clk(gpu);
		enable_axi(gpu);
		gpu->inactive = false;
	}
}

static void inactive_start(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->inactive_timer,
			round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
}
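
inactive_handler() fires in timer (atomic) context, where the worker's mutex_lock() and clock calls would be illegal, so it only queues inactive_work; the worker then re-checks idleness in process context. The same deferral idiom as a hypothetical sketch, using the timer API of this kernel generation:

	#include <linux/timer.h>
	#include <linux/workqueue.h>
	#include <linux/mutex.h>

	struct my_dev {
		struct timer_list timer;
		struct work_struct work;
		struct mutex lock;
	};

	static void my_timer_cb(unsigned long data)
	{
		struct my_dev *md = (struct my_dev *)data;

		schedule_work(&md->work);	/* all a timer may safely do */
	}

	static void my_worker(struct work_struct *work)
	{
		struct my_dev *md = container_of(work, struct my_dev, work);

		mutex_lock(&md->lock);		/* legal here, illegal in the timer */
		/* ... the actual (possibly sleeping) work ... */
		mutex_unlock(&md->lock);
	}

	static void my_dev_init(struct my_dev *md)
	{
		mutex_init(&md->lock);
		INIT_WORK(&md->work, my_worker);
		setup_timer(&md->timer, my_timer_cb, (unsigned long)md);
	}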

static void retire_submits(struct msm_gpu *gpu, uint32_t fence);

static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	mutex_lock(&dev->struct_mutex);
	if (msm_gpu_active(gpu)) {
		struct msm_gem_submit *submit;
		uint32_t fence = gpu->funcs->last_fence(gpu);

		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu, fence + 1);

		inactive_cancel(gpu);
		gpu->funcs->recover(gpu);

		/* replay the remaining submits: */
		list_for_each_entry(submit, &gpu->submit_list, node) {
			gpu->funcs->submit(gpu, submit, NULL);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}
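
Restating the recovery sequence encoded above (comment only, not driver code):

	/*
	 * hang recovery, in order:
	 *   1. fence = last_fence(gpu)        -- last fence the gpu completed
	 *   2. retire_submits(gpu, fence + 1) -- drop finished work + the hung submit
	 *   3. recover(gpu)                    -- backend reset/reinit
	 *   4. resubmit gpu->submit_list       -- replay the innocent pending work
	 */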

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

static void hangcheck_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	if (fence != gpu->hangcheck_fence) {
		/* some progress has been made.. ya! */
		gpu->hangcheck_fence = fence;
	} else if (fence < gpu->submitted_fence) {
		/* no progress and not done.. hung! */
		gpu->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
				gpu->name);
		dev_err(dev->dev, "%s:     completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s:     submitted fence: %u\n",
				gpu->name, gpu->submitted_fence);
		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (gpu->submitted_fence > gpu->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}
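
The hangcheck above distinguishes three cases from two remembered fences. A runnable userspace model of the decision (illustrative names):

	#include <assert.h>
	#include <stdint.h>

	enum state { PROGRESSING, HUNG, DONE };

	static enum state check(uint32_t completed, uint32_t last_seen,
			uint32_t submitted)
	{
		if (completed != last_seen)
			return PROGRESSING;	/* fence moved since last tick */
		if (completed < submitted)
			return HUNG;		/* stalled with work outstanding */
		return DONE;			/* idle: nothing left to complete */
	}

	int main(void)
	{
		assert(check(5, 4, 9) == PROGRESSING);
		assert(check(5, 5, 9) == HUNG);
		assert(check(9, 9, 9) == DONE);
		return 0;
	}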

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}
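
The cntrs[i] = current - last subtraction needs no explicit wraparound handling, assuming the sample registers behave as free-running 32-bit counters: unsigned arithmetic is modulo 2^32. A standalone demonstration:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t cntr_delta(uint32_t now, uint32_t last)
	{
		return now - last;	/* correct even across wraparound */
	}

	int main(void)
	{
		assert(cntr_delta(1000, 900) == 100);
		assert(cntr_delta(5, 0xfffffffbu) == 10);	/* counter wrapped */
		return 0;
	}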

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;
	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);
out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
	return ret;
}
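
A hypothetical caller of the sampling API (a sketch, not a snippet from the driver):

	static int example_sample(struct msm_gpu *gpu)
	{
		uint32_t activetime, totaltime, cntrs[5];
		int n;

		n = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
				ARRAY_SIZE(cntrs), cntrs);
		if (n < 0)
			return n;	/* -EINVAL: sampling was never started */

		/* activetime/totaltime: busy vs. wall-clock µs since the last
		 * sample; cntrs[0..n-1]: hw counter deltas over the same window. */
		return 0;
	}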

static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
{
	struct drm_device *dev = gpu->dev;
	...
	while (!list_empty(&gpu->submit_list)) {
		struct msm_gem_submit *submit;

		submit = list_first_entry(&gpu->submit_list,
				struct msm_gem_submit, node);
		...
	}
}
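
retire_submits() only ever looks at the head of submit_list because submits complete in fence order, so retired entries always form a prefix of the queue. A runnable model of that invariant (illustrative, not driver code):

	#include <assert.h>
	#include <stdint.h>

	/* Submits queued in fence order; retire the prefix up to 'fence'. */
	static int retire(const uint32_t *fences, int n, uint32_t fence)
	{
		int i = 0;

		while (i < n && fences[i] <= fence)
			i++;			/* retire head-of-queue submit */
		return i;			/* number retired */
	}

	int main(void)
	{
		const uint32_t q[] = { 3, 4, 5, 6 };

		assert(retire(q, 4, 4) == 2);	/* 3 and 4 done, 5 and 6 remain */
		assert(retire(q, 4, 9) == 4);	/* everything done */
		return 0;
	}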

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	msm_update_fence(gpu->dev, fence);

	mutex_lock(&dev->struct_mutex);

	retire_submits(gpu, fence);

	while (!list_empty(&gpu->active_list)) {
		struct msm_gem_object *obj;

		obj = list_first_entry(&gpu->active_list,
				struct msm_gem_object, mm_list);
		...
		msm_gem_put_iova(&obj->base, gpu->id);
		...
	}

	mutex_unlock(&dev->struct_mutex);

	if (!msm_gpu_active(gpu))
		inactive_start(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int i, ret;
	...
	gpu->submitted_fence = submit->fence;
	inactive_cancel(gpu);
	list_add_tail(&submit->node, &gpu->submit_list);
	...
	gpu->submitted_fence = submit->fence;
	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		...
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
		...
		msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
	}

	ret = gpu->funcs->submit(gpu, submit, ctx);
	...
	hangcheck_timer_reset(gpu);

	return ret;
}
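
Taken together with the workers above, msm_gpu_submit() starts a fence lifecycle that retire_worker() finishes; a compact restatement (comment only; the fence assignment itself is among the elided lines):

	/*
	 * submit:  submit->fence is assigned, gpu->submitted_fence tracks it,
	 *          inactive_cancel() wakes the hw, hangcheck_timer_reset()
	 *          arms the watchdog
	 * irq:     msm_gpu_retire() queues retire_work
	 * retire:  last_fence() says how far the gpu got; submits and bo's up
	 *          to that fence are released; if idle, inactive_start() arms
	 *          the power-down timer
	 */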

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, const char *ioname, const char *irqname, int ringsz)
{
	struct iommu_domain *iommu;
	int i, ret;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;
	gpu->inactive = true;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->inactive_work, inactive_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	INIT_LIST_HEAD(&gpu->submit_list);

	setup_timer(&gpu->inactive_timer, inactive_handler,
			(unsigned long)gpu);
	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	spin_lock_init(&gpu->perf_lock);

	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		...
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	/* Acquire clocks: */
	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
		if (IS_ERR(gpu->grp_clks[i]))
			gpu->grp_clks[i] = NULL;
	}

	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	/* Setup IOMMU: */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (iommu) {
		...
		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
		if (IS_ERR(gpu->mmu)) {
			ret = PTR_ERR(gpu->mmu);
			...
			gpu->mmu = NULL;
			iommu_domain_free(iommu);
			goto fail;
		}
	} else {
		...
	}
	gpu->id = msm_register_mmu(drm, gpu->mmu);

	/* Create ringbuffer: */
	...
	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
	...
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;
		...
		goto fail;
	}

	bs_init(gpu);

	return 0;

fail:
	return ret;
}
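
msm_gpu_init() treats clocks and regulators as optional: a failed devm_*_get() is squashed to NULL, and every later use is guarded, as in enable_clk() and enable_pwrrail(). The idiom in isolation (hypothetical probe fragment, not from the driver); later kernels added devm_clk_get_optional() for this exact case:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static struct clk *example_get_optional_clk(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, "core_clk");

		if (IS_ERR(clk))
			clk = NULL;	/* absent resource: run without it */
		return clk;		/* every consumer guards the NULL */
	}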

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->id);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (gpu->mmu)
		gpu->mmu->funcs->destroy(gpu->mmu);
}