This source file includes the following definitions.
- to_panfrost_job
- to_panfrost_fence
- panfrost_fence_get_driver_name
- panfrost_fence_get_timeline_name
- panfrost_fence_create
- panfrost_job_get_slot
- panfrost_job_write_affinity
- panfrost_job_hw_submit
- panfrost_acquire_object_fences
- panfrost_attach_object_fences
- panfrost_job_push
- panfrost_job_cleanup
- panfrost_job_put
- panfrost_job_free
- panfrost_job_dependency
- panfrost_job_run
- panfrost_job_enable_interrupts
- panfrost_job_timedout
- panfrost_job_irq_handler
- panfrost_job_init
- panfrost_job_fini
- panfrost_job_open
- panfrost_job_close
- panfrost_job_is_idle
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
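
/*
 * Job submission and completion handling for the Mali Job Manager.
 *
 * Each hardware job slot gets its own drm_gpu_scheduler and dma_fence
 * context (struct panfrost_queue_state below). Jobs are pushed to a
 * per-file scheduler entity, their explicit and implicit fences are fed
 * to the scheduler through the ->dependency() callback, and ->run_job()
 * programs the slot's JS_*_NEXT registers. Completion and faults are
 * reported through the JOB interrupt and handled in
 * panfrost_job_irq_handler().
 */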

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
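
/*
 * job_write()/job_read() are MMIO accessors for the Job Manager register
 * block; the JOB_ and JS_ register offsets used throughout this file come
 * from panfrost_regs.h.
 */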

struct panfrost_queue_state {
        struct drm_gpu_scheduler sched;

        u64 fence_context;
        u64 emit_seqno;
};

struct panfrost_job_slot {
        struct panfrost_queue_state queue[NUM_JOB_SLOTS];
        spinlock_t job_lock;
};
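
/*
 * One panfrost_queue_state per hardware job slot: a scheduler instance plus
 * the dma_fence context/seqno used for that slot's completion fences.
 * job_lock doubles as the fence lock and serialises access to pfdev->jobs[]
 * in the interrupt and timeout paths.
 */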

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
        return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
        struct dma_fence base;
        struct drm_device *dev;

        u64 seqno;
        int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
        return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
        return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
        struct panfrost_fence *f = to_panfrost_fence(fence);

        switch (f->queue) {
        case 0:
                return "panfrost-js-0";
        case 1:
                return "panfrost-js-1";
        case 2:
                return "panfrost-js-2";
        default:
                return NULL;
        }
}

static const struct dma_fence_ops panfrost_fence_ops = {
        .get_driver_name = panfrost_fence_get_driver_name,
        .get_timeline_name = panfrost_fence_get_timeline_name,
};

static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
        struct panfrost_fence *fence;
        struct panfrost_job_slot *js = pfdev->js;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return ERR_PTR(-ENOMEM);

        fence->dev = pfdev->ddev;
        fence->queue = js_num;
        fence->seqno = ++js->queue[js_num].emit_seqno;
        dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
                       js->queue[js_num].fence_context, fence->seqno);

        return &fence->base;
}
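
/*
 * A fence created here becomes the job's done_fence in panfrost_job_run()
 * and is signalled from panfrost_job_irq_handler() once the slot reports
 * the job as done. Each slot has its own fence context, so seqnos only
 * need to be monotonic per slot.
 */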

static int panfrost_job_get_slot(struct panfrost_job *job)
{
        /* JS0: fragment jobs.
         * JS1: vertex/tiler jobs
         * JS2: compute jobs
         */
        if (job->requirements & PANFROST_JD_REQ_FS)
                return 0;

/* Not exposed to userspace yet */
#if 0
        if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
                if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
                    (job->pfdev->features.nr_core_groups == 2))
                        return 2;
                if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
                        return 2;
        }
#endif
        return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
                                        u32 requirements,
                                        int js)
{
        u64 affinity;

        /*
         * Use all cores for now.
         * Eventually we may need to support tiler only jobs and h/w with
         * multiple (2) coherent core groups.
         */
        affinity = pfdev->features.shader_present;

        job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
        job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}

static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
        struct panfrost_device *pfdev = job->pfdev;
        u32 cfg;
        u64 jc_head = job->jc;
        int ret;

        ret = pm_runtime_get_sync(pfdev->dev);
        if (ret < 0)
                return;

        if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
                pm_runtime_put_sync_autosuspend(pfdev->dev);
                return;
        }

        cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);

        panfrost_devfreq_record_transition(pfdev, js);

        job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
        job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

        panfrost_job_write_affinity(pfdev, job->requirements, js);

        /* start MMU, medium priority, cache clean/flush on end, clean/flush on
         * start */
        cfg |= JS_CONFIG_THREAD_PRI(8) |
               JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
               JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

        if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
                cfg |= JS_CONFIG_START_MMU;

        job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

        /* GO ! */
        dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
                job, js, jc_head);

        job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}
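
/*
 * Submission sequence used above: take a runtime PM reference, check that
 * the slot's _NEXT registers are free, bind the file's address space via
 * panfrost_mmu_as_get(), program the job chain head, affinity and config,
 * then kick the slot with JS_COMMAND_START. The matching runtime PM put
 * happens on completion in the interrupt handler, or in the timeout path.
 */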

static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
                                           int bo_count,
                                           struct dma_fence **implicit_fences)
{
        int i;

        for (i = 0; i < bo_count; i++)
                implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
                                          int bo_count,
                                          struct dma_fence *fence)
{
        int i;

        for (i = 0; i < bo_count; i++)
                dma_resv_add_excl_fence(bos[i]->resv, fence);
}

int panfrost_job_push(struct panfrost_job *job)
{
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
        struct ww_acquire_ctx acquire_ctx;
        int ret = 0;

        mutex_lock(&pfdev->sched_lock);

        ret = drm_gem_lock_reservations(job->bos, job->bo_count,
                                        &acquire_ctx);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                return ret;
        }

        ret = drm_sched_job_init(&job->base, entity, NULL);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                goto unlock;
        }

        job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

        kref_get(&job->refcount);

        panfrost_acquire_object_fences(job->bos, job->bo_count,
                                       job->implicit_fences);

        drm_sched_entity_push_job(&job->base, entity);

        mutex_unlock(&pfdev->sched_lock);

        panfrost_attach_object_fences(job->bos, job->bo_count,
                                      job->render_done_fence);

unlock:
        drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

        return ret;
}
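
/*
 * Typical caller flow, a minimal illustrative sketch only: the submit ioctl
 * (panfrost_drv.c) is assumed to build the job roughly like this before
 * handing it over. Field names mirror struct panfrost_job as used in this
 * file; error handling and BO/fence lookup are omitted.
 *
 *        job = kzalloc(sizeof(*job), GFP_KERNEL);
 *        kref_init(&job->refcount);
 *        job->pfdev = pfdev;
 *        job->file_priv = file_priv;
 *        job->jc = args->jc;
 *        job->requirements = args->requirements;
 *        job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
 *        ... copy in in_fences, look up BOs into job->bos / job->bo_count ...
 *        ret = panfrost_job_push(job);
 *        panfrost_job_put(job);   (caller drops its reference either way)
 */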

static void panfrost_job_cleanup(struct kref *ref)
{
        struct panfrost_job *job = container_of(ref, struct panfrost_job,
                                                refcount);
        unsigned int i;

        if (job->in_fences) {
                for (i = 0; i < job->in_fence_count; i++)
                        dma_fence_put(job->in_fences[i]);
                kvfree(job->in_fences);
        }
        if (job->implicit_fences) {
                for (i = 0; i < job->bo_count; i++)
                        dma_fence_put(job->implicit_fences[i]);
                kvfree(job->implicit_fences);
        }
        dma_fence_put(job->done_fence);
        dma_fence_put(job->render_done_fence);

        if (job->mappings) {
                for (i = 0; i < job->bo_count; i++) {
                        if (!job->mappings[i])
                                break;

                        atomic_dec(&job->mappings[i]->obj->gpu_usecount);
                        panfrost_gem_mapping_put(job->mappings[i]);
                }
                kvfree(job->mappings);
        }

        if (job->bos) {
                struct panfrost_gem_object *bo;

                for (i = 0; i < job->bo_count; i++) {
                        bo = to_panfrost_bo(job->bos[i]);
                        drm_gem_object_put_unlocked(job->bos[i]);
                }

                kvfree(job->bos);
        }

        kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
        kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);

        drm_sched_job_cleanup(sched_job);

        panfrost_job_put(job);
}
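
/*
 * Job lifetime: the reference taken in panfrost_job_push() is dropped here,
 * from the scheduler's free_job callback, once the job has run (or been
 * skipped) and the scheduler is done with it. The submitter's own initial
 * reference is presumably dropped after panfrost_job_push() returns (see
 * panfrost_drv.c).
 */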

static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
                                                 struct drm_sched_entity *s_entity)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct dma_fence *fence;
        unsigned int i;

        /* Explicit fences */
        for (i = 0; i < job->in_fence_count; i++) {
                if (job->in_fences[i]) {
                        fence = job->in_fences[i];
                        job->in_fences[i] = NULL;
                        return fence;
                }
        }

        /* Implicit fences, max. one per BO */
        for (i = 0; i < job->bo_count; i++) {
                if (job->implicit_fences[i]) {
                        fence = job->implicit_fences[i];
                        job->implicit_fences[i] = NULL;
                        return fence;
                }
        }

        return NULL;
}
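
/*
 * The scheduler calls ->dependency() repeatedly before running a job and
 * waits on every fence returned until the callback returns NULL. Explicit
 * in-fences from the submit are consumed first, then the per-BO implicit
 * fences collected by panfrost_acquire_object_fences().
 */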

static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct dma_fence *fence = NULL;

        if (unlikely(job->base.s_fence->finished.error))
                return NULL;

        pfdev->jobs[slot] = job;

        fence = panfrost_fence_create(pfdev, slot);
        if (IS_ERR(fence))
                return NULL;

        if (job->done_fence)
                dma_fence_put(job->done_fence);
        job->done_fence = dma_fence_get(fence);

        panfrost_job_hw_submit(job, slot);

        return fence;
}
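
/*
 * The fence returned from ->run_job() is the hardware completion fence for
 * this submission; once it signals, the scheduler signals the job's
 * finished fence, which is also the render_done_fence attached to the BOs
 * in panfrost_job_push().
 */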

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
        int j;
        u32 irq_mask = 0;

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                irq_mask |= MK_JS_MASK(j);
        }

        job_write(pfdev, JOB_INT_CLEAR, irq_mask);
        job_write(pfdev, JOB_INT_MASK, irq_mask);
}

static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int js = panfrost_job_get_slot(job);
        unsigned long flags;
        int i;

        /*
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
        if (dma_fence_is_signaled(job->done_fence))
                return;

        dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
                js,
                job_read(pfdev, JS_CONFIG(js)),
                job_read(pfdev, JS_STATUS(js)),
                job_read(pfdev, JS_HEAD_LO(js)),
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);

        if (!mutex_trylock(&pfdev->reset_lock))
                return;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;

                drm_sched_stop(sched, sched_job);
                if (js != i)
                        /* Ensure any timeouts on other slots have finished */
                        cancel_delayed_work_sync(&sched->work_tdr);
        }

        drm_sched_increase_karma(sched_job);

        spin_lock_irqsave(&pfdev->js->job_lock, flags);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                if (pfdev->jobs[i]) {
                        pm_runtime_put_noidle(pfdev->dev);
                        pfdev->jobs[i] = NULL;
                }
        }
        spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

        panfrost_devfreq_record_transition(pfdev, js);
        panfrost_device_reset(pfdev);

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);

        /* restart scheduler after GPU is usable again */
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_start(&pfdev->js->queue[i].sched, true);

        mutex_unlock(&pfdev->reset_lock);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
        .dependency = panfrost_job_dependency,
        .run_job = panfrost_job_run,
        .timedout_job = panfrost_job_timedout,
        .free_job = panfrost_job_free
};

static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
        struct panfrost_device *pfdev = data;
        u32 status = job_read(pfdev, JOB_INT_STAT);
        int j;

        dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

        if (!status)
                return IRQ_NONE;

        pm_runtime_mark_last_busy(pfdev->dev);

        for (j = 0; status; j++) {
                u32 mask = MK_JS_MASK(j);

                if (!(status & mask))
                        continue;

                job_write(pfdev, JOB_INT_CLEAR, mask);

                if (status & JOB_INT_MASK_ERR(j)) {
                        job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

                        dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
                                j,
                                panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
                                job_read(pfdev, JS_HEAD_LO(j)),
                                job_read(pfdev, JS_TAIL_LO(j)));

                        drm_sched_fault(&pfdev->js->queue[j].sched);
                }

                if (status & JOB_INT_MASK_DONE(j)) {
                        struct panfrost_job *job;

                        spin_lock(&pfdev->js->job_lock);
                        job = pfdev->jobs[j];
                        if (job) {
                                pfdev->jobs[j] = NULL;

                                panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
                                panfrost_devfreq_record_transition(pfdev, j);

                                dma_fence_signal_locked(job->done_fence);
                                pm_runtime_put_autosuspend(pfdev->dev);
                        }
                        spin_unlock(&pfdev->js->job_lock);
                }

                status &= ~mask;
        }

        return IRQ_HANDLED;
}
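
/*
 * The handler above walks the per-slot bits in JOB_INT_STAT: a fault bit
 * cancels whatever is queued in the slot's _NEXT registers, logs the
 * decoded exception and lets drm_sched_fault() kick the timeout handling;
 * a done bit releases the address space, signals the job's done_fence and
 * drops the runtime PM reference taken at submit time.
 */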

int panfrost_job_init(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js;
        int ret, j, irq;

        pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
        if (!js)
                return -ENOMEM;

        spin_lock_init(&js->job_lock);

        irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
        if (irq <= 0)
                return -ENODEV;

        ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
                               IRQF_SHARED, "job", pfdev);
        if (ret) {
                dev_err(pfdev->dev, "failed to request job irq");
                return ret;
        }

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                js->queue[j].fence_context = dma_fence_context_alloc(1);

                ret = drm_sched_init(&js->queue[j].sched,
                                     &panfrost_sched_ops,
                                     1, 0, msecs_to_jiffies(500),
                                     "pan_js");
                if (ret) {
                        dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
                        goto err_sched;
                }
        }

        panfrost_job_enable_interrupts(pfdev);

        return 0;

err_sched:
        for (j--; j >= 0; j--)
                drm_sched_fini(&js->queue[j].sched);

        return ret;
}
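
/*
 * Each slot's scheduler is created with hw_submission = 1 (a single job in
 * flight per slot), no hang limit and a 500 ms job timeout, and all slots
 * share the "job" interrupt line requested above.
 */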

void panfrost_job_fini(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int j;

        job_write(pfdev, JOB_INT_MASK, 0);

        for (j = 0; j < NUM_JOB_SLOTS; j++)
                drm_sched_fini(&js->queue[j].sched);
}

int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
        struct panfrost_device *pfdev = panfrost_priv->pfdev;
        struct panfrost_job_slot *js = pfdev->js;
        struct drm_sched_rq *rq;
        int ret, i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
                ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
                if (WARN_ON(ret))
                        return ret;
        }
        return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                /* If there are any jobs in the HW queue, we're not idle */
                if (atomic_read(&js->queue[i].sched.hw_rq_count))
                        return false;

                /* Check whether the hardware itself still reports the slot busy */
                if (pfdev->devfreq.slot[i].busy)
                        return false;
        }

        return true;
}