This source file includes the following definitions.
- gen8_guc_raise_irq
- gen11_guc_raise_irq
- guc_send_reg
- intel_guc_init_send_regs
- intel_guc_init_early
- guc_shared_data_create
- guc_shared_data_destroy
- guc_ctl_debug_flags
- guc_ctl_feature_flags
- guc_ctl_ctxinfo_flags
- guc_ctl_log_params_flags
- guc_ctl_ads_flags
- guc_init_params
- intel_guc_write_params
- intel_guc_init
- intel_guc_fini
- intel_guc_send_nop
- intel_guc_to_host_event_handler_nop
- intel_guc_send_mmio
- intel_guc_to_host_process_recv_msg
- intel_guc_sample_forcewake
- intel_guc_auth_huc
- intel_guc_suspend
- intel_guc_reset_engine
- intel_guc_resume
- intel_guc_allocate_vma
#include "gt/intel_gt.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

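/*
 * Host-to-GuC "doorbell": writing these registers raises an interrupt in
 * the GuC firmware, telling it to look at the message just placed in the
 * scratch registers or CT buffer.
 */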
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static void gen11_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

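/*
 * Select the bank of scratch registers used for MMIO-based host/GuC
 * messaging and record the forcewake domains needed to access them.
 */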
void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
						guc_send_reg(guc, i),
						FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

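/*
 * Early one-time setup: initialise the firmware, CT, log and submission
 * state, install the default (nop) send/receive vfuncs and pick the
 * generation-specific notify and interrupt helpers.
 */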
void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	if (INTEL_GEN(i915) >= 11) {
		guc->notify = gen11_guc_raise_irq;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify = gen8_guc_raise_irq;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}

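/*
 * Single page of GGTT-mapped memory shared between the driver and the GuC
 * firmware (see e.g. intel_guc_reset_engine() below, which hands its GGTT
 * offset to the firmware).
 */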
static int guc_shared_data_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->shared_data = vma;
	guc->shared_data_vaddr = vaddr;

	return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}

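/*
 * The guc_ctl_*_flags() helpers below each compute one of the GUC_CTL_*
 * boot parameter dwords consumed by the firmware at load time.
 */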
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_is_submission_supported(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_guc_is_submission_supported(guc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			 (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

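/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup and
 * cannot be changed afterwards.
 */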
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

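/*
 * Write the parameter block into the SOFT_SCRATCH registers from which the
 * GuC firmware picks it up during boot; SOFT_SCRATCH(0) is cleared first and
 * the parameters go into SOFT_SCRATCH(1..GUC_CTL_MAX_DWORDS).
 */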
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers live in the FORCEWAKE_BLITTER domain,
	 * so hold that forcewake for the duration of the writes.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

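/*
 * Allocate and initialise everything the GuC needs before the firmware can
 * be loaded: firmware image, shared page, log buffer, ADS, CT buffers and,
 * when supported, the submission structures.
 */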
int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = guc_shared_data_create(guc);
	if (ret)
		goto err_fw;
	GEM_BUG_ON(!guc->shared_data);

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_shared;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_is_submission_supported(guc)) {
		/*
		 * Submission state must be available at firmware load time
		 * if we intend to enable submission later.
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* Everything is pinned now, so compute the boot parameters. */
	guc_init_params(guc);

	/* Let the GGTT code know that the GuC is going to be used. */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_shared:
	guc_shared_data_destroy(guc);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_available(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_is_submission_supported(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	guc_shared_data_destroy(guc);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

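/*
 * intel_guc_send_mmio() - send a command to the GuC over the MMIO scratch
 * registers and wait for its response. Only used before the CT buffers are
 * available, i.e. to (de)register the command transport itself.
 */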
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only the action code in the first dword */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* Once CT is available, MMIO is only used to manage the CT buffers */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should take longer than 10ms; fast commands should
	 * still complete within 10us, hence the 10us/10ms wait below.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);

	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
						guc_send_reg(guc, i + 1));
	}

	/* Use the data field of the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Only handle messages the host has actually enabled */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

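/*
 * Tell the GuC firmware which forcewake domains (render/media) it should
 * sample; none are reported when RC6 is unavailable or coarse power gating
 * is disabled by workaround.
 */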
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;

	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bits 0 and 1 select the render and media domains */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

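/**
 * intel_guc_auth_huc() - ask the GuC to authenticate the HuC firmware
 * @guc: the intel_guc structure
 * @rsa_offset: GGTT offset of the HuC RSA signature blob
 *
 * Sends the INTEL_GUC_ACTION_AUTHENTICATE_HUC action so that the GuC
 * verifies the HuC firmware image.
 */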
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

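/**
 * intel_guc_suspend() - notify the GuC that we are entering a suspend state
 * @guc: the guc
 */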
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1,
	};

	/*
	 * The ENTER_S_STATE action only queues the save/restore operation in
	 * the firmware, so a successful send does not mean it has completed.
	 * SOFT_SCRATCH(14) is primed with an invalid sleep-state value and
	 * then polled until the firmware overwrites it with the result.
	 */
	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

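/**
 * intel_guc_reset_engine() - ask the GuC to reset a hung engine
 * @guc: the intel_guc structure
 * @engine: the engine to be reset
 */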
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	u32 data[7];

	GEM_BUG_ON(!guc->execbuf_client);

	data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
	data[1] = engine->guc_id;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

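/**
 * intel_guc_resume() - notify the GuC that we are resuming from suspend
 * @guc: the guc
 */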
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

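/**
 * intel_guc_allocate_vma() - allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of the area to allocate (both virtual space and memory)
 *
 * Creates a shmem object and pins it into the global GTT above the GuC
 * pin bias (PIN_OFFSET_BIAS), since the range below that bias maps the
 * GuC WOPCM and is not usable for GuC-addressable objects.
 *
 * Return: an i915_vma on success, otherwise an ERR_PTR.
 */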
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}