/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES_H__
#define __INTEL_ENGINE_TYPES_H__

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>

#include "i915_gem.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"

/* Legacy HW Engine ID */

#define RCS0_HW		0
#define VCS0_HW		1
#define BCS0_HW		2
#define VECS0_HW	3
#define VCS1_HW		4
#define VCS2_HW		6
#define VCS3_HW		7
#define VECS1_HW	12

/* Gen11+ HW Engine class + instance */
#define RENDER_CLASS		0
#define VIDEO_DECODE_CLASS	1
#define VIDEO_ENHANCEMENT_CLASS	2
#define COPY_ENGINE_CLASS	3
#define OTHER_CLASS		4
#define MAX_ENGINE_CLASS	4
#define MAX_ENGINE_INSTANCE	3

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 8

#define I915_CMD_HASH_ORDER 9

struct dma_fence;
struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_gt;
struct intel_uncore;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};
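
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver API):
 * the hardware status page is a CPU-visible page of dwords mapped at @addr,
 * so a status value is read by indexing into it; the real accessors wrap
 * this in the appropriate READ_ONCE/ordering handling.
 */
static inline u32 example_read_hwsp_dword(const struct intel_hw_status_page *hwsp,
					   int dword)
{
	return hwsp->addr[dword];
}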

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 last_ring;
	u32 last_head;
	unsigned long action_timestamp;
	struct intel_instdone instdone;
};

struct intel_ring {
	struct kref ref;
	struct i915_vma *vma;
	void *vaddr;

	/*
	 * As we have two types of rings, one global to the engine used
	 * by ringbuffer submission and those that are exclusive to a
	 * context used by execlists, we have to play safe and allow
	 * atomic updates to the pin_count. However, the actual pinning
	 * of the context is either done during initialisation for
	 * ringbuffer submission or serialised as part of the context
	 * pinning for execlists, and so we do not need a mutex ourselves
	 * to serialise intel_ring_pin/intel_ring_unpin.
	 */
	atomic_t pin_count;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 wrap;
	u32 effective_size;
};
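
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver API):
 * how the head/tail/size bookkeeping above yields the free space in the
 * ring, assuming a power-of-two ring size and a one-byte guard gap so that
 * head == tail unambiguously means "empty" (the real driver reserves a whole
 * cacheline rather than a single byte).
 */
static inline u32 example_intel_ring_free_space(const struct intel_ring *ring)
{
	/* unsigned arithmetic handles the wrap; the mask folds into range */
	return (ring->head - ring->tail - 1) & (ring->size - 1);
}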

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 * offset: specifies the batch starting position; also helpful if we
 * want to have multiple batches at different offsets based on some
 * criteria. It is not a requirement at the moment but provides an
 * option for future use.
 * size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};
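
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver API):
 * since the offsets and sizes above are stored in dwords within a single
 * page, converting to a byte offset is just a multiply by sizeof(u32).
 */
static inline u32 example_wa_bb_byte_offset(const struct i915_wa_ctx_bb *bb)
{
	return bb->offset * sizeof(u32); /* dwords -> bytes */
}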

#define I915_MAX_VCS	4
#define I915_MAX_VECS	2

/*
 * Engine ID definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
};
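
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver API):
 * _VCS()/_VECS() map an instance number to its enum intel_engine_id, from
 * which the corresponding bit in an intel_engine_mask_t follows directly.
 */
static inline intel_engine_mask_t example_vcs_engine_mask(unsigned int instance)
{
	/* equivalent to BIT(_VCS(instance)); instance must be < I915_MAX_VCS */
	return (intel_engine_mask_t)1 << _VCS(instance);
}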

struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
	bool inject_hang;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state
 * of the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @timer: kick the current context if its timeslice expires
	 */
	struct timer_list timer;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

#define EXECLIST_MAX_PORTS 2
	/**
	 * @active: the currently known context executing on HW
	 */
	struct i915_request * const *active;
	/**
	 * @inflight: the set of contexts submitted and acknowledged by HW
	 *
	 * The set of inflight contexts is managed by reading CS events
	 * from the HW. On a context-switch event (not preemption), we
	 * know the HW has transitioned from port0 to port1, and we
	 * advance our inflight/active tracking accordingly. (A small
	 * port-iteration sketch follows this struct.)
	 */
	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
	/**
	 * @pending: the next set of contexts submitted to ELSP
	 *
	 * We store the array of contexts that we submit to HW (via ELSP) and
	 * promote them to the inflight array once HW has signaled the
	 * preemption or idle-to-active event.
	 */
	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @switch_priority_hint: Second context priority.
	 *
	 * We submit multiple contexts to the HW simultaneously and would
	 * like to occasionally switch between them to emulate timeslicing.
	 * To know when timeslicing is suitable, we track the priority of
	 * the context submitted second.
	 */
	int switch_priority_hint;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an inflight
	 * request that we wanted to preempt but which has since completed,
	 * at the time of dequeuing the priority hint may no longer match
	 * the highest available request priority.
	 */
	int queue_priority_hint;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;
	struct rb_root_cached virtual;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u32 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
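
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver API):
 * the @inflight and @pending arrays above are NULL-terminated, one entry per
 * ELSP port plus a sentinel, so counting the submitted ports is a simple
 * scan up to the first NULL.
 */
static inline unsigned int
example_execlists_num_inflight(const struct intel_engine_execlists *execlists)
{
	struct i915_request * const *port = execlists->inflight;
	unsigned int count = 0;

	while (*port++)
		count++;

	return count;
}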

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	enum intel_engine_id legacy_idx;

	unsigned int hw_id;
	unsigned int guc_id;

	intel_engine_mask_t mask;

	u8 class;
	u8 instance;

	u16 uabi_class;
	u16 uabi_instance;

	u32 context_size;
	u32 mmio_base;

	u32 uabi_capabilities;

	struct rb_node uabi_node;

	struct intel_sseu sseu;

	struct {
		spinlock_t lock;
		struct list_head requests;
	} active;

	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */

	intel_engine_mask_t saturated; /* submitting semaphores too late? */

	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	struct {
		struct intel_ring *ring;
		struct intel_timeline *timeline;
	} legacy;

	/*
	 * Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing the
	 * overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct list_head signalers;

		struct irq_work irq_work; /* for use from inside irq_lock */

		unsigned int irq_enabled;

		bool irq_armed;
	} breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct intel_engine_pool pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*resume)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);
		void (*reset)(struct intel_engine_cs *engine, bool stalled);
		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
				     u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;

	/*
	 * Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	/*
	 * Called on signaling of a SUBMIT_FENCE, passing along the signaling
	 * request down to the bonded pairs.
	 */
	void (*bond_execute)(struct i915_request *rq,
			     struct dma_fence *signal);

	/*
	 * Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void (*schedule)(struct i915_request *request,
			 const struct i915_sched_attr *attr);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	void (*destroy)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL       BIT(5)
#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Returns 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, busy as active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};
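
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver API):
 * one way the emit_* vfuncs and their flag bits above combine when writing a
 * batchbuffer start into a request; the real emission paths live in the
 * submission backends.
 */
static inline int example_emit_batch(struct intel_engine_cs *engine,
				     struct i915_request *rq,
				     u64 batch_addr, u32 batch_len)
{
	int err;

	/* invalidate caches before the new batch samples memory */
	err = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (err)
		return err;

	/* a trusted batch could pass I915_DISPATCH_SECURE instead of 0 */
	return engine->emit_bb_start(rq, batch_addr, batch_len, 0);
}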

static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}

#define instdone_slice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
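
/*
 * Usage sketch (illustrative only): the iterator above is meant for walking
 * the per-slice/subslice arrays captured in struct intel_instdone, e.g.
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(i915, slice, subslice)
 *		record(instdone->sampler[slice][subslice],
 *		       instdone->row[slice][subslice]);
 *
 * where i915 is the struct drm_i915_private device, instdone has been filled
 * in by the error-capture/hangcheck code, and record() stands in for whatever
 * the caller does with each value.
 */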

#endif /* __INTEL_ENGINE_TYPES_H__ */