This source file includes the following definitions.
- intel_engine_lookup_user
- intel_engine_add_user
- engine_cmp
- get_engines
- sort_engines
- set_scheduler_caps
- intel_engine_class_repr
- legacy_ring_idx
- add_legacy_ring
- intel_engines_driver_register
- intel_engines_has_context_isolation
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"

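/*
 * intel_engine_lookup_user - find an engine by its uAPI (class, instance)
 *
 * Binary search of the uabi_engines rbtree, which is keyed by
 * (uabi_class, uabi_instance). Returns the matching engine, or NULL if
 * that class/instance pair is not exposed to userspace.
 */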
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	struct rb_node *p = i915->uabi_engines.rb_node;

	while (p) {
		struct intel_engine_cs *it =
			rb_entry(p, typeof(*it), uabi_node);

		if (class < it->uabi_class)
			p = p->rb_left;
		else if (class > it->uabi_class ||
			 instance > it->uabi_instance)
			p = p->rb_right;
		else if (instance < it->uabi_instance)
			p = p->rb_left;
		else
			return it;
	}

	return NULL;
}

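/*
 * Queue an engine for later registration with userspace. The rb_node
 * storage in engine->uabi_node is reused as a lockless llist entry until
 * intel_engines_driver_register() sorts the engines and builds the final
 * rbtree, hence the casts here and in get_engines()/sort_engines().
 */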
void intel_engine_add_user(struct intel_engine_cs *engine)
{
	llist_add((struct llist_node *)&engine->uabi_node,
		  (struct llist_head *)&engine->i915->uabi_engines);
}

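/* Translation from internal engine class to the class reported via uAPI */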
static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
};

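/*
 * list_sort() comparator: order engines by uAPI class first, then by
 * hardware instance within the class.
 */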
static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
{
	const struct intel_engine_cs *a =
		container_of((struct rb_node *)A, typeof(*a), uabi_node);
	const struct intel_engine_cs *b =
		container_of((struct rb_node *)B, typeof(*b), uabi_node);

	if (uabi_classes[a->class] < uabi_classes[b->class])
		return -1;
	if (uabi_classes[a->class] > uabi_classes[b->class])
		return 1;

	if (a->instance < b->instance)
		return -1;
	if (a->instance > b->instance)
		return 1;

	return 0;
}

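/*
 * Atomically take the whole llist of engines queued by
 * intel_engine_add_user().
 */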
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all((struct llist_head *)&i915->uabi_engines);
}

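/*
 * Move the queued engines onto a regular list (again reusing the
 * uabi_node storage) and sort them into (class, instance) order.
 */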
static void sort_engines(struct drm_i915_private *i915,
			 struct list_head *engines)
{
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, get_engines(i915)) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)pos, typeof(*engine),
				     uabi_node);
		list_add((struct list_head *)&engine->uabi_node, engines);
	}
	list_sort(NULL, engines, engine_cmp);
}

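/*
 * Compute the scheduler capabilities common to all user-visible engines:
 * a capability is advertised only if every engine supports it, and the
 * whole mask is cleared unless every engine has a scheduler attached
 * (engine->schedule).
 */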
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) {
		int i;

		if (engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}

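/*
 * Return the short name prefix used for an engine class in the
 * user-facing engine names ("rcs", "bcs", "vcs", "vecs"), or "xxx"
 * for an unknown class.
 */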
const char *intel_engine_class_repr(u8 class)
{
	static const char * const uabi_names[] = {
		[RENDER_CLASS] = "rcs",
		[COPY_ENGINE_CLASS] = "bcs",
		[VIDEO_DECODE_CLASS] = "vcs",
		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
	};

	if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
		return "xxx";

	return uabi_names[class];
}

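/*
 * Cursor used while walking the sorted engine list to hand out legacy
 * ring indices; the instance counter restarts whenever the (gt, class)
 * pair changes.
 */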
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};

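/*
 * Map the cursor's (class, instance) onto the fixed legacy engine index
 * (RCS0, BCS0, VCS0.., VECS0..), or -1 if the class has no legacy slot
 * or the instance exceeds the legacy maximum.
 */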
static int legacy_ring_idx(const struct legacy_ring *ring)
{
	static const struct {
		u8 base, max;
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return -1;

	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return -1;

	return map[ring->class].base + ring->instance;
}

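/*
 * Record the engine at its legacy index in gt->engine[] and remember the
 * index in engine->legacy_idx, advancing the per-class instance counter.
 */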
static void add_legacy_ring(struct legacy_ring *ring,
			    struct intel_engine_cs *engine)
{
	int idx;

	if (engine->gt != ring->gt || engine->class != ring->class) {
		ring->gt = engine->gt;
		ring->class = engine->class;
		ring->instance = 0;
	}

	idx = legacy_ring_idx(ring);
	if (unlikely(idx == -1))
		return;

	GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine));
	ring->gt->engine[idx] = engine;
	ring->instance++;

	engine->legacy_idx = idx;
}

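/*
 * intel_engines_driver_register - publish the engines to userspace
 *
 * Sort the engines queued by intel_engine_add_user() into a stable
 * (class, instance) order, assign each one its uAPI class/instance and
 * final user-facing name, insert it into the uabi_engines rbtree and the
 * legacy ring map, then derive the global scheduler capabilities. When
 * built with selftests and GEM debugging, the resulting mapping is
 * cross-checked and discarded if inconsistent.
 */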
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	u8 uabi_instances[4] = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);
		char old[sizeof(engine->name)];

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
		engine->uabi_instance = uabi_instances[engine->uabi_class]++;

		/* Replace the internal name with the final user-facing name */
		memcpy(old, engine->name, sizeof(engine->name));
		scnprintf(engine->name, sizeof(engine->name), "%s%u",
			  intel_engine_class_repr(engine->class),
			  engine->uabi_instance);
		DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);

		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Record the engine at its fixed legacy index in gt->engine[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
			for (inst = 0; inst < uabi_instances[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Check that every engine within a class agrees on whether
		 * a default context state image is present.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		if (WARN(errors, "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}

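/*
 * Return a mask of uAPI engine classes for which a default context state
 * image (engine->default_state) has been recorded.
 */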
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int which;

	which = 0;
	for_each_uabi_engine(engine, i915)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}