This source file includes the following definitions:
- __engine_unpark
- __timeline_mark_lock
- __timeline_mark_unlock
- __timeline_mark_lock
- __timeline_mark_unlock
- switch_to_kernel_context
- __engine_park
- intel_engine_init__pm
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"

static int __engine_unpark(struct intel_wakeref *wf)
{
        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);
        void *map;

        GEM_TRACE("%s\n", engine->name);

        intel_gt_pm_get(engine->gt);

        /* Pin the default state for fast resets from atomic context. */
        map = NULL;
        if (engine->default_state)
                map = i915_gem_object_pin_map(engine->default_state,
                                              I915_MAP_WB);
        if (!IS_ERR_OR_NULL(map))
                engine->pinned_default_state = map;

        if (engine->unpark)
                engine->unpark(engine);

        intel_engine_init_hangcheck(engine);
        return 0;
}

#if IS_ENABLED(CONFIG_LOCKDEP)

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
        unsigned long flags;

        local_irq_save(flags);
        mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

        return flags;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
                                          unsigned long flags)
{
        mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
        local_irq_restore(flags);
}

#else

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
        return 0;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
                                          unsigned long flags)
{
}

#endif
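
/*
 * Attempt to switch the engine into the kernel context before parking:
 * one final, unpreemptable request is submitted to the kernel context so
 * that the engine idles with a known context loaded. Returns true if the
 * engine is already idle in the kernel context (or the GT is wedged) and
 * may be parked immediately; false if the barrier request was queued and
 * parking must be deferred until it retires.
 */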
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
        struct i915_request *rq;
        unsigned long flags;
        bool result = true;

        /* Already inside the kernel context, safe to power down. */
        if (engine->wakeref_serial == engine->serial)
                return true;

        /* GPU is pointing into the void, as good as in the kernel context. */
        if (intel_gt_is_wedged(engine->gt))
                return true;

        /*
         * Note, we do this without taking the timeline->mutex: we may be
         * called while retiring the kernel context and so may already be
         * underneath the timeline->mutex. Instead, we rely on the
         * exclusive property of __engine_park(), which prevents anyone
         * else from creating a request on this engine while we park it.
         * We only mark the mutex as acquired for lockdep's benefit.
         */
        flags = __timeline_mark_lock(engine->kernel_context);

        rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
        if (IS_ERR(rq))
                /* Context switch failed, hope for the best! Maybe reset? */
                goto out_unlock;

        intel_timeline_enter(rq->timeline);

        /* Check again on the next retirement. */
        engine->wakeref_serial = engine->serial + 1;
        i915_request_add_active_barriers(rq);

        /* Install ourselves as a preemption barrier. */
        rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
        __i915_request_commit(rq);

        /* Release our exclusive hold on the engine, deferring the park. */
        __intel_wakeref_defer_park(&engine->wakeref);
        __i915_request_queue(rq, NULL);

        result = false;
out_unlock:
        __timeline_mark_unlock(engine->kernel_context, flags);
        return result;
}
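
/*
 * Called on the final intel_wakeref put. Returns 0 to complete parking,
 * or -EBUSY if a kernel-context switch was queued, in which case the
 * engine is kept awake until that barrier request is retired.
 */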
static int __engine_park(struct intel_wakeref *wf)
{
        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);

        engine->saturated = 0;

        /*
         * If one and only one request is completed between pm events,
         * we know that we are inside the kernel context and it is
         * safe to power down. (We are paranoid in case that runtime
         * suspend causes corruption to the active context image, and
         * want to avoid that impacting userspace.)
         */
        if (!switch_to_kernel_context(engine))
                return -EBUSY;

        GEM_TRACE("%s\n", engine->name);

        intel_engine_disarm_breadcrumbs(engine);
        intel_engine_pool_park(&engine->pool);

        /* Must be reset upon idling, or we may miss the busy wakeup. */
        GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

        if (engine->park)
                engine->park(engine);

        if (engine->pinned_default_state) {
                i915_gem_object_unpin_map(engine->default_state);
                engine->pinned_default_state = NULL;
        }

        engine->execlists.no_priolist = false;

        intel_gt_pm_put(engine->gt);
        return 0;
}

static const struct intel_wakeref_ops wf_ops = {
        .get = __engine_unpark,
        .put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
        struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;

        intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
}
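
/*
 * A minimal usage sketch (illustrative, assuming the intel_engine_pm_get()
 * and intel_engine_pm_put() wrappers from intel_engine_pm.h): callers pin
 * the engine awake for the duration of request construction; the first
 * get runs __engine_unpark() and the last put runs __engine_park():
 *
 *      intel_engine_pm_get(engine);
 *      ... construct and submit requests ...
 *      intel_engine_pm_put(engine);
 */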

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif