This source file includes the following definitions:
- call_idle_barriers
- i915_gem_park
- idle_work_handler
- retire_work_handler
- pm_notifier
- switch_to_kernel_context_sync
- i915_gem_load_power_context
- i915_gem_suspend
- first_mm_object
- i915_gem_suspend_late
- i915_gem_resume
- i915_gem_init__pm
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	/* Claim the entire list of pending barrier tasks in one atomic op. */
	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		/* The llist node aliases the i915_active_request.link list_head. */
		struct i915_active_request *active =
			container_of((struct list_head *)node,
				     typeof(*active), link);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		/* There is no request to wait upon; run the callback directly. */
		active->retire(active, NULL);
	}
}

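/*
 * i915_gem_park - final bookkeeping as the GPU goes idle
 *
 * Flush the idle barriers accumulated on each engine, let the vma
 * tracking know the device is parked, and drop the global active
 * references. The caller must hold struct_mutex.
 */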
static void i915_gem_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id)
		call_idle_barriers(engine);

	i915_vma_parked(i915);

	i915_globals_park();
}

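/*
 * Worker queued from the INTEL_GT_PARK notification. Under the wakeref
 * lock it re-checks that the GT is still idle and that no new work has
 * raced in; only then does it park the device. Otherwise the periodic
 * retire worker is re-armed to try again in a second.
 */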
static void idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.idle_work);
	bool park;

	cancel_delayed_work_sync(&i915->gem.retire_work);
	mutex_lock(&i915->drm.struct_mutex);

	intel_wakeref_lock(&i915->gt.wakeref);
	park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
		!work_pending(work));
	intel_wakeref_unlock(&i915->gt.wakeref);
	if (park)
		i915_gem_park(i915);
	else
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));

	mutex_unlock(&i915->drm.struct_mutex);
}

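/*
 * Periodic worker that opportunistically retires completed requests
 * (skipping a beat if struct_mutex is contended) and re-arms itself
 * roughly once per second.
 */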
static void retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.retire_work.work);

	/* Come back later if the device is busy... */
	if (mutex_trylock(&i915->drm.struct_mutex)) {
		i915_retire_requests(i915);
		mutex_unlock(&i915->drm.struct_mutex);
	}

	queue_delayed_work(i915->wq,
			   &i915->gem.retire_work,
			   round_jiffies_up_relative(HZ));
}

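/*
 * GT power notifications: on unpark, revive the global active references
 * and kick the retire worker; on park, defer the final cleanup to
 * idle_work_handler().
 */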
static int pm_notifier(struct notifier_block *nb,
		       unsigned long action,
		       void *data)
{
	struct drm_i915_private *i915 =
		container_of(nb, typeof(*i915), gem.pm_notifier);

	switch (action) {
	case INTEL_GT_UNPARK:
		i915_globals_unpark();
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));
		break;

	case INTEL_GT_PARK:
		queue_work(i915->wq, &i915->gem.idle_work);
		break;
	}

	return NOTIFY_OK;
}

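/*
 * Idle the GPU by switching all engines back to the kernel context,
 * retiring requests as they complete. If the GPU fails to idle within
 * I915_GEM_IDLE_TIMEOUT it is declared wedged. Returns true if the GPU
 * idled without being wedged.
 */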
static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
	bool result = !intel_gt_is_wedged(gt);

	do {
		if (i915_gem_wait_for_idle(gt->i915,
					   I915_WAIT_LOCKED |
					   I915_WAIT_FOR_IDLE_BOOST,
					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
			/* XXX hide warning from gem_eio */
			if (i915_modparams.reset) {
				dev_err(gt->i915->drm.dev,
					"Failed to idle engines, declaring wedged!\n");
				GEM_TRACE_DUMP();
			}

			/*
			 * Forcibly cancel outstanding work and leave
			 * the GPU quiet.
			 */
			intel_gt_set_wedged(gt);
			result = false;
		}
	} while (i915_retire_requests(gt->i915) && result);

	if (intel_gt_pm_wait_for_idle(gt))
		result = false;

	return result;
}

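/**
 * i915_gem_load_power_context - load a known good context image
 * @i915: the i915 device
 *
 * Thin wrapper around switch_to_kernel_context_sync(), used on resume
 * (and at init) to ensure a coherent context is loaded for powersaving.
 */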
bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
	return switch_to_kernel_context_sync(&i915->gt);
}

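/**
 * i915_gem_suspend - first phase of GEM suspend
 * @i915: the i915 device
 *
 * Switch away from user contexts so that their images are flushed to
 * memory, then stop hangcheck and quiesce the microcontrollers (GuC/HuC).
 */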
void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	mutex_lock(&i915->drm.struct_mutex);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when we
	 * actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	switch_to_kernel_context_sync(&i915->gt);

	mutex_unlock(&i915->drm.struct_mutex);

	cancel_delayed_work_sync(&i915->gt.hangcheck.work);

	i915_gem_drain_freed_objects(i915);

	intel_uc_suspend(&i915->gt.uc);
}

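/* Peek at the first object on @list, or NULL if the list is empty. */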
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
	return list_first_entry_or_null(list,
					struct drm_i915_gem_object,
					mm.link);
}

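/**
 * i915_gem_suspend_late - final phase of GEM suspend
 * @i915: the i915 device
 *
 * Flush every remaining shrinkable or purgeable object out of the GTT
 * write domain, so that no stray writes can land once the backing
 * storage is dismantled, and then sanitize (reset) the GPU.
 */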
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	unsigned long flags;

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		LIST_HEAD(keep);

		while ((obj = first_mm_object(*phase))) {
			list_move_tail(&obj->mm.link, &keep);

			/* Beware the background _i915_gem_free_objects */
			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}

		list_splice_tail(&keep, *phase);
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	i915_gem_sanitize(i915);
}

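/**
 * i915_gem_resume - restore the GEM state after suspend
 * @i915: the i915 device
 *
 * Reinitialize the hardware, resume the GT and microcontrollers, and
 * reload the kernel context. Any failure here is terminal: the GPU is
 * declared wedged.
 */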
void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	if (i915_gem_init_hw(i915))
		goto err_wedged;

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just
	 * reset it and start again.
	 */
	if (intel_gt_resume(&i915->gt))
		goto err_wedged;

	intel_uc_resume(&i915->gt.uc);

	/* Always reload a context for powersaving. */
	if (!i915_gem_load_power_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
	if (!intel_gt_is_wedged(&i915->gt)) {
		dev_err(i915->drm.dev,
			"Failed to re-initialize GPU, declaring it wedged!\n");
		intel_gt_set_wedged(&i915->gt);
	}
	goto out_unlock;
}

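/**
 * i915_gem_init__pm - one-time setup of GEM power management
 * @i915: the i915 device
 *
 * Initialize the idle and retire workers and register for GT park/unpark
 * notifications.
 */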
void i915_gem_init__pm(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->gem.idle_work, idle_work_handler);
	INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);

	i915->gem.pm_notifier.notifier_call = pm_notifier;
	blocking_notifier_chain_register(&i915->gt.pm_notifications,
					 &i915->gem.pm_notifier);
}