This source file includes the following definitions.
- i915_gem_context_is_closed
- i915_gem_context_set_closed
- i915_gem_context_no_error_capture
- i915_gem_context_set_no_error_capture
- i915_gem_context_clear_no_error_capture
- i915_gem_context_is_bannable
- i915_gem_context_set_bannable
- i915_gem_context_clear_bannable
- i915_gem_context_is_recoverable
- i915_gem_context_set_recoverable
- i915_gem_context_clear_recoverable
- i915_gem_context_is_banned
- i915_gem_context_set_banned
- i915_gem_context_force_single_submission
- i915_gem_context_set_force_single_submission
- i915_gem_context_user_engines
- i915_gem_context_set_user_engines
- i915_gem_context_clear_user_engines
- i915_gem_context_pin_hw_id
- i915_gem_context_unpin_hw_id
- i915_gem_context_is_kernel
- i915_gem_context_get
- i915_gem_context_put
- i915_gem_context_engines
- i915_gem_context_lock_engines
- i915_gem_context_unlock_engines
- i915_gem_context_get_engine
- i915_gem_engines_iter_init
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

7 #ifndef __I915_GEM_CONTEXT_H__
8 #define __I915_GEM_CONTEXT_H__
9
10 #include "i915_gem_context_types.h"
11
12 #include "gt/intel_context.h"
13
14 #include "i915_gem.h"
15 #include "i915_scheduler.h"
16 #include "intel_device_info.h"
17
18 struct drm_device;
19 struct drm_file;
20
/* Test whether the context has been marked closed (CONTEXT_CLOSED set in ctx->flags). */
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}
25
/*
 * Mark the context as closed. Closing is one-shot: the GEM_BUG_ON asserts
 * that the context has not already been closed. There is deliberately no
 * matching clear helper.
 */
static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	set_bit(CONTEXT_CLOSED, &ctx->flags);
}
31
/* Test UCONTEXT_NO_ERROR_CAPTURE in the user-controlled flag word. */
static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
36
/* Set UCONTEXT_NO_ERROR_CAPTURE (atomic set_bit on ctx->user_flags). */
static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
41
/* Clear UCONTEXT_NO_ERROR_CAPTURE (atomic clear_bit on ctx->user_flags). */
static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
46
/* Test UCONTEXT_BANNABLE in the user-controlled flag word. */
static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
51
/* Set UCONTEXT_BANNABLE (atomic set_bit on ctx->user_flags). */
static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
56
/* Clear UCONTEXT_BANNABLE (atomic clear_bit on ctx->user_flags). */
static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
61
/* Test UCONTEXT_RECOVERABLE in the user-controlled flag word. */
static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
66
/* Set UCONTEXT_RECOVERABLE (atomic set_bit on ctx->user_flags). */
static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
71
/* Clear UCONTEXT_RECOVERABLE (atomic clear_bit on ctx->user_flags). */
static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
76
/*
 * Test whether the context has been banned (CONTEXT_BANNED in the
 * kernel-internal ctx->flags, distinct from the UCONTEXT_* user_flags).
 */
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}
81
/* Mark the context banned. Note there is no clear helper: a ban is permanent. */
static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_BANNED, &ctx->flags);
}
86
/* Test CONTEXT_FORCE_SINGLE_SUBMISSION in the kernel-internal ctx->flags. */
static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}
91
/*
 * Set CONTEXT_FORCE_SINGLE_SUBMISSION. Uses the non-atomic __set_bit,
 * unlike the other setters here -- presumably only called before the
 * context is visible to other threads; confirm at call sites.
 */
static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}
96
/* Test whether userspace has supplied its own engine map (CONTEXT_USER_ENGINES). */
static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
102
/* Set CONTEXT_USER_ENGINES (atomic set_bit on ctx->flags). */
static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
108
/* Clear CONTEXT_USER_ENGINES (atomic clear_bit on ctx->flags). */
static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
114
115 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
116 static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
117 {
118 if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
119 return 0;
120
121 return __i915_gem_context_pin_hw_id(ctx);
122 }
123
/*
 * Drop a pin taken by i915_gem_context_pin_hw_id(). The GEM_BUG_ON
 * catches unbalanced unpins (the count must be non-zero on entry).
 */
static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
	atomic_dec(&ctx->hw_id_pin_count);
}
129
130 static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
131 {
132 return !ctx->file_priv;
133 }
134
135
136 int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
137 void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
138
139 int i915_gem_context_open(struct drm_i915_private *i915,
140 struct drm_file *file);
141 void i915_gem_context_close(struct drm_file *file);
142
143 void i915_gem_context_release(struct kref *ctx_ref);
144
145 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
146 struct drm_file *file);
147 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
148 struct drm_file *file);
149
150 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
151 struct drm_file *file);
152 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
153 struct drm_file *file);
154 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
155 struct drm_file *file_priv);
156 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
157 struct drm_file *file_priv);
158 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
159 struct drm_file *file);
160
161 struct i915_gem_context *
162 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
163
/*
 * Acquire an additional reference on the context; release with
 * i915_gem_context_put(). Returns @ctx for call-chaining convenience.
 */
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}
170
/*
 * Release a context reference; i915_gem_context_release() is invoked
 * when the last reference is dropped.
 */
static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}
175
/*
 * Return the context's engine map. Caller must hold ctx->engines_mutex;
 * rcu_dereference_protected() has lockdep verify that at runtime.
 */
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines,
					 lockdep_is_held(&ctx->engines_mutex));
}
182
/*
 * Lock the engine map against replacement and return it. Pair with
 * i915_gem_context_unlock_engines(); the __acquires annotation is for
 * sparse lock-balance checking.
 */
static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
	__acquires(&ctx->engines_mutex)
{
	mutex_lock(&ctx->engines_mutex);
	return i915_gem_context_engines(ctx);
}
190
/* Drop the engine-map lock taken by i915_gem_context_lock_engines(). */
static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
	__releases(&ctx->engines_mutex)
{
	mutex_unlock(&ctx->engines_mutex);
}
197
198 static inline struct intel_context *
199 i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
200 {
201 struct intel_context *ce = ERR_PTR(-EINVAL);
202
203 rcu_read_lock(); {
204 struct i915_gem_engines *e = rcu_dereference(ctx->engines);
205 if (likely(idx < e->num_engines && e->engines[idx]))
206 ce = intel_context_get(e->engines[idx]);
207 } rcu_read_unlock();
208
209 return ce;
210 }
211
212 static inline void
213 i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
214 struct i915_gem_engines *engines)
215 {
216 GEM_BUG_ON(!engines);
217 it->engines = engines;
218 it->idx = 0;
219 }
220
221 struct intel_context *
222 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
223
/*
 * Iterate @ce over every populated intel_context in @engines using
 * iterator state @it; the loop ends when i915_gem_engines_iter_next()
 * returns NULL.
 */
#define for_each_gem_engine(ce, engines, it) \
	for (i915_gem_engines_iter_init(&(it), (engines)); \
	     ((ce) = i915_gem_engines_iter_next(&(it)));)
227
228 struct i915_lut_handle *i915_lut_handle_alloc(void);
229 void i915_lut_handle_free(struct i915_lut_handle *lut);
230
231 #endif