This source file includes the following definitions:
- __busy_read_flag
- __busy_write_id
- __busy_set_if_active
- busy_check_reader
- busy_check_writer
- i915_gem_busy_ioctl
#include "gt/intel_engine.h"

#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"

static __always_inline u32 __busy_read_flag(u16 id)
{
        if (id == (u16)I915_ENGINE_CLASS_INVALID)
                return 0xffff0000u;

        GEM_BUG_ON(id >= 16);
        return 0x10000u << id;
}

static __always_inline u32 __busy_write_id(u16 id)
{
23
24
25
26
27
28
29
30
31
        if (id == (u16)I915_ENGINE_CLASS_INVALID)
                return 0xffffffffu;

        return (id + 1) | __busy_read_flag(id);
}

static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
{
        const struct i915_request *rq;

        /*
         * Only report on native i915 fences, and check the request's
         * current status directly so that an already completed request
         * does not mark the object as busy.
         */
        if (!dma_fence_is_i915(fence))
                return 0;

        /* Open-coded to_request() so the const qualifier is preserved. */
        rq = container_of(fence, const struct i915_request, fence);
        if (i915_request_completed(rq))
                return 0;

        /* The flag helpers take the engine class as a u16; guard the type. */
        BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
        return flag(rq->engine->uabi_class);
}

static __always_inline unsigned int
busy_check_reader(const struct dma_fence *fence)
{
        return __busy_set_if_active(fence, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct dma_fence *fence)
{
        if (!fence)
                return 0;

        return __busy_set_if_active(fence, __busy_write_id);
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_busy *args = data;
        struct drm_i915_gem_object *obj;
        struct dma_resv_list *list;
        unsigned int seq;
        int err;

        err = -ENOENT;
        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, args->handle);
        if (!obj)
                goto out;
        /*
         * Only native i915 fences are considered, so the object may be
         * reported as idle even though foreign (non-i915) rendering is
         * still outstanding. In exchange, the result identifies which
         * i915 engine classes are currently using the object. The state
         * is sampled locklessly under RCU and validated against the
         * reservation object's seqcount, retrying if it changed while
         * we were looking.
         */
retry:
        seq = raw_read_seqcount(&obj->base.resv->seq);

        /* The exclusive fence is reported as both a read and a write engine. */
        args->busy =
                busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));

        /* Shared fences contribute to the read set of engines. */
        list = rcu_dereference(obj->base.resv->fence);
        if (list) {
                unsigned int shared_count = list->shared_count, i;

                for (i = 0; i < shared_count; ++i) {
                        struct dma_fence *fence =
                                rcu_dereference(list->shared[i]);

                        args->busy |= busy_check_reader(fence);
                }
        }

        if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
                goto retry;

        err = 0;
out:
        rcu_read_unlock();
        return err;
}
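
A minimal userspace sketch of how the result of this ioctl is typically decoded. The helper name, the libdrm drmIoctl() wrapper and the already-open DRM file descriptor are illustrative assumptions, not part of this file; the bit layout follows __busy_write_id() and __busy_read_flag() above: the low 16 bits of busy hold the writing engine class plus one, and the high 16 bits hold one read flag per engine class.

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: report the busy state of a GEM object by handle. */
static int report_busy(int drm_fd, uint32_t handle)
{
        struct drm_i915_gem_busy busy = { .handle = handle };

        if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
                return -1;              /* errno is ENOENT for an unknown handle */

        if (!busy.busy) {
                printf("object %u is idle\n", handle);
        } else {
                uint16_t writer = busy.busy & 0xffff;   /* engine class + 1, 0 = no writer */
                uint16_t readers = busy.busy >> 16;     /* one READ flag per engine class */

                printf("object %u: writer class %d, reader mask 0x%04x\n",
                       handle, (int)writer - 1, readers);
        }
        return 0;
}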