This source file includes the following definitions:
- radeon_mn_invalidate_range_start
- radeon_mn_release
- radeon_mn_alloc_notifier
- radeon_mn_free_notifier
- radeon_mn_register
- radeon_mn_unregister

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>

#include <drm/drm.h>

#include "radeon.h"
struct radeon_mn {
	struct mmu_notifier	mn;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root_cached	objects;
};

struct radeon_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @range: the VMA under invalidation
 *
 * Block for all BOs between start and end to become idle and
 * unmap them by moving them into the CPU domain again.
 */
static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
					    const struct mmu_notifier_range *range)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	struct ttm_operation_ctx ctx = { false, false };
	struct interval_tree_node *it;
	unsigned long end;
	int ret = 0;

	/* notification is exclusive, but interval is inclusive */
	end = range->end - 1;

	/* TODO we should be able to split locking for interval tree and
	 * the tear down.
	 */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&rmn->lock);
	else if (!mutex_trylock(&rmn->lock))
		return -EAGAIN;

	it = interval_tree_iter_first(&rmn->objects, range->start, end);
	while (it) {
		struct radeon_mn_node *node;
		struct radeon_bo *bo;
		long r;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			goto out_unlock;
		}

		node = container_of(it, struct radeon_mn_node, it);
		it = interval_tree_iter_next(it, range->start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			/* skip BOs that have no backing pages bound */
			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
				continue;

			r = radeon_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

			r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
						      true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

			/* move the BO to the CPU domain so the GPU mapping goes away */
			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			radeon_bo_unreserve(bo);
		}
	}

out_unlock:
	mutex_unlock(&rmn->lock);

	return ret;
}

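/**
 * radeon_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Treat the address space teardown as an unmap of the whole range so
 * that every registered BO is moved back to the CPU domain.
 */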
static void radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
		.flags = 0,
		.event = MMU_NOTIFY_UNMAP,
	};

	radeon_mn_invalidate_range_start(mn, &range);
}

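/**
 * radeon_mn_alloc_notifier - allocate the per-mm notifier state
 *
 * @mm: the mm we are attaching to
 *
 * Called by mmu_notifier_get() when no notifier is registered
 * for @mm yet.
 */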
static struct mmu_notifier *radeon_mn_alloc_notifier(struct mm_struct *mm)
{
	struct radeon_mn *rmn;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn)
		return ERR_PTR(-ENOMEM);

	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT_CACHED;
	return &rmn->mn;
}

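/**
 * radeon_mn_free_notifier - free the per-mm notifier state
 *
 * @mn: our notifier
 *
 * Called once the last reference from mmu_notifier_put() is dropped.
 */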
static void radeon_mn_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct radeon_mn, mn));
}

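/*
 * Notifier lifetime is managed by mmu_notifier_get()/mmu_notifier_put():
 * the core uses alloc_notifier/free_notifier to create and destroy the
 * per-mm radeon_mn instance on demand.
 */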
static const struct mmu_notifier_ops radeon_mn_ops = {
	.release = radeon_mn_release,
	.invalidate_range_start = radeon_mn_invalidate_range_start,
	.alloc_notifier = radeon_mn_alloc_notifier,
	.free_notifier = radeon_mn_free_notifier,
};

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr address we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	unsigned long end = addr + radeon_bo_size(bo) - 1;
	struct mmu_notifier *mn;
	struct radeon_mn *rmn;
	struct radeon_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	mn = mmu_notifier_get(&radeon_mn_ops, current->mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);
	rmn = container_of(mn, struct radeon_mn, mn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

	/* merge all intervals overlapping [addr, end] into a single node,
	 * collecting their BOs; the last removed node is reused below
	 */
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct radeon_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}

/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	struct radeon_mn *rmn = bo->mn;
	struct list_head *head;

	if (!rmn)
		return;

	mutex_lock(&rmn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	list_del(&bo->mn_list);

	/* if the interval node is now empty, remove and free it */
	if (list_empty(head)) {
		struct radeon_mn_node *node;

		node = container_of(head, struct radeon_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);

	mmu_notifier_put(&rmn->mn);
	bo->mn = NULL;
}