This source file includes the following definitions:
- amdgpu_mem_info_gtt_total_show
- amdgpu_mem_info_gtt_used_show
- amdgpu_gtt_mgr_init
- amdgpu_gtt_mgr_fini
- amdgpu_gtt_mgr_has_gart_addr
- amdgpu_gtt_mgr_alloc
- amdgpu_gtt_mgr_new
- amdgpu_gtt_mgr_del
- amdgpu_gtt_mgr_usage
- amdgpu_gtt_mgr_recover
- amdgpu_gtt_mgr_debug
#include "amdgpu.h"

/* State of the GTT manager: a drm_mm range allocator for the GART
 * address space plus a counter of still available pages.
 */
struct amdgpu_gtt_mgr {
	struct drm_mm mm;
	spinlock_t lock;
	atomic64_t available;
};

/* One allocation in the GTT domain, linking the drm_mm node back to
 * the buffer object it belongs to.
 */
struct amdgpu_gtt_node {
	struct drm_mm_node node;
	struct ttm_buffer_object *tbo;
};

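/**
 * amdgpu_mem_info_gtt_total_show - sysfs read callback for mem_info_gtt_total
 *
 * @dev: device the attribute belongs to
 * @attr: sysfs attribute being read
 * @buf: output buffer
 *
 * Reports the total size of the GTT domain in bytes.
 */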
static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			(adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE);
}

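/**
 * amdgpu_mem_info_gtt_used_show - sysfs read callback for mem_info_gtt_used
 *
 * @dev: device the attribute belongs to
 * @attr: sysfs attribute being read
 * @buf: output buffer
 *
 * Reports how much of the GTT domain is currently in use, in bytes.
 */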
static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]));
}

static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
		   amdgpu_mem_info_gtt_total_show, NULL);
static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
		   amdgpu_mem_info_gtt_used_show, NULL);

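/**
 * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of GTT
 *
 * Allocate and initialize the GTT manager, set up the backing drm_mm range
 * allocator and create the sysfs files for memory usage reporting.
 */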
static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
			       unsigned long p_size)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr;
	uint64_t start, size;
	int ret;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	/* Skip the space at the start of the GART which is reserved for
	 * the transfer windows.
	 */
	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
	size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
	drm_mm_init(&mgr->mm, start, size);
	spin_lock_init(&mgr->lock);
	atomic64_set(&mgr->available, p_size);
	man->priv = mgr;

	ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_gtt_total\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_used);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_gtt_used\n");
		return ret;
	}

	return 0;
}

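/**
 * amdgpu_gtt_mgr_fini - free and destroy GTT manager
 *
 * @man: TTM memory type manager
 *
 * Tear down the drm_mm range allocator, remove the sysfs files and free the
 * manager. Expects all allocated GTT nodes to be freed already.
 */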
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);
	kfree(mgr);
	man->priv = NULL;

	device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total);
	device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used);

	return 0;
}

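/**
 * amdgpu_gtt_mgr_has_gart_addr - check if a mem object has a GART address
 *
 * @mem: the mem object to check
 *
 * Returns true if the mem object is already bound to a real GART address,
 * false if the address assignment was deferred.
 */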
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_node *node = mem->mm_node;

	return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
}

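/**
 * amdgpu_gtt_mgr_alloc - allocate a real GART address range
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Assign an address inside the GART aperture to the node backing @mem,
 * honoring the placement restrictions in @place.
 */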
static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
				struct ttm_buffer_object *tbo,
				const struct ttm_place *place,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node = mem->mm_node;
	enum drm_mm_insert_mode mode;
	unsigned long fpfn, lpfn;
	int r;

	/* Nothing to do if a GART address is already assigned */
	if (amdgpu_gtt_mgr_has_gart_addr(mem))
		return 0;

	/* Take the placement limits from @place, falling back to the
	 * whole GART aperture.
	 */
	if (place)
		fpfn = place->fpfn;
	else
		fpfn = 0;

	if (place && place->lpfn)
		lpfn = place->lpfn;
	else
		lpfn = adev->gart.num_cpu_pages;

	mode = DRM_MM_INSERT_BEST;
	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&mgr->lock);
	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
					mem->page_alignment, 0, fpfn, lpfn,
					mode);
	spin_unlock(&mgr->lock);

	if (!r)
		mem->start = node->node.start;

	return r;
}

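/**
 * amdgpu_gtt_mgr_new - allocate a new GTT node
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this node for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Account the requested pages against the GTT limit and allocate the node.
 * A real GART address is only assigned immediately when the placement
 * requires one; otherwise the assignment is deferred until bind time.
 */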
static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
			      struct ttm_buffer_object *tbo,
			      const struct ttm_place *place,
			      struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node;
	int r;

	spin_lock(&mgr->lock);
	/* Enforce the limit only for allocations which consume new GTT
	 * space; a BO which already lives in GTT may be accounted twice
	 * while it is being moved. Returning 0 without setting a node
	 * signals TTM that there is no space left.
	 */
	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
	    atomic64_read(&mgr->available) < mem->num_pages) {
		spin_unlock(&mgr->lock);
		return 0;
	}
	atomic64_sub(mem->num_pages, &mgr->available);
	spin_unlock(&mgr->lock);

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		r = -ENOMEM;
		goto err_out;
	}

	node->node.start = AMDGPU_BO_INVALID_OFFSET;
	node->node.size = mem->num_pages;
	node->tbo = tbo;
	mem->mm_node = node;

	/* Only assign a real GART address right away when the placement
	 * demands it; otherwise defer the assignment to bind time.
	 */
	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
		if (unlikely(r)) {
			kfree(node);
			mem->mm_node = NULL;
			r = 0;
			goto err_out;
		}
	} else {
		mem->start = node->node.start;
	}

	return 0;

err_out:
	atomic64_add(mem->num_pages, &mgr->available);

	return r;
}

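/**
 * amdgpu_gtt_mgr_del - free a GTT node
 *
 * @man: TTM memory type manager
 * @mem: the mem object to free
 *
 * Release the GART address range (if one was assigned), return the pages to
 * the accounting and free the node.
 */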
static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node = mem->mm_node;

	if (!node)
		return;

	spin_lock(&mgr->lock);
	if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
		drm_mm_remove_node(&node->node);
	spin_unlock(&mgr->lock);
	atomic64_add(mem->num_pages, &mgr->available);

	kfree(node);
	mem->mm_node = NULL;
}

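/**
 * amdgpu_gtt_mgr_usage - report GTT usage
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes of the GTT domain are currently in use.
 */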
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	s64 result = man->size - atomic64_read(&mgr->available);

	return (result > 0 ? result : 0) * PAGE_SIZE;
}

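/**
 * amdgpu_gtt_mgr_recover - re-bind all GTT allocations
 *
 * @man: TTM memory type manager
 *
 * Walk all nodes in the manager and restore their GART mappings, e.g. after
 * the GART table was lost on a GPU reset.
 */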
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node;
	struct drm_mm_node *mm_node;
	int r = 0;

	spin_lock(&mgr->lock);
	drm_mm_for_each_node(mm_node, &mgr->mm) {
		node = container_of(mm_node, struct amdgpu_gtt_node, node);
		r = amdgpu_ttm_recover_gart(node->tbo);
		if (r)
			break;
	}
	spin_unlock(&mgr->lock);

	return r;
}

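/**
 * amdgpu_gtt_mgr_debug - dump GTT manager state
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the drm_mm allocations and the overall usage statistics.
 */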
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
				 struct drm_printer *printer)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
		   man->size, (u64)atomic64_read(&mgr->available),
		   amdgpu_gtt_mgr_usage(man) >> 20);
}

const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
	.init = amdgpu_gtt_mgr_init,
	.takedown = amdgpu_gtt_mgr_fini,
	.get_node = amdgpu_gtt_mgr_new,
	.put_node = amdgpu_gtt_mgr_del,
	.debug = amdgpu_gtt_mgr_debug
};