This source file includes the following definitions.
- amdgpu_gart_dummy_page_init
- amdgpu_gart_dummy_page_fini
- amdgpu_gart_table_vram_alloc
- amdgpu_gart_table_vram_pin
- amdgpu_gart_table_vram_unpin
- amdgpu_gart_table_vram_free
- amdgpu_gart_unbind
- amdgpu_gart_map
- amdgpu_gart_bind
- amdgpu_gart_init
- amdgpu_gart_fini
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29 #include <linux/pci.h>
30 #include <linux/vmalloc.h>
31
32 #include <drm/amdgpu_drm.h>
33 #ifdef CONFIG_X86
34 #include <asm/set_memory.h>
35 #endif
36 #include "amdgpu.h"
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72 static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
73 {
74 struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
75
76 if (adev->dummy_page_addr)
77 return 0;
78 adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
79 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
80 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
81 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
82 adev->dummy_page_addr = 0;
83 return -ENOMEM;
84 }
85 return 0;
86 }
87
88
89
90
91
92
93
94
95 static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
96 {
97 if (!adev->dummy_page_addr)
98 return;
99 pci_unmap_page(adev->pdev, adev->dummy_page_addr,
100 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
101 adev->dummy_page_addr = 0;
102 }
103
104
105
106
107
108
109
110
111
112
113
114 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
115 {
116 int r;
117
118 if (adev->gart.bo == NULL) {
119 struct amdgpu_bo_param bp;
120
121 memset(&bp, 0, sizeof(bp));
122 bp.size = adev->gart.table_size;
123 bp.byte_align = PAGE_SIZE;
124 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
125 bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
126 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
127 bp.type = ttm_bo_type_kernel;
128 bp.resv = NULL;
129 r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
130 if (r) {
131 return r;
132 }
133 }
134 return 0;
135 }
136
137
138
139
140
141
142
143
144
145
146
147 int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
148 {
149 int r;
150
151 r = amdgpu_bo_reserve(adev->gart.bo, false);
152 if (unlikely(r != 0))
153 return r;
154 r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
155 if (r) {
156 amdgpu_bo_unreserve(adev->gart.bo);
157 return r;
158 }
159 r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
160 if (r)
161 amdgpu_bo_unpin(adev->gart.bo);
162 amdgpu_bo_unreserve(adev->gart.bo);
163 return r;
164 }
165
166
167
168
169
170
171
172
173
174 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
175 {
176 int r;
177
178 if (adev->gart.bo == NULL) {
179 return;
180 }
181 r = amdgpu_bo_reserve(adev->gart.bo, true);
182 if (likely(r == 0)) {
183 amdgpu_bo_kunmap(adev->gart.bo);
184 amdgpu_bo_unpin(adev->gart.bo);
185 amdgpu_bo_unreserve(adev->gart.bo);
186 adev->gart.ptr = NULL;
187 }
188 }
189
190
191
192
193
194
195
196
197
198
199 void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
200 {
201 if (adev->gart.bo == NULL) {
202 return;
203 }
204 amdgpu_bo_unref(&adev->gart.bo);
205 }
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
/**
 * amdgpu_gart_unbind - unbind pages from the GART
 *
 * @adev: amdgpu device pointer
 * @offset: byte offset into the GART aperture (converted to GPU pages)
 * @pages: number of CPU pages to unbind
 *
 * Points the affected GART entries at the DMA-mapped dummy page (with
 * flags 0) rather than leaving stale mappings, then flushes HDP and the
 * GPU TLB on every vmhub so the hardware observes the update.
 *
 * Returns 0 on success, -EINVAL if the GART is not ready.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
		       int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;

	/* Unbound entries get no access flags. */
	uint64_t flags = 0;

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return -EINVAL;
	}

	/* t: first GPU-page index; p: first CPU-page index. One CPU page
	 * spans AMDGPU_GPU_PAGES_IN_CPU_PAGE GPU pages.
	 */
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		/* Debugfs bookkeeping is cleared even when the table is
		 * not CPU-mapped below.
		 */
		adev->gart.pages[p] = NULL;
#endif
		page_base = adev->dummy_page_addr;
		/* No CPU mapping of the table - nothing to write. */
		if (!adev->gart.ptr)
			continue;

		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
					       t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	/* Make the PTE writes visible before flushing caches/TLBs. */
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);

	return 0;
}
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273 int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
274 int pages, dma_addr_t *dma_addr, uint64_t flags,
275 void *dst)
276 {
277 uint64_t page_base;
278 unsigned i, j, t;
279
280 if (!adev->gart.ready) {
281 WARN(1, "trying to bind memory to uninitialized GART !\n");
282 return -EINVAL;
283 }
284
285 t = offset / AMDGPU_GPU_PAGE_SIZE;
286
287 for (i = 0; i < pages; i++) {
288 page_base = dma_addr[i];
289 for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
290 amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
291 page_base += AMDGPU_GPU_PAGE_SIZE;
292 }
293 }
294 return 0;
295 }
296
297
298
299
300
301
302
303
304
305
306
307
308
309
/**
 * amdgpu_gart_bind - bind pages into the GART
 *
 * @adev: amdgpu device pointer
 * @offset: byte offset into the GART aperture
 * @pages: number of CPU pages to bind
 * @pagelist: struct page array (debugfs bookkeeping only; may be NULL)
 * @dma_addr: DMA addresses to program into the page table
 * @flags: PTE flags for the new entries
 *
 * Records the pages for debugfs (when enabled), writes the PTEs via
 * amdgpu_gart_map() when the table is CPU-mapped, then flushes HDP and
 * the GPU TLB on every vmhub.
 *
 * Returns 0 on success, -EINVAL if the GART is not ready, or the error
 * from amdgpu_gart_map().
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	unsigned t,p;
#endif
	int r, i;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Track which struct page backs each CPU-page slot, for debugfs. */
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++)
		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif

	/* Table not CPU-mapped: bookkeeping done, nothing to write. */
	if (!adev->gart.ptr)
		return 0;

	r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
			    adev->gart.ptr);
	if (r)
		return r;

	/* Make the PTE writes visible before flushing caches/TLBs. */
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
	return 0;
}
345
346
347
348
349
350
351
352
353
/**
 * amdgpu_gart_init - initialize the driver-side GART bookkeeping
 *
 * @adev: amdgpu device pointer
 *
 * Validates the page-size relationship, maps the dummy page, and computes
 * the CPU/GPU page counts from the configured GART size.  With
 * CONFIG_DRM_AMDGPU_GART_DEBUGFS it also allocates the pages tracking
 * array.  Idempotent: an existing dummy-page mapping means init already
 * ran, so it returns 0 immediately.
 *
 * Returns 0 on success, -EINVAL on a bad page-size configuration,
 * -ENOMEM on allocation failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	/* Already initialized - the dummy page mapping is the marker. */
	if (adev->dummy_page_addr)
		return 0;

	/* A CPU page must cover a whole number of GPU pages. */
	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = amdgpu_gart_dummy_page_init(adev);
	if (r)
		return r;

	/* Derive page counts from the configured GART aperture size. */
	adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
	adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Per-CPU-page tracking array, used only by debugfs. */
	adev->gart.pages = vzalloc(array_size(sizeof(void *),
					      adev->gart.num_cpu_pages));
	if (adev->gart.pages == NULL)
		return -ENOMEM;
#endif

	return 0;
}
385
386
387
388
389
390
391
392
/**
 * amdgpu_gart_fini - tear down the driver-side GART bookkeeping
 *
 * @adev: amdgpu device pointer
 *
 * Frees the debugfs pages tracking array (when enabled) and unmaps the
 * dummy page.  Counterpart of amdgpu_gart_init().
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	vfree(adev->gart.pages);
	adev->gart.pages = NULL;
#endif
	amdgpu_gart_dummy_page_fini(adev);
}