This source file includes the following definitions.
- radeon_gart_table_ram_alloc
- radeon_gart_table_ram_free
- radeon_gart_table_vram_alloc
- radeon_gart_table_vram_pin
- radeon_gart_table_vram_unpin
- radeon_gart_table_vram_free
- radeon_gart_unbind
- radeon_gart_bind
- radeon_gart_init
- radeon_gart_fini
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29 #include <linux/vmalloc.h>
30
31 #include <drm/drm_pci.h>
32 #include <drm/radeon_drm.h>
33 #ifdef CONFIG_X86
34 #include <asm/set_memory.h>
35 #endif
36 #include "radeon.h"
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71 int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
72 {
73 void *ptr;
74
75 ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
76 &rdev->gart.table_addr);
77 if (ptr == NULL) {
78 return -ENOMEM;
79 }
80 #ifdef CONFIG_X86
81 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
82 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
83 set_memory_uc((unsigned long)ptr,
84 rdev->gart.table_size >> PAGE_SHIFT);
85 }
86 #endif
87 rdev->gart.ptr = ptr;
88 memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
89 return 0;
90 }
91
92
93
94
95
96
97
98
99
100
101 void radeon_gart_table_ram_free(struct radeon_device *rdev)
102 {
103 if (rdev->gart.ptr == NULL) {
104 return;
105 }
106 #ifdef CONFIG_X86
107 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
108 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
109 set_memory_wb((unsigned long)rdev->gart.ptr,
110 rdev->gart.table_size >> PAGE_SHIFT);
111 }
112 #endif
113 pci_free_consistent(rdev->pdev, rdev->gart.table_size,
114 (void *)rdev->gart.ptr,
115 rdev->gart.table_addr);
116 rdev->gart.ptr = NULL;
117 rdev->gart.table_addr = 0;
118 }
119
120
121
122
123
124
125
126
127
128
129
130 int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
131 {
132 int r;
133
134 if (rdev->gart.robj == NULL) {
135 r = radeon_bo_create(rdev, rdev->gart.table_size,
136 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
137 0, NULL, NULL, &rdev->gart.robj);
138 if (r) {
139 return r;
140 }
141 }
142 return 0;
143 }
144
145
146
147
148
149
150
151
152
153
154
/**
 * radeon_gart_table_vram_pin - pin and map the VRAM GART table
 * @rdev: radeon device
 *
 * Reserves gart.robj, pins it into VRAM, kmaps it into gart.ptr and
 * records the GPU address in gart.table_addr. On success the table is
 * then repopulated from the gart.pages_entry[] shadow copy and the TLB
 * is flushed, so the hardware table matches the software state (needed
 * because the BO contents are not preserved across an unpin/repin, e.g.
 * over suspend/resume).
 *
 * Returns 0 on success, a negative error code on failure; on kmap
 * failure the BO is unpinned again before returning.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	/* The reserve/pin/kmap sequence must run under the BO reservation. */
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		/* Mapping failed: drop the pin; r stays set so the
		 * repopulation below is skipped and r is returned.
		 */
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;

	if (!r) {
		int i;

		/* Rewrite every hardware PTE from the shadow entries,
		 * then make the writes visible before flushing the TLB.
		 */
		for (i = 0; i < rdev->gart.num_gpu_pages; i++)
			radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
		mb();
		radeon_gart_tlb_flush(rdev);
	}

	return r;
}
189
190
191
192
193
194
195
196
197
198 void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
199 {
200 int r;
201
202 if (rdev->gart.robj == NULL) {
203 return;
204 }
205 r = radeon_bo_reserve(rdev->gart.robj, false);
206 if (likely(r == 0)) {
207 radeon_bo_kunmap(rdev->gart.robj);
208 radeon_bo_unpin(rdev->gart.robj);
209 radeon_bo_unreserve(rdev->gart.robj);
210 rdev->gart.ptr = NULL;
211 }
212 }
213
214
215
216
217
218
219
220
221
222
223 void radeon_gart_table_vram_free(struct radeon_device *rdev)
224 {
225 if (rdev->gart.robj == NULL) {
226 return;
227 }
228 radeon_bo_unref(&rdev->gart.robj);
229 }
230
231
232
233
234
235
236
237
238
239
240
241
242
243
/**
 * radeon_gart_unbind - unbind pages from the GART aperture
 * @rdev: radeon device
 * @offset: byte offset into the aperture
 * @pages: number of CPU pages to unbind
 *
 * Points the affected GART entries back at the dummy page, both in the
 * pages_entry[] shadow copy and (when the table is mapped) in the
 * hardware table, then flushes the TLB. One CPU page covers
 * PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive GPU pages, hence the two
 * indices: @t walks GPU-page entries, @p walks CPU pages.
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;	/* index into the GPU-page-granular entry arrays */
	unsigned p;	/* index into the CPU-page-granular pages[] array */
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		/* Only slots that are actually bound need rewriting. */
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
				/* gart.ptr may be NULL when the table is
				 * currently unpinned; the shadow entry above
				 * is replayed at the next pin.
				 */
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t,
							     rdev->dummy_page.entry);
				}
			}
		}
	}
	if (rdev->gart.ptr) {
		/* Ensure the PTE writes land before the TLB flush. */
		mb();
		radeon_gart_tlb_flush(rdev);
	}
}
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
/**
 * radeon_gart_bind - bind pages into the GART aperture
 * @rdev: radeon device
 * @offset: byte offset into the aperture
 * @pages: number of CPU pages to bind
 * @pagelist: struct page pointers backing the binding
 * @dma_addr: DMA address of each CPU page
 * @flags: RADEON_GART_PAGE_* flags folded into each entry
 *
 * Records each page and writes the corresponding GART entries, both into
 * the pages_entry[] shadow copy and (when the table is mapped) into the
 * hardware table, then flushes the TLB. One CPU page is split into
 * PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive GPU-page entries.
 *
 * Returns 0 on success, -EINVAL if the GART is not initialized.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint32_t flags)
{
	unsigned t;	/* index into the GPU-page-granular entry arrays */
	unsigned p;	/* index into the CPU-page-granular pages[] array */
	uint64_t page_base, page_entry;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages[p] = pagelist[i];
		page_base = dma_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			page_entry = radeon_gart_get_page_entry(page_base, flags);
			rdev->gart.pages_entry[t] = page_entry;
			/* gart.ptr may be NULL when the table is unpinned;
			 * the shadow entry is replayed at the next pin.
			 */
			if (rdev->gart.ptr) {
				radeon_gart_set_page(rdev, t, page_entry);
			}
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	if (rdev->gart.ptr) {
		/* Ensure the PTE writes land before the TLB flush. */
		mb();
		radeon_gart_tlb_flush(rdev);
	}
	return 0;
}
323
324
325
326
327
328
329
330
331
/**
 * radeon_gart_init - allocate the software GART state
 * @rdev: radeon device
 *
 * Sizes the aperture from mc.gtt_size, initializes the dummy page and
 * allocates the pages[] (per CPU page) and pages_entry[] (per GPU page)
 * tracking arrays, pointing every entry at the dummy page. Idempotent:
 * returns 0 immediately when pages[] already exists.
 *
 * Returns 0 on success, -EINVAL if PAGE_SIZE < RADEON_GPU_PAGE_SIZE,
 * -ENOMEM on allocation failure (partial state is torn down via
 * radeon_gart_fini()).
 */
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}

	/* The bind/unbind index math assumes a CPU page holds a whole
	 * number of GPU pages.
	 */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	/* Dummy page must exist before entries can point at it below. */
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;

	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);

	/* vzalloc: pages[] starts out all-NULL (nothing bound). */
	rdev->gart.pages = vzalloc(array_size(sizeof(void *),
				   rdev->gart.num_cpu_pages));
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* Plain vmalloc is enough here: every slot is filled just below. */
	rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
						    rdev->gart.num_gpu_pages));
	if (rdev->gart.pages_entry == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}

	/* Start with every GART entry pointing at the dummy page. */
	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
		rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
	return 0;
}
370
371
372
373
374
375
376
377
378 void radeon_gart_fini(struct radeon_device *rdev)
379 {
380 if (rdev->gart.ready) {
381
382 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
383 }
384 rdev->gart.ready = false;
385 vfree(rdev->gart.pages);
386 vfree(rdev->gart.pages_entry);
387 rdev->gart.pages = NULL;
388 rdev->gart.pages_entry = NULL;
389
390 radeon_dummy_page_fini(rdev);
391 }