This source file includes the following definitions:
- backend_func_destroy
- bo_driver_ttm_tt_create
- bo_driver_init_mem_type
- bo_driver_evict_flags
- bo_driver_verify_access
- bo_driver_io_mem_reserve
- bo_driver_io_mem_free
- drm_vram_mm_init
- drm_vram_mm_cleanup
- drm_vram_mm_mmap
- drm_vram_helper_alloc_mm
- drm_vram_helper_release_mm
- drm_vram_mm_file_operations_mmap

#include <linux/slab.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_vram_mm_helper.h>

#include <drm/ttm/ttm_page_alloc.h>

/**
 * DOC: overview
 *
 * The data structure &struct drm_vram_mm and its helpers implement a memory
 * manager for simple framebuffer devices with dedicated video memory. Buffer
 * objects are either placed in video RAM or evicted to system memory. These
 * helpers are designed to work with &struct drm_gem_vram_object.
 */

/*
 * TTM TT
 */

static void backend_func_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func backend_func = {
	.destroy = backend_func_destroy
};

/*
 * TTM BO device
 */

static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
					      uint32_t page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->func = &backend_func;

	ret = ttm_tt_init(tt, bo, page_flags);
	if (ret < 0)
		goto err_ttm_tt_init;

	return tt;

err_ttm_tt_init:
	kfree(tt);
	return NULL;
}
static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				   struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory supports all caching modes. */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/*
		 * VRAM is a fixed, mappable aperture. Map it uncached or,
		 * preferably, write-combined.
		 */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);

	/* Forward to the driver's callback, if one was supplied. */
	if (vmm->funcs && vmm->funcs->evict_flags)
		vmm->funcs->evict_flags(bo, placement);
}

static int bo_driver_verify_access(struct ttm_buffer_object *bo,
				   struct file *filp)
{
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);

	/* Without a driver callback, all mappings are allowed. */
	if (!vmm->funcs || !vmm->funcs->verify_access)
		return 0;
	return vmm->funcs->verify_access(bo, filp);
}
static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
				    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	mem->bus.addr = NULL;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:	/* nothing to do */
		mem->bus.offset = 0;
		mem->bus.base = 0;
		mem->bus.is_iomem = false;
		break;
	case TTM_PL_VRAM:
		/*
		 * TTM resolves the buffer's physical address as
		 * bus.base + bus.offset, i.e. the VRAM aperture base
		 * plus the buffer's page offset within the aperture.
		 */
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = vmm->vram_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{ }

static struct ttm_bo_driver bo_driver = {
	.ttm_tt_create = bo_driver_ttm_tt_create,
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.init_mem_type = bo_driver_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bo_driver_evict_flags,
	.verify_access = bo_driver_verify_access,
	.io_mem_reserve = bo_driver_io_mem_reserve,
	.io_mem_free = bo_driver_io_mem_free,
};
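
/*
 * Illustration (not part of this file): the optional callbacks used by
 * bo_driver_evict_flags() and bo_driver_verify_access() above reach the
 * helper through &struct drm_vram_mm_funcs. All my_* identifiers below
 * are hypothetical placeholder names.
 *
 *	static void my_evict_flags(struct ttm_buffer_object *bo,
 *				   struct ttm_placement *placement)
 *	{
 *		// e.g. place evicted buffer objects in system memory
 *	}
 *
 *	static int my_verify_access(struct ttm_buffer_object *bo,
 *				    struct file *filp)
 *	{
 *		return 0; // allow all mappings
 *	}
 *
 *	static const struct drm_vram_mm_funcs my_vram_mm_funcs = {
 *		.evict_flags = my_evict_flags,
 *		.verify_access = my_verify_access,
 *	};
 */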

/*
 * struct drm_vram_mm
 */

/**
 * drm_vram_mm_init() - Initializes an instance of VRAM MM.
 * @vmm:	the VRAM MM instance to initialize
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 * @funcs:	callback functions for buffer objects
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
		     uint64_t vram_base, size_t vram_size,
		     const struct drm_vram_mm_funcs *funcs)
{
	int ret;

	vmm->vram_base = vram_base;
	vmm->vram_size = vram_size;
	vmm->funcs = funcs;

	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
				 dev->anon_inode->i_mapping,
				 true);
	if (ret)
		return ret;

	ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(drm_vram_mm_init);
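
/*
 * Usage sketch (an illustration, not part of this file): a driver that
 * embeds the memory manager in its own device structure initializes it
 * once at load time and cleans it up on unload. "struct my_device" and
 * the my_* identifiers are hypothetical names.
 *
 *	struct my_device {
 *		struct drm_device drm;
 *		struct drm_vram_mm vmm;
 *	};
 *
 *	int my_device_load(struct my_device *mdev, uint64_t vram_base,
 *			   size_t vram_size)
 *	{
 *		return drm_vram_mm_init(&mdev->vmm, &mdev->drm, vram_base,
 *					vram_size, &my_vram_mm_funcs);
 *	}
 *
 *	void my_device_unload(struct my_device *mdev)
 *	{
 *		drm_vram_mm_cleanup(&mdev->vmm);
 *	}
 */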

/**
 * drm_vram_mm_cleanup() - Cleans up an initialized instance of VRAM MM.
 * @vmm:	the VRAM MM instance to clean up
 */
void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
	ttm_bo_device_release(&vmm->bdev);
}
EXPORT_SYMBOL(drm_vram_mm_cleanup);

/**
 * drm_vram_mm_mmap() - Helper for implementing &struct file_operations.mmap()
 * @filp:	the mapping's file structure
 * @vma:	the mapping's memory area
 * @vmm:	the VRAM MM instance
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
		     struct drm_vram_mm *vmm)
{
	return ttm_bo_mmap(filp, vma, &vmm->bdev);
}
EXPORT_SYMBOL(drm_vram_mm_mmap);

/*
 * Helpers for integration with struct drm_device
 */

/**
 * drm_vram_helper_alloc_mm() - Allocates a device's instance of
 *                              &struct drm_vram_mm
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 * @funcs:	callback functions for buffer objects
 *
 * Returns:
 * The new instance of &struct drm_vram_mm on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_vram_mm *drm_vram_helper_alloc_mm(
	struct drm_device *dev, uint64_t vram_base, size_t vram_size,
	const struct drm_vram_mm_funcs *funcs)
{
	int ret;

	if (WARN_ON(dev->vram_mm))
		return dev->vram_mm;

	dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
	if (!dev->vram_mm)
		return ERR_PTR(-ENOMEM);

	ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size, funcs);
	if (ret)
		goto err_kfree;

	return dev->vram_mm;

err_kfree:
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
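
/*
 * Usage sketch (an illustration, not part of this file): a PCI-based
 * driver would typically call this from its probe path with the location
 * and size of its VRAM BAR. The BAR index 0 and the my_* identifiers are
 * assumptions made for the example.
 *
 *	struct drm_vram_mm *vmm;
 *
 *	vmm = drm_vram_helper_alloc_mm(dev,
 *				       pci_resource_start(pdev, 0),
 *				       pci_resource_len(pdev, 0),
 *				       &my_vram_mm_funcs);
 *	if (IS_ERR(vmm))
 *		return PTR_ERR(vmm);
 *
 * The matching cleanup call is drm_vram_helper_release_mm(), below.
 */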

/**
 * drm_vram_helper_release_mm() - Releases a device's instance of
 *                                &struct drm_vram_mm
 * @dev:	the DRM device
 */
void drm_vram_helper_release_mm(struct drm_device *dev)
{
	if (!dev->vram_mm)
		return;

	drm_vram_mm_cleanup(dev->vram_mm);
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);

/*
 * Helpers for &struct file_operations
 */

/**
 * drm_vram_mm_file_operations_mmap() - Implements &struct
 *                                      file_operations.mmap()
 * @filp:	the mapping's file structure
 * @vma:	the mapping's memory area
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_file_operations_mmap(
	struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
}
EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
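
/*
 * Usage sketch (an illustration, not part of this file): the helper plugs
 * directly into a driver's &struct file_operations as the mmap handler;
 * the other handlers shown are the stock DRM entry points. "my_fops" is a
 * hypothetical name.
 *
 *	static const struct file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *		.mmap		= drm_vram_mm_file_operations_mmap,
 *	};
 */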