This source file includes the following definitions:
- vmw_setup_otable_base
- vmw_takedown_otable_base
- vmw_otable_batch_setup
- vmw_otables_setup
- vmw_otable_batch_takedown
- vmw_otables_takedown
- vmw_mob_calculate_pt_pages
- vmw_mob_create
- vmw_mob_pt_populate
- vmw_mob_assign_ppn
- vmw_mob_assign_ppn
- vmw_mob_build_pt
- vmw_mob_pt_setup
- vmw_mob_destroy
- vmw_mob_unbind
- vmw_mob_bind
#include <linux/highmem.h>

#include "vmwgfx_drv.h"

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif
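
/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * memory object (MOB).
 *
 * @pt_bo:        Buffer object holding the page table pages.
 * @num_pages:    Number of pages that make up the page table.
 * @pt_level:     The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
 * @id:           Device id of the mob.
 */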
struct vmw_mob {
        struct ttm_buffer_object *pt_bo;
        unsigned long num_pages;
        unsigned pt_level;
        dma_addr_t pt_root_page;
        uint32_t id;
};
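
/*
 * Object table sizes and enable flags, indexed by SVGAOTableType, for
 * pre-DX and DX capable devices respectively.
 */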
static const struct vmw_otable pre_dx_tables[] = {
        {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
         NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
        {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
        {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
         NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
        {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
                               struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
                             struct vmw_piter data_iter,
                             unsigned long num_data_pages);
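
/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device.
 *
 * @dev_priv:  Pointer to a device private structure.
 * @type:      Type of object table base.
 * @otable_bo: Buffer object holding the object table backing store.
 * @offset:    Offset into @otable_bo at which the table data starts.
 * @otable:    Pointer to otable metadata.
 *
 * Builds a page table for the object table if one is needed, then sends
 * the SET_OTABLE_BASE64 command making the table known to the device.
 */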
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
                                 SVGAOTableType type,
                                 struct ttm_buffer_object *otable_bo,
                                 unsigned long offset,
                                 struct vmw_otable *otable)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetOTableBase64 body;
        } *cmd;
        struct vmw_mob *mob;
        const struct vmw_sg_table *vsgt;
        struct vmw_piter iter;
        int ret;

        BUG_ON(otable->page_table != NULL);

        vsgt = vmw_bo_sg_table(otable_bo);
        vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
        WARN_ON(!vmw_piter_next(&iter));

        mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
        if (unlikely(mob == NULL)) {
                DRM_ERROR("Failed creating OTable page table.\n");
                return -ENOMEM;
        }

        if (otable->size <= PAGE_SIZE) {
                mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
                mob->pt_root_page = vmw_piter_dma_addr(&iter);
        } else if (vsgt->num_regions == 1) {
                mob->pt_level = SVGA3D_MOBFMT_RANGE;
                mob->pt_root_page = vmw_piter_dma_addr(&iter);
        } else {
                ret = vmw_mob_pt_populate(dev_priv, mob);
                if (unlikely(ret != 0))
                        goto out_no_populate;

                vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
                mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
        }

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        memset(cmd, 0, sizeof(*cmd));
        cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.type = type;
        cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
        cmd->body.sizeInBytes = otable->size;
        cmd->body.validSizeInBytes = 0;
        cmd->body.ptDepth = mob->pt_level;
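
        /*
         * The device doesn't support a depth-2 page table for an object
         * table base. The otable sizes are fixed at compile time, so
         * this check should never trigger at runtime.
         */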
        BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        otable->page_table = mob;

        return 0;

out_no_fifo:
out_no_populate:
        vmw_mob_destroy(mob);
        return ret;
}
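
/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device.
 *
 * @dev_priv: Pointer to a device private structure.
 * @type:     Type of object table base.
 * @otable:   Pointer to otable metadata.
 *
 * Tells the device to stop using the object table, fences the backing
 * buffer object and destroys the page table.
 */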
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
                                     SVGAOTableType type,
                                     struct vmw_otable *otable)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetOTableBase body;
        } *cmd;
        struct ttm_buffer_object *bo;

        if (otable->page_table == NULL)
                return;

        bo = otable->page_table->pt_bo;
        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return;

        memset(cmd, 0, sizeof(*cmd));
        cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.type = type;
        cmd->body.baseAddress = 0;
        cmd->body.sizeInBytes = 0;
        cmd->body.validSizeInBytes = 0;
        cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        if (bo) {
                int ret;

                ret = ttm_bo_reserve(bo, false, true, NULL);
                BUG_ON(ret != 0);

                vmw_bo_fence_single(bo, NULL);
                ttm_bo_unreserve(bo);
        }

        vmw_mob_destroy(otable->page_table);
        otable->page_table = NULL;
}
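
/*
 * vmw_otable_batch_setup - Set up a batch of object tables backed by a
 * single buffer object.
 *
 * @dev_priv: Pointer to a device private structure.
 * @batch:    Batch descriptor listing the otables to set up.
 *
 * Allocates and populates one buffer object large enough to hold all
 * enabled otables in the batch, then registers each table with the
 * device at its offset within that buffer.
 */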
static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
                                  struct vmw_otable_batch *batch)
{
        unsigned long offset;
        unsigned long bo_size;
        struct vmw_otable *otables = batch->otables;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        SVGAOTableType i;
        int ret;

        bo_size = 0;
        for (i = 0; i < batch->num_otables; ++i) {
                if (!otables[i].enabled)
                        continue;

                otables[i].size =
                        (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
                bo_size += otables[i].size;
        }

        ret = ttm_bo_create(&dev_priv->bdev, bo_size,
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, &batch->otable_bo);
        if (unlikely(ret != 0))
                goto out_no_bo;

        ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
        BUG_ON(ret != 0);
        ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
        if (unlikely(ret != 0))
                goto out_unreserve;
        ret = vmw_bo_map_dma(batch->otable_bo);
        if (unlikely(ret != 0))
                goto out_unreserve;

        ttm_bo_unreserve(batch->otable_bo);

        offset = 0;
        for (i = 0; i < batch->num_otables; ++i) {
                if (!batch->otables[i].enabled)
                        continue;

                ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
                                            offset,
                                            &otables[i]);
                if (unlikely(ret != 0))
                        goto out_no_setup;
                offset += otables[i].size;
        }

        return 0;

out_unreserve:
        ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
        for (i = 0; i < batch->num_otables; ++i) {
                if (batch->otables[i].enabled)
                        vmw_takedown_otable_base(dev_priv, i,
                                                 &batch->otables[i]);
        }

        ttm_bo_put(batch->otable_bo);
        batch->otable_bo = NULL;
out_no_bo:
        return ret;
}
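
/*
 * vmw_otables_setup - Set up guest-backed object tables.
 *
 * @dev_priv: Pointer to a device private structure.
 *
 * Chooses the DX or pre-DX table layout depending on device
 * capabilities, duplicates the table metadata and hands it to
 * vmw_otable_batch_setup().
 */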
int vmw_otables_setup(struct vmw_private *dev_priv)
{
        struct vmw_otable **otables = &dev_priv->otable_batch.otables;
        int ret;

        if (dev_priv->has_dx) {
                *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
                if (!(*otables))
                        return -ENOMEM;

                dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
        } else {
                *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
                                   GFP_KERNEL);
                if (!(*otables))
                        return -ENOMEM;

                dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
        }

        ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
        if (unlikely(ret != 0))
                goto out_setup;

        return 0;

out_setup:
        kfree(*otables);
        return ret;
}
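
/*
 * vmw_otable_batch_takedown - Take down a batch of otables and release
 * the backing buffer object.
 *
 * @dev_priv: Pointer to a device private structure.
 * @batch:    Batch descriptor previously handed to vmw_otable_batch_setup().
 */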
static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
                                      struct vmw_otable_batch *batch)
{
        SVGAOTableType i;
        struct ttm_buffer_object *bo = batch->otable_bo;
        int ret;

        for (i = 0; i < batch->num_otables; ++i)
                if (batch->otables[i].enabled)
                        vmw_takedown_otable_base(dev_priv, i,
                                                 &batch->otables[i]);

        ret = ttm_bo_reserve(bo, false, true, NULL);
        BUG_ON(ret != 0);

        vmw_bo_fence_single(bo, NULL);
        ttm_bo_unreserve(bo);

        ttm_bo_put(batch->otable_bo);
        batch->otable_bo = NULL;
}
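
/*
 * vmw_otables_takedown - Take down the guest-backed object tables set up
 * by vmw_otables_setup() and free their metadata.
 *
 * @dev_priv: Pointer to a device private structure.
 */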
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
        vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
        kfree(dev_priv->otable_batch.otables);
}
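
/*
 * vmw_mob_calculate_pt_pages - Compute the number of page-table pages
 * needed for a MOB with the given number of data pages.
 *
 * @data_pages: Number of data pages in the MOB.
 *
 * Each page-table level needs VMW_PPN_SIZE bytes per page of the level
 * below it; the loop sums the levels until one fits in a single page.
 */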
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
        unsigned long data_size = data_pages * PAGE_SIZE;
        unsigned long tot_size = 0;

        while (likely(data_size > PAGE_SIZE)) {
                data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
                data_size *= VMW_PPN_SIZE;
                tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
        }

        return tot_size >> PAGE_SHIFT;
}
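
/*
 * vmw_mob_create - Create a mob metadata structure.
 *
 * @data_pages: Number of data pages the MOB will span.
 *
 * The page table itself is allocated lazily by vmw_mob_pt_populate().
 */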
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
        struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

        if (unlikely(!mob))
                return NULL;

        mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

        return mob;
}
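
/*
 * vmw_mob_pt_populate - Allocate and DMA-map a buffer object holding
 * the page-table pages of a mob.
 *
 * @dev_priv: Pointer to a device private structure.
 * @mob:      Pointer to the mob whose page table needs backing.
 *
 * Returns 0 on success, or a negative error code on failure.
 */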
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
                               struct vmw_mob *mob)
{
        int ret;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };

        BUG_ON(mob->pt_bo != NULL);

        ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, &mob->pt_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
        BUG_ON(ret != 0);

        ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
        if (unlikely(ret != 0))
                goto out_unreserve;
        ret = vmw_bo_map_dma(mob->pt_bo);
        if (unlikely(ret != 0))
                goto out_unreserve;

        ttm_bo_unreserve(mob->pt_bo);

        return 0;

out_unreserve:
        ttm_bo_unreserve(mob->pt_bo);
        ttm_bo_put(mob->pt_bo);
        mob->pt_bo = NULL;

        return ret;
}
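
/*
 * vmw_mob_assign_ppn - Assign a page frame number to a page-table entry
 * and advance the write pointer.
 *
 * @addr: Pointer to the current entry pointer; updated on return.
 * @val:  DMA address of the page to reference.
 *
 * Entries are 8 bytes wide on 64-bit configurations and 4 bytes wide
 * otherwise (see VMW_PPN_SIZE above), hence the two variants.
 */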
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
        *((u64 *) *addr) = val >> PAGE_SHIFT;
        *addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
        *(*addr)++ = val >> PAGE_SHIFT;
}
#endif
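
/*
 * vmw_mob_build_pt - Build a single level of a mob page table.
 *
 * @data_iter:      Iterator pointing at the first data page referenced
 *                  by this page-table level.
 * @num_data_pages: Number of data pages to reference.
 * @pt_iter:        Iterator pointing at the first page-table page to
 *                  fill in.
 *
 * Returns the number of page-table pages written. On return, @pt_iter
 * points one past the last page-table page written.
 */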
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
                                      unsigned long num_data_pages,
                                      struct vmw_piter *pt_iter)
{
        unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
        unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
        unsigned long pt_page;
        u32 *addr, *save_addr;
        unsigned long i;
        struct page *page;

        for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
                page = vmw_piter_page(pt_iter);

                save_addr = addr = kmap_atomic(page);

                for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
                        vmw_mob_assign_ppn(&addr,
                                           vmw_piter_dma_addr(data_iter));
                        if (unlikely(--num_data_pages == 0))
                                break;
                        WARN_ON(!vmw_piter_next(data_iter));
                }
                kunmap_atomic(save_addr);
                vmw_piter_next(pt_iter);
        }

        return num_pt_pages;
}
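
/*
 * vmw_mob_pt_setup - Set up a multilevel mob page table.
 *
 * @mob:            Pointer to a mob whose page table needs setting up.
 * @data_iter:      Iterator pointing at the first data page.
 * @num_data_pages: Number of data pages in the MOB.
 *
 * Builds the page-table levels bottom-up inside @mob->pt_bo, each level
 * becoming the data for the one above it, then records the DMA address
 * of the root page. @save_pt_iter is initialized before the loop so the
 * root page stays well defined even if the loop body never runs.
 */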
static void vmw_mob_pt_setup(struct vmw_mob *mob,
                             struct vmw_piter data_iter,
                             unsigned long num_data_pages)
{
        unsigned long num_pt_pages = 0;
        struct ttm_buffer_object *bo = mob->pt_bo;
        struct vmw_piter save_pt_iter;
        struct vmw_piter pt_iter;
        const struct vmw_sg_table *vsgt;
        int ret;

        ret = ttm_bo_reserve(bo, false, true, NULL);
        BUG_ON(ret != 0);

        vsgt = vmw_bo_sg_table(bo);
        vmw_piter_start(&pt_iter, vsgt, 0);
        BUG_ON(!vmw_piter_next(&pt_iter));
        save_pt_iter = pt_iter;
        mob->pt_level = 0;
        while (likely(num_data_pages > 1)) {
                ++mob->pt_level;
                BUG_ON(mob->pt_level > 2);
                save_pt_iter = pt_iter;
                num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
                                                &pt_iter);
                /* The level just built becomes the data for the next level. */
                data_iter = save_pt_iter;
                num_data_pages = num_pt_pages;
        }

        mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
        ttm_bo_unreserve(bo);
}
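
/*
 * vmw_mob_destroy - Destroy a mob, releasing its page table if present.
 *
 * @mob: Pointer to a mob to destroy.
 */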
void vmw_mob_destroy(struct vmw_mob *mob)
{
        if (mob->pt_bo) {
                ttm_bo_put(mob->pt_bo);
                mob->pt_bo = NULL;
        }
        kfree(mob);
}
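
/*
 * vmw_mob_unbind - Issue a memory object unbind command to the device.
 *
 * @dev_priv: Pointer to a device private structure.
 * @mob:      Pointer to the mob to unbind.
 */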
void vmw_mob_unbind(struct vmw_private *dev_priv,
                    struct vmw_mob *mob)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBMob body;
        } *cmd;
        int ret;
        struct ttm_buffer_object *bo = mob->pt_bo;

        if (bo) {
                ret = ttm_bo_reserve(bo, false, true, NULL);
                /*
                 * Nobody else should be using this buffer at this point.
                 */
                BUG_ON(ret != 0);
        }

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (cmd) {
                cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.mobid = mob->id;
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }

        if (bo) {
                vmw_bo_fence_single(bo, NULL);
                ttm_bo_unreserve(bo);
        }
        vmw_fifo_resource_dec(dev_priv);
}
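
/*
 * vmw_mob_bind - Make a mob visible to the device after first populating
 * its page table if necessary.
 *
 * @dev_priv:       Pointer to a device private structure.
 * @mob:            Pointer to the mob we're making visible.
 * @vsgt:           Pointer to a struct vmw_sg_table with pointers to the
 *                  data to bind.
 * @num_data_pages: Number of data pages of the mob.
 * @mob_id:         Device id of the mob to bind.
 *
 * Picks the cheapest page-table format the scatter-gather layout allows
 * (single page, contiguous range, or a multilevel page table) and issues
 * the DEFINE_GB_MOB64 command to the device.
 */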
int vmw_mob_bind(struct vmw_private *dev_priv,
                 struct vmw_mob *mob,
                 const struct vmw_sg_table *vsgt,
                 unsigned long num_data_pages,
                 int32_t mob_id)
{
        int ret;
        bool pt_set_up = false;
        struct vmw_piter data_iter;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBMob64 body;
        } *cmd;

        mob->id = mob_id;
        vmw_piter_start(&data_iter, vsgt, 0);
        if (unlikely(!vmw_piter_next(&data_iter)))
                return 0;

        if (likely(num_data_pages == 1)) {
                mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
                mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
        } else if (vsgt->num_regions == 1) {
                mob->pt_level = SVGA3D_MOBFMT_RANGE;
                mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
        } else if (unlikely(mob->pt_bo == NULL)) {
                ret = vmw_mob_pt_populate(dev_priv, mob);
                if (unlikely(ret != 0))
                        return ret;

                vmw_mob_pt_setup(mob, data_iter, num_data_pages);
                pt_set_up = true;
                mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
        }

        vmw_fifo_resource_inc(dev_priv);

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                goto out_no_cmd_space;

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.mobid = mob_id;
        cmd->body.ptDepth = mob->pt_level;
        cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
        cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;

out_no_cmd_space:
        vmw_fifo_resource_dec(dev_priv);
        if (pt_set_up) {
                ttm_bo_put(mob->pt_bo);
                mob->pt_bo = NULL;
        }

        return -ENOMEM;
}