This source file includes the following definitions.
- vmw_gmrid_man_get_node
- vmw_gmrid_man_put_node
- vmw_gmrid_man_init
- vmw_gmrid_man_takedown
- vmw_gmrid_man_debug
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31 #include "vmwgfx_drv.h"
32 #include <drm/ttm/ttm_module.h>
33 #include <drm/ttm/ttm_bo_driver.h>
34 #include <drm/ttm/ttm_placement.h>
35 #include <linux/idr.h>
36 #include <linux/spinlock.h>
37 #include <linux/kernel.h>
38
/*
 * Per-memory-type manager state for GMR (Guest Memory Region) / MOB
 * id allocation and page accounting.
 */
struct vmwgfx_gmrid_man {
	spinlock_t lock;		/* Protects used_gmr_pages. */
	struct ida gmr_ida;		/* Allocator for GMR/MOB ids. */
	uint32_t max_gmr_ids;		/* Number of available ids. */
	uint32_t max_gmr_pages;		/* Page quota; 0 disables accounting. */
	uint32_t used_gmr_pages;	/* Pages currently accounted. */
};
46
/*
 * vmw_gmrid_man_get_node - Allocate a GMR/MOB id and account the buffer's
 * pages against the manager's page quota.
 *
 * On success, returns 0 with @mem->mm_node set and @mem->start holding the
 * allocated id. When ids or pages are exhausted, returns 0 with
 * @mem->mm_node == NULL, which TTM interprets as "no space". Only a real
 * allocation failure (-ENOMEM) from the ida is propagated as an error.
 */
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
				  struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;
	int id;

	mem->mm_node = NULL;

	/* Valid ids range over [0, max_gmr_ids - 1]. */
	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
	if (id < 0)
		/*
		 * Id exhaustion is reported as "no space" (0 with mm_node
		 * NULL); only -ENOMEM is returned to the caller as an error.
		 */
		return (id != -ENOMEM ? 0 : id);

	spin_lock(&gman->lock);

	/* A zero max_gmr_pages means page accounting is disabled. */
	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += bo->num_pages;
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
			goto nospace;
	}

	mem->mm_node = gman;
	mem->start = id;
	mem->num_pages = bo->num_pages;

	spin_unlock(&gman->lock);
	return 0;

nospace:
	/*
	 * Roll back the page accounting, release the id (outside the
	 * spinlock), and report "no space".
	 */
	gman->used_gmr_pages -= bo->num_pages;
	spin_unlock(&gman->lock);
	ida_free(&gman->gmr_ida, id);
	return 0;
}
83
84 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
85 struct ttm_mem_reg *mem)
86 {
87 struct vmwgfx_gmrid_man *gman =
88 (struct vmwgfx_gmrid_man *)man->priv;
89
90 if (mem->mm_node) {
91 ida_free(&gman->gmr_ida, mem->start);
92 spin_lock(&gman->lock);
93 gman->used_gmr_pages -= mem->num_pages;
94 spin_unlock(&gman->lock);
95 mem->mm_node = NULL;
96 }
97 }
98
99 static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
100 unsigned long p_size)
101 {
102 struct vmw_private *dev_priv =
103 container_of(man->bdev, struct vmw_private, bdev);
104 struct vmwgfx_gmrid_man *gman =
105 kzalloc(sizeof(*gman), GFP_KERNEL);
106
107 if (unlikely(!gman))
108 return -ENOMEM;
109
110 spin_lock_init(&gman->lock);
111 gman->used_gmr_pages = 0;
112 ida_init(&gman->gmr_ida);
113
114 switch (p_size) {
115 case VMW_PL_GMR:
116 gman->max_gmr_ids = dev_priv->max_gmr_ids;
117 gman->max_gmr_pages = dev_priv->max_gmr_pages;
118 break;
119 case VMW_PL_MOB:
120 gman->max_gmr_ids = VMWGFX_NUM_MOB;
121 gman->max_gmr_pages = dev_priv->max_mob_pages;
122 break;
123 default:
124 BUG();
125 }
126 man->priv = (void *) gman;
127 return 0;
128 }
129
130 static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
131 {
132 struct vmwgfx_gmrid_man *gman =
133 (struct vmwgfx_gmrid_man *)man->priv;
134
135 if (gman) {
136 ida_destroy(&gman->gmr_ida);
137 kfree(gman);
138 }
139 return 0;
140 }
141
/*
 * vmw_gmrid_man_debug - TTM debug hook; this manager exposes no
 * per-manager state, so only a placeholder message is printed.
 */
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
				struct drm_printer *printer)
{
	drm_printf(printer, "No debug info available for the GMR id manager\n");
}
147
/* TTM memory-type manager hooks for the id-managed GMR/MOB memory types. */
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
	.init = vmw_gmrid_man_init,
	.takedown = vmw_gmrid_man_takedown,
	.get_node = vmw_gmrid_man_get_node,
	.put_node = vmw_gmrid_man_put_node,
	.debug = vmw_gmrid_man_debug
};