This source file includes the following definitions:
- nvkm_gpuobj_rd32_fast
- nvkm_gpuobj_wr32_fast
- nvkm_gpuobj_heap_map
- nvkm_gpuobj_heap_rd32
- nvkm_gpuobj_heap_wr32
- nvkm_gpuobj_heap_release
- nvkm_gpuobj_heap_acquire
- nvkm_gpuobj_map
- nvkm_gpuobj_rd32
- nvkm_gpuobj_wr32
- nvkm_gpuobj_release
- nvkm_gpuobj_acquire
- nvkm_gpuobj_ctor
- nvkm_gpuobj_del
- nvkm_gpuobj_new
- nvkm_gpuobj_wrap
- nvkm_gpuobj_memcpy_to
- nvkm_gpuobj_memcpy_from
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/gpuobj.h>
#include <core/engine.h>

#include <subdev/instmem.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>

/* fast-path, where backend is able to provide direct pointer to memory */
static u32
nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
{
	return ioread32_native(gpuobj->map + offset);
}

static void
nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
	iowrite32_native(data, gpuobj->map + offset);
}

/* accessor functions for gpuobjs allocated directly from instmem */
static int
nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
		     struct nvkm_vmm *vmm, struct nvkm_vma *vma,
		     void *argv, u32 argc)
{
	return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
}

static u32
nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
	return nvkm_ro32(gpuobj->memory, offset);
}

static void
nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
	nvkm_wo32(gpuobj->memory, offset, data);
}

static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
static void
nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->func = &nvkm_gpuobj_heap;
	nvkm_done(gpuobj->memory);
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_fast = {
	.release = nvkm_gpuobj_heap_release,
	.rd32 = nvkm_gpuobj_rd32_fast,
	.wr32 = nvkm_gpuobj_wr32_fast,
	.map = nvkm_gpuobj_heap_map,
};

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_slow = {
	.release = nvkm_gpuobj_heap_release,
	.rd32 = nvkm_gpuobj_heap_rd32,
	.wr32 = nvkm_gpuobj_heap_wr32,
	.map = nvkm_gpuobj_heap_map,
};

static void *
nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->map = nvkm_kmap(gpuobj->memory);
	if (likely(gpuobj->map))
		gpuobj->func = &nvkm_gpuobj_heap_fast;
	else
		gpuobj->func = &nvkm_gpuobj_heap_slow;
	return gpuobj->map;
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap = {
	.acquire = nvkm_gpuobj_heap_acquire,
	.map = nvkm_gpuobj_heap_map,
};
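
/*
 * Note on the tables above: a freshly-constructed heap object starts on
 * nvkm_gpuobj_heap, which provides only .acquire/.map.  nvkm_kmap()
 * (which expands to ->func->acquire()) then swaps in either the _fast
 * table, whose accessors touch the CPU mapping directly, or the _slow
 * table, which goes through the backing nvkm_memory; nvkm_done()
 * restores the base table.  rd32/wr32 are therefore only valid between
 * the two calls.
 */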

/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
static int
nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
		struct nvkm_vmm *vmm, struct nvkm_vma *vma,
		void *argv, u32 argc)
{
	return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
			       vmm, vma, argv, argc);
}

static u32
nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
	return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
}

static void
nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
	nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
}

static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
static void
nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->func = &nvkm_gpuobj_func;
	nvkm_done(gpuobj->parent);
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_fast = {
	.release = nvkm_gpuobj_release,
	.rd32 = nvkm_gpuobj_rd32_fast,
	.wr32 = nvkm_gpuobj_wr32_fast,
	.map = nvkm_gpuobj_map,
};

static const struct nvkm_gpuobj_func
nvkm_gpuobj_slow = {
	.release = nvkm_gpuobj_release,
	.rd32 = nvkm_gpuobj_rd32,
	.wr32 = nvkm_gpuobj_wr32,
	.map = nvkm_gpuobj_map,
};

static void *
nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->map = nvkm_kmap(gpuobj->parent);
	if (likely(gpuobj->map)) {
		gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset;
		gpuobj->func = &nvkm_gpuobj_fast;
	} else {
		gpuobj->func = &nvkm_gpuobj_slow;
	}
	return gpuobj->map;
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_func = {
	.acquire = nvkm_gpuobj_acquire,
	.map = nvkm_gpuobj_map,
};
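
/*
 * Sub-allocated objects mirror the heap tables, but every access is
 * offset by gpuobj->node->offset within the parent; the fast path bakes
 * that offset into gpuobj->map once, in nvkm_gpuobj_acquire() above.
 */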

static int
nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
		 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
{
	u32 offset;
	int ret;

	if (parent) {
		if (align >= 0) {
			ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
					   max(align, 1), &gpuobj->node);
		} else {
			ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
					   -align, &gpuobj->node);
		}
		if (ret)
			return ret;

		gpuobj->parent = parent;
		gpuobj->func = &nvkm_gpuobj_func;
		gpuobj->addr = parent->addr + gpuobj->node->offset;
		gpuobj->size = gpuobj->node->length;

		if (zero) {
			nvkm_kmap(gpuobj);
			for (offset = 0; offset < gpuobj->size; offset += 4)
				nvkm_wo32(gpuobj, offset, 0x00000000);
			nvkm_done(gpuobj);
		}
	} else {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
				      abs(align), zero, &gpuobj->memory);
		if (ret)
			return ret;

		gpuobj->func = &nvkm_gpuobj_heap;
		gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
		gpuobj->size = nvkm_memory_size(gpuobj->memory);
	}

	return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
}
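
/*
 * The sign of "align" selects the suballocation policy above: a
 * non-negative value allocates from the bottom of the parent's heap
 * (with at least 1-byte alignment), while a negative value allocates
 * from the top with -align alignment, e.g. align = -256 takes the
 * highest free 256-byte-aligned block.
 */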

void
nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_gpuobj *gpuobj = *pgpuobj;
	if (gpuobj) {
		if (gpuobj->parent)
			nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
		nvkm_mm_fini(&gpuobj->heap);
		nvkm_memory_unref(&gpuobj->memory);
		kfree(*pgpuobj);
		*pgpuobj = NULL;
	}
}

int
nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
		struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_gpuobj *gpuobj;
	int ret;

	if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
		return -ENOMEM;

	ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
	if (ret)
		nvkm_gpuobj_del(pgpuobj);
	return ret;
}
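
/*
 * Minimal usage sketch (illustrative only, not part of the original
 * file): allocate a zeroed top-level object backed by instance memory,
 * sub-allocate from its heap, write a word, and tear both down.
 * Assumes "device" is a valid struct nvkm_device; the sizes and
 * alignments are arbitrary.
 */
static int __maybe_unused
nvkm_gpuobj_usage_sketch(struct nvkm_device *device)
{
	struct nvkm_gpuobj *parent, *child;
	int ret;

	/* top-level: no parent, so backed by NVKM_MEM_TARGET_INST memory */
	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, true, NULL, &parent);
	if (ret)
		return ret;

	/* child: carved from parent->heap, 16-byte aligned from the bottom */
	ret = nvkm_gpuobj_new(device, 0x1000, 16, true, parent, &child);
	if (ret) {
		nvkm_gpuobj_del(&parent);
		return ret;
	}

	nvkm_kmap(child);			/* installs rd32/wr32 */
	nvkm_wo32(child, 0x000, 0xcafe0001);	/* 32-bit write at offset 0 */
	nvkm_done(child);			/* back to the base func table */

	nvkm_gpuobj_del(&child);
	nvkm_gpuobj_del(&parent);
	return 0;
}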

/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * beyond that usage.
 */
int
nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
{
	if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
		return -ENOMEM;

	(*pgpuobj)->addr = nvkm_memory_addr(memory);
	(*pgpuobj)->size = nvkm_memory_size(memory);
	return 0;
}
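
/*
 * Note: a wrapped object carries only addr/size.  No func table and no
 * memory reference are installed, so it must not be accessed through
 * the acquire/rd32/wr32 interface.
 */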

void
nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
		      u32 length)
{
	int i;

	for (i = 0; i < length; i += 4)
		nvkm_wo32(dst, dstoffset + i, *(u32 *)(src + i));
}

void
nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
			u32 length)
{
	int i;

	for (i = 0; i < length; i += 4)
		((u32 *)dst)[i / 4] = nvkm_ro32(src, srcoffset + i);
}
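
/*
 * Hedged round-trip sketch (illustrative only): copy a small buffer
 * into a gpuobj and back out with the helpers above.  The object must
 * be acquired first, since the helpers rely on rd32/wr32 being
 * installed.  Assumes "gpuobj" is at least 16 bytes.
 */
static void __maybe_unused
nvkm_gpuobj_memcpy_sketch(struct nvkm_gpuobj *gpuobj)
{
	u32 in[4] = { 0x01, 0x02, 0x03, 0x04 };
	u32 out[4] = {};

	nvkm_kmap(gpuobj);
	nvkm_gpuobj_memcpy_to(gpuobj, 0x000, in, sizeof(in));
	nvkm_gpuobj_memcpy_from(out, gpuobj, 0x000, sizeof(out));
	nvkm_done(gpuobj);
	WARN_ON(memcmp(in, out, sizeof(in)));	/* expect an exact copy */
}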