/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/gpuobj.h>
#include <core/engine.h>

#include <subdev/instmem.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>

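/* Common gpuobj teardown: optionally zero the object's contents, release
 * its suballocation from the parent gpuobj's heap, tear down its own heap
 * (if it had one), then destroy the base object.
 */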
void
nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
{
	int i;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	if (gpuobj->node)
		nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);

	if (gpuobj->heap.block_size)
		nvkm_mm_fini(&gpuobj->heap);

	nvkm_object_destroy(&gpuobj->object);
}

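/* Common gpuobj constructor.  With a parent gpuobj (pargpu), the new object
 * is suballocated from that parent's heap; without one, fresh backing
 * memory is allocated from instmem and, where the BAR subdev supports it,
 * re-parented onto a BAR mapping of that memory.
 */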
int
nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, u32 pclass,
		    struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
		    int length, void **pobject)
{
	struct nvkm_instmem *imem = nvkm_instmem(parent);
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nvkm_gpuobj *gpuobj;
	struct nvkm_mm *heap = NULL;
	int ret, i;
	u64 addr;

	*pobject = NULL;

	if (pargpu) {
		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
			if (nv_gpuobj(pargpu)->heap.block_size)
				break;
			pargpu = pargpu->parent;
		}

		if (unlikely(pargpu == NULL)) {
			nv_error(parent, "no gpuobj heap\n");
			return -EINVAL;
		}

		addr = nv_gpuobj(pargpu)->addr;
		heap = &nv_gpuobj(pargpu)->heap;
		atomic_inc(&parent->refcount);
	} else {
		ret = imem->alloc(imem, parent, size, align, &parent);
		pargpu = parent;
		if (ret)
			return ret;

		addr = nv_memobj(pargpu)->addr;
		size = nv_memobj(pargpu)->size;

		if (bar && bar->alloc) {
			/* relies on instobj implementations keeping their
			 * nvkm_mem pointer directly after the base object
			 */
			struct nvkm_instobj *iobj = (void *)parent;
			struct nvkm_mem **mem = (void *)(iobj + 1);
			struct nvkm_mem *node = *mem;
			if (!bar->alloc(bar, parent, node, &pargpu)) {
				nvkm_object_ref(NULL, &parent);
				parent = pargpu;
			}
		}
	}

	ret = nvkm_object_create_(parent, engine, oclass, pclass |
				  NV_GPUOBJ_CLASS, length, pobject);
	nvkm_object_ref(NULL, &parent);
	gpuobj = *pobject;
	if (ret)
		return ret;

	gpuobj->parent = pargpu;
	gpuobj->flags = flags;
	gpuobj->addr = addr;
	gpuobj->size = size;

	if (heap) {
		ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
				   &gpuobj->node);
		if (ret)
			return ret;

		gpuobj->addr += gpuobj->node->offset;
	}

	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
		ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
		if (ret)
			return ret;
	}

	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	return ret;
}

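/* Argument blob passed to _nvkm_gpuobj_ctor() through nvkm_object_ctor()'s
 * data/size parameters.
 */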
struct nvkm_gpuobj_class {
	struct nvkm_object *pargpu;
	u64 size;
	u32 align;
	u32 flags;
};

static int
_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_gpuobj_class *args = data;
	struct nvkm_gpuobj *object;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
				 args->size, args->align, args->flags,
				 &object);
	*pobject = nv_object(object);
	if (ret)
		return ret;

	return 0;
}

void
_nvkm_gpuobj_dtor(struct nvkm_object *object)
{
	nvkm_gpuobj_destroy(nv_gpuobj(object));
}

int
_nvkm_gpuobj_init(struct nvkm_object *object)
{
	return nvkm_gpuobj_init(nv_gpuobj(object));
}

int
_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend)
{
	return nvkm_gpuobj_fini(nv_gpuobj(object), suspend);
}

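/* Default accessors: forward reads/writes to the parent object, offset by
 * this gpuobj's position within the parent's heap (if suballocated).
 */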
u32
_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
{
	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
	if (gpuobj->node)
		addr += gpuobj->node->offset;
	return pfuncs->rd32(gpuobj->parent, addr);
}

void
_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
	if (gpuobj->node)
		addr += gpuobj->node->offset;
	pfuncs->wr32(gpuobj->parent, addr, data);
}

static struct nvkm_oclass
_nvkm_gpuobj_oclass = {
	.handle = 0x00000000,
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = _nvkm_gpuobj_ctor,
		.dtor = _nvkm_gpuobj_dtor,
		.init = _nvkm_gpuobj_init,
		.fini = _nvkm_gpuobj_fini,
		.rd32 = _nvkm_gpuobj_rd32,
		.wr32 = _nvkm_gpuobj_wr32,
	},
};

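/* Convenience constructor for a plain gpuobj of the generic class above.
 *
 * A typical caller (illustrative sketch only, names hypothetical) might
 * look like:
 *
 *	struct nvkm_gpuobj *ramfc;
 *	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000,
 *			      NVOBJ_FLAG_ZERO_ALLOC, &ramfc);
 *	if (ret)
 *		return ret;
 */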
int
nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
		u32 size, u32 align, u32 flags,
		struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_object *engine = parent;
	struct nvkm_gpuobj_class args = {
		.pargpu = pargpu,
		.size = size,
		.align = align,
		.flags = flags,
	};

	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
		engine = &engine->engine->subdev.object;
	BUG_ON(engine == NULL);

	return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass,
				&args, sizeof(args),
				(struct nvkm_object **)pgpuobj);
}

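/* Map the gpuobj's backing memory through the BAR for host access, using
 * the BAR subdev's umap() method where one is provided.
 */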
int
nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
{
	struct nvkm_bar *bar = nvkm_bar(gpuobj);
	int ret = -EINVAL;

	if (bar && bar->umap) {
		/* as in nvkm_gpuobj_create_(), the nvkm_mem pointer sits
		 * directly after the instobj base object
		 */
		struct nvkm_instobj *iobj = (void *)
			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
		struct nvkm_mem **mem = (void *)(iobj + 1);
		ret = bar->umap(bar, *mem, access, vma);
	}

	return ret;
}

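/* Map the gpuobj's backing memory into an arbitrary VM at 4KiB (1 << 12)
 * page granularity.
 */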
int
nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
		   u32 access, struct nvkm_vma *vma)
{
	struct nvkm_instobj *iobj = (void *)
		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
	struct nvkm_mem **mem = (void *)(iobj + 1);
	int ret;

	ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, *mem);
	return 0;
}

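/* tear down a mapping created by nvkm_gpuobj_map()/nvkm_gpuobj_map_vm() */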
void
nvkm_gpuobj_unmap(struct nvkm_vma *vma)
{
	if (vma->node) {
		nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
	}
}

/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * anywhere else.
 */

static void
nvkm_gpudup_dtor(struct nvkm_object *object)
{
	struct nvkm_gpuobj *gpuobj = (void *)object;
	nvkm_object_ref(NULL, &gpuobj->parent);
	nvkm_object_destroy(&gpuobj->object);
}

static struct nvkm_oclass
nvkm_gpudup_oclass = {
	.handle = NV_GPUOBJ_CLASS,
	.ofuncs = &(struct nvkm_ofuncs) {
		.dtor = nvkm_gpudup_dtor,
		.init = nvkm_object_init,
		.fini = nvkm_object_fini,
	},
};

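/* Create a lightweight gpuobj that aliases 'base': it takes a reference on
 * base and copies its address/size, but owns no backing memory of its own.
 */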
int
nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
		struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_gpuobj *gpuobj;
	int ret;

	ret = nvkm_object_create(parent, &parent->engine->subdev.object,
				 &nvkm_gpudup_oclass, 0, &gpuobj);
	*pgpuobj = gpuobj;
	if (ret)
		return ret;

	nvkm_object_ref(nv_object(base), &gpuobj->parent);
	gpuobj->addr = base->addr;
	gpuobj->size = base->size;
	return 0;
}