/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "dmacnv50.h"
#include "rootnv50.h"

#include <core/client.h>
#include <core/oproxy.h>
#include <core/ramht.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/dma.h>

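/*
 * Proxy for a DMA object that has been bound into the display's hash
 * table (RAMHT); the slot returned at bind time is remembered so the
 * entry can be removed again when the object is destroyed.
 */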
struct nv50_disp_dmac_object {
	struct nvkm_oproxy oproxy;
	struct nv50_disp_root *root;
	int hash;
};

static void
nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
{
	struct nv50_disp_dmac_object *object =
		container_of(base, typeof(*object), oproxy);
	nvkm_ramht_remove(object->root->ramht, object->hash);
}

static const struct nvkm_oproxy_func
nv50_disp_dmac_child_func_ = {
	.dtor[0] = nv50_disp_dmac_child_del_,
};

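/*
 * Construct a DMA object as a child of the channel and bind it into the
 * display's hash table, keeping the hash slot so it can be removed when
 * the child is destroyed.
 */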
static int
nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
			  const struct nvkm_oclass *oclass,
			  void *data, u32 size, struct nvkm_object **pobject)
{
	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
	struct nv50_disp_root *root = chan->base.root;
	struct nvkm_device *device = root->disp->base.engine.subdev.device;
	const struct nvkm_device_oclass *sclass = oclass->priv;
	struct nv50_disp_dmac_object *object;
	int ret;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
	object->root = root;
	*pobject = &object->oproxy.base;

	ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
	if (ret)
		return ret;

	object->hash = chan->func->bind(chan, object->oproxy.object,
					      oclass->handle);
	if (object->hash < 0)
		return object->hash;

	return 0;
}

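/*
 * Enumerate the object classes a DMA channel exposes to clients; these
 * are the classes provided by the DMA object engine.
 */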
static int
nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
			  struct nvkm_oclass *sclass)
{
	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
	struct nv50_disp *disp = chan->base.root->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const struct nvkm_device_oclass *oclass = NULL;

	sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
	if (sclass->engine && sclass->engine->func->base.sclass) {
		sclass->engine->func->base.sclass(sclass, index, &oclass);
		if (oclass) {
			sclass->priv = oclass;
			return 0;
		}
	}

	return -EINVAL;
}

static void
nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
{
	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
	chan->func->fini(chan);
}

static int
nv50_disp_dmac_init_(struct nv50_disp_chan *base)
{
	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
	return chan->func->init(chan);
}

static void *
nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
{
	return nv50_disp_dmac(base);
}

static const struct nv50_disp_chan_func
nv50_disp_dmac_func_ = {
	.dtor = nv50_disp_dmac_dtor_,
	.init = nv50_disp_dmac_init_,
	.fini = nv50_disp_dmac_fini_,
	.child_get = nv50_disp_dmac_child_get_,
	.child_new = nv50_disp_dmac_child_new_,
};

int
nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
		    const struct nv50_disp_chan_mthd *mthd,
		    struct nv50_disp_root *root, int chid, int head, u64 push,
		    const struct nvkm_oclass *oclass,
		    struct nvkm_object **pobject)
{
	struct nvkm_device *device = root->disp->base.engine.subdev.device;
	struct nvkm_client *client = oclass->client;
	struct nvkm_dmaobj *dmaobj;
	struct nv50_disp_dmac *chan;
	int ret;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;
	chan->func = func;

	ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
				  chid, head, oclass, &chan->base);
	if (ret)
		return ret;

	dmaobj = nvkm_dma_search(device->dma, client, push);
	if (!dmaobj)
		return -ENOENT;

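	/* push buffers for display DMA channels must span exactly 4KiB */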
	if (dmaobj->limit - dmaobj->start != 0xfff)
		return -EINVAL;

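	/* record the push buffer target and base address (>> 8) in the
	 * format programmed into the hardware during channel init
	 */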
	switch (dmaobj->target) {
	case NV_MEM_TARGET_VRAM:
		chan->push = 0x00000001 | dmaobj->start >> 8;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		chan->push = 0x00000003 | dmaobj->start >> 8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

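/*
 * Insert a DMA object into the display's hash table; the context value
 * encodes the channel id, tying the entry to this channel.
 */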
int
nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
		    struct nvkm_object *object, u32 handle)
{
	return nvkm_ramht_insert(chan->base.root->ramht, object,
				 chan->base.chid, -10, handle,
				 chan->base.chid << 28 |
				 chan->base.chid);
}

static void
nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
{
	struct nv50_disp *disp = chan->base.root->disp;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = chan->base.chid;

	/* deactivate channel */
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
	}

	/* disable error reporting and completion notifications */
	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
}

static int
nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
{
	struct nv50_disp *disp = chan->base.root->disp;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = chan->base.chid;

	/* enable error reporting */
	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}

const struct nv50_disp_dmac_func
nv50_disp_dmac_func = {
	.init = nv50_disp_dmac_init,
	.fini = nv50_disp_dmac_fini,
	.bind = nv50_disp_dmac_bind,
};