#ifndef __NVKM_PMU_MEMX_H__
#define __NVKM_PMU_MEMX_H__
#include "priv.h"

#include <core/device.h>

struct nvkm_memx {
	struct nvkm_pmu *pmu;
	u32 base;
	u32 size;
	struct {
		u32 mthd;
		u32 size;
		u32 data[64];
	} c;
};

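/*
 * MEMX script recorder: commands are buffered locally and flushed into the
 * PMU's MEMX data segment, where the PMU firmware (fuc) replays them later.
 * A typical caller might look roughly like this (sketch only; the variables
 * around the calls are assumed, not taken from this file):
 *
 *	struct nvkm_memx *memx;
 *	int ret = nvkm_memx_init(pmu, &memx);
 *	if (ret)
 *		return ret;
 *	nvkm_memx_wr32(memx, addr, data);
 *	nvkm_memx_wait(memx, addr, mask, data, nsec);
 *	nvkm_memx_nsec(memx, nsec);
 *	return nvkm_memx_fini(&memx, true);
 */

/* Flush the buffered command (method header followed by its payload) into
 * the data segment opened by nvkm_memx_init(), then reset the buffer.
 */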
static void
memx_out(struct nvkm_memx *memx)
{
	struct nvkm_pmu *pmu = memx->pmu;
	int i;

	if (memx->c.mthd) {
		nv_wr32(pmu, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
		for (i = 0; i < memx->c.size; i++)
			nv_wr32(pmu, 0x10a1c4, memx->c.data[i]);
		memx->c.mthd = 0;
		memx->c.size = 0;
	}
}

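/* Append a command to the local buffer, flushing first if the buffer would
 * overflow or if the method differs from the one currently being batched.
 */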
static void
memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
{
	if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
	    (memx->c.mthd && memx->c.mthd != mthd))
		memx_out(memx);
	memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
	memx->c.size += size;
	memx->c.mthd = mthd;
}

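/* Query the MEMX process for its data segment, allocate recorder state,
 * acquire data segment access and select the script area for the writes
 * issued by memx_out().
 */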
int
nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
{
	struct nvkm_memx *memx;
	u32 reply[2];
	int ret;

	ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			   MEMX_INFO_DATA, 0);
	if (ret)
		return ret;

	memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
	if (!memx)
		return -ENOMEM;
	memx->pmu = pmu;
	memx->base = reply[0];
	memx->size = reply[1];

	/* acquire data segment access */
	do {
		nv_wr32(pmu, 0x10a580, 0x00000003);
	} while (nv_rd32(pmu, 0x10a580) != 0x00000003);
	nv_wr32(pmu, 0x10a1c0, 0x01000000 | memx->base);
	return 0;
}

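/* Flush any pending command, release data segment access and, if @exec is
 * set, ask the MEMX process to execute the recorded script before the
 * recorder state is freed.
 */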
int
nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
{
	struct nvkm_memx *memx = *pmemx;
	struct nvkm_pmu *pmu = memx->pmu;
	u32 finish, reply[2];

	/* flush the cache... */
	memx_out(memx);

	/* release data segment access */
	finish = nv_rd32(pmu, 0x10a1c0) & 0x00ffffff;
	nv_wr32(pmu, 0x10a580, 0x00000000);

	/* call MEMX process to execute the script, and wait for reply */
	if (exec) {
		pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
			     memx->base, finish);
		nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n",
			 reply[0], reply[1]);
	}

	kfree(memx);
	return 0;
}

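/* Record a register write. */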
void
nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
{
	nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data);
	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
}

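/* Record a masked register poll with an @nsec timeout; flushed immediately
 * as the fuc cannot batch this method.
 */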
void
nvkm_memx_wait(struct nvkm_memx *memx,
	       u32 addr, u32 mask, u32 data, u32 nsec)
{
	nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d ns\n",
		 addr, mask, data, nsec);
	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
	memx_out(memx); /* fuc can't handle multiple */
}

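/* Record a delay of @nsec nanoseconds; flushed immediately as the fuc
 * cannot batch this method.
 */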
void
nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
{
	nv_debug(memx->pmu, " DELAY = %d ns\n", nsec);
	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
	memx_out(memx); /* fuc can't handle multiple */
}

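/* Record a wait-for-vblank, synchronised to the active head with the
 * largest resolution (chipsets prior to 0xd0 only); if no active head is
 * found, the command is skipped.
 */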
void
nvkm_memx_wait_vblank(struct nvkm_memx *memx)
{
	struct nvkm_pmu *pmu = memx->pmu;
	u32 heads, x, y, px = 0;
	int i, head_sync;

	if (nv_device(pmu)->chipset < 0xd0) {
		heads = nv_rd32(pmu, 0x610050);
		for (i = 0; i < 2; i++) {
			/* Heuristic: sync to head with biggest resolution */
			if (heads & (2 << (i << 3))) {
				x = nv_rd32(pmu, 0x610b40 + (0x540 * i));
				y = (x & 0xffff0000) >> 16;
				x &= 0x0000ffff;
				if ((x * y) > px) {
					px = (x * y);
					head_sync = i;
				}
			}
		}
	}

	if (px == 0) {
		nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n");
		return;
	}

	nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync);
	memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
	memx_out(memx); /* fuc can't handle multiple */
}

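/* Record a memory training request. */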
void
nvkm_memx_train(struct nvkm_memx *memx)
{
	nv_debug(memx->pmu, " MEM TRAIN\n");
	memx_cmd(memx, MEMX_TRAIN, 0, NULL);
}

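/* Read back the training results left by the MEMX process: query where
 * they live, then stream them out of the data segment into @res.  Fails
 * with -ENOMEM if @res is too small to hold them.
 */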
int
nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
{
	u32 reply[2], base, size, i;
	int ret;

	ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			   MEMX_INFO_TRAIN, 0);
	if (ret)
		return ret;

	base = reply[0];
	size = reply[1] >> 2;
	if (size > rsize)
		return -ENOMEM;

	/* read the packet */
	nv_wr32(pmu, 0x10a1c0, 0x02000000 | base);

	for (i = 0; i < size; i++)
		res[i] = nv_rd32(pmu, 0x10a1c4);

	return 0;
}

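/* Record the point at which host access must be blocked (MEMX_ENTER). */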
void
nvkm_memx_block(struct nvkm_memx *memx)
{
	nv_debug(memx->pmu, " HOST BLOCKED\n");
	memx_cmd(memx, MEMX_ENTER, 0, NULL);
}

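/* Record the point at which host access may be unblocked (MEMX_LEAVE). */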
void
nvkm_memx_unblock(struct nvkm_memx *memx)
{
	nv_debug(memx->pmu, " HOST UNBLOCKED\n");
	memx_cmd(memx, MEMX_LEAVE, 0, NULL);
}
#endif