Lines Matching refs:pmu

In nvkm_pmu_pgob():

   29  nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
   31      const struct nvkm_pmu_impl *impl = (void *)nv_oclass(pmu);
   33          impl->pgob(pmu, enable);
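
The pgob hook is a per-chipset callback (line 31 pulls the impl from the
object class), and nvkm_pmu_pgob() simply forwards to it when one is
provided.  A hedged caller sketch; the nvkm_pmu() lookup and the call site
are illustrative assumptions, not part of this listing:

    /* Sketch: ask the PMU to lift power-gate-on-boot before bringing an
     * engine up.  Safe to call unconditionally: nvkm_pmu_pgob() does
     * nothing when the bound implementation has no pgob callback.
     */
    struct nvkm_pmu *pmu = nvkm_pmu(device);    /* assumed subdev lookup */
    if (pmu)
            nvkm_pmu_pgob(pmu, false);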

In nvkm_pmu_send():

   37  nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
   40      struct nvkm_subdev *subdev = nv_subdev(pmu);
   44      addr = nv_rd32(pmu, 0x10a4a0);
   45      if (!nv_wait_ne(pmu, 0x10a4b0, 0xffffffff, addr ^ 8))
   54          pmu->recv.message = message;
   55          pmu->recv.process = process;
   60          nv_wr32(pmu, 0x10a580, 0x00000001);
   61      } while (nv_rd32(pmu, 0x10a580) != 0x00000001);
   64      nv_wr32(pmu, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
   65                             pmu->send.base));
   66      nv_wr32(pmu, 0x10a1c4, process);
   67      nv_wr32(pmu, 0x10a1c4, message);
   68      nv_wr32(pmu, 0x10a1c4, data0);
   69      nv_wr32(pmu, 0x10a1c4, data1);
   70      nv_wr32(pmu, 0x10a4a0, (addr + 1) & 0x0f);
   73      nv_wr32(pmu, 0x10a580, 0x00000000);
   77          wait_event(pmu->recv.wait, (pmu->recv.process == 0));
   78          reply[0] = pmu->recv.data[0];
   79          reply[1] = pmu->recv.data[1];
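
nvkm_pmu_send() is what pmu->message points at after init (line 196
below).  It claims a slot in the host->PMU ring, streams the four-word
packet (process, message, data0, data1) through the data-segment port at
0x10a1c4, bumps the PUT pointer, and, when the caller passed a reply
buffer, sleeps until nvkm_pmu_recv() posts the matching answer.  A hedged
caller sketch assuming the era's <subdev/pmu.h> declarations;
PROC_TEST/TEST_MSG_PING are placeholder IDs, not names from this file:

    static int
    example_ping(struct nvkm_pmu *pmu)
    {
            u32 reply[2];
            int ret;

            /* a non-NULL reply[] makes the call synchronous: the caller
             * blocks on pmu->recv.wait until a packet with a matching
             * process/message pair arrives
             */
            ret = pmu->message(pmu, reply, PROC_TEST, TEST_MSG_PING, 0, 0);
            if (ret)
                    return ret;

            /* reply[0]/reply[1] carry data0/data1 of the reply packet */
            return 0;
    }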

In nvkm_pmu_recv():

   89      struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
   93      u32 addr = nv_rd32(pmu, 0x10a4cc);
   94      if (addr == nv_rd32(pmu, 0x10a4c8))
   99          nv_wr32(pmu, 0x10a580, 0x00000002);
  100      } while (nv_rd32(pmu, 0x10a580) != 0x00000002);
  103      nv_wr32(pmu, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
  104                             pmu->recv.base));
  105      process = nv_rd32(pmu, 0x10a1c4);
  106      message = nv_rd32(pmu, 0x10a1c4);
  107      data0 = nv_rd32(pmu, 0x10a1c4);
  108      data1 = nv_rd32(pmu, 0x10a1c4);
  109      nv_wr32(pmu, 0x10a4cc, (addr + 1) & 0x0f);
  112      nv_wr32(pmu, 0x10a580, 0x00000000);
  115      if (pmu->recv.process) {
  116          if (process == pmu->recv.process &&
  117              message == pmu->recv.message) {
  118              pmu->recv.data[0] = data0;
  119              pmu->recv.data[1] = data1;
  120              pmu->recv.process = 0;
  121              wake_up(&pmu->recv.wait);
  129      nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
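
Both rings use 4-bit pointers over eight 16-byte slots: the low three
bits select the slot ((addr & 0x07) << 4 at lines 64/103), and the fourth
bit is a lap flag, so GET == PUT means empty while GET == (PUT ^ 8) means
full (the nv_wait_ne() on line 45 waits for the full condition to clear).
A standalone sketch of that arithmetic, assuming nothing beyond the
constants in the listing:

    #include <assert.h>
    #include <stdint.h>

    #define RING_SLOTS 8            /* addr & 0x07 selects the slot      */
    #define RING_WRAP  0x08         /* lap bit distinguishes full/empty  */
    #define RING_MASK  0x0f         /* pointers advance (addr + 1) & 0xf */

    static int ring_empty(uint32_t get, uint32_t put) { return get == put; }
    static int ring_full(uint32_t get, uint32_t put)  { return get == (put ^ RING_WRAP); }

    int main(void)
    {
            uint32_t get = 0, put = 0;
            assert(ring_empty(get, put));
            for (int i = 0; i < RING_SLOTS; i++)    /* produce 8 packets */
                    put = (put + 1) & RING_MASK;
            assert(ring_full(get, put));            /* put == 8, get == 0 */
            return 0;
    }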

In nvkm_pmu_intr():

  140      struct nvkm_pmu *pmu = (void *)subdev;
  141      u32 disp = nv_rd32(pmu, 0x10a01c);
  142      u32 intr = nv_rd32(pmu, 0x10a008) & disp & ~(disp >> 16);
  145          u32 stat = nv_rd32(pmu, 0x10a16c);
  147              nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
  148                       stat & 0x00ffffff, nv_rd32(pmu, 0x10a168));
  149              nv_wr32(pmu, 0x10a16c, 0x00000000);
  155          schedule_work(&pmu->recv.work);
  156          nv_wr32(pmu, 0x10a004, 0x00000040);
  161          nv_info(pmu, "wr32 0x%06x 0x%08x\n", nv_rd32(pmu, 0x10a7a0),
  162                  nv_rd32(pmu, 0x10a7a4));
  163          nv_wr32(pmu, 0x10a004, 0x00000080);
  168          nv_error(pmu, "intr 0x%08x\n", intr);
  169          nv_wr32(pmu, 0x10a004, intr);
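
Line 142 keeps only interrupts that are both pending and enabled (the
high half of 0x10a01c appears to mask sources routed away from the host),
and each serviced source is acknowledged by writing its bit back to
0x10a004.  The bit meanings implied by the handler, with descriptive
names that are assumptions rather than names from the listing:

    #define PMU_INTR_UAS_FAULT 0x00000020  /* bad access; cause in 0x10a16c/0x10a168 */
    #define PMU_INTR_MSG       0x00000040  /* PMU->host ring non-empty; recv work scheduled */
    #define PMU_INTR_WR32      0x00000080  /* debug register write logged from the ucode */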

In _nvkm_pmu_fini():

  176      struct nvkm_pmu *pmu = (void *)object;
  178      nv_wr32(pmu, 0x10a014, 0x00000060);
  179      flush_work(&pmu->recv.work);
  181      return nvkm_subdev_fini(&pmu->base, suspend);

In _nvkm_pmu_init():

  188      struct nvkm_pmu *pmu = (void *)object;
  191      ret = nvkm_subdev_init(&pmu->base);
  195      nv_subdev(pmu)->intr = nvkm_pmu_intr;
  196      pmu->message = nvkm_pmu_send;
  197      pmu->pgob = nvkm_pmu_pgob;
  200      nv_wr32(pmu, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
  201      nv_wait(pmu, 0x10a04c, 0xffffffff, 0x00000000);
  202      nv_mask(pmu, 0x000200, 0x00002000, 0x00000000);
  203      nv_mask(pmu, 0x000200, 0x00002000, 0x00002000);
  204      nv_rd32(pmu, 0x000200);
  205      nv_wait(pmu, 0x10a10c, 0x00000006, 0x00000000);
  208      nv_wr32(pmu, 0x10a1c0, 0x01000000);
  210          nv_wr32(pmu, 0x10a1c4, impl->data.data[i]);
  213      nv_wr32(pmu, 0x10a180, 0x01000000);
  216              nv_wr32(pmu, 0x10a188, i >> 6);
  217          nv_wr32(pmu, 0x10a184, impl->code.data[i]);
  221      nv_wr32(pmu, 0x10a10c, 0x00000000);
  222      nv_wr32(pmu, 0x10a104, 0x00000000);
  223      nv_wr32(pmu, 0x10a100, 0x00000002);
  226      if (!nv_wait_ne(pmu, 0x10a4d0, 0xffffffff, 0x00000000))
  228      pmu->send.base = nv_rd32(pmu, 0x10a4d0) & 0x0000ffff;
  229      pmu->send.size = nv_rd32(pmu, 0x10a4d0) >> 16;
  232      if (!nv_wait_ne(pmu, 0x10a4dc, 0xffffffff, 0x00000000))
  234      pmu->recv.base = nv_rd32(pmu, 0x10a4dc) & 0x0000ffff;
  235      pmu->recv.size = nv_rd32(pmu, 0x10a4dc) >> 16;
  237      nv_wr32(pmu, 0x10a010, 0x000000e0);
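
Lines 216-217 are the body of the code-upload loop: falcon code memory is
written in 64-word (256-byte) pages, with 0x10a188 rewritten at each page
boundary.  A hedged reconstruction of the loop, with the bounds assumed
from impl->code.size:

    /* upload code segment, page by page */
    for (i = 0; i < impl->code.size / 4; i++) {
            if ((i & 0x3f) == 0)                    /* every 64 words..   */
                    nv_wr32(pmu, 0x10a188, i >> 6); /* ..select next page */
            nv_wr32(pmu, 0x10a184, impl->code.data[i]);
    }

Once the falcon is started (0x10a100 = 2, line 223), the booted ucode
publishes both ring descriptors: lines 228-235 show each of 0x10a4d0
(host->PMU) and 0x10a4dc (PMU->host) packing the ring's base offset in
its low 16 bits and its size in the high 16 bits.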

In nvkm_pmu_create_():

  245      struct nvkm_pmu *pmu;
  250      pmu = *pobject;
  254      INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
  255      init_waitqueue_head(&pmu->recv.wait);

In _nvkm_pmu_ctor():

  264      struct nvkm_pmu *pmu;
  265      int ret = nvkm_pmu_create(parent, engine, oclass, &pmu);
  266      *pobject = nv_object(pmu);
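
The impl->code/impl->data images consumed during init come from the
per-chipset object class that also supplies these ctor/init/fini hooks.
A hedged sketch of how a chipset might wire that up, modeled on the
gf100-era pattern (gf100_pmu_code/gf100_pmu_data stand for the generated
firmware headers; all names here are assumptions, not taken from this
listing):

    struct nvkm_oclass *
    gf100_pmu_oclass = &(struct nvkm_pmu_impl) {
            .base.handle = NV_SUBDEV(PMU, 0xc0),
            .base.ofuncs = &(struct nvkm_ofuncs) {
                    .ctor = _nvkm_pmu_ctor,
                    .dtor = _nvkm_pmu_dtor,
                    .init = _nvkm_pmu_init,
                    .fini = _nvkm_pmu_fini,
            },
            .code.data = gf100_pmu_code,    /* built-in falcon ucode */
            .code.size = sizeof(gf100_pmu_code),
            .data.data = gf100_pmu_data,    /* matching data segment */
            .data.size = sizeof(gf100_pmu_data),
    }.base;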