1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "gt215.h"
25#include "pll.h"
26
27#include <core/device.h>
28#include <subdev/bios.h>
29#include <subdev/bios/pll.h>
30#include <subdev/timer.h>
31
/* Private state for the MCP77/MCP79 clock subdev.  The *_clk_calc() hook
 * records the chosen strategy here, and *_clk_prog() later commits it to
 * hardware. */
struct mcp77_clk_priv {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;	/* chosen core/shader/vdec sources */
	u32 cctrl, sctrl;			/* core/shader control register values */
	u32 ccoef, scoef;			/* core/shader PLL N/M coefficients */
	u32 cpost, spost;			/* core/shader post-divider values */
	u32 vdiv;				/* vdec divider register value */
};
40
41static u32
42read_div(struct nvkm_clk *clk)
43{
44	return nv_rd32(clk, 0x004600);
45}
46
47static u32
48read_pll(struct nvkm_clk *clk, u32 base)
49{
50	u32 ctrl = nv_rd32(clk, base + 0);
51	u32 coef = nv_rd32(clk, base + 4);
52	u32 ref = clk->read(clk, nv_clk_src_href);
53	u32 post_div = 0;
54	u32 clock = 0;
55	int N1, M1;
56
57	switch (base){
58	case 0x4020:
59		post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
60		break;
61	case 0x4028:
62		post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
63		break;
64	default:
65		break;
66	}
67
68	N1 = (coef & 0x0000ff00) >> 8;
69	M1 = (coef & 0x000000ff);
70	if ((ctrl & 0x80000000) && M1) {
71		clock = ref * N1 / M1;
72		clock = clock / post_div;
73	}
74
75	return clock;
76}
77
78static int
79mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
80{
81	struct mcp77_clk_priv *priv = (void *)clk;
82	u32 mast = nv_rd32(clk, 0x00c054);
83	u32 P = 0;
84
85	switch (src) {
86	case nv_clk_src_crystal:
87		return nv_device(priv)->crystal;
88	case nv_clk_src_href:
89		return 100000; /* PCIE reference clock */
90	case nv_clk_src_hclkm4:
91		return clk->read(clk, nv_clk_src_href) * 4;
92	case nv_clk_src_hclkm2d3:
93		return clk->read(clk, nv_clk_src_href) * 2 / 3;
94	case nv_clk_src_host:
95		switch (mast & 0x000c0000) {
96		case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
97		case 0x00040000: break;
98		case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
99		case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
100		}
101		break;
102	case nv_clk_src_core:
103		P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
104
105		switch (mast & 0x00000003) {
106		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
107		case 0x00000001: return 0;
108		case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
109		case 0x00000003: return read_pll(clk, 0x004028) >> P;
110		}
111		break;
112	case nv_clk_src_cclk:
113		if ((mast & 0x03000000) != 0x03000000)
114			return clk->read(clk, nv_clk_src_core);
115
116		if ((mast & 0x00000200) == 0x00000000)
117			return clk->read(clk, nv_clk_src_core);
118
119		switch (mast & 0x00000c00) {
120		case 0x00000000: return clk->read(clk, nv_clk_src_href);
121		case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
122		case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
123		default: return 0;
124		}
125	case nv_clk_src_shader:
126		P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
127		switch (mast & 0x00000030) {
128		case 0x00000000:
129			if (mast & 0x00000040)
130				return clk->read(clk, nv_clk_src_href) >> P;
131			return clk->read(clk, nv_clk_src_crystal) >> P;
132		case 0x00000010: break;
133		case 0x00000020: return read_pll(clk, 0x004028) >> P;
134		case 0x00000030: return read_pll(clk, 0x004020) >> P;
135		}
136		break;
137	case nv_clk_src_mem:
138		return 0;
139		break;
140	case nv_clk_src_vdec:
141		P = (read_div(clk) & 0x00000700) >> 8;
142
143		switch (mast & 0x00400000) {
144		case 0x00400000:
145			return clk->read(clk, nv_clk_src_core) >> P;
146			break;
147		default:
148			return 500000 >> P;
149			break;
150		}
151		break;
152	default:
153		break;
154	}
155
156	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
157	return 0;
158}
159
160static u32
161calc_pll(struct mcp77_clk_priv *priv, u32 reg,
162	 u32 clock, int *N, int *M, int *P)
163{
164	struct nvkm_bios *bios = nvkm_bios(priv);
165	struct nvbios_pll pll;
166	struct nvkm_clk *clk = &priv->base;
167	int ret;
168
169	ret = nvbios_pll_parse(bios, reg, &pll);
170	if (ret)
171		return 0;
172
173	pll.vco2.max_freq = 0;
174	pll.refclk = clk->read(clk, nv_clk_src_href);
175	if (!pll.refclk)
176		return 0;
177
178	return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
179}
180
181static inline u32
182calc_P(u32 src, u32 target, int *div)
183{
184	u32 clk0 = src, clk1 = src;
185	for (*div = 0; *div <= 7; (*div)++) {
186		if (clk0 <= target) {
187			clk1 = clk0 << (*div ? 1 : 0);
188			break;
189		}
190		clk0 >>= 1;
191	}
192
193	if (target - clk0 <= clk1 - target)
194		return clk0;
195	(*div)--;
196	return clk1;
197}
198
/* Work out a reclocking strategy for the core, shader and vdec domains of
 * @cstate, storing the chosen sources and register values in priv for
 * mcp77_clk_prog() to commit later.  For core and shader, a divided
 * reference source is preferred over a PLL when it gets at least as close
 * to the target frequency. */
static int
mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
{
	struct mcp77_clk_priv *priv = (void *)clk;
	const int shader = cstate->domain[nv_clk_src_shader];
	const int core = cstate->domain[nv_clk_src_core];
	const int vdec = cstate->domain[nv_clk_src_vdec];
	u32 out = 0, clock = 0;
	int N, M, P1, P2 = 0;
	int divs = 0;

	/* cclk: find suitable source, disable PLL if we can */
	if (core < clk->read(clk, nv_clk_src_hclkm4))
		out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);

	/* Calculate clock * 2, so shader clock can use it too */
	clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);

	/* Use the divided hclkm4 source when it is no worse than the PLL. */
	if (abs(core - out) <= abs(core - (clock >> 1))) {
		priv->csrc = nv_clk_src_hclkm4;
		priv->cctrl = divs << 16;
	} else {
		/* NVCTRL is actually used _after_ NVPOST, and after what we
		 * call NVPLL. To make matters worse, NVPOST is an integer
		 * divider instead of a right-shift number. */
		if(P1 > 2) {
			/* Split the shift count: at most 2 goes into NVPOST,
			 * the remainder into the NVCTRL integer divider. */
			P2 = P1 - 2;
			P1 = 2;
		}

		priv->csrc = nv_clk_src_core;
		priv->ccoef = (N << 8) | M;

		priv->cctrl = (P2 + 1) << 16;
		priv->cpost = (1 << P1) << 16;
	}

	/* sclk: nvpll + divisor, href or spll */
	out = 0;
	if (shader == clk->read(clk, nv_clk_src_href)) {
		priv->ssrc = nv_clk_src_href;
	} else {
		clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
		/* The shader clock can also run off the (doubled) core PLL. */
		if (priv->csrc == nv_clk_src_core)
			out = calc_P((core << 1), shader, &divs);

		/* Combined divider (divs + P2) must still fit in 3 bits. */
		if (abs(shader - out) <=
		    abs(shader - clock) &&
		   (divs + P2) <= 7) {
			priv->ssrc = nv_clk_src_core;
			priv->sctrl = (divs + P2) << 16;
		} else {
			priv->ssrc = nv_clk_src_shader;
			priv->scoef = (N << 8) | M;
			priv->sctrl = P1 << 16;
		}
	}

	/* vclk: divided core clock vs divided fixed 500MHz source */
	out = calc_P(core, vdec, &divs);
	clock = calc_P(500000, vdec, &P1);
	if(abs(vdec - out) <= abs(vdec - clock)) {
		priv->vsrc = nv_clk_src_cclk;
		priv->vdiv = divs << 16;
	} else {
		priv->vsrc = nv_clk_src_vdec;
		priv->vdiv = P1 << 16;
	}

	/* Print strategy! */
	nv_debug(priv, "nvpll: %08x %08x %08x\n",
			priv->ccoef, priv->cpost, priv->cctrl);
	nv_debug(priv, " spll: %08x %08x %08x\n",
			priv->scoef, priv->spost, priv->sctrl);
	nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
	if (priv->csrc == nv_clk_src_hclkm4)
		nv_debug(priv, "core: hrefm4\n");
	else
		nv_debug(priv, "core: nvpll\n");

	if (priv->ssrc == nv_clk_src_hclkm4)
		nv_debug(priv, "shader: hrefm4\n");
	else if (priv->ssrc == nv_clk_src_core)
		nv_debug(priv, "shader: nvpll\n");
	else
		nv_debug(priv, "shader: spll\n");

	if (priv->vsrc == nv_clk_src_hclkm4)
		nv_debug(priv, "vdec: 500MHz\n");
	else
		nv_debug(priv, "vdec: core\n");

	return 0;
}
293
/* Commit the strategy computed by mcp77_clk_calc() to the hardware.
 * Clocks are first switched to safe (href-derived) sources, then the
 * PLLs/dividers are programmed, and finally the mux register (0xc054)
 * switches to the new sources.  The write ordering here is
 * hardware-critical; do not reorder. */
static int
mcp77_clk_prog(struct nvkm_clk *clk)
{
	struct mcp77_clk_priv *priv = (void *)clk;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	/* Pause engines / take locks before touching the clock tree. */
	ret = gt215_clk_pre(clk, f);
	if (ret)
		goto out;

	/* First switch to safe clocks: href */
	mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	switch (priv->csrc) {
	case nv_clk_src_hclkm4:
		nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nv_wr32(clk, 0x402c, priv->ccoef);
		nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
		nv_wr32(clk, 0x4040, priv->cpost);
		pllmask |= (0x3 << 8);	/* wait for core PLL lock below */
		mast |= 0x00000003;
		break;
	default:
		nv_warn(priv,"Reclocking failed: unknown core clock\n");
		goto resume;
	}

	switch (priv->ssrc) {
	case nv_clk_src_href:
		nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nv_wr32(clk, 0x4024, priv->scoef);
		nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
		nv_wr32(clk, 0x4070, priv->spost);
		pllmask |= (0x3 << 12);	/* wait for shader PLL lock below */
		mast |= 0x00000030;
		break;
	default:
		nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* Wait for the enabled PLLs to report lock before switching over. */
	if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
		nv_warn(priv,"Reclocking failed: unstable PLLs\n");
		goto resume;
	}

	switch (priv->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		/* fall through: the divider is written either way */
	default:
		nv_wr32(clk, 0x4600, priv->vdiv);
	}

	/* Switch to the newly-programmed sources. */
	nv_wr32(clk, 0xc054, mast);

resume:
	/* Disable some PLLs and dividers when unused */
	if (priv->csrc != nv_clk_src_core) {
		nv_wr32(clk, 0x4040, 0x00000000);
		nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
	}

	if (priv->ssrc != nv_clk_src_shader) {
		nv_wr32(clk, 0x4070, 0x00000000);
		nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
	}

out:
	/* NOTE(review): on -EBUSY the pre-hook presumably did not take its
	 * lock, so pass NULL to the post-hook — confirm against
	 * gt215_clk_pre/gt215_clk_post. */
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(clk, f);
	return ret;
}
383
/* Post-reclock tidy hook — intentionally a no-op on this chipset. */
static void
mcp77_clk_tidy(struct nvkm_clk *clk)
{
}
388
/* Clock domains exposed by this subdev; the named ones are reclockable
 * with a 1000 (kHz -> MHz) display divisor.  Terminated by
 * nv_clk_src_max. */
static struct nvkm_domain
mcp77_domains[] = {
	{ nv_clk_src_crystal, 0xff },
	{ nv_clk_src_href   , 0xff },
	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
	{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
	{ nv_clk_src_max }
};
398
399static int
400mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
401	       struct nvkm_oclass *oclass, void *data, u32 size,
402	       struct nvkm_object **pobject)
403{
404	struct mcp77_clk_priv *priv;
405	int ret;
406
407	ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
408			      NULL, 0, true, &priv);
409	*pobject = nv_object(priv);
410	if (ret)
411		return ret;
412
413	priv->base.read = mcp77_clk_read;
414	priv->base.calc = mcp77_clk_calc;
415	priv->base.prog = mcp77_clk_prog;
416	priv->base.tidy = mcp77_clk_tidy;
417	return 0;
418}
419
/* Object class descriptor for the MCP77 (0xaa) clock subdev; dtor/init/
 * fini use the generic nvkm_clk implementations. */
struct nvkm_oclass *
mcp77_clk_oclass = &(struct nvkm_oclass) {
	.handle = NV_SUBDEV(CLK, 0xaa),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = mcp77_clk_ctor,
		.dtor = _nvkm_clk_dtor,
		.init = _nvkm_clk_init,
		.fini = _nvkm_clk_fini,
	},
};
430