/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *          Roy Spliet
 */
#define gt215_clk(p) container_of((p), struct gt215_clk, base)
#include "gt215.h"
#include "pll.h"

#include <engine/fifo.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>

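/* Per-domain programming info, filled in by ->calc and applied by ->prog. */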
struct gt215_clk {
	struct nvkm_clk base;
	struct gt215_clk_info eng[nv_clk_src_max];
};

static u32 read_clk(struct gt215_clk *, int, bool);
static u32 read_pll(struct gt215_clk *, int, u32);

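/* Look up the reference ("VCO") frequency feeding clock idx: the crystal
 * or one of the two 0xe8xx reference PLLs, selected by the source bits
 * in 0x4120 + (idx * 4). */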
static u32
read_vco(struct gt215_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));

	switch (sctl & 0x00000030) {
	case 0x00000000:
		return device->crystal;
	case 0x00000020:
		return read_pll(clk, 0x41, 0x00e820);
	case 0x00000030:
		return read_pll(clk, 0x42, 0x00e8a0);
	default:
		return 0;
	}
}

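/* Read back the current frequency (in kHz) of clock idx by decoding its
 * control register; indices >= 0x40 are the fixed refclks of the 0xe8xx
 * PLLs. */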
static u32
read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl, sdiv, sclk;

	/* refclk for the 0xe8xx plls is a fixed frequency */
	if (idx >= 0x40) {
		if (device->chipset == 0xaf) {
			/* no joke.. seriously.. sigh.. */
			return nvkm_rd32(device, 0x00471c) * 1000;
		}

		return device->crystal;
	}

	sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
	if (!ignore_en && !(sctl & 0x00000100))
		return 0;

	/* out_alt */
	if (sctl & 0x00000400)
		return 108000;

	/* vco_out */
	switch (sctl & 0x00003000) {
	case 0x00000000:
		if (!(sctl & 0x00000200))
			return device->crystal;
		return 0;
	case 0x00002000:
		if (sctl & 0x00000040)
			return 108000;
		return 100000;
	case 0x00003000:
		/* vco_enable */
		if (!(sctl & 0x00000001))
			return 0;

		sclk = read_vco(clk, idx);
		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}

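/* Read back the output frequency of the PLL at register offset 'pll':
 * refclk * N / (M * P) when running, or the alternate source when the
 * PLL is bypassed. */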
static u32
read_pll(struct gt215_clk *clk, int idx, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0);
	u32 sclk = 0, P = 1, N = 1, M = 1;

	if (!(ctrl & 0x00000008)) {
		if (ctrl & 0x00000001) {
			u32 coef = nvkm_rd32(device, pll + 4);
			M = (coef & 0x000000ff) >> 0;
			N = (coef & 0x0000ff00) >> 8;
			P = (coef & 0x003f0000) >> 16;

			/* no post-divider on these..
			 * XXX: it looks more like two post-"dividers" that
			 * cross each other out in the default RPLL config */
			if ((pll & 0x00ff00) == 0x00e800)
				P = 1;

			sclk = read_clk(clk, 0x00 + idx, false);
		}
	} else {
		sclk = read_clk(clk, 0x10 + idx, false);
	}

	if (M * P)
		return sclk * N / (M * P);

	return 0;
}

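/* ->read: report the current frequency of a clock domain in kHz. */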
static int
gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 hsrc;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_core:
	case nv_clk_src_core_intm:
		return read_pll(clk, 0x00, 0x4200);
	case nv_clk_src_shader:
		return read_pll(clk, 0x01, 0x4220);
	case nv_clk_src_mem:
		return read_pll(clk, 0x02, 0x4000);
	case nv_clk_src_disp:
		return read_clk(clk, 0x20, false);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x21, false);
	case nv_clk_src_daemon:
		return read_clk(clk, 0x25, false);
	case nv_clk_src_host:
		hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
		switch (hsrc) {
		case 0:
			return read_clk(clk, 0x1d, false);
		case 2:
		case 3:
			return 277000;
		default:
			nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
			return -EINVAL;
		}
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}

	return 0;
}

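/* Compute a divider-only setting for clock idx that gets as close to
 * 'khz' as possible (rounding down when in doubt), returning the
 * resulting frequency or -ERANGE when the required divider is out of
 * the accepted range. */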
int
gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
	       struct gt215_clk_info *info)
{
	struct gt215_clk *clk = gt215_clk(base);
	u32 oclk, sclk, sdiv;
	s32 diff;

	info->clk = 0;

	switch (khz) {
	case 27000:
		info->clk = 0x00000100;
		return khz;
	case 100000:
		info->clk = 0x00002100;
		return khz;
	case 108000:
		info->clk = 0x00002140;
		return khz;
	default:
		sclk = read_vco(clk, idx);
		sdiv = min((sclk * 2) / khz, (u32)65);
		oclk = (sclk * 2) / sdiv;
		diff = ((khz + 3000) - oclk);

		/* When imprecise, play it safe and aim for a clock lower than
		 * desired rather than higher */
		if (diff < 0) {
			sdiv++;
			oclk = (sclk * 2) / sdiv;
		}

		/* divider can go as low as 2, limited here because NVIDIA
		 * and the VBIOS on my NVA8 seem to prefer using the PLL
		 * for 810MHz - is there a good reason?
		 * XXX: PLLs with refclk 810MHz? */
		if (sdiv > 4) {
			info->clk = (((sdiv - 2) << 16) | 0x00003100);
			return oclk;
		}

		break;
	}

	return -ERANGE;
}

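/* Pick a configuration for 'khz': prefer the plain divider when it gets
 * close enough, otherwise derive PLL coefficients from the VBIOS limits
 * of the PLL at register offset 'pll'. */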
int
gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz,
	       struct gt215_clk_info *info)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll limits;
	int P, N, M, diff;
	int ret;

	info->pll = 0;

	/* If we can get within [-2, 3) MHz of the target with a divider,
	 * we'll disable the PLL and use the divider instead. */
	ret = gt215_clk_info(&clk->base, idx, khz, info);
	diff = khz - ret;
	if (!pll || (diff >= -2000 && diff < 3000)) {
		goto out;
	}

	/* Try with PLL */
	ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
	if (ret)
		return ret;

	ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info);
	if (ret != limits.refclk)
		return -EINVAL;

	ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
	if (ret >= 0) {
		info->pll = (P << 16) | (N << 8) | M;
	}

out:
	info->fb_delay = max(((khz + 7566) / 15133), (u32) 18);
	return ret ? ret : -ERANGE;
}

static int
calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
	 int idx, u32 pll, int dom)
{
	int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
				 &clk->eng[dom]);
	if (ret >= 0)
		return 0;
	return ret;
}

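/* The host clock either comes from the fixed 277MHz source or from the
 * programmable clock at index 0x1d; record the choice in info->host_out. */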
static int
calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
{
	int ret = 0;
	u32 kHz = cstate->domain[nv_clk_src_host];
	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];

	if (kHz == 277000) {
		info->clk = 0;
		info->host_out = NVA3_HOST_277;
		return 0;
	}

	info->host_out = NVA3_HOST_CLK;

	ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
	if (ret >= 0)
		return 0;

	return ret;
}

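/* Quiesce the GPU before reclocking: halt engine scheduling, wait for
 * pending interrupts to clear, then pause PFIFO and wait for the
 * engines to go idle. */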
int
gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_fifo *fifo = device->fifo;

	/* halt and idle execution engines */
	nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
	nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
	/* Wait until the interrupt handler is finished */
	if (nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x000100))
			break;
	) < 0)
		return -EBUSY;

	if (fifo)
		nvkm_fifo_pause(fifo, flags);

	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002504) & 0x00000010)
			break;
	) < 0)
		return -EIO;

	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x00251c) & 0x0000003f;
		if (tmp == 0x0000003f)
			break;
	) < 0)
		return -EIO;

	return 0;
}

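/* Undo gt215_clk_pre(): restart PFIFO and re-enable engine scheduling. */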
void
gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_fifo *fifo = device->fifo;

	if (fifo && flags)
		nvkm_fifo_start(fifo, flags);

	nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
}

static void
disable_clk_src(struct gt215_clk *clk, u32 src)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, src, 0x00000100, 0x00000000);
	nvkm_mask(device, src, 0x00000001, 0x00000000);
}

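/* Program a PLL-backed domain: force it onto a non-PLL source first,
 * write the new coefficients, wait for the PLL to report lock, then
 * switch back to the PLL output.  Without PLL coefficients, program the
 * divider path and disable the PLL instead. */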
static void
prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 src0 = 0x004120 + (idx * 4);
	const u32 src1 = 0x004160 + (idx * 4);
	const u32 ctrl = pll + 0;
	const u32 coef = pll + 4;
	u32 bypass;

	if (info->pll) {
		/* Always start from a non-PLL clock */
		bypass = nvkm_rd32(device, ctrl) & 0x00000008;
		if (!bypass) {
			nvkm_mask(device, src1, 0x00000101, 0x00000101);
			nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
			udelay(20);
		}

		nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
		nvkm_wr32(device, coef, info->pll);
		nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
		nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, ctrl) & 0x00020000)
				break;
		) < 0) {
			nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
			nvkm_mask(device, src0, 0x00000101, 0x00000000);
			return;
		}
		nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
		nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
		disable_clk_src(clk, src1);
	} else {
		nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
		nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
		udelay(20);
		nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
		disable_clk_src(clk, src0);
	}
}

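/* Program a divider-only domain with the value computed by gt215_clk_info(). */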
static void
prog_clk(struct gt215_clk *clk, int idx, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
}

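/* Switch the host clock between the fixed 277MHz source and the
 * programmable clock at index 0x1d, as decided by calc_host(). */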
static void
prog_host(struct gt215_clk *clk)
{
	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
	struct nvkm_device *device = clk->base.subdev.device;
	u32 hsrc = nvkm_rd32(device, 0xc040);

	switch (info->host_out) {
	case NVA3_HOST_277:
		if ((hsrc & 0x30000000) == 0) {
			nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
			disable_clk_src(clk, 0x4194);
		}
		break;
	case NVA3_HOST_CLK:
		prog_clk(clk, 0x1d, nv_clk_src_host);
		if ((hsrc & 0x30000000) >= 0x20000000) {
			nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
		}
		break;
	default:
		break;
	}

	/* This seems to be a clock gating factor on idle, always set to 64 */
	nvkm_wr32(device, 0xc044, 0x3e);
}

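/* Reprogram the core PLL; raise the 0x10002c delay before switching if
 * the new clock needs a larger value, and lower it again afterwards if
 * the old value was bigger than necessary. */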
static void
prog_core(struct gt215_clk *clk, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	u32 fb_delay = nvkm_rd32(device, 0x10002c);

	if (fb_delay < info->fb_delay)
		nvkm_wr32(device, 0x10002c, info->fb_delay);

	prog_pll(clk, 0x00, 0x004200, dom);

	if (fb_delay > info->fb_delay)
		nvkm_wr32(device, 0x10002c, info->fb_delay);
}

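/* ->calc: work out register values for every domain of the target
 * cstate; when the core clock will come from the PLL, also prepare an
 * intermediate divider-only core clock to switch through. */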
static int
gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
	    (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
	    (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
	    (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
	    (ret = calc_host(clk, cstate)))
		return ret;

	/* XXX: Should be reading the highest bit in the VBIOS clock to decide
	 * whether to use a PLL or not... but using a PLL defeats the purpose */
	if (core->pll) {
		ret = gt215_clk_info(&clk->base, 0x10,
				     cstate->domain[nv_clk_src_core_intm],
				     &clk->eng[nv_clk_src_core_intm]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

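/* ->prog: apply the values computed by gt215_clk_calc() with the GPU
 * quiesced by gt215_clk_pre()/gt215_clk_post(). */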
static int
gt215_clk_prog(struct nvkm_clk *base)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
	int ret = 0;
	unsigned long flags;
	unsigned long *f = &flags;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	if (core->pll)
		prog_core(clk, nv_clk_src_core_intm);

	prog_core(clk, nv_clk_src_core);
	prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
	prog_clk(clk, 0x20, nv_clk_src_disp);
	prog_clk(clk, 0x21, nv_clk_src_vdec);
	prog_host(clk);

out:
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}

static void
gt215_clk_tidy(struct nvkm_clk *base)
{
}

static const struct nvkm_clk_func
gt215_clk = {
	.read = gt215_clk_read,
	.calc = gt215_clk_calc,
	.prog = gt215_clk_prog,
	.tidy = gt215_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal  , 0xff },
		{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
		{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
		{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
		{ nv_clk_src_vdec     , 0x03 },
		{ nv_clk_src_disp     , 0x04 },
		{ nv_clk_src_host     , 0x05 },
		{ nv_clk_src_core_intm, 0x06 },
		{ nv_clk_src_max }
	}
};

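/* Constructor: allocate the subdev and hook it up to the gt215_clk
 * function table. */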
int
gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct gt215_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
}