/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define gf100_ram(p) container_of((p), struct gf100_ram, base)
#include "ram.h"
#include "ramfuc.h"

#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk.h>
#include <subdev/clk/pll.h>
#include <subdev/ltc.h>

struct gf100_ramfuc {
	struct ramfuc base;

	struct ramfuc_reg r_0x10fe20;
	struct ramfuc_reg r_0x10fe24;
	struct ramfuc_reg r_0x137320;
	struct ramfuc_reg r_0x137330;

	struct ramfuc_reg r_0x132000;
	struct ramfuc_reg r_0x132004;
	struct ramfuc_reg r_0x132100;

	struct ramfuc_reg r_0x137390;

	struct ramfuc_reg r_0x10f290;
	struct ramfuc_reg r_0x10f294;
	struct ramfuc_reg r_0x10f298;
	struct ramfuc_reg r_0x10f29c;
	struct ramfuc_reg r_0x10f2a0;

	struct ramfuc_reg r_0x10f300;
	struct ramfuc_reg r_0x10f338;
	struct ramfuc_reg r_0x10f340;
	struct ramfuc_reg r_0x10f344;
	struct ramfuc_reg r_0x10f348;

	struct ramfuc_reg r_0x10f910;
	struct ramfuc_reg r_0x10f914;

	struct ramfuc_reg r_0x100b0c;
	struct ramfuc_reg r_0x10f050;
	struct ramfuc_reg r_0x10f090;
	struct ramfuc_reg r_0x10f200;
	struct ramfuc_reg r_0x10f210;
	struct ramfuc_reg r_0x10f310;
	struct ramfuc_reg r_0x10f314;
	struct ramfuc_reg r_0x10f610;
	struct ramfuc_reg r_0x10f614;
	struct ramfuc_reg r_0x10f800;
	struct ramfuc_reg r_0x10f808;
	struct ramfuc_reg r_0x10f824;
	struct ramfuc_reg r_0x10f830;
	struct ramfuc_reg r_0x10f988;
	struct ramfuc_reg r_0x10f98c;
	struct ramfuc_reg r_0x10f990;
	struct ramfuc_reg r_0x10f998;
	struct ramfuc_reg r_0x10f9b0;
	struct ramfuc_reg r_0x10f9b4;
	struct ramfuc_reg r_0x10fb04;
	struct ramfuc_reg r_0x10fb08;
	struct ramfuc_reg r_0x137300;
	struct ramfuc_reg r_0x137310;
	struct ramfuc_reg r_0x137360;
	struct ramfuc_reg r_0x1373ec;
	struct ramfuc_reg r_0x1373f0;
	struct ramfuc_reg r_0x1373f8;

	struct ramfuc_reg r_0x61c140;
	struct ramfuc_reg r_0x611200;

	struct ramfuc_reg r_0x13d8f4;
};

struct gf100_ram {
	struct nvkm_ram base;
	struct gf100_ramfuc fuc;
	struct nvbios_pll refpll;
	struct nvbios_pll mempll;
};

static void
gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
{
	struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
	struct nvkm_fb *fb = ram->base.fb;
	struct nvkm_device *device = fb->subdev.device;
	u32 part = nvkm_rd32(device, 0x022438), i;
	u32 mask = nvkm_rd32(device, 0x022554);
	u32 addr = 0x110974;

	ram_wr32(fuc, 0x10f910, magic);
	ram_wr32(fuc, 0x10f914, magic);

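	/* if bit 31 of the magic value is set, wait for bits 3:0 of each
	 * partition's status register (0x110974 + i * 0x1000) to clear,
	 * skipping partitions whose bit is set in the mask (presumably
	 * the disabled ones).
	 */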
	for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
		if (mask & (1 << i))
			continue;
		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
	}
}

static int
gf100_ram_calc(struct nvkm_ram *base, u32 freq)
{
	struct gf100_ram *ram = gf100_ram(base);
	struct gf100_ramfuc *fuc = &ram->fuc;
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_clk *clk = device->clk;
	struct nvkm_bios *bios = device->bios;
	struct nvbios_ramcfg cfg;
	u8  ver, cnt, len, strap;
	struct {
		u32 data;
		u8  size;
	} rammap, ramcfg, timing;
	int ref, div, out;
	int from, mode;
	int N1, M1, P;
	int ret;

	/* lookup memory config data relevant to the target frequency */
	rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
				      &cnt, &ramcfg.size, &cfg);
	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
		nvkm_error(subdev, "invalid/missing rammap entry\n");
		return -EINVAL;
	}

	/* locate specific data set for the attached memory */
	strap = nvbios_ramcfg_index(subdev);
	if (strap >= cnt) {
		nvkm_error(subdev, "invalid ramcfg strap\n");
		return -EINVAL;
	}

	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
		nvkm_error(subdev, "invalid/missing ramcfg entry\n");
		return -EINVAL;
	}

	/* lookup memory timings, if bios says they're present */
	strap = nvbios_rd08(bios, ramcfg.data + 0x01);
	if (strap != 0xff) {
		timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
					      &cnt, &len);
		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
			nvkm_error(subdev, "invalid/missing timing entry\n");
			return -EINVAL;
		}
	} else {
		timing.data = 0;
	}

	ret = ram_init(fuc, ram->base.fb);
	if (ret)
		return ret;

	/* determine current mclk configuration */
	from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */

	/* determine target mclk configuration */
	if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
		ref = nvkm_clk_read(clk, nv_clk_src_sppll0);
	else
		ref = nvkm_clk_read(clk, nv_clk_src_sppll1);
	div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
	out = (ref * 2) / (div + 2);
	mode = freq != out;
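	/* 'out' is the closest frequency reachable by dividing (2 * ref) by
	 * (div + 2); when it misses the target exactly (mode == 1) the
	 * memory PLL is presumably required, otherwise (mode == 0) the
	 * divider path alone is used.  'from' presumably indicates whether
	 * we're currently running from the MPLL.
	 */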

	ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);

	if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
		ram_nuke(fuc, 0x132000);
		ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
		ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
	}

	if (mode == 1) {
		ram_nuke(fuc, 0x10fe20);
		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
	}

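	/* the bare "// 0x........ // 0x........" comments throughout this
	 * function are presumably opcode/data pairs left over from the
	 * traced reclocking sequence that have no direct equivalent here.
	 */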
// 0x00020034 // 0x0000000a
	ram_wr32(fuc, 0x132100, 0x00000001);

	if (mode == 1 && from == 0) {
		/* calculate refpll */
		ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
				     &N1, NULL, &M1, &P);
		if (ret <= 0) {
			nvkm_error(subdev, "unable to calc refpll\n");
			return ret ? ret : -ERANGE;
		}

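		/* program the refpll coefficients; 0x10fe24 presumably packs
		 * P, N and M into bits 16+, 15:8 and 7:0 respectively */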
		ram_wr32(fuc, 0x10fe20, 0x20010000);
		ram_wr32(fuc, 0x137320, 0x00000003);
		ram_wr32(fuc, 0x137330, 0x81200006);
		ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
		ram_wr32(fuc, 0x10fe20, 0x20010001);
		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);

		/* calculate mempll */
		ret = gt215_pll_calc(subdev, &ram->mempll, freq,
				     &N1, NULL, &M1, &P);
		if (ret <= 0) {
			nvkm_error(subdev, "unable to calc mempll\n");
			return ret ? ret : -ERANGE;
		}

		ram_wr32(fuc, 0x10fe20, 0x20010005);
		ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
		ram_wr32(fuc, 0x132000, 0x18010101);
		ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
	} else
	if (mode == 0) {
		ram_wr32(fuc, 0x137300, 0x00000003);
	}

	if (from == 0) {
		ram_nuke(fuc, 0x10fb04);
		ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
		ram_nuke(fuc, 0x10fb08);
		ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
		ram_wr32(fuc, 0x10f988, 0x2004ff00);
		ram_wr32(fuc, 0x10f98c, 0x003fc040);
		ram_wr32(fuc, 0x10f990, 0x20012001);
		ram_wr32(fuc, 0x10f998, 0x00011a00);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
	} else {
		ram_wr32(fuc, 0x10f988, 0x20010000);
		ram_wr32(fuc, 0x10f98c, 0x00000000);
		ram_wr32(fuc, 0x10f990, 0x20012001);
		ram_wr32(fuc, 0x10f998, 0x00010a00);
	}

	if (from == 0) {
// 0x00020039 // 0x000000ba
	}

// 0x0002003a // 0x00000002
	ram_wr32(fuc, 0x100b0c, 0x00080012);
// 0x00030014 // 0x00000000 // 0x02b5f070
// 0x00030014 // 0x00010000 // 0x02b5f070
	ram_wr32(fuc, 0x611200, 0x00003300);
// 0x00020034 // 0x0000000a
// 0x00030020 // 0x00000001 // 0x00000000

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
	ram_wr32(fuc, 0x10f210, 0x00000000);
	ram_nsec(fuc, 1000);
	if (mode == 0)
		gf100_ram_train(fuc, 0x000c1001);
	ram_wr32(fuc, 0x10f310, 0x00000001);
	ram_nsec(fuc, 1000);
	ram_wr32(fuc, 0x10f090, 0x00000061);
	ram_wr32(fuc, 0x10f090, 0xc000007f);
	ram_nsec(fuc, 1000);

	if (from == 0) {
		ram_wr32(fuc, 0x10f824, 0x00007fd4);
	} else {
		ram_wr32(fuc, 0x1373ec, 0x00020404);
	}

	if (mode == 0) {
		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
		ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
		ram_wr32(fuc, 0x10f830, 0x41500010);
		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
		ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
		ram_wr32(fuc, 0x10f050, 0xff000090);
		ram_wr32(fuc, 0x1373ec, 0x00020f0f);
		ram_wr32(fuc, 0x1373f0, 0x00000003);
		ram_wr32(fuc, 0x137310, 0x81201616);
		ram_wr32(fuc, 0x132100, 0x00000001);
// 0x00020039 // 0x000000ba
		ram_wr32(fuc, 0x10f830, 0x00300017);
		ram_wr32(fuc, 0x1373f0, 0x00000001);
		ram_wr32(fuc, 0x10f824, 0x00007e77);
		ram_wr32(fuc, 0x132000, 0x18030001);
		ram_wr32(fuc, 0x10f090, 0x4000007e);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f314, 0x00000001);
		ram_wr32(fuc, 0x10f210, 0x80000000);
		ram_wr32(fuc, 0x10f338, 0x00300220);
		ram_wr32(fuc, 0x10f300, 0x0000011d);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f290, 0x02060505);
		ram_wr32(fuc, 0x10f294, 0x34208288);
		ram_wr32(fuc, 0x10f298, 0x44050411);
		ram_wr32(fuc, 0x10f29c, 0x0000114c);
		ram_wr32(fuc, 0x10f2a0, 0x42e10069);
		ram_wr32(fuc, 0x10f614, 0x40044f77);
		ram_wr32(fuc, 0x10f610, 0x40044f77);
		ram_wr32(fuc, 0x10f344, 0x00600009);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f348, 0x00700008);
		ram_wr32(fuc, 0x61c140, 0x19240000);
		ram_wr32(fuc, 0x10f830, 0x00300017);
		gf100_ram_train(fuc, 0x80021001);
		gf100_ram_train(fuc, 0x80081001);
		ram_wr32(fuc, 0x10f340, 0x00500004);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f830, 0x01300017);
		ram_wr32(fuc, 0x10f830, 0x00300017);
// 0x00030020 // 0x00000000 // 0x00000000
// 0x00020034 // 0x0000000b
		ram_wr32(fuc, 0x100b0c, 0x00080028);
		ram_wr32(fuc, 0x611200, 0x00003330);
	} else {
		ram_wr32(fuc, 0x10f800, 0x00001800);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x1373ec, 0x00020404);
		ram_wr32(fuc, 0x1373f0, 0x00000003);
		ram_wr32(fuc, 0x10f830, 0x40700010);
		ram_wr32(fuc, 0x10f830, 0x40500010);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x1373f8, 0x00000000);
		ram_wr32(fuc, 0x132100, 0x00000101);
		ram_wr32(fuc, 0x137310, 0x89201616);
		ram_wr32(fuc, 0x10f050, 0xff000090);
		ram_wr32(fuc, 0x1373ec, 0x00030404);
		ram_wr32(fuc, 0x1373f0, 0x00000002);
	// 0x00020039 // 0x00000011
		ram_wr32(fuc, 0x132100, 0x00000001);
		ram_wr32(fuc, 0x1373f8, 0x00002000);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f808, 0x7aaa0050);
		ram_wr32(fuc, 0x10f830, 0x00500010);
		ram_wr32(fuc, 0x10f200, 0x00ce1000);
		ram_wr32(fuc, 0x10f090, 0x4000007e);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f314, 0x00000001);
		ram_wr32(fuc, 0x10f210, 0x80000000);
		ram_wr32(fuc, 0x10f338, 0x00300200);
		ram_wr32(fuc, 0x10f300, 0x0000084d);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f290, 0x0b343825);
		ram_wr32(fuc, 0x10f294, 0x3483028e);
		ram_wr32(fuc, 0x10f298, 0x440c0600);
		ram_wr32(fuc, 0x10f29c, 0x0000214c);
		ram_wr32(fuc, 0x10f2a0, 0x42e20069);
		ram_wr32(fuc, 0x10f200, 0x00ce0000);
		ram_wr32(fuc, 0x10f614, 0x60044e77);
		ram_wr32(fuc, 0x10f610, 0x60044e77);
		ram_wr32(fuc, 0x10f340, 0x00500000);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f344, 0x00600228);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f348, 0x00700000);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x61c140, 0x09a40000);

		gf100_ram_train(fuc, 0x800e1008);

		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f800, 0x00001804);
	// 0x00030020 // 0x00000000 // 0x00000000
	// 0x00020034 // 0x0000000b
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x100b0c, 0x00080028);
		ram_wr32(fuc, 0x611200, 0x00003330);
		ram_nsec(fuc, 100000);
		ram_wr32(fuc, 0x10f9b0, 0x05313f41);
		ram_wr32(fuc, 0x10f9b4, 0x00002f50);

		gf100_ram_train(fuc, 0x010c1001);
	}

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
// 0x00020016 // 0x00000000

	if (mode == 0)
		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);

	return 0;
}

static int
gf100_ram_prog(struct nvkm_ram *base)
{
	struct gf100_ram *ram = gf100_ram(base);
	struct nvkm_device *device = ram->base.fb->subdev.device;
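	/* execute the reclocking script built by gf100_ram_calc(), unless
	 * the "NvMemExec" config option disables it */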
	ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
	return 0;
}

static void
gf100_ram_tidy(struct nvkm_ram *base)
{
	struct gf100_ram *ram = gf100_ram(base);
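	/* ram_exec(.., false) presumably discards the pending script rather
	 * than executing it */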
	ram_exec(&ram->fuc, false);
}

extern const u8 gf100_pte_storage_type_map[256];

void
gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
	struct nvkm_mem *mem = *pmem;

	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	mutex_lock(&ram->fb->subdev.mutex);
	if (mem->tag)
		nvkm_ltc_tags_free(ltc, &mem->tag);
	__nv50_ram_put(ram, mem);
	mutex_unlock(&ram->fb->subdev.mutex);

	kfree(mem);
}

int
gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nvkm_mem **pmem)
{
	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
	struct nvkm_mm *mm = &ram->vram;
	struct nvkm_mm_node *r;
	struct nvkm_mem *mem;
	int type = (memtype & 0x0ff);
	int back = (memtype & 0x800);
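	/* the low byte of memtype selects the storage (PTE) type, bit 11
	 * requests allocation from the tail of VRAM; a type that maps to a
	 * different entry in the storage type map is presumably a
	 * compressible one */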
	const bool comp = gf100_pte_storage_type_map[type] != type;
	int ret;

	size  >>= NVKM_RAM_MM_SHIFT;
	align >>= NVKM_RAM_MM_SHIFT;
	ncmin >>= NVKM_RAM_MM_SHIFT;
	if (!ncmin)
		ncmin = size;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem->regions);
	mem->size = size;

	mutex_lock(&ram->fb->subdev.mutex);
	if (comp) {
		/* compression only works with lpages */
		if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
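			/* presumably one tag line per 128KiB large page */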
			int n = size >> 5;
			nvkm_ltc_tags_alloc(ltc, n, &mem->tag);
		}

		if (unlikely(!mem->tag))
			type = gf100_pte_storage_type_map[type];
	}
	mem->memtype = type;

	do {
		if (back)
			ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
		else
			ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
		if (ret) {
			mutex_unlock(&ram->fb->subdev.mutex);
			ram->func->put(ram, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		size -= r->length;
	} while (size);
	mutex_unlock(&ram->fb->subdev.mutex);

	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
	*pmem = mem;
	return 0;
}

static int
gf100_ram_init(struct nvkm_ram *base)
{
	static const u8  train0[] = {
		0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
	};
	static const u32 train1[] = {
		0x00000000, 0xffffffff,
		0x55555555, 0xaaaaaaaa,
		0x33333333, 0xcccccccc,
		0xf0f0f0f0, 0x0f0f0f0f,
		0x00ff00ff, 0xff00ff00,
		0x0000ffff, 0xffff0000,
	};
	struct gf100_ram *ram = gf100_ram(base);
	struct nvkm_device *device = ram->base.fb->subdev.device;
	int i;

	switch (ram->base.type) {
	case NVKM_RAM_TYPE_GDDR5:
		break;
	default:
		return 0;
	}

	/* prepare for ddr link training, and load training patterns */
	for (i = 0; i < 0x30; i++) {
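		/* the pattern entry index is presumably selected via bits
		 * 15:8 of 0x10f968/0x10f96c before each word is loaded */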
		nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
		nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
		nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
		nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
		nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
		nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
	}

	return 0;
}

static const struct nvkm_ram_func
gf100_ram_func = {
	.init = gf100_ram_init,
	.get = gf100_ram_get,
	.put = gf100_ram_put,
	.calc = gf100_ram_calc,
	.prog = gf100_ram_prog,
	.tidy = gf100_ram_tidy,
};

int
gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
	       u32 maskaddr, struct nvkm_ram *ram)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
	u32 parts = nvkm_rd32(device, 0x022438);
	u32 pmask = nvkm_rd32(device, maskaddr);
	u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
	u64 psize, size = 0;
	enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
	bool uniform = true;
	int ret, i;

	nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
	nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);

	/* read amount of vram attached to each memory controller */
	for (i = 0; i < parts; i++) {
		if (pmask & (1 << i))
			continue;

		psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
		if (psize != bsize) {
			if (psize < bsize)
				bsize = psize;
			uniform = false;
		}

		nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
		size += psize;
	}

	ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
	if (ret)
		return ret;

	nvkm_mm_fini(&ram->vram);

	/* if all controllers have the same amount attached, there are no holes */
	if (uniform) {
		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
				   (size - rsvd_head - rsvd_tail) >>
				   NVKM_RAM_MM_SHIFT, 1);
		if (ret)
			return ret;
	} else {
		/* otherwise, address lowest common amount from 0GiB */
		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
				   ((bsize * parts) - rsvd_head) >>
				   NVKM_RAM_MM_SHIFT, 1);
		if (ret)
			return ret;

		/* and the rest starting from (8GiB + common_size) */
		ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
				   NVKM_RAM_MM_SHIFT,
				   (size - (bsize * parts) - rsvd_tail) >>
				   NVKM_RAM_MM_SHIFT, 1);
		if (ret)
			return ret;
	}

	ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
	return 0;
}

int
gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct gf100_ram *ram;
	int ret;

	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
		return -ENOMEM;
	*pram = &ram->base;

	ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
	if (ret)
		return ret;

	ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
	if (ret) {
		nvkm_error(subdev, "mclk refpll data not found\n");
		return ret;
	}

	ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
	if (ret) {
		nvkm_error(subdev, "mclk pll data not found\n");
		return ret;
	}

	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
	ram->fuc.r_0x137330 = ramfuc_reg(0x137330);

	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
	ram->fuc.r_0x132100 = ramfuc_reg(0x132100);

	ram->fuc.r_0x137390 = ramfuc_reg(0x137390);

	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);

	ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
	ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
	ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
	ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
	ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);

	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);

	ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
	ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
	ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
	ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
	ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
	ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
	ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
	ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
	ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
	ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
	ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
	ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
	ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
	ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);

	ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
	ram->fuc.r_0x611200 = ramfuc_reg(0x611200);

	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
	return 0;
}