/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/client.h>
#include <core/option.h>

#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

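/* Count the performance domains currently registered with the PM engine. */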
static u8
nvkm_pm_count_perfdom(struct nvkm_pm *pm)
{
	struct nvkm_perfdom *dom;
	u8 domain_nr = 0;

	list_for_each_entry(dom, &pm->domains, head)
		domain_nr++;
	return domain_nr;
}

static u16
nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
	u16 signal_nr = 0;
	int i;

	if (dom) {
		for (i = 0; i < dom->signal_nr; i++) {
			if (dom->signal[i].name)
				signal_nr++;
		}
	}
	return signal_nr;
}

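/* Look up a performance domain by its index in the PM engine's domain list. */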
static struct nvkm_perfdom *
nvkm_perfdom_find(struct nvkm_pm *pm, int di)
{
	struct nvkm_perfdom *dom;
	int tmp = 0;

	list_for_each_entry(dom, &pm->domains, head) {
		if (tmp++ == di)
			return dom;
	}
	return NULL;
}

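/* Look up signal 'si' within domain 'di'.  If *pdom is already set it is
 * used directly, otherwise the domain is resolved first and cached in
 * *pdom for subsequent calls.  Returns NULL for unnamed/unknown signals. */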
struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
	struct nvkm_perfdom *dom = *pdom;

	if (dom == NULL) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return NULL;
		*pdom = dom;
	}

	if (!dom->signal[si].name)
		return NULL;
	return &dom->signal[si];
}

static u8
nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
{
	u8 source_nr = 0, i;

	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
		if (sig->source[i])
			source_nr++;
	}
	return source_nr;
}

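/* Resolve source ID 'si' for a given signal.  Source IDs are global,
 * 1-based indices into the PM engine's source list, and the signal must
 * reference the ID for the lookup to succeed. */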
static struct nvkm_perfsrc *
nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
	struct nvkm_perfsrc *src;
	bool found = false;
	int tmp = 1; /* source IDs start from 1 */
	u8 i;

	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
		if (sig->source[i] == si) {
			found = true;
			break;
		}
	}

	if (found) {
		list_for_each_entry(src, &pm->sources, head) {
			if (tmp++ == si)
				return src;
		}
	}

	return NULL;
}

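/* Program and enable the multiplexer sources selected by a counter.  The
 * lower 32 bits of each source entry hold the source ID, the upper 32 bits
 * hold the value written to the source's register field. */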
static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask, value;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* set enable bit if needed */
			mask = value = 0x00000000;
			if (src->enable)
				mask = value = 0x80000000;
			mask  |= (src->mask << src->shift);
			value |= ((ctr->source[i][j] >> 32) << src->shift);

			/* enable the source */
			nvkm_mask(device, src->addr, mask, value);
			nvkm_debug(subdev,
				   "enabled source %08x %08x %08x\n",
				   src->addr, mask, value);
		}
	}
	return 0;
}

static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* unset enable bit if needed */
			mask = 0x00000000;
			if (src->enable)
				mask = 0x80000000;
			mask |= (src->mask << src->shift);

			/* disable the source */
			nvkm_mask(device, src->addr, mask, 0);
			nvkm_debug(subdev, "disabled source %08x %08x\n",
				   src->addr, mask);
		}
	}
	return 0;
}

/*******************************************************************************
 * Perfdom object classes
 ******************************************************************************/
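/* NVIF_PERFDOM_V0_INIT: program each allocated counter, enable its sources
 * and start the first sampling batch for this domain. */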
static int
nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_init none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret, i;

	nvif_ioctl(object, "perfdom init size %d\n", size);
	if (nvif_unvers(args->none)) {
		nvif_ioctl(object, "perfdom init\n");
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i]) {
			dom->func->init(pm, dom, dom->ctr[i]);

			/* enable sources */
			nvkm_perfsrc_enable(pm, dom->ctr[i]);
		}
	}

	/* start next batch of counters for sampling */
	dom->func->next(pm, dom);
	return 0;
}

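/* NVIF_PERFDOM_V0_SAMPLE: latch the previous batch of counter values and
 * start the next sampling batch across all domains. */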
static int
nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_sample none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret;

	nvif_ioctl(object, "perfdom sample size %d\n", size);
	if (nvif_unvers(args->none)) {
		nvif_ioctl(object, "perfdom sample\n");
	} else
		return ret;
	pm->sequence++;

	/* sample previous batch of counters */
	list_for_each_entry(dom, &pm->domains, head)
		dom->func->next(pm, dom);

	return 0;
}

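/* NVIF_PERFDOM_V0_READ: read back the sampled counter values and the cycle
 * count for this domain.  Returns -EAGAIN if no sample has completed yet
 * (the domain's clock counter is still zero). */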
static int
nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_read_v0 v0;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret, i;

	nvif_ioctl(object, "perfdom read size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i])
			dom->func->read(pm, dom, dom->ctr[i]);
	}

	if (!dom->clk)
		return -EAGAIN;

	for (i = 0; i < 4; i++)
		if (dom->ctr[i])
			args->v0.ctr[i] = dom->ctr[i]->ctr;
	args->v0.clk = dom->clk;
	return 0;
}

static int
nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	switch (mthd) {
	case NVIF_PERFDOM_V0_INIT:
		return nvkm_perfdom_init(dom, data, size);
	case NVIF_PERFDOM_V0_SAMPLE:
		return nvkm_perfdom_sample(dom, data, size);
	case NVIF_PERFDOM_V0_READ:
		return nvkm_perfdom_read(dom, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static void *
nvkm_perfdom_dtor(struct nvkm_object *object)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	struct nvkm_pm *pm = dom->perfmon->pm;
	int i;

	for (i = 0; i < 4; i++) {
		struct nvkm_perfctr *ctr = dom->ctr[i];
		if (ctr) {
			nvkm_perfsrc_disable(pm, ctr);
			if (ctr->head.next)
				list_del(&ctr->head);
		}
		kfree(ctr);
	}

	return dom;
}

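/* Allocate a counter in the given slot and bind it to up to four signals,
 * each with up to eight source selections. */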
static int
nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
		 struct nvkm_perfsig *signal[4], u64 source[4][8],
		 u16 logic_op, struct nvkm_perfctr **pctr)
{
	struct nvkm_perfctr *ctr;
	int i, j;

	if (!dom)
		return -EINVAL;

	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		return -ENOMEM;

	ctr->domain   = domain;
	ctr->logic_op = logic_op;
	ctr->slot     = slot;
	for (i = 0; i < 4; i++) {
		if (signal[i]) {
			ctr->signal[i] = signal[i] - dom->signal;
			for (j = 0; j < 8; j++)
				ctr->source[i][j] = source[i][j];
		}
	}
	list_add_tail(&ctr->head, &dom->list);

	return 0;
}

static const struct nvkm_object_func
nvkm_perfdom = {
	.dtor = nvkm_perfdom_dtor,
	.mthd = nvkm_perfdom_mthd,
};

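/* Create a perfdom object: validate the requested signals and sources,
 * allocate one counter per slot, and inherit the domain's method table
 * and base address. */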
static int
nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
		  const struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	union {
		struct nvif_perfdom_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_perfdom *sdom = NULL;
	struct nvkm_perfctr *ctr[4] = {};
	struct nvkm_perfdom *dom;
	int c, s, m;
	int ret;

	nvif_ioctl(parent, "create perfdom size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
			   args->v0.version, args->v0.domain, args->v0.mode);
	} else
		return ret;

	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
		struct nvkm_perfsig *sig[4] = {};
		u64 src[4][8] = {};

		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
						   args->v0.ctr[c].signal[s],
						   &sdom);
			if (args->v0.ctr[c].signal[s] && !sig[s])
				return -EINVAL;

			for (m = 0; m < 8; m++) {
				src[s][m] = args->v0.ctr[c].source[s][m];
				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
								    src[s][m]))
					return -EINVAL;
			}
		}

		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
				       args->v0.ctr[c].logic_op, &ctr[c]);
		if (ret)
			return ret;
	}

	if (!sdom)
		return -EINVAL;

	if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
	dom->perfmon = perfmon;
	*pobject = &dom->object;

	dom->func = sdom->func;
	dom->addr = sdom->addr;
	dom->mode = args->v0.mode;
	for (c = 0; c < ARRAY_SIZE(ctr); c++)
		dom->ctr[c] = ctr[c];
	return 0;
}

/*******************************************************************************
 * Perfmon object classes
 ******************************************************************************/
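/* NVIF_PERFMON_V0_QUERY_DOMAIN: iterator over the available domains.  An
 * iter value of 0 starts the iteration and 0xff marks its end; each call
 * reports one domain's name, signal count and counter count. */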
static int
nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_domain_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom;
	u8 domain_nr;
	int di, ret;

	nvif_ioctl(object, "perfmon query domain size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
			   args->v0.version, args->v0.iter);
		di = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	domain_nr = nvkm_pm_count_perfdom(pm);
	if (di >= (int)domain_nr)
		return -EINVAL;

	if (di >= 0) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return -EINVAL;

		args->v0.id         = di;
		args->v0.signal_nr  = nvkm_perfdom_count_perfsig(dom);
		strncpy(args->v0.name, dom->name, sizeof(args->v0.name));

		/* Currently only global counters (PCOUNTER) are implemented
		 * but this will be different for local counters (MP). */
		args->v0.counter_nr = 4;
	}

	if (++di < domain_nr) {
		args->v0.iter = ++di;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

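/* NVIF_PERFMON_V0_QUERY_SIGNAL: iterator over a domain's signals.  Unnamed
 * signals are skipped unless NvPmShowAll is set; NvPmUnnamed reports raw
 * "/domain/index" names instead of the symbolic ones. */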
static int
nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_signal_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_device *device = pm->engine.subdev.device;
	struct nvkm_perfdom *dom;
	struct nvkm_perfsig *sig;
	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
	int ret, si;

	nvif_ioctl(object, "perfmon query signal size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object,
			   "perfmon query signal vers %d dom %d iter %04x\n",
			   args->v0.version, args->v0.domain, args->v0.iter);
		si = (args->v0.iter & 0xffff) - 1;
	} else
		return ret;

	dom = nvkm_perfdom_find(pm, args->v0.domain);
	if (dom == NULL || si >= (int)dom->signal_nr)
		return -EINVAL;

	if (si >= 0) {
		sig = &dom->signal[si];
		if (raw || !sig->name) {
			snprintf(args->v0.name, sizeof(args->v0.name),
				 "/%s/%02x", dom->name, si);
		} else {
			strncpy(args->v0.name, sig->name,
				sizeof(args->v0.name));
		}

		args->v0.signal = si;
		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
	}

	while (++si < dom->signal_nr) {
		if (all || dom->signal[si].name) {
			args->v0.iter = ++si;
			return 0;
		}
	}

	args->v0.iter = 0xffff;
	return 0;
}

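/* NVIF_PERFMON_V0_QUERY_SOURCE: iterator over the multiplexer sources of a
 * signal, reporting each source's ID, register mask and name. */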
static int
nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_source_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;
	int si, ret;

	nvif_ioctl(object, "perfmon query source size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object,
			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
			   args->v0.version, args->v0.domain, args->v0.signal,
			   args->v0.iter);
		si = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
	if (!sig)
		return -EINVAL;

	source_nr = nvkm_perfsig_count_perfsrc(sig);
	if (si >= (int)source_nr)
		return -EINVAL;

	if (si >= 0) {
		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
		if (!src)
			return -EINVAL;

		args->v0.source = sig->source[si];
		args->v0.mask   = src->mask;
		strncpy(args->v0.name, src->name, sizeof(args->v0.name));
	}

	if (++si < source_nr) {
		args->v0.iter = ++si;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

static int
nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	switch (mthd) {
	case NVIF_PERFMON_V0_QUERY_DOMAIN:
		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SIGNAL:
		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SOURCE:
		return nvkm_perfmon_mthd_query_source(perfmon, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static int
nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
	return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
}

static int
nvkm_perfmon_child_get(struct nvkm_object *object, int index,
		       struct nvkm_oclass *oclass)
{
	if (index == 0) {
		oclass->base.oclass = NVIF_IOCTL_NEW_V0_PERFDOM;
		oclass->base.minver = 0;
		oclass->base.maxver = 0;
		oclass->ctor = nvkm_perfmon_child_new;
		return 0;
	}
	return -EINVAL;
}

static void *
nvkm_perfmon_dtor(struct nvkm_object *object)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	struct nvkm_pm *pm = perfmon->pm;
	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == &perfmon->object)
		pm->perfmon = NULL;
	mutex_unlock(&pm->engine.subdev.mutex);
	return perfmon;
}

static const struct nvkm_object_func
nvkm_perfmon = {
	.dtor = nvkm_perfmon_dtor,
	.mthd = nvkm_perfmon_mthd,
	.sclass = nvkm_perfmon_child_get,
};

static int
nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
		 void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon;

	if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
	perfmon->pm = pm;
	*pobject = &perfmon->object;
	return 0;
}

/*******************************************************************************
 * PPM engine/subdev functions
 ******************************************************************************/

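/* Create the perfmon object; only one client may own it at a time, any
 * other attempt fails with -EBUSY. */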
static int
nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		   void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
	int ret;

	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
	if (ret)
		return ret;

	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == NULL)
		pm->perfmon = *pobject;
	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
	mutex_unlock(&pm->engine.subdev.mutex);
	return ret;
}

static const struct nvkm_device_oclass
nvkm_pm_oclass = {
	.base.oclass = NVIF_IOCTL_NEW_V0_PERFMON,
	.base.minver = -1,
	.base.maxver = -1,
	.ctor = nvkm_pm_oclass_new,
};

static int
nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
		   const struct nvkm_device_oclass **class)
{
	if (index == 0) {
		oclass->base = nvkm_pm_oclass.base;
		*class = &nvkm_pm_oclass;
		return index;
	}
	return 1;
}

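/* Register the multiplexer sources described by 'spec' for a signal,
 * creating shared nvkm_perfsrc entries as needed and recording their
 * 1-based IDs in the signal's source table. */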
int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
		 const struct nvkm_specsrc *spec)
{
	const struct nvkm_specsrc *ssrc;
	const struct nvkm_specmux *smux;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;

	if (!spec) {
		/* No sources are defined for this signal. */
		return 0;
	}

	ssrc = spec;
	while (ssrc->name) {
		smux = ssrc->mux;
		while (smux->name) {
			bool found = false;
			u8 source_id = 0;
			u32 len;

			list_for_each_entry(src, &pm->sources, head) {
				if (src->addr == ssrc->addr &&
				    src->shift == smux->shift) {
					found = true;
					break;
				}
				source_id++;
			}

			if (!found) {
				src = kzalloc(sizeof(*src), GFP_KERNEL);
				if (!src)
					return -ENOMEM;

				src->addr   = ssrc->addr;
				src->mask   = smux->mask;
				src->shift  = smux->shift;
				src->enable = smux->enable;

				len = strlen(ssrc->name) +
				      strlen(smux->name) + 2;
				src->name = kzalloc(len, GFP_KERNEL);
				if (!src->name) {
					kfree(src);
					return -ENOMEM;
				}
				snprintf(src->name, len, "%s_%s", ssrc->name,
					 smux->name);

				list_add_tail(&src->head, &pm->sources);
			}

			sig->source[source_nr++] = source_id + 1;
			smux++;
		}
		ssrc++;
	}

	return 0;
}

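/* Instantiate the performance domains described by 'spec'.  When a unit
 * mask is given, the domains are replicated per unit at 'size_unit'
 * intervals from 'base'; signals and their sources are registered as each
 * domain is created. */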
int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
		 u32 base, u32 size_unit, u32 size_domain,
		 const struct nvkm_specdom *spec)
{
	const struct nvkm_specdom *sdom;
	const struct nvkm_specsig *ssig;
	struct nvkm_perfdom *dom;
	int ret, i;

	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;

		sdom = spec;
		while (sdom->signal_nr) {
			dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
				      sizeof(*dom->signal), GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &pm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->signal_nr = sdom->signal_nr;

			ssig = (sdom++)->signal;
			while (ssig->name) {
				struct nvkm_perfsig *sig =
					&dom->signal[ssig->signal];
				sig->name = ssig->name;
				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
				if (ret)
					return ret;
				ssig++;
			}

			addr += size_domain;
		}

		mask &= ~(1 << i);
	}

	return 0;
}

static int
nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	if (pm->func->fini)
		pm->func->fini(pm);
	return 0;
}

static void *
nvkm_pm_dtor(struct nvkm_engine *engine)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	struct nvkm_perfdom *dom, *next_dom;
	struct nvkm_perfsrc *src, *next_src;

	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
		list_del(&dom->head);
		kfree(dom);
	}

	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
		list_del(&src->head);
		kfree(src->name);
		kfree(src);
	}

	return pm;
}

static const struct nvkm_engine_func
nvkm_pm = {
	.dtor = nvkm_pm_dtor,
	.fini = nvkm_pm_fini,
	.base.sclass = nvkm_pm_oclass_get,
};

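/* Common constructor used by the chipset-specific PM implementations. */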
int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
	     int index, struct nvkm_pm *pm)
{
	pm->func = func;
	INIT_LIST_HEAD(&pm->domains);
	INIT_LIST_HEAD(&pm->sources);
	return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
}