/*
 * clkgen-mux.c: ST GEN-MUX Clock driver
 *
 * Copyright (C) 2014 STMicroelectronics (R&D) Limited
 *
 * Authors: Stephen Gallimore <stephen.gallimore@st.com>
 *	    Pankaj Dev <pankaj.dev@st.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clkgen.h"

static DEFINE_SPINLOCK(clkgena_divmux_lock);
static DEFINE_SPINLOCK(clkgenf_lock);

static const char ** __init clkgen_mux_get_parents(struct device_node *np,
						       int *num_parents)
{
	const char **parents;
	int nparents;

	nparents = of_clk_get_parent_count(np);
	if (WARN_ON(nparents <= 0))
		return ERR_PTR(-EINVAL);

	parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
	if (!parents)
		return ERR_PTR(-ENOMEM);

	*num_parents = of_clk_parent_fill(np, parents, nparents);
	return parents;
}

/**
 * DOC: Clock mux with a programmable divider on each of its three inputs.
 *      The mux has an input setting which effectively gates its output.
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - set rate is supported
 * parent - set/get parent
 */

#define NUM_INPUTS 3

struct clkgena_divmux {
	struct clk_hw hw;
	/* Subclassed mux and divider structures */
	struct clk_mux mux;
	struct clk_divider div[NUM_INPUTS];
	/* Enable/running feedback register bits for each input */
	void __iomem *feedback_reg[NUM_INPUTS];
	int feedback_bit_idx;

	u8              muxsel;
};

#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw)

struct clkgena_divmux_data {
	int num_outputs;
	int mux_offset;
	int mux_offset2;
	int mux_start_bit;
	int div_offsets[NUM_INPUTS];
	int fb_offsets[NUM_INPUTS];
	int fb_start_bit_idx;
};

#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3

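/*
 * Check the hardware feedback bit which reports that the currently
 * selected input is up and running.
 */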
static int clkgena_divmux_is_running(struct clkgena_divmux *mux)
{
	u32 regval = readl(mux->feedback_reg[mux->muxsel]);
	u32 running = regval & BIT(mux->feedback_bit_idx);
	return !!running;
}

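/*
 * Enable the output by routing the selected input through the mux, then
 * poll the feedback register (for up to 10ms) until the hardware reports
 * the output as running.
 */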
static int clkgena_divmux_enable(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;
	unsigned long timeout;
	int ret = 0;

	__clk_hw_set_clk(mux_hw, hw);

	ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
	if (ret)
		return ret;

	timeout = jiffies + msecs_to_jiffies(10);

	while (!clkgena_divmux_is_running(genamux)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}

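/*
 * Gate the output by switching the mux to the reserved "off" selection
 * rather than toggling a dedicated gate bit.
 */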
static void clkgena_divmux_disable(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
}

static int clkgena_divmux_is_enabled(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
}

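/*
 * Read back and cache the current mux selection, falling back to input 0
 * when the hardware reports an out-of-range value (e.g. the "off" setting).
 */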
static u8 clkgena_divmux_get_parent(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
	if ((s8)genamux->muxsel < 0) {
		pr_debug("%s: %s: Invalid parent, setting to default.\n",
		      __func__, clk_hw_get_name(hw));
		genamux->muxsel = 0;
	}

	return genamux->muxsel;
}

static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);

	if (index >= CKGAX_CLKOPSRC_SWITCH_OFF)
		return -EINVAL;

	genamux->muxsel = index;

	/*
	 * If the mux is already enabled, call enable directly to set the
	 * new mux position and wait for it to start running again. Otherwise
	 * do nothing.
	 */
	if (clkgena_divmux_is_enabled(hw))
		clkgena_divmux_enable(hw);

	return 0;
}

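/*
 * Rate operations are forwarded to the divider that belongs to the
 * currently selected input.
 */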
static unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;

	__clk_hw_set_clk(div_hw, hw);

	return clk_divider_ops.recalc_rate(div_hw, parent_rate);
}

static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;

	__clk_hw_set_clk(div_hw, hw);

	return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
}

static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;

	__clk_hw_set_clk(div_hw, hw);

	return clk_divider_ops.round_rate(div_hw, rate, prate);
}

static const struct clk_ops clkgena_divmux_ops = {
	.enable = clkgena_divmux_enable,
	.disable = clkgena_divmux_disable,
	.is_enabled = clkgena_divmux_is_enabled,
	.get_parent = clkgena_divmux_get_parent,
	.set_parent = clkgena_divmux_set_parent,
	.round_rate = clkgena_divmux_round_rate,
	.recalc_rate = clkgena_divmux_recalc_rate,
	.set_rate = clkgena_divmux_set_rate,
};

/**
 * clk_register_genamux - register a genamux clock with the clock framework
 * @name: name of this clock
 * @parent_names: names of the parent clocks
 * @num_parents: number of parent clocks
 * @reg: base address of the clockgen block
 * @muxdata: register layout data for this clockgen variant
 * @idx: index of this output within the clockgen
 */
static struct clk * __init clk_register_genamux(const char *name,
				const char **parent_names, u8 num_parents,
				void __iomem *reg,
				const struct clkgena_divmux_data *muxdata,
				u32 idx)
{
	/*
	 * Fixed constants across all ClockgenA variants
	 */
	const int mux_width = 2;
	const int divider_width = 5;
	struct clkgena_divmux *genamux;
	struct clk *clk;
	struct clk_init_data init;
	int i;

	genamux = kzalloc(sizeof(*genamux), GFP_KERNEL);
	if (!genamux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clkgena_divmux_ops;
	init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	genamux->mux.lock  = &clkgena_divmux_lock;
	genamux->mux.mask = BIT(mux_width) - 1;
	genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width);
	if (genamux->mux.shift > 31) {
		/*
		 * We have spilled into the second mux register so
		 * adjust the register address and the bit shift accordingly
		 */
		genamux->mux.reg = reg + muxdata->mux_offset2;
		genamux->mux.shift -= 32;
	} else {
		genamux->mux.reg   = reg + muxdata->mux_offset;
	}

	for (i = 0; i < NUM_INPUTS; i++) {
		/*
		 * Divider config for each input
		 */
		void __iomem *divbase = reg + muxdata->div_offsets[i];
		genamux->div[i].width = divider_width;
		genamux->div[i].reg = divbase + (idx * sizeof(u32));

		/*
		 * Mux enabled/running feedback register for each input.
		 */
		genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i];
	}

	genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx;
	genamux->hw.init = &init;

	clk = clk_register(NULL, &genamux->hw);
	if (IS_ERR(clk)) {
		kfree(genamux);
		goto err;
	}

	pr_debug("%s: parent %s rate %lu\n",
			__clk_get_name(clk),
			__clk_get_name(clk_get_parent(clk)),
			clk_get_rate(clk));
err:
	return clk;
}

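/*
 * Per-SoC register layout for the ClockgenA divmux instances: mux control
 * register(s), per-input divider banks and the feedback (running) registers.
 */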
static struct clkgena_divmux_data st_divmux_c65hs = {
	.num_outputs = 4,
	.mux_offset = 0x14,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xb00 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c65ls = {
	.num_outputs = 14,
	.mux_offset = 0x14,
	.mux_offset2 = 0x24,
	.mux_start_bit = 8,
	.div_offsets = { 0x810, 0xa10, 0xb10 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 4,
};

static struct clkgena_divmux_data st_divmux_c32odf0 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xa60 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c32odf1 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 16,
	.div_offsets = { 0x820, 0x980, 0xa80 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 8,
};

static struct clkgena_divmux_data st_divmux_c32odf2 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 0,
	.div_offsets = { 0x840, 0xa20, 0xb10 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 16,
};

static struct clkgena_divmux_data st_divmux_c32odf3 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 16,
	.div_offsets = { 0x860, 0xa40, 0xb30 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 24,
};

static const struct of_device_id clkgena_divmux_of_match[] = {
	{
		.compatible = "st,clkgena-divmux-c65-hs",
		.data = &st_divmux_c65hs,
	},
	{
		.compatible = "st,clkgena-divmux-c65-ls",
		.data = &st_divmux_c65ls,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf0",
		.data = &st_divmux_c32odf0,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf1",
		.data = &st_divmux_c32odf1,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf2",
		.data = &st_divmux_c32odf2,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf3",
		.data = &st_divmux_c32odf3,
	},
	{}
};

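/*
 * The divmux and prediv nodes are children of a common clockgen node, so
 * the register base is mapped from the parent node.
 */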
static void __iomem * __init clkgen_get_register_base(struct device_node *np)
{
	struct device_node *pnode;
	void __iomem *reg;

	pnode = of_get_parent(np);
	if (!pnode)
		return NULL;

	reg = of_iomap(pnode, 0);

	of_node_put(pnode);
	return reg;
}

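/*
 * Register one divmux clock per entry in "clock-output-names" and expose
 * them through a onecell clock provider. Empty names mark unused outputs.
 */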
static void __init st_of_clkgena_divmux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	const struct clkgena_divmux_data *data;
	struct clk_onecell_data *clk_data;
	void __iomem *reg;
	const char **parents;
	int num_parents = 0, i;

	match = of_match_node(clkgena_divmux_of_match, np);
	if (WARN_ON(!match))
		return;

	data = match->data;

	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		goto err_parents;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err_alloc;

	clk_data->clk_num = data->num_outputs;
	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks)
		goto err_alloc_clks;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		clk = clk_register_genamux(clk_name, parents, num_parents,
					   reg, data, i);

		if (IS_ERR(clk))
			goto err;

		clk_data->clks[i] = clk;
	}

	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;
err:
	kfree(clk_data->clks);
err_alloc_clks:
	kfree(clk_data);
err_alloc:
	kfree(parents);
err_parents:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);

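/*
 * ClockgenA pre-divider: a single divider, controlled by one register bit,
 * which divides the input by either 1 or 16.
 */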
struct clkgena_prediv_data {
	u32 offset;
	u8 shift;
	struct clk_div_table *table;
};

static struct clk_div_table prediv_table16[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 16 },
	{ .div = 0 },
};

static struct clkgena_prediv_data prediv_c65_data = {
	.offset = 0x4c,
	.shift = 31,
	.table = prediv_table16,
};

static struct clkgena_prediv_data prediv_c32_data = {
	.offset = 0x50,
	.shift = 1,
	.table = prediv_table16,
};

static const struct of_device_id clkgena_prediv_of_match[] = {
	{ .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data },
	{ .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data },
	{}
};

static void __init st_of_clkgena_prediv_setup(struct device_node *np)
{
	const struct of_device_id *match;
	void __iomem *reg;
	const char *parent_name, *clk_name;
	struct clk *clk;
	const struct clkgena_prediv_data *data;

	match = of_match_node(clkgena_prediv_of_match, np);
	if (!match) {
		pr_err("%s: No matching data\n", __func__);
		return;
	}

	data = match->data;

	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parent_name = of_clk_get_parent_name(np, 0);
	if (!parent_name)
		goto err;

	if (of_property_read_string_index(np, "clock-output-names",
					  0, &clk_name))
		goto err;

	clk = clk_register_divider_table(NULL, clk_name, parent_name,
					 CLK_GET_RATE_NOCACHE,
					 reg + data->offset, data->shift, 1,
					 0, data->table, NULL);
	if (IS_ERR(clk))
		goto err;

	of_clk_add_provider(np, of_clk_src_simple_get, clk);
	pr_debug("%s: parent %s rate %u\n",
		__clk_get_name(clk),
		__clk_get_name(clk_get_parent(clk)),
		(unsigned int)clk_get_rate(clk));

	return;
err:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);

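/*
 * Simple mux clocks (VCC and A9 muxes): each instance is a single mux field
 * described by a register offset, shift and width.
 */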
struct clkgen_mux_data {
	u32 offset;
	u8 shift;
	u8 width;
	spinlock_t *lock;
	unsigned long clk_flags;
	u8 mux_flags;
};

static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = {
	.offset = 0,
	.shift = 16,
	.width = 1,
	.lock = &clkgenf_lock,
};

static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = {
	.offset = 0,
	.shift = 17,
	.width = 1,
	.lock = &clkgenf_lock,
};

static struct clkgen_mux_data stih415_a9_mux_data = {
	.offset = 0,
	.shift = 1,
	.width = 2,
	.lock = &clkgen_a9_lock,
};

static struct clkgen_mux_data stih416_a9_mux_data = {
	.offset = 0,
	.shift = 0,
	.width = 2,
};

static struct clkgen_mux_data stih407_a9_mux_data = {
	.offset = 0x1a4,
	.shift = 0,
	.width = 2,
	.lock = &clkgen_a9_lock,
};

static const struct of_device_id mux_of_match[] = {
	{
		.compatible = "st,stih416-clkgenc-vcc-hd",
		.data = &clkgen_mux_c_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-fvdp",
		.data = &clkgen_mux_f_vcc_fvdp_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hva",
		.data = &clkgen_mux_f_vcc_hva_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hd",
		.data = &clkgen_mux_f_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-sd",
		.data = &clkgen_mux_c_vcc_sd_416,
	},
	{
		.compatible = "st,stih415-clkgen-a9-mux",
		.data = &stih415_a9_mux_data,
	},
	{
		.compatible = "st,stih416-clkgen-a9-mux",
		.data = &stih416_a9_mux_data,
	},
	{
		.compatible = "st,stih407-clkgen-a9-mux",
		.data = &stih407_a9_mux_data,
	},
	{}
};

static void __init st_of_clkgen_mux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	struct clk *clk;
	void __iomem *reg;
	const char **parents;
	int num_parents;
	const struct clkgen_mux_data *data;

	match = of_match_node(mux_of_match, np);
	if (!match) {
		pr_err("%s: No matching data\n", __func__);
		return;
	}

	data = match->data;

	reg = of_iomap(np, 0);
	if (!reg) {
		pr_err("%s: Failed to get base address\n", __func__);
		return;
	}

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents)) {
		pr_err("%s: Failed to get parents (%ld)\n",
				__func__, PTR_ERR(parents));
		goto err_parents;
	}

	clk = clk_register_mux(NULL, np->name, parents, num_parents,
				data->clk_flags | CLK_SET_RATE_PARENT,
				reg + data->offset,
				data->shift, data->width, data->mux_flags,
				data->lock);
	if (IS_ERR(clk))
		goto err;

	pr_debug("%s: parent %s rate %u\n",
			__clk_get_name(clk),
			__clk_get_name(clk_get_parent(clk)),
			(unsigned int)clk_get_rate(clk));

	kfree(parents);
	of_clk_add_provider(np, of_clk_src_simple_get, clk);
	return;

err:
	kfree(parents);
err_parents:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup);

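/*
 * Video Clock Controller (VCC): up to 16 output channels, each built as a
 * composite of a gate, a power-of-two divider and a 2-bit mux, using the
 * three control registers defined below.
 */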
#define VCC_MAX_CHANNELS 16

#define VCC_GATE_OFFSET 0x0
#define VCC_MUX_OFFSET 0x4
#define VCC_DIV_OFFSET 0x8

struct clkgen_vcc_data {
	spinlock_t *lock;
	unsigned long clk_flags;
};

static struct clkgen_vcc_data st_clkgenc_vcc_416 = {
	.clk_flags = CLK_SET_RATE_PARENT,
};

static struct clkgen_vcc_data st_clkgenf_vcc_416 = {
	.lock = &clkgenf_lock,
};

static const struct of_device_id vcc_of_match[] = {
	{ .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 },
	{ .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 },
	{}
};

static void __init st_of_clkgen_vcc_setup(struct device_node *np)
{
	const struct of_device_id *match;
	void __iomem *reg;
	const char **parents;
	int num_parents, i;
	struct clk_onecell_data *clk_data;
	const struct clkgen_vcc_data *data;

	match = of_match_node(vcc_of_match, np);
	if (WARN_ON(!match))
		return;
	data = match->data;

	reg = of_iomap(np, 0);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		goto err_parents;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err_alloc;

	clk_data->clk_num = VCC_MAX_CHANNELS;
	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks)
		goto err_alloc_clks;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;
		struct clk_gate *gate;
		struct clk_divider *div;
		struct clk_mux *mux;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err;

		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			kfree(gate);
			goto err;
		}

		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux) {
			kfree(gate);
			kfree(div);
			goto err;
		}

		gate->reg = reg + VCC_GATE_OFFSET;
		gate->bit_idx = i;
		gate->flags = CLK_GATE_SET_TO_DISABLE;
		gate->lock = data->lock;

		div->reg = reg + VCC_DIV_OFFSET;
		div->shift = 2 * i;
		div->width = 2;
		div->flags = CLK_DIVIDER_POWER_OF_TWO |
			CLK_DIVIDER_ROUND_CLOSEST;

		mux->reg = reg + VCC_MUX_OFFSET;
		mux->shift = 2 * i;
		mux->mask = 0x3;

		clk = clk_register_composite(NULL, clk_name, parents,
					     num_parents,
					     &mux->hw, &clk_mux_ops,
					     &div->hw, &clk_divider_ops,
					     &gate->hw, &clk_gate_ops,
					     data->clk_flags |
					     CLK_GET_RATE_NOCACHE);
		if (IS_ERR(clk)) {
			kfree(gate);
			kfree(div);
			kfree(mux);
			goto err;
		}

		pr_debug("%s: parent %s rate %u\n",
			__clk_get_name(clk),
			__clk_get_name(clk_get_parent(clk)),
			(unsigned int)clk_get_rate(clk));

		clk_data->clks[i] = clk;
	}

	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;

err:
	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk_composite *composite;

		if (!clk_data->clks[i])
			continue;

		composite = container_of(__clk_get_hw(clk_data->clks[i]),
					 struct clk_composite, hw);
		kfree(container_of(composite->gate_hw, struct clk_gate, hw));
		kfree(container_of(composite->rate_hw, struct clk_divider, hw));
		kfree(container_of(composite->mux_hw, struct clk_mux, hw));
	}

	kfree(clk_data->clks);
err_alloc_clks:
	kfree(clk_data);
err_alloc:
	kfree(parents);
err_parents:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup);