/*
 * Marvell EBU SoC common clock handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 * Andrew Lunn <andrew@lunn.ch>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>

#include "common.h"

/*
 * Core Clocks
 */

#define SSCG_CONF_MODE(reg)	(((reg) >> 16) & 0x3)
#define SSCG_SPREAD_DOWN	0x0
#define SSCG_SPREAD_UP		0x1
#define SSCG_SPREAD_CENTRAL	0x2
#define SSCG_CONF_LOW(reg)	(((reg) >> 8) & 0xFF)
#define SSCG_CONF_HIGH(reg)	((reg) & 0xFF)

static struct clk_onecell_data clk_data;

/*
 * This function can be used by the Kirkwood, Armada 370, Armada XP and
 * Armada 375 SoCs. Following the DT convention, it is named after the
 * first known SoC compatible with it.
 */
u32 kirkwood_fix_sscg_deviation(u32 system_clk)
{
	struct device_node *sscg_np = NULL;
	void __iomem *sscg_map;
	u32 sscg_reg;
	s32 low_bound, high_bound;
	u64 freq_swing_half;

	sscg_np = of_find_node_by_name(NULL, "sscg");
	if (sscg_np == NULL) {
		pr_err("cannot get SSCG register node\n");
		return system_clk;
	}

	sscg_map = of_iomap(sscg_np, 0);
	if (sscg_map == NULL) {
		pr_err("cannot map SSCG register\n");
		goto out;
	}

	sscg_reg = readl(sscg_map);
	high_bound = SSCG_CONF_HIGH(sscg_reg);
	low_bound = SSCG_CONF_LOW(sscg_reg);

	if ((high_bound - low_bound) <= 0) {
		iounmap(sscg_map);
		goto out;
	}
	/*
	 * We got the following formula from a Marvell engineer (the
	 * datasheet was erroneous when this code was written):
	 * Spread percentage = 1/96 * (H - L) / H
	 * H = SSCG_High_Boundary
	 * L = SSCG_Low_Boundary
	 *
	 * As the deviation is half of the spread, this leads to the
	 * formula used in the code below.
	 *
	 * To avoid an overflow while not losing any significant digits,
	 * we have to use a 64-bit integer.
	 */
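	/*
	 * Worked example with illustrative register values (not taken
	 * from any datasheet): for H = 90 and L = 84 the spread is
	 * (90 - 84) / (96 * 90) ~= 0.069%, so on a 2 GHz clock the
	 * half-deviation computed below is
	 * 6 * 2000000000 / (2 * 96 * 90) ~= 694 kHz.
	 */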

	freq_swing_half = (((u64)high_bound - (u64)low_bound)
			* (u64)system_clk);
	do_div(freq_swing_half, (2 * 96 * high_bound));

	switch (SSCG_CONF_MODE(sscg_reg)) {
	case SSCG_SPREAD_DOWN:
		system_clk -= freq_swing_half;
		break;
	case SSCG_SPREAD_UP:
		system_clk += freq_swing_half;
		break;
	case SSCG_SPREAD_CENTRAL:
	default:
		break;
	}

	iounmap(sscg_map);

out:
	of_node_put(sscg_np);

	return system_clk;
}

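/*
 * Register the core clocks described by 'desc' and expose them through
 * a onecell provider: index 0 is TCLK, index 1 is the CPU clock,
 * indices 2..num_ratios+1 are the fixed-factor ratio clocks and, when
 * provided, the optional refclk comes last.
 */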
void __init mvebu_coreclk_setup(struct device_node *np,
				const struct coreclk_soc_desc *desc)
{
	const char *tclk_name = "tclk";
	const char *cpuclk_name = "cpuclk";
	void __iomem *base;
	unsigned long rate;
	int n;

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	/* Allocate the clocks array for TCLK, cpu clk and core ratio clocks */
	clk_data.clk_num = 2 + desc->num_ratios;

	/* One more clock for the optional refclk */
	if (desc->get_refclk_freq)
		clk_data.clk_num += 1;

	clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
				GFP_KERNEL);
	if (WARN_ON(!clk_data.clks)) {
		iounmap(base);
		return;
	}

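	/*
	 * Clock names default to "tclk", "cpuclk" and the ratio names
	 * from 'desc'; each may be overridden by the corresponding
	 * entry of the DT "clock-output-names" property.
	 */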
	/* Register TCLK */
	of_property_read_string_index(np, "clock-output-names", 0,
				      &tclk_name);
	rate = desc->get_tclk_freq(base);
	clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
						   CLK_IS_ROOT, rate);
	WARN_ON(IS_ERR(clk_data.clks[0]));

	/* Register CPU clock */
	of_property_read_string_index(np, "clock-output-names", 1,
				      &cpuclk_name);
	rate = desc->get_cpu_freq(base);

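	/*
	 * If spread-spectrum clocking is enabled, correct the nominal
	 * CPU frequency for the configured SSCG deviation.
	 */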
	if (desc->is_sscg_enabled && desc->fix_sscg_deviation
		&& desc->is_sscg_enabled(base))
		rate = desc->fix_sscg_deviation(rate);

	clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
						   CLK_IS_ROOT, rate);
	WARN_ON(IS_ERR(clk_data.clks[1]));

	/* Register fixed-factor clocks derived from CPU clock */
	for (n = 0; n < desc->num_ratios; n++) {
		const char *rclk_name = desc->ratios[n].name;
		int mult, div;

		of_property_read_string_index(np, "clock-output-names",
					      2+n, &rclk_name);
		desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div);
		clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
				       cpuclk_name, 0, mult, div);
		WARN_ON(IS_ERR(clk_data.clks[2+n]));
	}

	/* Register optional refclk */
	if (desc->get_refclk_freq) {
		const char *name = "refclk";
		of_property_read_string_index(np, "clock-output-names",
					      2 + desc->num_ratios, &name);
		rate = desc->get_refclk_freq(base);
		clk_data.clks[2 + desc->num_ratios] =
			clk_register_fixed_rate(NULL, name, NULL,
						CLK_IS_ROOT, rate);
		WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios]));
	}

	/* SAR register isn't needed anymore */
	iounmap(base);

	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}

/*
 * Clock Gating Control
 */

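/*
 * A single spinlock is shared by all gate clocks to serialize
 * read-modify-write accesses to the gating register.
 */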
DEFINE_SPINLOCK(ctrl_gating_lock);

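/*
 * Per-SoC gating state: the registered gate clocks, the mapped gating
 * register and a copy of its contents saved across suspend/resume by
 * the syscore ops below.
 */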
struct clk_gating_ctrl {
	spinlock_t *lock;
	struct clk **gates;
	int num_gates;
	void __iomem *base;
	u32 saved_reg;
};

#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)

static struct clk_gating_ctrl *ctrl;

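/*
 * Clock provider callback: the single cell of the clock specifier is
 * the bit index of the requested gate, which is looked up among the
 * registered gate clocks.
 */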
static struct clk *clk_gating_get_src(
	struct of_phandle_args *clkspec, void *data)
{
	int n;

	if (clkspec->args_count < 1)
		return ERR_PTR(-EINVAL);

	for (n = 0; n < ctrl->num_gates; n++) {
		struct clk_gate *gate =
			to_clk_gate(__clk_get_hw(ctrl->gates[n]));
		if (clkspec->args[0] == gate->bit_idx)
			return ctrl->gates[n];
	}
	return ERR_PTR(-ENODEV);
}

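/*
 * Save the gating register across system suspend and write it back on
 * resume so that the gate settings survive a suspend/resume cycle.
 */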
static int mvebu_clk_gating_suspend(void)
{
	ctrl->saved_reg = readl(ctrl->base);
	return 0;
}

static void mvebu_clk_gating_resume(void)
{
	writel(ctrl->saved_reg, ctrl->base);
}

static struct syscore_ops clk_gate_syscore_ops = {
	.suspend = mvebu_clk_gating_suspend,
	.resume = mvebu_clk_gating_resume,
};

void __init mvebu_clk_gating_setup(struct device_node *np,
				   const struct clk_gating_soc_desc *desc)
{
	struct clk *clk;
	void __iomem *base;
	const char *default_parent = NULL;
	int n;

	if (ctrl) {
		pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n");
		return;
	}

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

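	/*
	 * Use the first clock referenced by this node (if any) as the
	 * default parent for gates that do not specify one.
	 */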
	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		default_parent = __clk_get_name(clk);
		clk_put(clk);
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (WARN_ON(!ctrl))
		goto ctrl_out;

	/* lock must already be initialized */
	ctrl->lock = &ctrl_gating_lock;

	ctrl->base = base;

	/* Count, allocate, and register clock gates */
	for (n = 0; desc[n].name;)
		n++;

	ctrl->num_gates = n;
	ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
			      GFP_KERNEL);
	if (WARN_ON(!ctrl->gates))
		goto gates_out;

	for (n = 0; n < ctrl->num_gates; n++) {
		const char *parent =
			(desc[n].parent) ? desc[n].parent : default_parent;
		ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent,
					desc[n].flags, base, desc[n].bit_idx,
					0, ctrl->lock);
		WARN_ON(IS_ERR(ctrl->gates[n]));
	}

	of_clk_add_provider(np, clk_gating_get_src, ctrl);

	register_syscore_ops(&clk_gate_syscore_ops);

	return;
gates_out:
	kfree(ctrl);
ctrl_out:
	iounmap(base);
}