/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Amit Daniel Kachhap <amit.daniel@samsung.com>
 *
 * EXYNOS5440 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Register definitions */
#define XMU_DVFS_CTRL		0x0060
#define XMU_PMU_P0_7		0x0064
#define XMU_C0_3_PSTATE		0x0090
#define XMU_P_LIMIT		0x00a0
#define XMU_P_STATUS		0x00a4
#define XMU_PMUEVTEN		0x00d0
#define XMU_PMUIRQEN		0x00d4
#define XMU_PMUIRQ		0x00d8

/* PMU mask and shift definitions */
#define P_VALUE_MASK		0x7

#define XMU_DVFS_CTRL_EN_SHIFT	0

#define P0_7_CPUCLKDEV_SHIFT	21
#define P0_7_CPUCLKDEV_MASK	0x7
#define P0_7_ATBCLKDEV_SHIFT	18
#define P0_7_ATBCLKDEV_MASK	0x7
#define P0_7_CSCLKDEV_SHIFT	15
#define P0_7_CSCLKDEV_MASK	0x7
#define P0_7_CPUEMA_SHIFT	28
#define P0_7_CPUEMA_MASK	0xf
#define P0_7_L2EMA_SHIFT	24
#define P0_7_L2EMA_MASK		0xf
#define P0_7_VDD_SHIFT		8
#define P0_7_VDD_MASK		0x7f
#define P0_7_FREQ_SHIFT		0
#define P0_7_FREQ_MASK		0xff

#define C0_3_PSTATE_VALID_SHIFT	8
#define C0_3_PSTATE_CURR_SHIFT	4
#define C0_3_PSTATE_NEW_SHIFT	0

#define PSTATE_CHANGED_EVTEN_SHIFT	0

#define PSTATE_CHANGED_IRQEN_SHIFT	0

#define PSTATE_CHANGED_SHIFT		0

/* some constant values for clock divider calculation */
#define CPU_DIV_FREQ_MAX	500
#define CPU_DBG_FREQ_MAX	375
#define CPU_ATB_FREQ_MAX	500

#define PMIC_LOW_VOLT		0x30
#define PMIC_HIGH_VOLT		0x28
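/*
 * Both thresholds are expressed in VOLTAGE_STEP units below MAX_VOLTAGE
 * (see the volt_id calculation in init_div_table()), so a numerically
 * larger value corresponds to a lower supply voltage; hence PMIC_HIGH_VOLT
 * (0x28) is smaller than PMIC_LOW_VOLT (0x30).
 */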

#define CPUEMA_HIGH		0x2
#define CPUEMA_MID		0x4
#define CPUEMA_LOW		0x7

#define L2EMA_HIGH		0x1
#define L2EMA_MID		0x3
#define L2EMA_LOW		0x4

#define DIV_TAB_MAX	2
/* frequency unit is 20 MHz */
#define FREQ_UNIT	20
#define MAX_VOLTAGE	1550000 /* In microvolt */
#define VOLTAGE_STEP	12500	/* In microvolt */

#define CPUFREQ_NAME		"exynos5440_dvfs"
#define DEF_TRANS_LATENCY	100000

enum cpufreq_level_index {
	L0, L1, L2, L3, L4,
	L5, L6, L7, L8, L9,
};
#define CPUFREQ_LEVEL_END	(L7 + 1)

struct exynos_dvfs_data {
	void __iomem *base;
	struct resource *mem;
	int irq;
	struct clk *cpu_clk;
	unsigned int latency;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_count;
	struct device *dev;
	bool dvfs_enabled;
	struct work_struct irq_work;
};

static struct exynos_dvfs_data *dvfs_info;
static DEFINE_MUTEX(cpufreq_lock);
static struct cpufreq_freqs freqs;

static int init_div_table(void)
{
	struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table;
	unsigned int tmp, clk_div, ema_div, freq, volt_id;
	struct dev_pm_opp *opp;

	rcu_read_lock();
	cpufreq_for_each_entry(pos, freq_tbl) {
		opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
					pos->frequency * 1000, true);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(dvfs_info->dev,
				"failed to find valid OPP for %u kHz\n",
				pos->frequency);
			return PTR_ERR(opp);
		}

		freq = pos->frequency / 1000; /* in MHz */
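		/*
		 * Derive the clock divider fields by integer division. For an
		 * assumed 1000 MHz entry (illustrative only), each of the
		 * CPUCLKDEV, ATBCLKDEV and CSCLKDEV fields below ends up as
		 * 1000/500 = 2, 1000/500 = 2 and 1000/375 = 2 respectively.
		 */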
		clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
					<< P0_7_CPUCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
					<< P0_7_ATBCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
					<< P0_7_CSCLKDEV_SHIFT;

		/* Calculate EMA */
		volt_id = dev_pm_opp_get_voltage(opp);
		volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
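		/*
		 * volt_id is the number of VOLTAGE_STEP steps below
		 * MAX_VOLTAGE. For an assumed 1.1 V OPP (illustrative only):
		 * (1550000 - 1100000) / 12500 = 36, which is below
		 * PMIC_HIGH_VOLT (0x28 = 40), so the "high" EMA values are
		 * chosen below.
		 */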
		if (volt_id < PMIC_HIGH_VOLT) {
			ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
				(L2EMA_HIGH << P0_7_L2EMA_SHIFT);
		} else if (volt_id > PMIC_LOW_VOLT) {
			ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
				(L2EMA_LOW << P0_7_L2EMA_SHIFT);
		} else {
			ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
				(L2EMA_MID << P0_7_L2EMA_SHIFT);
		}

		tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
			| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));

		__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 *
						(pos - freq_tbl));
	}

	rcu_read_unlock();
	return 0;
}

static void exynos_enable_dvfs(unsigned int cur_frequency)
{
	unsigned int tmp, cpu;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
	struct cpufreq_frequency_table *pos;

	/* Disable DVFS */
	__raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);

	/* Enable PSTATE Change Event */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
	tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);

	/* Enable PSTATE Change IRQ */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
	tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);

	/* Set initial performance index */
	cpufreq_for_each_entry(pos, freq_table)
		if (pos->frequency == cur_frequency)
			break;

	if (pos->frequency == CPUFREQ_TABLE_END) {
		dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
		/* Assign the highest frequency */
		pos = freq_table;
		cur_frequency = pos->frequency;
	}

	dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %u kHz\n",
						cur_frequency);

	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT);
		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
	}

	/* Enable DVFS */
	__raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
				dvfs_info->base + XMU_DVFS_CTRL);
}

static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int tmp;
	int i;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

	mutex_lock(&cpufreq_lock);

	freqs.old = policy->cur;
	freqs.new = freq_table[index].frequency;

	cpufreq_freq_transition_begin(policy, &freqs);

	/* Set the target frequency in all C0_3_PSTATE registers */
	for_each_cpu(i, policy->cpus) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= (index << C0_3_PSTATE_NEW_SHIFT);

		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
	}
	mutex_unlock(&cpufreq_lock);
	return 0;
}

static void exynos_cpufreq_work(struct work_struct *work)
{
	unsigned int cur_pstate, index;
	struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

	/* Ensure we can access cpufreq structures */
	if (unlikely(!dvfs_info->dvfs_enabled))
		goto skip_work;

	mutex_lock(&cpufreq_lock);
	freqs.old = policy->cur;

	cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
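	/*
	 * The status word is decoded with the C0_3_PSTATE_* shifts: use the
	 * CURR field when the valid bit is set, otherwise fall back to the
	 * requested NEW index (assuming the field layout matches the per-CPU
	 * PSTATE registers).
	 */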
	if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
		index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
	else
		index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;

	if (likely(index < dvfs_info->freq_count)) {
		freqs.new = freq_table[index].frequency;
	} else {
		dev_crit(dvfs_info->dev, "New frequency out of range\n");
		freqs.new = freqs.old;
	}
	cpufreq_freq_transition_end(policy, &freqs, 0);

	cpufreq_cpu_put(policy);
	mutex_unlock(&cpufreq_lock);
skip_work:
	enable_irq(dvfs_info->irq);
}

static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
{
	unsigned int tmp;

	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
	if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
		__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
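		/*
		 * Event acknowledged above; defer the cpufreq transition-end
		 * notification to process context. The interrupt line stays
		 * masked until exynos_cpufreq_work() re-enables it.
		 */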
		disable_irq_nosync(irq);
		schedule_work(&dvfs_info->irq_work);
	}
	return IRQ_HANDLED;
}

static void exynos_sort_descend_freq_table(void)
{
	struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
	int i = 0, index;
	unsigned int tmp_freq;

	/*
	 * The Exynos5440 clock controller state logic expects the cpufreq
	 * table to be in descending order, but the OPP library constructs it
	 * in ascending order. To reverse the table we simply swap the i-th
	 * element with the (N - 1 - i)-th element.
	 */
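	/*
	 * For example, an ascending table of 250, 500, 750 and 1000 MHz
	 * entries (illustrative values only) comes out as 1000, 750, 500
	 * and 250 MHz after the swaps below.
	 */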
	for (i = 0; i < dvfs_info->freq_count / 2; i++) {
		index = dvfs_info->freq_count - i - 1;
		tmp_freq = freq_tbl[i].frequency;
		freq_tbl[i].frequency = freq_tbl[index].frequency;
		freq_tbl[index].frequency = tmp_freq;
	}
}

static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	policy->clk = dvfs_info->cpu_clk;
	return cpufreq_generic_init(policy, dvfs_info->freq_table,
			dvfs_info->latency);
}

static struct cpufreq_driver exynos_driver = {
	.flags		= CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
				CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= exynos_target,
	.get		= cpufreq_generic_get,
	.init		= exynos_cpufreq_cpu_init,
	.name		= CPUFREQ_NAME,
	.attr		= cpufreq_generic_attr,
};

static const struct of_device_id exynos_cpufreq_match[] = {
	{
		.compatible = "samsung,exynos5440-cpufreq",
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);

static int exynos_cpufreq_probe(struct platform_device *pdev)
{
	int ret = -EINVAL;
	struct device_node *np;
	struct resource res;
	unsigned int cur_frequency;

	np = pdev->dev.of_node;
	if (!np)
		return -ENODEV;

	dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
	if (!dvfs_info) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	dvfs_info->dev = &pdev->dev;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto err_put_node;

	dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
	if (IS_ERR(dvfs_info->base)) {
		ret = PTR_ERR(dvfs_info->base);
		goto err_put_node;
	}

	dvfs_info->irq = irq_of_parse_and_map(np, 0);
	if (!dvfs_info->irq) {
		dev_err(dvfs_info->dev, "No cpufreq irq found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	ret = of_init_opp_table(dvfs_info->dev);
	if (ret) {
		dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
		goto err_put_node;
	}

	ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
					    &dvfs_info->freq_table);
	if (ret) {
		dev_err(dvfs_info->dev,
			"failed to init cpufreq table: %d\n", ret);
		goto err_free_opp;
	}
	dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
	exynos_sort_descend_freq_table();

	if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
		dvfs_info->latency = DEF_TRANS_LATENCY;

	dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
	if (IS_ERR(dvfs_info->cpu_clk)) {
		dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
		ret = PTR_ERR(dvfs_info->cpu_clk);
		goto err_free_table;
	}

	cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
	if (!cur_frequency) {
		dev_err(dvfs_info->dev, "Failed to get clock rate\n");
		ret = -EINVAL;
		goto err_free_table;
	}
	cur_frequency /= 1000;

	INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
	ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
				exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
				CPUFREQ_NAME, dvfs_info);
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to register IRQ\n");
		goto err_free_table;
	}

	ret = init_div_table();
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to initialise div table\n");
		goto err_free_table;
	}

	exynos_enable_dvfs(cur_frequency);
	ret = cpufreq_register_driver(&exynos_driver);
	if (ret) {
		dev_err(dvfs_info->dev,
			"%s: failed to register cpufreq driver\n", __func__);
		goto err_free_table;
	}

	of_node_put(np);
	dvfs_info->dvfs_enabled = true;
	return 0;

err_free_table:
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_free_opp:
	of_free_opp_table(dvfs_info->dev);
err_put_node:
	of_node_put(np);
	dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
	return ret;
}

static int exynos_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&exynos_driver);
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
	of_free_opp_table(dvfs_info->dev);
	return 0;
}

static struct platform_driver exynos_cpufreq_platdrv = {
	.driver = {
		.name	= "exynos5440-cpufreq",
		.of_match_table = exynos_cpufreq_match,
	},
	.probe		= exynos_cpufreq_probe,
	.remove		= exynos_cpufreq_remove,
};
module_platform_driver(exynos_cpufreq_platdrv);

MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
MODULE_LICENSE("GPL");