/*
 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "dsi_pll.h"
#include "dsi.xml.h"

/*
 * DSI PLL 28nm - clock diagram (e.g. DSI0):
 *
 *         dsi0analog_postdiv_clk
 *                             |         dsi0indirect_path_div2_clk
 *                             |          |
 *                   +------+  |  +----+  |  |\   dsi0byte_mux
 *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
 *                |  +------+     +----+     | m|  |  +----+
 *                |                          | u|--o--| /4 |-- dsi0pllbyte
 *                |                          | x|     +----+
 *                o--------------------------| /
 *                |                          |/
 *                |          +------+
 *                o----------| DIV3 |------------------------- dsi0pll
 *                           +------+
 */

#define POLL_MAX_READS			10
#define POLL_TIMEOUT_US		50

#define NUM_PROVIDED_CLKS		2

#define VCO_REF_CLK_RATE		19200000
#define VCO_MIN_RATE			350000000
#define VCO_MAX_RATE			750000000

#define DSI_BYTE_PLL_CLK		0
#define DSI_PIXEL_PLL_CLK		1

#define LPFR_LUT_SIZE			10
struct lpfr_cfg {
	unsigned long vco_rate;
	u32 resistance;
};

/* Loop filter resistance: */
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
	{ 479500000,  8 },
	{ 480000000, 11 },
	{ 575500000,  8 },
	{ 576000000, 12 },
	{ 610500000,  8 },
	{ 659500000,  9 },
	{ 671500000, 10 },
	{ 672000000, 14 },
	{ 708500000, 10 },
	{ 750000000, 11 },
};

struct pll_28nm_cached_state {
	unsigned long vco_rate;
	u8 postdiv3;
	u8 postdiv1;
	u8 byte_mux;
};

struct dsi_pll_28nm {
	struct msm_dsi_pll base;

	int id;
	struct platform_device *pdev;
	void __iomem *mmio;

	int vco_delay;

	/* private clocks: */
	struct clk *clks[NUM_DSI_CLOCKS_MAX];
	u32 num_clks;

	/* clock-provider: */
	struct clk *provided_clks[NUM_PROVIDED_CLKS];
	struct clk_onecell_data clk_data;

	struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)

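/*
 * Poll the PLL status register for the PLL_RDY bit, reading up to
 * @nb_tries times with @timeout_us between reads.  Returns true as soon
 * as the PLL reports lock, false if it never does.
 */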
static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
				u32 nb_tries, u32 timeout_us)
{
	bool pll_locked = false;
	u32 val;

	while (nb_tries--) {
		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);
		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

		if (pll_locked)
			break;

		udelay(timeout_us);
	}
	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

	return pll_locked;
}

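/*
 * Both enable sequences start by pulsing the software reset bit in
 * TEST_CFG before reprogramming the PLL.
 */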
static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
	void __iomem *base = pll_28nm->mmio;

	/*
	 * Add HW recommended delays after toggling the software
	 * reset bit off and back on.
	 */
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
			DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}

/*
 * Clock Callbacks
 */
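/*
 * Program the VCO for a new rate: pick the loop filter resistance from
 * the LUT, select integer or fractional-N (SDM) feedback depending on
 * whether the rate is an exact multiple of the 19.2 MHz reference, and
 * write the resulting divider, SDM and calibration registers.
 */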
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct device *dev = &pll_28nm->pdev->dev;
	void __iomem *base = pll_28nm->mmio;
	unsigned long div_fbx1000, gen_vco_clk;
	u32 refclk_cfg, frac_n_mode, frac_n_value;
	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
	u32 cal_cfg10, cal_cfg11;
	u32 rem;
	int i;

	VERB("rate=%lu, parent's=%lu", rate, parent_rate);

	/* Force postdiv2 to be div-4 */
	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);

	/* Configure the Loop filter resistance */
	for (i = 0; i < LPFR_LUT_SIZE; i++)
		if (rate <= lpfr_lut[i].vco_rate)
			break;
	if (i == LPFR_LUT_SIZE) {
		dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
				rate);
		return -EINVAL;
	}
	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);

	/* Loop filter capacitance values: c1 and c2 */
	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);

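	/*
	 * If the target rate is an exact multiple of the 19.2 MHz reference,
	 * run in integer (SDM bypass) mode; otherwise enable the reference
	 * doubler and fractional-N mode.  div_fbx1000 is the effective
	 * feedback divider scaled by 1000, so the fractional part can be
	 * split out below.
	 */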
	rem = rate % VCO_REF_CLK_RATE;
	if (rem) {
		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
		frac_n_mode = 1;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
	} else {
		refclk_cfg = 0x0;
		frac_n_mode = 0;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
	}

	DBG("refclk_cfg = %d", refclk_cfg);

	rem = div_fbx1000 % 1000;
	frac_n_value = (rem << 16) / 1000;

	DBG("div_fb = %lu", div_fbx1000);
	DBG("frac_n_value = %d", frac_n_value);

	DBG("Generated VCO Clock: %lu", gen_vco_clk);
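	/*
	 * Sigma-delta modulator setup: in integer mode the SDM is bypassed
	 * and BYP_DIV carries the divider (minus one); in fractional mode
	 * DC_OFFSET carries the integer part (minus one) and CFG2/CFG3 the
	 * 16-bit fractional seed.
	 */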
	rem = 0;
	sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
	if (frac_n_mode) {
		sdm_cfg0 = 0x0;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg3 = frac_n_value >> 8;
		sdm_cfg2 = frac_n_value & 0xff;
	} else {
		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
		sdm_cfg2 = 0;
		sdm_cfg3 = 0;
	}

	DBG("sdm_cfg0=%d", sdm_cfg0);
	DBG("sdm_cfg1=%d", sdm_cfg1);
	DBG("sdm_cfg2=%d", sdm_cfg2);
	DBG("sdm_cfg3=%d", sdm_cfg3);

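	/*
	 * CAL_CFG10/11 together encode the generated VCO frequency in MHz:
	 * CAL_CFG11 holds the multiple of 256 MHz, CAL_CFG10 the remainder.
	 */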
	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

	pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);
	pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);

	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);

	/* Add hardware recommended delay for correct PLL configuration */
	if (pll_28nm->vco_delay)
		udelay(pll_28nm->vco_delay);

	pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
	pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
	pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);
	pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);

	return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
					POLL_TIMEOUT_US);
}

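/*
 * Compute the current VCO rate from hardware state: account for the
 * reference doubler, then undo either the integer (bypass) divider or
 * the SDM DC offset plus 16-bit fractional seed.
 */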
static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	void __iomem *base = pll_28nm->mmio;
	u32 sdm0, doubler, sdm_byp_div;
	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
	u32 ref_clk = VCO_REF_CLK_RATE;
	unsigned long vco_rate;

	VERB("parent_rate=%lu", parent_rate);

	/* Check to see if the ref clk doubler is enabled */
	doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
	ref_clk += (doubler * VCO_REF_CLK_RATE);

	/* see if it is integer mode or sdm mode */
	sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
		/* integer mode */
		sdm_byp_div = FIELD(
				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
		vco_rate = ref_clk * sdm_byp_div;
	} else {
		/* sdm mode */
		sdm_dc_off = FIELD(
				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
		DBG("sdm_dc_off = %d", sdm_dc_off);
		sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
		sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
		sdm_freq_seed = (sdm3 << 8) | sdm2;
		DBG("sdm_freq_seed = %d", sdm_freq_seed);

		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
		DBG("vco rate = %lu", vco_rate);
	}

	DBG("returning vco rate = %lu", vco_rate);

	return vco_rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
	.round_rate = msm_dsi_pll_helper_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = msm_dsi_pll_helper_clk_prepare,
	.unprepare = msm_dsi_pll_helper_clk_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * PLL Callbacks
 */
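/*
 * HPM enable sequence: software reset, then step the GLB_CFG power-up
 * bits with the HW-recommended delays.  Toggle the lock detect and poll
 * for lock up to twice, re-running the reset and power-up ramp between
 * attempts.
 */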
static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct device *dev = &pll_28nm->pdev->dev;
	void __iomem *base = pll_28nm->mmio;
	u32 max_reads = 5, timeout_us = 100;
	bool locked;
	u32 val;
	int i;

	DBG("id=%d", pll_28nm->id);

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

	for (i = 0; i < 2; i++) {
		/* DSI Uniphy lock detect setting */
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
				0x0c, 100);
		pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

		/* poll for PLL ready status */
		locked = pll_28nm_poll_for_ready(pll_28nm,
						max_reads, timeout_us);
		if (locked)
			break;

		pll_28nm_software_reset(pll_28nm);

		/*
		 * PLL power up sequence.
		 * Add necessary delays recommended by hardware.
		 */
		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);

		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
	}

	if (unlikely(!locked))
		dev_err(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL Lock success");

	return locked ? 0 : -EINVAL;
}

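/*
 * LP enable sequence: software reset, a single GLB_CFG power-up ramp
 * with 500 ns delays, then toggle the lock detect and poll for lock.
 * Unlike the HPM sequence there is no retry loop here.
 */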
static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct device *dev = &pll_28nm->pdev->dev;
	void __iomem *base = pll_28nm->mmio;
	bool locked;
	u32 max_reads = 10, timeout_us = 50;
	u32 val;

	DBG("id=%d", pll_28nm->id);

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);

	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	/* DSI PLL toggle lock detect setting */
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked))
		dev_err(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL lock success");

	return locked ? 0 : -EINVAL;
}

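/* Disable the PLL by clearing all power-up bits in GLB_CFG. */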
static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	DBG("id=%d", pll_28nm->id);
	pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
}

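/*
 * Save the post-dividers, byte mux selection and current VCO rate so
 * that dsi_pll_28nm_restore_state() can reprogram them later.
 */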
static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->mmio;

	cached_state->postdiv3 =
			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
	cached_state->postdiv1 =
			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
	cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
}

static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->mmio;
	int ret;

	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
					cached_state->vco_rate, 0);
	if (ret) {
		dev_err(&pll_28nm->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
			cached_state->postdiv3);
	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
			cached_state->postdiv1);
	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
			cached_state->byte_mux);

	return 0;
}

static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
				struct clk **byte_clk_provider,
				struct clk **pixel_clk_provider)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	if (byte_clk_provider)
		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
	if (pixel_clk_provider)
		*pixel_clk_provider =
				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];

	return 0;
}

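/*
 * Unregister the private clocks and clear the provided-clock
 * bookkeeping.
 */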
static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	int i;

	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
					pll_28nm->clks, pll_28nm->num_clks);

	for (i = 0; i < NUM_PROVIDED_CLKS; i++)
		pll_28nm->provided_clks[i] = NULL;

	pll_28nm->num_clks = 0;
	pll_28nm->clk_data.clks = NULL;
	pll_28nm->clk_data.clk_num = 0;
}

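/*
 * Register the clock tree shown in the diagram at the top of this file:
 * the VCO, the analog postdiv, the indirect /2 path, the byte mux and
 * the /4 byte clock, plus the DIV3 pixel path.  The byte and pixel
 * clocks are then exposed through an of_clk provider.
 */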
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
{
	char clk_name[32], parent1[32], parent2[32], vco_name[32];
	struct clk_init_data vco_init = {
		.parent_names = (const char *[]){ "xo" },
		.num_parents = 1,
		.name = vco_name,
		.ops = &clk_ops_dsi_pll_28nm_vco,
	};
	struct device *dev = &pll_28nm->pdev->dev;
	struct clk **clks = pll_28nm->clks;
	struct clk **provided_clks = pll_28nm->provided_clks;
	int num = 0;
	int ret;

	DBG("%d", pll_28nm->id);

	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
	pll_28nm->base.clk_hw.init = &vco_init;
	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);

	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
	clks[num++] = clk_register_divider(dev, clk_name,
			parent1, CLK_SET_RATE_PARENT,
			pll_28nm->mmio +
			REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
			0, 4, 0, NULL);

	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
	clks[num++] = clk_register_fixed_factor(dev, clk_name,
			parent1, CLK_SET_RATE_PARENT,
			1, 2);

	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
			clk_register_divider(dev, clk_name,
				parent1, 0, pll_28nm->mmio +
				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
				0, 8, 0, NULL);

	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
	clks[num++] = clk_register_mux(dev, clk_name,
			(const char *[]){
				parent1, parent2
			}, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);

	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);
	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
			clk_register_fixed_factor(dev, clk_name,
				parent1, CLK_SET_RATE_PARENT, 1, 4);

	pll_28nm->num_clks = num;

	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
	pll_28nm->clk_data.clks = provided_clks;

	ret = of_clk_add_provider(dev->of_node,
			of_clk_src_onecell_get, &pll_28nm->clk_data);
	if (ret) {
		dev_err(dev, "failed to register clk provider: %d\n", ret);
		return ret;
	}

	return 0;
}

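/*
 * Entry point: map the PLL registers, fill in the msm_dsi_pll callbacks
 * and VCO rate limits, pick the HPM or LP enable sequence based on the
 * PHY type, and register the clocks.
 */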
struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
					enum msm_dsi_phy_type type, int id)
{
	struct dsi_pll_28nm *pll_28nm;
	struct msm_dsi_pll *pll;
	int ret;

	if (!pdev)
		return ERR_PTR(-ENODEV);

	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
	if (!pll_28nm)
		return ERR_PTR(-ENOMEM);

	pll_28nm->pdev = pdev;
	pll_28nm->id = id;

	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
		dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pll = &pll_28nm->base;
	pll->min_rate = VCO_MIN_RATE;
	pll->max_rate = VCO_MAX_RATE;
	pll->get_provider = dsi_pll_28nm_get_provider;
	pll->destroy = dsi_pll_28nm_destroy;
	pll->disable_seq = dsi_pll_28nm_disable_seq;
	pll->save_state = dsi_pll_28nm_save_state;
	pll->restore_state = dsi_pll_28nm_restore_state;

	if (type == MSM_DSI_PHY_28NM_HPM) {
		pll_28nm->vco_delay = 1;

		pll->en_seq_cnt = 3;
		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;
		pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;
		pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;
	} else if (type == MSM_DSI_PHY_28NM_LP) {
		pll_28nm->vco_delay = 1000;

		pll->en_seq_cnt = 1;
		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
	} else {
		dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
		return ERR_PTR(-EINVAL);
	}

	ret = pll_28nm_register(pll_28nm);
	if (ret) {
		dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ERR_PTR(ret);
	}

	return pll;
}