This source file includes the following definitions:
- pll_28nm_poll_for_ready
- pll_28nm_software_reset
- dsi_pll_28nm_clk_set_rate
- dsi_pll_28nm_clk_is_enabled
- dsi_pll_28nm_clk_recalc_rate
- dsi_pll_28nm_enable_seq_hpm
- dsi_pll_28nm_enable_seq_lp
- dsi_pll_28nm_disable_seq
- dsi_pll_28nm_save_state
- dsi_pll_28nm_restore_state
- dsi_pll_28nm_get_provider
- dsi_pll_28nm_destroy
- pll_28nm_register
- msm_dsi_pll_28nm_init

#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "dsi_pll.h"
#include "dsi.xml.h"

#define POLL_MAX_READS 10
#define POLL_TIMEOUT_US 50

#define NUM_PROVIDED_CLKS 2

#define VCO_REF_CLK_RATE 19200000
#define VCO_MIN_RATE 350000000
#define VCO_MAX_RATE 750000000

#define DSI_BYTE_PLL_CLK 0
#define DSI_PIXEL_PLL_CLK 1

#define LPFR_LUT_SIZE 10
struct lpfr_cfg {
	unsigned long vco_rate;
	u32 resistance;
};

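/*
 * Loop filter resistance lookup table: dsi_pll_28nm_clk_set_rate() picks the
 * first entry whose vco_rate is >= the requested rate and programs its
 * resistance code into LPFR_CFG.
 */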
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
	{ 479500000,  8 },
	{ 480000000, 11 },
	{ 575500000,  8 },
	{ 576000000, 12 },
	{ 610500000,  8 },
	{ 659500000,  9 },
	{ 671500000, 10 },
	{ 672000000, 14 },
	{ 708500000, 10 },
	{ 750000000, 11 },
};

struct pll_28nm_cached_state {
	unsigned long vco_rate;
	u8 postdiv3;
	u8 postdiv1;
	u8 byte_mux;
};

struct dsi_pll_28nm {
	struct msm_dsi_pll base;

	int id;
	struct platform_device *pdev;
	void __iomem *mmio;

	int vco_delay;

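	/* all clocks registered for this PLL (vco, dividers, mux, byte clock) */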
	struct clk *clks[NUM_DSI_CLOCKS_MAX];
	u32 num_clks;

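	/* the two clocks handed out through the OF clock provider */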
	struct clk *provided_clks[NUM_PROVIDED_CLKS];
	struct clk_onecell_data clk_data;

	struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)

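/*
 * Poll the PLL status register up to nb_tries times, waiting timeout_us
 * between reads, until the PLL_RDY bit is set.
 */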
static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
				    u32 nb_tries, u32 timeout_us)
{
	bool pll_locked = false;
	u32 val;

	while (nb_tries--) {
		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);
		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

		if (pll_locked)
			break;

		udelay(timeout_us);
	}
	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

	return pll_locked;
}

static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
	void __iomem *base = pll_28nm->mmio;

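	/*
	 * Pulse the PLL software reset bit, with a 1 us delay after asserting
	 * it and after clearing it.
	 */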
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
			 DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}

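/*
 * Clock callbacks
 */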
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct device *dev = &pll_28nm->pdev->dev;
	void __iomem *base = pll_28nm->mmio;
	unsigned long div_fbx1000, gen_vco_clk;
	u32 refclk_cfg, frac_n_mode, frac_n_value;
	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
	u32 cal_cfg10, cal_cfg11;
	u32 rem;
	int i;

	VERB("rate=%lu, parent's=%lu", rate, parent_rate);

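	/* Force postdiv2 to be div-4 */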
	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);

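	/* Pick the loop filter resistance from the LUT */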
	for (i = 0; i < LPFR_LUT_SIZE; i++)
		if (rate <= lpfr_lut[i].vco_rate)
			break;
	if (i == LPFR_LUT_SIZE) {
		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
			      rate);
		return -EINVAL;
	}
	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);

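	/* Loop filter capacitance values: c1 and c2 */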
	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);

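	/*
	 * If the target rate is not an integer multiple of the 19.2 MHz
	 * reference, use fractional-N mode with the reference doubler;
	 * otherwise use integer (bypass) mode. div_fbx1000 is the feedback
	 * divider scaled by 1000.
	 */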
	rem = rate % VCO_REF_CLK_RATE;
	if (rem) {
		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
		frac_n_mode = 1;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
	} else {
		refclk_cfg = 0x0;
		frac_n_mode = 0;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
	}

	DBG("refclk_cfg = %d", refclk_cfg);

	rem = div_fbx1000 % 1000;
	frac_n_value = (rem << 16) / 1000;

	DBG("div_fb = %lu", div_fbx1000);
	DBG("frac_n_value = %d", frac_n_value);

	DBG("Generated VCO Clock: %lu", gen_vco_clk);
	rem = 0;
	sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
	if (frac_n_mode) {
		sdm_cfg0 = 0x0;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg3 = frac_n_value >> 8;
		sdm_cfg2 = frac_n_value & 0xff;
	} else {
		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
		sdm_cfg2 = 0;
		sdm_cfg3 = 0;
	}

	DBG("sdm_cfg0=%d", sdm_cfg0);
	DBG("sdm_cfg1=%d", sdm_cfg1);
	DBG("sdm_cfg2=%d", sdm_cfg2);
	DBG("sdm_cfg3=%d", sdm_cfg3);

	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

	pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06);
	pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
		  DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
		  DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);

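	/*
	 * Settling delay (vco_delay is set per PHY type in
	 * msm_dsi_pll_28nm_init()) before the remaining registers are written.
	 */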
	if (pll_28nm->vco_delay)
		udelay(pll_28nm->vco_delay);

	pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
	pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
	pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff);
	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff);
	pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20);

	return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
				       POLL_TIMEOUT_US);
}

static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	void __iomem *base = pll_28nm->mmio;
	u32 sdm0, doubler, sdm_byp_div;
	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
	u32 ref_clk = VCO_REF_CLK_RATE;
	unsigned long vco_rate;

	VERB("parent_rate=%lu", parent_rate);

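	/* Check whether the reference clock doubler is enabled */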
	doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
		  DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
	ref_clk += (doubler * VCO_REF_CLK_RATE);

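	/*
	 * In bypass (integer) mode the VCO runs at ref_clk * (BYP_DIV + 1);
	 * otherwise the sigma-delta DC offset plus the 16-bit frequency seed
	 * give the fractional feedback multiplier.
	 */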
	sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
		sdm_byp_div = FIELD(
			pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
			DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
		vco_rate = ref_clk * sdm_byp_div;
	} else {
		sdm_dc_off = FIELD(
			pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
			DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
		DBG("sdm_dc_off = %d", sdm_dc_off);
		sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
			     DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
		sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
			     DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
		sdm_freq_seed = (sdm3 << 8) | sdm2;
		DBG("sdm_freq_seed = %d", sdm_freq_seed);

		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
			   mult_frac(ref_clk, sdm_freq_seed, BIT(16));
		DBG("vco rate = %lu", vco_rate);
	}

	DBG("returning vco rate = %lu", vco_rate);

	return vco_rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
	.round_rate = msm_dsi_pll_helper_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = msm_dsi_pll_helper_clk_prepare,
	.unprepare = msm_dsi_pll_helper_clk_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

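/*
 * PLL enable sequence for the 28nm HPM PHY: power the PLL up in stages and,
 * if it fails to lock, reset it and retry the power-up sequence once more.
 */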
static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct device *dev = &pll_28nm->pdev->dev;
	void __iomem *base = pll_28nm->mmio;
	u32 max_reads = 5, timeout_us = 100;
	bool locked;
	u32 val;
	int i;

	DBG("id=%d", pll_28nm->id);

	pll_28nm_software_reset(pll_28nm);

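	/*
	 * Power up the PLL: de-assert the power-down, power-generator and LDO
	 * power-down bits, then set PLL_ENABLE, with a settling delay after
	 * each register write.
	 */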
	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

	for (i = 0; i < 2; i++) {
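		/* Toggle the lock-detect enable, then poll for PLL lock */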
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
				 0x0c, 100);
		pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

		locked = pll_28nm_poll_for_ready(pll_28nm,
						 max_reads, timeout_us);
		if (locked)
			break;

		pll_28nm_software_reset(pll_28nm);

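		/*
		 * Not locked: power the PLL up again, this time also toggling
		 * the LDO power-down bit, before retrying lock detection.
		 */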
		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);

		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
	}

	if (unlikely(!locked))
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL Lock success");

	return locked ? 0 : -EINVAL;
}

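/* PLL enable sequence for the 28nm LP (low-power) PHY. */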
static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct device *dev = &pll_28nm->pdev->dev;
	void __iomem *base = pll_28nm->mmio;
	bool locked;
	u32 max_reads = 10, timeout_us = 50;
	u32 val;

	DBG("id=%d", pll_28nm->id);

	pll_28nm_software_reset(pll_28nm);

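	/* Power up the PLL; each write is followed by a short settling delay. */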
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);

	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
	       DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

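	/* Enable the lock detector, then poll for PLL lock */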
	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked))
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL lock success");

	return locked ? 0 : -EINVAL;
}

static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	DBG("id=%d", pll_28nm->id);
	pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
}

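/*
 * Cache the post-divider and byte-mux registers plus the current VCO rate so
 * dsi_pll_28nm_restore_state() can reprogram them later.
 */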
static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->mmio;

	cached_state->postdiv3 =
			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
	cached_state->postdiv1 =
			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
	cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
}

static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->mmio;
	int ret;

	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
					cached_state->vco_rate, 0);
	if (ret) {
		DRM_DEV_ERROR(&pll_28nm->pdev->dev,
			      "restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
		  cached_state->postdiv3);
	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
		  cached_state->postdiv1);
	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
		  cached_state->byte_mux);

	return 0;
}

static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
				     struct clk **byte_clk_provider,
				     struct clk **pixel_clk_provider)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	if (byte_clk_provider)
		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
	if (pixel_clk_provider)
		*pixel_clk_provider =
				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];

	return 0;
}

static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	int i;

	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
					   pll_28nm->clks, pll_28nm->num_clks);

	for (i = 0; i < NUM_PROVIDED_CLKS; i++)
		pll_28nm->provided_clks[i] = NULL;

	pll_28nm->num_clks = 0;
	pll_28nm->clk_data.clks = NULL;
	pll_28nm->clk_data.clk_num = 0;
}

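/*
 * Register the VCO and its derived clocks:
 * dsi%dvco_clk -> dsi%danalog_postdiv_clk -> dsi%dindirect_path_div2_clk,
 * dsi%dvco_clk -> dsi%dpll (pixel clock provider),
 * dsi%dbyte_mux (vco or div2 path) -> dsi%dpllbyte (byte clock provider),
 * and expose the two provider clocks through the OF clock framework.
 */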
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
{
	char clk_name[32], parent1[32], parent2[32], vco_name[32];
	struct clk_init_data vco_init = {
		.parent_names = (const char *[]){ "xo" },
		.num_parents = 1,
		.name = vco_name,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_28nm_vco,
	};
	struct device *dev = &pll_28nm->pdev->dev;
	struct clk **clks = pll_28nm->clks;
	struct clk **provided_clks = pll_28nm->provided_clks;
	int num = 0;
	int ret;

	DBG("%d", pll_28nm->id);

	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
	pll_28nm->base.clk_hw.init = &vco_init;
	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);

	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
	clks[num++] = clk_register_divider(dev, clk_name,
					   parent1, CLK_SET_RATE_PARENT,
					   pll_28nm->mmio +
					   REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
					   0, 4, 0, NULL);

	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
	clks[num++] = clk_register_fixed_factor(dev, clk_name,
						parent1, CLK_SET_RATE_PARENT,
						1, 2);

	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
			clk_register_divider(dev, clk_name,
					     parent1, 0, pll_28nm->mmio +
					     REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
					     0, 8, 0, NULL);

	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
	clks[num++] = clk_register_mux(dev, clk_name,
				       (const char *[]){
						parent1, parent2
				       }, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
				       REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);

	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);
	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
			clk_register_fixed_factor(dev, clk_name,
						  parent1, CLK_SET_RATE_PARENT, 1, 4);

	pll_28nm->num_clks = num;

	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
	pll_28nm->clk_data.clks = provided_clks;

	ret = of_clk_add_provider(dev->of_node,
				  of_clk_src_onecell_get, &pll_28nm->clk_data);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
		return ret;
	}

	return 0;
}

struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
					  enum msm_dsi_phy_type type, int id)
{
	struct dsi_pll_28nm *pll_28nm;
	struct msm_dsi_pll *pll;
	int ret;

	if (!pdev)
		return ERR_PTR(-ENODEV);

	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
	if (!pll_28nm)
		return ERR_PTR(-ENOMEM);

	pll_28nm->pdev = pdev;
	pll_28nm->id = id;

	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pll = &pll_28nm->base;
	pll->min_rate = VCO_MIN_RATE;
	pll->max_rate = VCO_MAX_RATE;
	pll->get_provider = dsi_pll_28nm_get_provider;
	pll->destroy = dsi_pll_28nm_destroy;
	pll->disable_seq = dsi_pll_28nm_disable_seq;
	pll->save_state = dsi_pll_28nm_save_state;
	pll->restore_state = dsi_pll_28nm_restore_state;

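	/* The HPM PHY gets three attempts at its enable sequence; LP needs one. */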
	if (type == MSM_DSI_PHY_28NM_HPM) {
		pll_28nm->vco_delay = 1;

		pll->en_seq_cnt = 3;
		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;
		pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;
		pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;
	} else if (type == MSM_DSI_PHY_28NM_LP) {
		pll_28nm->vco_delay = 1000;

		pll->en_seq_cnt = 1;
		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
	} else {
		DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
		return ERR_PTR(-EINVAL);
	}

	ret = pll_28nm_register(pll_28nm);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ERR_PTR(ret);
	}

	return pll;
}