This source file includes the following definitions:
- ufshcd_parse_clock_info
- ufshcd_populate_vreg
- ufshcd_parse_regulator_info
- ufshcd_pltfrm_suspend
- ufshcd_pltfrm_resume
- ufshcd_pltfrm_runtime_suspend
- ufshcd_pltfrm_runtime_resume
- ufshcd_pltfrm_runtime_idle
- ufshcd_pltfrm_shutdown
- ufshcd_init_lanes_per_dir
- ufshcd_get_pwr_dev_param
- ufshcd_pltfrm_init
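
/*
 * Universal Flash Storage (UFS) host controller Platform bus based glue
 * driver.
 */
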
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"

#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2

static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
	int ret = 0;
	int cnt;
	int i;
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	char *name;
	u32 *clkfreq = NULL;
	struct ufs_clk_info *clki;
	int len = 0;
	size_t sz = 0;

	if (!np)
		goto out;

	cnt = of_property_count_strings(np, "clock-names");
	if (!cnt || (cnt == -EINVAL)) {
		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
				__func__);
	} else if (cnt < 0) {
		dev_err(dev, "%s: count clock strings failed, err %d\n",
				__func__, cnt);
		ret = cnt;
	}

	if (cnt <= 0)
		goto out;

	if (!of_get_property(np, "freq-table-hz", &len)) {
		dev_info(dev, "freq-table-hz property not specified\n");
		goto out;
	}

	if (len <= 0)
		goto out;

	sz = len / sizeof(*clkfreq);
	if (sz != 2 * cnt) {
		dev_err(dev, "%s len mismatch\n", "freq-table-hz");
		ret = -EINVAL;
		goto out;
	}

	clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
			       GFP_KERNEL);
	if (!clkfreq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = of_property_read_u32_array(np, "freq-table-hz",
			clkfreq, sz);
	if (ret && (ret != -EINVAL)) {
		dev_err(dev, "%s: error reading array %d\n",
				"freq-table-hz", ret);
		return ret;
	}

	for (i = 0; i < sz; i += 2) {
		ret = of_property_read_string_index(np,
				"clock-names", i/2, (const char **)&name);
		if (ret)
			goto out;

		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
		if (!clki) {
			ret = -ENOMEM;
			goto out;
		}

		clki->min_freq = clkfreq[i];
		clki->max_freq = clkfreq[i+1];
		clki->name = kstrdup(name, GFP_KERNEL);
		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
				clki->min_freq, clki->max_freq, clki->name);
		list_add_tail(&clki->list, &hba->clk_list_head);
	}
out:
	return ret;
}

#define MAX_PROP_SIZE 32
static int ufshcd_populate_vreg(struct device *dev, const char *name,
		struct ufs_vreg **out_vreg)
{
	int ret = 0;
	char prop_name[MAX_PROP_SIZE];
	struct ufs_vreg *vreg = NULL;
	struct device_node *np = dev->of_node;

	if (!np) {
		dev_err(dev, "%s: non DT initialization\n", __func__);
		goto out;
	}

	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
	if (!of_parse_phandle(np, prop_name, 0)) {
		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
				__func__, prop_name);
		goto out;
	}

	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg)
		return -ENOMEM;

	vreg->name = kstrdup(name, GFP_KERNEL);

	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
	if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
		dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
		vreg->max_uA = 0;
	}

	if (!strcmp(name, "vcc")) {
		if (of_property_read_bool(np, "vcc-supply-1p8")) {
			vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
			vreg->max_uV = UFS_VREG_VCC_1P8_MAX_UV;
		} else {
			vreg->min_uV = UFS_VREG_VCC_MIN_UV;
			vreg->max_uV = UFS_VREG_VCC_MAX_UV;
		}
	} else if (!strcmp(name, "vccq")) {
		vreg->min_uV = UFS_VREG_VCCQ_MIN_UV;
		vreg->max_uV = UFS_VREG_VCCQ_MAX_UV;
	} else if (!strcmp(name, "vccq2")) {
		vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV;
		vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV;
	}

out:
	if (!ret)
		*out_vreg = vreg;
	return ret;
}

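/**
 * ufshcd_parse_regulator_info - get regulator info from device tree
 * @hba: per adapter instance
 *
 * Get regulator info from device tree for the vdd-hba, vcc, vccq and vccq2
 * power supplies. A supply that is not declared in device tree is assumed
 * to be always on.
 */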
static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
out:
	return err;
}

#ifdef CONFIG_PM
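/**
 * ufshcd_pltfrm_suspend - suspend power management function
 * @dev: pointer to device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */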
int ufshcd_pltfrm_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_suspend);

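/**
 * ufshcd_pltfrm_resume - resume power management function
 * @dev: pointer to device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */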
int ufshcd_pltfrm_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_resume);

int ufshcd_pltfrm_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_suspend);

int ufshcd_pltfrm_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_resume);

int ufshcd_pltfrm_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_idle);

#endif /* CONFIG_PM */

void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);

static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	int ret;

	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
		&hba->lanes_per_direction);
	if (ret) {
		dev_dbg(hba->dev,
			"%s: failed to read lanes-per-direction, ret=%d\n",
			__func__, ret);
		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
	}
}

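/**
 * ufshcd_get_pwr_dev_param - get finally agreed attributes for
 *                            power mode change
 * @pltfrm_param: pointer to platform parameters
 * @dev_max: pointer to device attributes
 * @agreed_pwr: returned agreed attributes
 *
 * Returns 0 on success, non-zero value on failure
 */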
int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
			     struct ufs_pa_layer_attr *dev_max,
			     struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_pltfrm_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_pltfrm_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
		is_pltfrm_max_hs = true;
		min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
					pltfrm_param->hs_tx_gear);
	} else {
		min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
					pltfrm_param->pwm_tx_gear);
	}

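	/*
	 * The device does not support HS but the platform's desired working
	 * mode is HS, so device and platform are not in agreement and no
	 * agreed power mode can be found.
	 */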
	if (!is_dev_sup_hs && is_pltfrm_max_hs) {
		pr_info("%s: device doesn't support HS\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_pltfrm_max_hs) {
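		/*
		 * The device supports HS and the platform's desired working
		 * mode is also HS, so the final mode (FAST/FASTAUTO) is taken
		 * from the platform parameters, which are the restricting
		 * factor.
		 */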
		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	} else {
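		/*
		 * The platform's desired working mode is PWM; whether or not
		 * the device supports HS, the platform parameters determine
		 * the mode.
		 */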
		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	}

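	/*
	 * Use the minimum number of lanes supported by both the device
	 * and the platform, for tx as well as rx.
	 */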
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    pltfrm_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    pltfrm_param->rx_lanes);

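	/* device maximum gear is the minimum between device rx and tx gears */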
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

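	/*
	 * If device capabilities and platform preferences are both HS or
	 * both PWM, the agreed gear is the minimum of the two. If one side
	 * is PWM and the other is HS, the PWM side decides the gear, as it
	 * also decided above which power mode the device is configured to.
	 */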
	if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
	    (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
		agreed_pwr->gear_rx =
			min_t(u32, min_dev_gear, min_pltfrm_gear);
	} else if (!is_dev_sup_hs) {
		agreed_pwr->gear_rx = min_dev_gear;
	} else {
		agreed_pwr->gear_rx = min_pltfrm_gear;
	}
	agreed_pwr->gear_tx = agreed_pwr->gear_rx;

	agreed_pwr->hs_rate = pltfrm_param->hs_rate;

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);

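/**
 * ufshcd_pltfrm_init - probe routine of the driver
 * @pdev: pointer to Platform device handle
 * @vops: pointer to variant ops
 *
 * Returns 0 on success, non-zero value on failure
 */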
int ufshcd_pltfrm_init(struct platform_device *pdev,
		       const struct ufs_hba_variant_ops *vops)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int irq, err;
	struct device *dev = &pdev->dev;

	mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio_base)) {
		err = PTR_ERR(mmio_base);
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "IRQ resource not available\n");
		err = -ENODEV;
		goto out;
	}

	err = ufshcd_alloc_host(dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		goto out;
	}

	hba->vops = vops;

	err = ufshcd_parse_clock_info(hba);
	if (err) {
		dev_err(&pdev->dev, "%s: clock parse failed %d\n",
				__func__, err);
		goto dealloc_host;
	}
	err = ufshcd_parse_regulator_info(hba);
	if (err) {
		dev_err(&pdev->dev, "%s: regulator init failed %d\n",
				__func__, err);
		goto dealloc_host;
	}

	ufshcd_init_lanes_per_dir(hba);

	err = ufshcd_init(hba, mmio_base, irq);
	if (err) {
		dev_err(dev, "Initialization failed\n");
		goto dealloc_host;
	}

	platform_set_drvdata(pdev, hba);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;

dealloc_host:
	ufshcd_dealloc_host(hba);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);