This source file includes the following definitions:
- brcmstb_init_sram
- do_bsp_initiate_command
- brcmstb_pm_handshake
- shimphy_set
- ddr_ctrl_set
- s3entry_method0
- s3entry_method1
- s5entry_method1
- brcmstb_do_pmsm_power_down
- brcmstb_pm_poweroff
- brcmstb_pm_copy_to_sram
- brcmstb_pm_s2
- brcmstb_pm_s3_finish
- brcmstb_pm_do_s3
- brcmstb_pm_s3
- brcmstb_pm_standby
- brcmstb_pm_enter
- brcmstb_pm_valid
- brcmstb_ioremap_match
- brcmstb_pm_panic_notify
- brcmstb_pm_probe
- brcmstb_pm_init
#define pr_fmt(fmt) "brcmstb-pm: " fmt

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/proc_fs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/soc/brcmstb/brcmstb.h>

#include <asm/fncpy.h>
#include <asm/setup.h>
#include <asm/suspend.h>

#include "pm.h"
#include "aon_defs.h"

#define SHIMPHY_DDR_PAD_CNTRL 0x8c

#define SHIMPHY_PAD_PLL_SEQUENCE BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3 BIT(9)

#define PWRDWN_SEQ_NO_SEQUENCING 0
#define PWRDWN_SEQ_HOLD_CHANNEL 1
#define PWRDWN_SEQ_RESET_PLL 2
#define PWRDWN_SEQ_POWERDOWN_PLL 3

#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK 0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT 20

#define DDR_FORCE_CKE_RST_N BIT(3)
#define DDR_PHY_RST_N BIT(2)
#define DDR_PHY_CKE BIT(1)

#define DDR_PHY_NO_CHANNEL 0xffffffff

#define MAX_NUM_MEMC 3

struct brcmstb_memc {
        void __iomem *ddr_phy_base;
        void __iomem *ddr_shimphy_base;
        void __iomem *ddr_ctrl;
};

struct brcmstb_pm_control {
        void __iomem *aon_ctrl_base;
        void __iomem *aon_sram;
        struct brcmstb_memc memcs[MAX_NUM_MEMC];

        void __iomem *boot_sram;
        size_t boot_sram_len;

        bool support_warm_boot;
        size_t pll_status_offset;
        int num_memc;

        struct brcmstb_s3_params *s3_params;
        dma_addr_t s3_params_pa;
        int s3entry_method;
        u32 warm_boot_offset;
        u32 phy_a_standby_ctrl_offs;
        u32 phy_b_standby_ctrl_offs;
        bool needs_ddr_pad;
        struct platform_device *pdev;
};

enum bsp_initiate_command {
        BSP_CLOCK_STOP = 0x00,
        BSP_GEN_RANDOM_KEY = 0x4A,
        BSP_RESTORE_RANDOM_KEY = 0x55,
        BSP_GEN_FIXED_KEY = 0x63,
};

#define PM_INITIATE 0x01
#define PM_INITIATE_SUCCESS 0x00
#define PM_INITIATE_FAIL 0xfe

static struct brcmstb_pm_control ctrl;

static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
                                    void __iomem *ddr_phy_pll_status);

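/*
 * Remap the on-chip boot SRAM (see sram_dt_ids below) with an executable
 * mapping so that the suspend routines copied there by
 * brcmstb_pm_copy_to_sram() can run while DDR is unavailable.
 */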
static int brcmstb_init_sram(struct device_node *dn)
{
        void __iomem *sram;
        struct resource res;
        int ret;

        ret = of_address_to_resource(dn, 0, &res);
        if (ret)
                return ret;

        sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
        if (!sram)
                return -ENOMEM;

        ctrl.boot_sram = sram;
        ctrl.boot_sram_len = resource_size(&res);

        return 0;
}

static const struct of_device_id sram_dt_ids[] = {
        { .compatible = "mmio-sram" },
        { }
};

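/*
 * Latch a command into the BSP via the AON PM_INITIATE register and poll
 * (up to ~1 second, in 50 us steps) for the BSP to clear the PM_INITIATE
 * bit. The bcm74371a0 platform is not polled; a fixed 10 ms delay is used
 * there instead. Returns non-zero on timeout or a non-success status.
 */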
static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
        void __iomem *base = ctrl.aon_ctrl_base;
        int ret;
        int timeo = 1000 * 1000;

        writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
        (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

        writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

        if (of_machine_is_compatible("brcm,bcm74371a0")) {
                (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
                mdelay(10);
                return 0;
        }

        for (;;) {
                ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
                if (!(ret & PM_INITIATE))
                        break;
                if (timeo <= 0) {
                        pr_err("error: timeout waiting for BSP (%x)\n", ret);
                        break;
                }
                timeo -= 50;
                udelay(50);
        }

        return (ret & 0xff) != PM_INITIATE_SUCCESS;
}

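/*
 * Hand over to the BSP before powering down: clear bit 0 of HOST_MISC_CMDS
 * and issue BSP_CLOCK_STOP. The trailing 3 ms delay leaves the BSP alone for
 * a short while after the CLOCK_STOP command.
 */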
static int brcmstb_pm_handshake(void)
{
        void __iomem *base = ctrl.aon_ctrl_base;
        u32 tmp;
        int ret;

        tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
        tmp &= ~1UL;
        writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
        (void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);

        ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
        if (ret)
                pr_err("BSP handshake failed\n");

        mdelay(3);

        return ret;
}

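/*
 * Read-modify-write SHIMPHY_DDR_PAD_CNTRL on every MEMC: keep the bits
 * selected by @mask and OR in @value. Only acts on chips that need the DDR
 * pad sequencing (ctrl.needs_ddr_pad).
 */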
static inline void shimphy_set(u32 value, u32 mask)
{
        int i;

        if (!ctrl.needs_ddr_pad)
                return;

        for (i = 0; i < ctrl.num_memc; i++) {
                u32 tmp;

                tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
                                    SHIMPHY_DDR_PAD_CNTRL);
                tmp = value | (tmp & mask);
                writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
                               SHIMPHY_DDR_PAD_CNTRL);
        }
        wmb();
}

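/*
 * Set or clear bit 0 at ctrl.warm_boot_offset in each DDR controller, so the
 * upcoming reset is treated as a warm (S3) boot or a cold boot.
 */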
static inline void ddr_ctrl_set(bool warmboot)
{
        int i;

        for (i = 0; i < ctrl.num_memc; i++) {
                u32 tmp;

                tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
                                    ctrl.warm_boot_offset);
                if (warmboot)
                        tmp |= 1;
                else
                        tmp &= ~1;
                writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
                               ctrl.warm_boot_offset);
        }

        wmb();
}

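/* S3 entry, method 0: let the SHIMPHY sequence and gate the PLL for S3. */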
static inline void s3entry_method0(void)
{
        shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
                    0xffffffff);
}

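/*
 * S3 entry, method 1: program the SHIMPHY pads for a full PLL power-down
 * sequence and flag the DDR controllers for a warm boot.
 */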
static inline void s3entry_method1(void)
{
        shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
                     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
                    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

        ddr_ctrl_set(true);
}

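/*
 * S5 (cold power-off) entry, method 1: same PLL power-down sequencing as S3,
 * but the warm-boot flag is cleared and each DDR PHY channel is held in reset
 * with CKE deasserted, since DRAM contents are not preserved across S5.
 */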
static inline void s5entry_method1(void)
{
        int i;

        shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
                     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
                    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

        ddr_ctrl_set(false);

        for (i = 0; i < ctrl.num_memc; i++) {
                u32 tmp;

                /*
                 * Channel A: assert the PHY reset and drop CKE. The original
                 * mask OR'ed DDR_PHY_RST_N with itself; clearing DDR_PHY_CKE
                 * as well appears to be the intent, so that bit is used here
                 * in place of the duplicate.
                 */
                tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
                                    ctrl.phy_a_standby_ctrl_offs);
                tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
                writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
                               ctrl.phy_a_standby_ctrl_offs);

                /* Channel B, if this PHY has one */
                if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
                        tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
                                            ctrl.phy_b_standby_ctrl_offs);
                        tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
                        writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
                                       ctrl.phy_b_standby_ctrl_offs);
                }
        }

        wmb();
}

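/*
 * Kick off the AON power-management state machine with @base_cmd and then
 * wfi(). With @onewrite the command is latched in a single write; otherwise
 * the PM_PWR_DOWN bit is set in a second write.
 */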
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
        void __iomem *base = ctrl.aon_ctrl_base;

        if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
                s5entry_method1();

        writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

        if (!onewrite) {
                (void)readl_relaxed(base + AON_CTRL_PM_CTRL);

                writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
                (void)readl_relaxed(base + AON_CTRL_PM_CTRL);
        }
        wfi();
}

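/*
 * pm_power_off handler: handshake with the BSP, clear the warm-boot magic in
 * AON SRAM so the bootloader does not attempt a warm resume, then request a
 * cold (S5) power-down. This function should not return.
 */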
static void brcmstb_pm_poweroff(void)
{
        brcmstb_pm_handshake();

        writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
        (void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

        writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
        (void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

        if (ctrl.s3entry_method == 1) {
                shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
                             SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
                            ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
                ddr_ctrl_set(false);
                brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
                return;
        }

        brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}

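/*
 * Copy a routine into boot SRAM with fncpy() and return a pointer that can be
 * called in place of the original, or NULL if the routine does not fit.
 */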
static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
{
        unsigned int size = ALIGN(len, FNCPY_ALIGN);

        if (ctrl.boot_sram_len < size) {
                pr_err("standby code will not fit in SRAM\n");
                return NULL;
        }

        return fncpy(ctrl.boot_sram, fn, size);
}

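/*
 * S2 suspend: run brcmstb_pm_do_s2 from boot SRAM so it keeps executing while
 * DDR is quiesced. On method-1 chips, any SHIMPHY power-down sequencing (for
 * example from an earlier S3 entry) is disabled first.
 */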
static int brcmstb_pm_s2(void)
{
        if (ctrl.s3entry_method == 1) {
                shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
                             SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
                            ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
                ddr_ctrl_set(false);
        }

        brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
                                                        brcmstb_pm_do_s2_sz);
        if (!brcmstb_pm_do_s2_sram)
                return -EINVAL;

        return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
                                     ctrl.memcs[0].ddr_phy_base +
                                     ctrl.pll_status_offset);
}

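/*
 * Final stage of S3 entry, run on the SRAM stack set up by brcmstb_pm_do_s3():
 * fill in and publish the S3 parameter block (magic and resume address) via
 * AON SRAM, ask the BSP to load a key, and trigger the warm-boot power-down.
 * Deliberately neither static nor inlined, since it is called by name from
 * the assembly in brcmstb_pm_do_s3(). Returns -EINTR if the power-down did
 * not take effect.
 */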
noinline int brcmstb_pm_s3_finish(void)
{
        struct brcmstb_s3_params *params = ctrl.s3_params;
        dma_addr_t params_pa = ctrl.s3_params_pa;
        phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
        enum bsp_initiate_command cmd;
        u32 flags;

        memset(params, 0, sizeof(*params) - sizeof(params->dtu));

        flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

        flags &= S3_BOOTLOADER_RESERVED;
        flags |= S3_FLAG_NO_MEM_VERIFY;
        flags |= S3_FLAG_LOAD_RANDKEY;

        if (flags & S3_FLAG_LOAD_RANDKEY)
                cmd = BSP_GEN_RANDOM_KEY;
        else
                cmd = BSP_GEN_FIXED_KEY;
        if (do_bsp_initiate_command(cmd)) {
                pr_info("key loading failed\n");
                return -EIO;
        }

        params->magic = BRCMSTB_S3_MAGIC;
        params->reentry = reentry;

        flush_cache_all();

        flags |= BRCMSTB_S3_MAGIC_SHORT;

        writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
        writel_relaxed(lower_32_bits(params_pa),
                       ctrl.aon_sram + AON_REG_CONTROL_LOW);
        writel_relaxed(upper_32_bits(params_pa),
                       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

        switch (ctrl.s3entry_method) {
        case 0:
                s3entry_method0();
                brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
                break;
        case 1:
                s3entry_method1();
                brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
                break;
        default:
                return -EINVAL;
        }

        return -EINTR;
}

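/*
 * Called via cpu_suspend() with the top of boot SRAM as @sp. Switch the stack
 * pointer to SRAM around the call to brcmstb_pm_s3_finish(), since the DDR
 * backing the normal stack goes into self-refresh during S3 entry.
 */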
static int brcmstb_pm_do_s3(unsigned long sp)
{
        unsigned long save_sp;
        int ret;

        asm volatile (
                "mov %[save], sp\n"
                "mov sp, %[new]\n"
                "bl brcmstb_pm_s3_finish\n"
                "mov %[ret], r0\n"
                "mov %[new], sp\n"
                "mov sp, %[save]\n"
                : [save] "=&r" (save_sp), [ret] "=&r" (ret)
                : [new] "r" (sp)
        );

        return ret;
}

static int brcmstb_pm_s3(void)
{
        void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;

        return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
}

static int brcmstb_pm_standby(bool deep_standby)
{
        int ret;

        if (brcmstb_pm_handshake())
                return -EIO;

        if (deep_standby)
                ret = brcmstb_pm_s3();
        else
                ret = brcmstb_pm_s2();
        if (ret)
                pr_err("%s: standby failed\n", __func__);

        return ret;
}

static int brcmstb_pm_enter(suspend_state_t state)
{
        int ret = -EINVAL;

        switch (state) {
        case PM_SUSPEND_STANDBY:
                ret = brcmstb_pm_standby(false);
                break;
        case PM_SUSPEND_MEM:
                ret = brcmstb_pm_standby(true);
                break;
        }

        return ret;
}

static int brcmstb_pm_valid(suspend_state_t state)
{
        switch (state) {
        case PM_SUSPEND_STANDBY:
                return true;
        case PM_SUSPEND_MEM:
                return ctrl.support_warm_boot;
        default:
                return false;
        }
}

static const struct platform_suspend_ops brcmstb_pm_ops = {
        .enter = brcmstb_pm_enter,
        .valid = brcmstb_pm_valid,
};

static const struct of_device_id aon_ctrl_dt_ids[] = {
        { .compatible = "brcm,brcmstb-aon-ctrl" },
        {}
};

struct ddr_phy_ofdata {
        bool supports_warm_boot;
        size_t pll_status_offset;
        int s3entry_method;
        u32 warm_boot_offset;
        u32 phy_a_standby_ctrl_offs;
        u32 phy_b_standby_ctrl_offs;
};

static struct ddr_phy_ofdata ddr_phy_71_1 = {
        .supports_warm_boot = true,
        .pll_status_offset = 0x0c,
        .s3entry_method = 1,
        .warm_boot_offset = 0x2c,
        .phy_a_standby_ctrl_offs = 0x198,
        .phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

static struct ddr_phy_ofdata ddr_phy_72_0 = {
        .supports_warm_boot = true,
        .pll_status_offset = 0x10,
        .s3entry_method = 1,
        .warm_boot_offset = 0x40,
        .phy_a_standby_ctrl_offs = 0x2a4,
        .phy_b_standby_ctrl_offs = 0x8a4
};

static struct ddr_phy_ofdata ddr_phy_225_1 = {
        .supports_warm_boot = false,
        .pll_status_offset = 0x4,
        .s3entry_method = 0
};

static struct ddr_phy_ofdata ddr_phy_240_1 = {
        .supports_warm_boot = true,
        .pll_status_offset = 0x4,
        .s3entry_method = 0
};

static const struct of_device_id ddr_phy_dt_ids[] = {
        {
                .compatible = "brcm,brcmstb-ddr-phy-v71.1",
                .data = &ddr_phy_71_1,
        },
        {
                .compatible = "brcm,brcmstb-ddr-phy-v72.0",
                .data = &ddr_phy_72_0,
        },
        {
                .compatible = "brcm,brcmstb-ddr-phy-v225.1",
                .data = &ddr_phy_225_1,
        },
        {
                .compatible = "brcm,brcmstb-ddr-phy-v240.1",
                .data = &ddr_phy_240_1,
        },
        {
                .compatible = "brcm,brcmstb-ddr-phy-v240.2",
                .data = &ddr_phy_240_1,
        },
        {}
};

struct ddr_seq_ofdata {
        bool needs_ddr_pad;
        u32 warm_boot_offset;
};

static const struct ddr_seq_ofdata ddr_seq_b22 = {
        .needs_ddr_pad = false,
        .warm_boot_offset = 0x2c,
};

static const struct ddr_seq_ofdata ddr_seq = {
        .needs_ddr_pad = true,
};

static const struct of_device_id ddr_shimphy_dt_ids[] = {
        { .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
        {}
};

static const struct of_device_id brcmstb_memc_of_match[] = {
        {
                .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
                .data = &ddr_seq,
        },
        {
                .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
                .data = &ddr_seq_b22,
        },
        {
                .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
                .data = &ddr_seq_b22,
        },
        {
                .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
                .data = &ddr_seq_b22,
        },
        {
                .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
                .data = &ddr_seq_b22,
        },
        {
                .compatible = "brcm,brcmstb-memc-ddr",
                .data = &ddr_seq,
        },
        {},
};

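/*
 * Find the first DT node matching @matches and map register region @index,
 * optionally returning the match data through @ofdata.
 */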
static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
                                           int index, const void **ofdata)
{
        struct device_node *dn;
        const struct of_device_id *match;

        dn = of_find_matching_node_and_match(NULL, matches, &match);
        if (!dn)
                return ERR_PTR(-EINVAL);

        if (ofdata)
                *ofdata = match->data;

        return of_io_request_and_map(dn, index, dn->full_name);
}

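/*
 * On panic, leave a magic marker in AON SRAM, presumably so boot firmware can
 * detect that a panic occurred.
 */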
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

        return NOTIFY_DONE;
}

static struct notifier_block brcmstb_pm_panic_nb = {
        .notifier_call = brcmstb_pm_panic_notify,
};

static int brcmstb_pm_probe(struct platform_device *pdev)
{
        const struct ddr_phy_ofdata *ddr_phy_data;
        const struct ddr_seq_ofdata *ddr_seq_data;
        const struct of_device_id *of_id = NULL;
        struct device_node *dn;
        void __iomem *base;
        int ret, i;

        /* AON ctrl registers */
        base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
        if (IS_ERR(base)) {
                pr_err("error mapping AON_CTRL\n");
                return PTR_ERR(base);
        }
        ctrl.aon_ctrl_base = base;

        /* AON SRAM registers */
        base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
        if (IS_ERR(base)) {
                /* Fall back to the standard offset from AON ctrl */
                ctrl.aon_sram = ctrl.aon_ctrl_base +
                                AON_CTRL_SYSTEM_DATA_RAM_OFS;
        } else {
                ctrl.aon_sram = base;
        }

        writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);

        /* DDR PHY registers */
        base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
                                     (const void **)&ddr_phy_data);
        if (IS_ERR(base)) {
                pr_err("error mapping DDR PHY\n");
                return PTR_ERR(base);
        }
        ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
        ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;

        ctrl.memcs[0].ddr_phy_base = base;
        ctrl.s3entry_method = ddr_phy_data->s3entry_method;
        ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
        ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;

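        /*
         * Default the MEMC warm-boot offset from the DDR PHY match data; a
         * matching DDR sequencer node below may override it.
         */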
        ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;

        /* DDR SHIM-PHY registers */
        for_each_matching_node(dn, ddr_shimphy_dt_ids) {
                i = ctrl.num_memc;
                if (i >= MAX_NUM_MEMC) {
                        pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
                        break;
                }

                base = of_io_request_and_map(dn, 0, dn->full_name);
                if (IS_ERR(base)) {
                        if (!ctrl.support_warm_boot)
                                break;

                        pr_err("error mapping DDR SHIMPHY %d\n", i);
                        return PTR_ERR(base);
                }
                ctrl.memcs[i].ddr_shimphy_base = base;
                ctrl.num_memc++;
        }

        /* Sequencer DRAM parameters */
        i = 0;
        for_each_matching_node(dn, brcmstb_memc_of_match) {
                base = of_iomap(dn, 0);
                if (!base) {
                        pr_err("error mapping DDR Sequencer %d\n", i);
                        return -ENOMEM;
                }

                of_id = of_match_node(brcmstb_memc_of_match, dn);
                if (!of_id) {
                        iounmap(base);
                        return -EINVAL;
                }

                ddr_seq_data = of_id->data;
                ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
                /* Adjust the warm-boot offset for this DDR sequencer */
                if (ddr_seq_data->warm_boot_offset)
                        ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;

                ctrl.memcs[i].ddr_ctrl = base;
                i++;
        }

        pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
                 ctrl.support_warm_boot, ctrl.s3entry_method,
                 ctrl.warm_boot_offset);

        dn = of_find_matching_node(NULL, sram_dt_ids);
        if (!dn) {
                pr_err("SRAM not found\n");
                return -EINVAL;
        }

        ret = brcmstb_init_sram(dn);
        if (ret) {
                pr_err("error setting up SRAM for PM\n");
                return ret;
        }

        ctrl.pdev = pdev;

        ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
        if (!ctrl.s3_params)
                return -ENOMEM;
        ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
                                           sizeof(*ctrl.s3_params),
                                           DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
                pr_err("error mapping DMA memory\n");
                ret = -ENOMEM;
                goto out;
        }

        atomic_notifier_chain_register(&panic_notifier_list,
                                       &brcmstb_pm_panic_nb);

        pm_power_off = brcmstb_pm_poweroff;
        suspend_set_ops(&brcmstb_pm_ops);

        return 0;

out:
        kfree(ctrl.s3_params);

        pr_warn("PM: initialization failed with code %d\n", ret);

        return ret;
}

static struct platform_driver brcmstb_pm_driver = {
        .driver = {
                .name = "brcmstb-pm",
                .of_match_table = aon_ctrl_dt_ids,
        },
};

static int __init brcmstb_pm_init(void)
{
        return platform_driver_probe(&brcmstb_pm_driver,
                                     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);