This source file includes following definitions.
- pnv_save_sprs_for_deep_states
- pnv_get_supported_cpuidle_states
- pnv_fastsleep_workaround_apply
- show_fastsleep_workaround_applyonce
- store_fastsleep_workaround_applyonce
- atomic_start_thread_idle
- atomic_stop_thread_idle
- atomic_lock_thread_idle
- atomic_unlock_and_stop_thread_idle
- atomic_unlock_thread_idle
- power7_idle_insn
- power7_offline
- power7_idle_type
- power7_idle
- power9_idle_stop
- power9_offline_stop
- power9_idle_type
- power9_idle
- pnv_power9_force_smt4_catch
- pnv_power9_force_smt4_release
- pnv_program_cpu_hotplug_lpcr
- pnv_cpu_offline
- validate_psscr_val_mask
- pnv_power9_idle_init
- pnv_disable_deep_states
- pnv_probe_idle_states
- pnv_parse_cpuidle_dt
- pnv_init_idle_states
1
2
3
4
5
6
7
8 #include <linux/types.h>
9 #include <linux/mm.h>
10 #include <linux/slab.h>
11 #include <linux/of.h>
12 #include <linux/device.h>
13 #include <linux/cpu.h>
14
15 #include <asm/asm-prototypes.h>
16 #include <asm/firmware.h>
17 #include <asm/machdep.h>
18 #include <asm/opal.h>
19 #include <asm/cputhreads.h>
20 #include <asm/cpuidle.h>
21 #include <asm/code-patching.h>
22 #include <asm/smp.h>
23 #include <asm/runlatch.h>
24 #include <asm/dbell.h>
25
26 #include "powernv.h"
27 #include "subcore.h"
28
29
/* Power ISA 3.0 stop state "requested level" (RL) is a 4-bit field: 0..15. */
#define MAX_STOP_STATE	0xF

/*
 * Pseudo-SPR numbers understood by the OPAL sleep-winkle engine
 * (opal_slw_set_reg) for restoring MSR and PSSCR on deep-state wakeup.
 */
#define P9_STOP_SPR_MSR 2000
#define P9_STOP_SPR_PSSCR      855

/* OR of the OPAL_PM_* flags of every idle state found in the device tree. */
static u32 supported_cpuidle_states;
struct pnv_idle_states_t *pnv_idle_states;
int nr_pnv_idle_states;

/*
 * The default stop state that will be used by ppc_md.power_save
 * function on CPU idle.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;

/*
 * First stop state levels at which timebase / SPR loss can occur.
 * Initialised to one past the deepest valid level, i.e. "never".
 */
static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
static u64 pnv_first_spr_loss_level = MAX_STOP_STATE + 1;

/*
 * psscr value and mask of the deepest stop idle state.
 * Used when a cpu is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;

/* PNV_THREAD_NAP/SLEEP/WINKLE chosen for offline CPUs on P7/P8 (0 = none). */
static unsigned long power7_offline_type;
/*
 * Stash the SPR values that deep idle states lose into the OPAL
 * sleep-winkle engine so firmware can restore them on wakeup.
 * Returns 0 on success or the first non-zero opal_slw_set_reg() rc.
 */
static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values of current cpu and use the
	 * same across all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR);
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hid1_val = mfspr(SPRN_HID1);
	uint64_t hid4_val = mfspr(SPRN_HID4);
	uint64_t hid5_val = mfspr(SPRN_HID5);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_present_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		/* HSPRG0 points at this CPU's paca so wakeup code can find it. */
		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		/* POWER9 (ISA 3.0) additionally needs MSR and PSSCR restored. */
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;

			rc = opal_slw_set_reg(pir,
					      P9_STOP_SPR_PSSCR, psscr_val);

			if (rc)
				return rc;
		}

		/* HIDs are per core, so program them only on thread 0. */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only p8 needs to set extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {

				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}
138
/* Return the OR of OPAL_PM_* flags of all idle states parsed from the DT. */
u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
144
/*
 * Ask OPAL to apply the fastsleep workaround on this core.
 * Runs on each primary thread via on_each_cpu_mask(); on failure it sets
 * the caller-supplied error flag (info points to the caller's int).
 */
static void pnv_fastsleep_workaround_apply(void *info)

{
	int rc;
	int *err = info;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}
156
/* Whether the fastsleep workaround is applied on entry / undone on exit. */
static bool power7_fastsleep_workaround_entry = true;
static bool power7_fastsleep_workaround_exit = true;

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;
166
/* sysfs show: report whether the fastsleep workaround was applied once. */
static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}
172
173 static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
174 struct device_attribute *attr, const char *buf,
175 size_t count)
176 {
177 cpumask_t primary_thread_mask;
178 int err;
179 u8 val;
180
181 if (kstrtou8(buf, 0, &val) || val != 1)
182 return -EINVAL;
183
184 if (fastsleep_workaround_applyonce == 1)
185 return count;
186
187
188
189
190
191
192
193
194
195
196
197
198
199 power7_fastsleep_workaround_exit = false;
200
201 get_online_cpus();
202 primary_thread_mask = cpu_online_cores_map();
203 on_each_cpu_mask(&primary_thread_mask,
204 pnv_fastsleep_workaround_apply,
205 &err, 1);
206 put_online_cpus();
207 if (err) {
208 pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
209 goto fail;
210 }
211
212 power7_fastsleep_workaround_entry = false;
213
214 fastsleep_workaround_applyonce = 1;
215
216 return count;
217 fail:
218 return -EIO;
219 }
220
221 static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
222 show_fastsleep_workaround_applyonce,
223 store_fastsleep_workaround_applyonce);
224
225 static inline void atomic_start_thread_idle(void)
226 {
227 int cpu = raw_smp_processor_id();
228 int first = cpu_first_thread_sibling(cpu);
229 int thread_nr = cpu_thread_in_core(cpu);
230 unsigned long *state = &paca_ptrs[first]->idle_state;
231
232 clear_bit(thread_nr, state);
233 }
234
235 static inline void atomic_stop_thread_idle(void)
236 {
237 int cpu = raw_smp_processor_id();
238 int first = cpu_first_thread_sibling(cpu);
239 int thread_nr = cpu_thread_in_core(cpu);
240 unsigned long *state = &paca_ptrs[first]->idle_state;
241
242 set_bit(thread_nr, state);
243 }
244
245 static inline void atomic_lock_thread_idle(void)
246 {
247 int cpu = raw_smp_processor_id();
248 int first = cpu_first_thread_sibling(cpu);
249 unsigned long *state = &paca_ptrs[first]->idle_state;
250
251 while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
252 barrier();
253 }
254
255 static inline void atomic_unlock_and_stop_thread_idle(void)
256 {
257 int cpu = raw_smp_processor_id();
258 int first = cpu_first_thread_sibling(cpu);
259 unsigned long thread = 1UL << cpu_thread_in_core(cpu);
260 unsigned long *state = &paca_ptrs[first]->idle_state;
261 u64 s = READ_ONCE(*state);
262 u64 new, tmp;
263
264 BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
265 BUG_ON(s & thread);
266
267 again:
268 new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
269 tmp = cmpxchg(state, s, new);
270 if (unlikely(tmp != s)) {
271 s = tmp;
272 goto again;
273 }
274 }
275
276 static inline void atomic_unlock_thread_idle(void)
277 {
278 int cpu = raw_smp_processor_id();
279 int first = cpu_first_thread_sibling(cpu);
280 unsigned long *state = &paca_ptrs[first]->idle_state;
281
282 BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
283 clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
284 }
285
286
/* P7 and P8 SPRs that winkle can lose, grouped by the scope they are shared at. */
struct p7_sprs {
	/* per core */
	u64 tscr;
	u64 worc;

	/* per subcore */
	u64 sdr1;
	u64 rpr;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};
311
/*
 * Enter a P7/P8 idle state (PNV_THREAD_NAP/SLEEP/WINKLE) and handle all
 * state tracking and SPR save/restore around it.  Returns the SRR1 wakeup
 * value.  Interrupts must be hard disabled and the MMU off on entry.
 */
static unsigned long power7_idle_insn(unsigned long type)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	bool full_winkle;
	struct p7_sprs sprs = {};
	bool sprs_saved = false;
	int rc;

	/* Nap does not lose state, so needs no entry bookkeeping. */
	if (unlikely(type != PNV_THREAD_NAP)) {
		atomic_lock_thread_idle();

		BUG_ON(!(*state & thread));
		*state &= ~thread;

		if (power7_fastsleep_workaround_entry) {
			/* Last thread of the core going idle applies it. */
			if ((*state & core_thread_mask) == 0) {
				rc = opal_config_cpu_idle_state(
						OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_APPLY);
				BUG_ON(rc);
			}
		}

		if (type == PNV_THREAD_WINKLE) {
			/* Save SPRs that a full winkle will destroy. */
			sprs.tscr = mfspr(SPRN_TSCR);
			sprs.worc = mfspr(SPRN_WORC);

			sprs.sdr1 = mfspr(SPRN_SDR1);
			sprs.rpr = mfspr(SPRN_RPR);

			sprs.lpcr = mfspr(SPRN_LPCR);
			if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
				sprs.hfscr = mfspr(SPRN_HFSCR);
				sprs.fscr = mfspr(SPRN_FSCR);
			}
			sprs.purr = mfspr(SPRN_PURR);
			sprs.spurr = mfspr(SPRN_SPURR);
			sprs.dscr = mfspr(SPRN_DSCR);
			sprs.wort = mfspr(SPRN_WORT);

			sprs_saved = true;

			/*
			 * Increment winkle counter and set all winkle bits if
			 * all threads are winkling. This allows wakeup side to
			 * distinguish between fast sleep and winkle state
			 * loss. Fastsleep and winkle state loss result in
			 * similar wakeup reasons.
			 */
			*state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
			if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
					>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
					== threads_per_core)
				*state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
			WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		}

		atomic_unlock_thread_idle();
	}

	/* AMR family can be lost even by shallow state loss; save always. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		sprs.amr = mfspr(SPRN_AMR);
		sprs.iamr = mfspr(SPRN_IAMR);
		sprs.amor = mfspr(SPRN_AMOR);
		sprs.uamor = mfspr(SPRN_UAMOR);
	}

	local_paca->thread_idle_state = type;
	srr1 = isa206_idle_insn_mayloss(type);		/* go idle */
	local_paca->thread_idle_state = PNV_THREAD_RUNNING;

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
			/*
			 * We don't need an isync after the mtsprs here because
			 * the upcoming mtmsrd is execution synchronizing.
			 */
			mtspr(SPRN_AMR, sprs.amr);
			mtspr(SPRN_IAMR, sprs.iamr);
			mtspr(SPRN_AMOR, sprs.amor);
			mtspr(SPRN_UAMOR, sprs.uamor);
		}
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/* No hypervisor state loss: undo the entry bookkeeping and return. */
	if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
		if (unlikely(type != PNV_THREAD_NAP)) {
			atomic_lock_thread_idle();
			if (type == PNV_THREAD_WINKLE) {
				WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
				*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
				*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			}
			atomic_unlock_and_stop_thread_idle();
		}
		return srr1;
	}

	/* HV state loss path below: nap can never get here. */
	BUG_ON(type == PNV_THREAD_NAP);

	atomic_lock_thread_idle();

	/* Winkle state loss: decrement the count and check our winkle bit. */
	full_winkle = false;
	if (type == PNV_THREAD_WINKLE) {
		WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
		if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
			*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			full_winkle = true;
			BUG_ON(!sprs_saved);
		}
	}

	WARN_ON(*state & thread);

	/* First thread of the core to wake restores per-core resources. */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	if (full_winkle) {
		mtspr(SPRN_TSCR, sprs.tscr);
		mtspr(SPRN_WORC, sprs.worc);
	}

	if (power7_fastsleep_workaround_exit) {
		rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_UNDO);
		BUG_ON(rc);
	}

	/* TB */
	if (opal_resync_timebase() != OPAL_SUCCESS)
		BUG();

core_woken:
	if (!full_winkle)
		goto subcore_woken;

	/* First thread of the subcore to wake restores per-subcore SPRs. */
	if ((*state & local_paca->subcore_sibling_mask) != 0)
		goto subcore_woken;

	/* Per-subcore SPRs */
	mtspr(SPRN_SDR1, sprs.sdr1);
	mtspr(SPRN_RPR, sprs.rpr);

subcore_woken:
	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();
	atomic_unlock_and_stop_thread_idle();

	/* Fast sleep does not lose SPRs */
	if (!full_winkle)
		return srr1;

	/* The rest is full winkle: restore per-thread SPRs. */
	mtspr(SPRN_LPCR, sprs.lpcr);
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_HFSCR, sprs.hfscr);
		mtspr(SPRN_FSCR, sprs.fscr);
	}
	mtspr(SPRN_PURR, sprs.purr);
	mtspr(SPRN_SPURR, sprs.spurr);
	mtspr(SPRN_DSCR, sprs.dscr);
	mtspr(SPRN_WORT, sprs.wort);

	mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

	/*
	 * The SLB has to be restored here, but it sometimes still
	 * contains entries, so the __ variant must be used to prevent
	 * multi hits.  NOTE(review): hedged — grounded only in the call
	 * itself; confirm against __slb_restore_bolted_realmode().
	 */
	__slb_restore_bolted_realmode();

	return srr1;
}
503
504 extern unsigned long idle_kvm_start_guest(unsigned long srr1);
505
506 #ifdef CONFIG_HOTPLUG_CPU
/*
 * Enter the configured P7/P8 offline idle state (power7_offline_type)
 * for a CPU being unplugged.  Returns the SRR1 wakeup value.
 */
static unsigned long power7_offline(void)
{
	unsigned long srr1;

	mtmsr(MSR_IDLE);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * Tell KVM this hardware thread is idle and available: the KVM
	 * secondary-thread code checks hwthread_state before grabbing an
	 * offline thread to run a guest vCPU.
	 */
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
#endif

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(power7_offline_type);
	__ppc64_runlatch_on();

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
#endif

	mtmsr(MSR_KERNEL);

	return srr1;
}
549 #endif
550
/*
 * Enter a P7/P8 idle state of the given type from the cpuidle path,
 * taking care of irq bookkeeping, MSR and runlatch around the idle insn.
 */
void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	mtmsr(MSR_IDLE);
	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();
	mtmsr(MSR_KERNEL);

	fini_irq_for_idle_irqsoff();
	/* Replay any interrupt that woke us, per the SRR1 wake reason. */
	irq_set_pending_from_srr1(srr1);
}
567
568 void power7_idle(void)
569 {
570 if (!powersave_nap)
571 return;
572
573 power7_idle_type(PNV_THREAD_NAP);
574 }
575
/* P9 SPRs that deep stop states can lose, grouped by sharing scope. */
struct p9_sprs {
	/* per core */
	u64 ptcr;
	u64 rpr;
	u64 tscr;
	u64 ldbar;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 pid;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	u64 mmcra;
	u32 mmcr0;
	u32 mmcr1;
	u64 mmcr2;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};
604
/*
 * Execute a P9 stop instruction with the given PSSCR value, saving and
 * restoring any state the requested level can lose.  Returns the SRR1
 * wakeup value (0 for the no-loss fast path).  mmu_on selects whether
 * MSR is restored to MSR_KERNEL before returning.
 */
static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
	unsigned long mmcr0 = 0;
	struct p9_sprs sprs = {};
	bool sprs_saved = false;

	/* EC=ESL=0: "lite" stop, no state loss possible. */
	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* This path must run with the MMU left on. */
		BUG_ON(!mmu_on);

		srr1 = isa300_idle_stop_noloss(psscr);
		if (likely(!srr1))
			return 0;

		/*
		 * A wakeup here means an interrupt arrived; no state can
		 * have been lost in this mode.
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* State-loss capable stop below. */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		/* Advertise our target PSSCR so smt4_catch can see/poke us. */
		local_paca->requested_psscr = psscr;
		/* order setting requested_psscr vs testing dont_stop */
		smp_mb();
		if (atomic_read(&local_paca->dont_stop)) {
			local_paca->requested_psscr = 0;
			return 0;
		}
	}
#endif

	if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
		/*
		 * Pre-DD2.1 workaround: save MMCR0 and restore it (with an
		 * ERAT flush) after any state-loss wakeup.
		 */
		mmcr0 = mfspr(SPRN_MMCR0);
	}
	if ((psscr & PSSCR_RL_MASK) >= pnv_first_spr_loss_level) {
		/* This level may lose SPRs: save everything we may need. */
		sprs.lpcr = mfspr(SPRN_LPCR);
		sprs.hfscr = mfspr(SPRN_HFSCR);
		sprs.fscr = mfspr(SPRN_FSCR);
		sprs.pid = mfspr(SPRN_PID);
		sprs.purr = mfspr(SPRN_PURR);
		sprs.spurr = mfspr(SPRN_SPURR);
		sprs.dscr = mfspr(SPRN_DSCR);
		sprs.wort = mfspr(SPRN_WORT);

		sprs.mmcra = mfspr(SPRN_MMCRA);
		sprs.mmcr0 = mfspr(SPRN_MMCR0);
		sprs.mmcr1 = mfspr(SPRN_MMCR1);
		sprs.mmcr2 = mfspr(SPRN_MMCR2);

		sprs.ptcr = mfspr(SPRN_PTCR);
		sprs.rpr = mfspr(SPRN_RPR);
		sprs.tscr = mfspr(SPRN_TSCR);
		/* LDBAR is not touched when an ultravisor owns it. */
		if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
			sprs.ldbar = mfspr(SPRN_LDBAR);

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	/* AMR family may be lost even below the SPR-loss level. */
	sprs.amr = mfspr(SPRN_AMR);
	sprs.iamr = mfspr(SPRN_IAMR);
	sprs.amor = mfspr(SPRN_AMOR);
	sprs.uamor = mfspr(SPRN_UAMOR);

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->requested_psscr = 0;
#endif

	/* Re-read PSSCR: its PLS field reports the level actually reached. */
	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
		unsigned long mmcra;

		/*
		 * We don't need an isync after the mtsprs here because the
		 * upcoming mtmsrd is execution synchronizing.
		 */
		mtspr(SPRN_AMR, sprs.amr);
		mtspr(SPRN_IAMR, sprs.iamr);
		mtspr(SPRN_AMOR, sprs.amor);
		mtspr(SPRN_UAMOR, sprs.uamor);

		/*
		 * Pre-DD2.1: flush the ERAT and restore MMCR0 saved above
		 * (hardware workaround for state-loss wakeups).
		 */
		if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT);
			mtspr(SPRN_MMCR0, mmcr0);
		}

		/*
		 * Set then clear MMCRA bit 60 — NOTE(review): presumably a
		 * PMU restart workaround after state loss; confirm against
		 * the POWER9 workbook.
		 */
		mmcra = mfspr(SPRN_MMCRA);
		mmcra |= PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
		mmcra &= ~PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * Use PSSCR[PLS] (the level actually reached) rather than SRR1 to
	 * decide whether SPRs were lost.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < pnv_first_spr_loss_level)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss: sprs must have been saved on the way in. */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	/* First thread of the core to wake restores per-core resources. */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	mtspr(SPRN_PTCR, sprs.ptcr);
	mtspr(SPRN_RPR, sprs.rpr);
	mtspr(SPRN_TSCR, sprs.tscr);

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR, sprs.lpcr);
	mtspr(SPRN_HFSCR, sprs.hfscr);
	mtspr(SPRN_FSCR, sprs.fscr);
	mtspr(SPRN_PID, sprs.pid);
	mtspr(SPRN_PURR, sprs.purr);
	mtspr(SPRN_SPURR, sprs.spurr);
	mtspr(SPRN_DSCR, sprs.dscr);
	mtspr(SPRN_WORT, sprs.wort);

	mtspr(SPRN_MMCRA, sprs.mmcra);
	mtspr(SPRN_MMCR0, sprs.mmcr0);
	mtspr(SPRN_MMCR1, sprs.mmcr1);
	mtspr(SPRN_MMCR2, sprs.mmcr2);
	if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
		mtspr(SPRN_LDBAR, sprs.ldbar);

	mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	if (mmu_on)
		mtmsr(MSR_KERNEL);

	return srr1;
}
807
808 #ifdef CONFIG_HOTPLUG_CPU
/*
 * Put an offlined CPU into the given stop state.  Returns the SRR1
 * wakeup value.
 */
static unsigned long power9_offline_stop(unsigned long psscr)
{
	unsigned long srr1;

#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	__ppc64_runlatch_off();
	srr1 = power9_idle_stop(psscr, true);
	__ppc64_runlatch_on();
#else
	/*
	 * With HV KVM possible, advertise this thread to KVM before idling
	 * (hwthread_state = IN_IDLE) and keep the MMU off across the stop
	 * (mmu_on = false) so KVM can take the thread directly; restore
	 * MSR_KERNEL ourselves afterwards.
	 */
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;

	__ppc64_runlatch_off();
	srr1 = power9_idle_stop(psscr, false);
	__ppc64_runlatch_on();

	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
	mtmsr(MSR_KERNEL);
#endif

	return srr1;
}
844 #endif
845
/*
 * cpuidle entry point for P9 stop states: merge the requested val/mask
 * into the current PSSCR, execute stop, then replay any pending wakeup
 * interrupt recorded in SRR1.
 */
void power9_idle_type(unsigned long stop_psscr_val,
				      unsigned long stop_psscr_mask)
{
	unsigned long psscr;
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	__ppc64_runlatch_off();
	srr1 = power9_idle_stop(psscr, true);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	irq_set_pending_from_srr1(srr1);
}
866
867
868
869
/*
 * Used for ppc_md.power_save which needs a function with no parameters.
 */
void power9_idle(void)
{
	power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}
874
875 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
876
877
878
879
880
881
882
883
884
/*
 * Force all four threads of this core awake and prevent them from
 * entering stop (by bumping each sibling's dont_stop count) until
 * pnv_power9_force_smt4_release() is called.  Threads already headed
 * into stop (requested_psscr set) are poked with a doorbell, and we
 * busy-wait until every sibling is awake.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	int awake_threads = 1;		/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	smp_mb();
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!paca_ptrs[cpu0+thr]->requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   paca_ptrs[cpu0+thr]->hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !paca_ptrs[cpu0+thr]->requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);
930
/*
 * Undo pnv_power9_force_smt4_catch(): drop the dont_stop reference on
 * every sibling thread so they may enter stop again.
 */
void pnv_power9_force_smt4_release(void)
{
	int cpu, cpu0, thr;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);

	/* clear all the dont_stop flags */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
945 #endif
946
947 #ifdef CONFIG_HOTPLUG_CPU
948
/*
 * Program LPCR for a CPU going through hotplug, both in the register and
 * (when deep states may lose it) in the OPAL sleep-winkle engine so the
 * value survives a context-losing stop.
 */
void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	u64 pir = get_hard_smp_processor_id(cpu);

	mtspr(SPRN_LPCR, lpcr_val);

	/*
	 * Program the LPCR via stop-api only if the deepest stop state
	 * can lose hypervisor context.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}
962
963
964
965
966
967
/*
 * pnv_cpu_offline: Put the cpu into the deepest available platform idle
 * state.  Falls back to a low-priority spin loop when no suitable idle
 * state is configured.  Returns the SRR1 wakeup value (0 for spin loop).
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = power9_offline_stop(psscr);
	} else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
		srr1 = power7_offline();
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
997 #endif
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036 int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
1037 {
1038 int err = 0;
1039
1040
1041
1042
1043
1044
1045 if (*psscr_mask == 0xf) {
1046 *psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
1047 *psscr_mask = PSSCR_HV_DEFAULT_MASK;
1048 return err;
1049 }
1050
1051
1052
1053
1054
1055
1056
1057
1058 if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
1059 err = ERR_EC_ESL_MISMATCH;
1060 } else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
1061 GET_PSSCR_ESL(*psscr_val) == 0) {
1062 err = ERR_DEEP_STATE_ESL_MISMATCH;
1063 }
1064
1065 return err;
1066 }
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
/*
 * Walk the parsed stop states: validate each PSSCR val/mask, compute the
 * first levels at which SPRs / timebase can be lost, and pick the default
 * (deepest loss-less, OPAL_PM_STOP_INST_FAST) and deepest (highest
 * residency) states used for idle and CPU offline respectively.
 */
static void __init pnv_power9_idle_init(void)
{
	u64 max_residency_ns = 0;
	int i;

	/*
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding
	 * to the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding
	 * to the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
	 */
	pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
	pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
	for (i = 0; i < nr_pnv_idle_states; i++) {
		int err;
		struct pnv_idle_states_t *state = &pnv_idle_states[i];
		u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;

		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (pnv_first_tb_loss_level > psscr_rl))
			pnv_first_tb_loss_level = psscr_rl;

		if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		     (pnv_first_spr_loss_level > psscr_rl))
			pnv_first_spr_loss_level = psscr_rl;

		/*
		 * The firmware should always publish the TB-loss level as
		 * also losing SPRs, but be defensive: treat any TB-stopping
		 * state as an SPR-losing state too.
		 */
		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (pnv_first_spr_loss_level > psscr_rl))
			pnv_first_spr_loss_level = psscr_rl;

		err = validate_psscr_val_mask(&state->psscr_val,
					      &state->psscr_mask,
					      state->flags);
		if (err) {
			report_invalid_psscr_val(state->psscr_val, err);
			continue;
		}

		state->valid = true;

		/* Deepest = highest residency among valid states. */
		if (max_residency_ns < state->residency_ns) {
			max_residency_ns = state->residency_ns;
			pnv_deepest_stop_psscr_val = state->psscr_val;
			pnv_deepest_stop_psscr_mask = state->psscr_mask;
			pnv_deepest_stop_flag = state->flags;
			deepest_stop_found = true;
		}

		/* Default = first valid fast (loss-less) state. */
		if (!default_stop_found &&
		    (state->flags & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = state->psscr_val;
			pnv_default_stop_mask = state->psscr_mask;
			default_stop_found = true;
			WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = power9_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
		pnv_first_spr_loss_level);

	pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
		pnv_first_tb_loss_level);
}
1166
/*
 * Called when saving SPRs for deep states failed (stop-api unusable):
 * disable every idle state that loses full context, and demote the
 * CPU-offline stop state to the default shallow one (or busy-wait).
 */
static void __init pnv_disable_deep_states(void)
{
	/*
	 * The stop-api is unable to restore hypervisor
	 * resources on wakeup from platform idle states which
	 * lose full context. So disable such states.
	 */
	supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
	pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
	pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
	    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
		/*
		 * Use the default stop state for CPU-Hotplug
		 * if available.
		 */
		if (default_stop_found) {
			pnv_deepest_stop_psscr_val = pnv_default_stop_val;
			pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
			pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
				pnv_deepest_stop_psscr_val);
		} else { /* Fallback to snooze loop for CPU-Hotplug */
			deepest_stop_found = false;
			pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
		}
	}
}
1195
1196
1197
1198
1199 static void __init pnv_probe_idle_states(void)
1200 {
1201 int i;
1202
1203 if (nr_pnv_idle_states < 0) {
1204 pr_warn("cpuidle-powernv: no idle states found in the DT\n");
1205 return;
1206 }
1207
1208 if (cpu_has_feature(CPU_FTR_ARCH_300))
1209 pnv_power9_idle_init();
1210
1211 for (i = 0; i < nr_pnv_idle_states; i++)
1212 supported_cpuidle_states |= pnv_idle_states[i].flags;
1213 }
1214
1215
1216
1217
1218
1219
1220
1221 static int pnv_parse_cpuidle_dt(void)
1222 {
1223 struct device_node *np;
1224 int nr_idle_states, i;
1225 int rc = 0;
1226 u32 *temp_u32;
1227 u64 *temp_u64;
1228 const char **temp_string;
1229
1230 np = of_find_node_by_path("/ibm,opal/power-mgt");
1231 if (!np) {
1232 pr_warn("opal: PowerMgmt Node not found\n");
1233 return -ENODEV;
1234 }
1235 nr_idle_states = of_property_count_u32_elems(np,
1236 "ibm,cpu-idle-state-flags");
1237
1238 pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
1239 GFP_KERNEL);
1240 temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
1241 temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
1242 temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);
1243
1244 if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
1245 pr_err("Could not allocate memory for dt parsing\n");
1246 rc = -ENOMEM;
1247 goto out;
1248 }
1249
1250
1251 if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
1252 temp_u32, nr_idle_states)) {
1253 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
1254 rc = -EINVAL;
1255 goto out;
1256 }
1257 for (i = 0; i < nr_idle_states; i++)
1258 pnv_idle_states[i].flags = temp_u32[i];
1259
1260
1261 if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
1262 temp_u32, nr_idle_states)) {
1263 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
1264 rc = -EINVAL;
1265 goto out;
1266 }
1267 for (i = 0; i < nr_idle_states; i++)
1268 pnv_idle_states[i].latency_ns = temp_u32[i];
1269
1270
1271 if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
1272 temp_u32, nr_idle_states)) {
1273 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
1274 rc = -EINVAL;
1275 goto out;
1276 }
1277 for (i = 0; i < nr_idle_states; i++)
1278 pnv_idle_states[i].residency_ns = temp_u32[i];
1279
1280
1281 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1282
1283 if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
1284 temp_u64, nr_idle_states)) {
1285 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
1286 rc = -EINVAL;
1287 goto out;
1288 }
1289 for (i = 0; i < nr_idle_states; i++)
1290 pnv_idle_states[i].psscr_val = temp_u64[i];
1291
1292
1293 if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
1294 temp_u64, nr_idle_states)) {
1295 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
1296 rc = -EINVAL;
1297 goto out;
1298 }
1299 for (i = 0; i < nr_idle_states; i++)
1300 pnv_idle_states[i].psscr_mask = temp_u64[i];
1301 }
1302
1303
1304
1305
1306
1307
1308
1309 if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
1310 temp_string, nr_idle_states) < 0) {
1311 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
1312 rc = -EINVAL;
1313 goto out;
1314 }
1315 for (i = 0; i < nr_idle_states; i++)
1316 strlcpy(pnv_idle_states[i].name, temp_string[i],
1317 PNV_IDLE_NAME_LEN);
1318 nr_pnv_idle_states = nr_idle_states;
1319 rc = 0;
1320 out:
1321 kfree(temp_u32);
1322 kfree(temp_u64);
1323 kfree(temp_string);
1324 return rc;
1325 }
1326
/*
 * Boot-time initcall: set up per-CPU idle-state tracking in the paca,
 * parse and probe the platform idle states, and wire up the appropriate
 * power_save hook and offline idle type for the CPU generation.
 */
static int __init pnv_init_idle_states(void)
{
	int cpu;
	int rc = 0;

	/* Set up the core idle-state word for every present CPU. */
	for_each_present_cpu(cpu) {
		struct paca_struct *p = paca_ptrs[cpu];

		p->idle_state = 0;
		/* Primary thread holds the shared word: all threads "busy". */
		if (cpu == cpu_first_thread_sibling(cpu))
			p->idle_state = (1 << threads_per_core) - 1;

		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
			/* P7/P8 nap */
			p->thread_idle_state = PNV_THREAD_RUNNING;
		} else {
			/* P9 stop */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
			p->requested_psscr = 0;
			atomic_set(&p->dont_stop, 0);
#endif
		}
	}

	/* In case we error out nr_pnv_idle_states will be zero */
	nr_pnv_idle_states = 0;
	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;
	rc = pnv_parse_cpuidle_dt();
	if (rc)
		return rc;
	pnv_probe_idle_states();

	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
			/* No ER1 bug: no fastsleep workaround needed. */
			power7_fastsleep_workaround_entry = false;
			power7_fastsleep_workaround_exit = false;
		} else {
			/*
			 * Expose the apply-once knob so userspace can trade
			 * the entry/exit workaround cost for a permanent
			 * application.
			 */
			device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
		}

		update_subcore_sibling_mask();

		if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
			ppc_md.power_save = power7_idle;
			power7_offline_type = PNV_THREAD_NAP;
		}

		/* Prefer the deepest supported offline type. */
		if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
		    (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
			power7_offline_type = PNV_THREAD_WINKLE;
		else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
			 (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
			power7_offline_type = PNV_THREAD_SLEEP;
	}

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		/* If the stop-api cannot save SPRs, deep states are unusable. */
		if (pnv_save_sprs_for_deep_states())
			pnv_disable_deep_states();
	}

out:
	return 0;
}
1401 machine_subsys_initcall(powernv, pnv_init_idle_states);