This source file includes the following definitions.
- imcr_pic_to_apic
- imcr_apic_to_pic
- parse_lapic
- setup_apicpmtimer
- lapic_get_version
- lapic_is_integrated
- modern_apic
- apic_disable
- native_apic_wait_icr_idle
- native_safe_apic_wait_icr_idle
- native_apic_icr_write
- native_apic_icr_read
- get_physical_broadcast
- lapic_get_maxlvt
- __setup_APIC_LVTT
- eilvt_entry_is_changeable
- reserve_eilvt_offset
- setup_APIC_eilvt
- lapic_next_event
- lapic_next_deadline
- lapic_timer_shutdown
- lapic_timer_set_periodic_oneshot
- lapic_timer_set_periodic
- lapic_timer_set_oneshot
- lapic_timer_broadcast
- hsx_deadline_rev
- bdx_deadline_rev
- skx_deadline_rev
- apic_validate_deadline_timer
- setup_APIC_timer
- __lapic_update_tsc_freq
- lapic_update_tsc_freq
- lapic_cal_handler
- calibrate_by_pmtimer
- lapic_init_clockevent
- apic_needs_pit
- calibrate_APIC_clock
- setup_boot_APIC_clock
- setup_secondary_APIC_clock
- local_apic_timer_interrupt
- smp_apic_timer_interrupt
- setup_profiling_timer
- clear_local_APIC
- apic_soft_disable
- disable_local_APIC
- lapic_shutdown
- sync_Arb_IDs
- __apic_intr_mode_select
- apic_intr_mode_select
- init_bsp_APIC
- apic_intr_mode_init
- lapic_setup_esr
- apic_check_and_ack
- apic_pending_intr_clear
- setup_local_APIC
- end_local_APIC_setup
- apic_ap_setup
- __x2apic_disable
- __x2apic_enable
- setup_nox2apic
- x2apic_setup
- x2apic_disable
- x2apic_enable
- try_to_enable_x2apic
- check_x2apic
- validate_x2apic
- try_to_enable_x2apic
- __x2apic_enable
- enable_IR_x2apic
- detect_init_APIC
- apic_verify
- apic_force_enable
- detect_init_APIC
- init_apic_mappings
- register_lapic_address
- smp_spurious_interrupt
- smp_error_interrupt
- connect_bsp_APIC
- disconnect_bsp_APIC
- apic_id_is_primary_thread
- allocate_logical_cpuid
- generic_processor_info
- hard_smp_processor_id
- apic_set_eoi_write
- apic_bsp_up_setup
- apic_bsp_setup
- up_late_init
- lapic_suspend
- lapic_resume
- apic_pm_activate
- init_lapic_sysfs
- apic_pm_activate
- set_multi
- dmi_check_multi
- apic_is_clustered_box
- setup_disableapic
- setup_nolapic
- parse_lapic_timer_c2_ok
- parse_disable_apic_timer
- parse_nolapic_timer
- apic_set_verbosity
- lapic_insert_resource
- apic_set_disabled_cpu_apicid
- apic_set_extnmi
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 #include <linux/perf_event.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/mc146818rtc.h>
21 #include <linux/acpi_pmtmr.h>
22 #include <linux/clockchips.h>
23 #include <linux/interrupt.h>
24 #include <linux/memblock.h>
25 #include <linux/ftrace.h>
26 #include <linux/ioport.h>
27 #include <linux/export.h>
28 #include <linux/syscore_ops.h>
29 #include <linux/delay.h>
30 #include <linux/timex.h>
31 #include <linux/i8253.h>
32 #include <linux/dmar.h>
33 #include <linux/init.h>
34 #include <linux/cpu.h>
35 #include <linux/dmi.h>
36 #include <linux/smp.h>
37 #include <linux/mm.h>
38
39 #include <asm/trace/irq_vectors.h>
40 #include <asm/irq_remapping.h>
41 #include <asm/perf_event.h>
42 #include <asm/x86_init.h>
43 #include <asm/pgalloc.h>
44 #include <linux/atomic.h>
45 #include <asm/mpspec.h>
46 #include <asm/i8259.h>
47 #include <asm/proto.h>
48 #include <asm/traps.h>
49 #include <asm/apic.h>
50 #include <asm/io_apic.h>
51 #include <asm/desc.h>
52 #include <asm/hpet.h>
53 #include <asm/mtrr.h>
54 #include <asm/time.h>
55 #include <asm/smp.h>
56 #include <asm/mce.h>
57 #include <asm/tsc.h>
58 #include <asm/hypervisor.h>
59 #include <asm/cpu_device_id.h>
60 #include <asm/intel-family.h>
61 #include <asm/irq_regs.h>
62
63 unsigned int num_processors;
64
65 unsigned disabled_cpus;
66
67
68 unsigned int boot_cpu_physical_apicid __ro_after_init = -1U;
69 EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
70
71 u8 boot_cpu_apic_version __ro_after_init;
72
73
74
75
76 static unsigned int max_physical_apicid;
77
78
79
80
81 physid_mask_t phys_cpu_present_map;
82
83
84
85
86
87
88 static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID;
89
90
91
92
93
94 static int apic_extnmi __ro_after_init = APIC_EXTNMI_BSP;
95
96
97
98
99 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
100 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
101 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
102 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
103 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
104 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);
105
106 #ifdef CONFIG_X86_32
107
108
109
110
111
112
113
114 DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);
115
116
117 static int enabled_via_apicbase __ro_after_init;
118
119
120
121
122
123
124
125
126
127 static inline void imcr_pic_to_apic(void)
128 {
129
130 outb(0x70, 0x22);
131
132 outb(0x01, 0x23);
133 }
134
135 static inline void imcr_apic_to_pic(void)
136 {
137
138 outb(0x70, 0x22);
139
140 outb(0x00, 0x23);
141 }
142 #endif
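/*
 * Note on the two IMCR helpers above: the Interrupt Mode Configuration
 * Register (MP spec 1.4) is reached through I/O port 0x22 (index, where
 * value 0x70 selects the IMCR) and port 0x23 (data).  Writing 0x01 routes
 * the 8259A INTR and NMI lines through the local APIC; writing 0x00
 * restores the direct PIC-mode wiring.  connect_bsp_APIC() and
 * disconnect_bsp_APIC() further down only touch these ports when the MP
 * tables report pic_mode.
 */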
143
144
145
146
147
148
149 static int force_enable_local_apic __initdata;
150
151
152
153
154 static int __init parse_lapic(char *arg)
155 {
156 if (IS_ENABLED(CONFIG_X86_32) && !arg)
157 force_enable_local_apic = 1;
158 else if (arg && !strncmp(arg, "notscdeadline", 13))
159 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
160 return 0;
161 }
162 early_param("lapic", parse_lapic);
163
164 #ifdef CONFIG_X86_64
165 static int apic_calibrate_pmtmr __initdata;
166 static __init int setup_apicpmtimer(char *s)
167 {
168 apic_calibrate_pmtmr = 1;
169 notsc_setup(NULL);
170 return 0;
171 }
172 __setup("apicpmtimer", setup_apicpmtimer);
173 #endif
174
175 unsigned long mp_lapic_addr __ro_after_init;
176 int disable_apic __ro_after_init;
177
178 static int disable_apic_timer __initdata;
179
180 int local_apic_timer_c2_ok __ro_after_init;
181 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
182
183
184
185
186 int apic_verbosity __ro_after_init;
187
188 int pic_mode __ro_after_init;
189
190
191 int smp_found_config __ro_after_init;
192
193 static struct resource lapic_resource = {
194 .name = "Local APIC",
195 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
196 };
197
198 unsigned int lapic_timer_period = 0;
199
200 static void apic_pm_activate(void);
201
202 static unsigned long apic_phys __ro_after_init;
203
204
205
206
207 static inline int lapic_get_version(void)
208 {
209 return GET_APIC_VERSION(apic_read(APIC_LVR));
210 }
211
212
213
214
215 static inline int lapic_is_integrated(void)
216 {
217 return APIC_INTEGRATED(lapic_get_version());
218 }
219
220
221
222
223 static int modern_apic(void)
224 {
225
226 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
227 boot_cpu_data.x86 >= 0xf)
228 return 1;
229
230
231 if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
232 return 1;
233
234 return lapic_get_version() >= 0x14;
235 }
236
237
238
239
240
241 static void __init apic_disable(void)
242 {
243 pr_info("APIC: switched to apic NOOP\n");
244 apic = &apic_noop;
245 }
246
247 void native_apic_wait_icr_idle(void)
248 {
249 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
250 cpu_relax();
251 }
252
253 u32 native_safe_apic_wait_icr_idle(void)
254 {
255 u32 send_status;
256 int timeout;
257
258 timeout = 0;
259 do {
260 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
261 if (!send_status)
262 break;
263 inc_irq_stat(icr_read_retry_count);
264 udelay(100);
265 } while (timeout++ < 1000);
266
267 return send_status;
268 }
269
270 void native_apic_icr_write(u32 low, u32 id)
271 {
272 unsigned long flags;
273
274 local_irq_save(flags);
275 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
276 apic_write(APIC_ICR, low);
277 local_irq_restore(flags);
278 }
279
280 u64 native_apic_icr_read(void)
281 {
282 u32 icr1, icr2;
283
284 icr2 = apic_read(APIC_ICR2);
285 icr1 = apic_read(APIC_ICR);
286
287 return icr1 | ((u64)icr2 << 32);
288 }
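/*
 * In xAPIC (MMIO) mode the 64-bit Interrupt Command Register is split
 * across two 32-bit registers: APIC_ICR2 carries the destination APIC ID
 * (via SET_APIC_DEST_FIELD()), APIC_ICR the vector, delivery mode and
 * flags.  The IPI goes out on the write to the low half, which is why
 * native_apic_icr_write() programs ICR2 first.  A minimal illustrative
 * sketch (not part of this file) of a fixed-mode IPI to APIC ID 3:
 *
 *	native_apic_icr_write(APIC_DM_FIXED | APIC_DEST_PHYSICAL | vector, 3);
 *
 * where "vector" stands in for a vector the caller actually owns.
 */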
289
290 #ifdef CONFIG_X86_32
291
292
293
294 int get_physical_broadcast(void)
295 {
296 return modern_apic() ? 0xff : 0xf;
297 }
298 #endif
299
300
301
302
303 int lapic_get_maxlvt(void)
304 {
305
306
307
308
309 return lapic_is_integrated() ? GET_APIC_MAXLVT(apic_read(APIC_LVR)) : 2;
310 }
311
312
313
314
315
316
317 #define APIC_DIVISOR 16
318 #define TSC_DIVISOR 8
319
320
321
322
323
324
325
326
327
328
329
330 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
331 {
332 unsigned int lvtt_value, tmp_value;
333
334 lvtt_value = LOCAL_TIMER_VECTOR;
335 if (!oneshot)
336 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
337 else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
338 lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;
339
340 if (!lapic_is_integrated())
341 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
342
343 if (!irqen)
344 lvtt_value |= APIC_LVT_MASKED;
345
346 apic_write(APIC_LVTT, lvtt_value);
347
348 if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
349
350
351
352
353
354 asm volatile("mfence" : : : "memory");
355 return;
356 }
357
358
359
360
361 tmp_value = apic_read(APIC_TDCR);
362 apic_write(APIC_TDCR,
363 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
364 APIC_TDR_DIV_16);
365
366 if (!oneshot)
367 apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
368 }
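/*
 * Two details of __setup_APIC_LVTT() worth spelling out: the divide
 * configuration register is always programmed to divide-by-16, which is
 * what APIC_DIVISOR above accounts for when converting calibrated bus
 * clocks into TMICT counts; and switching the LVT timer into TSC-deadline
 * mode is not serialized against a subsequent WRMSR to
 * MSR_IA32_TSC_DEADLINE, hence the MFENCE before returning in that case.
 */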
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390 static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
391
392 static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
393 {
394 return (old & APIC_EILVT_MASKED)
395 || (new == APIC_EILVT_MASKED)
396 || ((new & ~APIC_EILVT_MASKED) == old);
397 }
398
399 static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
400 {
401 unsigned int rsvd, vector;
402
403 if (offset >= APIC_EILVT_NR_MAX)
404 return ~0;
405
406 rsvd = atomic_read(&eilvt_offsets[offset]);
407 do {
408 vector = rsvd & ~APIC_EILVT_MASKED;
409 if (vector && !eilvt_entry_is_changeable(vector, new))
410
411 return rsvd;
412 rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
413 } while (rsvd != new);
414
415 rsvd &= ~APIC_EILVT_MASKED;
416 if (rsvd && rsvd != vector)
417 pr_info("LVT offset %d assigned for vector 0x%02x\n",
418 offset, rsvd);
419
420 return new;
421 }
422
423
424
425
426
427
428
429 int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
430 {
431 unsigned long reg = APIC_EILVTn(offset);
432 unsigned int new, old, reserved;
433
434 new = (mask << 16) | (msg_type << 8) | vector;
435 old = apic_read(reg);
436 reserved = reserve_eilvt_offset(offset, new);
437
438 if (reserved != new) {
439 pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
440 "vector 0x%x, but the register is already in use for "
441 "vector 0x%x on another cpu\n",
442 smp_processor_id(), reg, offset, new, reserved);
443 return -EINVAL;
444 }
445
446 if (!eilvt_entry_is_changeable(old, new)) {
447 pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
448 "vector 0x%x, but the register is already in use for "
449 "vector 0x%x on this cpu\n",
450 smp_processor_id(), reg, offset, new, old);
451 return -EBUSY;
452 }
453
454 apic_write(reg, new);
455
456 return 0;
457 }
458 EXPORT_SYMBOL_GPL(setup_APIC_eilvt);
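/*
 * setup_APIC_eilvt() manages AMD's extended LVT (EILVT) entries.  Offsets
 * are reserved machine-wide through the atomic eilvt_offsets[] array so
 * that every CPU (and whatever the firmware already programmed) agrees on
 * which vector/message type owns a given offset.  As a rough illustration
 * (sketch only, see the AMD IBS perf driver for the real call site), a
 * user reserves its offset for NMI delivery with something along the
 * lines of:
 *
 *	err = setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0);
 */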
459
460
461
462
463 static int lapic_next_event(unsigned long delta,
464 struct clock_event_device *evt)
465 {
466 apic_write(APIC_TMICT, delta);
467 return 0;
468 }
469
470 static int lapic_next_deadline(unsigned long delta,
471 struct clock_event_device *evt)
472 {
473 u64 tsc;
474
475 tsc = rdtsc();
476 wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
477 return 0;
478 }
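/*
 * The deadline clockevent is registered at tsc_khz * (1000 / TSC_DIVISOR),
 * i.e. one eighth of the TSC rate (see setup_APIC_timer() below), so the
 * 32-bit mult/shift conversion cannot overflow for large deltas.  The
 * delta handed to lapic_next_deadline() is therefore in units of 8 TSC
 * cycles and is multiplied back by TSC_DIVISOR before arming the MSR.
 * Example, assuming a 2.4 GHz TSC: the device runs at 300 MHz, a 1 ms
 * delta arrives as 300000 units and the deadline is set 2400000 TSC
 * cycles (1 ms) into the future.
 */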
479
480 static int lapic_timer_shutdown(struct clock_event_device *evt)
481 {
482 unsigned int v;
483
484
485 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
486 return 0;
487
488 v = apic_read(APIC_LVTT);
489 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
490 apic_write(APIC_LVTT, v);
491 apic_write(APIC_TMICT, 0);
492 return 0;
493 }
494
495 static inline int
496 lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
497 {
498
499 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
500 return 0;
501
502 __setup_APIC_LVTT(lapic_timer_period, oneshot, 1);
503 return 0;
504 }
505
506 static int lapic_timer_set_periodic(struct clock_event_device *evt)
507 {
508 return lapic_timer_set_periodic_oneshot(evt, false);
509 }
510
511 static int lapic_timer_set_oneshot(struct clock_event_device *evt)
512 {
513 return lapic_timer_set_periodic_oneshot(evt, true);
514 }
515
516
517
518
519 static void lapic_timer_broadcast(const struct cpumask *mask)
520 {
521 #ifdef CONFIG_SMP
522 apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
523 #endif
524 }
525
526
527
528
529
530 static struct clock_event_device lapic_clockevent = {
531 .name = "lapic",
532 .features = CLOCK_EVT_FEAT_PERIODIC |
533 CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
534 | CLOCK_EVT_FEAT_DUMMY,
535 .shift = 32,
536 .set_state_shutdown = lapic_timer_shutdown,
537 .set_state_periodic = lapic_timer_set_periodic,
538 .set_state_oneshot = lapic_timer_set_oneshot,
539 .set_state_oneshot_stopped = lapic_timer_shutdown,
540 .set_next_event = lapic_next_event,
541 .broadcast = lapic_timer_broadcast,
542 .rating = 100,
543 .irq = -1,
544 };
545 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
546
547 #define DEADLINE_MODEL_MATCH_FUNC(model, func) \
548 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&func }
549
550 #define DEADLINE_MODEL_MATCH_REV(model, rev) \
551 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev }
552
553 static __init u32 hsx_deadline_rev(void)
554 {
555 switch (boot_cpu_data.x86_stepping) {
556 case 0x02: return 0x3a;
557 case 0x04: return 0x0f;
558 }
559
560 return ~0U;
561 }
562
563 static __init u32 bdx_deadline_rev(void)
564 {
565 switch (boot_cpu_data.x86_stepping) {
566 case 0x02: return 0x00000011;
567 case 0x03: return 0x0700000e;
568 case 0x04: return 0x0f00000c;
569 case 0x05: return 0x0e000003;
570 }
571
572 return ~0U;
573 }
574
575 static __init u32 skx_deadline_rev(void)
576 {
577 switch (boot_cpu_data.x86_stepping) {
578 case 0x03: return 0x01000136;
579 case 0x04: return 0x02000014;
580 }
581
582 if (boot_cpu_data.x86_stepping > 4)
583 return 0;
584
585 return ~0U;
586 }
587
588 static const struct x86_cpu_id deadline_match[] __initconst = {
589 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
590 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
591 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_D, bdx_deadline_rev),
592 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev),
593
594 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL, 0x22),
595 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_L, 0x20),
596 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_G, 0x17),
597
598 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL, 0x25),
599 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_G, 0x17),
600
601 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_L, 0xb2),
602 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE, 0xb2),
603
604 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_L, 0x52),
605 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE, 0x52),
606
607 {},
608 };
609
610 static __init bool apic_validate_deadline_timer(void)
611 {
612 const struct x86_cpu_id *m;
613 u32 rev;
614
615 if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
616 return false;
617 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
618 return true;
619
620 m = x86_match_cpu(deadline_match);
621 if (!m)
622 return true;
623
624
625
626
627
628 if ((long)m->driver_data < 0)
629 rev = ((u32 (*)(void))(m->driver_data))();
630 else
631 rev = (u32)m->driver_data;
632
633 if (boot_cpu_data.microcode >= rev)
634 return true;
635
636 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
637 pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
638 "please update microcode to version: 0x%x (or later)\n", rev);
639 return false;
640 }
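/*
 * The deadline_match[] table packs either a bare microcode revision or a
 * pointer to a per-stepping lookup function into x86_cpu_id::driver_data.
 * Kernel function addresses live in the upper half of the address space,
 * so the "(long)m->driver_data < 0" test above distinguishes the function
 * pointer case from a plain u32 revision.
 */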
641
642
643
644
645
646 static void setup_APIC_timer(void)
647 {
648 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
649
650 if (this_cpu_has(X86_FEATURE_ARAT)) {
651 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
652
653 lapic_clockevent.rating = 150;
654 }
655
656 memcpy(levt, &lapic_clockevent, sizeof(*levt));
657 levt->cpumask = cpumask_of(smp_processor_id());
658
659 if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
660 levt->name = "lapic-deadline";
661 levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
662 CLOCK_EVT_FEAT_DUMMY);
663 levt->set_next_event = lapic_next_deadline;
664 clockevents_config_and_register(levt,
665 tsc_khz * (1000 / TSC_DIVISOR),
666 0xF, ~0UL);
667 } else
668 clockevents_register_device(levt);
669 }
670
671
672
673
674
675 static void __lapic_update_tsc_freq(void *info)
676 {
677 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
678
679 if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
680 return;
681
682 clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
683 }
684
685 void lapic_update_tsc_freq(void)
686 {
687
688
689
690
691
692 on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
693 }
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716 #define LAPIC_CAL_LOOPS (HZ/10)
717
718 static __initdata int lapic_cal_loops = -1;
719 static __initdata long lapic_cal_t1, lapic_cal_t2;
720 static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
721 static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
722 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
723
724
725
726
727 static void __init lapic_cal_handler(struct clock_event_device *dev)
728 {
729 unsigned long long tsc = 0;
730 long tapic = apic_read(APIC_TMCCT);
731 unsigned long pm = acpi_pm_read_early();
732
733 if (boot_cpu_has(X86_FEATURE_TSC))
734 tsc = rdtsc();
735
736 switch (lapic_cal_loops++) {
737 case 0:
738 lapic_cal_t1 = tapic;
739 lapic_cal_tsc1 = tsc;
740 lapic_cal_pm1 = pm;
741 lapic_cal_j1 = jiffies;
742 break;
743
744 case LAPIC_CAL_LOOPS:
745 lapic_cal_t2 = tapic;
746 lapic_cal_tsc2 = tsc;
747 if (pm < lapic_cal_pm1)
748 pm += ACPI_PM_OVRRUN;
749 lapic_cal_pm2 = pm;
750 lapic_cal_j2 = jiffies;
751 break;
752 }
753 }
754
755 static int __init
756 calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
757 {
758 const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
759 const long pm_thresh = pm_100ms / 100;
760 unsigned long mult;
761 u64 res;
762
763 #ifndef CONFIG_X86_PM_TIMER
764 return -1;
765 #endif
766
767 apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
768
769
770 if (!deltapm)
771 return -1;
772
773 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
774
775 if (deltapm > (pm_100ms - pm_thresh) &&
776 deltapm < (pm_100ms + pm_thresh)) {
777 apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
778 return 0;
779 }
780
781 res = (((u64)deltapm) * mult) >> 22;
782 do_div(res, 1000000);
783 pr_warning("APIC calibration not consistent "
784 "with PM-Timer: %ldms instead of 100ms\n",(long)res);
785
786
787 res = (((u64)(*delta)) * pm_100ms);
788 do_div(res, deltapm);
789 pr_info("APIC delta adjusted to PM-Timer: "
790 "%lu (%ld)\n", (unsigned long)res, *delta);
791 *delta = (long)res;
792
793
794 if (boot_cpu_has(X86_FEATURE_TSC)) {
795 res = (((u64)(*deltatsc)) * pm_100ms);
796 do_div(res, deltapm);
797 apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
798 "PM-Timer: %lu (%ld)\n",
799 (unsigned long)res, *deltatsc);
800 *deltatsc = (long)res;
801 }
802
803 return 0;
804 }
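/*
 * The ACPI PM timer ticks at PMTMR_TICKS_PER_SEC (3579545 Hz), so a clean
 * 100 ms calibration window corresponds to roughly 357954 PM ticks;
 * pm_thresh is 1% of that.  If the measured window deviates by more than
 * 1%, the APIC (and TSC) deltas are rescaled to what a true 100 ms
 * interval would have produced: delta = delta * pm_100ms / deltapm.
 */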
805
806 static int __init lapic_init_clockevent(void)
807 {
808 if (!lapic_timer_period)
809 return -1;
810
811
812 lapic_clockevent.mult = div_sc(lapic_timer_period/APIC_DIVISOR,
813 TICK_NSEC, lapic_clockevent.shift);
814 lapic_clockevent.max_delta_ns =
815 clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
816 lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
817 lapic_clockevent.min_delta_ns =
818 clockevent_delta2ns(0xF, &lapic_clockevent);
819 lapic_clockevent.min_delta_ticks = 0xF;
820
821 return 0;
822 }
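/*
 * lapic_init_clockevent() encodes the ns -> timer-tick conversion: the
 * timer register counts at the calibrated bus rate divided by 16
 * (APIC_DIVISOR), so mult is derived from lapic_timer_period/APIC_DIVISOR
 * ticks per jiffy (TICK_NSEC) with the fixed shift of 32.  The delta
 * limits are clamped to 0x7FFFFFFF and 0xF ticks, keeping programmed
 * values comfortably inside the 32-bit TMICT count-down register.
 */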
823
824 bool __init apic_needs_pit(void)
825 {
826
827
828
829
830 if (!tsc_khz || !cpu_khz)
831 return true;
832
833
834 if (!boot_cpu_has(X86_FEATURE_APIC) || disable_apic)
835 return true;
836
837
838
839
840
841
842 if (apic_intr_mode == APIC_PIC ||
843 apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG)
844 return true;
845
846
847 if (!boot_cpu_has(X86_FEATURE_ARAT))
848 return true;
849
850
851 if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
852 return false;
853
854
855 if (disable_apic_timer)
856 return true;
857
858
859
860
861 return lapic_timer_period == 0;
862 }
863
864 static int __init calibrate_APIC_clock(void)
865 {
866 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
867 u64 tsc_perj = 0, tsc_start = 0;
868 unsigned long jif_start;
869 unsigned long deltaj;
870 long delta, deltatsc;
871 int pm_referenced = 0;
872
873 if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
874 return 0;
875
876
877
878
879
880
881 if (!lapic_init_clockevent()) {
882 apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
883 lapic_timer_period);
884
885
886
887
888 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
889 return 0;
890 }
891
892 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
893 "calibrating APIC timer ...\n");
894
895
896
897
898
899
900 local_irq_disable();
901
902
903
904
905
906 __setup_APIC_LVTT(0xffffffff, 0, 0);
907
908
909
910
911
912
913 jif_start = READ_ONCE(jiffies);
914
915 if (tsc_khz) {
916 tsc_start = rdtsc();
917 tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
918 }
919
920
921
922
923
924 local_irq_enable();
925
926 while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
927
928 while (1) {
929 if (tsc_khz) {
930 u64 tsc_now = rdtsc();
931 if ((tsc_now - tsc_start) >= tsc_perj) {
932 tsc_start += tsc_perj;
933 break;
934 }
935 } else {
936 unsigned long jif_now = READ_ONCE(jiffies);
937
938 if (time_after(jif_now, jif_start)) {
939 jif_start = jif_now;
940 break;
941 }
942 }
943 cpu_relax();
944 }
945
946
947 local_irq_disable();
948 lapic_cal_handler(NULL);
949 local_irq_enable();
950 }
951
952 local_irq_disable();
953
954
955 delta = lapic_cal_t1 - lapic_cal_t2;
956 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
957
958 deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
959
960
961 pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
962 &delta, &deltatsc);
963
964 lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
965 lapic_init_clockevent();
966
967 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
968 apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
969 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
970 lapic_timer_period);
971
972 if (boot_cpu_has(X86_FEATURE_TSC)) {
973 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
974 "%ld.%04ld MHz.\n",
975 (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
976 (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
977 }
978
979 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
980 "%u.%04u MHz.\n",
981 lapic_timer_period / (1000000 / HZ),
982 lapic_timer_period % (1000000 / HZ));
983
984
985
986
987 if (lapic_timer_period < (1000000 / HZ)) {
988 local_irq_enable();
989 pr_warning("APIC frequency too slow, disabling apic timer\n");
990 return -1;
991 }
992
993 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
994
995
996
997
998
999
1000 if (!pm_referenced && global_clock_event) {
1001 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
1002
1003
1004
1005
1006 levt->event_handler = lapic_cal_handler;
1007 lapic_timer_set_periodic(levt);
1008 lapic_cal_loops = -1;
1009
1010
1011 local_irq_enable();
1012
1013 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
1014 cpu_relax();
1015
1016
1017 local_irq_disable();
1018 lapic_timer_shutdown(levt);
1019
1020
1021 deltaj = lapic_cal_j2 - lapic_cal_j1;
1022 apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
1023
1024
1025 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
1026 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
1027 else
1028 levt->features |= CLOCK_EVT_FEAT_DUMMY;
1029 }
1030 local_irq_enable();
1031
1032 if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
1033 pr_warning("APIC timer disabled due to verification failure\n");
1034 return -1;
1035 }
1036
1037 return 0;
1038 }
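/*
 * Calibration in a nutshell: the LVT timer is loaded with a maximal
 * initial count and masked, then the loop above waits for LAPIC_CAL_LOOPS
 * (HZ/10, i.e. 100 ms worth of) tick periods using either the TSC or
 * jiffies as reference, sampling TMCCT/TSC/PM-timer in lapic_cal_handler()
 * at the first and last iteration.  The drop in the current-count register
 * times APIC_DIVISOR, divided by the number of loops, gives bus clocks per
 * jiffy.  Assuming, say, a 100 MHz bus and HZ=1000: 100 ms is 10,000,000
 * bus clocks, TMCCT drops by about 625,000, and lapic_timer_period becomes
 * 625000 * 16 / 100 = 100,000 clocks per tick.
 */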
1039
1040
1041
1042
1043
1044
1045 void __init setup_boot_APIC_clock(void)
1046 {
1047
1048
1049
1050
1051
1052
1053 if (disable_apic_timer) {
1054 pr_info("Disabling APIC timer\n");
1055
1056 if (num_possible_cpus() > 1) {
1057 lapic_clockevent.mult = 1;
1058 setup_APIC_timer();
1059 }
1060 return;
1061 }
1062
1063 if (calibrate_APIC_clock()) {
1064
1065 if (num_possible_cpus() > 1)
1066 setup_APIC_timer();
1067 return;
1068 }
1069
1070
1071
1072
1073
1074
1075 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
1076
1077
1078 setup_APIC_timer();
1079 amd_e400_c1e_apic_setup();
1080 }
1081
1082 void setup_secondary_APIC_clock(void)
1083 {
1084 setup_APIC_timer();
1085 amd_e400_c1e_apic_setup();
1086 }
1087
1088
1089
1090
1091 static void local_apic_timer_interrupt(void)
1092 {
1093 struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106 if (!evt->event_handler) {
1107 pr_warning("Spurious LAPIC timer interrupt on cpu %d\n",
1108 smp_processor_id());
1109
1110 lapic_timer_shutdown(evt);
1111 return;
1112 }
1113
1114
1115
1116
1117 inc_irq_stat(apic_timer_irqs);
1118
1119 evt->event_handler(evt);
1120 }
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130 __visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
1131 {
1132 struct pt_regs *old_regs = set_irq_regs(regs);
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142 entering_ack_irq();
1143 trace_local_timer_entry(LOCAL_TIMER_VECTOR);
1144 local_apic_timer_interrupt();
1145 trace_local_timer_exit(LOCAL_TIMER_VECTOR);
1146 exiting_irq();
1147
1148 set_irq_regs(old_regs);
1149 }
1150
1151 int setup_profiling_timer(unsigned int multiplier)
1152 {
1153 return -EINVAL;
1154 }
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167 void clear_local_APIC(void)
1168 {
1169 int maxlvt;
1170 u32 v;
1171
1172
1173 if (!x2apic_mode && !apic_phys)
1174 return;
1175
1176 maxlvt = lapic_get_maxlvt();
1177
1178
1179
1180
1181 if (maxlvt >= 3) {
1182 v = ERROR_APIC_VECTOR;
1183 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
1184 }
1185
1186
1187
1188
1189 v = apic_read(APIC_LVTT);
1190 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
1191 v = apic_read(APIC_LVT0);
1192 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1193 v = apic_read(APIC_LVT1);
1194 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
1195 if (maxlvt >= 4) {
1196 v = apic_read(APIC_LVTPC);
1197 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
1198 }
1199
1200
1201 #ifdef CONFIG_X86_THERMAL_VECTOR
1202 if (maxlvt >= 5) {
1203 v = apic_read(APIC_LVTTHMR);
1204 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
1205 }
1206 #endif
1207 #ifdef CONFIG_X86_MCE_INTEL
1208 if (maxlvt >= 6) {
1209 v = apic_read(APIC_LVTCMCI);
1210 if (!(v & APIC_LVT_MASKED))
1211 apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
1212 }
1213 #endif
1214
1215
1216
1217
1218 apic_write(APIC_LVTT, APIC_LVT_MASKED);
1219 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1220 apic_write(APIC_LVT1, APIC_LVT_MASKED);
1221 if (maxlvt >= 3)
1222 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
1223 if (maxlvt >= 4)
1224 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
1225
1226
1227 if (lapic_is_integrated()) {
1228 if (maxlvt > 3)
1229
1230 apic_write(APIC_ESR, 0);
1231 apic_read(APIC_ESR);
1232 }
1233 }
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246 void apic_soft_disable(void)
1247 {
1248 u32 value;
1249
1250 clear_local_APIC();
1251
1252
1253 value = apic_read(APIC_SPIV);
1254 value &= ~APIC_SPIV_APIC_ENABLED;
1255 apic_write(APIC_SPIV, value);
1256 }
1257
1258
1259
1260
1261 void disable_local_APIC(void)
1262 {
1263
1264 if (!x2apic_mode && !apic_phys)
1265 return;
1266
1267 apic_soft_disable();
1268
1269 #ifdef CONFIG_X86_32
1270
1271
1272
1273
1274 if (enabled_via_apicbase) {
1275 unsigned int l, h;
1276
1277 rdmsr(MSR_IA32_APICBASE, l, h);
1278 l &= ~MSR_IA32_APICBASE_ENABLE;
1279 wrmsr(MSR_IA32_APICBASE, l, h);
1280 }
1281 #endif
1282 }
1283
1284
1285
1286
1287
1288
1289
1290 void lapic_shutdown(void)
1291 {
1292 unsigned long flags;
1293
1294 if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
1295 return;
1296
1297 local_irq_save(flags);
1298
1299 #ifdef CONFIG_X86_32
1300 if (!enabled_via_apicbase)
1301 clear_local_APIC();
1302 else
1303 #endif
1304 disable_local_APIC();
1305
1306
1307 local_irq_restore(flags);
1308 }
1309
1310
1311
1312
1313 void __init sync_Arb_IDs(void)
1314 {
1315
1316
1317
1318
1319 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1320 return;
1321
1322
1323
1324
1325 apic_wait_icr_idle();
1326
1327 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
1328 apic_write(APIC_ICR, APIC_DEST_ALLINC |
1329 APIC_INT_LEVELTRIG | APIC_DM_INIT);
1330 }
1331
1332 enum apic_intr_mode_id apic_intr_mode __ro_after_init;
1333
1334 static int __init __apic_intr_mode_select(void)
1335 {
1336
1337 if (disable_apic) {
1338 pr_info("APIC disabled via kernel command line\n");
1339 return APIC_PIC;
1340 }
1341
1342
1343 #ifdef CONFIG_X86_64
1344
1345 if (!boot_cpu_has(X86_FEATURE_APIC)) {
1346 disable_apic = 1;
1347 pr_info("APIC disabled by BIOS\n");
1348 return APIC_PIC;
1349 }
1350 #else
1351
1352
1353
1354 if (!boot_cpu_has(X86_FEATURE_APIC) && !smp_found_config) {
1355 disable_apic = 1;
1356 return APIC_PIC;
1357 }
1358
1359
1360 if (!boot_cpu_has(X86_FEATURE_APIC) &&
1361 APIC_INTEGRATED(boot_cpu_apic_version)) {
1362 disable_apic = 1;
1363 pr_err(FW_BUG "Local APIC %d not detected, force emulation\n",
1364 boot_cpu_physical_apicid);
1365 return APIC_PIC;
1366 }
1367 #endif
1368
1369
1370 if (!smp_found_config) {
1371 disable_ioapic_support();
1372 if (!acpi_lapic) {
1373 pr_info("APIC: ACPI MADT or MP tables are not detected\n");
1374 return APIC_VIRTUAL_WIRE_NO_CONFIG;
1375 }
1376 return APIC_VIRTUAL_WIRE;
1377 }
1378
1379 #ifdef CONFIG_SMP
1380
1381 if (!setup_max_cpus) {
1382 pr_info("APIC: SMP mode deactivated\n");
1383 return APIC_SYMMETRIC_IO_NO_ROUTING;
1384 }
1385
1386 if (read_apic_id() != boot_cpu_physical_apicid) {
1387 panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
1388 read_apic_id(), boot_cpu_physical_apicid);
1389
1390 }
1391 #endif
1392
1393 return APIC_SYMMETRIC_IO;
1394 }
1395
1396
1397 void __init apic_intr_mode_select(void)
1398 {
1399 apic_intr_mode = __apic_intr_mode_select();
1400 }
1401
1402
1403
1404
1405 void __init init_bsp_APIC(void)
1406 {
1407 unsigned int value;
1408
1409
1410
1411
1412
1413 if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
1414 return;
1415
1416
1417
1418
1419 clear_local_APIC();
1420
1421
1422
1423
1424 value = apic_read(APIC_SPIV);
1425 value &= ~APIC_VECTOR_MASK;
1426 value |= APIC_SPIV_APIC_ENABLED;
1427
1428 #ifdef CONFIG_X86_32
1429
1430 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
1431 (boot_cpu_data.x86 == 15))
1432 value &= ~APIC_SPIV_FOCUS_DISABLED;
1433 else
1434 #endif
1435 value |= APIC_SPIV_FOCUS_DISABLED;
1436 value |= SPURIOUS_APIC_VECTOR;
1437 apic_write(APIC_SPIV, value);
1438
1439
1440
1441
1442 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1443 value = APIC_DM_NMI;
1444 if (!lapic_is_integrated())
1445 value |= APIC_LVT_LEVEL_TRIGGER;
1446 if (apic_extnmi == APIC_EXTNMI_NONE)
1447 value |= APIC_LVT_MASKED;
1448 apic_write(APIC_LVT1, value);
1449 }
1450
1451 static void __init apic_bsp_setup(bool upmode);
1452
1453
1454 void __init apic_intr_mode_init(void)
1455 {
1456 bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);
1457
1458 switch (apic_intr_mode) {
1459 case APIC_PIC:
1460 pr_info("APIC: Keep in PIC mode(8259)\n");
1461 return;
1462 case APIC_VIRTUAL_WIRE:
1463 pr_info("APIC: Switch to virtual wire mode setup\n");
1464 default_setup_apic_routing();
1465 break;
1466 case APIC_VIRTUAL_WIRE_NO_CONFIG:
1467 pr_info("APIC: Switch to virtual wire mode setup with no configuration\n");
1468 upmode = true;
1469 default_setup_apic_routing();
1470 break;
1471 case APIC_SYMMETRIC_IO:
1472 pr_info("APIC: Switch to symmetric I/O mode setup\n");
1473 default_setup_apic_routing();
1474 break;
1475 case APIC_SYMMETRIC_IO_NO_ROUTING:
1476 pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
1477 break;
1478 }
1479
1480 apic_bsp_setup(upmode);
1481 }
1482
1483 static void lapic_setup_esr(void)
1484 {
1485 unsigned int oldvalue, value, maxlvt;
1486
1487 if (!lapic_is_integrated()) {
1488 pr_info("No ESR for 82489DX.\n");
1489 return;
1490 }
1491
1492 if (apic->disable_esr) {
1493
1494
1495
1496
1497
1498
1499 pr_info("Leaving ESR disabled.\n");
1500 return;
1501 }
1502
1503 maxlvt = lapic_get_maxlvt();
1504 if (maxlvt > 3)
1505 apic_write(APIC_ESR, 0);
1506 oldvalue = apic_read(APIC_ESR);
1507
1508
1509 value = ERROR_APIC_VECTOR;
1510 apic_write(APIC_LVTERR, value);
1511
1512
1513
1514
1515 if (maxlvt > 3)
1516 apic_write(APIC_ESR, 0);
1517 value = apic_read(APIC_ESR);
1518 if (value != oldvalue)
1519 apic_printk(APIC_VERBOSE, "ESR value before enabling "
1520 "vector: 0x%08x after: 0x%08x\n",
1521 oldvalue, value);
1522 }
1523
1524 #define APIC_IR_REGS APIC_ISR_NR
1525 #define APIC_IR_BITS (APIC_IR_REGS * 32)
1526 #define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG)
1527
1528 union apic_ir {
1529 unsigned long map[APIC_IR_MAPSIZE];
1530 u32 regs[APIC_IR_REGS];
1531 };
1532
1533 static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
1534 {
1535 int i, bit;
1536
1537
1538 for (i = 0; i < APIC_IR_REGS; i++)
1539 irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
1540
1541
1542 for (i = 0; i < APIC_IR_REGS; i++)
1543 isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
1544
1545
1546
1547
1548
1549
1550 if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
1551
1552
1553
1554
1555
1556 for_each_set_bit(bit, isr->map, APIC_IR_BITS)
1557 ack_APIC_irq();
1558 return true;
1559 }
1560
1561 return !bitmap_empty(irr->map, APIC_IR_BITS);
1562 }
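/*
 * IRR and ISR are each 256-bit registers exposed as eight 32-bit words
 * spaced 0x10 apart in the register page, hence the "+ i * 0x10" reads
 * above.  A set ISR bit means an interrupt was dispatched but never
 * EOIed (e.g. by a crashed kernel in a kdump scenario), so one EOI is
 * issued per set bit before reporting whether IRR still has work pending.
 */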
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578 static void apic_pending_intr_clear(void)
1579 {
1580 union apic_ir irr, isr;
1581 unsigned int i;
1582
1583
1584 for (i = 0; i < 512; i++) {
1585 if (!apic_check_and_ack(&irr, &isr))
1586 return;
1587 }
1588
1589 pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
1590 }
1591
1592
1593
1594
1595
1596
1597
1598 static void setup_local_APIC(void)
1599 {
1600 int cpu = smp_processor_id();
1601 unsigned int value;
1602
1603 if (disable_apic) {
1604 disable_ioapic_support();
1605 return;
1606 }
1607
1608
1609
1610
1611
1612 value = apic_read(APIC_SPIV);
1613 value &= ~APIC_SPIV_APIC_ENABLED;
1614 apic_write(APIC_SPIV, value);
1615
1616 #ifdef CONFIG_X86_32
1617
1618 if (lapic_is_integrated() && apic->disable_esr) {
1619 apic_write(APIC_ESR, 0);
1620 apic_write(APIC_ESR, 0);
1621 apic_write(APIC_ESR, 0);
1622 apic_write(APIC_ESR, 0);
1623 }
1624 #endif
1625
1626
1627
1628
1629 BUG_ON(!apic->apic_id_registered());
1630
1631
1632
1633
1634
1635
1636 apic->init_apic_ldr();
1637
1638 #ifdef CONFIG_X86_32
1639 if (apic->dest_logical) {
1640 int logical_apicid, ldr_apicid;
1641
1642
1643
1644
1645
1646
1647 logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
1648 ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
1649 if (logical_apicid != BAD_APICID)
1650 WARN_ON(logical_apicid != ldr_apicid);
1651
1652 early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
1653 }
1654 #endif
1655
1656
1657
1658
1659
1660
1661
1662 value = apic_read(APIC_TASKPRI);
1663 value &= ~APIC_TPRI_MASK;
1664 value |= 0x10;
1665 apic_write(APIC_TASKPRI, value);
1666
1667
1668 apic_pending_intr_clear();
1669
1670
1671
1672
1673 value = apic_read(APIC_SPIV);
1674 value &= ~APIC_VECTOR_MASK;
1675
1676
1677
1678 value |= APIC_SPIV_APIC_ENABLED;
1679
1680 #ifdef CONFIG_X86_32
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705 value &= ~APIC_SPIV_FOCUS_DISABLED;
1706 #endif
1707
1708
1709
1710
1711 value |= SPURIOUS_APIC_VECTOR;
1712 apic_write(APIC_SPIV, value);
1713
1714 perf_events_lapic_init();
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1727 if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
1728 value = APIC_DM_EXTINT;
1729 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1730 } else {
1731 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1732 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
1733 }
1734 apic_write(APIC_LVT0, value);
1735
1736
1737
1738
1739
1740 if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
1741 apic_extnmi == APIC_EXTNMI_ALL)
1742 value = APIC_DM_NMI;
1743 else
1744 value = APIC_DM_NMI | APIC_LVT_MASKED;
1745
1746
1747 if (!lapic_is_integrated())
1748 value |= APIC_LVT_LEVEL_TRIGGER;
1749 apic_write(APIC_LVT1, value);
1750
1751 #ifdef CONFIG_X86_MCE_INTEL
1752
1753 if (!cpu)
1754 cmci_recheck();
1755 #endif
1756 }
1757
1758 static void end_local_APIC_setup(void)
1759 {
1760 lapic_setup_esr();
1761
1762 #ifdef CONFIG_X86_32
1763 {
1764 unsigned int value;
1765
1766 value = apic_read(APIC_LVTT);
1767 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1768 apic_write(APIC_LVTT, value);
1769 }
1770 #endif
1771
1772 apic_pm_activate();
1773 }
1774
1775
1776
1777
1778 void apic_ap_setup(void)
1779 {
1780 setup_local_APIC();
1781 end_local_APIC_setup();
1782 }
1783
1784 #ifdef CONFIG_X86_X2APIC
1785 int x2apic_mode;
1786
1787 enum {
1788 X2APIC_OFF,
1789 X2APIC_ON,
1790 X2APIC_DISABLED,
1791 };
1792 static int x2apic_state;
1793
1794 static void __x2apic_disable(void)
1795 {
1796 u64 msr;
1797
1798 if (!boot_cpu_has(X86_FEATURE_APIC))
1799 return;
1800
1801 rdmsrl(MSR_IA32_APICBASE, msr);
1802 if (!(msr & X2APIC_ENABLE))
1803 return;
1804
1805 wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1806 wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1807 printk_once(KERN_INFO "x2apic disabled\n");
1808 }
1809
1810 static void __x2apic_enable(void)
1811 {
1812 u64 msr;
1813
1814 rdmsrl(MSR_IA32_APICBASE, msr);
1815 if (msr & X2APIC_ENABLE)
1816 return;
1817 wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
1818 printk_once(KERN_INFO "x2apic enabled\n");
1819 }
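/*
 * x2APIC is controlled by the EXTD bit (X2APIC_ENABLE) in
 * MSR_IA32_APICBASE.  The architecture does not allow a direct
 * x2APIC -> xAPIC transition, so __x2apic_disable() first clears both
 * EXTD and the global enable bit (XAPIC_ENABLE) and then re-enables the
 * APIC with EXTD clear -- that is the purpose of the two back-to-back
 * wrmsrl() calls above.
 */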
1820
1821 static int __init setup_nox2apic(char *str)
1822 {
1823 if (x2apic_enabled()) {
1824 int apicid = native_apic_msr_read(APIC_ID);
1825
1826 if (apicid >= 255) {
1827 pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
1828 apicid);
1829 return 0;
1830 }
1831 pr_warning("x2apic already enabled.\n");
1832 __x2apic_disable();
1833 }
1834 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
1835 x2apic_state = X2APIC_DISABLED;
1836 x2apic_mode = 0;
1837 return 0;
1838 }
1839 early_param("nox2apic", setup_nox2apic);
1840
1841
1842 void x2apic_setup(void)
1843 {
1844
1845
1846
1847
1848 if (x2apic_state != X2APIC_ON) {
1849 __x2apic_disable();
1850 return;
1851 }
1852 __x2apic_enable();
1853 }
1854
1855 static __init void x2apic_disable(void)
1856 {
1857 u32 x2apic_id, state = x2apic_state;
1858
1859 x2apic_mode = 0;
1860 x2apic_state = X2APIC_DISABLED;
1861
1862 if (state != X2APIC_ON)
1863 return;
1864
1865 x2apic_id = read_apic_id();
1866 if (x2apic_id >= 255)
1867 panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
1868
1869 __x2apic_disable();
1870 register_lapic_address(mp_lapic_addr);
1871 }
1872
1873 static __init void x2apic_enable(void)
1874 {
1875 if (x2apic_state != X2APIC_OFF)
1876 return;
1877
1878 x2apic_mode = 1;
1879 x2apic_state = X2APIC_ON;
1880 __x2apic_enable();
1881 }
1882
1883 static __init void try_to_enable_x2apic(int remap_mode)
1884 {
1885 if (x2apic_state == X2APIC_DISABLED)
1886 return;
1887
1888 if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
1889
1890
1891
1892 if (max_physical_apicid > 255 ||
1893 !x86_init.hyper.x2apic_available()) {
1894 pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
1895 x2apic_disable();
1896 return;
1897 }
1898
1899
1900
1901
1902
1903 x2apic_phys = 1;
1904 }
1905 x2apic_enable();
1906 }
1907
1908 void __init check_x2apic(void)
1909 {
1910 if (x2apic_enabled()) {
1911 pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
1912 x2apic_mode = 1;
1913 x2apic_state = X2APIC_ON;
1914 } else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
1915 x2apic_state = X2APIC_DISABLED;
1916 }
1917 }
1918 #else
1919 static int __init validate_x2apic(void)
1920 {
1921 if (!apic_is_x2apic_enabled())
1922 return 0;
1923
1924
1925
1926 panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
1927 }
1928 early_initcall(validate_x2apic);
1929
1930 static inline void try_to_enable_x2apic(int remap_mode) { }
1931 static inline void __x2apic_enable(void) { }
1932 #endif
1933
1934 void __init enable_IR_x2apic(void)
1935 {
1936 unsigned long flags;
1937 int ret, ir_stat;
1938
1939 if (skip_ioapic_setup) {
1940 pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
1941 return;
1942 }
1943
1944 ir_stat = irq_remapping_prepare();
1945 if (ir_stat < 0 && !x2apic_supported())
1946 return;
1947
1948 ret = save_ioapic_entries();
1949 if (ret) {
1950 pr_info("Saving IO-APIC state failed: %d\n", ret);
1951 return;
1952 }
1953
1954 local_irq_save(flags);
1955 legacy_pic->mask_all();
1956 mask_ioapic_entries();
1957
1958
1959 if (ir_stat >= 0)
1960 ir_stat = irq_remapping_enable();
1961
1962 try_to_enable_x2apic(ir_stat);
1963
1964 if (ir_stat < 0)
1965 restore_ioapic_entries();
1966 legacy_pic->restore_mask();
1967 local_irq_restore(flags);
1968 }
1969
1970 #ifdef CONFIG_X86_64
1971
1972
1973
1974
1975
1976
1977 static int __init detect_init_APIC(void)
1978 {
1979 if (!boot_cpu_has(X86_FEATURE_APIC)) {
1980 pr_info("No local APIC present\n");
1981 return -1;
1982 }
1983
1984 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1985 return 0;
1986 }
1987 #else
1988
1989 static int __init apic_verify(void)
1990 {
1991 u32 features, h, l;
1992
1993
1994
1995
1996
1997 features = cpuid_edx(1);
1998 if (!(features & (1 << X86_FEATURE_APIC))) {
1999 pr_warning("Could not enable APIC!\n");
2000 return -1;
2001 }
2002 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
2003 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
2004
2005
2006 if (boot_cpu_data.x86 >= 6) {
2007 rdmsr(MSR_IA32_APICBASE, l, h);
2008 if (l & MSR_IA32_APICBASE_ENABLE)
2009 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
2010 }
2011
2012 pr_info("Found and enabled local APIC!\n");
2013 return 0;
2014 }
2015
2016 int __init apic_force_enable(unsigned long addr)
2017 {
2018 u32 h, l;
2019
2020 if (disable_apic)
2021 return -1;
2022
2023
2024
2025
2026
2027
2028 if (boot_cpu_data.x86 >= 6) {
2029 rdmsr(MSR_IA32_APICBASE, l, h);
2030 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
2031 pr_info("Local APIC disabled by BIOS -- reenabling.\n");
2032 l &= ~MSR_IA32_APICBASE_BASE;
2033 l |= MSR_IA32_APICBASE_ENABLE | addr;
2034 wrmsr(MSR_IA32_APICBASE, l, h);
2035 enabled_via_apicbase = 1;
2036 }
2037 }
2038 return apic_verify();
2039 }
2040
2041
2042
2043
2044 static int __init detect_init_APIC(void)
2045 {
2046
2047 if (disable_apic)
2048 return -1;
2049
2050 switch (boot_cpu_data.x86_vendor) {
2051 case X86_VENDOR_AMD:
2052 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
2053 (boot_cpu_data.x86 >= 15))
2054 break;
2055 goto no_apic;
2056 case X86_VENDOR_HYGON:
2057 break;
2058 case X86_VENDOR_INTEL:
2059 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
2060 (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
2061 break;
2062 goto no_apic;
2063 default:
2064 goto no_apic;
2065 }
2066
2067 if (!boot_cpu_has(X86_FEATURE_APIC)) {
2068
2069
2070
2071
2072 if (!force_enable_local_apic) {
2073 pr_info("Local APIC disabled by BIOS -- "
2074 "you can enable it with \"lapic\"\n");
2075 return -1;
2076 }
2077 if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
2078 return -1;
2079 } else {
2080 if (apic_verify())
2081 return -1;
2082 }
2083
2084 apic_pm_activate();
2085
2086 return 0;
2087
2088 no_apic:
2089 pr_info("No local APIC present or hardware disabled\n");
2090 return -1;
2091 }
2092 #endif
2093
2094
2095
2096
2097 void __init init_apic_mappings(void)
2098 {
2099 unsigned int new_apicid;
2100
2101 if (apic_validate_deadline_timer())
2102 pr_debug("TSC deadline timer available\n");
2103
2104 if (x2apic_mode) {
2105 boot_cpu_physical_apicid = read_apic_id();
2106 return;
2107 }
2108
2109
2110 if (!smp_found_config && detect_init_APIC()) {
2111
2112 pr_info("APIC: disable apic facility\n");
2113 apic_disable();
2114 } else {
2115 apic_phys = mp_lapic_addr;
2116
2117
2118
2119
2120
2121 if (!acpi_lapic && !smp_found_config)
2122 register_lapic_address(apic_phys);
2123 }
2124
2125
2126
2127
2128
2129 new_apicid = read_apic_id();
2130 if (boot_cpu_physical_apicid != new_apicid) {
2131 boot_cpu_physical_apicid = new_apicid;
2132
2133
2134
2135
2136
2137
2138
2139 boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
2140 }
2141 }
2142
2143 void __init register_lapic_address(unsigned long address)
2144 {
2145 mp_lapic_addr = address;
2146
2147 if (!x2apic_mode) {
2148 set_fixmap_nocache(FIX_APIC_BASE, address);
2149 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
2150 APIC_BASE, address);
2151 }
2152 if (boot_cpu_physical_apicid == -1U) {
2153 boot_cpu_physical_apicid = read_apic_id();
2154 boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
2155 }
2156 }
2157
2158
2159
2160
2161
2162
2163
2164
2165 __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
2166 {
2167 u8 vector = ~regs->orig_ax;
2168 u32 v;
2169
2170 entering_irq();
2171 trace_spurious_apic_entry(vector);
2172
2173 inc_irq_stat(irq_spurious_count);
2174
2175
2176
2177
2178 if (vector == SPURIOUS_APIC_VECTOR) {
2179
2180 pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
2181 smp_processor_id());
2182 goto out;
2183 }
2184
2185
2186
2187
2188
2189 v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
2190 if (v & (1 << (vector & 0x1f))) {
2191 pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
2192 vector, smp_processor_id());
2193 ack_APIC_irq();
2194 } else {
2195 pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
2196 vector, smp_processor_id());
2197 }
2198 out:
2199 trace_spurious_apic_exit(vector);
2200 exiting_irq();
2201 }
2202
2203
2204
2205
2206 __visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
2207 {
2208 static const char * const error_interrupt_reason[] = {
2209 "Send CS error",
2210 "Receive CS error",
2211 "Send accept error",
2212 "Receive accept error",
2213 "Redirectable IPI",
2214 "Send illegal vector",
2215 "Received illegal vector",
2216 "Illegal register address",
2217 };
2218 u32 v, i = 0;
2219
2220 entering_irq();
2221 trace_error_apic_entry(ERROR_APIC_VECTOR);
2222
2223
2224 if (lapic_get_maxlvt() > 3)
2225 apic_write(APIC_ESR, 0);
2226 v = apic_read(APIC_ESR);
2227 ack_APIC_irq();
2228 atomic_inc(&irq_err_count);
2229
2230 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
2231 smp_processor_id(), v);
2232
2233 v &= 0xff;
2234 while (v) {
2235 if (v & 0x1)
2236 apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
2237 i++;
2238 v >>= 1;
2239 }
2240
2241 apic_printk(APIC_DEBUG, KERN_CONT "\n");
2242
2243 trace_error_apic_exit(ERROR_APIC_VECTOR);
2244 exiting_irq();
2245 }
2246
2247
2248
2249
2250 static void __init connect_bsp_APIC(void)
2251 {
2252 #ifdef CONFIG_X86_32
2253 if (pic_mode) {
2254
2255
2256
2257 clear_local_APIC();
2258
2259
2260
2261
2262 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
2263 "enabling APIC mode.\n");
2264 imcr_pic_to_apic();
2265 }
2266 #endif
2267 }
2268
2269
2270
2271
2272
2273
2274
2275
2276 void disconnect_bsp_APIC(int virt_wire_setup)
2277 {
2278 unsigned int value;
2279
2280 #ifdef CONFIG_X86_32
2281 if (pic_mode) {
2282
2283
2284
2285
2286
2287
2288 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
2289 "entering PIC mode.\n");
2290 imcr_apic_to_pic();
2291 return;
2292 }
2293 #endif
2294
2295
2296
2297
2298 value = apic_read(APIC_SPIV);
2299 value &= ~APIC_VECTOR_MASK;
2300 value |= APIC_SPIV_APIC_ENABLED;
2301 value |= 0xf;
2302 apic_write(APIC_SPIV, value);
2303
2304 if (!virt_wire_setup) {
2305
2306
2307
2308
2309 value = apic_read(APIC_LVT0);
2310 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2311 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2312 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2313 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2314 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
2315 apic_write(APIC_LVT0, value);
2316 } else {
2317
2318 apic_write(APIC_LVT0, APIC_LVT_MASKED);
2319 }
2320
2321
2322
2323
2324
2325 value = apic_read(APIC_LVT1);
2326 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2327 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2328 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2329 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2330 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
2331 apic_write(APIC_LVT1, value);
2332 }
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342 static int nr_logical_cpuids = 1;
2343
2344
2345
2346
2347 static int cpuid_to_apicid[] = {
2348 [0 ... NR_CPUS - 1] = -1,
2349 };
2350
2351 #ifdef CONFIG_SMP
2352
2353
2354
2355
2356 bool apic_id_is_primary_thread(unsigned int apicid)
2357 {
2358 u32 mask;
2359
2360 if (smp_num_siblings == 1)
2361 return true;
2362
2363 mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
2364 return !(apicid & mask);
2365 }
2366 #endif
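/*
 * apic_id_is_primary_thread(): with SMT, the thread index within a core
 * occupies the low bits of the APIC ID covered by the computed mask
 * (one bit for 2-way SMT, two bits for 4-way), so an APIC ID with those
 * bits clear is the first -- primary -- sibling.  On a 2-way SMT system
 * this means even APIC IDs are primary threads.
 */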
2367
2368
2369
2370
2371
2372 static int allocate_logical_cpuid(int apicid)
2373 {
2374 int i;
2375
2376
2377
2378
2379
2380 for (i = 0; i < nr_logical_cpuids; i++) {
2381 if (cpuid_to_apicid[i] == apicid)
2382 return i;
2383 }
2384
2385
2386 if (nr_logical_cpuids >= nr_cpu_ids) {
2387 WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. "
2388 "Processor %d/0x%x and the rest are ignored.\n",
2389 nr_cpu_ids, nr_logical_cpuids, apicid);
2390 return -EINVAL;
2391 }
2392
2393 cpuid_to_apicid[nr_logical_cpuids] = apicid;
2394 return nr_logical_cpuids++;
2395 }
2396
2397 int generic_processor_info(int apicid, int version)
2398 {
2399 int cpu, max = nr_cpu_ids;
2400 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
2401 phys_cpu_present_map);
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422 if (disabled_cpu_apicid != BAD_APICID &&
2423 disabled_cpu_apicid != read_apic_id() &&
2424 disabled_cpu_apicid == apicid) {
2425 int thiscpu = num_processors + disabled_cpus;
2426
2427 pr_warning("APIC: Disabling requested cpu."
2428 " Processor %d/0x%x ignored.\n",
2429 thiscpu, apicid);
2430
2431 disabled_cpus++;
2432 return -ENODEV;
2433 }
2434
2435
2436
2437
2438
2439 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
2440 apicid != boot_cpu_physical_apicid) {
2441 int thiscpu = max + disabled_cpus - 1;
2442
2443 pr_warning(
2444 "APIC: NR_CPUS/possible_cpus limit of %i almost"
2445 " reached. Keeping one slot for boot cpu."
2446 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2447
2448 disabled_cpus++;
2449 return -ENODEV;
2450 }
2451
2452 if (num_processors >= nr_cpu_ids) {
2453 int thiscpu = max + disabled_cpus;
2454
2455 pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
2456 "reached. Processor %d/0x%x ignored.\n",
2457 max, thiscpu, apicid);
2458
2459 disabled_cpus++;
2460 return -EINVAL;
2461 }
2462
2463 if (apicid == boot_cpu_physical_apicid) {
2464
2465
2466
2467
2468
2469
2470
2471 cpu = 0;
2472
2473
2474 cpuid_to_apicid[0] = apicid;
2475 } else {
2476 cpu = allocate_logical_cpuid(apicid);
2477 if (cpu < 0) {
2478 disabled_cpus++;
2479 return -EINVAL;
2480 }
2481 }
2482
2483
2484
2485
2486 if (version == 0x0) {
2487 pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
2488 cpu, apicid);
2489 version = 0x10;
2490 }
2491
2492 if (version != boot_cpu_apic_version) {
2493 pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
2494 boot_cpu_apic_version, cpu, version);
2495 }
2496
2497 if (apicid > max_physical_apicid)
2498 max_physical_apicid = apicid;
2499
2500 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
2501 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
2502 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
2503 #endif
2504 #ifdef CONFIG_X86_32
2505 early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
2506 apic->x86_32_early_logical_apicid(cpu);
2507 #endif
2508 set_cpu_possible(cpu, true);
2509 physid_set(apicid, phys_cpu_present_map);
2510 set_cpu_present(cpu, true);
2511 num_processors++;
2512
2513 return cpu;
2514 }
2515
2516 int hard_smp_processor_id(void)
2517 {
2518 return read_apic_id();
2519 }
2520
2521
2522
2523
2524
2525
2526
2527 void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
2528 {
2529 struct apic **drv;
2530
2531 for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
2532
2533 WARN_ON((*drv)->eoi_write == eoi_write);
2534 (*drv)->native_eoi_write = (*drv)->eoi_write;
2535 (*drv)->eoi_write = eoi_write;
2536 }
2537 }
2538
2539 static void __init apic_bsp_up_setup(void)
2540 {
2541 #ifdef CONFIG_X86_64
2542 apic_write(APIC_ID, apic->set_apic_id(boot_cpu_physical_apicid));
2543 #else
2544
2545
2546
2547
2548
2549 # ifdef CONFIG_CRASH_DUMP
2550 boot_cpu_physical_apicid = read_apic_id();
2551 # endif
2552 #endif
2553 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
2554 }
2555
2556
2557
2558
2559
2560 static void __init apic_bsp_setup(bool upmode)
2561 {
2562 connect_bsp_APIC();
2563 if (upmode)
2564 apic_bsp_up_setup();
2565 setup_local_APIC();
2566
2567 enable_IO_APIC();
2568 end_local_APIC_setup();
2569 irq_remap_enable_fault_handling();
2570 setup_IO_APIC();
2571 }
2572
2573 #ifdef CONFIG_UP_LATE_INIT
2574 void __init up_late_init(void)
2575 {
2576 if (apic_intr_mode == APIC_PIC)
2577 return;
2578
2579
2580 x86_init.timers.setup_percpu_clockev();
2581 }
2582 #endif
2583
2584
2585
2586
2587 #ifdef CONFIG_PM
2588
2589 static struct {
2590
2591
2592
2593
2594
2595 int active;
2596
2597 unsigned int apic_id;
2598 unsigned int apic_taskpri;
2599 unsigned int apic_ldr;
2600 unsigned int apic_dfr;
2601 unsigned int apic_spiv;
2602 unsigned int apic_lvtt;
2603 unsigned int apic_lvtpc;
2604 unsigned int apic_lvt0;
2605 unsigned int apic_lvt1;
2606 unsigned int apic_lvterr;
2607 unsigned int apic_tmict;
2608 unsigned int apic_tdcr;
2609 unsigned int apic_thmr;
2610 unsigned int apic_cmci;
2611 } apic_pm_state;
2612
2613 static int lapic_suspend(void)
2614 {
2615 unsigned long flags;
2616 int maxlvt;
2617
2618 if (!apic_pm_state.active)
2619 return 0;
2620
2621 maxlvt = lapic_get_maxlvt();
2622
2623 apic_pm_state.apic_id = apic_read(APIC_ID);
2624 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
2625 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
2626 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
2627 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
2628 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
2629 if (maxlvt >= 4)
2630 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
2631 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
2632 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
2633 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
2634 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
2635 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
2636 #ifdef CONFIG_X86_THERMAL_VECTOR
2637 if (maxlvt >= 5)
2638 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
2639 #endif
2640 #ifdef CONFIG_X86_MCE_INTEL
2641 if (maxlvt >= 6)
2642 apic_pm_state.apic_cmci = apic_read(APIC_LVTCMCI);
2643 #endif
2644
2645 local_irq_save(flags);
2646 disable_local_APIC();
2647
2648 irq_remapping_disable();
2649
2650 local_irq_restore(flags);
2651 return 0;
2652 }
2653
2654 static void lapic_resume(void)
2655 {
2656 unsigned int l, h;
2657 unsigned long flags;
2658 int maxlvt;
2659
2660 if (!apic_pm_state.active)
2661 return;
2662
2663 local_irq_save(flags);
2664
2665
2666
2667
2668
2669
2670
2671 mask_ioapic_entries();
2672 legacy_pic->mask_all();
2673
2674 if (x2apic_mode) {
2675 __x2apic_enable();
2676 } else {
2677
2678
2679
2680
2681
2682
2683 if (boot_cpu_data.x86 >= 6) {
2684 rdmsr(MSR_IA32_APICBASE, l, h);
2685 l &= ~MSR_IA32_APICBASE_BASE;
2686 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
2687 wrmsr(MSR_IA32_APICBASE, l, h);
2688 }
2689 }
2690
2691 maxlvt = lapic_get_maxlvt();
2692 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
2693 apic_write(APIC_ID, apic_pm_state.apic_id);
2694 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
2695 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
2696 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
2697 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
2698 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
2699 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
2700 #ifdef CONFIG_X86_THERMAL_VECTOR
2701 if (maxlvt >= 5)
2702 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
2703 #endif
2704 #ifdef CONFIG_X86_MCE_INTEL
2705 if (maxlvt >= 6)
2706 apic_write(APIC_LVTCMCI, apic_pm_state.apic_cmci);
2707 #endif
2708 if (maxlvt >= 4)
2709 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
2710 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
2711 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
2712 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
2713 apic_write(APIC_ESR, 0);
2714 apic_read(APIC_ESR);
2715 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
2716 apic_write(APIC_ESR, 0);
2717 apic_read(APIC_ESR);
2718
2719 irq_remapping_reenable(x2apic_mode);
2720
2721 local_irq_restore(flags);
2722 }
2723
2724
2725
2726
2727
2728
2729 static struct syscore_ops lapic_syscore_ops = {
2730 .resume = lapic_resume,
2731 .suspend = lapic_suspend,
2732 };
2733
2734 static void apic_pm_activate(void)
2735 {
2736 apic_pm_state.active = 1;
2737 }
2738
2739 static int __init init_lapic_sysfs(void)
2740 {
2741
2742 if (boot_cpu_has(X86_FEATURE_APIC))
2743 register_syscore_ops(&lapic_syscore_ops);
2744
2745 return 0;
2746 }
2747
2748
2749 core_initcall(init_lapic_sysfs);
2750
2751 #else
2752
2753 static void apic_pm_activate(void) { }
2754
2755 #endif
2756
2757 #ifdef CONFIG_X86_64
2758
2759 static int multi_checked;
2760 static int multi;
2761
2762 static int set_multi(const struct dmi_system_id *d)
2763 {
2764 if (multi)
2765 return 0;
2766 pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
2767 multi = 1;
2768 return 0;
2769 }
2770
2771 static const struct dmi_system_id multi_dmi_table[] = {
2772 {
2773 .callback = set_multi,
2774 .ident = "IBM System Summit2",
2775 .matches = {
2776 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
2777 DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
2778 },
2779 },
2780 {}
2781 };
2782
2783 static void dmi_check_multi(void)
2784 {
2785 if (multi_checked)
2786 return;
2787
2788 dmi_check_system(multi_dmi_table);
2789 multi_checked = 1;
2790 }
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800 int apic_is_clustered_box(void)
2801 {
2802 dmi_check_multi();
2803 return multi;
2804 }
2805 #endif
2806
2807
2808
2809
2810 static int __init setup_disableapic(char *arg)
2811 {
2812 disable_apic = 1;
2813 setup_clear_cpu_cap(X86_FEATURE_APIC);
2814 return 0;
2815 }
2816 early_param("disableapic", setup_disableapic);
2817
2818
2819 static int __init setup_nolapic(char *arg)
2820 {
2821 return setup_disableapic(arg);
2822 }
2823 early_param("nolapic", setup_nolapic);
2824
2825 static int __init parse_lapic_timer_c2_ok(char *arg)
2826 {
2827 local_apic_timer_c2_ok = 1;
2828 return 0;
2829 }
2830 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
2831
2832 static int __init parse_disable_apic_timer(char *arg)
2833 {
2834 disable_apic_timer = 1;
2835 return 0;
2836 }
2837 early_param("noapictimer", parse_disable_apic_timer);
2838
2839 static int __init parse_nolapic_timer(char *arg)
2840 {
2841 disable_apic_timer = 1;
2842 return 0;
2843 }
2844 early_param("nolapic_timer", parse_nolapic_timer);
2845
2846 static int __init apic_set_verbosity(char *arg)
2847 {
2848 if (!arg) {
2849 #ifdef CONFIG_X86_64
2850 skip_ioapic_setup = 0;
2851 return 0;
2852 #endif
2853 return -EINVAL;
2854 }
2855
2856 if (strcmp("debug", arg) == 0)
2857 apic_verbosity = APIC_DEBUG;
2858 else if (strcmp("verbose", arg) == 0)
2859 apic_verbosity = APIC_VERBOSE;
2860 #ifdef CONFIG_X86_64
2861 else {
2862 pr_warning("APIC Verbosity level %s not recognised"
2863 " use apic=verbose or apic=debug\n", arg);
2864 return -EINVAL;
2865 }
2866 #endif
2867
2868 return 0;
2869 }
2870 early_param("apic", apic_set_verbosity);
2871
2872 static int __init lapic_insert_resource(void)
2873 {
2874 if (!apic_phys)
2875 return -1;
2876
2877
2878 lapic_resource.start = apic_phys;
2879 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
2880 insert_resource(&iomem_resource, &lapic_resource);
2881
2882 return 0;
2883 }
2884
2885
2886
2887
2888
2889 late_initcall(lapic_insert_resource);
2890
2891 static int __init apic_set_disabled_cpu_apicid(char *arg)
2892 {
2893 if (!arg || !get_option(&arg, &disabled_cpu_apicid))
2894 return -EINVAL;
2895
2896 return 0;
2897 }
2898 early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
2899
2900 static int __init apic_set_extnmi(char *arg)
2901 {
2902 if (!arg)
2903 return -EINVAL;
2904
2905 if (!strncmp("all", arg, 3))
2906 apic_extnmi = APIC_EXTNMI_ALL;
2907 else if (!strncmp("none", arg, 4))
2908 apic_extnmi = APIC_EXTNMI_NONE;
2909 else if (!strncmp("bsp", arg, 3))
2910 apic_extnmi = APIC_EXTNMI_BSP;
2911 else {
2912 pr_warn("Unknown external NMI delivery mode `%s' ignored\n", arg);
2913 return -EINVAL;
2914 }
2915
2916 return 0;
2917 }
2918 early_param("apic_extnmi", apic_set_extnmi);