This source file includes the following definitions.
- clockevent_to_channel
- hpet_readl
- hpet_writel
- hpet_set_mapping
- hpet_clear_mapping
- hpet_setup
- disable_hpet
- is_hpet_capable
- is_hpet_enabled
- _hpet_print_config
- hpet_reserve_platform_timers
- hpet_select_device_channel
- hpet_reserve_platform_timers
- hpet_select_device_channel
- hpet_stop_counter
- hpet_reset_counter
- hpet_start_counter
- hpet_restart_counter
- hpet_resume_device
- hpet_resume_counter
- hpet_enable_legacy_int
- hpet_clkevt_set_state_periodic
- hpet_clkevt_set_state_oneshot
- hpet_clkevt_set_state_shutdown
- hpet_clkevt_legacy_resume
- hpet_clkevt_set_next_event
- hpet_init_clockevent
- hpet_legacy_clockevent_register
- hpet_msi_unmask
- hpet_msi_mask
- hpet_msi_write
- hpet_clkevt_msi_resume
- hpet_msi_interrupt_handler
- hpet_setup_msi_irq
- init_one_hpet_msi_clockevent
- hpet_get_unused_clockevent
- hpet_cpuhp_online
- hpet_cpuhp_dead
- hpet_select_clockevents
- hpet_select_clockevents
- read_hpet
- read_hpet
- hpet_cfg_working
- hpet_counting
- hpet_enable
- hpet_late_init
- hpet_disable
- hpet_cnt_ahead
- hpet_register_irq_handler
- hpet_unregister_irq_handler
- hpet_rtc_timer_init
- hpet_disable_rtc_channel
- hpet_mask_rtc_irq_bit
- hpet_set_rtc_irq_bit
- hpet_set_alarm_time
- hpet_set_periodic_freq
- hpet_rtc_dropped_irq
- hpet_rtc_timer_reinit
- hpet_rtc_interrupt
1
2 #include <linux/clockchips.h>
3 #include <linux/interrupt.h>
4 #include <linux/export.h>
5 #include <linux/delay.h>
6 #include <linux/hpet.h>
7 #include <linux/cpu.h>
8 #include <linux/irq.h>
9
10 #include <asm/hpet.h>
11 #include <asm/time.h>
12
13 #undef pr_fmt
14 #define pr_fmt(fmt) "hpet: " fmt
15
16 enum hpet_mode {
17 HPET_MODE_UNUSED,
18 HPET_MODE_LEGACY,
19 HPET_MODE_CLOCKEVT,
20 HPET_MODE_DEVICE,
21 };
22
23 struct hpet_channel {
24 struct clock_event_device evt;
25 unsigned int num;
26 unsigned int cpu;
27 unsigned int irq;
28 unsigned int in_use;
29 enum hpet_mode mode;
30 unsigned int boot_cfg;
31 char name[10];
32 };
33
34 struct hpet_base {
35 unsigned int nr_channels;
36 unsigned int nr_clockevents;
37 unsigned int boot_cfg;
38 struct hpet_channel *channels;
39 };
40
41 #define HPET_MASK CLOCKSOURCE_MASK(32)
42
43 #define HPET_MIN_CYCLES 128
44 #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
45
46
47
48
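/*
 * hpet_address is filled in from the ACPI HPET table (see acpi/boot.c) or,
 * when the firmware does not advertise the HPET, from force_hpet_address by
 * a PCI quirk (handled in hpet_late_init() below).
 */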
49 unsigned long hpet_address;
50 u8 hpet_blockid;
51 bool hpet_msi_disable;
52
53 #ifdef CONFIG_PCI_MSI
54 static DEFINE_PER_CPU(struct hpet_channel *, cpu_hpet_channel);
55 static struct irq_domain *hpet_domain;
56 #endif
57
58 static void __iomem *hpet_virt_address;
59
60 static struct hpet_base hpet_base;
61
62 static bool hpet_legacy_int_enabled;
63 static unsigned long hpet_freq;
64
65 bool boot_hpet_disable;
66 bool hpet_force_user;
67 static bool hpet_verbose;
68
69 static inline
70 struct hpet_channel *clockevent_to_channel(struct clock_event_device *evt)
71 {
72 return container_of(evt, struct hpet_channel, evt);
73 }
74
75 inline unsigned int hpet_readl(unsigned int a)
76 {
77 return readl(hpet_virt_address + a);
78 }
79
80 static inline void hpet_writel(unsigned int d, unsigned int a)
81 {
82 writel(d, hpet_virt_address + a);
83 }
84
85 static inline void hpet_set_mapping(void)
86 {
87 hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
88 }
89
90 static inline void hpet_clear_mapping(void)
91 {
92 iounmap(hpet_virt_address);
93 hpet_virt_address = NULL;
94 }
95
96
97
98
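/*
 * Command line: "hpet=disable|force|verbose" (comma separated) and "nohpet".
 */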
99 static int __init hpet_setup(char *str)
100 {
101 while (str) {
102 char *next = strchr(str, ',');
103
104 if (next)
105 *next++ = 0;
106 if (!strncmp("disable", str, 7))
107 boot_hpet_disable = true;
108 if (!strncmp("force", str, 5))
109 hpet_force_user = true;
110 if (!strncmp("verbose", str, 7))
111 hpet_verbose = true;
112 str = next;
113 }
114 return 1;
115 }
116 __setup("hpet=", hpet_setup);
117
118 static int __init disable_hpet(char *str)
119 {
120 boot_hpet_disable = true;
121 return 1;
122 }
123 __setup("nohpet", disable_hpet);
124
125 static inline int is_hpet_capable(void)
126 {
127 return !boot_hpet_disable && hpet_address;
128 }
129
130
131
132
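/*
 * Check whether the HPET is usable and the legacy interrupt routing
 * (LegacyReplacement mode) has been enabled.
 */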
133 int is_hpet_enabled(void)
134 {
135 return is_hpet_capable() && hpet_legacy_int_enabled;
136 }
137 EXPORT_SYMBOL_GPL(is_hpet_enabled);
138
139 static void _hpet_print_config(const char *function, int line)
140 {
141 u32 i, id, period, cfg, status, channels, l, h;
142
143 pr_info("%s(%d):\n", function, line);
144
145 id = hpet_readl(HPET_ID);
146 period = hpet_readl(HPET_PERIOD);
147 pr_info("ID: 0x%x, PERIOD: 0x%x\n", id, period);
148
149 cfg = hpet_readl(HPET_CFG);
150 status = hpet_readl(HPET_STATUS);
151 pr_info("CFG: 0x%x, STATUS: 0x%x\n", cfg, status);
152
153 l = hpet_readl(HPET_COUNTER);
154 h = hpet_readl(HPET_COUNTER+4);
155 pr_info("COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);
156
157 channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
158
159 for (i = 0; i < channels; i++) {
160 l = hpet_readl(HPET_Tn_CFG(i));
161 h = hpet_readl(HPET_Tn_CFG(i)+4);
162 pr_info("T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", i, l, h);
163
164 l = hpet_readl(HPET_Tn_CMP(i));
165 h = hpet_readl(HPET_Tn_CMP(i)+4);
166 pr_info("T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", i, l, h);
167
168 l = hpet_readl(HPET_Tn_ROUTE(i));
169 h = hpet_readl(HPET_Tn_ROUTE(i)+4);
170 pr_info("T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", i, l, h);
171 }
172 }
173
174 #define hpet_print_config() \
175 do { \
176 if (hpet_verbose) \
177 _hpet_print_config(__func__, __LINE__); \
178 } while (0)
179
180
181
182
183
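/*
 * When the /dev/hpet character driver (CONFIG_HPET) is enabled, channels
 * which are not used by the kernel are handed over to it; the clockevent
 * channels are marked reserved.
 */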
184 #ifdef CONFIG_HPET
185
186 static void __init hpet_reserve_platform_timers(void)
187 {
188 struct hpet_data hd;
189 unsigned int i;
190
191 memset(&hd, 0, sizeof(hd));
192 hd.hd_phys_address = hpet_address;
193 hd.hd_address = hpet_virt_address;
194 hd.hd_nirqs = hpet_base.nr_channels;
195
196
197
198
199
200
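/*
 * Entries 0 and 1 of hd_irq[] are fixed to the legacy 8254 and RTC
 * interrupts; the remaining entries take the per-channel interrupt routing
 * that was read from the timer configuration registers at boot.
 */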
201 hd.hd_irq[0] = HPET_LEGACY_8254;
202 hd.hd_irq[1] = HPET_LEGACY_RTC;
203
204 for (i = 0; i < hpet_base.nr_channels; i++) {
205 struct hpet_channel *hc = hpet_base.channels + i;
206
207 if (i >= 2)
208 hd.hd_irq[i] = hc->irq;
209
210 switch (hc->mode) {
211 case HPET_MODE_UNUSED:
212 case HPET_MODE_DEVICE:
213 hc->mode = HPET_MODE_DEVICE;
214 break;
215 case HPET_MODE_CLOCKEVT:
216 case HPET_MODE_LEGACY:
217 hpet_reserve_timer(&hd, hc->num);
218 break;
219 }
220 }
221
222 hpet_alloc(&hd);
223 }
224
225 static void __init hpet_select_device_channel(void)
226 {
227 int i;
228
229 for (i = 0; i < hpet_base.nr_channels; i++) {
230 struct hpet_channel *hc = hpet_base.channels + i;
231
232
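/* Hand the first unused channel to the /dev/hpet char driver */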
233 if (hc->mode == HPET_MODE_UNUSED) {
234 hc->mode = HPET_MODE_DEVICE;
235 return;
236 }
237 }
238 }
239
240 #else
241 static inline void hpet_reserve_platform_timers(void) { }
242 static inline void hpet_select_device_channel(void) {}
243 #endif
244
245
246 static void hpet_stop_counter(void)
247 {
248 u32 cfg = hpet_readl(HPET_CFG);
249
250 cfg &= ~HPET_CFG_ENABLE;
251 hpet_writel(cfg, HPET_CFG);
252 }
253
254 static void hpet_reset_counter(void)
255 {
256 hpet_writel(0, HPET_COUNTER);
257 hpet_writel(0, HPET_COUNTER + 4);
258 }
259
260 static void hpet_start_counter(void)
261 {
262 unsigned int cfg = hpet_readl(HPET_CFG);
263
264 cfg |= HPET_CFG_ENABLE;
265 hpet_writel(cfg, HPET_CFG);
266 }
267
268 static void hpet_restart_counter(void)
269 {
270 hpet_stop_counter();
271 hpet_reset_counter();
272 hpet_start_counter();
273 }
274
275 static void hpet_resume_device(void)
276 {
277 force_hpet_resume();
278 }
279
280 static void hpet_resume_counter(struct clocksource *cs)
281 {
282 hpet_resume_device();
283 hpet_restart_counter();
284 }
285
286 static void hpet_enable_legacy_int(void)
287 {
288 unsigned int cfg = hpet_readl(HPET_CFG);
289
290 cfg |= HPET_CFG_LEGACY;
291 hpet_writel(cfg, HPET_CFG);
292 hpet_legacy_int_enabled = true;
293 }
294
295 static int hpet_clkevt_set_state_periodic(struct clock_event_device *evt)
296 {
297 unsigned int channel = clockevent_to_channel(evt)->num;
298 unsigned int cfg, cmp, now;
299 uint64_t delta;
300
301 hpet_stop_counter();
302 delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult;
303 delta >>= evt->shift;
304 now = hpet_readl(HPET_COUNTER);
305 cmp = now + (unsigned int)delta;
306 cfg = hpet_readl(HPET_Tn_CFG(channel));
307 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
308 HPET_TN_32BIT;
309 hpet_writel(cfg, HPET_Tn_CFG(channel));
310 hpet_writel(cmp, HPET_Tn_CMP(channel));
311 udelay(1);
312
313
314
315
316
317
318
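/*
 * The comparator must be written twice to set up periodic mode: the first
 * write (with HPET_TN_SETVAL set) programs the comparator, and since
 * HPET_TN_SETVAL is cleared automatically after that write, the second
 * write below programs the period.
 */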
319 hpet_writel((unsigned int)delta, HPET_Tn_CMP(channel));
320 hpet_start_counter();
321 hpet_print_config();
322
323 return 0;
324 }
325
326 static int hpet_clkevt_set_state_oneshot(struct clock_event_device *evt)
327 {
328 unsigned int channel = clockevent_to_channel(evt)->num;
329 unsigned int cfg;
330
331 cfg = hpet_readl(HPET_Tn_CFG(channel));
332 cfg &= ~HPET_TN_PERIODIC;
333 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
334 hpet_writel(cfg, HPET_Tn_CFG(channel));
335
336 return 0;
337 }
338
339 static int hpet_clkevt_set_state_shutdown(struct clock_event_device *evt)
340 {
341 unsigned int channel = clockevent_to_channel(evt)->num;
342 unsigned int cfg;
343
344 cfg = hpet_readl(HPET_Tn_CFG(channel));
345 cfg &= ~HPET_TN_ENABLE;
346 hpet_writel(cfg, HPET_Tn_CFG(channel));
347
348 return 0;
349 }
350
351 static int hpet_clkevt_legacy_resume(struct clock_event_device *evt)
352 {
353 hpet_enable_legacy_int();
354 hpet_print_config();
355 return 0;
356 }
357
358 static int
359 hpet_clkevt_set_next_event(unsigned long delta, struct clock_event_device *evt)
360 {
361 unsigned int channel = clockevent_to_channel(evt)->num;
362 u32 cnt;
363 s32 res;
364
365 cnt = hpet_readl(HPET_COUNTER);
366 cnt += (u32) delta;
367 hpet_writel(cnt, HPET_Tn_CMP(channel));
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
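/*
 * The comparator only fires on an exact match. The write above may be
 * delayed by the chipset, and an NMI or SMI between the counter read and
 * the comparator write can push the comparator behind the counter; in that
 * case no interrupt is delivered until the 32-bit counter wraps. Read the
 * counter back and return -ETIME unless the programmed value is safely
 * ahead of it, so the core retries with a larger delta.
 */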
391 res = (s32)(cnt - hpet_readl(HPET_COUNTER));
392
393 return res < HPET_MIN_CYCLES ? -ETIME : 0;
394 }
395
396 static void hpet_init_clockevent(struct hpet_channel *hc, unsigned int rating)
397 {
398 struct clock_event_device *evt = &hc->evt;
399
400 evt->rating = rating;
401 evt->irq = hc->irq;
402 evt->name = hc->name;
403 evt->cpumask = cpumask_of(hc->cpu);
404 evt->set_state_oneshot = hpet_clkevt_set_state_oneshot;
405 evt->set_next_event = hpet_clkevt_set_next_event;
406 evt->set_state_shutdown = hpet_clkevt_set_state_shutdown;
407
408 evt->features = CLOCK_EVT_FEAT_ONESHOT;
409 if (hc->boot_cfg & HPET_TN_PERIODIC) {
410 evt->features |= CLOCK_EVT_FEAT_PERIODIC;
411 evt->set_state_periodic = hpet_clkevt_set_state_periodic;
412 }
413 }
414
415 static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
416 {
417
418
419
420
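/*
 * Start the legacy channel on the boot CPU; it is registered as the
 * global clock event device below.
 */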
421 hc->cpu = boot_cpu_data.cpu_index;
422 strncpy(hc->name, "hpet", sizeof(hc->name));
423 hpet_init_clockevent(hc, 50);
424
425 hc->evt.tick_resume = hpet_clkevt_legacy_resume;
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
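/*
 * Advertise periodic mode unconditionally on the legacy channel, overriding
 * the capability check done in hpet_init_clockevent(). Channel 0 has
 * historically been driven in periodic mode and some systems appear to
 * depend on that behaviour.
 */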
455 hc->evt.features |= CLOCK_EVT_FEAT_PERIODIC;
456 hc->evt.set_state_periodic = hpet_clkevt_set_state_periodic;
457
458
459 hpet_enable_legacy_int();
460
461 clockevents_config_and_register(&hc->evt, hpet_freq,
462 HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
463 global_clock_event = &hc->evt;
464 pr_debug("Clockevent registered\n");
465 }
466
467
468
469
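/*
 * HPET MSI support: per-CPU clockevents driven by FSB/MSI capable channels.
 */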
470 #ifdef CONFIG_PCI_MSI
471
472 void hpet_msi_unmask(struct irq_data *data)
473 {
474 struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
475 unsigned int cfg;
476
477 cfg = hpet_readl(HPET_Tn_CFG(hc->num));
478 cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
479 hpet_writel(cfg, HPET_Tn_CFG(hc->num));
480 }
481
482 void hpet_msi_mask(struct irq_data *data)
483 {
484 struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
485 unsigned int cfg;
486
487 cfg = hpet_readl(HPET_Tn_CFG(hc->num));
488 cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
489 hpet_writel(cfg, HPET_Tn_CFG(hc->num));
490 }
491
492 void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg)
493 {
494 hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num));
495 hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4);
496 }
497
498 static int hpet_clkevt_msi_resume(struct clock_event_device *evt)
499 {
500 struct hpet_channel *hc = clockevent_to_channel(evt);
501 struct irq_data *data = irq_get_irq_data(hc->irq);
502 struct msi_msg msg;
503
504
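/* Restore the MSI message and unmask the channel interrupt */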
505 irq_chip_compose_msi_msg(data, &msg);
506 hpet_msi_write(hc, &msg);
507 hpet_msi_unmask(data);
508 return 0;
509 }
510
511 static irqreturn_t hpet_msi_interrupt_handler(int irq, void *data)
512 {
513 struct hpet_channel *hc = data;
514 struct clock_event_device *evt = &hc->evt;
515
516 if (!evt->event_handler) {
517 pr_info("Spurious interrupt HPET channel %d\n", hc->num);
518 return IRQ_HANDLED;
519 }
520
521 evt->event_handler(evt);
522 return IRQ_HANDLED;
523 }
524
525 static int hpet_setup_msi_irq(struct hpet_channel *hc)
526 {
527 if (request_irq(hc->irq, hpet_msi_interrupt_handler,
528 IRQF_TIMER | IRQF_NOBALANCING,
529 hc->name, hc))
530 return -1;
531
532 disable_irq(hc->irq);
533 irq_set_affinity(hc->irq, cpumask_of(hc->cpu));
534 enable_irq(hc->irq);
535
536 pr_debug("%s irq %u for MSI\n", hc->name, hc->irq);
537
538 return 0;
539 }
540
541
542 static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu)
543 {
544 struct clock_event_device *evt = &hc->evt;
545
546 hc->cpu = cpu;
547 per_cpu(cpu_hpet_channel, cpu) = hc;
548 hpet_setup_msi_irq(hc);
549
550 hpet_init_clockevent(hc, 110);
551 evt->tick_resume = hpet_clkevt_msi_resume;
552
553 clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
554 0x7FFFFFFF);
555 }
556
557 static struct hpet_channel *hpet_get_unused_clockevent(void)
558 {
559 int i;
560
561 for (i = 0; i < hpet_base.nr_channels; i++) {
562 struct hpet_channel *hc = hpet_base.channels + i;
563
564 if (hc->mode != HPET_MODE_CLOCKEVT || hc->in_use)
565 continue;
566 hc->in_use = 1;
567 return hc;
568 }
569 return NULL;
570 }
571
572 static int hpet_cpuhp_online(unsigned int cpu)
573 {
574 struct hpet_channel *hc = hpet_get_unused_clockevent();
575
576 if (hc)
577 init_one_hpet_msi_clockevent(hc, cpu);
578 return 0;
579 }
580
581 static int hpet_cpuhp_dead(unsigned int cpu)
582 {
583 struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu);
584
585 if (!hc)
586 return 0;
587 free_irq(hc->irq, hc);
588 hc->in_use = 0;
589 per_cpu(cpu_hpet_channel, cpu) = NULL;
590 return 0;
591 }
592
593 static void __init hpet_select_clockevents(void)
594 {
595 unsigned int i;
596
597 hpet_base.nr_clockevents = 0;
598
599
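/*
 * Per-CPU clockevents are pointless when MSI delivery is disabled or the
 * CPUs have an always-running local APIC timer (X86_FEATURE_ARAT).
 */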
600 if (hpet_msi_disable || boot_cpu_has(X86_FEATURE_ARAT))
601 return;
602
603 hpet_print_config();
604
605 hpet_domain = hpet_create_irq_domain(hpet_blockid);
606 if (!hpet_domain)
607 return;
608
609 for (i = 0; i < hpet_base.nr_channels; i++) {
610 struct hpet_channel *hc = hpet_base.channels + i;
611 int irq;
612
613 if (hc->mode != HPET_MODE_UNUSED)
614 continue;
615
616
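/* Only channels with FSB (MSI) delivery capability are usable here */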
617 if (!(hc->boot_cfg & HPET_TN_FSB_CAP))
618 continue;
619
620 sprintf(hc->name, "hpet%d", i);
621
622 irq = hpet_assign_irq(hpet_domain, hc, hc->num);
623 if (irq <= 0)
624 continue;
625
626 hc->irq = irq;
627 hc->mode = HPET_MODE_CLOCKEVT;
628
629 if (++hpet_base.nr_clockevents == num_possible_cpus())
630 break;
631 }
632
633 pr_info("%d channels of %d reserved for per-cpu timers\n",
634 hpet_base.nr_clockevents, hpet_base.nr_channels);
635 }
636
637 #else
638
639 static inline void hpet_select_clockevents(void) { }
640
641 #define hpet_cpuhp_online NULL
642 #define hpet_cpuhp_dead NULL
643
644 #endif
645
646
647
648
649 #if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
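/*
 * Reading the HPET counter is slow. If many CPUs read it concurrently the
 * accesses are serialized by the hardware, which can stall the system when
 * the HPET is the active clocksource. Therefore only one CPU at a time
 * reads the hardware and publishes the value; the others reuse the
 * published value. The lock and the value are packed into a single 64-bit
 * word so that both can be read atomically.
 */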
671 union hpet_lock {
672 struct {
673 arch_spinlock_t lock;
674 u32 value;
675 };
676 u64 lockval;
677 };
678
679 static union hpet_lock hpet __cacheline_aligned = {
680 { .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
681 };
682
683 static u64 read_hpet(struct clocksource *cs)
684 {
685 unsigned long flags;
686 union hpet_lock old, new;
687
688 BUILD_BUG_ON(sizeof(union hpet_lock) != 8);
689
690
691
692
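/*
 * In NMI context read the hardware directly: the lock may be held by the
 * interrupted context.
 */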
693 if (in_nmi())
694 return (u64)hpet_readl(HPET_COUNTER);
695
696
697
698
699 old.lockval = READ_ONCE(hpet.lockval);
700
701 if (arch_spin_is_locked(&old.lock))
702 goto contended;
703
704 local_irq_save(flags);
705 if (arch_spin_trylock(&hpet.lock)) {
706 new.value = hpet_readl(HPET_COUNTER);
707
708
709
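/* Use WRITE_ONCE() so lockless readers never observe a torn value */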
710 WRITE_ONCE(hpet.value, new.value);
711 arch_spin_unlock(&hpet.lock);
712 local_irq_restore(flags);
713 return (u64)new.value;
714 }
715 local_irq_restore(flags);
716
717 contended:
718
719
720
721
722
723
724
725
726
727
728
729
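/*
 * Contended case: another CPU is currently reading the counter. Spin until
 * either the published value changes or the lock is released, and use that
 * value instead of touching the hardware again.
 */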
730 do {
731 cpu_relax();
732 new.lockval = READ_ONCE(hpet.lockval);
733 } while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
734
735 return (u64)new.value;
736 }
737 #else
738
739
740
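/* Plain counter read when the contention avoidance path is not compiled in */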
741 static u64 read_hpet(struct clocksource *cs)
742 {
743 return (u64)hpet_readl(HPET_COUNTER);
744 }
745 #endif
746
747 static struct clocksource clocksource_hpet = {
748 .name = "hpet",
749 .rating = 250,
750 .read = read_hpet,
751 .mask = HPET_MASK,
752 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
753 .resume = hpet_resume_counter,
754 };
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
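/*
 * On some systems (e.g. AMD SB700 based boards) the HPET is emulated in SMM
 * and the config register reads 0xFFFFFFFF until that emulation has finished
 * initializing after the first register access. Poll the register a bounded
 * number of times before declaring the HPET unusable.
 */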
771 static bool __init hpet_cfg_working(void)
772 {
773 int i;
774
775 for (i = 0; i < 1000; i++) {
776 if (hpet_readl(HPET_CFG) != 0xFFFFFFFF)
777 return true;
778 }
779
780 pr_warn("Config register invalid. Disabling HPET\n");
781 return false;
782 }
783
784 static bool __init hpet_counting(void)
785 {
786 u64 start, now, t1;
787
788 hpet_restart_counter();
789
790 t1 = hpet_readl(HPET_COUNTER);
791 start = rdtsc();
792
793
794
795
796
797
798
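/*
 * The TSC frequency is not known yet, but waiting 200000 TSC cycles is
 * safe: that is 50us at 4GHz and 200us at 1GHz.
 */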
799 do {
800 if (t1 != hpet_readl(HPET_COUNTER))
801 return true;
802 now = rdtsc();
803 } while ((now - start) < 200000UL);
804
805 pr_warn("Counter not counting. HPET disabled\n");
806 return false;
807 }
808
809
810
811
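/*
 * hpet_enable - Try to set up the HPET timer. Returns 1 when the legacy
 * clockevent has been registered, 0 otherwise.
 */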
812 int __init hpet_enable(void)
813 {
814 u32 hpet_period, cfg, id, irq;
815 unsigned int i, channels;
816 struct hpet_channel *hc;
817 u64 freq;
818
819 if (!is_hpet_capable())
820 return 0;
821
822 hpet_set_mapping();
823 if (!hpet_virt_address)
824 return 0;
825
826
827 if (!hpet_cfg_working())
828 goto out_nohpet;
829
830
831
832
833 hpet_period = hpet_readl(HPET_PERIOD);
834 if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
835 goto out_nohpet;
836
837
838 freq = FSEC_PER_SEC;
839 do_div(freq, hpet_period);
840 hpet_freq = freq;
841
842
843
844
845
846 id = hpet_readl(HPET_ID);
847 hpet_print_config();
848
849
850 channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
851
852
853
854
855
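/*
 * RTC emulation uses channel 1 in addition to the tick channel, so legacy
 * mode with RTC emulation needs at least two channels.
 */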
856 if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC) && channels < 2)
857 goto out_nohpet;
858
859 hc = kcalloc(channels, sizeof(*hc), GFP_KERNEL);
860 if (!hc) {
861 pr_warn("Disabling HPET.\n");
862 goto out_nohpet;
863 }
864 hpet_base.channels = hc;
865 hpet_base.nr_channels = channels;
866
867
868 cfg = hpet_readl(HPET_CFG);
869 hpet_base.boot_cfg = cfg;
870 cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
871 hpet_writel(cfg, HPET_CFG);
872 if (cfg)
873 pr_warn("Global config: Unknown bits %#x\n", cfg);
874
875
876 for (i = 0; i < channels; i++, hc++) {
877 hc->num = i;
878
879 cfg = hpet_readl(HPET_Tn_CFG(i));
880 hc->boot_cfg = cfg;
881 irq = (cfg & Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
882 hc->irq = irq;
883
884 cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
885 hpet_writel(cfg, HPET_Tn_CFG(i));
886
887 cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
888 | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
889 | HPET_TN_FSB | HPET_TN_FSB_CAP);
890 if (cfg)
891 pr_warn("Channel #%u config: Unknown bits %#x\n", i, cfg);
892 }
893 hpet_print_config();
894
895
896
897
898
899
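/*
 * Validate that the counter actually counts before registering the
 * clocksource and the legacy clockevent.
 */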
900 if (!hpet_counting())
901 goto out_nohpet;
902
903 clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
904
905 if (id & HPET_ID_LEGSUP) {
906 hpet_legacy_clockevent_register(&hpet_base.channels[0]);
907 hpet_base.channels[0].mode = HPET_MODE_LEGACY;
908 if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC))
909 hpet_base.channels[1].mode = HPET_MODE_LEGACY;
910 return 1;
911 }
912 return 0;
913
914 out_nohpet:
915 kfree(hpet_base.channels);
916 hpet_base.channels = NULL;
917 hpet_base.nr_channels = 0;
918 hpet_clear_mapping();
919 hpet_address = 0;
920 return 0;
921 }
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
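/*
 * Late initialization, run as an fs_initcall: the PCI quirks have executed
 * by now and may have found a chipset HPET which the firmware did not
 * advertise (force_hpet_address). Select channels for the /dev/hpet driver
 * and the per-CPU MSI clockevents and register the CPU hotplug callbacks
 * for the latter.
 */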
937 static __init int hpet_late_init(void)
938 {
939 int ret;
940
941 if (!hpet_address) {
942 if (!force_hpet_address)
943 return -ENODEV;
944
945 hpet_address = force_hpet_address;
946 hpet_enable();
947 }
948
949 if (!hpet_virt_address)
950 return -ENODEV;
951
952 hpet_select_device_channel();
953 hpet_select_clockevents();
954 hpet_reserve_platform_timers();
955 hpet_print_config();
956
957 if (!hpet_base.nr_clockevents)
958 return 0;
959
960 ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
961 hpet_cpuhp_online, NULL);
962 if (ret)
963 return ret;
964 ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
965 hpet_cpuhp_dead);
966 if (ret)
967 goto err_cpuhp;
968 return 0;
969
970 err_cpuhp:
971 cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
972 return ret;
973 }
974 fs_initcall(hpet_late_init);
975
976 void hpet_disable(void)
977 {
978 unsigned int i;
979 u32 cfg;
980
981 if (!is_hpet_capable() || !hpet_virt_address)
982 return;
983
984
985 cfg = hpet_base.boot_cfg;
986 cfg &= ~HPET_CFG_ENABLE;
987 hpet_writel(cfg, HPET_CFG);
988
989
990 for (i = 0; i < hpet_base.nr_channels; i++)
991 hpet_writel(hpet_base.channels[i].boot_cfg, HPET_Tn_CFG(i));
992
993
994 if (hpet_base.boot_cfg & HPET_CFG_ENABLE)
995 hpet_writel(hpet_base.boot_cfg, HPET_CFG);
996 }
997
998 #ifdef CONFIG_HPET_EMULATE_RTC
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
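/*
 * HPET in LegacyReplacement mode takes over the RTC interrupt line, so the
 * RTC interrupts are emulated here using HPET channel 1:
 *
 * 1) Update interrupt   - once per second when the RTC time rolls over
 * 2) Alarm interrupt    - at a specific time of day
 * 3) Periodic interrupt - at a programmable frequency
 *
 * (1) and (2) are implemented by polling the RTC time at DEFAULT_RTC_INT_FREQ
 * (64 Hz); (3) reprograms the channel when the requested frequency exceeds
 * that rate.
 */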
1020 #include <linux/mc146818rtc.h>
1021 #include <linux/rtc.h>
1022
1023 #define DEFAULT_RTC_INT_FREQ 64
1024 #define DEFAULT_RTC_SHIFT 6
1025 #define RTC_NUM_INTS 1
1026
1027 static unsigned long hpet_rtc_flags;
1028 static int hpet_prev_update_sec;
1029 static struct rtc_time hpet_alarm_time;
1030 static unsigned long hpet_pie_count;
1031 static u32 hpet_t1_cmp;
1032 static u32 hpet_default_delta;
1033 static u32 hpet_pie_delta;
1034 static unsigned long hpet_pie_limit;
1035
1036 static rtc_irq_handler irq_handler;
1037
1038
1039
1040
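/* Check whether HPET counter value c1 is ahead of c2, allowing for wraparound */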
1041 static inline int hpet_cnt_ahead(u32 c1, u32 c2)
1042 {
1043 return (s32)(c2 - c1) < 0;
1044 }
1045
1046
1047
1048
1049 int hpet_register_irq_handler(rtc_irq_handler handler)
1050 {
1051 if (!is_hpet_enabled())
1052 return -ENODEV;
1053 if (irq_handler)
1054 return -EBUSY;
1055
1056 irq_handler = handler;
1057
1058 return 0;
1059 }
1060 EXPORT_SYMBOL_GPL(hpet_register_irq_handler);
1061
1062
1063
1064
1065
1066 void hpet_unregister_irq_handler(rtc_irq_handler handler)
1067 {
1068 if (!is_hpet_enabled())
1069 return;
1070
1071 irq_handler = NULL;
1072 hpet_rtc_flags = 0;
1073 }
1074 EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
1075
1076
1077
1078
1079
1080
1081
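/*
 * Channel 1 is used for the RTC emulation in one-shot mode; it is
 * reprogrammed from the interrupt handler (hpet_rtc_timer_reinit()) rather
 * than run in periodic mode.
 */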
1082 int hpet_rtc_timer_init(void)
1083 {
1084 unsigned int cfg, cnt, delta;
1085 unsigned long flags;
1086
1087 if (!is_hpet_enabled())
1088 return 0;
1089
1090 if (!hpet_default_delta) {
1091 struct clock_event_device *evt = &hpet_base.channels[0].evt;
1092 uint64_t clc;
1093
1094 clc = (uint64_t) evt->mult * NSEC_PER_SEC;
1095 clc >>= evt->shift + DEFAULT_RTC_SHIFT;
1096 hpet_default_delta = clc;
1097 }
1098
1099 if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
1100 delta = hpet_default_delta;
1101 else
1102 delta = hpet_pie_delta;
1103
1104 local_irq_save(flags);
1105
1106 cnt = delta + hpet_readl(HPET_COUNTER);
1107 hpet_writel(cnt, HPET_T1_CMP);
1108 hpet_t1_cmp = cnt;
1109
1110 cfg = hpet_readl(HPET_T1_CFG);
1111 cfg &= ~HPET_TN_PERIODIC;
1112 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
1113 hpet_writel(cfg, HPET_T1_CFG);
1114
1115 local_irq_restore(flags);
1116
1117 return 1;
1118 }
1119 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
1120
1121 static void hpet_disable_rtc_channel(void)
1122 {
1123 u32 cfg = hpet_readl(HPET_T1_CFG);
1124
1125 cfg &= ~HPET_TN_ENABLE;
1126 hpet_writel(cfg, HPET_T1_CFG);
1127 }
1128
1129
1130
1131
1132
1133
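/*
 * The functions below are called from the RTC driver. They return 0 when
 * the HPET RTC emulation is not active; otherwise they update the emulation
 * state and return 1.
 */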
1134 int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
1135 {
1136 if (!is_hpet_enabled())
1137 return 0;
1138
1139 hpet_rtc_flags &= ~bit_mask;
1140 if (unlikely(!hpet_rtc_flags))
1141 hpet_disable_rtc_channel();
1142
1143 return 1;
1144 }
1145 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
1146
1147 int hpet_set_rtc_irq_bit(unsigned long bit_mask)
1148 {
1149 unsigned long oldbits = hpet_rtc_flags;
1150
1151 if (!is_hpet_enabled())
1152 return 0;
1153
1154 hpet_rtc_flags |= bit_mask;
1155
1156 if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
1157 hpet_prev_update_sec = -1;
1158
1159 if (!oldbits)
1160 hpet_rtc_timer_init();
1161
1162 return 1;
1163 }
1164 EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);
1165
1166 int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
1167 {
1168 if (!is_hpet_enabled())
1169 return 0;
1170
1171 hpet_alarm_time.tm_hour = hrs;
1172 hpet_alarm_time.tm_min = min;
1173 hpet_alarm_time.tm_sec = sec;
1174
1175 return 1;
1176 }
1177 EXPORT_SYMBOL_GPL(hpet_set_alarm_time);
1178
1179 int hpet_set_periodic_freq(unsigned long freq)
1180 {
1181 uint64_t clc;
1182
1183 if (!is_hpet_enabled())
1184 return 0;
1185
1186 if (freq <= DEFAULT_RTC_INT_FREQ) {
1187 hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
1188 } else {
1189 struct clock_event_device *evt = &hpet_base.channels[0].evt;
1190
1191 clc = (uint64_t) evt->mult * NSEC_PER_SEC;
1192 do_div(clc, freq);
1193 clc >>= evt->shift;
1194 hpet_pie_delta = clc;
1195 hpet_pie_limit = 0;
1196 }
1197
1198 return 1;
1199 }
1200 EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
1201
1202 int hpet_rtc_dropped_irq(void)
1203 {
1204 return is_hpet_enabled();
1205 }
1206 EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
1207
1208 static void hpet_rtc_timer_reinit(void)
1209 {
1210 unsigned int delta;
1211 int lost_ints = -1;
1212
1213 if (unlikely(!hpet_rtc_flags))
1214 hpet_disable_rtc_channel();
1215
1216 if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
1217 delta = hpet_default_delta;
1218 else
1219 delta = hpet_pie_delta;
1220
1221
1222
1223
1224
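/*
 * Advance the comparator in 'delta' steps until it is ahead of the current
 * counter value, counting how many periods were missed.
 */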
1225 do {
1226 hpet_t1_cmp += delta;
1227 hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
1228 lost_ints++;
1229 } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
1230
1231 if (lost_ints) {
1232 if (hpet_rtc_flags & RTC_PIE)
1233 hpet_pie_count += lost_ints;
1234 if (printk_ratelimit())
1235 pr_warn("Lost %d RTC interrupts\n", lost_ints);
1236 }
1237 }
1238
1239 irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
1240 {
1241 struct rtc_time curr_time;
1242 unsigned long rtc_int_flag = 0;
1243
1244 hpet_rtc_timer_reinit();
1245 memset(&curr_time, 0, sizeof(struct rtc_time));
1246
1247 if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
1248 mc146818_get_time(&curr_time);
1249
1250 if (hpet_rtc_flags & RTC_UIE &&
1251 curr_time.tm_sec != hpet_prev_update_sec) {
1252 if (hpet_prev_update_sec >= 0)
1253 rtc_int_flag = RTC_UF;
1254 hpet_prev_update_sec = curr_time.tm_sec;
1255 }
1256
1257 if (hpet_rtc_flags & RTC_PIE && ++hpet_pie_count >= hpet_pie_limit) {
1258 rtc_int_flag |= RTC_PF;
1259 hpet_pie_count = 0;
1260 }
1261
1262 if (hpet_rtc_flags & RTC_AIE &&
1263 (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
1264 (curr_time.tm_min == hpet_alarm_time.tm_min) &&
1265 (curr_time.tm_hour == hpet_alarm_time.tm_hour))
1266 rtc_int_flag |= RTC_AF;
1267
1268 if (rtc_int_flag) {
1269 rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
1270 if (irq_handler)
1271 irq_handler(rtc_int_flag, dev_id);
1272 }
1273 return IRQ_HANDLED;
1274 }
1275 EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
1276 #endif