This source file includes the following definitions.
- lock_vector_lock
- unlock_vector_lock
- init_irq_alloc_info
- copy_irq_alloc_info
- irqd_cfg
- alloc_apic_chip_data
- free_apic_chip_data
- apic_update_irq_cfg
- apic_update_vector
- vector_assign_managed_shutdown
- reserve_managed_vector
- reserve_irq_vector_locked
- reserve_irq_vector
- assign_vector_locked
- assign_irq_vector
- assign_irq_vector_any_locked
- assign_irq_vector_policy
- assign_managed_vector
- clear_irq_vector
- x86_vector_deactivate
- activate_reserved
- activate_managed
- x86_vector_activate
- vector_free_reserved_and_managed
- x86_vector_free_irqs
- vector_configure_legacy
- x86_vector_alloc_irqs
- x86_vector_debug_show
- arch_probe_nr_irqs
- lapic_assign_legacy_vector
- lapic_assign_system_vectors
- arch_early_irq_init
- __setup_vector_irq
- lapic_online
- lapic_offline
- apic_set_affinity
- apic_retrigger_irq
- apic_ack_irq
- apic_ack_edge
- free_moved_vector
- smp_irq_move_cleanup_interrupt
- __send_cleanup_vector
- send_cleanup_vector
- __irq_complete_move
- irq_complete_move
- irq_force_complete_move
- lapic_can_unplug_cpu
- print_APIC_field
- print_local_APIC
- print_local_APICs
- print_PIC
- setup_show_lapic
- print_ICs
// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/smp.c.
 *	Jacob Shin <jacob.shin@amd.com>
 *	Enabled IRQ migration for the dying cpu.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

struct apic_chip_data {
        struct irq_cfg          hw_irq_cfg;
        unsigned int            vector;
        unsigned int            prev_vector;
        unsigned int            cpu;
        unsigned int            prev_cpu;
        unsigned int            irq;
        struct hlist_node       clist;
        unsigned int            move_in_progress        : 1,
                                is_managed              : 1,
                                can_reserve             : 1,
                                has_reserved            : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
        /*
         * Used so that the set of online CPUs does not change
         * during an assign_irq_vector() operation.
         */
        raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
        raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
                         const struct cpumask *mask)
{
        memset(info, 0, sizeof(*info));
        info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
        if (src)
                *dst = *src;
        else
                memset(dst, 0, sizeof(*dst));
}

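/*
 * Walk to the top of the interrupt domain hierarchy; the vector domain
 * sits at the root, so the root irq_data carries the apic_chip_data.
 */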
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
        if (!irqd)
                return NULL;

        while (irqd->parent_data)
                irqd = irqd->parent_data;

        return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
        return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
        struct apic_chip_data *apicd;

        apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
        if (apicd)
                INIT_HLIST_NODE(&apicd->clist);
        return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
        kfree(apicd);
}

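/*
 * Write the new vector/target CPU pair into the hardware configuration
 * and update the effective affinity to the single target CPU. Caller
 * must hold vector_lock.
 */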
static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
                                unsigned int cpu)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        lockdep_assert_held(&vector_lock);

        apicd->hw_irq_cfg.vector = vector;
        apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
        irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
        trace_vector_config(irqd->irq, vector, cpu,
                            apicd->hw_irq_cfg.dest_apicid);
}

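/*
 * Install a new vector/CPU pair for an interrupt and deal with the
 * previous one: either queue it for cleanup via the cleanup IPI (if its
 * target CPU is still online) or release it in the matrix allocator
 * right away.
 */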
static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
                               unsigned int newcpu)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        struct irq_desc *desc = irq_data_to_desc(irqd);
        bool managed = irqd_affinity_is_managed(irqd);

        lockdep_assert_held(&vector_lock);

        trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
                            apicd->cpu);

        /*
         * If there is no vector associated or if the associated vector is
         * the shutdown vector, which is associated to make PCI/MSI
         * shutdown mode work, then there is nothing to release. Clear out
         * prev_vector for this and the offlined target case.
         */
        apicd->prev_vector = 0;
        if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
                goto setnew;

        /*
         * If the target CPU of the previous vector is online, then mark
         * the vector as move in progress and store it for cleanup when
         * the first interrupt on the new vector arrives. If the target
         * CPU is offline then the regular release mechanism via the
         * cleanup vector is not possible and the vector can be
         * immediately freed in the underlying matrix allocator.
         */
        if (cpu_online(apicd->cpu)) {
                apicd->move_in_progress = true;
                apicd->prev_vector = apicd->vector;
                apicd->prev_cpu = apicd->cpu;
        } else {
                irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
                                managed);
        }

setnew:
        apicd->vector = newvec;
        apicd->cpu = newcpu;
        BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
        per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
        unsigned int cpu = cpumask_first(cpu_online_mask);

        apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

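/*
 * Reserve vector space for a managed interrupt on all CPUs in its
 * affinity mask, so a vector is guaranteed to be available when the
 * interrupt is activated.
 */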
static int reserve_managed_vector(struct irq_data *irqd)
{
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&vector_lock, flags);
        apicd->is_managed = true;
        ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        trace_vector_reserve_managed(irqd->irq, ret);
        return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        irq_matrix_reserve(vector_matrix);
        apicd->can_reserve = true;
        apicd->has_reserved = true;
        irqd_set_can_reserve(irqd);
        trace_vector_reserve(irqd->irq, 0);
        vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        reserve_irq_vector_locked(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return 0;
}

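/*
 * Allocate a vector for the interrupt from the matrix allocator and
 * install it, targeting one CPU out of @dest. Caller must hold
 * vector_lock.
 */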
static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        bool resvd = apicd->has_reserved;
        unsigned int cpu = apicd->cpu;
        int vector = apicd->vector;

        lockdep_assert_held(&vector_lock);

        /*
         * If the current target CPU is online and in the new requested
         * affinity mask, there is no point in moving the interrupt from
         * one CPU to another.
         */
        if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
                return 0;

        /*
         * Careful here. @apicd might either have move_in_progress set or
         * be enqueued for cleanup. Assigning a new vector would either
         * leave a stale vector on some CPU around or in case of a
         * pending cleanup corrupt the hlist.
         */
        if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
                return -EBUSY;

        vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
        trace_vector_alloc(irqd->irq, vector, resvd, vector);
        if (vector < 0)
                return vector;
        apic_update_vector(irqd, vector, cpu);
        apic_update_irq_cfg(irqd, vector, cpu);

        return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&vector_lock, flags);
        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        ret = assign_vector_locked(irqd, vector_searchmask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}

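/*
 * Vector search policy: prefer CPUs local to the interrupt's NUMA node,
 * then fall back to increasingly wider CPU sets.
 */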
static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
        /* Get the affinity mask - either irq_default_affinity or (user) set */
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        int node = irq_data_get_node(irqd);

        if (node == NUMA_NO_NODE)
                goto all;
        /* Try the intersection of @affmsk and node mask */
        cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
        if (!assign_vector_locked(irqd, vector_searchmask))
                return 0;
        /* Try the node mask */
        if (!assign_vector_locked(irqd, cpumask_of_node(node)))
                return 0;
all:
        /* Try the full affinity mask */
        cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
        if (!assign_vector_locked(irqd, vector_searchmask))
                return 0;
        /* Try the full online mask */
        return assign_vector_locked(irqd, cpu_online_mask);
}

static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
        if (irqd_affinity_is_managed(irqd))
                return reserve_managed_vector(irqd);
        if (info->mask)
                return assign_irq_vector(irqd, info->mask);
        /*
         * Make only a global reservation with no guarantee. A real vector
         * is associated at activation time.
         */
        return reserve_irq_vector(irqd);
}

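/*
 * Assign a vector to a managed interrupt, restricted to the intersection
 * of the requested destination and the interrupt's affinity mask, for
 * which vector space was reserved up front.
 */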
static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        int vector, cpu;

        cpumask_and(vector_searchmask, dest, affmsk);

        /* set_affinity might call here for nothing */
        if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
                return 0;
        vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
                                          &cpu);
        trace_vector_alloc_managed(irqd->irq, vector, vector);
        if (vector < 0)
                return vector;
        apic_update_vector(irqd, vector, cpu);
        apic_update_irq_cfg(irqd, vector, cpu);
        return 0;
}

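/*
 * Release the current vector and, if a move is pending, the previous
 * vector as well, and remove the interrupt from the cleanup list.
 */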
static void clear_irq_vector(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        bool managed = irqd_affinity_is_managed(irqd);
        unsigned int vector = apicd->vector;

        lockdep_assert_held(&vector_lock);

        if (!vector)
                return;

        trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
                           apicd->prev_cpu);

        per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
        apicd->vector = 0;

        /* Clean up the move in progress */
        vector = apicd->prev_vector;
        if (!vector)
                return;

        per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;
        hlist_del_init(&apicd->clist);
}

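/*
 * Deactivation releases the assigned vector, but keeps the reservation
 * (for reservation mode) or the managed shutdown vector in place so the
 * interrupt can be activated again later.
 */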
static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;

        trace_vector_deactivate(irqd->irq, apicd->is_managed,
                                apicd->can_reserve, false);

        /* Regular fixed assigned interrupt */
        if (!apicd->is_managed && !apicd->can_reserve)
                return;
        /* If the interrupt has a global reservation, nothing to do */
        if (apicd->has_reserved)
                return;

        raw_spin_lock_irqsave(&vector_lock, flags);
        clear_irq_vector(irqd);
        if (apicd->can_reserve)
                reserve_irq_vector_locked(irqd);
        else
                vector_assign_managed_shutdown(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        int ret;

        ret = assign_irq_vector_any_locked(irqd);
        if (!ret) {
                apicd->has_reserved = false;
                /*
                 * Core might have disabled reservation mode after
                 * allocating the irq descriptor. Ideally this should
                 * happen before allocation time, but that would require
                 * completely convoluted ways of transporting that
                 * information.
                 */
                if (!irqd_can_reserve(irqd))
                        apicd->can_reserve = false;
        }

        /*
         * Check to ensure that the effective affinity mask is a subset of
         * the user supplied affinity mask, and warn the user if it is not.
         */
        if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
                            irq_data_get_affinity_mask(irqd))) {
                pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
                        irqd->irq);
        }

        return ret;
}

static int activate_managed(struct irq_data *irqd)
{
        const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
        int ret;

        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
                /* Something in the core code broke! Survive gracefully */
                pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
                return -EINVAL;
        }

        ret = assign_managed_vector(irqd, vector_searchmask);
        /*
         * This should not happen. The vector reservation got buggered.
         * Handle it gracefully.
         */
        if (WARN_ON_ONCE(ret < 0)) {
                pr_err("Managed startup irq %u, no vector available\n",
                       irqd->irq);
        }
        return ret;
}

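/*
 * Activation assigns a real vector: reserved interrupts get one from the
 * global pool, managed interrupts from their preallocated reservation.
 * With @reserve set (early activation), only the managed shutdown vector
 * is installed.
 */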
static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
                               bool reserve)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;
        int ret = 0;

        trace_vector_activate(irqd->irq, apicd->is_managed,
                              apicd->can_reserve, reserve);

        /* Nothing to do for fixed assigned vectors */
        if (!apicd->can_reserve && !apicd->is_managed)
                return 0;

        raw_spin_lock_irqsave(&vector_lock, flags);
        if (reserve || irqd_is_managed_and_shutdown(irqd))
                vector_assign_managed_shutdown(irqd);
        else if (apicd->is_managed)
                ret = activate_managed(irqd);
        else if (apicd->has_reserved)
                ret = activate_reserved(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
        const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        trace_vector_teardown(irqd->irq, apicd->is_managed,
                              apicd->has_reserved);

        if (apicd->has_reserved)
                irq_matrix_remove_reserved(vector_matrix);
        if (apicd->is_managed)
                irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
{
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irqd && irqd->chip_data) {
                        raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(irqd);
                        vector_free_reserved_and_managed(irqd);
                        apicd = irqd->chip_data;
                        irq_domain_reset_irq_data(irqd);
                        raw_spin_unlock_irqrestore(&vector_lock, flags);
                        free_apic_chip_data(apicd);
                }
        }
}

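/*
 * Legacy PIC interrupts come with a fixed preassigned vector. Keep the
 * vector if the interrupt is already activated; otherwise hand it back
 * and let the caller reallocate it through the regular path.
 */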
static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
                                    struct apic_chip_data *apicd)
{
        unsigned long flags;
        bool realloc = false;

        apicd->vector = ISA_IRQ_VECTOR(virq);
        apicd->cpu = 0;

        raw_spin_lock_irqsave(&vector_lock, flags);
        /*
         * If the interrupt is activated, then it must stay at this vector
         * position. That's usually the timer interrupt (0).
         */
        if (irqd_is_activated(irqd)) {
                trace_vector_setup(virq, true, 0);
                apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
        } else {
                /* Release the vector */
                apicd->can_reserve = true;
                irqd_set_can_reserve(irqd);
                clear_irq_vector(irqd);
                realloc = true;
        }
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
{
        struct irq_alloc_info *info = arg;
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        int i, err, node;

        if (disable_apic)
                return -ENXIO;

        /* Currently vector allocator can't guarantee contiguous allocations */
        if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
                return -ENOSYS;

        for (i = 0; i < nr_irqs; i++) {
                irqd = irq_domain_get_irq_data(domain, virq + i);
                BUG_ON(!irqd);
                node = irq_data_get_node(irqd);
                WARN_ON_ONCE(irqd->chip_data);
                apicd = alloc_apic_chip_data(node);
                if (!apicd) {
                        err = -ENOMEM;
                        goto error;
                }

                apicd->irq = virq + i;
                irqd->chip = &lapic_controller;
                irqd->chip_data = apicd;
                irqd->hwirq = virq + i;
                irqd_set_single_target(irqd);
                /*
                 * Legacy vectors are already assigned when the IOAPIC
                 * takes them over. They stay on the same vector. This is
                 * required for check_timer() to work correctly as it
                 * might switch back to legacy mode. Only update the
                 * hardware config.
                 */
                if (info->flags & X86_IRQ_ALLOC_LEGACY) {
                        if (!vector_configure_legacy(virq + i, irqd, apicd))
                                continue;
                }

                err = assign_irq_vector_policy(irqd, info);
                trace_vector_setup(virq + i, false, err);
                if (err) {
                        irqd->chip_data = NULL;
                        free_apic_chip_data(apicd);
                        goto error;
                }
        }

        return 0;

error:
        x86_vector_free_irqs(domain, virq, i);
        return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
                                  struct irq_data *irqd, int ind)
{
        struct apic_chip_data apicd;
        unsigned long flags;
        int irq;

        if (!irqd) {
                irq_matrix_debug_show(m, vector_matrix, ind);
                return;
        }

        irq = irqd->irq;
        if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
                seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
                seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
                return;
        }

        if (!irqd->chip_data) {
                seq_printf(m, "%*sVector: Not assigned\n", ind, "");
                return;
        }

        raw_spin_lock_irqsave(&vector_lock, flags);
        memcpy(&apicd, irqd->chip_data, sizeof(apicd));
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
        seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
        if (apicd.prev_vector) {
                seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
                seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
        }
        seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
        seq_printf(m, "%*sis_managed:       %u\n", ind, "", apicd.is_managed ? 1 : 0);
        seq_printf(m, "%*scan_reserve:      %u\n", ind, "", apicd.can_reserve ? 1 : 0);
        seq_printf(m, "%*shas_reserved:     %u\n", ind, "", apicd.has_reserved ? 1 : 0);
        seq_printf(m, "%*scleanup_pending:  %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

static const struct irq_domain_ops x86_vector_domain_ops = {
        .alloc          = x86_vector_alloc_irqs,
        .free           = x86_vector_free_irqs,
        .activate       = x86_vector_activate,
        .deactivate     = x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
        .debug_show     = x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
        int nr;

        if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
                nr_irqs = NR_VECTORS * nr_cpu_ids;

        nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
        /*
         * for MSI and HT dyn irq
         */
        if (gsi_top <= NR_IRQS_LEGACY)
                nr += 8 * nr_cpu_ids;
        else
                nr += gsi_top * 16;
#endif
        if (nr < nr_irqs)
                nr_irqs = nr;

        /*
         * We don't know if PIC is present at this point so we need to do
         * probe() to get the right number of legacy IRQs.
         */
        return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
        /*
         * Use assign system here so it won't get accounted as allocated
         * and movable in the cpu hotplug check and it prevents managed
         * irq reservation from touching it.
         */
        irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_assign_system_vectors(void)
{
        unsigned int i, vector = 0;

        for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
                irq_matrix_assign_system(vector_matrix, vector, false);

        if (nr_legacy_irqs() > 1)
                lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

        /* System vectors are reserved, online it */
        irq_matrix_online(vector_matrix);

        /* Mark the preallocated legacy interrupts */
        for (i = 0; i < nr_legacy_irqs(); i++) {
                if (i != PIC_CASCADE_IR)
                        irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
        }
}

int __init arch_early_irq_init(void)
{
        struct fwnode_handle *fn;

        fn = irq_domain_alloc_named_fwnode("VECTOR");
        BUG_ON(!fn);
        x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
                                                   NULL);
        BUG_ON(x86_vector_domain == NULL);
        irq_domain_free_fwnode(fn);
        irq_set_default_host(x86_vector_domain);

        arch_init_msi_domain(x86_vector_domain);

        BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

        /*
         * Allocate the vector matrix allocator data structure and limit
         * the search area.
         */
        vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
                                         FIRST_SYSTEM_VECTOR);
        BUG_ON(!vector_matrix);

        return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
        int isairq = vector - ISA_IRQ_VECTOR(0);

        /* Check whether the irq is in the legacy space */
        if (isairq < 0 || isairq >= nr_legacy_irqs())
                return VECTOR_UNUSED;
        /* Check whether the irq is handled by the IOAPIC */
        if (test_bit(isairq, &io_apic_irqs))
                return VECTOR_UNUSED;
        return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
        unsigned int vector;

        lockdep_assert_held(&vector_lock);

        /* Online the vector matrix array for this CPU */
        irq_matrix_online(vector_matrix);

        /*
         * The interrupt affinity logic never targets interrupts to
         * offline CPUs. The exception are the legacy PIC interrupts. In
         * general they are only targeted to CPU0, but depending on the
         * platform they can be distributed to any online CPU in hardware.
         * The kernel has no influence on that. So all active legacy
         * vectors must be installed on all CPUs. The non legacy
         * interrupts are managed nicely via the hierarchy.
         */
        for (vector = 0; vector < NR_VECTORS; vector++)
                this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
        lock_vector_lock();
        irq_matrix_offline(vector_matrix);
        unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
                             const struct cpumask *dest, bool force)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        int err;

        /*
         * Core code can call here for inactive interrupts. For inactive
         * interrupts which use managed or reservation mode there is no
         * point in going through the vector assignment right now as the
         * activation will assign a vector which fits the destination
         * cpumask. Let the core code store the destination mask and be
         * done with it.
         */
        if (!irqd_is_activated(irqd) &&
            (apicd->is_managed || apicd->can_reserve))
                return IRQ_SET_MASK_OK;

        raw_spin_lock(&vector_lock);
        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        if (irqd_affinity_is_managed(irqd))
                err = assign_managed_vector(irqd, vector_searchmask);
        else
                err = assign_vector_locked(irqd, vector_searchmask);
        raw_spin_unlock(&vector_lock);
        return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity      NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        apic->send_IPI(apicd->cpu, apicd->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        return 1;
}

void apic_ack_irq(struct irq_data *irqd)
{
        irq_move_irq(irqd);
        ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
        irq_complete_move(irqd_cfg(irqd));
        apic_ack_irq(irqd);
}

static struct irq_chip lapic_controller = {
        .name                   = "APIC",
        .irq_ack                = apic_ack_edge,
        .irq_set_affinity       = apic_set_affinity,
        .irq_retrigger          = apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
        unsigned int vector = apicd->prev_vector;
        unsigned int cpu = apicd->prev_cpu;
        bool managed = apicd->is_managed;

        /*
         * This should never happen. Managed interrupts are not
         * migrated except on CPU down, which does not involve the
         * cleanup vector. But in theory could leak vectors.
         * Just warn and deal with it.
         */
        WARN_ON_ONCE(managed);

        trace_vector_free_moved(apicd->irq, cpu, vector, managed);
        irq_matrix_free(vector_matrix, cpu, vector, managed);
        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
        hlist_del_init(&apicd->clist);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;
}

asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
        struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
        struct apic_chip_data *apicd;
        struct hlist_node *tmp;

        entering_ack_irq();
        /* Prevent vectors vanishing under us */
        raw_spin_lock(&vector_lock);

        hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
                unsigned int irr, vector = apicd->prev_vector;

                /*
                 * Paranoia: Check if the vector that needs to be cleaned
                 * up is registered at the APICs IRR. If so, then this is
                 * not the best time to clean it up. Clean it up in the
                 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
                 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
                 * priority external vector, so on return from this
                 * interrupt the device interrupt will happen first.
                 */
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1U << (vector % 32))) {
                        apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                        continue;
                }
                free_moved_vector(apicd);
        }

        raw_spin_unlock(&vector_lock);
        exiting_irq();
}

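/*
 * Queue the interrupt's previous vector on the cleanup list of its old
 * target CPU and kick the cleanup IPI, or forget the previous vector if
 * that CPU went offline in the meantime.
 */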
static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
        unsigned int cpu;

        raw_spin_lock(&vector_lock);
        apicd->move_in_progress = 0;
        cpu = apicd->prev_cpu;
        if (cpu_online(cpu)) {
                hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
                apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                apicd->prev_vector = 0;
        }
        raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
        struct apic_chip_data *apicd;

        apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
        if (apicd->move_in_progress)
                __send_cleanup_vector(apicd);
}

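/*
 * The first interrupt arriving on the new vector proves that the new
 * target CPU can receive interrupts for this IRQ, so the old vector can
 * now be cleaned up.
 */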
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
        struct apic_chip_data *apicd;

        apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
        if (likely(!apicd->move_in_progress))
                return;

        if (vector == apicd->vector && apicd->cpu == smp_processor_id())
                __send_cleanup_vector(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
        __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        unsigned int vector;

        /*
         * The function is called for all descriptors regardless of which
         * irqdomain they belong to. For example if an IRQ is provided by
         * an irq_chip as part of a GPIO driver, the chip data for that
         * descriptor is specific to the irq_chip in question.
         *
         * Check first that the chip_data is what we expect
         * (apic_chip_data) before touching it any further.
         */
        irqd = irq_domain_get_irq_data(x86_vector_domain,
                                       irq_desc_get_irq(desc));
        if (!irqd)
                return;

        raw_spin_lock(&vector_lock);
        apicd = apic_chip_data(irqd);
        if (!apicd)
                goto unlock;

        /*
         * If prev_vector is empty, no action required.
         */
        vector = apicd->prev_vector;
        if (!vector)
                goto unlock;

        /*
         * This is tricky. If the cleanup of the old vector has not been
         * done yet, then the following setaffinity call will fail with
         * -EBUSY. This can leave the interrupt in a stale state.
         *
         * All CPUs are stuck in stop machine with interrupts disabled so
         * calling __irq_complete_move() would be completely pointless.
         *
         * 1) The interrupt is in move_in_progress state. That means that
         *    we have not seen an interrupt since the affinity was set to
         *    the new target.
         *
         * 2) The interrupt has fired on the new vector, but the cleanup
         *    IPIs have not been processed yet.
         */
        if (apicd->move_in_progress) {
                /*
                 * In theory there is a potential issue: the interrupt
                 * could have been raised on the old vector just before
                 * the reprogramming to the new vector took effect. If the
                 * target CPU cannot handle it before the old vector is
                 * cleaned up, the result is a spurious interrupt and in
                 * the worst case a stale interrupt line.
                 *
                 * In the CPU hotplug case this should not happen: all
                 * CPUs first loop with interrupts enabled in stop
                 * machine, so a pending interrupt on the old vector is
                 * handled before the old vector is cleaned up. The only
                 * remaining window is a delivery delayed beyond the point
                 * where the target CPU disables interrupts in stop
                 * machine, which is a theoretical possibility at best.
                 *
                 * There is nothing to be done about that here short of
                 * refactoring the whole fixup_irq() business completely.
                 * Print at least the irq number and the old vector
                 * number, so the necessary information is available when
                 * a problem in that area arises.
                 */
                pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
                        irqd->irq, vector);
        }
        free_moved_vector(apicd);
unlock:
        raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent that the actual interrupt move will run out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
        unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
        int ret = 0;

        raw_spin_lock(&vector_lock);
        tomove = irq_matrix_allocated(vector_matrix);
        avl = irq_matrix_available(vector_matrix, true);
        if (avl < tomove) {
                pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
                        cpu, tomove, avl);
                ret = -ENOSPC;
                goto out;
        }
        rsvd = irq_matrix_reserved(vector_matrix);
        if (avl < rsvd) {
                pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
                        rsvd, avl);
        }
out:
        raw_spin_unlock(&vector_lock);
        return ret;
}
#endif
#endif

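/*
 * Dump eight consecutive 32-bit APIC registers (one 256-bit field such
 * as ISR, TMR or IRR) on a single debug line.
 */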
static void __init print_APIC_field(int base)
{
        int i;

        printk(KERN_DEBUG);

        for (i = 0; i < 8; i++)
                pr_cont("%08x", apic_read(base + i*0x10));

        pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
        unsigned int i, v, ver, maxlvt;
        u64 icr;

        pr_debug("printing local APIC contents on CPU#%d/%d:\n",
                 smp_processor_id(), hard_smp_processor_id());
        v = apic_read(APIC_ID);
        pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
        v = apic_read(APIC_LVR);
        pr_info("... APIC VERSION: %08x\n", v);
        ver = GET_APIC_VERSION(v);
        maxlvt = lapic_get_maxlvt();

        v = apic_read(APIC_TASKPRI);
        pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                if (!APIC_XAPIC(ver)) {
                        v = apic_read(APIC_ARBPRI);
                        pr_debug("... APIC ARBPRI: %08x (%02x)\n",
                                 v, v & APIC_ARBPRI_MASK);
                }
                v = apic_read(APIC_PROCPRI);
                pr_debug("... APIC PROCPRI: %08x\n", v);
        }

        /*
         * Remote read supported only in the 82489DX and APIC from
         * hypertransport period.
         */
        if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
                v = apic_read(APIC_RRR);
                pr_debug("... APIC RRR: %08x\n", v);
        }

        v = apic_read(APIC_LDR);
        pr_debug("... APIC LDR: %08x\n", v);
        if (!x2apic_enabled()) {
                v = apic_read(APIC_DFR);
                pr_debug("... APIC DFR: %08x\n", v);
        }
        v = apic_read(APIC_SPIV);
        pr_debug("... APIC SPIV: %08x\n", v);

        pr_debug("... APIC ISR field:\n");
        print_APIC_field(APIC_ISR);
        pr_debug("... APIC TMR field:\n");
        print_APIC_field(APIC_TMR);
        pr_debug("... APIC IRR field:\n");
        print_APIC_field(APIC_IRR);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                /* Due to the Pentium erratum 3AP. */
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);

                v = apic_read(APIC_ESR);
                pr_debug("... APIC ESR: %08x\n", v);
        }

        icr = apic_icr_read();
        pr_debug("... APIC ICR: %08x\n", (u32)icr);
        pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

        v = apic_read(APIC_LVTT);
        pr_debug("... APIC LVTT: %08x\n", v);

        if (maxlvt > 3) {
                /* PC is LVT#4. */
                v = apic_read(APIC_LVTPC);
                pr_debug("... APIC LVTPC: %08x\n", v);
        }
        v = apic_read(APIC_LVT0);
        pr_debug("... APIC LVT0: %08x\n", v);
        v = apic_read(APIC_LVT1);
        pr_debug("... APIC LVT1: %08x\n", v);

        if (maxlvt > 2) {
                /* ERR is LVT#3. */
                v = apic_read(APIC_LVTERR);
                pr_debug("... APIC LVTERR: %08x\n", v);
        }

        v = apic_read(APIC_TMICT);
        pr_debug("... APIC TMICT: %08x\n", v);
        v = apic_read(APIC_TMCCT);
        pr_debug("... APIC TMCCT: %08x\n", v);
        v = apic_read(APIC_TDCR);
        pr_debug("... APIC TDCR: %08x\n", v);

        if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
                v = apic_read(APIC_EFEAT);
                maxlvt = (v >> 16) & 0xff;
                pr_debug("... APIC EFEAT: %08x\n", v);
                v = apic_read(APIC_ECTRL);
                pr_debug("... APIC ECTRL: %08x\n", v);
                for (i = 0; i < maxlvt; i++) {
                        v = apic_read(APIC_EILVTn(i));
                        pr_debug("... APIC EILVT%d: %08x\n", i, v);
                }
        }
        pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
        int cpu;

        if (!maxcpu)
                return;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu >= maxcpu)
                        break;
                smp_call_function_single(cpu, print_local_APIC, NULL, 1);
        }
        preempt_enable();
}

static void __init print_PIC(void)
{
        unsigned int v;
        unsigned long flags;

        if (!nr_legacy_irqs())
                return;

        pr_debug("\nprinting PIC contents\n");

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        v = inb(0xa1) << 8 | inb(0x21);
        pr_debug("... PIC IMR: %04x\n", v);

        v = inb(0xa0) << 8 | inb(0x20);
        pr_debug("... PIC IRR: %04x\n", v);

        /* OCW3: select ISR for the next read, then switch back to IRR */
        outb(0x0b, 0xa0);
        outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
        outb(0x0a, 0xa0);
        outb(0x0a, 0x20);

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);

        pr_debug("... PIC ISR: %04x\n", v);

        v = inb(0x4d1) << 8 | inb(0x4d0);
        pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
        int num = -1;

        if (strcmp(arg, "all") == 0) {
                show_lapic = CONFIG_NR_CPUS;
        } else {
                get_option(&arg, &num);
                if (num >= 0)
                        show_lapic = num;
        }

        return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
        if (apic_verbosity == APIC_QUIET)
                return 0;

        print_PIC();

        /* Don't print out if the APIC is not physically present */
        if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
                return 0;

        print_local_APICs(show_lapic);
        print_IO_APICs();

        return 0;
}

late_initcall(print_ICs);