This source file includes the following definitions:
- irq_needs_fixup
- migrate_one_irq
- irq_migrate_all_off_this_cpu
- irq_restore_affinity_of_irq
- irq_affinity_online_cpu
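
Context for the entry points below (an illustrative sketch, not part of this file): irq_migrate_all_off_this_cpu() is called from architecture code on the CPU being unplugged (for example from __cpu_disable() on arm/arm64), while irq_affinity_online_cpu() runs as a CPU hotplug state callback when a CPU comes online. In mainline kernels the latter is registered in kernel/cpu.c roughly as follows (field layout may differ across versions):

        [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
                .name                   = "irq/affinity:online",
                .startup.single         = irq_affinity_online_cpu,
                .teardown.single        = NULL,
        },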
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

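/* Without CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK this checks the general affinity mask */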
static inline bool irq_needs_fixup(struct irq_data *d)
{
        const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
        unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
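        /*
         * The cpumask_empty() check is a workaround for interrupt chips
         * which do not implement effective affinity although the
         * architecture has enabled the config switch. Use the general
         * affinity mask instead.
         */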
        if (cpumask_empty(m))
                m = irq_data_get_affinity_mask(d);

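        /*
         * Sanity check. If the mask is not empty when excluding the
         * outgoing CPU then it must contain at least one online CPU,
         * because the outgoing CPU has already been removed from the
         * online mask.
         */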
        if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
            cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
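                /*
                 * If this happens then an IRQ fixup was missed at some
                 * earlier point. Warn about it and enforce the fixup.
                 */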
                pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
                        cpumask_pr_args(m), d->irq, cpu);
                return true;
        }
#endif
        return cpumask_test_cpu(cpu, m);
}

static bool migrate_one_irq(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
        const struct cpumask *affinity;
        bool brokeaff = false;
        int err;

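        /*
         * The IRQ chip might already be torn down, but the irq descriptor
         * is still in the radix tree. Also, if the chip has no affinity
         * setter, nothing can be done here.
         */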
        if (!chip || !chip->irq_set_affinity) {
                pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
                return false;
        }

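        /*
         * No move required, if:
         * - the interrupt is per CPU
         * - the interrupt is not started
         * - the affinity mask does not include this CPU
         *
         * Note: desc->action is not checked, as this might be a chained
         * interrupt.
         */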
        if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
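                /*
                 * If an irq move is pending, abort it if the dying CPU is
                 * the sole target.
                 */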
                irq_fixup_move_pending(desc, false);
                return false;
        }

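        /*
         * Complete an eventually pending irq move cleanup. If this
         * interrupt was moved in hard irq context, the vectors need to
         * be cleaned up here; it cannot wait until the interrupt actually
         * fires again, as this CPU is going away.
         */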
        irq_force_complete_move(desc);

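        /*
         * If a setaffinity is pending, try to reuse the pending mask so
         * the last affinity change does not get lost. If there is no move
         * pending, use the current affinity mask.
         */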
        if (irq_fixup_move_pending(desc, true))
                affinity = irq_desc_get_pending_mask(desc);
        else
                affinity = irq_data_get_affinity_mask(d);

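        /* Mask the chip for interrupts which cannot be moved in process context */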
        if (maskchip && chip->irq_mask)
                chip->irq_mask(d);

        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
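                /*
                 * If the interrupt is managed, then shut it down and leave
                 * the affinity untouched.
                 */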
                if (irqd_affinity_is_managed(d)) {
                        irqd_set_managed_shutdown(d);
                        irq_shutdown_and_deactivate(desc);
                        return false;
                }
                affinity = cpu_online_mask;
                brokeaff = true;
        }

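        /*
         * Do not set the force argument of irq_do_set_affinity() as this
         * disables the masking of offline CPUs from the supplied affinity
         * mask and therefore might keep/reassign the irq to the outgoing
         * CPU.
         */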
        err = irq_do_set_affinity(d, affinity, false);
        if (err) {
                pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
                                    d->irq, err);
                brokeaff = false;
        }

        if (maskchip && chip->irq_unmask)
                chip->irq_unmask(d);

        return brokeaff;
}

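/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from an offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: all active IRQs are iterated, whether they have an action
 * attached or not, in order to catch chained interrupts as well.
 */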
void irq_migrate_all_off_this_cpu(void)
{
        struct irq_desc *desc;
        unsigned int irq;

        for_each_active_irq(irq) {
                bool affinity_broken;

                desc = irq_to_desc(irq);
                raw_spin_lock(&desc->lock);
                affinity_broken = migrate_one_irq(desc);
                raw_spin_unlock(&desc->lock);

                if (affinity_broken) {
                        pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
                                            irq, smp_processor_id());
                }
        }
}

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
        struct irq_data *data = irq_desc_get_irq_data(desc);
        const struct cpumask *affinity = irq_data_get_affinity_mask(data);

        if (!irqd_affinity_is_managed(data) || !desc->action ||
            !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
                return;

        if (irqd_is_managed_and_shutdown(data)) {
                irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
                return;
        }

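        /*
         * If the interrupt can only be directed to a single target CPU
         * then it is already assigned to a CPU in the affinity mask. No
         * point in trying to move it around.
         */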
        if (!irqd_is_single_target(data))
                irq_set_affinity_locked(data, affinity, false);
}

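/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu: Upcoming CPU for which interrupts should be restored
 */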
int irq_affinity_online_cpu(unsigned int cpu)
{
        struct irq_desc *desc;
        unsigned int irq;

        irq_lock_sparse();
        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                raw_spin_lock_irq(&desc->lock);
                irq_restore_affinity_of_irq(desc, cpu);
                raw_spin_unlock_irq(&desc->lock);
        }
        irq_unlock_sparse();

        return 0;
}