This source file includes the following definitions:
- irq_spread_init_one
- alloc_node_to_cpumask
- free_node_to_cpumask
- build_node_to_cpumask
- get_nodes_in_cpumask
- ncpus_cmp_func
- alloc_nodes_vectors
- __irq_build_affinity_masks
- irq_build_affinity_masks
- default_calc_sets
- irq_create_affinity_masks
- irq_calc_affinity_vectors
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sort.h>

static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				unsigned int cpus_per_vec)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	for ( ; cpus_per_vec > 0; ) {
		cpu = cpumask_first(nmsk);

		/* No CPU left in the node mask, nothing more to assign */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_vec--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_vec > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_vec--;
		}
	}
}

static cpumask_var_t *alloc_node_to_cpumask(void)
{
	cpumask_var_t *masks;
	int node;

	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (node = 0; node < nr_node_ids; node++) {
		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
			goto out_unwind;
	}

	return masks;

out_unwind:
	while (--node >= 0)
		free_cpumask_var(masks[node]);
	kfree(masks);
	return NULL;
}

static void free_node_to_cpumask(cpumask_var_t *masks)
{
	int node;

	for (node = 0; node < nr_node_ids; node++)
		free_cpumask_var(masks[node]);
	kfree(masks);
}

static void build_node_to_cpumask(cpumask_var_t *masks)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}

static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
				const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes = 0;

	/* Calculate the number of nodes in the supplied affinity mask */
	for_each_node(n) {
		if (cpumask_intersects(mask, node_to_cpumask[n])) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}

struct node_vectors {
	unsigned id;

	/* CPU count of the node before distribution, vector count after */
	union {
		unsigned nvectors;
		unsigned ncpus;
	};
};

static int ncpus_cmp_func(const void *l, const void *r)
{
	const struct node_vectors *ln = l;
	const struct node_vectors *rn = r;

	return ln->ncpus - rn->ncpus;
}

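/*
 * Distribute @numvecs vectors over the nodes in @nodemsk so that:
 *
 * 1) every node with active CPUs is assigned at least one vector
 *
 * 2) no node is assigned more vectors than it has active CPUs
 *
 * Active CPUs are the CPUs in '@cpu_mask AND @node_to_cpumask[]' for
 * each node. The total number of assigned vectors may be less than
 * @numvecs when the number of active CPUs is smaller than @numvecs.
 */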
static void alloc_nodes_vectors(unsigned int numvecs,
				cpumask_var_t *node_to_cpumask,
				const struct cpumask *cpu_mask,
				const nodemask_t nodemsk,
				struct cpumask *nmsk,
				struct node_vectors *node_vectors)
{
	unsigned n, remaining_ncpus = 0;

	for (n = 0; n < nr_node_ids; n++) {
		node_vectors[n].id = n;
		node_vectors[n].ncpus = UINT_MAX;
	}

	for_each_node_mask(n, nodemsk) {
		unsigned ncpus;

		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
		ncpus = cpumask_weight(nmsk);

		if (!ncpus)
			continue;
		remaining_ncpus += ncpus;
		node_vectors[n].ncpus = ncpus;
	}

	numvecs = min_t(unsigned, remaining_ncpus, numvecs);

	sort(node_vectors, nr_node_ids, sizeof(node_vectors[0]),
	     ncpus_cmp_func, NULL);

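	/*
	 * Distribute the vectors in proportion to each node's share of the
	 * remaining active CPUs, visiting the nodes from the one with the
	 * fewest CPUs to the one with the most (the sort above):
	 *
	 *	nvectors = max(1, numvecs * ncpus / remaining_ncpus)
	 *
	 * Handling the smallest nodes first means any over-allocation
	 * forced by the 'at least one vector' rule happens while plenty of
	 * vectors are still left for the bigger nodes, so each node ends
	 * up with no more vectors than active CPUs (checked by the
	 * WARN_ON_ONCE() below) and every node with active CPUs still
	 * gets a vector.
	 */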
	for (n = 0; n < nr_node_ids; n++) {
		unsigned nvectors, ncpus;

		if (node_vectors[n].ncpus == UINT_MAX)
			continue;

		WARN_ON_ONCE(numvecs == 0);

		ncpus = node_vectors[n].ncpus;
		nvectors = max_t(unsigned, 1,
				 numvecs * ncpus / remaining_ncpus);
		WARN_ON_ONCE(nvectors > ncpus);

		node_vectors[n].nvectors = nvectors;

		remaining_ncpus -= ncpus;
		numvecs -= nvectors;
	}
}

static int __irq_build_affinity_masks(unsigned int startvec,
				      unsigned int numvecs,
				      unsigned int firstvec,
				      cpumask_var_t *node_to_cpumask,
				      const struct cpumask *cpu_mask,
				      struct cpumask *nmsk,
				      struct irq_affinity_desc *masks)
{
	unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;
	unsigned int last_affv = firstvec + numvecs;
	unsigned int curvec = startvec;
	nodemask_t nodemsk = NODE_MASK_NONE;
	struct node_vectors *node_vectors;

	if (!cpumask_weight(cpu_mask))
		return 0;

	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);

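	/*
	 * If the number of nodes in the mask is greater than or equal the
	 * number of vectors, just spread the vectors across the nodes.
	 */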
	if (numvecs <= nodes) {
		for_each_node_mask(n, nodemsk) {
			cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
				   node_to_cpumask[n]);
			if (++curvec == last_affv)
				curvec = firstvec;
		}
		return numvecs;
	}

	node_vectors = kcalloc(nr_node_ids,
			       sizeof(struct node_vectors),
			       GFP_KERNEL);
	if (!node_vectors)
		return -ENOMEM;

	/* Allocate a vector count for each node */
	alloc_nodes_vectors(numvecs, node_to_cpumask, cpu_mask,
			    nodemsk, nmsk, node_vectors);

	for (i = 0; i < nr_node_ids; i++) {
		unsigned int ncpus, v;
		struct node_vectors *nv = &node_vectors[i];

		if (nv->nvectors == UINT_MAX)
			continue;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
		ncpus = cpumask_weight(nmsk);
		if (!ncpus)
			continue;

		WARN_ON_ONCE(nv->nvectors > ncpus);

		/* Account for rounding errors */
		extra_vecs = ncpus - nv->nvectors * (ncpus / nv->nvectors);

		/* Spread the vectors assigned to this node over its CPUs */
		for (v = 0; v < nv->nvectors; v++, curvec++) {
			cpus_per_vec = ncpus / nv->nvectors;

			/* Account for extra vectors to compensate rounding errors */
			if (extra_vecs) {
				cpus_per_vec++;
				--extra_vecs;
			}

			/*
			 * wrapping has to be considered given 'startvec'
			 * may start anywhere
			 */
			if (curvec >= last_affv)
				curvec = firstvec;
			irq_spread_init_one(&masks[curvec].mask, nmsk,
					    cpus_per_vec);
		}
		done += nv->nvectors;
	}
	kfree(node_vectors);
	return done;
}

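/*
 * Build affinity masks in two stages:
 *	1) spread the present CPUs on the vectors
 *	2) spread the other possible CPUs on the vectors
 */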
static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
				    unsigned int firstvec,
				    struct irq_affinity_desc *masks)
{
	unsigned int curvec = startvec, nr_present = 0, nr_others = 0;
	cpumask_var_t *node_to_cpumask;
	cpumask_var_t nmsk, npresmsk;
	int ret = -ENOMEM;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return ret;

	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
		goto fail_nmsk;

	node_to_cpumask = alloc_node_to_cpumask();
	if (!node_to_cpumask)
		goto fail_npresmsk;

	/* Stabilize the cpumasks against CPU hotplug */
	get_online_cpus();
	build_node_to_cpumask(node_to_cpumask);

	/* Spread on present CPUs starting from 'startvec' */
	ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
					 node_to_cpumask, cpu_present_mask,
					 nmsk, masks);
	if (ret < 0)
		goto fail_build_affinity;
	nr_present = ret;

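	/*
	 * Spread on non-present CPUs starting from the next vector to be
	 * handled. If the spreading of present CPUs already exhausted the
	 * vector space, assign the non-present CPUs to the already spread
	 * out vectors.
	 */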
	if (nr_present >= numvecs)
		curvec = firstvec;
	else
		curvec = firstvec + nr_present;
	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
	ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
					 node_to_cpumask, npresmsk, nmsk,
					 masks);
	if (ret >= 0)
		nr_others = ret;

fail_build_affinity:
	put_online_cpus();

	if (ret >= 0)
		WARN_ON(nr_present + nr_others < numvecs);

	free_node_to_cpumask(node_to_cpumask);

fail_npresmsk:
	free_cpumask_var(npresmsk);

fail_nmsk:
	free_cpumask_var(nmsk);
	return ret < 0 ? ret : 0;
}

static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
{
	affd->nr_sets = 1;
	affd->set_size[0] = affvecs;
}

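/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
 */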
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
	unsigned int affvecs, curvec, usedvecs, i;
	struct irq_affinity_desc *masks = NULL;

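	/*
	 * Determine the number of vectors which need interrupt affinities
	 * assigned. If the pre/post request exhausts the available vectors
	 * then nothing to do here except for invoking the calc_sets()
	 * callback so the device driver can adjust to the situation.
	 */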
	if (nvecs > affd->pre_vectors + affd->post_vectors)
		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
	else
		affvecs = 0;

	/*
	 * Simple invocations do not provide a calc_sets() callback. Install
	 * the generic one.
	 */
	if (!affd->calc_sets)
		affd->calc_sets = default_calc_sets;

	/* Recalculate the sets */
	affd->calc_sets(affd, affvecs);

	if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
		return NULL;

	/* Nothing to assign? */
	if (!affvecs)
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return NULL;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

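	/*
	 * Spread the affinitized vectors starting from affd->pre_vectors.
	 * If there are multiple interrupt sets, build the masks for each
	 * set separately.
	 */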
	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
		unsigned int this_vecs = affd->set_size[i];
		int ret;

		ret = irq_build_affinity_masks(curvec, this_vecs,
					       curvec, masks);
		if (ret) {
			kfree(masks);
			return NULL;
		}
		curvec += this_vecs;
		usedvecs += this_vecs;
	}

	/* Fill out vectors at the end that don't need affinity */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors + affvecs;
	else
		curvec = affd->pre_vectors + usedvecs;
	for (; curvec < nvecs; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/* Mark the managed interrupts */
	for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
		masks[i].is_managed = 1;

	return masks;
}

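/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @minvec:	The minimum number of vectors available
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 */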
unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd)
{
	unsigned int resv = affd->pre_vectors + affd->post_vectors;
	unsigned int set_vecs;

	if (resv > minvec)
		return 0;

	if (affd->calc_sets) {
		set_vecs = maxvec - resv;
	} else {
		get_online_cpus();
		set_vecs = cpumask_weight(cpu_possible_mask);
		put_online_cpus();
	}

	return resv + min(set_vecs, maxvec - resv);
}