This source file includes the following definitions:
- store_cpu_topology
- acpi_cpu_is_threaded
- parse_acpi_topology
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/acpi.h>
15 #include <linux/arch_topology.h>
16 #include <linux/cacheinfo.h>
17 #include <linux/init.h>
18 #include <linux/percpu.h>
19
20 #include <asm/cpu.h>
21 #include <asm/cputype.h>
22 #include <asm/topology.h>
23
24 void store_cpu_topology(unsigned int cpuid)
25 {
26 struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
27 u64 mpidr;
28
29 if (cpuid_topo->package_id != -1)
30 goto topology_populated;
31
32 mpidr = read_cpuid_mpidr();
33
34
35 if (mpidr & MPIDR_UP_BITMASK)
36 return;
37
38
39 if (mpidr & MPIDR_MT_BITMASK) {
40
41 cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
42 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
43 cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
44 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
45 } else {
46
47 cpuid_topo->thread_id = -1;
48 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
49 cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
50 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
51 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
52 }
53
54 pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
55 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
56 cpuid_topo->thread_id, mpidr);
57
58 topology_populated:
59 update_siblings_masks(cpuid);
60 }
61
62 #ifdef CONFIG_ACPI
63 static bool __init acpi_cpu_is_threaded(int cpu)
64 {
65 int is_threaded = acpi_pptt_cpu_is_thread(cpu);
66
67
68
69
70
71 if (is_threaded < 0)
72 is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
73
74 return !!is_threaded;
75 }
76
77
78
79
80
81 int __init parse_acpi_topology(void)
82 {
83 int cpu, topology_id;
84
85 if (acpi_disabled)
86 return 0;
87
88 for_each_possible_cpu(cpu) {
89 int i, cache_id;
90
91 topology_id = find_acpi_cpu_topology(cpu, 0);
92 if (topology_id < 0)
93 return topology_id;
94
95 if (acpi_cpu_is_threaded(cpu)) {
96 cpu_topology[cpu].thread_id = topology_id;
97 topology_id = find_acpi_cpu_topology(cpu, 1);
98 cpu_topology[cpu].core_id = topology_id;
99 } else {
100 cpu_topology[cpu].thread_id = -1;
101 cpu_topology[cpu].core_id = topology_id;
102 }
103 topology_id = find_acpi_cpu_topology_package(cpu);
104 cpu_topology[cpu].package_id = topology_id;
105
106 i = acpi_find_last_cache_level(cpu);
107
108 if (i > 0) {
109
110
111
112
113 cache_id = find_acpi_cpu_cache_topology(cpu, i);
114 if (cache_id > 0)
115 cpu_topology[cpu].llc_id = cache_id;
116 }
117 }
118
119 return 0;
120 }
121 #endif
122
123