Searched refs:cpu (Results 1 - 200 of 3007) sorted by relevance


/linux-4.1.27/drivers/xen/
cpu_hotplug.c
9 #include <asm/cpu.h>
11 static void enable_hotplug_cpu(int cpu) enable_hotplug_cpu() argument
13 if (!cpu_present(cpu)) enable_hotplug_cpu()
14 arch_register_cpu(cpu); enable_hotplug_cpu()
16 set_cpu_present(cpu, true); enable_hotplug_cpu()
19 static void disable_hotplug_cpu(int cpu) disable_hotplug_cpu() argument
21 if (cpu_present(cpu)) disable_hotplug_cpu()
22 arch_unregister_cpu(cpu); disable_hotplug_cpu()
24 set_cpu_present(cpu, false); disable_hotplug_cpu()
27 static int vcpu_online(unsigned int cpu) vcpu_online() argument
32 sprintf(dir, "cpu/%u", cpu); vcpu_online()
36 pr_err("Unable to read cpu state\n"); vcpu_online()
45 pr_err("unknown state(%s) on CPU%d\n", state, cpu); vcpu_online()
48 static void vcpu_hotplug(unsigned int cpu) vcpu_hotplug() argument
50 if (!cpu_possible(cpu)) vcpu_hotplug()
53 switch (vcpu_online(cpu)) { vcpu_hotplug()
55 enable_hotplug_cpu(cpu); vcpu_hotplug()
58 (void)cpu_down(cpu); vcpu_hotplug()
59 disable_hotplug_cpu(cpu); vcpu_hotplug()
69 unsigned int cpu; handle_vcpu_hotplug_event() local
73 cpustr = strstr(node, "cpu/"); handle_vcpu_hotplug_event()
75 sscanf(cpustr, "cpu/%u", &cpu); handle_vcpu_hotplug_event()
76 vcpu_hotplug(cpu); handle_vcpu_hotplug_event()
83 int cpu; setup_cpu_watcher() local
85 .node = "cpu", setup_cpu_watcher()
90 for_each_possible_cpu(cpu) { for_each_possible_cpu()
91 if (vcpu_online(cpu) == 0) { for_each_possible_cpu()
92 (void)cpu_down(cpu); for_each_possible_cpu()
93 set_cpu_present(cpu, false); for_each_possible_cpu()
/linux-4.1.27/arch/s390/include/asm/
topology.h
7 struct cpu;
23 #define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
24 #define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
25 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
26 #define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
27 #define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
28 #define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
29 #define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
33 int topology_cpu_init(struct cpu *);
38 const struct cpumask *cpu_coregroup_mask(int cpu);
43 static inline int topology_cpu_init(struct cpu *cpu) { return 0; } topology_expect_change() argument
smp.h
22 extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
24 extern void arch_send_call_function_single_ipi(int cpu);
31 extern int smp_store_status(int cpu);
32 extern int smp_vcpu_scheduled(int cpu);
33 extern void smp_yield_cpu(int cpu);
34 extern void smp_cpu_set_polarization(int cpu, int val);
35 extern int smp_cpu_get_polarization(int cpu);
53 static inline int smp_store_status(int cpu) { return 0; } smp_vcpu_scheduled() argument
54 static inline int smp_vcpu_scheduled(int cpu) { return 1; } smp_yield_cpu() argument
55 static inline void smp_yield_cpu(int cpu) { } smp_fill_possible_mask() argument
73 extern void __cpu_die(unsigned int cpu);
/linux-4.1.27/arch/arm/mach-hisi/
core.h
6 extern void hi3xxx_set_cpu_jump(int cpu, void *jump_addr);
7 extern int hi3xxx_get_cpu_jump(int cpu);
11 extern void hi3xxx_cpu_die(unsigned int cpu);
12 extern int hi3xxx_cpu_kill(unsigned int cpu);
13 extern void hi3xxx_set_cpu(int cpu, bool enable);
16 extern void hix5hd2_set_cpu(int cpu, bool enable);
17 extern void hix5hd2_cpu_die(unsigned int cpu);
20 extern void hip01_set_cpu(int cpu, bool enable);
21 extern void hip01_cpu_die(unsigned int cpu);
hotplug.c
10 #include <linux/cpu.h>
79 static void set_cpu_hi3620(int cpu, bool enable) set_cpu_hi3620() argument
85 if ((cpu == 2) || (cpu == 3)) set_cpu_hi3620()
86 writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), set_cpu_hi3620()
91 writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREEN); set_cpu_hi3620()
96 writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS); set_cpu_hi3620()
99 writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN); set_cpu_hi3620()
102 if ((cpu == 2) || (cpu == 3)) set_cpu_hi3620()
103 writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), set_cpu_hi3620()
109 val &= ~(CPU0_WFI_MASK_CFG << cpu); set_cpu_hi3620()
115 writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS); set_cpu_hi3620()
119 val |= (CPU0_WFI_MASK_CFG << cpu); set_cpu_hi3620()
123 writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREDIS); set_cpu_hi3620()
125 if ((cpu == 2) || (cpu == 3)) { set_cpu_hi3620()
127 writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), set_cpu_hi3620()
135 writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN); set_cpu_hi3620()
137 if ((cpu == 2) || (cpu == 3)) { set_cpu_hi3620()
139 writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), set_cpu_hi3620()
160 void hi3xxx_set_cpu(int cpu, bool enable) hi3xxx_set_cpu() argument
168 set_cpu_hi3620(cpu, enable); hi3xxx_set_cpu()
183 void hix5hd2_set_cpu(int cpu, bool enable) hix5hd2_set_cpu() argument
215 void hip01_set_cpu(int cpu, bool enable) hip01_set_cpu() argument
265 void hi3xxx_cpu_die(unsigned int cpu) hi3xxx_cpu_die() argument
268 hi3xxx_set_cpu_jump(cpu, phys_to_virt(0)); hi3xxx_cpu_die()
272 panic("cpu %d unexpectedly exit from shutdown\n", cpu); hi3xxx_cpu_die()
275 int hi3xxx_cpu_kill(unsigned int cpu) hi3xxx_cpu_kill() argument
279 while (hi3xxx_get_cpu_jump(cpu)) hi3xxx_cpu_kill()
282 hi3xxx_set_cpu(cpu, false); hi3xxx_cpu_kill()
286 void hix5hd2_cpu_die(unsigned int cpu) hix5hd2_cpu_die() argument
289 hix5hd2_set_cpu(cpu, false); hix5hd2_cpu_die()
platsmp.c
26 void hi3xxx_set_cpu_jump(int cpu, void *jump_addr) hi3xxx_set_cpu_jump() argument
28 cpu = cpu_logical_map(cpu); hi3xxx_set_cpu_jump()
29 if (!cpu || !ctrl_base) hi3xxx_set_cpu_jump()
31 writel_relaxed(virt_to_phys(jump_addr), ctrl_base + ((cpu - 1) << 2)); hi3xxx_set_cpu_jump()
34 int hi3xxx_get_cpu_jump(int cpu) hi3xxx_get_cpu_jump() argument
36 cpu = cpu_logical_map(cpu); hi3xxx_get_cpu_jump()
37 if (!cpu || !ctrl_base) hi3xxx_get_cpu_jump()
39 return readl_relaxed(ctrl_base + ((cpu - 1) << 2)); hi3xxx_get_cpu_jump()
84 static int hi3xxx_boot_secondary(unsigned int cpu, struct task_struct *idle) hi3xxx_boot_secondary() argument
86 hi3xxx_set_cpu(cpu, true); hi3xxx_boot_secondary()
87 hi3xxx_set_cpu_jump(cpu, secondary_startup); hi3xxx_boot_secondary()
88 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); hi3xxx_boot_secondary()
117 static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle) hix5hd2_boot_secondary() argument
123 hix5hd2_set_cpu(cpu, true); hix5hd2_boot_secondary()
124 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); hix5hd2_boot_secondary()
152 static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle) hip01_boot_secondary() argument
174 hip01_set_cpu(cpu, true); hip01_boot_secondary()
platmcpm.c
97 static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster) hip04_mcpm_power_up() argument
104 if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) hip04_mcpm_power_up()
109 if (hip04_cpu_table[cluster][cpu]) hip04_mcpm_power_up()
123 data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \ hip04_mcpm_power_up()
124 CORE_DEBUG_RESET_BIT(cpu); hip04_mcpm_power_up()
135 hip04_cpu_table[cluster][cpu]++; hip04_mcpm_power_up()
143 unsigned int mpidr, cpu, cluster; hip04_mcpm_power_down() local
147 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); hip04_mcpm_power_down()
150 __mcpm_cpu_going_down(cpu, cluster); hip04_mcpm_power_down()
154 hip04_cpu_table[cluster][cpu]--; hip04_mcpm_power_down()
155 if (hip04_cpu_table[cluster][cpu] == 1) { hip04_mcpm_power_down()
158 } else if (hip04_cpu_table[cluster][cpu] > 1) { hip04_mcpm_power_down()
159 pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); hip04_mcpm_power_down()
164 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { hip04_mcpm_power_down()
180 __mcpm_cpu_down(cpu, cluster); hip04_mcpm_power_down()
186 static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster) hip04_mcpm_wait_for_powerdown() argument
192 cpu >= HIP04_MAX_CPUS_PER_CLUSTER); hip04_mcpm_wait_for_powerdown()
197 if (hip04_cpu_table[cluster][cpu]) { hip04_mcpm_wait_for_powerdown()
203 if (data & CORE_WFI_STATUS(cpu)) hip04_mcpm_wait_for_powerdown()
212 data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \ hip04_mcpm_wait_for_powerdown()
213 CORE_DEBUG_RESET_BIT(cpu); hip04_mcpm_wait_for_powerdown()
218 if (data & CORE_RESET_STATUS(cpu)) hip04_mcpm_wait_for_powerdown()
232 unsigned int mpidr, cpu, cluster; hip04_mcpm_powered_up() local
235 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); hip04_mcpm_powered_up()
239 if (!hip04_cpu_table[cluster][cpu]) hip04_mcpm_powered_up()
240 hip04_cpu_table[cluster][cpu] = 1; hip04_mcpm_powered_up()
285 unsigned int mpidr, cpu, cluster; hip04_cpu_table_init() local
288 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); hip04_cpu_table_init()
292 cpu >= HIP04_MAX_CPUS_PER_CLUSTER) { hip04_cpu_table_init()
297 hip04_cpu_table[cluster][cpu] = 1; hip04_cpu_table_init()
/linux-4.1.27/arch/sparc/include/asm/
topology_64.h
8 static inline int cpu_to_node(int cpu) cpu_to_node() argument
10 return numa_cpu_lookup_table[cpu]; cpu_to_node()
41 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
42 #define topology_core_id(cpu) (cpu_data(cpu).core_id)
43 #define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
44 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
49 static inline const struct cpumask *cpu_coregroup_mask(int cpu) cpu_coregroup_mask() argument
51 return &cpu_core_map[cpu]; cpu_coregroup_mask()
/linux-4.1.27/tools/testing/selftests/cpu-hotplug/
Makefile
3 TEST_PROGS := cpu-on-off-test.sh
8 @/bin/bash ./cpu-on-off-test.sh -a || echo "cpu-hotplug selftests: [FAIL]"
cpu-on-off-test.sh
23 if ! ls $SYSFS/devices/system/cpu/cpu* > /dev/null 2>&1; then
24 echo $msg cpu hotplug is not supported >&2
29 online_cpus=`cat $SYSFS/devices/system/cpu/online`
33 offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
49 for cpu in $SYSFS/devices/system/cpu/cpu*; do
50 if [ -f $cpu/online ] && grep -q $state $cpu/online; then
51 echo ${cpu##/*/cpu}
68 grep -q 1 $SYSFS/devices/system/cpu/cpu$1/online
73 grep -q 0 $SYSFS/devices/system/cpu/cpu$1/online
78 echo 1 > $SYSFS/devices/system/cpu/cpu$1/online
83 echo 0 > $SYSFS/devices/system/cpu/cpu$1/online
88 local cpu=$1
90 if ! online_cpu $cpu; then
91 echo $FUNCNAME $cpu: unexpected fail >&2
92 elif ! cpu_is_online $cpu; then
93 echo $FUNCNAME $cpu: unexpected offline >&2
99 local cpu=$1
101 if online_cpu $cpu 2> /dev/null; then
102 echo $FUNCNAME $cpu: unexpected success >&2
103 elif ! cpu_is_offline $cpu; then
104 echo $FUNCNAME $cpu: unexpected online >&2
110 local cpu=$1
112 if ! offline_cpu $cpu; then
113 echo $FUNCNAME $cpu: unexpected fail >&2
114 elif ! cpu_is_offline $cpu; then
115 echo $FUNCNAME $cpu: unexpected offline >&2
121 local cpu=$1
123 if offline_cpu $cpu 2> /dev/null; then
124 echo $FUNCNAME $cpu: unexpected success >&2
125 elif ! cpu_is_online $cpu; then
126 echo $FUNCNAME $cpu: unexpected offline >&2
148 echo -e "\t default offline one cpu"
166 # Safe test (default) - offline and online one cpu
169 echo "Limited scope test: one hotplug cpu"
170 echo -e "\t (leaves cpu in the original state):"
171 echo -e "\t online to offline to online: cpu $online_max"
176 echo -e "\t offline to online to offline: cpu $offline_max"
191 for cpu in `hotplaggable_offline_cpus`; do
192 online_cpu_expect_success $cpu
198 for cpu in `hotpluggable_online_cpus`; do
199 offline_cpu_expect_success $cpu
205 for cpu in `hotplaggable_offline_cpus`; do
206 online_cpu_expect_success $cpu
210 # Test with cpu notifier error injection
214 NOTIFIER_ERR_INJECT_DIR=$DEBUGFS/notifier-error-inject/cpu
220 /sbin/modprobe -q -r cpu-notifier-error-inject
221 /sbin/modprobe -q cpu-notifier-error-inject priority=$priority
229 echo $msg cpu-notifier-error-inject module is not available >&2
240 for cpu in `hotpluggable_online_cpus`; do
241 offline_cpu_expect_success $cpu
248 for cpu in `hotplaggable_offline_cpus`; do
249 online_cpu_expect_fail $cpu
256 for cpu in `hotplaggable_offline_cpus`; do
257 online_cpu_expect_success $cpu
264 for cpu in `hotpluggable_online_cpus`; do
265 offline_cpu_expect_fail $cpu
269 /sbin/modprobe -q -r cpu-notifier-error-inject
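The selftest above drives hotplug entirely through sysfs writes and reads. As a rough stand-alone illustration of the same offline/online round trip in C (assuming a hotpluggable CPU 1 and root privileges; the helper below is hypothetical and not part of the selftest):

/* Hypothetical helper mirroring what cpu-on-off-test.sh does via echo/grep:
 * write "0" or "1" to /sys/devices/system/cpu/cpuN/online. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

static int cpu_set_online(unsigned int cpu, int online)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/online", cpu);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;		/* CPU0 often has no 'online' file */
	if (write(fd, online ? "1" : "0", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Offline CPU 1, then bring it back, as the "safe" selftest path does. */
	if (cpu_set_online(1, 0) || cpu_set_online(1, 1)) {
		perror("cpu1 hotplug");
		return EXIT_FAILURE;
	}
	puts("cpu1 offline/online round trip ok");
	return EXIT_SUCCESS;
}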
/linux-4.1.27/arch/mips/include/asm/
topology.h
15 #define topology_physical_package_id(cpu) (cpu_data[cpu].package)
16 #define topology_core_id(cpu) (cpu_data[cpu].core)
17 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
18 #define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu])
r4k-timer.h
15 extern void synchronise_count_master(int cpu);
16 extern void synchronise_count_slave(int cpu);
20 static inline void synchronise_count_master(int cpu) synchronise_count_master() argument
24 static inline void synchronise_count_slave(int cpu) synchronise_count_slave() argument
smp.h
28 #define raw_smp_processor_id() (current_thread_info()->cpu)
30 /* Map from cpu id to sequential logical cpu number. This will only
33 #define cpu_number_map(cpu) __cpu_number_map[cpu]
35 /* The reverse map from sequential logical cpu number to cpu id. */
37 #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
45 /* Used by kexec crashdump to save all cpu's state */
61 static inline void smp_send_reschedule(int cpu) smp_send_reschedule() argument
65 mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); smp_send_reschedule()
76 static inline void __cpu_die(unsigned int cpu) __cpu_die() argument
80 mp_ops->cpu_die(cpu); __cpu_die()
88 static inline void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
92 mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION); arch_send_call_function_single_ipi()
mmu_context.h
53 * For the fast tlb miss handlers, we keep a per cpu array of pointers
85 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
86 #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
87 #define asid_cache(cpu) (cpu_data[cpu].asid_cache)
102 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) get_new_mmu_context() argument
105 unsigned long asid = asid_cache(cpu); get_new_mmu_context()
119 cpu_context(cpu, mm) = asid_cache(cpu) = asid; get_new_mmu_context()
142 unsigned int cpu = smp_processor_id(); switch_mm() local
148 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) switch_mm()
149 get_new_mmu_context(next, cpu); switch_mm()
150 write_c0_entryhi(cpu_asid(cpu, next)); switch_mm()
157 cpumask_clear_cpu(cpu, mm_cpumask(prev)); switch_mm()
158 cpumask_set_cpu(cpu, mm_cpumask(next)); switch_mm()
182 unsigned int cpu = smp_processor_id(); activate_mm() local
188 get_new_mmu_context(next, cpu); activate_mm()
190 write_c0_entryhi(cpu_asid(cpu, next)); activate_mm()
194 cpumask_clear_cpu(cpu, mm_cpumask(prev)); activate_mm()
195 cpumask_set_cpu(cpu, mm_cpumask(next)); activate_mm()
206 drop_mmu_context(struct mm_struct *mm, unsigned cpu) drop_mmu_context() argument
213 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { drop_mmu_context()
214 get_new_mmu_context(mm, cpu); drop_mmu_context()
215 write_c0_entryhi(cpu_asid(cpu, mm)); drop_mmu_context()
218 cpu_context(cpu, mm) = 0; drop_mmu_context()
bugs.h
16 #include <asm/cpu.h>
17 #include <asm/cpu-info.h>
35 unsigned int cpu = smp_processor_id(); check_bugs() local
37 cpu_data[cpu].udelay_val = loops_per_jiffy; check_bugs()
cmp.h
11 extern void cmp_boot_secondary(int cpu, struct task_struct *t);
16 extern void cmp_send_ipi(int cpu, unsigned int action);
/linux-4.1.27/arch/powerpc/platforms/pseries/
offline_states.h
13 extern enum cpu_state_vals get_cpu_current_state(int cpu);
14 extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
15 extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
16 extern void set_default_offline_state(int cpu);
18 static inline enum cpu_state_vals get_cpu_current_state(int cpu) get_cpu_current_state() argument
23 static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state) set_cpu_current_state() argument
27 static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state) set_preferred_offline_state() argument
31 static inline void set_default_offline_state(int cpu) set_default_offline_state() argument
36 extern enum cpu_state_vals get_preferred_offline_state(int cpu);
hotplug-cpu.c
25 #include <linux/cpu.h>
64 enum cpu_state_vals get_cpu_current_state(int cpu) get_cpu_current_state() argument
66 return per_cpu(current_state, cpu); get_cpu_current_state()
69 void set_cpu_current_state(int cpu, enum cpu_state_vals state) set_cpu_current_state() argument
71 per_cpu(current_state, cpu) = state; set_cpu_current_state()
74 enum cpu_state_vals get_preferred_offline_state(int cpu) get_preferred_offline_state() argument
76 return per_cpu(preferred_offline_state, cpu); get_preferred_offline_state()
79 void set_preferred_offline_state(int cpu, enum cpu_state_vals state) set_preferred_offline_state() argument
81 per_cpu(preferred_offline_state, cpu) = state; set_preferred_offline_state()
84 void set_default_offline_state(int cpu) set_default_offline_state() argument
86 per_cpu(preferred_offline_state, cpu) = default_offline_state; set_default_offline_state()
103 printk("cpu %u (hwid %u) Ready to die...\n", rtas_stop_self()
112 unsigned int cpu = smp_processor_id(); pseries_mach_cpu_die() local
120 if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { pseries_mach_cpu_die()
121 set_cpu_current_state(cpu, CPU_STATE_INACTIVE); pseries_mach_cpu_die()
131 while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { pseries_mach_cpu_die()
146 if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { pseries_mach_cpu_die()
160 WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE); pseries_mach_cpu_die()
162 set_cpu_current_state(cpu, CPU_STATE_OFFLINE); pseries_mach_cpu_die()
173 int cpu = smp_processor_id(); pseries_cpu_disable() local
175 set_cpu_online(cpu, false); pseries_cpu_disable()
179 if (cpu == boot_cpuid) pseries_cpu_disable()
188 * pseries_cpu_die: Wait for the cpu to die.
189 * @cpu: logical processor id of the CPU whose death we're awaiting.
192 * the cpu-offline. Here we wait for long enough to allow the cpu in question
193 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
196 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
199 static void pseries_cpu_die(unsigned int cpu) pseries_cpu_die() argument
203 unsigned int pcpu = get_hard_smp_processor_id(cpu); pseries_cpu_die()
205 if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { pseries_cpu_die()
208 if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) { pseries_cpu_die()
214 } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { pseries_cpu_die()
226 printk("Querying DEAD? cpu %i (%i) shows %i\n", pseries_cpu_die()
227 cpu, pcpu, cpu_status); pseries_cpu_die()
235 paca[cpu].cpu_start = 0; pseries_cpu_die()
239 * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
240 * here is that a cpu device node may represent up to two logical cpus
247 unsigned int cpu; pseries_add_processor() local
273 printk(KERN_ERR "Cannot add cpu %s; this system configuration" pseries_add_processor()
281 /* Found a range where we can insert the new cpu(s) */ pseries_add_processor()
293 for_each_cpu(cpu, tmp) { for_each_cpu()
294 BUG_ON(cpu_present(cpu)); for_each_cpu()
295 set_cpu_present(cpu, true); for_each_cpu()
296 set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++)); for_each_cpu()
307 * Update the present map for a cpu node which is going away, and set
313 unsigned int cpu; pseries_remove_processor() local
327 for_each_present_cpu(cpu) { for_each_present_cpu()
328 if (get_hard_smp_processor_id(cpu) != thread) for_each_present_cpu()
330 BUG_ON(cpu_online(cpu)); for_each_present_cpu()
331 set_cpu_present(cpu, false); for_each_present_cpu()
332 set_hard_smp_processor_id(cpu, -1); for_each_present_cpu()
335 if (cpu >= nr_cpu_ids)
336 printk(KERN_WARNING "Could not find cpu to remove "
385 int cpu; pseries_cpu_hotplug_init() local
400 qcss_tok = rtas_token("query-cpu-stopped-state"); pseries_cpu_hotplug_init()
419 for_each_online_cpu(cpu) pseries_cpu_hotplug_init()
420 set_default_offline_state(cpu); pseries_cpu_hotplug_init()
/linux-4.1.27/arch/arm64/include/asm/
topology.h
18 #define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
19 #define topology_core_id(cpu) (cpu_topology[cpu].core_id)
20 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
21 #define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
25 const struct cpumask *cpu_coregroup_mask(int cpu);
cpu_ops.h
27 * @name: Name of the property as appears in a devicetree cpu node's
30 * devicetree, for a given cpu node and proposed logical id.
31 * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
34 * @cpu_boot: Boots a cpu into the kernel.
36 * synchronisation. Called from the cpu being booted.
37 * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
39 * from the cpu to be killed.
40 * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
41 * cpu being killed.
42 * @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
44 * devicetree, for a given cpu node and proposed logical id.
45 * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
56 int (*cpu_disable)(unsigned int cpu);
57 void (*cpu_die)(unsigned int cpu);
58 int (*cpu_kill)(unsigned int cpu);
67 int __init cpu_read_ops(struct device_node *dn, int cpu);
cpuidle.h
7 extern int arm_cpuidle_init(unsigned int cpu);
10 static inline int arm_cpuidle_init(unsigned int cpu) arm_cpuidle_init() argument
/linux-4.1.27/arch/arm/include/asm/
topology.h
18 #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
19 #define topology_core_id(cpu) (cpu_topology[cpu].core_id)
20 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
21 #define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
25 const struct cpumask *cpu_coregroup_mask(int cpu);
hardirq.h
19 #define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
20 #define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
23 u64 smp_irq_stat_cpu(unsigned int cpu);
25 #define smp_irq_stat_cpu(cpu) 0
smp_plat.h
11 #include <asm/cpu.h>
30 * smp_cpuid_part() - return part id for a given cpu
31 * @cpu: logical cpu id.
33 * Return: part id of logical cpu passed as argument.
35 static inline unsigned int smp_cpuid_part(int cpu) smp_cpuid_part() argument
37 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu); smp_cpuid_part()
72 #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
74 * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
77 * Returns the cpu logical index or -EINVAL on look-up error
81 int cpu; get_logical_index() local
82 for (cpu = 0; cpu < nr_cpu_ids; cpu++) get_logical_index()
83 if (cpu_logical_map(cpu) == mpidr) get_logical_index()
84 return cpu; get_logical_index()
cpu.h
2 * arch/arm/include/asm/cpu.h
14 #include <linux/cpu.h>
17 struct cpu cpu; member in struct:cpuinfo_arm
/linux-4.1.27/tools/power/cpupower/lib/
cpufreq.c
16 int cpufreq_cpu_exists(unsigned int cpu) cpufreq_cpu_exists() argument
18 return sysfs_cpu_exists(cpu); cpufreq_cpu_exists()
21 unsigned long cpufreq_get_freq_kernel(unsigned int cpu) cpufreq_get_freq_kernel() argument
23 return sysfs_get_freq_kernel(cpu); cpufreq_get_freq_kernel()
26 unsigned long cpufreq_get_freq_hardware(unsigned int cpu) cpufreq_get_freq_hardware() argument
28 return sysfs_get_freq_hardware(cpu); cpufreq_get_freq_hardware()
31 unsigned long cpufreq_get_transition_latency(unsigned int cpu) cpufreq_get_transition_latency() argument
33 return sysfs_get_freq_transition_latency(cpu); cpufreq_get_transition_latency()
36 int cpufreq_get_hardware_limits(unsigned int cpu, cpufreq_get_hardware_limits() argument
42 return sysfs_get_freq_hardware_limits(cpu, min, max); cpufreq_get_hardware_limits()
45 char *cpufreq_get_driver(unsigned int cpu) cpufreq_get_driver() argument
47 return sysfs_get_freq_driver(cpu); cpufreq_get_driver()
57 struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu) cpufreq_get_policy() argument
59 return sysfs_get_freq_policy(cpu); cpufreq_get_policy()
73 int cpu) cpufreq_get_available_governors()
75 return sysfs_get_freq_available_governors(cpu); cpufreq_get_available_governors()
97 *cpufreq_get_available_frequencies(unsigned int cpu) cpufreq_get_available_frequencies() argument
99 return sysfs_get_available_frequencies(cpu); cpufreq_get_available_frequencies()
118 struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned int cpu) cpufreq_get_affected_cpus() argument
120 return sysfs_get_freq_affected_cpus(cpu); cpufreq_get_affected_cpus()
139 struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned int cpu) cpufreq_get_related_cpus() argument
141 return sysfs_get_freq_related_cpus(cpu); cpufreq_get_related_cpus()
150 int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy) cpufreq_set_policy() argument
155 return sysfs_set_freq_policy(cpu, policy); cpufreq_set_policy()
159 int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq) cpufreq_modify_policy_min() argument
161 return sysfs_modify_freq_policy_min(cpu, min_freq); cpufreq_modify_policy_min()
165 int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq) cpufreq_modify_policy_max() argument
167 return sysfs_modify_freq_policy_max(cpu, max_freq); cpufreq_modify_policy_max()
171 int cpufreq_modify_policy_governor(unsigned int cpu, char *governor) cpufreq_modify_policy_governor() argument
176 return sysfs_modify_freq_policy_governor(cpu, governor); cpufreq_modify_policy_governor()
179 int cpufreq_set_frequency(unsigned int cpu, unsigned long target_frequency) cpufreq_set_frequency() argument
181 return sysfs_set_frequency(cpu, target_frequency); cpufreq_set_frequency()
184 struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu, cpufreq_get_stats() argument
187 return sysfs_get_freq_stats(cpu, total_time); cpufreq_get_stats()
205 unsigned long cpufreq_get_transitions(unsigned int cpu) cpufreq_get_transitions() argument
207 return sysfs_get_freq_transitions(cpu); cpufreq_get_transitions()
72 cpufreq_get_available_governors(unsigned int cpu) cpufreq_get_available_governors() argument
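The functions above form cpupower's thin cpufreq library front end. A minimal user-space sketch of calling two of them follows; the <cpufreq.h> header name and the link flag (typically -lcpufreq or the cpupower-built library) are assumptions about how the library is installed.

/* Sketch only: query CPU 0 through the libcpufreq wrappers listed above.
 * Values are in kHz; 0 / non-zero return means the attribute is unavailable. */
#include <stdio.h>
#include <cpufreq.h>	/* assumed header name from the cpupower build */

int main(void)
{
	unsigned int cpu = 0;
	unsigned long cur, min, max;

	cur = cpufreq_get_freq_kernel(cpu);		/* scaling_cur_freq */
	if (cpufreq_get_hardware_limits(cpu, &min, &max) == 0)
		printf("cpu%u: %lu kHz (hw limits %lu..%lu kHz)\n",
		       cpu, cur, min, max);
	else
		fprintf(stderr, "cpu%u: no cpufreq hardware limits\n", cpu);
	return 0;
}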
sysfs.c
19 #define PATH_TO_CPU "/sys/devices/system/cpu/"
50 static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname, sysfs_cpufreq_read_file() argument
55 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", sysfs_cpufreq_read_file()
56 cpu, fname); sysfs_cpufreq_read_file()
62 static unsigned int sysfs_cpufreq_write_file(unsigned int cpu, sysfs_cpufreq_write_file() argument
70 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", sysfs_cpufreq_write_file()
71 cpu, fname); sysfs_cpufreq_write_file()
114 static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu, sysfs_cpufreq_get_one_value() argument
125 len = sysfs_cpufreq_read_file(cpu, cpufreq_value_files[which], sysfs_cpufreq_get_one_value()
153 static char *sysfs_cpufreq_get_one_string(unsigned int cpu, sysfs_cpufreq_get_one_string() argument
163 len = sysfs_cpufreq_read_file(cpu, cpufreq_string_files[which], sysfs_cpufreq_get_one_string()
195 static int sysfs_cpufreq_write_one_value(unsigned int cpu, sysfs_cpufreq_write_one_value() argument
202 if (sysfs_cpufreq_write_file(cpu, cpufreq_write_files[which], sysfs_cpufreq_write_one_value()
209 unsigned long sysfs_get_freq_kernel(unsigned int cpu) sysfs_get_freq_kernel() argument
211 return sysfs_cpufreq_get_one_value(cpu, SCALING_CUR_FREQ); sysfs_get_freq_kernel()
214 unsigned long sysfs_get_freq_hardware(unsigned int cpu) sysfs_get_freq_hardware() argument
216 return sysfs_cpufreq_get_one_value(cpu, CPUINFO_CUR_FREQ); sysfs_get_freq_hardware()
219 unsigned long sysfs_get_freq_transition_latency(unsigned int cpu) sysfs_get_freq_transition_latency() argument
221 return sysfs_cpufreq_get_one_value(cpu, CPUINFO_LATENCY); sysfs_get_freq_transition_latency()
224 int sysfs_get_freq_hardware_limits(unsigned int cpu, sysfs_get_freq_hardware_limits() argument
231 *min = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MIN_FREQ); sysfs_get_freq_hardware_limits()
235 *max = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MAX_FREQ); sysfs_get_freq_hardware_limits()
242 char *sysfs_get_freq_driver(unsigned int cpu) sysfs_get_freq_driver() argument
244 return sysfs_cpufreq_get_one_string(cpu, SCALING_DRIVER); sysfs_get_freq_driver()
247 struct cpufreq_policy *sysfs_get_freq_policy(unsigned int cpu) sysfs_get_freq_policy() argument
255 policy->governor = sysfs_cpufreq_get_one_string(cpu, SCALING_GOVERNOR); sysfs_get_freq_policy()
260 policy->min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ); sysfs_get_freq_policy()
261 policy->max = sysfs_cpufreq_get_one_value(cpu, SCALING_MAX_FREQ); sysfs_get_freq_policy()
272 sysfs_get_freq_available_governors(unsigned int cpu) { sysfs_get_freq_available_governors() argument
279 len = sysfs_cpufreq_read_file(cpu, "scaling_available_governors", sysfs_get_freq_available_governors()
328 sysfs_get_available_frequencies(unsigned int cpu) { sysfs_get_available_frequencies() argument
336 len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies", sysfs_get_available_frequencies()
382 static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu, sysfs_get_cpu_list() argument
392 len = sysfs_cpufreq_read_file(cpu, file, linebuf, sizeof(linebuf)); sysfs_get_cpu_list()
420 if (sscanf(one_value, "%u", &current->cpu) != 1) sysfs_get_cpu_list()
438 struct cpufreq_affected_cpus *sysfs_get_freq_affected_cpus(unsigned int cpu) sysfs_get_freq_affected_cpus() argument
440 return sysfs_get_cpu_list(cpu, "affected_cpus"); sysfs_get_freq_affected_cpus()
443 struct cpufreq_affected_cpus *sysfs_get_freq_related_cpus(unsigned int cpu) sysfs_get_freq_related_cpus() argument
445 return sysfs_get_cpu_list(cpu, "related_cpus"); sysfs_get_freq_related_cpus()
448 struct cpufreq_stats *sysfs_get_freq_stats(unsigned int cpu, sysfs_get_freq_stats() argument
457 len = sysfs_cpufreq_read_file(cpu, "stats/time_in_state", sysfs_get_freq_stats()
507 unsigned long sysfs_get_freq_transitions(unsigned int cpu) sysfs_get_freq_transitions() argument
509 return sysfs_cpufreq_get_one_value(cpu, STATS_NUM_TRANSITIONS); sysfs_get_freq_transitions()
547 int sysfs_modify_freq_policy_governor(unsigned int cpu, char *governor) sysfs_modify_freq_policy_governor() argument
557 return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR, sysfs_modify_freq_policy_governor()
561 int sysfs_modify_freq_policy_max(unsigned int cpu, unsigned long max_freq) sysfs_modify_freq_policy_max() argument
567 return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ, sysfs_modify_freq_policy_max()
572 int sysfs_modify_freq_policy_min(unsigned int cpu, unsigned long min_freq) sysfs_modify_freq_policy_min() argument
578 return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ, sysfs_modify_freq_policy_min()
583 int sysfs_set_freq_policy(unsigned int cpu, struct cpufreq_policy *policy) sysfs_set_freq_policy() argument
604 old_min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ); sysfs_set_freq_policy()
608 ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ, sysfs_set_freq_policy()
614 ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ, min, sysfs_set_freq_policy()
620 ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ, sysfs_set_freq_policy()
626 return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR, sysfs_set_freq_policy()
630 int sysfs_set_frequency(unsigned int cpu, unsigned long target_frequency) sysfs_set_frequency() argument
632 struct cpufreq_policy *pol = sysfs_get_freq_policy(cpu); sysfs_set_frequency()
641 ret = sysfs_modify_freq_policy_governor(cpu, userspace_gov); sysfs_set_frequency()
652 return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_SET_SPEED, sysfs_set_frequency()
659 int sysfs_cpu_exists(unsigned int cpu) sysfs_cpu_exists() argument
664 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/", cpu); sysfs_cpu_exists()
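sysfs_cpufreq_read_file() above boils down to a formatted open()/read() on /sys/devices/system/cpu/cpu%u/cpufreq/<attribute>. A self-contained sketch of that pattern, with "scaling_governor" used purely as an example attribute:

/* Minimal re-creation of the sysfs_cpufreq_read_file() pattern shown above:
 * read one cpufreq attribute for a given CPU into a caller-supplied buffer. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static ssize_t read_cpufreq_attr(unsigned int cpu, const char *fname,
				 char *buf, size_t buflen)
{
	char path[256];
	ssize_t len;
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/cpufreq/%s", cpu, fname);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	len = read(fd, buf, buflen - 1);
	close(fd);
	if (len > 0)
		buf[len] = '\0';
	return len;
}

int main(void)
{
	char buf[64];

	if (read_cpufreq_attr(0, "scaling_governor", buf, sizeof(buf)) > 0)
		printf("cpu0 governor: %s", buf);
	return 0;
}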
/linux-4.1.27/arch/metag/include/asm/
topology.h
6 #define cpu_to_node(cpu) ((void)(cpu), 0)
20 const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
24 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
cpu.h
7 struct cpu cpu; member in struct:cpuinfo_metag
irq.h
5 extern void irq_ctx_init(int cpu);
6 extern void irq_ctx_exit(int cpu);
9 # define irq_ctx_init(cpu) do { } while (0)
10 # define irq_ctx_exit(cpu) do { } while (0)
smp.h
6 #define raw_smp_processor_id() (current_thread_info()->cpu)
13 extern void arch_send_call_function_single_ipi(int cpu);
21 extern void __cpu_die(unsigned int cpu);
mmu.h
45 * For cpu "cpu" calculate and return the address of the
49 static inline unsigned long mmu_phys0_addr(unsigned int cpu) mmu_phys0_addr() argument
54 (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) + mmu_phys0_addr()
61 * For cpu "cpu" calculate and return the address of the
65 static inline unsigned long mmu_phys1_addr(unsigned int cpu) mmu_phys1_addr() argument
70 (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) + mmu_phys1_addr()
/linux-4.1.27/arch/sh/include/asm/
topology.h
6 #define cpu_to_node(cpu) ((void)(cpu),0)
20 const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
24 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
smp.h
15 #define raw_smp_processor_id() (current_thread_info()->cpu)
17 /* Map from cpu id to sequential logical cpu number. */
19 #define cpu_number_map(cpu) __cpu_number_map[cpu]
21 /* The reverse map from sequential logical cpu number to cpu id. */
23 #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
40 void local_timer_setup(unsigned int cpu);
41 void local_timer_stop(unsigned int cpu);
43 void arch_send_call_function_single_ipi(int cpu);
47 void native_cpu_die(unsigned int cpu);
48 int native_cpu_disable(unsigned int cpu);
54 static inline void __cpu_die(unsigned int cpu) __cpu_die() argument
58 mp_ops->cpu_die(cpu); __cpu_die()
mmu_context.h
11 #include <cpu/mmu_context.h>
35 #define asid_cache(cpu) (cpu_data[cpu].asid_cache)
38 #define cpu_context(cpu, mm) ((mm)->context.id[cpu])
40 #define cpu_asid(cpu, mm) \
41 (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
57 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) get_mmu_context() argument
59 unsigned long asid = asid_cache(cpu); get_mmu_context()
62 if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0) get_mmu_context()
90 cpu_context(cpu, mm) = asid_cache(cpu) = asid; get_mmu_context()
112 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) activate_context() argument
114 get_mmu_context(mm, cpu); activate_context()
115 set_asid(cpu_asid(cpu, mm)); activate_context()
122 unsigned int cpu = smp_processor_id(); switch_mm() local
125 cpumask_set_cpu(cpu, mm_cpumask(next)); switch_mm()
127 activate_context(next, cpu); switch_mm()
129 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) switch_mm()
130 activate_context(next, cpu); switch_mm()
141 #define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; })
158 unsigned int cpu = smp_processor_id(); enable_mmu() local
164 if (asid_cache(cpu) == NO_CONTEXT) enable_mmu()
165 asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION; enable_mmu()
167 set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK); enable_mmu()
adc.h
8 #include <cpu/adc.h>
smp-ops.h
8 void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
9 void (*send_ipi)(unsigned int cpu, unsigned int message);
10 int (*cpu_disable)(unsigned int cpu);
11 void (*cpu_die)(unsigned int cpu);
clock.h
10 /* arch/sh/kernel/cpu/clock-cpg.c */
13 /* arch/sh/kernel/cpu/clock.c */
/linux-4.1.27/arch/mips/mti-malta/
malta-amon.c
19 int amon_cpu_avail(int cpu) amon_cpu_avail() argument
23 if (cpu < 0 || cpu >= NCPULAUNCH) { amon_cpu_avail()
24 pr_debug("avail: cpu%d is out of range\n", cpu); amon_cpu_avail()
28 launch += cpu; amon_cpu_avail()
30 pr_debug("avail: cpu%d is not ready\n", cpu); amon_cpu_avail()
34 pr_debug("avail: too late.. cpu%d is already gone\n", cpu); amon_cpu_avail()
41 int amon_cpu_start(int cpu, amon_cpu_start() argument
48 if (!amon_cpu_avail(cpu)) amon_cpu_start()
50 if (cpu == smp_processor_id()) { amon_cpu_start()
51 pr_debug("launch: I am cpu%d!\n", cpu); amon_cpu_start()
54 launch += cpu; amon_cpu_start()
56 pr_debug("launch: starting cpu%d\n", cpu); amon_cpu_start()
70 pr_debug("launch: cpu%d gone!\n", cpu); amon_cpu_start()
/linux-4.1.27/kernel/
smpboot.h
7 struct task_struct *idle_thread_get(unsigned int cpu);
11 static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; } idle_thread_set_boot_cpu() argument
16 int smpboot_create_threads(unsigned int cpu);
17 void smpboot_park_threads(unsigned int cpu);
18 void smpboot_unpark_threads(unsigned int cpu);
smpboot.c
4 #include <linux/cpu.h>
28 struct task_struct *idle_thread_get(unsigned int cpu) idle_thread_get() argument
30 struct task_struct *tsk = per_cpu(idle_threads, cpu); idle_thread_get()
34 init_idle(tsk, cpu); idle_thread_get()
44 * idle_init - Initialize the idle thread for a cpu
45 * @cpu: The cpu for which the idle thread should be initialized
49 static inline void idle_init(unsigned int cpu) idle_init() argument
51 struct task_struct *tsk = per_cpu(idle_threads, cpu); idle_init()
54 tsk = fork_idle(cpu); idle_init()
56 pr_err("SMP: fork_idle() failed for CPU %u\n", cpu); idle_init()
58 per_cpu(idle_threads, cpu) = tsk; idle_init()
67 unsigned int cpu, boot_cpu; idle_threads_init() local
71 for_each_possible_cpu(cpu) { for_each_possible_cpu()
72 if (cpu != boot_cpu) for_each_possible_cpu()
73 idle_init(cpu); for_each_possible_cpu()
84 unsigned int cpu; member in struct:smpboot_thread_data
117 ht->cleanup(td->cpu, cpu_online(td->cpu)); smpboot_thread_fn()
126 BUG_ON(td->cpu != smp_processor_id()); smpboot_thread_fn()
127 ht->park(td->cpu); smpboot_thread_fn()
135 BUG_ON(td->cpu != smp_processor_id()); smpboot_thread_fn()
143 ht->setup(td->cpu); smpboot_thread_fn()
151 ht->unpark(td->cpu); smpboot_thread_fn()
156 if (!ht->thread_should_run(td->cpu)) { smpboot_thread_fn()
162 ht->thread_fn(td->cpu); smpboot_thread_fn()
168 __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu) __smpboot_create_thread() argument
170 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); __smpboot_create_thread()
176 td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu)); __smpboot_create_thread()
179 td->cpu = cpu; __smpboot_create_thread()
182 tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu, __smpboot_create_thread()
189 *per_cpu_ptr(ht->store, cpu) = tsk; __smpboot_create_thread()
200 ht->create(cpu); __smpboot_create_thread()
205 int smpboot_create_threads(unsigned int cpu) smpboot_create_threads() argument
212 ret = __smpboot_create_thread(cur, cpu); smpboot_create_threads()
220 static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu) smpboot_unpark_thread() argument
222 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); smpboot_unpark_thread()
225 ht->pre_unpark(cpu); smpboot_unpark_thread()
229 void smpboot_unpark_threads(unsigned int cpu) smpboot_unpark_threads() argument
235 smpboot_unpark_thread(cur, cpu); smpboot_unpark_threads()
239 static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu) smpboot_park_thread() argument
241 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); smpboot_park_thread()
247 void smpboot_park_threads(unsigned int cpu) smpboot_park_threads() argument
253 smpboot_park_thread(cur, cpu); smpboot_park_threads()
259 unsigned int cpu; smpboot_destroy_threads() local
262 for_each_possible_cpu(cpu) { for_each_possible_cpu()
263 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); for_each_possible_cpu()
268 *per_cpu_ptr(ht->store, cpu) = NULL; for_each_possible_cpu()
281 unsigned int cpu; smpboot_register_percpu_thread() local
286 for_each_online_cpu(cpu) { for_each_online_cpu()
287 ret = __smpboot_create_thread(plug_thread, cpu); for_each_online_cpu()
292 smpboot_unpark_thread(plug_thread, cpu); for_each_online_cpu()
325 int cpu_report_state(int cpu) cpu_report_state() argument
327 return atomic_read(&per_cpu(cpu_hotplug_state, cpu)); cpu_report_state()
342 int cpu_check_up_prepare(int cpu) cpu_check_up_prepare() argument
345 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); cpu_check_up_prepare()
349 switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) { cpu_check_up_prepare()
354 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); cpu_check_up_prepare()
398 void cpu_set_state_online(int cpu) cpu_set_state_online() argument
400 (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE); cpu_set_state_online()
408 bool cpu_wait_death(unsigned int cpu, int seconds) cpu_wait_death() argument
418 if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD) cpu_wait_death()
423 while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) { cpu_wait_death()
431 oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu)); cpu_wait_death()
435 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD); cpu_wait_death()
438 if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu), cpu_wait_death()
459 int cpu = smp_processor_id(); cpu_report_death() local
462 oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu)); cpu_report_death()
467 } while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu), cpu_report_death()
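The setup/park/unpark/thread_fn hooks above belong to the smpboot per-CPU kthread framework. A hedged in-kernel sketch of registering one such thread follows; it is illustrative module boilerplate only, and whether the smpboot symbols are exported to out-of-tree modules is not guaranteed.

/* Sketch of a smpboot per-CPU thread as the hooks above expect it.
 * Illustrative only; a real user would add work queuing and cleanup. */
#include <linux/module.h>
#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);
static DEFINE_PER_CPU(bool, demo_pending);

static int demo_should_run(unsigned int cpu)
{
	return per_cpu(demo_pending, cpu);	/* run only when work is queued */
}

static void demo_fn(unsigned int cpu)
{
	per_cpu(demo_pending, cpu) = false;
	pr_info("demo smpboot thread ran on cpu%u\n", cpu);
}

static struct smp_hotplug_thread demo_thread = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_init(void)
{
	return smpboot_register_percpu_thread(&demo_thread);
}

static void __exit demo_exit(void)
{
	smpboot_unregister_percpu_thread(&demo_thread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");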
cpu.c
12 #include <linux/cpu.h>
68 * an ongoing cpu hotplug operation.
140 * Note that during a cpu-hotplug operation, the new readers, if any,
262 * @cpu: a CPU id
272 void clear_tasks_mm_cpumask(int cpu) clear_tasks_mm_cpumask() argument
277 * This function is called after the cpu is taken down and marked clear_tasks_mm_cpumask()
278 * offline, so its not like new tasks will ever get this cpu set in clear_tasks_mm_cpumask()
283 WARN_ON(cpu_online(cpu)); clear_tasks_mm_cpumask()
295 cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); for_each_process()
312 * which was running on this cpu in the past, and do_each_thread()
313 * it's just been woken on another cpu. do_each_thread()
319 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n", do_each_thread()
350 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) _cpu_down() argument
353 void *hcpu = (void *)(long)cpu; _cpu_down()
363 if (!cpu_online(cpu)) _cpu_down()
373 __func__, cpu); _cpu_down()
392 smpboot_park_threads(cpu); _cpu_down()
398 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); _cpu_down()
401 smpboot_unpark_threads(cpu); _cpu_down()
405 BUG_ON(cpu_online(cpu)); _cpu_down()
409 * runnable tasks from the cpu, there's only the idle task left now _cpu_down()
414 while (!per_cpu(cpu_dead_idle, cpu)) _cpu_down()
417 per_cpu(cpu_dead_idle, cpu) = false; _cpu_down()
419 hotplug_cpu__broadcast_tick_pull(cpu); _cpu_down()
421 __cpu_die(cpu); _cpu_down()
424 tick_cleanup_dead_cpu(cpu); _cpu_down()
427 check_for_tasks(cpu); _cpu_down()
436 int __ref cpu_down(unsigned int cpu) cpu_down() argument
447 err = _cpu_down(cpu, 0); cpu_down()
462 int cpu = (long)hcpu; smpboot_thread_call() local
467 smpboot_unpark_threads(cpu); smpboot_thread_call()
488 static int _cpu_up(unsigned int cpu, int tasks_frozen) _cpu_up() argument
491 void *hcpu = (void *)(long)cpu; _cpu_up()
497 if (cpu_online(cpu) || !cpu_present(cpu)) { _cpu_up()
502 idle = idle_thread_get(cpu); _cpu_up()
508 ret = smpboot_create_threads(cpu); _cpu_up()
516 __func__, cpu); _cpu_up()
521 ret = __cpu_up(cpu, idle); _cpu_up()
524 BUG_ON(!cpu_online(cpu)); _cpu_up()
538 int cpu_up(unsigned int cpu) cpu_up() argument
542 if (!cpu_possible(cpu)) { cpu_up()
543 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n", cpu_up()
544 cpu); cpu_up()
551 err = try_online_node(cpu_to_node(cpu)); cpu_up()
562 err = _cpu_up(cpu, 0); cpu_up()
575 int cpu, first_cpu, error = 0; disable_nonboot_cpus() local
586 for_each_online_cpu(cpu) { for_each_online_cpu()
587 if (cpu == first_cpu) for_each_online_cpu()
589 trace_suspend_resume(TPS("CPU_OFF"), cpu, true); for_each_online_cpu()
590 error = _cpu_down(cpu, 1); for_each_online_cpu()
591 trace_suspend_resume(TPS("CPU_OFF"), cpu, false); for_each_online_cpu()
593 cpumask_set_cpu(cpu, frozen_cpus); for_each_online_cpu()
595 pr_err("Error taking CPU%d down: %d\n", cpu, error); for_each_online_cpu()
621 int cpu, error; enable_nonboot_cpus() local
633 for_each_cpu(cpu, frozen_cpus) { for_each_cpu()
634 trace_suspend_resume(TPS("CPU_ON"), cpu, true); for_each_cpu()
635 error = _cpu_up(cpu, 1); for_each_cpu()
636 trace_suspend_resume(TPS("CPU_ON"), cpu, false); for_each_cpu()
638 pr_info("CPU%d is up\n", cpu); for_each_cpu()
641 pr_warn("Error taking CPU%d up: %d\n", cpu, error); for_each_cpu()
699 * to disable cpu hotplug to avoid cpu hotplug race. cpu_hotplug_pm_sync_init()
709 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
710 * @cpu: cpu that just started
713 * It must be called by the arch code on the new cpu, before the new cpu
714 * enables interrupts and before the "boot" cpu returns from __cpu_up().
716 void notify_cpu_starting(unsigned int cpu) notify_cpu_starting() argument
721 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus)) notify_cpu_starting()
724 cpu_notify(val, (void *)(long)cpu); notify_cpu_starting()
778 void set_cpu_possible(unsigned int cpu, bool possible) set_cpu_possible() argument
781 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); set_cpu_possible()
783 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); set_cpu_possible()
786 void set_cpu_present(unsigned int cpu, bool present) set_cpu_present() argument
789 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); set_cpu_present()
791 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); set_cpu_present()
794 void set_cpu_online(unsigned int cpu, bool online) set_cpu_online() argument
797 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); set_cpu_online()
798 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); set_cpu_online()
800 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); set_cpu_online()
804 void set_cpu_active(unsigned int cpu, bool active) set_cpu_active() argument
807 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); set_cpu_active()
809 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); set_cpu_active()
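cpu_notify() and notify_cpu_starting() above feed the CPU notifier chain used by kernels of this vintage (later replaced by cpuhp states). A sketch of a module listening for CPU_ONLINE/CPU_DEAD through that era's API, assuming the notifier registration symbols are available to modules:

/* Sketch of the old-style CPU hotplug notifier that kernel/cpu.c above
 * invokes via cpu_notify(); this API only applies to kernels of this age. */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int demo_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("cpu%u came online\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("cpu%u went offline\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_cpu_notifier = {
	.notifier_call = demo_cpu_callback,
};

static int __init demo_init(void)
{
	register_cpu_notifier(&demo_cpu_notifier);
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_cpu_notifier(&demo_cpu_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");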
/linux-4.1.27/arch/blackfin/kernel/cplb-mpu/
cplbinit.c
22 void __init generate_cplb_tables_cpu(unsigned int cpu) generate_cplb_tables_cpu() argument
45 dcplb_tbl[cpu][i_d].addr = 0; generate_cplb_tables_cpu()
46 dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; generate_cplb_tables_cpu()
48 icplb_tbl[cpu][i_i].addr = 0; generate_cplb_tables_cpu()
49 icplb_tbl[cpu][i_i++].data = CPLB_VALID | i_cache | CPLB_USER_RD | PAGE_SIZE_1KB; generate_cplb_tables_cpu()
57 dcplb_tbl[cpu][i_d].addr = addr; generate_cplb_tables_cpu()
58 dcplb_tbl[cpu][i_d++].data = d_data; generate_cplb_tables_cpu()
59 icplb_tbl[cpu][i_i].addr = addr; generate_cplb_tables_cpu()
60 icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); generate_cplb_tables_cpu()
66 dcplb_tbl[cpu][i_d].addr = addr; generate_cplb_tables_cpu()
67 dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD; generate_cplb_tables_cpu()
68 icplb_tbl[cpu][i_i].addr = addr; generate_cplb_tables_cpu()
69 icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD; generate_cplb_tables_cpu()
74 dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu); generate_cplb_tables_cpu()
75 dcplb_tbl[cpu][i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB; generate_cplb_tables_cpu()
78 icplb_tbl[cpu][i_i].addr = get_l1_code_start_cpu(cpu); generate_cplb_tables_cpu()
79 icplb_tbl[cpu][i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB; generate_cplb_tables_cpu()
84 dcplb_tbl[cpu][i_d].addr = L2_START; generate_cplb_tables_cpu()
85 dcplb_tbl[cpu][i_d++].data = L2_DMEMORY; generate_cplb_tables_cpu()
86 icplb_tbl[cpu][i_i].addr = L2_START; generate_cplb_tables_cpu()
87 icplb_tbl[cpu][i_i++].data = L2_IMEMORY; generate_cplb_tables_cpu()
95 dcplb_tbl[cpu][i_d++].data = 0; generate_cplb_tables_cpu()
97 icplb_tbl[cpu][i_i++].data = 0; generate_cplb_tables_cpu()
cplbmgr.c
68 MGR_ATTR static int evict_one_icplb(unsigned int cpu) evict_one_icplb() argument
72 if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0) evict_one_icplb()
74 i = first_switched_icplb + icplb_rr_index[cpu]; evict_one_icplb()
77 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb; evict_one_icplb()
79 icplb_rr_index[cpu]++; evict_one_icplb()
83 MGR_ATTR static int evict_one_dcplb(unsigned int cpu) evict_one_dcplb() argument
87 if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0) evict_one_dcplb()
89 i = first_switched_dcplb + dcplb_rr_index[cpu]; evict_one_dcplb()
92 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb; evict_one_dcplb()
94 dcplb_rr_index[cpu]++; evict_one_dcplb()
98 MGR_ATTR static noinline int dcplb_miss(unsigned int cpu) dcplb_miss() argument
106 nr_dcplb_miss[cpu]++; dcplb_miss()
124 mask = current_rwx_mask[cpu]; dcplb_miss()
146 mask = current_rwx_mask[cpu]; dcplb_miss()
160 idx = evict_one_dcplb(cpu); dcplb_miss()
163 dcplb_tbl[cpu][idx].addr = addr; dcplb_miss()
164 dcplb_tbl[cpu][idx].data = d_data; dcplb_miss()
174 MGR_ATTR static noinline int icplb_miss(unsigned int cpu) icplb_miss() argument
181 nr_icplb_miss[cpu]++; icplb_miss()
188 nr_icplb_supv_miss[cpu]++; icplb_miss()
196 if (icplb_tbl[cpu][idx].data & CPLB_VALID) { icplb_miss()
197 unsigned long this_addr = icplb_tbl[cpu][idx].addr; icplb_miss()
223 unsigned long *mask = current_rwx_mask[cpu]; icplb_miss()
254 unsigned long *mask = current_rwx_mask[cpu]; icplb_miss()
267 idx = evict_one_icplb(cpu); icplb_miss()
269 icplb_tbl[cpu][idx].addr = addr; icplb_miss()
270 icplb_tbl[cpu][idx].data = i_data; icplb_miss()
280 MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu) dcplb_protection_fault() argument
284 nr_dcplb_prot[cpu]++; dcplb_protection_fault()
288 unsigned long data = dcplb_tbl[cpu][idx].data; dcplb_protection_fault()
292 dcplb_tbl[cpu][idx].data = data; dcplb_protection_fault()
303 unsigned int cpu = raw_smp_processor_id(); cplb_hdr() local
306 return dcplb_protection_fault(cpu); cplb_hdr()
308 return icplb_miss(cpu); cplb_hdr()
310 return dcplb_miss(cpu); cplb_hdr()
316 void flush_switched_cplbs(unsigned int cpu) flush_switched_cplbs() argument
321 nr_cplb_flush[cpu]++; flush_switched_cplbs()
326 icplb_tbl[cpu][i].data = 0; flush_switched_cplbs()
333 dcplb_tbl[cpu][i].data = 0; flush_switched_cplbs()
341 void set_mask_dcplbs(unsigned long *masks, unsigned int cpu) set_mask_dcplbs() argument
349 current_rwx_mask[cpu] = masks; set_mask_dcplbs()
354 current_rwx_mask[cpu] = masks; set_mask_dcplbs()
371 dcplb_tbl[cpu][i].addr = addr; set_mask_dcplbs()
372 dcplb_tbl[cpu][i].data = d_data; set_mask_dcplbs()
/linux-4.1.27/sound/soc/qcom/
Makefile
2 snd-soc-lpass-cpu-objs := lpass-cpu.o
5 obj-$(CONFIG_SND_SOC_LPASS_CPU) += snd-soc-lpass-cpu.o
/linux-4.1.27/arch/arm/mach-imx/
clk-cpu.c
32 struct clk_cpu *cpu = to_clk_cpu(hw); clk_cpu_recalc_rate() local
34 return clk_get_rate(cpu->div); clk_cpu_recalc_rate()
40 struct clk_cpu *cpu = to_clk_cpu(hw); clk_cpu_round_rate() local
42 return clk_round_rate(cpu->pll, rate); clk_cpu_round_rate()
48 struct clk_cpu *cpu = to_clk_cpu(hw); clk_cpu_set_rate() local
52 ret = clk_set_parent(cpu->mux, cpu->step); clk_cpu_set_rate()
57 ret = clk_set_rate(cpu->pll, rate); clk_cpu_set_rate()
59 clk_set_parent(cpu->mux, cpu->pll); clk_cpu_set_rate()
63 clk_set_parent(cpu->mux, cpu->pll); clk_cpu_set_rate()
66 clk_set_rate(cpu->div, rate); clk_cpu_set_rate()
81 struct clk_cpu *cpu; imx_clk_cpu() local
85 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); imx_clk_cpu()
86 if (!cpu) imx_clk_cpu()
89 cpu->div = div; imx_clk_cpu()
90 cpu->mux = mux; imx_clk_cpu()
91 cpu->pll = pll; imx_clk_cpu()
92 cpu->step = step; imx_clk_cpu()
100 cpu->hw.init = &init; imx_clk_cpu()
102 clk = clk_register(NULL, &cpu->hw); imx_clk_cpu()
104 kfree(cpu); imx_clk_cpu()
src.c
85 void imx_enable_cpu(int cpu, bool enable) imx_enable_cpu() argument
89 cpu = cpu_logical_map(cpu); imx_enable_cpu()
90 mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1); imx_enable_cpu()
94 val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1); imx_enable_cpu()
99 void imx_set_cpu_jump(int cpu, void *jump_addr) imx_set_cpu_jump() argument
101 cpu = cpu_logical_map(cpu); imx_set_cpu_jump()
103 src_base + SRC_GPR1 + cpu * 8); imx_set_cpu_jump()
106 u32 imx_get_cpu_arg(int cpu) imx_get_cpu_arg() argument
108 cpu = cpu_logical_map(cpu); imx_get_cpu_arg()
109 return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); imx_get_cpu_arg()
112 void imx_set_cpu_arg(int cpu, u32 arg) imx_set_cpu_arg() argument
114 cpu = cpu_logical_map(cpu); imx_set_cpu_arg()
115 writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); imx_set_cpu_arg()
hotplug.c
46 void imx_cpu_die(unsigned int cpu) imx_cpu_die() argument
50 * We use the cpu jumping argument register to sync with imx_cpu_die()
52 * the register being cleared to kill the cpu. imx_cpu_die()
54 imx_set_cpu_arg(cpu, ~0); imx_cpu_die()
60 int imx_cpu_kill(unsigned int cpu) imx_cpu_kill() argument
64 while (imx_get_cpu_arg(cpu) == 0) imx_cpu_kill()
67 imx_enable_cpu(cpu, false); imx_cpu_kill()
68 imx_set_cpu_arg(cpu, 0); imx_cpu_kill()
/linux-4.1.27/tools/perf/tests/
open-syscall-all-cpus.c
9 int err = -1, fd, cpu; test__open_syscall_event_on_all_cpus() local
48 for (cpu = 0; cpu < cpus->nr; ++cpu) { test__open_syscall_event_on_all_cpus()
49 unsigned int ncalls = nr_open_calls + cpu; test__open_syscall_event_on_all_cpus()
56 if (cpus->map[cpu] >= CPU_SETSIZE) { test__open_syscall_event_on_all_cpus()
57 pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); test__open_syscall_event_on_all_cpus()
61 CPU_SET(cpus->map[cpu], &cpu_set); test__open_syscall_event_on_all_cpus()
64 cpus->map[cpu], test__open_syscall_event_on_all_cpus()
72 CPU_CLR(cpus->map[cpu], &cpu_set); test__open_syscall_event_on_all_cpus()
77 * we use the auto allocation it will allocate just for 1 cpu, test__open_syscall_event_on_all_cpus()
78 * as we start by cpu 0. test__open_syscall_event_on_all_cpus()
87 for (cpu = 0; cpu < cpus->nr; ++cpu) { test__open_syscall_event_on_all_cpus()
90 if (cpus->map[cpu] >= CPU_SETSIZE) test__open_syscall_event_on_all_cpus()
93 if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { test__open_syscall_event_on_all_cpus()
99 expected = nr_open_calls + cpu; test__open_syscall_event_on_all_cpus()
100 if (evsel->counts->cpu[cpu].val != expected) { test__open_syscall_event_on_all_cpus()
101 pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", test__open_syscall_event_on_all_cpus()
102 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); test__open_syscall_event_on_all_cpus()
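The test above pins itself to each CPU with CPU_ZERO()/CPU_SET() and sched_setaffinity() before doing the work it expects the per-CPU counter to see. A stand-alone sketch of that affinity pattern, with the actual work left as a placeholder:

/* Sketch of the affinity pattern used by the perf test above: bind the
 * calling thread to one CPU, do some work there, then clear the bit. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int run_on_cpu(int cpu)
{
	cpu_set_t cpu_set;

	CPU_ZERO(&cpu_set);
	CPU_SET(cpu, &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0)
		return -1;

	/* do whatever work should be attributed to this CPU here */

	CPU_CLR(cpu, &cpu_set);
	return 0;
}

int main(void)
{
	if (run_on_cpu(0))
		perror("sched_setaffinity");
	return 0;
}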
/linux-4.1.27/arch/microblaze/kernel/cpu/
cpuinfo-static.c
23 void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) set_cpuinfo_static() argument
28 (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) | set_cpuinfo_static()
29 (fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) | set_cpuinfo_static()
30 (fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) | set_cpuinfo_static()
31 (fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0); set_cpuinfo_static()
43 ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul"); set_cpuinfo_static()
51 ci->use_fpu = fcpu(cpu, "xlnx,use-fpu"); set_cpuinfo_static()
59 (fcpu(cpu, "xlnx,unaligned-exceptions") ? set_cpuinfo_static()
61 (fcpu(cpu, "xlnx,ill-opcode-exception") ? set_cpuinfo_static()
63 (fcpu(cpu, "xlnx,iopb-bus-exception") ? set_cpuinfo_static()
65 (fcpu(cpu, "xlnx,dopb-bus-exception") ? set_cpuinfo_static()
67 (fcpu(cpu, "xlnx,div-zero-exception") ? set_cpuinfo_static()
69 (fcpu(cpu, "xlnx,fpu-exception") ? PVR2_FPU_EXC_MASK : 0) | set_cpuinfo_static()
70 (fcpu(cpu, "xlnx,fsl-exception") ? PVR2_USE_EXTEND_FSL : 0); set_cpuinfo_static()
72 ci->use_icache = fcpu(cpu, "xlnx,use-icache"); set_cpuinfo_static()
73 ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits"); set_cpuinfo_static()
74 ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr"); set_cpuinfo_static()
75 ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2; set_cpuinfo_static()
77 if (fcpu(cpu, "xlnx,icache-use-fsl")) set_cpuinfo_static()
82 ci->icache_size = fcpu(cpu, "i-cache-size"); set_cpuinfo_static()
83 ci->icache_base = fcpu(cpu, "i-cache-baseaddr"); set_cpuinfo_static()
84 ci->icache_high = fcpu(cpu, "i-cache-highaddr"); set_cpuinfo_static()
86 ci->use_dcache = fcpu(cpu, "xlnx,use-dcache"); set_cpuinfo_static()
87 ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag"); set_cpuinfo_static()
88 ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr"); set_cpuinfo_static()
89 ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2; set_cpuinfo_static()
91 if (fcpu(cpu, "xlnx,dcache-use-fsl")) set_cpuinfo_static()
96 ci->dcache_size = fcpu(cpu, "d-cache-size"); set_cpuinfo_static()
97 ci->dcache_base = fcpu(cpu, "d-cache-baseaddr"); set_cpuinfo_static()
98 ci->dcache_high = fcpu(cpu, "d-cache-highaddr"); set_cpuinfo_static()
99 ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback"); set_cpuinfo_static()
101 ci->use_dopb = fcpu(cpu, "xlnx,d-opb"); set_cpuinfo_static()
102 ci->use_iopb = fcpu(cpu, "xlnx,i-opb"); set_cpuinfo_static()
103 ci->use_dlmb = fcpu(cpu, "xlnx,d-lmb"); set_cpuinfo_static()
104 ci->use_ilmb = fcpu(cpu, "xlnx,i-lmb"); set_cpuinfo_static()
106 ci->num_fsl = fcpu(cpu, "xlnx,fsl-links"); set_cpuinfo_static()
107 ci->irq_edge = fcpu(cpu, "xlnx,interrupt-is-edge"); set_cpuinfo_static()
108 ci->irq_positive = fcpu(cpu, "xlnx,edge-is-positive"); set_cpuinfo_static()
111 ci->hw_debug = fcpu(cpu, "xlnx,debug-enabled"); set_cpuinfo_static()
112 ci->num_pc_brk = fcpu(cpu, "xlnx,number-of-pc-brk"); set_cpuinfo_static()
113 ci->num_rd_brk = fcpu(cpu, "xlnx,number-of-rd-addr-brk"); set_cpuinfo_static()
114 ci->num_wr_brk = fcpu(cpu, "xlnx,number-of-wr-addr-brk"); set_cpuinfo_static()
116 ci->pvr_user1 = fcpu(cpu, "xlnx,pvr-user1"); set_cpuinfo_static()
117 ci->pvr_user2 = fcpu(cpu, "xlnx,pvr-user2"); set_cpuinfo_static()
119 ci->mmu = fcpu(cpu, "xlnx,use-mmu"); set_cpuinfo_static()
120 ci->mmu_privins = fcpu(cpu, "xlnx,mmu-privileged-instr"); set_cpuinfo_static()
121 ci->endian = fcpu(cpu, "xlnx,endianness"); set_cpuinfo_static()
H A Dcpuinfo.c80 static struct device_node *cpu; variable in typeref:struct:device_node
84 cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu"); setup_cpuinfo()
85 if (!cpu) setup_cpuinfo()
86 pr_err("You don't have cpu!!!\n"); setup_cpuinfo()
94 set_cpuinfo_static(&cpuinfo, cpu); setup_cpuinfo()
101 set_cpuinfo_static(&cpuinfo, cpu); setup_cpuinfo()
102 set_cpuinfo_pvr_full(&cpuinfo, cpu); setup_cpuinfo()
106 set_cpuinfo_static(&cpuinfo, cpu); setup_cpuinfo()
118 clk = of_clk_get(cpu, 0); setup_cpuinfo_clk()
122 cpuinfo.cpu_clock_freq = fcpu(cpu, "timebase-frequency"); setup_cpuinfo_clk()
/linux-4.1.27/tools/power/cpupower/utils/helpers/
H A Dtopology.c23 static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result) sysfs_topology_read_file() argument
29 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s", sysfs_topology_read_file()
30 cpu, fname); sysfs_topology_read_file()
51 else if (top1->cpu < top2->cpu) __compare()
53 else if (top1->cpu > top2->cpu) __compare()
63 * Array is sorted after ->pkg, ->core, then ->cpu
67 int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF); get_cpu_topology() local
73 for (cpu = 0; cpu < cpus; cpu++) { get_cpu_topology()
74 cpu_top->core_info[cpu].cpu = cpu; get_cpu_topology()
75 cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); get_cpu_topology()
77 cpu, get_cpu_topology()
79 &(cpu_top->core_info[cpu].pkg)) < 0) get_cpu_topology()
82 cpu, get_cpu_topology()
84 &(cpu_top->core_info[cpu].core)) < 0) get_cpu_topology()
95 for(cpu = 1; cpu < cpus; cpu++) { get_cpu_topology()
96 if(cpu_top->core_info[cpu].pkg != last_pkg) { get_cpu_topology()
97 last_pkg = cpu_top->core_info[cpu].pkg; get_cpu_topology()
106 for (cpu = 0; cpu_top->core_info[cpu].pkg == 0 && cpu < cpus; cpu++) { get_cpu_topology()
107 if (cpu_top->core_info[cpu].core == 0) get_cpu_topology()
H A Dsysfs.h4 #define PATH_TO_CPU "/sys/devices/system/cpu/"
10 extern unsigned int sysfs_idlestate_file_exists(unsigned int cpu,
14 extern int sysfs_is_cpu_online(unsigned int cpu);
16 extern int sysfs_is_idlestate_disabled(unsigned int cpu,
18 extern int sysfs_idlestate_disable(unsigned int cpu, unsigned int idlestate,
20 extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
22 extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
24 extern unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
26 extern char *sysfs_get_idlestate_name(unsigned int cpu,
28 extern char *sysfs_get_idlestate_desc(unsigned int cpu,
30 extern unsigned int sysfs_get_idlestate_count(unsigned int cpu);
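A minimal user-space sketch of what the sysfs_get_idlestate_*() helpers declared above wrap: reading a cpuidle state attribute straight from sysfs under PATH_TO_CPU. The cpu/state indices and attribute names used here ("name", "latency") are illustrative assumptions, not taken from the listing.

	#include <stdio.h>
	#include <string.h>

	/* Read one cpuidle state attribute, e.g. cpu0/cpuidle/state1/name. */
	static int read_idlestate_attr(unsigned int cpu, unsigned int state,
				       const char *attr, char *buf, size_t len)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%u/cpuidle/state%u/%s",
			 cpu, state, attr);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (!fgets(buf, len, f)) {
			fclose(f);
			return -1;
		}
		fclose(f);
		buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
		return 0;
	}

	int main(void)
	{
		char name[64], latency[64];

		if (read_idlestate_attr(0, 1, "name", name, sizeof(name)) == 0 &&
		    read_idlestate_attr(0, 1, "latency", latency, sizeof(latency)) == 0)
			printf("cpu0 state1: %s, exit latency %s us\n", name, latency);
		return 0;
	}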
H A Dmsr.c26 int read_msr(int cpu, unsigned int idx, unsigned long long *val) read_msr() argument
31 sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu); read_msr()
55 int write_msr(int cpu, unsigned int idx, unsigned long long val) write_msr() argument
60 sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu); write_msr()
75 int msr_intel_get_perf_bias(unsigned int cpu) msr_intel_get_perf_bias() argument
83 ret = read_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &val); msr_intel_get_perf_bias()
89 int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val) msr_intel_set_perf_bias() argument
96 ret = write_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, val); msr_intel_set_perf_bias()
102 unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu) msr_intel_get_turbo_ratio() argument
110 ret = read_msr(cpu, MSR_NEHALEM_TURBO_RATIO_LIMIT, &val); msr_intel_get_turbo_ratio()
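A minimal sketch of the read_msr() pattern shown above: the msr driver exposes /dev/cpu/N/msr, and a pread() at offset <MSR index> returns the 64-bit register. It needs the msr module loaded and root privileges; the index 0x1b0 (IA32_ENERGY_PERF_BIAS) is only an illustrative choice.

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>

	/* Read MSR 'idx' of CPU 'cpu' via the msr character device. */
	static int read_msr(int cpu, unsigned int idx, uint64_t *val)
	{
		char path[64];
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		if (pread(fd, val, sizeof(*val), idx) != sizeof(*val)) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}

	int main(void)
	{
		uint64_t val;

		if (read_msr(0, 0x1b0, &val) == 0)
			printf("cpu0 MSR 0x1b0 = 0x%llx\n", (unsigned long long)val);
		return 0;
	}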
H A Dsysfs.c48 int sysfs_is_cpu_online(unsigned int cpu) sysfs_is_cpu_online() argument
58 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu); sysfs_is_cpu_online()
67 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu); sysfs_is_cpu_online()
90 /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
93 /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
104 unsigned int sysfs_idlestate_file_exists(unsigned int cpu, sysfs_idlestate_file_exists() argument
112 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", sysfs_idlestate_file_exists()
113 cpu, idlestate, fname); sysfs_idlestate_file_exists()
125 unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate, sysfs_idlestate_read_file() argument
132 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", sysfs_idlestate_read_file()
133 cpu, idlestate, fname); sysfs_idlestate_read_file()
158 unsigned int sysfs_idlestate_write_file(unsigned int cpu, sysfs_idlestate_write_file() argument
167 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", sysfs_idlestate_write_file()
168 cpu, idlestate, fname); sysfs_idlestate_write_file()
204 static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu, sysfs_idlestate_get_one_value() argument
216 len = sysfs_idlestate_read_file(cpu, idlestate, sysfs_idlestate_get_one_value()
244 static char *sysfs_idlestate_get_one_string(unsigned int cpu, sysfs_idlestate_get_one_string() argument
255 len = sysfs_idlestate_read_file(cpu, idlestate, sysfs_idlestate_get_one_string()
278 int sysfs_is_idlestate_disabled(unsigned int cpu, sysfs_is_idlestate_disabled() argument
281 if (sysfs_get_idlestate_count(cpu) <= idlestate) sysfs_is_idlestate_disabled()
284 if (!sysfs_idlestate_file_exists(cpu, idlestate, sysfs_is_idlestate_disabled()
287 return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_DISABLE); sysfs_is_idlestate_disabled()
299 int sysfs_idlestate_disable(unsigned int cpu, sysfs_idlestate_disable() argument
306 if (sysfs_get_idlestate_count(cpu) <= idlestate) sysfs_idlestate_disable()
309 if (!sysfs_idlestate_file_exists(cpu, idlestate, sysfs_idlestate_disable()
315 bytes_written = sysfs_idlestate_write_file(cpu, idlestate, "disable", sysfs_idlestate_disable()
322 unsigned long sysfs_get_idlestate_latency(unsigned int cpu, sysfs_get_idlestate_latency() argument
325 return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_LATENCY); sysfs_get_idlestate_latency()
328 unsigned long sysfs_get_idlestate_usage(unsigned int cpu, sysfs_get_idlestate_usage() argument
331 return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_USAGE); sysfs_get_idlestate_usage()
334 unsigned long long sysfs_get_idlestate_time(unsigned int cpu, sysfs_get_idlestate_time() argument
337 return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_TIME); sysfs_get_idlestate_time()
340 char *sysfs_get_idlestate_name(unsigned int cpu, unsigned int idlestate) sysfs_get_idlestate_name() argument
342 return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_NAME); sysfs_get_idlestate_name()
345 char *sysfs_get_idlestate_desc(unsigned int cpu, unsigned int idlestate) sysfs_get_idlestate_desc() argument
347 return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_DESC); sysfs_get_idlestate_desc()
351 * Returns number of supported C-states of CPU core cpu
355 unsigned int sysfs_get_idlestate_count(unsigned int cpu) sysfs_get_idlestate_count() argument
366 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu); sysfs_get_idlestate_count()
372 "cpu%u/cpuidle/state%d", cpu, idlestates); sysfs_get_idlestate_count()
379 /* CPUidle general /sys/devices/system/cpu/cpuidle/ sysfs access ********/
383 * fname is a relative path under "cpu/cpuidle/" dir
450 /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
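Related to sysfs_is_cpu_online() above, a small sketch of how the per-CPU "online" attribute can be read from user space. CPUs other than the boot CPU expose this file; as root it can also be written with 0/1 to offline/online the CPU. The CPU number below is illustrative.

	#include <stdio.h>

	/* Return 1/0 for online/offline, or -1 if the file is absent (e.g. cpu0). */
	static int cpu_online_state(unsigned int cpu)
	{
		char path[128];
		FILE *f;
		int state = -1;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%u/online", cpu);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%d", &state) != 1)
			state = -1;
		fclose(f);
		return state;
	}

	int main(void)
	{
		int state = cpu_online_state(1);

		if (state >= 0)
			printf("cpu1 online: %d\n", state);
		return 0;
	}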
/linux-4.1.27/drivers/oprofile/
H A Dnmi_timer_int.c36 static int nmi_timer_start_cpu(int cpu) nmi_timer_start_cpu() argument
38 struct perf_event *event = per_cpu(nmi_timer_events, cpu); nmi_timer_start_cpu()
41 event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL, nmi_timer_start_cpu()
45 per_cpu(nmi_timer_events, cpu) = event; nmi_timer_start_cpu()
54 static void nmi_timer_stop_cpu(int cpu) nmi_timer_stop_cpu() argument
56 struct perf_event *event = per_cpu(nmi_timer_events, cpu); nmi_timer_stop_cpu()
65 int cpu = (unsigned long)data; nmi_timer_cpu_notifier() local
69 nmi_timer_start_cpu(cpu); nmi_timer_cpu_notifier()
72 nmi_timer_stop_cpu(cpu); nmi_timer_cpu_notifier()
84 int cpu; nmi_timer_start() local
88 for_each_online_cpu(cpu) nmi_timer_start()
89 nmi_timer_start_cpu(cpu); nmi_timer_start()
97 int cpu; nmi_timer_stop() local
100 for_each_online_cpu(cpu) nmi_timer_stop()
101 nmi_timer_stop_cpu(cpu); nmi_timer_stop()
109 int cpu; nmi_timer_shutdown() local
113 for_each_possible_cpu(cpu) { for_each_possible_cpu()
114 event = per_cpu(nmi_timer_events, cpu); for_each_possible_cpu()
118 per_cpu(nmi_timer_events, cpu) = NULL; for_each_possible_cpu()
127 int cpu, err; nmi_timer_setup() local
141 for_each_online_cpu(cpu) { for_each_online_cpu()
142 err = nmi_timer_start_cpu(cpu); for_each_online_cpu()
H A Dtimer_int.c16 #include <linux/cpu.h>
56 static void __oprofile_hrtimer_stop(int cpu) __oprofile_hrtimer_stop() argument
58 struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu); __oprofile_hrtimer_stop()
68 int cpu; oprofile_hrtimer_stop() local
71 for_each_online_cpu(cpu) oprofile_hrtimer_stop()
72 __oprofile_hrtimer_stop(cpu); oprofile_hrtimer_stop()
80 long cpu = (long) hcpu; oprofile_cpu_notify() local
85 smp_call_function_single(cpu, __oprofile_hrtimer_start, oprofile_cpu_notify()
90 __oprofile_hrtimer_stop(cpu); oprofile_cpu_notify()
/linux-4.1.27/arch/x86/vdso/
H A Dvgetcpu.c14 __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) __vdso_getcpu() argument
20 if (cpu) __vdso_getcpu()
21 *cpu = p & VGETCPU_CPU_MASK; __vdso_getcpu()
27 long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
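A minimal user-space sketch of the getcpu() interface the vDSO entry above accelerates: glibc's sched_getcpu() returns the CPU the calling thread is currently running on (the result may be stale as soon as it is returned, since the thread can migrate).

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sched.h>

	int main(void)
	{
		int cpu = sched_getcpu();	/* typically served by the vDSO */

		if (cpu >= 0)
			printf("running on cpu %d\n", cpu);
		return 0;
	}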
/linux-4.1.27/arch/x86/boot/compressed/
H A Dcpuflags.c9 return test_bit(flag, cpu.flags); has_cpuflag()
/linux-4.1.27/include/uapi/linux/netfilter/
H A Dxt_cpu.h7 __u32 cpu; member in struct:xt_cpu_info
/linux-4.1.27/arch/arm64/kernel/
H A Dcpuidle.c18 int arm_cpuidle_init(unsigned int cpu) arm_cpuidle_init() argument
21 struct device_node *cpu_node = of_cpu_device_node_get(cpu); arm_cpuidle_init()
26 if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle) arm_cpuidle_init()
27 ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu); arm_cpuidle_init()
42 int cpu = smp_processor_id(); arm_cpuidle_suspend() local
48 if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) arm_cpuidle_suspend()
50 return cpu_ops[cpu]->cpu_suspend(index); arm_cpuidle_suspend()
H A Dsmp.c30 #include <linux/cpu.h>
43 #include <asm/cpu.h>
77 static int boot_secondary(unsigned int cpu, struct task_struct *idle) boot_secondary() argument
79 if (cpu_ops[cpu]->cpu_boot) boot_secondary()
80 return cpu_ops[cpu]->cpu_boot(cpu); boot_secondary()
87 int __cpu_up(unsigned int cpu, struct task_struct *idle) __cpu_up() argument
101 ret = boot_secondary(cpu, idle); __cpu_up()
110 if (!cpu_online(cpu)) { __cpu_up()
111 pr_crit("CPU%u: failed to come online\n", cpu); __cpu_up()
115 pr_err("CPU%u: failed to boot: %d\n", cpu, ret); __cpu_up()
135 unsigned int cpu = smp_processor_id(); secondary_start_kernel() local
143 cpumask_set_cpu(cpu, mm_cpumask(mm)); secondary_start_kernel()
146 printk("CPU%u: Booted secondary processor\n", cpu); secondary_start_kernel()
159 if (cpu_ops[cpu]->cpu_postboot) secondary_start_kernel()
160 cpu_ops[cpu]->cpu_postboot(); secondary_start_kernel()
170 notify_cpu_starting(cpu); secondary_start_kernel()
172 smp_store_cpu_info(cpu); secondary_start_kernel()
179 set_cpu_online(cpu, true); secondary_start_kernel()
193 static int op_cpu_disable(unsigned int cpu) op_cpu_disable() argument
199 if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die) op_cpu_disable()
206 if (cpu_ops[cpu]->cpu_disable) op_cpu_disable()
207 return cpu_ops[cpu]->cpu_disable(cpu); op_cpu_disable()
217 unsigned int cpu = smp_processor_id(); __cpu_disable() local
220 ret = op_cpu_disable(cpu); __cpu_disable()
226 * and we must not schedule until we're ready to give up the cpu. __cpu_disable()
228 set_cpu_online(cpu, false); __cpu_disable()
238 clear_tasks_mm_cpumask(cpu); __cpu_disable()
243 static int op_cpu_kill(unsigned int cpu) op_cpu_kill() argument
250 if (!cpu_ops[cpu]->cpu_kill) op_cpu_kill()
253 return cpu_ops[cpu]->cpu_kill(cpu); op_cpu_kill()
262 void __cpu_die(unsigned int cpu) __cpu_die() argument
265 pr_crit("CPU%u: cpu didn't die\n", cpu); __cpu_die()
268 pr_notice("CPU%u: shutdown\n", cpu); __cpu_die()
276 if (!op_cpu_kill(cpu)) __cpu_die()
277 pr_warn("CPU%d may not have shut down cleanly\n", cpu); __cpu_die()
285 * of the other hotplug-cpu capable cores, so presumably coming
290 unsigned int cpu = smp_processor_id(); cpu_die() local
304 cpu_ops[cpu]->cpu_die(cpu); cpu_die()
323 * cpu logical map array containing MPIDR values related to logical
329 unsigned int i, cpu = 1; of_smp_init_cpus() local
332 while ((dn = of_find_node_by_type(dn, "cpu"))) { of_smp_init_cpus()
337 * A cpu node with missing "reg" property is of_smp_init_cpus()
359 * duplicates. If any is found just ignore the cpu. of_smp_init_cpus()
363 for (i = 1; (i < cpu) && (i < NR_CPUS); i++) { of_smp_init_cpus()
365 pr_err("%s: duplicate cpu reg properties in the DT\n", of_smp_init_cpus()
379 pr_err("%s: duplicate boot cpu reg property in DT\n", of_smp_init_cpus()
388 * initialized and the boot cpu doesn't need of_smp_init_cpus()
390 * incrementing cpu. of_smp_init_cpus()
395 if (cpu >= NR_CPUS) of_smp_init_cpus()
398 if (cpu_read_ops(dn, cpu) != 0) of_smp_init_cpus()
401 if (cpu_ops[cpu]->cpu_init(dn, cpu)) of_smp_init_cpus()
404 pr_debug("cpu logical map 0x%llx\n", hwid); of_smp_init_cpus()
405 cpu_logical_map(cpu) = hwid; of_smp_init_cpus()
407 cpu++; of_smp_init_cpus()
411 if (cpu > NR_CPUS) of_smp_init_cpus()
413 cpu, NR_CPUS); of_smp_init_cpus()
432 unsigned int cpu, ncores = num_possible_cpus(); smp_prepare_cpus() local
456 for_each_possible_cpu(cpu) { for_each_possible_cpu()
460 if (cpu == smp_processor_id()) for_each_possible_cpu()
463 if (!cpu_ops[cpu]) for_each_possible_cpu()
466 err = cpu_ops[cpu]->cpu_prepare(cpu); for_each_possible_cpu()
470 set_cpu_present(cpu, true); for_each_possible_cpu()
499 unsigned int cpu, i; show_ipi_list() local
504 for_each_online_cpu(cpu) show_ipi_list()
506 __get_irq_stat(cpu, ipi_irqs[i])); show_ipi_list()
511 u64 smp_irq_stat_cpu(unsigned int cpu) smp_irq_stat_cpu() argument
517 sum += __get_irq_stat(cpu, ipi_irqs[i]); smp_irq_stat_cpu()
527 void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
529 smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC); arch_send_call_function_single_ipi()
545 static void ipi_cpu_stop(unsigned int cpu) ipi_cpu_stop() argument
550 pr_crit("CPU%u: stopping\n", cpu); ipi_cpu_stop()
555 set_cpu_online(cpu, false); ipi_cpu_stop()
568 unsigned int cpu = smp_processor_id(); handle_IPI() local
573 __inc_irq_stat(cpu, ipi_irqs[ipinr]); handle_IPI()
589 ipi_cpu_stop(cpu); handle_IPI()
610 pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); handle_IPI()
619 void smp_send_reschedule(int cpu) smp_send_reschedule() argument
621 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); smp_send_reschedule()
H A Dtopology.c14 #include <linux/cpu.h>
29 int cpu; get_cpu_for_node() local
31 cpu_node = of_parse_phandle(node, "cpu", 0); get_cpu_for_node()
35 for_each_possible_cpu(cpu) { for_each_possible_cpu()
36 if (of_get_cpu_node(cpu, NULL) == cpu_node) { for_each_possible_cpu()
38 return cpu; for_each_possible_cpu()
54 int cpu; parse_core() local
62 cpu = get_cpu_for_node(t); parse_core()
63 if (cpu >= 0) { parse_core()
64 cpu_topology[cpu].cluster_id = cluster_id; parse_core()
65 cpu_topology[cpu].core_id = core_id; parse_core()
66 cpu_topology[cpu].thread_id = i; parse_core()
78 cpu = get_cpu_for_node(core); parse_core()
79 if (cpu >= 0) { parse_core()
86 cpu_topology[cpu].cluster_id = cluster_id; parse_core()
87 cpu_topology[cpu].core_id = core_id; parse_core()
134 pr_err("%s: cpu-map children should be clusters\n", parse_cluster()
168 int cpu; parse_dt_topology() local
177 * When topology is provided cpu-map is essentially a root parse_dt_topology()
180 map = of_get_child_by_name(cn, "cpu-map"); parse_dt_topology()
192 for_each_possible_cpu(cpu) parse_dt_topology()
193 if (cpu_topology[cpu].cluster_id == -1) parse_dt_topology()
204 * cpu topology table
209 const struct cpumask *cpu_coregroup_mask(int cpu) cpu_coregroup_mask() argument
211 return &cpu_topology[cpu].core_sibling; cpu_coregroup_mask()
217 int cpu; update_siblings_masks() local
220 for_each_possible_cpu(cpu) { for_each_possible_cpu()
221 cpu_topo = &cpu_topology[cpu]; for_each_possible_cpu()
227 if (cpu != cpuid) for_each_possible_cpu()
228 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); for_each_possible_cpu()
234 if (cpu != cpuid) for_each_possible_cpu()
235 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); for_each_possible_cpu()
253 /* Create cpu topology mapping based on MPIDR. */ store_cpu_topology()
279 unsigned int cpu; reset_cpu_topology() local
281 for_each_possible_cpu(cpu) { for_each_possible_cpu()
282 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; for_each_possible_cpu()
289 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); for_each_possible_cpu()
291 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); for_each_possible_cpu()
H A Dcpuinfo.c19 #include <asm/cpu.h>
51 unsigned int cpu = smp_processor_id(); cpuinfo_detect_icache_policy() local
69 pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); cpuinfo_detect_icache_policy()
92 static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) check_reg_mask() argument
98 name, (unsigned long)boot, cpu, (unsigned long)cur); check_reg_mask()
103 #define CHECK_MASK(field, mask, boot, cur, cpu) \
104 check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)
106 #define CHECK(field, boot, cur, cpu) \
107 CHECK_MASK(field, ~0ULL, boot, cur, cpu)
114 unsigned int cpu = smp_processor_id(); cpuinfo_sanity_check() local
123 diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu); cpuinfo_sanity_check()
130 diff |= CHECK(dczid, boot, cur, cpu); cpuinfo_sanity_check()
133 diff |= CHECK(cntfrq, boot, cur, cpu); cpuinfo_sanity_check()
141 diff |= CHECK(id_aa64dfr0, boot, cur, cpu); cpuinfo_sanity_check()
142 diff |= CHECK(id_aa64dfr1, boot, cur, cpu); cpuinfo_sanity_check()
148 diff |= CHECK(id_aa64isar0, boot, cur, cpu); cpuinfo_sanity_check()
149 diff |= CHECK(id_aa64isar1, boot, cur, cpu); cpuinfo_sanity_check()
157 diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu); cpuinfo_sanity_check()
158 diff |= CHECK(id_aa64mmfr1, boot, cur, cpu); cpuinfo_sanity_check()
164 diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu); cpuinfo_sanity_check()
165 diff |= CHECK(id_aa64pfr1, boot, cur, cpu); cpuinfo_sanity_check()
171 diff |= CHECK(id_dfr0, boot, cur, cpu); cpuinfo_sanity_check()
172 diff |= CHECK(id_isar0, boot, cur, cpu); cpuinfo_sanity_check()
173 diff |= CHECK(id_isar1, boot, cur, cpu); cpuinfo_sanity_check()
174 diff |= CHECK(id_isar2, boot, cur, cpu); cpuinfo_sanity_check()
175 diff |= CHECK(id_isar3, boot, cur, cpu); cpuinfo_sanity_check()
176 diff |= CHECK(id_isar4, boot, cur, cpu); cpuinfo_sanity_check()
177 diff |= CHECK(id_isar5, boot, cur, cpu); cpuinfo_sanity_check()
183 diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu); cpuinfo_sanity_check()
184 diff |= CHECK(id_mmfr1, boot, cur, cpu); cpuinfo_sanity_check()
185 diff |= CHECK(id_mmfr2, boot, cur, cpu); cpuinfo_sanity_check()
186 diff |= CHECK(id_mmfr3, boot, cur, cpu); cpuinfo_sanity_check()
187 diff |= CHECK(id_pfr0, boot, cur, cpu); cpuinfo_sanity_check()
188 diff |= CHECK(id_pfr1, boot, cur, cpu); cpuinfo_sanity_check()
190 diff |= CHECK(mvfr0, boot, cur, cpu); cpuinfo_sanity_check()
191 diff |= CHECK(mvfr1, boot, cur, cpu); cpuinfo_sanity_check()
192 diff |= CHECK(mvfr2, boot, cur, cpu); cpuinfo_sanity_check()
/linux-4.1.27/arch/x86/include/asm/
H A Dcpu.h5 #include <linux/cpu.h>
18 #define cpu_physical_id(cpu) boot_cpu_physical_apicid prefill_possible_map()
25 struct cpu cpu; prefill_possible_map() member in struct:x86_cpu
33 extern int _debug_hotplug_cpu(int cpu, int action);
H A Dnuma.h41 extern int numa_cpu_node(int cpu);
48 static inline int numa_cpu_node(int cpu) numa_cpu_node() argument
59 extern void numa_set_node(int cpu, int node);
60 extern void numa_clear_node(int cpu);
62 extern void numa_add_cpu(int cpu);
63 extern void numa_remove_cpu(int cpu);
65 static inline void numa_set_node(int cpu, int node) { } numa_clear_node() argument
66 static inline void numa_clear_node(int cpu) { } init_cpu_to_node() argument
68 static inline void numa_add_cpu(int cpu) { } numa_remove_cpu() argument
69 static inline void numa_remove_cpu(int cpu) { } argument
73 void debug_cpumask_set_cpu(int cpu, int node, bool enable);
H A Dsmp.h40 static inline struct cpumask *cpu_sibling_mask(int cpu) cpu_sibling_mask() argument
42 return per_cpu(cpu_sibling_map, cpu); cpu_sibling_mask()
45 static inline struct cpumask *cpu_core_mask(int cpu) cpu_core_mask() argument
47 return per_cpu(cpu_core_map, cpu); cpu_core_mask()
50 static inline struct cpumask *cpu_llc_shared_mask(int cpu) cpu_llc_shared_mask() argument
52 return per_cpu(cpu_llc_shared_map, cpu); cpu_llc_shared_mask()
72 void (*smp_send_reschedule)(int cpu);
74 int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
76 void (*cpu_die)(unsigned int cpu);
80 void (*send_call_func_single_ipi)(int cpu);
84 extern void set_cpu_sibling_map(int cpu);
117 static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle) __cpu_up() argument
119 return smp_ops.cpu_up(cpu, tidle); __cpu_up()
127 static inline void __cpu_die(unsigned int cpu) __cpu_die() argument
129 smp_ops.cpu_die(cpu); __cpu_die()
137 static inline void smp_send_reschedule(int cpu) smp_send_reschedule() argument
139 smp_ops.smp_send_reschedule(cpu); smp_send_reschedule()
142 static inline void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
144 smp_ops.send_call_func_single_ipi(cpu); arch_send_call_function_single_ipi()
159 int common_cpu_die(unsigned int cpu);
160 void native_cpu_die(unsigned int cpu);
163 void wbinvd_on_cpu(int cpu);
167 void native_send_call_func_single_ipi(int cpu);
168 void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
172 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
175 #define wbinvd_on_cpu(cpu) wbinvd() wbinvd_on_all_cpus()
201 ti->cpu; \
H A Dtopology.h50 /* Mappings between logical cpu number and node number */
57 extern int __cpu_to_node(int cpu);
60 extern int early_cpu_to_node(int cpu);
65 static inline int early_cpu_to_node(int cpu) early_cpu_to_node() argument
67 return early_per_cpu(x86_cpu_to_node_map, cpu); early_cpu_to_node()
109 static inline int early_cpu_to_node(int cpu) early_cpu_to_node() argument
120 extern const struct cpumask *cpu_coregroup_mask(int cpu); setup_node_to_cpumask_map()
122 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) setup_node_to_cpumask_map()
123 #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) setup_node_to_cpumask_map()
126 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) setup_node_to_cpumask_map()
127 #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) setup_node_to_cpumask_map()
/linux-4.1.27/arch/arm/mach-bcm/
H A Dplatsmp-brcmstb.c67 static int per_cpu_sw_state_rd(u32 cpu) per_cpu_sw_state_rd() argument
69 sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); per_cpu_sw_state_rd()
70 return per_cpu(per_cpu_sw_state, cpu); per_cpu_sw_state_rd()
73 static void per_cpu_sw_state_wr(u32 cpu, int val) per_cpu_sw_state_wr() argument
76 per_cpu(per_cpu_sw_state, cpu) = val; per_cpu_sw_state_wr()
77 sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); per_cpu_sw_state_wr()
80 static inline void per_cpu_sw_state_wr(u32 cpu, int val) { } per_cpu_sw_state_wr() argument
83 static void __iomem *pwr_ctrl_get_base(u32 cpu) pwr_ctrl_get_base() argument
86 base += (cpu_logical_map(cpu) * 4); pwr_ctrl_get_base()
90 static u32 pwr_ctrl_rd(u32 cpu) pwr_ctrl_rd() argument
92 void __iomem *base = pwr_ctrl_get_base(cpu); pwr_ctrl_rd()
96 static void pwr_ctrl_set(unsigned int cpu, u32 val, u32 mask) pwr_ctrl_set() argument
98 void __iomem *base = pwr_ctrl_get_base(cpu); pwr_ctrl_set()
102 static void pwr_ctrl_clr(unsigned int cpu, u32 val, u32 mask) pwr_ctrl_clr() argument
104 void __iomem *base = pwr_ctrl_get_base(cpu); pwr_ctrl_clr()
109 static int pwr_ctrl_wait_tmout(unsigned int cpu, u32 set, u32 mask) pwr_ctrl_wait_tmout() argument
115 tmp = pwr_ctrl_rd(cpu) & mask; pwr_ctrl_wait_tmout()
120 tmp = pwr_ctrl_rd(cpu) & mask; pwr_ctrl_wait_tmout()
127 static void cpu_rst_cfg_set(u32 cpu, int set) cpu_rst_cfg_set() argument
132 val |= BIT(cpu_logical_map(cpu)); cpu_rst_cfg_set()
134 val &= ~BIT(cpu_logical_map(cpu)); cpu_rst_cfg_set()
138 static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr) cpu_set_boot_addr() argument
140 const int reg_ofs = cpu_logical_map(cpu) * 8; cpu_set_boot_addr()
145 static void brcmstb_cpu_boot(u32 cpu) brcmstb_cpu_boot() argument
148 per_cpu_sw_state_wr(cpu, 1); brcmstb_cpu_boot()
154 cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup)); brcmstb_cpu_boot()
156 /* Unhalt the cpu */ brcmstb_cpu_boot()
157 cpu_rst_cfg_set(cpu, 0); brcmstb_cpu_boot()
160 static void brcmstb_cpu_power_on(u32 cpu) brcmstb_cpu_power_on() argument
166 pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, 0xffffff00); brcmstb_cpu_power_on()
167 pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1); brcmstb_cpu_power_on()
168 pwr_ctrl_set(cpu, ZONE_RESERVED_1_MASK, -1); brcmstb_cpu_power_on()
170 pwr_ctrl_set(cpu, ZONE_MAN_MEM_PWR_MASK, -1); brcmstb_cpu_power_on()
172 if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_MEM_PWR_STATE_MASK)) brcmstb_cpu_power_on()
175 pwr_ctrl_set(cpu, ZONE_MAN_CLKEN_MASK, -1); brcmstb_cpu_power_on()
177 if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_DPG_PWR_STATE_MASK)) brcmstb_cpu_power_on()
180 pwr_ctrl_clr(cpu, ZONE_MAN_ISO_CNTL_MASK, -1); brcmstb_cpu_power_on()
181 pwr_ctrl_set(cpu, ZONE_MAN_RESET_CNTL_MASK, -1); brcmstb_cpu_power_on()
184 static int brcmstb_cpu_get_power_state(u32 cpu) brcmstb_cpu_get_power_state() argument
186 int tmp = pwr_ctrl_rd(cpu); brcmstb_cpu_get_power_state()
192 static void brcmstb_cpu_die(u32 cpu) brcmstb_cpu_die() argument
196 per_cpu_sw_state_wr(cpu, 0); brcmstb_cpu_die()
206 static int brcmstb_cpu_kill(u32 cpu) brcmstb_cpu_kill() argument
214 if (cpu == 0) { brcmstb_cpu_kill()
219 while (per_cpu_sw_state_rd(cpu)) brcmstb_cpu_kill()
222 pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1); brcmstb_cpu_kill()
223 pwr_ctrl_clr(cpu, ZONE_MAN_RESET_CNTL_MASK, -1); brcmstb_cpu_kill()
224 pwr_ctrl_clr(cpu, ZONE_MAN_CLKEN_MASK, -1); brcmstb_cpu_kill()
225 pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, -1); brcmstb_cpu_kill()
226 pwr_ctrl_clr(cpu, ZONE_MAN_MEM_PWR_MASK, -1); brcmstb_cpu_kill()
228 if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_MEM_PWR_STATE_MASK)) brcmstb_cpu_kill()
231 pwr_ctrl_clr(cpu, ZONE_RESERVED_1_MASK, -1); brcmstb_cpu_kill()
233 if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_DPG_PWR_STATE_MASK)) brcmstb_cpu_kill()
240 cpu_rst_cfg_set(cpu, 1); brcmstb_cpu_kill()
253 name = "syscon-cpu"; setup_hifcpubiuctrl_regs()
344 static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle) brcmstb_boot_secondary() argument
351 if (brcmstb_cpu_get_power_state(cpu) == 0) brcmstb_boot_secondary()
352 brcmstb_cpu_power_on(cpu); brcmstb_boot_secondary()
354 brcmstb_cpu_boot(cpu); brcmstb_boot_secondary()
/linux-4.1.27/arch/xtensa/include/asm/
H A Dmmu_context.h34 #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
68 static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu) get_new_mmu_context() argument
70 unsigned long asid = cpu_asid_cache(cpu); get_new_mmu_context()
79 cpu_asid_cache(cpu) = asid; get_new_mmu_context()
80 mm->context.asid[cpu] = asid; get_new_mmu_context()
81 mm->context.cpu = cpu; get_new_mmu_context()
84 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) get_mmu_context() argument
91 unsigned long asid = mm->context.asid[cpu]; get_mmu_context()
94 ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK)) get_mmu_context()
95 get_new_mmu_context(mm, cpu); get_mmu_context()
99 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) activate_context() argument
101 get_mmu_context(mm, cpu); activate_context()
102 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); activate_context()
108 * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
115 int cpu; for_each_possible_cpu() local
116 for_each_possible_cpu(cpu) { for_each_possible_cpu()
117 mm->context.asid[cpu] = NO_CONTEXT; for_each_possible_cpu()
119 mm->context.cpu = -1;
126 unsigned int cpu = smp_processor_id(); switch_mm() local
127 int migrated = next->context.cpu != cpu; switch_mm()
131 next->context.cpu = cpu; switch_mm()
134 activate_context(next, cpu); switch_mm()
H A Dsmp.h14 #define raw_smp_processor_id() (current_thread_info()->cpu)
15 #define cpu_logical_map(cpu) (cpu)
24 void arch_send_call_function_single_ipi(int cpu);
34 void __cpu_die(unsigned int cpu);
/linux-4.1.27/arch/powerpc/platforms/ps3/
H A Dsmp.c36 * ps3_ipi_virqs - a per cpu array of virqs for ipi use
42 static void ps3_smp_message_pass(int cpu, int msg) ps3_smp_message_pass() argument
52 virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; ps3_smp_message_pass()
57 " (%d)\n", __func__, __LINE__, cpu, msg, result); ps3_smp_message_pass()
62 int cpu; ps3_smp_probe() local
64 for (cpu = 0; cpu < 2; cpu++) { ps3_smp_probe()
66 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); ps3_smp_probe()
69 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu); ps3_smp_probe()
83 result = ps3_event_receive_port_setup(cpu, &virqs[i]); ps3_smp_probe()
89 __func__, __LINE__, cpu, i, virqs[i]); ps3_smp_probe()
96 ps3_register_ipi_irq(cpu, virqs[i]); ps3_smp_probe()
99 ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]); ps3_smp_probe()
101 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu); ps3_smp_probe()
105 void ps3_smp_cleanup_cpu(int cpu) ps3_smp_cleanup_cpu() argument
107 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); ps3_smp_cleanup_cpu()
110 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu); ps3_smp_cleanup_cpu()
118 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu); ps3_smp_cleanup_cpu()
/linux-4.1.27/arch/ia64/kernel/
H A Dnuma.c33 void map_cpu_to_node(int cpu, int nid) map_cpu_to_node() argument
37 cpu_to_node_map[cpu] = 0; map_cpu_to_node()
41 oldnid = cpu_to_node_map[cpu]; map_cpu_to_node()
42 if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) { map_cpu_to_node()
45 /* we don't have cpu-driven node hot add yet... map_cpu_to_node()
49 cpu_to_node_map[cpu] = nid; map_cpu_to_node()
50 cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]); map_cpu_to_node()
54 void unmap_cpu_from_node(int cpu, int nid) unmap_cpu_from_node() argument
56 WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid])); unmap_cpu_from_node()
57 WARN_ON(cpu_to_node_map[cpu] != nid); unmap_cpu_from_node()
58 cpu_to_node_map[cpu] = 0; unmap_cpu_from_node()
59 cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]); unmap_cpu_from_node()
64 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
66 * Build cpu to node mapping and initialize the per node cpu masks using
71 int cpu, i, node; build_cpu_to_node_map() local
76 for_each_possible_early_cpu(cpu) { for_each_possible_early_cpu()
79 if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { for_each_possible_early_cpu()
83 map_cpu_to_node(cpu, node); for_each_possible_early_cpu()
H A Derr_inject.c30 #include <linux/cpu.h>
61 u32 cpu=dev->id; \
62 return sprintf(buf, "%lx\n", name[cpu]); \
70 unsigned int cpu=dev->id; \
71 name[cpu] = simple_strtoull(buf, NULL, 16); \
78 * processor. The cpu number in driver is only used for storing data.
84 unsigned int cpu=dev->id; store_call_start() local
88 printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu); store_call_start()
89 printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]); store_call_start()
90 printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]); store_call_start()
92 err_data_buffer[cpu].data1, store_call_start()
93 err_data_buffer[cpu].data2, store_call_start()
94 err_data_buffer[cpu].data3); store_call_start()
100 status[cpu]=ia64_pal_mc_error_inject_phys(err_type_info[cpu], store_call_start()
101 err_struct_info[cpu], store_call_start()
102 ia64_tpa(&err_data_buffer[cpu]), store_call_start()
103 &capabilities[cpu], store_call_start()
104 &resources[cpu]); store_call_start()
107 status[cpu]=ia64_pal_mc_error_inject_virt(err_type_info[cpu], store_call_start()
108 err_struct_info[cpu], store_call_start()
109 ia64_tpa(&err_data_buffer[cpu]), store_call_start()
110 &capabilities[cpu], store_call_start()
111 &resources[cpu]); store_call_start()
114 status[cpu] = -EINVAL; store_call_start()
119 printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]); store_call_start()
120 printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]); store_call_start()
121 printk(KERN_DEBUG "resources=%lx\n", resources[cpu]); store_call_start()
133 unsigned int cpu=dev->id; show_virtual_to_phys() local
134 return sprintf(buf, "%lx\n", phys_addr[cpu]); show_virtual_to_phys()
141 unsigned int cpu=dev->id; store_virtual_to_phys() local
154 phys_addr[cpu] = ia64_tpa(virt_addr); store_virtual_to_phys()
165 unsigned int cpu=dev->id; show_err_data_buffer() local
168 err_data_buffer[cpu].data1, show_err_data_buffer()
169 err_data_buffer[cpu].data2, show_err_data_buffer()
170 err_data_buffer[cpu].data3); show_err_data_buffer()
178 unsigned int cpu=dev->id; store_err_data_buffer() local
182 printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n", store_err_data_buffer()
183 err_data_buffer[cpu].data1, store_err_data_buffer()
184 err_data_buffer[cpu].data2, store_err_data_buffer()
185 err_data_buffer[cpu].data3, store_err_data_buffer()
186 cpu); store_err_data_buffer()
189 &err_data_buffer[cpu].data1, store_err_data_buffer()
190 &err_data_buffer[cpu].data2, store_err_data_buffer()
191 &err_data_buffer[cpu].data3); store_err_data_buffer()
241 unsigned int cpu = (unsigned long)hcpu; err_inject_cpu_callback() local
244 sys_dev = get_cpu_device(cpu); err_inject_cpu_callback()
H A Dtopology.c10 * Populate cpu entries in sysfs for non-numa systems as well
13 * Populate cpu cache entries in sysfs for cpu cache info
16 #include <linux/cpu.h>
28 #include <asm/cpu.h>
51 sysfs_cpus[num].cpu.hotpluggable = 1; arch_register_cpu()
54 return register_cpu(&sysfs_cpus[num].cpu, num); arch_register_cpu()
60 unregister_cpu(&sysfs_cpus[num].cpu); arch_unregister_cpu()
69 return register_cpu(&sysfs_cpus[num].cpu, num); arch_register_cpu()
104 * Export cpu cache information through sysfs
142 static void cache_shared_cpu_map_setup(unsigned int cpu, cache_shared_cpu_map_setup() argument
149 if (cpu_data(cpu)->threads_per_core <= 1 && cache_shared_cpu_map_setup()
150 cpu_data(cpu)->cores_per_socket <= 1) { cache_shared_cpu_map_setup()
151 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); cache_shared_cpu_map_setup()
164 if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id cache_shared_cpu_map_setup()
177 static void cache_shared_cpu_map_setup(unsigned int cpu, cache_shared_cpu_map_setup() argument
180 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); cache_shared_cpu_map_setup()
299 static void cpu_cache_sysfs_exit(unsigned int cpu) cpu_cache_sysfs_exit() argument
301 kfree(all_cpu_cache_info[cpu].cache_leaves); cpu_cache_sysfs_exit()
302 all_cpu_cache_info[cpu].cache_leaves = NULL; cpu_cache_sysfs_exit()
303 all_cpu_cache_info[cpu].num_cache_leaves = 0; cpu_cache_sysfs_exit()
304 memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject)); cpu_cache_sysfs_exit()
308 static int cpu_cache_sysfs_init(unsigned int cpu) cpu_cache_sysfs_init() argument
337 cache_shared_cpu_map_setup(cpu, cpu_cache_sysfs_init()
343 all_cpu_cache_info[cpu].cache_leaves = this_cache; cpu_cache_sysfs_init()
344 all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves; cpu_cache_sysfs_init()
346 memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject)); cpu_cache_sysfs_init()
354 unsigned int cpu = sys_dev->id; cache_add_dev() local
360 if (all_cpu_cache_info[cpu].kobj.parent) cache_add_dev()
364 retval = set_cpus_allowed_ptr(current, cpumask_of(cpu)); cache_add_dev()
368 retval = cpu_cache_sysfs_init(cpu); cache_add_dev()
373 retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj, cache_add_dev()
377 cpu_cache_sysfs_exit(cpu); cache_add_dev()
381 for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) { cache_add_dev()
382 this_object = LEAF_KOBJECT_PTR(cpu,i); cache_add_dev()
385 &all_cpu_cache_info[cpu].kobj, cache_add_dev()
389 kobject_put(&(LEAF_KOBJECT_PTR(cpu,j)->kobj)); cache_add_dev()
391 kobject_put(&all_cpu_cache_info[cpu].kobj); cache_add_dev()
392 cpu_cache_sysfs_exit(cpu); cache_add_dev()
397 kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD); cache_add_dev()
404 unsigned int cpu = sys_dev->id; cache_remove_dev() local
407 for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) cache_remove_dev()
408 kobject_put(&(LEAF_KOBJECT_PTR(cpu,i)->kobj)); cache_remove_dev()
410 if (all_cpu_cache_info[cpu].kobj.parent) { cache_remove_dev()
411 kobject_put(&all_cpu_cache_info[cpu].kobj); cache_remove_dev()
412 memset(&all_cpu_cache_info[cpu].kobj, cache_remove_dev()
417 cpu_cache_sysfs_exit(cpu); cache_remove_dev()
423 * When a cpu is hot-plugged, do a check and initiate
429 unsigned int cpu = (unsigned long)hcpu; cache_cpu_callback() local
432 sys_dev = get_cpu_device(cpu); cache_cpu_callback()
H A Dsmpboot.c28 #include <linux/cpu.h>
83 * start_ap in head.S uses this to store current booting cpu
348 * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
385 /* Setup the per cpu irq handling data structures */ smp_callin()
463 do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) do_boot_cpu() argument
468 Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); do_boot_cpu()
470 set_brendez_area(cpu); do_boot_cpu()
471 platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); do_boot_cpu()
478 if (cpumask_test_cpu(cpu, &cpu_callin_map)) do_boot_cpu()
485 if (!cpumask_test_cpu(cpu, &cpu_callin_map)) { do_boot_cpu()
486 printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid); do_boot_cpu()
487 ia64_cpu_to_sapicid[cpu] = -1; do_boot_cpu()
488 set_cpu_online(cpu, false); /* was set in smp_callin() */ do_boot_cpu()
510 int sapicid, cpu, i; smp_build_cpu_map() local
513 for (cpu = 0; cpu < NR_CPUS; cpu++) { smp_build_cpu_map()
514 ia64_cpu_to_sapicid[cpu] = -1; smp_build_cpu_map()
520 for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { smp_build_cpu_map()
524 set_cpu_present(cpu, true); smp_build_cpu_map()
525 set_cpu_possible(cpu, true); smp_build_cpu_map()
526 ia64_cpu_to_sapicid[cpu] = sapicid; smp_build_cpu_map()
527 cpu++; smp_build_cpu_map()
552 current_thread_info()->cpu = 0; smp_prepare_cpus()
577 clear_cpu_sibling_map(int cpu) clear_cpu_sibling_map() argument
581 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) clear_cpu_sibling_map()
582 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); clear_cpu_sibling_map()
583 for_each_cpu(i, &cpu_core_map[cpu]) clear_cpu_sibling_map()
584 cpumask_clear_cpu(cpu, &cpu_core_map[i]); clear_cpu_sibling_map()
586 per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; clear_cpu_sibling_map()
590 remove_siblinginfo(int cpu) remove_siblinginfo() argument
594 if (cpu_data(cpu)->threads_per_core == 1 && remove_siblinginfo()
595 cpu_data(cpu)->cores_per_socket == 1) { remove_siblinginfo()
596 cpumask_clear_cpu(cpu, &cpu_core_map[cpu]); remove_siblinginfo()
597 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); remove_siblinginfo()
601 last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0); remove_siblinginfo()
604 clear_cpu_sibling_map(cpu); remove_siblinginfo()
609 int migrate_platform_irqs(unsigned int cpu) migrate_platform_irqs() argument
619 if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) { migrate_platform_irqs()
620 printk ("CPU (%d) is CPEI Target\n", cpu); migrate_platform_irqs()
638 printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu); migrate_platform_irqs()
642 printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); migrate_platform_irqs()
652 int cpu = smp_processor_id(); __cpu_disable() local
657 if (cpu == 0 && !bsp_remove_ok) { __cpu_disable()
663 if (!sn_cpu_disable_allowed(cpu)) __cpu_disable()
667 set_cpu_online(cpu, false); __cpu_disable()
669 if (migrate_platform_irqs(cpu)) { __cpu_disable()
670 set_cpu_online(cpu, true); __cpu_disable()
674 remove_siblinginfo(cpu); __cpu_disable()
677 cpumask_clear_cpu(cpu, &cpu_callin_map); __cpu_disable()
681 void __cpu_die(unsigned int cpu) __cpu_die() argument
687 if (per_cpu(cpu_state, cpu) == CPU_DEAD) __cpu_die()
689 printk ("CPU %d is now offline\n", cpu); __cpu_die()
694 printk(KERN_ERR "CPU %u didn't die...\n", cpu); __cpu_die()
701 int cpu; smp_cpus_done() local
708 for_each_online_cpu(cpu) { for_each_online_cpu()
709 bogosum += cpu_data(cpu)->loops_per_jiffy; for_each_online_cpu()
716 static inline void set_cpu_sibling_map(int cpu) set_cpu_sibling_map() argument
721 if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) { for_each_online_cpu()
722 cpumask_set_cpu(i, &cpu_core_map[cpu]); for_each_online_cpu()
723 cpumask_set_cpu(cpu, &cpu_core_map[i]); for_each_online_cpu()
724 if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) { for_each_online_cpu()
726 &per_cpu(cpu_sibling_map, cpu)); for_each_online_cpu()
727 cpumask_set_cpu(cpu, for_each_online_cpu()
735 __cpu_up(unsigned int cpu, struct task_struct *tidle) __cpu_up() argument
740 sapicid = ia64_cpu_to_sapicid[cpu]; __cpu_up()
745 * Already booted cpu? not valid anymore since we dont __cpu_up()
748 if (cpumask_test_cpu(cpu, &cpu_callin_map)) __cpu_up()
751 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; __cpu_up()
753 ret = do_boot_cpu(sapicid, cpu, tidle); __cpu_up()
757 if (cpu_data(cpu)->threads_per_core == 1 && __cpu_up()
758 cpu_data(cpu)->cores_per_socket == 1) { __cpu_up()
759 cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); __cpu_up()
760 cpumask_set_cpu(cpu, &cpu_core_map[cpu]); __cpu_up()
764 set_cpu_sibling_map(cpu); __cpu_up()
794 * identify_siblings(cpu) gets called from identify_cpu. This populates the
841 * on at least one physical package. Due to hotplug cpu
/linux-4.1.27/arch/arm/mach-tegra/
H A Dplatsmp.c39 static void tegra_secondary_init(unsigned int cpu) tegra_secondary_init() argument
41 cpumask_set_cpu(cpu, &tegra_cpu_init_mask); tegra_secondary_init()
45 static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle) tegra20_boot_secondary() argument
47 cpu = cpu_logical_map(cpu); tegra20_boot_secondary()
57 tegra_put_cpu_in_reset(cpu); tegra20_boot_secondary()
65 flowctrl_write_cpu_halt(cpu, 0); tegra20_boot_secondary()
67 tegra_enable_cpu_clock(cpu); tegra20_boot_secondary()
68 flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */ tegra20_boot_secondary()
69 tegra_cpu_out_of_reset(cpu); tegra20_boot_secondary()
73 static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle) tegra30_boot_secondary() argument
78 cpu = cpu_logical_map(cpu); tegra30_boot_secondary()
79 tegra_put_cpu_in_reset(cpu); tegra30_boot_secondary()
80 flowctrl_write_cpu_halt(cpu, 0); tegra30_boot_secondary()
96 if (cpumask_test_cpu(cpu, &tegra_cpu_init_mask)) { tegra30_boot_secondary()
99 if (tegra_pmc_cpu_is_powered(cpu)) tegra30_boot_secondary()
111 if (!tegra_pmc_cpu_is_powered(cpu)) { tegra30_boot_secondary()
112 ret = tegra_pmc_cpu_power_on(cpu); tegra30_boot_secondary()
118 while (!tegra_pmc_cpu_is_powered(cpu)) { tegra30_boot_secondary()
127 tegra_enable_cpu_clock(cpu); tegra30_boot_secondary()
131 ret = tegra_pmc_cpu_remove_clamping(cpu); tegra30_boot_secondary()
137 flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */ tegra30_boot_secondary()
138 tegra_cpu_out_of_reset(cpu); tegra30_boot_secondary()
142 static int tegra114_boot_secondary(unsigned int cpu, struct task_struct *idle) tegra114_boot_secondary() argument
146 cpu = cpu_logical_map(cpu); tegra114_boot_secondary()
148 if (cpumask_test_cpu(cpu, &tegra_cpu_init_mask)) { tegra114_boot_secondary()
155 flowctrl_write_cpu_csr(cpu, 1); tegra114_boot_secondary()
156 flowctrl_write_cpu_halt(cpu, tegra114_boot_secondary()
165 ret = tegra_pmc_cpu_power_on(cpu); tegra114_boot_secondary()
171 static int tegra_boot_secondary(unsigned int cpu, tegra_boot_secondary() argument
175 return tegra20_boot_secondary(cpu, idle); tegra_boot_secondary()
177 return tegra30_boot_secondary(cpu, idle); tegra_boot_secondary()
179 return tegra114_boot_secondary(cpu, idle); tegra_boot_secondary()
181 return tegra114_boot_secondary(cpu, idle); tegra_boot_secondary()
/linux-4.1.27/tools/power/cpupower/utils/
H A Dcpuidle-info.c23 static void cpuidle_cpu_output(unsigned int cpu, int verbose) cpuidle_cpu_output() argument
28 printf(_ ("Analyzing CPU %d:\n"), cpu); cpuidle_cpu_output()
30 idlestates = sysfs_get_idlestate_count(cpu); cpuidle_cpu_output()
32 printf(_("CPU %u: No idle states\n"), cpu); cpuidle_cpu_output()
39 tmp = sysfs_get_idlestate_name(cpu, idlestate); cpuidle_cpu_output()
51 int disabled = sysfs_is_idlestate_disabled(cpu, idlestate); cpuidle_cpu_output()
55 tmp = sysfs_get_idlestate_name(cpu, idlestate); cpuidle_cpu_output()
61 tmp = sysfs_get_idlestate_desc(cpu, idlestate); cpuidle_cpu_output()
68 sysfs_get_idlestate_latency(cpu, idlestate)); cpuidle_cpu_output()
70 sysfs_get_idlestate_usage(cpu, idlestate)); cpuidle_cpu_output()
72 sysfs_get_idlestate_time(cpu, idlestate)); cpuidle_cpu_output()
100 static void proc_cpuidle_cpu_output(unsigned int cpu) proc_cpuidle_cpu_output() argument
105 cstates = sysfs_get_idlestate_count(cpu); proc_cpuidle_cpu_output()
107 printf(_("CPU %u: No C-states info\n"), cpu); proc_cpuidle_cpu_output()
120 sysfs_get_idlestate_latency(cpu, cstate)); proc_cpuidle_cpu_output()
122 sysfs_get_idlestate_usage(cpu, cstate)); proc_cpuidle_cpu_output()
124 sysfs_get_idlestate_time(cpu, cstate)); proc_cpuidle_cpu_output()
144 unsigned int cpu = 0; cmd_idle_info() local
189 for (cpu = bitmask_first(cpus_chosen); cmd_idle_info()
190 cpu <= bitmask_last(cpus_chosen); cpu++) { cmd_idle_info()
192 if (!bitmask_isbitset(cpus_chosen, cpu) || cmd_idle_info()
193 cpufreq_cpu_exists(cpu)) cmd_idle_info()
199 proc_cpuidle_cpu_output(cpu); cmd_idle_info()
203 cpuidle_cpu_output(cpu, verbose); cmd_idle_info()
H A Dcpuidle-set.c34 unsigned int cpu = 0, idlestate = 0, idlestates = 0; cmd_idle_set() local
105 for (cpu = bitmask_first(cpus_chosen); cmd_idle_set()
106 cpu <= bitmask_last(cpus_chosen); cpu++) { cmd_idle_set()
108 if (!bitmask_isbitset(cpus_chosen, cpu)) cmd_idle_set()
111 if (sysfs_is_cpu_online(cpu) != 1) cmd_idle_set()
114 idlestates = sysfs_get_idlestate_count(cpu); cmd_idle_set()
120 ret = sysfs_idlestate_disable(cpu, idlestate, 1); cmd_idle_set()
122 printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu); cmd_idle_set()
125 idlestate, cpu); cmd_idle_set()
130 idlestate, cpu); cmd_idle_set()
133 ret = sysfs_idlestate_disable(cpu, idlestate, 0); cmd_idle_set()
135 printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu); cmd_idle_set()
138 idlestate, cpu); cmd_idle_set()
143 idlestate, cpu); cmd_idle_set()
148 (cpu, idlestate); cmd_idle_set()
150 (cpu, idlestate); cmd_idle_set()
152 cpu, idlestate, state_latency, latency); cmd_idle_set()
156 (cpu, idlestate, 1); cmd_idle_set()
158 printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu); cmd_idle_set()
164 (cpu, idlestate); cmd_idle_set()
167 (cpu, idlestate, 0); cmd_idle_set()
169 printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu); cmd_idle_set()
H A Dcpufreq-info.c41 if (strstr(value, "cpu ")) count_cpus()
43 if (sscanf(value, "cpu%d ", &cpunr) != 1) count_cpus()
50 /* cpu count starts from 0, on error return 1 (UP) */ count_cpus()
57 unsigned int cpu, nr_cpus; proc_cpufreq_output() local
66 for (cpu = 0; cpu < nr_cpus; cpu++) { proc_cpufreq_output()
67 policy = cpufreq_get_policy(cpu); proc_cpufreq_output()
71 if (cpufreq_get_hardware_limits(cpu, &min, &max)) { proc_cpufreq_output()
78 cpu , policy->min, max ? min_pctg : 0, policy->max, proc_cpufreq_output()
166 static int get_boost_mode(unsigned int cpu) get_boost_mode() argument
176 ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states); get_boost_mode()
179 " on CPU %d -- are you root?\n"), cpu); get_boost_mode()
195 ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states, get_boost_mode()
220 intel_turbo_ratio = msr_intel_get_turbo_ratio(cpu); get_boost_mode()
247 static void debug_output_one(unsigned int cpu) debug_output_one() argument
259 if (cpufreq_cpu_exists(cpu)) debug_output_one()
262 freq_kernel = cpufreq_get_freq_kernel(cpu); debug_output_one()
263 freq_hardware = cpufreq_get_freq_hardware(cpu); debug_output_one()
265 driver = cpufreq_get_driver(cpu); debug_output_one()
273 cpus = cpufreq_get_related_cpus(cpu); debug_output_one()
277 printf("%d ", cpus->cpu); debug_output_one()
280 printf("%d\n", cpus->cpu); debug_output_one()
284 cpus = cpufreq_get_affected_cpus(cpu); debug_output_one()
288 printf("%d ", cpus->cpu); debug_output_one()
291 printf("%d\n", cpus->cpu); debug_output_one()
295 latency = cpufreq_get_transition_latency(cpu); debug_output_one()
302 if (!(cpufreq_get_hardware_limits(cpu, &min, &max))) { debug_output_one()
310 freqs = cpufreq_get_available_frequencies(cpu); debug_output_one()
323 governors = cpufreq_get_available_governors(cpu); debug_output_one()
334 policy = cpufreq_get_policy(cpu); debug_output_one()
357 stats = cpufreq_get_stats(cpu, &total_time); debug_output_one()
368 total_trans = cpufreq_get_transitions(cpu); debug_output_one()
374 get_boost_mode(cpu); debug_output_one()
380 static int get_freq_kernel(unsigned int cpu, unsigned int human) get_freq_kernel() argument
382 unsigned long freq = cpufreq_get_freq_kernel(cpu); get_freq_kernel()
396 static int get_freq_hardware(unsigned int cpu, unsigned int human) get_freq_hardware() argument
398 unsigned long freq = cpufreq_get_freq_hardware(cpu); get_freq_hardware()
411 static int get_hardware_limits(unsigned int cpu) get_hardware_limits() argument
414 if (cpufreq_get_hardware_limits(cpu, &min, &max)) get_hardware_limits()
422 static int get_driver(unsigned int cpu) get_driver() argument
424 char *driver = cpufreq_get_driver(cpu); get_driver()
434 static int get_policy(unsigned int cpu) get_policy() argument
436 struct cpufreq_policy *policy = cpufreq_get_policy(cpu); get_policy()
446 static int get_available_governors(unsigned int cpu) get_available_governors() argument
449 cpufreq_get_available_governors(cpu); get_available_governors()
465 static int get_affected_cpus(unsigned int cpu) get_affected_cpus() argument
467 struct cpufreq_affected_cpus *cpus = cpufreq_get_affected_cpus(cpu); get_affected_cpus()
472 printf("%d ", cpus->cpu); get_affected_cpus()
475 printf("%d\n", cpus->cpu); get_affected_cpus()
482 static int get_related_cpus(unsigned int cpu) get_related_cpus() argument
484 struct cpufreq_affected_cpus *cpus = cpufreq_get_related_cpus(cpu); get_related_cpus()
489 printf("%d ", cpus->cpu); get_related_cpus()
492 printf("%d\n", cpus->cpu); get_related_cpus()
499 static int get_freq_stats(unsigned int cpu, unsigned int human) get_freq_stats() argument
501 unsigned long total_trans = cpufreq_get_transitions(cpu); get_freq_stats()
503 struct cpufreq_stats *stats = cpufreq_get_stats(cpu, &total_time); get_freq_stats()
524 static int get_latency(unsigned int cpu, unsigned int human) get_latency() argument
526 unsigned long latency = cpufreq_get_transition_latency(cpu); get_latency()
562 unsigned int cpu = 0; cmd_freq_info() local
618 "combined with passing a --cpu argument\n")); cmd_freq_info()
634 printf(_("You can't specify more than one --cpu parameter and/or\n" cmd_freq_info()
645 for (cpu = bitmask_first(cpus_chosen); cmd_freq_info()
646 cpu <= bitmask_last(cpus_chosen); cpu++) { cmd_freq_info()
648 if (!bitmask_isbitset(cpus_chosen, cpu)) cmd_freq_info()
650 if (cpufreq_cpu_exists(cpu)) { cmd_freq_info()
651 printf(_("couldn't analyze CPU %d as it doesn't seem to be present\n"), cpu); cmd_freq_info()
654 printf(_("analyzing CPU %d:\n"), cpu); cmd_freq_info()
658 get_boost_mode(cpu); cmd_freq_info()
661 debug_output_one(cpu); cmd_freq_info()
664 ret = get_affected_cpus(cpu); cmd_freq_info()
667 ret = get_related_cpus(cpu); cmd_freq_info()
670 ret = get_available_governors(cpu); cmd_freq_info()
673 ret = get_policy(cpu); cmd_freq_info()
676 ret = get_driver(cpu); cmd_freq_info()
679 ret = get_hardware_limits(cpu); cmd_freq_info()
682 ret = get_freq_hardware(cpu, human); cmd_freq_info()
685 ret = get_freq_kernel(cpu, human); cmd_freq_info()
688 ret = get_freq_stats(cpu, human); cmd_freq_info()
691 ret = get_latency(cpu, human); cmd_freq_info()
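A minimal sketch of the kind of data cpufreq_get_freq_kernel()/cpufreq_get_policy() above report, read directly from cpufreq sysfs. The attribute names are the standard scaling_* files, but their availability depends on the active cpufreq driver; the CPU number is illustrative.

	#include <stdio.h>

	/* Read one numeric cpufreq attribute, e.g. cpu0/cpufreq/scaling_cur_freq. */
	static long read_cpufreq_attr(unsigned int cpu, const char *attr)
	{
		char path[128];
		long val = -1;
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%u/cpufreq/%s", cpu, attr);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
		return val;
	}

	int main(void)
	{
		long cur = read_cpufreq_attr(0, "scaling_cur_freq");
		long max = read_cpufreq_attr(0, "scaling_max_freq");

		if (cur >= 0 && max >= 0)
			printf("cpu0: %ld kHz (max %ld kHz)\n", cur, max);
		return 0;
	}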
H A Dcpupower-info.c34 unsigned int cpu; cmd_info() local
67 /* Add more per cpu options here */ cmd_info()
83 for (cpu = bitmask_first(cpus_chosen); cmd_info()
84 cpu <= bitmask_last(cpus_chosen); cpu++) { cmd_info()
86 if (!bitmask_isbitset(cpus_chosen, cpu) || cmd_info()
87 cpufreq_cpu_exists(cpu)) cmd_info()
90 printf(_("analyzing CPU %d:\n"), cpu); cmd_info()
93 ret = msr_intel_get_perf_bias(cpu); cmd_info()
H A Dcpupower-set.c35 unsigned int cpu; cmd_set() local
78 for (cpu = bitmask_first(cpus_chosen); cmd_set()
79 cpu <= bitmask_last(cpus_chosen); cpu++) { cmd_set()
81 if (!bitmask_isbitset(cpus_chosen, cpu) || cmd_set()
82 cpufreq_cpu_exists(cpu)) cmd_set()
86 ret = msr_intel_set_perf_bias(cpu, perf_bias); cmd_set()
89 "value on CPU %d\n"), cpu); cmd_set()
/linux-4.1.27/drivers/base/
H A Dcacheinfo.c4 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
22 #include <linux/cpu.h>
31 /* pointer to per cpu cacheinfo */
33 #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
34 #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
35 #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
37 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu) get_cpu_cacheinfo() argument
39 return ci_cacheinfo(cpu); get_cpu_cacheinfo()
43 static int cache_setup_of_node(unsigned int cpu) cache_setup_of_node() argument
47 struct device *cpu_dev = get_cpu_device(cpu); cache_setup_of_node()
48 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); cache_setup_of_node()
56 pr_err("No cpu device for CPU %d\n", cpu); cache_setup_of_node()
61 pr_err("Failed to find cpu%d device node\n", cpu); cache_setup_of_node()
65 while (index < cache_leaves(cpu)) { cache_setup_of_node()
70 np = of_node_get(np);/* cpu node itself */ cache_setup_of_node()
77 if (index != cache_leaves(cpu)) /* not all OF nodes populated */ cache_setup_of_node()
89 static inline int cache_setup_of_node(unsigned int cpu) { return 0; } cache_leaves_are_shared() argument
102 static int cache_shared_cpu_map_setup(unsigned int cpu) cache_shared_cpu_map_setup() argument
104 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); cache_shared_cpu_map_setup()
109 ret = cache_setup_of_node(cpu); cache_shared_cpu_map_setup()
113 for (index = 0; index < cache_leaves(cpu); index++) { cache_shared_cpu_map_setup()
121 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); for_each_online_cpu()
125 if (i == cpu || !sib_cpu_ci->info_list) for_each_online_cpu()
129 cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); for_each_online_cpu()
138 static void cache_shared_cpu_map_remove(unsigned int cpu) cache_shared_cpu_map_remove() argument
140 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); cache_shared_cpu_map_remove()
144 for (index = 0; index < cache_leaves(cpu); index++) { cache_shared_cpu_map_remove()
149 if (sibling == cpu) /* skip itself */ cache_shared_cpu_map_remove()
157 cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); cache_shared_cpu_map_remove()
164 static void free_cache_attributes(unsigned int cpu) free_cache_attributes() argument
166 if (!per_cpu_cacheinfo(cpu)) free_cache_attributes()
169 cache_shared_cpu_map_remove(cpu); free_cache_attributes()
171 kfree(per_cpu_cacheinfo(cpu)); free_cache_attributes()
172 per_cpu_cacheinfo(cpu) = NULL; free_cache_attributes()
175 int __weak init_cache_level(unsigned int cpu) init_cache_level() argument
180 int __weak populate_cache_leaves(unsigned int cpu) populate_cache_leaves() argument
185 static int detect_cache_attributes(unsigned int cpu) detect_cache_attributes() argument
189 if (init_cache_level(cpu) || !cache_leaves(cpu)) detect_cache_attributes()
192 per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), detect_cache_attributes()
194 if (per_cpu_cacheinfo(cpu) == NULL) detect_cache_attributes()
197 ret = populate_cache_leaves(cpu); detect_cache_attributes()
204 ret = cache_shared_cpu_map_setup(cpu); detect_cache_attributes()
207 cpu); detect_cache_attributes()
213 free_cache_attributes(cpu); detect_cache_attributes()
219 #define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
225 #define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
226 #define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])
423 static void cpu_cache_sysfs_exit(unsigned int cpu) cpu_cache_sysfs_exit() argument
428 if (per_cpu_index_dev(cpu)) { cpu_cache_sysfs_exit()
429 for (i = 0; i < cache_leaves(cpu); i++) { cpu_cache_sysfs_exit()
430 ci_dev = per_cache_index_dev(cpu, i); cpu_cache_sysfs_exit()
435 kfree(per_cpu_index_dev(cpu)); cpu_cache_sysfs_exit()
436 per_cpu_index_dev(cpu) = NULL; cpu_cache_sysfs_exit()
438 device_unregister(per_cpu_cache_dev(cpu)); cpu_cache_sysfs_exit()
439 per_cpu_cache_dev(cpu) = NULL; cpu_cache_sysfs_exit()
442 static int cpu_cache_sysfs_init(unsigned int cpu) cpu_cache_sysfs_init() argument
444 struct device *dev = get_cpu_device(cpu); cpu_cache_sysfs_init()
446 if (per_cpu_cacheinfo(cpu) == NULL) cpu_cache_sysfs_init()
449 per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache"); cpu_cache_sysfs_init()
450 if (IS_ERR(per_cpu_cache_dev(cpu))) cpu_cache_sysfs_init()
451 return PTR_ERR(per_cpu_cache_dev(cpu)); cpu_cache_sysfs_init()
454 per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu), cpu_cache_sysfs_init()
456 if (unlikely(per_cpu_index_dev(cpu) == NULL)) cpu_cache_sysfs_init()
462 cpu_cache_sysfs_exit(cpu); cpu_cache_sysfs_init()
466 static int cache_add_dev(unsigned int cpu) cache_add_dev() argument
472 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); cache_add_dev()
475 rc = cpu_cache_sysfs_init(cpu); cache_add_dev()
479 parent = per_cpu_cache_dev(cpu); cache_add_dev()
480 for (i = 0; i < cache_leaves(cpu); i++) { cache_add_dev()
491 per_cache_index_dev(cpu, i) = ci_dev; cache_add_dev()
493 cpumask_set_cpu(cpu, &cache_dev_map); cache_add_dev()
497 cpu_cache_sysfs_exit(cpu); cache_add_dev()
501 static void cache_remove_dev(unsigned int cpu) cache_remove_dev() argument
503 if (!cpumask_test_cpu(cpu, &cache_dev_map)) cache_remove_dev()
505 cpumask_clear_cpu(cpu, &cache_dev_map); cache_remove_dev()
507 cpu_cache_sysfs_exit(cpu); cache_remove_dev()
513 unsigned int cpu = (unsigned long)hcpu; cacheinfo_cpu_callback() local
518 rc = detect_cache_attributes(cpu); cacheinfo_cpu_callback()
520 rc = cache_add_dev(cpu); cacheinfo_cpu_callback()
523 cache_remove_dev(cpu); cacheinfo_cpu_callback()
524 free_cache_attributes(cpu); cacheinfo_cpu_callback()
532 int cpu, rc = 0; cacheinfo_sysfs_init() local
536 for_each_online_cpu(cpu) { for_each_online_cpu()
537 rc = detect_cache_attributes(cpu); for_each_online_cpu()
540 rc = cache_add_dev(cpu); for_each_online_cpu()
542 free_cache_attributes(cpu); for_each_online_cpu()
543 pr_err("error populating cacheinfo..cpu%d\n", cpu); for_each_online_cpu()
H A Dcpu.c9 #include <linux/cpu.h>
34 static void change_cpu_under_node(struct cpu *cpu, change_cpu_under_node() argument
37 int cpuid = cpu->dev.id; change_cpu_under_node()
40 cpu->node_id = to_nid; change_cpu_under_node()
45 struct cpu *cpu = container_of(dev, struct cpu, dev); cpu_subsys_online() local
56 * When hot adding memory to memoryless node and enabling a cpu cpu_subsys_online()
57 * on the node, node number of the cpu may internally change. cpu_subsys_online()
61 change_cpu_under_node(cpu, from_nid, to_nid); cpu_subsys_online()
71 void unregister_cpu(struct cpu *cpu) unregister_cpu() argument
73 int logical_cpu = cpu->dev.id; unregister_cpu()
77 device_unregister(&cpu->dev); unregister_cpu()
125 .name = "cpu",
126 .dev_name = "cpu",
141 struct cpu *cpu = container_of(dev, struct cpu, dev); show_crash_notes() local
146 cpunum = cpu->dev.id; show_crash_notes()
149 * Might be reading other cpu's data based on which cpu read thread show_crash_notes()
150 * has been scheduled. But cpu data (memory) is allocated once during show_crash_notes()
197 * Print cpu online, possible, present, and system maps
276 * the only way to handle the issue of statically allocated cpu cpu_device_release()
277 * devices. The different architectures will have their cpu device cpu_device_release()
280 * by the cpu device. cpu_device_release()
295 n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:", print_cpu_modalias()
324 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
330 int register_cpu(struct cpu *cpu, int num) register_cpu() argument
334 cpu->node_id = cpu_to_node(num); register_cpu()
335 memset(&cpu->dev, 0x00, sizeof(struct device)); register_cpu()
336 cpu->dev.id = num; register_cpu()
337 cpu->dev.bus = &cpu_subsys; register_cpu()
338 cpu->dev.release = cpu_device_release; register_cpu()
339 cpu->dev.offline_disabled = !cpu->hotpluggable; register_cpu()
340 cpu->dev.offline = !cpu_online(num); register_cpu()
341 cpu->dev.of_node = of_get_cpu_node(num, NULL); register_cpu()
343 cpu->dev.bus->uevent = cpu_uevent; register_cpu()
345 cpu->dev.groups = common_cpu_attr_groups; register_cpu()
346 if (cpu->hotpluggable) register_cpu()
347 cpu->dev.groups = hotplugable_cpu_attr_groups; register_cpu()
348 error = device_register(&cpu->dev); register_cpu()
350 per_cpu(cpu_sys_devices, num) = &cpu->dev; register_cpu()
357 struct device *get_cpu_device(unsigned cpu) get_cpu_device() argument
359 if (cpu < nr_cpu_ids && cpu_possible(cpu)) get_cpu_device()
360 return per_cpu(cpu_sys_devices, cpu); get_cpu_device()
449 bool cpu_is_hotpluggable(unsigned cpu) cpu_is_hotpluggable() argument
451 struct device *dev = get_cpu_device(cpu); cpu_is_hotpluggable()
452 return dev && container_of(dev, struct cpu, dev)->hotpluggable; cpu_is_hotpluggable()
457 static DEFINE_PER_CPU(struct cpu, cpu_devices);
/linux-4.1.27/arch/sh/kernel/cpu/sh4a/
H A Dsmp-shx3.c19 #include <linux/cpu.h>
33 unsigned int cpu = hard_smp_processor_id(); ipi_interrupt_handler() local
34 unsigned int offs = 4 * cpu; ipi_interrupt_handler()
48 unsigned int cpu = 0; shx3_smp_setup() local
51 init_cpu_possible(cpumask_of(cpu)); shx3_smp_setup()
54 __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu)); shx3_smp_setup()
88 static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point) shx3_start_cpu() argument
91 __raw_writel(entry_point, RESET_REG(cpu)); shx3_start_cpu()
93 __raw_writel(virt_to_phys(entry_point), RESET_REG(cpu)); shx3_start_cpu()
95 if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) shx3_start_cpu()
96 __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); shx3_start_cpu()
98 while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) shx3_start_cpu()
102 __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu)); shx3_start_cpu()
110 static void shx3_send_ipi(unsigned int cpu, unsigned int message) shx3_send_ipi() argument
112 unsigned long addr = 0xfe410070 + (cpu * 4); shx3_send_ipi()
114 BUG_ON(cpu >= 4); shx3_send_ipi()
119 static void shx3_update_boot_vector(unsigned int cpu) shx3_update_boot_vector() argument
121 __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); shx3_update_boot_vector()
122 while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) shx3_update_boot_vector()
124 __raw_writel(STBCR_RESET, STBCR_REG(cpu)); shx3_update_boot_vector()
130 unsigned int cpu = (unsigned int)hcpu; shx3_cpu_callback() local
134 shx3_update_boot_vector(cpu); shx3_cpu_callback()
137 pr_info("CPU %u is now online\n", cpu); shx3_cpu_callback()
/linux-4.1.27/arch/alpha/include/asm/
H A Dtopology.h9 static inline int cpu_to_node(int cpu) cpu_to_node() argument
16 node = alpha_mv.cpuid_to_nid(cpu); cpu_to_node()
29 int cpu;
36 for_each_online_cpu(cpu) {
37 if (cpu_to_node(cpu) == node)
38 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
H A Dpercpu.h6 * 32-bit displacement from the GP. Which doesn't work for per cpu
7 * variables in modules, as an offset to the kernel per cpu area is
/linux-4.1.27/arch/x86/kernel/
H A Dsetup_percpu.c21 #include <asm/cpu.h>
69 unsigned int cpu; pcpu_need_numa() local
71 for_each_possible_cpu(cpu) { for_each_possible_cpu()
72 int node = early_cpu_to_node(cpu); for_each_possible_cpu()
87 * @cpu: cpu to allocate for
91 * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
98 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, pcpu_alloc_bootmem() argument
103 int node = early_cpu_to_node(cpu); pcpu_alloc_bootmem()
108 pr_info("cpu %d has no node %d or node-local memory\n", pcpu_alloc_bootmem()
109 cpu, node); pcpu_alloc_bootmem()
110 pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", pcpu_alloc_bootmem()
111 cpu, size, __pa(ptr)); pcpu_alloc_bootmem()
115 pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n", pcpu_alloc_bootmem()
116 cpu, size, node, __pa(ptr)); pcpu_alloc_bootmem()
127 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) pcpu_fc_alloc() argument
129 return pcpu_alloc_bootmem(cpu, size, align); pcpu_fc_alloc()
154 static inline void setup_percpu_segment(int cpu) setup_percpu_segment() argument
159 pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, setup_percpu_segment()
162 write_gdt_entry(get_cpu_gdt_table(cpu), setup_percpu_segment()
169 unsigned int cpu; setup_per_cpu_areas() local
221 for_each_possible_cpu(cpu) { for_each_possible_cpu()
222 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; for_each_possible_cpu()
223 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); for_each_possible_cpu()
224 per_cpu(cpu_number, cpu) = cpu; for_each_possible_cpu()
225 setup_percpu_segment(cpu); for_each_possible_cpu()
226 setup_stack_canary_segment(cpu); for_each_possible_cpu()
229 * initial arrays to the per cpu data areas. These for_each_possible_cpu()
235 per_cpu(x86_cpu_to_apicid, cpu) = for_each_possible_cpu()
236 early_per_cpu_map(x86_cpu_to_apicid, cpu); for_each_possible_cpu()
237 per_cpu(x86_bios_cpu_apicid, cpu) = for_each_possible_cpu()
238 early_per_cpu_map(x86_bios_cpu_apicid, cpu); for_each_possible_cpu()
241 per_cpu(x86_cpu_to_logical_apicid, cpu) = for_each_possible_cpu()
242 early_per_cpu_map(x86_cpu_to_logical_apicid, cpu); for_each_possible_cpu()
245 per_cpu(irq_stack_ptr, cpu) = for_each_possible_cpu()
246 per_cpu(irq_stack_union.irq_stack, cpu) + for_each_possible_cpu()
250 per_cpu(x86_cpu_to_node_map, cpu) = for_each_possible_cpu()
251 early_per_cpu_map(x86_cpu_to_node_map, cpu); for_each_possible_cpu()
253 * Ensure that the boot cpu numa_node is correct when the boot for_each_possible_cpu()
254 * cpu is on a node that doesn't have memory installed. for_each_possible_cpu()
258 * So set them all (boot cpu and all APs). for_each_possible_cpu()
260 set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); for_each_possible_cpu()
266 if (!cpu) for_each_possible_cpu()
267 switch_to_new_gdt(cpu); for_each_possible_cpu()
285 /* Setup cpu initialized, callin, callout masks */
H A Dcpuid.c24 * This driver uses /dev/cpu/%d/cpuid where %d is the minor number, and on
39 #include <linux/cpu.h>
88 int cpu = iminor(file_inode(file)); cpuid_read() local
99 err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); cpuid_read()
116 unsigned int cpu; cpuid_open() local
119 cpu = iminor(file_inode(file)); cpuid_open()
120 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) cpuid_open()
123 c = &cpu_data(cpu); cpuid_open()
140 static int cpuid_device_create(int cpu) cpuid_device_create() argument
144 dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL, cpuid_device_create()
145 "cpu%d", cpu); cpuid_device_create()
149 static void cpuid_device_destroy(int cpu) cpuid_device_destroy() argument
151 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); cpuid_device_destroy()
157 unsigned int cpu = (unsigned long)hcpu; cpuid_class_cpu_callback() local
162 err = cpuid_device_create(cpu); cpuid_class_cpu_callback()
167 cpuid_device_destroy(cpu); cpuid_class_cpu_callback()
180 return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt)); cpuid_devnode()
189 "cpu/cpuid", &cpuid_fops)) { cpuid_init()
222 __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
229 int cpu = 0; cpuid_exit() local
232 for_each_online_cpu(cpu) cpuid_exit()
233 cpuid_device_destroy(cpu); cpuid_exit()
235 __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); cpuid_exit()
/linux-4.1.27/arch/sh/kernel/
H A Dsmp.c21 #include <linux/cpu.h>
48 static inline void smp_store_cpu_info(unsigned int cpu) smp_store_cpu_info() argument
50 struct sh_cpuinfo *c = cpu_data + cpu; smp_store_cpu_info()
59 unsigned int cpu = smp_processor_id(); smp_prepare_cpus() local
62 current_thread_info()->cpu = cpu; smp_prepare_cpus()
72 unsigned int cpu = smp_processor_id(); smp_prepare_boot_cpu() local
74 __cpu_number_map[0] = cpu; smp_prepare_boot_cpu()
75 __cpu_logical_map[0] = cpu; smp_prepare_boot_cpu()
77 set_cpu_online(cpu, true); smp_prepare_boot_cpu()
78 set_cpu_possible(cpu, true); smp_prepare_boot_cpu()
80 per_cpu(cpu_state, cpu) = CPU_ONLINE; smp_prepare_boot_cpu()
84 void native_cpu_die(unsigned int cpu) native_cpu_die() argument
90 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { native_cpu_die()
92 pr_info("CPU %u is now offline\n", cpu); native_cpu_die()
100 pr_err("CPU %u didn't die...\n", cpu); native_cpu_die()
103 int native_cpu_disable(unsigned int cpu) native_cpu_disable() argument
105 return cpu == 0 ? -EPERM : 0; native_cpu_disable()
125 unsigned int cpu = smp_processor_id(); __cpu_disable() local
128 ret = mp_ops->cpu_disable(cpu); __cpu_disable()
134 * and we must not schedule until we're ready to give up the cpu. __cpu_disable()
136 set_cpu_online(cpu, false); __cpu_disable()
146 local_timer_stop(cpu); __cpu_disable()
155 clear_tasks_mm_cpumask(cpu); __cpu_disable()
160 int native_cpu_disable(unsigned int cpu) native_cpu_disable() argument
165 void native_cpu_die(unsigned int cpu) native_cpu_die() argument
179 unsigned int cpu = smp_processor_id(); start_secondary() local
193 notify_cpu_starting(cpu); start_secondary()
198 local_timer_setup(cpu); start_secondary()
201 smp_store_cpu_info(cpu); start_secondary()
203 set_cpu_online(cpu, true); start_secondary()
204 per_cpu(cpu_state, cpu) = CPU_ONLINE; start_secondary()
218 int __cpu_up(unsigned int cpu, struct task_struct *tsk) __cpu_up() argument
222 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; __cpu_up()
234 mp_ops->start_cpu(cpu, (unsigned long)_stext); __cpu_up()
238 if (cpu_online(cpu)) __cpu_up()
245 if (cpu_online(cpu)) __cpu_up()
254 int cpu; smp_cpus_done() local
256 for_each_online_cpu(cpu) smp_cpus_done()
257 bogosum += cpu_data[cpu].loops_per_jiffy; smp_cpus_done()
265 void smp_send_reschedule(int cpu) smp_send_reschedule() argument
267 mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE); smp_send_reschedule()
277 int cpu; arch_send_call_function_ipi_mask() local
279 for_each_cpu(cpu, mask) arch_send_call_function_ipi_mask()
280 mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION); arch_send_call_function_ipi_mask()
283 void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
285 mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); arch_send_call_function_single_ipi()
290 int cpu; smp_timer_broadcast() local
292 for_each_cpu(cpu, mask) smp_timer_broadcast()
293 mp_ops->send_ipi(cpu, SMP_MSG_TIMER); smp_timer_broadcast()
349 * address spaces, a new context is obtained on the current cpu, and tlb
354 * mm might be active on another cpu (eg debuggers doing the flushes on
H A Dtopology.c10 #include <linux/cpu.h>
19 static DEFINE_PER_CPU(struct cpu, cpu_devices);
24 static cpumask_t cpu_coregroup_map(unsigned int cpu) cpu_coregroup_map() argument
33 const struct cpumask *cpu_coregroup_mask(unsigned int cpu) cpu_coregroup_mask() argument
35 return &cpu_core_map[cpu]; cpu_coregroup_mask()
40 unsigned int cpu; arch_update_cpu_topology() local
42 for_each_possible_cpu(cpu) arch_update_cpu_topology()
43 cpu_core_map[cpu] = cpu_coregroup_map(cpu); arch_update_cpu_topology()
58 struct cpu *c = &per_cpu(cpu_devices, i); topology_init()
H A Dirq.c21 #include <cpu/mmu_context.h>
116 * allocate per-cpu stacks for hardirq and for softirq processing
118 void irq_ctx_init(int cpu) irq_ctx_init() argument
122 if (hardirq_ctx[cpu]) irq_ctx_init()
125 irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE]; irq_ctx_init()
127 irqctx->tinfo.cpu = cpu; irq_ctx_init()
131 hardirq_ctx[cpu] = irqctx; irq_ctx_init()
133 irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE]; irq_ctx_init()
135 irqctx->tinfo.cpu = cpu; irq_ctx_init()
139 softirq_ctx[cpu] = irqctx; irq_ctx_init()
142 cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); irq_ctx_init()
145 void irq_ctx_exit(int cpu) irq_ctx_exit() argument
147 hardirq_ctx[cpu] = NULL; irq_ctx_exit()
225 unsigned int irq, cpu = smp_processor_id(); migrate_irqs() local
230 if (data->node == cpu) { for_each_active_irq()
235 irq, cpu); for_each_active_irq()
/linux-4.1.27/include/linux/
H A Dsmpboot.h12 * @store: Pointer to per cpu storage for the task pointers
24 * parked (cpu offline)
26 * unparked (cpu online)
28 * unparked (cpu online). This is not guaranteed to be
29 * called on the target cpu of the thread. Careful!
36 int (*thread_should_run)(unsigned int cpu);
37 void (*thread_fn)(unsigned int cpu);
38 void (*create)(unsigned int cpu);
39 void (*setup)(unsigned int cpu);
40 void (*cleanup)(unsigned int cpu, bool online);
41 void (*park)(unsigned int cpu);
42 void (*unpark)(unsigned int cpu);
43 void (*pre_unpark)(unsigned int cpu);
H A Dirq_cpustat.h6 * architecture. Some arch (like s390) have per cpu hardware pages and
21 #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
29 #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */
H A Dtopology.h82 static inline int cpu_to_node(int cpu) cpu_to_node() argument
84 return per_cpu(numa_node, cpu); cpu_to_node()
96 static inline void set_cpu_numa_node(int cpu, int node) set_cpu_numa_node() argument
98 per_cpu(numa_node, cpu) = node; set_cpu_numa_node()
117 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
148 static inline int cpu_to_mem(int cpu) cpu_to_mem() argument
150 return per_cpu(_numa_mem_, cpu); cpu_to_mem()
155 static inline void set_cpu_numa_mem(int cpu, int node) set_cpu_numa_mem() argument
157 per_cpu(_numa_mem_, cpu) = node; set_cpu_numa_mem()
158 _node_numa_mem_[cpu_to_node(cpu)] = node; set_cpu_numa_mem()
180 static inline int cpu_to_mem(int cpu) cpu_to_mem() argument
182 return cpu_to_node(cpu); cpu_to_mem()
189 #define topology_physical_package_id(cpu) ((void)(cpu), -1)
192 #define topology_core_id(cpu) ((void)(cpu), 0)
195 #define topology_thread_cpumask(cpu) cpumask_of(cpu)
198 #define topology_core_cpumask(cpu) cpumask_of(cpu)
202 static inline const struct cpumask *cpu_smt_mask(int cpu) cpu_smt_mask() argument
204 return topology_thread_cpumask(cpu); cpu_smt_mask()
208 static inline const struct cpumask *cpu_cpu_mask(int cpu) cpu_cpu_mask() argument
210 return cpumask_of_node(cpu_to_node(cpu)); cpu_cpu_mask()
H A Dmvebu-pmsu.h15 int mvebu_pmsu_dfs_request(int cpu);
17 static inline int mvebu_pmsu_dfs_request(int cpu) { return -ENODEV; } argument
/linux-4.1.27/arch/x86/lib/
H A Dcache-smp.c9 void wbinvd_on_cpu(int cpu) wbinvd_on_cpu() argument
11 smp_call_function_single(cpu, __wbinvd, NULL, 1); wbinvd_on_cpu()
H A Dmsr-smp.c34 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) rdmsr_on_cpu() argument
42 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); rdmsr_on_cpu()
50 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) rdmsrl_on_cpu() argument
58 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); rdmsrl_on_cpu()
65 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) wrmsr_on_cpu() argument
75 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); wrmsr_on_cpu()
81 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) wrmsrl_on_cpu() argument
91 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); wrmsrl_on_cpu()
161 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) rdmsr_safe_on_cpu() argument
169 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); rdmsr_safe_on_cpu()
177 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) wrmsr_safe_on_cpu() argument
187 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); wrmsr_safe_on_cpu()
193 int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) wrmsrl_safe_on_cpu() argument
203 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); wrmsrl_safe_on_cpu()
209 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) rdmsrl_safe_on_cpu() argument
217 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); rdmsrl_safe_on_cpu()
242 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) rdmsr_safe_regs_on_cpu() argument
249 err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1); rdmsr_safe_regs_on_cpu()
255 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) wrmsr_safe_regs_on_cpu() argument
262 err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1); wrmsr_safe_regs_on_cpu()
/linux-4.1.27/arch/arm/mach-zynq/
H A Dslcr.c119 * zynq_slcr_cpu_start - Start cpu
120 * @cpu: cpu number
122 void zynq_slcr_cpu_start(int cpu) zynq_slcr_cpu_start() argument
127 reg &= ~(SLCR_A9_CPU_RST << cpu); zynq_slcr_cpu_start()
129 reg &= ~(SLCR_A9_CPU_CLKSTOP << cpu); zynq_slcr_cpu_start()
132 zynq_slcr_cpu_state_write(cpu, false); zynq_slcr_cpu_start()
136 * zynq_slcr_cpu_stop - Stop cpu
137 * @cpu: cpu number
139 void zynq_slcr_cpu_stop(int cpu) zynq_slcr_cpu_stop() argument
144 reg |= (SLCR_A9_CPU_CLKSTOP | SLCR_A9_CPU_RST) << cpu; zynq_slcr_cpu_stop()
149 * zynq_slcr_cpu_state - Read/write cpu state
150 * @cpu: cpu number
152 * SLCR_REBOOT_STATUS saves upper 2 bits (31/30 cpu states for cpu0 and cpu1)
153 * 0 means the cpu is running, 1 means the cpu is going to die.
155 * Return: true if cpu is running, false if cpu is going to die
157 bool zynq_slcr_cpu_state_read(int cpu) zynq_slcr_cpu_state_read() argument
162 state &= 1 << (31 - cpu); zynq_slcr_cpu_state_read()
168 * zynq_slcr_cpu_state - Read/write cpu state
169 * @cpu: cpu number
170 * @die: cpu state - true if cpu is going to die
172 * SLCR_REBOOT_STATUS saves upper 2 bits (31/30 cpu states for cpu0 and cpu1)
173 0 means the cpu is running, 1 means the cpu is going to die.
175 void zynq_slcr_cpu_state_write(int cpu, bool die) zynq_slcr_cpu_state_write() argument
180 mask = 1 << (31 - cpu); zynq_slcr_cpu_state_write()
/linux-4.1.27/arch/sh/include/cpu-sh4a/cpu/
H A Dserial.h4 /* arch/sh/kernel/cpu/sh4a/serial-sh7722.c */
/linux-4.1.27/arch/mips/kernel/
H A Dsmp.c33 #include <linux/cpu.h>
38 #include <asm/cpu.h>
67 * A logical cpu mask containing only one VPE per core to
81 static inline void set_cpu_sibling_map(int cpu) set_cpu_sibling_map() argument
85 cpumask_set_cpu(cpu, &cpu_sibling_setup_map); set_cpu_sibling_map()
89 if (cpu_data[cpu].package == cpu_data[i].package && set_cpu_sibling_map()
90 cpu_data[cpu].core == cpu_data[i].core) { set_cpu_sibling_map()
91 cpumask_set_cpu(i, &cpu_sibling_map[cpu]); set_cpu_sibling_map()
92 cpumask_set_cpu(cpu, &cpu_sibling_map[i]); set_cpu_sibling_map()
96 cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]); set_cpu_sibling_map()
99 static inline void set_cpu_core_map(int cpu) set_cpu_core_map() argument
103 cpumask_set_cpu(cpu, &cpu_core_setup_map); set_cpu_core_map()
106 if (cpu_data[cpu].package == cpu_data[i].package) { set_cpu_core_map()
107 cpumask_set_cpu(i, &cpu_core_map[cpu]); set_cpu_core_map()
108 cpumask_set_cpu(cpu, &cpu_core_map[i]); set_cpu_core_map()
115 * new cpu appears or disappears.
154 unsigned int cpu; start_secondary() local
169 cpu = smp_processor_id(); start_secondary()
170 cpu_data[cpu].udelay_val = loops_per_jiffy; start_secondary()
172 cpumask_set_cpu(cpu, &cpu_coherent_mask); start_secondary()
173 notify_cpu_starting(cpu); start_secondary()
175 set_cpu_online(cpu, true); start_secondary()
177 set_cpu_sibling_map(cpu); start_secondary()
178 set_cpu_core_map(cpu); start_secondary()
182 cpumask_set_cpu(cpu, &cpu_callin_map); start_secondary()
184 synchronise_count_slave(cpu); start_secondary()
238 current_thread_info()->cpu = 0; smp_prepare_cpus()
249 /* preload SMP state for boot cpu */ smp_prepare_boot_cpu()
257 int __cpu_up(unsigned int cpu, struct task_struct *tidle) __cpu_up() argument
259 mp_ops->boot_secondary(cpu, tidle); __cpu_up()
264 while (!cpumask_test_cpu(cpu, &cpu_callin_map)) { __cpu_up()
269 synchronise_count_master(cpu); __cpu_up()
320 * address spaces, a new context is obtained on the current cpu, and tlb
325 * mm might be active on another cpu (eg debuggers doing the flushes on
337 unsigned int cpu; flush_tlb_mm() local
339 for_each_online_cpu(cpu) { for_each_online_cpu()
340 if (cpu != smp_processor_id() && cpu_context(cpu, mm)) for_each_online_cpu()
341 cpu_context(cpu, mm) = 0; for_each_online_cpu()
376 unsigned int cpu; flush_tlb_range() local
378 for_each_online_cpu(cpu) { for_each_online_cpu()
379 if (cpu != smp_processor_id() && cpu_context(cpu, mm)) for_each_online_cpu()
380 cpu_context(cpu, mm) = 0; for_each_online_cpu()
422 unsigned int cpu; flush_tlb_page() local
424 for_each_online_cpu(cpu) { for_each_online_cpu()
425 if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) for_each_online_cpu()
426 cpu_context(cpu, vma->vm_mm) = 0; for_each_online_cpu()
453 int cpu = smp_processor_id(); dump_send_ipi() local
458 if (i != cpu) dump_send_ipi()
474 int cpu; tick_broadcast() local
476 for_each_cpu(cpu, mask) { for_each_cpu()
477 count = &per_cpu(tick_broadcast_count, cpu); for_each_cpu()
478 csd = &per_cpu(tick_broadcast_csd, cpu); for_each_cpu()
481 smp_call_function_single_async(cpu, csd); for_each_cpu()
487 int cpu = smp_processor_id(); tick_broadcast_callee() local
489 atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); tick_broadcast_callee()
495 int cpu; tick_broadcast_init() local
497 for (cpu = 0; cpu < NR_CPUS; cpu++) { tick_broadcast_init()
498 csd = &per_cpu(tick_broadcast_csd, cpu); tick_broadcast_init()
H A Dtopology.c1 #include <linux/cpu.h>
8 static DEFINE_PER_CPU(struct cpu, cpu_devices);
20 struct cpu *c = &per_cpu(cpu_devices, i); topology_init()
H A Dcrash.c12 /* This keeps track of which cpu is crashing. */
20 int cpu = smp_processor_id(); crash_shutdown_secondary() local
24 if (!cpu_online(cpu)) crash_shutdown_secondary()
28 if (!cpumask_test_cpu(cpu, &cpus_in_crash)) crash_shutdown_secondary()
29 crash_save_cpu(regs, cpu); crash_shutdown_secondary()
30 cpumask_set_cpu(cpu, &cpus_in_crash); crash_shutdown_secondary()
42 unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ crash_kexec_prepare_cpus()
H A Dsmp-gic.c21 void gic_send_ipi_single(int cpu, unsigned int action) gic_send_ipi_single() argument
25 unsigned int core = cpu_data[cpu].core; gic_send_ipi_single()
27 pr_debug("CPU%d: %s cpu %d action %u status %08x\n", gic_send_ipi_single()
28 smp_processor_id(), __func__, cpu, action, read_c0_status()); gic_send_ipi_single()
34 intr = plat_ipi_call_int_xlate(cpu); gic_send_ipi_single()
38 intr = plat_ipi_resched_int_xlate(cpu); gic_send_ipi_single()
48 while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { gic_send_ipi_single()
H A Dsmp-bmips.c18 #include <linux/cpu.h>
38 #include <asm/cpu-features.h>
51 static void bmips_set_reset_vec(int cpu, u32 val);
59 static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
60 static void bmips5000_send_ipi_single(int cpu, unsigned int action);
68 #define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift))
69 #define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
70 #define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
71 #define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0))
75 int i, cpu = 1, boot_cpu = 0; bmips_smp_setup() local
141 __cpu_number_map[i] = cpu; bmips_smp_setup()
142 __cpu_logical_map[cpu] = i; bmips_smp_setup()
143 cpu++; bmips_smp_setup()
180 static void bmips_boot_secondary(int cpu, struct task_struct *idle) bmips_boot_secondary() argument
201 pr_info("SMP: Booting CPU%d...\n", cpu); bmips_boot_secondary()
203 if (cpumask_test_cpu(cpu, &bmips_booted_mask)) { bmips_boot_secondary()
205 bmips_set_reset_vec(cpu, RESET_FROM_KSEG0); bmips_boot_secondary()
210 bmips43xx_send_ipi_single(cpu, 0); bmips_boot_secondary()
213 bmips5000_send_ipi_single(cpu, 0); bmips_boot_secondary()
217 bmips_set_reset_vec(cpu, RESET_FROM_KSEG1); bmips_boot_secondary()
223 if (cpu_logical_map(cpu) == 1) bmips_boot_secondary()
227 write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); bmips_boot_secondary()
230 cpumask_set_cpu(cpu, &bmips_booted_mask); bmips_boot_secondary()
273 static void bmips5000_send_ipi_single(int cpu, unsigned int action) bmips5000_send_ipi_single() argument
275 write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION)); bmips5000_send_ipi_single()
314 static void bmips43xx_send_ipi_single(int cpu, unsigned int action) bmips43xx_send_ipi_single() argument
319 set_c0_cause(cpu ? C_SW1 : C_SW0); bmips43xx_send_ipi_single()
320 per_cpu(ipi_action_mask, cpu) |= action; bmips43xx_send_ipi_single()
328 int action, cpu = irq - IPI0_IRQ; bmips43xx_ipi_interrupt() local
332 per_cpu(ipi_action_mask, cpu) = 0; bmips43xx_ipi_interrupt()
333 clear_c0_cause(cpu ? C_SW1 : C_SW0); bmips43xx_ipi_interrupt()
357 unsigned int cpu = smp_processor_id(); bmips_cpu_disable() local
359 if (cpu == 0) bmips_cpu_disable()
362 pr_info("SMP: CPU%d is offline\n", cpu); bmips_cpu_disable()
364 set_cpu_online(cpu, false); bmips_cpu_disable()
365 cpumask_clear_cpu(cpu, &cpu_callin_map); bmips_cpu_disable()
374 static void bmips_cpu_die(unsigned int cpu) bmips_cpu_die() argument
461 int cpu; member in struct:reset_vec_info
468 int shift = info->cpu & 0x01 ? 16 : 0; bmips_set_reset_vec_remote()
476 if (info->cpu & 0x02) { bmips_set_reset_vec_remote()
488 static void bmips_set_reset_vec(int cpu, u32 val) bmips_set_reset_vec() argument
494 info.cpu = cpu; bmips_set_reset_vec()
500 if (cpu == 0) bmips_set_reset_vec()
H A Dcpu-probe.c22 #include <asm/cpu.h>
23 #include <asm/cpu-features.h>
24 #include <asm/cpu-type.h>
204 /* Disable it in the boot cpu */ ftlb_disable()
270 * Probe whether cpu has config register by trying to play with
291 static inline void set_elf_platform(int cpu, const char *plat) set_elf_platform() argument
293 if (cpu == 0) set_elf_platform()
659 static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) cpu_probe_legacy() argument
664 __cpu_name[cpu] = "R2000"; cpu_probe_legacy()
676 __cpu_name[cpu] = "R3081"; cpu_probe_legacy()
679 __cpu_name[cpu] = "R3000A"; cpu_probe_legacy()
683 __cpu_name[cpu] = "R3000"; cpu_probe_legacy()
697 __cpu_name[cpu] = "R4400PC"; cpu_probe_legacy()
700 __cpu_name[cpu] = "R4000PC"; cpu_probe_legacy()
726 __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC"; cpu_probe_legacy()
729 __cpu_name[cpu] = mc ? "R4000MC" : "R4000SC"; cpu_probe_legacy()
748 __cpu_name[cpu] = "NEC VR4111"; cpu_probe_legacy()
752 __cpu_name[cpu] = "NEC VR4121"; cpu_probe_legacy()
757 __cpu_name[cpu] = "NEC VR4122"; cpu_probe_legacy()
760 __cpu_name[cpu] = "NEC VR4181A"; cpu_probe_legacy()
766 __cpu_name[cpu] = "NEC VR4131"; cpu_probe_legacy()
770 __cpu_name[cpu] = "NEC VR4133"; cpu_probe_legacy()
776 __cpu_name[cpu] = "NEC Vr41xx"; cpu_probe_legacy()
782 __cpu_name[cpu] = "R4300"; cpu_probe_legacy()
791 __cpu_name[cpu] = "R4600"; cpu_probe_legacy()
807 __cpu_name[cpu] = "R4650"; cpu_probe_legacy()
820 __cpu_name[cpu] = "TX3927"; cpu_probe_legacy()
826 __cpu_name[cpu] = "TX3912"; cpu_probe_legacy()
831 __cpu_name[cpu] = "TX3922"; cpu_probe_legacy()
839 __cpu_name[cpu] = "R4700"; cpu_probe_legacy()
848 __cpu_name[cpu] = "R49XX"; cpu_probe_legacy()
858 __cpu_name[cpu] = "R5000"; cpu_probe_legacy()
866 __cpu_name[cpu] = "R5432"; cpu_probe_legacy()
874 __cpu_name[cpu] = "R5500"; cpu_probe_legacy()
882 __cpu_name[cpu] = "Nevada"; cpu_probe_legacy()
890 __cpu_name[cpu] = "R6000"; cpu_probe_legacy()
899 __cpu_name[cpu] = "R6000A"; cpu_probe_legacy()
908 __cpu_name[cpu] = "RM7000"; cpu_probe_legacy()
924 __cpu_name[cpu] = "RM8000"; cpu_probe_legacy()
933 __cpu_name[cpu] = "R10000"; cpu_probe_legacy()
943 __cpu_name[cpu] = "R12000"; cpu_probe_legacy()
954 __cpu_name[cpu] = "R16000"; cpu_probe_legacy()
957 __cpu_name[cpu] = "R14000"; cpu_probe_legacy()
970 __cpu_name[cpu] = "ICT Loongson-2"; cpu_probe_legacy()
971 set_elf_platform(cpu, "loongson2e"); cpu_probe_legacy()
977 __cpu_name[cpu] = "ICT Loongson-2"; cpu_probe_legacy()
978 set_elf_platform(cpu, "loongson2f"); cpu_probe_legacy()
984 __cpu_name[cpu] = "ICT Loongson-3"; cpu_probe_legacy()
985 set_elf_platform(cpu, "loongson3a"); cpu_probe_legacy()
991 __cpu_name[cpu] = "ICT Loongson-3"; cpu_probe_legacy()
992 set_elf_platform(cpu, "loongson3b"); cpu_probe_legacy()
1010 __cpu_name[cpu] = "Loongson 1B"; cpu_probe_legacy()
1018 static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) cpu_probe_mips() argument
1025 __cpu_name[cpu] = "MIPS GENERIC QEMU"; cpu_probe_mips()
1030 __cpu_name[cpu] = "MIPS 4Kc"; cpu_probe_mips()
1036 __cpu_name[cpu] = "MIPS 4KEc"; cpu_probe_mips()
1042 __cpu_name[cpu] = "MIPS 4KSc"; cpu_probe_mips()
1047 __cpu_name[cpu] = "MIPS 5Kc"; cpu_probe_mips()
1052 __cpu_name[cpu] = "MIPS 5KE"; cpu_probe_mips()
1057 __cpu_name[cpu] = "MIPS 20Kc"; cpu_probe_mips()
1062 __cpu_name[cpu] = "MIPS 24Kc"; cpu_probe_mips()
1067 __cpu_name[cpu] = "MIPS 24KEc"; cpu_probe_mips()
1072 __cpu_name[cpu] = "MIPS 25Kc"; cpu_probe_mips()
1077 __cpu_name[cpu] = "MIPS 34Kc"; cpu_probe_mips()
1082 __cpu_name[cpu] = "MIPS 74Kc"; cpu_probe_mips()
1087 __cpu_name[cpu] = "MIPS M14Kc"; cpu_probe_mips()
1092 __cpu_name[cpu] = "MIPS M14KEc"; cpu_probe_mips()
1097 __cpu_name[cpu] = "MIPS 1004Kc"; cpu_probe_mips()
1102 __cpu_name[cpu] = "MIPS 1074Kc"; cpu_probe_mips()
1106 __cpu_name[cpu] = "MIPS interAptiv"; cpu_probe_mips()
1110 __cpu_name[cpu] = "MIPS interAptiv (multi)"; cpu_probe_mips()
1114 __cpu_name[cpu] = "MIPS proAptiv"; cpu_probe_mips()
1118 __cpu_name[cpu] = "MIPS proAptiv (multi)"; cpu_probe_mips()
1122 __cpu_name[cpu] = "MIPS P5600"; cpu_probe_mips()
1126 __cpu_name[cpu] = "MIPS M5150"; cpu_probe_mips()
1135 static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) cpu_probe_alchemy() argument
1144 __cpu_name[cpu] = "Au1000"; cpu_probe_alchemy()
1147 __cpu_name[cpu] = "Au1500"; cpu_probe_alchemy()
1150 __cpu_name[cpu] = "Au1100"; cpu_probe_alchemy()
1153 __cpu_name[cpu] = "Au1550"; cpu_probe_alchemy()
1156 __cpu_name[cpu] = "Au1200"; cpu_probe_alchemy()
1158 __cpu_name[cpu] = "Au1250"; cpu_probe_alchemy()
1161 __cpu_name[cpu] = "Au1210"; cpu_probe_alchemy()
1164 __cpu_name[cpu] = "Au1xxx"; cpu_probe_alchemy()
1171 static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu) cpu_probe_sibyte() argument
1179 __cpu_name[cpu] = "SiByte SB1"; cpu_probe_sibyte()
1186 __cpu_name[cpu] = "SiByte SB1A"; cpu_probe_sibyte()
1191 static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu) cpu_probe_sandcraft() argument
1197 __cpu_name[cpu] = "Sandcraft SR71000"; cpu_probe_sandcraft()
1204 static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu) cpu_probe_nxp() argument
1210 __cpu_name[cpu] = "Philips PR4450"; cpu_probe_nxp()
1216 static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) cpu_probe_broadcom() argument
1223 __cpu_name[cpu] = "Broadcom BMIPS32"; cpu_probe_broadcom()
1224 set_elf_platform(cpu, "bmips32"); cpu_probe_broadcom()
1230 __cpu_name[cpu] = "Broadcom BMIPS3300"; cpu_probe_broadcom()
1231 set_elf_platform(cpu, "bmips3300"); cpu_probe_broadcom()
1239 __cpu_name[cpu] = "Broadcom BMIPS4380"; cpu_probe_broadcom()
1240 set_elf_platform(cpu, "bmips4380"); cpu_probe_broadcom()
1243 __cpu_name[cpu] = "Broadcom BMIPS4350"; cpu_probe_broadcom()
1244 set_elf_platform(cpu, "bmips4350"); cpu_probe_broadcom()
1251 __cpu_name[cpu] = "Broadcom BMIPS5000"; cpu_probe_broadcom()
1252 set_elf_platform(cpu, "bmips5000"); cpu_probe_broadcom()
1258 static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) cpu_probe_cavium() argument
1266 __cpu_name[cpu] = "Cavium Octeon"; cpu_probe_cavium()
1273 __cpu_name[cpu] = "Cavium Octeon+"; cpu_probe_cavium()
1275 set_elf_platform(cpu, "octeon"); cpu_probe_cavium()
1283 __cpu_name[cpu] = "Cavium Octeon II"; cpu_probe_cavium()
1284 set_elf_platform(cpu, "octeon2"); cpu_probe_cavium()
1289 __cpu_name[cpu] = "Cavium Octeon III"; cpu_probe_cavium()
1290 set_elf_platform(cpu, "octeon3"); cpu_probe_cavium()
1299 static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) cpu_probe_ingenic() argument
1309 __cpu_name[cpu] = "Ingenic JZRISC"; cpu_probe_ingenic()
1317 static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) cpu_probe_netlogic() argument
1323 __cpu_name[cpu] = "Au1300"; cpu_probe_netlogic()
1341 __cpu_name[cpu] = "Broadcom XLPII"; cpu_probe_netlogic()
1347 __cpu_name[cpu] = "Netlogic XLP"; cpu_probe_netlogic()
1359 __cpu_name[cpu] = "Netlogic XLR"; cpu_probe_netlogic()
1376 __cpu_name[cpu] = "Netlogic XLS"; cpu_probe_netlogic()
1410 unsigned int cpu = smp_processor_id(); cpu_probe() local
1423 cpu_probe_legacy(c, cpu); cpu_probe()
1426 cpu_probe_mips(c, cpu); cpu_probe()
1429 cpu_probe_alchemy(c, cpu); cpu_probe()
1432 cpu_probe_sibyte(c, cpu); cpu_probe()
1435 cpu_probe_broadcom(c, cpu); cpu_probe()
1438 cpu_probe_sandcraft(c, cpu); cpu_probe()
1441 cpu_probe_nxp(c, cpu); cpu_probe()
1444 cpu_probe_cavium(c, cpu); cpu_probe()
1447 cpu_probe_ingenic(c, cpu); cpu_probe()
1450 cpu_probe_netlogic(c, cpu); cpu_probe()
1454 BUG_ON(!__cpu_name[cpu]); cpu_probe()
1458 * Platform code can force the cpu type to optimize code cpu_probe()
1459 * generation. In that case be sure the cpu type is correctly cpu_probe()
1498 if (cpu == 0) cpu_probe()
H A Dcevt-bcm1480.c46 unsigned int cpu = smp_processor_id(); sibyte_set_mode() local
49 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); sibyte_set_mode()
50 init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); sibyte_set_mode()
74 unsigned int cpu = smp_processor_id(); sibyte_next_event() local
77 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); sibyte_next_event()
78 init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); sibyte_next_event()
89 unsigned int cpu = smp_processor_id(); sibyte_counter_handler() local
100 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); sibyte_counter_handler()
114 unsigned int cpu = smp_processor_id(); sb1480_clockevent_init() local
115 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; sb1480_clockevent_init()
116 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); sb1480_clockevent_init()
117 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); sb1480_clockevent_init()
118 unsigned char *name = per_cpu(sibyte_hpt_name, cpu); sb1480_clockevent_init()
120 BUG_ON(cpu > 3); /* Only have 4 general purpose timers */ sb1480_clockevent_init()
122 sprintf(name, "bcm1480-counter-%d", cpu); sb1480_clockevent_init()
131 cd->cpumask = cpumask_of(cpu); sb1480_clockevent_init()
136 bcm1480_mask_irq(cpu, irq); sb1480_clockevent_init()
139 * Map the timer interrupt to IP[4] of this cpu sb1480_clockevent_init()
142 IOADDR(A_BCM1480_IMR_REGISTER(cpu, sb1480_clockevent_init()
145 bcm1480_unmask_irq(cpu, irq); sb1480_clockevent_init()
152 irq_set_affinity(irq, cpumask_of(cpu)); sb1480_clockevent_init()
H A Dcevt-sb1250.c44 unsigned int cpu = smp_processor_id(); sibyte_set_mode() local
47 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); sibyte_set_mode()
48 init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); sibyte_set_mode()
72 unsigned int cpu = smp_processor_id(); sibyte_next_event() local
75 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); sibyte_next_event()
76 init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); sibyte_next_event()
87 unsigned int cpu = smp_processor_id(); sibyte_counter_handler() local
98 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); sibyte_counter_handler()
112 unsigned int cpu = smp_processor_id(); sb1250_clockevent_init() local
113 unsigned int irq = K_INT_TIMER_0 + cpu; sb1250_clockevent_init()
114 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); sb1250_clockevent_init()
115 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); sb1250_clockevent_init()
116 unsigned char *name = per_cpu(sibyte_hpt_name, cpu); sb1250_clockevent_init()
119 BUG_ON(cpu > 2); sb1250_clockevent_init()
121 sprintf(name, "sb1250-counter-%d", cpu); sb1250_clockevent_init()
130 cd->cpumask = cpumask_of(cpu); sb1250_clockevent_init()
135 sb1250_mask_irq(cpu, irq); sb1250_clockevent_init()
138 * Map the timer interrupt to IP[4] of this cpu sb1250_clockevent_init()
141 IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) + sb1250_clockevent_init()
144 sb1250_unmask_irq(cpu, irq); sb1250_clockevent_init()
151 irq_set_affinity(irq, cpumask_of(cpu)); sb1250_clockevent_init()
/linux-4.1.27/arch/blackfin/mach-bf561/include/mach/
H A Dsmp.h18 int platform_boot_secondary(unsigned int cpu, struct task_struct *idle);
20 void platform_secondary_init(unsigned int cpu);
26 void platform_send_ipi_cpu(unsigned int cpu, int irq);
28 void platform_clear_ipi(unsigned int cpu, int irq);
/linux-4.1.27/kernel/sched/
H A Dcpudeadline.h10 int cpu; member in struct:cpudl_item
25 void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
27 void cpudl_set_freecpu(struct cpudl *cp, int cpu);
28 void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
H A Dcpuacct.c20 /* Time spent by the tasks of the cpu accounting group executing in ... */
28 /* track cpu usage of a group of tasks and its child groups */
31 /* cpuusage holds pointer to a u64-type object on every cpu */
41 /* return cpu accounting group to which this task belongs */ task_ca()
58 /* create a new cpu accounting group */
89 /* destroy an existing cpu accounting group */ cpuacct_css_free()
99 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) cpuacct_cpuusage_read() argument
101 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); cpuacct_cpuusage_read()
108 raw_spin_lock_irq(&cpu_rq(cpu)->lock); cpuacct_cpuusage_read()
110 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); cpuacct_cpuusage_read()
118 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) cpuacct_cpuusage_write() argument
120 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); cpuacct_cpuusage_write()
126 raw_spin_lock_irq(&cpu_rq(cpu)->lock); cpuacct_cpuusage_write()
128 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); cpuacct_cpuusage_write()
134 /* return total cpu usage (in nanoseconds) of a group */ cpuusage_read()
188 int cpu; cpuacct_stats_show() local
191 for_each_online_cpu(cpu) { for_each_online_cpu()
192 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); for_each_online_cpu()
200 for_each_online_cpu(cpu) { for_each_online_cpu()
201 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); for_each_online_cpu()
238 int cpu; cpuacct_charge() local
240 cpu = task_cpu(tsk); cpuacct_charge()
247 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); cpuacct_charge()
/linux-4.1.27/tools/power/cpupower/debug/i386/
H A Dcentrino-decode.c7 * linux/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
29 static int rdmsr(unsigned int cpu, unsigned int msr, rdmsr() argument
39 if (cpu > MCPU) rdmsr()
42 sprintf(file, "/dev/cpu/%d/msr", cpu); rdmsr()
76 static int decode_live(unsigned int cpu) decode_live() argument
81 err = rdmsr(cpu, MSR_IA32_PERF_STATUS, &lo, &hi); decode_live()
84 printf("can't get MSR_IA32_PERF_STATUS for cpu %d\n", cpu); decode_live()
85 printf("Possible trouble: you don't run an Enhanced SpeedStep capable cpu\n"); decode_live()
97 unsigned int cpu, mode = 0; main() local
100 cpu = 0; main()
102 cpu = strtoul(argv[1], NULL, 0); main()
103 if (cpu >= MCPU) main()
108 decode(cpu); main()
110 decode_live(cpu); main()
H A Dpowernow-k8-decode.c7 * linux/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
28 static int get_fidvid(uint32_t cpu, uint32_t *fid, uint32_t *vid) get_fidvid() argument
35 if (cpu > MCPU) get_fidvid()
38 sprintf(file, "/dev/cpu/%d/msr", cpu); get_fidvid()
72 int cpu; main() local
76 cpu = 0; main()
78 cpu = strtoul(argv[1], NULL, 0); main()
80 err = get_fidvid(cpu, &fid, &vid); main()
84 printf("Possible trouble: you don't run a powernow-k8 capable cpu\n"); main()
90 printf("cpu %d currently at %d MHz and %d mV\n", main()
91 cpu, main()
/linux-4.1.27/arch/blackfin/
H A DMakefile60 cpu-$(CONFIG_BF512) := bf512
61 cpu-$(CONFIG_BF514) := bf514
62 cpu-$(CONFIG_BF516) := bf516
63 cpu-$(CONFIG_BF518) := bf518
64 cpu-$(CONFIG_BF522) := bf522
65 cpu-$(CONFIG_BF523) := bf523
66 cpu-$(CONFIG_BF524) := bf524
67 cpu-$(CONFIG_BF525) := bf525
68 cpu-$(CONFIG_BF526) := bf526
69 cpu-$(CONFIG_BF527) := bf527
70 cpu-$(CONFIG_BF531) := bf531
71 cpu-$(CONFIG_BF532) := bf532
72 cpu-$(CONFIG_BF533) := bf533
73 cpu-$(CONFIG_BF534) := bf534
74 cpu-$(CONFIG_BF536) := bf536
75 cpu-$(CONFIG_BF537) := bf537
76 cpu-$(CONFIG_BF538) := bf538
77 cpu-$(CONFIG_BF539) := bf539
78 cpu-$(CONFIG_BF542) := bf542
79 cpu-$(CONFIG_BF542M) := bf542m
80 cpu-$(CONFIG_BF544) := bf544
81 cpu-$(CONFIG_BF544M) := bf544m
82 cpu-$(CONFIG_BF547) := bf547
83 cpu-$(CONFIG_BF547M) := bf547m
84 cpu-$(CONFIG_BF548) := bf548
85 cpu-$(CONFIG_BF548M) := bf548m
86 cpu-$(CONFIG_BF549) := bf549
87 cpu-$(CONFIG_BF549M) := bf549m
88 cpu-$(CONFIG_BF561) := bf561
89 cpu-$(CONFIG_BF609) := bf609
101 CPU_REV := $(cpu-y)-$(rev-y)
/linux-4.1.27/arch/arc/kernel/
H A Dsetup.c15 #include <linux/cpu.h>
47 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; read_arc_build_cfg_regs() local
48 FIX_PTR(cpu); read_arc_build_cfg_regs()
50 READ_BCR(AUX_IDENTITY, cpu->core); read_arc_build_cfg_regs()
51 READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); read_arc_build_cfg_regs()
53 READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers); read_arc_build_cfg_regs()
54 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); read_arc_build_cfg_regs()
57 cpu->uncached_base = uncached_space.start << 24; read_arc_build_cfg_regs()
59 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); read_arc_build_cfg_regs()
61 cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */ read_arc_build_cfg_regs()
62 cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */ read_arc_build_cfg_regs()
63 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */ read_arc_build_cfg_regs()
64 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0; read_arc_build_cfg_regs()
65 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */ read_arc_build_cfg_regs()
80 cpu->iccm.base_addr = iccm.base << 16; read_arc_build_cfg_regs()
81 cpu->iccm.sz = 0x2000 << (iccm.sz - 1); read_arc_build_cfg_regs()
87 cpu->dccm.sz = 0x800 << (dccm.sz); read_arc_build_cfg_regs()
90 cpu->dccm.base_addr = dccm_base.addr << 8; read_arc_build_cfg_regs()
94 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem); read_arc_build_cfg_regs()
105 cpu->extn.fpu_sp = sp.ver ? 1 : 0; read_arc_build_cfg_regs()
106 cpu->extn.fpu_dp = dp.ver ? 1 : 0; read_arc_build_cfg_regs()
109 cpu->bpu.ver = bpu.ver; read_arc_build_cfg_regs()
110 cpu->bpu.full = bpu.fam ? 1 : 0; read_arc_build_cfg_regs()
112 cpu->bpu.num_cache = 256 << (bpu.ent - 1); read_arc_build_cfg_regs()
113 cpu->bpu.num_pred = 256 << (bpu.ent - 1); read_arc_build_cfg_regs()
118 cpu->extn.ap = bcr.ver ? 1 : 0; read_arc_build_cfg_regs()
121 cpu->extn.smart = bcr.ver ? 1 : 0; read_arc_build_cfg_regs()
124 cpu->extn.rtt = bcr.ver ? 1 : 0; read_arc_build_cfg_regs()
126 cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt; read_arc_build_cfg_regs()
143 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; arc_cpu_mumbojumbo() local
144 struct bcr_identity *core = &cpu->core; arc_cpu_mumbojumbo()
150 FIX_PTR(cpu); arc_cpu_mumbojumbo()
156 atomic = cpu->isa.atomic1; arc_cpu_mumbojumbo()
157 if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */ arc_cpu_mumbojumbo()
184 IS_AVAIL1(cpu->timers.t0, "Timer0 "), arc_cpu_mumbojumbo()
185 IS_AVAIL1(cpu->timers.t1, "Timer1 "), arc_cpu_mumbojumbo()
186 IS_AVAIL2(cpu->timers.rtsc, "64-bit RTSC ", CONFIG_ARC_HAS_RTSC)); arc_cpu_mumbojumbo()
195 IS_AVAIL1(cpu->extn_mpy.ver, "mpy "), arc_cpu_mumbojumbo()
196 IS_AVAIL1(cpu->extn.norm, "norm "), arc_cpu_mumbojumbo()
197 IS_AVAIL1(cpu->extn.barrel, "barrel-shift "), arc_cpu_mumbojumbo()
198 IS_AVAIL1(cpu->extn.swap, "swap "), arc_cpu_mumbojumbo()
199 IS_AVAIL1(cpu->extn.minmax, "minmax "), arc_cpu_mumbojumbo()
200 IS_AVAIL1(cpu->extn.crc, "crc "), arc_cpu_mumbojumbo()
203 if (cpu->bpu.ver) arc_cpu_mumbojumbo()
206 IS_AVAIL1(cpu->bpu.full, "full"), arc_cpu_mumbojumbo()
207 IS_AVAIL1(!cpu->bpu.full, "partial"), arc_cpu_mumbojumbo()
208 cpu->bpu.num_cache, cpu->bpu.num_pred); arc_cpu_mumbojumbo()
216 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; arc_extn_mumbojumbo() local
218 FIX_PTR(cpu); arc_extn_mumbojumbo()
222 cpu->vec_base, cpu->uncached_base); arc_extn_mumbojumbo()
224 if (cpu->extn.fpu_sp || cpu->extn.fpu_dp) arc_extn_mumbojumbo()
226 IS_AVAIL1(cpu->extn.fpu_sp, "SP "), arc_extn_mumbojumbo()
227 IS_AVAIL1(cpu->extn.fpu_dp, "DP ")); arc_extn_mumbojumbo()
229 if (cpu->extn.debug) arc_extn_mumbojumbo()
231 IS_AVAIL1(cpu->extn.ap, "ActionPoint "), arc_extn_mumbojumbo()
232 IS_AVAIL1(cpu->extn.smart, "smaRT "), arc_extn_mumbojumbo()
233 IS_AVAIL1(cpu->extn.rtt, "RTT ")); arc_extn_mumbojumbo()
235 if (cpu->dccm.sz || cpu->iccm.sz) arc_extn_mumbojumbo()
237 cpu->dccm.base_addr, TO_KB(cpu->dccm.sz), arc_extn_mumbojumbo()
238 cpu->iccm.base_addr, TO_KB(cpu->iccm.sz)); arc_extn_mumbojumbo()
248 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; arc_chk_core_config() local
251 if (!cpu->timers.t0) arc_chk_core_config()
254 if (!cpu->timers.t1) arc_chk_core_config()
257 if (IS_ENABLED(CONFIG_ARC_HAS_RTSC) && !cpu->timers.rtsc) arc_chk_core_config()
265 if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr) arc_chk_core_config()
268 if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz) arc_chk_core_config()
273 if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz) arc_chk_core_config()
286 if (cpu->extn.fpu_dp && !fpu_enabled) arc_chk_core_config()
288 else if (!cpu->extn.fpu_dp && fpu_enabled) arc_chk_core_config()
448 * Callback returns cpu-id to iterator for show routine, NULL to stop. c_start()
449 * However since NULL is also a valid cpu-id (0), we use a round-about c_start()
451 * Encode cpu-id as 0xFFcccc, which is decoded by show routine. c_start()
473 static DEFINE_PER_CPU(struct cpu, cpu_topology);
477 int cpu; topology_init() local
479 for_each_present_cpu(cpu) topology_init()
480 register_cpu(&per_cpu(cpu_topology, cpu), cpu); topology_init()
H A Dsmp.c20 #include <linux/cpu.h>
36 /* XXX: per cpu ? Only needed once in early seconday boot */
92 void __weak arc_platform_smp_wait_to_boot(int cpu) arc_platform_smp_wait_to_boot() argument
117 unsigned int cpu = smp_processor_id(); start_kernel_secondary() local
125 cpumask_set_cpu(cpu, mm_cpumask(mm)); start_kernel_secondary()
127 notify_cpu_starting(cpu); start_kernel_secondary()
128 set_cpu_online(cpu, true); start_kernel_secondary()
130 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); start_kernel_secondary()
133 machine_desc->init_smp(cpu); start_kernel_secondary()
152 int __cpu_up(unsigned int cpu, struct task_struct *idle) __cpu_up() argument
158 pr_info("Idle Task [%d] %p", cpu, idle); __cpu_up()
159 pr_info("Trying to bring up CPU%u ...\n", cpu); __cpu_up()
162 plat_smp_ops.cpu_kick(cpu, __cpu_up()
168 if (cpu_online(cpu)) __cpu_up()
172 if (!cpu_online(cpu)) { __cpu_up()
173 pr_info("Timeout: CPU%u FAILED to comeup !!!\n", cpu); __cpu_up()
204 * IRQ), the msg-type needs to be conveyed via per-cpu data
209 static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg) ipi_send_msg_one() argument
211 unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu); ipi_send_msg_one()
215 pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu); ipi_send_msg_one()
237 plat_smp_ops.ipi_send(cpu); ipi_send_msg_one()
244 unsigned int cpu; ipi_send_msg() local
246 for_each_cpu(cpu, callmap) ipi_send_msg()
247 ipi_send_msg_one(cpu, msg); ipi_send_msg()
250 void smp_send_reschedule(int cpu) smp_send_reschedule() argument
252 ipi_send_msg_one(cpu, IPI_RESCHEDULE); smp_send_reschedule()
263 void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
265 ipi_send_msg_one(cpu, IPI_CALL_FUNC); arch_send_call_function_single_ipi()
309 pr_debug("IPI [%ld] received on cpu %d\n", do_IPI()
335 int smp_ipi_irq_setup(int cpu, int irq) smp_ipi_irq_setup() argument
337 int *dev = per_cpu_ptr(&ipi_dev, cpu); smp_ipi_irq_setup()
339 arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev); smp_ipi_irq_setup()
/linux-4.1.27/arch/x86/xen/
H A Dsmp.c25 #include <asm/cpu.h>
71 int cpu; cpu_bringup() local
82 cpu = smp_processor_id(); cpu_bringup()
83 smp_store_cpu_info(cpu); cpu_bringup()
84 cpu_data(cpu).x86_max_cores = 1; cpu_bringup()
85 set_cpu_sibling_map(cpu); cpu_bringup()
89 notify_cpu_starting(cpu); cpu_bringup()
91 set_cpu_online(cpu, true); cpu_bringup()
93 cpu_set_state_online(cpu); /* Implies full memory barrier. */ cpu_bringup()
100 * Note: cpu parameter is only relevant for PVH. The reason for passing it
102 * which we need the cpu number! So we pass it in rdi as first parameter.
104 asmlinkage __visible void cpu_bringup_and_idle(int cpu) cpu_bringup_and_idle() argument
109 xen_pvh_secondary_vcpu_init(cpu); cpu_bringup_and_idle()
115 static void xen_smp_intr_free(unsigned int cpu) xen_smp_intr_free() argument
117 if (per_cpu(xen_resched_irq, cpu).irq >= 0) { xen_smp_intr_free()
118 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); xen_smp_intr_free()
119 per_cpu(xen_resched_irq, cpu).irq = -1; xen_smp_intr_free()
120 kfree(per_cpu(xen_resched_irq, cpu).name); xen_smp_intr_free()
121 per_cpu(xen_resched_irq, cpu).name = NULL; xen_smp_intr_free()
123 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { xen_smp_intr_free()
124 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); xen_smp_intr_free()
125 per_cpu(xen_callfunc_irq, cpu).irq = -1; xen_smp_intr_free()
126 kfree(per_cpu(xen_callfunc_irq, cpu).name); xen_smp_intr_free()
127 per_cpu(xen_callfunc_irq, cpu).name = NULL; xen_smp_intr_free()
129 if (per_cpu(xen_debug_irq, cpu).irq >= 0) { xen_smp_intr_free()
130 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL); xen_smp_intr_free()
131 per_cpu(xen_debug_irq, cpu).irq = -1; xen_smp_intr_free()
132 kfree(per_cpu(xen_debug_irq, cpu).name); xen_smp_intr_free()
133 per_cpu(xen_debug_irq, cpu).name = NULL; xen_smp_intr_free()
135 if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) { xen_smp_intr_free()
136 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq, xen_smp_intr_free()
138 per_cpu(xen_callfuncsingle_irq, cpu).irq = -1; xen_smp_intr_free()
139 kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); xen_smp_intr_free()
140 per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; xen_smp_intr_free()
145 if (per_cpu(xen_irq_work, cpu).irq >= 0) { xen_smp_intr_free()
146 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); xen_smp_intr_free()
147 per_cpu(xen_irq_work, cpu).irq = -1; xen_smp_intr_free()
148 kfree(per_cpu(xen_irq_work, cpu).name); xen_smp_intr_free()
149 per_cpu(xen_irq_work, cpu).name = NULL; xen_smp_intr_free()
152 static int xen_smp_intr_init(unsigned int cpu) xen_smp_intr_init() argument
157 resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); xen_smp_intr_init()
159 cpu, xen_smp_intr_init()
166 per_cpu(xen_resched_irq, cpu).irq = rc; xen_smp_intr_init()
167 per_cpu(xen_resched_irq, cpu).name = resched_name; xen_smp_intr_init()
169 callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); xen_smp_intr_init()
171 cpu, xen_smp_intr_init()
178 per_cpu(xen_callfunc_irq, cpu).irq = rc; xen_smp_intr_init()
179 per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; xen_smp_intr_init()
181 debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); xen_smp_intr_init()
182 rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, xen_smp_intr_init()
187 per_cpu(xen_debug_irq, cpu).irq = rc; xen_smp_intr_init()
188 per_cpu(xen_debug_irq, cpu).name = debug_name; xen_smp_intr_init()
190 callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); xen_smp_intr_init()
192 cpu, xen_smp_intr_init()
199 per_cpu(xen_callfuncsingle_irq, cpu).irq = rc; xen_smp_intr_init()
200 per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; xen_smp_intr_init()
209 callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); xen_smp_intr_init()
211 cpu, xen_smp_intr_init()
218 per_cpu(xen_irq_work, cpu).irq = rc; xen_smp_intr_init()
219 per_cpu(xen_irq_work, cpu).name = callfunc_name; xen_smp_intr_init()
224 xen_smp_intr_free(cpu); xen_smp_intr_init()
287 /* We've switched to the "real" per-cpu gdt, so make xen_smp_prepare_boot_cpu()
314 unsigned cpu; xen_smp_prepare_cpus() local
348 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
350 set_cpu_possible(cpu, false);
353 for_each_possible_cpu(cpu)
354 set_cpu_present(cpu, true);
358 cpu_initialize_context(unsigned int cpu, struct task_struct *idle) cpu_initialize_context() argument
365 cpumask_set_cpu(cpu, cpu_callout_mask); cpu_initialize_context()
366 if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) cpu_initialize_context()
373 gdt = get_cpu_gdt_table(cpu); cpu_initialize_context()
410 ctxt->gs_base_kernel = per_cpu_offset(cpu); cpu_initialize_context()
417 per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir); cpu_initialize_context()
427 ctxt->user_regs.rdi = cpu; cpu_initialize_context()
433 if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt)) cpu_initialize_context()
440 static int xen_cpu_up(unsigned int cpu, struct task_struct *idle) xen_cpu_up() argument
444 common_cpu_up(cpu, idle); xen_cpu_up()
446 xen_setup_runstate_info(cpu); xen_cpu_up()
447 xen_setup_timer(cpu); xen_cpu_up()
448 xen_init_lock_cpu(cpu); xen_cpu_up()
454 rc = cpu_check_up_prepare(cpu); xen_cpu_up()
459 per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1; xen_cpu_up()
461 rc = cpu_initialize_context(cpu, idle); xen_cpu_up()
465 rc = xen_smp_intr_init(cpu); xen_cpu_up()
469 rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL); xen_cpu_up()
472 while (cpu_report_state(cpu) != CPU_ONLINE) xen_cpu_up()
485 unsigned int cpu = smp_processor_id(); xen_cpu_disable() local
486 if (cpu == 0) xen_cpu_disable()
495 static void xen_cpu_die(unsigned int cpu) xen_cpu_die() argument
497 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { xen_cpu_die()
502 if (common_cpu_die(cpu) == 0) { xen_cpu_die()
503 xen_smp_intr_free(cpu); xen_cpu_die()
504 xen_uninit_lock_cpu(cpu); xen_cpu_die()
505 xen_teardown_timer(cpu); xen_cpu_die()
515 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down) xen_play_dead()
529 static void xen_cpu_die(unsigned int cpu) xen_cpu_die() argument
542 int cpu = smp_processor_id(); stop_self() local
548 set_cpu_online(cpu, false); stop_self()
550 HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL); stop_self()
559 static void xen_smp_send_reschedule(int cpu) xen_smp_send_reschedule() argument
561 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); xen_smp_send_reschedule()
567 unsigned cpu; __xen_send_IPI_mask() local
569 for_each_cpu_and(cpu, mask, cpu_online_mask) __xen_send_IPI_mask()
570 xen_send_IPI_one(cpu, vector); __xen_send_IPI_mask()
575 int cpu; xen_smp_send_call_function_ipi() local
580 for_each_cpu(cpu, mask) { for_each_cpu()
581 if (xen_vcpu_stolen(cpu)) { for_each_cpu()
588 static void xen_smp_send_call_function_single_ipi(int cpu) xen_smp_send_call_function_single_ipi() argument
590 __xen_send_IPI_mask(cpumask_of(cpu), xen_smp_send_call_function_single_ipi()
654 unsigned cpu; xen_send_IPI_mask_allbutself() local
661 for_each_cpu_and(cpu, mask, cpu_online_mask) { for_each_cpu_and()
662 if (this_cpu == cpu) for_each_cpu_and()
665 xen_send_IPI_one(cpu, xen_vector); for_each_cpu_and()
735 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) xen_hvm_cpu_up() argument
743 if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) { xen_hvm_cpu_up()
744 xen_smp_intr_free(cpu); xen_hvm_cpu_up()
745 xen_uninit_lock_cpu(cpu); xen_hvm_cpu_up()
753 rc = xen_smp_intr_init(cpu); xen_hvm_cpu_up()
756 rc = native_cpu_up(cpu, tidle); xen_hvm_cpu_up()
765 xen_init_lock_cpu(cpu); xen_hvm_cpu_up()
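The xen_smp_intr_init()/xen_smp_intr_free() pair above shows a common teardown idiom: each binding gets a kasprintf()'d per-CPU name, the resulting irq number and the name are both stashed in per-cpu storage, and any failure jumps to a label that simply calls the free routine, which tolerates half-initialised state because unused slots hold -1/NULL. A stripped-down sketch of that shape; struct my_irq, bind_my_ipi() and unbind_my_ipi() are hypothetical stand-ins for the Xen binding calls:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct my_irq { int irq; char *name; };

static DEFINE_PER_CPU(struct my_irq, resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct my_irq, callfunc_irq) = { .irq = -1 };

extern int bind_my_ipi(const char *name, unsigned int cpu);	/* returns irq or -errno */
extern void unbind_my_ipi(int irq);

static void my_intr_free(unsigned int cpu)
{
	struct my_irq *slot[] = { &per_cpu(resched_irq, cpu), &per_cpu(callfunc_irq, cpu) };
	int i;

	for (i = 0; i < ARRAY_SIZE(slot); i++) {
		if (slot[i]->irq >= 0) {
			unbind_my_ipi(slot[i]->irq);
			slot[i]->irq = -1;
		}
		kfree(slot[i]->name);		/* kfree(NULL) is a no-op */
		slot[i]->name = NULL;
	}
}

static int my_intr_init(unsigned int cpu)
{
	char *name;
	int rc;

	name = kasprintf(GFP_KERNEL, "resched%u", cpu);
	rc = name ? bind_my_ipi(name, cpu) : -ENOMEM;
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu).irq = rc;
	per_cpu(resched_irq, cpu).name = name;

	name = kasprintf(GFP_KERNEL, "callfunc%u", cpu);
	rc = name ? bind_my_ipi(name, cpu) : -ENOMEM;
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu).irq = rc;
	per_cpu(callfunc_irq, cpu).name = name;
	return 0;

fail:
	kfree(name);		/* the slot that failed never made it into per-cpu state */
	my_intr_free(cpu);
	return rc;
}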
H A Dsmp.h12 extern void xen_pvh_early_cpu_init(int cpu, bool entry);
14 static inline void xen_pvh_early_cpu_init(int cpu, bool entry) xen_pvh_early_cpu_init() argument
/linux-4.1.27/drivers/cpufreq/
H A Dppc_cbe_cpufreq.h15 int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
16 int cbe_cpufreq_get_pmode(int cpu);
18 int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
H A Dppc_cbe_cpufreq.c50 static int set_pmode(unsigned int cpu, unsigned int slow_mode) set_pmode() argument
55 rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode); set_pmode()
57 rc = cbe_cpufreq_set_pmode(cpu, slow_mode); set_pmode()
59 pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu)); set_pmode()
74 struct device_node *cpu; cbe_cpufreq_cpu_init() local
76 cpu = of_get_cpu_node(policy->cpu, NULL); cbe_cpufreq_cpu_init()
78 if (!cpu) cbe_cpufreq_cpu_init()
81 pr_debug("init cpufreq on CPU %d\n", policy->cpu); cbe_cpufreq_cpu_init()
86 if (!cbe_get_cpu_pmd_regs(policy->cpu) || cbe_cpufreq_cpu_init()
87 !cbe_get_cpu_mic_tm_regs(policy->cpu)) { cbe_cpufreq_cpu_init()
92 max_freqp = of_get_property(cpu, "clock-frequency", NULL); cbe_cpufreq_cpu_init()
94 of_node_put(cpu); cbe_cpufreq_cpu_init()
115 cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
121 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
132 pr_debug("setting frequency for cpu %d to %d kHz, " \ cbe_cpufreq_target()
134 policy->cpu, cbe_cpufreq_target()
138 return set_pmode(policy->cpu, cbe_pmode_new); cbe_cpufreq_target()
H A Dsh-cpufreq.c24 #include <linux/cpu.h>
33 static unsigned int sh_cpufreq_get(unsigned int cpu) sh_cpufreq_get() argument
35 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; sh_cpufreq_get()
45 unsigned int cpu = policy->cpu; sh_cpufreq_target() local
46 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); sh_cpufreq_target()
53 set_cpus_allowed_ptr(current, cpumask_of(cpu)); sh_cpufreq_target()
55 BUG_ON(smp_processor_id() != cpu); sh_cpufreq_target()
57 dev = get_cpu_device(cpu); sh_cpufreq_target()
67 freqs.old = sh_cpufreq_get(cpu); sh_cpufreq_target()
83 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); sh_cpufreq_verify()
101 unsigned int cpu = policy->cpu; sh_cpufreq_cpu_init() local
102 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); sh_cpufreq_cpu_init()
106 dev = get_cpu_device(cpu); sh_cpufreq_cpu_init()
143 unsigned int cpu = policy->cpu; sh_cpufreq_cpu_exit() local
144 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); sh_cpufreq_cpu_exit()
H A Dcpufreq_userspace.c35 pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); cpufreq_set()
38 if (!per_cpu(cpu_is_managed, policy->cpu)) cpufreq_set()
55 unsigned int cpu = policy->cpu; cpufreq_governor_userspace() local
61 pr_debug("started managing cpu %u\n", cpu); cpufreq_governor_userspace()
64 per_cpu(cpu_is_managed, cpu) = 1; cpufreq_governor_userspace()
68 pr_debug("managing cpu %u stopped\n", cpu); cpufreq_governor_userspace()
71 per_cpu(cpu_is_managed, cpu) = 0; cpufreq_governor_userspace()
76 pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n", cpufreq_governor_userspace()
77 cpu, policy->min, policy->max, cpufreq_governor_userspace()
H A Dintel_pstate.c22 #include <linux/cpu.h>
101 int cpu; member in struct:cpudata
229 static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) intel_pstate_busy_pid_reset() argument
231 pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct); intel_pstate_busy_pid_reset()
232 pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct); intel_pstate_busy_pid_reset()
233 pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct); intel_pstate_busy_pid_reset()
235 pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0); intel_pstate_busy_pid_reset()
240 unsigned int cpu; intel_pstate_reset_all_pid() local
242 for_each_online_cpu(cpu) { for_each_online_cpu()
243 if (all_cpu_data[cpu]) for_each_online_cpu()
244 intel_pstate_busy_pid_reset(all_cpu_data[cpu]); for_each_online_cpu()
251 struct cpudata *cpu; update_turbo_state() local
253 cpu = all_cpu_data[0]; update_turbo_state()
257 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); update_turbo_state()
263 int min, max, cpu; intel_pstate_hwp_set() local
268 for_each_online_cpu(cpu) { for_each_online_cpu()
269 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); for_each_online_cpu()
282 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); for_each_online_cpu()
349 struct cpudata *cpu; show_turbo_pct() local
353 cpu = all_cpu_data[0]; show_turbo_pct()
355 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; show_turbo_pct()
356 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; show_turbo_pct()
365 struct cpudata *cpu; show_num_pstates() local
368 cpu = all_cpu_data[0]; show_num_pstates()
369 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; show_num_pstates()
538 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); byt_set_pstate()
615 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); core_set_pstate()
686 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) intel_pstate_get_min_max() argument
688 int max_perf = cpu->pstate.turbo_pstate; intel_pstate_get_min_max()
693 max_perf = cpu->pstate.max_pstate; intel_pstate_get_min_max()
697 * policy, or by cpu specific default values determined through intel_pstate_get_min_max()
702 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); intel_pstate_get_min_max()
705 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); intel_pstate_get_min_max()
708 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) intel_pstate_set_pstate() argument
714 intel_pstate_get_min_max(cpu, &min_perf, &max_perf); intel_pstate_set_pstate()
718 if (pstate == cpu->pstate.current_pstate) intel_pstate_set_pstate()
721 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); intel_pstate_set_pstate()
723 cpu->pstate.current_pstate = pstate; intel_pstate_set_pstate()
725 pstate_funcs.set(cpu, pstate); intel_pstate_set_pstate()
728 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) intel_pstate_get_cpu_pstates() argument
730 cpu->pstate.min_pstate = pstate_funcs.get_min(); intel_pstate_get_cpu_pstates()
731 cpu->pstate.max_pstate = pstate_funcs.get_max(); intel_pstate_get_cpu_pstates()
732 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); intel_pstate_get_cpu_pstates()
733 cpu->pstate.scaling = pstate_funcs.get_scaling(); intel_pstate_get_cpu_pstates()
736 pstate_funcs.get_vid(cpu); intel_pstate_get_cpu_pstates()
737 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); intel_pstate_get_cpu_pstates()
740 static inline void intel_pstate_calc_busy(struct cpudata *cpu) intel_pstate_calc_busy() argument
742 struct sample *sample = &cpu->sample; intel_pstate_calc_busy()
750 cpu->pstate.max_pstate * cpu->pstate.scaling / 100), intel_pstate_calc_busy()
756 static inline void intel_pstate_sample(struct cpudata *cpu) intel_pstate_sample() argument
764 if (cpu->prev_mperf == mperf) { intel_pstate_sample()
771 cpu->last_sample_time = cpu->sample.time; intel_pstate_sample()
772 cpu->sample.time = ktime_get(); intel_pstate_sample()
773 cpu->sample.aperf = aperf; intel_pstate_sample()
774 cpu->sample.mperf = mperf; intel_pstate_sample()
775 cpu->sample.aperf -= cpu->prev_aperf; intel_pstate_sample()
776 cpu->sample.mperf -= cpu->prev_mperf; intel_pstate_sample()
778 intel_pstate_calc_busy(cpu); intel_pstate_sample()
780 cpu->prev_aperf = aperf; intel_pstate_sample()
781 cpu->prev_mperf = mperf; intel_pstate_sample()
784 static inline void intel_hwp_set_sample_time(struct cpudata *cpu) intel_hwp_set_sample_time() argument
789 mod_timer_pinned(&cpu->timer, jiffies + delay); intel_hwp_set_sample_time()
792 static inline void intel_pstate_set_sample_time(struct cpudata *cpu) intel_pstate_set_sample_time() argument
797 mod_timer_pinned(&cpu->timer, jiffies + delay); intel_pstate_set_sample_time()
800 static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) intel_pstate_get_scaled_busy() argument
817 core_busy = cpu->sample.core_pct_busy; intel_pstate_get_scaled_busy()
818 max_pstate = int_tofp(cpu->pstate.max_pstate); intel_pstate_get_scaled_busy()
819 current_pstate = int_tofp(cpu->pstate.current_pstate); intel_pstate_get_scaled_busy()
830 duration_us = ktime_us_delta(cpu->sample.time, intel_pstate_get_scaled_busy()
831 cpu->last_sample_time); intel_pstate_get_scaled_busy()
841 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) intel_pstate_adjust_busy_pstate() argument
847 pid = &cpu->pid; intel_pstate_adjust_busy_pstate()
848 busy_scaled = intel_pstate_get_scaled_busy(cpu); intel_pstate_adjust_busy_pstate()
853 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl); intel_pstate_adjust_busy_pstate()
858 struct cpudata *cpu = (struct cpudata *) __data; intel_hwp_timer_func() local
860 intel_pstate_sample(cpu); intel_hwp_timer_func()
861 intel_hwp_set_sample_time(cpu); intel_hwp_timer_func()
866 struct cpudata *cpu = (struct cpudata *) __data; intel_pstate_timer_func() local
869 intel_pstate_sample(cpu); intel_pstate_timer_func()
871 sample = &cpu->sample; intel_pstate_timer_func()
873 intel_pstate_adjust_busy_pstate(cpu); intel_pstate_timer_func()
876 fp_toint(intel_pstate_get_scaled_busy(cpu)), intel_pstate_timer_func()
877 cpu->pstate.current_pstate, intel_pstate_timer_func()
882 intel_pstate_set_sample_time(cpu); intel_pstate_timer_func()
917 struct cpudata *cpu; intel_pstate_init_cpu() local
925 cpu = all_cpu_data[cpunum]; intel_pstate_init_cpu()
927 cpu->cpu = cpunum; intel_pstate_init_cpu()
928 intel_pstate_get_cpu_pstates(cpu); intel_pstate_init_cpu()
930 init_timer_deferrable(&cpu->timer); intel_pstate_init_cpu()
931 cpu->timer.data = (unsigned long)cpu; intel_pstate_init_cpu()
932 cpu->timer.expires = jiffies + HZ/100; intel_pstate_init_cpu()
935 cpu->timer.function = intel_pstate_timer_func; intel_pstate_init_cpu()
937 cpu->timer.function = intel_hwp_timer_func; intel_pstate_init_cpu()
939 intel_pstate_busy_pid_reset(cpu); intel_pstate_init_cpu()
940 intel_pstate_sample(cpu); intel_pstate_init_cpu()
942 add_timer_on(&cpu->timer, cpunum); intel_pstate_init_cpu()
944 pr_debug("Intel pstate controlling: cpu %d\n", cpunum); intel_pstate_init_cpu()
952 struct cpudata *cpu; intel_pstate_get() local
954 cpu = all_cpu_data[cpu_num]; intel_pstate_get()
955 if (!cpu) intel_pstate_get()
957 sample = &cpu->sample; intel_pstate_get()
1007 int cpu_num = policy->cpu; intel_pstate_stop_cpu()
1008 struct cpudata *cpu = all_cpu_data[cpu_num]; intel_pstate_stop_cpu() local
1016 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); intel_pstate_stop_cpu()
1021 struct cpudata *cpu; intel_pstate_cpu_init() local
1024 rc = intel_pstate_init_cpu(policy->cpu); intel_pstate_cpu_init()
1028 cpu = all_cpu_data[policy->cpu]; intel_pstate_cpu_init()
1035 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; intel_pstate_cpu_init()
1036 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; intel_pstate_cpu_init()
1039 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; intel_pstate_cpu_init()
1042 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; intel_pstate_cpu_init()
1043 policy->cpuinfo.max_freq *= cpu->pstate.scaling; intel_pstate_cpu_init()
1046 cpumask_set_cpu(policy->cpu, policy->cpus); intel_pstate_cpu_init()
1211 int cpu, rc = 0; intel_pstate_init() local
1259 for_each_online_cpu(cpu) { for_each_online_cpu()
1260 if (all_cpu_data[cpu]) { for_each_online_cpu()
1261 del_timer_sync(&all_cpu_data[cpu]->timer); for_each_online_cpu()
1262 kfree(all_cpu_data[cpu]); for_each_online_cpu()
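For orientation: intel_pstate above samples APERF/MPERF per CPU from a deferrable timer, turns the deltas into a "core busy" percentage, rescales that figure from the current P-state up to the maximum one (core_busy * max_pstate / current_pstate), and feeds the result to a PID controller whose output nudges the target P-state, clamped between the minimum and turbo limits. The sketch below only illustrates the rescale-and-clamp arithmetic in plain floating-point C with made-up numbers; the driver itself uses fixed-point helpers and a real PID on the deviation from a setpoint, not the naive proportional step shown here.

#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Hypothetical sample: busy 40% of the interval at P-state 16 of max 28. */
	double core_pct_busy = 40.0;
	int current_pstate = 16, max_pstate = 28;
	int min_pstate = 8, turbo_pstate = 32;

	/* What the busyness would look like had the core run at max_pstate. */
	double scaled_busy = core_pct_busy * max_pstate / current_pstate;

	/* Naive proportional choice of the next P-state (stand-in for the PID). */
	int target = clamp_int((int)(scaled_busy * max_pstate / 100.0),
			       min_pstate, turbo_pstate);

	printf("scaled busy %.1f%% -> target P-state %d\n", scaled_busy, target);
	return 0;
}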
/linux-4.1.27/arch/tile/include/asm/
H A Dtopology.h22 /* Mappings between logical cpu number and node number. */
26 /* Returns the number of the node containing CPU 'cpu'. */ cpu_to_node()
27 static inline int cpu_to_node(int cpu) cpu_to_node() argument
29 return cpu_2_node[cpu]; cpu_to_node()
55 #define topology_physical_package_id(cpu) ((void)(cpu), 0)
56 #define topology_core_id(cpu) (cpu)
57 #define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
58 #define topology_thread_cpumask(cpu) cpumask_of(cpu)
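Whatever the architecture backs them with (a per-cpu topology struct on s390 and powerpc, cpu_data fields on ia64, constants on tile), the topology_*() macros give callers one uniform way to ask where a logical CPU sits. A throwaway module that walks them, assuming a 4.x kernel where the macro names are as shown above:

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/topology.h>

static int __init topo_dump_init(void)
{
	unsigned int cpu;

	get_online_cpus();		/* keep the online mask stable while we walk it */
	for_each_online_cpu(cpu)
		pr_info("cpu%u: package %d core %d\n", cpu,
			topology_physical_package_id(cpu),
			topology_core_id(cpu));
	put_online_cpus();
	return 0;
}

static void __exit topo_dump_exit(void)
{
}

module_init(topo_dump_init);
module_exit(topo_dump_exit);
MODULE_LICENSE("GPL");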
/linux-4.1.27/arch/ia64/include/asm/
H A Dcpu.h5 #include <linux/cpu.h>
10 struct cpu cpu; member in struct:ia64_cpu
H A Dtopology.h53 #define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
54 #define topology_core_id(cpu) (cpu_data(cpu)->core_id)
55 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
56 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
/linux-4.1.27/arch/powerpc/kernel/
H A Dsmp.c31 #include <linux/cpu.h>
88 * Returns 1 if the specified cpu should be brought up during boot.
214 void smp_muxed_ipi_set_data(int cpu, unsigned long data) smp_muxed_ipi_set_data() argument
216 struct cpu_messages *info = &per_cpu(ipi_message, cpu); smp_muxed_ipi_set_data()
221 void smp_muxed_ipi_message_pass(int cpu, int msg) smp_muxed_ipi_message_pass() argument
223 struct cpu_messages *info = &per_cpu(ipi_message, cpu); smp_muxed_ipi_message_pass()
235 smp_ops->cause_ipi(cpu, info->data); smp_muxed_ipi_message_pass()
267 static inline void do_message_pass(int cpu, int msg) do_message_pass() argument
270 smp_ops->message_pass(cpu, msg); do_message_pass()
273 smp_muxed_ipi_message_pass(cpu, msg); do_message_pass()
277 void smp_send_reschedule(int cpu) smp_send_reschedule() argument
280 do_message_pass(cpu, PPC_MSG_RESCHEDULE); smp_send_reschedule()
284 void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
286 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); arch_send_call_function_single_ipi()
291 unsigned int cpu; arch_send_call_function_ipi_mask() local
293 for_each_cpu(cpu, mask) arch_send_call_function_ipi_mask()
294 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); arch_send_call_function_ipi_mask()
300 unsigned int cpu; tick_broadcast() local
302 for_each_cpu(cpu, mask) tick_broadcast()
303 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST); tick_broadcast()
310 int cpu; smp_send_debugger_break() local
316 for_each_online_cpu(cpu) smp_send_debugger_break()
317 if (cpu != me) smp_send_debugger_break()
318 do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); smp_send_debugger_break()
361 unsigned int cpu; smp_prepare_cpus() local
366 * setup_cpu may need to be called on the boot cpu. We haven't smp_prepare_cpus()
371 /* Fixup boot cpu */ smp_prepare_cpus()
375 for_each_possible_cpu(cpu) { for_each_possible_cpu()
376 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), for_each_possible_cpu()
377 GFP_KERNEL, cpu_to_node(cpu)); for_each_possible_cpu()
378 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), for_each_possible_cpu()
379 GFP_KERNEL, cpu_to_node(cpu)); for_each_possible_cpu()
383 if (cpu_present(cpu)) { for_each_possible_cpu()
384 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]); for_each_possible_cpu()
385 set_cpu_numa_mem(cpu, for_each_possible_cpu()
386 local_memory_node(numa_cpu_lookup_table[cpu])); for_each_possible_cpu()
411 unsigned int cpu = smp_processor_id(); generic_cpu_disable() local
413 if (cpu == boot_cpuid) generic_cpu_disable()
416 set_cpu_online(cpu, false); generic_cpu_disable()
424 void generic_cpu_die(unsigned int cpu) generic_cpu_die() argument
430 if (per_cpu(cpu_state, cpu) == CPU_DEAD) generic_cpu_die()
434 printk(KERN_ERR "CPU%d didn't die...\n", cpu); generic_cpu_die()
437 void generic_set_cpu_dead(unsigned int cpu) generic_set_cpu_dead() argument
439 per_cpu(cpu_state, cpu) = CPU_DEAD; generic_set_cpu_dead()
447 void generic_set_cpu_up(unsigned int cpu) generic_set_cpu_up() argument
449 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; generic_set_cpu_up()
452 int generic_check_cpu_restart(unsigned int cpu) generic_check_cpu_restart() argument
454 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; generic_check_cpu_restart()
468 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) cpu_idle_thread_init() argument
473 paca[cpu].__current = idle; cpu_idle_thread_init()
474 paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; cpu_idle_thread_init()
476 ti->cpu = cpu; cpu_idle_thread_init()
477 secondary_ti = current_set[cpu] = ti; cpu_idle_thread_init()
480 int __cpu_up(unsigned int cpu, struct task_struct *tidle) __cpu_up() argument
488 cpu_thread_in_subcore(cpu)) __cpu_up()
492 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) __cpu_up()
495 cpu_idle_thread_init(cpu, tidle); __cpu_up()
500 cpu_callin_map[cpu] = 0; __cpu_up()
509 DBG("smp: kicking cpu %d\n", cpu); __cpu_up()
510 rc = smp_ops->kick_cpu(cpu); __cpu_up()
512 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); __cpu_up()
517 * wait to see if the cpu made a callin (is actually up). __cpu_up()
522 for (c = 50000; c && !cpu_callin_map[cpu]; c--) __cpu_up()
530 for (c = 5000; c && !cpu_callin_map[cpu]; c--) __cpu_up()
534 if (!cpu_callin_map[cpu]) { __cpu_up()
535 printk(KERN_ERR "Processor %u is stuck.\n", cpu); __cpu_up()
539 DBG("Processor %u found.\n", cpu); __cpu_up()
544 /* Wait until cpu puts itself in the online & active maps */ __cpu_up()
545 while (!cpu_online(cpu) || !cpu_active(cpu)) __cpu_up()
552 * logical cpu.
554 int cpu_to_core_id(int cpu) cpu_to_core_id() argument
560 np = of_get_cpu_node(cpu, NULL); cpu_to_core_id()
574 /* Helper routines for cpu to core mapping */ cpu_core_index_of_thread()
575 int cpu_core_index_of_thread(int cpu) cpu_core_index_of_thread() argument
577 return cpu >> threads_shift; cpu_core_index_of_thread()
587 static void traverse_siblings_chip_id(int cpu, bool add, int chipid) traverse_siblings_chip_id() argument
603 cpumask_set_cpu(cpu, cpu_core_mask(i)); for_each_cpu()
604 cpumask_set_cpu(i, cpu_core_mask(cpu)); for_each_cpu()
606 cpumask_clear_cpu(cpu, cpu_core_mask(i)); for_each_cpu()
607 cpumask_clear_cpu(i, cpu_core_mask(cpu)); for_each_cpu()
615 * i.e. during cpu online or offline.
617 static struct device_node *cpu_to_l2cache(int cpu) cpu_to_l2cache() argument
622 if (!cpu_present(cpu)) cpu_to_l2cache()
625 np = of_get_cpu_node(cpu, NULL); cpu_to_l2cache()
636 static void traverse_core_siblings(int cpu, bool add) traverse_core_siblings() argument
643 /* First see if we have ibm,chip-id properties in cpu nodes */ traverse_core_siblings()
644 np = of_get_cpu_node(cpu, NULL); traverse_core_siblings()
652 traverse_siblings_chip_id(cpu, add, chip); traverse_core_siblings()
657 l2_cache = cpu_to_l2cache(cpu); traverse_core_siblings()
665 cpumask_set_cpu(cpu, cpu_core_mask(i)); for_each_cpu()
666 cpumask_set_cpu(i, cpu_core_mask(cpu)); for_each_cpu()
668 cpumask_clear_cpu(cpu, cpu_core_mask(i)); for_each_cpu()
669 cpumask_clear_cpu(i, cpu_core_mask(cpu)); for_each_cpu()
680 unsigned int cpu = smp_processor_id(); start_secondary() local
686 smp_store_cpu_info(cpu); start_secondary()
689 cpu_callin_map[cpu] = 1; start_secondary()
692 smp_ops->setup_cpu(cpu); start_secondary()
705 base = cpu_first_thread_sibling(cpu); start_secondary()
707 if (cpu_is_offline(base + i) && (cpu != base + i)) start_secondary()
709 cpumask_set_cpu(cpu, cpu_sibling_mask(base + i)); start_secondary()
710 cpumask_set_cpu(base + i, cpu_sibling_mask(cpu)); start_secondary()
716 cpumask_set_cpu(cpu, cpu_core_mask(base + i)); start_secondary()
717 cpumask_set_cpu(base + i, cpu_core_mask(cpu)); start_secondary()
719 traverse_core_siblings(cpu, true); start_secondary()
721 set_numa_node(numa_cpu_lookup_table[cpu]); start_secondary()
722 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); start_secondary()
725 notify_cpu_starting(cpu); start_secondary()
726 set_cpu_online(cpu, true); start_secondary()
793 int cpu = smp_processor_id(); __cpu_disable() local
805 base = cpu_first_thread_sibling(cpu); __cpu_disable()
807 cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); __cpu_disable()
808 cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); __cpu_disable()
809 cpumask_clear_cpu(cpu, cpu_core_mask(base + i)); __cpu_disable()
810 cpumask_clear_cpu(base + i, cpu_core_mask(cpu)); __cpu_disable()
812 traverse_core_siblings(cpu, false); __cpu_disable()
817 void __cpu_die(unsigned int cpu) __cpu_die() argument
820 smp_ops->cpu_die(cpu); __cpu_die()
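__cpu_up() above is the classic bring-up handshake: clear a callin flag, kick the secondary through smp_ops->kick_cpu(), poll the flag with a bounded spin (udelay() while interrupts are still off, msleep() once sleeping is allowed), and only report success when the secondary has announced itself from start_secondary(). The same handshake reduced to its skeleton; plat_kick_cpu() is a hypothetical stand-in for the platform kick, and the barriers and hotplug-state bookkeeping of the real code are elided:

#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/compiler.h>

static int callin_flag;				/* set by the secondary once it is alive */

extern int plat_kick_cpu(unsigned int cpu);	/* hypothetical mailbox/firmware kick */

static int wait_for_secondary(unsigned int cpu)
{
	int spins;

	WRITE_ONCE(callin_flag, 0);
	if (plat_kick_cpu(cpu) < 0)
		return -ENOENT;

	/* Bounded poll: give the secondary a few seconds to call in. */
	for (spins = 50000; spins && !READ_ONCE(callin_flag); spins--)
		udelay(100);

	return READ_ONCE(callin_flag) ? 0 : -EIO;
}

/* Secondary side, early in its start routine. */
static void secondary_announce(void)
{
	WRITE_ONCE(callin_flag, 1);
}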
H A Dtau_6xx.c2 * temp.c Thermal management for CPUs with Thermal Assist Units
10 * Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery
52 void set_thresholds(unsigned long cpu) set_thresholds() argument
59 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); set_thresholds()
64 mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); set_thresholds()
67 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID); set_thresholds()
68 mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V); set_thresholds()
72 void TAUupdate(int cpu) TAUupdate() argument
84 if (tau[cpu].low >= step_size){ TAUupdate()
85 tau[cpu].low -= step_size; TAUupdate()
86 tau[cpu].high -= (step_size - window_expand); TAUupdate()
88 tau[cpu].grew = 1; TAUupdate()
96 if (tau[cpu].high <= 127-step_size){ TAUupdate()
97 tau[cpu].low += (step_size - window_expand); TAUupdate()
98 tau[cpu].high += step_size; TAUupdate()
100 tau[cpu].grew = 1; TAUupdate()
108 printk("grew = %d\n", tau[cpu].grew); TAUupdate()
112 set_thresholds(cpu); TAUupdate()
125 int cpu = smp_processor_id(); TAUException() local
128 tau[cpu].interrupts++; TAUException()
130 TAUupdate(cpu); TAUException()
138 int cpu; tau_timeout() local
145 cpu = smp_processor_id(); tau_timeout()
148 TAUupdate(cpu); tau_timeout()
151 size = tau[cpu].high - tau[cpu].low; tau_timeout()
152 if (size > min_window && ! tau[cpu].grew) { tau_timeout()
156 tau[cpu].low += shrink; tau_timeout()
157 tau[cpu].high -= shrink; tau_timeout()
159 tau[cpu].low += 1; tau_timeout()
161 if ((tau[cpu].high - tau[cpu].low) != min_window){ tau_timeout()
168 tau[cpu].grew = 0; tau_timeout()
170 set_thresholds(cpu); tau_timeout()
209 unsigned long cpu = smp_processor_id(); TAU_init_smp() local
213 tau[cpu].low = 5; TAU_init_smp()
214 tau[cpu].high = 120; TAU_init_smp()
216 set_thresholds(cpu); TAU_init_smp()
257 u32 cpu_temp_both(unsigned long cpu) cpu_temp_both() argument
259 return ((tau[cpu].high << 16) | tau[cpu].low); cpu_temp_both()
262 int cpu_temp(unsigned long cpu) cpu_temp() argument
264 return ((tau[cpu].high + tau[cpu].low) / 2); cpu_temp()
267 int tau_interrupts(unsigned long cpu) tau_interrupts() argument
269 return (tau[cpu].interrupts); tau_interrupts()
/linux-4.1.27/arch/blackfin/mm/
H A Dsram-alloc.c68 unsigned int cpu; l1sram_init() local
77 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { l1sram_init()
78 per_cpu(free_l1_ssram_head, cpu).next = l1sram_init()
80 if (!per_cpu(free_l1_ssram_head, cpu).next) { l1sram_init()
85 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve; l1sram_init()
86 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve; l1sram_init()
87 per_cpu(free_l1_ssram_head, cpu).next->pid = 0; l1sram_init()
88 per_cpu(free_l1_ssram_head, cpu).next->next = NULL; l1sram_init()
90 per_cpu(used_l1_ssram_head, cpu).next = NULL; l1sram_init()
93 spin_lock_init(&per_cpu(l1sram_lock, cpu)); l1sram_init()
102 unsigned int cpu; l1_data_sram_init() local
105 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { l1_data_sram_init()
106 per_cpu(free_l1_data_A_sram_head, cpu).next = l1_data_sram_init()
108 if (!per_cpu(free_l1_data_A_sram_head, cpu).next) { l1_data_sram_init()
113 per_cpu(free_l1_data_A_sram_head, cpu).next->paddr = l1_data_sram_init()
114 (void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1); l1_data_sram_init()
115 per_cpu(free_l1_data_A_sram_head, cpu).next->size = l1_data_sram_init()
117 per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0; l1_data_sram_init()
118 per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL; l1_data_sram_init()
120 per_cpu(used_l1_data_A_sram_head, cpu).next = NULL; l1_data_sram_init()
124 per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10); l1_data_sram_init()
128 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { l1_data_sram_init()
129 per_cpu(free_l1_data_B_sram_head, cpu).next = l1_data_sram_init()
131 if (!per_cpu(free_l1_data_B_sram_head, cpu).next) { l1_data_sram_init()
136 per_cpu(free_l1_data_B_sram_head, cpu).next->paddr = l1_data_sram_init()
137 (void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1); l1_data_sram_init()
138 per_cpu(free_l1_data_B_sram_head, cpu).next->size = l1_data_sram_init()
140 per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0; l1_data_sram_init()
141 per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL; l1_data_sram_init()
143 per_cpu(used_l1_data_B_sram_head, cpu).next = NULL; l1_data_sram_init()
147 per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10); l1_data_sram_init()
153 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) l1_data_sram_init()
154 spin_lock_init(&per_cpu(l1_data_sram_lock, cpu)); l1_data_sram_init()
161 unsigned int cpu; l1_inst_sram_init() local
162 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { l1_inst_sram_init()
163 per_cpu(free_l1_inst_sram_head, cpu).next = l1_inst_sram_init()
165 if (!per_cpu(free_l1_inst_sram_head, cpu).next) { l1_inst_sram_init()
170 per_cpu(free_l1_inst_sram_head, cpu).next->paddr = l1_inst_sram_init()
171 (void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1); l1_inst_sram_init()
172 per_cpu(free_l1_inst_sram_head, cpu).next->size = l1_inst_sram_init()
174 per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0; l1_inst_sram_init()
175 per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL; l1_inst_sram_init()
177 per_cpu(used_l1_inst_sram_head, cpu).next = NULL; l1_inst_sram_init()
181 per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10); l1_inst_sram_init()
184 spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu)); l1_inst_sram_init()
442 unsigned int cpu; l1_data_A_sram_alloc() local
444 cpu = smp_processor_id(); l1_data_A_sram_alloc()
446 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_A_sram_alloc()
448 addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu), l1_data_A_sram_alloc()
449 &per_cpu(used_l1_data_A_sram_head, cpu)); l1_data_A_sram_alloc()
452 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_A_sram_alloc()
469 unsigned int cpu; l1_data_A_sram_free() local
471 cpu = smp_processor_id(); l1_data_A_sram_free()
473 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_A_sram_free()
475 ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu), l1_data_A_sram_free()
476 &per_cpu(used_l1_data_A_sram_head, cpu)); l1_data_A_sram_free()
479 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_A_sram_free()
493 unsigned int cpu; l1_data_B_sram_alloc() local
495 cpu = smp_processor_id(); l1_data_B_sram_alloc()
497 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_B_sram_alloc()
499 addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu), l1_data_B_sram_alloc()
500 &per_cpu(used_l1_data_B_sram_head, cpu)); l1_data_B_sram_alloc()
503 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_B_sram_alloc()
520 unsigned int cpu; l1_data_B_sram_free() local
522 cpu = smp_processor_id(); l1_data_B_sram_free()
524 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_B_sram_free()
526 ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu), l1_data_B_sram_free()
527 &per_cpu(used_l1_data_B_sram_head, cpu)); l1_data_B_sram_free()
530 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_B_sram_free()
576 unsigned int cpu; l1_inst_sram_alloc() local
578 cpu = smp_processor_id(); l1_inst_sram_alloc()
580 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); l1_inst_sram_alloc()
582 addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu), l1_inst_sram_alloc()
583 &per_cpu(used_l1_inst_sram_head, cpu)); l1_inst_sram_alloc()
586 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); l1_inst_sram_alloc()
603 unsigned int cpu; l1_inst_sram_free() local
605 cpu = smp_processor_id(); l1_inst_sram_free()
607 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); l1_inst_sram_free()
609 ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu), l1_inst_sram_free()
610 &per_cpu(used_l1_inst_sram_head, cpu)); l1_inst_sram_free()
613 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); l1_inst_sram_free()
627 unsigned int cpu; l1sram_alloc() local
629 cpu = smp_processor_id(); l1sram_alloc()
631 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); l1sram_alloc()
633 addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu), l1sram_alloc()
634 &per_cpu(used_l1_ssram_head, cpu)); l1sram_alloc()
637 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); l1sram_alloc()
647 unsigned int cpu; l1sram_alloc_max() local
649 cpu = smp_processor_id(); l1sram_alloc_max()
651 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); l1sram_alloc_max()
653 addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu), l1sram_alloc_max()
654 &per_cpu(used_l1_ssram_head, cpu), psize); l1sram_alloc_max()
657 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); l1sram_alloc_max()
667 unsigned int cpu; l1sram_free() local
669 cpu = smp_processor_id(); l1sram_free()
671 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); l1sram_free()
673 ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu), l1sram_free()
674 &per_cpu(used_l1_ssram_head, cpu)); l1sram_free()
677 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); l1sram_free()
840 unsigned int cpu; sram_proc_show() local
842 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { sram_proc_show()
844 &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu))) sram_proc_show()
848 &per_cpu(free_l1_data_A_sram_head, cpu), sram_proc_show()
849 &per_cpu(used_l1_data_A_sram_head, cpu))) sram_proc_show()
854 &per_cpu(free_l1_data_B_sram_head, cpu), sram_proc_show()
855 &per_cpu(used_l1_data_B_sram_head, cpu))) sram_proc_show()
860 &per_cpu(free_l1_inst_sram_head, cpu), sram_proc_show()
861 &per_cpu(used_l1_inst_sram_head, cpu))) sram_proc_show()
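The allocator above keeps one free list, one used list, and one spinlock per CPU, so every request pins itself to the local CPU and works entirely under spin_lock_irqsave() of that CPU's lock, with no cross-CPU contention. A generic sketch of the same "per-CPU pool under a per-CPU lock" idiom; the names are made up, and a real on-chip-SRAM pool would of course be populated from hardware-specific addresses:

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/smp.h>

struct pool_item {
	struct list_head node;
	void *addr;
	size_t size;
};

static DEFINE_PER_CPU(spinlock_t, pool_lock);
static DEFINE_PER_CPU(struct list_head, pool_free);

static int __init pool_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&per_cpu(pool_lock, cpu));
		INIT_LIST_HEAD(&per_cpu(pool_free, cpu));
	}
	return 0;
}

static struct pool_item *pool_take_local(void)
{
	unsigned int cpu = get_cpu();		/* stay on this CPU's pool */
	struct pool_item *item = NULL;
	unsigned long flags;

	spin_lock_irqsave(&per_cpu(pool_lock, cpu), flags);
	if (!list_empty(&per_cpu(pool_free, cpu))) {
		item = list_first_entry(&per_cpu(pool_free, cpu),
					struct pool_item, node);
		list_del(&item->node);
	}
	spin_unlock_irqrestore(&per_cpu(pool_lock, cpu), flags);
	put_cpu();

	return item;
}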
/linux-4.1.27/tools/power/cpupower/utils/idle_monitor/
H A Dcpuidle_sysfs.c28 unsigned int cpu) cpuidle_get_count_percent()
30 unsigned long long statediff = current_count[cpu][id] cpuidle_get_count_percent()
31 - previous_count[cpu][id]; cpuidle_get_count_percent()
33 cpuidle_cstates[id].name, timediff, *percent, cpu); cpuidle_get_count_percent()
41 cpuidle_cstates[id].name, timediff, statediff, *percent, cpu); cpuidle_get_count_percent()
48 int cpu, state; cpuidle_start() local
50 for (cpu = 0; cpu < cpu_count; cpu++) { cpuidle_start()
53 previous_count[cpu][state] = cpuidle_start()
54 sysfs_get_idlestate_time(cpu, state); cpuidle_start()
56 cpu, state, previous_count[cpu][state]); cpuidle_start()
64 int cpu, state; cpuidle_stop() local
69 for (cpu = 0; cpu < cpu_count; cpu++) { cpuidle_stop()
72 current_count[cpu][state] = cpuidle_stop()
73 sysfs_get_idlestate_time(cpu, state); cpuidle_stop()
75 cpu, state, previous_count[cpu][state]); cpuidle_stop()
27 cpuidle_get_count_percent(unsigned int id, double *percent, unsigned int cpu) cpuidle_get_count_percent() argument
H A Dnhm_idle.c31 unsigned int cpu);
73 unsigned int cpu) nhm_get_count()
96 if (read_msr(cpu, msr, val)) nhm_get_count()
103 unsigned int cpu) nhm_get_count_percent()
107 if (!is_valid[cpu]) nhm_get_count_percent()
111 (current_count[id][cpu] - previous_count[id][cpu])) / nhm_get_count_percent()
115 nhm_cstates[id].name, previous_count[id][cpu], nhm_get_count_percent()
116 current_count[id][cpu], cpu); nhm_get_count_percent()
121 current_count[id][cpu] - previous_count[id][cpu], nhm_get_count_percent()
122 *percent, cpu); nhm_get_count_percent()
129 int num, cpu; nhm_start() local
135 for (cpu = 0; cpu < cpu_count; cpu++) { nhm_start()
136 is_valid[cpu] = !nhm_get_count(num, &val, cpu); nhm_start()
137 previous_count[num][cpu] = val; nhm_start()
149 int num, cpu; nhm_stop() local
154 for (cpu = 0; cpu < cpu_count; cpu++) { nhm_stop()
155 is_valid[cpu] = !nhm_get_count(num, &val, cpu); nhm_stop()
156 current_count[num][cpu] = val; nhm_stop()
72 nhm_get_count(enum intel_nhm_id id, unsigned long long *val, unsigned int cpu) nhm_get_count() argument
102 nhm_get_count_percent(unsigned int id, double *percent, unsigned int cpu) nhm_get_count_percent() argument
H A Dhsw_ext_idle.c30 unsigned int cpu);
65 unsigned int cpu) hsw_ext_get_count()
85 if (read_msr(cpu, msr, val)) hsw_ext_get_count()
91 unsigned int cpu) hsw_ext_get_count_percent()
95 if (!is_valid[cpu]) hsw_ext_get_count_percent()
99 (current_count[id][cpu] - previous_count[id][cpu])) / hsw_ext_get_count_percent()
103 hsw_ext_cstates[id].name, previous_count[id][cpu], hsw_ext_get_count_percent()
104 current_count[id][cpu], cpu); hsw_ext_get_count_percent()
109 current_count[id][cpu] - previous_count[id][cpu], hsw_ext_get_count_percent()
110 *percent, cpu); hsw_ext_get_count_percent()
117 int num, cpu; hsw_ext_start() local
121 for (cpu = 0; cpu < cpu_count; cpu++) { hsw_ext_start()
122 hsw_ext_get_count(num, &val, cpu); hsw_ext_start()
123 previous_count[num][cpu] = val; hsw_ext_start()
133 int num, cpu; hsw_ext_stop() local
138 for (cpu = 0; cpu < cpu_count; cpu++) { hsw_ext_stop()
139 is_valid[cpu] = !hsw_ext_get_count(num, &val, cpu); hsw_ext_stop()
140 current_count[num][cpu] = val; hsw_ext_stop()
64 hsw_ext_get_count(enum intel_hsw_ext_id id, unsigned long long *val, unsigned int cpu) hsw_ext_get_count() argument
90 hsw_ext_get_count_percent(unsigned int id, double *percent, unsigned int cpu) hsw_ext_get_count_percent() argument
H A Dsnb_idle.c28 unsigned int cpu);
62 unsigned int cpu) snb_get_count()
82 if (read_msr(cpu, msr, val)) snb_get_count()
88 unsigned int cpu) snb_get_count_percent()
92 if (!is_valid[cpu]) snb_get_count_percent()
96 (current_count[id][cpu] - previous_count[id][cpu])) / snb_get_count_percent()
100 snb_cstates[id].name, previous_count[id][cpu], snb_get_count_percent()
101 current_count[id][cpu], cpu); snb_get_count_percent()
106 current_count[id][cpu] - previous_count[id][cpu], snb_get_count_percent()
107 *percent, cpu); snb_get_count_percent()
114 int num, cpu; snb_start() local
118 for (cpu = 0; cpu < cpu_count; cpu++) { snb_start()
119 snb_get_count(num, &val, cpu); snb_start()
120 previous_count[num][cpu] = val; snb_start()
130 int num, cpu; snb_stop() local
135 for (cpu = 0; cpu < cpu_count; cpu++) { snb_stop()
136 is_valid[cpu] = !snb_get_count(num, &val, cpu); snb_stop()
137 current_count[num][cpu] = val; snb_stop()
61 snb_get_count(enum intel_snb_id id, unsigned long long *val, unsigned int cpu) snb_get_count() argument
87 snb_get_count_percent(unsigned int id, double *percent, unsigned int cpu) snb_get_count_percent() argument
H A Dmperf_monitor.c30 unsigned int cpu);
32 unsigned int cpu);
89 static int mperf_init_stats(unsigned int cpu) mperf_init_stats() argument
94 ret = read_msr(cpu, MSR_APERF, &val); mperf_init_stats()
95 aperf_previous_count[cpu] = val; mperf_init_stats()
96 ret |= read_msr(cpu, MSR_MPERF, &val); mperf_init_stats()
97 mperf_previous_count[cpu] = val; mperf_init_stats()
98 is_valid[cpu] = !ret; mperf_init_stats()
103 static int mperf_measure_stats(unsigned int cpu) mperf_measure_stats() argument
108 ret = read_msr(cpu, MSR_APERF, &val); mperf_measure_stats()
109 aperf_current_count[cpu] = val; mperf_measure_stats()
110 ret |= read_msr(cpu, MSR_MPERF, &val); mperf_measure_stats()
111 mperf_current_count[cpu] = val; mperf_measure_stats()
112 is_valid[cpu] = !ret; mperf_measure_stats()
118 unsigned int cpu) mperf_get_count_percent()
123 if (!is_valid[cpu]) mperf_get_count_percent()
129 mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; mperf_get_count_percent()
130 aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; mperf_get_count_percent()
149 mperf_cstates[id].name, mperf_diff, aperf_diff, cpu); mperf_get_count_percent()
155 unsigned int cpu) mperf_get_count_freq()
162 if (!is_valid[cpu]) mperf_get_count_freq()
165 mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; mperf_get_count_freq()
166 aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; mperf_get_count_freq()
188 int cpu; mperf_start() local
194 for (cpu = 0; cpu < cpu_count; cpu++) mperf_start()
195 mperf_init_stats(cpu); mperf_start()
205 int cpu; mperf_stop() local
207 for (cpu = 0; cpu < cpu_count; cpu++) mperf_stop()
208 mperf_measure_stats(cpu); mperf_stop()
226 * /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq
117 mperf_get_count_percent(unsigned int id, double *percent, unsigned int cpu) mperf_get_count_percent() argument
154 mperf_get_count_freq(unsigned int id, unsigned long long *count, unsigned int cpu) mperf_get_count_freq() argument
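Every monitor in this directory follows the same recipe: snapshot a per-CPU counter in start(), snapshot again in stop(), and report the delta either as a percentage of the elapsed reference (TSC ticks or wall-clock time) or, for the APERF/MPERF pair, as an average running frequency. The arithmetic is plain delta ratios, illustrated below with made-up snapshot values (user-space C, not cpupower's actual code):

#include <stdio.h>

int main(void)
{
	/* Hypothetical counter snapshots for one CPU over one measurement interval. */
	unsigned long long mperf_prev = 1000000ULL, mperf_cur = 1600000ULL;
	unsigned long long aperf_prev = 2000000ULL, aperf_cur = 2720000ULL;
	unsigned long long tsc_diff = 2400000ULL;	/* reference cycles in the interval */
	unsigned long long max_freq_khz = 3000000ULL;	/* e.g. from cpuinfo_max_freq */

	unsigned long long mperf_diff = mperf_cur - mperf_prev;
	unsigned long long aperf_diff = aperf_cur - aperf_prev;

	/* C0 residency: the fraction of the interval during which MPERF ticked. */
	double c0_pct = 100.0 * mperf_diff / tsc_diff;

	/* Average frequency while busy: reference frequency scaled by APERF/MPERF. */
	double avg_khz = (double)max_freq_khz * aperf_diff / mperf_diff;

	printf("C0 %.1f%%, average %.0f kHz while busy\n", c0_pct, avg_khz);
	return 0;
}

With these numbers: 600000 MPERF ticks out of 2400000 reference cycles is 25% C0, and an APERF/MPERF ratio of 1.2 against a 3000000 kHz reference gives 3600000 kHz, i.e. the core ran in turbo while it was busy.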
/linux-4.1.27/arch/powerpc/include/asm/
H A Dsmp.h36 extern int cpu_to_chip_id(int cpu);
41 void (*message_pass)(int cpu, int msg);
43 void (*cause_ipi)(int cpu, unsigned long data);
66 void generic_cpu_die(unsigned int cpu);
67 void generic_set_cpu_dead(unsigned int cpu);
68 void generic_set_cpu_up(unsigned int cpu);
69 int generic_check_cpu_restart(unsigned int cpu);
79 #define raw_smp_processor_id() (current_thread_info()->cpu)
82 static inline int get_hard_smp_processor_id(int cpu) get_hard_smp_processor_id() argument
84 return smp_hw_index[cpu]; get_hard_smp_processor_id()
87 static inline void set_hard_smp_processor_id(int cpu, int phys) set_hard_smp_processor_id() argument
89 smp_hw_index[cpu] = phys; set_hard_smp_processor_id()
96 static inline struct cpumask *cpu_sibling_mask(int cpu) cpu_sibling_mask() argument
98 return per_cpu(cpu_sibling_map, cpu); cpu_sibling_mask()
101 static inline struct cpumask *cpu_core_mask(int cpu) cpu_core_mask() argument
103 return per_cpu(cpu_core_map, cpu); cpu_core_mask()
106 extern int cpu_to_core_id(int cpu);
122 extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
123 extern void smp_muxed_ipi_message_pass(int cpu, int msg);
131 extern void __cpu_die(unsigned int cpu);
139 static inline const struct cpumask *cpu_sibling_mask(int cpu) cpu_sibling_mask() argument
141 return cpumask_of(cpu); cpu_sibling_mask()
147 static inline int get_hard_smp_processor_id(int cpu) get_hard_smp_processor_id() argument
149 return paca[cpu].hw_cpu_id; get_hard_smp_processor_id()
152 static inline void set_hard_smp_processor_id(int cpu, int phys) set_hard_smp_processor_id() argument
154 paca[cpu].hw_cpu_id = phys; set_hard_smp_processor_id()
163 static inline int get_hard_smp_processor_id(int cpu) get_hard_smp_processor_id() argument
168 static inline void set_hard_smp_processor_id(int cpu, int phys) set_hard_smp_processor_id() argument
178 extern void smp_mpic_setup_cpu(int cpu);
188 extern void arch_send_call_function_single_ipi(int cpu);
H A Dcputhreads.h36 * This function returns a cpumask which will have one "cpu" (or thread)
67 int cpu_core_index_of_thread(int cpu);
70 static inline int cpu_core_index_of_thread(int cpu) { return cpu; } cpu_first_thread_of_core() argument
74 static inline int cpu_thread_in_core(int cpu) cpu_thread_in_core() argument
76 return cpu & (threads_per_core - 1); cpu_thread_in_core()
79 static inline int cpu_thread_in_subcore(int cpu) cpu_thread_in_subcore() argument
81 return cpu & (threads_per_subcore - 1); cpu_thread_in_subcore()
84 static inline int cpu_first_thread_sibling(int cpu) cpu_first_thread_sibling() argument
86 return cpu & ~(threads_per_core - 1); cpu_first_thread_sibling()
89 static inline int cpu_last_thread_sibling(int cpu) cpu_last_thread_sibling() argument
91 return cpu | (threads_per_core - 1); cpu_last_thread_sibling()
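All of the cputhreads.h helpers above lean on threads_per_core being a power of two, so "which thread am I", "first sibling" and "last sibling" are just masks on the logical CPU number. The same arithmetic checked standalone (plain C; the value 8 for threads_per_core is only an example):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int threads_per_core = 8;	/* must be a power of two for the mask trick */
	int cpu = 21;

	int thread_in_core = cpu & (threads_per_core - 1);	/* 5  */
	int first_sibling  = cpu & ~(threads_per_core - 1);	/* 16 */
	int last_sibling   = cpu | (threads_per_core - 1);	/* 23 */
	int core_index     = cpu / threads_per_core;		/* 2, i.e. cpu >> threads_shift */

	assert(thread_in_core == 5 && first_sibling == 16 &&
	       last_sibling == 23 && core_index == 2);

	printf("cpu %d: thread %d of core %d, siblings %d..%d\n",
	       cpu, thread_in_core, core_index, first_sibling, last_sibling);
	return 0;
}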
H A Dcell-pmu.h79 extern u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
80 extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
81 extern u32 cbe_read_ctr(u32 cpu, u32 ctr);
82 extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
84 extern u32 cbe_read_pm07_control(u32 cpu, u32 ctr);
85 extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
86 extern u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg);
87 extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
89 extern u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
90 extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
92 extern void cbe_enable_pm(u32 cpu);
93 extern void cbe_disable_pm(u32 cpu);
95 extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
97 extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
98 extern void cbe_disable_pm_interrupts(u32 cpu);
99 extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu);
H A Dtopology.h89 #define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
90 #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
91 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
92 #define topology_core_id(cpu) (cpu_to_core_id(cpu))
/linux-4.1.27/arch/s390/oprofile/
H A Dhwsampler.c14 #include <linux/cpu.h>
80 static int smp_ctl_ssctl_stop(int cpu) smp_ctl_ssctl_stop() argument
86 cb = &per_cpu(sampler_cpu_buffer, cpu); smp_ctl_ssctl_stop()
92 smp_call_function_single(cpu, execute_ssctl, &ep, 1); smp_ctl_ssctl_stop()
95 printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); smp_ctl_ssctl_stop()
100 smp_call_function_single(cpu, execute_qsi, &ep, 1); smp_ctl_ssctl_stop()
110 static int smp_ctl_ssctl_deactivate(int cpu) smp_ctl_ssctl_deactivate() argument
116 cb = &per_cpu(sampler_cpu_buffer, cpu); smp_ctl_ssctl_deactivate()
122 smp_call_function_single(cpu, execute_ssctl, &ep, 1); smp_ctl_ssctl_deactivate()
125 printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); smp_ctl_ssctl_deactivate()
128 smp_call_function_single(cpu, execute_qsi, &ep, 1); smp_ctl_ssctl_deactivate()
136 static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval) smp_ctl_ssctl_enable_activate() argument
142 cb = &per_cpu(sampler_cpu_buffer, cpu); smp_ctl_ssctl_enable_activate()
152 smp_call_function_single(cpu, execute_ssctl, &ep, 1); smp_ctl_ssctl_enable_activate()
155 printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); smp_ctl_ssctl_enable_activate()
158 smp_call_function_single(cpu, execute_qsi, &ep, 1); smp_ctl_ssctl_enable_activate()
160 printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu); smp_ctl_ssctl_enable_activate()
165 static int smp_ctl_qsi(int cpu) smp_ctl_qsi() argument
170 cb = &per_cpu(sampler_cpu_buffer, cpu); smp_ctl_qsi()
173 smp_call_function_single(cpu, execute_qsi, &ep, 1); smp_ctl_qsi()
198 static void add_samples_to_oprofile(unsigned cpu, unsigned long *,
203 int cpu; init_all_cpu_buffers() local
206 for_each_online_cpu(cpu) { for_each_online_cpu()
207 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu()
215 int cpu; prepare_cpu_buffers() local
217 for_each_online_cpu(cpu) { for_each_online_cpu()
218 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu()
235 * @cpu: the cpu for which sampler memory is allocated
242 static int allocate_sdbt(int cpu) allocate_sdbt() argument
251 cb = &per_cpu(sampler_cpu_buffer, cpu); allocate_sdbt()
327 int cpu; deallocate_sdbt() local
332 for_each_online_cpu(cpu) { for_each_online_cpu()
338 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu()
377 static int start_sampling(int cpu) start_sampling() argument
382 cb = &per_cpu(sampler_cpu_buffer, cpu); start_sampling()
383 rc = smp_ctl_ssctl_enable_activate(cpu, interval); start_sampling()
385 printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu); start_sampling()
391 printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu); start_sampling()
396 printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu); start_sampling()
402 cpu, interval); start_sampling()
410 static int stop_sampling(int cpu) stop_sampling() argument
416 rc = smp_ctl_qsi(cpu); stop_sampling()
419 cb = &per_cpu(sampler_cpu_buffer, cpu); stop_sampling()
421 printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu); stop_sampling()
423 rc = smp_ctl_ssctl_stop(cpu); stop_sampling()
426 cpu, rc); stop_sampling()
430 printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu); stop_sampling()
436 " count=%lu.\n", cpu, v); stop_sampling()
441 " count=%lu.\n", cpu, v); stop_sampling()
446 " count=%lu.\n", cpu, v); stop_sampling()
452 " count=%lu.\n", cpu, v); stop_sampling()
458 " count=%lu.\n", cpu, v); stop_sampling()
482 int cpu; hws_oom_callback() local
497 cpu = get_cpu(); hws_oom_callback()
498 cb = &per_cpu(sampler_cpu_buffer, cpu); hws_oom_callback()
509 cpu);
536 * @cpu: specifies the CPU to be set inactive.
540 int hwsampler_deactivate(unsigned int cpu) hwsampler_deactivate() argument
555 cb = &per_cpu(sampler_cpu_buffer, cpu); hwsampler_deactivate()
557 rc = smp_ctl_qsi(cpu); hwsampler_deactivate()
560 rc = smp_ctl_ssctl_deactivate(cpu); hwsampler_deactivate()
563 "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu); hwsampler_deactivate()
569 queue_work_on(cpu, hws_wq, &cb->worker); hwsampler_deactivate()
583 * @cpu: specifies the CPU to be set active.
587 int hwsampler_activate(unsigned int cpu) hwsampler_activate() argument
599 cb = &per_cpu(sampler_cpu_buffer, cpu); hwsampler_activate()
601 rc = smp_ctl_qsi(cpu); hwsampler_activate()
605 rc = smp_ctl_ssctl_enable_activate(cpu, interval); hwsampler_activate()
609 cpu); hwsampler_activate()
622 unsigned int cpu; check_qsi_on_setup() local
625 for_each_online_cpu(cpu) { for_each_online_cpu()
626 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu()
627 rc = smp_ctl_qsi(cpu); for_each_online_cpu()
639 rc = smp_ctl_ssctl_stop(cpu); for_each_online_cpu()
644 "CPU %d, CPUMF Sampling stopped now.\n", cpu); for_each_online_cpu()
652 unsigned int cpu; check_qsi_on_start() local
656 for_each_online_cpu(cpu) { for_each_online_cpu()
657 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu()
658 rc = smp_ctl_qsi(cpu); for_each_online_cpu()
673 static void worker_on_start(unsigned int cpu) worker_on_start() argument
677 cb = &per_cpu(sampler_cpu_buffer, cpu); worker_on_start()
681 static int worker_check_error(unsigned int cpu, int ext_params) worker_check_error() argument
688 cb = &per_cpu(sampler_cpu_buffer, cpu); worker_check_error()
718 static void worker_on_finish(unsigned int cpu) worker_on_finish() argument
723 cb = &per_cpu(sampler_cpu_buffer, cpu); worker_on_finish()
726 rc = smp_ctl_qsi(cpu); worker_on_finish()
731 cpu); worker_on_finish()
732 rc = smp_ctl_ssctl_stop(cpu); worker_on_finish()
736 cpu); worker_on_finish()
739 if (i == cpu) for_each_online_cpu()
751 static void worker_on_interrupt(unsigned int cpu) worker_on_interrupt() argument
757 cb = &per_cpu(sampler_cpu_buffer, cpu); worker_on_interrupt()
780 add_samples_to_oprofile(cpu, sdbt, dear); worker_on_interrupt()
795 static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, add_samples_to_oprofile() argument
856 unsigned int cpu; worker() local
861 cpu = smp_processor_id(); worker()
865 worker_on_start(cpu); worker()
867 if (worker_check_error(cpu, ext_params)) worker()
871 worker_on_interrupt(cpu); worker()
874 worker_on_finish(cpu); worker()
886 int cpu, rc; hwsampler_allocate() local
905 for_each_online_cpu(cpu) { for_each_online_cpu()
906 if (allocate_sdbt(cpu)) { for_each_online_cpu()
966 unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) hwsampler_get_sample_overflow_count() argument
970 cb = &per_cpu(sampler_cpu_buffer, cpu); hwsampler_get_sample_overflow_count()
978 int cpu; hwsampler_setup() local
1006 for_each_online_cpu(cpu) { for_each_online_cpu()
1007 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu()
1009 rc = smp_ctl_qsi(cpu); for_each_online_cpu()
1086 int rc, cpu; hwsampler_start_all() local
1108 for_each_online_cpu(cpu) { for_each_online_cpu()
1109 rc = start_sampling(cpu); for_each_online_cpu()
1114 for_each_online_cpu(cpu) { for_each_online_cpu()
1115 stop_sampling(cpu); for_each_online_cpu()
1145 int tmp_rc, rc, cpu; hwsampler_stop_all() local
1158 for_each_online_cpu(cpu) { for_each_online_cpu()
1159 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu()
1161 tmp_rc = stop_sampling(cpu); for_each_online_cpu()
/linux-4.1.27/kernel/trace/
H A Dtrace_kdb.c25 int cnt = 0, cpu; ftrace_dump_buf() local
30 for_each_tracing_cpu(cpu) { for_each_tracing_cpu()
31 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); for_each_tracing_cpu()
49 for_each_tracing_cpu(cpu) { for_each_tracing_cpu()
50 iter.buffer_iter[cpu] = for_each_tracing_cpu()
51 ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu); for_each_tracing_cpu()
52 ring_buffer_read_start(iter.buffer_iter[cpu]); for_each_tracing_cpu()
53 tracing_iter_reset(&iter, cpu); for_each_tracing_cpu()
87 for_each_tracing_cpu(cpu) { for_each_tracing_cpu()
88 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); for_each_tracing_cpu()
91 for_each_tracing_cpu(cpu) { for_each_tracing_cpu()
92 if (iter.buffer_iter[cpu]) { for_each_tracing_cpu()
93 ring_buffer_read_finish(iter.buffer_iter[cpu]); for_each_tracing_cpu()
94 iter.buffer_iter[cpu] = NULL; for_each_tracing_cpu()
135 kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", kdb_ftrace_register()
/linux-4.1.27/include/linux/clk/
H A Dtegra.h43 void (*wait_for_reset)(u32 cpu);
44 void (*put_in_reset)(u32 cpu);
45 void (*out_of_reset)(u32 cpu);
46 void (*enable_clock)(u32 cpu);
47 void (*disable_clock)(u32 cpu);
57 static inline void tegra_wait_cpu_in_reset(u32 cpu) tegra_wait_cpu_in_reset() argument
62 tegra_cpu_car_ops->wait_for_reset(cpu); tegra_wait_cpu_in_reset()
65 static inline void tegra_put_cpu_in_reset(u32 cpu) tegra_put_cpu_in_reset() argument
70 tegra_cpu_car_ops->put_in_reset(cpu); tegra_put_cpu_in_reset()
73 static inline void tegra_cpu_out_of_reset(u32 cpu) tegra_cpu_out_of_reset() argument
78 tegra_cpu_car_ops->out_of_reset(cpu); tegra_cpu_out_of_reset()
81 static inline void tegra_enable_cpu_clock(u32 cpu) tegra_enable_cpu_clock() argument
86 tegra_cpu_car_ops->enable_clock(cpu); tegra_enable_cpu_clock()
89 static inline void tegra_disable_cpu_clock(u32 cpu) tegra_disable_cpu_clock() argument
94 tegra_cpu_car_ops->disable_clock(cpu); tegra_disable_cpu_clock()
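tegra.h is a pure ops-table header: the SoC clock driver registers a struct tegra_cpu_car_ops once, and generic CPU hotplug code only ever goes through the static inline wrappers, which guard the dispatch so callers never touch clock-and-reset registers directly. The shape of that pattern in isolation, with hypothetical names and a toy backend (plain C, not the Tegra code itself):

#include <stdio.h>

struct cpu_car_ops {
	void (*enable_clock)(unsigned int cpu);
	void (*disable_clock)(unsigned int cpu);
};

static const struct cpu_car_ops *car_ops;	/* registered once by the SoC driver */

static inline void cpu_clock_enable(unsigned int cpu)
{
	if (!car_ops || !car_ops->enable_clock)	/* tolerate a missing backend */
		return;
	car_ops->enable_clock(cpu);
}

/* Toy backend standing in for the real per-SoC implementation. */
static void toy_enable_clock(unsigned int cpu)
{
	printf("clock enabled for cpu%u\n", cpu);
}

static const struct cpu_car_ops toy_ops = { .enable_clock = toy_enable_clock };

int main(void)
{
	cpu_clock_enable(1);	/* silently ignored: nothing registered yet */
	car_ops = &toy_ops;
	cpu_clock_enable(1);	/* dispatches to the toy backend */
	return 0;
}

Keeping the guard in the wrapper rather than in every caller is what lets the generic hotplug paths stay identical across SoCs that do and do not provide a given hook.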
/linux-4.1.27/arch/xtensa/kernel/
H A Dsmp.c15 #include <linux/cpu.h>
102 unsigned int cpu = smp_processor_id(); smp_prepare_boot_cpu() local
103 BUG_ON(cpu != 0); smp_prepare_boot_cpu()
104 cpu_asid_cache(cpu) = ASID_USER_FIRST; smp_prepare_boot_cpu()
117 unsigned int cpu = smp_processor_id(); secondary_start_kernel() local
123 pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n", secondary_start_kernel()
124 __func__, boot_secondary_processors, cpu); secondary_start_kernel()
129 pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n", secondary_start_kernel()
130 __func__, boot_secondary_processors, cpu); secondary_start_kernel()
141 cpumask_set_cpu(cpu, mm_cpumask(mm)); secondary_start_kernel()
149 notify_cpu_starting(cpu); secondary_start_kernel()
152 local_timer_setup(cpu); secondary_start_kernel()
154 set_cpu_online(cpu, true); secondary_start_kernel()
165 unsigned cpu = (unsigned)p; mx_cpu_start() local
168 set_er(run_stall_mask & ~(1u << cpu), MPSCORE); mx_cpu_start()
169 pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", mx_cpu_start()
170 __func__, cpu, run_stall_mask, get_er(MPSCORE)); mx_cpu_start()
175 unsigned cpu = (unsigned)p; mx_cpu_stop() local
178 set_er(run_stall_mask | (1u << cpu), MPSCORE); mx_cpu_stop()
179 pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", mx_cpu_stop()
180 __func__, cpu, run_stall_mask, get_er(MPSCORE)); mx_cpu_stop()
188 static int boot_secondary(unsigned int cpu, struct task_struct *ts) boot_secondary() argument
195 cpu_start_id = cpu; boot_secondary()
199 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); boot_secondary()
216 (void *)cpu, 1); boot_secondary()
224 int __cpu_up(unsigned int cpu, struct task_struct *idle) __cpu_up() argument
228 if (cpu_asid_cache(cpu) == 0) __cpu_up()
229 cpu_asid_cache(cpu) = ASID_USER_FIRST; __cpu_up()
234 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", __cpu_up()
235 __func__, cpu, idle, start_info.stack); __cpu_up()
237 ret = boot_secondary(cpu, idle); __cpu_up()
241 if (!cpu_online(cpu)) __cpu_up()
246 pr_err("CPU %u failed to boot\n", cpu); __cpu_up()
258 unsigned int cpu = smp_processor_id(); __cpu_disable() local
262 * and we must not schedule until we're ready to give up the cpu. __cpu_disable()
264 set_cpu_online(cpu, false); __cpu_disable()
279 clear_tasks_mm_cpumask(cpu); __cpu_disable()
284 static void platform_cpu_kill(unsigned int cpu) platform_cpu_kill() argument
286 smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true); platform_cpu_kill()
293 void __cpu_die(unsigned int cpu) __cpu_die() argument
299 if (cpu_start_id == -cpu) { __cpu_die()
300 platform_cpu_kill(cpu); __cpu_die()
304 pr_err("CPU%u: unable to kill\n", cpu); __cpu_die()
316 * of the other hotplug-cpu capable cores, so presumably coming
370 void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
372 send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); arch_send_call_function_single_ipi()
375 void smp_send_reschedule(int cpu) smp_send_reschedule() argument
377 send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); smp_send_reschedule()
389 static void ipi_cpu_stop(unsigned int cpu) ipi_cpu_stop() argument
391 set_cpu_online(cpu, false); ipi_cpu_stop()
397 unsigned int cpu = smp_processor_id(); ipi_interrupt() local
398 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); ipi_interrupt()
402 msg = get_er(MIPICAUSE(cpu)); ipi_interrupt()
405 set_er(1 << i, MIPICAUSE(cpu)); ipi_interrupt()
414 ipi_cpu_stop(cpu); ipi_interrupt()
421 unsigned int cpu; show_ipi_list() local
426 for_each_online_cpu(cpu) show_ipi_list()
428 per_cpu(ipi_data, cpu).ipi_count[i]); show_ipi_list()
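ipi_interrupt() above snapshots the per-cpu IPI cause register, acknowledges each set bit by writing it back, and dispatches on the message number. The same read-mask/ack-bit/dispatch loop in standalone form, with an ordinary variable in place of the MIPICAUSE register:

#include <stdio.h>

enum ipi_msg { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_MAX };

/* Stand-in for the per-cpu IPI cause register. */
static unsigned int ipi_cause = (1u << IPI_RESCHEDULE) | (1u << IPI_CPU_STOP);

int main(void)
{
	unsigned int msg = ipi_cause;           /* snapshot the pending-message mask */
	unsigned int i;

	for (i = 0; i < IPI_MAX; i++) {
		if (!(msg & (1u << i)))
			continue;
		ipi_cause &= ~(1u << i);        /* acknowledge this message */
		switch (i) {
		case IPI_RESCHEDULE:
			printf("reschedule\n");
			break;
		case IPI_CALL_FUNC:
			printf("run queued functions\n");
			break;
		case IPI_CPU_STOP:
			printf("stop this cpu\n");
			break;
		}
	}
	return 0;
}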
/linux-4.1.27/arch/metag/kernel/
H A Dtopology.c9 #include <linux/cpu.h>
17 #include <asm/cpu.h>
24 static cpumask_t cpu_coregroup_map(unsigned int cpu) cpu_coregroup_map() argument
29 const struct cpumask *cpu_coregroup_mask(unsigned int cpu) cpu_coregroup_mask() argument
31 return &cpu_core_map[cpu]; cpu_coregroup_mask()
36 unsigned int cpu; arch_update_cpu_topology() local
38 for_each_possible_cpu(cpu) arch_update_cpu_topology()
39 cpu_core_map[cpu] = cpu_coregroup_map(cpu); arch_update_cpu_topology()
56 cpuinfo->cpu.hotpluggable = 1; topology_init()
58 ret = register_cpu(&cpuinfo->cpu, i); topology_init()
/linux-4.1.27/arch/blackfin/mach-common/
H A Dsmp.c21 #include <linux/cpu.h>
35 #include <asm/cpu.h>
81 static void ipi_cpu_stop(unsigned int cpu) ipi_cpu_stop() argument
84 printk(KERN_CRIT "CPU%u: stopping\n", cpu); ipi_cpu_stop()
88 set_cpu_online(cpu, false); ipi_cpu_stop()
125 unsigned int cpu = smp_processor_id(); ipi_handler_int0() local
127 platform_clear_ipi(cpu, IRQ_SUPPLE_0); ipi_handler_int0()
134 int cpu = smp_processor_id(); ipi_timer() local
135 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); ipi_timer()
142 unsigned int cpu = smp_processor_id(); ipi_handler_int1() local
146 platform_clear_ipi(cpu, IRQ_SUPPLE_1); ipi_handler_int1()
165 ipi_cpu_stop(cpu); ipi_handler_int1()
180 unsigned int cpu; bfin_ipi_init() local
182 for_each_possible_cpu(cpu) { for_each_possible_cpu()
183 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); for_each_possible_cpu()
191 unsigned int cpu; send_ipi() local
196 for_each_cpu(cpu, cpumask) { for_each_cpu()
197 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); for_each_cpu()
203 for_each_cpu(cpu, cpumask)
204 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
207 void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
209 send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC); arch_send_call_function_single_ipi()
217 void smp_send_reschedule(int cpu) smp_send_reschedule() argument
219 send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE); smp_send_reschedule()
249 int __cpu_up(unsigned int cpu, struct task_struct *idle) __cpu_up() argument
255 ret = platform_boot_secondary(cpu, idle); __cpu_up()
262 static void setup_secondary(unsigned int cpu) setup_secondary() argument
282 unsigned int cpu = smp_processor_id(); secondary_start_kernel() local
316 setup_secondary(cpu); secondary_start_kernel()
318 platform_secondary_init(cpu); secondary_start_kernel()
324 bfin_setup_caches(cpu); secondary_start_kernel()
326 notify_cpu_starting(cpu); secondary_start_kernel()
335 set_cpu_online(cpu, true); secondary_start_kernel()
354 unsigned int cpu; smp_cpus_done() local
356 for_each_online_cpu(cpu) smp_cpus_done()
382 unsigned int cpu = get_cpu(); resync_core_icache() local
384 icache_invld_count[cpu]++; resync_core_icache()
396 unsigned int cpu = get_cpu(); resync_core_dcache() local
398 dcache_invld_count[cpu]++; resync_core_dcache()
407 unsigned int cpu = smp_processor_id(); __cpu_disable() local
409 if (cpu == 0) __cpu_disable()
412 set_cpu_online(cpu, false); __cpu_disable()
416 int __cpu_die(unsigned int cpu) __cpu_die() argument
418 return cpu_wait_death(cpu, 5); __cpu_die()
/linux-4.1.27/arch/arm/kernel/
H A Dtopology.c14 #include <linux/cpu.h>
29 * cpu capacity scale management
33 * cpu capacity table
34 * This per cpu data structure describes the relative capacity of each core.
37 * can take this difference into account during load balance. A per cpu
45 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) arch_scale_cpu_capacity() argument
47 return per_cpu(cpu_scale, cpu); arch_scale_cpu_capacity()
50 static void set_capacity_scale(unsigned int cpu, unsigned long capacity) set_capacity_scale() argument
52 per_cpu(cpu_scale, cpu) = capacity; set_capacity_scale()
78 #define cpu_capacity(cpu) __cpu_capacity[cpu]
97 int cpu = 0; parse_dt_topology() local
102 for_each_possible_cpu(cpu) { for_each_possible_cpu()
106 /* too early to use cpu->of_node */ for_each_possible_cpu()
107 cn = of_get_cpu_node(cpu, NULL); for_each_possible_cpu()
109 pr_err("missing device node for CPU %d\n", cpu); for_each_possible_cpu()
137 cpu_capacity(cpu) = capacity; for_each_possible_cpu()
161 static void update_cpu_capacity(unsigned int cpu) update_cpu_capacity() argument
163 if (!cpu_capacity(cpu)) update_cpu_capacity()
166 set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity); update_cpu_capacity()
169 cpu, arch_scale_cpu_capacity(NULL, cpu)); update_cpu_capacity()
178 * cpu topology table update_cpu_capacity()
183 const struct cpumask *cpu_coregroup_mask(int cpu) cpu_coregroup_mask() argument
185 return &cpu_topology[cpu].core_sibling; cpu_coregroup_mask()
192 const struct cpumask *cpu_corepower_mask(int cpu) cpu_corepower_mask() argument
194 return &cpu_topology[cpu].thread_sibling; cpu_corepower_mask()
200 int cpu; update_siblings_masks() local
203 for_each_possible_cpu(cpu) { for_each_possible_cpu()
204 cpu_topo = &cpu_topology[cpu]; for_each_possible_cpu()
210 if (cpu != cpuid) for_each_possible_cpu()
211 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); for_each_possible_cpu()
217 if (cpu != cpuid) for_each_possible_cpu()
218 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); for_each_possible_cpu()
224 * store_cpu_topology is called at boot when only one cpu is running
233 /* If the cpu topology has been already set, just return */ store_cpu_topology()
239 /* create cpu topology mapping */ store_cpu_topology()
272 pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", store_cpu_topology()
293 * init_cpu_topology is called at boot when only one cpu is running
298 unsigned int cpu; init_cpu_topology() local
301 for_each_possible_cpu(cpu) { for_each_possible_cpu()
302 struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); for_each_possible_cpu()
310 set_capacity_scale(cpu, SCHED_CAPACITY_SCALE); for_each_possible_cpu()
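parse_dt_topology()/update_cpu_capacity() turn a raw per-cpu efficiency figure from the device tree into a relative capacity the scheduler can compare across cores. A simplified standalone version of that normalization, pinning the biggest core at SCHED_CAPACITY_SCALE (the kernel's actual arithmetic goes through a "middle" capacity, so treat this only as an approximation; the raw numbers are invented):

#include <stdio.h>

#define NR_CPUS 4
#define SCHED_CAPACITY_SCALE 1024UL

/* Invented raw per-cpu efficiency numbers, as if parsed from the device tree. */
static unsigned long raw_capacity[NR_CPUS] = { 2048, 2048, 1024, 1024 };

int main(void)
{
	unsigned long max = raw_capacity[0];
	int cpu;

	for (cpu = 1; cpu < NR_CPUS; cpu++)
		if (raw_capacity[cpu] > max)
			max = raw_capacity[cpu];

	/* Scale every cpu relative to the biggest one, which lands on 1024. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d capacity %lu\n", cpu,
		       raw_capacity[cpu] * SCHED_CAPACITY_SCALE / max);

	return 0;
}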
H A Dcpuidle.c56 int cpu = smp_processor_id(); arm_cpuidle_suspend() local
58 if (cpuidle_ops[cpu].suspend) arm_cpuidle_suspend()
59 ret = cpuidle_ops[cpu].suspend(cpu, index); arm_cpuidle_suspend()
86 * @dn: a pointer to a struct device node corresponding to a cpu node
87 * @cpu: the cpu identifier
97 static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu) arm_cpuidle_read_ops() argument
113 cpuidle_ops[cpu] = *ops; /* structure copy */ arm_cpuidle_read_ops()
122 * arm_cpuidle_init() - Initialize cpuidle_ops for a specific cpu
123 * @cpu: the cpu to be initialized
125 * Initialize the cpuidle ops with the device for the cpu and then call
126 * the cpu's idle initialization callback. This may fail if the underlying HW
131 * -ENODEV if it fails to find the cpu node in the device tree,
132 * -EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu,
137 int __init arm_cpuidle_init(int cpu) arm_cpuidle_init() argument
139 struct device_node *cpu_node = of_cpu_device_node_get(cpu); arm_cpuidle_init()
145 ret = arm_cpuidle_read_ops(cpu_node, cpu); arm_cpuidle_init()
146 if (!ret && cpuidle_ops[cpu].init) arm_cpuidle_init()
147 ret = cpuidle_ops[cpu].init(cpu_node, cpu); arm_cpuidle_init()
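The kernel-doc comments above describe the init flow: locate the cpu's device-tree node, find a matching cpuidle_ops entry, then run its init hook, with -ENODEV and -EOPNOTSUPP for the two lookup failures. A skeletal, self-contained rendering of that decision ladder, with the device-tree and ops-registry lookups replaced by stub functions:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct cpuidle_ops {
	const char *method;
	int (*init)(int cpu);
};

static int psci_init(int cpu)
{
	printf("cpuidle init for cpu %d\n", cpu);
	return 0;
}

/* Stand-in registry of enable-methods; the kernel builds this from a table section. */
static struct cpuidle_ops registry[] = { { "psci", psci_init } };

/* Stand-in for reading the cpu node's "enable-method" property. */
static const char *cpu_enable_method(int cpu)
{
	return cpu == 0 ? "psci" : NULL;
}

static int arm_cpuidle_init_sketch(int cpu)
{
	const char *method = cpu_enable_method(cpu);
	size_t i;

	if (!method)
		return -ENODEV;             /* no usable cpu node / property */

	for (i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
		if (!strcmp(registry[i].method, method))
			return registry[i].init(cpu);

	return -EOPNOTSUPP;                 /* no ops registered for this method */
}

int main(void)
{
	printf("cpu0 -> %d, cpu1 -> %d\n",
	       arm_cpuidle_init_sketch(0), arm_cpuidle_init_sketch(1));
	return 0;
}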
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/
H A Dcpu_event_pinned_vs_ebb_test.c18 * Tests a pinned cpu event vs an EBB - in that order. The pinned cpu event
22 static int setup_cpu_event(struct event *event, int cpu) setup_cpu_event() argument
33 FAIL_IF(event_open_with_cpu(event, cpu)); setup_cpu_event()
43 int cpu, rc; cpu_event_pinned_vs_ebb() local
46 cpu = pick_online_cpu(); cpu_event_pinned_vs_ebb()
47 FAIL_IF(cpu < 0); cpu_event_pinned_vs_ebb()
48 FAIL_IF(bind_to_cpu(cpu)); cpu_event_pinned_vs_ebb()
59 /* We setup the cpu event first */ cpu_event_pinned_vs_ebb()
60 rc = setup_cpu_event(&event, cpu); cpu_event_pinned_vs_ebb()
83 /* The cpu event should have run */ cpu_event_pinned_vs_ebb()
H A Dcpu_event_vs_ebb_test.c18 * Tests a cpu event vs an EBB - in that order. The EBB should force the cpu
22 static int setup_cpu_event(struct event *event, int cpu) setup_cpu_event() argument
31 FAIL_IF(event_open_with_cpu(event, cpu)); setup_cpu_event()
41 int cpu, rc; cpu_event_vs_ebb() local
44 cpu = pick_online_cpu(); cpu_event_vs_ebb()
45 FAIL_IF(cpu < 0); cpu_event_vs_ebb()
46 FAIL_IF(bind_to_cpu(cpu)); cpu_event_vs_ebb()
57 /* We setup the cpu event first */ cpu_event_vs_ebb()
58 rc = setup_cpu_event(&event, cpu); cpu_event_vs_ebb()
81 /* The cpu event may have run */ cpu_event_vs_ebb()
H A Debb_vs_cpu_event_test.c18 * Tests an EBB vs a cpu event - in that order. The EBB should force the cpu
22 static int setup_cpu_event(struct event *event, int cpu) setup_cpu_event() argument
31 FAIL_IF(event_open_with_cpu(event, cpu)); setup_cpu_event()
41 int cpu, rc; ebb_vs_cpu_event() local
44 cpu = pick_online_cpu(); ebb_vs_cpu_event()
45 FAIL_IF(cpu < 0); ebb_vs_cpu_event()
46 FAIL_IF(bind_to_cpu(cpu)); ebb_vs_cpu_event()
61 rc = setup_cpu_event(&event, cpu); ebb_vs_cpu_event()
77 /* The cpu event may have run, but we don't expect 100% */ ebb_vs_cpu_event()
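All three selftests above start the same way: pick an online cpu, bind the task to it, and only then open the per-cpu event. pick_online_cpu() and bind_to_cpu() are the selftests' own helpers; a plausible userspace equivalent of the binding step, using the standard affinity API, might look like this:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Roughly what bind_to_cpu() has to do: pin the calling task to one cpu. */
static int bind_to_cpu(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	return sched_setaffinity(0, sizeof(mask), &mask);
}

int main(void)
{
	/* Stand-in for pick_online_cpu(): just take the cpu we are running on. */
	int cpu = sched_getcpu();

	if (cpu < 0 || bind_to_cpu(cpu)) {
		perror("bind_to_cpu");
		return 1;
	}
	printf("bound to cpu %d\n", cpu);
	return 0;
}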
/linux-4.1.27/arch/x86/kernel/apic/
H A Dx2apic_cluster.c7 #include <linux/cpu.h>
21 static inline u32 x2apic_cluster(int cpu) x2apic_cluster() argument
23 return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16; x2apic_cluster()
31 unsigned int cpu, this_cpu; __x2apic_send_IPI_mask() local
51 for_each_cpu(cpu, ipi_mask_ptr) { for_each_cpu()
54 cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu); for_each_cpu()
134 unsigned int cpu; init_x2apic_ldr() local
139 for_each_online_cpu(cpu) { for_each_online_cpu()
140 if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) for_each_online_cpu()
142 cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); for_each_online_cpu()
143 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); for_each_online_cpu()
154 unsigned int cpu; update_clusterinfo() local
171 for_each_online_cpu(cpu) { for_each_online_cpu()
172 if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) for_each_online_cpu()
174 cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); for_each_online_cpu()
175 cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); for_each_online_cpu()
191 int cpu = smp_processor_id(); x2apic_init_cpu_notifier() local
193 zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL); x2apic_init_cpu_notifier()
194 zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL); x2apic_init_cpu_notifier()
196 BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu)); x2apic_init_cpu_notifier()
198 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); x2apic_init_cpu_notifier()
219 static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask, cluster_vector_allocation_domain() argument
224 * etc will use a single cpu for the interrupt destination. cluster_vector_allocation_domain()
228 * derived from the first cpu in the mask) members specified cluster_vector_allocation_domain()
232 cpumask_copy(retmask, cpumask_of(cpu)); cluster_vector_allocation_domain()
234 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu)); cluster_vector_allocation_domain()
/linux-4.1.27/arch/parisc/include/asm/
H A Dsmp.h25 #define cpu_number_map(cpu) (cpu)
26 #define cpu_logical_map(cpu) (cpu)
30 extern void arch_send_call_function_single_ipi(int cpu);
35 #define raw_smp_processor_id() (current_thread_info()->cpu)
48 static inline void __cpu_die (unsigned int cpu) { __cpu_die() argument
/linux-4.1.27/arch/arm/mach-shmobile/
H A Dplatsmp.c22 void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg) shmobile_smp_hook() argument
24 shmobile_smp_fn[cpu] = 0; shmobile_smp_hook()
27 shmobile_smp_mpidr[cpu] = cpu_logical_map(cpu); shmobile_smp_hook()
28 shmobile_smp_fn[cpu] = fn; shmobile_smp_hook()
29 shmobile_smp_arg[cpu] = arg; shmobile_smp_hook()
34 int shmobile_smp_cpu_disable(unsigned int cpu) shmobile_smp_cpu_disable() argument
H A Dplatsmp-apmu.c70 static int apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu)) apmu_wrap() argument
72 void __iomem *p = apmu_cpus[cpu].iomem; apmu_wrap()
74 return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL; apmu_wrap()
77 static void apmu_init_cpu(struct resource *res, int cpu, int bit) apmu_init_cpu() argument
79 if ((cpu >= ARRAY_SIZE(apmu_cpus)) || apmu_cpus[cpu].iomem) apmu_init_cpu()
82 apmu_cpus[cpu].iomem = ioremap_nocache(res->start, resource_size(res)); apmu_init_cpu()
83 apmu_cpus[cpu].bit = bit; apmu_init_cpu()
85 pr_debug("apmu ioremap %d %d %pr\n", cpu, bit, res); apmu_init_cpu()
88 static void apmu_parse_cfg(void (*fn)(struct resource *res, int cpu, int bit), apmu_parse_cfg() argument
128 /* perform per-cpu setup */ shmobile_smp_apmu_prepare_cpus()
133 int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle) shmobile_smp_apmu_boot_secondary() argument
136 shmobile_smp_hook(cpu, virt_to_phys(secondary_startup), 0); shmobile_smp_apmu_boot_secondary()
138 return apmu_wrap(cpu, apmu_power_on); shmobile_smp_apmu_boot_secondary()
173 void shmobile_smp_apmu_cpu_shutdown(unsigned int cpu) shmobile_smp_apmu_cpu_shutdown() argument
177 apmu_wrap(cpu, apmu_power_off); shmobile_smp_apmu_cpu_shutdown()
200 void shmobile_smp_apmu_cpu_die(unsigned int cpu) shmobile_smp_apmu_cpu_die() argument
203 shmobile_smp_hook(cpu, 0, 0); shmobile_smp_apmu_cpu_die()
206 shmobile_smp_apmu_cpu_shutdown(cpu); shmobile_smp_apmu_cpu_die()
212 int shmobile_smp_apmu_cpu_kill(unsigned int cpu) shmobile_smp_apmu_cpu_kill() argument
214 return apmu_wrap(cpu, apmu_power_off_poll); shmobile_smp_apmu_cpu_kill()
219 static int shmobile_smp_apmu_do_suspend(unsigned long cpu) shmobile_smp_apmu_do_suspend() argument
221 shmobile_smp_hook(cpu, virt_to_phys(cpu_resume), 0); shmobile_smp_apmu_do_suspend()
222 shmobile_smp_apmu_cpu_shutdown(cpu); shmobile_smp_apmu_do_suspend()
H A Dplatsmp-scu.c10 #include <linux/cpu.h>
23 unsigned int cpu = (long)hcpu; shmobile_smp_scu_notifier_call() local
28 shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu), shmobile_smp_scu_notifier_call()
55 void shmobile_smp_scu_cpu_die(unsigned int cpu) shmobile_smp_scu_cpu_die() argument
58 shmobile_smp_hook(cpu, 0, 0); shmobile_smp_scu_cpu_die()
70 static int shmobile_smp_scu_psr_core_disabled(int cpu) shmobile_smp_scu_psr_core_disabled() argument
72 unsigned long mask = SCU_PM_POWEROFF << (cpu * 8); shmobile_smp_scu_psr_core_disabled()
80 int shmobile_smp_scu_cpu_kill(unsigned int cpu) shmobile_smp_scu_cpu_kill() argument
89 if (shmobile_smp_scu_psr_core_disabled(cpu)) shmobile_smp_scu_cpu_kill()
/linux-4.1.27/arch/arm/mach-keystone/
H A Dplatsmp.c26 static int keystone_smp_boot_secondary(unsigned int cpu, keystone_smp_boot_secondary() argument
32 pr_debug("keystone-smp: booting cpu %d, vector %08lx\n", keystone_smp_boot_secondary()
33 cpu, start); keystone_smp_boot_secondary()
35 error = keystone_cpu_smc(KEYSTONE_MON_CPU_UP_IDX, cpu, start); keystone_smp_boot_secondary()
37 pr_err("CPU %d bringup failed with %d\n", cpu, error); keystone_smp_boot_secondary()
43 static void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu) keystone_smp_secondary_initmem() argument
50 static inline void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu) keystone_smp_secondary_initmem() argument
/linux-4.1.27/arch/ia64/uv/kernel/
H A Dsetup.c66 int nid, cpu, m_val, n_val; uv_setup() local
101 for_each_present_cpu(cpu) { for_each_present_cpu()
102 nid = cpu_to_node(cpu); for_each_present_cpu()
103 uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; for_each_present_cpu()
104 uv_cpu_hub_info(cpu)->lowmem_remap_top = for_each_present_cpu()
106 uv_cpu_hub_info(cpu)->m_val = m_val; for_each_present_cpu()
107 uv_cpu_hub_info(cpu)->n_val = n_val; for_each_present_cpu()
108 uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) -1; for_each_present_cpu()
109 uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; for_each_present_cpu()
110 uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; for_each_present_cpu()
111 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; for_each_present_cpu()
112 uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */ for_each_present_cpu()
113 printk(KERN_DEBUG "UV cpu %d, nid %d\n", cpu, nid); for_each_present_cpu()
/linux-4.1.27/arch/blackfin/kernel/cplb-nompu/
H A Dcplbmgr.c39 static inline void write_dcplb_data(int cpu, int idx, unsigned long data, write_dcplb_data() argument
48 dcplb_tbl[cpu][idx].addr = addr; write_dcplb_data()
49 dcplb_tbl[cpu][idx].data = data; write_dcplb_data()
53 static inline void write_icplb_data(int cpu, int idx, unsigned long data, write_icplb_data() argument
62 icplb_tbl[cpu][idx].addr = addr; write_icplb_data()
63 icplb_tbl[cpu][idx].data = data; write_icplb_data()
74 static int evict_one_icplb(int cpu) evict_one_icplb() argument
76 int i = first_switched_icplb + icplb_rr_index[cpu]; evict_one_icplb()
79 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb; evict_one_icplb()
81 icplb_rr_index[cpu]++; evict_one_icplb()
85 static int evict_one_dcplb(int cpu) evict_one_dcplb() argument
87 int i = first_switched_dcplb + dcplb_rr_index[cpu]; evict_one_dcplb()
90 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb; evict_one_dcplb()
92 dcplb_rr_index[cpu]++; evict_one_dcplb()
96 MGR_ATTR static int icplb_miss(int cpu) icplb_miss() argument
103 nr_icplb_miss[cpu]++; icplb_miss()
105 nr_icplb_supv_miss[cpu]++; icplb_miss()
136 idx = evict_one_icplb(cpu); icplb_miss()
138 write_icplb_data(cpu, idx, i_data, addr); icplb_miss()
143 MGR_ATTR static int dcplb_miss(int cpu) dcplb_miss() argument
150 nr_dcplb_miss[cpu]++; dcplb_miss()
152 nr_dcplb_supv_miss[cpu]++; dcplb_miss()
208 idx = evict_one_dcplb(cpu); dcplb_miss()
210 write_dcplb_data(cpu, idx, d_data, addr); dcplb_miss()
218 unsigned int cpu = raw_smp_processor_id(); cplb_hdr() local
221 return icplb_miss(cpu); cplb_hdr()
223 return dcplb_miss(cpu); cplb_hdr()
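evict_one_icplb()/evict_one_dcplb() choose a victim CPLB slot with a per-cpu round-robin cursor that only walks the replaceable entries and wraps once it runs past the table. The same victim-selection scheme as a tiny standalone program (table sizes invented):

#include <stdio.h>

#define FIRST_SWITCHED 4        /* slots below this are pinned, never evicted */
#define MAX_ENTRIES    16

static int rr_index;            /* per-cpu in the kernel; one copy is enough here */

static int evict_one(void)
{
	int i = FIRST_SWITCHED + rr_index;

	if (i >= MAX_ENTRIES) {                     /* wrap back into the replaceable range */
		i -= MAX_ENTRIES - FIRST_SWITCHED;
		rr_index -= MAX_ENTRIES - FIRST_SWITCHED;
	}
	rr_index++;
	return i;
}

int main(void)
{
	int n;

	for (n = 0; n < 15; n++)
		printf("victim slot %d\n", evict_one());
	return 0;
}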
/linux-4.1.27/arch/hexagon/kernel/
H A Dsmp.c31 #include <linux/cpu.h>
51 int cpu) __handle_ipi()
96 int cpu = smp_processor_id(); handle_ipi() local
97 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); handle_ipi()
101 __handle_ipi(&ops, ipi, cpu); handle_ipi()
108 unsigned long cpu; send_ipi() local
113 for_each_cpu(cpu, cpumask) { for_each_cpu()
114 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); for_each_cpu()
118 retval = __vmintop_post(BASE_IPI_IRQ+cpu); for_each_cpu()
122 BASE_IPI_IRQ+cpu); for_each_cpu()
147 unsigned int cpu; start_secondary() local
168 cpu = smp_processor_id(); start_secondary()
170 setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc); start_secondary()
175 printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu); start_secondary()
177 notify_cpu_starting(cpu); start_secondary()
179 set_cpu_online(cpu, true); start_secondary()
188 * called once for each present cpu
190 * maintains control until "cpu_online(cpu)" is set.
193 int __cpu_up(unsigned int cpu, struct task_struct *idle) __cpu_up() argument
198 thread->cpu = cpu; __cpu_up()
204 while (!cpu_online(cpu)) __cpu_up()
232 void smp_send_reschedule(int cpu) smp_send_reschedule() argument
234 send_ipi(cpumask_of(cpu), IPI_RESCHEDULE); smp_send_reschedule()
245 void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
247 send_ipi(cpumask_of(cpu), IPI_CALL_FUNC); arch_send_call_function_single_ipi()
50 __handle_ipi(unsigned long *ops, struct ipi_data *ipi, int cpu) __handle_ipi() argument
/linux-4.1.27/drivers/lguest/
H A Dhypercalls.c37 static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) do_hcall() argument
57 kill_guest(cpu, "already have lguest_data"); do_hcall()
68 __lgread(cpu, msg, args->arg1, sizeof(msg)); do_hcall()
70 kill_guest(cpu, "CRASH: %s", msg); do_hcall()
72 cpu->lg->dead = ERR_PTR(-ERESTART); do_hcall()
78 guest_pagetable_clear_all(cpu); do_hcall()
80 guest_pagetable_flush_user(cpu); do_hcall()
88 guest_new_pagetable(cpu, args->arg1); do_hcall()
91 guest_set_stack(cpu, args->arg1, args->arg2, args->arg3); do_hcall()
95 guest_set_pte(cpu, args->arg1, args->arg2, do_hcall()
98 guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3)); do_hcall()
102 guest_set_pgd(cpu->lg, args->arg1, args->arg2); do_hcall()
106 guest_set_pmd(cpu->lg, args->arg1, args->arg2); do_hcall()
110 guest_set_clockevent(cpu, args->arg1); do_hcall()
114 cpu->ts = args->arg1; do_hcall()
118 cpu->halted = 1; do_hcall()
122 if (lguest_arch_do_hcall(cpu, args)) do_hcall()
123 kill_guest(cpu, "Bad hypercall %li\n", args->arg0); do_hcall()
136 static void do_async_hcalls(struct lg_cpu *cpu) do_async_hcalls() argument
142 if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st))) do_async_hcalls()
153 unsigned int n = cpu->next_hcall; do_async_hcalls()
163 if (++cpu->next_hcall == LHCALL_RING_SIZE) do_async_hcalls()
164 cpu->next_hcall = 0; do_async_hcalls()
170 if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n], do_async_hcalls()
172 kill_guest(cpu, "Fetching async hypercalls"); do_async_hcalls()
177 do_hcall(cpu, &args); do_async_hcalls()
180 if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) { do_async_hcalls()
181 kill_guest(cpu, "Writing result for async hypercall"); do_async_hcalls()
189 if (cpu->pending.trap) do_async_hcalls()
198 static void initialize(struct lg_cpu *cpu) initialize() argument
204 if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) { initialize()
205 kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0); initialize()
209 if (lguest_arch_init_hypercalls(cpu)) initialize()
210 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); initialize()
216 if (get_user(cpu->lg->noirq_iret, &cpu->lg->lguest_data->noirq_iret)) initialize()
217 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); initialize()
223 write_timestamp(cpu); initialize()
226 page_table_guest_data_init(cpu); initialize()
234 guest_pagetable_clear_all(cpu); initialize()
256 void do_hypercalls(struct lg_cpu *cpu) do_hypercalls() argument
259 if (unlikely(!cpu->lg->lguest_data)) { do_hypercalls()
261 initialize(cpu); do_hypercalls()
263 cpu->hcall = NULL; do_hypercalls()
272 do_async_hcalls(cpu); do_hypercalls()
279 if (!cpu->pending.trap) { do_hypercalls()
280 do_hcall(cpu, cpu->hcall); do_hypercalls()
293 cpu->hcall = NULL; do_hypercalls()
301 void write_timestamp(struct lg_cpu *cpu) write_timestamp() argument
305 if (copy_to_user(&cpu->lg->lguest_data->time, write_timestamp()
307 kill_guest(cpu, "Writing timestamp"); write_timestamp()
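do_async_hcalls() drains a small fixed-size ring of hypercall slots: starting at next_hcall it runs each slot whose status says it is ready, advances the cursor modulo the ring size, and writes the status back so the guest knows the call completed. A heavily simplified sketch of that consume-in-ring-order idea (the status values and the "hypercall" itself are stand-ins, not lguest's real protocol):

#include <stdio.h>

#define RING_SIZE 4
#define ST_READY  1
#define ST_DONE   0xFF

static int status[RING_SIZE] = { ST_READY, ST_DONE, ST_READY, ST_DONE };
static int args[RING_SIZE]   = { 10, 11, 12, 13 };
static unsigned int next_hcall;

int main(void)
{
	unsigned int k;

	for (k = 0; k < RING_SIZE; k++) {
		unsigned int n = next_hcall;

		if (status[n] != ST_READY)      /* nothing queued in this slot yet */
			break;

		/* Consume slots strictly in ring order. */
		if (++next_hcall == RING_SIZE)
			next_hcall = 0;

		printf("hypercall with arg %d\n", args[n]);   /* stand-in for do_hcall() */
		status[n] = ST_DONE;                          /* tell the guest it completed */
	}
	return 0;
}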
H A Dlg.h24 /* We have two pages shared with guests, per cpu. */
66 int cpu_pgd; /* Which pgd this cpu is currently using */
130 #define lgread(cpu, addr, type) \
131 ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; })
134 #define lgwrite(cpu, addr, type, val) \
137 __lgwrite((cpu), (addr), &(val), sizeof(val)); \
141 int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
154 unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
155 void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
156 void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
157 bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
158 void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
160 void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages);
161 void pin_stack_pages(struct lg_cpu *cpu);
164 void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
166 void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
167 bool send_notify_to_eventfd(struct lg_cpu *cpu);
168 void init_clockdev(struct lg_cpu *cpu);
175 void setup_guest_gdt(struct lg_cpu *cpu);
176 void load_guest_gdt_entry(struct lg_cpu *cpu, unsigned int i,
178 void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array);
179 void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt);
180 void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
185 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
190 void guest_pagetable_clear_all(struct lg_cpu *cpu);
191 void guest_pagetable_flush_user(struct lg_cpu *cpu);
192 void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
194 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
195 bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode,
197 void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
198 bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr);
199 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
200 void page_table_guest_data_init(struct lg_cpu *cpu);
205 void lguest_arch_run_guest(struct lg_cpu *cpu);
206 void lguest_arch_handle_trap(struct lg_cpu *cpu);
207 int lguest_arch_init_hypercalls(struct lg_cpu *cpu);
208 int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args);
209 void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start);
210 unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any);
220 void do_hypercalls(struct lg_cpu *cpu);
221 void write_timestamp(struct lg_cpu *cpu);
247 #define kill_guest(cpu, fmt...) \
249 if (!(cpu)->lg->dead) { \
250 (cpu)->lg->dead = kasprintf(GFP_ATOMIC, fmt); \
251 if (!(cpu)->lg->dead) \
252 (cpu)->lg->dead = ERR_PTR(-ENOMEM); \
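The lgread()/lgwrite() macros above use GCC statement expressions so a call site gets back a properly typed value while sizeof() picks the transfer size automatically. A standalone illustration of that trick against a fake guest-memory buffer (fake_lgread is a plain memcpy stand-in, not lguest's real helper):

#include <stdio.h>
#include <string.h>

/* Fake "guest memory" that values are copied out of. */
static unsigned char guest_mem[64];

static void fake_lgread(void *dst, unsigned long addr, size_t len)
{
	memcpy(dst, guest_mem + addr, len);
}

/*
 * GCC/clang statement expression: declare a temporary of the requested type,
 * fill it from guest memory, and yield it as the macro's value.
 */
#define lgread(addr, type) \
	({ type _v; fake_lgread(&_v, (addr), sizeof(_v)); _v; })

int main(void)
{
	int x = 42;

	memcpy(guest_mem + 8, &x, sizeof(x));
	printf("read back %d\n", lgread(8, int));
	return 0;
}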
/linux-4.1.27/arch/powerpc/platforms/cell/
H A Dpmu.c49 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
50 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
58 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
65 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
74 u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr) cbe_read_phys_ctr() argument
93 void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val) cbe_write_phys_ctr() argument
105 pm_ctrl = cbe_read_pm(cpu, pm_control); cbe_write_phys_ctr()
111 cbe_write_pm(cpu, pm_control, pm_ctrl); cbe_write_phys_ctr()
113 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); cbe_write_phys_ctr()
126 u32 cbe_read_ctr(u32 cpu, u32 ctr) cbe_read_ctr() argument
131 val = cbe_read_phys_ctr(cpu, phys_ctr); cbe_read_ctr()
133 if (cbe_get_ctr_size(cpu, phys_ctr) == 16) cbe_read_ctr()
140 void cbe_write_ctr(u32 cpu, u32 ctr, u32 val) cbe_write_ctr() argument
147 if (cbe_get_ctr_size(cpu, phys_ctr) == 16) { cbe_write_ctr()
148 phys_val = cbe_read_phys_ctr(cpu, phys_ctr); cbe_write_ctr()
156 cbe_write_phys_ctr(cpu, phys_ctr, val); cbe_write_ctr()
165 u32 cbe_read_pm07_control(u32 cpu, u32 ctr) cbe_read_pm07_control() argument
176 void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val) cbe_write_pm07_control() argument
187 u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg) cbe_read_pm() argument
229 void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val) cbe_write_pm() argument
271 u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr) cbe_get_ctr_size() argument
276 pm_ctrl = cbe_read_pm(cpu, pm_control); cbe_get_ctr_size()
284 void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size) cbe_set_ctr_size() argument
289 pm_ctrl = cbe_read_pm(cpu, pm_control); cbe_set_ctr_size()
299 cbe_write_pm(cpu, pm_control, pm_ctrl); cbe_set_ctr_size()
309 void cbe_enable_pm(u32 cpu) cbe_enable_pm() argument
314 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); cbe_enable_pm()
317 pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON; cbe_enable_pm()
318 cbe_write_pm(cpu, pm_control, pm_ctrl); cbe_enable_pm()
322 void cbe_disable_pm(u32 cpu) cbe_disable_pm() argument
325 pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON; cbe_disable_pm()
326 cbe_write_pm(cpu, pm_control, pm_ctrl); cbe_disable_pm()
336 void cbe_read_trace_buffer(u32 cpu, u64 *buf) cbe_read_trace_buffer() argument
338 struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu); cbe_read_trace_buffer()
349 u32 cbe_get_and_clear_pm_interrupts(u32 cpu) cbe_get_and_clear_pm_interrupts() argument
352 return cbe_read_pm(cpu, pm_status); cbe_get_and_clear_pm_interrupts()
356 void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask) cbe_enable_pm_interrupts() argument
359 iic_set_interrupt_routing(cpu, thread, 0); cbe_enable_pm_interrupts()
363 cbe_write_pm(cpu, pm_status, mask); cbe_enable_pm_interrupts()
367 void cbe_disable_pm_interrupts(u32 cpu) cbe_disable_pm_interrupts() argument
369 cbe_get_and_clear_pm_interrupts(cpu); cbe_disable_pm_interrupts()
370 cbe_write_pm(cpu, pm_status, 0); cbe_disable_pm_interrupts()
/linux-4.1.27/arch/x86/kernel/cpu/
H A Dperf_event_amd_uncore.c16 #include <linux/cpu.h>
35 int cpu; member in struct:amd_uncore
67 return *per_cpu_ptr(amd_uncore_nb, event->cpu); event_to_amd_uncore()
69 return *per_cpu_ptr(amd_uncore_l2, event->cpu); event_to_amd_uncore()
202 if (event->cpu < 0) amd_uncore_event_init()
211 * to a single common cpu. amd_uncore_event_init()
213 event->cpu = uncore->cpu; amd_uncore_event_init()
287 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) amd_uncore_alloc() argument
290 cpu_to_node(cpu)); amd_uncore_alloc()
293 static int amd_uncore_cpu_up_prepare(unsigned int cpu) amd_uncore_cpu_up_prepare() argument
298 uncore_nb = amd_uncore_alloc(cpu); amd_uncore_cpu_up_prepare()
301 uncore_nb->cpu = cpu; amd_uncore_cpu_up_prepare()
307 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb; amd_uncore_cpu_up_prepare()
311 uncore_l2 = amd_uncore_alloc(cpu); amd_uncore_cpu_up_prepare()
314 uncore_l2->cpu = cpu; amd_uncore_cpu_up_prepare()
320 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2; amd_uncore_cpu_up_prepare()
334 unsigned int cpu; amd_uncore_find_online_sibling() local
337 for_each_online_cpu(cpu) { for_each_online_cpu()
338 that = *per_cpu_ptr(uncores, cpu); for_each_online_cpu()
357 static void amd_uncore_cpu_starting(unsigned int cpu) amd_uncore_cpu_starting() argument
363 uncore = *per_cpu_ptr(amd_uncore_nb, cpu); amd_uncore_cpu_starting()
368 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore; amd_uncore_cpu_starting()
372 unsigned int apicid = cpu_data(cpu).apicid; amd_uncore_cpu_starting()
375 uncore = *per_cpu_ptr(amd_uncore_l2, cpu); amd_uncore_cpu_starting()
381 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; amd_uncore_cpu_starting()
385 static void uncore_online(unsigned int cpu, uncore_online() argument
388 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); uncore_online()
393 if (cpu == uncore->cpu) uncore_online()
394 cpumask_set_cpu(cpu, uncore->active_mask); uncore_online()
397 static void amd_uncore_cpu_online(unsigned int cpu) amd_uncore_cpu_online() argument
400 uncore_online(cpu, amd_uncore_nb); amd_uncore_cpu_online()
403 uncore_online(cpu, amd_uncore_l2); amd_uncore_cpu_online()
406 static void uncore_down_prepare(unsigned int cpu, uncore_down_prepare() argument
410 struct amd_uncore *this = *per_cpu_ptr(uncores, cpu); uncore_down_prepare()
412 if (this->cpu != cpu) uncore_down_prepare()
415 /* this cpu is going down, migrate to a shared sibling if possible */ for_each_online_cpu()
419 if (cpu == i) for_each_online_cpu()
423 perf_pmu_migrate_context(this->pmu, cpu, i); for_each_online_cpu()
424 cpumask_clear_cpu(cpu, that->active_mask); for_each_online_cpu()
426 that->cpu = i; for_each_online_cpu()
432 static void amd_uncore_cpu_down_prepare(unsigned int cpu) amd_uncore_cpu_down_prepare() argument
435 uncore_down_prepare(cpu, amd_uncore_nb); amd_uncore_cpu_down_prepare()
438 uncore_down_prepare(cpu, amd_uncore_l2); amd_uncore_cpu_down_prepare()
441 static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) uncore_dead() argument
443 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); uncore_dead()
445 if (cpu == uncore->cpu) uncore_dead()
446 cpumask_clear_cpu(cpu, uncore->active_mask); uncore_dead()
450 *per_cpu_ptr(uncores, cpu) = NULL; uncore_dead()
453 static void amd_uncore_cpu_dead(unsigned int cpu) amd_uncore_cpu_dead() argument
456 uncore_dead(cpu, amd_uncore_nb); amd_uncore_cpu_dead()
459 uncore_dead(cpu, amd_uncore_l2); amd_uncore_cpu_dead()
466 unsigned int cpu = (long)hcpu; amd_uncore_cpu_notifier() local
470 if (amd_uncore_cpu_up_prepare(cpu)) amd_uncore_cpu_notifier()
475 amd_uncore_cpu_starting(cpu); amd_uncore_cpu_notifier()
479 amd_uncore_cpu_online(cpu); amd_uncore_cpu_notifier()
483 amd_uncore_cpu_down_prepare(cpu); amd_uncore_cpu_notifier()
488 amd_uncore_cpu_dead(cpu); amd_uncore_cpu_notifier()
505 unsigned int cpu = smp_processor_id(); init_cpu_already_online() local
507 amd_uncore_cpu_starting(cpu); init_cpu_already_online()
508 amd_uncore_cpu_online(cpu); init_cpu_already_online()
513 unsigned int cpu = smp_processor_id(); cleanup_cpu_online() local
515 amd_uncore_cpu_dead(cpu); cleanup_cpu_online()
520 unsigned int cpu, cpu2; amd_uncore_init() local
563 for_each_online_cpu(cpu) { for_each_online_cpu()
564 ret = amd_uncore_cpu_up_prepare(cpu); for_each_online_cpu()
567 smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); for_each_online_cpu()
578 if (cpu2 == cpu) for_each_online_cpu()
580 smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1); for_each_online_cpu()
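uncore_down_prepare() handles hotplug for a counter block shared by several cpus: if the departing cpu is the current owner, it finds another online cpu attached to the same uncore and hands ownership (and the perf context) over. Just the ownership hand-off, as a standalone sketch with plain arrays for the shared structures and an invented topology:

#include <stdio.h>

#define NR_CPUS 4

struct uncore {
	int owner_cpu;                  /* the cpu that currently drives the counters */
};

static struct uncore node_uncore = { .owner_cpu = 1 };

/* Invented topology: all four cpus share the single uncore above. */
static struct uncore *per_cpu_uncore[NR_CPUS] = {
	&node_uncore, &node_uncore, &node_uncore, &node_uncore
};
static int cpu_online_map[NR_CPUS] = { 1, 1, 1, 1 };

static void uncore_down_prepare(int cpu)
{
	struct uncore *uc = per_cpu_uncore[cpu];
	int i;

	if (uc->owner_cpu != cpu)
		return;                 /* another cpu owns it; nothing to migrate */

	for (i = 0; i < NR_CPUS; i++) {
		if (i == cpu || !cpu_online_map[i] || per_cpu_uncore[i] != uc)
			continue;
		uc->owner_cpu = i;      /* hand the shared counters to an online sibling */
		printf("uncore moved from cpu%d to cpu%d\n", cpu, i);
		return;
	}
}

int main(void)
{
	cpu_online_map[1] = 0;          /* cpu1 is about to go offline */
	uncore_down_prepare(1);
	return 0;
}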
/linux-4.1.27/tools/perf/scripts/python/
H A Dnetdev-times.py21 irq_dic = {}; # key is cpu and value is a list which stacks irqs
23 net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
87 cpu = irq_list[0]['cpu']
100 print "%d.%06dsec cpu=%d" % \
101 (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
227 def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
230 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
233 def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
236 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
239 def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
242 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
245 def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
247 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
251 def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
252 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
255 def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
256 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
260 def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
262 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
266 def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
268 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
272 def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
274 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
278 def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
280 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
284 def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
286 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
290 def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
291 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
295 def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
297 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
302 (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
303 if cpu not in irq_dic.keys():
304 irq_dic[cpu] = []
305 irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
306 irq_dic[cpu].append(irq_record)
309 (name, context, cpu, time, pid, comm, irq, ret) = event_info
310 if cpu not in irq_dic.keys():
312 irq_record = irq_dic[cpu].pop()
318 irq_dic[cpu].append(irq_record)
321 (name, context, cpu, time, pid, comm, vec) = event_info
322 if cpu not in irq_dic.keys() \
323 or len(irq_dic[cpu]) == 0:
325 irq_record = irq_dic[cpu].pop()
332 irq_dic[cpu].append(irq_record)
335 (name, context, cpu, time, pid, comm, vec) = event_info
336 net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
339 (name, context, cpu, time, pid, comm, vec) = event_info
342 if cpu in irq_dic.keys():
343 irq_list = irq_dic[cpu]
344 del irq_dic[cpu]
345 if cpu in net_rx_dic.keys():
346 sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
347 event_list = net_rx_dic[cpu]['event_list']
348 del net_rx_dic[cpu]
357 (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
358 if cpu in net_rx_dic.keys():
359 event_list = net_rx_dic[cpu]['event_list']
365 (name, context, cpu, time, pid, comm,
367 if cpu not in irq_dic.keys() \
368 or len(irq_dic[cpu]) == 0:
370 irq_record = irq_dic[cpu].pop()
378 irq_dic[cpu].append(irq_record)
383 (name, context, cpu, time, pid, comm,
385 if cpu in net_rx_dic.keys():
388 event_list = net_rx_dic[cpu]['event_list']
398 (name, context, cpu, time, pid, comm,
409 (name, context, cpu, time, pid, comm,
424 (name, context, cpu, time, pid, comm,
447 (name, context, cpu, time, pid, comm, skbaddr) = event_info
457 (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
/linux-4.1.27/drivers/clk/mvebu/
H A Dclk-cpu.c36 int cpu; member in struct:cpu_clk
56 div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK; clk_cpu_recalc_rate()
85 & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8)))) clk_cpu_off_set_rate()
86 | (div << (cpuclk->cpu * 8)); clk_cpu_off_set_rate()
89 reload_mask = 1 << (20 + cpuclk->cpu); clk_cpu_off_set_rate()
149 return mvebu_pmsu_dfs_request(cpuclk->cpu); clk_cpu_on_set_rate()
185 for_each_node_by_type(dn, "cpu") of_cpu_clk_setup()
196 for_each_node_by_type(dn, "cpu") { of_cpu_clk_setup()
201 int cpu, err; of_cpu_clk_setup() local
206 err = of_property_read_u32(dn, "reg", &cpu); of_cpu_clk_setup()
210 sprintf(clk_name, "cpu%d", cpu); of_cpu_clk_setup()
213 cpuclk[cpu].parent_name = __clk_get_name(parent_clk); of_cpu_clk_setup()
214 cpuclk[cpu].clk_name = clk_name; of_cpu_clk_setup()
215 cpuclk[cpu].cpu = cpu; of_cpu_clk_setup()
216 cpuclk[cpu].reg_base = clock_complex_base; of_cpu_clk_setup()
218 cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu; of_cpu_clk_setup()
219 cpuclk[cpu].hw.init = &init; of_cpu_clk_setup()
221 init.name = cpuclk[cpu].clk_name; of_cpu_clk_setup()
224 init.parent_names = &cpuclk[cpu].parent_name; of_cpu_clk_setup()
227 clk = clk_register(NULL, &cpuclk[cpu].hw); of_cpu_clk_setup()
230 clks[cpu] = clk; of_cpu_clk_setup()
247 CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
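clk-cpu.c treats one 32-bit control register as packed 8-bit per-cpu divider fields: reads shift by cpu*8 and mask, writes clear the old field before or-ing in the new one. The same read-modify-write on a packed register, reduced to a standalone program (the register is just a variable here and its reset value is invented; the 0x3F mask mirrors the snippet's SYS_CTRL_CLK_DIVIDER_MASK):

#include <stdio.h>
#include <stdint.h>

#define CLK_DIVIDER_MASK 0x3F

static uint32_t clk_divider_reg = 0x02020101;   /* invented reset value */

static unsigned int get_div(int cpu)
{
	return (clk_divider_reg >> (cpu * 8)) & CLK_DIVIDER_MASK;
}

static void set_div(int cpu, unsigned int div)
{
	uint32_t reg = clk_divider_reg;

	/* Clear this cpu's 8-bit field, then insert the new divider. */
	reg &= ~((uint32_t)CLK_DIVIDER_MASK << (cpu * 8));
	reg |= (uint32_t)(div & CLK_DIVIDER_MASK) << (cpu * 8);
	clk_divider_reg = reg;
}

int main(void)
{
	set_div(2, 3);
	printf("cpu2 divider is %u\n", get_div(2));
	return 0;
}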
/linux-4.1.27/arch/nios2/kernel/
H A Dcpuinfo.c35 static inline u32 fcpu(struct device_node *cpu, const char *n) fcpu() argument
39 of_property_read_u32(cpu, n, &val); fcpu()
44 static inline u32 fcpu_has(struct device_node *cpu, const char *n) fcpu_has() argument
46 return of_get_property(cpu, n, NULL) ? 1 : 0; fcpu_has()
51 struct device_node *cpu; setup_cpuinfo() local
55 cpu = of_find_node_by_type(NULL, "cpu"); setup_cpuinfo()
56 if (!cpu) setup_cpuinfo()
59 if (!fcpu_has(cpu, "altr,has-initda")) setup_cpuinfo()
64 cpuinfo.cpu_clock_freq = fcpu(cpu, "clock-frequency"); setup_cpuinfo()
66 str = of_get_property(cpu, "altr,implementation", &len); setup_cpuinfo()
72 cpuinfo.has_div = fcpu_has(cpu, "altr,has-div"); setup_cpuinfo()
73 cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul"); setup_cpuinfo()
74 cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx"); setup_cpuinfo()
75 cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu"); setup_cpuinfo()
86 cpuinfo.tlb_num_ways = fcpu(cpu, "altr,tlb-num-ways"); setup_cpuinfo()
90 cpuinfo.icache_line_size = fcpu(cpu, "icache-line-size"); setup_cpuinfo()
91 cpuinfo.icache_size = fcpu(cpu, "icache-size"); setup_cpuinfo()
98 cpuinfo.dcache_line_size = fcpu(cpu, "dcache-line-size"); setup_cpuinfo()
104 cpuinfo.dcache_size = fcpu(cpu, "dcache-size"); setup_cpuinfo()
111 cpuinfo.tlb_pid_num_bits = fcpu(cpu, "altr,pid-num-bits"); setup_cpuinfo()
113 cpuinfo.tlb_num_entries = fcpu(cpu, "altr,tlb-num-entries"); setup_cpuinfo()
115 cpuinfo.tlb_ptr_sz = fcpu(cpu, "altr,tlb-ptr-sz"); setup_cpuinfo()
117 cpuinfo.reset_addr = fcpu(cpu, "altr,reset-addr"); setup_cpuinfo()
118 cpuinfo.exception_addr = fcpu(cpu, "altr,exception-addr"); setup_cpuinfo()
119 cpuinfo.fast_tlb_miss_exc_addr = fcpu(cpu, "altr,fast-tlb-miss-addr"); setup_cpuinfo()
/linux-4.1.27/arch/arm/mach-sunxi/
H A Dplatsmp.c24 #define CPUCFG_CPU_PWR_CLAMP_STATUS_REG(cpu) ((cpu) * 0x40 + 0x64)
25 #define CPUCFG_CPU_RST_CTRL_REG(cpu) (((cpu) + 1) * 0x40)
26 #define CPUCFG_CPU_CTRL_REG(cpu) (((cpu) + 1) * 0x40 + 0x04)
27 #define CPUCFG_CPU_STATUS_REG(cpu) (((cpu) + 1) * 0x40 + 0x08)
35 #define PRCM_CPU_PWR_CLAMP_REG(cpu) (((cpu) * 4) + 0x140)
71 static int sun6i_smp_boot_secondary(unsigned int cpu, sun6i_smp_boot_secondary() argument
87 writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu)); sun6i_smp_boot_secondary()
91 writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG); sun6i_smp_boot_secondary()
95 writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG); sun6i_smp_boot_secondary()
99 writel(0xff >> i, prcm_membase + PRCM_CPU_PWR_CLAMP_REG(cpu)); sun6i_smp_boot_secondary()
104 writel(reg & ~BIT(cpu), prcm_membase + PRCM_CPU_PWROFF_REG); sun6i_smp_boot_secondary()
108 writel(3, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu)); sun6i_smp_boot_secondary()
112 writel(reg | BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG); sun6i_smp_boot_secondary()
/linux-4.1.27/arch/alpha/kernel/
H A Dsmp.c30 #include <linux/cpu.h>
90 * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
124 printk("??, cpu 0x%x already present??\n", cpuid); smp_callin()
150 /* inform the notifiers about the new cpu */ smp_callin()
174 /* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
201 struct percpu_struct *cpu; send_secondary_console_msg() local
206 cpu = (struct percpu_struct *) send_secondary_console_msg()
217 *(unsigned int *)&cpu->ipc_buffer[0] = len; send_secondary_console_msg()
218 cp1 = (char *) &cpu->ipc_buffer[1]; send_secondary_console_msg()
242 struct percpu_struct *cpu; recv_secondary_console_msg() local
255 cpu = (struct percpu_struct *) recv_secondary_console_msg()
262 mycpu, i, cpu->halt_reason, cpu->flags)); recv_secondary_console_msg()
264 cnt = cpu->ipc_buffer[0] >> 32; recv_secondary_console_msg()
268 cp1 = (char *) &cpu->ipc_buffer[1]; recv_secondary_console_msg()
288 * Convince the console to have a secondary cpu begin execution.
293 struct percpu_struct *cpu; secondary_cpu_start() local
297 cpu = (struct percpu_struct *) secondary_cpu_start()
301 hwpcb = (struct pcb_struct *) cpu->hwpcb; secondary_cpu_start()
321 DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n", secondary_cpu_start()
336 cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */ secondary_cpu_start()
337 cpu->flags &= ~1; /* turn off Bootstrap In Progress */ secondary_cpu_start()
345 if (cpu->flags & 1) secondary_cpu_start()
359 * Bring one cpu online.
404 struct percpu_struct *cpubase, *cpu; setup_smp() local
408 printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n", setup_smp()
423 cpu = (struct percpu_struct *) setup_smp()
425 if ((cpu->flags & 0x1cc) == 0x1cc) { setup_smp()
429 cpu->pal_revision = boot_cpu_palrev; setup_smp()
433 i, cpu->flags, cpu->type)); setup_smp()
435 i, cpu->pal_revision)); setup_smp()
454 current_thread_info()->cpu = boot_cpuid; smp_prepare_cpus()
478 __cpu_up(unsigned int cpu, struct task_struct *tidle) __cpu_up() argument
480 smp_boot_one_cpu(cpu, tidle); __cpu_up()
482 return cpu_online(cpu) ? 0 : -ENOSYS; __cpu_up()
488 int cpu; smp_cpus_done() local
491 for(cpu = 0; cpu < NR_CPUS; cpu++) smp_cpus_done()
492 if (cpu_online(cpu)) smp_cpus_done()
493 bogosum += cpu_data[cpu].loops_per_jiffy; smp_cpus_done()
573 smp_send_reschedule(int cpu) smp_send_reschedule() argument
576 if (cpu == hard_smp_processor_id()) smp_send_reschedule()
580 send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); smp_send_reschedule()
591 printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n"); smp_send_stop()
601 void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
603 send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); arch_send_call_function_single_ipi()
657 int cpu, this_cpu = smp_processor_id(); flush_tlb_mm() local
658 for (cpu = 0; cpu < NR_CPUS; cpu++) { flush_tlb_mm()
659 if (!cpu_online(cpu) || cpu == this_cpu) flush_tlb_mm()
661 if (mm->context[cpu]) flush_tlb_mm()
662 mm->context[cpu] = 0; flush_tlb_mm()
706 int cpu, this_cpu = smp_processor_id(); flush_tlb_page() local
707 for (cpu = 0; cpu < NR_CPUS; cpu++) { flush_tlb_page()
708 if (!cpu_online(cpu) || cpu == this_cpu) flush_tlb_page()
710 if (mm->context[cpu]) flush_tlb_page()
711 mm->context[cpu] = 0; flush_tlb_page()
762 int cpu, this_cpu = smp_processor_id(); flush_icache_user_range() local
763 for (cpu = 0; cpu < NR_CPUS; cpu++) { flush_icache_user_range()
764 if (!cpu_online(cpu) || cpu == this_cpu) flush_icache_user_range()
766 if (mm->context[cpu]) flush_icache_user_range()
767 mm->context[cpu] = 0; flush_icache_user_range()
/linux-4.1.27/arch/tile/kernel/
H A Dsmpboot.c23 #include <linux/cpu.h>
38 /* Called very early during startup to mark boot cpu as online */ smp_prepare_boot_cpu()
41 int cpu = smp_processor_id(); smp_prepare_boot_cpu() local
42 set_cpu_online(cpu, 1); smp_prepare_boot_cpu()
43 set_cpu_present(cpu, 1); smp_prepare_boot_cpu()
54 * until they get an IPI from the boot cpu to come online.
59 int cpu, cpu_count; smp_prepare_cpus() local
62 current_thread_info()->cpu = boot_cpu; smp_prepare_cpus()
70 pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc); smp_prepare_cpus()
85 for (cpu = 0; cpu < NR_CPUS; ++cpu) { smp_prepare_cpus()
88 if (cpu == boot_cpu) smp_prepare_cpus()
91 if (!cpu_possible(cpu)) { smp_prepare_cpus()
97 per_cpu(boot_sp, cpu) = 0; smp_prepare_cpus()
98 per_cpu(boot_pc, cpu) = (unsigned long) smp_nap; smp_prepare_cpus()
103 idle = fork_idle(cpu); smp_prepare_cpus()
105 panic("failed fork for CPU %d", cpu); smp_prepare_cpus()
109 per_cpu(boot_sp, cpu) = task_ksp0(idle); smp_prepare_cpus()
110 per_cpu(boot_pc, cpu) = idle->thread.pc; smp_prepare_cpus()
205 /* Set up tile-specific state for this cpu. */ online_secondary()
208 /* Set up tile-timer clock-event device on this cpu */ online_secondary()
214 int __cpu_up(unsigned int cpu, struct task_struct *tidle) __cpu_up() argument
218 for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) { __cpu_up()
220 pr_info("skipping unresponsive cpu%d\n", cpu); __cpu_up()
228 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; __cpu_up()
231 send_IPI_single(cpu, MSG_TAG_START_CPU); __cpu_up()
232 while (!cpumask_test_cpu(cpu, cpu_online_mask)) __cpu_up()
244 int cpu, next, rc; smp_cpus_done() local
252 * Pin ourselves to a single cpu in the initial affinity set smp_cpus_done()
255 * Use the last cpu just in case the whole chip has been smp_cpus_done()
259 * on this cpu. smp_cpus_done()
261 for (cpu = cpumask_first(&init_affinity); smp_cpus_done()
262 (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids; smp_cpus_done()
263 cpu = next) smp_cpus_done()
265 rc = sched_setaffinity(current->pid, cpumask_of(cpu)); smp_cpus_done()
267 pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc); smp_cpus_done()
/linux-4.1.27/arch/arm/mach-vexpress/
H A Dtc2_pm.c36 #define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu)))
37 #define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu)))
51 static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster) tc2_pm_cpu_powerup() argument
53 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); tc2_pm_cpu_powerup()
54 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) tc2_pm_cpu_powerup()
56 ve_spc_set_resume_addr(cluster, cpu, tc2_pm_cpu_powerup()
58 ve_spc_cpu_wakeup_irq(cluster, cpu, true); tc2_pm_cpu_powerup()
71 static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) tc2_pm_cpu_powerdown_prepare() argument
73 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); tc2_pm_cpu_powerdown_prepare()
74 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); tc2_pm_cpu_powerdown_prepare()
75 ve_spc_cpu_wakeup_irq(cluster, cpu, true); tc2_pm_cpu_powerdown_prepare()
117 static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster) tc2_core_in_reset() argument
120 RESET_A7_NCORERESET(cpu) tc2_core_in_reset()
121 : RESET_A15_NCORERESET(cpu); tc2_core_in_reset()
129 static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster) tc2_pm_wait_for_powerdown() argument
133 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); tc2_pm_wait_for_powerdown()
134 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); tc2_pm_wait_for_powerdown()
137 pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n", tc2_pm_wait_for_powerdown()
138 __func__, cpu, cluster, tc2_pm_wait_for_powerdown()
149 if (tc2_core_in_reset(cpu, cluster) || tc2_pm_wait_for_powerdown()
150 ve_spc_cpu_in_wfi(cpu, cluster)) tc2_pm_wait_for_powerdown()
160 static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster) tc2_pm_cpu_suspend_prepare() argument
162 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point)); tc2_pm_cpu_suspend_prepare()
165 static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster) tc2_pm_cpu_is_up() argument
167 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); tc2_pm_cpu_is_up()
168 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); tc2_pm_cpu_is_up()
169 ve_spc_cpu_wakeup_irq(cluster, cpu, false); tc2_pm_cpu_is_up()
170 ve_spc_set_resume_addr(cluster, cpu, 0); tc2_pm_cpu_is_up()
207 unsigned int mpidr, cpu, cluster; tc2_pm_init() local
248 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); tc2_pm_init()
250 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); tc2_pm_init()
251 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) { tc2_pm_init()
/linux-4.1.27/arch/s390/kernel/
H A Dsmp.c13 * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * the translation of logical to physical cpu ids. All new code that
15 * operates on physical cpu numbers needs to go into smp.c.
18 #define KMSG_COMPONENT "cpu"
31 #include <linux/cpu.h>
62 static DEFINE_PER_CPU(struct cpu *, cpu_device);
65 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
67 signed char state; /* physical cpu state */
69 u16 address; /* physical cpu address */
148 /* Status stored condition code is equivalent to cpu not running. */ pcpu_running()
153 * Find struct pcpu by cpu address.
157 int cpu; pcpu_find_address() local
159 for_each_cpu(cpu, mask) pcpu_find_address()
160 if (pcpu_devices[cpu].address == address) pcpu_find_address()
161 return pcpu_devices + cpu; pcpu_find_address()
178 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) pcpu_alloc_lowcore() argument
199 lc->cpu_nr = cpu; pcpu_alloc_lowcore()
200 lc->spinlock_lockval = arch_spin_lockval(cpu); pcpu_alloc_lowcore()
206 lowcore_ptr[cpu] = lc; pcpu_alloc_lowcore()
234 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) pcpu_prepare_secondary() argument
239 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask); pcpu_prepare_secondary()
240 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); pcpu_prepare_secondary()
242 lc->cpu_nr = cpu; pcpu_prepare_secondary()
243 lc->spinlock_lockval = arch_spin_lockval(cpu); pcpu_prepare_secondary()
244 lc->percpu_offset = __per_cpu_offset[cpu]; pcpu_prepare_secondary()
280 * Call function via PSW restart on pcpu and stop the current cpu.
291 /* Stop target cpu (if func returns this stops the current cpu). */ pcpu_delegate()
293 /* Restart func on the target cpu and stop the current cpu. */ pcpu_delegate()
299 "0: sigp 0,%0,%2 # sigp restart to target cpu\n" pcpu_delegate()
301 "1: sigp 0,%1,%3 # sigp stop to current cpu\n" pcpu_delegate()
342 /* Use the current cpu if it is online. */ smp_call_online_cpu()
345 /* Use the first online cpu. */ smp_call_online_cpu()
362 int cpu; smp_find_processor_id() local
364 for_each_present_cpu(cpu) smp_find_processor_id()
365 if (pcpu_devices[cpu].address == address) smp_find_processor_id()
366 return cpu; smp_find_processor_id()
370 int smp_vcpu_scheduled(int cpu) smp_vcpu_scheduled() argument
372 return pcpu_running(pcpu_devices + cpu); smp_vcpu_scheduled()
375 void smp_yield_cpu(int cpu) smp_yield_cpu() argument
379 : : "d" (pcpu_devices[cpu].address)); smp_yield_cpu()
391 int cpu; smp_emergency_stop() local
394 for_each_cpu(cpu, cpumask) { for_each_cpu()
395 struct pcpu *pcpu = pcpu_devices + cpu; for_each_cpu()
403 for_each_cpu(cpu, cpumask)
404 if (pcpu_stopped(pcpu_devices + cpu))
405 cpumask_clear_cpu(cpu, cpumask);
418 int cpu; smp_send_stop() local
432 for_each_cpu(cpu, &cpumask) { smp_send_stop()
433 struct pcpu *pcpu = pcpu_devices + cpu; smp_send_stop()
467 int cpu; arch_send_call_function_ipi_mask() local
469 for_each_cpu(cpu, mask) arch_send_call_function_ipi_mask()
470 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); arch_send_call_function_ipi_mask()
473 void arch_send_call_function_single_ipi(int cpu) arch_send_call_function_single_ipi() argument
475 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); arch_send_call_function_single_ipi()
483 void smp_send_reschedule(int cpu) smp_send_reschedule() argument
485 pcpu_ec_call(pcpu_devices + cpu, ec_schedule); smp_send_reschedule()
534 static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu) __smp_store_cpu_state() argument
540 sa_ext = dump_save_area_create(cpu); __smp_store_cpu_state()
551 /* Get the registers of a non-boot cpu. */ __smp_store_cpu_state()
594 unsigned int cpu, address, i, j; smp_store_cpu_states() local
606 cpu = 0; smp_store_cpu_states()
609 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) smp_store_cpu_states()
611 for (j = 0; j <= smp_cpu_mtid; j++, cpu++) { smp_store_cpu_states()
612 address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j; smp_store_cpu_states()
618 __smp_store_cpu_state(cpu, address, is_boot_cpu); smp_store_cpu_states()
623 int smp_store_status(int cpu) smp_store_status() argument
628 pcpu = pcpu_devices + cpu; smp_store_status()
642 void smp_cpu_set_polarization(int cpu, int val) smp_cpu_set_polarization() argument
644 pcpu_devices[cpu].polarization = val; smp_cpu_set_polarization()
647 int smp_cpu_get_polarization(int cpu) smp_cpu_get_polarization() argument
649 return pcpu_devices[cpu].polarization; smp_cpu_get_polarization()
666 info->cpu[info->configured].core_id = smp_get_cpu_info()
675 static int smp_add_present_cpu(int cpu);
681 int cpu, nr, i, j; __smp_rescan_cpus() local
686 cpu = cpumask_first(&avail); __smp_rescan_cpus()
687 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { __smp_rescan_cpus()
688 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) __smp_rescan_cpus()
690 address = info->cpu[i].core_id << smp_cpu_mt_shift; __smp_rescan_cpus()
694 pcpu = pcpu_devices + cpu; __smp_rescan_cpus()
697 (cpu >= info->configured*(smp_cpu_mtid + 1)) ? __smp_rescan_cpus()
699 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); __smp_rescan_cpus()
700 set_cpu_present(cpu, true); __smp_rescan_cpus()
701 if (sysfs_add && smp_add_present_cpu(cpu) != 0) __smp_rescan_cpus()
702 set_cpu_present(cpu, false); __smp_rescan_cpus()
705 cpu = cpumask_next(cpu, &avail); __smp_rescan_cpus()
706 if (cpu >= nr_cpu_ids) __smp_rescan_cpus()
715 unsigned int cpu, mtid, c_cpus, s_cpus; smp_detect_cpus() local
727 for (cpu = 0; cpu < info->combined; cpu++) smp_detect_cpus()
728 if (info->cpu[cpu].core_id == address) { smp_detect_cpus()
729 /* The boot cpu dictates the cpu type. */ smp_detect_cpus()
730 boot_cpu_type = info->cpu[cpu].type; smp_detect_cpus()
733 if (cpu >= info->combined) smp_detect_cpus()
749 for (cpu = 0; cpu < info->combined; cpu++) { smp_detect_cpus()
750 if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) smp_detect_cpus()
752 if (cpu < info->configured) smp_detect_cpus()
792 int __cpu_up(unsigned int cpu, struct task_struct *tidle) __cpu_up() argument
797 pcpu = pcpu_devices + cpu; __cpu_up()
800 base = cpu - (cpu % (smp_cpu_mtid + 1)); __cpu_up()
815 rc = pcpu_alloc_lowcore(pcpu, cpu); __cpu_up()
818 pcpu_prepare_secondary(pcpu, cpu); __cpu_up()
821 /* Wait until cpu puts itself in the online & active maps */ __cpu_up()
822 while (!cpu_online(cpu) || !cpu_active(cpu)) __cpu_up()
845 /* Disable pseudo page faults on this cpu. */ __cpu_disable()
857 void __cpu_die(unsigned int cpu) __cpu_die() argument
861 /* Wait until target cpu is down */ __cpu_die()
862 pcpu = pcpu_devices + cpu; __cpu_die()
867 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); __cpu_die()
869 cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask); __cpu_die()
883 unsigned int possible, sclp, cpu; smp_fill_possible_mask() local
889 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) smp_fill_possible_mask()
890 set_cpu_possible(cpu, true); smp_fill_possible_mask()
955 int cpu, val, rc, i; cpu_configure_store() local
965 /* disallow configuration changes of online cpus and cpu 0 */ cpu_configure_store()
966 cpu = dev->id; cpu_configure_store()
967 cpu -= cpu % (smp_cpu_mtid + 1); cpu_configure_store()
968 if (cpu == 0) cpu_configure_store()
971 if (cpu_online(cpu + i)) cpu_configure_store()
973 pcpu = pcpu_devices + cpu; cpu_configure_store()
983 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) cpu_configure_store()
986 smp_cpu_set_polarization(cpu + i, cpu_configure_store()
998 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) cpu_configure_store()
1001 smp_cpu_set_polarization(cpu + i, cpu_configure_store()
1049 unsigned int cpu = (unsigned int)(long)hcpu; smp_cpu_notify() local
1050 struct device *s = &per_cpu(cpu_device, cpu)->dev; smp_cpu_notify()
1064 static int smp_add_present_cpu(int cpu) smp_add_present_cpu() argument
1067 struct cpu *c; smp_add_present_cpu()
1073 per_cpu(cpu_device, cpu) = c; smp_add_present_cpu()
1076 rc = register_cpu(c, cpu); smp_add_present_cpu()
1082 if (cpu_online(cpu)) { smp_add_present_cpu()
1093 if (cpu_online(cpu)) smp_add_present_cpu()
1141 int cpu, rc = 0; s390_smp_init() local
1149 for_each_present_cpu(cpu) { for_each_present_cpu()
1150 rc = smp_add_present_cpu(cpu); for_each_present_cpu()
H A Dtopology.c6 #define KMSG_COMPONENT "cpu"
18 #include <linux/cpu.h>
48 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) cpu_group_map() argument
52 cpumask_copy(&mask, cpumask_of(cpu)); cpu_group_map()
56 if (cpumask_test_cpu(cpu, &info->mask)) cpu_group_map()
62 static cpumask_t cpu_thread_map(unsigned int cpu) cpu_thread_map() argument
67 cpumask_copy(&mask, cpumask_of(cpu)); cpu_thread_map()
70 cpu -= cpu % (smp_cpu_mtid + 1); cpu_thread_map()
72 if (cpu_present(cpu + i)) cpu_thread_map()
73 cpumask_set_cpu(cpu + i, &mask); cpu_thread_map()
152 add_cpus_to_mask(&tle->cpu, book, socket, 0); __tl_to_masks_generic()
177 socket = add_cpus_to_mask(&tle->cpu, book, socket, 1); __tl_to_masks_z10()
207 int cpu; topology_update_polarization_simple() local
210 for_each_possible_cpu(cpu) topology_update_polarization_simple()
211 smp_cpu_set_polarization(cpu, POLARIZATION_HRZ); topology_update_polarization_simple()
230 int cpu, rc; topology_set_cpu_management() local
240 for_each_possible_cpu(cpu) topology_set_cpu_management()
241 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); topology_set_cpu_management()
248 int cpu; update_cpu_masks() local
251 for_each_possible_cpu(cpu) { for_each_possible_cpu()
252 per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); for_each_possible_cpu()
253 per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); for_each_possible_cpu()
254 per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu); for_each_possible_cpu()
256 per_cpu(cpu_topology, cpu).thread_id = cpu; for_each_possible_cpu()
257 per_cpu(cpu_topology, cpu).core_id = cpu; for_each_possible_cpu()
258 per_cpu(cpu_topology, cpu).socket_id = cpu; for_each_possible_cpu()
259 per_cpu(cpu_topology, cpu).book_id = cpu; for_each_possible_cpu()
277 int cpu; arch_update_cpu_topology() local
287 for_each_online_cpu(cpu) { for_each_online_cpu()
288 dev = get_cpu_device(cpu); for_each_online_cpu()
384 int cpu = dev->id; cpu_polarization_show() local
388 switch (smp_cpu_get_polarization(cpu)) { cpu_polarization_show()
419 int topology_cpu_init(struct cpu *cpu) topology_cpu_init() argument
421 return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); topology_cpu_init()
424 static const struct cpumask *cpu_thread_mask(int cpu) cpu_thread_mask() argument
426 return &per_cpu(cpu_topology, cpu).thread_mask; cpu_thread_mask()
430 const struct cpumask *cpu_coregroup_mask(int cpu) cpu_coregroup_mask() argument
432 return &per_cpu(cpu_topology, cpu).core_mask; cpu_coregroup_mask()
435 static const struct cpumask *cpu_book_mask(int cpu) cpu_book_mask() argument
437 return &per_cpu(cpu_topology, cpu).book_mask; cpu_book_mask()
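
The update_cpu_masks() matches above fill the per-cpu thread/core/book masks that back the topology accessors used by the scheduler. Userspace sees the same core/package layout under sysfs; a small sketch that reads it back for one cpu (the /sys/devices/system/cpu/cpuN/topology/ paths are the generic Linux attributes, an assumption here rather than something shown in the snippet; the s390 book level is only present where the machine reports it):

    #include <stdio.h>

    /* Read a single integer topology attribute for a cpu, -1 on failure. */
    static int read_topology_id(int cpu, const char *name)
    {
            char path[128];
            FILE *fp;
            int val = -1;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, name);
            fp = fopen(path, "r");
            if (fp) {
                    if (fscanf(fp, "%d", &val) != 1)
                            val = -1;
                    fclose(fp);
            }
            return val;
    }

    int main(void)
    {
            printf("cpu0: package %d core %d\n",
                   read_topology_id(0, "physical_package_id"),
                   read_topology_id(0, "core_id"));
            return 0;
    }
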
/linux-4.1.27/fs/pstore/
H A Dinternal.h18 unsigned int cpu; member in struct:pstore_ftrace_record
23 pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) pstore_ftrace_encode_cpu() argument
26 rec->cpu = cpu; pstore_ftrace_encode_cpu()
28 rec->ip |= cpu; pstore_ftrace_encode_cpu()
36 return rec->cpu; pstore_ftrace_decode_cpu()
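
pstore_ftrace_encode_cpu()/pstore_ftrace_decode_cpu() above either keep the cpu in a field of its own or fold it into the low bits of the record's instruction pointer. A minimal sketch of that bit-packing idea in plain C (CPU_BITS, pack_cpu and unpack_cpu are illustrative names and widths, not the kernel's actual layout):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CPU_BITS 2                      /* assumed width, enough for 4 cpus */
    #define CPU_MASK ((1ULL << CPU_BITS) - 1)

    /* Fold a small cpu number into the low bits of an aligned address. */
    static uint64_t pack_cpu(uint64_t ip, unsigned int cpu)
    {
            assert((ip & CPU_MASK) == 0);   /* ip must have the low bits free */
            return ip | (cpu & CPU_MASK);
    }

    static unsigned int unpack_cpu(uint64_t packed)
    {
            return packed & CPU_MASK;
    }

    int main(void)
    {
            uint64_t packed = pack_cpu(0xffffffff81000000ULL, 3);

            printf("cpu=%u ip=0x%llx\n", unpack_cpu(packed),
                   (unsigned long long)(packed & ~CPU_MASK));
            return 0;
    }
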
/linux-4.1.27/fs/squashfs/
H A Ddecompressor_multi_percpu.c21 * variables, one thread per cpu core.
33 int err, cpu; squashfs_decompressor_create() local
39 for_each_possible_cpu(cpu) { for_each_possible_cpu()
40 stream = per_cpu_ptr(percpu, cpu); for_each_possible_cpu()
52 for_each_possible_cpu(cpu) { for_each_possible_cpu()
53 stream = per_cpu_ptr(percpu, cpu); for_each_possible_cpu()
66 int cpu; squashfs_decompressor_destroy() local
69 for_each_possible_cpu(cpu) { for_each_possible_cpu()
70 stream = per_cpu_ptr(percpu, cpu); for_each_possible_cpu()
/linux-4.1.27/arch/blackfin/mach-bf561/
H A Dsmp.c51 void platform_secondary_init(unsigned int cpu) platform_secondary_init() argument
76 int platform_boot_secondary(unsigned int cpu, struct task_struct *idle) platform_boot_secondary() argument
86 smp_send_reschedule(cpu); platform_boot_secondary()
97 if (cpu_online(cpu)) platform_boot_secondary()
103 if (cpu_online(cpu)) { platform_boot_secondary()
106 panic("CPU%u: processor failed to boot\n", cpu); platform_boot_secondary()
124 unsigned int cpu; platform_send_ipi() local
127 for_each_cpu(cpu, &callmap) { platform_send_ipi()
128 BUG_ON(cpu >= 2); platform_send_ipi()
130 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu))); platform_send_ipi()
135 void platform_send_ipi_cpu(unsigned int cpu, int irq) platform_send_ipi_cpu() argument
138 BUG_ON(cpu >= 2); platform_send_ipi_cpu()
140 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu))); platform_send_ipi_cpu()
144 void platform_clear_ipi(unsigned int cpu, int irq) platform_clear_ipi() argument
147 BUG_ON(cpu >= 2); platform_clear_ipi()
149 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu))); platform_clear_ipi()
/linux-4.1.27/arch/arm/mach-prima2/
H A Dhotplug.c15 static inline void platform_do_lowpower(unsigned int cpu) platform_do_lowpower() argument
21 if (pen_release == cpu_logical_map(cpu)) { platform_do_lowpower()
35 void __ref sirfsoc_cpu_die(unsigned int cpu) sirfsoc_cpu_die() argument
37 platform_do_lowpower(cpu); sirfsoc_cpu_die()
/linux-4.1.27/scripts/gdb/linux/
H A Dcpus.py4 # per-cpu tools
30 return tasks.get_thread_info(tasks.get_task_by_pid(tid))['cpu']
36 def per_cpu(var_ptr, cpu):
37 if cpu == -1:
38 cpu = get_current_cpu()
41 "trap_block[{0}].__per_cpu_base".format(str(cpu)))
45 "__per_cpu_offset[{0}]".format(str(cpu)))
95 cpu = entry * bits_per_entry + bit
100 yield cpu
104 """Return per-cpu variable.
106 $lx_per_cpu("VAR"[, CPU]): Return the per-cpu variable called VAR for the
113 def invoke(self, var_name, cpu=-1):
115 return per_cpu(var_ptr, cpu)
124 $lx_current([CPU]): Return the per-cpu task variable for the given CPU
130 def invoke(self, cpu=-1):
132 return per_cpu(var_ptr, cpu).dereference()
/linux-4.1.27/fs/proc/
H A Dstat.c16 #define arch_irq_stat_cpu(cpu) 0
24 static cputime64_t get_idle_time(int cpu) get_idle_time() argument
28 idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; get_idle_time()
29 if (cpu_online(cpu) && !nr_iowait_cpu(cpu)) get_idle_time()
30 idle += arch_idle_time(cpu); get_idle_time()
34 static cputime64_t get_iowait_time(int cpu) get_iowait_time() argument
38 iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; get_iowait_time()
39 if (cpu_online(cpu) && nr_iowait_cpu(cpu)) get_iowait_time()
40 iowait += arch_idle_time(cpu); get_iowait_time()
46 static u64 get_idle_time(int cpu) get_idle_time() argument
50 if (cpu_online(cpu)) get_idle_time()
51 idle_time = get_cpu_idle_time_us(cpu, NULL); get_idle_time()
54 /* !NO_HZ or cpu offline so we can rely on cpustat.idle */ get_idle_time()
55 idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; get_idle_time()
62 static u64 get_iowait_time(int cpu) get_iowait_time() argument
66 if (cpu_online(cpu)) get_iowait_time()
67 iowait_time = get_cpu_iowait_time_us(cpu, NULL); get_iowait_time()
70 /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */ get_iowait_time()
71 iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; get_iowait_time()
120 seq_puts(p, "cpu ");
145 seq_printf(p, "cpu%d", i); for_each_online_cpu()
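
The stat.c matches above produce the aggregate "cpu " line and the per-cpu "cpuN" lines of /proc/stat, taking idle/iowait either from cpustat or from the NO_HZ accounting. A small userspace sketch that reads the per-cpu lines back (field order as documented in proc(5): user, nice, system, idle, iowait, ...):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            FILE *fp = fopen("/proc/stat", "r");
            char line[512];

            if (!fp)
                    return 1;
            while (fgets(line, sizeof(line), fp)) {
                    unsigned int cpu;
                    unsigned long long user, nice, system, idle, iowait;

                    /* Only the per-cpu lines ("cpuN ..."), not the "cpu " total. */
                    if (strncmp(line, "cpu", 3) || !isdigit((unsigned char)line[3]))
                            continue;
                    if (sscanf(line, "cpu%u %llu %llu %llu %llu %llu",
                               &cpu, &user, &nice, &system, &idle, &iowait) == 6)
                            printf("cpu%u idle=%llu iowait=%llu\n", cpu, idle, iowait);
            }
            fclose(fp);
            return 0;
    }
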
/linux-4.1.27/arch/mips/sibyte/bcm1480/
H A Dirq.c55 void bcm1480_mask_irq(int cpu, int irq) bcm1480_mask_irq() argument
66 cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing)); bcm1480_mask_irq()
68 ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing)); bcm1480_mask_irq()
72 void bcm1480_unmask_irq(int cpu, int irq) bcm1480_unmask_irq() argument
83 cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing)); bcm1480_unmask_irq()
85 ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing)); bcm1480_unmask_irq()
94 int i = 0, old_cpu, cpu, int_on, k; bcm1480_set_affinity() local
101 cpu = cpu_logical_map(i); bcm1480_set_affinity()
121 bcm1480_irq_owner[irq] = cpu; bcm1480_set_affinity()
124 cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING))); bcm1480_set_affinity()
126 ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING))); bcm1480_set_affinity()
235 * PCI and LDT to one cpu and everything else to the other
238 * On the second cpu, everything is set to IP5, which is
241 * can do cross-cpu function calls, as required by SMP
252 unsigned int i, cpu; arch_init_irq() local
260 for (cpu = 0; cpu < 4; cpu++) { arch_init_irq()
262 IOADDR(A_BCM1480_IMR_REGISTER(cpu, arch_init_irq()
269 for (cpu = 0; cpu < 4; cpu++) { arch_init_irq()
271 IOADDR(A_BCM1480_IMR_REGISTER(cpu, arch_init_irq()
280 * inter-cpu messages arch_init_irq()
283 for (cpu = 0; cpu < 4; cpu++) { arch_init_irq()
284 __raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + arch_init_irq()
290 for (cpu = 0; cpu < 4; cpu++) { arch_init_irq()
292 IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU))); arch_init_irq()
294 IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_1_CLR_CPU))); arch_init_irq()
300 for (cpu = 0; cpu < 4; cpu++) { arch_init_irq()
301 __raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_H))); arch_init_irq()
304 for (cpu = 0; cpu < 4; cpu++) { arch_init_irq()
305 __raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_L))); arch_init_irq()
323 unsigned int cpu = smp_processor_id(); dispatch_ip2() local
331 base = A_BCM1480_IMR_MAPPER(cpu); dispatch_ip2()
347 unsigned int cpu = smp_processor_id(); plat_irq_dispatch() local
353 do_IRQ(K_BCM1480_INT_TIMER_0 + cpu); plat_irq_dispatch()
/linux-4.1.27/arch/sparc/kernel/
H A Dnmi.c57 int cpu; touch_nmi_watchdog() local
59 for_each_present_cpu(cpu) { for_each_present_cpu()
60 if (per_cpu(nmi_touch, cpu) != 1) for_each_present_cpu()
61 per_cpu(nmi_touch, cpu) = 1; for_each_present_cpu()
78 panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); die_nmi()
80 WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); die_nmi()
126 static inline unsigned int get_nmi_count(int cpu) get_nmi_count() argument
128 return cpu_data(cpu).__nmi_count; get_nmi_count()
137 static void report_broken_nmi(int cpu, int *prev_nmi_count) report_broken_nmi() argument
143 cpu, prev_nmi_count[cpu], get_nmi_count(cpu)); report_broken_nmi()
150 per_cpu(wd_enabled, cpu) = 0; report_broken_nmi()
164 int cpu, err; check_nmi_watchdog() local
179 for_each_possible_cpu(cpu) check_nmi_watchdog()
180 prev_nmi_count[cpu] = get_nmi_count(cpu); check_nmi_watchdog()
184 for_each_online_cpu(cpu) { for_each_online_cpu()
185 if (!per_cpu(wd_enabled, cpu)) for_each_online_cpu()
187 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) for_each_online_cpu()
188 report_broken_nmi(cpu, prev_nmi_count); for_each_online_cpu()
/linux-4.1.27/arch/arm/plat-samsung/
H A Dinit.c31 #include <plat/cpu.h>
34 static struct cpu_table *cpu; variable in typeref:struct:cpu_table
51 cpu = s3c_lookup_cpu(idcode, cputab, cputab_size); s3c_init_cpu()
53 if (cpu == NULL) { s3c_init_cpu()
58 printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); s3c_init_cpu()
60 if (cpu->init == NULL) { s3c_init_cpu()
61 printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); s3c_init_cpu()
65 if (cpu->map_io) s3c_init_cpu()
66 cpu->map_io(); s3c_init_cpu()
83 if (cpu == NULL) s3c24xx_init_clocks()
84 panic("s3c24xx_init_clocks: no cpu setup?\n"); s3c24xx_init_clocks()
86 if (cpu->init_clocks == NULL) s3c24xx_init_clocks()
87 panic("s3c24xx_init_clocks: cpu has no clock init\n"); s3c24xx_init_clocks()
89 (cpu->init_clocks)(xtal); s3c24xx_init_clocks()
141 if (cpu == NULL) s3c24xx_init_uarts()
144 if (cpu->init_uarts == NULL && IS_ENABLED(CONFIG_SAMSUNG_ATAGS)) { s3c24xx_init_uarts()
145 printk(KERN_ERR "s3c24xx_init_uarts: cpu has no uart init\n"); s3c24xx_init_uarts()
147 (cpu->init_uarts)(cfg, no); s3c24xx_init_uarts()
155 // do the correct init for cpu s3c_arch_init()
157 if (cpu == NULL) { s3c_arch_init()
161 panic("s3c_arch_init: NULL cpu\n"); s3c_arch_init()
164 ret = (cpu->init)(); s3c_arch_init()
/linux-4.1.27/tools/virtio/virtio-trace/
H A Dtrace-agent.c24 "/sys/kernel/debug/tracing/per_cpu/cpu%d/trace_pipe_raw"
25 #define WRITE_PATH_FMT "/dev/virtio-ports/trace-path-cpu%d"
154 int cpu; agent_info_init() local
159 for (cpu = 0; cpu < s->cpus; cpu++) { agent_info_init()
161 in_path = make_input_path(cpu); agent_info_init()
167 out_path = make_output_path(cpu); agent_info_init()
174 rw_thread_init(cpu, in_path, out_path, s->use_stdout, agent_info_init()
175 s->pipe_size, s->rw_ti[cpu]); agent_info_init()
222 int cpu; agent_main_loop() local
226 for (cpu = 0; cpu < s->cpus; cpu++) agent_main_loop()
227 rw_thread_per_cpu[cpu] = rw_thread_run(s->rw_ti[cpu]); agent_main_loop()
232 for (cpu = 0; cpu < s->cpus; cpu++) { agent_main_loop()
235 ret = pthread_join(rw_thread_per_cpu[cpu], NULL); agent_main_loop()
237 pr_err("pthread_join() error:%d (cpu %d)\n", ret, cpu); agent_main_loop()
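
trace-agent.c above pairs each cpu's splice thread with a read side under tracing's per_cpu directory and a write side on a virtio-serial port, using the two format strings shown. A hedged sketch of just the path construction and open step (open_cpu_pipes is a made-up helper; error handling is trimmed):

    #include <fcntl.h>
    #include <stdio.h>

    #define READ_PATH_FMT  "/sys/kernel/debug/tracing/per_cpu/cpu%d/trace_pipe_raw"
    #define WRITE_PATH_FMT "/dev/virtio-ports/trace-path-cpu%d"

    /* Open one cpu's raw trace pipe and its matching virtio-serial port. */
    static int open_cpu_pipes(int cpu, int *in_fd, int *out_fd)
    {
            char in_path[128], out_path[128];

            snprintf(in_path, sizeof(in_path), READ_PATH_FMT, cpu);
            snprintf(out_path, sizeof(out_path), WRITE_PATH_FMT, cpu);
            *in_fd = open(in_path, O_RDONLY);
            *out_fd = open(out_path, O_WRONLY);
            return (*in_fd < 0 || *out_fd < 0) ? -1 : 0;
    }

    int main(void)
    {
            int in_fd, out_fd;

            if (open_cpu_pipes(0, &in_fd, &out_fd))
                    fprintf(stderr, "could not open cpu0 pipes\n");
            return 0;
    }
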
/linux-4.1.27/arch/x86/kernel/cpu/microcode/
H A Dcore.c32 * Made to use devfs (/dev/cpu/microcode) + cleanups.
82 #include <linux/cpu.h>
106 * All non cpu-hotplug-callback call sites use:
110 * the cpu-hotplug-callback call sites.
112 * We guarantee that only a single cpu is being
121 * Operations that are run on a target cpu:
137 static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig) collect_cpu_info_on_target() argument
142 ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1); collect_cpu_info_on_target()
149 static int collect_cpu_info(int cpu) collect_cpu_info() argument
151 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; collect_cpu_info()
156 ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig); collect_cpu_info()
174 static int apply_microcode_on_target(int cpu) apply_microcode_on_target() argument
179 ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1); apply_microcode_on_target()
190 int cpu; do_microcode_update() local
192 for_each_online_cpu(cpu) { for_each_online_cpu()
193 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; for_each_online_cpu()
199 ustate = microcode_ops->request_microcode_user(cpu, buf, size); for_each_online_cpu()
204 apply_microcode_on_target(cpu); for_each_online_cpu()
250 .nodename = "cpu/microcode",
273 MODULE_ALIAS("devname:cpu/microcode");
282 static int reload_for_cpu(int cpu) reload_for_cpu() argument
284 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; reload_for_cpu()
291 ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true); reload_for_cpu()
293 apply_microcode_on_target(cpu); reload_for_cpu()
305 int cpu; reload_store() local
317 for_each_online_cpu(cpu) { for_each_online_cpu()
318 tmp_ret = reload_for_cpu(cpu); for_each_online_cpu()
320 pr_warn("Error reloading microcode on CPU %d\n", cpu); for_each_online_cpu()
368 static void microcode_fini_cpu(int cpu) microcode_fini_cpu() argument
370 microcode_ops->microcode_fini_cpu(cpu); microcode_fini_cpu()
373 static enum ucode_state microcode_resume_cpu(int cpu) microcode_resume_cpu() argument
375 pr_debug("CPU%d updated upon resume\n", cpu); microcode_resume_cpu()
377 if (apply_microcode_on_target(cpu)) microcode_resume_cpu()
383 static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw) microcode_init_cpu() argument
386 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; microcode_init_cpu()
391 if (collect_cpu_info(cpu)) microcode_init_cpu()
398 ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, microcode_init_cpu()
402 pr_debug("CPU%d updated upon init\n", cpu); microcode_init_cpu()
403 apply_microcode_on_target(cpu); microcode_init_cpu()
409 static enum ucode_state microcode_update_cpu(int cpu) microcode_update_cpu() argument
411 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; microcode_update_cpu()
414 return microcode_resume_cpu(cpu); microcode_update_cpu()
416 return microcode_init_cpu(cpu, false); microcode_update_cpu()
421 int err, cpu = dev->id; mc_device_add() local
423 if (!cpu_online(cpu)) mc_device_add()
426 pr_debug("CPU%d added\n", cpu); mc_device_add()
432 if (microcode_init_cpu(cpu, true) == UCODE_ERROR) mc_device_add()
440 int cpu = dev->id; mc_device_remove() local
442 if (!cpu_online(cpu)) mc_device_remove()
445 pr_debug("CPU%d removed\n", cpu); mc_device_remove()
446 microcode_fini_cpu(cpu); mc_device_remove()
463 int cpu = smp_processor_id(); mc_bp_resume() local
464 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; mc_bp_resume()
467 microcode_ops->apply_microcode(cpu); mc_bp_resume()
479 unsigned int cpu = (unsigned long)hcpu; mc_cpu_callback() local
482 dev = get_cpu_device(cpu); mc_cpu_callback()
486 microcode_update_cpu(cpu); mc_cpu_callback()
487 pr_debug("CPU%d added\n", cpu); mc_cpu_callback()
495 pr_err("Failed to create group for CPU%d\n", cpu); mc_cpu_callback()
501 pr_debug("CPU%d removed\n", cpu); mc_cpu_callback()
516 microcode_fini_cpu(cpu); mc_cpu_callback()
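
reload_store()/reload_for_cpu() above implement the sysfs reload attribute: writing "1" asks the driver to fetch fresh microcode from the firmware path and apply it on every online cpu. A small userspace sketch (the /sys/devices/system/cpu/microcode/reload path is the usual location for that attribute and is an assumption here, not something shown in the snippet):

    #include <stdio.h>

    int main(void)
    {
            /* Needs root; equivalent to: echo 1 > .../microcode/reload */
            FILE *fp = fopen("/sys/devices/system/cpu/microcode/reload", "w");

            if (!fp) {
                    perror("open reload");
                    return 1;
            }
            if (fputs("1\n", fp) == EOF)
                    perror("write reload");
            fclose(fp);
            return 0;
    }
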
/linux-4.1.27/arch/arm/common/
H A Dmcpm_entry.c25 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) mcpm_set_entry_vector() argument
28 mcpm_entry_vectors[cluster][cpu] = val; mcpm_set_entry_vector()
29 sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); mcpm_set_entry_vector()
34 void mcpm_set_early_poke(unsigned cpu, unsigned cluster, mcpm_set_early_poke() argument
37 unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0]; mcpm_set_early_poke()
76 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster) mcpm_cpu_power_up() argument
87 return platform_ops->power_up(cpu, cluster); mcpm_cpu_power_up()
89 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); mcpm_cpu_power_up()
98 cpu_is_down = !mcpm_cpu_use_count[cluster][cpu]; mcpm_cpu_power_up()
101 mcpm_cpu_use_count[cluster][cpu]++; mcpm_cpu_power_up()
110 BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 && mcpm_cpu_power_up()
111 mcpm_cpu_use_count[cluster][cpu] != 2); mcpm_cpu_power_up()
116 ret = platform_ops->cpu_powerup(cpu, cluster); mcpm_cpu_power_up()
127 unsigned int mpidr, cpu, cluster; mcpm_cpu_power_down() local
148 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); mcpm_cpu_power_down()
150 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); mcpm_cpu_power_down()
152 __mcpm_cpu_going_down(cpu, cluster); mcpm_cpu_power_down()
157 mcpm_cpu_use_count[cluster][cpu]--; mcpm_cpu_power_down()
158 BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 && mcpm_cpu_power_down()
159 mcpm_cpu_use_count[cluster][cpu] != 1); mcpm_cpu_power_down()
160 cpu_going_down = !mcpm_cpu_use_count[cluster][cpu]; mcpm_cpu_power_down()
163 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { mcpm_cpu_power_down()
164 platform_ops->cpu_powerdown_prepare(cpu, cluster); mcpm_cpu_power_down()
171 platform_ops->cpu_powerdown_prepare(cpu, cluster); mcpm_cpu_power_down()
184 __mcpm_cpu_down(cpu, cluster); mcpm_cpu_power_down()
207 int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster) mcpm_wait_for_cpu_powerdown() argument
214 ret = platform_ops->wait_for_powerdown(cpu, cluster); mcpm_wait_for_cpu_powerdown()
216 pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n", mcpm_wait_for_cpu_powerdown()
217 __func__, cpu, cluster, ret); mcpm_wait_for_cpu_powerdown()
241 unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); mcpm_cpu_suspend() local
244 platform_ops->cpu_suspend_prepare(cpu, cluster); mcpm_cpu_suspend()
252 unsigned int mpidr, cpu, cluster; mcpm_cpu_powered_up() local
266 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); mcpm_cpu_powered_up()
271 cpu_was_down = !mcpm_cpu_use_count[cluster][cpu]; mcpm_cpu_powered_up()
277 mcpm_cpu_use_count[cluster][cpu] = 1; mcpm_cpu_powered_up()
279 platform_ops->cpu_is_up(cpu, cluster); mcpm_cpu_powered_up()
293 unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); nocache_trampoline() local
297 mcpm_set_entry_vector(cpu, cluster, cpu_resume); nocache_trampoline()
300 __mcpm_cpu_going_down(cpu, cluster); nocache_trampoline()
301 BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster)); nocache_trampoline()
304 __mcpm_cpu_down(cpu, cluster); nocache_trampoline()
340 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
344 void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) __mcpm_cpu_going_down() argument
346 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; __mcpm_cpu_going_down()
347 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); __mcpm_cpu_going_down()
351 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
357 void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) __mcpm_cpu_down() argument
360 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; __mcpm_cpu_down()
361 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); __mcpm_cpu_down()
392 bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) __mcpm_outbound_enter_critical() argument
417 if (i == cpu) __mcpm_outbound_enter_critical()
421 cpustate = c->cpus[i].cpu; __mcpm_outbound_enter_critical()
426 sync_cache_r(&c->cpus[i].cpu); __mcpm_outbound_enter_critical()
469 mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN; mcpm_sync_init()
475 mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP; for_each_online_cpu()
H A Dmcpm_platsmp.c22 static void cpu_to_pcpu(unsigned int cpu, cpu_to_pcpu() argument
27 mpidr = cpu_logical_map(cpu); cpu_to_pcpu()
32 static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) mcpm_boot_secondary() argument
37 cpu_to_pcpu(cpu, &pcpu, &pcluster); mcpm_boot_secondary()
40 __func__, cpu, pcpu, pcluster); mcpm_boot_secondary()
47 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); mcpm_boot_secondary()
52 static void mcpm_secondary_init(unsigned int cpu) mcpm_secondary_init() argument
59 static int mcpm_cpu_kill(unsigned int cpu) mcpm_cpu_kill() argument
63 cpu_to_pcpu(cpu, &pcpu, &pcluster); mcpm_cpu_kill()
68 static int mcpm_cpu_disable(unsigned int cpu) mcpm_cpu_disable() argument
78 static void mcpm_cpu_die(unsigned int cpu) mcpm_cpu_die() argument
/linux-4.1.27/arch/mips/sgi-ip27/
H A Dip27-irq-pci.c64 static inline int alloc_level(int cpu, int irq) alloc_level() argument
66 struct hub_data *hub = hub_data(cpu_to_node(cpu)); alloc_level()
67 struct slice_data *si = cpu_data[cpu].data; alloc_level()
72 panic("Cpu %d flooded with devices", cpu); alloc_level()
82 int cpu, i; find_level() local
84 for_each_online_cpu(cpu) { for_each_online_cpu()
85 struct slice_data *si = cpu_data[cpu].data; for_each_online_cpu()
89 *cpunum = cpu; for_each_online_cpu()
95 panic("Could not identify cpu/level for irq %d", irq);
98 static int intr_connect_level(int cpu, int bit) intr_connect_level() argument
100 nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); intr_connect_level()
101 struct slice_data *si = cpu_data[cpu].data; intr_connect_level()
105 if (!cputoslice(cpu)) { intr_connect_level()
116 static int intr_disconnect_level(int cpu, int bit) intr_disconnect_level() argument
118 nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); intr_disconnect_level()
119 struct slice_data *si = cpu_data[cpu].data; intr_disconnect_level()
123 if (!cputoslice(cpu)) { intr_disconnect_level()
141 cpuid_t cpu; startup_bridge_irq() local
152 swlevel = find_level(&cpu, d->irq); startup_bridge_irq()
177 intr_connect_level(cpu, swlevel); startup_bridge_irq()
188 cpuid_t cpu; shutdown_bridge_irq() local
197 swlevel = find_level(&cpu, d->irq); shutdown_bridge_irq()
198 intr_disconnect_level(cpu, swlevel); shutdown_bridge_irq()
206 cpuid_t cpu; enable_bridge_irq() local
209 swlevel = find_level(&cpu, d->irq); /* Criminal offence */ enable_bridge_irq()
210 intr_connect_level(cpu, swlevel); enable_bridge_irq()
215 cpuid_t cpu; disable_bridge_irq() local
218 swlevel = find_level(&cpu, d->irq); /* Criminal offence */ disable_bridge_irq()
219 intr_disconnect_level(cpu, swlevel); disable_bridge_irq()
238 int swlevel, cpu; request_bridge_irq() local
248 cpu = bc->irq_cpu; request_bridge_irq()
249 swlevel = alloc_level(cpu, irq); request_bridge_irq()
257 nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); request_bridge_irq()
260 intr_connect_level(cpu, swlevel); request_bridge_irq()
/linux-4.1.27/tools/power/x86/x86_energy_perf_policy/
H A Dx86_energy_perf_policy.c38 int cpu = -1; variable
43 * -c cpu: limit action to a single CPU (default is all CPUs)
66 printf("%s: [-c cpu] [-v] " usage()
87 cpu = atoi(optarg); cmdline()
187 unsigned long long get_msr(int cpu, int offset) get_msr() argument
194 sprintf(msr_path, "/dev/cpu/%d/msr", cpu); get_msr()
205 printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); get_msr()
212 unsigned long long put_msr(int cpu, unsigned long long new_msr, int offset) put_msr() argument
219 sprintf(msr_path, "/dev/cpu/%d/msr", cpu); put_msr()
229 printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); put_msr()
236 printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval); put_msr()
245 void print_msr(int cpu) print_msr() argument
247 printf("cpu%d: 0x%016llx\n", print_msr()
248 cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS)); print_msr()
251 void update_msr(int cpu) update_msr() argument
255 previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS); update_msr()
258 printf("cpu%d msr0x%x 0x%016llx -> 0x%016llx\n", update_msr()
259 cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias); update_msr()
266 * run func() on every cpu in /dev/cpu
279 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); for_every_cpu()
286 int cpu; for_every_cpu() local
289 "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", for_every_cpu()
290 &cpu); for_every_cpu()
294 func(cpu); for_every_cpu()
311 if (cpu != -1) { main()
313 print_msr(cpu); main()
315 update_msr(cpu); main()
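
get_msr()/put_msr() above go through the msr driver's per-cpu character device, using the MSR number as the file offset for pread()/pwrite(). A minimal read-only sketch of the same access pattern (0x1b0 is IA32_ENERGY_PERF_BIAS on Intel parts; treat the constant as an assumption and adjust for the target cpu):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);  /* needs the msr module + root */

            if (fd < 0) {
                    perror("open /dev/cpu/0/msr");
                    return 1;
            }
            /* The MSR index doubles as the file offset. */
            if (pread(fd, &val, sizeof(val), 0x1b0) != sizeof(val)) {
                    perror("pread MSR_IA32_ENERGY_PERF_BIAS");
                    close(fd);
                    return 1;
            }
            printf("cpu0: MSR 0x1b0 = 0x%016llx\n", (unsigned long long)val);
            close(fd);
            return 0;
    }
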
/linux-4.1.27/drivers/hv/
H A Dhv.c309 static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu) hv_init_clockevent_device() argument
313 dev->cpumask = cpumask_of(cpu); hv_init_clockevent_device()
330 int cpu; hv_synic_alloc() local
332 for_each_online_cpu(cpu) { for_each_online_cpu()
333 hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); for_each_online_cpu()
334 if (hv_context.event_dpc[cpu] == NULL) { for_each_online_cpu()
338 tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu); for_each_online_cpu()
340 hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC); for_each_online_cpu()
341 if (hv_context.clk_evt[cpu] == NULL) { for_each_online_cpu()
345 hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu); for_each_online_cpu()
347 hv_context.synic_message_page[cpu] = for_each_online_cpu()
350 if (hv_context.synic_message_page[cpu] == NULL) { for_each_online_cpu()
355 hv_context.synic_event_page[cpu] = for_each_online_cpu()
358 if (hv_context.synic_event_page[cpu] == NULL) { for_each_online_cpu()
363 hv_context.post_msg_page[cpu] = for_each_online_cpu()
366 if (hv_context.post_msg_page[cpu] == NULL) { for_each_online_cpu()
377 static void hv_synic_free_cpu(int cpu) hv_synic_free_cpu() argument
379 kfree(hv_context.event_dpc[cpu]); hv_synic_free_cpu()
380 kfree(hv_context.clk_evt[cpu]); hv_synic_free_cpu()
381 if (hv_context.synic_event_page[cpu]) hv_synic_free_cpu()
382 free_page((unsigned long)hv_context.synic_event_page[cpu]); hv_synic_free_cpu()
383 if (hv_context.synic_message_page[cpu]) hv_synic_free_cpu()
384 free_page((unsigned long)hv_context.synic_message_page[cpu]); hv_synic_free_cpu()
385 if (hv_context.post_msg_page[cpu]) hv_synic_free_cpu()
386 free_page((unsigned long)hv_context.post_msg_page[cpu]); hv_synic_free_cpu()
391 int cpu; hv_synic_free() local
393 for_each_online_cpu(cpu) hv_synic_free()
394 hv_synic_free_cpu(cpu); hv_synic_free()
413 int cpu = smp_processor_id(); hv_synic_init() local
424 simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu]) hv_synic_init()
432 siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu]) hv_synic_init()
461 hv_context.vp_index[cpu] = (u32)vp_index; hv_synic_init()
463 INIT_LIST_HEAD(&hv_context.percpu_list[cpu]); hv_synic_init()
466 * Register the per-cpu clockevent source. hv_synic_init()
469 clockevents_config_and_register(hv_context.clk_evt[cpu], hv_synic_init()
481 int cpu; hv_synic_clockevents_cleanup() local
486 for_each_online_cpu(cpu) hv_synic_clockevents_cleanup()
487 clockevents_unbind_device(hv_context.clk_evt[cpu], cpu); hv_synic_clockevents_cleanup()
499 int cpu = smp_processor_id(); hv_synic_cleanup() local
507 hv_context.clk_evt[cpu]); hv_synic_cleanup()
534 hv_synic_free_cpu(cpu); hv_synic_cleanup()
/linux-4.1.27/kernel/events/
H A Dhw_breakpoint.c45 #include <linux/cpu.h>
53 /* Number of pinned cpu breakpoints in a cpu */
57 /* Number of non-pinned cpu/task breakpoints in a cpu */
64 static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type) get_bp_info() argument
66 return per_cpu_ptr(bp_cpuinfo + type, cpu); get_bp_info()
98 * have in this cpu
100 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) max_task_bp_pinned() argument
102 unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned; max_task_bp_pinned()
117 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) task_bp_pinned() argument
126 (iter->cpu < 0 || cpu == iter->cpu)) task_bp_pinned()
135 if (bp->cpu >= 0) cpumask_of_bp()
136 return cpumask_of(bp->cpu); cpumask_of_bp()
142 * a given cpu (cpu > -1) or in all of them (cpu = -1).
149 int cpu; fetch_bp_busy_slots() local
151 for_each_cpu(cpu, cpumask) { for_each_cpu()
152 struct bp_cpuinfo *info = get_bp_info(cpu, type); for_each_cpu()
157 nr += max_task_bp_pinned(cpu, type); for_each_cpu()
159 nr += task_bp_pinned(cpu, bp, type); for_each_cpu()
173 * in a same cpu.
184 static void toggle_bp_task_slot(struct perf_event *bp, int cpu, toggle_bp_task_slot() argument
187 unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned; toggle_bp_task_slot()
190 old_idx = task_bp_pinned(cpu, bp, type) - 1; toggle_bp_task_slot()
207 int cpu; toggle_bp_slot() local
212 /* Pinned counter cpu profiling */ toggle_bp_slot()
214 get_bp_info(bp->cpu, type)->cpu_pinned += weight; toggle_bp_slot()
219 for_each_cpu(cpu, cpumask) toggle_bp_slot()
220 toggle_bp_task_slot(bp, cpu, type, weight); toggle_bp_slot()
244 * - If attached to a single cpu, check:
246 * (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
247 * + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
249 * -> If there are already non-pinned counters in this cpu, it means
252 * breakpoints (for this cpu) plus the number of per cpu breakpoint
253 * (for this cpu) doesn't cover every registers.
260 * -> This is roughly the same, except we check the number of per cpu
261 * bp for every cpu and we keep the max one. Same for the per tasks
267 * - If attached to a single cpu, check:
269 * ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
270 * + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
505 int cpu; register_wide_hw_breakpoint() local
512 for_each_online_cpu(cpu) { for_each_online_cpu()
513 bp = perf_event_create_kernel_counter(attr, cpu, NULL, for_each_online_cpu()
520 per_cpu(*cpu_events, cpu) = bp; for_each_online_cpu()
534 * @cpu_events: the per cpu set of events to unregister
538 int cpu; unregister_wide_hw_breakpoint() local
540 for_each_possible_cpu(cpu) unregister_wide_hw_breakpoint()
541 unregister_hw_breakpoint(per_cpu(*cpu_events, cpu)); unregister_wide_hw_breakpoint()
621 int cpu, err_cpu; init_hw_breakpoint() local
627 for_each_possible_cpu(cpu) { for_each_possible_cpu()
629 struct bp_cpuinfo *info = get_bp_info(cpu, i); for_each_possible_cpu()
648 if (err_cpu == cpu) for_each_possible_cpu()
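
The accounting above decides whether one more breakpoint still fits in the HBP_NUM per-cpu slots. From userspace, the events that consume those slots are normally created with perf_event_open() and a breakpoint attribute; a hedged sketch watching writes to a local variable (4-byte length, x86-style; sample delivery setup is omitted):

    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            static int watched;
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_BREAKPOINT;
            attr.size = sizeof(attr);
            attr.bp_type = HW_BREAKPOINT_W;         /* fire on writes */
            attr.bp_addr = (unsigned long)&watched;
            attr.bp_len = HW_BREAKPOINT_LEN_4;

            /* pid = 0, cpu = -1: this task on any cpu; uses one slot per cpu. */
            fd = perf_event_open(&attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            watched = 42;                           /* counted by the event */
            close(fd);
            return 0;
    }
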
H A Dcallchain.c40 int cpu; release_callchain_buffers_rcu() local
44 for_each_possible_cpu(cpu) release_callchain_buffers_rcu()
45 kfree(entries->cpu_entries[cpu]); release_callchain_buffers_rcu()
61 int cpu; alloc_callchain_buffers() local
67 * accessed from NMI. Use a temporary manual per cpu allocation alloc_callchain_buffers()
78 for_each_possible_cpu(cpu) { for_each_possible_cpu()
79 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, for_each_possible_cpu()
80 cpu_to_node(cpu)); for_each_possible_cpu()
81 if (!entries->cpu_entries[cpu]) for_each_possible_cpu()
90 for_each_possible_cpu(cpu)
91 kfree(entries->cpu_entries[cpu]);
137 int cpu; get_callchain_entry() local
148 cpu = smp_processor_id(); get_callchain_entry()
150 return &entries->cpu_entries[cpu][*rctx]; get_callchain_entry()
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/
H A Dlib.c21 int cpu; pick_online_cpu() local
31 for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) pick_online_cpu()
32 if (CPU_ISSET(cpu, &mask)) pick_online_cpu()
33 return cpu; pick_online_cpu()
36 for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) pick_online_cpu()
37 if (CPU_ISSET(cpu, &mask)) pick_online_cpu()
38 return cpu; pick_online_cpu()
44 int bind_to_cpu(int cpu) bind_to_cpu() argument
48 printf("Binding to cpu %d\n", cpu); bind_to_cpu()
51 CPU_SET(cpu, &mask); bind_to_cpu()
128 * We are just here to eat cpu and die. So make sure we can be killed, eat_cpu_child()
136 /* Soak up cpu forever */ eat_cpu_child()
145 int cpu, rc; eat_cpu() local
148 cpu = pick_online_cpu(); eat_cpu()
149 FAIL_IF(cpu < 0); eat_cpu()
150 FAIL_IF(bind_to_cpu(cpu)); eat_cpu()
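
pick_online_cpu()/bind_to_cpu() above pin the selftest to a single cpu with sched_setaffinity() before soaking it. The same pinning pattern as a standalone sketch:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Bind the calling thread to a single cpu; returns 0 on success. */
    static int bind_to_cpu(int cpu)
    {
            cpu_set_t mask;

            printf("Binding to cpu %d\n", cpu);
            CPU_ZERO(&mask);
            CPU_SET(cpu, &mask);
            return sched_setaffinity(0, sizeof(mask), &mask);
    }

    int main(void)
    {
            if (bind_to_cpu(0))
                    perror("sched_setaffinity");
            printf("now on cpu %d\n", sched_getcpu());
            return 0;
    }
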
/linux-4.1.27/drivers/cpuidle/
H A Dcoupled.c20 #include <linux/cpu.h>
34 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
35 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
36 * will corrupt the gic state unless the other cpu runs a work
37 * around). Each cpu has a power state that it can enter without
38 * coordinating with the other cpu (usually Wait For Interrupt, or
44 * This file implements a solution, where each cpu will wait in the
54 * each cpu will increment the ready counter, and continue once the
56 * cpu exits idle, the other cpus will decrement their counter and
59 * requested_state stores the deepest coupled idle state each cpu
63 * variable is not locked. It is only written from the cpu that
64 * it stores (or by the on/offlining cpu if that cpu is offline),
80 * set in the struct cpuidle_device for each cpu.
91 * should ensure that the cpus all abort together if any cpu tries
103 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
127 * __smp_call_function_single with the per cpu call_single_data struct already
134 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
142 * @dev: cpuidle_device of the calling cpu
150 * The atomic variable must be initialized to 0 before any cpu calls
151 * this function, will be reset to 0 before any cpu returns from this function.
190 * cpuidle_coupled_set_ready - mark a cpu as ready
191 * @coupled: the struct coupled that contains the current cpu
199 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
200 * @coupled: the struct coupled that contains the current cpu
203 * is equal to the number of online cpus. Prevents a race where one cpu
205 * cpu has decremented its ready counter, leading to the ready counter going
227 * @coupled: the struct coupled that contains the current cpu
239 * @coupled: the struct coupled that contains the current cpu
251 * @coupled: the struct coupled that contains the current cpu
263 * @coupled: the struct coupled that contains the current cpu
275 * @dev: struct cpuidle_device for this cpu
276 * @coupled: the struct coupled that contains the current cpu
302 int cpu = (unsigned long)info; cpuidle_coupled_handle_poke() local
303 cpumask_set_cpu(cpu, &cpuidle_coupled_poked); cpuidle_coupled_handle_poke()
304 cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending); cpuidle_coupled_handle_poke()
308 * cpuidle_coupled_poke - wake up a cpu that may be waiting
309 * @cpu: target cpu
 311  * Ensures that the target cpu exits its waiting idle state (if it is in it)
 311  * Ensures that the target cpu exits its waiting idle state (if it is in it)
315 * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
319 static void cpuidle_coupled_poke(int cpu) cpuidle_coupled_poke() argument
321 struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); cpuidle_coupled_poke()
323 if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending)) cpuidle_coupled_poke()
324 smp_call_function_single_async(cpu, csd); cpuidle_coupled_poke()
329 * @dev: struct cpuidle_device for this cpu
330 * @coupled: the struct coupled that contains the current cpu
337 int cpu; cpuidle_coupled_poke_others() local
339 for_each_cpu(cpu, &coupled->coupled_cpus) cpuidle_coupled_poke_others()
340 if (cpu != this_cpu && cpu_online(cpu)) cpuidle_coupled_poke_others()
341 cpuidle_coupled_poke(cpu); cpuidle_coupled_poke_others()
345 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
346 * @dev: struct cpuidle_device for this cpu
347 * @coupled: the struct coupled that contains the current cpu
348 * @next_state: the index in drv->states of the requested state for this cpu
353 static int cpuidle_coupled_set_waiting(int cpu, cpuidle_coupled_set_waiting() argument
356 coupled->requested_state[cpu] = next_state; cpuidle_coupled_set_waiting()
366 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
367 * @dev: struct cpuidle_device for this cpu
368 * @coupled: the struct coupled that contains the current cpu
372 static void cpuidle_coupled_set_not_waiting(int cpu, cpuidle_coupled_set_not_waiting() argument
379 * notice that this cpu has cleared it's requested_state. cpuidle_coupled_set_not_waiting()
383 coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE; cpuidle_coupled_set_not_waiting()
387 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
388 * @cpu: the current cpu
389 * @coupled: the struct coupled that contains the current cpu
391 * Marks this cpu as no longer in the ready and waiting loops. Decrements
392 * the waiting count first to prevent another cpu looping back in and seeing
393 * this cpu as waiting just before it exits idle.
395 static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled) cpuidle_coupled_set_done() argument
397 cpuidle_coupled_set_not_waiting(cpu, coupled); cpuidle_coupled_set_done()
403 * @cpu - this cpu
410 * the interrupt didn't schedule work that should take the cpu out of idle.
414 static int cpuidle_coupled_clear_pokes(int cpu) cpuidle_coupled_clear_pokes() argument
416 if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending)) cpuidle_coupled_clear_pokes()
420 while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending)) cpuidle_coupled_clear_pokes()
440 * @dev: struct cpuidle_device for the current cpu
450 * the second stage will start. Each coupled cpu will spin until all cpus have
468 cpuidle_coupled_clear_pokes(dev->cpu); cpuidle_enter_state_coupled()
482 cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked); cpuidle_enter_state_coupled()
484 w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state); cpuidle_enter_state_coupled()
486 * If this is the last cpu to enter the waiting state, poke cpuidle_enter_state_coupled()
493 cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked); cpuidle_enter_state_coupled()
494 cpuidle_coupled_poke_others(dev->cpu, coupled); cpuidle_enter_state_coupled()
500 * allowed for a single cpu. If this was not the poking cpu, wait cpuidle_enter_state_coupled()
507 !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) { cpuidle_enter_state_coupled()
508 if (cpuidle_coupled_clear_pokes(dev->cpu)) cpuidle_enter_state_coupled()
512 cpuidle_coupled_set_not_waiting(dev->cpu, coupled); cpuidle_enter_state_coupled()
517 cpuidle_coupled_set_not_waiting(dev->cpu, coupled); cpuidle_enter_state_coupled()
526 cpuidle_coupled_clear_pokes(dev->cpu); cpuidle_enter_state_coupled()
528 cpuidle_coupled_set_not_waiting(dev->cpu, coupled); cpuidle_enter_state_coupled()
533 * Make sure final poke status for this cpu is visible before setting cpuidle_enter_state_coupled()
534 * cpu as ready. cpuidle_enter_state_coupled()
542 * cpu has incremented the ready counter, it cannot abort idle and must cpuidle_enter_state_coupled()
544 * another cpu leaves idle and decrements the waiting counter. cpuidle_enter_state_coupled()
563 * There is a small chance that a cpu left and reentered idle after this cpuidle_enter_state_coupled()
564 * cpu saw that all cpus were waiting. The cpu that reentered idle will cpuidle_enter_state_coupled()
565 * have sent this cpu a poke, which will still be pending after the cpuidle_enter_state_coupled()
573 cpuidle_coupled_set_done(dev->cpu, coupled); cpuidle_enter_state_coupled()
584 cpuidle_coupled_set_done(dev->cpu, coupled); cpuidle_enter_state_coupled()
589 * That leads to an inefficiency where a cpu receiving an interrupt cpuidle_enter_state_coupled()
592 * other cpus will need to spin waiting for the cpu that is processing cpuidle_enter_state_coupled()
605 * a cpu exits and re-enters the ready state because this cpu has cpuidle_enter_state_coupled()
623 * @dev: struct cpuidle_device for the current cpu
631 int cpu; cpuidle_coupled_register_device() local
639 for_each_cpu(cpu, &dev->coupled_cpus) { cpuidle_coupled_register_device()
640 other_dev = per_cpu(cpuidle_devices, cpu); cpuidle_coupled_register_device()
663 csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu); cpuidle_coupled_register_device()
665 csd->info = (void *)(unsigned long)dev->cpu; cpuidle_coupled_register_device()
672 * @dev: struct cpuidle_device for the current cpu
675 * cpu from the coupled idle set, and frees the cpuidle_coupled_info struct if
676 * this was the last cpu in the set.
692 * @coupled: the struct coupled that contains the cpu that is changing state
699 int cpu = get_cpu(); cpuidle_coupled_prevent_idle() local
703 cpuidle_coupled_poke_others(cpu, coupled); cpuidle_coupled_prevent_idle()
711 * @coupled: the struct coupled that contains the cpu that is changing state
718 int cpu = get_cpu(); cpuidle_coupled_allow_idle() local
727 cpuidle_coupled_poke_others(cpu, coupled); cpuidle_coupled_allow_idle()
735 * @hcpu: target cpu number
737 * Called when a cpu is brought on or offline using hotplug. Updates the
738 * coupled cpu set appropriately
743 int cpu = (unsigned long)hcpu; cpuidle_coupled_cpu_notify() local
760 dev = per_cpu(cpuidle_devices, cpu); cpuidle_coupled_cpu_notify()
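
The comments above describe a two-stage handshake: each cpu first registers as waiting, and only once every coupled cpu is waiting do they all advance to ready and enter the deep state together; an interrupt makes a cpu back out and the others abort. A toy, single-use userspace model of that shape using C11 atomics and threads (an illustration of the protocol only, not the kernel's lock-free implementation; NR_CPUS_COUPLED and fake_cpu are made up, and the poke/abort paths are omitted):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_CPUS_COUPLED 4

    static atomic_int waiting;  /* cpus that have asked for the coupled state */
    static atomic_int ready;    /* cpus that have seen everyone waiting */

    static void *fake_cpu(void *arg)
    {
            long id = (long)arg;

            /* Stage 1: mark ourselves waiting, spin until all are waiting. */
            atomic_fetch_add(&waiting, 1);
            while (atomic_load(&waiting) < NR_CPUS_COUPLED)
                    ;                       /* the kernel pokes/idles here */

            /* Stage 2: mark ourselves ready, spin until all are ready. */
            atomic_fetch_add(&ready, 1);
            while (atomic_load(&ready) < NR_CPUS_COUPLED)
                    ;

            printf("cpu%ld: all coupled cpus ready, entering deep state\n", id);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NR_CPUS_COUPLED];
            long i;

            for (i = 0; i < NR_CPUS_COUPLED; i++)
                    pthread_create(&t[i], NULL, fake_cpu, (void *)i);
            for (i = 0; i < NR_CPUS_COUPLED; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }
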
/linux-4.1.27/arch/mips/include/asm/mach-loongson/
H A Dtopology.h6 #define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2)
/linux-4.1.27/arch/powerpc/sysdev/xics/
H A Dicp-native.c17 #include <linux/cpu.h>
54 int cpu = smp_processor_id(); icp_native_get_xirr() local
62 return in_be32(&icp_native_regs[cpu]->xirr.word); icp_native_get_xirr()
67 int cpu = smp_processor_id(); icp_native_set_xirr() local
69 out_be32(&icp_native_regs[cpu]->xirr.word, value); icp_native_set_xirr()
74 int cpu = smp_processor_id(); icp_native_set_cppr() local
76 out_8(&icp_native_regs[cpu]->xirr.bytes[0], value); icp_native_set_cppr()
101 int cpu = smp_processor_id(); icp_native_teardown_cpu() local
104 icp_native_set_qirr(cpu, 0xff); icp_native_teardown_cpu()
146 static void icp_native_cause_ipi(int cpu, unsigned long data) icp_native_cause_ipi() argument
148 kvmppc_set_host_ipi(cpu, 1); icp_native_cause_ipi()
151 (cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id())))) icp_native_cause_ipi()
152 doorbell_cause_ipi(cpu, data); icp_native_cause_ipi()
155 icp_native_set_qirr(cpu, IPI_PRIORITY); icp_native_cause_ipi()
171 int cpu = smp_processor_id(); icp_native_flush_interrupt() local
172 kvmppc_set_host_ipi(cpu, 0); icp_native_flush_interrupt()
173 icp_native_set_qirr(cpu, 0xff); icp_native_flush_interrupt()
175 pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n", icp_native_flush_interrupt()
183 void xics_wake_cpu(int cpu) xics_wake_cpu() argument
185 icp_native_set_qirr(cpu, IPI_PRIORITY); xics_wake_cpu()
191 int cpu = smp_processor_id(); icp_native_ipi_action() local
193 kvmppc_set_host_ipi(cpu, 0); icp_native_ipi_action()
194 icp_native_set_qirr(cpu, 0xff); icp_native_ipi_action()
205 int i, cpu = -1; icp_native_map_one_cpu() local
214 cpu = i; for_each_possible_cpu()
222 if (cpu == -1)
226 cpu, hw_id);
231 cpu, hw_id);
235 icp_native_regs[cpu] = ioremap(addr, size);
236 kvmppc_set_xics_phys(cpu, addr);
237 if (!icp_native_regs[cpu]) {
240 cpu, hw_id, addr);
/linux-4.1.27/arch/blackfin/include/asm/
H A Dcpu.h14 struct cpu cpu; member in struct:blackfin_cpudata
/linux-4.1.27/arch/arm/mach-ux500/
H A DMakefile5 obj-y := cpu.o id.o timer.o pm.o
7 obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o
/linux-4.1.27/lib/
H A Dcpu_rmap.c31 unsigned int cpu; alloc_cpu_rmap() local
55 for_each_possible_cpu(cpu) { for_each_possible_cpu()
56 rmap->near[cpu].index = cpu % size; for_each_possible_cpu()
57 rmap->near[cpu].dist = CPU_RMAP_DIST_INF; for_each_possible_cpu()
97 static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu, cpu_rmap_copy_neigh() argument
103 if (rmap->near[cpu].dist > dist && for_each_cpu()
105 rmap->near[cpu].index = rmap->near[neigh].index; for_each_cpu()
106 rmap->near[cpu].dist = dist; for_each_cpu()
117 unsigned int cpu; debug_print_rmap() local
121 for_each_possible_cpu(cpu) { for_each_possible_cpu()
122 index = rmap->near[cpu].index; for_each_possible_cpu()
123 pr_info("cpu %d -> obj %u (distance %u)\n", for_each_possible_cpu()
124 cpu, index, rmap->near[cpu].dist); for_each_possible_cpu()
162 unsigned int cpu; cpu_rmap_update() local
170 for_each_online_cpu(cpu) { for_each_online_cpu()
171 if (rmap->near[cpu].index == index) { for_each_online_cpu()
172 rmap->near[cpu].dist = CPU_RMAP_DIST_INF; for_each_online_cpu()
173 cpumask_set_cpu(cpu, update_mask); for_each_online_cpu()
182 for_each_cpu(cpu, affinity) { for_each_cpu()
183 rmap->near[cpu].index = index; for_each_cpu()
184 rmap->near[cpu].dist = 0; for_each_cpu()
186 cpumask_of_node(cpu_to_node(cpu))); for_each_cpu()
192 for_each_cpu(cpu, update_mask) { for_each_cpu()
193 if (cpu_rmap_copy_neigh(rmap, cpu, for_each_cpu()
194 topology_thread_cpumask(cpu), 1)) for_each_cpu()
196 if (cpu_rmap_copy_neigh(rmap, cpu, for_each_cpu()
197 topology_core_cpumask(cpu), 2)) for_each_cpu()
199 if (cpu_rmap_copy_neigh(rmap, cpu, for_each_cpu()
200 cpumask_of_node(cpu_to_node(cpu)), 3)) for_each_cpu()
248 * @mask: cpu mask for new SMP affinity
H A Ddump_stack.c31 int cpu; dump_stack() local
34 * Permit this cpu to perform nested stack dumps while serialising dump_stack()
39 cpu = smp_processor_id(); dump_stack()
40 old = atomic_cmpxchg(&dump_lock, -1, cpu); dump_stack()
43 } else if (old == cpu) { dump_stack()
/linux-4.1.27/arch/sh/mm/
H A Dtlbflush_32.c17 unsigned int cpu = smp_processor_id(); local_flush_tlb_page() local
19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { local_flush_tlb_page()
24 asid = cpu_asid(cpu, vma->vm_mm); local_flush_tlb_page()
43 unsigned int cpu = smp_processor_id(); local_flush_tlb_range() local
45 if (cpu_context(cpu, mm) != NO_CONTEXT) { local_flush_tlb_range()
52 cpu_context(cpu, mm) = NO_CONTEXT; local_flush_tlb_range()
54 activate_context(mm, cpu); local_flush_tlb_range()
59 asid = cpu_asid(cpu, mm); local_flush_tlb_range()
80 unsigned int cpu = smp_processor_id(); local_flush_tlb_kernel_range() local
92 asid = cpu_asid(cpu, &init_mm); local_flush_tlb_kernel_range()
108 unsigned int cpu = smp_processor_id(); local_flush_tlb_mm() local
112 if (cpu_context(cpu, mm) != NO_CONTEXT) { local_flush_tlb_mm()
116 cpu_context(cpu, mm) = NO_CONTEXT; local_flush_tlb_mm()
118 activate_context(mm, cpu); local_flush_tlb_mm()
/linux-4.1.27/arch/blackfin/kernel/
H A Dcplbinfo.c69 static void cplbinfo_seq_init(struct cplbinfo_data *cdata, unsigned int cpu) cplbinfo_seq_init() argument
73 cdata->tbl = icplb_tbl[cpu]; cplbinfo_seq_init()
77 cdata->tbl = dcplb_tbl[cpu]; cplbinfo_seq_init()
125 unsigned int cpu = (unsigned long)PDE_DATA(file_inode(file)); cplbinfo_open() local
130 cplb_type = cpu & CPLBINFO_DCPLB_FLAG ? 'D' : 'I'; cplbinfo_open()
131 cpu &= ~CPLBINFO_DCPLB_FLAG; cplbinfo_open()
133 if (!cpu_online(cpu)) cplbinfo_open()
144 cplbinfo_seq_init(cdata, cpu); cplbinfo_open()
160 unsigned int cpu; cplbinfo_init() local
166 for_each_possible_cpu(cpu) { for_each_possible_cpu()
167 sprintf(buf, "cpu%i", cpu); for_each_possible_cpu()
173 (void *)cpu); for_each_possible_cpu()
175 (void *)(cpu | CPLBINFO_DCPLB_FLAG)); for_each_possible_cpu()
/linux-4.1.27/arch/x86/include/uapi/asm/
H A Dmce.h14 __u64 tsc; /* cpu time stamp counter */
16 __u8 cpuvendor; /* cpu vendor as encoded in system.h */
22 __u8 cpu; /* cpu number; obsolete; use extcpu now */ member in struct:mce
24 __u32 extcpu; /* linux cpu number that detected the error */
