
Searched refs:cpu (Results 1 – 200 of 2358) sorted by relevance


/linux-4.1.27/drivers/lguest/x86/
core.c
67 static struct lguest_pages *lguest_pages(unsigned int cpu) in lguest_pages() argument
69 return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]); in lguest_pages()
85 static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) in copy_in_guest_info() argument
93 if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) { in copy_in_guest_info()
94 __this_cpu_write(lg_last_cpu, cpu); in copy_in_guest_info()
95 cpu->last_pages = pages; in copy_in_guest_info()
96 cpu->changed = CHANGED_ALL; in copy_in_guest_info()
108 map_switcher_in_guest(cpu, pages); in copy_in_guest_info()
114 pages->state.guest_tss.sp1 = cpu->esp1; in copy_in_guest_info()
115 pages->state.guest_tss.ss1 = cpu->ss1; in copy_in_guest_info()
[all …]
/linux-4.1.27/drivers/lguest/
hypercalls.c
37 static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) in do_hcall() argument
57 kill_guest(cpu, "already have lguest_data"); in do_hcall()
68 __lgread(cpu, msg, args->arg1, sizeof(msg)); in do_hcall()
70 kill_guest(cpu, "CRASH: %s", msg); in do_hcall()
72 cpu->lg->dead = ERR_PTR(-ERESTART); in do_hcall()
78 guest_pagetable_clear_all(cpu); in do_hcall()
80 guest_pagetable_flush_user(cpu); in do_hcall()
88 guest_new_pagetable(cpu, args->arg1); in do_hcall()
91 guest_set_stack(cpu, args->arg1, args->arg2, args->arg3); in do_hcall()
95 guest_set_pte(cpu, args->arg1, args->arg2, in do_hcall()
[all …]
interrupts_and_traps.c
51 static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) in push_guest_stack() argument
55 lgwrite(cpu, *gstack, u32, val); in push_guest_stack()
68 static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err) in push_guest_interrupt_stack() argument
79 if ((cpu->regs->ss&0x3) != GUEST_PL) { in push_guest_interrupt_stack()
84 virtstack = cpu->esp1; in push_guest_interrupt_stack()
85 ss = cpu->ss1; in push_guest_interrupt_stack()
87 origstack = gstack = guest_pa(cpu, virtstack); in push_guest_interrupt_stack()
94 push_guest_stack(cpu, &gstack, cpu->regs->ss); in push_guest_interrupt_stack()
95 push_guest_stack(cpu, &gstack, cpu->regs->esp); in push_guest_interrupt_stack()
98 virtstack = cpu->regs->esp; in push_guest_interrupt_stack()
[all …]
page_tables.c
83 static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) in spgd_addr() argument
88 return &cpu->lg->pgdirs[i].pgdir[index]; in spgd_addr()
97 static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) in spmd_addr() argument
115 static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) in spte_addr() argument
118 pmd_t *pmd = spmd_addr(cpu, spgd, vaddr); in spte_addr()
136 static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) in gpgd_addr() argument
139 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t); in gpgd_addr()
152 static unsigned long gpte_addr(struct lg_cpu *cpu, in gpte_addr() argument
162 static unsigned long gpte_addr(struct lg_cpu *cpu, in gpte_addr() argument
206 static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) in gpte_to_spte() argument
[all …]
lg.h
130 #define lgread(cpu, addr, type) \ argument
131 ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; })
134 #define lgwrite(cpu, addr, type, val) \ argument
137 __lgwrite((cpu), (addr), &(val), sizeof(val)); \
141 int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
154 unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
155 void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
156 void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
157 bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
158 void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
[all …]
lguest_user.c
19 static int getreg_setup(struct lg_cpu *cpu, const unsigned long __user *input) in getreg_setup() argument
33 cpu->reg_read = lguest_arch_regptr(cpu, which, true); in getreg_setup()
34 if (!cpu->reg_read) in getreg_setup()
41 static int setreg(struct lg_cpu *cpu, const unsigned long __user *input) in setreg() argument
53 reg = lguest_arch_regptr(cpu, which, false); in setreg()
67 static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) in user_send_irq() argument
80 set_interrupt(cpu, irq); in user_send_irq()
88 static int trap(struct lg_cpu *cpu, const unsigned long __user *input) in trap() argument
95 if (!deliver_trap(cpu, trapnum)) in trap()
108 struct lg_cpu *cpu; in read() local
[all …]
segments.c
67 static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end) in fixup_gdt_table() argument
84 if (cpu->arch.gdt[i].dpl == 0) in fixup_gdt_table()
85 cpu->arch.gdt[i].dpl |= GUEST_PL; in fixup_gdt_table()
93 cpu->arch.gdt[i].type |= 0x1; in fixup_gdt_table()
136 void setup_guest_gdt(struct lg_cpu *cpu) in setup_guest_gdt() argument
142 cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; in setup_guest_gdt()
143 cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; in setup_guest_gdt()
144 cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL; in setup_guest_gdt()
145 cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL; in setup_guest_gdt()
152 void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) in copy_gdt_tls() argument
[all …]
core.c
184 void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes) in __lgread() argument
186 if (!lguest_address_ok(cpu->lg, addr, bytes) in __lgread()
187 || copy_from_user(b, cpu->lg->mem_base + addr, bytes) != 0) { in __lgread()
190 kill_guest(cpu, "bad read address %#lx len %u", addr, bytes); in __lgread()
195 void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b, in __lgwrite() argument
198 if (!lguest_address_ok(cpu->lg, addr, bytes) in __lgwrite()
199 || copy_to_user(cpu->lg->mem_base + addr, b, bytes) != 0) in __lgwrite()
200 kill_guest(cpu, "bad write address %#lx len %u", addr, bytes); in __lgwrite()
209 int run_guest(struct lg_cpu *cpu, unsigned long __user *user) in run_guest() argument
212 if (cpu->reg_read) { in run_guest()
[all …]
/linux-4.1.27/tools/testing/selftests/cpu-hotplug/
cpu-on-off-test.sh
23 if ! ls $SYSFS/devices/system/cpu/cpu* > /dev/null 2>&1; then
24 echo $msg cpu hotplug is not supported >&2
29 online_cpus=`cat $SYSFS/devices/system/cpu/online`
33 offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
49 for cpu in $SYSFS/devices/system/cpu/cpu*; do
50 if [ -f $cpu/online ] && grep -q $state $cpu/online; then
51 echo ${cpu##/*/cpu}
68 grep -q 1 $SYSFS/devices/system/cpu/cpu$1/online
73 grep -q 0 $SYSFS/devices/system/cpu/cpu$1/online
78 echo 1 > $SYSFS/devices/system/cpu/cpu$1/online
[all …]
/linux-4.1.27/arch/arm/boot/dts/
axm5516-cpus.dtsi
17 cpu-map {
20 cpu = <&CPU0>;
23 cpu = <&CPU1>;
26 cpu = <&CPU2>;
29 cpu = <&CPU3>;
34 cpu = <&CPU4>;
37 cpu = <&CPU5>;
40 cpu = <&CPU6>;
43 cpu = <&CPU7>;
48 cpu = <&CPU8>;
[all …]
hip04.dtsi
32 cpu-map {
35 cpu = <&CPU0>;
38 cpu = <&CPU1>;
41 cpu = <&CPU2>;
44 cpu = <&CPU3>;
49 cpu = <&CPU4>;
52 cpu = <&CPU5>;
55 cpu = <&CPU6>;
58 cpu = <&CPU7>;
63 cpu = <&CPU8>;
[all …]
/linux-4.1.27/arch/x86/xen/
smp.c
71 int cpu; in cpu_bringup() local
82 cpu = smp_processor_id(); in cpu_bringup()
83 smp_store_cpu_info(cpu); in cpu_bringup()
84 cpu_data(cpu).x86_max_cores = 1; in cpu_bringup()
85 set_cpu_sibling_map(cpu); in cpu_bringup()
89 notify_cpu_starting(cpu); in cpu_bringup()
91 set_cpu_online(cpu, true); in cpu_bringup()
93 cpu_set_state_online(cpu); /* Implies full memory barrier. */ in cpu_bringup()
104 asmlinkage __visible void cpu_bringup_and_idle(int cpu) in cpu_bringup_and_idle() argument
109 xen_pvh_secondary_vcpu_init(cpu); in cpu_bringup_and_idle()
[all …]
spinlock.c
113 int cpu = smp_processor_id(); in xen_lock_spinning() local
148 cpumask_set_cpu(cpu, &waiting_cpus); in xen_lock_spinning()
193 cpumask_clear_cpu(cpu, &waiting_cpus); in xen_lock_spinning()
204 int cpu; in xen_unlock_kick() local
208 for_each_cpu(cpu, &waiting_cpus) { in xen_unlock_kick()
209 const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu); in xen_unlock_kick()
215 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); in xen_unlock_kick()
227 void xen_init_lock_cpu(int cpu) in xen_init_lock_cpu() argument
235 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", in xen_init_lock_cpu()
236 cpu, per_cpu(lock_kicker_irq, cpu)); in xen_init_lock_cpu()
[all …]
time.c
104 void xen_setup_runstate_info(int cpu) in xen_setup_runstate_info() argument
108 area.addr.v = &per_cpu(xen_runstate, cpu); in xen_setup_runstate_info()
111 cpu, &area)) in xen_setup_runstate_info()
332 int cpu = smp_processor_id(); in xen_vcpuop_set_mode() local
340 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) in xen_vcpuop_set_mode()
346 if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) || in xen_vcpuop_set_mode()
347 HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) in xen_vcpuop_set_mode()
358 int cpu = smp_processor_id(); in xen_vcpuop_set_next_event() local
367 ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single); in xen_vcpuop_set_next_event()
414 void xen_teardown_timer(int cpu) in xen_teardown_timer() argument
[all …]
/linux-4.1.27/arch/powerpc/kernel/
smp.c
214 void smp_muxed_ipi_set_data(int cpu, unsigned long data) in smp_muxed_ipi_set_data() argument
216 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_set_data()
221 void smp_muxed_ipi_message_pass(int cpu, int msg) in smp_muxed_ipi_message_pass() argument
223 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_message_pass()
235 smp_ops->cause_ipi(cpu, info->data); in smp_muxed_ipi_message_pass()
267 static inline void do_message_pass(int cpu, int msg) in do_message_pass() argument
270 smp_ops->message_pass(cpu, msg); in do_message_pass()
273 smp_muxed_ipi_message_pass(cpu, msg); in do_message_pass()
277 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
280 do_message_pass(cpu, PPC_MSG_RESCHEDULE); in smp_send_reschedule()
[all …]
tau_6xx.c
52 void set_thresholds(unsigned long cpu) in set_thresholds() argument
59 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); in set_thresholds()
64 mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); in set_thresholds()
67 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID); in set_thresholds()
68 mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V); in set_thresholds()
72 void TAUupdate(int cpu) in TAUupdate() argument
84 if (tau[cpu].low >= step_size){ in TAUupdate()
85 tau[cpu].low -= step_size; in TAUupdate()
86 tau[cpu].high -= (step_size - window_expand); in TAUupdate()
88 tau[cpu].grew = 1; in TAUupdate()
[all …]
sysfs.c
29 static DEFINE_PER_CPU(struct cpu, cpu_devices);
45 struct cpu *cpu = container_of(dev, struct cpu, dev); in store_smt_snooze_delay() local
53 per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; in store_smt_snooze_delay()
61 struct cpu *cpu = container_of(dev, struct cpu, dev); in show_smt_snooze_delay() local
63 return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); in show_smt_snooze_delay()
71 unsigned int cpu; in setup_smt_snooze_delay() local
78 for_each_possible_cpu(cpu) in setup_smt_snooze_delay()
79 per_cpu(smt_snooze_delay, cpu) = snooze; in setup_smt_snooze_delay()
119 unsigned int cpu = dev->id; in show_pw20_state() local
121 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1); in show_pw20_state()
[all …]
paca.c
60 static struct lppaca * __init new_lppaca(int cpu) in new_lppaca() argument
64 if (cpu < NR_LPPACAS) in new_lppaca()
65 return &lppaca[cpu]; in new_lppaca()
67 lp = extra_lppacas + (cpu - NR_LPPACAS); in new_lppaca()
114 static struct slb_shadow * __init init_slb_shadow(int cpu) in init_slb_shadow() argument
116 struct slb_shadow *s = &slb_shadow[cpu]; in init_slb_shadow()
150 void __init initialise_paca(struct paca_struct *new_paca, int cpu) in initialise_paca() argument
158 new_paca->lppaca_ptr = new_lppaca(cpu); in initialise_paca()
163 new_paca->paca_index = cpu; in initialise_paca()
173 new_paca->slb_shadow_ptr = init_slb_shadow(cpu); in initialise_paca()
[all …]
/linux-4.1.27/arch/arm/mach-tegra/
platsmp.c
39 static void tegra_secondary_init(unsigned int cpu) in tegra_secondary_init() argument
41 cpumask_set_cpu(cpu, &tegra_cpu_init_mask); in tegra_secondary_init()
45 static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle) in tegra20_boot_secondary() argument
47 cpu = cpu_logical_map(cpu); in tegra20_boot_secondary()
57 tegra_put_cpu_in_reset(cpu); in tegra20_boot_secondary()
65 flowctrl_write_cpu_halt(cpu, 0); in tegra20_boot_secondary()
67 tegra_enable_cpu_clock(cpu); in tegra20_boot_secondary()
68 flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */ in tegra20_boot_secondary()
69 tegra_cpu_out_of_reset(cpu); in tegra20_boot_secondary()
73 static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle) in tegra30_boot_secondary() argument
[all …]
/linux-4.1.27/arch/arm64/kernel/
smp.c
77 static int boot_secondary(unsigned int cpu, struct task_struct *idle) in boot_secondary() argument
79 if (cpu_ops[cpu]->cpu_boot) in boot_secondary()
80 return cpu_ops[cpu]->cpu_boot(cpu); in boot_secondary()
87 int __cpu_up(unsigned int cpu, struct task_struct *idle) in __cpu_up() argument
101 ret = boot_secondary(cpu, idle); in __cpu_up()
110 if (!cpu_online(cpu)) { in __cpu_up()
111 pr_crit("CPU%u: failed to come online\n", cpu); in __cpu_up()
115 pr_err("CPU%u: failed to boot: %d\n", cpu, ret); in __cpu_up()
135 unsigned int cpu = smp_processor_id(); in secondary_start_kernel() local
143 cpumask_set_cpu(cpu, mm_cpumask(mm)); in secondary_start_kernel()
[all …]
cpuinfo.c
51 unsigned int cpu = smp_processor_id(); in cpuinfo_detect_icache_policy() local
69 pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); in cpuinfo_detect_icache_policy()
92 static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) in check_reg_mask() argument
98 name, (unsigned long)boot, cpu, (unsigned long)cur); in check_reg_mask()
103 #define CHECK_MASK(field, mask, boot, cur, cpu) \ argument
104 check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)
106 #define CHECK(field, boot, cur, cpu) \ argument
107 CHECK_MASK(field, ~0ULL, boot, cur, cpu)
114 unsigned int cpu = smp_processor_id(); in cpuinfo_sanity_check() local
123 diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu); in cpuinfo_sanity_check()
[all …]
topology.c
29 int cpu; in get_cpu_for_node() local
35 for_each_possible_cpu(cpu) { in get_cpu_for_node()
36 if (of_get_cpu_node(cpu, NULL) == cpu_node) { in get_cpu_for_node()
38 return cpu; in get_cpu_for_node()
54 int cpu; in parse_core() local
62 cpu = get_cpu_for_node(t); in parse_core()
63 if (cpu >= 0) { in parse_core()
64 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
65 cpu_topology[cpu].core_id = core_id; in parse_core()
66 cpu_topology[cpu].thread_id = i; in parse_core()
[all …]
cpuidle.c
18 int arm_cpuidle_init(unsigned int cpu) in arm_cpuidle_init() argument
21 struct device_node *cpu_node = of_cpu_device_node_get(cpu); in arm_cpuidle_init()
26 if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle) in arm_cpuidle_init()
27 ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu); in arm_cpuidle_init()
42 int cpu = smp_processor_id(); in arm_cpuidle_suspend() local
48 if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) in arm_cpuidle_suspend()
50 return cpu_ops[cpu]->cpu_suspend(index); in arm_cpuidle_suspend()
/linux-4.1.27/kernel/
smpboot.c
28 struct task_struct *idle_thread_get(unsigned int cpu) in idle_thread_get() argument
30 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_thread_get()
34 init_idle(tsk, cpu); in idle_thread_get()
49 static inline void idle_init(unsigned int cpu) in idle_init() argument
51 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_init()
54 tsk = fork_idle(cpu); in idle_init()
56 pr_err("SMP: fork_idle() failed for CPU %u\n", cpu); in idle_init()
58 per_cpu(idle_threads, cpu) = tsk; in idle_init()
67 unsigned int cpu, boot_cpu; in idle_threads_init() local
71 for_each_possible_cpu(cpu) { in idle_threads_init()
[all …]
cpu.c
272 void clear_tasks_mm_cpumask(int cpu) in clear_tasks_mm_cpumask() argument
283 WARN_ON(cpu_online(cpu)); in clear_tasks_mm_cpumask()
295 cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); in clear_tasks_mm_cpumask()
350 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) in _cpu_down() argument
353 void *hcpu = (void *)(long)cpu; in _cpu_down()
363 if (!cpu_online(cpu)) in _cpu_down()
373 __func__, cpu); in _cpu_down()
392 smpboot_park_threads(cpu); in _cpu_down()
398 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); in _cpu_down()
401 smpboot_unpark_threads(cpu); in _cpu_down()
[all …]
smp.c
39 long cpu = (long)hcpu; in hotplug_cfd() local
40 struct call_function_data *cfd = &per_cpu(cfd_data, cpu); in hotplug_cfd()
46 cpu_to_node(cpu))) in hotplug_cfd()
91 void *cpu = (void *)(long)smp_processor_id(); in call_function_init() local
97 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); in call_function_init()
144 static int generic_exec_single(int cpu, struct call_single_data *csd, in generic_exec_single() argument
147 if (cpu == smp_processor_id()) { in generic_exec_single()
162 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { in generic_exec_single()
181 if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) in generic_exec_single()
182 arch_send_call_function_single_ipi(cpu); in generic_exec_single()
[all …]
watchdog.c
203 int cpu; in touch_all_softlockup_watchdogs() local
210 for_each_online_cpu(cpu) in touch_all_softlockup_watchdogs()
211 per_cpu(watchdog_touch_ts, cpu) = 0; in touch_all_softlockup_watchdogs()
320 static int watchdog_nmi_enable(unsigned int cpu);
321 static void watchdog_nmi_disable(unsigned int cpu);
440 static void watchdog_enable(unsigned int cpu) in watchdog_enable() argument
449 watchdog_nmi_enable(cpu); in watchdog_enable()
460 static void watchdog_disable(unsigned int cpu) in watchdog_disable() argument
467 watchdog_nmi_disable(cpu); in watchdog_disable()
470 static void watchdog_cleanup(unsigned int cpu, bool online) in watchdog_cleanup() argument
[all …]
profile.c
238 int cpu = smp_processor_id(); in __profile_flip_buffers() local
240 per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu); in __profile_flip_buffers()
245 int i, j, cpu; in profile_flip_buffers() local
251 for_each_online_cpu(cpu) { in profile_flip_buffers()
252 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; in profile_flip_buffers()
268 int i, cpu; in profile_discard_flip_buffers() local
274 for_each_online_cpu(cpu) { in profile_discard_flip_buffers()
275 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; in profile_discard_flip_buffers()
284 int i, j, cpu; in do_profile_hits() local
290 cpu = get_cpu(); in do_profile_hits()
[all …]
stop_machine.c
74 static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) in cpu_stop_queue_work() argument
76 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_queue_work()
77 struct task_struct *p = per_cpu(cpu_stopper_task, cpu); in cpu_stop_queue_work()
116 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) in stop_one_cpu() argument
122 cpu_stop_queue_work(cpu, &work); in stop_one_cpu()
173 int cpu = smp_processor_id(), err = 0; in multi_cpu_stop() local
184 is_active = cpu == cpumask_first(cpu_online_mask); in multi_cpu_stop()
186 is_active = cpumask_test_cpu(cpu, msdata->active_cpus); in multi_cpu_stop()
319 void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, in stop_one_cpu_nowait() argument
323 cpu_stop_queue_work(cpu, work_buf); in stop_one_cpu_nowait()
[all …]
/linux-4.1.27/arch/microblaze/kernel/cpu/
cpuinfo-static.c
23 void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) in set_cpuinfo_static() argument
28 (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) | in set_cpuinfo_static()
29 (fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) | in set_cpuinfo_static()
30 (fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) | in set_cpuinfo_static()
31 (fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0); in set_cpuinfo_static()
43 ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul"); in set_cpuinfo_static()
51 ci->use_fpu = fcpu(cpu, "xlnx,use-fpu"); in set_cpuinfo_static()
59 (fcpu(cpu, "xlnx,unaligned-exceptions") ? in set_cpuinfo_static()
61 (fcpu(cpu, "xlnx,ill-opcode-exception") ? in set_cpuinfo_static()
63 (fcpu(cpu, "xlnx,iopb-bus-exception") ? in set_cpuinfo_static()
[all …]
/linux-4.1.27/arch/arm/mach-bcm/
platsmp-brcmstb.c
67 static int per_cpu_sw_state_rd(u32 cpu) in per_cpu_sw_state_rd() argument
69 sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); in per_cpu_sw_state_rd()
70 return per_cpu(per_cpu_sw_state, cpu); in per_cpu_sw_state_rd()
73 static void per_cpu_sw_state_wr(u32 cpu, int val) in per_cpu_sw_state_wr() argument
76 per_cpu(per_cpu_sw_state, cpu) = val; in per_cpu_sw_state_wr()
77 sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); in per_cpu_sw_state_wr()
80 static inline void per_cpu_sw_state_wr(u32 cpu, int val) { } in per_cpu_sw_state_wr() argument
83 static void __iomem *pwr_ctrl_get_base(u32 cpu) in pwr_ctrl_get_base() argument
86 base += (cpu_logical_map(cpu) * 4); in pwr_ctrl_get_base()
90 static u32 pwr_ctrl_rd(u32 cpu) in pwr_ctrl_rd() argument
[all …]
/linux-4.1.27/drivers/base/
cacheinfo.c
33 #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu)) argument
34 #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves) argument
35 #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list) argument
37 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu) in get_cpu_cacheinfo() argument
39 return ci_cacheinfo(cpu); in get_cpu_cacheinfo()
43 static int cache_setup_of_node(unsigned int cpu) in cache_setup_of_node() argument
47 struct device *cpu_dev = get_cpu_device(cpu); in cache_setup_of_node()
48 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); in cache_setup_of_node()
56 pr_err("No cpu device for CPU %d\n", cpu); in cache_setup_of_node()
61 pr_err("Failed to find cpu%d device node\n", cpu); in cache_setup_of_node()
[all …]
cpu.c
34 static void change_cpu_under_node(struct cpu *cpu, in change_cpu_under_node() argument
37 int cpuid = cpu->dev.id; in change_cpu_under_node()
40 cpu->node_id = to_nid; in change_cpu_under_node()
45 struct cpu *cpu = container_of(dev, struct cpu, dev); in cpu_subsys_online() local
61 change_cpu_under_node(cpu, from_nid, to_nid); in cpu_subsys_online()
71 void unregister_cpu(struct cpu *cpu) in unregister_cpu() argument
73 int logical_cpu = cpu->dev.id; in unregister_cpu()
77 device_unregister(&cpu->dev); in unregister_cpu()
141 struct cpu *cpu = container_of(dev, struct cpu, dev); in show_crash_notes() local
146 cpunum = cpu->dev.id; in show_crash_notes()
[all …]
/linux-4.1.27/tools/power/cpupower/lib/
cpufreq.c
16 int cpufreq_cpu_exists(unsigned int cpu) in cpufreq_cpu_exists() argument
18 return sysfs_cpu_exists(cpu); in cpufreq_cpu_exists()
21 unsigned long cpufreq_get_freq_kernel(unsigned int cpu) in cpufreq_get_freq_kernel() argument
23 return sysfs_get_freq_kernel(cpu); in cpufreq_get_freq_kernel()
26 unsigned long cpufreq_get_freq_hardware(unsigned int cpu) in cpufreq_get_freq_hardware() argument
28 return sysfs_get_freq_hardware(cpu); in cpufreq_get_freq_hardware()
31 unsigned long cpufreq_get_transition_latency(unsigned int cpu) in cpufreq_get_transition_latency() argument
33 return sysfs_get_freq_transition_latency(cpu); in cpufreq_get_transition_latency()
36 int cpufreq_get_hardware_limits(unsigned int cpu, in cpufreq_get_hardware_limits() argument
42 return sysfs_get_freq_hardware_limits(cpu, min, max); in cpufreq_get_hardware_limits()
[all …]
sysfs.h
2 extern unsigned int sysfs_cpu_exists(unsigned int cpu);
5 extern unsigned long sysfs_get_freq_kernel(unsigned int cpu);
6 extern unsigned long sysfs_get_freq_hardware(unsigned int cpu);
7 extern unsigned long sysfs_get_freq_transition_latency(unsigned int cpu);
8 extern int sysfs_get_freq_hardware_limits(unsigned int cpu,
10 extern char *sysfs_get_freq_driver(unsigned int cpu);
11 extern struct cpufreq_policy *sysfs_get_freq_policy(unsigned int cpu);
13 unsigned int cpu);
15 unsigned int cpu);
17 unsigned int cpu);
[all …]
cpufreq.h
43 unsigned int cpu; member
66 extern int cpufreq_cpu_exists(unsigned int cpu);
76 extern unsigned long cpufreq_get_freq_kernel(unsigned int cpu);
78 extern unsigned long cpufreq_get_freq_hardware(unsigned int cpu);
80 #define cpufreq_get(cpu) cpufreq_get_freq_kernel(cpu); argument
87 extern unsigned long cpufreq_get_transition_latency(unsigned int cpu);
96 extern int cpufreq_get_hardware_limits(unsigned int cpu,
107 extern char *cpufreq_get_driver(unsigned int cpu);
119 extern struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu);
133 *cpufreq_get_available_governors(unsigned int cpu);
[all …]
sysfs.c
50 static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname, in sysfs_cpufreq_read_file() argument
56 cpu, fname); in sysfs_cpufreq_read_file()
62 static unsigned int sysfs_cpufreq_write_file(unsigned int cpu, in sysfs_cpufreq_write_file() argument
71 cpu, fname); in sysfs_cpufreq_write_file()
114 static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu, in sysfs_cpufreq_get_one_value() argument
125 len = sysfs_cpufreq_read_file(cpu, cpufreq_value_files[which], in sysfs_cpufreq_get_one_value()
153 static char *sysfs_cpufreq_get_one_string(unsigned int cpu, in sysfs_cpufreq_get_one_string() argument
163 len = sysfs_cpufreq_read_file(cpu, cpufreq_string_files[which], in sysfs_cpufreq_get_one_string()
195 static int sysfs_cpufreq_write_one_value(unsigned int cpu, in sysfs_cpufreq_write_one_value() argument
202 if (sysfs_cpufreq_write_file(cpu, cpufreq_write_files[which], in sysfs_cpufreq_write_one_value()
[all …]
/linux-4.1.27/arch/blackfin/mm/
sram-alloc.c
68 unsigned int cpu; in l1sram_init() local
77 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { in l1sram_init()
78 per_cpu(free_l1_ssram_head, cpu).next = in l1sram_init()
80 if (!per_cpu(free_l1_ssram_head, cpu).next) { in l1sram_init()
85 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve; in l1sram_init()
86 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve; in l1sram_init()
87 per_cpu(free_l1_ssram_head, cpu).next->pid = 0; in l1sram_init()
88 per_cpu(free_l1_ssram_head, cpu).next->next = NULL; in l1sram_init()
90 per_cpu(used_l1_ssram_head, cpu).next = NULL; in l1sram_init()
93 spin_lock_init(&per_cpu(l1sram_lock, cpu)); in l1sram_init()
[all …]
/linux-4.1.27/arch/arc/kernel/
setup.c
47 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; in read_arc_build_cfg_regs() local
48 FIX_PTR(cpu); in read_arc_build_cfg_regs()
50 READ_BCR(AUX_IDENTITY, cpu->core); in read_arc_build_cfg_regs()
51 READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); in read_arc_build_cfg_regs()
53 READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers); in read_arc_build_cfg_regs()
54 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); in read_arc_build_cfg_regs()
57 cpu->uncached_base = uncached_space.start << 24; in read_arc_build_cfg_regs()
59 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); in read_arc_build_cfg_regs()
61 cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */ in read_arc_build_cfg_regs()
62 cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */ in read_arc_build_cfg_regs()
[all …]
smp.c
92 void __weak arc_platform_smp_wait_to_boot(int cpu) in arc_platform_smp_wait_to_boot() argument
117 unsigned int cpu = smp_processor_id(); in start_kernel_secondary() local
125 cpumask_set_cpu(cpu, mm_cpumask(mm)); in start_kernel_secondary()
127 notify_cpu_starting(cpu); in start_kernel_secondary()
128 set_cpu_online(cpu, true); in start_kernel_secondary()
130 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); in start_kernel_secondary()
133 machine_desc->init_smp(cpu); in start_kernel_secondary()
152 int __cpu_up(unsigned int cpu, struct task_struct *idle) in __cpu_up() argument
158 pr_info("Idle Task [%d] %p", cpu, idle); in __cpu_up()
159 pr_info("Trying to bring up CPU%u ...\n", cpu); in __cpu_up()
[all …]
/linux-4.1.27/include/linux/
cpumask.h
98 #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) argument
99 #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) argument
100 #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) argument
101 #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) argument
107 #define cpu_online(cpu) ((cpu) == 0) argument
108 #define cpu_possible(cpu) ((cpu) == 0) argument
109 #define cpu_present(cpu) ((cpu) == 0) argument
110 #define cpu_active(cpu) ((cpu) == 0) argument
114 static inline unsigned int cpumask_check(unsigned int cpu) in cpumask_check() argument
117 WARN_ON_ONCE(cpu >= nr_cpumask_bits); in cpumask_check()
[all …]
topology.h
82 static inline int cpu_to_node(int cpu) in cpu_to_node() argument
84 return per_cpu(numa_node, cpu); in cpu_to_node()
96 static inline void set_cpu_numa_node(int cpu, int node) in set_cpu_numa_node() argument
98 per_cpu(numa_node, cpu) = node; in set_cpu_numa_node()
148 static inline int cpu_to_mem(int cpu) in cpu_to_mem() argument
150 return per_cpu(_numa_mem_, cpu); in cpu_to_mem()
155 static inline void set_cpu_numa_mem(int cpu, int node) in set_cpu_numa_mem() argument
157 per_cpu(_numa_mem_, cpu) = node; in set_cpu_numa_mem()
158 _node_numa_mem_[cpu_to_node(cpu)] = node; in set_cpu_numa_mem()
180 static inline int cpu_to_mem(int cpu) in cpu_to_mem() argument
[all …]
ring_buffer.h
100 int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
101 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
109 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
121 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
124 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
128 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
140 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
142 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
147 struct ring_buffer *buffer_b, int cpu);
151 struct ring_buffer *buffer_b, int cpu) in ring_buffer_swap_cpu() argument
[all …]
tick.h
23 extern void tick_cleanup_dead_cpu(int cpu);
31 static inline void tick_cleanup_dead_cpu(int cpu) { } in tick_cleanup_dead_cpu() argument
99 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
100 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
112 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } in get_cpu_idle_time_us() argument
113 static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } in get_cpu_iowait_time_us() argument
129 static inline bool tick_nohz_full_cpu(int cpu) in tick_nohz_full_cpu() argument
134 return cpumask_test_cpu(cpu, tick_nohz_full_mask); in tick_nohz_full_cpu()
139 extern void tick_nohz_full_kick_cpu(int cpu);
144 static inline bool tick_nohz_full_cpu(int cpu) { return false; } in tick_nohz_full_cpu() argument
[all …]
/linux-4.1.27/drivers/xen/
cpu_hotplug.c
11 static void enable_hotplug_cpu(int cpu) in enable_hotplug_cpu() argument
13 if (!cpu_present(cpu)) in enable_hotplug_cpu()
14 arch_register_cpu(cpu); in enable_hotplug_cpu()
16 set_cpu_present(cpu, true); in enable_hotplug_cpu()
19 static void disable_hotplug_cpu(int cpu) in disable_hotplug_cpu() argument
21 if (cpu_present(cpu)) in disable_hotplug_cpu()
22 arch_unregister_cpu(cpu); in disable_hotplug_cpu()
24 set_cpu_present(cpu, false); in disable_hotplug_cpu()
27 static int vcpu_online(unsigned int cpu) in vcpu_online() argument
32 sprintf(dir, "cpu/%u", cpu); in vcpu_online()
[all …]
/linux-4.1.27/arch/powerpc/include/asm/
smp.h
36 extern int cpu_to_chip_id(int cpu);
41 void (*message_pass)(int cpu, int msg);
43 void (*cause_ipi)(int cpu, unsigned long data);
66 void generic_cpu_die(unsigned int cpu);
67 void generic_set_cpu_dead(unsigned int cpu);
68 void generic_set_cpu_up(unsigned int cpu);
69 int generic_check_cpu_restart(unsigned int cpu);
79 #define raw_smp_processor_id() (current_thread_info()->cpu)
82 static inline int get_hard_smp_processor_id(int cpu) in get_hard_smp_processor_id() argument
84 return smp_hw_index[cpu]; in get_hard_smp_processor_id()
[all …]
cell-pmu.h
79 extern u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
80 extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
81 extern u32 cbe_read_ctr(u32 cpu, u32 ctr);
82 extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
84 extern u32 cbe_read_pm07_control(u32 cpu, u32 ctr);
85 extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
86 extern u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg);
87 extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
89 extern u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
90 extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
[all …]
cputhreads.h
67 int cpu_core_index_of_thread(int cpu);
70 static inline int cpu_core_index_of_thread(int cpu) { return cpu; } in cpu_core_index_of_thread() argument
74 static inline int cpu_thread_in_core(int cpu) in cpu_thread_in_core() argument
76 return cpu & (threads_per_core - 1); in cpu_thread_in_core()
79 static inline int cpu_thread_in_subcore(int cpu) in cpu_thread_in_subcore() argument
81 return cpu & (threads_per_subcore - 1); in cpu_thread_in_subcore()
84 static inline int cpu_first_thread_sibling(int cpu) in cpu_first_thread_sibling() argument
86 return cpu & ~(threads_per_core - 1); in cpu_first_thread_sibling()
89 static inline int cpu_last_thread_sibling(int cpu) in cpu_last_thread_sibling() argument
91 return cpu | (threads_per_core - 1); in cpu_last_thread_sibling()
/linux-4.1.27/arch/mips/kernel/
smp.c
81 static inline void set_cpu_sibling_map(int cpu) in set_cpu_sibling_map() argument
85 cpumask_set_cpu(cpu, &cpu_sibling_setup_map); in set_cpu_sibling_map()
89 if (cpu_data[cpu].package == cpu_data[i].package && in set_cpu_sibling_map()
90 cpu_data[cpu].core == cpu_data[i].core) { in set_cpu_sibling_map()
91 cpumask_set_cpu(i, &cpu_sibling_map[cpu]); in set_cpu_sibling_map()
92 cpumask_set_cpu(cpu, &cpu_sibling_map[i]); in set_cpu_sibling_map()
96 cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]); in set_cpu_sibling_map()
99 static inline void set_cpu_core_map(int cpu) in set_cpu_core_map() argument
103 cpumask_set_cpu(cpu, &cpu_core_setup_map); in set_cpu_core_map()
106 if (cpu_data[cpu].package == cpu_data[i].package) { in set_cpu_core_map()
[all …]
smp-bmips.c
51 static void bmips_set_reset_vec(int cpu, u32 val);
59 static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
60 static void bmips5000_send_ipi_single(int cpu, unsigned int action);
68 #define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift)) argument
69 #define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8)) argument
70 #define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8)) argument
71 #define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0)) argument
75 int i, cpu = 1, boot_cpu = 0; in bmips_smp_setup() local
141 __cpu_number_map[i] = cpu; in bmips_smp_setup()
142 __cpu_logical_map[cpu] = i; in bmips_smp_setup()
[all …]
cpu-probe.c
291 static inline void set_elf_platform(int cpu, const char *plat) in set_elf_platform() argument
293 if (cpu == 0) in set_elf_platform()
659 static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) in cpu_probe_legacy() argument
664 __cpu_name[cpu] = "R2000"; in cpu_probe_legacy()
676 __cpu_name[cpu] = "R3081"; in cpu_probe_legacy()
679 __cpu_name[cpu] = "R3000A"; in cpu_probe_legacy()
683 __cpu_name[cpu] = "R3000"; in cpu_probe_legacy()
697 __cpu_name[cpu] = "R4400PC"; in cpu_probe_legacy()
700 __cpu_name[cpu] = "R4000PC"; in cpu_probe_legacy()
726 __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC"; in cpu_probe_legacy()
[all …]
cevt-sb1250.c
44 unsigned int cpu = smp_processor_id(); in sibyte_set_mode() local
47 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); in sibyte_set_mode()
48 init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); in sibyte_set_mode()
72 unsigned int cpu = smp_processor_id(); in sibyte_next_event() local
75 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); in sibyte_next_event()
76 init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); in sibyte_next_event()
87 unsigned int cpu = smp_processor_id(); in sibyte_counter_handler() local
98 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); in sibyte_counter_handler()
112 unsigned int cpu = smp_processor_id(); in sb1250_clockevent_init() local
113 unsigned int irq = K_INT_TIMER_0 + cpu; in sb1250_clockevent_init()
[all …]
cevt-bcm1480.c
46 unsigned int cpu = smp_processor_id(); in sibyte_set_mode() local
49 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); in sibyte_set_mode()
50 init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); in sibyte_set_mode()
74 unsigned int cpu = smp_processor_id(); in sibyte_next_event() local
77 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); in sibyte_next_event()
78 init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); in sibyte_next_event()
89 unsigned int cpu = smp_processor_id(); in sibyte_counter_handler() local
100 cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); in sibyte_counter_handler()
114 unsigned int cpu = smp_processor_id(); in sb1480_clockevent_init() local
115 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; in sb1480_clockevent_init()
[all …]
/linux-4.1.27/Documentation/devicetree/bindings/arm/
topology.txt
16 The cpu nodes (bindings defined in [1]) represent the devices that
22 For instance in a system where CPUs support SMT, "cpu" nodes represent all
24 In systems where SMT is not supported "cpu" nodes represent all cores present
27 ARM topology bindings allow one to associate cpu nodes with hierarchical groups
36 If not stated otherwise, whenever a reference to a cpu node phandle is made its
37 value must point to a cpu node compliant with the cpu node bindings as
39 A topology description containing phandles to cpu nodes that are not compliant
43 2 - cpu-map node
46 The ARM CPU topology is defined within the cpu-map node, which is a direct
50 - cpu-map node
[all …]
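The axm5516-cpus.dtsi and hip04.dtsi hits above are instances of the cpu-map binding this file describes. As a minimal sketch of such a hierarchy, assuming illustrative CPU0..CPU3 phandle labels rather than any particular board:

	cpus {
		#address-cells = <1>;
		#size-cells = <0>;

		cpu-map {
			/* Two clusters of two cores each; every leaf "cpu"
			 * property must point to a compliant cpu node, as the
			 * binding text above requires. */
			cluster0 {
				core0 { cpu = <&CPU0>; };
				core1 { cpu = <&CPU1>; };
			};
			cluster1 {
				core0 { cpu = <&CPU2>; };
				core1 { cpu = <&CPU3>; };
			};
		};
	};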
cpus.txt
6 the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
7 defining properties for every cpu.
26 cpus and cpu node bindings definition
29 The ARM architecture, in accordance with the ePAPR, requires the cpus and cpu
34 Description: Container of cpu nodes
65 - cpu node
74 Definition: must be "cpu"
202 - cpu-release-addr
225 - cpu-idle-states
230 by this cpu [3].
[all …]
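A minimal cpu node satisfying the properties quoted above might look like the following sketch; the compatible string, reg value, and spin-table release address are assumptions for illustration, not from a real board (the thunder-88xx.dtsi hits further down show real cpu nodes with device_type = "cpu"):

	cpus {
		#address-cells = <2>;
		#size-cells = <0>;

		CPU0: cpu@0 {
			device_type = "cpu";            /* required to be "cpu" */
			compatible = "arm,cortex-a57";  /* assumed core type */
			reg = <0x0 0x0>;                /* MPIDR affinity bits */
			enable-method = "spin-table";
			cpu-release-addr = <0x0 0x8000fff8>; /* assumed address */
		};
	};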
/linux-4.1.27/arch/arm/kernel/
smp.c
94 int __cpu_up(unsigned int cpu, struct task_struct *idle) in __cpu_up() argument
119 ret = smp_ops.smp_boot_secondary(cpu, idle); in __cpu_up()
128 if (!cpu_online(cpu)) { in __cpu_up()
129 pr_crit("CPU%u: failed to come online\n", cpu); in __cpu_up()
133 pr_err("CPU%u: failed to boot: %d\n", cpu, ret); in __cpu_up()
164 static int platform_cpu_kill(unsigned int cpu) in platform_cpu_kill() argument
167 return smp_ops.cpu_kill(cpu); in platform_cpu_kill()
171 static int platform_cpu_disable(unsigned int cpu) in platform_cpu_disable() argument
174 return smp_ops.cpu_disable(cpu); in platform_cpu_disable()
181 return cpu == 0 ? -EPERM : 0; in platform_cpu_disable()
[all …]
topology.c
45 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) in arch_scale_cpu_capacity() argument
47 return per_cpu(cpu_scale, cpu); in arch_scale_cpu_capacity()
50 static void set_capacity_scale(unsigned int cpu, unsigned long capacity) in set_capacity_scale() argument
52 per_cpu(cpu_scale, cpu) = capacity; in set_capacity_scale()
78 #define cpu_capacity(cpu) __cpu_capacity[cpu] argument
97 int cpu = 0; in parse_dt_topology() local
102 for_each_possible_cpu(cpu) { in parse_dt_topology()
107 cn = of_get_cpu_node(cpu, NULL); in parse_dt_topology()
109 pr_err("missing device node for CPU %d\n", cpu); in parse_dt_topology()
137 cpu_capacity(cpu) = capacity; in parse_dt_topology()
[all …]
cpuidle.c
56 int cpu = smp_processor_id(); in arm_cpuidle_suspend() local
58 if (cpuidle_ops[cpu].suspend) in arm_cpuidle_suspend()
59 ret = cpuidle_ops[cpu].suspend(cpu, index); in arm_cpuidle_suspend()
97 static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu) in arm_cpuidle_read_ops() argument
113 cpuidle_ops[cpu] = *ops; /* structure copy */ in arm_cpuidle_read_ops()
137 int __init arm_cpuidle_init(int cpu) in arm_cpuidle_init() argument
139 struct device_node *cpu_node = of_cpu_device_node_get(cpu); in arm_cpuidle_init()
145 ret = arm_cpuidle_read_ops(cpu_node, cpu); in arm_cpuidle_init()
146 if (!ret && cpuidle_ops[cpu].init) in arm_cpuidle_init()
147 ret = cpuidle_ops[cpu].init(cpu_node, cpu); in arm_cpuidle_init()
/linux-4.1.27/arch/x86/kernel/
setup_percpu.c
69 unsigned int cpu; in pcpu_need_numa() local
71 for_each_possible_cpu(cpu) { in pcpu_need_numa()
72 int node = early_cpu_to_node(cpu); in pcpu_need_numa()
98 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, in pcpu_alloc_bootmem() argument
103 int node = early_cpu_to_node(cpu); in pcpu_alloc_bootmem()
109 cpu, node); in pcpu_alloc_bootmem()
111 cpu, size, __pa(ptr)); in pcpu_alloc_bootmem()
116 cpu, size, node, __pa(ptr)); in pcpu_alloc_bootmem()
127 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) in pcpu_fc_alloc() argument
129 return pcpu_alloc_bootmem(cpu, size, align); in pcpu_fc_alloc()
[all …]
smpboot.c
390 void set_cpu_sibling_map(int cpu) in set_cpu_sibling_map() argument
394 struct cpuinfo_x86 *c = &cpu_data(cpu); in set_cpu_sibling_map()
398 cpumask_set_cpu(cpu, cpu_sibling_setup_mask); in set_cpu_sibling_map()
401 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); in set_cpu_sibling_map()
402 cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); in set_cpu_sibling_map()
403 cpumask_set_cpu(cpu, cpu_core_mask(cpu)); in set_cpu_sibling_map()
411 if ((i == cpu) || (has_smt && match_smt(c, o))) in set_cpu_sibling_map()
412 link_mask(sibling, cpu, i); in set_cpu_sibling_map()
414 if ((i == cpu) || (has_mp && match_llc(c, o))) in set_cpu_sibling_map()
415 link_mask(llc_shared, cpu, i); in set_cpu_sibling_map()
[all …]
msr.c
76 int cpu = iminor(file_inode(file)); in msr_read() local
84 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); in msr_read()
104 int cpu = iminor(file_inode(file)); in msr_write() local
116 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); in msr_write()
130 int cpu = iminor(file_inode(file)); in msr_ioctl() local
143 err = rdmsr_safe_regs_on_cpu(cpu, regs); in msr_ioctl()
159 err = wrmsr_safe_regs_on_cpu(cpu, regs); in msr_ioctl()
176 unsigned int cpu = iminor(file_inode(file)); in msr_open() local
182 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in msr_open()
185 c = &cpu_data(cpu); in msr_open()
[all …]
cpuid.c
88 int cpu = iminor(file_inode(file)); in cpuid_read() local
99 err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); in cpuid_read()
116 unsigned int cpu; in cpuid_open() local
119 cpu = iminor(file_inode(file)); in cpuid_open()
120 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in cpuid_open()
123 c = &cpu_data(cpu); in cpuid_open()
140 static int cpuid_device_create(int cpu) in cpuid_device_create() argument
144 dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL, in cpuid_device_create()
145 "cpu%d", cpu); in cpuid_device_create()
149 static void cpuid_device_destroy(int cpu) in cpuid_device_destroy() argument
[all …]
topology.c
60 int __ref _debug_hotplug_cpu(int cpu, int action) in _debug_hotplug_cpu() argument
62 struct device *dev = get_cpu_device(cpu); in _debug_hotplug_cpu()
65 if (!cpu_is_hotpluggable(cpu)) in _debug_hotplug_cpu()
72 ret = cpu_down(cpu); in _debug_hotplug_cpu()
74 pr_info("CPU %u is now offline\n", cpu); in _debug_hotplug_cpu()
78 pr_debug("Can't offline CPU%d.\n", cpu); in _debug_hotplug_cpu()
81 ret = cpu_up(cpu); in _debug_hotplug_cpu()
86 pr_debug("Can't online CPU%d.\n", cpu); in _debug_hotplug_cpu()
140 per_cpu(cpu_devices, num).cpu.hotpluggable = 1; in arch_register_cpu()
142 return register_cpu(&per_cpu(cpu_devices, num).cpu, num); in arch_register_cpu()
[all …]
/linux-4.1.27/arch/arm64/boot/dts/cavium/
thunder-88xx.dtsi
65 cpu@000 {
66 device_type = "cpu";
71 cpu@001 {
72 device_type = "cpu";
77 cpu@002 {
78 device_type = "cpu";
83 cpu@003 {
84 device_type = "cpu";
89 cpu@004 {
90 device_type = "cpu";
[all …]
/linux-4.1.27/arch/arm/common/
mcpm_entry.c
25 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) in mcpm_set_entry_vector() argument
28 mcpm_entry_vectors[cluster][cpu] = val; in mcpm_set_entry_vector()
29 sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); in mcpm_set_entry_vector()
34 void mcpm_set_early_poke(unsigned cpu, unsigned cluster, in mcpm_set_early_poke() argument
37 unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0]; in mcpm_set_early_poke()
76 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster) in mcpm_cpu_power_up() argument
87 return platform_ops->power_up(cpu, cluster); in mcpm_cpu_power_up()
89 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in mcpm_cpu_power_up()
98 cpu_is_down = !mcpm_cpu_use_count[cluster][cpu]; in mcpm_cpu_power_up()
101 mcpm_cpu_use_count[cluster][cpu]++; in mcpm_cpu_power_up()
[all …]
mcpm_platsmp.c
22 static void cpu_to_pcpu(unsigned int cpu, in cpu_to_pcpu() argument
27 mpidr = cpu_logical_map(cpu); in cpu_to_pcpu()
32 static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) in mcpm_boot_secondary() argument
37 cpu_to_pcpu(cpu, &pcpu, &pcluster); in mcpm_boot_secondary()
40 __func__, cpu, pcpu, pcluster); in mcpm_boot_secondary()
47 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); in mcpm_boot_secondary()
52 static void mcpm_secondary_init(unsigned int cpu) in mcpm_secondary_init() argument
59 static int mcpm_cpu_kill(unsigned int cpu) in mcpm_cpu_kill() argument
63 cpu_to_pcpu(cpu, &pcpu, &pcluster); in mcpm_cpu_kill()
68 static int mcpm_cpu_disable(unsigned int cpu) in mcpm_cpu_disable() argument
[all …]
/linux-4.1.27/arch/s390/oprofile/
hwsampler.c
80 static int smp_ctl_ssctl_stop(int cpu) in smp_ctl_ssctl_stop() argument
86 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_ssctl_stop()
92 smp_call_function_single(cpu, execute_ssctl, &ep, 1); in smp_ctl_ssctl_stop()
95 printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); in smp_ctl_ssctl_stop()
100 smp_call_function_single(cpu, execute_qsi, &ep, 1); in smp_ctl_ssctl_stop()
110 static int smp_ctl_ssctl_deactivate(int cpu) in smp_ctl_ssctl_deactivate() argument
116 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_ssctl_deactivate()
122 smp_call_function_single(cpu, execute_ssctl, &ep, 1); in smp_ctl_ssctl_deactivate()
125 printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); in smp_ctl_ssctl_deactivate()
128 smp_call_function_single(cpu, execute_qsi, &ep, 1); in smp_ctl_ssctl_deactivate()
[all …]
/linux-4.1.27/arch/ia64/kernel/
err_inject.c
61 u32 cpu=dev->id; \
62 return sprintf(buf, "%lx\n", name[cpu]); \
70 unsigned int cpu=dev->id; \
71 name[cpu] = simple_strtoull(buf, NULL, 16); \
84 unsigned int cpu=dev->id; in show() local
88 printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu); in show()
89 printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]); in show()
90 printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]); in show()
92 err_data_buffer[cpu].data1, in show()
93 err_data_buffer[cpu].data2, in show()
[all …]
smpboot.c
463 do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) in do_boot_cpu() argument
468 Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); in do_boot_cpu()
470 set_brendez_area(cpu); in do_boot_cpu()
471 platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); in do_boot_cpu()
478 if (cpumask_test_cpu(cpu, &cpu_callin_map)) in do_boot_cpu()
485 if (!cpumask_test_cpu(cpu, &cpu_callin_map)) { in do_boot_cpu()
486 printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid); in do_boot_cpu()
487 ia64_cpu_to_sapicid[cpu] = -1; in do_boot_cpu()
488 set_cpu_online(cpu, false); /* was set in smp_callin() */ in do_boot_cpu()
510 int sapicid, cpu, i; in smp_build_cpu_map() local
[all …]
topology.c
51 sysfs_cpus[num].cpu.hotpluggable = 1; in arch_register_cpu()
54 return register_cpu(&sysfs_cpus[num].cpu, num); in arch_register_cpu()
60 unregister_cpu(&sysfs_cpus[num].cpu); in arch_unregister_cpu()
69 return register_cpu(&sysfs_cpus[num].cpu, num); in arch_register_cpu()
142 static void cache_shared_cpu_map_setup(unsigned int cpu, in cache_shared_cpu_map_setup() argument
149 if (cpu_data(cpu)->threads_per_core <= 1 && in cache_shared_cpu_map_setup()
150 cpu_data(cpu)->cores_per_socket <= 1) { in cache_shared_cpu_map_setup()
151 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); in cache_shared_cpu_map_setup()
164 if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id in cache_shared_cpu_map_setup()
177 static void cache_shared_cpu_map_setup(unsigned int cpu, in cache_shared_cpu_map_setup() argument
[all …]
smp.c
171 unsigned int cpu; in send_IPI_mask() local
173 for_each_cpu(cpu, mask) { in send_IPI_mask()
174 send_IPI_single(cpu, op); in send_IPI_mask()
210 unsigned int cpu, self_cpu; in kdump_smp_send_init() local
212 for_each_online_cpu(cpu) { in kdump_smp_send_init()
213 if (cpu != self_cpu) { in kdump_smp_send_init()
214 if(kdump_status[cpu] == 0) in kdump_smp_send_init()
215 platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0); in kdump_smp_send_init()
224 smp_send_reschedule (int cpu) in smp_send_reschedule() argument
226 platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); in smp_send_reschedule()
[all …]
numa.c
33 void map_cpu_to_node(int cpu, int nid) in map_cpu_to_node() argument
37 cpu_to_node_map[cpu] = 0; in map_cpu_to_node()
41 oldnid = cpu_to_node_map[cpu]; in map_cpu_to_node()
42 if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) { in map_cpu_to_node()
49 cpu_to_node_map[cpu] = nid; in map_cpu_to_node()
50 cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]); in map_cpu_to_node()
54 void unmap_cpu_from_node(int cpu, int nid) in unmap_cpu_from_node() argument
56 WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid])); in unmap_cpu_from_node()
57 WARN_ON(cpu_to_node_map[cpu] != nid); in unmap_cpu_from_node()
58 cpu_to_node_map[cpu] = 0; in unmap_cpu_from_node()
[all …]
/linux-4.1.27/arch/xtensa/include/asm/
mmu_context.h
34 #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu) argument
68 static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu) in get_new_mmu_context() argument
70 unsigned long asid = cpu_asid_cache(cpu); in get_new_mmu_context()
79 cpu_asid_cache(cpu) = asid; in get_new_mmu_context()
80 mm->context.asid[cpu] = asid; in get_new_mmu_context()
81 mm->context.cpu = cpu; in get_new_mmu_context()
84 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) in get_mmu_context() argument
91 unsigned long asid = mm->context.asid[cpu]; in get_mmu_context()
94 ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK)) in get_mmu_context()
95 get_new_mmu_context(mm, cpu); in get_mmu_context()
[all …]
/linux-4.1.27/tools/power/cpupower/utils/
cpufreq-info.c
57 unsigned int cpu, nr_cpus; in proc_cpufreq_output() local
66 for (cpu = 0; cpu < nr_cpus; cpu++) { in proc_cpufreq_output()
67 policy = cpufreq_get_policy(cpu); in proc_cpufreq_output()
71 if (cpufreq_get_hardware_limits(cpu, &min, &max)) { in proc_cpufreq_output()
78 cpu , policy->min, max ? min_pctg : 0, policy->max, in proc_cpufreq_output()
166 static int get_boost_mode(unsigned int cpu) in get_boost_mode() argument
176 ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states); in get_boost_mode()
179 " on CPU %d -- are you root?\n"), cpu); in get_boost_mode()
195 ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states, in get_boost_mode()
220 intel_turbo_ratio = msr_intel_get_turbo_ratio(cpu); in get_boost_mode()
[all …]
cpuidle-info.c
23 static void cpuidle_cpu_output(unsigned int cpu, int verbose) in cpuidle_cpu_output() argument
28 printf(_ ("Analyzing CPU %d:\n"), cpu); in cpuidle_cpu_output()
30 idlestates = sysfs_get_idlestate_count(cpu); in cpuidle_cpu_output()
32 printf(_("CPU %u: No idle states\n"), cpu); in cpuidle_cpu_output()
39 tmp = sysfs_get_idlestate_name(cpu, idlestate); in cpuidle_cpu_output()
51 int disabled = sysfs_is_idlestate_disabled(cpu, idlestate); in cpuidle_cpu_output()
55 tmp = sysfs_get_idlestate_name(cpu, idlestate); in cpuidle_cpu_output()
61 tmp = sysfs_get_idlestate_desc(cpu, idlestate); in cpuidle_cpu_output()
68 sysfs_get_idlestate_latency(cpu, idlestate)); in cpuidle_cpu_output()
70 sysfs_get_idlestate_usage(cpu, idlestate)); in cpuidle_cpu_output()
[all …]
cpuidle-set.c
34 unsigned int cpu = 0, idlestate = 0, idlestates = 0; in cmd_idle_set() local
105 for (cpu = bitmask_first(cpus_chosen); in cmd_idle_set()
106 cpu <= bitmask_last(cpus_chosen); cpu++) { in cmd_idle_set()
108 if (!bitmask_isbitset(cpus_chosen, cpu)) in cmd_idle_set()
111 if (sysfs_is_cpu_online(cpu) != 1) in cmd_idle_set()
114 idlestates = sysfs_get_idlestate_count(cpu); in cmd_idle_set()
120 ret = sysfs_idlestate_disable(cpu, idlestate, 1); in cmd_idle_set()
122 printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu); in cmd_idle_set()
125 idlestate, cpu); in cmd_idle_set()
130 idlestate, cpu); in cmd_idle_set()
[all …]
cpufreq-set.c
140 static int do_new_policy(unsigned int cpu, struct cpufreq_policy *new_pol) in do_new_policy() argument
142 struct cpufreq_policy *cur_pol = cpufreq_get_policy(cpu); in do_new_policy()
159 ret = cpufreq_set_policy(cpu, new_pol); in do_new_policy()
167 static int do_one_cpu(unsigned int cpu, struct cpufreq_policy *new_pol, in do_one_cpu() argument
172 return cpufreq_set_frequency(cpu, freq); in do_one_cpu()
179 return cpufreq_modify_policy_min(cpu, new_pol->min); in do_one_cpu()
181 return cpufreq_modify_policy_max(cpu, new_pol->max); in do_one_cpu()
183 return cpufreq_modify_policy_governor(cpu, in do_one_cpu()
188 return do_new_policy(cpu, new_pol); in do_one_cpu()
200 unsigned int cpu; in cmd_freq_set() local
[all …]
/linux-4.1.27/arch/x86/kernel/cpu/
perf_event_amd_uncore.c
35 int cpu; member
67 return *per_cpu_ptr(amd_uncore_nb, event->cpu); in event_to_amd_uncore()
69 return *per_cpu_ptr(amd_uncore_l2, event->cpu); in event_to_amd_uncore()
202 if (event->cpu < 0) in amd_uncore_event_init()
213 event->cpu = uncore->cpu; in amd_uncore_event_init()
287 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) in amd_uncore_alloc() argument
290 cpu_to_node(cpu)); in amd_uncore_alloc()
293 static int amd_uncore_cpu_up_prepare(unsigned int cpu) in amd_uncore_cpu_up_prepare() argument
298 uncore_nb = amd_uncore_alloc(cpu); in amd_uncore_cpu_up_prepare()
301 uncore_nb->cpu = cpu; in amd_uncore_cpu_up_prepare()
[all …]
/linux-4.1.27/arch/s390/kernel/
smp.c
62 static DEFINE_PER_CPU(struct cpu *, cpu_device);
157 int cpu; in pcpu_find_address() local
159 for_each_cpu(cpu, mask) in pcpu_find_address()
160 if (pcpu_devices[cpu].address == address) in pcpu_find_address()
161 return pcpu_devices + cpu; in pcpu_find_address()
178 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) in pcpu_alloc_lowcore() argument
199 lc->cpu_nr = cpu; in pcpu_alloc_lowcore()
200 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_alloc_lowcore()
206 lowcore_ptr[cpu] = lc; in pcpu_alloc_lowcore()
234 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) in pcpu_prepare_secondary() argument
[all …]
topology.c
48 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) in cpu_group_map() argument
52 cpumask_copy(&mask, cpumask_of(cpu)); in cpu_group_map()
56 if (cpumask_test_cpu(cpu, &info->mask)) in cpu_group_map()
62 static cpumask_t cpu_thread_map(unsigned int cpu) in cpu_thread_map() argument
67 cpumask_copy(&mask, cpumask_of(cpu)); in cpu_thread_map()
70 cpu -= cpu % (smp_cpu_mtid + 1); in cpu_thread_map()
72 if (cpu_present(cpu + i)) in cpu_thread_map()
73 cpumask_set_cpu(cpu + i, &mask); in cpu_thread_map()
152 add_cpus_to_mask(&tle->cpu, book, socket, 0); in __tl_to_masks_generic()
177 socket = add_cpus_to_mask(&tle->cpu, book, socket, 1); in __tl_to_masks_z10()
[all …]
/linux-4.1.27/arch/sh/kernel/
Dsmp.c48 static inline void smp_store_cpu_info(unsigned int cpu) in smp_store_cpu_info() argument
50 struct sh_cpuinfo *c = cpu_data + cpu; in smp_store_cpu_info()
59 unsigned int cpu = smp_processor_id(); in smp_prepare_cpus() local
62 current_thread_info()->cpu = cpu; in smp_prepare_cpus()
72 unsigned int cpu = smp_processor_id(); in smp_prepare_boot_cpu() local
74 __cpu_number_map[0] = cpu; in smp_prepare_boot_cpu()
75 __cpu_logical_map[0] = cpu; in smp_prepare_boot_cpu()
77 set_cpu_online(cpu, true); in smp_prepare_boot_cpu()
78 set_cpu_possible(cpu, true); in smp_prepare_boot_cpu()
80 per_cpu(cpu_state, cpu) = CPU_ONLINE; in smp_prepare_boot_cpu()
[all …]
Dirq.c118 void irq_ctx_init(int cpu) in irq_ctx_init() argument
122 if (hardirq_ctx[cpu]) in irq_ctx_init()
125 irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE]; in irq_ctx_init()
127 irqctx->tinfo.cpu = cpu; in irq_ctx_init()
131 hardirq_ctx[cpu] = irqctx; in irq_ctx_init()
133 irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE]; in irq_ctx_init()
135 irqctx->tinfo.cpu = cpu; in irq_ctx_init()
139 softirq_ctx[cpu] = irqctx; in irq_ctx_init()
142 cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); in irq_ctx_init()
145 void irq_ctx_exit(int cpu) in irq_ctx_exit() argument
[all …]
/linux-4.1.27/arch/tile/kernel/
Dsmpboot.c41 int cpu = smp_processor_id(); in smp_prepare_boot_cpu() local
42 set_cpu_online(cpu, 1); in smp_prepare_boot_cpu()
43 set_cpu_present(cpu, 1); in smp_prepare_boot_cpu()
59 int cpu, cpu_count; in smp_prepare_cpus() local
62 current_thread_info()->cpu = boot_cpu; in smp_prepare_cpus()
85 for (cpu = 0; cpu < NR_CPUS; ++cpu) { in smp_prepare_cpus()
88 if (cpu == boot_cpu) in smp_prepare_cpus()
91 if (!cpu_possible(cpu)) { in smp_prepare_cpus()
97 per_cpu(boot_sp, cpu) = 0; in smp_prepare_cpus()
98 per_cpu(boot_pc, cpu) = (unsigned long) smp_nap; in smp_prepare_cpus()
[all …]
Dsmp.c63 void send_IPI_single(int cpu, int tag) in send_IPI_single() argument
66 .y = cpu / smp_width, in send_IPI_single()
67 .x = cpu % smp_width, in send_IPI_single()
76 int cpu; in send_IPI_many() local
79 for_each_cpu(cpu, mask) { in send_IPI_many()
81 BUG_ON(cpu == my_cpu); in send_IPI_many()
83 r->y = cpu / smp_width; in send_IPI_many()
84 r->x = cpu % smp_width; in send_IPI_many()
222 int cpu = smp_processor_id(); in ipi_init() local
223 HV_Recipient recip = { .y = cpu_y(cpu), .x = cpu_x(cpu), in ipi_init()
[all …]
/linux-4.1.27/tools/power/cpupower/utils/helpers/
Dtopology.c23 static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result) in sysfs_topology_read_file() argument
30 cpu, fname); in sysfs_topology_read_file()
51 else if (top1->cpu < top2->cpu) in __compare()
53 else if (top1->cpu > top2->cpu) in __compare()
67 int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF); in get_cpu_topology() local
73 for (cpu = 0; cpu < cpus; cpu++) { in get_cpu_topology()
74 cpu_top->core_info[cpu].cpu = cpu; in get_cpu_topology()
75 cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); in get_cpu_topology()
77 cpu, in get_cpu_topology()
79 &(cpu_top->core_info[cpu].pkg)) < 0) in get_cpu_topology()
[all …]
Dsysfs.c48 int sysfs_is_cpu_online(unsigned int cpu) in sysfs_is_cpu_online() argument
58 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu); in sysfs_is_cpu_online()
67 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu); in sysfs_is_cpu_online()
104 unsigned int sysfs_idlestate_file_exists(unsigned int cpu, in sysfs_idlestate_file_exists() argument
113 cpu, idlestate, fname); in sysfs_idlestate_file_exists()
125 unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate, in sysfs_idlestate_read_file() argument
133 cpu, idlestate, fname); in sysfs_idlestate_read_file()
158 unsigned int sysfs_idlestate_write_file(unsigned int cpu, in sysfs_idlestate_write_file() argument
168 cpu, idlestate, fname); in sysfs_idlestate_write_file()
204 static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu, in sysfs_idlestate_get_one_value() argument
[all …]
Dhelpers.h91 extern int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info);
98 int cpu; member
126 extern int read_msr(int cpu, unsigned int idx, unsigned long long *val);
127 extern int write_msr(int cpu, unsigned int idx, unsigned long long val);
129 extern int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val);
130 extern int msr_intel_get_perf_bias(unsigned int cpu);
131 extern unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu);
147 extern int decode_pstates(unsigned int cpu, unsigned int cpu_family,
152 extern int cpufreq_has_boost_support(unsigned int cpu, int *support,
165 static inline int decode_pstates(unsigned int cpu, unsigned int cpu_family, in decode_pstates() argument
[all …]
Dsysfs.h10 extern unsigned int sysfs_idlestate_file_exists(unsigned int cpu,
14 extern int sysfs_is_cpu_online(unsigned int cpu);
16 extern int sysfs_is_idlestate_disabled(unsigned int cpu,
18 extern int sysfs_idlestate_disable(unsigned int cpu, unsigned int idlestate,
20 extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
22 extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
24 extern unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
26 extern char *sysfs_get_idlestate_name(unsigned int cpu,
28 extern char *sysfs_get_idlestate_desc(unsigned int cpu,
30 extern unsigned int sysfs_get_idlestate_count(unsigned int cpu);
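A usage sketch for the helpers declared above, assuming the cpupower helper objects are linked in. In the cpupower tree the name/desc helpers return heap-allocated strings, hence the free():

#include <stdio.h>
#include <stdlib.h>

static void dump_idlestates(unsigned int cpu)
{
        unsigned int i, n = sysfs_get_idlestate_count(cpu);

        for (i = 0; i < n; i++) {
                char *name = sysfs_get_idlestate_name(cpu, i);

                printf("cpu%u/state%u: %s usage=%lu time=%llu\n",
                       cpu, i, name ? name : "?",
                       sysfs_get_idlestate_usage(cpu, i),
                       sysfs_get_idlestate_time(cpu, i));
                free(name);
        }
}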
/linux-4.1.27/arch/powerpc/oprofile/
Dop_model_cell.c94 u16 cpu; /* Processor to modify */ member
219 pm_signal_local.cpu = node; in pm_rtas_reset_signals()
258 pm_signal_local[i].cpu = node; in pm_rtas_activate_signals()
373 static void write_pm_cntrl(int cpu) in write_pm_cntrl() argument
402 cbe_write_pm(cpu, pm_control, val); in write_pm_cntrl()
428 static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl) in enable_ctr() argument
432 cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]); in enable_ctr()
457 u32 cpu; in cell_virtual_cntr() local
492 for_each_online_cpu(cpu) { in cell_virtual_cntr()
493 if (cbe_get_hw_thread_id(cpu)) in cell_virtual_cntr()
[all …]
/linux-4.1.27/arch/blackfin/mach-common/
Dsmp.c81 static void ipi_cpu_stop(unsigned int cpu) in ipi_cpu_stop() argument
84 printk(KERN_CRIT "CPU%u: stopping\n", cpu); in ipi_cpu_stop()
88 set_cpu_online(cpu, false); in ipi_cpu_stop()
125 unsigned int cpu = smp_processor_id(); in ipi_handler_int0() local
127 platform_clear_ipi(cpu, IRQ_SUPPLE_0); in ipi_handler_int0()
134 int cpu = smp_processor_id(); in ipi_timer() local
135 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); in ipi_timer()
142 unsigned int cpu = smp_processor_id(); in ipi_handler_int1() local
146 platform_clear_ipi(cpu, IRQ_SUPPLE_1); in ipi_handler_int1()
165 ipi_cpu_stop(cpu); in ipi_handler_int1()
[all …]
/linux-4.1.27/arch/powerpc/platforms/pseries/
Dhotplug-cpu.c64 enum cpu_state_vals get_cpu_current_state(int cpu) in get_cpu_current_state() argument
66 return per_cpu(current_state, cpu); in get_cpu_current_state()
69 void set_cpu_current_state(int cpu, enum cpu_state_vals state) in set_cpu_current_state() argument
71 per_cpu(current_state, cpu) = state; in set_cpu_current_state()
74 enum cpu_state_vals get_preferred_offline_state(int cpu) in get_preferred_offline_state() argument
76 return per_cpu(preferred_offline_state, cpu); in get_preferred_offline_state()
79 void set_preferred_offline_state(int cpu, enum cpu_state_vals state) in set_preferred_offline_state() argument
81 per_cpu(preferred_offline_state, cpu) = state; in set_preferred_offline_state()
84 void set_default_offline_state(int cpu) in set_default_offline_state() argument
86 per_cpu(preferred_offline_state, cpu) = default_offline_state; in set_default_offline_state()
[all …]
Doffline_states.h13 extern enum cpu_state_vals get_cpu_current_state(int cpu);
14 extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
15 extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
16 extern void set_default_offline_state(int cpu);
18 static inline enum cpu_state_vals get_cpu_current_state(int cpu) in get_cpu_current_state() argument
23 static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state) in set_cpu_current_state() argument
27 static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state) in set_preferred_offline_state() argument
31 static inline void set_default_offline_state(int cpu) in set_default_offline_state() argument
36 extern enum cpu_state_vals get_preferred_offline_state(int cpu);
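The accessors above are thin wrappers around per-CPU variables; a minimal reconstruction of the pattern (kernel context, enum values illustrative):

#include <linux/percpu.h>

enum cpu_state_vals { CPU_STATE_OFFLINE, CPU_STATE_ONLINE }; /* illustrative */

static DEFINE_PER_CPU(enum cpu_state_vals, current_state);

enum cpu_state_vals get_cpu_current_state(int cpu)
{
        return per_cpu(current_state, cpu);     /* read any CPU's slot */
}

void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
        per_cpu(current_state, cpu) = state;
}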
/linux-4.1.27/arch/arm/mach-hisi/
Dhotplug.c79 static void set_cpu_hi3620(int cpu, bool enable) in set_cpu_hi3620() argument
85 if ((cpu == 2) || (cpu == 3)) in set_cpu_hi3620()
86 writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), in set_cpu_hi3620()
91 writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREEN); in set_cpu_hi3620()
96 writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS); in set_cpu_hi3620()
99 writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN); in set_cpu_hi3620()
102 if ((cpu == 2) || (cpu == 3)) in set_cpu_hi3620()
103 writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), in set_cpu_hi3620()
109 val &= ~(CPU0_WFI_MASK_CFG << cpu); in set_cpu_hi3620()
115 writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS); in set_cpu_hi3620()
[all …]
Dplatmcpm.c97 static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster) in hip04_mcpm_power_up() argument
104 if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) in hip04_mcpm_power_up()
109 if (hip04_cpu_table[cluster][cpu]) in hip04_mcpm_power_up()
123 data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \ in hip04_mcpm_power_up()
124 CORE_DEBUG_RESET_BIT(cpu); in hip04_mcpm_power_up()
135 hip04_cpu_table[cluster][cpu]++; in hip04_mcpm_power_up()
143 unsigned int mpidr, cpu, cluster; in hip04_mcpm_power_down() local
147 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); in hip04_mcpm_power_down()
150 __mcpm_cpu_going_down(cpu, cluster); in hip04_mcpm_power_down()
154 hip04_cpu_table[cluster][cpu]--; in hip04_mcpm_power_down()
[all …]
Dplatsmp.c26 void hi3xxx_set_cpu_jump(int cpu, void *jump_addr) in hi3xxx_set_cpu_jump() argument
28 cpu = cpu_logical_map(cpu); in hi3xxx_set_cpu_jump()
29 if (!cpu || !ctrl_base) in hi3xxx_set_cpu_jump()
31 writel_relaxed(virt_to_phys(jump_addr), ctrl_base + ((cpu - 1) << 2)); in hi3xxx_set_cpu_jump()
34 int hi3xxx_get_cpu_jump(int cpu) in hi3xxx_get_cpu_jump() argument
36 cpu = cpu_logical_map(cpu); in hi3xxx_get_cpu_jump()
37 if (!cpu || !ctrl_base) in hi3xxx_get_cpu_jump()
39 return readl_relaxed(ctrl_base + ((cpu - 1) << 2)); in hi3xxx_get_cpu_jump()
84 static int hi3xxx_boot_secondary(unsigned int cpu, struct task_struct *idle) in hi3xxx_boot_secondary() argument
86 hi3xxx_set_cpu(cpu, true); in hi3xxx_boot_secondary()
[all …]
Dcore.h6 extern void hi3xxx_set_cpu_jump(int cpu, void *jump_addr);
7 extern int hi3xxx_get_cpu_jump(int cpu);
11 extern void hi3xxx_cpu_die(unsigned int cpu);
12 extern int hi3xxx_cpu_kill(unsigned int cpu);
13 extern void hi3xxx_set_cpu(int cpu, bool enable);
16 extern void hix5hd2_set_cpu(int cpu, bool enable);
17 extern void hix5hd2_cpu_die(unsigned int cpu);
20 extern void hip01_set_cpu(int cpu, bool enable);
21 extern void hip01_cpu_die(unsigned int cpu);
/linux-4.1.27/arch/arm/mach-vexpress/
Dtc2_pm.c36 #define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu))) argument
37 #define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu))) argument
51 static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerup() argument
53 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in tc2_pm_cpu_powerup()
54 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) in tc2_pm_cpu_powerup()
56 ve_spc_set_resume_addr(cluster, cpu, in tc2_pm_cpu_powerup()
58 ve_spc_cpu_wakeup_irq(cluster, cpu, true); in tc2_pm_cpu_powerup()
71 static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerdown_prepare() argument
73 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in tc2_pm_cpu_powerdown_prepare()
74 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); in tc2_pm_cpu_powerdown_prepare()
[all …]
/linux-4.1.27/arch/arm/mach-imx/
Dclk-cpu.c32 struct clk_cpu *cpu = to_clk_cpu(hw); in clk_cpu_recalc_rate() local
34 return clk_get_rate(cpu->div); in clk_cpu_recalc_rate()
40 struct clk_cpu *cpu = to_clk_cpu(hw); in clk_cpu_round_rate() local
42 return clk_round_rate(cpu->pll, rate); in clk_cpu_round_rate()
48 struct clk_cpu *cpu = to_clk_cpu(hw); in clk_cpu_set_rate() local
52 ret = clk_set_parent(cpu->mux, cpu->step); in clk_cpu_set_rate()
57 ret = clk_set_rate(cpu->pll, rate); in clk_cpu_set_rate()
59 clk_set_parent(cpu->mux, cpu->pll); in clk_cpu_set_rate()
63 clk_set_parent(cpu->mux, cpu->pll); in clk_cpu_set_rate()
66 clk_set_rate(cpu->div, rate); in clk_cpu_set_rate()
[all …]
Dsrc.c85 void imx_enable_cpu(int cpu, bool enable) in imx_enable_cpu() argument
89 cpu = cpu_logical_map(cpu); in imx_enable_cpu()
90 mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1); in imx_enable_cpu()
94 val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1); in imx_enable_cpu()
99 void imx_set_cpu_jump(int cpu, void *jump_addr) in imx_set_cpu_jump() argument
101 cpu = cpu_logical_map(cpu); in imx_set_cpu_jump()
103 src_base + SRC_GPR1 + cpu * 8); in imx_set_cpu_jump()
106 u32 imx_get_cpu_arg(int cpu) in imx_get_cpu_arg() argument
108 cpu = cpu_logical_map(cpu); in imx_get_cpu_arg()
109 return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); in imx_get_cpu_arg()
[all …]
/linux-4.1.27/arch/sh/include/asm/
Dmmu_context.h35 #define asid_cache(cpu) (cpu_data[cpu].asid_cache) argument
38 #define cpu_context(cpu, mm) ((mm)->context.id[cpu]) argument
40 #define cpu_asid(cpu, mm) \ argument
41 (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
57 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) in get_mmu_context() argument
59 unsigned long asid = asid_cache(cpu); in get_mmu_context()
62 if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0) in get_mmu_context()
90 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_mmu_context()
112 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) in activate_context() argument
114 get_mmu_context(mm, cpu); in activate_context()
[all …]
Dsmp.h15 #define raw_smp_processor_id() (current_thread_info()->cpu)
19 #define cpu_number_map(cpu) __cpu_number_map[cpu] argument
23 #define cpu_logical_map(cpu) __cpu_logical_map[cpu] argument
40 void local_timer_setup(unsigned int cpu);
41 void local_timer_stop(unsigned int cpu);
43 void arch_send_call_function_single_ipi(int cpu);
47 void native_cpu_die(unsigned int cpu);
48 int native_cpu_disable(unsigned int cpu);
54 static inline void __cpu_die(unsigned int cpu) in __cpu_die() argument
58 mp_ops->cpu_die(cpu); in __cpu_die()
/linux-4.1.27/tools/power/cpupower/utils/idle_monitor/
Dmperf_monitor.c30 unsigned int cpu);
32 unsigned int cpu);
89 static int mperf_init_stats(unsigned int cpu) in mperf_init_stats() argument
94 ret = read_msr(cpu, MSR_APERF, &val); in mperf_init_stats()
95 aperf_previous_count[cpu] = val; in mperf_init_stats()
96 ret |= read_msr(cpu, MSR_MPERF, &val); in mperf_init_stats()
97 mperf_previous_count[cpu] = val; in mperf_init_stats()
98 is_valid[cpu] = !ret; in mperf_init_stats()
103 static int mperf_measure_stats(unsigned int cpu) in mperf_measure_stats() argument
108 ret = read_msr(cpu, MSR_APERF, &val); in mperf_measure_stats()
[all …]
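The read_msr() calls above ultimately go through the msr character device; a self-contained user-space sketch of that read path (requires the msr driver loaded and root):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int msr_read(int cpu, unsigned int idx, uint64_t *val)
{
        char path[64];
        int fd;

        snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        /* the file offset selects the MSR index */
        if (pread(fd, val, sizeof(*val), idx) != sizeof(*val)) {
                close(fd);
                return -1;
        }
        return close(fd);
}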
Dsnb_idle.c28 unsigned int cpu);
62 unsigned int cpu) in snb_get_count() argument
82 if (read_msr(cpu, msr, val)) in snb_get_count()
88 unsigned int cpu) in snb_get_count_percent() argument
92 if (!is_valid[cpu]) in snb_get_count_percent()
96 (current_count[id][cpu] - previous_count[id][cpu])) / in snb_get_count_percent()
100 snb_cstates[id].name, previous_count[id][cpu], in snb_get_count_percent()
101 current_count[id][cpu], cpu); in snb_get_count_percent()
106 current_count[id][cpu] - previous_count[id][cpu], in snb_get_count_percent()
107 *percent, cpu); in snb_get_count_percent()
[all …]
Dhsw_ext_idle.c30 unsigned int cpu);
65 unsigned int cpu) in hsw_ext_get_count() argument
85 if (read_msr(cpu, msr, val)) in hsw_ext_get_count()
91 unsigned int cpu) in hsw_ext_get_count_percent() argument
95 if (!is_valid[cpu]) in hsw_ext_get_count_percent()
99 (current_count[id][cpu] - previous_count[id][cpu])) / in hsw_ext_get_count_percent()
103 hsw_ext_cstates[id].name, previous_count[id][cpu], in hsw_ext_get_count_percent()
104 current_count[id][cpu], cpu); in hsw_ext_get_count_percent()
109 current_count[id][cpu] - previous_count[id][cpu], in hsw_ext_get_count_percent()
110 *percent, cpu); in hsw_ext_get_count_percent()
[all …]
Dnhm_idle.c31 unsigned int cpu);
73 unsigned int cpu) in nhm_get_count() argument
96 if (read_msr(cpu, msr, val)) in nhm_get_count()
103 unsigned int cpu) in nhm_get_count_percent() argument
107 if (!is_valid[cpu]) in nhm_get_count_percent()
111 (current_count[id][cpu] - previous_count[id][cpu])) / in nhm_get_count_percent()
115 nhm_cstates[id].name, previous_count[id][cpu], in nhm_get_count_percent()
116 current_count[id][cpu], cpu); in nhm_get_count_percent()
121 current_count[id][cpu] - previous_count[id][cpu], in nhm_get_count_percent()
122 *percent, cpu); in nhm_get_count_percent()
[all …]
Dcpuidle_sysfs.c28 unsigned int cpu) in cpuidle_get_count_percent() argument
30 unsigned long long statediff = current_count[cpu][id] in cpuidle_get_count_percent()
31 - previous_count[cpu][id]; in cpuidle_get_count_percent()
33 cpuidle_cstates[id].name, timediff, *percent, cpu); in cpuidle_get_count_percent()
41 cpuidle_cstates[id].name, timediff, statediff, *percent, cpu); in cpuidle_get_count_percent()
48 int cpu, state; in cpuidle_start() local
50 for (cpu = 0; cpu < cpu_count; cpu++) { in cpuidle_start()
53 previous_count[cpu][state] = in cpuidle_start()
54 sysfs_get_idlestate_time(cpu, state); in cpuidle_start()
56 cpu, state, previous_count[cpu][state]); in cpuidle_start()
[all …]
Damd_fam14h_idle.c47 unsigned int cpu);
49 unsigned int cpu);
100 unsigned int cpu) in amd_fam14h_get_pci_info() argument
125 static int amd_fam14h_init(cstate_t *state, unsigned int cpu) in amd_fam14h_init() argument
130 ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu); in amd_fam14h_init()
151 (unsigned int) val, cpu); in amd_fam14h_init()
155 previous_count[state->id][cpu] = 0; in amd_fam14h_init()
160 static int amd_fam14h_disable(cstate_t *state, unsigned int cpu) in amd_fam14h_disable() argument
165 ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu); in amd_fam14h_disable()
182 current_count[state->id][cpu] = val; in amd_fam14h_disable()
[all …]
/linux-4.1.27/arch/mips/sibyte/bcm1480/
Dirq.c55 void bcm1480_mask_irq(int cpu, int irq) in bcm1480_mask_irq() argument
66 …cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_sp… in bcm1480_mask_irq()
68 …____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_sp… in bcm1480_mask_irq()
72 void bcm1480_unmask_irq(int cpu, int irq) in bcm1480_unmask_irq() argument
83 …cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_sp… in bcm1480_unmask_irq()
85 …____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_sp… in bcm1480_unmask_irq()
94 int i = 0, old_cpu, cpu, int_on, k; in bcm1480_set_affinity() local
101 cpu = cpu_logical_map(i); in bcm1480_set_affinity()
121 bcm1480_irq_owner[irq] = cpu; in bcm1480_set_affinity()
124 …cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BC… in bcm1480_set_affinity()
[all …]
/linux-4.1.27/arch/x86/include/asm/
Dsmp.h40 static inline struct cpumask *cpu_sibling_mask(int cpu) in cpu_sibling_mask() argument
42 return per_cpu(cpu_sibling_map, cpu); in cpu_sibling_mask()
45 static inline struct cpumask *cpu_core_mask(int cpu) in cpu_core_mask() argument
47 return per_cpu(cpu_core_map, cpu); in cpu_core_mask()
50 static inline struct cpumask *cpu_llc_shared_mask(int cpu) in cpu_llc_shared_mask() argument
52 return per_cpu(cpu_llc_shared_map, cpu); in cpu_llc_shared_mask()
72 void (*smp_send_reschedule)(int cpu);
74 int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
76 void (*cpu_die)(unsigned int cpu);
80 void (*send_call_func_single_ipi)(int cpu);
[all …]
Dtopology.h57 extern int __cpu_to_node(int cpu);
60 extern int early_cpu_to_node(int cpu);
65 static inline int early_cpu_to_node(int cpu) in early_cpu_to_node() argument
67 return early_per_cpu(x86_cpu_to_node_map, cpu); in early_cpu_to_node()
109 static inline int early_cpu_to_node(int cpu) in early_cpu_to_node() argument
120 extern const struct cpumask *cpu_coregroup_mask(int cpu);
122 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) argument
123 #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) argument
126 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) argument
127 #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) argument
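A short sketch (kernel context) of walking the macros above to print each online CPU's package and core ids:

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/topology.h>

static void dump_packages(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                pr_info("cpu%d: package %d core %d\n", cpu,
                        topology_physical_package_id(cpu),
                        topology_core_id(cpu));
}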
Dnuma.h41 extern int numa_cpu_node(int cpu);
48 static inline int numa_cpu_node(int cpu) in numa_cpu_node() argument
59 extern void numa_set_node(int cpu, int node);
60 extern void numa_clear_node(int cpu);
62 extern void numa_add_cpu(int cpu);
63 extern void numa_remove_cpu(int cpu);
65 static inline void numa_set_node(int cpu, int node) { } in numa_set_node() argument
66 static inline void numa_clear_node(int cpu) { } in numa_clear_node() argument
68 static inline void numa_add_cpu(int cpu) { } in numa_add_cpu() argument
69 static inline void numa_remove_cpu(int cpu) { } in numa_remove_cpu() argument
[all …]
/linux-4.1.27/arch/s390/include/asm/
Dtopology.h7 struct cpu;
23 #define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id) argument
24 #define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id) argument
25 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask) argument
26 #define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id) argument
27 #define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask) argument
28 #define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id) argument
29 #define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask) argument
33 int topology_cpu_init(struct cpu *);
38 const struct cpumask *cpu_coregroup_mask(int cpu);
[all …]
Dsmp.h22 extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
24 extern void arch_send_call_function_single_ipi(int cpu);
31 extern int smp_store_status(int cpu);
32 extern int smp_vcpu_scheduled(int cpu);
33 extern void smp_yield_cpu(int cpu);
34 extern void smp_cpu_set_polarization(int cpu, int val);
35 extern int smp_cpu_get_polarization(int cpu);
53 static inline int smp_store_status(int cpu) { return 0; } in smp_store_status() argument
54 static inline int smp_vcpu_scheduled(int cpu) { return 1; } in smp_vcpu_scheduled() argument
55 static inline void smp_yield_cpu(int cpu) { } in smp_yield_cpu() argument
[all …]
/linux-4.1.27/arch/blackfin/kernel/cplb-mpu/
Dcplbinit.c22 void __init generate_cplb_tables_cpu(unsigned int cpu) in generate_cplb_tables_cpu() argument
45 dcplb_tbl[cpu][i_d].addr = 0; in generate_cplb_tables_cpu()
46 dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; in generate_cplb_tables_cpu()
48 icplb_tbl[cpu][i_i].addr = 0; in generate_cplb_tables_cpu()
49 icplb_tbl[cpu][i_i++].data = CPLB_VALID | i_cache | CPLB_USER_RD | PAGE_SIZE_1KB; in generate_cplb_tables_cpu()
57 dcplb_tbl[cpu][i_d].addr = addr; in generate_cplb_tables_cpu()
58 dcplb_tbl[cpu][i_d++].data = d_data; in generate_cplb_tables_cpu()
59 icplb_tbl[cpu][i_i].addr = addr; in generate_cplb_tables_cpu()
60 icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); in generate_cplb_tables_cpu()
66 dcplb_tbl[cpu][i_d].addr = addr; in generate_cplb_tables_cpu()
[all …]
Dcplbmgr.c68 MGR_ATTR static int evict_one_icplb(unsigned int cpu) in evict_one_icplb() argument
72 if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0) in evict_one_icplb()
74 i = first_switched_icplb + icplb_rr_index[cpu]; in evict_one_icplb()
77 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb; in evict_one_icplb()
79 icplb_rr_index[cpu]++; in evict_one_icplb()
83 MGR_ATTR static int evict_one_dcplb(unsigned int cpu) in evict_one_dcplb() argument
87 if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0) in evict_one_dcplb()
89 i = first_switched_dcplb + dcplb_rr_index[cpu]; in evict_one_dcplb()
92 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb; in evict_one_dcplb()
94 dcplb_rr_index[cpu]++; in evict_one_dcplb()
[all …]
/linux-4.1.27/arch/mips/include/asm/
Dmmu_context.h85 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) argument
86 #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) argument
87 #define asid_cache(cpu) (cpu_data[cpu].asid_cache) argument
102 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) in get_new_mmu_context() argument
105 unsigned long asid = asid_cache(cpu); in get_new_mmu_context()
119 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_new_mmu_context()
142 unsigned int cpu = smp_processor_id(); in switch_mm() local
148 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) in switch_mm()
149 get_new_mmu_context(next, cpu); in switch_mm()
150 write_c0_entryhi(cpu_asid(cpu, next)); in switch_mm()
[all …]
Dsmp.h28 #define raw_smp_processor_id() (current_thread_info()->cpu)
33 #define cpu_number_map(cpu) __cpu_number_map[cpu] argument
37 #define cpu_logical_map(cpu) __cpu_logical_map[cpu] argument
61 static inline void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
65 mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); in smp_send_reschedule()
76 static inline void __cpu_die(unsigned int cpu) in __cpu_die() argument
80 mp_ops->cpu_die(cpu); in __cpu_die()
88 static inline void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
92 mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION); in arch_send_call_function_single_ipi()
Dtopology.h15 #define topology_physical_package_id(cpu) (cpu_data[cpu].package) argument
16 #define topology_core_id(cpu) (cpu_data[cpu].core) argument
17 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) argument
18 #define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) argument
/linux-4.1.27/arch/mips/sgi-ip27/
Dip27-irq-pci.c64 static inline int alloc_level(int cpu, int irq) in alloc_level() argument
66 struct hub_data *hub = hub_data(cpu_to_node(cpu)); in alloc_level()
67 struct slice_data *si = cpu_data[cpu].data; in alloc_level()
72 panic("Cpu %d flooded with devices", cpu); in alloc_level()
82 int cpu, i; in find_level() local
84 for_each_online_cpu(cpu) { in find_level()
85 struct slice_data *si = cpu_data[cpu].data; in find_level()
89 *cpunum = cpu; in find_level()
98 static int intr_connect_level(int cpu, int bit) in intr_connect_level() argument
100 nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); in intr_connect_level()
[all …]
/linux-4.1.27/drivers/cpufreq/
Dintel_pstate.c101 int cpu; member
229 static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) in intel_pstate_busy_pid_reset() argument
231 pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct); in intel_pstate_busy_pid_reset()
232 pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct); in intel_pstate_busy_pid_reset()
233 pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct); in intel_pstate_busy_pid_reset()
235 pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0); in intel_pstate_busy_pid_reset()
240 unsigned int cpu; in intel_pstate_reset_all_pid() local
242 for_each_online_cpu(cpu) { in intel_pstate_reset_all_pid()
243 if (all_cpu_data[cpu]) in intel_pstate_reset_all_pid()
244 intel_pstate_busy_pid_reset(all_cpu_data[cpu]); in intel_pstate_reset_all_pid()
[all …]
Dspeedstep-centrino.c234 struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); in centrino_cpu_init_table() local
238 if (centrino_verify_cpu_id(cpu, model->cpu_id) && in centrino_cpu_init_table()
240 strcmp(cpu->x86_model_id, model->model_name) == 0)) in centrino_cpu_init_table()
247 cpu->x86_model_id); in centrino_cpu_init_table()
254 cpu->x86_model_id); in centrino_cpu_init_table()
259 per_cpu(centrino_model, policy->cpu) = model; in centrino_cpu_init_table()
285 static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe) in extract_clock() argument
294 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || in extract_clock()
295 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || in extract_clock()
296 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { in extract_clock()
[all …]
Dppc_cbe_cpufreq.c50 static int set_pmode(unsigned int cpu, unsigned int slow_mode) in set_pmode() argument
55 rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode); in set_pmode()
57 rc = cbe_cpufreq_set_pmode(cpu, slow_mode); in set_pmode()
59 pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu)); in set_pmode()
74 struct device_node *cpu; in cbe_cpufreq_cpu_init() local
76 cpu = of_get_cpu_node(policy->cpu, NULL); in cbe_cpufreq_cpu_init()
78 if (!cpu) in cbe_cpufreq_cpu_init()
81 pr_debug("init cpufreq on CPU %d\n", policy->cpu); in cbe_cpufreq_cpu_init()
86 if (!cbe_get_cpu_pmd_regs(policy->cpu) || in cbe_cpufreq_cpu_init()
87 !cbe_get_cpu_mic_tm_regs(policy->cpu)) { in cbe_cpufreq_cpu_init()
[all …]
Darm_big_little.c68 static inline int raw_cpu_to_cluster(int cpu) in raw_cpu_to_cluster() argument
70 return topology_physical_package_id(cpu); in raw_cpu_to_cluster()
73 static inline int cpu_to_cluster(int cpu) in cpu_to_cluster() argument
76 MAX_CLUSTERS : raw_cpu_to_cluster(cpu); in cpu_to_cluster()
98 static unsigned int clk_get_cpu_rate(unsigned int cpu) in clk_get_cpu_rate() argument
100 u32 cur_cluster = per_cpu(physical_cluster, cpu); in clk_get_cpu_rate()
107 pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu, in clk_get_cpu_rate()
113 static unsigned int bL_cpufreq_get_rate(unsigned int cpu) in bL_cpufreq_get_rate() argument
117 cpu)); in bL_cpufreq_get_rate()
119 return per_cpu(cpu_last_req_freq, cpu); in bL_cpufreq_get_rate()
[all …]
Dsh-cpufreq.c33 static unsigned int sh_cpufreq_get(unsigned int cpu) in sh_cpufreq_get() argument
35 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; in sh_cpufreq_get()
45 unsigned int cpu = policy->cpu; in sh_cpufreq_target() local
46 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in sh_cpufreq_target()
53 set_cpus_allowed_ptr(current, cpumask_of(cpu)); in sh_cpufreq_target()
55 BUG_ON(smp_processor_id() != cpu); in sh_cpufreq_target()
57 dev = get_cpu_device(cpu); in sh_cpufreq_target()
67 freqs.old = sh_cpufreq_get(cpu); in sh_cpufreq_target()
83 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); in sh_cpufreq_verify()
101 unsigned int cpu = policy->cpu; in sh_cpufreq_cpu_init() local
[all …]
Dp4-clockmod.c55 static unsigned int cpufreq_p4_get(unsigned int cpu);
57 static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) in cpufreq_p4_setdc() argument
64 rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); in cpufreq_p4_setdc()
67 pr_debug("CPU#%d currently thermal throttled\n", cpu); in cpufreq_p4_setdc()
69 if (has_N44_O17_errata[cpu] && in cpufreq_p4_setdc()
73 rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); in cpufreq_p4_setdc()
75 pr_debug("CPU#%d disabling modulation\n", cpu); in cpufreq_p4_setdc()
76 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); in cpufreq_p4_setdc()
79 cpu, ((125 * newstate) / 10)); in cpufreq_p4_setdc()
87 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h); in cpufreq_p4_setdc()
[all …]
/linux-4.1.27/arch/powerpc/platforms/cell/
Dpmu.c49 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
50 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
58 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
65 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
74 u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr) in cbe_read_phys_ctr() argument
93 void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val) in cbe_write_phys_ctr() argument
105 pm_ctrl = cbe_read_pm(cpu, pm_control); in cbe_write_phys_ctr()
111 cbe_write_pm(cpu, pm_control, pm_ctrl); in cbe_write_phys_ctr()
113 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); in cbe_write_phys_ctr()
126 u32 cbe_read_ctr(u32 cpu, u32 ctr) in cbe_read_ctr() argument
[all …]
Dcbe_regs.c90 struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu) in cbe_get_cpu_pmd_regs() argument
92 struct cbe_regs_map *map = cbe_thread_map[cpu].regs; in cbe_get_cpu_pmd_regs()
107 struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu) in cbe_get_cpu_pmd_shadow_regs() argument
109 struct cbe_regs_map *map = cbe_thread_map[cpu].regs; in cbe_get_cpu_pmd_shadow_regs()
123 struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu) in cbe_get_cpu_iic_regs() argument
125 struct cbe_regs_map *map = cbe_thread_map[cpu].regs; in cbe_get_cpu_iic_regs()
139 struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu) in cbe_get_cpu_mic_tm_regs() argument
141 struct cbe_regs_map *map = cbe_thread_map[cpu].regs; in cbe_get_cpu_mic_tm_regs()
148 u32 cbe_get_hw_thread_id(int cpu) in cbe_get_hw_thread_id() argument
150 return cbe_thread_map[cpu].thread_id; in cbe_get_hw_thread_id()
[all …]
/linux-4.1.27/arch/metag/kernel/
Dsmp.c216 int __cpu_up(unsigned int cpu, struct task_struct *idle) in __cpu_up() argument
218 unsigned int thread = cpu_2_hwthread_id[cpu]; in __cpu_up()
246 if (!cpu_online(cpu)) in __cpu_up()
253 pr_crit("CPU%u: processor failed to boot\n", cpu); in __cpu_up()
270 unsigned int cpu = smp_processor_id(); in __cpu_disable() local
276 set_cpu_online(cpu, false); in __cpu_disable()
290 clear_tasks_mm_cpumask(cpu); in __cpu_disable()
299 void __cpu_die(unsigned int cpu) in __cpu_die() argument
301 if (!cpu_wait_death(cpu, 1)) in __cpu_die()
302 pr_err("CPU%u: unable to kill\n", cpu); in __cpu_die()
[all …]
/linux-4.1.27/arch/alpha/kernel/
Dsmp.c201 struct percpu_struct *cpu; in send_secondary_console_msg() local
206 cpu = (struct percpu_struct *) in send_secondary_console_msg()
217 *(unsigned int *)&cpu->ipc_buffer[0] = len; in send_secondary_console_msg()
218 cp1 = (char *) &cpu->ipc_buffer[1]; in send_secondary_console_msg()
242 struct percpu_struct *cpu; in recv_secondary_console_msg() local
255 cpu = (struct percpu_struct *) in recv_secondary_console_msg()
262 mycpu, i, cpu->halt_reason, cpu->flags)); in recv_secondary_console_msg()
264 cnt = cpu->ipc_buffer[0] >> 32; in recv_secondary_console_msg()
268 cp1 = (char *) &cpu->ipc_buffer[1]; in recv_secondary_console_msg()
293 struct percpu_struct *cpu; in secondary_cpu_start() local
[all …]
/linux-4.1.27/arch/x86/kernel/cpu/microcode/
Dcore.c137 static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig) in collect_cpu_info_on_target() argument
142 ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1); in collect_cpu_info_on_target()
149 static int collect_cpu_info(int cpu) in collect_cpu_info() argument
151 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; in collect_cpu_info()
156 ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig); in collect_cpu_info()
174 static int apply_microcode_on_target(int cpu) in apply_microcode_on_target() argument
179 ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1); in apply_microcode_on_target()
190 int cpu; in do_microcode_update() local
192 for_each_online_cpu(cpu) { in do_microcode_update()
193 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; in do_microcode_update()
[all …]
/linux-4.1.27/drivers/cpuidle/
Dcoupled.c302 int cpu = (unsigned long)info; in cpuidle_coupled_handle_poke() local
303 cpumask_set_cpu(cpu, &cpuidle_coupled_poked); in cpuidle_coupled_handle_poke()
304 cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending); in cpuidle_coupled_handle_poke()
319 static void cpuidle_coupled_poke(int cpu) in cpuidle_coupled_poke() argument
321 struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); in cpuidle_coupled_poke()
323 if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending)) in cpuidle_coupled_poke()
324 smp_call_function_single_async(cpu, csd); in cpuidle_coupled_poke()
337 int cpu; in cpuidle_coupled_poke_others() local
339 for_each_cpu(cpu, &coupled->coupled_cpus) in cpuidle_coupled_poke_others()
340 if (cpu != this_cpu && cpu_online(cpu)) in cpuidle_coupled_poke_others()
[all …]
/linux-4.1.27/arch/sh/kernel/cpu/sh4a/
Dsmp-shx3.c33 unsigned int cpu = hard_smp_processor_id(); in ipi_interrupt_handler() local
34 unsigned int offs = 4 * cpu; in ipi_interrupt_handler()
48 unsigned int cpu = 0; in shx3_smp_setup() local
51 init_cpu_possible(cpumask_of(cpu)); in shx3_smp_setup()
54 __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu)); in shx3_smp_setup()
88 static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point) in shx3_start_cpu() argument
91 __raw_writel(entry_point, RESET_REG(cpu)); in shx3_start_cpu()
93 __raw_writel(virt_to_phys(entry_point), RESET_REG(cpu)); in shx3_start_cpu()
95 if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) in shx3_start_cpu()
96 __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); in shx3_start_cpu()
[all …]
/linux-4.1.27/tools/perf/tests/
Dopen-syscall-all-cpus.c9 int err = -1, fd, cpu; in test__open_syscall_event_on_all_cpus() local
48 for (cpu = 0; cpu < cpus->nr; ++cpu) { in test__open_syscall_event_on_all_cpus()
49 unsigned int ncalls = nr_open_calls + cpu; in test__open_syscall_event_on_all_cpus()
56 if (cpus->map[cpu] >= CPU_SETSIZE) { in test__open_syscall_event_on_all_cpus()
57 pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); in test__open_syscall_event_on_all_cpus()
61 CPU_SET(cpus->map[cpu], &cpu_set); in test__open_syscall_event_on_all_cpus()
64 cpus->map[cpu], in test__open_syscall_event_on_all_cpus()
72 CPU_CLR(cpus->map[cpu], &cpu_set); in test__open_syscall_event_on_all_cpus()
87 for (cpu = 0; cpu < cpus->nr; ++cpu) { in test__open_syscall_event_on_all_cpus()
90 if (cpus->map[cpu] >= CPU_SETSIZE) in test__open_syscall_event_on_all_cpus()
[all …]
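The test above pins itself with cpu_set_t before counting per-CPU events; the same affinity idiom in isolation (user space, glibc):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int pin_to_cpu(int cpu)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(cpu, &set);             /* run only on this CPU */
        if (sched_setaffinity(0, sizeof(set), &set) < 0) {
                perror("sched_setaffinity");
                return -1;
        }
        return 0;
}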
/linux-4.1.27/arch/xtensa/kernel/
Dsmp.c102 unsigned int cpu = smp_processor_id(); in smp_prepare_boot_cpu() local
103 BUG_ON(cpu != 0); in smp_prepare_boot_cpu()
104 cpu_asid_cache(cpu) = ASID_USER_FIRST; in smp_prepare_boot_cpu()
117 unsigned int cpu = smp_processor_id(); in secondary_start_kernel() local
124 __func__, boot_secondary_processors, cpu); in secondary_start_kernel()
130 __func__, boot_secondary_processors, cpu); in secondary_start_kernel()
141 cpumask_set_cpu(cpu, mm_cpumask(mm)); in secondary_start_kernel()
149 notify_cpu_starting(cpu); in secondary_start_kernel()
152 local_timer_setup(cpu); in secondary_start_kernel()
154 set_cpu_online(cpu, true); in secondary_start_kernel()
[all …]
/linux-4.1.27/arch/hexagon/kernel/
Dsmp.c51 int cpu) in __handle_ipi() argument
96 int cpu = smp_processor_id(); in handle_ipi() local
97 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in handle_ipi()
101 __handle_ipi(&ops, ipi, cpu); in handle_ipi()
108 unsigned long cpu; in send_ipi() local
113 for_each_cpu(cpu, cpumask) { in send_ipi()
114 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in send_ipi()
118 retval = __vmintop_post(BASE_IPI_IRQ+cpu); in send_ipi()
122 BASE_IPI_IRQ+cpu); in send_ipi()
147 unsigned int cpu; in start_secondary() local
[all …]
/linux-4.1.27/drivers/oprofile/
Dnmi_timer_int.c36 static int nmi_timer_start_cpu(int cpu) in nmi_timer_start_cpu() argument
38 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_start_cpu()
41 event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL, in nmi_timer_start_cpu()
45 per_cpu(nmi_timer_events, cpu) = event; in nmi_timer_start_cpu()
54 static void nmi_timer_stop_cpu(int cpu) in nmi_timer_stop_cpu() argument
56 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_stop_cpu()
65 int cpu = (unsigned long)data; in nmi_timer_cpu_notifier() local
69 nmi_timer_start_cpu(cpu); in nmi_timer_cpu_notifier()
72 nmi_timer_stop_cpu(cpu); in nmi_timer_cpu_notifier()
84 int cpu; in nmi_timer_start() local
[all …]
Doprofile_perf.c39 u32 cpu = smp_processor_id(); in op_overflow_handler() local
42 if (per_cpu(perf_events, cpu)[id] == event) in op_overflow_handler()
49 "on cpu %u\n", cpu); in op_overflow_handler()
74 static int op_create_counter(int cpu, int event) in op_create_counter() argument
78 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) in op_create_counter()
82 cpu, NULL, in op_create_counter()
91 "on CPU %d\n", event, cpu); in op_create_counter()
95 per_cpu(perf_events, cpu)[event] = pevent; in op_create_counter()
100 static void op_destroy_counter(int cpu, int event) in op_destroy_counter() argument
102 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; in op_destroy_counter()
[all …]
/linux-4.1.27/tools/perf/scripts/python/
Dnetdev-times.py87 cpu = irq_list[0]['cpu']
101 (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
227 def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec): argument
230 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
233 def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec): argument
236 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
239 def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec): argument
242 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
245 def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, argument
247 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
[all …]
/linux-4.1.27/arch/blackfin/kernel/cplb-nompu/
Dcplbmgr.c39 static inline void write_dcplb_data(int cpu, int idx, unsigned long data, in write_dcplb_data() argument
48 dcplb_tbl[cpu][idx].addr = addr; in write_dcplb_data()
49 dcplb_tbl[cpu][idx].data = data; in write_dcplb_data()
53 static inline void write_icplb_data(int cpu, int idx, unsigned long data, in write_icplb_data() argument
62 icplb_tbl[cpu][idx].addr = addr; in write_icplb_data()
63 icplb_tbl[cpu][idx].data = data; in write_icplb_data()
74 static int evict_one_icplb(int cpu) in evict_one_icplb() argument
76 int i = first_switched_icplb + icplb_rr_index[cpu]; in evict_one_icplb()
79 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb; in evict_one_icplb()
81 icplb_rr_index[cpu]++; in evict_one_icplb()
[all …]
/linux-4.1.27/arch/nios2/kernel/
Dcpuinfo.c35 static inline u32 fcpu(struct device_node *cpu, const char *n) in fcpu() argument
39 of_property_read_u32(cpu, n, &val); in fcpu()
44 static inline u32 fcpu_has(struct device_node *cpu, const char *n) in fcpu_has() argument
46 return of_get_property(cpu, n, NULL) ? 1 : 0; in fcpu_has()
51 struct device_node *cpu; in setup_cpuinfo() local
55 cpu = of_find_node_by_type(NULL, "cpu"); in setup_cpuinfo()
56 if (!cpu) in setup_cpuinfo()
59 if (!fcpu_has(cpu, "altr,has-initda")) in setup_cpuinfo()
64 cpuinfo.cpu_clock_freq = fcpu(cpu, "clock-frequency"); in setup_cpuinfo()
66 str = of_get_property(cpu, "altr,implementation", &len); in setup_cpuinfo()
[all …]
/linux-4.1.27/arch/arm/mach-shmobile/
Dplatsmp-apmu.c70 static int apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu)) in apmu_wrap() argument
72 void __iomem *p = apmu_cpus[cpu].iomem; in apmu_wrap()
74 return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL; in apmu_wrap()
77 static void apmu_init_cpu(struct resource *res, int cpu, int bit) in apmu_init_cpu() argument
79 if ((cpu >= ARRAY_SIZE(apmu_cpus)) || apmu_cpus[cpu].iomem) in apmu_init_cpu()
82 apmu_cpus[cpu].iomem = ioremap_nocache(res->start, resource_size(res)); in apmu_init_cpu()
83 apmu_cpus[cpu].bit = bit; in apmu_init_cpu()
85 pr_debug("apmu ioremap %d %d %pr\n", cpu, bit, res); in apmu_init_cpu()
88 static void apmu_parse_cfg(void (*fn)(struct resource *res, int cpu, int bit), in apmu_parse_cfg() argument
133 int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle) in shmobile_smp_apmu_boot_secondary() argument
[all …]
/linux-4.1.27/arch/powerpc/sysdev/xics/
Dicp-native.c54 int cpu = smp_processor_id(); in icp_native_get_xirr() local
62 return in_be32(&icp_native_regs[cpu]->xirr.word); in icp_native_get_xirr()
67 int cpu = smp_processor_id(); in icp_native_set_xirr() local
69 out_be32(&icp_native_regs[cpu]->xirr.word, value); in icp_native_set_xirr()
74 int cpu = smp_processor_id(); in icp_native_set_cppr() local
76 out_8(&icp_native_regs[cpu]->xirr.bytes[0], value); in icp_native_set_cppr()
101 int cpu = smp_processor_id(); in icp_native_teardown_cpu() local
104 icp_native_set_qirr(cpu, 0xff); in icp_native_teardown_cpu()
146 static void icp_native_cause_ipi(int cpu, unsigned long data) in icp_native_cause_ipi() argument
148 kvmppc_set_host_ipi(cpu, 1); in icp_native_cause_ipi()
[all …]
/linux-4.1.27/lib/
Dcpu_rmap.c31 unsigned int cpu; in alloc_cpu_rmap() local
55 for_each_possible_cpu(cpu) { in alloc_cpu_rmap()
56 rmap->near[cpu].index = cpu % size; in alloc_cpu_rmap()
57 rmap->near[cpu].dist = CPU_RMAP_DIST_INF; in alloc_cpu_rmap()
97 static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu, in cpu_rmap_copy_neigh() argument
103 if (rmap->near[cpu].dist > dist && in cpu_rmap_copy_neigh()
105 rmap->near[cpu].index = rmap->near[neigh].index; in cpu_rmap_copy_neigh()
106 rmap->near[cpu].dist = dist; in cpu_rmap_copy_neigh()
117 unsigned int cpu; in debug_print_rmap() local
121 for_each_possible_cpu(cpu) { in debug_print_rmap()
[all …]
/linux-4.1.27/arch/blackfin/
DMakefile60 cpu-$(CONFIG_BF512) := bf512
61 cpu-$(CONFIG_BF514) := bf514
62 cpu-$(CONFIG_BF516) := bf516
63 cpu-$(CONFIG_BF518) := bf518
64 cpu-$(CONFIG_BF522) := bf522
65 cpu-$(CONFIG_BF523) := bf523
66 cpu-$(CONFIG_BF524) := bf524
67 cpu-$(CONFIG_BF525) := bf525
68 cpu-$(CONFIG_BF526) := bf526
69 cpu-$(CONFIG_BF527) := bf527
[all …]
/linux-4.1.27/arch/arm/mach-sunxi/
Dplatsmp.c24 #define CPUCFG_CPU_PWR_CLAMP_STATUS_REG(cpu) ((cpu) * 0x40 + 0x64) argument
25 #define CPUCFG_CPU_RST_CTRL_REG(cpu) (((cpu) + 1) * 0x40) argument
26 #define CPUCFG_CPU_CTRL_REG(cpu) (((cpu) + 1) * 0x40 + 0x04) argument
27 #define CPUCFG_CPU_STATUS_REG(cpu) (((cpu) + 1) * 0x40 + 0x08) argument
35 #define PRCM_CPU_PWR_CLAMP_REG(cpu) (((cpu) * 4) + 0x140) argument
71 static int sun6i_smp_boot_secondary(unsigned int cpu, in sun6i_smp_boot_secondary() argument
87 writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu)); in sun6i_smp_boot_secondary()
91 writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG); in sun6i_smp_boot_secondary()
95 writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG); in sun6i_smp_boot_secondary()
99 writel(0xff >> i, prcm_membase + PRCM_CPU_PWR_CLAMP_REG(cpu)); in sun6i_smp_boot_secondary()
[all …]
/linux-4.1.27/arch/powerpc/platforms/ps3/
Dsmp.c42 static void ps3_smp_message_pass(int cpu, int msg) in ps3_smp_message_pass() argument
52 virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; in ps3_smp_message_pass()
57 " (%d)\n", __func__, __LINE__, cpu, msg, result); in ps3_smp_message_pass()
62 int cpu; in ps3_smp_probe() local
64 for (cpu = 0; cpu < 2; cpu++) { in ps3_smp_probe()
66 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); in ps3_smp_probe()
69 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu); in ps3_smp_probe()
83 result = ps3_event_receive_port_setup(cpu, &virqs[i]); in ps3_smp_probe()
89 __func__, __LINE__, cpu, i, virqs[i]); in ps3_smp_probe()
96 ps3_register_ipi_irq(cpu, virqs[i]); in ps3_smp_probe()
[all …]
/linux-4.1.27/arch/arc/include/asm/
Dmmu_context.h50 #define asid_mm(mm, cpu) mm->context.asid[cpu] argument
51 #define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK) argument
54 #define asid_cpu(cpu) per_cpu(asid_cache, cpu) argument
62 const unsigned int cpu = smp_processor_id(); in get_new_mmu_context() local
77 if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK)) in get_new_mmu_context()
81 if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) { in get_new_mmu_context()
90 if (!asid_cpu(cpu)) in get_new_mmu_context()
91 asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE; in get_new_mmu_context()
95 asid_mm(mm, cpu) = asid_cpu(cpu); in get_new_mmu_context()
98 write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE); in get_new_mmu_context()
[all …]
/linux-4.1.27/arch/x86/platform/uv/
Duv_nmi.c197 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi) in uv_set_in_nmi() argument
202 atomic_set(&hub_nmi->cpu_owner, cpu); in uv_set_in_nmi()
204 atomic_set(&uv_nmi_cpu, cpu); in uv_set_in_nmi()
214 int cpu = smp_processor_id(); in uv_check_nmi() local
229 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi()
252 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi()
264 static inline void uv_clear_nmi(int cpu) in uv_clear_nmi() argument
268 if (cpu == atomic_read(&hub_nmi->cpu_owner)) { in uv_clear_nmi()
279 int cpu; in uv_nmi_nr_cpus_ping() local
281 for_each_cpu(cpu, uv_nmi_cpu_mask) in uv_nmi_nr_cpus_ping()
[all …]
Duv_time.c69 } cpu[1]; member
84 static void uv_rtc_send_IPI(int cpu) in uv_rtc_send_IPI() argument
89 apicid = cpu_physical_id(cpu); in uv_rtc_send_IPI()
112 static int uv_setup_intr(int cpu, u64 expires) in uv_setup_intr() argument
115 unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits; in uv_setup_intr()
116 int pnode = uv_cpu_to_pnode(cpu); in uv_setup_intr()
160 int cpu; in uv_rtc_allocate_timers() local
166 for_each_present_cpu(cpu) { in uv_rtc_allocate_timers()
167 int nid = cpu_to_node(cpu); in uv_rtc_allocate_timers()
168 int bid = uv_cpu_to_blade_id(cpu); in uv_rtc_allocate_timers()
[all …]
/linux-4.1.27/include/linux/clk/
Dtegra.h43 void (*wait_for_reset)(u32 cpu);
44 void (*put_in_reset)(u32 cpu);
45 void (*out_of_reset)(u32 cpu);
46 void (*enable_clock)(u32 cpu);
47 void (*disable_clock)(u32 cpu);
57 static inline void tegra_wait_cpu_in_reset(u32 cpu) in tegra_wait_cpu_in_reset() argument
62 tegra_cpu_car_ops->wait_for_reset(cpu); in tegra_wait_cpu_in_reset()
65 static inline void tegra_put_cpu_in_reset(u32 cpu) in tegra_put_cpu_in_reset() argument
70 tegra_cpu_car_ops->put_in_reset(cpu); in tegra_put_cpu_in_reset()
73 static inline void tegra_cpu_out_of_reset(u32 cpu) in tegra_cpu_out_of_reset() argument
[all …]
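The inline wrappers above all guard a dispatch through tegra_cpu_car_ops; the bare shape of that ops-table pattern, with illustrative names:

#include <linux/types.h>

struct car_ops {
        void (*enable_clock)(u32 cpu);
};

static struct car_ops *cpu_car_ops;     /* registered once by the SoC code */

static inline void cpu_enable_clock(u32 cpu)
{
        /* no-op until an ops table is registered, as in the wrappers above */
        if (cpu_car_ops && cpu_car_ops->enable_clock)
                cpu_car_ops->enable_clock(cpu);
}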
/linux-4.1.27/arch/powerpc/platforms/powernv/
Dsubcore.c153 int i, cpu = smp_processor_id(); in wait_for_sync_step() local
155 for (i = cpu + 1; i < cpu + threads_per_core; i++) in wait_for_sync_step()
178 int i, cpu; in unsplit_core() local
182 cpu = smp_processor_id(); in unsplit_core()
183 if (cpu_thread_in_core(cpu) != 0) { in unsplit_core()
187 per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT; in unsplit_core()
200 for (i = cpu + 1; i < cpu + threads_per_core; i++) in unsplit_core()
212 int i, cpu; in split_core() local
219 cpu = smp_processor_id(); in split_core()
220 if (cpu_thread_in_core(cpu) != 0) { in split_core()
[all …]
/linux-4.1.27/kernel/time/
Dtick-broadcast.c39 static void tick_broadcast_clear_oneshot(int cpu);
42 static inline void tick_broadcast_clear_oneshot(int cpu) { } in tick_broadcast_clear_oneshot() argument
158 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) in tick_device_uses_broadcast() argument
175 cpumask_set_cpu(cpu, tick_broadcast_mask); in tick_device_uses_broadcast()
187 cpumask_clear_cpu(cpu, tick_broadcast_mask); in tick_device_uses_broadcast()
195 if (!cpumask_test_cpu(cpu, tick_broadcast_on)) in tick_device_uses_broadcast()
196 cpumask_clear_cpu(cpu, tick_broadcast_mask); in tick_device_uses_broadcast()
208 tick_broadcast_clear_oneshot(cpu); in tick_device_uses_broadcast()
226 ret = cpumask_test_cpu(cpu, tick_broadcast_mask); in tick_device_uses_broadcast()
260 int cpu = smp_processor_id(); in tick_do_broadcast() local
[all …]
Dtick-common.c56 struct tick_device *tick_get_device(int cpu) in tick_get_device() argument
58 return &per_cpu(tick_cpu_device, cpu); in tick_get_device()
78 static void tick_periodic(int cpu) in tick_periodic() argument
80 if (tick_do_timer_cpu == cpu) { in tick_periodic()
100 int cpu = smp_processor_id(); in tick_handle_periodic() local
103 tick_periodic(cpu); in tick_handle_periodic()
126 tick_periodic(cpu); in tick_handle_periodic()
167 struct clock_event_device *newdev, int cpu, in tick_setup_device() argument
182 if (!tick_nohz_full_cpu(cpu)) in tick_setup_device()
183 tick_do_timer_cpu = cpu; in tick_setup_device()
[all …]
Dtick-sched.c44 struct tick_sched *tick_get_tick_sched(int cpu) in tick_get_tick_sched() argument
46 return &per_cpu(tick_cpu_sched, cpu); in tick_get_tick_sched()
114 int cpu = smp_processor_id(); in tick_sched_do_timer() local
125 && !tick_nohz_full_cpu(cpu)) in tick_sched_do_timer()
126 tick_do_timer_cpu = cpu; in tick_sched_do_timer()
130 if (tick_do_timer_cpu == cpu) in tick_sched_do_timer()
245 void tick_nohz_full_kick_cpu(int cpu) in tick_nohz_full_kick_cpu() argument
247 if (!tick_nohz_full_cpu(cpu)) in tick_nohz_full_kick_cpu()
250 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); in tick_nohz_full_kick_cpu()
314 unsigned int cpu = (unsigned long)hcpu; in tick_nohz_cpu_down_callback() local
[all …]
/linux-4.1.27/drivers/hv/
Dhv.c309 static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu) in hv_init_clockevent_device() argument
313 dev->cpumask = cpumask_of(cpu); in hv_init_clockevent_device()
330 int cpu; in hv_synic_alloc() local
332 for_each_online_cpu(cpu) { in hv_synic_alloc()
333 hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); in hv_synic_alloc()
334 if (hv_context.event_dpc[cpu] == NULL) { in hv_synic_alloc()
338 tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu); in hv_synic_alloc()
340 hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC); in hv_synic_alloc()
341 if (hv_context.clk_evt[cpu] == NULL) { in hv_synic_alloc()
345 hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu); in hv_synic_alloc()
[all …]
/linux-4.1.27/arch/mips/mti-malta/
Dmalta-amon.c19 int amon_cpu_avail(int cpu) in amon_cpu_avail() argument
23 if (cpu < 0 || cpu >= NCPULAUNCH) { in amon_cpu_avail()
24 pr_debug("avail: cpu%d is out of range\n", cpu); in amon_cpu_avail()
28 launch += cpu; in amon_cpu_avail()
30 pr_debug("avail: cpu%d is not ready\n", cpu); in amon_cpu_avail()
34 pr_debug("avail: too late.. cpu%d is already gone\n", cpu); in amon_cpu_avail()
41 int amon_cpu_start(int cpu, in amon_cpu_start() argument
48 if (!amon_cpu_avail(cpu)) in amon_cpu_start()
50 if (cpu == smp_processor_id()) { in amon_cpu_start()
51 pr_debug("launch: I am cpu%d!\n", cpu); in amon_cpu_start()
[all …]
/linux-4.1.27/arch/sparc/include/asm/
Dtopology_64.h8 static inline int cpu_to_node(int cpu) in cpu_to_node() argument
10 return numa_cpu_lookup_table[cpu]; in cpu_to_node()
41 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) argument
42 #define topology_core_id(cpu) (cpu_data(cpu).core_id) argument
43 #define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) argument
44 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) argument
49 static inline const struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
51 return &cpu_core_map[cpu]; in cpu_coregroup_mask()
/linux-4.1.27/arch/x86/kernel/apic/
Dx2apic_cluster.c21 static inline u32 x2apic_cluster(int cpu) in x2apic_cluster() argument
23 return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16; in x2apic_cluster()
31 unsigned int cpu, this_cpu; in __x2apic_send_IPI_mask() local
51 for_each_cpu(cpu, ipi_mask_ptr) { in __x2apic_send_IPI_mask()
54 cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu); in __x2apic_send_IPI_mask()
134 unsigned int cpu; in init_x2apic_ldr() local
139 for_each_online_cpu(cpu) { in init_x2apic_ldr()
140 if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) in init_x2apic_ldr()
142 cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); in init_x2apic_ldr()
143 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); in init_x2apic_ldr()
[all …]
Dx2apic_uv_x.c255 static void uv_send_IPI_one(int cpu, int vector) in uv_send_IPI_one() argument
260 apicid = per_cpu(x86_cpu_to_apicid, cpu); in uv_send_IPI_one()
267 unsigned int cpu; in uv_send_IPI_mask() local
269 for_each_cpu(cpu, mask) in uv_send_IPI_mask()
270 uv_send_IPI_one(cpu, vector); in uv_send_IPI_mask()
276 unsigned int cpu; in uv_send_IPI_mask_allbutself() local
278 for_each_cpu(cpu, mask) { in uv_send_IPI_mask_allbutself()
279 if (cpu != this_cpu) in uv_send_IPI_mask_allbutself()
280 uv_send_IPI_one(cpu, vector); in uv_send_IPI_mask_allbutself()
287 unsigned int cpu; in uv_send_IPI_allbutself() local
[all …]
/linux-4.1.27/arch/powerpc/mm/
Dnuma.c139 unsigned int cpu; in reset_numa_cpu_lookup_table() local
141 for_each_possible_cpu(cpu) in reset_numa_cpu_lookup_table()
142 numa_cpu_lookup_table[cpu] = -1; in reset_numa_cpu_lookup_table()
145 static void update_numa_cpu_lookup_table(unsigned int cpu, int node) in update_numa_cpu_lookup_table() argument
147 numa_cpu_lookup_table[cpu] = node; in update_numa_cpu_lookup_table()
150 static void map_cpu_to_node(int cpu, int node) in map_cpu_to_node() argument
152 update_numa_cpu_lookup_table(cpu, node); in map_cpu_to_node()
154 dbg("adding cpu %d to node %d\n", cpu, node); in map_cpu_to_node()
156 if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) in map_cpu_to_node()
157 cpumask_set_cpu(cpu, node_to_cpumask_map[node]); in map_cpu_to_node()
[all …]
Dmmu_context_nohash.c88 unsigned int cpu, max, i; in steal_context_smp() local
116 for_each_cpu(cpu, mm_cpumask(mm)) { in steal_context_smp()
117 for (i = cpu_first_thread_sibling(cpu); in steal_context_smp()
118 i <= cpu_last_thread_sibling(cpu); i++) { in steal_context_smp()
122 cpu = i - 1; in steal_context_smp()
142 int cpu = smp_processor_id(); in steal_all_contexts() local
160 __clear_bit(id, stale_map[cpu]); in steal_all_contexts()
179 int cpu = smp_processor_id(); in steal_context_up() local
193 __clear_bit(id, stale_map[cpu]); in steal_context_up()
231 unsigned int i, id, cpu = smp_processor_id(); in switch_mmu_context() local
[all …]
/linux-4.1.27/drivers/thermal/
Dx86_pkg_temp_thermal.c120 *pkg_temp_thermal_get_phy_entry(unsigned int cpu) in pkg_temp_thermal_get_phy_entry() argument
122 u16 phys_proc_id = topology_physical_package_id(cpu); in pkg_temp_thermal_get_phy_entry()
142 static int get_tj_max(int cpu, u32 *tj_max) in get_tj_max() argument
148 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); in get_tj_max()
319 int cpu = smp_processor_id(); in pkg_temp_thermal_threshold_work_fn() local
320 int phy_id = topology_physical_package_id(cpu); in pkg_temp_thermal_threshold_work_fn()
321 struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu); in pkg_temp_thermal_threshold_work_fn()
358 int cpu = smp_processor_id(); in pkg_temp_thermal_platform_thermal_notify() local
359 int phy_id = topology_physical_package_id(cpu); in pkg_temp_thermal_platform_thermal_notify()
378 schedule_delayed_work_on(cpu, in pkg_temp_thermal_platform_thermal_notify()
[all …]
/linux-4.1.27/drivers/acpi/
Dprocessor_thermal.c57 #define reduction_pctg(cpu) \ argument
58 per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
67 static int phys_package_first_cpu(int cpu) in phys_package_first_cpu() argument
70 int id = topology_physical_package_id(cpu); in phys_package_first_cpu()
78 static int cpu_has_cpufreq(unsigned int cpu) in cpu_has_cpufreq() argument
81 if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu)) in cpu_has_cpufreq()
97 (100 - reduction_pctg(policy->cpu) * 20) in acpi_thermal_cpufreq_notifier()
110 static int cpufreq_get_max_state(unsigned int cpu) in cpufreq_get_max_state() argument
112 if (!cpu_has_cpufreq(cpu)) in cpufreq_get_max_state()
118 static int cpufreq_get_cur_state(unsigned int cpu) in cpufreq_get_cur_state() argument
[all …]
/linux-4.1.27/tools/power/x86/x86_energy_perf_policy/
Dx86_energy_perf_policy.c38 int cpu = -1; variable
87 cpu = atoi(optarg); in cmdline()
187 unsigned long long get_msr(int cpu, int offset) in get_msr() argument
194 sprintf(msr_path, "/dev/cpu/%d/msr", cpu); in get_msr()
205 printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); in get_msr()
212 unsigned long long put_msr(int cpu, unsigned long long new_msr, int offset) in put_msr() argument
219 sprintf(msr_path, "/dev/cpu/%d/msr", cpu); in put_msr()
229 printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); in put_msr()
236 printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval); in put_msr()
245 void print_msr(int cpu) in print_msr() argument
[all …]
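get_msr() and put_msr() above go through the msr driver's character device, passing the MSR index as the file offset. A minimal sketch of the same read path, assuming the msr module is loaded and the caller has the required privileges:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Read one 64-bit MSR on a given CPU via /dev/cpu/<cpu>/msr. */
    static int read_msr(int cpu, uint32_t reg, uint64_t *val)
    {
        char path[64];
        int fd, ok;

        snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
        fd = open(path, O_RDONLY);
        if (fd < 0)
            return -1;
        ok = pread(fd, val, sizeof(*val), reg) == sizeof(*val);
        close(fd);
        return ok ? 0 : -1;
    }

    int main(void)
    {
        uint64_t v;

        /* 0x1b0 is IA32_ENERGY_PERF_BIAS, the register this tool tweaks. */
        if (read_msr(0, 0x1b0, &v) == 0)
            printf("cpu0 EPB = %llu\n", (unsigned long long)v);
        return 0;
    }

A write goes the same way with O_WRONLY and pwrite(), which is all put_msr() adds.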
/linux-4.1.27/arch/arm/plat-samsung/
Dinit.c34 static struct cpu_table *cpu; variable
51 cpu = s3c_lookup_cpu(idcode, cputab, cputab_size); in s3c_init_cpu()
53 if (cpu == NULL) { in s3c_init_cpu()
58 printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); in s3c_init_cpu()
60 if (cpu->init == NULL) { in s3c_init_cpu()
61 printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); in s3c_init_cpu()
65 if (cpu->map_io) in s3c_init_cpu()
66 cpu->map_io(); in s3c_init_cpu()
83 if (cpu == NULL) in s3c24xx_init_clocks()
86 if (cpu->init_clocks == NULL) in s3c24xx_init_clocks()
[all …]
/linux-4.1.27/arch/blackfin/mach-bf561/
Dsmp.c51 void platform_secondary_init(unsigned int cpu) in platform_secondary_init() argument
76 int platform_boot_secondary(unsigned int cpu, struct task_struct *idle) in platform_boot_secondary() argument
86 smp_send_reschedule(cpu); in platform_boot_secondary()
97 if (cpu_online(cpu)) in platform_boot_secondary()
103 if (cpu_online(cpu)) { in platform_boot_secondary()
106 panic("CPU%u: processor failed to boot\n", cpu); in platform_boot_secondary()
124 unsigned int cpu; in platform_send_ipi() local
127 for_each_cpu(cpu, &callmap) { in platform_send_ipi()
128 BUG_ON(cpu >= 2); in platform_send_ipi()
130 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu))); in platform_send_ipi()
[all …]
/linux-4.1.27/arch/cris/arch-v32/kernel/
Dirq.c51 int cpu; /* The CPU to which the IRQ is currently allocated. */ member
203 block_irq(int irq, int cpu) in block_irq() argument
211 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], in block_irq()
214 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, in block_irq()
217 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], in block_irq()
220 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, in block_irq()
227 unblock_irq(int irq, int cpu) in unblock_irq() argument
235 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], in unblock_irq()
238 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, in unblock_irq()
241 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], in unblock_irq()
[all …]
/linux-4.1.27/drivers/hwmon/
Dcoretemp.c61 #define TO_PHYS_ID(cpu) (cpu_data(cpu).phys_proc_id) argument
62 #define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id) argument
63 #define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) argument
66 #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu)) argument
68 #define for_each_sibling(i, cpu) for (i = 0; false; ) argument
89 unsigned int cpu; member
140 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); in show_crit_alarm()
175 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); in show_temp()
419 static int chk_ucode_version(unsigned int cpu) in chk_ucode_version() argument
421 struct cpuinfo_x86 *c = &cpu_data(cpu); in chk_ucode_version()
[all …]
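coretemp reports a delta below TjMax: the digital readout in IA32_THERM_STATUS counts degrees under the throttle point, and TjMax itself comes from MSR_IA32_TEMPERATURE_TARGET. A sketch of the arithmetic, assuming the usual bit layout (readout in bits 22:16 of the status MSR, TjMax in bits 23:16 of the target MSR):

    #include <stdint.h>
    #include <stdio.h>

    /* TjMax in degrees C from MSR_IA32_TEMPERATURE_TARGET (0x1a2). */
    static unsigned int tjmax(uint64_t target_msr)
    {
        return (target_msr >> 16) & 0xff;
    }

    /* Temperature in millidegrees C from IA32_THERM_STATUS (0x19c):
     * the readout counts degrees below TjMax. */
    static int core_temp_mc(uint64_t status_msr, unsigned int tjmax_c)
    {
        unsigned int readout = (status_msr >> 16) & 0x7f;

        return (int)(tjmax_c - readout) * 1000;
    }

    int main(void)
    {
        uint64_t target = 0x00640000;  /* hypothetical: TjMax = 100 C */
        uint64_t status = 0x882a0800;  /* hypothetical: 42 C below TjMax */

        printf("temp = %d mC\n", core_temp_mc(status, tjmax(target)));
        return 0;
    }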
/linux-4.1.27/arch/tile/include/asm/
Dtopology.h27 static inline int cpu_to_node(int cpu) in cpu_to_node() argument
29 return cpu_2_node[cpu]; in cpu_to_node()
55 #define topology_physical_package_id(cpu) ((void)(cpu), 0) argument
56 #define topology_core_id(cpu) (cpu) argument
57 #define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask) argument
58 #define topology_thread_cpumask(cpu) cpumask_of(cpu) argument
Dsmp.h54 static inline int cpu_x(int cpu) in cpu_x() argument
56 return cpu % smp_width; in cpu_x()
58 static inline int cpu_y(int cpu) in cpu_y() argument
60 return cpu / smp_width; in cpu_y()
81 static inline void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
83 send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE); in arch_send_call_function_single_ipi()
94 #define cpu_x(cpu) 0 argument
95 #define cpu_y(cpu) 0 argument
103 #define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map) argument
110 #define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map) argument
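cpu_x() and cpu_y() above exist because TILE numbers its CPUs along a 2-D mesh row by row, so the grid coordinates are plain remainder and division by the mesh width:

    #include <stdio.h>

    /* Linear CPU id -> (x, y) mesh coordinates, as cpu_x()/cpu_y() do. */
    static int cpu_x(int cpu, int width) { return cpu % width; }
    static int cpu_y(int cpu, int width) { return cpu / width; }

    int main(void)
    {
        int width = 8; /* hypothetical 8-column mesh */
        int cpu;

        for (cpu = 0; cpu < 24; cpu += 7)
            printf("cpu%d -> (%d, %d)\n", cpu, cpu_x(cpu, width), cpu_y(cpu, width));
        return 0;
    }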
/linux-4.1.27/tools/power/cpupower/debug/i386/
Dcentrino-decode.c29 static int rdmsr(unsigned int cpu, unsigned int msr, in rdmsr() argument
39 if (cpu > MCPU) in rdmsr()
42 sprintf(file, "/dev/cpu/%d/msr", cpu); in rdmsr()
76 static int decode_live(unsigned int cpu) in decode_live() argument
81 err = rdmsr(cpu, MSR_IA32_PERF_STATUS, &lo, &hi); in decode_live()
84 printf("can't get MSR_IA32_PERF_STATUS for cpu %d\n", cpu); in decode_live()
97 unsigned int cpu, mode = 0; in main() local
100 cpu = 0; in main()
102 cpu = strtoul(argv[1], NULL, 0); in main()
103 if (cpu >= MCPU) in main()
[all …]
/linux-4.1.27/arch/sparc/kernel/
Dsmp_64.c249 static void smp_start_sync_tick_client(int cpu);
251 static void smp_synchronize_one_tick(int cpu) in smp_synchronize_one_tick() argument
257 smp_start_sync_tick_client(cpu); in smp_synchronize_one_tick()
282 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, in ldom_startcpu_cpuid() argument
305 hdesc->cpu = cpu; in ldom_startcpu_cpuid()
308 tb = &trap_block[cpu]; in ldom_startcpu_cpuid()
327 hv_err = sun4v_cpu_start(cpu, trampoline_ra, in ldom_startcpu_cpuid()
344 static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) in smp_boot_one_cpu() argument
359 ldom_startcpu_cpuid(cpu, in smp_boot_one_cpu()
364 prom_startcpu_cpuid(cpu, entry, cookie); in smp_boot_one_cpu()
[all …]
Dnmi.c57 int cpu; in touch_nmi_watchdog() local
59 for_each_present_cpu(cpu) { in touch_nmi_watchdog()
60 if (per_cpu(nmi_touch, cpu) != 1) in touch_nmi_watchdog()
61 per_cpu(nmi_touch, cpu) = 1; in touch_nmi_watchdog()
126 static inline unsigned int get_nmi_count(int cpu) in get_nmi_count() argument
128 return cpu_data(cpu).__nmi_count; in get_nmi_count()
137 static void report_broken_nmi(int cpu, int *prev_nmi_count) in report_broken_nmi() argument
143 cpu, prev_nmi_count[cpu], get_nmi_count(cpu)); in report_broken_nmi()
150 per_cpu(wd_enabled, cpu) = 0; in report_broken_nmi()
164 int cpu, err; in check_nmi_watchdog() local
[all …]
Dsmp_32.c80 int cpu, num = 0; in smp_cpus_done() local
82 for_each_online_cpu(cpu) { in smp_cpus_done()
84 bogosum += cpu_data(cpu).udelay_val; in smp_cpus_done()
124 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
131 sparc32_ipi_ops->resched(cpu); in smp_send_reschedule()
138 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
141 sparc32_ipi_ops->single(cpu); in arch_send_call_function_single_ipi()
146 int cpu; in arch_send_call_function_ipi_mask() local
149 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
150 sparc32_ipi_ops->mask_one(cpu); in arch_send_call_function_ipi_mask()
[all …]
Dsun4d_smp.c84 while (current_set[cpuid]->cpu != cpuid) in sun4d_cpu_pre_online()
194 int cpu; in smp4d_ipi_init() local
199 for_each_possible_cpu(cpu) { in smp4d_ipi_init()
200 work = &per_cpu(sun4d_ipi_work, cpu); in smp4d_ipi_init()
231 static void sun4d_send_ipi(int cpu, int level) in sun4d_send_ipi() argument
233 cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1))); in sun4d_send_ipi()
236 static void sun4d_ipi_single(int cpu) in sun4d_ipi_single() argument
238 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); in sun4d_ipi_single()
244 sun4d_send_ipi(cpu, SUN4D_IPI_IRQ); in sun4d_ipi_single()
247 static void sun4d_ipi_mask_one(int cpu) in sun4d_ipi_mask_one() argument
[all …]
/linux-4.1.27/arch/mips/loongson/loongson-3/
Dsmp.c241 static void loongson3_send_ipi_single(int cpu, unsigned int action) in loongson3_send_ipi_single() argument
243 loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(cpu)]); in loongson3_send_ipi_single()
257 int i, cpu = smp_processor_id(); in loongson3_ipi_interrupt() local
261 action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]); in loongson3_ipi_interrupt()
264 loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu_logical_map(cpu)]); in loongson3_ipi_interrupt()
273 BUG_ON(cpu != 0); in loongson3_ipi_interrupt()
288 unsigned int cpu = smp_processor_id(); in loongson3_init_secondary() local
298 per_cpu(cpu_state, cpu) = CPU_ONLINE; in loongson3_init_secondary()
299 cpu_data[cpu].core = in loongson3_init_secondary()
300 cpu_logical_map(cpu) % loongson_sysconf.cores_per_package; in loongson3_init_secondary()
[all …]
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/
Dlib.c21 int cpu; in pick_online_cpu() local
31 for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) in pick_online_cpu()
32 if (CPU_ISSET(cpu, &mask)) in pick_online_cpu()
33 return cpu; in pick_online_cpu()
36 for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) in pick_online_cpu()
37 if (CPU_ISSET(cpu, &mask)) in pick_online_cpu()
38 return cpu; in pick_online_cpu()
44 int bind_to_cpu(int cpu) in bind_to_cpu() argument
48 printf("Binding to cpu %d\n", cpu); in bind_to_cpu()
51 CPU_SET(cpu, &mask); in bind_to_cpu()
[all …]
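bind_to_cpu() above pins the calling task so the PMU tests read counters from a known CPU. A minimal standalone sketch of the same affinity call:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Pin the calling thread to one CPU, as the selftest helper does. */
    static int bind_to_cpu(int cpu)
    {
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        return sched_setaffinity(0, sizeof(mask), &mask);
    }

    int main(void)
    {
        if (bind_to_cpu(0))
            perror("sched_setaffinity");
        else
            printf("bound to cpu0\n");
        return 0;
    }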
/linux-4.1.27/arch/x86/boot/
Dcpucheck.c86 err_flags[i] = req_flags[i] & ~cpu.flags[i]; in check_cpuflags()
106 memset(&cpu.flags, 0, sizeof cpu.flags); in check_cpu()
107 cpu.level = 3; in check_cpu()
110 cpu.level = 4; in check_cpu()
115 if (test_bit(X86_FEATURE_LM, cpu.flags)) in check_cpu()
116 cpu.level = 64; in check_cpu()
136 is_centaur() && cpu.model >= 6) { in check_cpu()
147 set_bit(X86_FEATURE_CX8, cpu.flags); in check_cpu()
159 : "+a" (level), "=d" (cpu.flags[0]) in check_cpu()
166 is_intel() && cpu.level == 6 && in check_cpu()
[all …]
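The check above boils down to err_flags[i] = req_flags[i] & ~cpu.flags[i]: each required-feature word is ANDed with the complement of what CPUID reported, leaving only the missing bits. A compact sketch of that mask test (the flag words here are illustrative, not the real req_flags table):

    #include <stdint.h>
    #include <stdio.h>

    #define NCAPINTS 2 /* two 32-bit flag words for the example */

    /* Return nonzero and fill 'missing' if any required bit is absent. */
    static int check_cpuflags(const uint32_t req[NCAPINTS],
                              const uint32_t have[NCAPINTS],
                              uint32_t missing[NCAPINTS])
    {
        int i, bad = 0;

        for (i = 0; i < NCAPINTS; i++) {
            missing[i] = req[i] & ~have[i];
            bad |= missing[i] != 0;
        }
        return bad;
    }

    int main(void)
    {
        uint32_t req[NCAPINTS] = { 0x0000000f, 0x1 };  /* hypothetical */
        uint32_t have[NCAPINTS] = { 0x0000000b, 0x1 }; /* hypothetical */
        uint32_t missing[NCAPINTS];

        if (check_cpuflags(req, have, missing))
            printf("missing: %08x %08x\n", missing[0], missing[1]);
        return 0;
    }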
/linux-4.1.27/arch/mn10300/kernel/
Dsmp.c46 static void run_sleep_cpu(unsigned int cpu);
47 static void run_wakeup_cpu(unsigned int cpu);
363 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
365 send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI); in arch_send_call_function_single_ipi()
372 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
374 send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI); in smp_send_reschedule()
725 static void __init smp_store_cpu_info(int cpu) in smp_store_cpu_info() argument
727 struct mn10300_cpuinfo *ci = &cpu_data[cpu]; in smp_store_cpu_info()
773 task_thread_info(idle)->cpu = cpu_id; in do_boot_cpu()
821 static void __init smp_show_cpu_info(int cpu) in smp_show_cpu_info() argument
[all …]
/linux-4.1.27/kernel/trace/
Dtrace_kdb.c25 int cnt = 0, cpu; in ftrace_dump_buf() local
30 for_each_tracing_cpu(cpu) { in ftrace_dump_buf()
31 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf()
49 for_each_tracing_cpu(cpu) { in ftrace_dump_buf()
50 iter.buffer_iter[cpu] = in ftrace_dump_buf()
51 ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu); in ftrace_dump_buf()
52 ring_buffer_read_start(iter.buffer_iter[cpu]); in ftrace_dump_buf()
53 tracing_iter_reset(&iter, cpu); in ftrace_dump_buf()
87 for_each_tracing_cpu(cpu) { in ftrace_dump_buf()
88 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf()
[all …]
/linux-4.1.27/arch/arm/include/asm/
Dmcpm.h47 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
54 void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
87 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
135 int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
227 int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
229 void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
230 void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
234 void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
236 int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
239 int (*power_up)(unsigned int cpu, unsigned int cluster);
[all …]
Dsmp_plat.h35 static inline unsigned int smp_cpuid_part(int cpu) in smp_cpuid_part() argument
37 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu); in smp_cpuid_part()
72 #define cpu_logical_map(cpu) __cpu_logical_map[cpu] argument
81 int cpu; in get_logical_index() local
82 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in get_logical_index()
83 if (cpu_logical_map(cpu) == mpidr) in get_logical_index()
84 return cpu; in get_logical_index()
Dtopology.h18 #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) argument
19 #define topology_core_id(cpu) (cpu_topology[cpu].core_id) argument
20 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) argument
21 #define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) argument
25 const struct cpumask *cpu_coregroup_mask(int cpu);
/linux-4.1.27/arch/ia64/mm/
Dtlb.c72 int i, cpu; in wrap_mmu_context() local
90 cpu = get_cpu(); /* prevent preemption/migration */ in wrap_mmu_context()
92 if (i != cpu) in wrap_mmu_context()
347 int cpu = smp_processor_id(); in ia64_tlb_init() local
368 per_cpu(ia64_tr_num, cpu) = 8; in ia64_tlb_init()
371 per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1; in ia64_tlb_init()
372 if (per_cpu(ia64_tr_num, cpu) > in ia64_tlb_init()
374 per_cpu(ia64_tr_num, cpu) = in ia64_tlb_init()
376 if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { in ia64_tlb_init()
378 per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; in ia64_tlb_init()
[all …]
/linux-4.1.27/drivers/clk/mvebu/
Dclk-cpu.c36 int cpu; member
56 div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK; in clk_cpu_recalc_rate()
85 & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8)))) in clk_cpu_off_set_rate()
86 | (div << (cpuclk->cpu * 8)); in clk_cpu_off_set_rate()
89 reload_mask = 1 << (20 + cpuclk->cpu); in clk_cpu_off_set_rate()
149 return mvebu_pmsu_dfs_request(cpuclk->cpu); in clk_cpu_on_set_rate()
201 int cpu, err; in of_cpu_clk_setup() local
206 err = of_property_read_u32(dn, "reg", &cpu); in of_cpu_clk_setup()
210 sprintf(clk_name, "cpu%d", cpu); in of_cpu_clk_setup()
213 cpuclk[cpu].parent_name = __clk_get_name(parent_clk); in of_cpu_clk_setup()
[all …]
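The divider register handled above packs one byte per CPU, which is why every access shifts by cpu * 8. A small sketch of the field arithmetic; the 6-bit mask width is an assumption standing in for SYS_CTRL_CLK_DIVIDER_MASK:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_MASK 0x3f /* assumed per-CPU divider field width */

    static unsigned int get_div(uint32_t reg, int cpu)
    {
        return (reg >> (cpu * 8)) & DIV_MASK;
    }

    static uint32_t set_div(uint32_t reg, int cpu, unsigned int div)
    {
        reg &= ~((uint32_t)DIV_MASK << (cpu * 8));
        return reg | (uint32_t)(div & DIV_MASK) << (cpu * 8);
    }

    int main(void)
    {
        uint32_t reg = 0x02020202; /* hypothetical: four CPUs at parent/2 */

        reg = set_div(reg, 1, 3);  /* clock cpu1 from parent/3 instead */
        printf("cpu1 divider = %u\n", get_div(reg, 1));
        return 0;
    }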
/linux-4.1.27/mm/
Dpercpu-vm.c14 unsigned int cpu, int page_idx) in pcpu_chunk_page() argument
19 return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); in pcpu_chunk_page()
58 unsigned int cpu; in pcpu_free_pages() local
61 for_each_possible_cpu(cpu) { in pcpu_free_pages()
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
86 unsigned int cpu, tcpu; in pcpu_alloc_pages() local
89 for_each_possible_cpu(cpu) { in pcpu_alloc_pages()
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
93 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); in pcpu_alloc_pages()
102 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]
/linux-4.1.27/kernel/events/
Dhw_breakpoint.c64 static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type) in get_bp_info() argument
66 return per_cpu_ptr(bp_cpuinfo + type, cpu); in get_bp_info()
100 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) in max_task_bp_pinned() argument
102 unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned; in max_task_bp_pinned()
117 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) in task_bp_pinned() argument
126 (iter->cpu < 0 || cpu == iter->cpu)) in task_bp_pinned()
135 if (bp->cpu >= 0) in cpumask_of_bp()
136 return cpumask_of(bp->cpu); in cpumask_of_bp()
149 int cpu; in fetch_bp_busy_slots() local
151 for_each_cpu(cpu, cpumask) { in fetch_bp_busy_slots()
[all …]
/linux-4.1.27/arch/arm/mach-zynq/
Dplatsmp.c37 int zynq_cpun_start(u32 address, int cpu) in zynq_cpun_start() argument
50 zynq_slcr_cpu_stop(cpu); in zynq_cpun_start()
79 zynq_slcr_cpu_start(cpu); in zynq_cpun_start()
84 pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address); in zynq_cpun_start()
90 static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle) in zynq_boot_secondary() argument
92 return zynq_cpun_start(virt_to_phys(secondary_startup), cpu); in zynq_boot_secondary()
121 static void zynq_secondary_init(unsigned int cpu) in zynq_secondary_init() argument
127 static int zynq_cpu_kill(unsigned cpu) in zynq_cpu_kill() argument
131 while (zynq_slcr_cpu_state_read(cpu)) in zynq_cpu_kill()
135 zynq_slcr_cpu_stop(cpu); in zynq_cpu_kill()
[all …]
/linux-4.1.27/arch/x86/kernel/cpu/mcheck/
Dmce_amd.c123 "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu, in lvt_off_valid()
131 b->cpu, apic, b->bank, b->block, b->address, hi, lo); in lvt_off_valid()
212 unsigned int cpu = smp_processor_id(); in mce_amd_feature_init() local
241 per_cpu(bank_map, cpu) |= (1 << bank); in mce_amd_feature_init()
244 b.cpu = cpu; in mce_amd_feature_init()
279 int cpu = smp_processor_id(); in amd_threshold_interrupt() local
285 if (!(per_cpu(bank_map, cpu) & (1 << bank))) in amd_threshold_interrupt()
370 smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); in SHOW_FIELDS()
394 smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); in store_threshold_limit()
403 rdmsr_on_cpu(b->cpu, b->address, &lo, &hi); in show_error_count()
[all …]
/linux-4.1.27/drivers/macintosh/
Dwindfarm_pm72.c209 static int read_one_cpu_vals(int cpu, s32 *temp, s32 *power) in read_one_cpu_vals() argument
215 rc = wf_sensor_get(sens_cpu_temp[cpu], &dtemp); in read_one_cpu_vals()
217 DBG(" CPU%d: temp reading error !\n", cpu); in read_one_cpu_vals()
220 DBG_LOTS(" CPU%d: temp = %d.%03d\n", cpu, FIX32TOPRINT((dtemp))); in read_one_cpu_vals()
224 rc = wf_sensor_get(sens_cpu_volts[cpu], &volts); in read_one_cpu_vals()
226 DBG(" CPU%d, volts reading error !\n", cpu); in read_one_cpu_vals()
229 DBG_LOTS(" CPU%d: volts = %d.%03d\n", cpu, FIX32TOPRINT((volts))); in read_one_cpu_vals()
232 rc = wf_sensor_get(sens_cpu_amps[cpu], &amps); in read_one_cpu_vals()
234 DBG(" CPU%d, current reading error !\n", cpu); in read_one_cpu_vals()
237 DBG_LOTS(" CPU%d: amps = %d.%03d\n", cpu, FIX32TOPRINT((amps))); in read_one_cpu_vals()
[all …]
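The FIX32TOPRINT() pairs feeding the "%d.%03d" formats above split a fixed-point sensor value into integer and millesimal parts. A standalone sketch, assuming the 16.16 encoding the windfarm sensors use:

    #include <stdint.h>
    #include <stdio.h>

    /* Print a 16.16 fixed-point value as "int.millis", like the DBG output. */
    static void print_fix32(int32_t v)
    {
        printf("%d.%03d\n", v >> 16, (int)(((v & 0xffff) * 1000) >> 16));
    }

    int main(void)
    {
        print_fix32((47 << 16) | 0x8000); /* hypothetical 47.5 C -> "47.500" */
        return 0;
    }

(Negative readings need extra care with the mask and shift; the kernel macro shares that caveat.)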
/linux-4.1.27/arch/arm/mm/
Dcontext.c57 int cpu; in a15_erratum_get_cpumask() local
63 for_each_online_cpu(cpu) { in a15_erratum_get_cpumask()
64 if (cpu == this_cpu) in a15_erratum_get_cpumask()
70 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
72 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
74 cpumask_set_cpu(cpu, mask); in a15_erratum_get_cpumask()
139 static void flush_context(unsigned int cpu) in flush_context() argument
170 int cpu; in is_reserved_asid() local
171 for_each_possible_cpu(cpu) in is_reserved_asid()
172 if (per_cpu(reserved_asids, cpu) == asid) in is_reserved_asid()
[all …]
/linux-4.1.27/drivers/watchdog/
Docteon-wdt-main.c224 static int cpu2core(int cpu) in cpu2core() argument
227 return cpu_logical_map(cpu); in cpu2core()
253 int cpu = core2cpu(core); in octeon_wdt_poke_irq() local
256 if (per_cpu_countdown[cpu] > 0) { in octeon_wdt_poke_irq()
259 per_cpu_countdown[cpu]--; in octeon_wdt_poke_irq()
263 cpumask_clear_cpu(cpu, &irq_enabled_cpus); in octeon_wdt_poke_irq()
377 static void octeon_wdt_disable_interrupt(int cpu) in octeon_wdt_disable_interrupt() argument
383 core = cpu2core(cpu); in octeon_wdt_disable_interrupt()
397 static void octeon_wdt_setup_interrupt(int cpu) in octeon_wdt_setup_interrupt() argument
403 core = cpu2core(cpu); in octeon_wdt_setup_interrupt()
[all …]
/linux-4.1.27/arch/arm64/include/asm/
Dtopology.h18 #define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id) argument
19 #define topology_core_id(cpu) (cpu_topology[cpu].core_id) argument
20 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) argument
21 #define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) argument
25 const struct cpumask *cpu_coregroup_mask(int cpu);
/linux-4.1.27/Documentation/ABI/testing/
Dsysfs-devices-system-cpu1 What: /sys/devices/system/cpu/
10 /sys/devices/system/cpu/cpu#/
12 What: /sys/devices/system/cpu/kernel_max
13 /sys/devices/system/cpu/offline
14 /sys/devices/system/cpu/online
15 /sys/devices/system/cpu/possible
16 /sys/devices/system/cpu/present
22 kernel_max: the maximum cpu index allowed by the kernel
40 What: /sys/devices/system/cpu/probe
41 /sys/devices/system/cpu/release
[all …]
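The online, offline, possible and present files listed above all hold the same plain-text CPU list format (e.g. "0-3,5-7"). A minimal sketch that reads one of them back:

    #include <stdio.h>

    /* Print the kernel's online-CPU list, e.g. "0-7". */
    int main(void)
    {
        char buf[256];
        FILE *f = fopen("/sys/devices/system/cpu/online", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("online cpus: %s", buf);
        fclose(f);
        return 0;
    }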
/linux-4.1.27/arch/mips/sibyte/sb1250/
Dirq.c54 void sb1250_mask_irq(int cpu, int irq) in sb1250_mask_irq() argument
60 cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) + in sb1250_mask_irq()
63 ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) + in sb1250_mask_irq()
68 void sb1250_unmask_irq(int cpu, int irq) in sb1250_unmask_irq() argument
74 cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) + in sb1250_unmask_irq()
77 ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) + in sb1250_unmask_irq()
86 int i = 0, old_cpu, cpu, int_on; in sb1250_set_affinity() local
94 cpu = cpu_logical_map(i); in sb1250_set_affinity()
110 sb1250_irq_owner[irq] = cpu; in sb1250_set_affinity()
113 cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) + in sb1250_set_affinity()
[all …]
/linux-4.1.27/tools/virtio/virtio-trace/
Dtrace-agent.c154 int cpu; in agent_info_init() local
159 for (cpu = 0; cpu < s->cpus; cpu++) { in agent_info_init()
161 in_path = make_input_path(cpu); in agent_info_init()
167 out_path = make_output_path(cpu); in agent_info_init()
174 rw_thread_init(cpu, in_path, out_path, s->use_stdout, in agent_info_init()
175 s->pipe_size, s->rw_ti[cpu]); in agent_info_init()
222 int cpu; in agent_main_loop() local
226 for (cpu = 0; cpu < s->cpus; cpu++) in agent_main_loop()
227 rw_thread_per_cpu[cpu] = rw_thread_run(s->rw_ti[cpu]); in agent_main_loop()
232 for (cpu = 0; cpu < s->cpus; cpu++) { in agent_main_loop()
[all …]
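agent_main_loop() above starts one read/write thread per guest CPU and then joins them all. A skeletal sketch of that fan-out/join shape with pthreads (the worker body is a placeholder for the real pipe-pumping loop):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Placeholder per-CPU worker; the agent moves trace data here. */
    static void *rw_thread(void *arg)
    {
        printf("worker for cpu%ld running\n", (long)arg);
        return NULL;
    }

    int main(void)
    {
        long cpu, ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        pthread_t *tids = calloc(ncpus, sizeof(*tids));

        if (!tids)
            return 1;
        for (cpu = 0; cpu < ncpus; cpu++)
            pthread_create(&tids[cpu], NULL, rw_thread, (void *)cpu);
        for (cpu = 0; cpu < ncpus; cpu++)
            pthread_join(tids[cpu], NULL);
        free(tids);
        return 0;
    }

Build with -pthread.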
/linux-4.1.27/arch/arm/mach-exynos/
Dplatsmp.c54 static inline void platform_do_lowpower(unsigned int cpu, int *spurious) in platform_do_lowpower() argument
56 u32 mpidr = cpu_logical_map(cpu); in platform_do_lowpower()
93 void exynos_cpu_power_down(int cpu) in exynos_cpu_power_down() argument
97 if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) { in exynos_cpu_power_down()
109 core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu)); in exynos_cpu_power_down()
111 pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu)); in exynos_cpu_power_down()
120 void exynos_cpu_power_up(int cpu) in exynos_cpu_power_up() argument
128 EXYNOS_ARM_CORE_CONFIGURATION(cpu)); in exynos_cpu_power_up()
136 int exynos_cpu_power_state(int cpu) in exynos_cpu_power_state() argument
138 return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) & in exynos_cpu_power_state()
[all …]
/linux-4.1.27/arch/sh/mm/
Dtlbflush_32.c17 unsigned int cpu = smp_processor_id(); in local_flush_tlb_page() local
19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page()
24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
43 unsigned int cpu = smp_processor_id(); in local_flush_tlb_range() local
45 if (cpu_context(cpu, mm) != NO_CONTEXT) { in local_flush_tlb_range()
52 cpu_context(cpu, mm) = NO_CONTEXT; in local_flush_tlb_range()
54 activate_context(mm, cpu); in local_flush_tlb_range()
59 asid = cpu_asid(cpu, mm); in local_flush_tlb_range()
80 unsigned int cpu = smp_processor_id(); in local_flush_tlb_kernel_range() local
92 asid = cpu_asid(cpu, &init_mm); in local_flush_tlb_kernel_range()
[all …]
/linux-4.1.27/arch/arm/mach-qcom/
Dplatsmp.c52 static void __ref qcom_cpu_die(unsigned int cpu) in qcom_cpu_die() argument
58 static void qcom_secondary_init(unsigned int cpu) in qcom_secondary_init() argument
67 static int scss_release_secondary(unsigned int cpu) in scss_release_secondary() argument
92 static int kpssv1_release_secondary(unsigned int cpu) in kpssv1_release_secondary() argument
99 cpu_node = of_get_cpu_node(cpu, NULL); in kpssv1_release_secondary()
171 static int kpssv2_release_secondary(unsigned int cpu) in kpssv2_release_secondary() argument
179 cpu_node = of_get_cpu_node(cpu, NULL); in kpssv2_release_secondary()
273 static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) in qcom_boot_secondary() argument
277 if (!per_cpu(cold_boot_done, cpu)) { in qcom_boot_secondary()
278 ret = func(cpu); in qcom_boot_secondary()
[all …]
/linux-4.1.27/tools/perf/
Dbuiltin-stat.c146 static int (*aggr_get_id)(struct cpu_map *m, int cpu);
357 int cpu) in update_shadow_stats() argument
360 update_stats(&runtime_nsecs_stats[cpu], count[0]); in update_shadow_stats()
362 update_stats(&runtime_cycles_stats[cpu], count[0]); in update_shadow_stats()
365 update_stats(&runtime_cycles_in_tx_stats[cpu], count[0]); in update_shadow_stats()
368 update_stats(&runtime_transaction_stats[cpu], count[0]); in update_shadow_stats()
371 update_stats(&runtime_elision_stats[cpu], count[0]); in update_shadow_stats()
373 update_stats(&runtime_stalled_cycles_front_stats[cpu], count[0]); in update_shadow_stats()
375 update_stats(&runtime_stalled_cycles_back_stats[cpu], count[0]); in update_shadow_stats()
377 update_stats(&runtime_branches_stats[cpu], count[0]); in update_shadow_stats()
[all …]
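The update_stats() calls above feed per-CPU running aggregates so perf stat can print a mean and spread without buffering samples. A sketch of the single-pass (Welford-style) update this relies on; the struct layout is illustrative:

    #include <math.h>
    #include <stdio.h>

    struct stats {
        double mean, m2; /* running mean and sum of squared deltas */
        unsigned long n;
    };

    /* One call per new sample; O(1) memory, numerically stable. */
    static void update_stats(struct stats *s, double val)
    {
        double delta = val - s->mean;

        s->n++;
        s->mean += delta / s->n;
        s->m2 += delta * (val - s->mean);
    }

    static double stddev(const struct stats *s)
    {
        return s->n > 1 ? sqrt(s->m2 / (s->n - 1)) : 0.0;
    }

    int main(void)
    {
        struct stats s = { 0 };
        double samples[] = { 100.0, 102.0, 98.0, 101.0 }; /* hypothetical */
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            update_stats(&s, samples[i]);
        printf("mean %.2f +- %.2f\n", s.mean, stddev(&s));
        return 0;
    }

Build with -lm.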
