/linux-4.4.14/drivers/lguest/x86/

core.c:
     67  static struct lguest_pages *lguest_pages(unsigned int cpu)
     69          return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);
     85  static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
     93          if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
     94                  __this_cpu_write(lg_last_cpu, cpu);
     95                  cpu->last_pages = pages;
     96                  cpu->changed = CHANGED_ALL;
    108          map_switcher_in_guest(cpu, pages);
    114          pages->state.guest_tss.sp1 = cpu->esp1;
    115          pages->state.guest_tss.ss1 = cpu->ss1;
         [all …]
/linux-4.4.14/drivers/lguest/

hypercalls.c:
     37  static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
     57          kill_guest(cpu, "already have lguest_data");
     68          __lgread(cpu, msg, args->arg1, sizeof(msg));
     70          kill_guest(cpu, "CRASH: %s", msg);
     72          cpu->lg->dead = ERR_PTR(-ERESTART);
     78          guest_pagetable_clear_all(cpu);
     80          guest_pagetable_flush_user(cpu);
     88          guest_new_pagetable(cpu, args->arg1);
     91          guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
     95          guest_set_pte(cpu, args->arg1, args->arg2,
         [all …]

interrupts_and_traps.c:
     51  static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
     55          lgwrite(cpu, *gstack, u32, val);
     68  static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err)
     79          if ((cpu->regs->ss&0x3) != GUEST_PL) {
     84                  virtstack = cpu->esp1;
     85                  ss = cpu->ss1;
     87                  origstack = gstack = guest_pa(cpu, virtstack);
     94          push_guest_stack(cpu, &gstack, cpu->regs->ss);
     95          push_guest_stack(cpu, &gstack, cpu->regs->esp);
     98          virtstack = cpu->regs->esp;
         [all …]

page_tables.c:
     83  static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
     88          return &cpu->lg->pgdirs[i].pgdir[index];
     97  static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
    115  static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
    118          pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
    136  static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
    139          return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
    152  static unsigned long gpte_addr(struct lg_cpu *cpu,
    162  static unsigned long gpte_addr(struct lg_cpu *cpu,
    206  static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
         [all …]

lg.h:
    130  #define lgread(cpu, addr, type) \
    131          ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; })
    134  #define lgwrite(cpu, addr, type, val) \
    137          __lgwrite((cpu), (addr), &(val), sizeof(val)); \
    141  int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
    154  unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
    155  void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
    156  void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
    157  bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
    158  void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
         [all …]
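The lgread()/lgwrite() macros above wrap a GCC statement expression around __lgread()/__lgwrite() so a typed guest read can sit in expression position. A minimal sketch of a caller, assuming the lguest context above (the guest address gpa and this helper are illustrative, not from the source):

    /* Sketch: read a guest u32, bump it, and write it back.
     * lgread()/lgwrite() kill the guest on a bad address rather than
     * returning an error, so no return code is checked here. */
    static void bump_guest_counter(struct lg_cpu *cpu, unsigned long gpa)
    {
            u32 v = lgread(cpu, gpa, u32);  /* expands to __lgread() into a temp */

            lgwrite(cpu, gpa, u32, v + 1);  /* __lgwrite() copies the temp back */
    }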
lguest_user.c:
     19  static int getreg_setup(struct lg_cpu *cpu, const unsigned long __user *input)
     33          cpu->reg_read = lguest_arch_regptr(cpu, which, true);
     34          if (!cpu->reg_read)
     41  static int setreg(struct lg_cpu *cpu, const unsigned long __user *input)
     53          reg = lguest_arch_regptr(cpu, which, false);
     67  static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
     80          set_interrupt(cpu, irq);
     88  static int trap(struct lg_cpu *cpu, const unsigned long __user *input)
     95          if (!deliver_trap(cpu, trapnum))
         in read():
    108          struct lg_cpu *cpu;
         [all …]

segments.c:
     67  static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
     84          if (cpu->arch.gdt[i].dpl == 0)
     85                  cpu->arch.gdt[i].dpl |= GUEST_PL;
     93          cpu->arch.gdt[i].type |= 0x1;
    136  void setup_guest_gdt(struct lg_cpu *cpu)
    142          cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
    143          cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
    144          cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;
    145          cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;
    152  void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
         [all …]

core.c:
    184  void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes)
    186          if (!lguest_address_ok(cpu->lg, addr, bytes)
    187              || copy_from_user(b, cpu->lg->mem_base + addr, bytes) != 0) {
    190                  kill_guest(cpu, "bad read address %#lx len %u", addr, bytes);
    195  void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
    198          if (!lguest_address_ok(cpu->lg, addr, bytes)
    199              || copy_to_user(cpu->lg->mem_base + addr, b, bytes) != 0)
    200                  kill_guest(cpu, "bad write address %#lx len %u", addr, bytes);
    209  int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
    212          if (cpu->reg_read) {
         [all …]
/linux-4.4.14/tools/testing/selftests/cpu-hotplug/

cpu-on-off-test.sh:
     23  if ! ls $SYSFS/devices/system/cpu/cpu* > /dev/null 2>&1; then
     24          echo $msg cpu hotplug is not supported >&2
     29  online_cpus=`cat $SYSFS/devices/system/cpu/online`
     33  offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
     49  for cpu in $SYSFS/devices/system/cpu/cpu*; do
     50          if [ -f $cpu/online ] && grep -q $state $cpu/online; then
     51                  echo ${cpu##/*/cpu}
     68  grep -q 1 $SYSFS/devices/system/cpu/cpu$1/online
     73  grep -q 0 $SYSFS/devices/system/cpu/cpu$1/online
     78  echo 1 > $SYSFS/devices/system/cpu/cpu$1/online
         [all …]
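The selftest drives hotplug entirely through the per-CPU sysfs "online" files shown above. The same operation from C is a one-line write per transition; a minimal userspace sketch (CPU 1 is an arbitrary example, and needs root):

    #include <stdio.h>

    /* Offline, then re-online, one CPU by writing its sysfs control file,
     * mirroring what cpu-on-off-test.sh does with echo/grep. */
    static int set_cpu_online(int cpu, int online)
    {
            char path[64];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/online", cpu);
            f = fopen(path, "w");
            if (!f)
                    return -1;         /* cpu0 often has no "online" file */
            fprintf(f, "%d\n", online);
            return fclose(f);          /* the write may only fail at close time */
    }

    int main(void)
    {
            if (set_cpu_online(1, 0))  /* take CPU 1 down */
                    perror("offline");
            if (set_cpu_online(1, 1))  /* and bring it back */
                    perror("online");
            return 0;
    }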
/linux-4.4.14/arch/x86/xen/

smp.c:
         in cpu_bringup():
     74          int cpu;
     85          cpu = smp_processor_id();
     86          smp_store_cpu_info(cpu);
     87          cpu_data(cpu).x86_max_cores = 1;
     88          set_cpu_sibling_map(cpu);
     92          notify_cpu_starting(cpu);
     94          set_cpu_online(cpu, true);
     96          cpu_set_state_online(cpu);  /* Implies full memory barrier. */
    107  asmlinkage __visible void cpu_bringup_and_idle(int cpu)
    112          xen_pvh_secondary_vcpu_init(cpu);
         [all …]

time.c:
    104  void xen_setup_runstate_info(int cpu)
    108          area.addr.v = &per_cpu(xen_runstate, cpu);
    111                                 cpu, &area))
         in xen_vcpuop_shutdown():
    317          int cpu = smp_processor_id();
    319          if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
    320              HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
         in xen_vcpuop_set_oneshot():
    328          int cpu = smp_processor_id();
    330          if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
         in xen_vcpuop_set_next_event():
    339          int cpu = smp_processor_id();
    348          ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
         [all …]

spinlock.c:
     28  static void xen_qlock_kick(int cpu)
     30          xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
         in xen_lock_spinning():
    160          int cpu = smp_processor_id();
    195          cpumask_set_cpu(cpu, &waiting_cpus);
    240          cpumask_clear_cpu(cpu, &waiting_cpus);
         in xen_unlock_kick():
    251          int cpu;
    255          for_each_cpu(cpu, &waiting_cpus) {
    256                  const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
    262                  xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
    275  void xen_init_lock_cpu(int cpu)
         [all …]
/linux-4.4.14/arch/arm/boot/dts/

axm5516-cpus.dtsi:
     17  cpu-map {
     20          cpu = <&CPU0>;
     23          cpu = <&CPU1>;
     26          cpu = <&CPU2>;
     29          cpu = <&CPU3>;
     34          cpu = <&CPU4>;
     37          cpu = <&CPU5>;
     40          cpu = <&CPU6>;
     43          cpu = <&CPU7>;
     48          cpu = <&CPU8>;
         [all …]

hip04.dtsi:
     32  cpu-map {
     35          cpu = <&CPU0>;
     38          cpu = <&CPU1>;
     41          cpu = <&CPU2>;
     44          cpu = <&CPU3>;
     49          cpu = <&CPU4>;
     52          cpu = <&CPU5>;
     55          cpu = <&CPU6>;
     58          cpu = <&CPU7>;
     63          cpu = <&CPU8>;
         [all …]
/linux-4.4.14/arch/powerpc/kernel/

smp.c:
    214  void smp_muxed_ipi_set_data(int cpu, unsigned long data)
    216          struct cpu_messages *info = &per_cpu(ipi_message, cpu);
    221  void smp_muxed_ipi_message_pass(int cpu, int msg)
    223          struct cpu_messages *info = &per_cpu(ipi_message, cpu);
    235          smp_ops->cause_ipi(cpu, info->data);
    267  static inline void do_message_pass(int cpu, int msg)
    270          smp_ops->message_pass(cpu, msg);
    273          smp_muxed_ipi_message_pass(cpu, msg);
    277  void smp_send_reschedule(int cpu)
    280          do_message_pass(cpu, PPC_MSG_RESCHEDULE);
         [all …]

tau_6xx.c:
     52  void set_thresholds(unsigned long cpu)
     59          mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
     64          mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
     67          mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
     68          mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
     72  void TAUupdate(int cpu)
     84          if (tau[cpu].low >= step_size) {
     85                  tau[cpu].low -= step_size;
     86                  tau[cpu].high -= (step_size - window_expand);
     88          tau[cpu].grew = 1;
         [all …]

sysfs.c:
     29  static DEFINE_PER_CPU(struct cpu, cpu_devices);
         in store_smt_snooze_delay():
     45          struct cpu *cpu = container_of(dev, struct cpu, dev);
     53          per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
         in show_smt_snooze_delay():
     61          struct cpu *cpu = container_of(dev, struct cpu, dev);
     63          return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
         in setup_smt_snooze_delay():
     71          unsigned int cpu;
     78          for_each_possible_cpu(cpu)
     79                  per_cpu(smt_snooze_delay, cpu) = snooze;
         in show_pw20_state():
    119          unsigned int cpu = dev->id;
    121          smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
         [all …]
/linux-4.4.14/arch/arm/mach-tegra/

platsmp.c:
     39  static void tegra_secondary_init(unsigned int cpu)
     41          cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
     45  static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle)
     47          cpu = cpu_logical_map(cpu);
     57          tegra_put_cpu_in_reset(cpu);
     65          flowctrl_write_cpu_halt(cpu, 0);
     67          tegra_enable_cpu_clock(cpu);
     68          flowctrl_write_cpu_csr(cpu, 0);  /* Clear flow controller CSR. */
     69          tegra_cpu_out_of_reset(cpu);
     73  static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle)
         [all …]
/linux-4.4.14/arch/microblaze/kernel/cpu/

cpuinfo-static.c:
     23  void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
     28          (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
     29          (fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) |
     30          (fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) |
     31          (fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0);
     43          ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul");
     51          ci->use_fpu = fcpu(cpu, "xlnx,use-fpu");
     59          (fcpu(cpu, "xlnx,unaligned-exceptions") ?
     61          (fcpu(cpu, "xlnx,ill-opcode-exception") ?
     63          (fcpu(cpu, "xlnx,iopb-bus-exception") ?
         [all …]
/linux-4.4.14/kernel/

smpboot.c:
     28  struct task_struct *idle_thread_get(unsigned int cpu)
     30          struct task_struct *tsk = per_cpu(idle_threads, cpu);
     34          init_idle(tsk, cpu);
     49  static inline void idle_init(unsigned int cpu)
     51          struct task_struct *tsk = per_cpu(idle_threads, cpu);
     54                  tsk = fork_idle(cpu);
     56                          pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
     58                          per_cpu(idle_threads, cpu) = tsk;
         in idle_threads_init():
     67          unsigned int cpu, boot_cpu;
     71          for_each_possible_cpu(cpu) {
         [all …]

smp.c:
         in hotplug_cfd():
     39          long cpu = (long)hcpu;
     40          struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
     46                                  cpu_to_node(cpu)))
         in call_function_init():
     91          void *cpu = (void *)(long)smp_processor_id();
     97          hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
    144  static int generic_exec_single(int cpu, struct call_single_data *csd,
    147          if (cpu == smp_processor_id()) {
    162          if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
    181          if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
    182                  arch_send_call_function_single_ipi(cpu);
         [all …]
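generic_exec_single() is the path behind smp_call_function_single(): run the function directly when the target is the current CPU, otherwise queue the csd on the target's per-cpu llist and send an IPI. A hedged example of the public API this implements (the callback and its payload are illustrative):

    #include <linux/smp.h>

    /* Runs on the target CPU, in IPI context: keep it short, no sleeping. */
    static void read_id_on_cpu(void *info)
    {
            unsigned long *val = info;

            *val = smp_processor_id();  /* stand-in for a per-CPU register read */
    }

    static unsigned long query_cpu(int cpu)
    {
            unsigned long val = 0;

            /* wait=1: block until the target CPU has run the function,
             * so "val" is stable when we return. */
            smp_call_function_single(cpu, read_id_on_cpu, &val, 1);
            return val;
    }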
cpu.c:
    261  void clear_tasks_mm_cpumask(int cpu)
    272          WARN_ON(cpu_online(cpu));
    284          cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
    339  static int _cpu_down(unsigned int cpu, int tasks_frozen)
    342          void *hcpu = (void *)(long)cpu;
    352          if (!cpu_online(cpu))
    362                         __func__, cpu);
    381          smpboot_park_threads(cpu);
    392          err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
    399          BUG_ON(cpu_online(cpu));
         [all …]

profile.c:
         in __profile_flip_buffers():
    238          int cpu = smp_processor_id();
    240          per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
         in profile_flip_buffers():
    245          int i, j, cpu;
    251          for_each_online_cpu(cpu) {
    252                  struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
         in profile_discard_flip_buffers():
    268          int i, cpu;
    274          for_each_online_cpu(cpu) {
    275                  struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
         in do_profile_hits():
    284          int i, j, cpu;
    290          cpu = get_cpu();
         [all …]

watchdog.c:
     69  #define for_each_watchdog_cpu(cpu) \
     70          for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
         in touch_all_softlockup_watchdogs():
    240          int cpu;
    247          for_each_watchdog_cpu(cpu)
    248                  per_cpu(watchdog_touch_ts, cpu) = 0;
    370  static int watchdog_nmi_enable(unsigned int cpu);
    371  static void watchdog_nmi_disable(unsigned int cpu);
    493  static void watchdog_enable(unsigned int cpu)
    502          watchdog_nmi_enable(cpu);
    513  static void watchdog_disable(unsigned int cpu)
         [all …]
/linux-4.4.14/arch/arm/mach-bcm/

platsmp-brcmstb.c:
     67  static int per_cpu_sw_state_rd(u32 cpu)
     69          sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
     70          return per_cpu(per_cpu_sw_state, cpu);
     73  static void per_cpu_sw_state_wr(u32 cpu, int val)
     76          per_cpu(per_cpu_sw_state, cpu) = val;
     77          sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
     80  static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
     83  static void __iomem *pwr_ctrl_get_base(u32 cpu)
     86          base += (cpu_logical_map(cpu) * 4);
     90  static u32 pwr_ctrl_rd(u32 cpu)
         [all …]
/linux-4.4.14/drivers/base/

cacheinfo.c:
     33  #define ci_cacheinfo(cpu)       (&per_cpu(ci_cpu_cacheinfo, cpu))
     34  #define cache_leaves(cpu)       (ci_cacheinfo(cpu)->num_leaves)
     35  #define per_cpu_cacheinfo(cpu)  (ci_cacheinfo(cpu)->info_list)
     37  struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
     39          return ci_cacheinfo(cpu);
     43  static int cache_setup_of_node(unsigned int cpu)
     47          struct device *cpu_dev = get_cpu_device(cpu);
     48          struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
     56                  pr_err("No cpu device for CPU %d\n", cpu);
     61                  pr_err("Failed to find cpu%d device node\n", cpu);
         [all …]

cpu.c:
     35  static void change_cpu_under_node(struct cpu *cpu,
     38          int cpuid = cpu->dev.id;
     41          cpu->node_id = to_nid;
         in cpu_subsys_online():
     46          struct cpu *cpu = container_of(dev, struct cpu, dev);
     62                  change_cpu_under_node(cpu, from_nid, to_nid);
     72  void unregister_cpu(struct cpu *cpu)
     74          int logical_cpu = cpu->dev.id;
     78          device_unregister(&cpu->dev);
         in show_crash_notes():
    142          struct cpu *cpu = container_of(dev, struct cpu, dev);
    147          cpunum = cpu->dev.id;
         [all …]
/linux-4.4.14/tools/power/cpupower/lib/

cpufreq.c:
     16  int cpufreq_cpu_exists(unsigned int cpu)
     18          return sysfs_cpu_exists(cpu);
     21  unsigned long cpufreq_get_freq_kernel(unsigned int cpu)
     23          return sysfs_get_freq_kernel(cpu);
     26  unsigned long cpufreq_get_freq_hardware(unsigned int cpu)
     28          return sysfs_get_freq_hardware(cpu);
     31  unsigned long cpufreq_get_transition_latency(unsigned int cpu)
     33          return sysfs_get_freq_transition_latency(cpu);
     36  int cpufreq_get_hardware_limits(unsigned int cpu,
     42          return sysfs_get_freq_hardware_limits(cpu, min, max);
         [all …]

sysfs.h:
      2  extern unsigned int sysfs_cpu_exists(unsigned int cpu);
      5  extern unsigned long sysfs_get_freq_kernel(unsigned int cpu);
      6  extern unsigned long sysfs_get_freq_hardware(unsigned int cpu);
      7  extern unsigned long sysfs_get_freq_transition_latency(unsigned int cpu);
      8  extern int sysfs_get_freq_hardware_limits(unsigned int cpu,
     10  extern char *sysfs_get_freq_driver(unsigned int cpu);
     11  extern struct cpufreq_policy *sysfs_get_freq_policy(unsigned int cpu);
     13                  unsigned int cpu);
     15                  unsigned int cpu);
     17                  unsigned int cpu);
         [all …]

cpufreq.h:
     43          unsigned int cpu;
     66  extern int cpufreq_cpu_exists(unsigned int cpu);
     76  extern unsigned long cpufreq_get_freq_kernel(unsigned int cpu);
     78  extern unsigned long cpufreq_get_freq_hardware(unsigned int cpu);
     80  #define cpufreq_get(cpu) cpufreq_get_freq_kernel(cpu);
     87  extern unsigned long cpufreq_get_transition_latency(unsigned int cpu);
     96  extern int cpufreq_get_hardware_limits(unsigned int cpu,
    107  extern char *cpufreq_get_driver(unsigned int cpu);
    119  extern struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu);
    133          *cpufreq_get_available_governors(unsigned int cpu);
         [all …]
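cpufreq.c is a thin shim from the public cpufreq_* calls to the sysfs_* readers declared in sysfs.h. A short sketch of a client of this library, under the assumption (taken from the callers excerpted later in this listing) that 0 or NULL signals failure and a nonzero int return means the sysfs read failed:

    #include <stdio.h>
    #include "cpufreq.h"   /* the cpupower library header excerpted above */

    int main(void)
    {
            unsigned int cpu = 0;        /* query the boot CPU as an example */
            unsigned long min, max;
            unsigned long cur = cpufreq_get_freq_kernel(cpu);

            if (!cur)                    /* 0: no cpufreq support for this CPU */
                    fprintf(stderr, "no kernel frequency for cpu%u\n", cpu);
            else
                    printf("cpu%u: %lu kHz\n", cpu, cur);

            /* Nonzero return means the limits could not be read. */
            if (!cpufreq_get_hardware_limits(cpu, &min, &max))
                    printf("hardware range: %lu..%lu kHz\n", min, max);
            return 0;
    }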
sysfs.c:
     50  static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
     56                   cpu, fname);
     62  static unsigned int sysfs_cpufreq_write_file(unsigned int cpu,
     71                   cpu, fname);
    114  static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu,
    125          len = sysfs_cpufreq_read_file(cpu, cpufreq_value_files[which],
    153  static char *sysfs_cpufreq_get_one_string(unsigned int cpu,
    163          len = sysfs_cpufreq_read_file(cpu, cpufreq_string_files[which],
    195  static int sysfs_cpufreq_write_one_value(unsigned int cpu,
    202          if (sysfs_cpufreq_write_file(cpu, cpufreq_write_files[which],
         [all …]
/linux-4.4.14/arch/arc/kernel/

setup.c:
         in read_arc_build_cfg_regs():
     49          struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
     51          FIX_PTR(cpu);
     53          READ_BCR(AUX_IDENTITY, cpu->core);
     54          READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
     56          READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
     57          cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
     67          READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
     69          cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0;    /* 2,3 */
     70          cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */
     71          cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0;        /* 1,3 */
         [all …]

smp.c:
     90  static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
     92          BUG_ON(cpu == 0);
     93          wake_flag = cpu;
     96  void arc_platform_smp_wait_to_boot(int cpu)
     98          while (wake_flag != cpu)
         in start_kernel_secondary():
    119          unsigned int cpu = smp_processor_id();
    127          cpumask_set_cpu(cpu, mm_cpumask(mm));
    129          notify_cpu_starting(cpu);
    130          set_cpu_online(cpu, true);
    132          pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
         [all …]
/linux-4.4.14/arch/blackfin/mm/

sram-alloc.c:
         in l1sram_init():
     68          unsigned int cpu;
     77          for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
     78                  per_cpu(free_l1_ssram_head, cpu).next =
     80                  if (!per_cpu(free_l1_ssram_head, cpu).next) {
     85                  per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve;
     86                  per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve;
     87                  per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
     88                  per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
     90                  per_cpu(used_l1_ssram_head, cpu).next = NULL;
     93                  spin_lock_init(&per_cpu(l1sram_lock, cpu));
         [all …]
/linux-4.4.14/include/linux/

cpumask.h:
     98  #define cpu_online(cpu)    cpumask_test_cpu((cpu), cpu_online_mask)
     99  #define cpu_possible(cpu)  cpumask_test_cpu((cpu), cpu_possible_mask)
    100  #define cpu_present(cpu)   cpumask_test_cpu((cpu), cpu_present_mask)
    101  #define cpu_active(cpu)    cpumask_test_cpu((cpu), cpu_active_mask)
    107  #define cpu_online(cpu)    ((cpu) == 0)
    108  #define cpu_possible(cpu)  ((cpu) == 0)
    109  #define cpu_present(cpu)   ((cpu) == 0)
    110  #define cpu_active(cpu)    ((cpu) == 0)
    114  static inline unsigned int cpumask_check(unsigned int cpu)
    117          WARN_ON_ONCE(cpu >= nr_cpumask_bits);
         [all …]
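The first block of definitions is the SMP case, where each predicate tests the corresponding global mask; the second is the uniprocessor fallback, where only CPU 0 exists. A small kernel-style sketch using these predicates (for_each_possible_cpu() comes from the same header, though it is not in the excerpt; the counters and printout are illustrative):

    /* Count how many possible CPUs are currently online vs. merely present. */
    static void count_cpu_states(void)
    {
            unsigned int cpu, online = 0, present = 0;

            for_each_possible_cpu(cpu) {
                    if (cpu_online(cpu))
                            online++;
                    if (cpu_present(cpu))
                            present++;
            }
            pr_info("%u online, %u present\n", online, present);
    }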
topology.h:
     82  static inline int cpu_to_node(int cpu)
     84          return per_cpu(numa_node, cpu);
     96  static inline void set_cpu_numa_node(int cpu, int node)
     98          per_cpu(numa_node, cpu) = node;
    148  static inline int cpu_to_mem(int cpu)
    150          return per_cpu(_numa_mem_, cpu);
    155  static inline void set_cpu_numa_mem(int cpu, int node)
    157          per_cpu(_numa_mem_, cpu) = node;
    158          _node_numa_mem_[cpu_to_node(cpu)] = node;
    180  static inline int cpu_to_mem(int cpu)
         [all …]

ring_buffer.h:
    100  int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
    101  int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
    109  int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
    121  ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
    124  ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
    128  ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
    140  unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
    142  void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
    147                             struct ring_buffer *buffer_b, int cpu);
    151                             struct ring_buffer *buffer_b, int cpu)
         [all …]
/linux-4.4.14/arch/arm64/kernel/

smp.c:
     79  static int boot_secondary(unsigned int cpu, struct task_struct *idle)
     81          if (cpu_ops[cpu]->cpu_boot)
     82                  return cpu_ops[cpu]->cpu_boot(cpu);
     89  int __cpu_up(unsigned int cpu, struct task_struct *idle)
    103          ret = boot_secondary(cpu, idle);
    112                  if (!cpu_online(cpu)) {
    113                          pr_crit("CPU%u: failed to come online\n", cpu);
    117                  pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
         in secondary_start_kernel():
    137          unsigned int cpu = smp_processor_id();
    166          if (cpu_ops[cpu]->cpu_postboot)
         [all …]

topology.c:
         in get_cpu_for_node():
     29          int cpu;
     35          for_each_possible_cpu(cpu) {
     36                  if (of_get_cpu_node(cpu, NULL) == cpu_node) {
     38                          return cpu;
         in parse_core():
     54          int cpu;
     62                  cpu = get_cpu_for_node(t);
     63                  if (cpu >= 0) {
     64                          cpu_topology[cpu].cluster_id = cluster_id;
     65                          cpu_topology[cpu].core_id = core_id;
     66                          cpu_topology[cpu].thread_id = i;
         [all …]

psci.c:
     35  static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
     41          cpu_node = of_get_cpu_node(cpu, NULL);
     91          per_cpu(psci_power_state, cpu) = psci_states;
     99  static int __init cpu_psci_cpu_init(unsigned int cpu)
    104  static int __init cpu_psci_cpu_prepare(unsigned int cpu)
    107                  pr_err("no cpu_on method, not booting CPU%d\n", cpu);
    114  static int cpu_psci_cpu_boot(unsigned int cpu)
    116          int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
    118                  pr_err("failed to boot CPU%d (%d)\n", cpu, err);
    124  static int cpu_psci_cpu_disable(unsigned int cpu)
         [all …]

cpuidle.c:
     18  int __init arm_cpuidle_init(unsigned int cpu)
     22          if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
     23                  ret = cpu_ops[cpu]->cpu_init_idle(cpu);
         in arm_cpuidle_suspend():
     37          int cpu = smp_processor_id();
     43          if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
     45          return cpu_ops[cpu]->cpu_suspend(index);
/linux-4.4.14/drivers/xen/

cpu_hotplug.c:
     11  static void enable_hotplug_cpu(int cpu)
     13          if (!cpu_present(cpu))
     14                  xen_arch_register_cpu(cpu);
     16          set_cpu_present(cpu, true);
     19  static void disable_hotplug_cpu(int cpu)
     21          if (cpu_online(cpu)) {
     23                  device_offline(get_cpu_device(cpu));
     26          if (cpu_present(cpu))
     27                  xen_arch_unregister_cpu(cpu);
     29          set_cpu_present(cpu, false);
         [all …]
/linux-4.4.14/drivers/cpufreq/

cppc_cpufreq.c:
         in cppc_cpufreq_set_target():
     39          struct cpudata *cpu;
     43          cpu = all_cpu_data[policy->cpu];
     45          cpu->perf_ctrls.desired_perf = target_freq;
     50          ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
     55                          cpu->cpu, ret);
         in cppc_cpufreq_stop_cpu():
     68          int cpu_num = policy->cpu;
     69          struct cpudata *cpu = all_cpu_data[cpu_num];
     72          cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;
     74          ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
     77                          cpu->perf_caps.lowest_perf, cpu_num, ret);
         [all …]

intel_pstate.c:
    103          int cpu;
    252  static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
    254          pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
    255          pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
    256          pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
    258          pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
         in intel_pstate_reset_all_pid():
    263          unsigned int cpu;
    265          for_each_online_cpu(cpu) {
    266                  if (all_cpu_data[cpu])
    267                          intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
         [all …]

speedstep-centrino.c:
         in centrino_cpu_init_table():
    234          struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
    238                  if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
    240                       strcmp(cpu->x86_model_id, model->model_name) == 0))
    247                         cpu->x86_model_id);
    254                         cpu->x86_model_id);
    259          per_cpu(centrino_model, policy->cpu) = model;
    285  static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
    294          if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
    295              (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
    296              (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
         [all …]

ppc_cbe_cpufreq.c:
     50  static int set_pmode(unsigned int cpu, unsigned int slow_mode)
     55                  rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
     57                  rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
     59          pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
         in cbe_cpufreq_cpu_init():
     74          struct device_node *cpu;
     76          cpu = of_get_cpu_node(policy->cpu, NULL);
     78          if (!cpu)
     81          pr_debug("init cpufreq on CPU %d\n", policy->cpu);
     86          if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
     87              !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
         [all …]

arm_big_little.c:
     71  static inline int raw_cpu_to_cluster(int cpu)
     73          return topology_physical_package_id(cpu);
     76  static inline int cpu_to_cluster(int cpu)
     79                  MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
    101  static unsigned int clk_get_cpu_rate(unsigned int cpu)
    103          u32 cur_cluster = per_cpu(physical_cluster, cpu);
    110          pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
    116  static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
    120                           cpu));
    122          return per_cpu(cpu_last_req_freq, cpu);
         [all …]

sh-cpufreq.c:
     33  static unsigned int sh_cpufreq_get(unsigned int cpu)
     35          return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
         in sh_cpufreq_target():
     45          unsigned int cpu = policy->cpu;
     46          struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
     53          set_cpus_allowed_ptr(current, cpumask_of(cpu));
     55          BUG_ON(smp_processor_id() != cpu);
     57          dev = get_cpu_device(cpu);
     67          freqs.old = sh_cpufreq_get(cpu);
         in sh_cpufreq_verify():
     83          struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
         in sh_cpufreq_cpu_init():
    101          unsigned int cpu = policy->cpu;
         [all …]

p4-clockmod.c:
     55  static unsigned int cpufreq_p4_get(unsigned int cpu);
     57  static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
     64          rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
     67                  pr_debug("CPU#%d currently thermal throttled\n", cpu);
     69          if (has_N44_O17_errata[cpu] &&
     73          rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
     75                  pr_debug("CPU#%d disabling modulation\n", cpu);
     76                  wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
     79                           cpu, ((125 * newstate) / 10));
     87                  wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
         [all …]
/linux-4.4.14/arch/powerpc/include/asm/

smp.h:
     36  extern int cpu_to_chip_id(int cpu);
     41          void  (*message_pass)(int cpu, int msg);
     43          void  (*cause_ipi)(int cpu, unsigned long data);
     66  void generic_cpu_die(unsigned int cpu);
     67  void generic_set_cpu_dead(unsigned int cpu);
     68  void generic_set_cpu_up(unsigned int cpu);
     69  int generic_check_cpu_restart(unsigned int cpu);
     79  #define raw_smp_processor_id()  (current_thread_info()->cpu)
     82  static inline int get_hard_smp_processor_id(int cpu)
     84          return smp_hw_index[cpu];
         [all …]

cputhreads.h:
         in cpu_thread_mask_to_cores():
     45          int i, cpu;
     51                  cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
     52                  if (cpu < nr_cpu_ids)
     53                          cpumask_set_cpu(cpu, &res);
     70  int cpu_core_index_of_thread(int cpu);
     73  static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
     77  static inline int cpu_thread_in_core(int cpu)
     79          return cpu & (threads_per_core - 1);
     82  static inline int cpu_thread_in_subcore(int cpu)
     84          return cpu & (threads_per_subcore - 1);
         [all …]
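cpu_thread_in_core() relies on threads_per_core being a power of two, so the thread index is just the low bits of the CPU number. A hedged illustration of the arithmetic as a standalone program, with the mask widths as example values (threads_per_core = 8, threads_per_subcore = 2):

    #include <stdio.h>

    /* Userspace re-statement of the header's bit masking. */
    int main(void)
    {
            int threads_per_core = 8, threads_per_subcore = 2, cpu = 11;

            printf("thread in core:    %d\n", cpu & (threads_per_core - 1));    /* 3 */
            printf("thread in subcore: %d\n", cpu & (threads_per_subcore - 1)); /* 1 */
            printf("first thread:      %d\n", cpu & ~(threads_per_core - 1));   /* 8 */
            return 0;
    }

So CPU 11 is thread 3 of the core whose first thread is CPU 8.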
cell-pmu.h:
     79  extern u32  cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
     80  extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
     81  extern u32  cbe_read_ctr(u32 cpu, u32 ctr);
     82  extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
     84  extern u32  cbe_read_pm07_control(u32 cpu, u32 ctr);
     85  extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
     86  extern u32  cbe_read_pm(u32 cpu, enum pm_reg_name reg);
     87  extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
     89  extern u32  cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
     90  extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
         [all …]
/linux-4.4.14/arch/arm/kernel/

smp.c:
     98  int __cpu_up(unsigned int cpu, struct task_struct *idle)
    123          ret = smp_ops.smp_boot_secondary(cpu, idle);
    132                  if (!cpu_online(cpu)) {
    133                          pr_crit("CPU%u: failed to come online\n", cpu);
    137                  pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
    168  static int platform_cpu_kill(unsigned int cpu)
    171                  return smp_ops.cpu_kill(cpu);
    175  static int platform_cpu_disable(unsigned int cpu)
    178                  return smp_ops.cpu_disable(cpu);
    183  int platform_can_hotplug_cpu(unsigned int cpu)
         [all …]

topology.c:
     45  unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
     47          return per_cpu(cpu_scale, cpu);
     50  static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
     52          per_cpu(cpu_scale, cpu) = capacity;
     78  #define cpu_capacity(cpu)       __cpu_capacity[cpu]
         in parse_dt_topology():
     97          int cpu = 0;
    102          for_each_possible_cpu(cpu) {
    107                  cn = of_get_cpu_node(cpu, NULL);
    109                          pr_err("missing device node for CPU %d\n", cpu);
    137                  cpu_capacity(cpu) = capacity;
         [all …]

cpuidle.c:
         in arm_cpuidle_suspend():
     56          int cpu = smp_processor_id();
     58          if (cpuidle_ops[cpu].suspend)
     59                  ret = cpuidle_ops[cpu].suspend(cpu, index);
     97  static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
    113          cpuidle_ops[cpu] = *ops;  /* structure copy */
    137  int __init arm_cpuidle_init(int cpu)
    139          struct device_node *cpu_node = of_cpu_device_node_get(cpu);
    145          ret = arm_cpuidle_read_ops(cpu_node, cpu);
    146          if (!ret && cpuidle_ops[cpu].init)
    147                  ret = cpuidle_ops[cpu].init(cpu_node, cpu);
/linux-4.4.14/arch/mips/kernel/

smp.c:
     82  static inline void set_cpu_sibling_map(int cpu)
     86          cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
     90                  if (cpu_data[cpu].package == cpu_data[i].package &&
     91                      cpu_data[cpu].core == cpu_data[i].core) {
     92                          cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
     93                          cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
     97                  cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
    100  static inline void set_cpu_core_map(int cpu)
    104          cpumask_set_cpu(cpu, &cpu_core_setup_map);
    107                  if (cpu_data[cpu].package == cpu_data[i].package) {
         [all …]

smp-bmips.c:
     51  static void bmips_set_reset_vec(int cpu, u32 val);
     59  static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
     60  static void bmips5000_send_ipi_single(int cpu, unsigned int action);
     68  #define CPUNUM(cpu, shift)       (((cpu) + bmips_cpu_offset) << (shift))
     69  #define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
     70  #define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
     71  #define ACTION_BOOT_THREAD(cpu)  (0x08 | CPUNUM(cpu, 0))
         in bmips_smp_setup():
     75          int i, cpu = 1, boot_cpu = 0;
    141                  __cpu_number_map[i] = cpu;
    142                  __cpu_logical_map[cpu] = i;
         [all …]

cpu-probe.c:
    297  static inline void set_elf_platform(int cpu, const char *plat)
    299          if (cpu == 0)
    691  static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
    696                  __cpu_name[cpu] = "R2000";
    708                          __cpu_name[cpu] = "R3081";
    711                          __cpu_name[cpu] = "R3000A";
    715                  __cpu_name[cpu] = "R3000";
    729                                  __cpu_name[cpu] = "R4400PC";
    732                                  __cpu_name[cpu] = "R4000PC";
    758                  __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
         [all …]

cevt-bcm1480.c:
         in sibyte_set_periodic():
     46          unsigned int cpu = smp_processor_id();
     49          cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
     50          init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
         in sibyte_shutdown():
     60          unsigned int cpu = smp_processor_id();
     63          cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
         in sibyte_next_event():
     72          unsigned int cpu = smp_processor_id();
     75          cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
     76          init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
         in sibyte_counter_handler():
     87          unsigned int cpu = smp_processor_id();
     98          cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
         [all …]

cevt-sb1250.c:
         in sibyte_set_periodic():
     56          unsigned int cpu = smp_processor_id();
     59          cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
     60          init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
         in sibyte_next_event():
     71          unsigned int cpu = smp_processor_id();
     74          cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
     75          init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
         in sibyte_counter_handler():
     86          unsigned int cpu = smp_processor_id();
     97          cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
         in sb1250_clockevent_init():
    111          unsigned int cpu = smp_processor_id();
    112          unsigned int irq = K_INT_TIMER_0 + cpu;
         [all …]
/linux-4.4.14/Documentation/devicetree/bindings/arm/

topology.txt:
     16  The cpu nodes (bindings defined in [1]) represent the devices that
     22  For instance in a system where CPUs support SMT, "cpu" nodes represent all
     24  In systems where SMT is not supported "cpu" nodes represent all cores present
     27  ARM topology bindings allow one to associate cpu nodes with hierarchical groups
     36  If not stated otherwise, whenever a reference to a cpu node phandle is made its
     37  value must point to a cpu node compliant with the cpu node bindings as
     39  A topology description containing phandles to cpu nodes that are not compliant
     43  2 - cpu-map node
     46  The ARM CPU topology is defined within the cpu-map node, which is a direct
     50  - cpu-map node
         [all …]
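The excerpted binding text pairs with the axm5516-cpus.dtsi and hip04.dtsi entries earlier in this listing. A minimal cpu-map sketch in devicetree syntax showing one cluster of two cores (labels are illustrative, and the cpu nodes are trimmed of the compatible/reg properties a real dts needs):

    cpus {
            cpu-map {
                    cluster0 {
                            core0 { cpu = <&CPU0>; };
                            core1 { cpu = <&CPU1>; };
                    };
            };
            CPU0: cpu@0 { device_type = "cpu"; };
            CPU1: cpu@1 { device_type = "cpu"; };
    };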
cpus.txt:
      6  the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
      7  defining properties for every cpu.
     26  cpus and cpu node bindings definition
     29  The ARM architecture, in accordance with the ePAPR, requires the cpus and cpu
     34          Description: Container of cpu nodes
     65  - cpu node
     74                  Definition: must be "cpu"
    206          - cpu-release-addr
    229          - cpu-idle-states
    234                  by this cpu [3].
         [all …]
/linux-4.4.14/tools/power/cpupower/utils/helpers/

topology.c:
     23  static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result)
     30                   cpu, fname);
         in __compare():
     51          else if (top1->cpu < top2->cpu)
     53          else if (top1->cpu > top2->cpu)
         in get_cpu_topology():
     67          int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF);
     73          for (cpu = 0; cpu < cpus; cpu++) {
     74                  cpu_top->core_info[cpu].cpu = cpu;
     75                  cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu);
     77                           cpu,
     79                           &(cpu_top->core_info[cpu].pkg)) < 0) {
         [all …]

sysfs.c:
     48  int sysfs_is_cpu_online(unsigned int cpu)
     58          snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu);
     67          snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu);
    104  unsigned int sysfs_idlestate_file_exists(unsigned int cpu,
    113                   cpu, idlestate, fname);
    125  unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate,
    133                   cpu, idlestate, fname);
    158  unsigned int sysfs_idlestate_write_file(unsigned int cpu,
    168                   cpu, idlestate, fname);
    204  static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu,
         [all …]

helpers.h:
     91  extern int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info);
     98          int cpu;
    126  extern int read_msr(int cpu, unsigned int idx, unsigned long long *val);
    127  extern int write_msr(int cpu, unsigned int idx, unsigned long long val);
    129  extern int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val);
    130  extern int msr_intel_get_perf_bias(unsigned int cpu);
    131  extern unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu);
    147  extern int decode_pstates(unsigned int cpu, unsigned int cpu_family,
    152  extern int cpufreq_has_boost_support(unsigned int cpu, int *support,
    165  static inline int decode_pstates(unsigned int cpu, unsigned int cpu_family,
         [all …]

sysfs.h:
     10  extern unsigned int sysfs_idlestate_file_exists(unsigned int cpu,
     14  extern int sysfs_is_cpu_online(unsigned int cpu);
     16  extern int sysfs_is_idlestate_disabled(unsigned int cpu,
     18  extern int sysfs_idlestate_disable(unsigned int cpu, unsigned int idlestate,
     20  extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
     22  extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
     24  extern unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
     26  extern char *sysfs_get_idlestate_name(unsigned int cpu,
     28  extern char *sysfs_get_idlestate_desc(unsigned int cpu,
     30  extern unsigned int sysfs_get_idlestate_count(unsigned int cpu);
/linux-4.4.14/arch/x86/kernel/

setup_percpu.c:
         in pcpu_need_numa():
     69          unsigned int cpu;
     71          for_each_possible_cpu(cpu) {
     72                  int node = early_cpu_to_node(cpu);
     98  static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
    103          int node = early_cpu_to_node(cpu);
    109                           cpu, node);
    111                           cpu, size, __pa(ptr));
    116                           cpu, size, node, __pa(ptr));
    127  static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
    129          return pcpu_alloc_bootmem(cpu, size, align);
         [all …]

smpboot.c:
    372  void set_cpu_sibling_map(int cpu)
    376          struct cpuinfo_x86 *c = &cpu_data(cpu);
    380          cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
    383                  cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
    384                  cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
    385                  cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
    393                  if ((i == cpu) || (has_smt && match_smt(c, o)))
    394                          link_mask(topology_sibling_cpumask, cpu, i);
    396                  if ((i == cpu) || (has_mp && match_llc(c, o)))
    397                          link_mask(cpu_llc_shared_mask, cpu, i);
         [all …]

msr.c:
         in msr_read():
     76          int cpu = iminor(file_inode(file));
     84                  err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
         in msr_write():
    104          int cpu = iminor(file_inode(file));
    116                  err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
         in msr_ioctl():
    130          int cpu = iminor(file_inode(file));
    143                  err = rdmsr_safe_regs_on_cpu(cpu, regs);
    159                  err = wrmsr_safe_regs_on_cpu(cpu, regs);
         in msr_open():
    176          unsigned int cpu = iminor(file_inode(file));
    182          if (cpu >= nr_cpu_ids || !cpu_online(cpu))
    185          c = &cpu_data(cpu);
         [all …]
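msr.c maps the character device's minor number to a CPU and the file offset to the MSR index, so userspace reads an MSR with pread() at offset = register number. A hedged userspace sketch (needs root, and the msr module loaded; 0x10 is IA32_TIME_STAMP_COUNTER on x86):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t tsc;
            /* The minor number picks the CPU: /dev/cpu/0/msr is CPU 0. */
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0) {
                    perror("open");   /* often: msr module not loaded */
                    return 1;
            }
            /* The offset selects the MSR to read. */
            if (pread(fd, &tsc, sizeof(tsc), 0x10) != sizeof(tsc)) {
                    perror("pread");
                    close(fd);
                    return 1;
            }
            printf("TSC: %llu\n", (unsigned long long)tsc);
            close(fd);
            return 0;
    }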
cpuid.c:
         in cpuid_read():
     88          int cpu = iminor(file_inode(file));
     99                  err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
         in cpuid_open():
    116          unsigned int cpu;
    119          cpu = iminor(file_inode(file));
    120          if (cpu >= nr_cpu_ids || !cpu_online(cpu))
    123          c = &cpu_data(cpu);
    140  static int cpuid_device_create(int cpu)
    144          dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
    145                              "cpu%d", cpu);
    149  static void cpuid_device_destroy(int cpu)
         [all …]

kvmclock.c:
         in kvm_get_wallclock():
     57          int cpu;
     64          cpu = get_cpu();
     66          vcpu_time = &hv_clock[cpu].pvti;
         in kvm_clock_read():
     81          int cpu;
     84          cpu = smp_processor_id();
     85          src = &hv_clock[cpu].pvti;
         in kvm_get_tsc_khz():
    131          int cpu;
    134          cpu = get_cpu();
    135          src = &hv_clock[cpu].pvti;
         in kvm_check_and_clear_guest_paused():
    157          int cpu = smp_processor_id();
         [all …]

topology.c:
     60  int _debug_hotplug_cpu(int cpu, int action)
     62          struct device *dev = get_cpu_device(cpu);
     65          if (!cpu_is_hotpluggable(cpu))
     72                  ret = cpu_down(cpu);
     74                          pr_info("CPU %u is now offline\n", cpu);
     78                          pr_debug("Can't offline CPU%d.\n", cpu);
     81                  ret = cpu_up(cpu);
     86                          pr_debug("Can't online CPU%d.\n", cpu);
         in arch_register_cpu():
    140          per_cpu(cpu_devices, num).cpu.hotpluggable = 1;
    142          return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
         [all …]
/linux-4.4.14/arch/arm64/boot/dts/cavium/

thunder-88xx.dtsi:
     65  cpu@000 {
     66          device_type = "cpu";
     71  cpu@001 {
     72          device_type = "cpu";
     77  cpu@002 {
     78          device_type = "cpu";
     83  cpu@003 {
     84          device_type = "cpu";
     89  cpu@004 {
     90          device_type = "cpu";
         [all …]
/linux-4.4.14/tools/power/cpupower/utils/

cpufreq-info.c:
         in proc_cpufreq_output():
     57          unsigned int cpu, nr_cpus;
     66          for (cpu = 0; cpu < nr_cpus; cpu++) {
     67                  policy = cpufreq_get_policy(cpu);
     71                  if (cpufreq_get_hardware_limits(cpu, &min, &max)) {
     78                         cpu, policy->min, max ? min_pctg : 0, policy->max,
    166  static int get_boost_mode(unsigned int cpu)
    176          ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states);
    179                   " on CPU %d -- are you root?\n"), cpu);
    195                  ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states,
    220                          intel_turbo_ratio = msr_intel_get_turbo_ratio(cpu);
         [all …]

cpuidle-info.c:
     23  static void cpuidle_cpu_output(unsigned int cpu, int verbose)
     28          printf(_("Analyzing CPU %d:\n"), cpu);
     30          idlestates = sysfs_get_idlestate_count(cpu);
     32                  printf(_("CPU %u: No idle states\n"), cpu);
     39                  tmp = sysfs_get_idlestate_name(cpu, idlestate);
     51                  int disabled = sysfs_is_idlestate_disabled(cpu, idlestate);
     55                  tmp = sysfs_get_idlestate_name(cpu, idlestate);
     61                  tmp = sysfs_get_idlestate_desc(cpu, idlestate);
     68                         sysfs_get_idlestate_latency(cpu, idlestate));
     70                         sysfs_get_idlestate_usage(cpu, idlestate));
         [all …]

cpuidle-set.c:
         in cmd_idle_set():
     30          unsigned int cpu = 0, idlestate = 0, idlestates = 0;
    101          for (cpu = bitmask_first(cpus_chosen);
    102               cpu <= bitmask_last(cpus_chosen); cpu++) {
    104                  if (!bitmask_isbitset(cpus_chosen, cpu))
    107                  if (sysfs_is_cpu_online(cpu) != 1)
    110                  idlestates = sysfs_get_idlestate_count(cpu);
    116                          ret = sysfs_idlestate_disable(cpu, idlestate, 1);
    118                                  printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu);
    121                                         idlestate, cpu);
    126                                         idlestate, cpu);
         [all …]

cpufreq-set.c:
    141  static int do_new_policy(unsigned int cpu, struct cpufreq_policy *new_pol)
    143          struct cpufreq_policy *cur_pol = cpufreq_get_policy(cpu);
    160          ret = cpufreq_set_policy(cpu, new_pol);
    168  static int do_one_cpu(unsigned int cpu, struct cpufreq_policy *new_pol,
    173                  return cpufreq_set_frequency(cpu, freq);
    180                          return cpufreq_modify_policy_min(cpu, new_pol->min);
    182                          return cpufreq_modify_policy_max(cpu, new_pol->max);
    184                          return cpufreq_modify_policy_governor(cpu,
    189                  return do_new_policy(cpu, new_pol);
         in cmd_freq_set():
    201          unsigned int cpu;
         [all …]
/linux-4.4.14/arch/s390/oprofile/

hwsampler.c:
     80  static int smp_ctl_ssctl_stop(int cpu)
     86          cb = &per_cpu(sampler_cpu_buffer, cpu);
     92          smp_call_function_single(cpu, execute_ssctl, &ep, 1);
     95                  printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
    100          smp_call_function_single(cpu, execute_qsi, &ep, 1);
    110  static int smp_ctl_ssctl_deactivate(int cpu)
    116          cb = &per_cpu(sampler_cpu_buffer, cpu);
    122          smp_call_function_single(cpu, execute_ssctl, &ep, 1);
    125                  printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
    128          smp_call_function_single(cpu, execute_qsi, &ep, 1);
         [all …]
/linux-4.4.14/arch/arm/common/

mcpm_entry.c:
     36  static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
     38          mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
     39          sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
     49  static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
     52          mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
     53          sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
     84  static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
    109                  if (i == cpu)
    113                  cpustate = c->cpus[i].cpu;
    118                  sync_cache_r(&c->cpus[i].cpu);
         [all …]

mcpm_platsmp.c:
     22  static void cpu_to_pcpu(unsigned int cpu,
     27          mpidr = cpu_logical_map(cpu);
     32  static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
     37          cpu_to_pcpu(cpu, &pcpu, &pcluster);
     40                   __func__, cpu, pcpu, pcluster);
     47          arch_send_wakeup_ipi_mask(cpumask_of(cpu));
     52  static void mcpm_secondary_init(unsigned int cpu)
     59  static int mcpm_cpu_kill(unsigned int cpu)
     63          cpu_to_pcpu(cpu, &pcpu, &pcluster);
     68  static bool mcpm_cpu_can_disable(unsigned int cpu)
         [all …]
/linux-4.4.14/arch/ia64/kernel/ |
D | err_inject.c | 61 u32 cpu=dev->id; \ 62 return sprintf(buf, "%lx\n", name[cpu]); \ 70 unsigned int cpu=dev->id; \ 71 name[cpu] = simple_strtoull(buf, NULL, 16); \ 84 unsigned int cpu=dev->id; in show() local 88 printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu); in show() 89 printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]); in show() 90 printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]); in show() 92 err_data_buffer[cpu].data1, in show() 93 err_data_buffer[cpu].data2, in show() [all …]
|
D | smpboot.c | 462 do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) in do_boot_cpu() argument 467 Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); in do_boot_cpu() 469 set_brendez_area(cpu); in do_boot_cpu() 470 platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); in do_boot_cpu() 477 if (cpumask_test_cpu(cpu, &cpu_callin_map)) in do_boot_cpu() 484 if (!cpumask_test_cpu(cpu, &cpu_callin_map)) { in do_boot_cpu() 485 printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid); in do_boot_cpu() 486 ia64_cpu_to_sapicid[cpu] = -1; in do_boot_cpu() 487 set_cpu_online(cpu, false); /* was set in smp_callin() */ in do_boot_cpu() 509 int sapicid, cpu, i; in smp_build_cpu_map() local [all …]
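do_boot_cpu() above follows the usual secondary-boot handshake: kick the AP, then poll a "callin" mask with a timeout and mark the CPU stuck if it never checks in. A toy userspace model of that handshake, with C11 atomics and a thread standing in for the real IPI and AP (all names illustrative):

    /* Toy model of the do_boot_cpu() handshake: the boot CPU kicks a
     * secondary, then polls a callin flag with a timeout; the secondary
     * sets the flag once alive. Build with -lpthread. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <pthread.h>
    #include <unistd.h>

    static atomic_int callin;

    static void *secondary(void *arg)
    {
        (void)arg;
        usleep(10000);                     /* pretend to come out of reset */
        atomic_store(&callin, 1);          /* smp_callin() equivalent */
        return NULL;
    }

    int main(void)
    {
        pthread_t ap;
        int timeout;

        pthread_create(&ap, NULL, secondary, NULL);   /* "send wakeup IPI" */

        for (timeout = 0; timeout < 100; timeout++) { /* ~1s budget */
            if (atomic_load(&callin))
                break;
            usleep(10000);
        }
        if (!atomic_load(&callin))
            fprintf(stderr, "Processor is stuck.\n"); /* would mark offline */
        else
            printf("secondary checked in after %d polls\n", timeout);

        pthread_join(ap, NULL);
        return 0;
    }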
|
D | topology.c | 51 sysfs_cpus[num].cpu.hotpluggable = 1; in arch_register_cpu() 54 return register_cpu(&sysfs_cpus[num].cpu, num); in arch_register_cpu() 60 unregister_cpu(&sysfs_cpus[num].cpu); in arch_unregister_cpu() 69 return register_cpu(&sysfs_cpus[num].cpu, num); in arch_register_cpu() 142 static void cache_shared_cpu_map_setup(unsigned int cpu, in cache_shared_cpu_map_setup() argument 149 if (cpu_data(cpu)->threads_per_core <= 1 && in cache_shared_cpu_map_setup() 150 cpu_data(cpu)->cores_per_socket <= 1) { in cache_shared_cpu_map_setup() 151 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); in cache_shared_cpu_map_setup() 164 if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id in cache_shared_cpu_map_setup() 177 static void cache_shared_cpu_map_setup(unsigned int cpu, in cache_shared_cpu_map_setup() argument [all …]
|
D | smp.c | 171 unsigned int cpu; in send_IPI_mask() local 173 for_each_cpu(cpu, mask) { in send_IPI_mask() 174 send_IPI_single(cpu, op); in send_IPI_mask() 210 unsigned int cpu, self_cpu; in kdump_smp_send_init() local 212 for_each_online_cpu(cpu) { in kdump_smp_send_init() 213 if (cpu != self_cpu) { in kdump_smp_send_init() 214 if(kdump_status[cpu] == 0) in kdump_smp_send_init() 215 platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0); in kdump_smp_send_init() 224 smp_send_reschedule (int cpu) in smp_send_reschedule() argument 226 platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); in smp_send_reschedule() [all …]
|
D | numa.c | 33 void map_cpu_to_node(int cpu, int nid) in map_cpu_to_node() argument 37 cpu_to_node_map[cpu] = 0; in map_cpu_to_node() 41 oldnid = cpu_to_node_map[cpu]; in map_cpu_to_node() 42 if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) { in map_cpu_to_node() 49 cpu_to_node_map[cpu] = nid; in map_cpu_to_node() 50 cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]); in map_cpu_to_node() 54 void unmap_cpu_from_node(int cpu, int nid) in unmap_cpu_from_node() argument 56 WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid])); in unmap_cpu_from_node() 57 WARN_ON(cpu_to_node_map[cpu] != nid); in unmap_cpu_from_node() 58 cpu_to_node_map[cpu] = 0; in unmap_cpu_from_node() [all …]
|
/linux-4.4.14/arch/xtensa/include/asm/ |
D | mmu_context.h | 34 #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu) argument 68 static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu) in get_new_mmu_context() argument 70 unsigned long asid = cpu_asid_cache(cpu); in get_new_mmu_context() 79 cpu_asid_cache(cpu) = asid; in get_new_mmu_context() 80 mm->context.asid[cpu] = asid; in get_new_mmu_context() 81 mm->context.cpu = cpu; in get_new_mmu_context() 84 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) in get_mmu_context() argument 91 unsigned long asid = mm->context.asid[cpu]; in get_mmu_context() 94 ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK)) in get_mmu_context() 95 get_new_mmu_context(mm, cpu); in get_mmu_context() [all …]
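The get_mmu_context()/get_new_mmu_context() pair above implements the classic generation-counted ASID allocator: the low bits are the hardware ASID, the bits above count allocation cycles, and a wrap of the low bits forces a TLB flush plus a new cycle. A standalone model of the scheme, with illustrative constants (not xtensa's) and a single global standing in for the per-CPU cache:

    #include <stdio.h>

    #define ASID_MASK        0xffUL              /* hardware ASID bits */
    #define ASID_FIRST_CYCLE (ASID_MASK + 1)     /* cycle counter above them */

    static unsigned long asid_cache = ASID_FIRST_CYCLE;  /* per-CPU in reality */

    /* Give one "mm" a valid ASID for the current cycle. */
    static void get_mmu_context(unsigned long *mm_asid)
    {
        /* fast path: ASID exists and belongs to the current cycle */
        if (*mm_asid && !((*mm_asid ^ asid_cache) & ~ASID_MASK))
            return;

        if ((++asid_cache & ASID_MASK) == 0) {
            printf("ASID space exhausted: flush TLB, start new cycle\n");
            asid_cache++;              /* skip ASID 0, typically reserved */
        }
        *mm_asid = asid_cache;
    }

    int main(void)
    {
        unsigned long mm = 0;
        int i;

        for (i = 0; i < 300; i++) {    /* force a wrap of the 256 ASIDs */
            mm = 0;                    /* pretend each iteration is a new mm */
            get_mmu_context(&mm);
        }
        printf("last ASID %#lx (cycle %#lx)\n", mm, mm & ~ASID_MASK);
        return 0;
    }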
|
/linux-4.4.14/arch/x86/kernel/cpu/ |
D | perf_event_amd_uncore.c | 35 int cpu; member 67 return *per_cpu_ptr(amd_uncore_nb, event->cpu); in event_to_amd_uncore() 69 return *per_cpu_ptr(amd_uncore_l2, event->cpu); in event_to_amd_uncore() 202 if (event->cpu < 0) in amd_uncore_event_init() 213 event->cpu = uncore->cpu; in amd_uncore_event_init() 287 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) in amd_uncore_alloc() argument 290 cpu_to_node(cpu)); in amd_uncore_alloc() 293 static int amd_uncore_cpu_up_prepare(unsigned int cpu) in amd_uncore_cpu_up_prepare() argument 298 uncore_nb = amd_uncore_alloc(cpu); in amd_uncore_cpu_up_prepare() 301 uncore_nb->cpu = cpu; in amd_uncore_cpu_up_prepare() [all …]
|
/linux-4.4.14/arch/sh/kernel/ |
D | smp.c | 48 static inline void smp_store_cpu_info(unsigned int cpu) in smp_store_cpu_info() argument 50 struct sh_cpuinfo *c = cpu_data + cpu; in smp_store_cpu_info() 59 unsigned int cpu = smp_processor_id(); in smp_prepare_cpus() local 62 current_thread_info()->cpu = cpu; in smp_prepare_cpus() 72 unsigned int cpu = smp_processor_id(); in smp_prepare_boot_cpu() local 74 __cpu_number_map[0] = cpu; in smp_prepare_boot_cpu() 75 __cpu_logical_map[0] = cpu; in smp_prepare_boot_cpu() 77 set_cpu_online(cpu, true); in smp_prepare_boot_cpu() 78 set_cpu_possible(cpu, true); in smp_prepare_boot_cpu() 80 per_cpu(cpu_state, cpu) = CPU_ONLINE; in smp_prepare_boot_cpu() [all …]
|
D | irq.c | 118 void irq_ctx_init(int cpu) in irq_ctx_init() argument 122 if (hardirq_ctx[cpu]) in irq_ctx_init() 125 irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE]; in irq_ctx_init() 127 irqctx->tinfo.cpu = cpu; in irq_ctx_init() 131 hardirq_ctx[cpu] = irqctx; in irq_ctx_init() 133 irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE]; in irq_ctx_init() 135 irqctx->tinfo.cpu = cpu; in irq_ctx_init() 139 softirq_ctx[cpu] = irqctx; in irq_ctx_init() 142 cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); in irq_ctx_init() 145 void irq_ctx_exit(int cpu) in irq_ctx_exit() argument [all …]
|
/linux-4.4.14/arch/tile/kernel/ |
D | smpboot.c | 41 int cpu = smp_processor_id(); in smp_prepare_boot_cpu() local 42 set_cpu_online(cpu, 1); in smp_prepare_boot_cpu() 43 set_cpu_present(cpu, 1); in smp_prepare_boot_cpu() 59 int cpu, cpu_count; in smp_prepare_cpus() local 62 current_thread_info()->cpu = boot_cpu; in smp_prepare_cpus() 85 for (cpu = 0; cpu < NR_CPUS; ++cpu) { in smp_prepare_cpus() 88 if (cpu == boot_cpu) in smp_prepare_cpus() 91 if (!cpu_possible(cpu)) { in smp_prepare_cpus() 97 per_cpu(boot_sp, cpu) = 0; in smp_prepare_cpus() 98 per_cpu(boot_pc, cpu) = (unsigned long) smp_nap; in smp_prepare_cpus() [all …]
|
D | smp.c | 63 void send_IPI_single(int cpu, int tag) in send_IPI_single() argument 66 .y = cpu / smp_width, in send_IPI_single() 67 .x = cpu % smp_width, in send_IPI_single() 76 int cpu; in send_IPI_many() local 79 for_each_cpu(cpu, mask) { in send_IPI_many() 81 BUG_ON(cpu == my_cpu); in send_IPI_many() 83 r->y = cpu / smp_width; in send_IPI_many() 84 r->x = cpu % smp_width; in send_IPI_many() 222 int cpu = smp_processor_id(); in ipi_init() local 223 HV_Recipient recip = { .y = cpu_y(cpu), .x = cpu_x(cpu), in ipi_init() [all …]
|
/linux-4.4.14/arch/powerpc/oprofile/ |
D | op_model_cell.c | 94 u16 cpu; /* Processor to modify */ member 219 pm_signal_local.cpu = node; in pm_rtas_reset_signals() 258 pm_signal_local[i].cpu = node; in pm_rtas_activate_signals() 373 static void write_pm_cntrl(int cpu) in write_pm_cntrl() argument 402 cbe_write_pm(cpu, pm_control, val); in write_pm_cntrl() 428 static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl) in enable_ctr() argument 432 cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]); in enable_ctr() 457 u32 cpu; in cell_virtual_cntr() local 492 for_each_online_cpu(cpu) { in cell_virtual_cntr() 493 if (cbe_get_hw_thread_id(cpu)) in cell_virtual_cntr() [all …]
|
/linux-4.4.14/arch/arm64/boot/dts/hisilicon/ |
D | hip05.dtsi | 29 cpu-map { 32 cpu = <&cpu0>; 35 cpu = <&cpu1>; 38 cpu = <&cpu2>; 41 cpu = <&cpu3>; 46 cpu = <&cpu4>; 49 cpu = <&cpu5>; 52 cpu = <&cpu6>; 55 cpu = <&cpu7>; 60 cpu = <&cpu8>; [all …]
|
D | hi6220.dtsi | 25 cpu-map { 28 cpu = <&cpu0>; 31 cpu = <&cpu1>; 34 cpu = <&cpu2>; 37 cpu = <&cpu3>; 42 cpu = <&cpu4>; 45 cpu = <&cpu5>; 48 cpu = <&cpu6>; 51 cpu = <&cpu7>; 56 cpu0: cpu@0 { [all …]
|
/linux-4.4.14/arch/powerpc/platforms/pseries/ |
D | hotplug-cpu.c | 64 enum cpu_state_vals get_cpu_current_state(int cpu) in get_cpu_current_state() argument 66 return per_cpu(current_state, cpu); in get_cpu_current_state() 69 void set_cpu_current_state(int cpu, enum cpu_state_vals state) in set_cpu_current_state() argument 71 per_cpu(current_state, cpu) = state; in set_cpu_current_state() 74 enum cpu_state_vals get_preferred_offline_state(int cpu) in get_preferred_offline_state() argument 76 return per_cpu(preferred_offline_state, cpu); in get_preferred_offline_state() 79 void set_preferred_offline_state(int cpu, enum cpu_state_vals state) in set_preferred_offline_state() argument 81 per_cpu(preferred_offline_state, cpu) = state; in set_preferred_offline_state() 84 void set_default_offline_state(int cpu) in set_default_offline_state() argument 86 per_cpu(preferred_offline_state, cpu) = default_offline_state; in set_default_offline_state() [all …]
|
D | offline_states.h | 13 extern enum cpu_state_vals get_cpu_current_state(int cpu); 14 extern void set_cpu_current_state(int cpu, enum cpu_state_vals state); 15 extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state); 16 extern void set_default_offline_state(int cpu); 18 static inline enum cpu_state_vals get_cpu_current_state(int cpu) in get_cpu_current_state() argument 23 static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state) in set_cpu_current_state() argument 27 static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state) in set_preferred_offline_state() argument 31 static inline void set_default_offline_state(int cpu) in set_default_offline_state() argument 36 extern enum cpu_state_vals get_preferred_offline_state(int cpu);
|
/linux-4.4.14/arch/blackfin/mach-common/ |
D | smp.c | 81 static void ipi_cpu_stop(unsigned int cpu) in ipi_cpu_stop() argument 84 printk(KERN_CRIT "CPU%u: stopping\n", cpu); in ipi_cpu_stop() 88 set_cpu_online(cpu, false); in ipi_cpu_stop() 125 unsigned int cpu = smp_processor_id(); in ipi_handler_int0() local 127 platform_clear_ipi(cpu, IRQ_SUPPLE_0); in ipi_handler_int0() 134 int cpu = smp_processor_id(); in ipi_timer() local 135 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); in ipi_timer() 142 unsigned int cpu = smp_processor_id(); in ipi_handler_int1() local 146 platform_clear_ipi(cpu, IRQ_SUPPLE_1); in ipi_handler_int1() 165 ipi_cpu_stop(cpu); in ipi_handler_int1() [all …]
|
/linux-4.4.14/arch/arm/mach-vexpress/ |
D | tc2_pm.c | 36 #define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu))) argument 37 #define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu))) argument 51 static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerup() argument 53 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in tc2_pm_cpu_powerup() 54 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) in tc2_pm_cpu_powerup() 56 ve_spc_set_resume_addr(cluster, cpu, in tc2_pm_cpu_powerup() 58 ve_spc_cpu_wakeup_irq(cluster, cpu, true); in tc2_pm_cpu_powerup() 71 static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerdown_prepare() argument 73 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in tc2_pm_cpu_powerdown_prepare() 74 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); in tc2_pm_cpu_powerdown_prepare() [all …]
|
/linux-4.4.14/arch/arm/mach-hisi/ |
D | hotplug.c | 79 static void set_cpu_hi3620(int cpu, bool enable) in set_cpu_hi3620() argument 85 if ((cpu == 2) || (cpu == 3)) in set_cpu_hi3620() 86 writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), in set_cpu_hi3620() 91 writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREEN); in set_cpu_hi3620() 96 writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS); in set_cpu_hi3620() 99 writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN); in set_cpu_hi3620() 102 if ((cpu == 2) || (cpu == 3)) in set_cpu_hi3620() 103 writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), in set_cpu_hi3620() 109 val &= ~(CPU0_WFI_MASK_CFG << cpu); in set_cpu_hi3620() 115 writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS); in set_cpu_hi3620() [all …]
|
D | platsmp.c | 26 void hi3xxx_set_cpu_jump(int cpu, void *jump_addr) in hi3xxx_set_cpu_jump() argument 28 cpu = cpu_logical_map(cpu); in hi3xxx_set_cpu_jump() 29 if (!cpu || !ctrl_base) in hi3xxx_set_cpu_jump() 31 writel_relaxed(virt_to_phys(jump_addr), ctrl_base + ((cpu - 1) << 2)); in hi3xxx_set_cpu_jump() 34 int hi3xxx_get_cpu_jump(int cpu) in hi3xxx_get_cpu_jump() argument 36 cpu = cpu_logical_map(cpu); in hi3xxx_get_cpu_jump() 37 if (!cpu || !ctrl_base) in hi3xxx_get_cpu_jump() 39 return readl_relaxed(ctrl_base + ((cpu - 1) << 2)); in hi3xxx_get_cpu_jump() 84 static int hi3xxx_boot_secondary(unsigned int cpu, struct task_struct *idle) in hi3xxx_boot_secondary() argument 86 hi3xxx_set_cpu(cpu, true); in hi3xxx_boot_secondary() [all …]
|
D | platmcpm.c | 103 unsigned int mpidr, cpu, cluster; in hip04_boot_secondary() local 108 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); in hip04_boot_secondary() 113 if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) in hip04_boot_secondary() 118 if (hip04_cpu_table[cluster][cpu]) in hip04_boot_secondary() 133 data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \ in hip04_boot_secondary() 134 CORE_DEBUG_RESET_BIT(cpu); in hip04_boot_secondary() 149 hip04_cpu_table[cluster][cpu]++; in hip04_boot_secondary() 158 unsigned int mpidr, cpu, cluster; in hip04_cpu_die() local 162 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); in hip04_cpu_die() 166 hip04_cpu_table[cluster][cpu]--; in hip04_cpu_die() [all …]
|
D | core.h | 6 extern void hi3xxx_set_cpu_jump(int cpu, void *jump_addr); 7 extern int hi3xxx_get_cpu_jump(int cpu); 11 extern void hi3xxx_cpu_die(unsigned int cpu); 12 extern int hi3xxx_cpu_kill(unsigned int cpu); 13 extern void hi3xxx_set_cpu(int cpu, bool enable); 16 extern void hix5hd2_set_cpu(int cpu, bool enable); 17 extern void hix5hd2_cpu_die(unsigned int cpu); 20 extern void hip01_set_cpu(int cpu, bool enable); 21 extern void hip01_cpu_die(unsigned int cpu);
|
/linux-4.4.14/drivers/clk/imx/ |
D | clk-cpu.c | 33 struct clk_cpu *cpu = to_clk_cpu(hw); in clk_cpu_recalc_rate() local 35 return clk_get_rate(cpu->div); in clk_cpu_recalc_rate() 41 struct clk_cpu *cpu = to_clk_cpu(hw); in clk_cpu_round_rate() local 43 return clk_round_rate(cpu->pll, rate); in clk_cpu_round_rate() 49 struct clk_cpu *cpu = to_clk_cpu(hw); in clk_cpu_set_rate() local 53 ret = clk_set_parent(cpu->mux, cpu->step); in clk_cpu_set_rate() 58 ret = clk_set_rate(cpu->pll, rate); in clk_cpu_set_rate() 60 clk_set_parent(cpu->mux, cpu->pll); in clk_cpu_set_rate() 64 clk_set_parent(cpu->mux, cpu->pll); in clk_cpu_set_rate() 67 clk_set_rate(cpu->div, rate); in clk_cpu_set_rate() [all …]
|
/linux-4.4.14/arch/s390/kernel/ |
D | smp.c | 64 static DEFINE_PER_CPU(struct cpu *, cpu_device); 159 int cpu; in pcpu_find_address() local 161 for_each_cpu(cpu, mask) in pcpu_find_address() 162 if (pcpu_devices[cpu].address == address) in pcpu_find_address() 163 return pcpu_devices + cpu; in pcpu_find_address() 180 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) in pcpu_alloc_lowcore() argument 201 lc->cpu_nr = cpu; in pcpu_alloc_lowcore() 202 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_alloc_lowcore() 208 lowcore_ptr[cpu] = lc; in pcpu_alloc_lowcore() 236 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) in pcpu_prepare_secondary() argument [all …]
|
D | topology.c | 53 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) in cpu_group_map() argument 57 cpumask_copy(&mask, cpumask_of(cpu)); in cpu_group_map() 61 if (cpumask_test_cpu(cpu, &info->mask)) in cpu_group_map() 67 static cpumask_t cpu_thread_map(unsigned int cpu) in cpu_thread_map() argument 72 cpumask_copy(&mask, cpumask_of(cpu)); in cpu_thread_map() 75 cpu -= cpu % (smp_cpu_mtid + 1); in cpu_thread_map() 77 if (cpu_present(cpu + i)) in cpu_thread_map() 78 cpumask_set_cpu(cpu + i, &mask); in cpu_thread_map() 159 add_cpus_to_mask(&tle->cpu, book, socket, 0); in __tl_to_masks_generic() 184 socket = add_cpus_to_mask(&tle->cpu, book, socket, 1); in __tl_to_masks_z10() [all …]
|
/linux-4.4.14/arch/sh/include/asm/ |
D | mmu_context.h | 35 #define asid_cache(cpu) (cpu_data[cpu].asid_cache) argument 38 #define cpu_context(cpu, mm) ((mm)->context.id[cpu]) argument 40 #define cpu_asid(cpu, mm) \ argument 41 (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK) 57 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) in get_mmu_context() argument 59 unsigned long asid = asid_cache(cpu); in get_mmu_context() 62 if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0) in get_mmu_context() 90 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_mmu_context() 112 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) in activate_context() argument 114 get_mmu_context(mm, cpu); in activate_context() [all …]
|
D | smp.h | 15 #define raw_smp_processor_id() (current_thread_info()->cpu) 19 #define cpu_number_map(cpu) __cpu_number_map[cpu] argument 23 #define cpu_logical_map(cpu) __cpu_logical_map[cpu] argument 40 void local_timer_setup(unsigned int cpu); 41 void local_timer_stop(unsigned int cpu); 43 void arch_send_call_function_single_ipi(int cpu); 47 void native_cpu_die(unsigned int cpu); 48 int native_cpu_disable(unsigned int cpu); 54 static inline void __cpu_die(unsigned int cpu) in __cpu_die() argument 58 mp_ops->cpu_die(cpu); in __cpu_die()
|
/linux-4.4.14/tools/power/cpupower/utils/idle_monitor/ |
D | mperf_monitor.c | 30 unsigned int cpu); 32 unsigned int cpu); 89 static int mperf_init_stats(unsigned int cpu) in mperf_init_stats() argument 94 ret = read_msr(cpu, MSR_APERF, &val); in mperf_init_stats() 95 aperf_previous_count[cpu] = val; in mperf_init_stats() 96 ret |= read_msr(cpu, MSR_MPERF, &val); in mperf_init_stats() 97 mperf_previous_count[cpu] = val; in mperf_init_stats() 98 is_valid[cpu] = !ret; in mperf_init_stats() 103 static int mperf_measure_stats(unsigned int cpu) in mperf_measure_stats() argument 108 ret = read_msr(cpu, MSR_APERF, &val); in mperf_measure_stats() [all …]
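On Linux, the read_msr() calls above amount to a pread() from /dev/cpu/N/msr at an offset equal to the MSR number (the msr module must be loaded and root is required). 0xE7 and 0xE8 are the architectural IA32_MPERF/IA32_APERF registers this monitor samples. A minimal sketch:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <sys/types.h>
    #include <unistd.h>

    #define MSR_MPERF 0xE7
    #define MSR_APERF 0xE8

    static int read_msr(unsigned int cpu, off_t msr, uint64_t *val)
    {
        char path[64];
        int fd;

        snprintf(path, sizeof(path), "/dev/cpu/%u/msr", cpu);
        fd = open(path, O_RDONLY);
        if (fd < 0)
            return -1;
        if (pread(fd, val, sizeof(*val), msr) != sizeof(*val)) {
            close(fd);
            return -1;
        }
        close(fd);
        return 0;
    }

    int main(void)
    {
        uint64_t aperf, mperf;

        if (!read_msr(0, MSR_APERF, &aperf) && !read_msr(0, MSR_MPERF, &mperf))
            printf("cpu0 APERF=%llu MPERF=%llu\n",
                   (unsigned long long)aperf, (unsigned long long)mperf);
        return 0;
    }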
|
D | snb_idle.c | 28 unsigned int cpu); 62 unsigned int cpu) in snb_get_count() argument 82 if (read_msr(cpu, msr, val)) in snb_get_count() 88 unsigned int cpu) in snb_get_count_percent() argument 92 if (!is_valid[cpu]) in snb_get_count_percent() 96 (current_count[id][cpu] - previous_count[id][cpu])) / in snb_get_count_percent() 100 snb_cstates[id].name, previous_count[id][cpu], in snb_get_count_percent() 101 current_count[id][cpu], cpu); in snb_get_count_percent() 106 current_count[id][cpu] - previous_count[id][cpu], in snb_get_count_percent() 107 *percent, cpu); in snb_get_count_percent() [all …]
|
D | hsw_ext_idle.c | 30 unsigned int cpu); 65 unsigned int cpu) in hsw_ext_get_count() argument 85 if (read_msr(cpu, msr, val)) in hsw_ext_get_count() 91 unsigned int cpu) in hsw_ext_get_count_percent() argument 95 if (!is_valid[cpu]) in hsw_ext_get_count_percent() 99 (current_count[id][cpu] - previous_count[id][cpu])) / in hsw_ext_get_count_percent() 103 hsw_ext_cstates[id].name, previous_count[id][cpu], in hsw_ext_get_count_percent() 104 current_count[id][cpu], cpu); in hsw_ext_get_count_percent() 109 current_count[id][cpu] - previous_count[id][cpu], in hsw_ext_get_count_percent() 110 *percent, cpu); in hsw_ext_get_count_percent() [all …]
|
D | nhm_idle.c | 31 unsigned int cpu); 73 unsigned int cpu) in nhm_get_count() argument 96 if (read_msr(cpu, msr, val)) in nhm_get_count() 103 unsigned int cpu) in nhm_get_count_percent() argument 107 if (!is_valid[cpu]) in nhm_get_count_percent() 111 (current_count[id][cpu] - previous_count[id][cpu])) / in nhm_get_count_percent() 115 nhm_cstates[id].name, previous_count[id][cpu], in nhm_get_count_percent() 116 current_count[id][cpu], cpu); in nhm_get_count_percent() 121 current_count[id][cpu] - previous_count[id][cpu], in nhm_get_count_percent() 122 *percent, cpu); in nhm_get_count_percent() [all …]
|
D | cpuidle_sysfs.c | 28 unsigned int cpu) in cpuidle_get_count_percent() argument 30 unsigned long long statediff = current_count[cpu][id] in cpuidle_get_count_percent() 31 - previous_count[cpu][id]; in cpuidle_get_count_percent() 33 cpuidle_cstates[id].name, timediff, *percent, cpu); in cpuidle_get_count_percent() 41 cpuidle_cstates[id].name, timediff, statediff, *percent, cpu); in cpuidle_get_count_percent() 48 int cpu, state; in cpuidle_start() local 50 for (cpu = 0; cpu < cpu_count; cpu++) { in cpuidle_start() 53 previous_count[cpu][state] = in cpuidle_start() 54 sysfs_get_idlestate_time(cpu, state); in cpuidle_start() 56 cpu, state, previous_count[cpu][state]); in cpuidle_start() [all …]
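cpuidle_get_count_percent() above divides the per-state residency delta (two samples of the sysfs "time" counter) by the measurement interval. The arithmetic in isolation, assuming both values are in microseconds as they are in the sysfs ABI:

    /* The percentage math behind cpuidle_get_count_percent(): residency
     * delta over wall-clock interval, both in microseconds. */
    static double idle_percent(unsigned long long time_before_us,
                               unsigned long long time_after_us,
                               unsigned long long interval_us)
    {
        unsigned long long statediff = time_after_us - time_before_us;

        return interval_us ? 100.0 * statediff / interval_us : 0.0;
    }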
|
D | amd_fam14h_idle.c | 47 unsigned int cpu); 49 unsigned int cpu); 100 unsigned int cpu) in amd_fam14h_get_pci_info() argument 125 static int amd_fam14h_init(cstate_t *state, unsigned int cpu) in amd_fam14h_init() argument 130 ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu); in amd_fam14h_init() 151 (unsigned int) val, cpu); in amd_fam14h_init() 155 previous_count[state->id][cpu] = 0; in amd_fam14h_init() 160 static int amd_fam14h_disable(cstate_t *state, unsigned int cpu) in amd_fam14h_disable() argument 165 ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu); in amd_fam14h_disable() 182 current_count[state->id][cpu] = val; in amd_fam14h_disable() [all …]
|
D | cpupower-monitor.c | 135 void print_results(int topology_depth, int cpu) in print_results() argument 144 if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu)) in print_results() 146 if (!cpu_top.core_info[cpu].is_online && in print_results() 147 cpu_top.core_info[cpu].pkg == -1) in print_results() 151 printf("%4d|", cpu_top.core_info[cpu].pkg); in print_results() 153 printf("%4d|", cpu_top.core_info[cpu].core); in print_results() 155 printf("%4d|", cpu_top.core_info[cpu].cpu); in print_results() 169 cpu_top.core_info[cpu].cpu); in print_results() 178 cpu_top.core_info[cpu].cpu); in print_results() 197 if (!cpu_top.core_info[cpu].is_online && in print_results() [all …]
|
/linux-4.4.14/arch/s390/include/asm/ |
D | topology.h | 8 struct cpu; 25 #define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id) argument 26 #define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id) argument 27 #define topology_sibling_cpumask(cpu) \ argument 28 (&per_cpu(cpu_topology, cpu).thread_mask) 29 #define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id) argument 30 #define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask) argument 31 #define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id) argument 32 #define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask) argument 36 int topology_cpu_init(struct cpu *); [all …]
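The ids these topology_*() macros expose in-kernel are also visible from userspace through each CPU's sysfs topology directory; book_id is s390-specific, but core_id and physical_package_id exist on all SMP architectures. A hedged reader:

    #include <stdio.h>

    static int topology_id(unsigned int cpu, const char *attr)
    {
        char path[128];
        FILE *f;
        int id = -1;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%u/topology/%s", cpu, attr);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%d", &id) != 1)
            id = -1;
        fclose(f);
        return id;
    }

    /* e.g. topology_id(3, "core_id"), topology_id(3, "physical_package_id") */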
|
D | smp.h | 22 extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); 24 extern void arch_send_call_function_single_ipi(int cpu); 31 extern int smp_store_status(int cpu); 33 extern int smp_vcpu_scheduled(int cpu); 34 extern void smp_yield_cpu(int cpu); 35 extern void smp_cpu_set_polarization(int cpu, int val); 36 extern int smp_cpu_get_polarization(int cpu); 54 static inline int smp_store_status(int cpu) { return 0; } in smp_store_status() argument 55 static inline int smp_vcpu_scheduled(int cpu) { return 1; } in smp_vcpu_scheduled() argument 56 static inline void smp_yield_cpu(int cpu) { } in smp_yield_cpu() argument [all …]
|
/linux-4.4.14/arch/mips/sibyte/bcm1480/ |
D | irq.c | 55 void bcm1480_mask_irq(int cpu, int irq) in bcm1480_mask_irq() argument 66 …cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_sp… in bcm1480_mask_irq() 68 …____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_sp… in bcm1480_mask_irq() 72 void bcm1480_unmask_irq(int cpu, int irq) in bcm1480_unmask_irq() argument 83 …cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_sp… in bcm1480_unmask_irq() 85 …____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_sp… in bcm1480_unmask_irq() 94 int i = 0, old_cpu, cpu, int_on, k; in bcm1480_set_affinity() local 101 cpu = cpu_logical_map(i); in bcm1480_set_affinity() 121 bcm1480_irq_owner[irq] = cpu; in bcm1480_set_affinity() 124 …cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BC… in bcm1480_set_affinity() [all …]
|
/linux-4.4.14/arch/blackfin/kernel/cplb-mpu/ |
D | cplbinit.c | 22 void __init generate_cplb_tables_cpu(unsigned int cpu) in generate_cplb_tables_cpu() argument 45 dcplb_tbl[cpu][i_d].addr = 0; in generate_cplb_tables_cpu() 46 dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; in generate_cplb_tables_cpu() 48 icplb_tbl[cpu][i_i].addr = 0; in generate_cplb_tables_cpu() 49 icplb_tbl[cpu][i_i++].data = CPLB_VALID | i_cache | CPLB_USER_RD | PAGE_SIZE_1KB; in generate_cplb_tables_cpu() 57 dcplb_tbl[cpu][i_d].addr = addr; in generate_cplb_tables_cpu() 58 dcplb_tbl[cpu][i_d++].data = d_data; in generate_cplb_tables_cpu() 59 icplb_tbl[cpu][i_i].addr = addr; in generate_cplb_tables_cpu() 60 icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); in generate_cplb_tables_cpu() 66 dcplb_tbl[cpu][i_d].addr = addr; in generate_cplb_tables_cpu() [all …]
|
D | cplbmgr.c | 68 MGR_ATTR static int evict_one_icplb(unsigned int cpu) in evict_one_icplb() argument 72 if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0) in evict_one_icplb() 74 i = first_switched_icplb + icplb_rr_index[cpu]; in evict_one_icplb() 77 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb; in evict_one_icplb() 79 icplb_rr_index[cpu]++; in evict_one_icplb() 83 MGR_ATTR static int evict_one_dcplb(unsigned int cpu) in evict_one_dcplb() argument 87 if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0) in evict_one_dcplb() 89 i = first_switched_dcplb + dcplb_rr_index[cpu]; in evict_one_dcplb() 92 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb; in evict_one_dcplb() 94 dcplb_rr_index[cpu]++; in evict_one_dcplb() [all …]
|
/linux-4.4.14/arch/mips/include/asm/ |
D | mmu_context.h | 85 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) argument 86 #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) argument 87 #define asid_cache(cpu) (cpu_data[cpu].asid_cache) argument 102 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) in get_new_mmu_context() argument 105 unsigned long asid = asid_cache(cpu); in get_new_mmu_context() 119 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_new_mmu_context() 142 unsigned int cpu = smp_processor_id(); in switch_mm() local 148 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) in switch_mm() 149 get_new_mmu_context(next, cpu); in switch_mm() 150 write_c0_entryhi(cpu_asid(cpu, next)); in switch_mm() [all …]
|
D | smp.h | 28 #define raw_smp_processor_id() (current_thread_info()->cpu) 33 #define cpu_number_map(cpu) __cpu_number_map[cpu] argument 37 #define cpu_logical_map(cpu) __cpu_logical_map[cpu] argument 61 static inline void smp_send_reschedule(int cpu) in smp_send_reschedule() argument 65 mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); in smp_send_reschedule() 76 static inline void __cpu_die(unsigned int cpu) in __cpu_die() argument 80 mp_ops->cpu_die(cpu); in __cpu_die() 86 static inline void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument 90 mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION); in arch_send_call_function_single_ipi()
|
/linux-4.4.14/arch/mips/sgi-ip27/ |
D | ip27-irq-pci.c | 64 static inline int alloc_level(int cpu, int irq) in alloc_level() argument 66 struct hub_data *hub = hub_data(cpu_to_node(cpu)); in alloc_level() 67 struct slice_data *si = cpu_data[cpu].data; in alloc_level() 72 panic("Cpu %d flooded with devices", cpu); in alloc_level() 82 int cpu, i; in find_level() local 84 for_each_online_cpu(cpu) { in find_level() 85 struct slice_data *si = cpu_data[cpu].data; in find_level() 89 *cpunum = cpu; in find_level() 98 static int intr_connect_level(int cpu, int bit) in intr_connect_level() argument 100 nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); in intr_connect_level() [all …]
|
/linux-4.4.14/arch/metag/kernel/ |
D | smp.c | 216 int __cpu_up(unsigned int cpu, struct task_struct *idle) in __cpu_up() argument 218 unsigned int thread = cpu_2_hwthread_id[cpu]; in __cpu_up() 246 if (!cpu_online(cpu)) in __cpu_up() 253 pr_crit("CPU%u: processor failed to boot\n", cpu); in __cpu_up() 270 unsigned int cpu = smp_processor_id(); in __cpu_disable() local 276 set_cpu_online(cpu, false); in __cpu_disable() 290 clear_tasks_mm_cpumask(cpu); in __cpu_disable() 299 void __cpu_die(unsigned int cpu) in __cpu_die() argument 301 if (!cpu_wait_death(cpu, 1)) in __cpu_die() 302 pr_err("CPU%u: unable to kill\n", cpu); in __cpu_die() [all …]
|
/linux-4.4.14/arch/powerpc/platforms/cell/ |
D | pmu.c | 49 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ 50 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ 58 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ 65 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ 74 u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr) in cbe_read_phys_ctr() argument 93 void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val) in cbe_write_phys_ctr() argument 105 pm_ctrl = cbe_read_pm(cpu, pm_control); in cbe_write_phys_ctr() 111 cbe_write_pm(cpu, pm_control, pm_ctrl); in cbe_write_phys_ctr() 113 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); in cbe_write_phys_ctr() 126 u32 cbe_read_ctr(u32 cpu, u32 ctr) in cbe_read_ctr() argument [all …]
|
/linux-4.4.14/arch/alpha/kernel/ |
D | smp.c | 201 struct percpu_struct *cpu; in send_secondary_console_msg() local 206 cpu = (struct percpu_struct *) in send_secondary_console_msg() 217 *(unsigned int *)&cpu->ipc_buffer[0] = len; in send_secondary_console_msg() 218 cp1 = (char *) &cpu->ipc_buffer[1]; in send_secondary_console_msg() 242 struct percpu_struct *cpu; in recv_secondary_console_msg() local 255 cpu = (struct percpu_struct *) in recv_secondary_console_msg() 262 mycpu, i, cpu->halt_reason, cpu->flags)); in recv_secondary_console_msg() 264 cnt = cpu->ipc_buffer[0] >> 32; in recv_secondary_console_msg() 268 cp1 = (char *) &cpu->ipc_buffer[1]; in recv_secondary_console_msg() 293 struct percpu_struct *cpu; in secondary_cpu_start() local [all …]
|
/linux-4.4.14/drivers/cpuidle/ |
D | coupled.c | 324 int cpu = (unsigned long)info; in cpuidle_coupled_handle_poke() local 325 cpumask_set_cpu(cpu, &cpuidle_coupled_poked); in cpuidle_coupled_handle_poke() 326 cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending); in cpuidle_coupled_handle_poke() 341 static void cpuidle_coupled_poke(int cpu) in cpuidle_coupled_poke() argument 343 struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); in cpuidle_coupled_poke() 345 if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending)) in cpuidle_coupled_poke() 346 smp_call_function_single_async(cpu, csd); in cpuidle_coupled_poke() 359 int cpu; in cpuidle_coupled_poke_others() local 361 for_each_cpu(cpu, &coupled->coupled_cpus) in cpuidle_coupled_poke_others() 362 if (cpu != this_cpu && cpu_online(cpu)) in cpuidle_coupled_poke_others() [all …]
|
/linux-4.4.14/arch/x86/include/asm/ |
D | smp.h | 40 static inline struct cpumask *cpu_llc_shared_mask(int cpu) in cpu_llc_shared_mask() argument 42 return per_cpu(cpu_llc_shared_map, cpu); in cpu_llc_shared_mask() 62 void (*smp_send_reschedule)(int cpu); 64 int (*cpu_up)(unsigned cpu, struct task_struct *tidle); 66 void (*cpu_die)(unsigned int cpu); 70 void (*send_call_func_single_ipi)(int cpu); 74 extern void set_cpu_sibling_map(int cpu); 107 static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument 109 return smp_ops.cpu_up(cpu, tidle); in __cpu_up() 117 static inline void __cpu_die(unsigned int cpu) in __cpu_die() argument [all …]
|
D | topology.h | 57 extern int __cpu_to_node(int cpu); 60 extern int early_cpu_to_node(int cpu); 65 static inline int early_cpu_to_node(int cpu) in early_cpu_to_node() argument 67 return early_per_cpu(x86_cpu_to_node_map, cpu); in early_cpu_to_node() 109 static inline int early_cpu_to_node(int cpu) in early_cpu_to_node() argument 120 extern const struct cpumask *cpu_coregroup_mask(int cpu); 122 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) argument 123 #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) argument 126 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) argument 127 #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) argument
|
D | numa.h | 41 extern int numa_cpu_node(int cpu); 48 static inline int numa_cpu_node(int cpu) in numa_cpu_node() argument 59 extern void numa_set_node(int cpu, int node); 60 extern void numa_clear_node(int cpu); 62 extern void numa_add_cpu(int cpu); 63 extern void numa_remove_cpu(int cpu); 65 static inline void numa_set_node(int cpu, int node) { } in numa_set_node() argument 66 static inline void numa_clear_node(int cpu) { } in numa_clear_node() argument 68 static inline void numa_add_cpu(int cpu) { } in numa_add_cpu() argument 69 static inline void numa_remove_cpu(int cpu) { } in numa_remove_cpu() argument [all …]
|
/linux-4.4.14/arch/sh/kernel/cpu/sh4a/ |
D | smp-shx3.c | 33 unsigned int cpu = hard_smp_processor_id(); in ipi_interrupt_handler() local 34 unsigned int offs = 4 * cpu; in ipi_interrupt_handler() 48 unsigned int cpu = 0; in shx3_smp_setup() local 51 init_cpu_possible(cpumask_of(cpu)); in shx3_smp_setup() 54 __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu)); in shx3_smp_setup() 88 static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point) in shx3_start_cpu() argument 91 __raw_writel(entry_point, RESET_REG(cpu)); in shx3_start_cpu() 93 __raw_writel(virt_to_phys(entry_point), RESET_REG(cpu)); in shx3_start_cpu() 95 if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) in shx3_start_cpu() 96 __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); in shx3_start_cpu() [all …]
|
/linux-4.4.14/arch/xtensa/kernel/ |
D | smp.c | 102 unsigned int cpu = smp_processor_id(); in smp_prepare_boot_cpu() local 103 BUG_ON(cpu != 0); in smp_prepare_boot_cpu() 104 cpu_asid_cache(cpu) = ASID_USER_FIRST; in smp_prepare_boot_cpu() 117 unsigned int cpu = smp_processor_id(); in secondary_start_kernel() local 124 __func__, boot_secondary_processors, cpu); in secondary_start_kernel() 130 __func__, boot_secondary_processors, cpu); in secondary_start_kernel() 141 cpumask_set_cpu(cpu, mm_cpumask(mm)); in secondary_start_kernel() 149 notify_cpu_starting(cpu); in secondary_start_kernel() 152 local_timer_setup(cpu); in secondary_start_kernel() 154 set_cpu_online(cpu, true); in secondary_start_kernel() [all …]
|
/linux-4.4.14/tools/perf/util/ |
D | stat-shadow.c | 79 int cpu) in perf_stat__update_shadow_stats() argument 84 update_stats(&runtime_nsecs_stats[cpu], count[0]); in perf_stat__update_shadow_stats() 86 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]); in perf_stat__update_shadow_stats() 88 update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]); in perf_stat__update_shadow_stats() 90 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]); in perf_stat__update_shadow_stats() 92 update_stats(&runtime_elision_stats[ctx][cpu], count[0]); in perf_stat__update_shadow_stats() 94 update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]); in perf_stat__update_shadow_stats() 96 update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]); in perf_stat__update_shadow_stats() 98 update_stats(&runtime_branches_stats[ctx][cpu], count[0]); in perf_stat__update_shadow_stats() 100 update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]); in perf_stat__update_shadow_stats() [all …]
|
D | env.c | 21 zfree(&env->cpu); in perf_env__exit() 63 int cpu, nr_cpus; in perf_env__read_cpu_topology_map() local 65 if (env->cpu != NULL) in perf_env__read_cpu_topology_map() 75 env->cpu = calloc(nr_cpus, sizeof(env->cpu[0])); in perf_env__read_cpu_topology_map() 76 if (env->cpu == NULL) in perf_env__read_cpu_topology_map() 79 for (cpu = 0; cpu < nr_cpus; ++cpu) { in perf_env__read_cpu_topology_map() 80 env->cpu[cpu].core_id = cpu_map__get_core_id(cpu); in perf_env__read_cpu_topology_map() 81 env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu); in perf_env__read_cpu_topology_map()
|
/linux-4.4.14/arch/hexagon/kernel/ |
D | smp.c | 51 int cpu) in __handle_ipi() argument 96 int cpu = smp_processor_id(); in handle_ipi() local 97 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in handle_ipi() 101 __handle_ipi(&ops, ipi, cpu); in handle_ipi() 108 unsigned long cpu; in send_ipi() local 113 for_each_cpu(cpu, cpumask) { in send_ipi() 114 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in send_ipi() 118 retval = __vmintop_post(BASE_IPI_IRQ+cpu); in send_ipi() 122 BASE_IPI_IRQ+cpu); in send_ipi() 147 unsigned int cpu; in start_secondary() local [all …]
|
/linux-4.4.14/drivers/oprofile/ |
D | nmi_timer_int.c | 36 static int nmi_timer_start_cpu(int cpu) in nmi_timer_start_cpu() argument 38 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_start_cpu() 41 event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL, in nmi_timer_start_cpu() 45 per_cpu(nmi_timer_events, cpu) = event; in nmi_timer_start_cpu() 54 static void nmi_timer_stop_cpu(int cpu) in nmi_timer_stop_cpu() argument 56 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_stop_cpu() 65 int cpu = (unsigned long)data; in nmi_timer_cpu_notifier() local 69 nmi_timer_start_cpu(cpu); in nmi_timer_cpu_notifier() 72 nmi_timer_stop_cpu(cpu); in nmi_timer_cpu_notifier() 84 int cpu; in nmi_timer_start() local [all …]
|
D | oprofile_perf.c | 39 u32 cpu = smp_processor_id(); in op_overflow_handler() local 42 if (per_cpu(perf_events, cpu)[id] == event) in op_overflow_handler() 49 "on cpu %u\n", cpu); in op_overflow_handler() 74 static int op_create_counter(int cpu, int event) in op_create_counter() argument 78 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) in op_create_counter() 82 cpu, NULL, in op_create_counter() 91 "on CPU %d\n", event, cpu); in op_create_counter() 95 per_cpu(perf_events, cpu)[event] = pevent; in op_create_counter() 100 static void op_destroy_counter(int cpu, int event) in op_destroy_counter() argument 102 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; in op_destroy_counter() [all …]
|
/linux-4.4.14/tools/perf/scripts/python/ |
D | netdev-times.py | 87 cpu = irq_list[0]['cpu'] 101 (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) 227 def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec): argument 230 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) 233 def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec): argument 236 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) 239 def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec): argument 242 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) 245 def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, argument 247 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, [all …]
|
/linux-4.4.14/arch/x86/kernel/cpu/microcode/ |
D | core.c | 234 static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig) in collect_cpu_info_on_target() argument 239 ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1); in collect_cpu_info_on_target() 246 static int collect_cpu_info(int cpu) in collect_cpu_info() argument 248 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; in collect_cpu_info() 253 ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig); in collect_cpu_info() 271 static int apply_microcode_on_target(int cpu) in apply_microcode_on_target() argument 276 ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1); in apply_microcode_on_target() 287 int cpu; in do_microcode_update() local 289 for_each_online_cpu(cpu) { in do_microcode_update() 290 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; in do_microcode_update() [all …]
|
/linux-4.4.14/arch/blackfin/kernel/cplb-nompu/ |
D | cplbmgr.c | 39 static inline void write_dcplb_data(int cpu, int idx, unsigned long data, in write_dcplb_data() argument 48 dcplb_tbl[cpu][idx].addr = addr; in write_dcplb_data() 49 dcplb_tbl[cpu][idx].data = data; in write_dcplb_data() 53 static inline void write_icplb_data(int cpu, int idx, unsigned long data, in write_icplb_data() argument 62 icplb_tbl[cpu][idx].addr = addr; in write_icplb_data() 63 icplb_tbl[cpu][idx].data = data; in write_icplb_data() 74 static int evict_one_icplb(int cpu) in evict_one_icplb() argument 76 int i = first_switched_icplb + icplb_rr_index[cpu]; in evict_one_icplb() 79 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb; in evict_one_icplb() 81 icplb_rr_index[cpu]++; in evict_one_icplb() [all …]
|
/linux-4.4.14/arch/nios2/kernel/ |
D | cpuinfo.c | 35 static inline u32 fcpu(struct device_node *cpu, const char *n) in fcpu() argument 39 of_property_read_u32(cpu, n, &val); in fcpu() 44 static inline u32 fcpu_has(struct device_node *cpu, const char *n) in fcpu_has() argument 46 return of_get_property(cpu, n, NULL) ? 1 : 0; in fcpu_has() 51 struct device_node *cpu; in setup_cpuinfo() local 55 cpu = of_find_node_by_type(NULL, "cpu"); in setup_cpuinfo() 56 if (!cpu) in setup_cpuinfo() 59 if (!fcpu_has(cpu, "altr,has-initda")) in setup_cpuinfo() 64 cpuinfo.cpu_clock_freq = fcpu(cpu, "clock-frequency"); in setup_cpuinfo() 66 str = of_get_property(cpu, "altr,implementation", &len); in setup_cpuinfo() [all …]
|
/linux-4.4.14/arch/arm/mach-shmobile/ |
D | platsmp-apmu.c | 70 static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu)) in apmu_wrap() argument 72 void __iomem *p = apmu_cpus[cpu].iomem; in apmu_wrap() 74 return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL; in apmu_wrap() 77 static void apmu_init_cpu(struct resource *res, int cpu, int bit) in apmu_init_cpu() argument 79 if ((cpu >= ARRAY_SIZE(apmu_cpus)) || apmu_cpus[cpu].iomem) in apmu_init_cpu() 82 apmu_cpus[cpu].iomem = ioremap_nocache(res->start, resource_size(res)); in apmu_init_cpu() 83 apmu_cpus[cpu].bit = bit; in apmu_init_cpu() 85 pr_debug("apmu ioremap %d %d %pr\n", cpu, bit, res); in apmu_init_cpu() 88 static void apmu_parse_cfg(void (*fn)(struct resource *res, int cpu, int bit), in apmu_parse_cfg() argument 133 int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle) in shmobile_smp_apmu_boot_secondary() argument [all …]
|
/linux-4.4.14/arch/arm/mach-sunxi/ |
D | platsmp.c | 24 #define CPUCFG_CPU_PWR_CLAMP_STATUS_REG(cpu) ((cpu) * 0x40 + 0x64) argument 25 #define CPUCFG_CPU_RST_CTRL_REG(cpu) (((cpu) + 1) * 0x40) argument 26 #define CPUCFG_CPU_CTRL_REG(cpu) (((cpu) + 1) * 0x40 + 0x04) argument 27 #define CPUCFG_CPU_STATUS_REG(cpu) (((cpu) + 1) * 0x40 + 0x08) argument 35 #define PRCM_CPU_PWR_CLAMP_REG(cpu) (((cpu) * 4) + 0x140) argument 71 static int sun6i_smp_boot_secondary(unsigned int cpu, in sun6i_smp_boot_secondary() argument 87 writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu)); in sun6i_smp_boot_secondary() 91 writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG); in sun6i_smp_boot_secondary() 95 writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG); in sun6i_smp_boot_secondary() 99 writel(0xff >> i, prcm_membase + PRCM_CPU_PWR_CLAMP_REG(cpu)); in sun6i_smp_boot_secondary() [all …]
|
/linux-4.4.14/lib/ |
D | cpu_rmap.c | 31 unsigned int cpu; in alloc_cpu_rmap() local 55 for_each_possible_cpu(cpu) { in alloc_cpu_rmap() 56 rmap->near[cpu].index = cpu % size; in alloc_cpu_rmap() 57 rmap->near[cpu].dist = CPU_RMAP_DIST_INF; in alloc_cpu_rmap() 97 static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu, in cpu_rmap_copy_neigh() argument 103 if (rmap->near[cpu].dist > dist && in cpu_rmap_copy_neigh() 105 rmap->near[cpu].index = rmap->near[neigh].index; in cpu_rmap_copy_neigh() 106 rmap->near[cpu].dist = dist; in cpu_rmap_copy_neigh() 117 unsigned int cpu; in debug_print_rmap() local 121 for_each_possible_cpu(cpu) { in debug_print_rmap() [all …]
|
/linux-4.4.14/arch/powerpc/sysdev/xics/ |
D | icp-native.c | 54 int cpu = smp_processor_id(); in icp_native_get_xirr() local 62 return in_be32(&icp_native_regs[cpu]->xirr.word); in icp_native_get_xirr() 67 int cpu = smp_processor_id(); in icp_native_set_xirr() local 69 out_be32(&icp_native_regs[cpu]->xirr.word, value); in icp_native_set_xirr() 74 int cpu = smp_processor_id(); in icp_native_set_cppr() local 76 out_8(&icp_native_regs[cpu]->xirr.bytes[0], value); in icp_native_set_cppr() 101 int cpu = smp_processor_id(); in icp_native_teardown_cpu() local 104 icp_native_set_qirr(cpu, 0xff); in icp_native_teardown_cpu() 146 static void icp_native_cause_ipi(int cpu, unsigned long data) in icp_native_cause_ipi() argument 148 kvmppc_set_host_ipi(cpu, 1); in icp_native_cause_ipi() [all …]
|
/linux-4.4.14/tools/perf/tests/ |
D | openat-syscall-all-cpus.c | 12 int err = -1, fd, cpu; in test__openat_syscall_event_on_all_cpus() local 48 for (cpu = 0; cpu < cpus->nr; ++cpu) { in test__openat_syscall_event_on_all_cpus() 49 unsigned int ncalls = nr_openat_calls + cpu; in test__openat_syscall_event_on_all_cpus() 56 if (cpus->map[cpu] >= CPU_SETSIZE) { in test__openat_syscall_event_on_all_cpus() 57 pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); in test__openat_syscall_event_on_all_cpus() 61 CPU_SET(cpus->map[cpu], &cpu_set); in test__openat_syscall_event_on_all_cpus() 64 cpus->map[cpu], in test__openat_syscall_event_on_all_cpus() 72 CPU_CLR(cpus->map[cpu], &cpu_set); in test__openat_syscall_event_on_all_cpus() 87 for (cpu = 0; cpu < cpus->nr; ++cpu) { in test__openat_syscall_event_on_all_cpus() 90 if (cpus->map[cpu] >= CPU_SETSIZE) in test__openat_syscall_event_on_all_cpus() [all …]
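The test above pins itself to each CPU in turn with sched_setaffinity() before issuing its syscalls, so the per-CPU counters can be checked independently. The pinning idiom in isolation:

    /* Bind the calling thread to one CPU at a time, do per-CPU work,
     * move on. Requires _GNU_SOURCE for CPU_SET() and sched_getcpu(). */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        cpu_set_t set;
        long cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
            CPU_ZERO(&set);
            CPU_SET(cpu, &set);
            if (sched_setaffinity(0, sizeof(set), &set) < 0) {
                perror("sched_setaffinity");
                continue;
            }
            printf("now running on cpu %ld (sched_getcpu()=%d)\n",
                   cpu, sched_getcpu());
            /* per-CPU work would go here */
        }
        return 0;
    }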
|
/linux-4.4.14/arch/blackfin/ |
D | Makefile | 60 cpu-$(CONFIG_BF512) := bf512 61 cpu-$(CONFIG_BF514) := bf514 62 cpu-$(CONFIG_BF516) := bf516 63 cpu-$(CONFIG_BF518) := bf518 64 cpu-$(CONFIG_BF522) := bf522 65 cpu-$(CONFIG_BF523) := bf523 66 cpu-$(CONFIG_BF524) := bf524 67 cpu-$(CONFIG_BF525) := bf525 68 cpu-$(CONFIG_BF526) := bf526 69 cpu-$(CONFIG_BF527) := bf527 [all …]
|
/linux-4.4.14/arch/powerpc/platforms/ps3/ |
D | smp.c | 42 static void ps3_smp_message_pass(int cpu, int msg) in ps3_smp_message_pass() argument 52 virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; in ps3_smp_message_pass() 57 " (%d)\n", __func__, __LINE__, cpu, msg, result); in ps3_smp_message_pass() 62 int cpu; in ps3_smp_probe() local 64 for (cpu = 0; cpu < 2; cpu++) { in ps3_smp_probe() 66 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); in ps3_smp_probe() 69 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu); in ps3_smp_probe() 83 result = ps3_event_receive_port_setup(cpu, &virqs[i]); in ps3_smp_probe() 89 __func__, __LINE__, cpu, i, virqs[i]); in ps3_smp_probe() 96 ps3_register_ipi_irq(cpu, virqs[i]); in ps3_smp_probe() [all …]
|
/linux-4.4.14/arch/arc/include/asm/ |
D | mmu_context.h | 50 #define asid_mm(mm, cpu) mm->context.asid[cpu] argument 51 #define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK) argument 54 #define asid_cpu(cpu) per_cpu(asid_cache, cpu) argument 62 const unsigned int cpu = smp_processor_id(); in get_new_mmu_context() local 77 if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK)) in get_new_mmu_context() 81 if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) { in get_new_mmu_context() 90 if (!asid_cpu(cpu)) in get_new_mmu_context() 91 asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE; in get_new_mmu_context() 95 asid_mm(mm, cpu) = asid_cpu(cpu); in get_new_mmu_context() 98 write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE); in get_new_mmu_context() [all …]
|
/linux-4.4.14/arch/x86/platform/uv/ |
D | uv_nmi.c | 197 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi) in uv_set_in_nmi() argument 202 atomic_set(&hub_nmi->cpu_owner, cpu); in uv_set_in_nmi() 204 atomic_set(&uv_nmi_cpu, cpu); in uv_set_in_nmi() 214 int cpu = smp_processor_id(); in uv_check_nmi() local 229 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi() 252 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi() 264 static inline void uv_clear_nmi(int cpu) in uv_clear_nmi() argument 268 if (cpu == atomic_read(&hub_nmi->cpu_owner)) { in uv_clear_nmi() 279 int cpu; in uv_nmi_nr_cpus_ping() local 281 for_each_cpu(cpu, uv_nmi_cpu_mask) in uv_nmi_nr_cpus_ping() [all …]
|
D | uv_time.c | 68 } cpu[1]; member 83 static void uv_rtc_send_IPI(int cpu) in uv_rtc_send_IPI() argument 88 apicid = cpu_physical_id(cpu); in uv_rtc_send_IPI() 111 static int uv_setup_intr(int cpu, u64 expires) in uv_setup_intr() argument 114 unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits; in uv_setup_intr() 115 int pnode = uv_cpu_to_pnode(cpu); in uv_setup_intr() 159 int cpu; in uv_rtc_allocate_timers() local 165 for_each_present_cpu(cpu) { in uv_rtc_allocate_timers() 166 int nid = cpu_to_node(cpu); in uv_rtc_allocate_timers() 167 int bid = uv_cpu_to_blade_id(cpu); in uv_rtc_allocate_timers() [all …]
|
/linux-4.4.14/include/linux/clk/ |
D | tegra.h | 44 void (*wait_for_reset)(u32 cpu); 45 void (*put_in_reset)(u32 cpu); 46 void (*out_of_reset)(u32 cpu); 47 void (*enable_clock)(u32 cpu); 48 void (*disable_clock)(u32 cpu); 58 static inline void tegra_wait_cpu_in_reset(u32 cpu) in tegra_wait_cpu_in_reset() argument 63 tegra_cpu_car_ops->wait_for_reset(cpu); in tegra_wait_cpu_in_reset() 66 static inline void tegra_put_cpu_in_reset(u32 cpu) in tegra_put_cpu_in_reset() argument 71 tegra_cpu_car_ops->put_in_reset(cpu); in tegra_put_cpu_in_reset() 74 static inline void tegra_cpu_out_of_reset(u32 cpu) in tegra_cpu_out_of_reset() argument [all …]
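tegra_cpu_car_ops above is the common "ops table with guarded wrappers" pattern: a platform registers a struct of hooks at init, and thin inline wrappers check both the table and the individual hook so a missing callback degrades to a warning instead of a NULL dereference. A generic sketch of the pattern, with illustrative names:

    #include <stdio.h>
    #include <stdint.h>

    struct cpu_car_ops {
        void (*put_in_reset)(uint32_t cpu);
        void (*out_of_reset)(uint32_t cpu);
    };

    static struct cpu_car_ops *car_ops;    /* filled in by platform init */

    static void put_cpu_in_reset(uint32_t cpu)
    {
        if (!car_ops || !car_ops->put_in_reset) {
            fprintf(stderr, "missing put_in_reset hook\n");
            return;
        }
        car_ops->put_in_reset(cpu);
    }

    /* A platform registers its implementation once at init: */
    static void demo_reset(uint32_t cpu) { printf("reset cpu%u\n", cpu); }
    static struct cpu_car_ops demo_ops = { .put_in_reset = demo_reset };

    int main(void)
    {
        put_cpu_in_reset(1);                /* warns: no ops registered */
        car_ops = &demo_ops;
        put_cpu_in_reset(1);                /* calls demo_reset() */
        return 0;
    }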
|
/linux-4.4.14/kernel/time/ |
D | tick-broadcast.c | 39 static void tick_broadcast_clear_oneshot(int cpu); 42 static inline void tick_broadcast_clear_oneshot(int cpu) { } in tick_broadcast_clear_oneshot() argument 158 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) in tick_device_uses_broadcast() argument 175 cpumask_set_cpu(cpu, tick_broadcast_mask); in tick_device_uses_broadcast() 187 cpumask_clear_cpu(cpu, tick_broadcast_mask); in tick_device_uses_broadcast() 195 if (!cpumask_test_cpu(cpu, tick_broadcast_on)) in tick_device_uses_broadcast() 196 cpumask_clear_cpu(cpu, tick_broadcast_mask); in tick_device_uses_broadcast() 208 tick_broadcast_clear_oneshot(cpu); in tick_device_uses_broadcast() 229 ret = cpumask_test_cpu(cpu, tick_broadcast_mask); in tick_device_uses_broadcast() 261 int cpu = smp_processor_id(); in tick_do_broadcast() local [all …]
|
D | tick-common.c | 57 struct tick_device *tick_get_device(int cpu) in tick_get_device() argument 59 return &per_cpu(tick_cpu_device, cpu); in tick_get_device() 79 static void tick_periodic(int cpu) in tick_periodic() argument 81 if (tick_do_timer_cpu == cpu) { in tick_periodic() 101 int cpu = smp_processor_id(); in tick_handle_periodic() local 104 tick_periodic(cpu); in tick_handle_periodic() 137 tick_periodic(cpu); in tick_handle_periodic() 178 struct clock_event_device *newdev, int cpu, in tick_setup_device() argument 193 if (!tick_nohz_full_cpu(cpu)) in tick_setup_device() 194 tick_do_timer_cpu = cpu; in tick_setup_device() [all …]
|
D | tick-sched.c | 44 struct tick_sched *tick_get_tick_sched(int cpu) in tick_get_tick_sched() argument 46 return &per_cpu(tick_cpu_sched, cpu); in tick_get_tick_sched() 114 int cpu = smp_processor_id(); in tick_sched_do_timer() local 125 && !tick_nohz_full_cpu(cpu)) in tick_sched_do_timer() 126 tick_do_timer_cpu = cpu; in tick_sched_do_timer() 130 if (tick_do_timer_cpu == cpu) in tick_sched_do_timer() 227 void tick_nohz_full_kick_cpu(int cpu) in tick_nohz_full_kick_cpu() argument 229 if (!tick_nohz_full_cpu(cpu)) in tick_nohz_full_kick_cpu() 232 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); in tick_nohz_full_kick_cpu() 296 unsigned int cpu = (unsigned long)hcpu; in tick_nohz_cpu_down_callback() local [all …]
|
/linux-4.4.14/arch/powerpc/platforms/powernv/ |
D | subcore.c | 153 int i, cpu = smp_processor_id(); in wait_for_sync_step() local 155 for (i = cpu + 1; i < cpu + threads_per_core; i++) in wait_for_sync_step() 178 int i, cpu; in unsplit_core() local 182 cpu = smp_processor_id(); in unsplit_core() 183 if (cpu_thread_in_core(cpu) != 0) { in unsplit_core() 187 per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT; in unsplit_core() 200 for (i = cpu + 1; i < cpu + threads_per_core; i++) in unsplit_core() 212 int i, cpu; in split_core() local 219 cpu = smp_processor_id(); in split_core() 220 if (cpu_thread_in_core(cpu) != 0) { in split_core() [all …]
|
/linux-4.4.14/arch/mips/mti-malta/ |
D | malta-amon.c | 19 int amon_cpu_avail(int cpu) in amon_cpu_avail() argument 23 if (cpu < 0 || cpu >= NCPULAUNCH) { in amon_cpu_avail() 24 pr_debug("avail: cpu%d is out of range\n", cpu); in amon_cpu_avail() 28 launch += cpu; in amon_cpu_avail() 30 pr_debug("avail: cpu%d is not ready\n", cpu); in amon_cpu_avail() 34 pr_debug("avail: too late.. cpu%d is already gone\n", cpu); in amon_cpu_avail() 41 int amon_cpu_start(int cpu, in amon_cpu_start() argument 48 if (!amon_cpu_avail(cpu)) in amon_cpu_start() 50 if (cpu == smp_processor_id()) { in amon_cpu_start() 51 pr_debug("launch: I am cpu%d!\n", cpu); in amon_cpu_start() [all …]
|
/linux-4.4.14/tools/testing/selftests/vm/ |
D | userfaultfd.c | 111 unsigned long cpu = (unsigned long) arg; in locking_thread() local 123 seed += cpu; in locking_thread() 131 page_nr += cpu * nr_pages_per_cpu; in locking_thread() 190 page_nr, cpu, area_dst + page_nr * page_size, in locking_thread() 247 unsigned long cpu = (unsigned long) arg; in uffd_poll_thread() local 257 pollfd[1].fd = pipefd[cpu*2]; in uffd_poll_thread() 335 unsigned long cpu = (unsigned long) arg; in background_thread() local 338 for (page_nr = cpu * nr_pages_per_cpu; in background_thread() 339 page_nr < (cpu+1) * nr_pages_per_cpu; in background_thread() 348 unsigned long cpu; in stress() local [all …]
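A minimal userfaultfd round trip, distilled from what this selftest exercises: register a region, take a missing-page fault, resolve it with UFFDIO_COPY from another thread. A sketch with most error handling elided; it assumes Linux 4.3+ and permission to create a uffd (newer kernels may restrict it to root or require a sysctl):

    #include <linux/userfaultfd.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <fcntl.h>
    #include <pthread.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    static long page_size;

    static void *fault_handler(void *arg)
    {
        int uffd = (int)(long)arg;
        static char page[65536] __attribute__((aligned(65536)));
        struct uffd_msg msg;
        struct uffdio_copy copy;

        if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
            return NULL;
        if (msg.event != UFFD_EVENT_PAGEFAULT)
            return NULL;
        memset(page, 'A', page_size);       /* content for the missing page */
        copy.dst = msg.arg.pagefault.address & ~(page_size - 1);
        copy.src = (unsigned long)page;
        copy.len = page_size;
        copy.mode = 0;
        ioctl(uffd, UFFDIO_COPY, &copy);    /* resolve the fault */
        return NULL;
    }

    int main(void)
    {
        struct uffdio_api api = { .api = UFFD_API };
        struct uffdio_register reg;
        pthread_t thr;
        char *area;
        int uffd;

        page_size = sysconf(_SC_PAGESIZE);
        uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
        if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
            return 1;

        area = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        reg.range.start = (unsigned long)area;
        reg.range.len = page_size;
        reg.mode = UFFDIO_REGISTER_MODE_MISSING;
        if (ioctl(uffd, UFFDIO_REGISTER, &reg))
            return 1;

        pthread_create(&thr, NULL, fault_handler, (void *)(long)uffd);
        printf("first byte: %c\n", area[0]);   /* faults; handler fills page */
        pthread_join(thr, NULL);
        return 0;
    }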
|
/linux-4.4.14/arch/arm/mach-imx/ |
D | src.c | 85 void imx_enable_cpu(int cpu, bool enable) in imx_enable_cpu() argument 89 cpu = cpu_logical_map(cpu); in imx_enable_cpu() 90 mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1); in imx_enable_cpu() 94 val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1); in imx_enable_cpu() 99 void imx_set_cpu_jump(int cpu, void *jump_addr) in imx_set_cpu_jump() argument 101 cpu = cpu_logical_map(cpu); in imx_set_cpu_jump() 103 src_base + SRC_GPR1 + cpu * 8); in imx_set_cpu_jump() 106 u32 imx_get_cpu_arg(int cpu) in imx_get_cpu_arg() argument 108 cpu = cpu_logical_map(cpu); in imx_get_cpu_arg() 109 return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); in imx_get_cpu_arg() [all …]
|
/linux-4.4.14/arch/x86/kernel/apic/ |
D | x2apic_cluster.c | 21 static inline u32 x2apic_cluster(int cpu) in x2apic_cluster() argument 23 return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16; in x2apic_cluster() 31 unsigned int cpu, this_cpu; in __x2apic_send_IPI_mask() local 51 for_each_cpu(cpu, ipi_mask_ptr) { in __x2apic_send_IPI_mask() 54 cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu); in __x2apic_send_IPI_mask() 134 unsigned int cpu; in init_x2apic_ldr() local 139 for_each_online_cpu(cpu) { in init_x2apic_ldr() 140 if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) in init_x2apic_ldr() 142 cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); in init_x2apic_ldr() 143 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); in init_x2apic_ldr() [all …]
|
D | x2apic_uv_x.c | 254 static void uv_send_IPI_one(int cpu, int vector) in uv_send_IPI_one() argument
259 apicid = per_cpu(x86_cpu_to_apicid, cpu); in uv_send_IPI_one()
266 unsigned int cpu; in uv_send_IPI_mask() local
268 for_each_cpu(cpu, mask) in uv_send_IPI_mask()
269 uv_send_IPI_one(cpu, vector); in uv_send_IPI_mask()
275 unsigned int cpu; in uv_send_IPI_mask_allbutself() local
277 for_each_cpu(cpu, mask) { in uv_send_IPI_mask_allbutself()
278 if (cpu != this_cpu) in uv_send_IPI_mask_allbutself()
279 uv_send_IPI_one(cpu, vector); in uv_send_IPI_mask_allbutself()
286 unsigned int cpu; in uv_send_IPI_allbutself() local
[all …]
|
/linux-4.4.14/arch/powerpc/mm/ |
D | numa.c | 139 unsigned int cpu; in reset_numa_cpu_lookup_table() local
141 for_each_possible_cpu(cpu) in reset_numa_cpu_lookup_table()
142 numa_cpu_lookup_table[cpu] = -1; in reset_numa_cpu_lookup_table()
145 static void update_numa_cpu_lookup_table(unsigned int cpu, int node) in update_numa_cpu_lookup_table() argument
147 numa_cpu_lookup_table[cpu] = node; in update_numa_cpu_lookup_table()
150 static void map_cpu_to_node(int cpu, int node) in map_cpu_to_node() argument
152 update_numa_cpu_lookup_table(cpu, node); in map_cpu_to_node()
154 dbg("adding cpu %d to node %d\n", cpu, node); in map_cpu_to_node()
156 if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) in map_cpu_to_node()
157 cpumask_set_cpu(cpu, node_to_cpumask_map[node]); in map_cpu_to_node()
[all …]
|
/linux-4.4.14/arch/mips/loongson64/loongson-3/ |
D | smp.c | 241 static void loongson3_send_ipi_single(int cpu, unsigned int action) in loongson3_send_ipi_single() argument
243 loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(cpu)]); in loongson3_send_ipi_single()
257 int i, cpu = smp_processor_id(); in loongson3_ipi_interrupt() local
261 action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]); in loongson3_ipi_interrupt()
264 loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu_logical_map(cpu)]); in loongson3_ipi_interrupt()
276 BUG_ON(cpu != 0); in loongson3_ipi_interrupt()
293 unsigned int cpu = smp_processor_id(); in loongson3_init_secondary() local
303 per_cpu(cpu_state, cpu) = CPU_ONLINE; in loongson3_init_secondary()
304 cpu_data[cpu].core = in loongson3_init_secondary()
305 cpu_logical_map(cpu) % loongson_sysconf.cores_per_package; in loongson3_init_secondary()
[all …]
|
/linux-4.4.14/drivers/base/power/opp/ |
D | cpu.c | 128 int cpu, ret = 0; in dev_pm_opp_set_sharing_cpus() local
138 for_each_cpu(cpu, cpumask) { in dev_pm_opp_set_sharing_cpus()
139 if (cpu == cpu_dev->id) in dev_pm_opp_set_sharing_cpus()
142 dev = get_cpu_device(cpu); in dev_pm_opp_set_sharing_cpus()
145 __func__, cpu); in dev_pm_opp_set_sharing_cpus()
152 __func__, cpu); in dev_pm_opp_set_sharing_cpus()
167 int cpu; in dev_pm_opp_of_cpumask_remove_table() local
171 for_each_cpu(cpu, cpumask) { in dev_pm_opp_of_cpumask_remove_table()
172 cpu_dev = get_cpu_device(cpu); in dev_pm_opp_of_cpumask_remove_table()
175 cpu); in dev_pm_opp_of_cpumask_remove_table()
[all …]
|
/linux-4.4.14/drivers/hv/ |
D | hv.c | 388 static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu) in hv_init_clockevent_device() argument
392 dev->cpumask = cpumask_of(cpu); in hv_init_clockevent_device()
410 int cpu; in hv_synic_alloc() local
419 for_each_online_cpu(cpu) { in hv_synic_alloc()
420 hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); in hv_synic_alloc()
421 if (hv_context.event_dpc[cpu] == NULL) { in hv_synic_alloc()
425 tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu); in hv_synic_alloc()
427 hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC); in hv_synic_alloc()
428 if (hv_context.clk_evt[cpu] == NULL) { in hv_synic_alloc()
433 hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu); in hv_synic_alloc()
[all …]
|
/linux-4.4.14/arch/sparc/include/asm/ |
D | topology_64.h | 8 static inline int cpu_to_node(int cpu) in cpu_to_node() argument
10 return numa_cpu_lookup_table[cpu]; in cpu_to_node()
44 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) argument
45 #define topology_core_id(cpu) (cpu_data(cpu).core_id) argument
46 #define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) argument
47 #define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) argument
52 static inline const struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
54 return &cpu_core_map[cpu]; in cpu_coregroup_mask()
|
/linux-4.4.14/drivers/thermal/ |
D | x86_pkg_temp_thermal.c | 120 *pkg_temp_thermal_get_phy_entry(unsigned int cpu) in pkg_temp_thermal_get_phy_entry() argument
122 u16 phys_proc_id = topology_physical_package_id(cpu); in pkg_temp_thermal_get_phy_entry()
142 static int get_tj_max(int cpu, u32 *tj_max) in get_tj_max() argument
148 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); in get_tj_max()
319 int cpu = smp_processor_id(); in pkg_temp_thermal_threshold_work_fn() local
320 int phy_id = topology_physical_package_id(cpu); in pkg_temp_thermal_threshold_work_fn()
321 struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu); in pkg_temp_thermal_threshold_work_fn()
358 int cpu = smp_processor_id(); in pkg_temp_thermal_platform_thermal_notify() local
359 int phy_id = topology_physical_package_id(cpu); in pkg_temp_thermal_platform_thermal_notify()
378 schedule_delayed_work_on(cpu, in pkg_temp_thermal_platform_thermal_notify()
[all …]
|
/linux-4.4.14/arch/arm/include/asm/ |
D | smp_plat.h | 35 static inline unsigned int smp_cpuid_part(int cpu) in smp_cpuid_part() argument
37 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu); in smp_cpuid_part()
72 #define cpu_logical_map(cpu) __cpu_logical_map[cpu] argument
81 int cpu; in get_logical_index() local
82 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in get_logical_index()
83 if (cpu_logical_map(cpu) == mpidr) in get_logical_index()
84 return cpu; in get_logical_index()
111 extern int platform_can_hotplug_cpu(unsigned int cpu);
113 static inline int platform_can_hotplug_cpu(unsigned int cpu) in platform_can_hotplug_cpu() argument
|
D | topology.h | 18 #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) argument
19 #define topology_core_id(cpu) (cpu_topology[cpu].core_id) argument
20 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) argument
21 #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) argument
25 const struct cpumask *cpu_coregroup_mask(int cpu);
|
/linux-4.4.14/drivers/acpi/ |
D | processor_thermal.c | 53 #define reduction_pctg(cpu) \ argument
54 per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
63 static int phys_package_first_cpu(int cpu) in phys_package_first_cpu() argument
66 int id = topology_physical_package_id(cpu); in phys_package_first_cpu()
74 static int cpu_has_cpufreq(unsigned int cpu) in cpu_has_cpufreq() argument
77 if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu)) in cpu_has_cpufreq()
93 (100 - reduction_pctg(policy->cpu) * 20) in acpi_thermal_cpufreq_notifier()
106 static int cpufreq_get_max_state(unsigned int cpu) in cpufreq_get_max_state() argument
108 if (!cpu_has_cpufreq(cpu)) in cpufreq_get_max_state()
114 static int cpufreq_get_cur_state(unsigned int cpu) in cpufreq_get_cur_state() argument
[all …]
|
/linux-4.4.14/tools/power/x86/x86_energy_perf_policy/ |
D | x86_energy_perf_policy.c | 38 int cpu = -1; variable
87 cpu = atoi(optarg); in cmdline()
187 unsigned long long get_msr(int cpu, int offset) in get_msr() argument
194 sprintf(msr_path, "/dev/cpu/%d/msr", cpu); in get_msr()
205 printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); in get_msr()
212 unsigned long long put_msr(int cpu, unsigned long long new_msr, int offset) in put_msr() argument
219 sprintf(msr_path, "/dev/cpu/%d/msr", cpu); in put_msr()
229 printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); in put_msr()
236 printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval); in put_msr()
245 void print_msr(int cpu) in print_msr() argument
[all …]
|
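The get_msr()/put_msr() helpers indexed above go through the msr character device, where the MSR number is simply the file offset. A minimal standalone sketch of the same read path (assumes the msr kernel module is loaded and the caller has root; MSR 0x1B0, IA32_ENERGY_PERF_BIAS, is the register this tool tunes and is used here only as an illustrative offset):

    /* msr_read.c - read one MSR the way x86_energy_perf_policy does */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static uint64_t get_msr(int cpu, int offset)
    {
        char msr_path[32];
        uint64_t msr;
        int fd;

        snprintf(msr_path, sizeof(msr_path), "/dev/cpu/%d/msr", cpu);
        fd = open(msr_path, O_RDONLY);
        if (fd < 0) {
            perror(msr_path); /* needs the msr module and root */
            exit(1);
        }
        /* the MSR number is the offset into the device file */
        if (pread(fd, &msr, sizeof(msr), offset) != sizeof(msr)) {
            perror("pread");
            exit(1);
        }
        close(fd);
        return msr;
    }

    int main(void)
    {
        /* 0x1B0 = IA32_ENERGY_PERF_BIAS */
        printf("cpu0 msr 0x1b0 = 0x%llx\n",
               (unsigned long long)get_msr(0, 0x1B0));
        return 0;
    }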
/linux-4.4.14/arch/arm/plat-samsung/ |
D | init.c | 34 static struct cpu_table *cpu; variable
51 cpu = s3c_lookup_cpu(idcode, cputab, cputab_size); in s3c_init_cpu()
53 if (cpu == NULL) { in s3c_init_cpu()
58 printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); in s3c_init_cpu()
60 if (cpu->init == NULL) { in s3c_init_cpu()
61 printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); in s3c_init_cpu()
65 if (cpu->map_io) in s3c_init_cpu()
66 cpu->map_io(); in s3c_init_cpu()
83 if (cpu == NULL) in s3c24xx_init_clocks()
86 if (cpu->init_clocks == NULL) in s3c24xx_init_clocks()
[all …]
|
/linux-4.4.14/arch/blackfin/mach-bf561/ |
D | smp.c | 51 void platform_secondary_init(unsigned int cpu) in platform_secondary_init() argument
76 int platform_boot_secondary(unsigned int cpu, struct task_struct *idle) in platform_boot_secondary() argument
86 smp_send_reschedule(cpu); in platform_boot_secondary()
97 if (cpu_online(cpu)) in platform_boot_secondary()
103 if (cpu_online(cpu)) { in platform_boot_secondary()
106 panic("CPU%u: processor failed to boot\n", cpu); in platform_boot_secondary()
124 unsigned int cpu; in platform_send_ipi() local
127 for_each_cpu(cpu, &callmap) { in platform_send_ipi()
128 BUG_ON(cpu >= 2); in platform_send_ipi()
130 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu))); in platform_send_ipi()
[all …]
|
/linux-4.4.14/arch/cris/arch-v32/kernel/ |
D | irq.c | 51 int cpu; /* The CPU to which the IRQ is currently allocated. */ member
203 block_irq(int irq, int cpu) in block_irq() argument
211 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], in block_irq()
214 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, in block_irq()
217 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], in block_irq()
220 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, in block_irq()
227 unblock_irq(int irq, int cpu) in unblock_irq() argument
235 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], in unblock_irq()
238 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, in unblock_irq()
241 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], in unblock_irq()
[all …]
|
/linux-4.4.14/drivers/hwmon/ |
D | coretemp.c | 61 #define TO_PHYS_ID(cpu) (cpu_data(cpu).phys_proc_id) argument
62 #define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id) argument
63 #define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) argument
66 #define for_each_sibling(i, cpu) \ argument
67 for_each_cpu(i, topology_sibling_cpumask(cpu))
69 #define for_each_sibling(i, cpu) for (i = 0; false; ) argument
90 unsigned int cpu; member
141 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); in show_crit_alarm()
176 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); in show_temp()
420 static int chk_ucode_version(unsigned int cpu) in chk_ucode_version() argument
[all …]
|
/linux-4.4.14/arch/tile/include/asm/ |
D | topology.h | 27 static inline int cpu_to_node(int cpu) in cpu_to_node() argument
29 return cpu_2_node[cpu]; in cpu_to_node()
55 #define topology_physical_package_id(cpu) ((void)(cpu), 0) argument
56 #define topology_core_id(cpu) (cpu) argument
57 #define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask) argument
58 #define topology_sibling_cpumask(cpu) cpumask_of(cpu) argument
|
D | smp.h | 54 static inline int cpu_x(int cpu) in cpu_x() argument
56 return cpu % smp_width; in cpu_x()
58 static inline int cpu_y(int cpu) in cpu_y() argument
60 return cpu / smp_width; in cpu_y()
81 static inline void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
83 send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE); in arch_send_call_function_single_ipi()
94 #define cpu_x(cpu) 0 argument
95 #define cpu_y(cpu) 0 argument
103 #define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map) argument
110 #define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map) argument
|
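cpu_x()/cpu_y() above map a linear CPU number onto the Tile chip's rectangular mesh with one modulo and one division by the grid width. The same arithmetic as a standalone demo (the width of 8 is an arbitrary example value, not a real chip geometry):

    #include <stdio.h>

    static int smp_width = 8; /* example mesh width, not a real chip value */

    static int cpu_x(int cpu) { return cpu % smp_width; } /* column */
    static int cpu_y(int cpu) { return cpu / smp_width; } /* row */

    int main(void)
    {
        int cpu;

        /* cpus 0..15 on an 8-wide mesh fill two full rows */
        for (cpu = 0; cpu < 16; cpu++)
            printf("cpu%-2d -> (x=%d, y=%d)\n", cpu, cpu_x(cpu), cpu_y(cpu));
        return 0;
    }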
/linux-4.4.14/tools/power/cpupower/debug/i386/ |
D | centrino-decode.c | 29 static int rdmsr(unsigned int cpu, unsigned int msr, in rdmsr() argument
39 if (cpu > MCPU) in rdmsr()
42 sprintf(file, "/dev/cpu/%d/msr", cpu); in rdmsr()
76 static int decode_live(unsigned int cpu) in decode_live() argument
81 err = rdmsr(cpu, MSR_IA32_PERF_STATUS, &lo, &hi); in decode_live()
84 printf("can't get MSR_IA32_PERF_STATUS for cpu %d\n", cpu); in decode_live()
97 unsigned int cpu, mode = 0; in main() local
100 cpu = 0; in main()
102 cpu = strtoul(argv[1], NULL, 0); in main()
103 if (cpu >= MCPU) in main()
[all …]
|
/linux-4.4.14/arch/sparc/kernel/ |
D | smp_64.c | 249 static void smp_start_sync_tick_client(int cpu);
251 static void smp_synchronize_one_tick(int cpu) in smp_synchronize_one_tick() argument
257 smp_start_sync_tick_client(cpu); in smp_synchronize_one_tick()
282 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, in ldom_startcpu_cpuid() argument
305 hdesc->cpu = cpu; in ldom_startcpu_cpuid()
308 tb = &trap_block[cpu]; in ldom_startcpu_cpuid()
327 hv_err = sun4v_cpu_start(cpu, trampoline_ra, in ldom_startcpu_cpuid()
344 static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) in smp_boot_one_cpu() argument
359 ldom_startcpu_cpuid(cpu, in smp_boot_one_cpu()
364 prom_startcpu_cpuid(cpu, entry, cookie); in smp_boot_one_cpu()
[all …]
|
D | nmi.c | 57 int cpu; in touch_nmi_watchdog() local
59 for_each_present_cpu(cpu) { in touch_nmi_watchdog()
60 if (per_cpu(nmi_touch, cpu) != 1) in touch_nmi_watchdog()
61 per_cpu(nmi_touch, cpu) = 1; in touch_nmi_watchdog()
126 static inline unsigned int get_nmi_count(int cpu) in get_nmi_count() argument
128 return cpu_data(cpu).__nmi_count; in get_nmi_count()
137 static void report_broken_nmi(int cpu, int *prev_nmi_count) in report_broken_nmi() argument
143 cpu, prev_nmi_count[cpu], get_nmi_count(cpu)); in report_broken_nmi()
150 per_cpu(wd_enabled, cpu) = 0; in report_broken_nmi()
164 int cpu, err; in check_nmi_watchdog() local
[all …]
|
D | smp_32.c | 80 int cpu, num = 0; in smp_cpus_done() local
82 for_each_online_cpu(cpu) { in smp_cpus_done()
84 bogosum += cpu_data(cpu).udelay_val; in smp_cpus_done()
124 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
131 sparc32_ipi_ops->resched(cpu); in smp_send_reschedule()
138 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
141 sparc32_ipi_ops->single(cpu); in arch_send_call_function_single_ipi()
146 int cpu; in arch_send_call_function_ipi_mask() local
149 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
150 sparc32_ipi_ops->mask_one(cpu); in arch_send_call_function_ipi_mask()
[all …]
|
D | sun4d_smp.c | 84 while (current_set[cpuid]->cpu != cpuid) in sun4d_cpu_pre_online()
194 int cpu; in smp4d_ipi_init() local
199 for_each_possible_cpu(cpu) { in smp4d_ipi_init()
200 work = &per_cpu(sun4d_ipi_work, cpu); in smp4d_ipi_init()
231 static void sun4d_send_ipi(int cpu, int level) in sun4d_send_ipi() argument
233 cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1))); in sun4d_send_ipi()
236 static void sun4d_ipi_single(int cpu) in sun4d_ipi_single() argument
238 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); in sun4d_ipi_single()
244 sun4d_send_ipi(cpu, SUN4D_IPI_IRQ); in sun4d_ipi_single()
247 static void sun4d_ipi_mask_one(int cpu) in sun4d_ipi_mask_one() argument
[all …]
|
/linux-4.4.14/arch/arm64/boot/dts/arm/ |
D | juno.dts | 37 cpu-map {
40 cpu = <&A57_0>;
43 cpu = <&A57_1>;
49 cpu = <&A53_0>;
52 cpu = <&A53_1>;
55 cpu = <&A53_2>;
58 cpu = <&A53_3>;
63 A57_0: cpu@0 {
66 device_type = "cpu";
72 A57_1: cpu@1 {
[all …]
|
/linux-4.4.14/tools/testing/selftests/powerpc/pmu/ |
D | lib.c | 21 int cpu; in pick_online_cpu() local
31 for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) in pick_online_cpu()
32 if (CPU_ISSET(cpu, &mask)) in pick_online_cpu()
33 return cpu; in pick_online_cpu()
36 for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) in pick_online_cpu()
37 if (CPU_ISSET(cpu, &mask)) in pick_online_cpu()
38 return cpu; in pick_online_cpu()
44 int bind_to_cpu(int cpu) in bind_to_cpu() argument
48 printf("Binding to cpu %d\n", cpu); in bind_to_cpu()
51 CPU_SET(cpu, &mask); in bind_to_cpu()
[all …]
|
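bind_to_cpu() above pins the test to a single CPU through a cpu_set_t mask. A standalone sketch of the same pattern (Linux-specific; sched_setaffinity() with pid 0 affects the calling thread, and sched_getcpu() is used only to confirm the result):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* pin the calling process to a single cpu, as the selftest helper does */
    static int bind_to_cpu(int cpu)
    {
        cpu_set_t mask;

        printf("Binding to cpu %d\n", cpu);
        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        return sched_setaffinity(0, sizeof(mask), &mask);
    }

    int main(void)
    {
        if (bind_to_cpu(0))
            perror("sched_setaffinity");
        else
            printf("now running on cpu %d\n", sched_getcpu());
        return 0;
    }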
/linux-4.4.14/arch/x86/boot/ |
D | cpucheck.c | 86 err_flags[i] = req_flags[i] & ~cpu.flags[i]; in check_cpuflags()
106 memset(&cpu.flags, 0, sizeof cpu.flags); in check_cpu()
107 cpu.level = 3; in check_cpu()
110 cpu.level = 4; in check_cpu()
115 if (test_bit(X86_FEATURE_LM, cpu.flags)) in check_cpu()
116 cpu.level = 64; in check_cpu()
136 is_centaur() && cpu.model >= 6) { in check_cpu()
147 set_bit(X86_FEATURE_CX8, cpu.flags); in check_cpu()
159 : "+a" (level), "=d" (cpu.flags[0]) in check_cpu()
166 is_intel() && cpu.level == 6 && in check_cpu()
[all …]
|
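cpucheck.c above bumps cpu.level to 64 when the X86_FEATURE_LM bit is set. The same test can be made from userspace with the compiler's cpuid helper; a sketch assuming an x86 build with GCC or Clang (extended leaf 0x80000001, EDX bit 29 is the long-mode flag):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* extended leaf 0x80000001: EDX bit 29 = long mode (LM) */
        if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) {
            puts("extended cpuid leaf not supported");
            return 1;
        }
        printf("64-bit long mode: %s\n", (edx >> 29) & 1 ? "yes" : "no");
        return 0;
    }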
/linux-4.4.14/arch/mn10300/kernel/ |
D | smp.c | 46 static void run_sleep_cpu(unsigned int cpu);
47 static void run_wakeup_cpu(unsigned int cpu);
363 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
365 send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI); in arch_send_call_function_single_ipi()
372 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
374 send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI); in smp_send_reschedule()
725 static void __init smp_store_cpu_info(int cpu) in smp_store_cpu_info() argument
727 struct mn10300_cpuinfo *ci = &cpu_data[cpu]; in smp_store_cpu_info()
773 task_thread_info(idle)->cpu = cpu_id; in do_boot_cpu()
821 static void __init smp_show_cpu_info(int cpu) in smp_show_cpu_info() argument
[all …]
|
/linux-4.4.14/arch/arm/mm/ |
D | context.c | 57 int cpu; in a15_erratum_get_cpumask() local
63 for_each_online_cpu(cpu) { in a15_erratum_get_cpumask()
64 if (cpu == this_cpu) in a15_erratum_get_cpumask()
70 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
72 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
74 cpumask_set_cpu(cpu, mask); in a15_erratum_get_cpumask()
139 static void flush_context(unsigned int cpu) in flush_context() argument
170 int cpu; in check_update_reserved_asid() local
182 for_each_possible_cpu(cpu) { in check_update_reserved_asid()
183 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
[all …]
|
/linux-4.4.14/Documentation/devicetree/bindings/clock/ |
D | mt8173-cpu-dvfs.txt | 6 "cpu" - The multiplexer for clock input of CPU cluster.
7 "intermediate" - A parent of "cpu" clock which is used as "intermediate" clock
23 cpu0: cpu@0 {
24 device_type = "cpu";
28 cpu-idle-states = <&CPU_SLEEP_0>;
31 clock-names = "cpu", "intermediate";
34 cpu1: cpu@1 {
35 device_type = "cpu";
39 cpu-idle-states = <&CPU_SLEEP_0>;
42 clock-names = "cpu", "intermediate";
[all …]
|
/linux-4.4.14/arch/ia64/mm/ |
D | tlb.c | 72 int i, cpu; in wrap_mmu_context() local
90 cpu = get_cpu(); /* prevent preemption/migration */ in wrap_mmu_context()
92 if (i != cpu) in wrap_mmu_context()
347 int cpu = smp_processor_id(); in ia64_tlb_init() local
368 per_cpu(ia64_tr_num, cpu) = 8; in ia64_tlb_init()
371 per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1; in ia64_tlb_init()
372 if (per_cpu(ia64_tr_num, cpu) > in ia64_tlb_init()
374 per_cpu(ia64_tr_num, cpu) = in ia64_tlb_init()
376 if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { in ia64_tlb_init()
378 per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; in ia64_tlb_init()
[all …]
|
/linux-4.4.14/drivers/clk/mvebu/ |
D | clk-cpu.c | 37 int cpu; member
57 div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK; in clk_cpu_recalc_rate()
86 & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8)))) in clk_cpu_off_set_rate()
87 | (div << (cpuclk->cpu * 8)); in clk_cpu_off_set_rate()
90 reload_mask = 1 << (20 + cpuclk->cpu); in clk_cpu_off_set_rate()
150 return mvebu_pmsu_dfs_request(cpuclk->cpu); in clk_cpu_on_set_rate()
201 int cpu, err; in of_cpu_clk_setup() local
206 err = of_property_read_u32(dn, "reg", &cpu); in of_cpu_clk_setup()
210 sprintf(clk_name, "cpu%d", cpu); in of_cpu_clk_setup()
212 cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0); in of_cpu_clk_setup()
[all …]
|
/linux-4.4.14/mm/ |
D | percpu-vm.c | 14 unsigned int cpu, int page_idx) in pcpu_chunk_page() argument
19 return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); in pcpu_chunk_page()
58 unsigned int cpu; in pcpu_free_pages() local
61 for_each_possible_cpu(cpu) { in pcpu_free_pages()
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
86 unsigned int cpu, tcpu; in pcpu_alloc_pages() local
89 for_each_possible_cpu(cpu) { in pcpu_alloc_pages()
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
93 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); in pcpu_alloc_pages()
102 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]
|
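pcpu_alloc_pages() above allocates one page per possible CPU and, on the first failure, walks back over what was already allocated and frees it. A userspace analogue of that all-or-nothing pattern (not the kernel code itself; sysconf() stands in for for_each_possible_cpu(), and malloc() for alloc_pages_node()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        void **bufs;
        long cpu, tcpu;

        bufs = calloc(ncpus, sizeof(*bufs));
        if (!bufs)
            return 1;

        for (cpu = 0; cpu < ncpus; cpu++) {
            bufs[cpu] = malloc(4096); /* one "page" per cpu */
            if (!bufs[cpu])
                goto err;
        }
        printf("allocated one buffer for each of %ld cpus\n", ncpus);
        for (cpu = 0; cpu < ncpus; cpu++)
            free(bufs[cpu]);
        free(bufs);
        return 0;

    err:
        /* roll back every allocation made before the failure */
        for (tcpu = 0; tcpu < cpu; tcpu++)
            free(bufs[tcpu]);
        free(bufs);
        return 1;
    }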
/linux-4.4.14/kernel/events/ |
D | hw_breakpoint.c | 64 static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type) in get_bp_info() argument
66 return per_cpu_ptr(bp_cpuinfo + type, cpu); in get_bp_info()
100 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) in max_task_bp_pinned() argument
102 unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned; in max_task_bp_pinned()
117 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) in task_bp_pinned() argument
126 (iter->cpu < 0 || cpu == iter->cpu)) in task_bp_pinned()
135 if (bp->cpu >= 0) in cpumask_of_bp()
136 return cpumask_of(bp->cpu); in cpumask_of_bp()
149 int cpu; in fetch_bp_busy_slots() local
151 for_each_cpu(cpu, cpumask) { in fetch_bp_busy_slots()
[all …]
|
/linux-4.4.14/kernel/trace/ |
D | trace_kdb.c | 26 int cnt = 0, cpu; in ftrace_dump_buf() local
32 for_each_tracing_cpu(cpu) { in ftrace_dump_buf()
33 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf()
51 for_each_tracing_cpu(cpu) { in ftrace_dump_buf()
52 iter.buffer_iter[cpu] = in ftrace_dump_buf()
53 ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu); in ftrace_dump_buf()
54 ring_buffer_read_start(iter.buffer_iter[cpu]); in ftrace_dump_buf()
55 tracing_iter_reset(&iter, cpu); in ftrace_dump_buf()
89 for_each_tracing_cpu(cpu) { in ftrace_dump_buf()
90 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf()
[all …]
|
/linux-4.4.14/arch/arm64/include/asm/ |
D | topology.h | 16 #define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id) argument
17 #define topology_core_id(cpu) (cpu_topology[cpu].core_id) argument
18 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) argument
19 #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) argument
23 const struct cpumask *cpu_coregroup_mask(int cpu);
|
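The topology_*() macros above are what back the per-CPU topology files exported under sysfs, so the same identifiers can be read from userspace. A small reader sketch (the paths are the standard sysfs topology ABI; error handling is kept minimal):

    #include <stdio.h>

    static long read_topo(int cpu, const char *name)
    {
        char path[128];
        long val = -1;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, name);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%ld", &val) != 1)
            val = -1;
        fclose(f);
        return val;
    }

    int main(void)
    {
        /* mirrors topology_physical_package_id() and topology_core_id() */
        printf("cpu0 package %ld core %ld\n",
               read_topo(0, "physical_package_id"),
               read_topo(0, "core_id"));
        return 0;
    }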
/linux-4.4.14/drivers/macintosh/ |
D | windfarm_pm72.c | 209 static int read_one_cpu_vals(int cpu, s32 *temp, s32 *power) in read_one_cpu_vals() argument
215 rc = wf_sensor_get(sens_cpu_temp[cpu], &dtemp); in read_one_cpu_vals()
217 DBG(" CPU%d: temp reading error !\n", cpu); in read_one_cpu_vals()
220 DBG_LOTS(" CPU%d: temp = %d.%03d\n", cpu, FIX32TOPRINT((dtemp))); in read_one_cpu_vals()
224 rc = wf_sensor_get(sens_cpu_volts[cpu], &volts); in read_one_cpu_vals()
226 DBG(" CPU%d, volts reading error !\n", cpu); in read_one_cpu_vals()
229 DBG_LOTS(" CPU%d: volts = %d.%03d\n", cpu, FIX32TOPRINT((volts))); in read_one_cpu_vals()
232 rc = wf_sensor_get(sens_cpu_amps[cpu], &s); in read_one_cpu_vals()
234 DBG(" CPU%d, current reading error !\n", cpu); in read_one_cpu_vals()
237 DBG_LOTS(" CPU%d: amps = %d.%03d\n", cpu, FIX32TOPRINT((amps))); in read_one_cpu_vals()
[all …]
|
/linux-4.4.14/arch/arm/mach-zynq/ |
D | platsmp.c | 37 int zynq_cpun_start(u32 address, int cpu) in zynq_cpun_start() argument
50 zynq_slcr_cpu_stop(cpu); in zynq_cpun_start()
79 zynq_slcr_cpu_start(cpu); in zynq_cpun_start()
84 pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address); in zynq_cpun_start()
90 static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle) in zynq_boot_secondary() argument
92 return zynq_cpun_start(virt_to_phys(secondary_startup), cpu); in zynq_boot_secondary()
121 static void zynq_secondary_init(unsigned int cpu) in zynq_secondary_init() argument
127 static int zynq_cpu_kill(unsigned cpu) in zynq_cpu_kill() argument
131 while (zynq_slcr_cpu_state_read(cpu)) in zynq_cpu_kill()
135 zynq_slcr_cpu_stop(cpu); in zynq_cpu_kill()
[all …]
|
/linux-4.4.14/drivers/watchdog/ |
D | octeon-wdt-main.c | 224 static int cpu2core(int cpu) in cpu2core() argument
227 return cpu_logical_map(cpu); in cpu2core()
253 int cpu = core2cpu(core); in octeon_wdt_poke_irq() local
256 if (per_cpu_countdown[cpu] > 0) { in octeon_wdt_poke_irq()
259 per_cpu_countdown[cpu]--; in octeon_wdt_poke_irq()
263 cpumask_clear_cpu(cpu, &irq_enabled_cpus); in octeon_wdt_poke_irq()
377 static void octeon_wdt_disable_interrupt(int cpu) in octeon_wdt_disable_interrupt() argument
383 core = cpu2core(cpu); in octeon_wdt_disable_interrupt()
397 static void octeon_wdt_setup_interrupt(int cpu) in octeon_wdt_setup_interrupt() argument
403 core = cpu2core(cpu); in octeon_wdt_setup_interrupt()
[all …]
|
/linux-4.4.14/Documentation/ABI/testing/ |
D | sysfs-devices-system-cpu | 1 What: /sys/devices/system/cpu/
10 /sys/devices/system/cpu/cpu#/
12 What: /sys/devices/system/cpu/kernel_max
13 /sys/devices/system/cpu/offline
14 /sys/devices/system/cpu/online
15 /sys/devices/system/cpu/possible
16 /sys/devices/system/cpu/present
22 kernel_max: the maximum cpu index allowed by the kernel
40 What: /sys/devices/system/cpu/probe
41 /sys/devices/system/cpu/release
[all …]
|
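The online/offline/possible/present files documented above hold CPU ranges in list format, e.g. "0-7" or "0,2-5". A minimal reader that just prints the raw list (parsing the ranges is left out for brevity):

    #include <stdio.h>

    int main(void)
    {
        char buf[256];
        FILE *f = fopen("/sys/devices/system/cpu/online", "r");

        if (!f) {
            perror("open");
            return 1;
        }
        /* range list format, e.g. "0-7" or "0,2-5" */
        if (fgets(buf, sizeof(buf), f))
            printf("online cpus: %s", buf);
        fclose(f);
        return 0;
    }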
/linux-4.4.14/tools/virtio/virtio-trace/ |
D | trace-agent.c | 154 int cpu; in agent_info_init() local
159 for (cpu = 0; cpu < s->cpus; cpu++) { in agent_info_init()
161 in_path = make_input_path(cpu); in agent_info_init()
167 out_path = make_output_path(cpu); in agent_info_init()
174 rw_thread_init(cpu, in_path, out_path, s->use_stdout, in agent_info_init()
175 s->pipe_size, s->rw_ti[cpu]); in agent_info_init()
222 int cpu; in agent_main_loop() local
226 for (cpu = 0; cpu < s->cpus; cpu++) in agent_main_loop()
227 rw_thread_per_cpu[cpu] = rw_thread_run(s->rw_ti[cpu]); in agent_main_loop()
232 for (cpu = 0; cpu < s->cpus; cpu++) { in agent_main_loop()
[all …]
|
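agent_main_loop() above starts one reader/writer thread per CPU and then joins them all. A pthread sketch of that fan-out/join shape (link with -pthread; the worker body here is a placeholder, whereas the real agent splices trace data between virtio-serial pipes):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *rw_thread(void *arg)
    {
        long cpu = (long)arg;

        /* placeholder: the real thread moves trace data for this cpu */
        printf("worker for cpu %ld running\n", cpu);
        return NULL;
    }

    int main(void)
    {
        long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        pthread_t tid[256];
        long cpu;

        if (ncpus > 256)
            ncpus = 256;
        for (cpu = 0; cpu < ncpus; cpu++) /* one thread per cpu */
            pthread_create(&tid[cpu], NULL, rw_thread, (void *)cpu);
        for (cpu = 0; cpu < ncpus; cpu++)
            pthread_join(tid[cpu], NULL);
        return 0;
    }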
/linux-4.4.14/arch/mips/sibyte/sb1250/ |
D | irq.c | 54 void sb1250_mask_irq(int cpu, int irq) in sb1250_mask_irq() argument
60 cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) + in sb1250_mask_irq()
63 ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) + in sb1250_mask_irq()
68 void sb1250_unmask_irq(int cpu, int irq) in sb1250_unmask_irq() argument
74 cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) + in sb1250_unmask_irq()
77 ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) + in sb1250_unmask_irq()
86 int i = 0, old_cpu, cpu, int_on; in sb1250_set_affinity() local
94 cpu = cpu_logical_map(i); in sb1250_set_affinity()
110 sb1250_irq_owner[irq] = cpu; in sb1250_set_affinity()
113 cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) + in sb1250_set_affinity()
[all …]
|
/linux-4.4.14/drivers/xen/events/ |
D | events_fifo.c | 102 static int init_control_block(int cpu, in init_control_block() argument
105 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); in init_control_block()
116 init_control.vcpu = cpu; in init_control_block()
189 static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu) in evtchn_fifo_bind_to_cpu() argument
282 static void consume_one_event(unsigned cpu, in consume_one_event() argument
287 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); in consume_one_event()
327 static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) in __evtchn_fifo_handle_events() argument
333 control_block = per_cpu(cpu_control_block, cpu); in __evtchn_fifo_handle_events()
339 consume_one_event(cpu, control_block, q, &ready, drop); in __evtchn_fifo_handle_events()
344 static void evtchn_fifo_handle_events(unsigned cpu) in evtchn_fifo_handle_events() argument
[all …]
|