
Searched refs:cpus (Results 1 – 200 of 622) sorted by relevance


/linux-4.4.14/tools/perf/util/
Dcpumap.c12 struct cpu_map *cpus; in cpu_map__default_new() local
19 cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int)); in cpu_map__default_new()
20 if (cpus != NULL) { in cpu_map__default_new()
23 cpus->map[i] = i; in cpu_map__default_new()
25 cpus->nr = nr_cpus; in cpu_map__default_new()
26 atomic_set(&cpus->refcnt, 1); in cpu_map__default_new()
29 return cpus; in cpu_map__default_new()
35 struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size); in cpu_map__trim_new() local
37 if (cpus != NULL) { in cpu_map__trim_new()
38 cpus->nr = nr_cpus; in cpu_map__trim_new()
[all …]
Drecord.c60 struct cpu_map *cpus; in perf_probe_api() local
63 cpus = cpu_map__new(NULL); in perf_probe_api()
64 if (!cpus) in perf_probe_api()
66 cpu = cpus->map[0]; in perf_probe_api()
67 cpu_map__put(cpus); in perf_probe_api()
115 struct cpu_map *cpus; in perf_can_record_cpu_wide() local
118 cpus = cpu_map__new(NULL); in perf_can_record_cpu_wide()
119 if (!cpus) in perf_can_record_cpu_wide()
121 cpu = cpus->map[0]; in perf_can_record_cpu_wide()
122 cpu_map__put(cpus); in perf_can_record_cpu_wide()
[all …]
Devlist.c36 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, in perf_evlist__init() argument
44 perf_evlist__set_maps(evlist, cpus, threads); in perf_evlist__init()
119 cpu_map__put(evlist->cpus); in perf_evlist__delete()
121 evlist->cpus = NULL; in perf_evlist__delete()
136 cpu_map__put(evsel->cpus); in __perf_evlist__propagate_maps()
137 evsel->cpus = cpu_map__get(evlist->cpus); in __perf_evlist__propagate_maps()
138 } else if (evsel->cpus != evsel->own_cpus) { in __perf_evlist__propagate_maps()
139 cpu_map__put(evsel->cpus); in __perf_evlist__propagate_maps()
140 evsel->cpus = cpu_map__get(evsel->own_cpus); in __perf_evlist__propagate_maps()
341 int nr_cpus = cpu_map__nr(evlist->cpus); in perf_evlist__disable()
[all …]
Dcpumap.h26 int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
27 int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
91 int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
Dpython.c412 struct cpu_map *cpus; member
425 pcpus->cpus = cpu_map__new(cpustr); in pyrf_cpu_map__init()
426 if (pcpus->cpus == NULL) in pyrf_cpu_map__init()
433 cpu_map__put(pcpus->cpus); in pyrf_cpu_map__delete()
441 return pcpus->cpus->nr; in pyrf_cpu_map__length()
448 if (i >= pcpus->cpus->nr) in pyrf_cpu_map__item()
451 return Py_BuildValue("i", pcpus->cpus->map[i]); in pyrf_cpu_map__item()
671 struct cpu_map *cpus = NULL; in pyrf_evsel__open() local
685 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; in pyrf_evsel__open()
692 if (perf_evsel__open(evsel, cpus, threads) < 0) { in pyrf_evsel__open()
[all …]
Dtop.c98 top->evlist->cpus->nr > 1 ? "s" : "", in perf_top__header_snprintf()
105 top->evlist->cpus->nr, in perf_top__header_snprintf()
106 top->evlist->cpus->nr > 1 ? "s" : ""); in perf_top__header_snprintf()
Devsel.h102 struct cpu_map *cpus; member
141 return evsel->cpus; in perf_evsel__cpus()
233 struct cpu_map *cpus);
236 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
Devlist.h57 struct cpu_map *cpus; member
70 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
162 void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
Dstat.c203 struct cpu_map *cpus = perf_evsel__cpus(counter); in check_per_pkg() local
211 if (cpu_map__empty(cpus)) in check_per_pkg()
233 s = cpu_map__get_socket(cpus, cpu, NULL); in check_per_pkg()
Devsel.c1058 cpu_map__put(evsel->cpus); in perf_evsel__exit()
1291 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, in __perf_evsel__open() argument
1305 perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0) in __perf_evsel__open()
1337 for (cpu = 0; cpu < cpus->nr; cpu++) { in __perf_evsel__open()
1348 pid, cpus->map[cpu], group_fd, flags); in __perf_evsel__open()
1352 cpus->map[cpu], in __perf_evsel__open()
1468 int cpus[1]; member
1471 .cpus = { -1, },
1482 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, in perf_evsel__open() argument
1485 if (cpus == NULL) { in perf_evsel__open()
[all …]
Dpmu.h24 struct cpu_map *cpus; member
Dsvghelper.h6 extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end);
Dpmu.c432 struct cpu_map *cpus; in pmu_cpumask() local
448 cpus = cpu_map__read(file); in pmu_cpumask()
450 return cpus; in pmu_cpumask()
484 pmu->cpus = pmu_cpumask(name); in pmu_lookup()
/linux-4.4.14/tools/perf/tests/
Dopenat-syscall-all-cpus.c13 struct cpu_map *cpus; in test__openat_syscall_event_on_all_cpus() local
26 cpus = cpu_map__new(NULL); in test__openat_syscall_event_on_all_cpus()
27 if (cpus == NULL) { in test__openat_syscall_event_on_all_cpus()
41 if (perf_evsel__open(evsel, cpus, threads) < 0) { in test__openat_syscall_event_on_all_cpus()
48 for (cpu = 0; cpu < cpus->nr; ++cpu) { in test__openat_syscall_event_on_all_cpus()
56 if (cpus->map[cpu] >= CPU_SETSIZE) { in test__openat_syscall_event_on_all_cpus()
57 pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); in test__openat_syscall_event_on_all_cpus()
61 CPU_SET(cpus->map[cpu], &cpu_set); in test__openat_syscall_event_on_all_cpus()
64 cpus->map[cpu], in test__openat_syscall_event_on_all_cpus()
72 CPU_CLR(cpus->map[cpu], &cpu_set); in test__openat_syscall_event_on_all_cpus()
[all …]
Dmmap-basic.c24 struct cpu_map *cpus; in test__basic_mmap() local
41 cpus = cpu_map__new(NULL); in test__basic_mmap()
42 if (cpus == NULL) { in test__basic_mmap()
48 CPU_SET(cpus->map[0], &cpu_set); in test__basic_mmap()
52 cpus->map[0], strerror_r(errno, sbuf, sizeof(sbuf))); in test__basic_mmap()
62 perf_evlist__set_maps(evlist, cpus, threads); in test__basic_mmap()
79 if (perf_evsel__open(evsels[i], cpus, threads) < 0) { in test__basic_mmap()
141 cpus = NULL; in test__basic_mmap()
144 cpu_map__put(cpus); in test__basic_mmap()
Dtask-exit.c46 struct cpu_map *cpus; in test__task_exit() local
63 cpus = cpu_map__dummy_new(); in test__task_exit()
65 if (!cpus || !threads) { in test__task_exit()
71 perf_evlist__set_maps(evlist, cpus, threads); in test__task_exit()
73 cpus = NULL; in test__task_exit()
125 cpu_map__put(cpus); in test__task_exit()
Dsw-clock.c37 struct cpu_map *cpus; in __test__sw_clock_freq() local
55 cpus = cpu_map__dummy_new(); in __test__sw_clock_freq()
57 if (!cpus || !threads) { in __test__sw_clock_freq()
63 perf_evlist__set_maps(evlist, cpus, threads); in __test__sw_clock_freq()
65 cpus = NULL; in __test__sw_clock_freq()
118 cpu_map__put(cpus); in __test__sw_clock_freq()
Dkeep-tracking.c64 struct cpu_map *cpus = NULL; in test__keep_tracking() local
73 cpus = cpu_map__new(NULL); in test__keep_tracking()
74 CHECK_NOT_NULL__(cpus); in test__keep_tracking()
79 perf_evlist__set_maps(evlist, cpus, threads); in test__keep_tracking()
147 cpu_map__put(cpus); in test__keep_tracking()
Dswitch-tracking.c322 struct cpu_map *cpus = NULL; in test__switch_tracking() local
335 cpus = cpu_map__new(NULL); in test__switch_tracking()
336 if (!cpus) { in test__switch_tracking()
347 perf_evlist__set_maps(evlist, cpus, threads); in test__switch_tracking()
563 cpu_map__put(cpus); in test__switch_tracking()
Dcode-reading.c452 struct cpu_map *cpus = NULL; in do_test_code_reading() local
512 cpus = cpu_map__new(NULL); in do_test_code_reading()
513 if (!cpus) { in do_test_code_reading()
527 perf_evlist__set_maps(evlist, cpus, threads); in do_test_code_reading()
594 cpu_map__put(cpus); in do_test_code_reading()
/linux-4.4.14/tools/power/cpupower/utils/
Dcpufreq-info.c250 struct cpufreq_affected_cpus *cpus; in debug_output_one() local
273 cpus = cpufreq_get_related_cpus(cpu); in debug_output_one()
274 if (cpus) { in debug_output_one()
276 while (cpus->next) { in debug_output_one()
277 printf("%d ", cpus->cpu); in debug_output_one()
278 cpus = cpus->next; in debug_output_one()
280 printf("%d\n", cpus->cpu); in debug_output_one()
281 cpufreq_put_related_cpus(cpus); in debug_output_one()
284 cpus = cpufreq_get_affected_cpus(cpu); in debug_output_one()
285 if (cpus) { in debug_output_one()
[all …]
Dcpufreq-set.c296 struct cpufreq_affected_cpus *cpus; in cmd_freq_set() local
302 cpus = cpufreq_get_related_cpus(cpu); in cmd_freq_set()
303 if (!cpus) in cmd_freq_set()
305 while (cpus->next) { in cmd_freq_set()
306 bitmask_setbit(cpus_chosen, cpus->cpu); in cmd_freq_set()
307 cpus = cpus->next; in cmd_freq_set()
309 cpufreq_put_related_cpus(cpus); in cmd_freq_set()
/linux-4.4.14/arch/arm/mach-shmobile/
Dtimer.c40 struct device_node *np, *cpus; in shmobile_init_delay() local
46 cpus = of_find_node_by_path("/cpus"); in shmobile_init_delay()
47 if (!cpus) in shmobile_init_delay()
50 for_each_child_of_node(cpus, np) { in shmobile_init_delay()
68 of_node_put(cpus); in shmobile_init_delay()
Dpm-rcar-gen2.c58 struct device_node *np, *cpus; in rcar_gen2_pm_init() local
67 cpus = of_find_node_by_path("/cpus"); in rcar_gen2_pm_init()
68 if (!cpus) in rcar_gen2_pm_init()
71 for_each_child_of_node(cpus, np) { in rcar_gen2_pm_init()
Dplatsmp-apmu.c99 for (bit = 0; bit < ARRAY_SIZE(apmu_config[k].cpus); bit++) { in apmu_parse_cfg()
100 id = apmu_config[k].cpus[bit]; in apmu_parse_cfg()
109 for (bit = 0; bit < ARRAY_SIZE(apmu_config[k].cpus); bit++) { in apmu_parse_cfg()
110 id = apmu_config[k].cpus[bit]; in apmu_parse_cfg()
Dsmp-r8a7790.c42 .cpus = { 0, 1, 2, 3 },
46 .cpus = { 0x100, 0x0101, 0x102, 0x103 },
Dplatsmp-apmu.h21 int cpus[4]; member
Dsmp-r8a7791.c31 .cpus = { 0, 1 },
/linux-4.4.14/arch/mips/cavium-octeon/
Dsmp.c103 int cpus; in octeon_smp_setup() local
120 cpus = 1; in octeon_smp_setup()
123 set_cpu_possible(cpus, true); in octeon_smp_setup()
124 set_cpu_present(cpus, true); in octeon_smp_setup()
125 __cpu_number_map[id] = cpus; in octeon_smp_setup()
126 __cpu_logical_map[cpus] = id; in octeon_smp_setup()
127 cpus++; in octeon_smp_setup()
140 set_cpu_possible(cpus, true); in octeon_smp_setup()
141 __cpu_number_map[id] = cpus; in octeon_smp_setup()
142 __cpu_logical_map[cpus] = id; in octeon_smp_setup()
[all …]
/linux-4.4.14/block/
Dblk-mq-cpumap.c38 cpumask_var_t cpus; in blk_mq_update_queue_map() local
40 if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) in blk_mq_update_queue_map()
43 cpumask_clear(cpus); in blk_mq_update_queue_map()
48 if (!cpumask_test_cpu(first_sibling, cpus)) in blk_mq_update_queue_map()
50 cpumask_set_cpu(i, cpus); in blk_mq_update_queue_map()
85 free_cpumask_var(cpus); in blk_mq_update_queue_map()
/linux-4.4.14/tools/perf/python/
Dtwatch.py19 cpus = perf.cpu_map()
35 evsel.open(cpus = cpus, threads = threads);
36 evlist = perf.evlist(cpus, threads)
41 for cpu in cpus:
/linux-4.4.14/arch/x86/kernel/
Dtsc_sync.c122 int cpus = 2; in check_tsc_sync_source() local
146 while (atomic_read(&start_count) != cpus-1) in check_tsc_sync_source()
155 while (atomic_read(&stop_count) != cpus-1) in check_tsc_sync_source()
188 int cpus = 2; in check_tsc_sync_target() local
199 while (atomic_read(&start_count) != cpus) in check_tsc_sync_target()
212 while (atomic_read(&stop_count) != cpus) in check_tsc_sync_target()
/linux-4.4.14/tools/power/cpupower/utils/helpers/
Dtopology.c67 int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF); in get_cpu_topology() local
69 cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus); in get_cpu_topology()
73 for (cpu = 0; cpu < cpus; cpu++) { in get_cpu_topology()
94 qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info), in get_cpu_topology()
101 for(cpu = 1; cpu < cpus; cpu++) { in get_cpu_topology()
119 return cpus; in get_cpu_topology()
/linux-4.4.14/drivers/firmware/
Dqcom_scm.c34 int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) in qcom_scm_set_cold_boot_addr() argument
36 return __qcom_scm_set_cold_boot_addr(entry, cpus); in qcom_scm_set_cold_boot_addr()
48 int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) in qcom_scm_set_warm_boot_addr() argument
50 return __qcom_scm_set_warm_boot_addr(entry, cpus); in qcom_scm_set_warm_boot_addr()
Dqcom_scm-32.c405 int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) in __qcom_scm_set_cold_boot_addr() argument
416 if (!cpus || (cpus && cpumask_empty(cpus))) in __qcom_scm_set_cold_boot_addr()
419 for_each_cpu(cpu, cpus) { in __qcom_scm_set_cold_boot_addr()
437 int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) in __qcom_scm_set_warm_boot_addr() argument
447 for_each_cpu(cpu, cpus) { in __qcom_scm_set_warm_boot_addr()
459 for_each_cpu(cpu, cpus) in __qcom_scm_set_warm_boot_addr()
Dqcom_scm-64.c25 int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) in __qcom_scm_set_cold_boot_addr() argument
38 int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) in __qcom_scm_set_warm_boot_addr() argument
Dqcom_scm.h22 extern int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
23 extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
/linux-4.4.14/drivers/irqchip/
Dirq-bcm7038-l1.c45 struct bcm7038_l1_cpu *cpus[NR_CPUS]; member
126 cpu = intc->cpus[cpu_logical_map(smp_processor_id())]; in bcm7038_l1_irq_handle()
128 cpu = intc->cpus[0]; in bcm7038_l1_irq_handle()
158 intc->cpus[cpu_idx]->mask_cache[word] &= ~mask; in __bcm7038_l1_unmask()
159 l1_writel(mask, intc->cpus[cpu_idx]->map_base + in __bcm7038_l1_unmask()
169 intc->cpus[cpu_idx]->mask_cache[word] |= mask; in __bcm7038_l1_mask()
170 l1_writel(mask, intc->cpus[cpu_idx]->map_base + in __bcm7038_l1_mask()
208 was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] & in bcm7038_l1_set_affinity()
240 cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), in bcm7038_l1_init_one()
315 intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words); in bcm7038_l1_of_init()
[all …]
/linux-4.4.14/drivers/lguest/
Dlguest_user.c119 cpu = &lg->cpus[cpu_id]; in read()
158 if (id >= ARRAY_SIZE(cpu->lg->cpus)) in lg_cpu_start()
163 cpu->lg = container_of(cpu, struct lguest, cpus[id]); in lg_cpu_start()
256 err = lg_cpu_start(&lg->cpus[0], 0, args[2]); in initialize()
278 free_page(lg->cpus[0].regs_page); in initialize()
319 cpu = &lg->cpus[cpu_id]; in write()
378 hrtimer_cancel(&lg->cpus[i].hrt); in close()
380 free_page(lg->cpus[i].regs_page); in close()
385 mmput(lg->cpus[i].mm); in close()
Dpage_tables.c1035 kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u", in guest_set_pgd()
1046 if (!allocate_switcher_mapping(&lg->cpus[0])) { in guest_set_pgd()
1047 kill_guest(&lg->cpus[0], in guest_set_pgd()
1058 guest_pagetable_clear_all(&lg->cpus[0]); in guest_set_pmd()
1075 struct lg_cpu *cpu = &lg->cpus[0]; in init_guest_pagetable()
/linux-4.4.14/include/linux/
Dstop_machine.h117 int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
120 const struct cpumask *cpus);
124 const struct cpumask *cpus) in stop_machine() argument
135 const struct cpumask *cpus) in stop_machine_from_inactive_cpu() argument
137 return stop_machine(fn, data, cpus); in stop_machine_from_inactive_cpu()
Dqcom_scm.h16 extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
17 extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
Dcpufreq.h61 cpumask_var_t cpus; /* Online CPUs only */ member
147 return cpumask_weight(policy->cpus) > 1; in policy_is_shared()
/linux-4.4.14/tools/lib/traceevent/
Dplugin_function.c32 static int cpus = -1; variable
95 if (cpu > cpus) { in add_and_get_index()
107 for (i = cpus + 1; i <= cpu; i++) in add_and_get_index()
109 cpus = cpu; in add_and_get_index()
183 for (i = 0; i <= cpus; i++) { in PEVENT_PLUGIN_UNLOADER()
193 cpus = -1; in PEVENT_PLUGIN_UNLOADER()
Devent-parse.h480 int cpus; member
719 return pevent->cpus; in pevent_get_cpus()
722 static inline void pevent_set_cpus(struct pevent *pevent, int cpus) in pevent_set_cpus() argument
724 pevent->cpus = cpus; in pevent_set_cpus()
/linux-4.4.14/arch/arm/kernel/
Ddevtree.c77 struct device_node *cpu, *cpus; in arm_dt_init_cpu_maps() local
84 cpus = of_find_node_by_path("/cpus"); in arm_dt_init_cpu_maps()
86 if (!cpus) in arm_dt_init_cpu_maps()
89 for_each_child_of_node(cpus, cpu) { in arm_dt_init_cpu_maps()
166 set_smp_ops_by_method(cpus); in arm_dt_init_cpu_maps()
/linux-4.4.14/arch/arm/common/
Dmcpm_entry.c38 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
39 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
52 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
53 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
105 sync_cache_r(&c->cpus); in __mcpm_outbound_enter_critical()
113 cpustate = c->cpus[i].cpu; in __mcpm_outbound_enter_critical()
118 sync_cache_r(&c->cpus[i].cpu); in __mcpm_outbound_enter_critical()
440 mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN; in mcpm_sync_init()
446 mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP; in mcpm_sync_init()
/linux-4.4.14/tools/virtio/virtio-trace/
Dtrace-agent.c62 s->cpus = get_total_cpus(); in agent_info_new()
66 for (i = 0; i < s->cpus; i++) in agent_info_new()
159 for (cpu = 0; cpu < s->cpus; cpu++) { in agent_info_init()
226 for (cpu = 0; cpu < s->cpus; cpu++) in agent_main_loop()
232 for (cpu = 0; cpu < s->cpus; cpu++) { in agent_main_loop()
248 for (i = 0; i < s->cpus; i++) { in agent_info_free()
Dtrace-agent.h20 int cpus; member
/linux-4.4.14/tools/testing/selftests/rcutorture/bin/
Dkvm.sh48 cpus=0
96 --cpus)
97 checkarg --cpus "(number)" "$#" "$2" '^[0-9]*$' '^--'
98 cpus=$2
207 awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus '
294 -v ncpus=$cpus \
/linux-4.4.14/arch/ia64/kernel/
Dsmp.c295 cpumask_var_t cpus; in smp_flush_tlb_mm() local
304 if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) { in smp_flush_tlb_mm()
308 cpumask_copy(cpus, mm_cpumask(mm)); in smp_flush_tlb_mm()
309 smp_call_function_many(cpus, in smp_flush_tlb_mm()
311 free_cpumask_var(cpus); in smp_flush_tlb_mm()
/linux-4.4.14/include/trace/events/
Dthermal.h82 TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
85 TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power),
97 __assign_bitmask(cpumask, cpumask_bits(cpus),
114 TP_PROTO(const struct cpumask *cpus, unsigned int freq,
117 TP_ARGS(cpus, freq, cdev_state, power),
127 __assign_bitmask(cpumask, cpumask_bits(cpus),
Dxen.h407 TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
409 TP_ARGS(cpus, mm, addr, end),
416 TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
/linux-4.4.14/arch/powerpc/platforms/powermac/
Dsmp.c635 struct device_node *cpus; in smp_core99_pfunc_tb_freeze() local
638 cpus = of_find_node_by_path("/cpus"); in smp_core99_pfunc_tb_freeze()
639 BUG_ON(cpus == NULL); in smp_core99_pfunc_tb_freeze()
642 pmf_call_function(cpus, "cpu-timebase", &args); in smp_core99_pfunc_tb_freeze()
643 of_node_put(cpus); in smp_core99_pfunc_tb_freeze()
713 struct device_node *cpus = in smp_core99_setup() local
715 if (cpus && in smp_core99_setup()
716 of_get_property(cpus, "platform-cpu-timebase", NULL)) { in smp_core99_setup()
769 struct device_node *cpus; in smp_core99_probe() local
775 for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;) in smp_core99_probe()
Dfeature.c1052 struct device_node *cpus; in core99_reset_cpu() local
1062 cpus = of_find_node_by_path("/cpus"); in core99_reset_cpu()
1063 if (cpus == NULL) in core99_reset_cpu()
1065 for (np = cpus->child; np != NULL; np = np->sibling) { in core99_reset_cpu()
1075 of_node_put(cpus); in core99_reset_cpu()
1507 struct device_node *cpus; in g5_reset_cpu() local
1513 cpus = of_find_node_by_path("/cpus"); in g5_reset_cpu()
1514 if (cpus == NULL) in g5_reset_cpu()
1516 for (np = cpus->child; np != NULL; np = np->sibling) { in g5_reset_cpu()
1526 of_node_put(cpus); in g5_reset_cpu()
[all …]
/linux-4.4.14/arch/powerpc/kernel/
Drtas.c837 cpumask_var_t cpus) in rtas_cpu_state_change_mask() argument
839 if (!cpumask_empty(cpus)) { in rtas_cpu_state_change_mask()
840 cpumask_clear(cpus); in rtas_cpu_state_change_mask()
850 cpumask_var_t cpus) in rtas_cpu_state_change_mask() argument
856 if (cpumask_empty(cpus)) in rtas_cpu_state_change_mask()
859 for_each_cpu(cpu, cpus) { in rtas_cpu_state_change_mask()
877 cpumask_shift_right(cpus, cpus, cpu); in rtas_cpu_state_change_mask()
878 cpumask_shift_left(cpus, cpus, cpu); in rtas_cpu_state_change_mask()
882 cpumask_clear_cpu(cpu, cpus); in rtas_cpu_state_change_mask()
891 int rtas_online_cpus_mask(cpumask_var_t cpus) in rtas_online_cpus_mask() argument
[all …]
/linux-4.4.14/drivers/cpuidle/
Dcoupled.c451 cpumask_t cpus; in cpuidle_coupled_any_pokes_pending() local
454 cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); in cpuidle_coupled_any_pokes_pending()
455 ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus); in cpuidle_coupled_any_pokes_pending()
638 cpumask_t cpus; in cpuidle_coupled_update_online_cpus() local
639 cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); in cpuidle_coupled_update_online_cpus()
640 coupled->online_count = cpumask_weight(&cpus); in cpuidle_coupled_update_online_cpus()
/linux-4.4.14/tools/perf/arch/x86/tests/
Dperf-time-to-tsc.c51 struct cpu_map *cpus = NULL; in test__perf_time_to_tsc() local
65 cpus = cpu_map__new(NULL); in test__perf_time_to_tsc()
66 CHECK_NOT_NULL__(cpus); in test__perf_time_to_tsc()
71 perf_evlist__set_maps(evlist, cpus, threads); in test__perf_time_to_tsc()
/linux-4.4.14/drivers/misc/sgi-gru/
Dgrutlbpurge.c357 int cpus, shift = 0, n; in gru_tgh_flush_init() local
359 cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id); in gru_tgh_flush_init()
362 if (cpus) { in gru_tgh_flush_init()
363 n = 1 << fls(cpus - 1); in gru_tgh_flush_init()
376 gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift; in gru_tgh_flush_init()
Dgrulib.h146 int cpus; member
/linux-4.4.14/Documentation/devicetree/bindings/cpufreq/
Darm_big_little_dt.txt8 under node /cpus/cpu@x. Where x is the first cpu inside a cluster.
10 FIXME: Cpus should boot in the order specified in DT and all cpus for a cluster
24 cpus {
Dcpufreq-spear.txt17 /cpus/cpu@0.
21 cpus {
Dtegra124-cpufreq.txt5 under node /cpus/cpu@0.
24 cpus {
Dcpufreq-dt.txt8 under node /cpus/cpu@0.
27 cpus {
/linux-4.4.14/Documentation/devicetree/bindings/arm/
Dcpus.txt6 the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
26 cpus and cpu node bindings definition
29 The ARM architecture, in accordance with the ePAPR, requires the cpus and cpu
32 - cpus node
36 The node name must be "cpus".
38 A cpus node must define the following properties:
102 * If cpus node's #address-cells property is set to 2
110 * If cpus node's #address-cells property is set to 1
247 cpus {
278 cpus {
[all …]
Dkirkwood.txt12 cpus/cpu@0 with three clocks, "cpu_clk", "ddrclk" and "powersave",
18 cpus {
Dtopology.txt47 child of the cpus node and provides a container where the actual topology
62 The cpu-map node's parent node must be the cpus node.
169 cpus {
388 cpus {
475 Documentation/devicetree/bindings/arm/cpus.txt
Darm-boards189 - a "cpus" node describing the available cores and their associated
190 "enable-method"s. For more details see cpus.txt file.
202 cpus {
Dvexpress.txt56 Top-level standard "cpus" node is required. It must contain a node
59 cpus {
184 cpus {
Dal,alpine.txt25 cpus {
/linux-4.4.14/Documentation/ABI/testing/
Dsysfs-devices-system-xen_cpu16 Interface to online/offline Xen physical cpus
19 to online/offline physical cpus, except cpu0 due to several
Dsysfs-devices-system-cpu25 offline: cpus that are not online because they have been
26 HOTPLUGGED off or exceed the limit of cpus allowed by the
29 online: cpus that are online and being scheduled.
31 possible: cpus that have been allocated resources and can be
34 present: cpus that have been identified as being present in
253 shared_cpu_list: the list of logical cpus sharing the cache
255 shared_cpu_map: logical cpu mask containing the list of cpus sharing
/linux-4.4.14/samples/trace_events/
Dtrace-events-sample.h240 __bitmask( cpus, num_possible_cpus() )
249 __assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
296 __get_str(str), __get_bitmask(cpus))
/linux-4.4.14/kernel/
Dstop_machine.c536 static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) in __stop_machine() argument
542 .active_cpus = cpus, in __stop_machine()
569 int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) in stop_machine() argument
575 ret = __stop_machine(fn, data, cpus); in stop_machine()
604 const struct cpumask *cpus) in stop_machine_from_inactive_cpu() argument
607 .active_cpus = cpus }; in stop_machine_from_inactive_cpu()
Dsmp.c669 cpumask_var_t cpus; in on_each_cpu_cond() local
674 if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) { in on_each_cpu_cond()
678 cpumask_set_cpu(cpu, cpus); in on_each_cpu_cond()
679 on_each_cpu_mask(cpus, func, info, wait); in on_each_cpu_cond()
681 free_cpumask_var(cpus); in on_each_cpu_cond()
/linux-4.4.14/Documentation/devicetree/bindings/arm/cpu-enable-method/
Dmarvell,berlin-smp7 be defined in the "cpus" node.
20 cpus {
Dal,alpine-smp8 "cpus" node.
21 cpus {
/linux-4.4.14/drivers/cpufreq/
Dspeedstep-ich.c262 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); in speedstep_target()
295 cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu)); in speedstep_cpu_init()
297 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); in speedstep_cpu_init()
Dcpufreq-dt.c219 ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus); in cpufreq_init()
241 dev_pm_opp_of_cpumask_add_table(policy->cpus); in cpufreq_init()
258 cpumask_setall(policy->cpus); in cpufreq_init()
264 ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus); in cpufreq_init()
371 dev_pm_opp_of_cpumask_remove_table(policy->cpus); in cpufreq_init()
Dcpufreq.c38 return cpumask_empty(policy->cpus); in policy_is_inactive()
235 cpumask_setall(policy->cpus); in cpufreq_generic_init()
245 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; in cpufreq_cpu_get_raw()
406 for_each_cpu(freqs->cpu, policy->cpus) in cpufreq_notify_transition()
737 return cpufreq_show_cpus(policy->cpus, buf); in show_affected_cpus()
996 if (cpumask_test_cpu(cpu, policy->cpus)) in cpufreq_add_policy_cpu()
1008 cpumask_set_cpu(cpu, policy->cpus); in cpufreq_add_policy_cpu()
1037 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1060 free_cpumask_var(policy->cpus); in cpufreq_policy_alloc()
1109 free_cpumask_var(policy->cpus); in cpufreq_policy_free()
[all …]
Dcpufreq_governor.c64 for_each_cpu(j, policy->cpus) { in dbs_check_cpu()
184 for_each_cpu(i, policy->cpus) in gov_queue_work()
196 for_each_cpu(i, policy->cpus) { in gov_cancel_work()
300 for_each_cpu(j, policy->cpus) in free_common_dbs_info()
443 for_each_cpu(j, policy->cpus) { in cpufreq_governor_start()
Dcppc_cpufreq.c104 cpumask_copy(policy->cpus, cpu->shared_cpu_map); in cppc_cpufreq_cpu_init()
111 cpumask_set_cpu(policy->cpu, policy->cpus); in cppc_cpufreq_cpu_init()
Dp4-clockmod.c116 for_each_cpu(i, policy->cpus) in cpufreq_p4_target()
175 cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu)); in cpufreq_p4_cpu_init()
Dpowernv-cpufreq.c386 smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); in powernv_cpufreq_target_index()
398 cpumask_set_cpu(base + i, policy->cpus); in powernv_cpufreq_cpu_init()
445 for_each_cpu(tcpu, policy.cpus) in powernv_cpufreq_work_fn()
Dacpi-cpufreq.c470 cmd.mask = policy->cpus; in acpi_cpufreq_target()
705 cpumask_copy(policy->cpus, perf->shared_cpu_map); in acpi_cpufreq_cpu_init()
713 cpumask_copy(policy->cpus, topology_core_cpumask(cpu)); in acpi_cpufreq_cpu_init()
717 cpumask_clear(policy->cpus); in acpi_cpufreq_cpu_init()
718 cpumask_set_cpu(cpu, policy->cpus); in acpi_cpufreq_cpu_init()
Dppc_cbe_cpufreq.c121 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); in cbe_cpufreq_cpu_init()
Dspeedstep-centrino.c439 for_each_cpu(j, policy->cpus) { in centrino_target()
447 good_cpu = cpumask_any_and(policy->cpus, in centrino_target()
/linux-4.4.14/Documentation/devicetree/bindings/mips/img/
Dpistachio.txt10 A "cpus" node is required. Required properties:
22 cpus {
Dxilfpga.txt36 A "cpus" node is required. Required properties:
48 cpus {
/linux-4.4.14/arch/s390/hypfs/
Dhypfs_diag.c121 __u8 cpus; member
128 __u8 cpus; member
155 return ((struct part_hdr *)hdr)->cpus; in part_hdr__rcpus()
257 __u8 cpus; member
264 __u8 cpus; member
281 return ((struct phys_hdr *)hdr)->cpus; in phys_hdr__cpus()
283 return ((struct x_phys_hdr *)hdr)->cpus; in phys_hdr__cpus()
/linux-4.4.14/Documentation/devicetree/bindings/arm/bcm/
Dbrcm,bcm11351-cpu-method.txt8 properties in the "cpus" device tree node:
19 cpus {
/linux-4.4.14/arch/powerpc/platforms/cell/
Dcpufreq_spudemand.c112 for_each_cpu(i, policy->cpus) { in spu_gov_govern()
129 for_each_cpu (i, policy->cpus) { in spu_gov_govern()
/linux-4.4.14/Documentation/ia64/
Dmca.txt7 the OS is in any state. Including when one of the cpus is already
22 to all the other cpus, the slaves.
24 * Slave cpus that receive the MCA interrupt call down into SAL, they
29 sends an unmaskable INIT event to the slave cpus that have not
68 tasks. But (and its a big but), the cpus that received the MCA
97 cpus. SAL picks one of the cpus as the monarch and the rest are
103 versions of SAL out there. Some drive all the cpus as monarchs. Some
126 all cpus.
/linux-4.4.14/Documentation/accounting/
Dtaskstats.txt36 one of the cpus in the cpumask, its per-pid statistics are sent to the
82 Commands to register/deregister interest in exit data from a set of cpus
86 comma-separated cpu ranges e.g. to listen to exit data from cpus 1,2,3,5,7,8
88 in cpus before closing the listening socket, the kernel cleans up its interest
165 extended and the number of cpus grows large.
172 - create more listeners and reduce the number of cpus being listened to by
175 of cpus to which it listens, especially if they are listening to just one cpu.
/linux-4.4.14/arch/ia64/mm/
Ddiscontig.c119 unsigned long pernodesize = 0, cpus; in compute_pernodesize() local
121 cpus = early_nr_cpus_node(node); in compute_pernodesize()
122 pernodesize += PERCPU_PAGE_SIZE * cpus; in compute_pernodesize()
270 int cpus = early_nr_cpus_node(node); in fill_pernode() local
278 pernode += PERCPU_PAGE_SIZE * cpus; in fill_pernode()
/linux-4.4.14/arch/powerpc/boot/dts/fsl/
Dmpc8536ds.dts18 cpus {
19 #cpus = <1>;
Dmpc8536ds_36b.dts18 cpus {
19 #cpus = <1>;
Dp1020rdb-pc_camp_core0.dts33 cpus {
Dmpc8572ds_camp_core0.dts23 cpus {
Dmpc8572ds_camp_core1.dts24 cpus {
Dp1020rdb-pc_camp_core1.dts29 cpus {
/linux-4.4.14/tools/perf/arch/x86/util/
Dintel-bts.c119 const struct cpu_map *cpus = evlist->cpus; in intel_bts_recording_options() local
146 if (opts->full_auxtrace && !cpu_map__empty(cpus)) { in intel_bts_recording_options()
227 if (!cpu_map__empty(cpus)) in intel_bts_recording_options()
Dintel-pt.c334 per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus); in intel_pt_info_fill()
504 const struct cpu_map *cpus = evlist->cpus; in intel_pt_recording_options() local
626 if (have_timing_info && !cpu_map__empty(cpus)) { in intel_pt_recording_options()
682 if (!cpu_map__empty(cpus)) in intel_pt_recording_options()
702 if (!cpu_map__empty(cpus)) { in intel_pt_recording_options()
713 if (!ptr->have_sched_switch && !cpu_map__empty(cpus) && in intel_pt_recording_options()
/linux-4.4.14/Documentation/cgroups/
Dcpusets.txt28 2.2 Adding/removing cpus
171 - cpuset.cpus: list of CPUs in that cpuset
218 The cpus and mems files in the root (top_cpuset) cpuset are
219 read-only. The cpus file automatically tracks the value of
410 setting), it requests that all the CPUs in that cpusets allowed 'cpuset.cpus'
460 If two cpusets have partially overlapping 'cpuset.cpus' allowed, and only
560 3 : search cpus in a node [= system wide on non-NUMA system]
616 If a cpuset has its 'cpuset.cpus' modified, then each task in that cpuset
650 with non-empty cpus. But the moving of some (or all) tasks might fail if
687 /bin/echo 2-3 > cpuset.cpus
[all …]
/linux-4.4.14/arch/mips/boot/dts/xilfpga/
DmicroAptiv.dtsi6 cpus {
/linux-4.4.14/scripts/gdb/
Dvmlinux-gdb.py30 import linux.cpus
/linux-4.4.14/Documentation/thermal/
Dcpu-cooling-api.txt26 clip_cpus: cpumask of cpus where the frequency constraints will happen.
37 clip_cpus: cpumask of cpus where the frequency constraints will happen.
46 cpus must have registered their OPPs using the OPP library.
51 static power consumed by these cpus (See 2.2 Static power).
174 `cpumask` is the cpumask of the cpus involved in the calculation.
179 temperature of the cpus described by `cpumask` is left for
/linux-4.4.14/Documentation/
Dcpu-hotplug.txt45 maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
47 other cpus later online, read FAQ's for more info.
49 additional_cpus=n (*) Use this to limit hotpluggable cpus. This option sets
62 determine the number of potentially hot-pluggable cpus. The implementation
63 should only rely on this to count the # of cpus, but *MUST* not rely
65 BIOS doesn't mark such hot-pluggable cpus as disabled entries, one could
66 use this parameter "additional_cpus=x" to represent those cpus in the
69 possible_cpus=n [s390,x86_64] use this to set hotpluggable cpus.
118 If you merely need to avoid cpus going away, you could also use
177 Q: Does hot-add/hot-remove refer to physical add/remove of cpus?
DIRQ-affinity.txt11 IRQ affinity then the value will not change from the default of all cpus.
58 Here is an example of limiting that same irq (44) to cpus 1024 to 1031:
Dcputopology.txt92 above). [~cpu_online_mask + cpus >= NR_CPUS]
105 In this example, there are 64 CPUs in the system but cpus 32-63 exceed
/linux-4.4.14/Documentation/devicetree/bindings/mips/brcm/
Dbrcm,bmips.txt8 under the "cpus" node.
/linux-4.4.14/arch/mips/boot/dts/mti/
Dsead3.dts12 cpus {
/linux-4.4.14/arch/arm/boot/dts/
Dbcm47081.dtsi15 cpus {
Dbcm4708.dtsi15 cpus {
Dlpc4350.dtsi17 cpus {
Dlpc4357.dtsi17 cpus {
Daxm5516-amarillo.dts17 #include "axm5516-cpus.dtsi"
Dhip01-ca9x2.dts25 cpus {
Dstih41x.dtsi12 cpus {
Daxm5516-cpus.dtsi2 * arch/arm/boot/dts/axm5516-cpus.dtsi
13 cpus {
Dalphascale-asm9260.dtsi18 cpus {
Dvf500.dtsi15 cpus {
Ddra72x.dtsi15 cpus {
Domap34xx.dtsi16 cpus {
Domap443x.dtsi14 cpus {
Dhisi-x5hd2-dkb.dts21 cpus {
Dxenvm-4.2.dts23 cpus {
Domap4460.dtsi13 cpus {
Dk2l.dtsi15 cpus {
Domap36xx.dtsi20 cpus {
Dimx6dl.dtsi20 cpus {
Dk2hk.dtsi15 cpus {
Dpxa2xx.dtsi26 cpus {
Domap3-cm-t3x30.dtsi8 cpus {
Dmoxart.dtsi15 cpus {
Ds3c2416.dtsi23 cpus {
Decx-2000.dts29 cpus {
/linux-4.4.14/Documentation/DocBook/
Dkgdb.xml.db9 API-kgdb-roundup-cpus
/linux-4.4.14/arch/powerpc/boot/dts/
Diss4xx.dts22 dcr-parent = <&{/cpus/cpu@0}>;
28 cpus {
Damigaone.dts21 cpus {
22 #cpus = <1>;
Diss4xx-mpic.dts24 dcr-parent = <&{/cpus/cpu@0}>;
30 cpus {
Dsbc8548-pre.dtsi29 cpus {
Dep405.dts19 dcr-parent = <&{/cpus/cpu@0}>;
27 cpus {
Dacadia.dts18 dcr-parent = <&{/cpus/cpu@0}>;
26 cpus {
Dklondike.dts31 dcr-parent = <&{/cpus/cpu@0}>;
38 cpus {
Dwalnut.dts19 dcr-parent = <&{/cpus/cpu@0}>;
27 cpus {
Dgamecube.dts32 cpus {
Dwarp.dts19 dcr-parent = <&{/cpus/cpu@0}>;
26 cpus {
Dps3.dts54 cpus {
Dhotfoot.dts18 dcr-parent = <&{/cpus/cpu@0}>;
27 cpus {
/linux-4.4.14/arch/cris/boot/dts/
Detraxfs.dtsi6 cpus {
Dartpec3.dtsi6 cpus {
/linux-4.4.14/Documentation/devicetree/bindings/opp/
Dopp.txt131 cpus {
186 cpus {
266 cpus {
367 cpus {
423 cpus {
/linux-4.4.14/arch/arc/boot/dts/
Dskeleton.dtsi22 cpus {
Dnsimosci_hs_idu.dts66 RR distribute to all cpus */
/linux-4.4.14/arch/openrisc/boot/dts/
Dor1ksim.dts17 cpus {
/linux-4.4.14/arch/mips/boot/dts/ralink/
Drt2880.dtsi6 cpus {
Dmt7620a.dtsi6 cpus {
Drt3883.dtsi6 cpus {
Drt3050.dtsi6 cpus {
/linux-4.4.14/arch/powerpc/include/asm/
Drtas.h356 extern int rtas_online_cpus_mask(cpumask_var_t cpus);
357 extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
/linux-4.4.14/Documentation/devicetree/bindings/arm/msm/
Dqcom,idle-state.txt15 clocks. In addition to gating the clocks, QCOM cpus use this instruction as a
18 hierarchy to enter standby states, when all cpus are idle. An interrupt brings
51 voltages reduced, provided all cpus enter this state. Since the span of low
/linux-4.4.14/Documentation/cpu-freq/
Dcpu-drivers.txt125 SMP systems normally have same clock source for a group of cpus. For these the
127 routine must initialize policy->cpus with mask of all possible cpus (Online +
129 policy->related_cpus and will reset policy->cpus to carry only online cpus.
/linux-4.4.14/arch/arm/include/asm/
Dmcpm.h291 } cpus[MAX_CPUS_PER_CLUSTER]; member
/linux-4.4.14/kernel/debug/kdb/
Dkdb_cmds26 defcmd dumpcpu "" "Same as dumpall but only tasks on cpus"
/linux-4.4.14/scripts/dtc/
Dlivetree.c555 struct node *cpus, *bootcpu; in guess_boot_cpuid() local
558 cpus = get_node_by_path(tree, "/cpus"); in guess_boot_cpuid()
559 if (!cpus) in guess_boot_cpuid()
563 bootcpu = cpus->children; in guess_boot_cpuid()
/linux-4.4.14/Documentation/scheduler/
Dsched-stats.txt16 cpus on the machine, while domain0 is the most tightly focused domain,
17 sometimes balancing only between pairs of cpus. At this time, there
19 field in the domain stats is a bit map indicating which cpus are affected
65 The first field is a bit mask indicating what cpus this domain operates over.
/linux-4.4.14/arch/mips/boot/dts/brcm/
Dbcm6328.dtsi6 cpus {
Dbcm6368.dtsi6 cpus {
Dbcm3384_viper.dtsi14 cpus {
Dbcm3384_zephyr.dtsi13 cpus {
Dbcm7125.dtsi6 cpus {
/linux-4.4.14/arch/tile/kernel/
Dsetup.c887 int cpu, node, cpus, i, x, y; in setup_numa_mapping() local
923 cpus = cpu; in setup_numa_mapping()
937 for (cpu = 0; cpu < cpus; ++cpu) { in setup_numa_mapping()
1327 int cpus, i, rc; in setup_cpu_maps() local
1349 cpus = 1; /* this cpu */ in setup_cpu_maps()
1351 for (i = 0; cpus < setup_max_cpus; ++i) in setup_cpu_maps()
1353 ++cpus; in setup_cpu_maps()
/linux-4.4.14/drivers/edac/
Dcpc925_edac.c597 struct device_node *cpus; in cpc925_cpu_mask_disabled() local
607 cpus = of_find_node_by_path("/cpus"); in cpc925_cpu_mask_disabled()
608 if (cpus == NULL) { in cpc925_cpu_mask_disabled()
613 while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) { in cpc925_cpu_mask_disabled()
638 of_node_put(cpus); in cpc925_cpu_mask_disabled()
/linux-4.4.14/scripts/gdb/linux/
Dmodules.py16 from linux import cpus, utils
/linux-4.4.14/arch/c6x/boot/dts/
Dtms320c6457.dtsi6 cpus {
Dtms320c6474.dtsi6 cpus {
Dtms320c6455.dtsi6 cpus {
Dtms320c6472.dtsi6 cpus {
Dtms320c6678.dtsi6 cpus {
/linux-4.4.14/drivers/crypto/qat/qat_dh895xcc/
Dadf_isr.c176 unsigned int cpu, cpus = num_online_cpus(); in adf_request_irqs() local
191 i) % cpus; in adf_request_irqs()
Dadf_drv.c125 int cpus = num_online_cpus(); in adf_dev_configure() local
127 int instances = min(cpus, banks); in adf_dev_configure()
/linux-4.4.14/arch/x86/include/asm/
Damd_nb.h46 atomic_t cpus; member
/linux-4.4.14/arch/arm/plat-samsung/include/plat/
Dcpu.h109 struct cpu_table *cpus, unsigned int cputab_size);
/linux-4.4.14/tools/power/x86/turbostat/
Dturbostat.c2851 } *cpus; in topology_probe() local
2863 cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); in topology_probe()
2864 if (cpus == NULL) in topology_probe()
2899 cpus[i].core_id = get_core_id(i); in topology_probe()
2900 if (cpus[i].core_id > max_core_id) in topology_probe()
2901 max_core_id = cpus[i].core_id; in topology_probe()
2903 cpus[i].physical_package_id = get_physical_package_id(i); in topology_probe()
2904 if (cpus[i].physical_package_id > max_package_id) in topology_probe()
2905 max_package_id = cpus[i].physical_package_id; in topology_probe()
2912 i, cpus[i].physical_package_id, cpus[i].core_id); in topology_probe()
[all …]
/linux-4.4.14/arch/h8300/boot/dts/
Dh8300h_sim.dts43 cpus {
Dh8s_sim.dts49 cpus {
Dedosk2674.dts50 cpus {
/linux-4.4.14/arch/s390/kernel/
Dtime.c716 atomic_t cpus; member
725 atomic_dec(&sync->cpus); in clock_sync_cpu()
774 while (atomic_read(&etr_sync->cpus) != 0) in etr_sync_clock()
840 atomic_set(&etr_sync.cpus, num_online_cpus() - 1); in etr_sync_clock_stop()
1553 while (atomic_read(&stp_sync->cpus) != 0) in stp_sync_clock()
1618 atomic_set(&stp_sync.cpus, num_online_cpus() - 1); in stp_work_fn()
Dcrash_dump.c490 int i, cpus = 0; in get_cpu_cnt() local
495 cpus++; in get_cpu_cnt()
497 return cpus; in get_cpu_cnt()
/linux-4.4.14/arch/mips/boot/dts/lantiq/
Ddanube.dtsi6 cpus {
/linux-4.4.14/kernel/power/
Dpower.h11 int cpus; member
/linux-4.4.14/tools/perf/
Dbuiltin-stat.c252 const int ncpus = cpu_map__nr(evsel_list->cpus), in handle_initial_delay()
636 int ncpus = cpu_map__nr(counter->cpus); in print_aggr_thread()
1049 if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) { in perf_stat_init_aggr_mode()
1056 if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) { in perf_stat_init_aggr_mode()
1075 nr = cpu_map__get_max(evsel_list->cpus); in perf_stat_init_aggr_mode()
/linux-4.4.14/Documentation/power/
Dsuspend-and-cpuhotplug.txt22 |tasks | | cpus | | | | cpus | |tasks|
70 Note down these cpus in | P
93 | Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
/linux-4.4.14/arch/mn10300/kernel/
Dsmp.c395 int cpus, ret = 0; in smp_nmi_call_function() local
397 cpus = num_online_cpus() - 1; in smp_nmi_call_function()
398 if (cpus < 1) in smp_nmi_call_function()
/linux-4.4.14/arch/mips/boot/dts/qca/
Dar9132.dtsi7 cpus {
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/
Dmce_amd.c697 atomic_inc(&b->cpus); in threshold_create_bank()
720 atomic_set(&b->cpus, 1); in threshold_create_bank()
809 if (!atomic_dec_and_test(&b->cpus)) { in threshold_remove_bank()
/linux-4.4.14/arch/xtensa/boot/dts/
Dxtfpga.dtsi16 cpus {
/linux-4.4.14/arch/arm64/boot/dts/arm/
Djuno.dts33 cpus {
Drtsm_ve-aemv8a.dts30 cpus {
