/linux-4.4.14/tools/testing/selftests/rcutorture/bin/ |
D | cpus2use.sh |
    25  ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
    27  awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
    28  awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
|
D | kvm-build.sh |
    62  ncpus=`cpus2use.sh`
    63  make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $builddir/Make.out 2>&1
|
D | kvm.sh |
    207  awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus '
    294  -v ncpus=$cpus \
|
/linux-4.4.14/tools/perf/util/ |
D | counts.c |
    5    struct perf_counts *perf_counts__new(int ncpus, int nthreads)  in perf_counts__new() argument
    12   values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));  in perf_counts__new()
    42   int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads)  in perf_evsel__alloc_counts() argument
    44   evsel->counts = perf_counts__new(ncpus, nthreads);  in perf_evsel__alloc_counts()
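perf_counts__new() sizes a two-dimensional table with one slot per (cpu, thread) pair. Below is a minimal stand-alone sketch of that layout; the struct and helper names are hypothetical and only illustrate the ncpus x nthreads indexing, not perf's xyarray implementation.

```c
#include <stdlib.h>
#include <stdio.h>

struct counts_value {
	unsigned long long val, ena, run;   /* value, enabled time, running time */
};

struct counts_table {
	int ncpus, nthreads;
	struct counts_value values[];       /* ncpus * nthreads entries */
};

static struct counts_table *counts_table__new(int ncpus, int nthreads)
{
	struct counts_table *t;

	t = calloc(1, sizeof(*t) +
		      (size_t)ncpus * nthreads * sizeof(t->values[0]));
	if (!t)
		return NULL;
	t->ncpus = ncpus;
	t->nthreads = nthreads;
	return t;
}

/* Index the flat array by (cpu, thread), the same shape xyarray provides. */
static struct counts_value *counts_table__entry(struct counts_table *t,
						int cpu, int thread)
{
	return &t->values[cpu * t->nthreads + thread];
}

int main(void)
{
	struct counts_table *t = counts_table__new(4, 2);

	if (!t)
		return 1;
	counts_table__entry(t, 3, 1)->val = 42;
	printf("cpu 3, thread 1: %llu\n", counts_table__entry(t, 3, 1)->val);
	free(t);
	return 0;
}
```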
|
D | stat.c |
    126  int ncpus, int nthreads)  in perf_evsel__alloc_prev_raw_counts() argument
    130  counts = perf_counts__new(ncpus, nthreads);  in perf_evsel__alloc_prev_raw_counts()
    145  int ncpus = perf_evsel__nr_cpus(evsel);  in perf_evsel__alloc_stats() local
    149  perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||  in perf_evsel__alloc_stats()
    150  (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))  in perf_evsel__alloc_stats()
    287  int ncpus = perf_evsel__nr_cpus(counter);  in process_counter_maps() local
    294  for (cpu = 0; cpu < ncpus; cpu++) {  in process_counter_maps()
|
D | counts.h |
    30  struct perf_counts *perf_counts__new(int ncpus, int nthreads);
    34  int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads);
|
D | evsel.h |
    208  int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
    209  void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
    228  int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
    230  int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
    238  void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
|
D | evsel.c |
    905  static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)  in perf_evsel__alloc_fd() argument
    912  evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));  in perf_evsel__alloc_fd()
    915  for (cpu = 0; cpu < ncpus; cpu++) {  in perf_evsel__alloc_fd()
    925  static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,  in perf_evsel__run_ioctl() argument
    933  for (cpu = 0; cpu < ncpus; cpu++) {  in perf_evsel__run_ioctl()
    946  int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,  in perf_evsel__apply_filter() argument
    949  return perf_evsel__run_ioctl(evsel, ncpus, nthreads,  in perf_evsel__apply_filter()
    984  int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)  in perf_evsel__enable() argument
    986  return perf_evsel__run_ioctl(evsel, ncpus, nthreads,  in perf_evsel__enable()
    991  int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)  in perf_evsel__alloc_id() argument
    [all …]
|
D | stat.h |
    82  int ncpus, int nthreads);
|
D | data-convert-bt.c |
    864  int ncpus;  in setup_streams() local
    870  ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;  in setup_streams()
    872  stream = zalloc(sizeof(*stream) * ncpus);  in setup_streams()
    879  cw->stream_cnt = ncpus;  in setup_streams()
|
D | evlist.c |
    1229  const int ncpus = cpu_map__nr(evlist->cpus),  in perf_evlist__apply_filters() local
    1240  err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);  in perf_evlist__apply_filters()
    1430  int ncpus = cpu_map__nr(evlist->cpus);  in perf_evlist__close() local
    1435  n = evsel->cpus ? evsel->cpus->nr : ncpus;  in perf_evlist__close()
|
D | header.c |
    508  long ncpus;  in build_cpu_topology() local
    511  ncpus = sysconf(_SC_NPROCESSORS_CONF);  in build_cpu_topology()
    512  if (ncpus < 0)  in build_cpu_topology()
    515  nr = (u32)(ncpus & UINT_MAX);  in build_cpu_topology()
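build_cpu_topology() gets the configured-processor count from sysconf() and narrows the long result to a u32. A minimal stand-alone sketch of that query, assuming an ordinary glibc environment:

```c
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_CONF);
	uint32_t nr;

	if (ncpus < 0)
		return 1;                          /* query failed */

	nr = (uint32_t)(ncpus & UINT_MAX);         /* narrow the long result to u32 */
	printf("configured CPUs: %u\n", nr);
	return 0;
}
```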
|
/linux-4.4.14/arch/powerpc/platforms/powermac/ |
D | smp.c |
    273  int i, ncpus;  in smp_psurge_probe() local
    303  ncpus = 4;  in smp_psurge_probe()
    315  ncpus = 2;  in smp_psurge_probe()
    329  if (ncpus > NR_CPUS)  in smp_psurge_probe()
    330  ncpus = NR_CPUS;  in smp_psurge_probe()
    331  for (i = 1; i < ncpus ; ++i)  in smp_psurge_probe()
    569  static void __init smp_core99_setup_i2c_hwsync(int ncpus)  in smp_core99_setup_i2c_hwsync() argument
    701  static void __init smp_core99_setup(int ncpus)  in smp_core99_setup() argument
    709  smp_core99_setup_i2c_hwsync(ncpus);  in smp_core99_setup()
    757  for (i = 1; i < ncpus; ++i)  in smp_core99_setup()
    [all …]
|
/linux-4.4.14/arch/powerpc/kernel/ |
D | crash.c |
    108  unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */  in crash_kexec_prepare_cpus() local
    124  while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))  in crash_kexec_prepare_cpus()
    129  if (atomic_read(&cpus_in_crash) >= ncpus) {  in crash_kexec_prepare_cpus()
    135  ncpus - atomic_read(&cpus_in_crash));  in crash_kexec_prepare_cpus()
    166  while (atomic_read(&cpus_in_crash) < ncpus)  in crash_kexec_prepare_cpus()
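Here ncpus is the number of CPUs the panicking CPU expects to check in, and the code polls an atomic counter against a millisecond budget. A user-space analogue of that wait loop (names are hypothetical; the kernel path uses atomic_read() and mdelay()):

```c
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int cpus_in_crash;

/* In the kernel this increment happens in the IPI handler each
 * secondary CPU runs when it is told to stop. */
static void cpu_checked_in(void)
{
	atomic_fetch_add(&cpus_in_crash, 1);
}

static int wait_for_cpus(unsigned int ncpus, int msecs)
{
	while (atomic_load(&cpus_in_crash) < (int)ncpus && --msecs > 0)
		usleep(1000);                      /* roughly 1 ms per iteration */

	if (atomic_load(&cpus_in_crash) >= (int)ncpus)
		return 0;                          /* everyone checked in */

	fprintf(stderr, "still waiting on %d CPUs\n",
		(int)ncpus - atomic_load(&cpus_in_crash));
	return -1;
}

int main(void)
{
	cpu_checked_in();                          /* pretend one CPU arrived */
	return wait_for_cpus(1, 100) ? 1 : 0;
}
```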
|
/linux-4.4.14/drivers/clk/mvebu/ |
D | clk-cpu.c |
    173  int ncpus = 0;  in of_cpu_clk_setup() local
    187  ncpus++;  in of_cpu_clk_setup()
    189  cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);  in of_cpu_clk_setup()
    193  clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);  in of_cpu_clk_setup()
    238  while(ncpus--)  in of_cpu_clk_setup()
    239  kfree(cpuclk[ncpus].clk_name);  in of_cpu_clk_setup()
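of_cpu_clk_setup() first counts the CPU device-tree nodes, then sizes its per-CPU arrays from ncpus and unwinds with a decrementing loop on failure. A user-space sketch of that count-then-allocate-then-unwind pattern, with hypothetical names (the loop here frees only what was actually allocated):

```c
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cpu_clk { char *name; };

static struct cpu_clk *alloc_cpu_clks(int ncpus)
{
	struct cpu_clk *clks = calloc(ncpus, sizeof(*clks));
	int i;

	if (!clks)
		return NULL;

	for (i = 0; i < ncpus; i++) {
		char buf[16];

		snprintf(buf, sizeof(buf), "cpu%d", i);
		clks[i].name = strdup(buf);
		if (!clks[i].name)
			goto cleanup;
	}
	return clks;

cleanup:
	while (i--)                    /* unwind the entries allocated so far */
		free(clks[i].name);
	free(clks);
	return NULL;
}

int main(void)
{
	int i, ncpus = 4;
	struct cpu_clk *clks = alloc_cpu_clks(ncpus);

	if (!clks)
		return 1;
	printf("first clock: %s\n", clks[0].name);
	for (i = 0; i < ncpus; i++)
		free(clks[i].name);
	free(clks);
	return 0;
}
```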
|
/linux-4.4.14/arch/sparc/kernel/ |
D | setup_32.c |
    402  int i, ncpus, err;  in topology_init() local
    408  ncpus = 0;  in topology_init()
    409  while (!cpu_find_by_instance(ncpus, NULL, NULL))  in topology_init()
    410  ncpus++;  in topology_init()
    411  ncpus_probed = ncpus;  in topology_init()
|
D | ds.c |
    477  static int dr_cpu_size_response(int ncpus)  in dr_cpu_size_response() argument
    481  (sizeof(struct dr_cpu_resp_entry) * ncpus));  in dr_cpu_size_response()
    485  u64 handle, int resp_len, int ncpus,  in dr_cpu_init_response() argument
    500  tag->num_records = ncpus;  in dr_cpu_init_response()
    509  BUG_ON(i != ncpus);  in dr_cpu_init_response()
    512  static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,  in dr_cpu_mark() argument
    522  for (i = 0; i < ncpus; i++) {  in dr_cpu_mark()
    535  int resp_len, ncpus, cpu;  in dr_cpu_configure() local
    538  ncpus = cpumask_weight(mask);  in dr_cpu_configure()
    539  resp_len = dr_cpu_size_response(ncpus);  in dr_cpu_configure()
    [all …]
|
D | sun4m_smp.c |
    176  register int ncpus = SUN4M_NCPUS;  in sun4m_cross_call() local
    195  for (i = 0; i < ncpus; i++) {  in sun4m_cross_call()
    216  } while (++i < ncpus);  in sun4m_cross_call()
    224  } while (++i < ncpus);  in sun4m_cross_call()
|
D | smp_64.c |
    1152  int ncpus = num_online_cpus();  in smp_capture() local
    1161  while (atomic_read(&smp_capture_registry) != ncpus)  in smp_capture()
|
/linux-4.4.14/tools/perf/bench/ |
D | futex-wake.c |
    38   static unsigned int ncpus, threads_starting, nthreads = 0;  variable
    96   CPU_SET(i % ncpus, &cpu);  in block_threads()
    127  ncpus = sysconf(_SC_NPROCESSORS_ONLN);  in bench_futex_wake()
    134  nthreads = ncpus;  in bench_futex_wake()
|
D | futex-requeue.c |
    37   static unsigned int ncpus, threads_starting, nthreads = 0;  variable
    90   CPU_SET(i % ncpus, &cpu);  in block_threads()
    119  ncpus = sysconf(_SC_NPROCESSORS_ONLN);  in bench_futex_requeue()
    126  nthreads = ncpus;  in bench_futex_requeue()
|
D | futex-lock-pi.c |
    30   static unsigned int ncpus, nthreads = 0;  variable
    127  CPU_SET(i % ncpus, &cpu);  in create_threads()
    149  ncpus = sysconf(_SC_NPROCESSORS_ONLN);  in bench_futex_lock_pi()
    156  nthreads = ncpus;  in bench_futex_lock_pi()
|
D | futex-hash.c |
    115  unsigned int i, ncpus;  in bench_futex_hash() local
    125  ncpus = sysconf(_SC_NPROCESSORS_ONLN);  in bench_futex_hash()
    132  nthreads = ncpus;  in bench_futex_hash()
    159  CPU_SET(i % ncpus, &cpu);  in bench_futex_hash()
|
D | futex-wake-parallel.c |
    40   static unsigned int ncpus, threads_starting;  variable
    126  CPU_SET(i % ncpus, &cpu);  in block_threads()
    215  ncpus = sysconf(_SC_NPROCESSORS_ONLN);  in bench_futex_wake_parallel()
    217  nblocked_threads = ncpus;  in bench_futex_wake_parallel()
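The futex benchmarks above share one idiom: default the thread count to sysconf(_SC_NPROCESSORS_ONLN) and pin thread i to CPU i % ncpus. A stand-alone sketch of that idiom (worker() and its argument plumbing are invented for the example):

```c
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void *worker(void *arg)
{
	printf("thread %ld running on CPU %d\n", (long)arg, sched_getcpu());
	return NULL;
}

int main(void)
{
	unsigned int i, ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	unsigned int nthreads = ncpus;         /* default: one thread per CPU */
	pthread_t *threads = calloc(nthreads, sizeof(*threads));
	pthread_attr_t attr;
	cpu_set_t cpuset;

	if (!threads)
		return 1;
	pthread_attr_init(&attr);

	for (i = 0; i < nthreads; i++) {
		CPU_ZERO(&cpuset);
		CPU_SET(i % ncpus, &cpuset);   /* same i % ncpus pinning as the benches */
		if (pthread_attr_setaffinity_np(&attr, sizeof(cpuset), &cpuset))
			return 1;
		if (pthread_create(&threads[i], &attr, worker, (void *)(long)i))
			return 1;
	}
	for (i = 0; i < nthreads; i++)
		pthread_join(threads[i], NULL);

	pthread_attr_destroy(&attr);
	free(threads);
	return 0;
}
```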
|
/linux-4.4.14/drivers/xen/ |
D | mcelog.c |
    58   static uint32_t ncpus;  variable
    240  for (i = 0; i < ncpus; i++)  in convert_log()
    243  if (unlikely(i == ncpus)) {  in convert_log()
    370  ncpus = mc_op.u.mc_physcpuinfo.ncpus;  in bind_virq_for_mce()
    371  g_physinfo = kcalloc(ncpus, sizeof(struct mcinfo_logical_cpu),  in bind_virq_for_mce()
|
/linux-4.4.14/arch/mips/kernel/ |
D | crash.c |
    42  unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */  in crash_kexec_prepare_cpus() local
    53  while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {  in crash_kexec_prepare_cpus()
|
/linux-4.4.14/drivers/misc/sgi-gru/ |
D | grukservices.c |
    157  int ctxnum, ncpus;  in gru_load_kernel_context() local
    175  ncpus = uv_blade_nr_possible_cpus(blade_id);  in gru_load_kernel_context()
    177  GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);  in gru_load_kernel_context()
    179  GRU_NUM_KERNEL_DSR_BYTES * ncpus +  in gru_load_kernel_context()
    375  int ncpus;  in gru_lock_async_resource() local
    378  ncpus = uv_blade_nr_possible_cpus(blade_id);  in gru_lock_async_resource()
    380  *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;  in gru_lock_async_resource()
    382  *dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;  in gru_lock_async_resource()
|
/linux-4.4.14/arch/x86/platform/uv/ |
D | uv_time.c |
    64   int ncpus;  member
    181  head->ncpus = uv_blade_nr_possible_cpus(bid);  in uv_rtc_allocate_timers()
    200  for (c = 0; c < head->ncpus; c++) {  in uv_rtc_find_next_timer()
|
/linux-4.4.14/arch/xtensa/kernel/ |
D | smp.c |
    90  unsigned int ncpus = get_core_count();  in smp_init_cpus() local
    93  pr_info("%s: Core Count = %d\n", __func__, ncpus);  in smp_init_cpus()
    96  for (i = 0; i < ncpus; ++i)  in smp_init_cpus()
|
/linux-4.4.14/arch/um/include/shared/ |
D | kern_util.h |
    16  extern int ncpus;
|
/linux-4.4.14/drivers/block/aoe/ |
D | aoecmd.c |
    45    static int ncpus;  variable
    1273  actual_id = f->t->d->aoeminor % ncpus;  in ktio()
    1346  id = f->t->d->aoeminor % ncpus;  in ktcomplete()
    1708  for (i = 0; i < ncpus; i++) {  in aoe_flush_iocq()
    1758  ncpus = num_online_cpus();  in aoecmd_init()
    1760  iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL);  in aoecmd_init()
    1764  kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL);  in aoecmd_init()
    1770  ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL);  in aoecmd_init()
    1778  for (i = 0; i < ncpus; i++) {  in aoecmd_init()
    1811  for (i = 0; i < ncpus; i++)  in aoecmd_exit()
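aoecmd_init() allocates one completion queue per online CPU and later steers work with aoeminor % ncpus. A user-space sketch of that layout with hypothetical types; the driver itself uses kcalloc() and kernel wait queues:

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct iocq { int pending; };

static int ncpus;
static struct iocq *iocq;

static int iocq_init(void)
{
	ncpus = sysconf(_SC_NPROCESSORS_ONLN); /* driver uses num_online_cpus() */
	iocq = calloc(ncpus, sizeof(*iocq));
	return iocq ? 0 : -1;
}

/* Pick the queue a given device minor's completions are steered to. */
static struct iocq *iocq_for_minor(int aoeminor)
{
	return &iocq[aoeminor % ncpus];
}

int main(void)
{
	if (iocq_init())
		return 1;
	iocq_for_minor(10)->pending++;
	printf("minor 10 maps to queue %d, %d pending\n",
	       10 % ncpus, iocq_for_minor(10)->pending);
	free(iocq);
	return 0;
}
```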
|
/linux-4.4.14/tools/perf/ |
D | builtin-stat.c |
    195  int ncpus = perf_evsel__nr_cpus(counter);  in read_counter() local
    205  for (cpu = 0; cpu < ncpus; cpu++) {  in read_counter()
    252  const int ncpus = cpu_map__nr(evsel_list->cpus),  in handle_initial_delay() local
    257  perf_evsel__enable(counter, ncpus, nthreads);  in handle_initial_delay()
    636  int ncpus = cpu_map__nr(counter->cpus);  in print_aggr_thread() local
    643  for (cpu = 0; cpu < ncpus; cpu++) {  in print_aggr_thread()
|
/linux-4.4.14/include/trace/events/ |
D | xen.h |
    411  __field(unsigned, ncpus)
    416  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
    421  __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
|
/linux-4.4.14/include/xen/interface/ |
D | xen-mca.h |
    297  uint32_t ncpus;  member
|
/linux-4.4.14/arch/um/kernel/ |
D | um_arch.c |
    113  int ncpus = 1;  variable
|
D | process.c |
    51  for (i = 0; i < ncpus; i++) {  in pid_to_processor_id()
|
/linux-4.4.14/drivers/thermal/ |
D | cpu_cooling.c |
    594  u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus);  in cpufreq_get_requested_power() local
    596  load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);  in cpufreq_get_requested_power()
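cpufreq_get_requested_power() sizes its per-CPU load array from cpumask_weight(&allowed_cpus). A rough user-space analogue counts the set bits of the process affinity mask with CPU_COUNT() and allocates from that:

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	cpu_set_t mask;
	unsigned int ncpus;
	unsigned int *load_cpu;

	if (sched_getaffinity(0, sizeof(mask), &mask))
		return 1;

	ncpus = CPU_COUNT(&mask);              /* analogue of cpumask_weight() */
	load_cpu = calloc(ncpus, sizeof(*load_cpu));
	if (!load_cpu)
		return 1;

	printf("tracking load for %u allowed CPUs\n", ncpus);
	free(load_cpu);
	return 0;
}
```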
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_file_ops.c |
    1696  int ncpus = num_online_cpus(), curcpu = -1, nset = 0;  in find_best_unit() local
    1702  current->pid, i, ncpus);  in find_best_unit()
    1707  if (curcpu != -1 && nset != ncpus) {  in find_best_unit()
    1709  prefunit = curcpu / (ncpus / npresent);  in find_best_unit()
    1713  npresent, ncpus, ncpus / npresent,  in find_best_unit()
|
/linux-4.4.14/kernel/rcu/ |
D | tree.h |
    466  int ncpus;  /* # CPUs seen so far. */  member
|
D | tree.c |
    3415  int ncpus = READ_ONCE(rsp->ncpus);  in sync_exp_reset_tree_hotplug() local
    3420  if (likely(ncpus == rsp->ncpus_snap))  in sync_exp_reset_tree_hotplug()
    3422  rsp->ncpus_snap = ncpus;  in sync_exp_reset_tree_hotplug()
    4206  WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);  in rcu_init_percpu_data()
|
/linux-4.4.14/arch/powerpc/xmon/ |
D | xmon.c |
    489  int ncpus = num_online_cpus();  in xmon_core() local
    493  if (ncpus > 1) {  in xmon_core()
    497  if (cpumask_weight(&cpus_in_xmon) >= ncpus)  in xmon_core()
|
/linux-4.4.14/drivers/net/ethernet/myricom/myri10ge/ |
D | myri10ge.c |
    3837  int i, status, ncpus;  in myri10ge_probe_slices() local
    3840  ncpus = netif_get_num_default_rss_queues();  in myri10ge_probe_slices()
    3843  (myri10ge_max_slices == -1 && ncpus < 2))  in myri10ge_probe_slices()
    3902  myri10ge_max_slices = ncpus;  in myri10ge_probe_slices()
|