ncpus              59 arch/mips/kernel/crash.c 	unsigned int ncpus;
ncpus              64 arch/mips/kernel/crash.c 	ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
ncpus              75 arch/mips/kernel/crash.c 	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
ncpus             107 arch/powerpc/kernel/crash.c 	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
ncpus             114 arch/powerpc/kernel/crash.c 		ncpus = num_present_cpus() - 1;
ncpus             126 arch/powerpc/kernel/crash.c 	while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
ncpus             131 arch/powerpc/kernel/crash.c 	if (atomic_read(&cpus_in_crash) >= ncpus) {
ncpus             137 arch/powerpc/kernel/crash.c 		ncpus - atomic_read(&cpus_in_crash));
ncpus             168 arch/powerpc/kernel/crash.c 		while (atomic_read(&cpus_in_crash) < ncpus)
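Both crash handlers above follow the same shape: the panicking CPU computes ncpus = num_online_cpus() - 1 (everyone but itself), kicks the secondaries, then spins on a shared counter with a millisecond budget before giving up and reporting how many CPUs never arrived. A minimal userspace sketch of that shape, using C11 atomics in place of the kernel's cpus_in_crash counter (all names here are illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int cpus_in_crash;        /* stand-in for the kernel counter */

    /* Wait for `ncpus` secondaries to check in, giving up after `msecs`. */
    static int wait_for_crash_cpus(int ncpus, int msecs)
    {
        while (atomic_load(&cpus_in_crash) < ncpus && --msecs > 0)
            usleep(1000);                   /* the kernel uses mdelay(1) */
        return atomic_load(&cpus_in_crash) >= ncpus;
    }

    int main(void)
    {
        /* Everyone but the "panic" CPU; counting ourselves would make
         * the wait condition unsatisfiable. */
        int ncpus = (int)sysconf(_SC_NPROCESSORS_ONLN) - 1;

        if (!wait_for_crash_cpus(ncpus, 50 /* kernel budget is ~10000 */))
            fprintf(stderr, "stalled, %d cpus missing\n",
                    ncpus - atomic_load(&cpus_in_crash));
        return 0;
    }

The bounded countdown matters: a CPU wedged with interrupts off must not be allowed to stall the crash dump forever.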
ncpus             269 arch/powerpc/platforms/powermac/smp.c 	int i, ncpus;
ncpus             299 arch/powerpc/platforms/powermac/smp.c 		ncpus = 4;
ncpus             311 arch/powerpc/platforms/powermac/smp.c 		ncpus = 2;
ncpus             325 arch/powerpc/platforms/powermac/smp.c 	if (ncpus > NR_CPUS)
ncpus             326 arch/powerpc/platforms/powermac/smp.c 		ncpus = NR_CPUS;
ncpus             327 arch/powerpc/platforms/powermac/smp.c 	for (i = 1; i < ncpus ; ++i)
ncpus             566 arch/powerpc/platforms/powermac/smp.c static void __init smp_core99_setup_i2c_hwsync(int ncpus)
ncpus             698 arch/powerpc/platforms/powermac/smp.c static void __init smp_core99_setup(int ncpus)
ncpus             706 arch/powerpc/platforms/powermac/smp.c 		smp_core99_setup_i2c_hwsync(ncpus);
ncpus             754 arch/powerpc/platforms/powermac/smp.c 		for (i = 1; i < ncpus; ++i)
ncpus             767 arch/powerpc/platforms/powermac/smp.c 	int ncpus = 0;
ncpus             773 arch/powerpc/platforms/powermac/smp.c 		++ncpus;
ncpus             775 arch/powerpc/platforms/powermac/smp.c 	printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
ncpus             778 arch/powerpc/platforms/powermac/smp.c 	if (ncpus <= 1)
ncpus             788 arch/powerpc/platforms/powermac/smp.c 	smp_core99_setup(ncpus);
ncpus             458 arch/powerpc/xmon/xmon.c static bool wait_for_other_cpus(int ncpus)
ncpus             464 arch/powerpc/xmon/xmon.c 		if (cpumask_weight(&cpus_in_xmon) >= ncpus)
ncpus             592 arch/powerpc/xmon/xmon.c 		int ncpus = num_online_cpus();
ncpus             596 arch/powerpc/xmon/xmon.c 		if (ncpus > 1) {
ncpus             604 arch/powerpc/xmon/xmon.c 			if (TRAP(regs) != 0x100 || !wait_for_other_cpus(ncpus))
ncpus             607 arch/powerpc/xmon/xmon.c 			wait_for_other_cpus(ncpus);
ncpus             479 arch/sparc/kernel/ds.c static int dr_cpu_size_response(int ncpus)
ncpus             483 arch/sparc/kernel/ds.c 		(sizeof(struct dr_cpu_resp_entry) * ncpus));
ncpus             487 arch/sparc/kernel/ds.c 				 u64 handle, int resp_len, int ncpus,
ncpus             502 arch/sparc/kernel/ds.c 	tag->num_records = ncpus;
ncpus             511 arch/sparc/kernel/ds.c 	BUG_ON(i != ncpus);
ncpus             514 arch/sparc/kernel/ds.c static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
ncpus             524 arch/sparc/kernel/ds.c 	for (i = 0; i < ncpus; i++) {
ncpus             537 arch/sparc/kernel/ds.c 	int resp_len, ncpus, cpu;
ncpus             540 arch/sparc/kernel/ds.c 	ncpus = cpumask_weight(mask);
ncpus             541 arch/sparc/kernel/ds.c 	resp_len = dr_cpu_size_response(ncpus);
ncpus             547 arch/sparc/kernel/ds.c 			     resp_len, ncpus, mask,
ncpus             574 arch/sparc/kernel/ds.c 			dr_cpu_mark(resp, cpu, ncpus, res, stat);
ncpus             596 arch/sparc/kernel/ds.c 	int resp_len, ncpus, cpu;
ncpus             599 arch/sparc/kernel/ds.c 	ncpus = cpumask_weight(mask);
ncpus             600 arch/sparc/kernel/ds.c 	resp_len = dr_cpu_size_response(ncpus);
ncpus             606 arch/sparc/kernel/ds.c 			     resp_len, ncpus, mask,
ncpus             616 arch/sparc/kernel/ds.c 			dr_cpu_mark(resp, cpu, ncpus,
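The sparc DR code above sizes one variable-length response per request: a fixed header plus one fixed-size record per CPU in the mask, with the count echoed in tag->num_records and sanity-checked by BUG_ON(i != ncpus) after the fill loop. A sketch of that count-prefixed layout with a C99 flexible array member (struct names are illustrative, not the real ds.c types):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for the ds.c response layout. */
    struct resp_tag   { unsigned int num_records; };
    struct resp_entry { unsigned int cpu; unsigned int result; };

    struct resp {
        struct resp_tag tag;
        struct resp_entry recs[];   /* one record per CPU in the request mask */
    };

    /* Mirror of dr_cpu_size_response(): header plus ncpus fixed-size entries. */
    static size_t resp_size(int ncpus)
    {
        return sizeof(struct resp) + sizeof(struct resp_entry) * ncpus;
    }

    int main(void)
    {
        int ncpus = 4;              /* cpumask_weight(mask) in the kernel */
        struct resp *r = calloc(1, resp_size(ncpus));
        int i;

        if (!r)
            return 1;
        r->tag.num_records = ncpus;
        for (i = 0; i < ncpus; i++)
            r->recs[i].cpu = i;     /* fill loop; ds.c BUG_ONs i != ncpus */
        printf("%u records, %zu bytes\n", r->tag.num_records, resp_size(ncpus));
        free(r);
        return 0;
    }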
ncpus             401 arch/sparc/kernel/setup_32.c 	int i, ncpus, err;
ncpus             407 arch/sparc/kernel/setup_32.c 	ncpus = 0;
ncpus             408 arch/sparc/kernel/setup_32.c 	while (!cpu_find_by_instance(ncpus, NULL, NULL))
ncpus             409 arch/sparc/kernel/setup_32.c 		ncpus++;
ncpus             410 arch/sparc/kernel/setup_32.c 	ncpus_probed = ncpus;
ncpus            1175 arch/sparc/kernel/smp_64.c 		int ncpus = num_online_cpus();
ncpus            1184 arch/sparc/kernel/smp_64.c 		while (atomic_read(&smp_capture_registry) != ncpus)
ncpus             177 arch/sparc/kernel/sun4m_smp.c 		register int ncpus = SUN4M_NCPUS;
ncpus             196 arch/sparc/kernel/sun4m_smp.c 			for (i = 0; i < ncpus; i++) {
ncpus             217 arch/sparc/kernel/sun4m_smp.c 			} while (++i < ncpus);
ncpus             225 arch/sparc/kernel/sun4m_smp.c 			} while (++i < ncpus);
ncpus              16 arch/um/include/shared/kern_util.h extern int ncpus;
ncpus              54 arch/um/kernel/process.c 	for (i = 0; i < ncpus; i++) {
ncpus             109 arch/um/kernel/um_arch.c int ncpus = 1;
ncpus              16 arch/x86/include/asm/trace/hyperv.h 		    __field(unsigned int, ncpus)
ncpus              21 arch/x86/include/asm/trace/hyperv.h 	    TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
ncpus              27 arch/x86/include/asm/trace/hyperv.h 		      __entry->ncpus, __entry->mm,
ncpus              64 arch/x86/include/asm/trace/hyperv.h 		    __field(unsigned int, ncpus)
ncpus              67 arch/x86/include/asm/trace/hyperv.h 	    TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
ncpus              71 arch/x86/include/asm/trace/hyperv.h 		      __entry->ncpus, __entry->vector)
ncpus             226 arch/x86/kernel/kvmclock.c 	unsigned long ncpus;
ncpus             234 arch/x86/kernel/kvmclock.c 	ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
ncpus             235 arch/x86/kernel/kvmclock.c 	order = get_order(ncpus * sizeof(*hvclock_mem));
ncpus            1408 arch/x86/kernel/smpboot.c 	int ncpus;
ncpus            1414 arch/x86/kernel/smpboot.c 	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
ncpus            1415 arch/x86/kernel/smpboot.c 	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
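smpboot derives the logical CPUs per package as booted cores times the maximum SMT threads per core, then divides the total CPU count by that, rounding up so a partially populated last package is still counted. DIV_ROUND_UP is the standard kernel macro; a self-contained check of the arithmetic with made-up numbers:

    #include <stdio.h>

    /* Same definition the kernel uses in include/linux/kernel.h. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Made-up topology: 10 CPUs total, 2 cores x 2 SMT threads
         * per package. */
        int total_cpus = 10;
        int ncpus = 2 * 2;          /* booted_cores * max SMT threads */

        /* Rounding up counts the half-filled last package: 3, not 2. */
        printf("max packages = %d\n", DIV_ROUND_UP(total_cpus, ncpus));
        return 0;
    }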
ncpus              51 arch/x86/platform/uv/uv_time.c 	int		ncpus;
ncpus             168 arch/x86/platform/uv/uv_time.c 			head->ncpus = uv_blade_nr_possible_cpus(bid);
ncpus             187 arch/x86/platform/uv/uv_time.c 	for (c = 0; c < head->ncpus; c++) {
ncpus              93 arch/xtensa/kernel/smp.c 	unsigned int ncpus = get_core_count();
ncpus              96 arch/xtensa/kernel/smp.c 	pr_info("%s: Core Count = %d\n", __func__, ncpus);
ncpus              99 arch/xtensa/kernel/smp.c 	if (ncpus > NR_CPUS) {
ncpus             100 arch/xtensa/kernel/smp.c 		ncpus = NR_CPUS;
ncpus             101 arch/xtensa/kernel/smp.c 		pr_info("%s: limiting core count by %d\n", __func__, ncpus);
ncpus             104 arch/xtensa/kernel/smp.c 	for (i = 0; i < ncpus; ++i)
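xtensa here and the powermac probe earlier in this listing both clamp the probed core count to the compile-time NR_CPUS limit before looping, since per-CPU arrays are sized at build time. A tiny sketch of why the clamp must precede the loop (the NR_CPUS value and probe result are illustrative):

    #include <stdio.h>

    #define NR_CPUS 8               /* compile-time limit, as in the kernel */

    static int cpu_present[NR_CPUS];            /* fixed-size, hence the clamp */

    static unsigned int get_core_count(void) { return 12; }  /* pretend probe */

    int main(void)
    {
        unsigned int ncpus = get_core_count();
        unsigned int i;

        printf("Core Count = %u\n", ncpus);
        if (ncpus > NR_CPUS) {
            ncpus = NR_CPUS;        /* never index past the fixed arrays */
            printf("limiting core count to %u\n", ncpus);
        }
        for (i = 0; i < ncpus; ++i)
            cpu_present[i] = 1;     /* per-cpu init would go here */
        return 0;
    }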
ncpus              45 drivers/block/aoe/aoecmd.c static int ncpus;
ncpus            1212 drivers/block/aoe/aoecmd.c 		actual_id = f->t->d->aoeminor % ncpus;
ncpus            1285 drivers/block/aoe/aoecmd.c 	id = f->t->d->aoeminor % ncpus;
ncpus            1645 drivers/block/aoe/aoecmd.c 	for (i = 0; i < ncpus; i++) {
ncpus            1695 drivers/block/aoe/aoecmd.c 	ncpus = num_online_cpus();
ncpus            1697 drivers/block/aoe/aoecmd.c 	iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL);
ncpus            1701 drivers/block/aoe/aoecmd.c 	kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL);
ncpus            1707 drivers/block/aoe/aoecmd.c 	ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL);
ncpus            1715 drivers/block/aoe/aoecmd.c 	for (i = 0; i < ncpus; i++) {
ncpus            1748 drivers/block/aoe/aoecmd.c 	for (i = 0; i < ncpus; i++)
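aoecmd snapshots num_online_cpus() once at init, kcallocs three parallel per-queue arrays of that length, and routes every device to a worker queue by aoeminor % ncpus. A userspace sketch of that hashing scheme (struct and function names are stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    struct iocq {                   /* stand-in for struct iocq_ktio */
        int pending;
    };

    static int ncpus;
    static struct iocq *iocq;

    /* Stable device-to-queue mapping (the aoeminor % ncpus seen above). */
    static int queue_for_minor(int aoeminor)
    {
        return aoeminor % ncpus;
    }

    int main(void)
    {
        int i, minor;

        ncpus = 4;                  /* num_online_cpus() sampled at init */
        iocq = calloc(ncpus, sizeof(*iocq));
        if (!iocq)
            return 1;
        for (minor = 0; minor < 10; minor++)
            iocq[queue_for_minor(minor)].pending++;
        for (i = 0; i < ncpus; i++)
            printf("queue %d: %d requests\n", i, iocq[i].pending);
        free(iocq);
        return 0;
    }

Because ncpus is captured once at init, later CPU hotplug does not perturb the device-to-queue mapping.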
ncpus             171 drivers/clk/mvebu/clk-cpu.c 	int ncpus = 0;
ncpus             185 drivers/clk/mvebu/clk-cpu.c 		ncpus++;
ncpus             187 drivers/clk/mvebu/clk-cpu.c 	cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
ncpus             191 drivers/clk/mvebu/clk-cpu.c 	clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
ncpus             236 drivers/clk/mvebu/clk-cpu.c 	while(ncpus--)
ncpus             237 drivers/clk/mvebu/clk-cpu.c 		kfree(cpuclk[ncpus].clk_name);
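The mvebu error path is the classic reverse unwind: while(ncpus--) kfree(cpuclk[ncpus].clk_name) walks the count back down over every slot, relying on kfree(NULL) being a no-op so a partially populated kcalloc'd array needs no extra bookkeeping. The same idiom in userspace C, where free(NULL) is likewise a no-op:

    #include <stdlib.h>
    #include <string.h>

    struct cpu_clk {
        char *clk_name;             /* stand-in for the driver's per-cpu record */
    };

    int main(void)
    {
        int ncpus = 4;              /* one slot per counted "cpu" DT node */
        struct cpu_clk *cpuclk = calloc(ncpus, sizeof(*cpuclk));
        int i;

        if (!cpuclk)
            return 1;
        for (i = 0; i < ncpus; i++)
            cpuclk[i].clk_name = strdup("cpu-clk");  /* may leave NULL holes */

        /* Error-path idiom from the driver: walk the count back down and
         * free every slot; NULL holes from calloc are freed harmlessly. */
        while (ncpus--)
            free(cpuclk[ncpus].clk_name);
        free(cpuclk);
        return 0;
    }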
ncpus             438 drivers/gpu/drm/i915/selftests/i915_request.c 	unsigned int ncpus = num_online_cpus();
ncpus             449 drivers/gpu/drm/i915/selftests/i915_request.c 	threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
ncpus             470 drivers/gpu/drm/i915/selftests/i915_request.c 	for (n = 0; n < ncpus; n++) {
ncpus             475 drivers/gpu/drm/i915/selftests/i915_request.c 			ncpus = n;
ncpus             484 drivers/gpu/drm/i915/selftests/i915_request.c 	for (n = 0; n < ncpus; n++) {
ncpus             496 drivers/gpu/drm/i915/selftests/i915_request.c 		ncpus);
ncpus            1106 drivers/gpu/drm/i915/selftests/i915_request.c 	unsigned int ncpus = num_online_cpus();
ncpus            1133 drivers/gpu/drm/i915/selftests/i915_request.c 	threads = kcalloc(ncpus * I915_NUM_ENGINES,
ncpus            1179 drivers/gpu/drm/i915/selftests/i915_request.c 		for (n = 0; n < ncpus; n++) {
ncpus            1191 drivers/gpu/drm/i915/selftests/i915_request.c 			threads[id * ncpus + n] = tsk;
ncpus            1202 drivers/gpu/drm/i915/selftests/i915_request.c 		for (n = 0; n < ncpus; n++) {
ncpus            1203 drivers/gpu/drm/i915/selftests/i915_request.c 			struct task_struct *tsk = threads[id * ncpus + n];
ncpus            1220 drivers/gpu/drm/i915/selftests/i915_request.c 		num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
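Both i915 selftests spawn one thread per online CPU (times the engine count in the second test). The `ncpus = n` assignment in the first test is the key move: when thread creation fails midway, the count is truncated so the join loop only waits on threads that actually started. A pthread sketch of that partial-failure pattern:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
        (void)arg;                  /* per-cpu smoke-test body would go here */
        return NULL;
    }

    int main(void)
    {
        unsigned int ncpus = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN);
        pthread_t *threads = calloc(ncpus, sizeof(*threads));
        unsigned int n;

        if (!threads)
            return 1;
        for (n = 0; n < ncpus; n++) {
            if (pthread_create(&threads[n], NULL, worker, NULL)) {
                ncpus = n;          /* truncate: join only started threads */
                break;
            }
        }
        for (n = 0; n < ncpus; n++)
            pthread_join(threads[n], NULL);
        printf("ran %u worker threads\n", ncpus);
        free(threads);
        return 0;
    }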
ncpus             144 drivers/misc/sgi-gru/grukservices.c 	int ctxnum, ncpus;
ncpus             162 drivers/misc/sgi-gru/grukservices.c 		ncpus = uv_blade_nr_possible_cpus(blade_id);
ncpus             164 drivers/misc/sgi-gru/grukservices.c 			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
ncpus             166 drivers/misc/sgi-gru/grukservices.c 			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
ncpus             362 drivers/misc/sgi-gru/grukservices.c 	int ncpus;
ncpus             365 drivers/misc/sgi-gru/grukservices.c 	ncpus = uv_blade_nr_possible_cpus(blade_id);
ncpus             367 drivers/misc/sgi-gru/grukservices.c 		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
ncpus             369 drivers/misc/sgi-gru/grukservices.c 		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
ncpus            3638 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	int i, status, ncpus;
ncpus            3641 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ncpus = netif_get_num_default_rss_queues();
ncpus            3644 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	    (myri10ge_max_slices == -1 && ncpus < 2))
ncpus            3703 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		myri10ge_max_slices = ncpus;
ncpus             375 drivers/thermal/cpu_cooling.c 		u32 ncpus = cpumask_weight(policy->related_cpus);
ncpus             377 drivers/thermal/cpu_cooling.c 		load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
ncpus              58 drivers/xen/mcelog.c static uint32_t ncpus;
ncpus             240 drivers/xen/mcelog.c 	for (i = 0; i < ncpus; i++)
ncpus             243 drivers/xen/mcelog.c 	if (unlikely(i == ncpus)) {
ncpus             368 drivers/xen/mcelog.c 	ncpus = mc_op.u.mc_physcpuinfo.ncpus;
ncpus             369 drivers/xen/mcelog.c 	g_physinfo = kcalloc(ncpus, sizeof(struct mcinfo_logical_cpu),
ncpus             374 include/trace/events/xen.h 		    __field(unsigned, ncpus)
ncpus             379 include/trace/events/xen.h 	    TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
ncpus             384 include/trace/events/xen.h 		      __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
ncpus             297 include/xen/interface/xen-mca.h 	uint32_t ncpus;
ncpus             103 kernel/irq/affinity.c 		unsigned ncpus;
ncpus             112 kernel/irq/affinity.c 	return ln->ncpus - rn->ncpus;
ncpus             139 kernel/irq/affinity.c 		node_vectors[n].ncpus = UINT_MAX;
ncpus             143 kernel/irq/affinity.c 		unsigned ncpus;
ncpus             146 kernel/irq/affinity.c 		ncpus = cpumask_weight(nmsk);
ncpus             148 kernel/irq/affinity.c 		if (!ncpus)
ncpus             150 kernel/irq/affinity.c 		remaining_ncpus += ncpus;
ncpus             151 kernel/irq/affinity.c 		node_vectors[n].ncpus = ncpus;
ncpus             228 kernel/irq/affinity.c 		unsigned nvectors, ncpus;
ncpus             230 kernel/irq/affinity.c 		if (node_vectors[n].ncpus == UINT_MAX)
ncpus             235 kernel/irq/affinity.c 		ncpus = node_vectors[n].ncpus;
ncpus             237 kernel/irq/affinity.c 				 numvecs * ncpus / remaining_ncpus);
ncpus             238 kernel/irq/affinity.c 		WARN_ON_ONCE(nvectors > ncpus);
ncpus             242 kernel/irq/affinity.c 		remaining_ncpus -= ncpus;
ncpus             291 kernel/irq/affinity.c 		unsigned int ncpus, v;
ncpus             299 kernel/irq/affinity.c 		ncpus = cpumask_weight(nmsk);
ncpus             300 kernel/irq/affinity.c 		if (!ncpus)
ncpus             303 kernel/irq/affinity.c 		WARN_ON_ONCE(nv->nvectors > ncpus);
ncpus             306 kernel/irq/affinity.c 		extra_vecs = ncpus - nv->nvectors * (ncpus / nv->nvectors);
ncpus             310 kernel/irq/affinity.c 			cpus_per_vec = ncpus / nv->nvectors;
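kernel/irq/affinity.c spreads managed interrupt vectors across NUMA nodes in proportion to each node's CPU count: nodes are sorted by ncpus (the comparator at the top of this group), each takes max(1, numvecs * ncpus / remaining_ncpus) of the remaining vectors, and within a node the ncpus % nvectors remainder CPUs go one apiece to the first vectors. A worked example of the arithmetic (node sizes and vector budget are made up):

    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        /* Made-up topology: three nodes sorted by CPU count, as the
         * kernel sorts them, sharing 8 vectors over 10 CPUs. */
        unsigned int node_cpus[] = { 1, 3, 6 };
        unsigned int numvecs = 8, remaining_ncpus = 10;
        int n;

        for (n = 0; n < 3; n++) {
            unsigned int ncpus = node_cpus[n];
            /* Proportional share of the remaining vectors, floored at 1. */
            unsigned int nvectors = MAX(1, numvecs * ncpus / remaining_ncpus);
            /* First `extra` vectors carry one CPU more than the rest;
             * extra is just ncpus % nvectors, written as in the kernel. */
            unsigned int extra = ncpus - nvectors * (ncpus / nvectors);
            unsigned int v;

            for (v = 0; v < nvectors; v++)
                printf("node %d vec %u -> %u cpus\n",
                       n, v, ncpus / nvectors + (v < extra));
            remaining_ncpus -= ncpus;
            numvecs -= nvectors;
        }
        return 0;
    }

With these numbers the nodes receive 1, 2, and 5 vectors, exhausting all 8, and no vector ends up with zero CPUs, which is what the WARN_ON_ONCE checks above are guarding.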
ncpus             328 kernel/locking/test-ww_mutex.c static int test_cycle(unsigned int ncpus)
ncpus             333 kernel/locking/test-ww_mutex.c 	for (n = 2; n <= ncpus + 1; n++) {
ncpus             583 kernel/locking/test-ww_mutex.c 	int ncpus = num_online_cpus();
ncpus             606 kernel/locking/test-ww_mutex.c 	ret = test_cycle(ncpus);
ncpus             610 kernel/locking/test-ww_mutex.c 	ret = stress(16, 2*ncpus, STRESS_INORDER);
ncpus             614 kernel/locking/test-ww_mutex.c 	ret = stress(16, 2*ncpus, STRESS_REORDER);
ncpus             618 kernel/locking/test-ww_mutex.c 	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
ncpus            3147 kernel/rcu/tree.c 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */
ncpus             295 kernel/rcu/tree.h 	int ncpus;				/* # CPUs seen so far. */
ncpus              77 kernel/rcu/tree_exp.h 	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
ncpus              82 kernel/rcu/tree_exp.h 	if (likely(ncpus == rcu_state.ncpus_snap))
ncpus              84 kernel/rcu/tree_exp.h 	rcu_state.ncpus_snap = ncpus;
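rcu_state.ncpus is a publish/subscribe counter: the hotplug path release-stores the new count only after the new CPUs' bookkeeping is written, the expedited path acquire-loads it, and ncpus_snap lets the common case skip a rescan when nothing came online since last time. A C11-atomics sketch of the ordering (assumes a single writer, as the kernel's hotplug path effectively is here):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic int rcu_ncpus;   /* release-published, acquire-consumed */
    static int ncpus_snap;          /* reader-side cache of the last count */

    /* Writer: per-cpu state is written first, then the count is
     * release-stored so an acquire reader that sees the new count also
     * sees that state. */
    static void cpu_came_online(void)
    {
        /* ... per-cpu bookkeeping writes go here ... */
        int cur = atomic_load_explicit(&rcu_ncpus, memory_order_relaxed);
        atomic_store_explicit(&rcu_ncpus, cur + 1, memory_order_release);
    }

    /* Reader: rescan only when the published count has moved. */
    static bool need_rescan(void)
    {
        int ncpus = atomic_load_explicit(&rcu_ncpus, memory_order_acquire);

        if (ncpus == ncpus_snap)
            return false;           /* fast path: nothing new since last scan */
        ncpus_snap = ncpus;
        return true;
    }

    int main(void)
    {
        cpu_came_online();
        printf("rescan? %d\n", need_rescan());  /* 1: a CPU appeared */
        printf("rescan? %d\n", need_rescan());  /* 0: snapshot is current */
        return 0;
    }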
ncpus            1927 tools/perf/builtin-sched.c 	u32 ncpus = sched->max_cpu + 1;
ncpus            1934 tools/perf/builtin-sched.c 		for (i = 0, j = 0; i < ncpus; ++i) {
ncpus            1956 tools/perf/builtin-sched.c 		printf(" %*s ", ncpus, "");
ncpus            1972 tools/perf/builtin-sched.c 		printf(" %.*s ", ncpus, graph_dotted_line);
ncpus            1922 tools/perf/builtin-script.c 	int ncpus = perf_evsel__nr_cpus(counter);
ncpus            1936 tools/perf/builtin-script.c 		for (cpu = 0; cpu < ncpus; cpu++) {
ncpus             271 tools/perf/builtin-stat.c 	int ncpus, cpu, thread;
ncpus             274 tools/perf/builtin-stat.c 		ncpus = perf_evsel__nr_cpus(counter);
ncpus             276 tools/perf/builtin-stat.c 		ncpus = 1;
ncpus             285 tools/perf/builtin-stat.c 		for (cpu = 0; cpu < ncpus; cpu++) {
ncpus              42 tools/perf/lib/evsel.c int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
ncpus              44 tools/perf/lib/evsel.c 	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
ncpus              48 tools/perf/lib/evsel.c 		for (cpu = 0; cpu < ncpus; cpu++) {
ncpus             234 tools/perf/lib/evsel.c int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
ncpus             236 tools/perf/lib/evsel.c 	if (ncpus == 0 || nthreads == 0)
ncpus             242 tools/perf/lib/evsel.c 	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
ncpus             246 tools/perf/lib/evsel.c 	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
ncpus              53 tools/perf/lib/include/internal/evsel.h int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
ncpus              59 tools/perf/lib/include/internal/evsel.h int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
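libperf sizes its per-event tables as ncpus x nthreads grids: fd and sample_id via xyarray__new(), plus a flat ncpus * nthreads array of u64 ids. A minimal stand-in for that 2D layout (the real struct xyarray carries more fields):

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal stand-in for libperf's xyarray: a flat grid indexed (x, y). */
    struct xyarray {
        size_t row_size;            /* bytes per x row */
        size_t entry_size;          /* bytes per (x, y) cell */
        char contents[];
    };

    static struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
    {
        struct xyarray *xy = calloc(1, sizeof(*xy) + xlen * ylen * entry_size);

        if (xy) {
            xy->entry_size = entry_size;
            xy->row_size = ylen * entry_size;
        }
        return xy;
    }

    static void *xyarray__entry(struct xyarray *xy, int x, int y)
    {
        return &xy->contents[x * xy->row_size + y * xy->entry_size];
    }

    int main(void)
    {
        int ncpus = 4, nthreads = 2, cpu, thread;
        struct xyarray *fds = xyarray__new(ncpus, nthreads, sizeof(int));

        if (!fds)
            return 1;
        /* Mirror perf_evsel__alloc_fd(): mark every (cpu, thread) slot
         * as not yet opened. */
        for (cpu = 0; cpu < ncpus; cpu++)
            for (thread = 0; thread < nthreads; thread++)
                *(int *)xyarray__entry(fds, cpu, thread) = -1;
        printf("fd[3][1] = %d\n", *(int *)xyarray__entry(fds, 3, 1));
        free(fds);
        return 0;
    }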
ncpus               8 tools/perf/util/counts.c struct perf_counts *perf_counts__new(int ncpus, int nthreads)
ncpus              15 tools/perf/util/counts.c 		values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));
ncpus              23 tools/perf/util/counts.c 		values = xyarray__new(ncpus, nthreads, sizeof(bool));
ncpus              56 tools/perf/util/counts.c int perf_evsel__alloc_counts(struct evsel *evsel, int ncpus, int nthreads)
ncpus              58 tools/perf/util/counts.c 	evsel->counts = perf_counts__new(ncpus, nthreads);
ncpus              38 tools/perf/util/counts.h struct perf_counts *perf_counts__new(int ncpus, int nthreads);
ncpus              42 tools/perf/util/counts.h int perf_evsel__alloc_counts(struct evsel *evsel, int ncpus, int nthreads);
ncpus             180 tools/perf/util/cputopo.c 	long ncpus;
ncpus             185 tools/perf/util/cputopo.c 	ncpus = cpu__max_present_cpu();
ncpus             194 tools/perf/util/cputopo.c 	nr = (u32)(ncpus & UINT_MAX);
ncpus            1331 tools/perf/util/data-convert-bt.c 	int ncpus;
ncpus            1337 tools/perf/util/data-convert-bt.c 	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
ncpus            1339 tools/perf/util/data-convert-bt.c 	stream = zalloc(sizeof(*stream) * ncpus);
ncpus            1346 tools/perf/util/data-convert-bt.c 	cw->stream_cnt = ncpus;
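The `?:` in `ph->env.nr_cpus_avail ?: MAX_CPUS` above is the GNU "elvis" extension: evaluate the left operand once and fall back to the right when it is zero, here defaulting the stream count when the perf header carries no CPU count. Illustrated (the MAX_CPUS value is made up):

    #include <stdio.h>

    #define MAX_CPUS 1024           /* illustrative fallback */

    int main(void)
    {
        int nr_cpus_avail = 0;      /* pretend the header had no count */
        /* GNU extension: `a ?: b` is `a ? a : b` with `a` evaluated once. */
        int ncpus = nr_cpus_avail ?: MAX_CPUS;

        printf("allocating %d streams\n", ncpus);
        return 0;
    }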
ncpus             696 tools/perf/util/stat-display.c 					int nthreads, int ncpus,
ncpus             711 tools/perf/util/stat-display.c 		for (cpu = 0; cpu < ncpus; cpu++) {
ncpus             749 tools/perf/util/stat-display.c 	int ncpus = perf_cpu_map__nr(counter->core.cpus);
ncpus             753 tools/perf/util/stat-display.c 	buf = sort_aggr_thread(counter, nthreads, ncpus, &sorted_threads, _target);
ncpus             148 tools/perf/util/stat.c 					     int ncpus, int nthreads)
ncpus             152 tools/perf/util/stat.c 	counts = perf_counts__new(ncpus, nthreads);
ncpus             176 tools/perf/util/stat.c 	int ncpus = perf_evsel__nr_cpus(evsel);
ncpus             180 tools/perf/util/stat.c 	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
ncpus             181 tools/perf/util/stat.c 	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
ncpus             336 tools/perf/util/stat.c 	int ncpus = perf_evsel__nr_cpus(counter);
ncpus             343 tools/perf/util/stat.c 		for (cpu = 0; cpu < ncpus; cpu++) {