
Searched refs:threads (Results 1 – 156 of 156) sorted by relevance

/linux-4.1.27/tools/perf/util/
thread_map.c:25 struct thread_map *threads; in thread_map__new_by_pid() local
36 threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); in thread_map__new_by_pid()
37 if (threads != NULL) { in thread_map__new_by_pid()
39 threads->map[i] = atoi(namelist[i]->d_name); in thread_map__new_by_pid()
40 threads->nr = items; in thread_map__new_by_pid()
47 return threads; in thread_map__new_by_pid()
52 struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); in thread_map__new_by_tid() local
54 if (threads != NULL) { in thread_map__new_by_tid()
55 threads->map[0] = tid; in thread_map__new_by_tid()
56 threads->nr = 1; in thread_map__new_by_tid()
[all …]
thread_map.h:21 void thread_map__delete(struct thread_map *threads);
23 size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
25 static inline int thread_map__nr(struct thread_map *threads) in thread_map__nr() argument
27 return threads ? threads->nr : 1; in thread_map__nr()
values.c:14 values->threads = 0; in perf_read_values_init()
33 for (i = 0; i < values->threads; i++) in perf_read_values_destroy()
62 for (i = 0; i < values->threads; i++) in perf_read_values__findnew_thread()
66 if (values->threads == values->threads_max) in perf_read_values__findnew_thread()
69 i = values->threads++; in perf_read_values__findnew_thread()
91 for (i = 0; i < values->threads; i++) { in perf_read_values__enlarge_counters()
144 for (i = 0; i < values->threads; i++) { in perf_read_values__display_pretty()
165 for (i = 0; i < values->threads; i++) { in perf_read_values__display_pretty()
188 for (i = 0; i < values->threads; i++) { in perf_read_values__display_raw()
204 for (i = 0; i < values->threads; i++) { in perf_read_values__display_raw()
[all …]
evlist.c:36 struct thread_map *threads) in perf_evlist__init() argument
43 perf_evlist__set_maps(evlist, cpus, threads); in perf_evlist__init()
118 thread_map__delete(evlist->threads); in perf_evlist__delete()
120 evlist->threads = NULL; in perf_evlist__delete()
280 return thread_map__nr(evlist->threads); in perf_evlist__nr_threads()
413 int nr_threads = thread_map__nr(evlist->threads); in perf_evlist__alloc_pollfd()
541 if (!evsel->system_wide && evlist->threads && thread >= 0) in perf_evlist__set_sid_idx()
542 sid->tid = evlist->threads->map[thread]; in perf_evlist__set_sid_idx()
754 evlist->nr_mmaps = thread_map__nr(evlist->threads); in perf_evlist__alloc_mmap()
850 int nr_threads = thread_map__nr(evlist->threads); in perf_evlist__mmap_per_cpu()
[all …]
python.c:435 struct thread_map *threads; member
448 pthreads->threads = thread_map__new(pid, tid, uid); in pyrf_thread_map__init()
449 if (pthreads->threads == NULL) in pyrf_thread_map__init()
456 thread_map__delete(pthreads->threads); in pyrf_thread_map__delete()
464 return pthreads->threads->nr; in pyrf_thread_map__length()
471 if (i >= pthreads->threads->nr) in pyrf_thread_map__item()
474 return Py_BuildValue("i", pthreads->threads->map[i]); in pyrf_thread_map__item()
623 struct thread_map *threads = NULL; in pyrf_evsel__open() local
633 threads = ((struct pyrf_thread_map *)pthreads)->threads; in pyrf_evsel__open()
643 if (perf_evsel__open(evsel, cpus, threads) < 0) { in pyrf_evsel__open()
[all …]
evlist.h:51 struct thread_map *threads; member
65 struct thread_map *threads);
148 struct thread_map *threads) in perf_evlist__set_maps() argument
151 evlist->threads = threads; in perf_evlist__set_maps()
machine.h:32 struct rb_root threads; member
213 struct target *target, struct thread_map *threads,
217 struct thread_map *threads, bool data_mmap) in machine__synthesize_threads() argument
219 return __machine__synthesize_threads(machine, NULL, target, threads, in machine__synthesize_threads()
values.h:7 int threads; member
machine.c:30 machine->threads = RB_ROOT; in machine__init()
94 struct rb_node *nd = rb_first(&machine->threads); in machine__delete_threads()
343 struct rb_node **p = &machine->threads.rb_node; in __machine__findnew_thread()
384 rb_insert_color(&th->rb_node, &machine->threads); in __machine__findnew_thread()
395 rb_erase(&th->rb_node, &machine->threads); in __machine__findnew_thread()
578 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { in machine__fprintf()
1262 rb_erase(&th->rb_node, &machine->threads); in machine__remove_thread()
1757 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { in machine__for_each_thread()
1773 struct target *target, struct thread_map *threads, in __machine__synthesize_threads() argument
1777 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); in __machine__synthesize_threads()
event.c:453 struct thread_map *threads, in perf_event__synthesize_thread_map() argument
474 for (thread = 0; thread < threads->nr; ++thread) { in perf_event__synthesize_thread_map()
477 threads->map[thread], 0, in perf_event__synthesize_thread_map()
488 if ((int) comm_event->comm.pid != threads->map[thread]) { in perf_event__synthesize_thread_map()
492 for (j = 0; j < threads->nr; ++j) { in perf_event__synthesize_thread_map()
493 if ((int) comm_event->comm.pid == threads->map[j]) { in perf_event__synthesize_thread_map()
evsel.c:1135 struct thread_map *threads) in __perf_evsel__open() argument
1145 nthreads = threads->nr; in __perf_evsel__open()
1186 pid = threads->map[thread]; in __perf_evsel__open()
1303 int threads[1]; member
1306 .threads = { -1, },
1310 struct thread_map *threads) in perf_evsel__open() argument
1317 if (threads == NULL) in perf_evsel__open()
1318 threads = &empty_thread_map.map; in perf_evsel__open()
1320 return __perf_evsel__open(evsel, cpus, threads); in perf_evsel__open()
1330 struct thread_map *threads) in perf_evsel__open_per_thread() argument
[all …]
evsel.h:199 struct thread_map *threads);
201 struct thread_map *threads);
event.h:311 struct thread_map *threads,
parse-events.c:1245 int threads[1]; in is_event_supported() member
1248 .threads = { 0 }, in is_event_supported()
/linux-4.1.27/tools/perf/tests/
open-syscall.c:11 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); in test__open_syscall_event() local
14 if (threads == NULL) { in test__open_syscall_event()
30 if (perf_evsel__open_per_thread(evsel, threads) < 0) { in test__open_syscall_event()
55 perf_evsel__close_fd(evsel, 1, threads->nr); in test__open_syscall_event()
59 thread_map__delete(threads); in test__open_syscall_event()
mmap-basic.c:22 struct thread_map *threads; in test__basic_mmap() local
36 threads = thread_map__new(-1, getpid(), UINT_MAX); in test__basic_mmap()
37 if (threads == NULL) { in test__basic_mmap()
63 perf_evlist__set_maps(evlist, cpus, threads); in test__basic_mmap()
80 if (perf_evsel__open(evsels[i], cpus, threads) < 0) { in test__basic_mmap()
143 threads = NULL; in test__basic_mmap()
147 thread_map__delete(threads); in test__basic_mmap()
open-syscall-all-cpus.c:14 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); in test__open_syscall_event_on_all_cpus() local
17 if (threads == NULL) { in test__open_syscall_event_on_all_cpus()
41 if (perf_evsel__open(evsel, cpus, threads) < 0) { in test__open_syscall_event_on_all_cpus()
109 perf_evsel__close_fd(evsel, 1, threads->nr); in test__open_syscall_event_on_all_cpus()
113 thread_map__delete(threads); in test__open_syscall_event_on_all_cpus()
mmap-thread-lookup.c:26 static struct thread_data threads[THREADS]; variable
75 struct thread_data *td = &threads[i]; in thread_create()
95 struct thread_data *td0 = &threads[0]; in threads_create()
112 struct thread_data *td0 = &threads[0]; in threads_destroy()
121 err = pthread_join(threads[i].pt, NULL); in threads_destroy()
182 struct thread_data *td = &threads[i]; in mmap_events()
keep-tracking.c:63 struct thread_map *threads = NULL; in test__keep_tracking() local
70 threads = thread_map__new(-1, getpid(), UINT_MAX); in test__keep_tracking()
71 CHECK_NOT_NULL__(threads); in test__keep_tracking()
79 perf_evlist__set_maps(evlist, cpus, threads); in test__keep_tracking()
148 thread_map__delete(threads); in test__keep_tracking()
perf-time-to-tsc.c:48 struct thread_map *threads = NULL; in test__perf_time_to_tsc() local
60 threads = thread_map__new(-1, getpid(), UINT_MAX); in test__perf_time_to_tsc()
61 CHECK_NOT_NULL__(threads); in test__perf_time_to_tsc()
69 perf_evlist__set_maps(evlist, cpus, threads); in test__perf_time_to_tsc()
code-reading.c:402 struct thread_map *threads = NULL; in do_test_code_reading() local
444 threads = thread_map__new_by_tid(pid); in do_test_code_reading()
445 if (!threads) { in do_test_code_reading()
450 ret = perf_event__synthesize_thread_map(NULL, threads, in do_test_code_reading()
478 perf_evlist__set_maps(evlist, cpus, threads); in do_test_code_reading()
543 thread_map__delete(threads); in do_test_code_reading()
task-exit.c:62 evlist->threads = thread_map__new_by_tid(-1); in test__task_exit()
63 if (!evlist->cpus || !evlist->threads) { in test__task_exit()
sw-clock.c:54 evlist->threads = thread_map__new_by_tid(getpid()); in __test__sw_clock_freq()
55 if (!evlist->cpus || !evlist->threads) { in __test__sw_clock_freq()
switch-tracking.c:321 struct thread_map *threads = NULL; in test__switch_tracking() local
329 threads = thread_map__new(-1, getpid(), UINT_MAX); in test__switch_tracking()
330 if (!threads) { in test__switch_tracking()
347 perf_evlist__set_maps(evlist, cpus, threads); in test__switch_tracking()
564 thread_map__delete(threads); in test__switch_tracking()
open-syscall-tp-fields.c:48 evlist->threads->map[0] = getpid(); in test__syscall_open_tp_fields()
/linux-4.1.27/tools/perf/python/
twatch.py:20 threads = perf.thread_map()
25 evsel.open(cpus = cpus, threads = threads);
26 evlist = perf.evlist(cpus, threads)
/linux-4.1.27/tools/perf/bench/
sched-pipe.c:80 struct thread_data threads[2], *td; in bench_sched_pipe() local
103 td = threads + t; in bench_sched_pipe()
120 td = threads + t; in bench_sched_pipe()
127 td = threads + t; in bench_sched_pipe()
138 worker_thread(threads + 0); in bench_sched_pipe()
141 worker_thread(threads + 1); in bench_sched_pipe()
numa.c:135 struct thread_data *threads; member
521 td = g->threads + t; in parse_setup_cpu_list()
643 td = g->threads + t; in parse_setup_node_list()
802 g->threads[task_nr].curr_cpu = cpu; in update_curr_cpu()
828 td = g->threads + task_nr; in count_process_nodes()
864 td = g->threads + task_nr; in count_node_processes()
931 struct thread_data *td = g->threads + t; in calc_convergence()
1157 this_cpu = g->threads[task_nr].curr_cpu; in worker_thread()
1228 td = g->threads + task_nr; in worker_process()
1243 td = g->threads + task_nr; in worker_process()
[all …]
/linux-4.1.27/kernel/locking/
rtmutex-tester.c:36 static struct task_struct *threads[MAX_RT_TEST_THREADS]; variable
146 if (threads[tid] == current) in schedule_rt_mutex_test()
306 ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar); in sysfs_test_command()
314 ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar); in sysfs_test_command()
320 send_sig(SIGHUP, threads[tid], 0); in sysfs_test_command()
328 wake_up_process(threads[tid]); in sysfs_test_command()
348 tsk = threads[td->dev.id]; in sysfs_test_status()
383 threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id); in init_test_thread()
384 if (IS_ERR(threads[id])) in init_test_thread()
385 return PTR_ERR(threads[id]); in init_test_thread()
/linux-4.1.27/Documentation/filesystems/nfs/
knfsd-stats.txt:43 which contains all the nfsd threads and all the CPUs in the system,
71 This can happen either because there are too few nfsd threads in the
75 configuring more nfsd threads will probably improve the performance
77 already choosing not to wake idle nfsd threads because there are too
78 many nfsd threads which want to run but cannot, so configuring more
79 nfsd threads will make no difference whatsoever. The overloads-avoided
82 threads-woken
93 nfsd thread, despite the presence of idle nfsd threads, because
94 too many nfsd threads had been recently woken but could not get
99 runnable nfsd threads. The ideal rate of change for this counter
[all …]
nfsd-admin-interfaces.txt:11 nfsd/threads.
26 nfsd is shut down by a write of 0 to nfsd/threads. All locks and state
29 Between startup and shutdown, the number of threads may be adjusted up
30 or down by additional writes to nfsd/threads or by writes to
/linux-4.1.27/tools/usb/
ffs-test.c:309 } threads[] = { variable
351 if (t != threads) { in cleanup_thread()
618 init_thread(threads); in main()
619 ep0_init(threads, legacy_descriptors); in main()
621 for (i = 1; i < sizeof threads / sizeof *threads; ++i) in main()
622 init_thread(threads + i); in main()
624 for (i = 1; i < sizeof threads / sizeof *threads; ++i) in main()
625 start_thread(threads + i); in main()
627 start_thread_helper(threads); in main()
629 for (i = 1; i < sizeof threads / sizeof *threads; ++i) in main()
[all …]
/linux-4.1.27/Documentation/power/
freezing-of-tasks.txt:7 kernel threads are controlled during hibernation or system-wide suspend (on some
14 PF_NOFREEZE unset (all user space processes and some kernel threads) are
24 fake signal to all user space processes, and wakes up all the kernel threads.
32 frozen before kernel threads.
39 signal-handling code, but the freezable kernel threads need to call it
59 threads must call try_to_freeze() somewhere or use one of the
75 - freezes all tasks (including kernel threads) because we can't freeze
76 kernel threads without freezing userspace tasks
79 - thaws only kernel threads; this is particularly useful if we need to do
80 anything special in between thawing of kernel threads and thawing of
[all …]
swsusp.txt:165 kernel threads are controlled during hibernation or system-wide suspend (on some
/linux-4.1.27/tools/virtio/virtio-trace/
README:14 The read/write threads hold it.
17 the controller wake read/write threads.
18 5) The read/write threads start to read trace data from ring-buffers and
20 6) If the controller receives a stop order from a host, the read/write threads
31 trace-agent-ctl.c: includes controller function for read/write threads
32 trace-agent-rw.c: includes read/write threads function
103 read/write threads in the agent wait for start order from host. If you add -o
/linux-4.1.27/arch/powerpc/include/asm/
cputhreads.h:42 static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads) in cpu_thread_mask_to_cores() argument
50 if (cpumask_intersects(threads, &tmp)) in cpu_thread_mask_to_cores()
/linux-4.1.27/Documentation/
robust-futex-ABI.txt:23 threads in the kernel. Options on the sys_futex(2) system call support
31 probably causing deadlock or other such failure of the other threads
40 The pointer 'head' points to a structure in the threads address space
80 waiting for a lock on a threads exit if that next thread used the futex
100 It is anticipated that threads will use robust_futexes embedded in
115 entirely by user level code in the contending threads, and by the
121 There may exist thousands of futex lock structures in a threads shared
127 at different times by any of the threads with access to that region. The
128 thread currently holding such a lock, if any, is marked with the threads
156 exiting threads TID, then the kernel will do two things:
[all …]
unshare.txt:27 Most legacy operating system kernels support an abstraction of threads
29 special resources and mechanisms to maintain these "threads". The Linux
31 between processes and "threads". The kernel allows processes to share
32 resources and thus they can achieve legacy "threads" behavior without
34 power of implementing threads in this manner comes not only from
37 threads. On Linux, at the time of thread creation using the clone system
39 between threads.
42 allows threads to selectively 'unshare' any resources that were being
45 of the discussion on POSIX threads on Linux. unshare augments the
46 usefulness of Linux threads for applications that would like to control
[all …]
cputopology.txt:25 internal kernel map of cpuX's hardware threads within the same
30 internal kernel map of cpuX's hardware threads within the same
35 internal kernel map of cpuX's hardware threads within the same
workqueue.txt:85 Special purpose threads, called worker threads, execute the functions
87 worker threads become idle. These worker threads are managed in so
203 served by worker threads with elevated nice level.
354 Because the work functions are executed by generic worker threads
358 Worker threads show up in the process list as:
kernel-per-CPU-kthreads.txt:52 1. Use irq affinity to force the irq threads to execute on
71 both kernel threads and interrupts to execute elsewhere.
121 forcing both kernel threads and interrupts to execute elsewhere.
127 kernel threads and interrupts to execute elsewhere.
145 calls and by forcing both kernel threads and interrupts
157 calls and by forcing both kernel threads and interrupts
pi-futex.txt:46 combined with medium-prio construct-audio-data threads and low-prio
47 display-colory-stuff threads. Add video and decoding to the mix and
kref.txt:98 This way, it doesn't matter what order the two threads handle the
robust-futexes.txt:179 for new threads, without the need of another syscall.]
assoc_array.txt:293 It is possible for other threads to iterate over or search the array under
atomic_ops.txt:57 all threads are guaranteed to be correct reflecting either the value that has
kernel-parameters.txt:3040 the priority of the RCU boost threads (rcub/N)
3110 Set callbacks/threads for rcu_barrier() testing.
memory-barriers.txt:301 NOTE 1: Two threads of execution can update and access
/linux-4.1.27/Documentation/filesystems/pohmelfs/
info.txt:4 All but index, number of crypto threads and maximum IO size can changed via remount.
36 Number of crypto processing threads. Threads are used both for RX and TX traffic.
37 Default is 2, or no threads if crypto operations are not supported.
85 Number of working threads is set to 10.
98 Number of worker threads specifies how many workers will be created for each client.
design_notes.txt:45 servers and async threads will pick up replies in parallel, find appropriate transactions in the
70 Crypto performance heavily depends on the number of crypto threads, which asynchronously perform
/linux-4.1.27/tools/perf/Documentation/
perf-lock.txt:27 'perf lock info' shows metadata like threads or addresses
57 --threads::
perf-sched.txt:27 via perf sched record. (this is done by starting up mockup threads
29 threads can then replay the timings (CPU runtime and sleep patterns)
perf-bench.txt:99 (20 sender and receiver threads per group)
100 (20 groups == 800 threads run)
examples.txt:71 Delta compression using up to 2 threads.
125 Delta compression using up to 2 threads.
perf-record.txt:82 Record events in threads owned by uid. Name or number.
183 in per-cpu mode. The cgroup filesystem must be mounted. All threads belonging to
perf-trace.txt:56 Record events in threads owned by uid. Name or number.
perf-report.txt:36 --threads::
220 zooming into DSOs or threads, among other features. Use of --tui
perf-stat.txt:101 in per-cpu mode. The cgroup filesystem must be mounted. All threads belonging to
perf-top.txt:84 Record events in threads owned by uid. Name or number.
/linux-4.1.27/Documentation/thermal/
intel_powerclamp.txt:42 idle injection across all online CPU threads was introduced. The goal
74 Injection is controlled by high priority kernel threads, spawned for
77 These kernel threads, with SCHED_FIFO class, are created to perform
82 migrated, unless the CPU is taken offline. In this case, threads
214 Per-CPU kernel threads are started/stopped upon receiving
216 keeps track of clamping kernel threads, even after they are migrated
233 case, little can be done from the idle injection threads. In most
257 counter summed over per CPU counting threads spawned for all running
278 will not show idle injection kernel threads.
281 idle time, powerclamp kernel threads will do idle injection, which
/linux-4.1.27/Documentation/locking/
locktorture.txt:14 This torture test consists of creating a number of kernel threads which
28 nwriters_stress Number of kernel threads that will stress exclusive lock
32 nreaders_stress Number of kernel threads that will stress shared lock
97 shuffle_interval The number of seconds to keep the test threads affinitied
121 (D): Min and max number of times threads failed to acquire the lock.
/linux-4.1.27/tools/power/x86/turbostat/
turbostat.c:193 struct thread_data threads; member
473 if (t == &average.threads) { in format_counters()
658 format_counters(&average.threads, &average.cores, in format_all_counters()
859 average.threads.tsc += t->tsc; in sum_counters()
860 average.threads.aperf += t->aperf; in sum_counters()
861 average.threads.mperf += t->mperf; in sum_counters()
862 average.threads.c1 += t->c1; in sum_counters()
864 average.threads.extra_delta32 += t->extra_delta32; in sum_counters()
865 average.threads.extra_delta64 += t->extra_delta64; in sum_counters()
917 clear_counters(&average.threads, &average.cores, &average.packages); in compute_average()
[all …]
/linux-4.1.27/scripts/rt-tester/
t5-l4-pi-boost-deboost-setsched.tst:48 # 5 threads 4 lock PI - modify priority of blocked threads
t2-l1-signal.tst:48 # 2 threads 1 lock with priority inversion
t2-l1-pi.tst:48 # 2 threads 1 lock with priority inversion
t3-l1-pi-1rt.tst:48 # 3 threads 1 lock PI
t2-l2-2rt-deadlock.tst:48 # 2 threads 2 lock
t3-l1-pi-3rt.tst:48 # 3 threads 1 lock PI
t3-l2-pi.tst:48 # 3 threads 2 lock PI
t2-l1-2rt-sameprio.tst:48 # 2 threads 1 lock
t3-l1-pi-2rt.tst:48 # 3 threads 1 lock PI
t3-l1-pi-steal.tst:48 # 3 threads 1 lock PI steal pending ownership
t5-l4-pi-boost-deboost.tst:48 # 5 threads 4 lock PI
t4-l2-pi-deboost.tst:48 # 4 threads 2 lock PI
/linux-4.1.27/arch/arc/
Kconfig.debug:11 threads to run on the system and also increase the pressure
/linux-4.1.27/net/rds/
Makefile:3 recv.o send.o stats.o sysctl.o threads.o transport.o \
/linux-4.1.27/tools/perf/scripts/python/
sched-migration.py:29 threads = { 0 : "idle"} variable
32 return "%s:%d" % (threads[pid], pid)
342 threads[prev_pid] = prev_comm
343 threads[next_pid] = next_comm
/linux-4.1.27/Documentation/scheduler/
completion.txt:9 If you have one or more threads of execution that must wait for some process
19 efficient code as both threads can continue until the result is actually
24 struct completion that tells the waiting threads of execution if they
91 work threads remains in-scope, and no references remain to on-stack data
214 (decrementing) the done element of struct completion. Waiting threads
sched-arch.txt:29 threads need only ever query need_resched, and may never set or
sched-bwc.txt:18 above at each period boundary. As threads consume this bandwidth it is
sched-deadline.txt:359 start multiple threads with specific parameters. rt-app supports
372 The above creates 2 threads. The first one, scheduled by SCHED_DEADLINE,
/linux-4.1.27/kernel/
fork.c:272 u64 threads; in set_max_threads() local
279 threads = MAX_THREADS; in set_max_threads()
281 threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, in set_max_threads()
284 if (threads > max_threads_suggested) in set_max_threads()
285 threads = max_threads_suggested; in set_max_threads()
287 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); in set_max_threads()
2067 int threads = max_threads; in sysctl_max_threads() local
2072 t.data = &threads; in sysctl_max_threads()
2080 set_max_threads(threads); in sysctl_max_threads()
/linux-4.1.27/arch/arm/vfp/
entry.S:23 @ r10 = this threads thread_info structure
/linux-4.1.27/Documentation/powerpc/
cpu_families.txt:15 | Old POWER | --------------> | RS64 (threads) |
/linux-4.1.27/drivers/dma/
dmatest.c:165 struct list_head threads; member
178 list_for_each_entry(thread, &dtc->threads, node) { in is_threaded_test_run()
725 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { in dmatest_cleanup_channel()
780 list_add_tail(&thread->node, &dtc->threads); in dmatest_add_threads()
802 INIT_LIST_HEAD(&dtc->threads); in dmatest_add_channel()
/linux-4.1.27/arch/metag/
Kconfig.debug:15 running more threads on a system and also reduces the pressure
Kconfig:128 reordering of writes from different hardware threads when SMP is
/linux-4.1.27/tools/power/cpupower/utils/helpers/
helpers.h:109 unsigned int threads; /* per core */ member
/linux-4.1.27/Documentation/sysctl/
kernel.txt:85 - threads-max
835 The soft lockup detector monitors CPUs for threads that are hogging the CPUs
836 without rescheduling voluntarily, and thus prevent the 'watchdog/N' threads
838 interrupts which are needed for the 'watchdog/N' threads to be woken up by
875 threads-max
877 This value controls the maximum number of threads that can be created
881 maximum number of threads is created, the thread structures occupy only
884 The minimum value that can be written to threads-max is 20.
885 The maximum value that can be written to threads-max is given by the
887 If a value outside of this range is written to threads-max an error
[all …]
vm.txt:123 flusher threads will start writeback.
136 flusher threads will start writing out dirty data.
161 for writeout by the kernel flusher threads. It is expressed in 100'ths
179 The kernel flusher threads will periodically wake up and write `old' data
569 Enables a system-wide task dump (excluding kernel threads) to be produced
/linux-4.1.27/Documentation/vm/
active_mm.txt:25 doesn't need any user mappings - all kernel threads basically fall into
26 this category, but even "real" threads can temporarily say that for
numa_memory_policy.txt:51 [Linux kernel task] that installs the policy and any threads
52 subsequently created by that thread. Any sibling threads existing
84 space--a.k.a. threads--independent of when the policy is installed; and
cleancache.txt:79 different Linux threads are simultaneously putting and invalidating a page
/linux-4.1.27/Documentation/ABI/testing/
sysfs-devices-system-cpu:79 to other cores and threads in the same physical package.
90 core_siblings: internal kernel map of cpu#'s hardware threads
101 threads within the same core as cpu#
104 threads within the same core as cpu#
sysfs-devices-power:60 be executed asynchronously (ie. in separate threads, in parallel
/linux-4.1.27/arch/powerpc/boot/dts/
ps3.dts:50 * threads is with an ibm,ppc-interrupt-server#s entry. We'll put one
/linux-4.1.27/Documentation/metag/
kernel-ABI.txt:36 Extended context registers (EXT) may not be present on all hardware threads and
40 Global registers are shared between threads and are privilege protected.
100 context. A0.15 is global between hardware threads though which means it cannot
/linux-4.1.27/Documentation/accounting/
taskstats.txt:43 The latter contains the sum of per-pid stats for all threads in the thread
129 When a user queries to get per-tgid data, the sum of all other live threads in
131 threads of the same thread group.
delay-accounting.txt:24 delay statistics aggregated for all tasks (or threads) belonging to a
/linux-4.1.27/Documentation/s390/
kvm.txt:22 threads and has not called KVM_S390_ENABLE_SIE before.
32 KVM_S390_ENABLE_SIE before. User processes that want to launch multiple threads
Debugging390.txt:60 the current running threads private area.
/linux-4.1.27/Documentation/RCU/
torture.txt:70 nfakewriters This is the number of RCU fake writer threads to run. Fake
71 writer threads repeatedly use the synchronous "wait for
79 nreaders This is the number of RCU reading threads supported.
100 The number of seconds to keep the test threads affinitied
rcu.txt:87 needed if you have CPU-bound realtime threads.
RTFP.txt:17 destruction until all threads running at that time have terminated, again
19 with short-lived threads, such as the K42 research operating system.
42 In 1990, Pugh [Pugh90] noted that explicitly tracking which threads
44 in the presence of non-terminating threads. However, this explicit
59 which are otherwise required to synchronize the threads at the end
75 time that reading threads can hold references, as there might well be in
stallwarn.txt:214 is running at a higher priority than the RCU softirq threads.
/linux-4.1.27/drivers/android/
binder.c:297 struct rb_root threads; member
2522 struct rb_node **p = &proc->threads.rb_node; in binder_get_thread()
2545 rb_insert_color(&thread->rb_node, &proc->threads); in binder_get_thread()
2560 rb_erase(&thread->rb_node, &proc->threads); in binder_free_thread()
2993 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { in binder_deferred_flush()
3067 int threads, nodes, incoming_refs, outgoing_refs, buffers, in binder_deferred_release() local
3082 threads = 0; in binder_deferred_release()
3084 while ((n = rb_first(&proc->threads))) { in binder_deferred_release()
3088 threads++; in binder_deferred_release()
3162 __func__, proc->pid, threads, nodes, incoming_refs, in binder_deferred_release()
[all …]
/linux-4.1.27/arch/sh/
Kconfig.debug:37 running more threads on a system and also reduces the pressure
/linux-4.1.27/Documentation/device-mapper/
dm-crypt.txt:74 encryption threads to a single thread degrades performance
/linux-4.1.27/Documentation/scsi/
scsi-parameters.txt:106 discovered. async scans them in kernel threads,
ChangeLog.sym53c8xx_2:118 threads. A timer is also used to prevent from waiting indefinitely.
/linux-4.1.27/drivers/s390/net/
lcs.c:321 lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) in lcs_set_allowed_threads() argument
326 card->thread_allowed_mask = threads; in lcs_set_allowed_threads()
331 lcs_threads_running(struct lcs_card *card, unsigned long threads) in lcs_threads_running() argument
337 rc = (card->thread_running_mask & threads); in lcs_threads_running()
343 lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) in lcs_wait_for_threads() argument
346 lcs_threads_running(card, threads) == 0); in lcs_wait_for_threads()
qeth_core_main.c:207 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, in qeth_set_allowed_threads() argument
213 card->thread_allowed_mask = threads; in qeth_set_allowed_threads()
215 card->thread_start_mask &= threads; in qeth_set_allowed_threads()
221 int qeth_threads_running(struct qeth_card *card, unsigned long threads) in qeth_threads_running() argument
227 rc = (card->thread_running_mask & threads); in qeth_threads_running()
233 int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) in qeth_wait_for_threads() argument
238 qeth_threads_running(card, threads) == 0); in qeth_wait_for_threads()
/linux-4.1.27/Documentation/networking/
pktgen.txt:53 Viewing threads
93 Configuring threads and devices
/linux-4.1.27/tools/perf/
builtin-stat.c:313 return perf_evsel__open_per_thread(evsel, evsel_list->threads); in create_perf_stat_counter()
511 int nthreads = thread_map__nr(evsel_list->threads); in read_counter()
604 nthreads = thread_map__nr(evsel_list->threads); in handle_initial_delay()
737 thread_map__nr(evsel_list->threads)); in __run_perf_stat()
builtin-record.c:149 if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) { in record__open()
443 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads, in __cmd_record()
builtin-top.c:887 top->evlist->threads) < 0) { in perf_top__start_counters()
953 top->evlist->threads, false); in __cmd_top()
builtin-trace.c:1436 evlist->threads, trace__tool_process, false); in trace__symbols_init()
2232 else if (evlist->threads->map[0] == -1) in trace__run()
2250 trace->multiple_threads = evlist->threads->map[0] == -1 || in trace__run()
2251 evlist->threads->nr > 1 || in trace__run()
builtin-kvm.c:1394 kvm->evlist->threads, false); in kvm_events_live()
/linux-4.1.27/Documentation/dmaengine/
dmatest.txt:36 Once started a message like "dmatest: Started 1 threads using dma0chan0" is
/linux-4.1.27/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/
qe.txt:22 threads.
/linux-4.1.27/Documentation/filesystems/
files.txt:12 this would be the case for posix threads. As with the common
btrfs.txt:221 The number of worker threads to allocate. The default number is equal
gfs2-glocks.txt:11 threads from making calls to the DLM, etc. at the same time. If a
proc.txt:240 Threads number of threads
298 num_threads number of threads
1302 The "processes" line gives the number of processes and threads created, which
1306 The "procs_running" line gives the total number of threads that are
1307 running or ready to run (i.e., the total number of runnable threads).
ramfs-rootfs-initramfs.txt:291 the above threads) is:
/linux-4.1.27/Documentation/filesystems/caching/
object.txt:130 The work to be done by the various states was given CPU time by the threads of
141 workqueues don't necessarily have the right numbers of threads.
backend-api.txt:165 FS-Cache has a pool of threads that it uses to give CPU time to the
693 pool. One of the threads in the pool will invoke the retrieval record's
/linux-4.1.27/Documentation/kdump/
gdbmacros.txt:176 Run info threads and lookup pid of thread #1
/linux-4.1.27/arch/powerpc/kvm/
Kconfig:119 and in nap mode due to idle (cede) while other threads are still
/linux-4.1.27/fs/
binfmt_elf.c:1825 struct elf_thread_core_info *threads = info->thread; in free_note_info() local
1826 while (threads) { in free_note_info()
1828 struct elf_thread_core_info *t = threads; in free_note_info()
1829 threads = t->next; in free_note_info()
/linux-4.1.27/Documentation/block/
cfq-iosched.txt:284 Hence doing tree idling on threads using REQ_NOIDLE flag on requests
286 time we do not idle on individual threads.
/linux-4.1.27/drivers/acpi/acpica/
aclocal.h:1077 acpi_thread_id *threads; member
/linux-4.1.27/Documentation/cgroups/
blkio-controller.txt:22 You can do a very simple testing of running two dd threads in two different
46 launch two dd threads in different cgroup to read those files.
cgroups.txt:239 Writing a thread group ID into this file moves all threads in that
442 threads in a threadgroup at once. Echoing the PID of any task in a
memory.txt:57 tasks # attach a task(thread) and show list of threads
/linux-4.1.27/
REPORTING-BUGS:76 threads or separate bugzilla entries. If you report several unrelated
/linux-4.1.27/Documentation/devicetree/bindings/arm/
topology.txt:23 threads existing in the system and map to the hierarchy level "thread" above.
/linux-4.1.27/init/
Kconfig:678 int "Real-time priority to use for RCU worker threads"
685 assigned to the rcuc/n and rcub/n threads and is also the value
687 real-time application that has one or more CPU-bound threads
692 applications that do not have any CPU-bound threads.
696 multiple real-time threads that, taken together, fully utilize
1071 threads which belong to the cgroup specified and run on the
/linux-4.1.27/Documentation/frv/
features.txt:287 Note that kernel threads do not have a userspace context, and so will not
/linux-4.1.27/Documentation/serial/
tty.txt:34 counts the number of threads of execution within an ldisc method (plus those
/linux-4.1.27/Documentation/laptops/
laptop-mode.txt:265 # exceeded, the kernel will wake flusher threads which will then reduce the
387 # exceeded, the kernel will wake flusher threads which will then reduce the
/linux-4.1.27/arch/m68k/
Kconfig.machine:321 running more threads on a system and also reduces the pressure
/linux-4.1.27/scripts/
spelling.txt:956 threds||threads
/linux-4.1.27/Documentation/hid/
hid-transport.txt:85 its own worker threads.
/linux-4.1.27/kernel/trace/
Kconfig:595 to each of the threads, where the IPI handler will also write
/linux-4.1.27/Documentation/PCI/
pci-error-recovery.txt:169 >>> recovery implementation (e.g. waiting for all notification threads
/linux-4.1.27/Documentation/filesystems/configfs/
configfs.txt:269 the item in other threads, the memory is safe. It may take some time
/linux-4.1.27/drivers/char/
Kconfig:178 of threads across a large system which avoids bouncing a cacheline
/linux-4.1.27/Documentation/filesystems/cifs/
CHANGES:242 on requests on other threads. Improve POSIX locking emulation,
361 kills the cifsd thread (NB: killing the cifs kernel threads is not
/linux-4.1.27/Documentation/virtual/kvm/
api.txt:216 threads in one or more virtual CPU cores. (This is because the
217 hardware requires all the hardware threads in a CPU core to be in the
541 signal mask temporarily overrides the threads signal mask. Any
/linux-4.1.27/crypto/
Kconfig:145 algorithm that executes in kernel threads.
/linux-4.1.27/Documentation/virtual/uml/
UserModeLinux-HOWTO.txt:123 14.3 Case 3 : Tracing thread panics caused by other threads
2474 slightly different because the kernel's threads are already being
4164 14.3. Case 3 : Tracing thread panics caused by other threads
/linux-4.1.27/Documentation/security/
keys.txt:280 The keyrings associated with new threads are each labeled with the context of
/linux-4.1.27/arch/mips/
Kconfig:2143 bool "Dynamic FPU affinity for FP-intensive threads"
/linux-4.1.27/drivers/scsi/aic7xxx/
aic79xx.reg:3724 * The execution head pointer threads the head SCBs for
/linux-4.1.27/arch/arm/
Kconfig:1728 between threads sharing the same address space if they invoke