Searched refs:workers (Results 1 – 13 of 13) sorted by relevance

/linux-4.1.27/Documentation/
workqueue.txt
40 number of workers as the number of CPUs. The kernel grew a lot of MT
123 number of the currently runnable workers. Generally, work items are
127 workers on the CPU, the worker-pool doesn't start execution of a new
130 are pending work items. This allows using a minimal number of workers
133 Keeping idle workers around doesn't cost other than the memory space
145 Forward progress guarantee relies on that workers can be created when
147 through the use of rescue workers. All work items which might be used
172 worker-pools which host workers which are not bound to any
181 of mostly unused workers across different CPUs as the issuer
206 each other. Each maintains its separate pool of workers and
[all …]
kernel-per-CPU-kthreads.txt
193 d. As of v3.18, Christoph Lameter's on-demand vmstat workers
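The workqueue.txt matches above outline the concurrency-managed workqueue design: a worker-pool keeps roughly one runnable worker per CPU, idle workers cost little to keep around, rescue workers preserve forward progress under memory pressure, and unbound worker-pools host workers not tied to any CPU. As a loose illustration of how a driver asks for those properties through the public API (not code taken from the files above; the "demo_" names are invented and the flag combination is just one plausible choice):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
        pr_info("demo work item executed\n");
}
static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
        /*
         * WQ_MEM_RECLAIM reserves a rescuer thread for this queue (the
         * "rescue workers" mentioned above), so queued items can still
         * run when no new worker can be created under memory pressure.
         * WQ_UNBOUND serves the queue from the unbound worker-pools
         * instead of the per-CPU ones; max_active 0 means the default.
         */
        demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        if (!demo_wq)
                return -ENOMEM;
        queue_work(demo_wq, &demo_work);
        return 0;
}

static void __exit demo_exit(void)
{
        flush_workqueue(demo_wq);
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");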
/linux-4.1.27/Documentation/filesystems/pohmelfs/
info.txt
93 -w workers - number of workers per connected client. Default: 1.
98 Number of worker threads specifies how many workers will be created for each client.
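The info.txt hit documents the server-side "-w workers" option: each connected client gets its own set of worker threads, one by default. Purely as an illustration of that per-client fan-out (this is not pohmelfs code, and every name below is made up), a user-space sketch:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker_arg {
        int client_id;
        int worker_id;
};

static void *client_worker(void *p)
{
        struct worker_arg *a = p;

        printf("client %d: worker %d ready to service requests\n",
               a->client_id, a->worker_id);
        free(a);
        return NULL;
}

/* Start 'workers' threads for one connected client, cf. "-w workers". */
static int serve_client(int client_id, int workers, pthread_t *tids)
{
        int i;

        for (i = 0; i < workers; i++) {
                struct worker_arg *a = malloc(sizeof(*a));

                if (!a)
                        return -1;
                a->client_id = client_id;
                a->worker_id = i;
                if (pthread_create(&tids[i], NULL, client_worker, a)) {
                        free(a);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        int workers = 2;        /* e.g. a hypothetical "-w 2" */
        pthread_t tids[2];
        int i;

        if (serve_client(1, workers, tids))
                return 1;
        for (i = 0; i < workers; i++)
                pthread_join(tids[i], NULL);
        return 0;
}

Compile with -pthread.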
/linux-4.1.27/drivers/gpu/drm/i915/
i915_gem_userptr.c
252 if (!obj->userptr.workers) in i915_mmu_notifier_add()
592 obj->userptr.workers--; in __i915_gem_userptr_get_pages_worker()
667 obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) { in i915_gem_userptr_get_pages()
673 obj->userptr.workers++; in i915_gem_userptr_get_pages()
i915_drv.h
2016 unsigned workers :4; member
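Taken together, the i915 hits show a bounded-counter pattern: obj->userptr.workers is a small bitfield counting in-flight get-pages workers, incremented before another one is queued only while it is below I915_GEM_USERPTR_MAX_WORKERS and decremented when a worker finishes. A hedged user-space sketch of that idea (the cap of 15 is just the most a 4-bit field can hold, not necessarily the driver's constant, and all names are invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_WORKERS 15     /* a 4-bit field can count at most 15 */

struct demo_obj {
        pthread_mutex_t lock;
        unsigned workers : 4;   /* in-flight async workers */
};

/* Admit one more asynchronous worker unless the cap is reached. */
static bool demo_worker_get(struct demo_obj *o)
{
        bool ok = false;

        pthread_mutex_lock(&o->lock);
        if (o->workers < DEMO_MAX_WORKERS) {
                o->workers++;
                ok = true;
        }
        pthread_mutex_unlock(&o->lock);
        return ok;
}

/* Called by a worker when it has finished. */
static void demo_worker_put(struct demo_obj *o)
{
        pthread_mutex_lock(&o->lock);
        o->workers--;
        pthread_mutex_unlock(&o->lock);
}

int main(void)
{
        struct demo_obj o = { .lock = PTHREAD_MUTEX_INITIALIZER };

        if (demo_worker_get(&o))
                printf("worker admitted, %u in flight\n", (unsigned)o.workers);
        demo_worker_put(&o);
        return 0;
}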
/linux-4.1.27/drivers/md/
raid5.h
425 struct r5worker *workers; member
raid5.c
277 group->workers[0].working = true; in raid5_wakeup_stripe_thread()
279 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
284 if (group->workers[i].working == false) { in raid5_wakeup_stripe_thread()
285 group->workers[i].working = true; in raid5_wakeup_stripe_thread()
287 &group->workers[i].work); in raid5_wakeup_stripe_thread()
6165 kfree(old_groups[0].workers); in raid5_store_group_thread_cnt()
6201 struct r5worker *workers; in alloc_thread_groups() local
6211 workers = kzalloc(size * *group_cnt, GFP_NOIO); in alloc_thread_groups()
6214 if (!*worker_groups || !workers) { in alloc_thread_groups()
6215 kfree(workers); in alloc_thread_groups()
[all …]
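The raid5 hits sketch per-group worker threads: each group's workers pointer is a slice of one zeroed array allocated in alloc_thread_groups(), and raid5_wakeup_stripe_thread() marks workers as working and queues their work, scanning for ones that are still idle. The sketch below reproduces only that allocation-and-scan shape in user-space C; it is not the raid5 logic, and every name in it is invented.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_worker {
        bool working;                   /* cf. r5worker.working */
};

struct demo_group {
        struct demo_worker *workers;    /* slice of one shared array */
        int cnt;
};

/* One zeroed allocation backs every group's worker slice, mirroring the
 * single kzalloc(size * group_cnt, ...) seen in alloc_thread_groups(). */
static struct demo_group *alloc_groups(int group_cnt, int cnt)
{
        struct demo_group *groups = calloc(group_cnt, sizeof(*groups));
        struct demo_worker *workers = calloc((size_t)group_cnt * cnt,
                                             sizeof(*workers));
        int i;

        if (!groups || !workers) {
                free(groups);
                free(workers);
                return NULL;
        }
        for (i = 0; i < group_cnt; i++) {
                groups[i].workers = workers + (size_t)i * cnt;
                groups[i].cnt = cnt;
        }
        return groups;
}

/* Pick the first idle worker in a group, mark it busy, and "queue" it. */
static void wakeup_group(struct demo_group *g)
{
        int i;

        for (i = 0; i < g->cnt; i++) {
                if (!g->workers[i].working) {
                        g->workers[i].working = true;
                        printf("queued work for worker %d\n", i);
                        return;
                }
        }
}

int main(void)
{
        struct demo_group *groups = alloc_groups(2, 4);

        if (!groups)
                return 1;
        wakeup_group(&groups[0]);
        wakeup_group(&groups[0]);
        /* group 0's pointer is the base of the shared array,
         * cf. kfree(old_groups[0].workers) in the raid5 code */
        free(groups[0].workers);
        free(groups);
        return 0;
}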
/linux-4.1.27/net/l2tp/
Kconfig
22 with home workers to connect to their offices.
/linux-4.1.27/kernel/
workqueue.c
169 struct list_head workers; /* A: attached workers */ member
394 list_for_each_entry((worker), &(pool)->workers, node) \
1687 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1708 if (list_empty(&pool->workers)) in worker_detach_from_pool()
3161 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
3242 if (!list_empty(&pool->workers)) in put_unbound_pool()
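The kernel/workqueue.c hits are the bookkeeping side of the same design: each worker_pool keeps its attached workers on pool->workers, worker_attach_to_pool() adds to the tail, worker_detach_from_pool() checks with list_empty() whether the last worker has left, and init_worker_pool() starts the list empty. A compressed kernel-module sketch of that attach/detach pattern using the generic list API follows; the "demo_" structures are invented and the locking is reduced to a single mutex for brevity.

#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mutex.h>

struct demo_pool {
        struct list_head workers;       /* attached workers, cf. pool->workers */
        struct mutex lock;
};

struct demo_worker {
        struct list_head node;          /* anchored in demo_pool.workers */
        int id;
};

static struct demo_pool pool;

static void demo_attach(struct demo_pool *p, struct demo_worker *w)
{
        mutex_lock(&p->lock);
        list_add_tail(&w->node, &p->workers);   /* cf. worker_attach_to_pool() */
        mutex_unlock(&p->lock);
}

/* Returns true when the last worker detaches, cf. worker_detach_from_pool(). */
static bool demo_detach(struct demo_pool *p, struct demo_worker *w)
{
        bool last;

        mutex_lock(&p->lock);
        list_del(&w->node);
        last = list_empty(&p->workers);
        mutex_unlock(&p->lock);
        return last;
}

static int __init demo_init(void)
{
        int i;

        INIT_LIST_HEAD(&pool.workers);          /* cf. init_worker_pool() */
        mutex_init(&pool.lock);

        for (i = 0; i < 3; i++) {
                struct demo_worker *w = kzalloc(sizeof(*w), GFP_KERNEL);

                if (!w)
                        break;                  /* keep the sketch simple */
                w->id = i;
                demo_attach(&pool, w);
        }
        return 0;
}

static void __exit demo_exit(void)
{
        struct demo_worker *w, *tmp;

        list_for_each_entry_safe(w, tmp, &pool.workers, node) {
                if (demo_detach(&pool, w))
                        pr_info("last worker detached\n");
                kfree(w);
        }
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");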
/linux-4.1.27/drivers/block/mtip32xx/
mtip32xx.c
890 int do_irq_enable = 1, i, workers; in mtip_handle_irq() local
911 for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS; in mtip_handle_irq()
916 workers++; in mtip_handle_irq()
919 atomic_set(&dd->irq_workers_active, workers); in mtip_handle_irq()
920 if (workers) { in mtip_handle_irq()
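In the mtip32xx interrupt handler the local workers variable simply counts how many slot groups have completions to service, and that count is published via atomic_set(&dd->irq_workers_active, workers). A small user-space sketch of just that counting step (placeholder constants and names, not driver code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SLOT_GROUPS 8      /* placeholder, cf. MTIP_MAX_SLOT_GROUPS */

struct demo_dev {
        uint32_t completed[DEMO_SLOT_GROUPS];   /* completion bits per group */
        atomic_int irq_workers_active;
};

/* Count the slot groups that need service and publish the count before
 * dispatching one handler per busy group, cf. mtip_handle_irq(). */
static int demo_count_workers(struct demo_dev *dd)
{
        int i, workers = 0;

        for (i = 0; i < DEMO_SLOT_GROUPS; i++)
                if (dd->completed[i])
                        workers++;

        atomic_store(&dd->irq_workers_active, workers);
        return workers;
}

int main(void)
{
        struct demo_dev dd = { .completed = { 0x1, 0, 0x80 } };

        atomic_init(&dd.irq_workers_active, 0);
        printf("%d slot groups need service\n", demo_count_workers(&dd));
        return 0;
}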
/linux-4.1.27/fs/btrfs/
disk-io.c
861 btrfs_queue_work(fs_info->workers, &async->work); in btrfs_wq_submit_bio()
2082 btrfs_destroy_workqueue(fs_info->workers); in btrfs_stop_all_workers()
2242 fs_info->workers = in btrfs_init_workqueues()
2299 if (!(fs_info->workers && fs_info->delalloc_workers && in btrfs_init_workqueues()
super.c
1433 btrfs_workqueue_set_max(fs_info->workers, new_pool_size); in btrfs_resize_thread_pool()
ctree.h
1593 struct btrfs_workqueue *workers; member
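The btrfs hits trace one queue through its life cycle: fs_info->workers is created in btrfs_init_workqueues(), a combined NULL check catches any allocation failure, btrfs_workqueue_set_max() resizes it in btrfs_resize_thread_pool(), and btrfs_destroy_workqueue() tears it down in btrfs_stop_all_workers(). btrfs wraps its own btrfs_workqueue type around the kernel API; the sketch below shows a comparable create/check/resize/destroy sequence with the plain workqueue interface instead, using invented names and arbitrary max_active values.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_workers;
static struct workqueue_struct *demo_delalloc_workers;

static int __init demo_init(void)
{
        /* Arbitrary starting pool size of 4 for the sketch. */
        demo_workers = alloc_workqueue("demo-worker", WQ_MEM_RECLAIM, 4);
        demo_delalloc_workers = alloc_workqueue("demo-delalloc", WQ_MEM_RECLAIM, 4);

        /* Fail setup if any queue is missing, cf. the combined check
         * in btrfs_init_workqueues(). */
        if (!demo_workers || !demo_delalloc_workers)
                goto fail;

        /* A later resize could raise the limit, roughly what
         * btrfs_workqueue_set_max() does in btrfs_resize_thread_pool(). */
        workqueue_set_max_active(demo_workers, 8);
        return 0;

fail:
        if (demo_workers)
                destroy_workqueue(demo_workers);
        if (demo_delalloc_workers)
                destroy_workqueue(demo_delalloc_workers);
        return -ENOMEM;
}

static void __exit demo_exit(void)
{
        /* cf. btrfs_destroy_workqueue() calls in btrfs_stop_all_workers() */
        destroy_workqueue(demo_workers);
        destroy_workqueue(demo_delalloc_workers);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");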