Searched refs:workers (Results 1 – 13 of 13) sorted by relevance
/linux-4.4.14/Documentation/ |
D | workqueue.txt |
      40  number of workers as the number of CPUs. The kernel grew a lot of MT
     123  number of the currently runnable workers. Generally, work items are
     127  workers on the CPU, the worker-pool doesn't start execution of a new
     130  are pending work items. This allows using a minimal number of workers
     133  Keeping idle workers around doesn't cost other than the memory space
     145  Forward progress guarantee relies on that workers can be created when
     147  through the use of rescue workers. All work items which might be used
     172  worker-pools which host workers which are not bound to any
     181  of mostly unused workers across different CPUs as the issuer
     206  each other. Each maintains its separate pool of workers and
    [all …]
|
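The workqueue.txt hits above describe the concurrency-managed worker pools behind the workqueue API: workers are created on demand, idle ones are parked cheaply, and rescue workers guarantee forward progress. For orientation only, here is a minimal kernel-module-style sketch of the consumer side of that API (the "demo" names are made up, not from the tree):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;

    static void demo_work_fn(struct work_struct *work)
    {
            pr_info("demo: running on a pool worker\n");
    }

    static DECLARE_WORK(demo_work, demo_work_fn);

    static int __init demo_init(void)
    {
            /* Unbound queue, at most 4 work items executing concurrently. */
            demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 4);
            if (!demo_wq)
                    return -ENOMEM;
            queue_work(demo_wq, &demo_work);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            /* Waits for pending work items before freeing the queue. */
            destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Which pool worker ends up running demo_work_fn(), and whether a new worker has to be spawned for it, is exactly the bookkeeping the document describes.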
D | kernel-per-CPU-kthreads.txt | 193 d. As of v3.18, Christoph Lameter's on-demand vmstat workers
|
/linux-4.4.14/Documentation/filesystems/pohmelfs/ |
D | info.txt |
      93  -w workers - number of workers per connected client. Default: 1.
      98  Number of worker threads specifies how many workers will be created for each client.
|
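The pohmelfs entry documents a configuration knob: -w sets how many worker threads serve each connected client, defaulting to 1. A hypothetical userspace sketch of the same idea (none of these names are pohmelfs code) parses the option and starts that many threads:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void *client_worker(void *arg)
    {
            int id = (int)(long)arg;

            printf("worker %d serving client requests\n", id);
            return NULL;
    }

    int main(int argc, char **argv)
    {
            int workers = 1;        /* default, as documented above */
            int opt, i;

            while ((opt = getopt(argc, argv, "w:")) != -1)
                    if (opt == 'w')
                            workers = atoi(optarg);

            pthread_t *tids = calloc(workers, sizeof(*tids));
            if (!tids)
                    return 1;
            for (i = 0; i < workers; i++)
                    pthread_create(&tids[i], NULL, client_worker, (void *)(long)i);
            for (i = 0; i < workers; i++)
                    pthread_join(tids[i], NULL);
            free(tids);
            return 0;
    }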
/linux-4.4.14/drivers/gpu/drm/i915/ |
D | i915_gem_userptr.c |
     219  if (!obj->userptr.workers)  in i915_mmu_notifier_add()
     617  obj->userptr.workers--;  in __i915_gem_userptr_get_pages_worker()
     653  if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)  in __i915_gem_userptr_get_pages_schedule()
     661  obj->userptr.workers++;  in __i915_gem_userptr_get_pages_schedule()
|
D | i915_drv.h | 2160 unsigned workers :4; member
|
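The i915_gem_userptr.c hits, together with the 4-bit workers field declared in i915_drv.h, show a bounded-concurrency pattern: a new get-pages worker is scheduled only while the per-object count is below I915_GEM_USERPTR_MAX_WORKERS; the count is bumped at scheduling time and dropped when the worker finishes. A stripped-down userspace sketch of that bounding, assuming a pthread mutex in place of the driver's locking (the types and MAX_WORKERS value here are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_WORKERS 15          /* fits a 4-bit counter like the driver's */

    struct obj {
            pthread_mutex_t lock;
            unsigned workers;       /* workers currently in flight */
    };

    /* Returns true if a new worker may start; bumps the count if so. */
    static bool try_schedule_worker(struct obj *o)
    {
            bool ok;

            pthread_mutex_lock(&o->lock);
            ok = o->workers < MAX_WORKERS;
            if (ok)
                    o->workers++;
            pthread_mutex_unlock(&o->lock);
            return ok;
    }

    /* Called by the worker when it completes. */
    static void worker_done(struct obj *o)
    {
            pthread_mutex_lock(&o->lock);
            o->workers--;
            pthread_mutex_unlock(&o->lock);
    }

    int main(void)
    {
            struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER, .workers = 0 };

            if (try_schedule_worker(&o)) {
                    printf("worker scheduled (%u in flight)\n", o.workers);
                    worker_done(&o);
            }
            return 0;
    }

Capping the count at scheduling time rather than in the worker is what lets the field stay small enough to live in the 4-bit bitfield shown in the i915_drv.h hit.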
/linux-4.4.14/drivers/md/ |
D | raid5.h | 431 struct r5worker *workers; member
|
D | raid5.c |
     273  group->workers[0].working = true;  in raid5_wakeup_stripe_thread()
     275  queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);  in raid5_wakeup_stripe_thread()
     280  if (group->workers[i].working == false) {  in raid5_wakeup_stripe_thread()
     281  group->workers[i].working = true;  in raid5_wakeup_stripe_thread()
     283  &group->workers[i].work);  in raid5_wakeup_stripe_thread()
    6201  kfree(old_groups[0].workers);  in raid5_store_group_thread_cnt()
    6237  struct r5worker *workers;  in alloc_thread_groups() local
    6247  workers = kzalloc(size * *group_cnt, GFP_NOIO);  in alloc_thread_groups()
    6250  if (!*worker_groups || !workers) {  in alloc_thread_groups()
    6251  kfree(workers);  in alloc_thread_groups()
    [all …]
|
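The raid5.c hits come from raid5_wakeup_stripe_thread(), which picks handlers out of a per-group array of r5worker entries: worker 0 is marked working and has its work item queued, and the group is then scanned for an additional member whose working flag is still false. A simplified sketch of that selection, with hypothetical struct names and printf() standing in for queue_work_on():

    #include <stdbool.h>
    #include <stdio.h>

    struct worker {
            bool working;
            int id;
    };

    struct worker_group {
            struct worker *workers;
            int cnt;
    };

    /* Kick worker 0, then also kick the first additional group member
     * whose working flag is still clear, as the raid5 hits above do. */
    static void wake_group_workers(struct worker_group *group)
    {
            int i;

            group->workers[0].working = true;
            printf("queue work on worker %d\n", group->workers[0].id);

            for (i = 1; i < group->cnt; i++) {
                    if (group->workers[i].working == false) {
                            group->workers[i].working = true;
                            printf("queue work on worker %d\n",
                                   group->workers[i].id);
                            break;
                    }
            }
    }

    int main(void)
    {
            struct worker w[3] = { { false, 0 }, { false, 1 }, { false, 2 } };
            struct worker_group g = { w, 3 };

            wake_group_workers(&g);
            return 0;
    }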
/linux-4.4.14/net/l2tp/ |
D | Kconfig | 22 with home workers to connect to their offices.
|
/linux-4.4.14/kernel/ |
D | workqueue.c |
     169  struct list_head workers;  /* A: attached workers */  member
     389  list_for_each_entry((worker), &(pool)->workers, node) \
    1682  list_add_tail(&worker->node, &pool->workers);  in worker_attach_to_pool()
    1703  if (list_empty(&pool->workers))  in worker_detach_from_pool()
    3124  INIT_LIST_HEAD(&pool->workers);  in init_worker_pool()
    3205  if (!list_empty(&pool->workers))  in put_unbound_pool()
|
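The workqueue.c hits all touch one piece of bookkeeping: each worker_pool keeps its attached workers on a list_head, worker_attach_to_pool() tail-inserts onto it, worker_detach_from_pool() checks whether it became empty, and put_unbound_pool() refuses to free a pool that still has workers. A userspace sketch of that attach/detach bookkeeping, with a small hand-rolled doubly linked list standing in for the kernel's list.h (all names illustrative, locking omitted):

    #include <stdbool.h>
    #include <stdio.h>

    struct worker {
            struct worker *next, *prev;     /* node on the pool's workers list */
            int id;
    };

    struct pool {
            struct worker workers;          /* list head: attached workers */
    };

    static void init_pool(struct pool *p)
    {
            p->workers.next = p->workers.prev = &p->workers;
    }

    /* Tail insert, as worker_attach_to_pool() does with list_add_tail(). */
    static void attach_worker(struct pool *p, struct worker *w)
    {
            w->prev = p->workers.prev;
            w->next = &p->workers;
            p->workers.prev->next = w;
            p->workers.prev = w;
    }

    static void detach_worker(struct worker *w)
    {
            w->prev->next = w->next;
            w->next->prev = w->prev;
    }

    static bool pool_has_workers(const struct pool *p)
    {
            return p->workers.next != &p->workers;  /* !list_empty() */
    }

    int main(void)
    {
            struct pool pool;
            struct worker a = { .id = 1 }, b = { .id = 2 };
            struct worker *w;

            init_pool(&pool);
            attach_worker(&pool, &a);
            attach_worker(&pool, &b);

            /* for_each_pool_worker() analogue */
            for (w = pool.workers.next; w != &pool.workers; w = w->next)
                    printf("attached worker %d\n", w->id);

            detach_worker(&a);
            detach_worker(&b);
            printf("pool may be destroyed: %s\n",
                   pool_has_workers(&pool) ? "no" : "yes");
            return 0;
    }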
/linux-4.4.14/drivers/block/mtip32xx/ |
D | mtip32xx.c |
     881  int do_irq_enable = 1, i, workers;  in mtip_handle_irq() local
     902  for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;  in mtip_handle_irq()
     907  workers++;  in mtip_handle_irq()
     910  atomic_set(&dd->irq_workers_active, workers);  in mtip_handle_irq()
     911  if (workers) {  in mtip_handle_irq()
|
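The mtip32xx hits count, inside mtip_handle_irq(), how many slot groups actually have completed commands, publish that number atomically in irq_workers_active, and only hand off per-group processing when it is non-zero. A compressed userspace sketch of that counting step, with C11 atomics standing in for the kernel's atomic_t (the completion data is invented for illustration):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_SLOT_GROUPS 8

    struct device_data {
            unsigned int completed[MAX_SLOT_GROUPS];  /* completion bits per group */
            atomic_int irq_workers_active;
    };

    static void handle_irq(struct device_data *dd)
    {
            int i, workers;

            /* Count the slot groups that actually have completed commands. */
            for (i = 0, workers = 0; i < MAX_SLOT_GROUPS; i++)
                    if (dd->completed[i])
                            workers++;

            /* Publish the count before kicking any per-group processing. */
            atomic_store(&dd->irq_workers_active, workers);

            if (workers)
                    printf("dispatching %d group worker(s)\n", workers);
    }

    int main(void)
    {
            struct device_data dd = { .completed = { 0x3, 0, 0x1 } };

            atomic_init(&dd.irq_workers_active, 0);
            handle_irq(&dd);
            return 0;
    }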
/linux-4.4.14/fs/btrfs/ |
D | disk-io.c |
     864  btrfs_queue_work(fs_info->workers, &async->work);  in btrfs_wq_submit_bio()
    2128  btrfs_destroy_workqueue(fs_info->workers);  in btrfs_stop_all_workers()
    2288  fs_info->workers =  in btrfs_init_workqueues()
    2345  if (!(fs_info->workers && fs_info->delalloc_workers &&  in btrfs_init_workqueues()
|
D | super.c | 1558 btrfs_workqueue_set_max(fs_info->workers, new_pool_size); in btrfs_resize_thread_pool()
|
D | ctree.h | 1621 struct btrfs_workqueue *workers; member
|
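The btrfs hits trace the lifecycle of one queue: fs_info->workers is created in btrfs_init_workqueues(), fed by btrfs_wq_submit_bio(), resized by btrfs_workqueue_set_max() when the thread_pool= mount option changes, and torn down in btrfs_stop_all_workers(). btrfs_workqueue is a filesystem-private wrapper, so the following is only a rough analogue of that lifecycle built on the plain workqueue API it sits on (names invented, to be compiled as part of a module):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *workers;

    /* Mount time: create the queue with the configured concurrency. */
    static int init_workers(int thread_pool_size)
    {
            workers = alloc_workqueue("demo-workers", WQ_UNBOUND, thread_pool_size);
            return workers ? 0 : -ENOMEM;
    }

    /* Remount with a new thread_pool= value: only max_active changes. */
    static void resize_workers(int new_pool_size)
    {
            workqueue_set_max_active(workers, new_pool_size);
    }

    /* Unmount: flush pending work and free the queue. */
    static void stop_workers(void)
    {
            destroy_workqueue(workers);
    }

Resizing on remount only adjusts the concurrency limit; the queue itself, and any work already queued on it, stays in place, which matches how btrfs_resize_thread_pool() is used in super.c above.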