Lines matching references to pool (kernel/workqueue.c)

199 	struct worker_pool	*pool;		/* I: the associated pool */  member
361 #define for_each_cpu_worker_pool(pool, cpu) \ argument
362 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
363 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
364 (pool)++)
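The iterator above walks the NR_STD_WORKER_POOLS standard per-CPU pools of one CPU (the normal and the high-priority pool). A minimal sketch of how it is typically used, modeled on the init_workqueues() hunk near the end of this listing; the function name and the pr_info() body are illustrative, not lines from workqueue.c:

/*
 * Illustrative only: walk both standard worker pools of every possible
 * CPU. Assumes it sits inside kernel/workqueue.c, where cpu_worker_pools
 * and for_each_cpu_worker_pool() are visible.
 */
static void dump_cpu_pools(void)
{
	struct worker_pool *pool;
	int cpu;

	for_each_possible_cpu(cpu) {
		for_each_cpu_worker_pool(pool, cpu) {
			pr_info("cpu=%d pool=%d nice=%d\n",
				cpu, pool->id, pool->attrs->nice);
		}
	}
}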
378 #define for_each_pool(pool, pi) \ argument
379 idr_for_each_entry(&worker_pool_idr, pool, pi) \
393 #define for_each_pool_worker(worker, pool) \ argument
394 list_for_each_entry((worker), &(pool)->workers, node) \
395 if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
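The ({ lockdep_assert_held(&pool->attach_mutex); false; }) statement expression is a common trick for baking a locking assertion into an iterator macro: it fires the assertion on every iteration and then evaluates to false, so it never disturbs the caller's loop body. A standalone sketch of the same idiom, using assert() in place of lockdep and a made-up list type (statement expressions are a GNU C extension, which the kernel relies on):

/* Illustration of the assert-inside-iterator idiom; the node type and
 * the lock_held flag are invented for the example. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct node { int val; struct node *next; };

static bool lock_held = true;	/* stand-in for lockdep_assert_held() */

#define for_each_node(pos, head)				\
	for ((pos) = (head); (pos); (pos) = (pos)->next)	\
		if (({ assert(lock_held); false; })) { }	\
		else

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *pos;

	for_each_node(pos, &a)
		printf("%d\n", pos->val);	/* asserts lock_held each pass */
	return 0;
}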
545 static int worker_pool_assign_id(struct worker_pool *pool) in worker_pool_assign_id() argument
551 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, in worker_pool_assign_id()
554 pool->id = ret; in worker_pool_assign_id()
728 (data & WORK_STRUCT_WQ_DATA_MASK))->pool; in get_work_pool()
750 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; in get_work_pool_id()
776 static bool __need_more_worker(struct worker_pool *pool) in __need_more_worker() argument
778 return !atomic_read(&pool->nr_running); in __need_more_worker()
789 static bool need_more_worker(struct worker_pool *pool) in need_more_worker() argument
791 return !list_empty(&pool->worklist) && __need_more_worker(pool); in need_more_worker()
795 static bool may_start_working(struct worker_pool *pool) in may_start_working() argument
797 return pool->nr_idle; in may_start_working()
801 static bool keep_working(struct worker_pool *pool) in keep_working() argument
803 return !list_empty(&pool->worklist) && in keep_working()
804 atomic_read(&pool->nr_running) <= 1; in keep_working()
808 static bool need_to_create_worker(struct worker_pool *pool) in need_to_create_worker() argument
810 return need_more_worker(pool) && !may_start_working(pool); in need_to_create_worker()
814 static bool too_many_workers(struct worker_pool *pool) in too_many_workers() argument
816 bool managing = mutex_is_locked(&pool->manager_arb); in too_many_workers()
817 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ in too_many_workers()
818 int nr_busy = pool->nr_workers - nr_idle; in too_many_workers()
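Read together, the predicates above are the pool's self-management policy: more workers are needed only when work is queued and nothing is accounted as running, a new worker must be created only if no idle one can be started, a worker keeps going while at most one peer is running, and too_many_workers() triggers reaping when idlers pile up. A self-contained sketch of the first three decisions, with plain fields standing in for the pool's atomic counter and lists; the struct and field names are mine, the logic mirrors the return statements above:

/* Standalone model of need_more_worker() / may_start_working() /
 * need_to_create_worker(); not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct pool_state {
	int nr_running;		/* pool->nr_running (atomic in the kernel) */
	int nr_idle;		/* pool->nr_idle */
	bool has_work;		/* !list_empty(&pool->worklist) */
};

static bool need_more_worker(const struct pool_state *p)
{
	return p->has_work && p->nr_running == 0;
}

static bool may_start_working(const struct pool_state *p)
{
	return p->nr_idle > 0;
}

static bool need_to_create_worker(const struct pool_state *p)
{
	return need_more_worker(p) && !may_start_working(p);
}

int main(void)
{
	/* Work queued, nobody running, nobody idle: a worker must be created. */
	struct pool_state p = { .nr_running = 0, .nr_idle = 0, .has_work = true };

	printf("need_to_create_worker: %d\n", need_to_create_worker(&p));
	return 0;
}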
828 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker() argument
830 if (unlikely(list_empty(&pool->idle_list))) in first_idle_worker()
833 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
845 static void wake_up_worker(struct worker_pool *pool) in wake_up_worker() argument
847 struct worker *worker = first_idle_worker(pool); in wake_up_worker()
869 WARN_ON_ONCE(worker->pool->cpu != cpu); in wq_worker_waking_up()
870 atomic_inc(&worker->pool->nr_running); in wq_worker_waking_up()
892 struct worker_pool *pool; in wq_worker_sleeping() local
902 pool = worker->pool; in wq_worker_sleeping()
905 if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu)) in wq_worker_sleeping()
919 if (atomic_dec_and_test(&pool->nr_running) && in wq_worker_sleeping()
920 !list_empty(&pool->worklist)) in wq_worker_sleeping()
921 to_wakeup = first_idle_worker(pool); in wq_worker_sleeping()
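wq_worker_sleeping() is the scheduler-side hook: when a running worker is about to block, the pool's running count is decremented, and if that worker was the last runner while work is still queued, the first idle worker is handed back to the scheduler to wake. The same decrement-and-test handoff reduced to a runnable sketch with C11 atomics; the struct and the integer worker id are illustrative:

/* Illustrative decrement-and-test handoff, modeled on the
 * wq_worker_sleeping() lines above; not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
	atomic_int nr_running;
	bool work_pending;	/* stands in for !list_empty(&pool->worklist) */
	int first_idle;		/* stands in for first_idle_worker(); -1 = none */
};

/* Returns the worker id to wake, or -1 if no wakeup is needed. */
static int worker_sleeping(struct pool *p)
{
	if (atomic_fetch_sub(&p->nr_running, 1) == 1 && p->work_pending)
		return p->first_idle;
	return -1;
}

int main(void)
{
	struct pool p = { .nr_running = 1, .work_pending = true, .first_idle = 7 };

	printf("wake worker %d\n", worker_sleeping(&p));
	return 0;
}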
937 struct worker_pool *pool = worker->pool; in worker_set_flags() local
944 atomic_dec(&pool->nr_running); in worker_set_flags()
962 struct worker_pool *pool = worker->pool; in worker_clr_flags() local
976 atomic_inc(&pool->nr_running); in worker_clr_flags()
1012 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work() argument
1017 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1076 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1090 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1119 spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1121 spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1130 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1220 struct worker_pool *pool; in try_to_grab_pending() local
1246 pool = get_work_pool(work); in try_to_grab_pending()
1247 if (!pool) in try_to_grab_pending()
1250 spin_lock(&pool->lock); in try_to_grab_pending()
1260 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1277 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1279 spin_unlock(&pool->lock); in try_to_grab_pending()
1282 spin_unlock(&pool->lock); in try_to_grab_pending()
1307 struct worker_pool *pool = pwq->pool; in insert_work() local
1321 if (__need_more_worker(pool)) in insert_work()
1322 wake_up_worker(pool); in insert_work()
1380 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1392 spin_lock(&pwq->pool->lock); in __queue_work()
1395 spin_lock(&pwq->pool->lock); in __queue_work()
1408 spin_unlock(&pwq->pool->lock); in __queue_work()
1421 spin_unlock(&pwq->pool->lock); in __queue_work()
1431 worklist = &pwq->pool->worklist; in __queue_work()
1439 spin_unlock(&pwq->pool->lock); in __queue_work()
1595 struct worker_pool *pool = worker->pool; in worker_enter_idle() local
1604 pool->nr_idle++; in worker_enter_idle()
1608 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1610 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) in worker_enter_idle()
1611 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); in worker_enter_idle()
1619 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in worker_enter_idle()
1620 pool->nr_workers == pool->nr_idle && in worker_enter_idle()
1621 atomic_read(&pool->nr_running)); in worker_enter_idle()
1635 struct worker_pool *pool = worker->pool; in worker_leave_idle() local
1640 pool->nr_idle--; in worker_leave_idle()
1669 struct worker_pool *pool) in worker_attach_to_pool() argument
1671 mutex_lock(&pool->attach_mutex); in worker_attach_to_pool()
1677 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); in worker_attach_to_pool()
1684 if (pool->flags & POOL_DISASSOCIATED) in worker_attach_to_pool()
1687 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1689 mutex_unlock(&pool->attach_mutex); in worker_attach_to_pool()
1702 struct worker_pool *pool) in worker_detach_from_pool() argument
1706 mutex_lock(&pool->attach_mutex); in worker_detach_from_pool()
1708 if (list_empty(&pool->workers)) in worker_detach_from_pool()
1709 detach_completion = pool->detach_completion; in worker_detach_from_pool()
1710 mutex_unlock(&pool->attach_mutex); in worker_detach_from_pool()
1731 static struct worker *create_worker(struct worker_pool *pool) in create_worker() argument
1738 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); in create_worker()
1742 worker = alloc_worker(pool->node); in create_worker()
1746 worker->pool = pool; in create_worker()
1749 if (pool->cpu >= 0) in create_worker()
1750 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, in create_worker()
1751 pool->attrs->nice < 0 ? "H" : ""); in create_worker()
1753 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); in create_worker()
1755 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, in create_worker()
1760 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
1766 worker_attach_to_pool(worker, pool); in create_worker()
1769 spin_lock_irq(&pool->lock); in create_worker()
1770 worker->pool->nr_workers++; in create_worker()
1773 spin_unlock_irq(&pool->lock); in create_worker()
1779 ida_simple_remove(&pool->worker_ida, id); in create_worker()
1796 struct worker_pool *pool = worker->pool; in destroy_worker() local
1798 lockdep_assert_held(&pool->lock); in destroy_worker()
1806 pool->nr_workers--; in destroy_worker()
1807 pool->nr_idle--; in destroy_worker()
1816 struct worker_pool *pool = (void *)__pool; in idle_worker_timeout() local
1818 spin_lock_irq(&pool->lock); in idle_worker_timeout()
1820 while (too_many_workers(pool)) { in idle_worker_timeout()
1825 worker = list_entry(pool->idle_list.prev, struct worker, entry); in idle_worker_timeout()
1829 mod_timer(&pool->idle_timer, expires); in idle_worker_timeout()
1836 spin_unlock_irq(&pool->lock); in idle_worker_timeout()
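idle_worker_timeout() is the reaping side of too_many_workers(): it walks the idle list from the tail (the longest-idle worker, hence the .prev access above), destroys idlers while the pool still has too many, and otherwise re-arms the timer; the per-worker expiry check sits on lines that do not match the search and so are not shown. A standalone sketch of the reap-from-the-back loop, with a plain count threshold standing in for the kernel's idle/busy ratio:

/* Illustrative reap-oldest-idler loop; the MAX_IDLE threshold is a
 * stand-in for too_many_workers(), not the kernel's actual policy. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_IDLE 2

static int nr_idle = 5;

static bool too_many_workers(void)
{
	return nr_idle > MAX_IDLE;
}

static void destroy_oldest_idle_worker(void)
{
	/* kernel: list_entry(pool->idle_list.prev, struct worker, entry) */
	nr_idle--;
	printf("destroyed an idle worker, %d left\n", nr_idle);
}

static void idle_worker_timeout(void)
{
	while (too_many_workers())
		destroy_oldest_idle_worker();
	/* kernel: if the oldest idler has not timed out yet, mod_timer()
	 * re-arms the idle timer for its expiry instead */
}

int main(void)
{
	idle_worker_timeout();
	return 0;
}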
1864 struct worker_pool *pool = (void *)__pool; in pool_mayday_timeout() local
1867 spin_lock_irq(&pool->lock); in pool_mayday_timeout()
1870 if (need_to_create_worker(pool)) { in pool_mayday_timeout()
1877 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
1882 spin_unlock_irq(&pool->lock); in pool_mayday_timeout()
1884 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); in pool_mayday_timeout()
1905 static void maybe_create_worker(struct worker_pool *pool) in maybe_create_worker() argument
1906 __releases(&pool->lock) in maybe_create_worker()
1907 __acquires(&pool->lock) in maybe_create_worker()
1910 spin_unlock_irq(&pool->lock); in maybe_create_worker()
1913 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); in maybe_create_worker()
1916 if (create_worker(pool) || !need_to_create_worker(pool)) in maybe_create_worker()
1921 if (!need_to_create_worker(pool)) in maybe_create_worker()
1925 del_timer_sync(&pool->mayday_timer); in maybe_create_worker()
1926 spin_lock_irq(&pool->lock); in maybe_create_worker()
1932 if (need_to_create_worker(pool)) in maybe_create_worker()
1960 struct worker_pool *pool = worker->pool; in manage_workers() local
1972 if (!mutex_trylock(&pool->manager_arb)) in manage_workers()
1974 pool->manager = worker; in manage_workers()
1976 maybe_create_worker(pool); in manage_workers()
1978 pool->manager = NULL; in manage_workers()
1979 mutex_unlock(&pool->manager_arb); in manage_workers()
1998 __releases(&pool->lock) in process_one_work()
1999 __acquires(&pool->lock) in process_one_work()
2002 struct worker_pool *pool = worker->pool; in process_one_work() local
2019 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in process_one_work()
2020 raw_smp_processor_id() != pool->cpu); in process_one_work()
2028 collision = find_worker_executing_work(pool, work); in process_one_work()
2036 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2060 if (need_more_worker(pool)) in process_one_work()
2061 wake_up_worker(pool); in process_one_work()
2069 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2071 spin_unlock_irq(&pool->lock); in process_one_work()
2104 spin_lock_irq(&pool->lock); in process_one_work()
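process_one_work() carries __releases/__acquires annotations because the pool lock is deliberately dropped across the user-supplied work function (the unlock at 2071 and relock at 2104 above), so an arbitrarily long callback never runs under a spinlock. The same drop-the-lock-around-a-callback shape as a runnable pthread sketch; all names here are illustrative:

/* Illustrative: release the lock for the duration of a user callback,
 * mirroring the unlock/relock pair in process_one_work(). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void run_one(void (*fn)(void *), void *arg)
{
	pthread_mutex_lock(&pool_lock);
	/* ...dequeue the item, mark it in flight... */
	pthread_mutex_unlock(&pool_lock);	/* never hold the lock across fn */

	fn(arg);				/* may block or run for a long time */

	pthread_mutex_lock(&pool_lock);
	/* ...post-processing under the lock... */
	pthread_mutex_unlock(&pool_lock);
}

static void say_hello(void *arg)
{
	printf("hello from work item %s\n", (const char *)arg);
}

int main(void)
{
	run_one(say_hello, "A");
	return 0;
}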
2155 struct worker_pool *pool = worker->pool; in worker_thread() local
2160 spin_lock_irq(&pool->lock); in worker_thread()
2164 spin_unlock_irq(&pool->lock); in worker_thread()
2169 ida_simple_remove(&pool->worker_ida, worker->id); in worker_thread()
2170 worker_detach_from_pool(worker, pool); in worker_thread()
2178 if (!need_more_worker(pool)) in worker_thread()
2182 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
2203 list_first_entry(&pool->worklist, in worker_thread()
2215 } while (keep_working(pool)); in worker_thread()
2228 spin_unlock_irq(&pool->lock); in worker_thread()
2287 struct worker_pool *pool = pwq->pool; in rescuer_thread() local
2295 worker_attach_to_pool(rescuer, pool); in rescuer_thread()
2297 spin_lock_irq(&pool->lock); in rescuer_thread()
2298 rescuer->pool = pool; in rescuer_thread()
2305 list_for_each_entry_safe(work, n, &pool->worklist, entry) in rescuer_thread()
2321 if (need_to_create_worker(pool)) { in rescuer_thread()
2340 if (need_more_worker(pool)) in rescuer_thread()
2341 wake_up_worker(pool); in rescuer_thread()
2343 rescuer->pool = NULL; in rescuer_thread()
2344 spin_unlock_irq(&pool->lock); in rescuer_thread()
2346 worker_detach_from_pool(rescuer, pool); in rescuer_thread()
2482 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs() local
2484 spin_lock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2501 spin_unlock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2696 spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2698 spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
2721 struct worker_pool *pool; in start_flush_work() local
2727 pool = get_work_pool(work); in start_flush_work()
2728 if (!pool) { in start_flush_work()
2733 spin_lock(&pool->lock); in start_flush_work()
2737 if (unlikely(pwq->pool != pool)) in start_flush_work()
2740 worker = find_worker_executing_work(pool, work); in start_flush_work()
2747 spin_unlock_irq(&pool->lock); in start_flush_work()
2763 spin_unlock_irq(&pool->lock); in start_flush_work()
3141 static int init_worker_pool(struct worker_pool *pool) in init_worker_pool() argument
3143 spin_lock_init(&pool->lock); in init_worker_pool()
3144 pool->id = -1; in init_worker_pool()
3145 pool->cpu = -1; in init_worker_pool()
3146 pool->node = NUMA_NO_NODE; in init_worker_pool()
3147 pool->flags |= POOL_DISASSOCIATED; in init_worker_pool()
3148 INIT_LIST_HEAD(&pool->worklist); in init_worker_pool()
3149 INIT_LIST_HEAD(&pool->idle_list); in init_worker_pool()
3150 hash_init(pool->busy_hash); in init_worker_pool()
3152 init_timer_deferrable(&pool->idle_timer); in init_worker_pool()
3153 pool->idle_timer.function = idle_worker_timeout; in init_worker_pool()
3154 pool->idle_timer.data = (unsigned long)pool; in init_worker_pool()
3156 setup_timer(&pool->mayday_timer, pool_mayday_timeout, in init_worker_pool()
3157 (unsigned long)pool); in init_worker_pool()
3159 mutex_init(&pool->manager_arb); in init_worker_pool()
3160 mutex_init(&pool->attach_mutex); in init_worker_pool()
3161 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
3163 ida_init(&pool->worker_ida); in init_worker_pool()
3164 INIT_HLIST_NODE(&pool->hash_node); in init_worker_pool()
3165 pool->refcnt = 1; in init_worker_pool()
3168 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); in init_worker_pool()
3169 if (!pool->attrs) in init_worker_pool()
3190 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); in rcu_free_pool() local
3192 ida_destroy(&pool->worker_ida); in rcu_free_pool()
3193 free_workqueue_attrs(pool->attrs); in rcu_free_pool()
3194 kfree(pool); in rcu_free_pool()
3208 static void put_unbound_pool(struct worker_pool *pool) in put_unbound_pool() argument
3215 if (--pool->refcnt) in put_unbound_pool()
3219 if (WARN_ON(!(pool->cpu < 0)) || in put_unbound_pool()
3220 WARN_ON(!list_empty(&pool->worklist))) in put_unbound_pool()
3224 if (pool->id >= 0) in put_unbound_pool()
3225 idr_remove(&worker_pool_idr, pool->id); in put_unbound_pool()
3226 hash_del(&pool->hash_node); in put_unbound_pool()
3233 mutex_lock(&pool->manager_arb); in put_unbound_pool()
3235 spin_lock_irq(&pool->lock); in put_unbound_pool()
3236 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
3238 WARN_ON(pool->nr_workers || pool->nr_idle); in put_unbound_pool()
3239 spin_unlock_irq(&pool->lock); in put_unbound_pool()
3241 mutex_lock(&pool->attach_mutex); in put_unbound_pool()
3242 if (!list_empty(&pool->workers)) in put_unbound_pool()
3243 pool->detach_completion = &detach_completion; in put_unbound_pool()
3244 mutex_unlock(&pool->attach_mutex); in put_unbound_pool()
3246 if (pool->detach_completion) in put_unbound_pool()
3247 wait_for_completion(pool->detach_completion); in put_unbound_pool()
3249 mutex_unlock(&pool->manager_arb); in put_unbound_pool()
3252 del_timer_sync(&pool->idle_timer); in put_unbound_pool()
3253 del_timer_sync(&pool->mayday_timer); in put_unbound_pool()
3256 call_rcu_sched(&pool->rcu, rcu_free_pool); in put_unbound_pool()
3276 struct worker_pool *pool; in get_unbound_pool() local
3282 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { in get_unbound_pool()
3283 if (wqattrs_equal(pool->attrs, attrs)) { in get_unbound_pool()
3284 pool->refcnt++; in get_unbound_pool()
3285 return pool; in get_unbound_pool()
3290 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in get_unbound_pool()
3291 if (!pool || init_worker_pool(pool) < 0) in get_unbound_pool()
3294 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ in get_unbound_pool()
3295 copy_workqueue_attrs(pool->attrs, attrs); in get_unbound_pool()
3301 pool->attrs->no_numa = false; in get_unbound_pool()
3306 if (cpumask_subset(pool->attrs->cpumask, in get_unbound_pool()
3308 pool->node = node; in get_unbound_pool()
3314 if (worker_pool_assign_id(pool) < 0) in get_unbound_pool()
3318 if (!create_worker(pool)) in get_unbound_pool()
3322 hash_add(unbound_pool_hash, &pool->hash_node, hash); in get_unbound_pool()
3324 return pool; in get_unbound_pool()
3326 if (pool) in get_unbound_pool()
3327 put_unbound_pool(pool); in get_unbound_pool()
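get_unbound_pool() is a refcounted lookup-or-create cache keyed by workqueue_attrs: a pool whose attributes compare equal just gains a reference, otherwise a new pool is allocated, initialized, given an id and a first worker, and added to unbound_pool_hash, with put_unbound_pool() unwinding on failure. The same pattern reduced to a runnable sketch, with an integer key and a singly linked list standing in for the hashed attrs and the hash table; every name below is illustrative:

/* Illustrative refcounted lookup-or-create, modeled on the
 * get_unbound_pool()/put_unbound_pool() flow above; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct pool {
	int key;		/* stands in for hashed workqueue_attrs */
	int refcnt;
	struct pool *next;	/* stands in for the unbound_pool_hash bucket */
};

static struct pool *pools;

static struct pool *get_pool(int key)
{
	struct pool *p;

	for (p = pools; p; p = p->next) {
		if (p->key == key) {		/* wqattrs_equal() */
			p->refcnt++;
			return p;
		}
	}

	p = calloc(1, sizeof(*p));		/* kzalloc() */
	if (!p)
		return NULL;
	p->key = key;
	p->refcnt = 1;
	p->next = pools;			/* hash_add() */
	pools = p;
	return p;
}

static void put_pool(struct pool *p)
{
	struct pool **link;

	if (--p->refcnt)
		return;
	for (link = &pools; *link; link = &(*link)->next) {
		if (*link == p) {
			*link = p->next;	/* hash_del() */
			break;
		}
	}
	free(p);
}

int main(void)
{
	struct pool *a = get_pool(42);
	struct pool *b = get_pool(42);	/* same key: same pool, refcnt bumped */

	printf("same pool: %d, refcnt: %d\n", a == b, a->refcnt);
	put_pool(b);
	put_pool(a);
	return 0;
}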
3346 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn() local
3358 put_unbound_pool(pool); in pwq_unbound_release_workfn()
3391 spin_lock_irq(&pwq->pool->lock); in pwq_adjust_max_active()
3409 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3414 spin_unlock_irq(&pwq->pool->lock); in pwq_adjust_max_active()
3419 struct worker_pool *pool) in init_pwq() argument
3425 pwq->pool = pool; in init_pwq()
3460 struct worker_pool *pool; in alloc_unbound_pwq() local
3465 pool = get_unbound_pool(attrs); in alloc_unbound_pwq()
3466 if (!pool) in alloc_unbound_pwq()
3469 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3471 put_unbound_pool(pool); in alloc_unbound_pwq()
3475 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3762 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
3790 spin_lock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
3792 spin_unlock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
4129 struct worker_pool *pool; in work_busy() local
4137 pool = get_work_pool(work); in work_busy()
4138 if (pool) { in work_busy()
4139 spin_lock(&pool->lock); in work_busy()
4140 if (find_worker_executing_work(pool, work)) in work_busy()
4142 spin_unlock(&pool->lock); in work_busy()
4227 static void pr_cont_pool_info(struct worker_pool *pool) in pr_cont_pool_info() argument
4229 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); in pr_cont_pool_info()
4230 if (pool->node != NUMA_NO_NODE) in pr_cont_pool_info()
4231 pr_cont(" node=%d", pool->node); in pr_cont_pool_info()
4232 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); in pr_cont_pool_info()
4251 struct worker_pool *pool = pwq->pool; in show_pwq() local
4257 pr_info(" pwq %d:", pool->id); in show_pwq()
4258 pr_cont_pool_info(pool); in show_pwq()
4263 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4273 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4288 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4298 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4329 struct worker_pool *pool; in show_workqueue_state() local
4353 spin_lock_irqsave(&pwq->pool->lock, flags); in show_workqueue_state()
4356 spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_workqueue_state()
4360 for_each_pool(pool, pi) { in show_workqueue_state()
4364 spin_lock_irqsave(&pool->lock, flags); in show_workqueue_state()
4365 if (pool->nr_workers == pool->nr_idle) in show_workqueue_state()
4368 pr_info("pool %d:", pool->id); in show_workqueue_state()
4369 pr_cont_pool_info(pool); in show_workqueue_state()
4370 pr_cont(" workers=%d", pool->nr_workers); in show_workqueue_state()
4371 if (pool->manager) in show_workqueue_state()
4373 task_pid_nr(pool->manager->task)); in show_workqueue_state()
4374 list_for_each_entry(worker, &pool->idle_list, entry) { in show_workqueue_state()
4381 spin_unlock_irqrestore(&pool->lock, flags); in show_workqueue_state()
4405 struct worker_pool *pool; in wq_unbind_fn() local
4408 for_each_cpu_worker_pool(pool, cpu) { in wq_unbind_fn()
4409 mutex_lock(&pool->attach_mutex); in wq_unbind_fn()
4410 spin_lock_irq(&pool->lock); in wq_unbind_fn()
4419 for_each_pool_worker(worker, pool) in wq_unbind_fn()
4422 pool->flags |= POOL_DISASSOCIATED; in wq_unbind_fn()
4424 spin_unlock_irq(&pool->lock); in wq_unbind_fn()
4425 mutex_unlock(&pool->attach_mutex); in wq_unbind_fn()
4443 atomic_set(&pool->nr_running, 0); in wq_unbind_fn()
4450 spin_lock_irq(&pool->lock); in wq_unbind_fn()
4451 wake_up_worker(pool); in wq_unbind_fn()
4452 spin_unlock_irq(&pool->lock); in wq_unbind_fn()
4462 static void rebind_workers(struct worker_pool *pool) in rebind_workers() argument
4466 lockdep_assert_held(&pool->attach_mutex); in rebind_workers()
4475 for_each_pool_worker(worker, pool) in rebind_workers()
4477 pool->attrs->cpumask) < 0); in rebind_workers()
4479 spin_lock_irq(&pool->lock); in rebind_workers()
4486 if (!(pool->flags & POOL_DISASSOCIATED)) { in rebind_workers()
4487 spin_unlock_irq(&pool->lock); in rebind_workers()
4491 pool->flags &= ~POOL_DISASSOCIATED; in rebind_workers()
4493 for_each_pool_worker(worker, pool) { in rebind_workers()
4528 spin_unlock_irq(&pool->lock); in rebind_workers()
4541 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) in restore_unbound_workers_cpumask() argument
4546 lockdep_assert_held(&pool->attach_mutex); in restore_unbound_workers_cpumask()
4549 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) in restore_unbound_workers_cpumask()
4553 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); in restore_unbound_workers_cpumask()
4558 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
4560 pool->attrs->cpumask) < 0); in restore_unbound_workers_cpumask()
4572 struct worker_pool *pool; in workqueue_cpu_up_callback() local
4578 for_each_cpu_worker_pool(pool, cpu) { in workqueue_cpu_up_callback()
4579 if (pool->nr_workers) in workqueue_cpu_up_callback()
4581 if (!create_worker(pool)) in workqueue_cpu_up_callback()
4590 for_each_pool(pool, pi) { in workqueue_cpu_up_callback()
4591 mutex_lock(&pool->attach_mutex); in workqueue_cpu_up_callback()
4593 if (pool->cpu == cpu) in workqueue_cpu_up_callback()
4594 rebind_workers(pool); in workqueue_cpu_up_callback()
4595 else if (pool->cpu < 0) in workqueue_cpu_up_callback()
4596 restore_unbound_workers_cpumask(pool, cpu); in workqueue_cpu_up_callback()
4598 mutex_unlock(&pool->attach_mutex); in workqueue_cpu_up_callback()
4872 unbound_pwq_by_node(wq, node)->pool->id); in wq_pool_ids_show()
5170 struct worker_pool *pool; in init_workqueues() local
5173 for_each_cpu_worker_pool(pool, cpu) { in init_workqueues()
5174 BUG_ON(init_worker_pool(pool)); in init_workqueues()
5175 pool->cpu = cpu; in init_workqueues()
5176 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); in init_workqueues()
5177 pool->attrs->nice = std_nice[i++]; in init_workqueues()
5178 pool->node = cpu_to_node(cpu); in init_workqueues()
5182 BUG_ON(worker_pool_assign_id(pool)); in init_workqueues()
5189 struct worker_pool *pool; in init_workqueues() local
5191 for_each_cpu_worker_pool(pool, cpu) { in init_workqueues()
5192 pool->flags &= ~POOL_DISASSOCIATED; in init_workqueues()
5193 BUG_ON(!create_worker(pool)); in init_workqueues()