Lines matching refs: pool

199 	struct worker_pool	*pool;		/* I: the associated pool */  member
356 #define for_each_cpu_worker_pool(pool, cpu) \ argument
357 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
358 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
359 (pool)++)
373 #define for_each_pool(pool, pi) \ argument
374 idr_for_each_entry(&worker_pool_idr, pool, pi) \
388 #define for_each_pool_worker(worker, pool) \ argument
389 list_for_each_entry((worker), &(pool)->workers, node) \
390 if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
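The three iterators above cover the three scopes a pool reference can have: the standard per-CPU pools of one CPU, every pool registered in worker_pool_idr, and the workers attached to a single pool (the last one asserting attach_mutex through lockdep). A minimal user-space sketch of the first iterator's shape follows; NR_CPUS and the nice values are illustrative assumptions, and the plain 2-D array stands in for the kernel's per_cpu() accessor. There are two standard pools per CPU, one normal and one high-priority.

#include <stdio.h>

#define NR_CPUS              4   /* illustrative CPU count for the model */
#define NR_STD_WORKER_POOLS  2   /* normal + highpri pool per CPU */

struct worker_pool { int cpu; int nice; };

/* stand-in for per_cpu(cpu_worker_pools, cpu) */
static struct worker_pool cpu_worker_pools[NR_CPUS][NR_STD_WORKER_POOLS];

/* same shape as for_each_cpu_worker_pool(): both standard pools of one CPU */
#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &cpu_worker_pools[cpu][0];			\
	     (pool) < &cpu_worker_pools[cpu][NR_STD_WORKER_POOLS];	\
	     (pool)++)

int main(void)
{
	struct worker_pool *pool;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for_each_cpu_worker_pool(pool, cpu) {
			pool->cpu = cpu;
			pool->nice = (pool - cpu_worker_pools[cpu]) ? -20 : 0;
			printf("cpu%d pool nice=%d\n", pool->cpu, pool->nice);
		}
	return 0;
}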
540 static int worker_pool_assign_id(struct worker_pool *pool) in worker_pool_assign_id() argument
546 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, in worker_pool_assign_id()
549 pool->id = ret; in worker_pool_assign_id()
723 (data & WORK_STRUCT_WQ_DATA_MASK))->pool; in get_work_pool()
745 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; in get_work_pool_id()
771 static bool __need_more_worker(struct worker_pool *pool) in __need_more_worker() argument
773 return !atomic_read(&pool->nr_running); in __need_more_worker()
784 static bool need_more_worker(struct worker_pool *pool) in need_more_worker() argument
786 return !list_empty(&pool->worklist) && __need_more_worker(pool); in need_more_worker()
790 static bool may_start_working(struct worker_pool *pool) in may_start_working() argument
792 return pool->nr_idle; in may_start_working()
796 static bool keep_working(struct worker_pool *pool) in keep_working() argument
798 return !list_empty(&pool->worklist) && in keep_working()
799 atomic_read(&pool->nr_running) <= 1; in keep_working()
803 static bool need_to_create_worker(struct worker_pool *pool) in need_to_create_worker() argument
805 return need_more_worker(pool) && !may_start_working(pool); in need_to_create_worker()
809 static bool too_many_workers(struct worker_pool *pool) in too_many_workers() argument
811 bool managing = mutex_is_locked(&pool->manager_arb); in too_many_workers()
812 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ in too_many_workers()
813 int nr_busy = pool->nr_workers - nr_idle; in too_many_workers()
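Together these predicates implement the pool's concurrency management: need_more_worker() wakes an idle worker only when work is pending and nothing is running, keep_working() lets the current worker keep consuming the worklist as long as at most one worker is running, and too_many_workers() decides when the idle population has grown out of proportion to the busy one so the idle timer can start culling. The user-space model below folds __need_more_worker() into need_more_worker() and makes the arithmetic concrete; the ratio constant and the floor of two idle workers are assumptions mirroring the kernel's MAX_IDLE_WORKERS_RATIO policy, not taken from the listing.

#include <stdbool.h>
#include <stdio.h>

#define MAX_IDLE_WORKERS_RATIO	4	/* assumed: roughly 1 idle per 4 busy */

struct pool_model {
	int  nr_running;	/* workers currently executing work */
	int  nr_idle;		/* workers parked on idle_list */
	int  nr_workers;	/* total workers attached to the pool */
	bool worklist_empty;	/* no pending work items? */
};

/* need_more_worker(): pending work but nothing running -> wake someone */
static bool need_more_worker(const struct pool_model *p)
{
	return !p->worklist_empty && p->nr_running == 0;
}

/* keep_working(): keep consuming work while concurrency stays at <= 1 */
static bool keep_working(const struct pool_model *p)
{
	return !p->worklist_empty && p->nr_running <= 1;
}

/* too_many_workers(): idle population out of proportion to busy population */
static bool too_many_workers(const struct pool_model *p)
{
	int nr_idle = p->nr_idle;
	int nr_busy = p->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

int main(void)
{
	struct pool_model p = { .nr_running = 0, .nr_idle = 3,
				.nr_workers = 4, .worklist_empty = false };

	printf("need_more=%d keep_working=%d too_many=%d\n",
	       need_more_worker(&p), keep_working(&p), too_many_workers(&p));
	return 0;
}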
823 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker() argument
825 if (unlikely(list_empty(&pool->idle_list))) in first_idle_worker()
828 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
840 static void wake_up_worker(struct worker_pool *pool) in wake_up_worker() argument
842 struct worker *worker = first_idle_worker(pool); in wake_up_worker()
864 WARN_ON_ONCE(worker->pool->cpu != cpu); in wq_worker_waking_up()
865 atomic_inc(&worker->pool->nr_running); in wq_worker_waking_up()
887 struct worker_pool *pool; in wq_worker_sleeping() local
897 pool = worker->pool; in wq_worker_sleeping()
900 if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu)) in wq_worker_sleeping()
914 if (atomic_dec_and_test(&pool->nr_running) && in wq_worker_sleeping()
915 !list_empty(&pool->worklist)) in wq_worker_sleeping()
916 to_wakeup = first_idle_worker(pool); in wq_worker_sleeping()
932 struct worker_pool *pool = worker->pool; in worker_set_flags() local
939 atomic_dec(&pool->nr_running); in worker_set_flags()
957 struct worker_pool *pool = worker->pool; in worker_clr_flags() local
971 atomic_inc(&pool->nr_running); in worker_clr_flags()
1007 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work() argument
1012 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
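find_worker_executing_work() answers "is this work item already running on this pool?" by hashing on the work item itself; process_one_work() adds the executing worker to busy_hash under the same key, so a re-queued item is detected as a collision and handed to the worker already running it instead of executing concurrently. A user-space model of that address-keyed lookup follows; the small open-hash array, the shift in the hash function, and the mark_busy() helper are illustrative stand-ins for the kernel's hashtable helpers rather than its actual API.

#include <stdio.h>

#define BUSY_HASH_BUCKETS 16

struct work_struct { int payload; };

struct worker {
	struct work_struct *current_work;	/* what this worker is running */
	struct worker *hnext;			/* busy_hash bucket chaining */
};

static struct worker *busy_hash[BUSY_HASH_BUCKETS];

static unsigned int hash_work(const struct work_struct *work)
{
	return ((unsigned long)work >> 4) % BUSY_HASH_BUCKETS;	/* keyed by address */
}

/* analogue of hash_add(pool->busy_hash, ...) in process_one_work() */
static void mark_busy(struct worker *worker, struct work_struct *work)
{
	unsigned int b = hash_work(work);

	worker->current_work = work;
	worker->hnext = busy_hash[b];
	busy_hash[b] = worker;
}

/* analogue of find_worker_executing_work(): NULL if nobody runs @work */
static struct worker *find_worker_executing_work(struct work_struct *work)
{
	struct worker *worker;

	for (worker = busy_hash[hash_work(work)]; worker; worker = worker->hnext)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

int main(void)
{
	struct work_struct work = { 42 };
	struct worker me = { 0 };

	printf("before: %p\n", (void *)find_worker_executing_work(&work));
	mark_busy(&me, &work);
	printf("after:  %p (me=%p)\n",
	       (void *)find_worker_executing_work(&work), (void *)&me);
	return 0;
}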
1071 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1085 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1114 spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1116 spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1125 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1215 struct worker_pool *pool; in try_to_grab_pending() local
1241 pool = get_work_pool(work); in try_to_grab_pending()
1242 if (!pool) in try_to_grab_pending()
1245 spin_lock(&pool->lock); in try_to_grab_pending()
1255 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1272 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1274 spin_unlock(&pool->lock); in try_to_grab_pending()
1277 spin_unlock(&pool->lock); in try_to_grab_pending()
1302 struct worker_pool *pool = pwq->pool; in insert_work() local
1316 if (__need_more_worker(pool)) in insert_work()
1317 wake_up_worker(pool); in insert_work()
1375 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1387 spin_lock(&pwq->pool->lock); in __queue_work()
1390 spin_lock(&pwq->pool->lock); in __queue_work()
1403 spin_unlock(&pwq->pool->lock); in __queue_work()
1416 spin_unlock(&pwq->pool->lock); in __queue_work()
1426 worklist = &pwq->pool->worklist; in __queue_work()
1434 spin_unlock(&pwq->pool->lock); in __queue_work()
1590 struct worker_pool *pool = worker->pool; in worker_enter_idle() local
1599 pool->nr_idle++; in worker_enter_idle()
1603 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1605 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) in worker_enter_idle()
1606 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); in worker_enter_idle()
1614 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in worker_enter_idle()
1615 pool->nr_workers == pool->nr_idle && in worker_enter_idle()
1616 atomic_read(&pool->nr_running)); in worker_enter_idle()
1630 struct worker_pool *pool = worker->pool; in worker_leave_idle() local
1635 pool->nr_idle--; in worker_leave_idle()
1664 struct worker_pool *pool) in worker_attach_to_pool() argument
1666 mutex_lock(&pool->attach_mutex); in worker_attach_to_pool()
1672 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); in worker_attach_to_pool()
1679 if (pool->flags & POOL_DISASSOCIATED) in worker_attach_to_pool()
1682 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1684 mutex_unlock(&pool->attach_mutex); in worker_attach_to_pool()
1697 struct worker_pool *pool) in worker_detach_from_pool() argument
1701 mutex_lock(&pool->attach_mutex); in worker_detach_from_pool()
1703 if (list_empty(&pool->workers)) in worker_detach_from_pool()
1704 detach_completion = pool->detach_completion; in worker_detach_from_pool()
1705 mutex_unlock(&pool->attach_mutex); in worker_detach_from_pool()
1726 static struct worker *create_worker(struct worker_pool *pool) in create_worker() argument
1733 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); in create_worker()
1737 worker = alloc_worker(pool->node); in create_worker()
1741 worker->pool = pool; in create_worker()
1744 if (pool->cpu >= 0) in create_worker()
1745 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, in create_worker()
1746 pool->attrs->nice < 0 ? "H" : ""); in create_worker()
1748 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); in create_worker()
1750 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, in create_worker()
1755 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
1756 kthread_bind_mask(worker->task, pool->attrs->cpumask); in create_worker()
1759 worker_attach_to_pool(worker, pool); in create_worker()
1762 spin_lock_irq(&pool->lock); in create_worker()
1763 worker->pool->nr_workers++; in create_worker()
1766 spin_unlock_irq(&pool->lock); in create_worker()
1772 ida_simple_remove(&pool->worker_ida, id); in create_worker()
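The two snprintf() calls in create_worker() are what give kernel worker threads their familiar names: per-CPU workers become "kworker/<cpu>:<id>", with an "H" suffix when the pool's nice value is negative (high priority), and unbound workers become "kworker/u<poolid>:<id>". A tiny user-space sketch of just the naming; kworker_name() is a hypothetical helper for illustration, only the format strings mirror the listing above.

#include <stdio.h>

/* hypothetical helper: reproduces the id_buf formatting in create_worker() */
static void kworker_name(char *buf, size_t len, int pool_cpu, int pool_id,
			 int worker_id, int nice)
{
	char id_buf[16];

	if (pool_cpu >= 0)		/* per-CPU pool: "H" marks highpri */
		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool_cpu,
			 worker_id, nice < 0 ? "H" : "");
	else				/* unbound pool: keyed by pool id */
		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool_id, worker_id);

	snprintf(buf, len, "kworker/%s", id_buf);
}

int main(void)
{
	char name[32];

	kworker_name(name, sizeof(name), 3, -1, 2, -20);
	printf("%s\n", name);		/* kworker/3:2H */
	kworker_name(name, sizeof(name), -1, 8, 0, 0);
	printf("%s\n", name);		/* kworker/u8:0 */
	return 0;
}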
1789 struct worker_pool *pool = worker->pool; in destroy_worker() local
1791 lockdep_assert_held(&pool->lock); in destroy_worker()
1799 pool->nr_workers--; in destroy_worker()
1800 pool->nr_idle--; in destroy_worker()
1809 struct worker_pool *pool = (void *)__pool; in idle_worker_timeout() local
1811 spin_lock_irq(&pool->lock); in idle_worker_timeout()
1813 while (too_many_workers(pool)) { in idle_worker_timeout()
1818 worker = list_entry(pool->idle_list.prev, struct worker, entry); in idle_worker_timeout()
1822 mod_timer(&pool->idle_timer, expires); in idle_worker_timeout()
1829 spin_unlock_irq(&pool->lock); in idle_worker_timeout()
1857 struct worker_pool *pool = (void *)__pool; in pool_mayday_timeout() local
1860 spin_lock_irq(&pool->lock); in pool_mayday_timeout()
1863 if (need_to_create_worker(pool)) { in pool_mayday_timeout()
1870 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
1875 spin_unlock_irq(&pool->lock); in pool_mayday_timeout()
1877 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); in pool_mayday_timeout()
1898 static void maybe_create_worker(struct worker_pool *pool) in maybe_create_worker() argument
1899 __releases(&pool->lock) in maybe_create_worker()
1900 __acquires(&pool->lock) in maybe_create_worker()
1903 spin_unlock_irq(&pool->lock); in maybe_create_worker()
1906 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); in maybe_create_worker()
1909 if (create_worker(pool) || !need_to_create_worker(pool)) in maybe_create_worker()
1914 if (!need_to_create_worker(pool)) in maybe_create_worker()
1918 del_timer_sync(&pool->mayday_timer); in maybe_create_worker()
1919 spin_lock_irq(&pool->lock); in maybe_create_worker()
1925 if (need_to_create_worker(pool)) in maybe_create_worker()
1953 struct worker_pool *pool = worker->pool; in manage_workers() local
1965 if (!mutex_trylock(&pool->manager_arb)) in manage_workers()
1967 pool->manager = worker; in manage_workers()
1969 maybe_create_worker(pool); in manage_workers()
1971 pool->manager = NULL; in manage_workers()
1972 mutex_unlock(&pool->manager_arb); in manage_workers()
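manage_workers() keeps pool management exclusive with a trylock on manager_arb: whichever worker wins becomes pool->manager and runs maybe_create_worker(); losers return immediately and go back to processing work rather than blocking. A minimal pthread sketch of that trylock pattern, assuming two competing threads; in the kernel the loser re-checks the pool state instead of just printing, and which thread wins here depends on scheduling.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t manager_arb = PTHREAD_MUTEX_INITIALIZER;

/* each thread plays a worker that finds the pool short on workers */
static void *worker_fn(void *name)
{
	if (pthread_mutex_trylock(&manager_arb) == 0) {
		/* won the arbitration: would run maybe_create_worker() here */
		printf("%s: became manager\n", (const char *)name);
		pthread_mutex_unlock(&manager_arb);
	} else {
		/* lost: go back to the worklist without waiting */
		printf("%s: manager busy, not managing\n", (const char *)name);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker_fn, "worker A");
	pthread_create(&b, NULL, worker_fn, "worker B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}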
1991 __releases(&pool->lock) in process_one_work()
1992 __acquires(&pool->lock) in process_one_work()
1995 struct worker_pool *pool = worker->pool; in process_one_work() local
2012 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in process_one_work()
2013 raw_smp_processor_id() != pool->cpu); in process_one_work()
2021 collision = find_worker_executing_work(pool, work); in process_one_work()
2029 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2053 if (need_more_worker(pool)) in process_one_work()
2054 wake_up_worker(pool); in process_one_work()
2062 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2064 spin_unlock_irq(&pool->lock); in process_one_work()
2097 spin_lock_irq(&pool->lock); in process_one_work()
2148 struct worker_pool *pool = worker->pool; in worker_thread() local
2153 spin_lock_irq(&pool->lock); in worker_thread()
2157 spin_unlock_irq(&pool->lock); in worker_thread()
2162 ida_simple_remove(&pool->worker_ida, worker->id); in worker_thread()
2163 worker_detach_from_pool(worker, pool); in worker_thread()
2171 if (!need_more_worker(pool)) in worker_thread()
2175 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
2196 list_first_entry(&pool->worklist, in worker_thread()
2208 } while (keep_working(pool)); in worker_thread()
2221 spin_unlock_irq(&pool->lock); in worker_thread()
2280 struct worker_pool *pool = pwq->pool; in rescuer_thread() local
2288 worker_attach_to_pool(rescuer, pool); in rescuer_thread()
2290 spin_lock_irq(&pool->lock); in rescuer_thread()
2291 rescuer->pool = pool; in rescuer_thread()
2298 list_for_each_entry_safe(work, n, &pool->worklist, entry) in rescuer_thread()
2314 if (need_to_create_worker(pool)) { in rescuer_thread()
2333 if (need_more_worker(pool)) in rescuer_thread()
2334 wake_up_worker(pool); in rescuer_thread()
2336 rescuer->pool = NULL; in rescuer_thread()
2337 spin_unlock_irq(&pool->lock); in rescuer_thread()
2339 worker_detach_from_pool(rescuer, pool); in rescuer_thread()
2475 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs() local
2477 spin_lock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2494 spin_unlock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2689 spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2691 spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
2714 struct worker_pool *pool; in start_flush_work() local
2720 pool = get_work_pool(work); in start_flush_work()
2721 if (!pool) { in start_flush_work()
2726 spin_lock(&pool->lock); in start_flush_work()
2730 if (unlikely(pwq->pool != pool)) in start_flush_work()
2733 worker = find_worker_executing_work(pool, work); in start_flush_work()
2740 spin_unlock_irq(&pool->lock); in start_flush_work()
2756 spin_unlock_irq(&pool->lock); in start_flush_work()
3104 static int init_worker_pool(struct worker_pool *pool) in init_worker_pool() argument
3106 spin_lock_init(&pool->lock); in init_worker_pool()
3107 pool->id = -1; in init_worker_pool()
3108 pool->cpu = -1; in init_worker_pool()
3109 pool->node = NUMA_NO_NODE; in init_worker_pool()
3110 pool->flags |= POOL_DISASSOCIATED; in init_worker_pool()
3111 INIT_LIST_HEAD(&pool->worklist); in init_worker_pool()
3112 INIT_LIST_HEAD(&pool->idle_list); in init_worker_pool()
3113 hash_init(pool->busy_hash); in init_worker_pool()
3115 init_timer_deferrable(&pool->idle_timer); in init_worker_pool()
3116 pool->idle_timer.function = idle_worker_timeout; in init_worker_pool()
3117 pool->idle_timer.data = (unsigned long)pool; in init_worker_pool()
3119 setup_timer(&pool->mayday_timer, pool_mayday_timeout, in init_worker_pool()
3120 (unsigned long)pool); in init_worker_pool()
3122 mutex_init(&pool->manager_arb); in init_worker_pool()
3123 mutex_init(&pool->attach_mutex); in init_worker_pool()
3124 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
3126 ida_init(&pool->worker_ida); in init_worker_pool()
3127 INIT_HLIST_NODE(&pool->hash_node); in init_worker_pool()
3128 pool->refcnt = 1; in init_worker_pool()
3131 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); in init_worker_pool()
3132 if (!pool->attrs) in init_worker_pool()
3153 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); in rcu_free_pool() local
3155 ida_destroy(&pool->worker_ida); in rcu_free_pool()
3156 free_workqueue_attrs(pool->attrs); in rcu_free_pool()
3157 kfree(pool); in rcu_free_pool()
3171 static void put_unbound_pool(struct worker_pool *pool) in put_unbound_pool() argument
3178 if (--pool->refcnt) in put_unbound_pool()
3182 if (WARN_ON(!(pool->cpu < 0)) || in put_unbound_pool()
3183 WARN_ON(!list_empty(&pool->worklist))) in put_unbound_pool()
3187 if (pool->id >= 0) in put_unbound_pool()
3188 idr_remove(&worker_pool_idr, pool->id); in put_unbound_pool()
3189 hash_del(&pool->hash_node); in put_unbound_pool()
3196 mutex_lock(&pool->manager_arb); in put_unbound_pool()
3198 spin_lock_irq(&pool->lock); in put_unbound_pool()
3199 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
3201 WARN_ON(pool->nr_workers || pool->nr_idle); in put_unbound_pool()
3202 spin_unlock_irq(&pool->lock); in put_unbound_pool()
3204 mutex_lock(&pool->attach_mutex); in put_unbound_pool()
3205 if (!list_empty(&pool->workers)) in put_unbound_pool()
3206 pool->detach_completion = &detach_completion; in put_unbound_pool()
3207 mutex_unlock(&pool->attach_mutex); in put_unbound_pool()
3209 if (pool->detach_completion) in put_unbound_pool()
3210 wait_for_completion(pool->detach_completion); in put_unbound_pool()
3212 mutex_unlock(&pool->manager_arb); in put_unbound_pool()
3215 del_timer_sync(&pool->idle_timer); in put_unbound_pool()
3216 del_timer_sync(&pool->mayday_timer); in put_unbound_pool()
3219 call_rcu_sched(&pool->rcu, rcu_free_pool); in put_unbound_pool()
3239 struct worker_pool *pool; in get_unbound_pool() local
3246 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { in get_unbound_pool()
3247 if (wqattrs_equal(pool->attrs, attrs)) { in get_unbound_pool()
3248 pool->refcnt++; in get_unbound_pool()
3249 return pool; in get_unbound_pool()
3265 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node); in get_unbound_pool()
3266 if (!pool || init_worker_pool(pool) < 0) in get_unbound_pool()
3269 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ in get_unbound_pool()
3270 copy_workqueue_attrs(pool->attrs, attrs); in get_unbound_pool()
3271 pool->node = target_node; in get_unbound_pool()
3277 pool->attrs->no_numa = false; in get_unbound_pool()
3279 if (worker_pool_assign_id(pool) < 0) in get_unbound_pool()
3283 if (!create_worker(pool)) in get_unbound_pool()
3287 hash_add(unbound_pool_hash, &pool->hash_node, hash); in get_unbound_pool()
3289 return pool; in get_unbound_pool()
3291 if (pool) in get_unbound_pool()
3292 put_unbound_pool(pool); in get_unbound_pool()
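get_unbound_pool() is a lookup-or-create cache: unbound pools are hashed by their attributes, an existing pool with equal attrs is reused with its refcount bumped, and only on a miss is a fresh pool allocated, assigned an id, given a first worker, and inserted into unbound_pool_hash; put_unbound_pool() drops the reference and tears the pool down when it hits zero. A user-space model of that pattern, with the attrs reduced to a single nice value and the hash replaced by a linear list purely for illustration.

#include <stdlib.h>
#include <stdio.h>

struct pool {
	int nice;		/* stands in for the full workqueue_attrs key */
	int refcnt;
	struct pool *next;	/* stands in for an unbound_pool_hash bucket */
};

static struct pool *unbound_pools;

/* lookup-or-create, mirroring the shape of get_unbound_pool() */
static struct pool *get_pool(int nice)
{
	struct pool *p;

	for (p = unbound_pools; p; p = p->next)
		if (p->nice == nice) {		/* wqattrs_equal() analogue */
			p->refcnt++;
			return p;
		}

	p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;
	p->nice = nice;
	p->refcnt = 1;
	p->next = unbound_pools;		/* hash_add() analogue */
	unbound_pools = p;
	return p;
}

static void put_pool(struct pool *p)
{
	if (--p->refcnt)
		return;
	/* a real teardown also unhashes, drains workers and frees via RCU */
}

int main(void)
{
	struct pool *a = get_pool(0), *b = get_pool(0), *c = get_pool(-20);

	printf("same pool reused: %d, refcnt=%d\n", a == b, a->refcnt);
	put_pool(b);
	printf("distinct highpri pool: %d\n", c != a);
	return 0;
}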
3311 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn() local
3323 put_unbound_pool(pool); in pwq_unbound_release_workfn()
3356 spin_lock_irq(&pwq->pool->lock); in pwq_adjust_max_active()
3374 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3379 spin_unlock_irq(&pwq->pool->lock); in pwq_adjust_max_active()
3384 struct worker_pool *pool) in init_pwq() argument
3390 pwq->pool = pool; in init_pwq()
3425 struct worker_pool *pool; in alloc_unbound_pwq() local
3430 pool = get_unbound_pool(attrs); in alloc_unbound_pwq()
3431 if (!pool) in alloc_unbound_pwq()
3434 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3436 put_unbound_pool(pool); in alloc_unbound_pwq()
3440 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3748 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { in wq_update_unbound_numa()
3749 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
3770 spin_lock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
3772 spin_unlock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
4109 struct worker_pool *pool; in work_busy() local
4117 pool = get_work_pool(work); in work_busy()
4118 if (pool) { in work_busy()
4119 spin_lock(&pool->lock); in work_busy()
4120 if (find_worker_executing_work(pool, work)) in work_busy()
4122 spin_unlock(&pool->lock); in work_busy()
4207 static void pr_cont_pool_info(struct worker_pool *pool) in pr_cont_pool_info() argument
4209 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); in pr_cont_pool_info()
4210 if (pool->node != NUMA_NO_NODE) in pr_cont_pool_info()
4211 pr_cont(" node=%d", pool->node); in pr_cont_pool_info()
4212 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); in pr_cont_pool_info()
4231 struct worker_pool *pool = pwq->pool; in show_pwq() local
4237 pr_info(" pwq %d:", pool->id); in show_pwq()
4238 pr_cont_pool_info(pool); in show_pwq()
4243 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4253 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4268 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4278 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4309 struct worker_pool *pool; in show_workqueue_state() local
4333 spin_lock_irqsave(&pwq->pool->lock, flags); in show_workqueue_state()
4336 spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_workqueue_state()
4340 for_each_pool(pool, pi) { in show_workqueue_state()
4344 spin_lock_irqsave(&pool->lock, flags); in show_workqueue_state()
4345 if (pool->nr_workers == pool->nr_idle) in show_workqueue_state()
4348 pr_info("pool %d:", pool->id); in show_workqueue_state()
4349 pr_cont_pool_info(pool); in show_workqueue_state()
4350 pr_cont(" workers=%d", pool->nr_workers); in show_workqueue_state()
4351 if (pool->manager) in show_workqueue_state()
4353 task_pid_nr(pool->manager->task)); in show_workqueue_state()
4354 list_for_each_entry(worker, &pool->idle_list, entry) { in show_workqueue_state()
4361 spin_unlock_irqrestore(&pool->lock, flags); in show_workqueue_state()
4385 struct worker_pool *pool; in wq_unbind_fn() local
4388 for_each_cpu_worker_pool(pool, cpu) { in wq_unbind_fn()
4389 mutex_lock(&pool->attach_mutex); in wq_unbind_fn()
4390 spin_lock_irq(&pool->lock); in wq_unbind_fn()
4399 for_each_pool_worker(worker, pool) in wq_unbind_fn()
4402 pool->flags |= POOL_DISASSOCIATED; in wq_unbind_fn()
4404 spin_unlock_irq(&pool->lock); in wq_unbind_fn()
4405 mutex_unlock(&pool->attach_mutex); in wq_unbind_fn()
4423 atomic_set(&pool->nr_running, 0); in wq_unbind_fn()
4430 spin_lock_irq(&pool->lock); in wq_unbind_fn()
4431 wake_up_worker(pool); in wq_unbind_fn()
4432 spin_unlock_irq(&pool->lock); in wq_unbind_fn()
4442 static void rebind_workers(struct worker_pool *pool) in rebind_workers() argument
4446 lockdep_assert_held(&pool->attach_mutex); in rebind_workers()
4455 for_each_pool_worker(worker, pool) in rebind_workers()
4457 pool->attrs->cpumask) < 0); in rebind_workers()
4459 spin_lock_irq(&pool->lock); in rebind_workers()
4466 if (!(pool->flags & POOL_DISASSOCIATED)) { in rebind_workers()
4467 spin_unlock_irq(&pool->lock); in rebind_workers()
4471 pool->flags &= ~POOL_DISASSOCIATED; in rebind_workers()
4473 for_each_pool_worker(worker, pool) { in rebind_workers()
4508 spin_unlock_irq(&pool->lock); in rebind_workers()
4521 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) in restore_unbound_workers_cpumask() argument
4526 lockdep_assert_held(&pool->attach_mutex); in restore_unbound_workers_cpumask()
4529 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) in restore_unbound_workers_cpumask()
4533 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); in restore_unbound_workers_cpumask()
4538 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
4540 pool->attrs->cpumask) < 0); in restore_unbound_workers_cpumask()
4552 struct worker_pool *pool; in workqueue_cpu_up_callback() local
4558 for_each_cpu_worker_pool(pool, cpu) { in workqueue_cpu_up_callback()
4559 if (pool->nr_workers) in workqueue_cpu_up_callback()
4561 if (!create_worker(pool)) in workqueue_cpu_up_callback()
4570 for_each_pool(pool, pi) { in workqueue_cpu_up_callback()
4571 mutex_lock(&pool->attach_mutex); in workqueue_cpu_up_callback()
4573 if (pool->cpu == cpu) in workqueue_cpu_up_callback()
4574 rebind_workers(pool); in workqueue_cpu_up_callback()
4575 else if (pool->cpu < 0) in workqueue_cpu_up_callback()
4576 restore_unbound_workers_cpumask(pool, cpu); in workqueue_cpu_up_callback()
4578 mutex_unlock(&pool->attach_mutex); in workqueue_cpu_up_callback()
4928 unbound_pwq_by_node(wq, node)->pool->id); in wq_pool_ids_show()
5281 struct worker_pool *pool; in init_workqueues() local
5284 for_each_cpu_worker_pool(pool, cpu) { in init_workqueues()
5285 BUG_ON(init_worker_pool(pool)); in init_workqueues()
5286 pool->cpu = cpu; in init_workqueues()
5287 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); in init_workqueues()
5288 pool->attrs->nice = std_nice[i++]; in init_workqueues()
5289 pool->node = cpu_to_node(cpu); in init_workqueues()
5293 BUG_ON(worker_pool_assign_id(pool)); in init_workqueues()
5300 struct worker_pool *pool; in init_workqueues() local
5302 for_each_cpu_worker_pool(pool, cpu) { in init_workqueues()
5303 pool->flags &= ~POOL_DISASSOCIATED; in init_workqueues()
5304 BUG_ON(!create_worker(pool)); in init_workqueues()