Lines matching refs: pwq (pool_workqueue references in kernel/workqueue.c; illustrative sketches follow the listing)

405 #define for_each_pwq(pwq, wq)						\  argument
406 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
627 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
630 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
1069 static void get_pwq(struct pool_workqueue *pwq) in get_pwq() argument
1071 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1072 WARN_ON_ONCE(pwq->refcnt <= 0); in get_pwq()
1073 pwq->refcnt++; in get_pwq()
1083 static void put_pwq(struct pool_workqueue *pwq) in put_pwq() argument
1085 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1086 if (likely(--pwq->refcnt)) in put_pwq()
1088 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) in put_pwq()
1098 schedule_work(&pwq->unbound_release_work); in put_pwq()
1107 static void put_pwq_unlocked(struct pool_workqueue *pwq) in put_pwq_unlocked() argument
1109 if (pwq) { in put_pwq_unlocked()
1114 spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1115 put_pwq(pwq); in put_pwq_unlocked()
1116 spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1122 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_delayed_work() local
1125 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1127 pwq->nr_active++; in pwq_activate_delayed_work()
1130 static void pwq_activate_first_delayed(struct pool_workqueue *pwq) in pwq_activate_first_delayed() argument
1132 struct work_struct *work = list_first_entry(&pwq->delayed_works, in pwq_activate_first_delayed()
1149 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) in pwq_dec_nr_in_flight() argument
1155 pwq->nr_in_flight[color]--; in pwq_dec_nr_in_flight()
1157 pwq->nr_active--; in pwq_dec_nr_in_flight()
1158 if (!list_empty(&pwq->delayed_works)) { in pwq_dec_nr_in_flight()
1160 if (pwq->nr_active < pwq->max_active) in pwq_dec_nr_in_flight()
1161 pwq_activate_first_delayed(pwq); in pwq_dec_nr_in_flight()
1165 if (likely(pwq->flush_color != color)) in pwq_dec_nr_in_flight()
1169 if (pwq->nr_in_flight[color]) in pwq_dec_nr_in_flight()
1173 pwq->flush_color = -1; in pwq_dec_nr_in_flight()
1179 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
1180 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
1182 put_pwq(pwq); in pwq_dec_nr_in_flight()
1216 struct pool_workqueue *pwq; in try_to_grab_pending() local
1254 pwq = get_work_pwq(work); in try_to_grab_pending()
1255 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1269 pwq_dec_nr_in_flight(pwq, get_work_color(work)); in try_to_grab_pending()
1299 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1302 struct worker_pool *pool = pwq->pool; in insert_work()
1305 set_work_pwq(work, pwq, extra_flags); in insert_work()
1307 get_pwq(pwq); in insert_work()
1339 struct pool_workqueue *pwq; in __queue_work() local
1365 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
1367 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in __queue_work()
1375 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1383 pwq = worker->current_pwq; in __queue_work()
1387 spin_lock(&pwq->pool->lock); in __queue_work()
1390 spin_lock(&pwq->pool->lock); in __queue_work()
1401 if (unlikely(!pwq->refcnt)) { in __queue_work()
1403 spin_unlock(&pwq->pool->lock); in __queue_work()
1413 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1416 spin_unlock(&pwq->pool->lock); in __queue_work()
1420 pwq->nr_in_flight[pwq->work_color]++; in __queue_work()
1421 work_flags = work_color_to_flags(pwq->work_color); in __queue_work()
1423 if (likely(pwq->nr_active < pwq->max_active)) { in __queue_work()
1425 pwq->nr_active++; in __queue_work()
1426 worklist = &pwq->pool->worklist; in __queue_work()
1429 worklist = &pwq->delayed_works; in __queue_work()
1432 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1434 spin_unlock(&pwq->pool->lock); in __queue_work()
1834 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday() local
1835 struct workqueue_struct *wq = pwq->wq; in send_mayday()
1843 if (list_empty(&pwq->mayday_node)) { in send_mayday()
1849 get_pwq(pwq); in send_mayday()
1850 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
1994 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work() local
1996 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; in process_one_work()
2032 worker->current_pwq = pwq; in process_one_work()
2066 lock_map_acquire_read(&pwq->wq->lockdep_map); in process_one_work()
2076 lock_map_release(&pwq->wq->lockdep_map); in process_one_work()
2109 pwq_dec_nr_in_flight(pwq, work_color); in process_one_work()
2278 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread() local
2280 struct worker_pool *pool = pwq->pool; in rescuer_thread()
2284 list_del_init(&pwq->mayday_node); in rescuer_thread()
2299 if (get_work_pwq(work) == pwq) in rescuer_thread()
2316 get_pwq(pwq); in rescuer_thread()
2317 list_move_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
2326 put_pwq(pwq); in rescuer_thread()
2394 static void insert_wq_barrier(struct pool_workqueue *pwq, in insert_wq_barrier() argument
2428 insert_work(pwq, &barr->work, head, in insert_wq_barrier()
2467 struct pool_workqueue *pwq; in flush_workqueue_prep_pwqs() local
2474 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
2475 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs()
2480 WARN_ON_ONCE(pwq->flush_color != -1); in flush_workqueue_prep_pwqs()
2482 if (pwq->nr_in_flight[flush_color]) { in flush_workqueue_prep_pwqs()
2483 pwq->flush_color = flush_color; in flush_workqueue_prep_pwqs()
2490 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); in flush_workqueue_prep_pwqs()
2491 pwq->work_color = work_color; in flush_workqueue_prep_pwqs()
2670 struct pool_workqueue *pwq; in drain_workqueue() local
2686 for_each_pwq(pwq, wq) { in drain_workqueue()
2689 spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2690 drained = !pwq->nr_active && list_empty(&pwq->delayed_works); in drain_workqueue()
2691 spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
2715 struct pool_workqueue *pwq; in start_flush_work() local
2728 pwq = get_work_pwq(work); in start_flush_work()
2729 if (pwq) { in start_flush_work()
2730 if (unlikely(pwq->pool != pool)) in start_flush_work()
2736 pwq = worker->current_pwq; in start_flush_work()
2739 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
2748 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) in start_flush_work()
2749 lock_map_acquire(&pwq->wq->lockdep_map); in start_flush_work()
2751 lock_map_acquire_read(&pwq->wq->lockdep_map); in start_flush_work()
2752 lock_map_release(&pwq->wq->lockdep_map); in start_flush_work()
3308 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn() local
3310 struct workqueue_struct *wq = pwq->wq; in pwq_unbound_release_workfn()
3311 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn()
3318 list_del_rcu(&pwq->pwqs_node); in pwq_unbound_release_workfn()
3326 call_rcu_sched(&pwq->rcu, rcu_free_pwq); in pwq_unbound_release_workfn()
3344 static void pwq_adjust_max_active(struct pool_workqueue *pwq) in pwq_adjust_max_active() argument
3346 struct workqueue_struct *wq = pwq->wq; in pwq_adjust_max_active()
3353 if (!freezable && pwq->max_active == wq->saved_max_active) in pwq_adjust_max_active()
3356 spin_lock_irq(&pwq->pool->lock); in pwq_adjust_max_active()
3364 pwq->max_active = wq->saved_max_active; in pwq_adjust_max_active()
3366 while (!list_empty(&pwq->delayed_works) && in pwq_adjust_max_active()
3367 pwq->nr_active < pwq->max_active) in pwq_adjust_max_active()
3368 pwq_activate_first_delayed(pwq); in pwq_adjust_max_active()
3374 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3376 pwq->max_active = 0; in pwq_adjust_max_active()
3379 spin_unlock_irq(&pwq->pool->lock); in pwq_adjust_max_active()
3383 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
3386 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); in init_pwq()
3388 memset(pwq, 0, sizeof(*pwq)); in init_pwq()
3390 pwq->pool = pool; in init_pwq()
3391 pwq->wq = wq; in init_pwq()
3392 pwq->flush_color = -1; in init_pwq()
3393 pwq->refcnt = 1; in init_pwq()
3394 INIT_LIST_HEAD(&pwq->delayed_works); in init_pwq()
3395 INIT_LIST_HEAD(&pwq->pwqs_node); in init_pwq()
3396 INIT_LIST_HEAD(&pwq->mayday_node); in init_pwq()
3397 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); in init_pwq()
3401 static void link_pwq(struct pool_workqueue *pwq) in link_pwq() argument
3403 struct workqueue_struct *wq = pwq->wq; in link_pwq()
3408 if (!list_empty(&pwq->pwqs_node)) in link_pwq()
3412 pwq->work_color = wq->work_color; in link_pwq()
3415 pwq_adjust_max_active(pwq); in link_pwq()
3418 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
3426 struct pool_workqueue *pwq; in alloc_unbound_pwq() local
3434 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3435 if (!pwq) { in alloc_unbound_pwq()
3440 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3441 return pwq; in alloc_unbound_pwq()
3492 struct pool_workqueue *pwq) in numa_pwq_tbl_install() argument
3500 link_pwq(pwq); in numa_pwq_tbl_install()
3503 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); in numa_pwq_tbl_install()
3721 struct pool_workqueue *old_pwq = NULL, *pwq; in wq_update_unbound_numa() local
3740 pwq = unbound_pwq_by_node(wq, node); in wq_update_unbound_numa()
3749 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
3756 pwq = alloc_unbound_pwq(wq, target_attrs); in wq_update_unbound_numa()
3757 if (!pwq) { in wq_update_unbound_numa()
3765 old_pwq = numa_pwq_tbl_install(wq, node, pwq); in wq_update_unbound_numa()
3790 struct pool_workqueue *pwq = in alloc_and_link_pwqs() local
3795 init_pwq(pwq, wq, &cpu_pools[highpri]); in alloc_and_link_pwqs()
3798 link_pwq(pwq); in alloc_and_link_pwqs()
3835 struct pool_workqueue *pwq; in __alloc_workqueue_key() local
3913 for_each_pwq(pwq, wq) in __alloc_workqueue_key()
3914 pwq_adjust_max_active(pwq); in __alloc_workqueue_key()
3941 struct pool_workqueue *pwq; in destroy_workqueue() local
3949 for_each_pwq(pwq, wq) { in destroy_workqueue()
3953 if (WARN_ON(pwq->nr_in_flight[i])) { in destroy_workqueue()
3959 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || in destroy_workqueue()
3960 WARN_ON(pwq->nr_active) || in destroy_workqueue()
3961 WARN_ON(!list_empty(&pwq->delayed_works))) { in destroy_workqueue()
3994 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in destroy_workqueue()
3996 put_pwq_unlocked(pwq); in destroy_workqueue()
4003 pwq = wq->dfl_pwq; in destroy_workqueue()
4005 put_pwq_unlocked(pwq); in destroy_workqueue()
4022 struct pool_workqueue *pwq; in workqueue_set_max_active() local
4034 for_each_pwq(pwq, wq) in workqueue_set_max_active()
4035 pwq_adjust_max_active(pwq); in workqueue_set_max_active()
4076 struct pool_workqueue *pwq; in workqueue_congested() local
4085 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
4087 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in workqueue_congested()
4089 ret = !list_empty(&pwq->delayed_works); in workqueue_congested()
4171 struct pool_workqueue *pwq = NULL; in print_worker_info() local
4190 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
4191 probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
4229 static void show_pwq(struct pool_workqueue *pwq) in show_pwq() argument
4231 struct worker_pool *pool = pwq->pool; in show_pwq()
4240 pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active, in show_pwq()
4241 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); in show_pwq()
4244 if (worker->current_pwq == pwq) { in show_pwq()
4254 if (worker->current_pwq != pwq) in show_pwq()
4259 worker == pwq->wq->rescuer ? "(RESCUER)" : "", in show_pwq()
4269 if (get_work_pwq(work) == pwq) { in show_pwq()
4279 if (get_work_pwq(work) != pwq) in show_pwq()
4288 if (!list_empty(&pwq->delayed_works)) { in show_pwq()
4292 list_for_each_entry(work, &pwq->delayed_works, entry) { in show_pwq()
4318 struct pool_workqueue *pwq; in show_workqueue_state() local
4321 for_each_pwq(pwq, wq) { in show_workqueue_state()
4322 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) { in show_workqueue_state()
4332 for_each_pwq(pwq, wq) { in show_workqueue_state()
4333 spin_lock_irqsave(&pwq->pool->lock, flags); in show_workqueue_state()
4334 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) in show_workqueue_state()
4335 show_pwq(pwq); in show_workqueue_state()
4336 spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_workqueue_state()
4678 struct pool_workqueue *pwq; in freeze_workqueues_begin() local
4687 for_each_pwq(pwq, wq) in freeze_workqueues_begin()
4688 pwq_adjust_max_active(pwq); in freeze_workqueues_begin()
4712 struct pool_workqueue *pwq; in freeze_workqueues_busy() local
4726 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
4727 WARN_ON_ONCE(pwq->nr_active < 0); in freeze_workqueues_busy()
4728 if (pwq->nr_active) { in freeze_workqueues_busy()
4753 struct pool_workqueue *pwq; in thaw_workqueues() local
4765 for_each_pwq(pwq, wq) in thaw_workqueues()
4766 pwq_adjust_max_active(pwq); in thaw_workqueues()
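
The matches above trace the pool_workqueue (pwq) reference lifecycle: get_pwq() and put_pwq() adjust a refcount while holding pwq->pool->lock (lines 1069-1098), put_pwq_unlocked() wraps that in the lock (lines 1107-1116), and the last reference of an unbound pwq is released via unbound_release_work and RCU (lines 1098, 3308-3326). The following is a minimal, self-contained user-space sketch of that refcounting pattern, not the kernel implementation; the names (fake_pwq, fake_get_pwq, fake_put_pwq, fake_put_pwq_unlocked) are hypothetical, and a pthread mutex plus an immediate free() stand in for the pool spinlock and the deferred release work.

```c
/*
 * Illustrative sketch only: models the get_pwq()/put_pwq() refcount
 * pattern visible in the listing with user-space primitives.  A pthread
 * mutex stands in for pwq->pool->lock; free() stands in for scheduling
 * unbound_release_work.  All names here are hypothetical.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_pwq {
	pthread_mutex_t *pool_lock;	/* stands in for pwq->pool->lock */
	int refcnt;			/* protected by pool_lock */
};

/* Caller must hold pool_lock, mirroring lockdep_assert_held() in get_pwq(). */
static void fake_get_pwq(struct fake_pwq *pwq)
{
	assert(pwq->refcnt > 0);	/* analogue of WARN_ON_ONCE(pwq->refcnt <= 0) */
	pwq->refcnt++;
}

/* Caller must hold pool_lock; drops one reference and releases on zero. */
static void fake_put_pwq(struct fake_pwq *pwq)
{
	if (--pwq->refcnt)
		return;
	/* Last reference: the kernel instead schedules unbound_release_work. */
	free(pwq);
}

/* Analogue of put_pwq_unlocked(): take the pool lock, put, drop the lock. */
static void fake_put_pwq_unlocked(struct fake_pwq *pwq)
{
	if (pwq) {
		pthread_mutex_t *lock = pwq->pool_lock;

		/*
		 * The kernel pwq outlives the unlock because freeing is
		 * deferred to a work item plus RCU; here fake_put_pwq()
		 * may free immediately, so grab the lock pointer first.
		 */
		pthread_mutex_lock(lock);
		fake_put_pwq(pwq);
		pthread_mutex_unlock(lock);
	}
}

int main(void)
{
	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	struct fake_pwq *pwq = calloc(1, sizeof(*pwq));

	if (!pwq)
		return 1;
	pwq->pool_lock = &pool_lock;
	pwq->refcnt = 1;		/* init_pwq() also starts at refcnt = 1 */

	pthread_mutex_lock(&pool_lock);
	fake_get_pwq(pwq);		/* e.g. insert_work() taking a reference */
	fake_put_pwq(pwq);		/* e.g. pwq_dec_nr_in_flight() dropping it */
	pthread_mutex_unlock(&pool_lock);

	fake_put_pwq_unlocked(pwq);	/* final ref, as in destroy_workqueue() */
	printf("fake pwq released\n");
	return 0;
}
```

In the real code the final put does not free synchronously: put_pwq() schedules unbound_release_work (line 1098) and pwq_unbound_release_workfn() hands the pwq to call_rcu_sched() (line 3326), which is why dropping a reference while only holding the pool lock is safe there.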
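A second pattern running through the matches is the concurrency gate: __queue_work() puts new work on the pool worklist only while nr_active is below max_active, otherwise it lands on delayed_works (lines 1423-1429), and pwq_dec_nr_in_flight() promotes the first delayed work when an active one retires (lines 1157-1161, plus pwq_adjust_max_active() at 3366-3368). Below is a minimal single-threaded sketch of that gate under assumed names (fake_pwq, fake_queue_work, fake_work_done); it is not the kernel data structure, and a plain array stands in for the delayed_works list.

```c
/*
 * Illustrative sketch only: models the nr_active/max_active gate that
 * __queue_work(), pwq_dec_nr_in_flight() and pwq_activate_first_delayed()
 * implement in the listing above.  Single-threaded, no locking, and each
 * "work" is just an integer id; all names are hypothetical.
 */
#include <stdio.h>

#define MAX_DELAYED 16

struct fake_pwq {
	int nr_active;			/* works currently on the pool worklist */
	int max_active;			/* analogue of pwq->max_active */
	int delayed[MAX_DELAYED];	/* stands in for pwq->delayed_works */
	int nr_delayed;
};

/* __queue_work() analogue: activate if below max_active, else delay. */
static void fake_queue_work(struct fake_pwq *pwq, int work_id)
{
	if (pwq->nr_active < pwq->max_active) {
		pwq->nr_active++;
		printf("work %d -> pool worklist (active=%d)\n",
		       work_id, pwq->nr_active);
	} else {
		pwq->delayed[pwq->nr_delayed++] = work_id;
		printf("work %d -> delayed list\n", work_id);
	}
}

/* pwq_dec_nr_in_flight() analogue: retire one work, then pull in a delayed one. */
static void fake_work_done(struct fake_pwq *pwq)
{
	pwq->nr_active--;
	if (pwq->nr_delayed && pwq->nr_active < pwq->max_active) {
		/* pwq_activate_first_delayed(): promote the oldest delayed work */
		int work_id = pwq->delayed[0];

		for (int i = 1; i < pwq->nr_delayed; i++)
			pwq->delayed[i - 1] = pwq->delayed[i];
		pwq->nr_delayed--;
		pwq->nr_active++;
		printf("activated delayed work %d (active=%d)\n",
		       work_id, pwq->nr_active);
	}
}

int main(void)
{
	struct fake_pwq pwq = { .max_active = 2 };

	for (int i = 1; i <= 4; i++)	/* queue 4 works against max_active = 2 */
		fake_queue_work(&pwq, i);
	fake_work_done(&pwq);		/* finishing one activates a delayed one */
	fake_work_done(&pwq);
	return 0;
}
```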