Lines matching refs: wq (every reference to the identifier wq in kernel/workqueue.c; the leading number on each line is the line number in that file, the trailing "argument"/"member"/"local" tags how the hit is used, and "in foo()" names the enclosing function)

200 	struct workqueue_struct *wq;		/* I: the owning workqueue */  member
335 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
345 #define assert_rcu_or_wq_mutex(wq) \ argument
347 !lockdep_is_held(&wq->mutex), \
350 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ argument
352 !lockdep_is_held(&wq->mutex) && \
405 #define for_each_pwq(pwq, wq) \ argument
406 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
407 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
567 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, in unbound_pwq_by_node() argument
570 assert_rcu_or_wq_mutex_or_pool_mutex(wq); in unbound_pwq_by_node()
579 return wq->dfl_pwq; in unbound_pwq_by_node()
581 return rcu_dereference_raw(wq->numa_pwq_tbl[node]); in unbound_pwq_by_node()
1088 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) in put_pwq()
1179 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
1180 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
1324 static bool is_chained_work(struct workqueue_struct *wq) in is_chained_work() argument
1333 return worker && worker->current_pwq->wq == wq; in is_chained_work()
1336 static void __queue_work(int cpu, struct workqueue_struct *wq, in __queue_work() argument
1356 if (unlikely(wq->flags & __WQ_DRAINING) && in __queue_work()
1357 WARN_ON_ONCE(!is_chained_work(wq))) in __queue_work()
1364 if (!(wq->flags & WQ_UNBOUND)) in __queue_work()
1365 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
1367 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in __queue_work()
1382 if (worker && worker->current_pwq->wq == wq) { in __queue_work()
1402 if (wq->flags & WQ_UNBOUND) { in __queue_work()
1409 wq->name, cpu); in __queue_work()
1448 bool queue_work_on(int cpu, struct workqueue_struct *wq, in queue_work_on() argument
1457 __queue_work(cpu, wq, work); in queue_work_on()
1471 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
1475 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, in __queue_delayed_work() argument
1493 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
1499 dwork->wq = wq; in __queue_delayed_work()
1520 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, in queue_delayed_work_on() argument
1531 __queue_delayed_work(cpu, wq, dwork, delay); in queue_delayed_work_on()
1558 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, in mod_delayed_work_on() argument
1569 __queue_delayed_work(cpu, wq, dwork, delay); in mod_delayed_work_on()
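The queueing entry points above (queue_work_on(), queue_delayed_work_on(), mod_delayed_work_on() and their non-_on wrappers) all funnel into __queue_work()/__queue_delayed_work() with an explicit wq argument. A minimal caller-side sketch, assuming <linux/workqueue.h> and <linux/jiffies.h>; my_work_fn, my_work, my_dwork and example_queueing are made-up names:

static void my_work_fn(struct work_struct *work)
{
        /* runs later in process context on a worker of the chosen pool */
}

static DECLARE_WORK(my_work, my_work_fn);
static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static void example_queueing(struct workqueue_struct *wq)
{
        /* local CPU's per-cpu pwq, or the local node's unbound pwq */
        queue_work(wq, &my_work);

        /* pin the item to CPU 1 (returns false if it is still pending) */
        queue_work_on(1, wq, &my_work);

        /* run my_work_fn() roughly 100ms from now */
        queue_delayed_work(wq, &my_dwork, msecs_to_jiffies(100));

        /* push the deadline out; also queues it if it was not pending */
        mod_delayed_work(wq, &my_dwork, msecs_to_jiffies(500));
}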
1835 struct workqueue_struct *wq = pwq->wq; in send_mayday() local
1839 if (!wq->rescuer) in send_mayday()
1850 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
1851 wake_up_process(wq->rescuer->task); in send_mayday()
1996 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; in process_one_work()
2066 lock_map_acquire_read(&pwq->wq->lockdep_map); in process_one_work()
2076 lock_map_release(&pwq->wq->lockdep_map); in process_one_work()
2250 struct workqueue_struct *wq = rescuer->rescue_wq; in rescuer_thread() local
2277 while (!list_empty(&wq->maydays)) { in rescuer_thread()
2278 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread()
2317 list_move_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
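send_mayday() and rescuer_thread() only matter for workqueues that have a rescuer, i.e. those created with WQ_MEM_RECLAIM. A hedged creation sketch, assuming the usual module headers; "my_reclaim_wq" and reclaim_wq_setup are made-up names:

static struct workqueue_struct *reclaim_wq;

static int __init reclaim_wq_setup(void)
{
        /*
         * WQ_MEM_RECLAIM gives the queue a dedicated rescuer task
         * (wq->rescuer).  When a pool cannot fork new workers under
         * memory pressure, its pwq is put on wq->maydays and the
         * rescuer is woken, which is the send_mayday()/rescuer_thread()
         * path above.
         */
        reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
        return reclaim_wq ? 0 : -ENOMEM;
}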
2463 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, in flush_workqueue_prep_pwqs() argument
2470 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); in flush_workqueue_prep_pwqs()
2471 atomic_set(&wq->nr_pwqs_to_flush, 1); in flush_workqueue_prep_pwqs()
2474 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
2484 atomic_inc(&wq->nr_pwqs_to_flush); in flush_workqueue_prep_pwqs()
2497 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) in flush_workqueue_prep_pwqs()
2498 complete(&wq->first_flusher->done); in flush_workqueue_prep_pwqs()
2510 void flush_workqueue(struct workqueue_struct *wq) in flush_workqueue() argument
2519 lock_map_acquire(&wq->lockdep_map); in flush_workqueue()
2520 lock_map_release(&wq->lockdep_map); in flush_workqueue()
2522 mutex_lock(&wq->mutex); in flush_workqueue()
2527 next_color = work_next_color(wq->work_color); in flush_workqueue()
2529 if (next_color != wq->flush_color) { in flush_workqueue()
2535 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); in flush_workqueue()
2536 this_flusher.flush_color = wq->work_color; in flush_workqueue()
2537 wq->work_color = next_color; in flush_workqueue()
2539 if (!wq->first_flusher) { in flush_workqueue()
2541 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); in flush_workqueue()
2543 wq->first_flusher = &this_flusher; in flush_workqueue()
2545 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, in flush_workqueue()
2546 wq->work_color)) { in flush_workqueue()
2548 wq->flush_color = next_color; in flush_workqueue()
2549 wq->first_flusher = NULL; in flush_workqueue()
2554 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); in flush_workqueue()
2555 list_add_tail(&this_flusher.list, &wq->flusher_queue); in flush_workqueue()
2556 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); in flush_workqueue()
2564 list_add_tail(&this_flusher.list, &wq->flusher_overflow); in flush_workqueue()
2567 mutex_unlock(&wq->mutex); in flush_workqueue()
2577 if (wq->first_flusher != &this_flusher) in flush_workqueue()
2580 mutex_lock(&wq->mutex); in flush_workqueue()
2583 if (wq->first_flusher != &this_flusher) in flush_workqueue()
2586 wq->first_flusher = NULL; in flush_workqueue()
2589 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); in flush_workqueue()
2595 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { in flush_workqueue()
2596 if (next->flush_color != wq->flush_color) in flush_workqueue()
2602 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && in flush_workqueue()
2603 wq->flush_color != work_next_color(wq->work_color)); in flush_workqueue()
2606 wq->flush_color = work_next_color(wq->flush_color); in flush_workqueue()
2609 if (!list_empty(&wq->flusher_overflow)) { in flush_workqueue()
2616 list_for_each_entry(tmp, &wq->flusher_overflow, list) in flush_workqueue()
2617 tmp->flush_color = wq->work_color; in flush_workqueue()
2619 wq->work_color = work_next_color(wq->work_color); in flush_workqueue()
2621 list_splice_tail_init(&wq->flusher_overflow, in flush_workqueue()
2622 &wq->flusher_queue); in flush_workqueue()
2623 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); in flush_workqueue()
2626 if (list_empty(&wq->flusher_queue)) { in flush_workqueue()
2627 WARN_ON_ONCE(wq->flush_color != wq->work_color); in flush_workqueue()
2635 WARN_ON_ONCE(wq->flush_color == wq->work_color); in flush_workqueue()
2636 WARN_ON_ONCE(wq->flush_color != next->flush_color); in flush_workqueue()
2639 wq->first_flusher = next; in flush_workqueue()
2641 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) in flush_workqueue()
2648 wq->first_flusher = NULL; in flush_workqueue()
2652 mutex_unlock(&wq->mutex); in flush_workqueue()
2667 void drain_workqueue(struct workqueue_struct *wq) in drain_workqueue() argument
2677 mutex_lock(&wq->mutex); in drain_workqueue()
2678 if (!wq->nr_drainers++) in drain_workqueue()
2679 wq->flags |= __WQ_DRAINING; in drain_workqueue()
2680 mutex_unlock(&wq->mutex); in drain_workqueue()
2682 flush_workqueue(wq); in drain_workqueue()
2684 mutex_lock(&wq->mutex); in drain_workqueue()
2686 for_each_pwq(pwq, wq) { in drain_workqueue()
2699 wq->name, flush_cnt); in drain_workqueue()
2701 mutex_unlock(&wq->mutex); in drain_workqueue()
2705 if (!--wq->nr_drainers) in drain_workqueue()
2706 wq->flags &= ~__WQ_DRAINING; in drain_workqueue()
2707 mutex_unlock(&wq->mutex); in drain_workqueue()
2748 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) in start_flush_work()
2749 lock_map_acquire(&pwq->wq->lockdep_map); in start_flush_work()
2751 lock_map_acquire_read(&pwq->wq->lockdep_map); in start_flush_work()
2752 lock_map_release(&pwq->wq->lockdep_map); in start_flush_work()
2900 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
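Caller-visible difference between the flush paths above: flush_workqueue() waits for everything queued before the call, drain_workqueue() keeps flushing until even self-requeueing work stops (warning via __WQ_DRAINING if anything else queues meanwhile), and flush_work()/flush_delayed_work() wait on a single item. Sketch with hypothetical my_wq/my_work/my_dwork parameters:

static void example_flushing(struct workqueue_struct *my_wq,
                             struct work_struct *my_work,
                             struct delayed_work *my_dwork)
{
        /* wait for every item queued on my_wq before this point */
        flush_workqueue(my_wq);

        /* wait until the queue is truly empty, including requeueing work */
        drain_workqueue(my_wq);

        /* wait on single items; flush_delayed_work() fires a pending timer first */
        flush_work(my_work);
        flush_delayed_work(my_dwork);
}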
3139 struct workqueue_struct *wq = in rcu_free_wq() local
3142 if (!(wq->flags & WQ_UNBOUND)) in rcu_free_wq()
3143 free_percpu(wq->cpu_pwqs); in rcu_free_wq()
3145 free_workqueue_attrs(wq->unbound_attrs); in rcu_free_wq()
3147 kfree(wq->rescuer); in rcu_free_wq()
3148 kfree(wq); in rcu_free_wq()
3310 struct workqueue_struct *wq = pwq->wq; in pwq_unbound_release_workfn() local
3314 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) in pwq_unbound_release_workfn()
3317 mutex_lock(&wq->mutex); in pwq_unbound_release_workfn()
3319 is_last = list_empty(&wq->pwqs); in pwq_unbound_release_workfn()
3320 mutex_unlock(&wq->mutex); in pwq_unbound_release_workfn()
3333 call_rcu_sched(&wq->rcu, rcu_free_wq); in pwq_unbound_release_workfn()
3346 struct workqueue_struct *wq = pwq->wq; in pwq_adjust_max_active() local
3347 bool freezable = wq->flags & WQ_FREEZABLE; in pwq_adjust_max_active()
3350 lockdep_assert_held(&wq->mutex); in pwq_adjust_max_active()
3353 if (!freezable && pwq->max_active == wq->saved_max_active) in pwq_adjust_max_active()
3364 pwq->max_active = wq->saved_max_active; in pwq_adjust_max_active()
3383 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
3391 pwq->wq = wq; in init_pwq()
3403 struct workqueue_struct *wq = pwq->wq; in link_pwq() local
3405 lockdep_assert_held(&wq->mutex); in link_pwq()
3412 pwq->work_color = wq->work_color; in link_pwq()
3418 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
3422 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, in alloc_unbound_pwq() argument
3440 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3490 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, in numa_pwq_tbl_install() argument
3497 lockdep_assert_held(&wq->mutex); in numa_pwq_tbl_install()
3502 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in numa_pwq_tbl_install()
3503 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); in numa_pwq_tbl_install()
3509 struct workqueue_struct *wq; /* target workqueue */ member
3534 apply_wqattrs_prepare(struct workqueue_struct *wq, in apply_wqattrs_prepare() argument
3573 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); in apply_wqattrs_prepare()
3579 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); in apply_wqattrs_prepare()
3593 ctx->wq = wq; in apply_wqattrs_prepare()
3610 mutex_lock(&ctx->wq->mutex); in apply_wqattrs_commit()
3612 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); in apply_wqattrs_commit()
3616 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node, in apply_wqattrs_commit()
3621 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); in apply_wqattrs_commit()
3623 mutex_unlock(&ctx->wq->mutex); in apply_wqattrs_commit()
3639 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, in apply_workqueue_attrs_locked() argument
3646 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) in apply_workqueue_attrs_locked()
3650 if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) in apply_workqueue_attrs_locked()
3653 ctx = apply_wqattrs_prepare(wq, attrs); in apply_workqueue_attrs_locked()
3682 int apply_workqueue_attrs(struct workqueue_struct *wq, in apply_workqueue_attrs() argument
3688 ret = apply_workqueue_attrs_locked(wq, attrs); in apply_workqueue_attrs()
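apply_workqueue_attrs() is the same knob the sysfs nice/cpumask/numa stores further down end up using; it is only valid for WQ_UNBOUND workqueues. A sketch under the assumption that the function is callable from the code in question (names are illustrative, usual kernel headers assumed):

static int example_unbound_tuning(struct workqueue_struct *unbound_wq)
{
        struct workqueue_attrs *attrs;
        int ret;

        attrs = alloc_workqueue_attrs(GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;

        attrs->nice = -5;                       /* worker nice level */
        cpumask_clear(attrs->cpumask);
        cpumask_set_cpu(0, attrs->cpumask);     /* confine workers to CPU 0 */
        attrs->no_numa = false;                 /* keep per-node pwqs */

        /* WARNs and fails for per-cpu or ordered workqueues */
        ret = apply_workqueue_attrs(unbound_wq, attrs);
        free_workqueue_attrs(attrs);
        return ret;
}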
3716 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, in wq_update_unbound_numa() argument
3727 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) || in wq_update_unbound_numa()
3728 wq->unbound_attrs->no_numa) in wq_update_unbound_numa()
3739 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); in wq_update_unbound_numa()
3740 pwq = unbound_pwq_by_node(wq, node); in wq_update_unbound_numa()
3748 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { in wq_update_unbound_numa()
3756 pwq = alloc_unbound_pwq(wq, target_attrs); in wq_update_unbound_numa()
3759 wq->name); in wq_update_unbound_numa()
3764 mutex_lock(&wq->mutex); in wq_update_unbound_numa()
3765 old_pwq = numa_pwq_tbl_install(wq, node, pwq); in wq_update_unbound_numa()
3769 mutex_lock(&wq->mutex); in wq_update_unbound_numa()
3770 spin_lock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
3771 get_pwq(wq->dfl_pwq); in wq_update_unbound_numa()
3772 spin_unlock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
3773 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); in wq_update_unbound_numa()
3775 mutex_unlock(&wq->mutex); in wq_update_unbound_numa()
3779 static int alloc_and_link_pwqs(struct workqueue_struct *wq) in alloc_and_link_pwqs() argument
3781 bool highpri = wq->flags & WQ_HIGHPRI; in alloc_and_link_pwqs()
3784 if (!(wq->flags & WQ_UNBOUND)) { in alloc_and_link_pwqs()
3785 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); in alloc_and_link_pwqs()
3786 if (!wq->cpu_pwqs) in alloc_and_link_pwqs()
3791 per_cpu_ptr(wq->cpu_pwqs, cpu); in alloc_and_link_pwqs()
3795 init_pwq(pwq, wq, &cpu_pools[highpri]); in alloc_and_link_pwqs()
3797 mutex_lock(&wq->mutex); in alloc_and_link_pwqs()
3799 mutex_unlock(&wq->mutex); in alloc_and_link_pwqs()
3802 } else if (wq->flags & __WQ_ORDERED) { in alloc_and_link_pwqs()
3803 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); in alloc_and_link_pwqs()
3805 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || in alloc_and_link_pwqs()
3806 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), in alloc_and_link_pwqs()
3807 "ordering guarantee broken for workqueue %s\n", wq->name); in alloc_and_link_pwqs()
3810 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); in alloc_and_link_pwqs()
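The __WQ_ORDERED branch above is what alloc_ordered_workqueue() relies on: a single unbound pwq so that at most one item runs at a time, in queueing order. Sketch ("my_ordered_wq" and ordered_wq_setup are made-up names):

static struct workqueue_struct *ordered_wq;

static int __init ordered_wq_setup(void)
{
        /*
         * One in-flight item, strict FIFO.  The WARN in
         * alloc_and_link_pwqs() checks that exactly one pwq
         * (wq->dfl_pwq) ends up backing such a queue.
         */
        ordered_wq = alloc_ordered_workqueue("my_ordered_wq", 0);
        return ordered_wq ? 0 : -ENOMEM;
}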
3834 struct workqueue_struct *wq; in __alloc_workqueue_key() local
3843 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]); in __alloc_workqueue_key()
3845 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL); in __alloc_workqueue_key()
3846 if (!wq) in __alloc_workqueue_key()
3850 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); in __alloc_workqueue_key()
3851 if (!wq->unbound_attrs) in __alloc_workqueue_key()
3856 vsnprintf(wq->name, sizeof(wq->name), fmt, args); in __alloc_workqueue_key()
3860 max_active = wq_clamp_max_active(max_active, flags, wq->name); in __alloc_workqueue_key()
3863 wq->flags = flags; in __alloc_workqueue_key()
3864 wq->saved_max_active = max_active; in __alloc_workqueue_key()
3865 mutex_init(&wq->mutex); in __alloc_workqueue_key()
3866 atomic_set(&wq->nr_pwqs_to_flush, 0); in __alloc_workqueue_key()
3867 INIT_LIST_HEAD(&wq->pwqs); in __alloc_workqueue_key()
3868 INIT_LIST_HEAD(&wq->flusher_queue); in __alloc_workqueue_key()
3869 INIT_LIST_HEAD(&wq->flusher_overflow); in __alloc_workqueue_key()
3870 INIT_LIST_HEAD(&wq->maydays); in __alloc_workqueue_key()
3872 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); in __alloc_workqueue_key()
3873 INIT_LIST_HEAD(&wq->list); in __alloc_workqueue_key()
3875 if (alloc_and_link_pwqs(wq) < 0) in __alloc_workqueue_key()
3889 rescuer->rescue_wq = wq; in __alloc_workqueue_key()
3891 wq->name); in __alloc_workqueue_key()
3897 wq->rescuer = rescuer; in __alloc_workqueue_key()
3902 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) in __alloc_workqueue_key()
3912 mutex_lock(&wq->mutex); in __alloc_workqueue_key()
3913 for_each_pwq(pwq, wq) in __alloc_workqueue_key()
3915 mutex_unlock(&wq->mutex); in __alloc_workqueue_key()
3917 list_add_tail_rcu(&wq->list, &workqueues); in __alloc_workqueue_key()
3921 return wq; in __alloc_workqueue_key()
3924 free_workqueue_attrs(wq->unbound_attrs); in __alloc_workqueue_key()
3925 kfree(wq); in __alloc_workqueue_key()
3928 destroy_workqueue(wq); in __alloc_workqueue_key()
3939 void destroy_workqueue(struct workqueue_struct *wq) in destroy_workqueue() argument
3945 drain_workqueue(wq); in destroy_workqueue()
3948 mutex_lock(&wq->mutex); in destroy_workqueue()
3949 for_each_pwq(pwq, wq) { in destroy_workqueue()
3954 mutex_unlock(&wq->mutex); in destroy_workqueue()
3959 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || in destroy_workqueue()
3962 mutex_unlock(&wq->mutex); in destroy_workqueue()
3966 mutex_unlock(&wq->mutex); in destroy_workqueue()
3973 list_del_rcu(&wq->list); in destroy_workqueue()
3976 workqueue_sysfs_unregister(wq); in destroy_workqueue()
3978 if (wq->rescuer) in destroy_workqueue()
3979 kthread_stop(wq->rescuer->task); in destroy_workqueue()
3981 if (!(wq->flags & WQ_UNBOUND)) { in destroy_workqueue()
3986 call_rcu_sched(&wq->rcu, rcu_free_wq); in destroy_workqueue()
3994 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in destroy_workqueue()
3995 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL); in destroy_workqueue()
4003 pwq = wq->dfl_pwq; in destroy_workqueue()
4004 wq->dfl_pwq = NULL; in destroy_workqueue()
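destroy_workqueue() drains the queue itself, but it cannot wait for delayed items whose timer has not fired yet, so the usual teardown order is: stop re-arming, cancel outstanding items, then destroy. A hedged sketch reusing the hypothetical objects from the earlier examples:

static void example_teardown(struct workqueue_struct *my_wq,
                             struct work_struct *my_work,
                             struct delayed_work *my_dwork)
{
        /* nothing may queue on my_wq after this point */
        cancel_delayed_work_sync(my_dwork);
        cancel_work_sync(my_work);

        /* drains remaining work, then sanity-checks and frees the pwqs */
        destroy_workqueue(my_wq);
}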
4020 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) in workqueue_set_max_active() argument
4025 if (WARN_ON(wq->flags & __WQ_ORDERED)) in workqueue_set_max_active()
4028 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); in workqueue_set_max_active()
4030 mutex_lock(&wq->mutex); in workqueue_set_max_active()
4032 wq->saved_max_active = max_active; in workqueue_set_max_active()
4034 for_each_pwq(pwq, wq) in workqueue_set_max_active()
4037 mutex_unlock(&wq->mutex); in workqueue_set_max_active()
4074 bool workqueue_congested(int cpu, struct workqueue_struct *wq) in workqueue_congested() argument
4084 if (!(wq->flags & WQ_UNBOUND)) in workqueue_congested()
4085 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
4087 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in workqueue_congested()
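workqueue_set_max_active() and workqueue_congested() are the two small runtime knobs in this area: one resizes the per-pwq concurrency limit, the other reports whether the pwq that would serve a given CPU is already deferring new work to its delayed list. Illustrative sketch (assumes the usual kernel headers; my_wq and example_throttle are made-up names):

static void example_throttle(struct workqueue_struct *my_wq)
{
        /* allow up to 4 in-flight items per pwq (rejected for ordered wqs) */
        workqueue_set_max_active(my_wq, 4);

        /* back off if the pwq used for the local CPU has hit max_active */
        if (workqueue_congested(WORK_CPU_UNBOUND, my_wq))
                pr_debug("my_wq congested, deferring submission\n");
}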
4172 struct workqueue_struct *wq = NULL; in print_worker_info() local
4191 probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
4192 probe_kernel_read(name, wq->name, sizeof(name) - 1); in print_worker_info()
4259 worker == pwq->wq->rescuer ? "(RESCUER)" : "", in show_pwq()
4308 struct workqueue_struct *wq; in show_workqueue_state() local
4317 list_for_each_entry_rcu(wq, &workqueues, list) { in show_workqueue_state()
4321 for_each_pwq(pwq, wq) { in show_workqueue_state()
4330 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); in show_workqueue_state()
4332 for_each_pwq(pwq, wq) { in show_workqueue_state()
4553 struct workqueue_struct *wq; in workqueue_cpu_up_callback() local
4582 list_for_each_entry(wq, &workqueues, list) in workqueue_cpu_up_callback()
4583 wq_update_unbound_numa(wq, cpu, true); in workqueue_cpu_up_callback()
4601 struct workqueue_struct *wq; in workqueue_cpu_down_callback() local
4611 list_for_each_entry(wq, &workqueues, list) in workqueue_cpu_down_callback()
4612 wq_update_unbound_numa(wq, cpu, false); in workqueue_cpu_down_callback()
4677 struct workqueue_struct *wq; in freeze_workqueues_begin() local
4685 list_for_each_entry(wq, &workqueues, list) { in freeze_workqueues_begin()
4686 mutex_lock(&wq->mutex); in freeze_workqueues_begin()
4687 for_each_pwq(pwq, wq) in freeze_workqueues_begin()
4689 mutex_unlock(&wq->mutex); in freeze_workqueues_begin()
4711 struct workqueue_struct *wq; in freeze_workqueues_busy() local
4718 list_for_each_entry(wq, &workqueues, list) { in freeze_workqueues_busy()
4719 if (!(wq->flags & WQ_FREEZABLE)) in freeze_workqueues_busy()
4726 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
4752 struct workqueue_struct *wq; in thaw_workqueues() local
4763 list_for_each_entry(wq, &workqueues, list) { in thaw_workqueues()
4764 mutex_lock(&wq->mutex); in thaw_workqueues()
4765 for_each_pwq(pwq, wq) in thaw_workqueues()
4767 mutex_unlock(&wq->mutex); in thaw_workqueues()
4779 struct workqueue_struct *wq; in workqueue_apply_unbound_cpumask() local
4784 list_for_each_entry(wq, &workqueues, list) { in workqueue_apply_unbound_cpumask()
4785 if (!(wq->flags & WQ_UNBOUND)) in workqueue_apply_unbound_cpumask()
4788 if (wq->flags & __WQ_ORDERED) in workqueue_apply_unbound_cpumask()
4791 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs); in workqueue_apply_unbound_cpumask()
4867 struct workqueue_struct *wq; member
4875 return wq_dev->wq; in dev_to_wq()
4881 struct workqueue_struct *wq = dev_to_wq(dev); in per_cpu_show() local
4883 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); in per_cpu_show()
4890 struct workqueue_struct *wq = dev_to_wq(dev); in max_active_show() local
4892 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); in max_active_show()
4899 struct workqueue_struct *wq = dev_to_wq(dev); in max_active_store() local
4905 workqueue_set_max_active(wq, val); in max_active_store()
4920 struct workqueue_struct *wq = dev_to_wq(dev); in wq_pool_ids_show() local
4928 unbound_pwq_by_node(wq, node)->pool->id); in wq_pool_ids_show()
4940 struct workqueue_struct *wq = dev_to_wq(dev); in wq_nice_show() local
4943 mutex_lock(&wq->mutex); in wq_nice_show()
4944 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); in wq_nice_show()
4945 mutex_unlock(&wq->mutex); in wq_nice_show()
4951 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) in wq_sysfs_prep_attrs() argument
4961 copy_workqueue_attrs(attrs, wq->unbound_attrs); in wq_sysfs_prep_attrs()
4968 struct workqueue_struct *wq = dev_to_wq(dev); in wq_nice_store() local
4974 attrs = wq_sysfs_prep_attrs(wq); in wq_nice_store()
4980 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_nice_store()
4993 struct workqueue_struct *wq = dev_to_wq(dev); in wq_cpumask_show() local
4996 mutex_lock(&wq->mutex); in wq_cpumask_show()
4998 cpumask_pr_args(wq->unbound_attrs->cpumask)); in wq_cpumask_show()
4999 mutex_unlock(&wq->mutex); in wq_cpumask_show()
5007 struct workqueue_struct *wq = dev_to_wq(dev); in wq_cpumask_store() local
5013 attrs = wq_sysfs_prep_attrs(wq); in wq_cpumask_store()
5019 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_cpumask_store()
5030 struct workqueue_struct *wq = dev_to_wq(dev); in wq_numa_show() local
5033 mutex_lock(&wq->mutex); in wq_numa_show()
5035 !wq->unbound_attrs->no_numa); in wq_numa_show()
5036 mutex_unlock(&wq->mutex); in wq_numa_show()
5044 struct workqueue_struct *wq = dev_to_wq(dev); in wq_numa_store() local
5050 attrs = wq_sysfs_prep_attrs(wq); in wq_numa_store()
5057 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_numa_store()
5147 int workqueue_sysfs_register(struct workqueue_struct *wq) in workqueue_sysfs_register() argument
5157 if (WARN_ON(wq->flags & __WQ_ORDERED)) in workqueue_sysfs_register()
5160 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); in workqueue_sysfs_register()
5164 wq_dev->wq = wq; in workqueue_sysfs_register()
5166 wq_dev->dev.init_name = wq->name; in workqueue_sysfs_register()
5178 wq->wq_dev = NULL; in workqueue_sysfs_register()
5182 if (wq->flags & WQ_UNBOUND) { in workqueue_sysfs_register()
5189 wq->wq_dev = NULL; in workqueue_sysfs_register()
5206 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) in workqueue_sysfs_unregister() argument
5208 struct wq_device *wq_dev = wq->wq_dev; in workqueue_sysfs_unregister()
5210 if (!wq->wq_dev) in workqueue_sysfs_unregister()
5213 wq->wq_dev = NULL; in workqueue_sysfs_unregister()
5217 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } in workqueue_sysfs_unregister() argument
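The sysfs hooks above are wired up only for workqueues created with WQ_SYSFS (or registered later via workqueue_sysfs_register()). The attribute files should then appear under /sys/devices/virtual/workqueue/<name>/: per_cpu and max_active always, plus pool_ids, nice, cpumask and numa for unbound queues. Creation sketch ("my_tunable_wq" and tunable_wq_setup are made-up names):

static struct workqueue_struct *tunable_wq;

static int __init tunable_wq_setup(void)
{
        /*
         * WQ_SYSFS makes workqueue_sysfs_register() run at creation
         * time, exposing the wq_sysfs_* attributes listed above.
         * Ordered workqueues are rejected by the WARN_ON there.
         */
        tunable_wq = alloc_workqueue("my_tunable_wq",
                                     WQ_UNBOUND | WQ_SYSFS, 0);
        return tunable_wq ? 0 : -ENOMEM;
}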