pwq               587 fs/eventpoll.c static void ep_remove_wait_queue(struct eppoll_entry *pwq)
pwq               598 fs/eventpoll.c 	whead = smp_load_acquire(&pwq->whead);
pwq               600 fs/eventpoll.c 		remove_wait_queue(whead, &pwq->wait);
pwq               612 fs/eventpoll.c 	struct eppoll_entry *pwq;
pwq               615 fs/eventpoll.c 		pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
pwq               617 fs/eventpoll.c 		list_del(&pwq->llink);
pwq               618 fs/eventpoll.c 		ep_remove_wait_queue(pwq);
pwq               619 fs/eventpoll.c 		kmem_cache_free(pwq_cache, pwq);
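
The detach path above is ordering-sensitive: ep_remove_wait_queue() only dereferences pwq->whead after an acquire load, which pairs with the release store that clears the pointer when the wait queue head disappears underneath epoll (the POLLFREE case; the real code additionally leans on an RCU grace period to keep the head's memory around). A minimal C11 userspace analogue of just the pointer handoff, all names hypothetical:

    #include <stdatomic.h>
    #include <stddef.h>

    struct whead { int dummy; };

    struct entry {
        _Atomic(struct whead *) whead;  /* NULL once the head is gone */
    };

    /* Owner side: publish "head is going away" before tearing it down. */
    static void head_going_away(struct entry *e)
    {
        atomic_store_explicit(&e->whead, NULL, memory_order_release);
    }

    /* Detach side: a non-NULL acquire load means every write the owner
     * made before the release store is visible here. */
    static void entry_detach(struct entry *e)
    {
        struct whead *h = atomic_load_explicit(&e->whead, memory_order_acquire);
        if (h) {
            /* cf. remove_wait_queue(whead, &pwq->wait) above */
        }
    }
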
pwq              1320 fs/eventpoll.c 	struct eppoll_entry *pwq;
pwq              1322 fs/eventpoll.c 	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
pwq              1323 fs/eventpoll.c 		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
pwq              1324 fs/eventpoll.c 		pwq->whead = whead;
pwq              1325 fs/eventpoll.c 		pwq->base = epi;
pwq              1327 fs/eventpoll.c 			add_wait_queue_exclusive(whead, &pwq->wait);
pwq              1329 fs/eventpoll.c 			add_wait_queue(whead, &pwq->wait);
pwq              1330 fs/eventpoll.c 		list_add_tail(&pwq->llink, &epi->pwqlist);
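
These hits are the attach side: for every wait queue head a watched file hands over, epoll allocates an eppoll_entry, arms its wait entry with ep_poll_callback(), adds it to the head (exclusively when EPOLLEXCLUSIVE was requested), and chains it on epi->pwqlist for later teardown. The head reaches this code through a file's ->poll() method calling poll_wait(); a hedged driver-side sketch, where my_dev, my_poll() and data_ready() are hypothetical:

    static __poll_t my_poll(struct file *file, poll_table *wait)
    {
        struct my_dev *dev = file->private_data;
        __poll_t mask = 0;

        /* Hands dev->waitq to the poll_table's queue proc; under epoll
         * that proc is the one allocating the eppoll_entry above. */
        poll_wait(file, &dev->waitq, wait);

        if (data_ready(dev))
            mask |= EPOLLIN | EPOLLRDNORM;
        return mask;
    }
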
pwq               121 fs/select.c    void poll_initwait(struct poll_wqueues *pwq)
pwq               123 fs/select.c    	init_poll_funcptr(&pwq->pt, __pollwait);
pwq               124 fs/select.c    	pwq->polling_task = current;
pwq               125 fs/select.c    	pwq->triggered = 0;
pwq               126 fs/select.c    	pwq->error = 0;
pwq               127 fs/select.c    	pwq->table = NULL;
pwq               128 fs/select.c    	pwq->inline_index = 0;
pwq               138 fs/select.c    void poll_freewait(struct poll_wqueues *pwq)
pwq               140 fs/select.c    	struct poll_table_page * p = pwq->table;
pwq               142 fs/select.c    	for (i = 0; i < pwq->inline_index; i++)
pwq               143 fs/select.c    		free_poll_entry(pwq->inline_entries + i);
pwq               186 fs/select.c    	struct poll_wqueues *pwq = wait->private;
pwq               187 fs/select.c    	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
pwq               197 fs/select.c    	pwq->triggered = 1;
pwq               224 fs/select.c    	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
pwq               225 fs/select.c    	struct poll_table_entry *entry = poll_get_entry(pwq);
pwq               232 fs/select.c    	entry->wait.private = pwq;
pwq               236 fs/select.c    static int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
pwq               242 fs/select.c    	if (!pwq->triggered)
pwq               257 fs/select.c    	smp_store_mb(pwq->triggered, 0);
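
Taken together these form the poll/select engine in fs/select.c: poll_initwait() points the poll_table at __pollwait(), __pollwait() registers one poll_table_entry per wait queue with pollwake() as the wakeup callback, pollwake() sets pwq->triggered and wakes pwq->polling_task (via the dummy_wait entry so the right task is targeted), and poll_schedule_timeout() sleeps only when nothing has triggered, clearing the flag with smp_store_mb() on the way out. The caller-side shape, loosely following do_poll() in the same file (the fd walk, the ktime_t deadline "to", and "slack" are elided):

    struct poll_wqueues table;

    poll_initwait(&table);
    for (;;) {
        /* ... call each watched file's ->poll() with &table.pt ... */
        if (found_events || signal_pending(current))
            break;
        /* Sleeps unless ->triggered was already set by pollwake();
         * a zero return means the timeout expired. */
        if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE, to, slack))
            timed_out = 1;
    }
    poll_freewait(&table);
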
pwq               113 include/linux/poll.h extern void poll_initwait(struct poll_wqueues *pwq);
pwq               114 include/linux/poll.h extern void poll_freewait(struct poll_wqueues *pwq);
pwq                42 include/trace/events/workqueue.h 	TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
pwq                45 include/trace/events/workqueue.h 	TP_ARGS(req_cpu, pwq, work),
pwq                58 include/trace/events/workqueue.h 		__entry->workqueue	= pwq->wq;
pwq                60 include/trace/events/workqueue.h 		__entry->cpu		= pwq->pool->cpu;
pwq               427 kernel/workqueue.c #define for_each_pwq(pwq, wq)						\
pwq               428 kernel/workqueue.c 	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
pwq               624 kernel/workqueue.c static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
pwq               627 kernel/workqueue.c 	set_work_data(work, (unsigned long)pwq,
pwq              1090 kernel/workqueue.c static void get_pwq(struct pool_workqueue *pwq)
pwq              1092 kernel/workqueue.c 	lockdep_assert_held(&pwq->pool->lock);
pwq              1093 kernel/workqueue.c 	WARN_ON_ONCE(pwq->refcnt <= 0);
pwq              1094 kernel/workqueue.c 	pwq->refcnt++;
pwq              1104 kernel/workqueue.c static void put_pwq(struct pool_workqueue *pwq)
pwq              1106 kernel/workqueue.c 	lockdep_assert_held(&pwq->pool->lock);
pwq              1107 kernel/workqueue.c 	if (likely(--pwq->refcnt))
pwq              1109 kernel/workqueue.c 	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
pwq              1119 kernel/workqueue.c 	schedule_work(&pwq->unbound_release_work);
pwq              1128 kernel/workqueue.c static void put_pwq_unlocked(struct pool_workqueue *pwq)
pwq              1130 kernel/workqueue.c 	if (pwq) {
pwq              1135 kernel/workqueue.c 		spin_lock_irq(&pwq->pool->lock);
pwq              1136 kernel/workqueue.c 		put_pwq(pwq);
pwq              1137 kernel/workqueue.c 		spin_unlock_irq(&pwq->pool->lock);
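
The reference rule encoded in these three helpers: get_pwq() and put_pwq() both demand pool->lock, per-cpu pwqs are never released this way (hence the WARN on !WQ_UNBOUND), and the final put defers the heavyweight teardown to unbound_release_work instead of running it under the lock; put_pwq_unlocked() is the wrapper for callers that do not already hold the lock. The same discipline as a hypothetical userspace analogue:

    #include <pthread.h>
    #include <stdbool.h>

    struct obj {
        pthread_mutex_t *owner_lock;  /* protects refcnt */
        int refcnt;
        bool release_scheduled;       /* stands in for schedule_work() */
    };

    static void obj_get(struct obj *o)           /* cf. get_pwq() */
    {
        o->refcnt++;                             /* owner_lock held */
    }

    static void obj_put(struct obj *o)           /* cf. put_pwq() */
    {
        if (--o->refcnt)
            return;
        o->release_scheduled = true;             /* defer the teardown */
    }

    static void obj_put_unlocked(struct obj *o)  /* cf. put_pwq_unlocked() */
    {
        pthread_mutex_lock(o->owner_lock);
        obj_put(o);
        pthread_mutex_unlock(o->owner_lock);
    }
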
pwq              1143 kernel/workqueue.c 	struct pool_workqueue *pwq = get_work_pwq(work);
pwq              1146 kernel/workqueue.c 	if (list_empty(&pwq->pool->worklist))
pwq              1147 kernel/workqueue.c 		pwq->pool->watchdog_ts = jiffies;
pwq              1148 kernel/workqueue.c 	move_linked_works(work, &pwq->pool->worklist, NULL);
pwq              1150 kernel/workqueue.c 	pwq->nr_active++;
pwq              1153 kernel/workqueue.c static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
pwq              1155 kernel/workqueue.c 	struct work_struct *work = list_first_entry(&pwq->delayed_works,
pwq              1172 kernel/workqueue.c static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
pwq              1178 kernel/workqueue.c 	pwq->nr_in_flight[color]--;
pwq              1180 kernel/workqueue.c 	pwq->nr_active--;
pwq              1181 kernel/workqueue.c 	if (!list_empty(&pwq->delayed_works)) {
pwq              1183 kernel/workqueue.c 		if (pwq->nr_active < pwq->max_active)
pwq              1184 kernel/workqueue.c 			pwq_activate_first_delayed(pwq);
pwq              1188 kernel/workqueue.c 	if (likely(pwq->flush_color != color))
pwq              1192 kernel/workqueue.c 	if (pwq->nr_in_flight[color])
pwq              1196 kernel/workqueue.c 	pwq->flush_color = -1;
pwq              1202 kernel/workqueue.c 	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
pwq              1203 kernel/workqueue.c 		complete(&pwq->wq->first_flusher->done);
pwq              1205 kernel/workqueue.c 	put_pwq(pwq);
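
pwq_dec_nr_in_flight() is the completion half of the flush-color protocol: each queued work is stamped with the pwq's current work_color, nr_in_flight[] counts outstanding works per color, and once the color a flush is waiting on drains to zero, the pwq resets flush_color and (if it was the last pwq) completes the first flusher. A stripped-down model of just that bookkeeping, with simplified names and sizes:

    #include <stdbool.h>

    #define NR_COLORS 15

    struct mini_pwq {
        int nr_in_flight[NR_COLORS + 1]; /* outstanding works per color */
        int flush_color;                 /* color being flushed, or -1 */
        int work_color;                  /* stamped onto new works */
    };

    /* Queue side, cf. __queue_work(): charge and remember the color. */
    static int charge(struct mini_pwq *p)
    {
        p->nr_in_flight[p->work_color]++;
        return p->work_color;
    }

    /* Completion side, cf. pwq_dec_nr_in_flight(): true when this pwq
     * has drained the color a flush is waiting on. */
    static bool complete_one(struct mini_pwq *p, int color)
    {
        p->nr_in_flight[color]--;
        if (p->flush_color != color || p->nr_in_flight[color])
            return false;
        p->flush_color = -1;
        return true;
    }
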
pwq              1239 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              1278 kernel/workqueue.c 	pwq = get_work_pwq(work);
pwq              1279 kernel/workqueue.c 	if (pwq && pwq->pool == pool) {
pwq              1293 kernel/workqueue.c 		pwq_dec_nr_in_flight(pwq, get_work_color(work));
pwq              1325 kernel/workqueue.c static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
pwq              1328 kernel/workqueue.c 	struct worker_pool *pool = pwq->pool;
pwq              1331 kernel/workqueue.c 	set_work_pwq(work, pwq, extra_flags);
pwq              1333 kernel/workqueue.c 	get_pwq(pwq);
pwq              1398 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              1424 kernel/workqueue.c 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
pwq              1428 kernel/workqueue.c 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
pwq              1437 kernel/workqueue.c 	if (last_pool && last_pool != pwq->pool) {
pwq              1445 kernel/workqueue.c 			pwq = worker->current_pwq;
pwq              1449 kernel/workqueue.c 			spin_lock(&pwq->pool->lock);
pwq              1452 kernel/workqueue.c 		spin_lock(&pwq->pool->lock);
pwq              1463 kernel/workqueue.c 	if (unlikely(!pwq->refcnt)) {
pwq              1465 kernel/workqueue.c 			spin_unlock(&pwq->pool->lock);
pwq              1475 kernel/workqueue.c 	trace_workqueue_queue_work(req_cpu, pwq, work);
pwq              1480 kernel/workqueue.c 	pwq->nr_in_flight[pwq->work_color]++;
pwq              1481 kernel/workqueue.c 	work_flags = work_color_to_flags(pwq->work_color);
pwq              1483 kernel/workqueue.c 	if (likely(pwq->nr_active < pwq->max_active)) {
pwq              1485 kernel/workqueue.c 		pwq->nr_active++;
pwq              1486 kernel/workqueue.c 		worklist = &pwq->pool->worklist;
pwq              1488 kernel/workqueue.c 			pwq->pool->watchdog_ts = jiffies;
pwq              1491 kernel/workqueue.c 		worklist = &pwq->delayed_works;
pwq              1494 kernel/workqueue.c 	insert_work(pwq, work, worklist, work_flags);
pwq              1497 kernel/workqueue.c 	spin_unlock(&pwq->pool->lock);
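
__queue_work() as shown does three things: picks the pwq (per-cpu, or the node's unbound pwq), re-routes to the pool where the work last ran if it might still be executing there, and then admits the work, going straight onto the pool's worklist while nr_active < max_active and parking on delayed_works otherwise. Callers reach it through queue_work() and friends; a usage sketch with hypothetical my_wq/my_fn:

    static void my_fn(struct work_struct *work)
    {
        /* executes later on a worker-pool thread */
    }
    static DECLARE_WORK(my_work, my_fn);

    /* queue_work() passes WORK_CPU_UNBOUND as req_cpu, letting
     * __queue_work() pick the local or node-affine pwq as above. */
    queue_work(my_wq, &my_work);

    /* Or pin the per-cpu pwq choice explicitly: */
    queue_work_on(2, my_wq, &my_work);
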
pwq              2016 kernel/workqueue.c 	struct pool_workqueue *pwq = get_work_pwq(work);
pwq              2017 kernel/workqueue.c 	struct workqueue_struct *wq = pwq->wq;
pwq              2025 kernel/workqueue.c 	if (list_empty(&pwq->mayday_node)) {
pwq              2031 kernel/workqueue.c 		get_pwq(pwq);
pwq              2032 kernel/workqueue.c 		list_add_tail(&pwq->mayday_node, &wq->maydays);
pwq              2169 kernel/workqueue.c 	struct pool_workqueue *pwq = get_work_pwq(work);
pwq              2171 kernel/workqueue.c 	bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
pwq              2207 kernel/workqueue.c 	worker->current_pwq = pwq;
pwq              2214 kernel/workqueue.c 	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
pwq              2247 kernel/workqueue.c 	lock_map_acquire(&pwq->wq->lockdep_map);
pwq              2279 kernel/workqueue.c 	lock_map_release(&pwq->wq->lockdep_map);
pwq              2314 kernel/workqueue.c 	pwq_dec_nr_in_flight(pwq, work_color);
pwq              2495 kernel/workqueue.c 		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
pwq              2497 kernel/workqueue.c 		struct worker_pool *pool = pwq->pool;
pwq              2502 kernel/workqueue.c 		list_del_init(&pwq->mayday_node);
pwq              2516 kernel/workqueue.c 			if (get_work_pwq(work) == pwq) {
pwq              2542 kernel/workqueue.c 				if (wq->rescuer && list_empty(&pwq->mayday_node)) {
pwq              2543 kernel/workqueue.c 					get_pwq(pwq);
pwq              2544 kernel/workqueue.c 					list_add_tail(&pwq->mayday_node, &wq->maydays);
pwq              2554 kernel/workqueue.c 		put_pwq(pwq);
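
This is the mayday path: when a pool is starved of workers, send_mayday() takes a pwq reference and chains it on wq->maydays; rescuer_thread() pops each entry, executes the matching works in the starved pool, re-arms the mayday if work is still pending, and finally drops the reference. Only workqueues created with WQ_MEM_RECLAIM get a rescuer; a hedged creation sketch (name hypothetical):

    /* WQ_MEM_RECLAIM guarantees a dedicated rescuer thread, so this
     * wq makes forward progress even when worker creation would have
     * to allocate memory. */
    struct workqueue_struct *my_reclaim_wq;

    my_reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 1);
    if (!my_reclaim_wq)
        return -ENOMEM;
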
pwq              2653 kernel/workqueue.c static void insert_wq_barrier(struct pool_workqueue *pwq,
pwq              2689 kernel/workqueue.c 	insert_work(pwq, &barr->work, head,
pwq              2728 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              2735 kernel/workqueue.c 	for_each_pwq(pwq, wq) {
pwq              2736 kernel/workqueue.c 		struct worker_pool *pool = pwq->pool;
pwq              2741 kernel/workqueue.c 			WARN_ON_ONCE(pwq->flush_color != -1);
pwq              2743 kernel/workqueue.c 			if (pwq->nr_in_flight[flush_color]) {
pwq              2744 kernel/workqueue.c 				pwq->flush_color = flush_color;
pwq              2751 kernel/workqueue.c 			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
pwq              2752 kernel/workqueue.c 			pwq->work_color = work_color;
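
flush_workqueue_prep_pwqs() performs the two-phase color advance behind flush_workqueue(): for every pwq it marks flush_color on those that still have in-flight works of the old color (each later reports in through pwq_dec_nr_in_flight() above) and rolls work_color forward so newly queued works get the next color. From the caller's side the machinery collapses to a single call:

    /* Everything queued on my_wq before this call has finished by the
     * time it returns; works queued afterwards are not waited for. */
    queue_work(my_wq, &my_work);
    flush_workqueue(my_wq);
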
pwq              2936 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              2952 kernel/workqueue.c 	for_each_pwq(pwq, wq) {
pwq              2955 kernel/workqueue.c 		spin_lock_irq(&pwq->pool->lock);
pwq              2956 kernel/workqueue.c 		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
pwq              2957 kernel/workqueue.c 		spin_unlock_irq(&pwq->pool->lock);
pwq              2982 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              2995 kernel/workqueue.c 	pwq = get_work_pwq(work);
pwq              2996 kernel/workqueue.c 	if (pwq) {
pwq              2997 kernel/workqueue.c 		if (unlikely(pwq->pool != pool))
pwq              3003 kernel/workqueue.c 		pwq = worker->current_pwq;
pwq              3006 kernel/workqueue.c 	check_flush_dependency(pwq->wq, work);
pwq              3008 kernel/workqueue.c 	insert_wq_barrier(pwq, barr, work, worker);
pwq              3021 kernel/workqueue.c 	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
pwq              3022 kernel/workqueue.c 		lock_map_acquire(&pwq->wq->lockdep_map);
pwq              3023 kernel/workqueue.c 		lock_map_release(&pwq->wq->lockdep_map);
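
start_flush_work() targets one work item rather than a whole workqueue: it locates the pwq the work is queued on (or the worker currently running it) and plants a barrier work directly behind it with insert_wq_barrier(); the lockdep acquire/release pair at the end is only taken when a real deadlock is possible, i.e. for single-threaded workqueues or ones with a rescuer. Caller view:

    /* Returns true if it had to wait, false if the work was idle. */
    if (flush_work(&my_work))
        pr_debug("my_work was pending or running\n");
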
pwq              3659 kernel/workqueue.c 	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
pwq              3661 kernel/workqueue.c 	struct workqueue_struct *wq = pwq->wq;
pwq              3662 kernel/workqueue.c 	struct worker_pool *pool = pwq->pool;
pwq              3669 kernel/workqueue.c 	list_del_rcu(&pwq->pwqs_node);
pwq              3677 kernel/workqueue.c 	call_rcu(&pwq->rcu, rcu_free_pwq);
pwq              3697 kernel/workqueue.c static void pwq_adjust_max_active(struct pool_workqueue *pwq)
pwq              3699 kernel/workqueue.c 	struct workqueue_struct *wq = pwq->wq;
pwq              3707 kernel/workqueue.c 	if (!freezable && pwq->max_active == wq->saved_max_active)
pwq              3711 kernel/workqueue.c 	spin_lock_irqsave(&pwq->pool->lock, flags);
pwq              3719 kernel/workqueue.c 		pwq->max_active = wq->saved_max_active;
pwq              3721 kernel/workqueue.c 		while (!list_empty(&pwq->delayed_works) &&
pwq              3722 kernel/workqueue.c 		       pwq->nr_active < pwq->max_active)
pwq              3723 kernel/workqueue.c 			pwq_activate_first_delayed(pwq);
pwq              3729 kernel/workqueue.c 		wake_up_worker(pwq->pool);
pwq              3731 kernel/workqueue.c 		pwq->max_active = 0;
pwq              3734 kernel/workqueue.c 	spin_unlock_irqrestore(&pwq->pool->lock, flags);
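
pwq_adjust_max_active() is where freezing is applied per pwq: on a freezable workqueue while the system is freezing, max_active drops to 0 so nothing new starts; otherwise max_active is restored from saved_max_active and parked delayed works are re-activated up to the limit. Opting a workqueue into that behavior (sketch, name hypothetical):

    /* Works on a WQ_FREEZABLE wq stop being dispatched across suspend:
     * freeze_workqueues_begin() zeroes max_active here and
     * thaw_workqueues() restores it. 0 picks the default max_active. */
    my_fwq = alloc_workqueue("my_fwq", WQ_FREEZABLE, 0);
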
pwq              3738 kernel/workqueue.c static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
pwq              3741 kernel/workqueue.c 	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
pwq              3743 kernel/workqueue.c 	memset(pwq, 0, sizeof(*pwq));
pwq              3745 kernel/workqueue.c 	pwq->pool = pool;
pwq              3746 kernel/workqueue.c 	pwq->wq = wq;
pwq              3747 kernel/workqueue.c 	pwq->flush_color = -1;
pwq              3748 kernel/workqueue.c 	pwq->refcnt = 1;
pwq              3749 kernel/workqueue.c 	INIT_LIST_HEAD(&pwq->delayed_works);
pwq              3750 kernel/workqueue.c 	INIT_LIST_HEAD(&pwq->pwqs_node);
pwq              3751 kernel/workqueue.c 	INIT_LIST_HEAD(&pwq->mayday_node);
pwq              3752 kernel/workqueue.c 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
pwq              3756 kernel/workqueue.c static void link_pwq(struct pool_workqueue *pwq)
pwq              3758 kernel/workqueue.c 	struct workqueue_struct *wq = pwq->wq;
pwq              3763 kernel/workqueue.c 	if (!list_empty(&pwq->pwqs_node))
pwq              3767 kernel/workqueue.c 	pwq->work_color = wq->work_color;
pwq              3770 kernel/workqueue.c 	pwq_adjust_max_active(pwq);
pwq              3773 kernel/workqueue.c 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
pwq              3781 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              3789 kernel/workqueue.c 	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
pwq              3790 kernel/workqueue.c 	if (!pwq) {
pwq              3795 kernel/workqueue.c 	init_pwq(pwq, wq, pool);
pwq              3796 kernel/workqueue.c 	return pwq;
pwq              3854 kernel/workqueue.c 						   struct pool_workqueue *pwq)
pwq              3862 kernel/workqueue.c 	link_pwq(pwq);
pwq              3865 kernel/workqueue.c 	rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
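
A fresh pwq starts with refcnt 1, flush_color -1 and empty lists; link_pwq() then syncs its work_color with the workqueue's, applies max_active, and publishes it on wq->pwqs with list_add_rcu(), while numa_pwq_tbl_install() does the same for the per-node table via rcu_assign_pointer(). Readers walk the list under RCU (or wq->mutex), which is what the for_each_pwq() macro at the top of the file encodes; a hedged reader sketch:

    rcu_read_lock();
    for_each_pwq(pwq, wq) {
        /* pwq stays valid here even while being unlinked; the memory
         * is only freed after a grace period via call_rcu() above */
    }
    rcu_read_unlock();
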
pwq              4087 kernel/workqueue.c 	struct pool_workqueue *old_pwq = NULL, *pwq;
pwq              4106 kernel/workqueue.c 	pwq = unbound_pwq_by_node(wq, node);
pwq              4115 kernel/workqueue.c 		if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
pwq              4122 kernel/workqueue.c 	pwq = alloc_unbound_pwq(wq, target_attrs);
pwq              4123 kernel/workqueue.c 	if (!pwq) {
pwq              4131 kernel/workqueue.c 	old_pwq = numa_pwq_tbl_install(wq, node, pwq);
pwq              4156 kernel/workqueue.c 			struct pool_workqueue *pwq =
pwq              4161 kernel/workqueue.c 			init_pwq(pwq, wq, &cpu_pools[highpri]);
pwq              4164 kernel/workqueue.c 			link_pwq(pwq);
pwq              4236 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              4303 kernel/workqueue.c 	for_each_pwq(pwq, wq)
pwq              4304 kernel/workqueue.c 		pwq_adjust_max_active(pwq);
pwq              4334 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              4362 kernel/workqueue.c 	for_each_pwq(pwq, wq) {
pwq              4366 kernel/workqueue.c 			if (WARN_ON(pwq->nr_in_flight[i])) {
pwq              4373 kernel/workqueue.c 		if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
pwq              4374 kernel/workqueue.c 		    WARN_ON(pwq->nr_active) ||
pwq              4375 kernel/workqueue.c 		    WARN_ON(!list_empty(&pwq->delayed_works))) {
pwq              4405 kernel/workqueue.c 			pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
pwq              4407 kernel/workqueue.c 			put_pwq_unlocked(pwq);
pwq              4414 kernel/workqueue.c 		pwq = wq->dfl_pwq;
pwq              4416 kernel/workqueue.c 		put_pwq_unlocked(pwq);
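
destroy_workqueue() sanity-checks that the workqueue is really empty: no in-flight colors, no active or delayed works, and no references beyond the default pwq's own. It then drops the per-node pwq references and dfl_pwq through put_pwq_unlocked(), and the memory is finally reclaimed by the RCU callback queued in pwq_unbound_release_workfn() above. Caller view:

    /* destroy_workqueue() drains the wq first, so pending works run to
     * completion before the WARN_ONs above are evaluated. */
    destroy_workqueue(my_wq);
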
pwq              4433 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              4446 kernel/workqueue.c 	for_each_pwq(pwq, wq)
pwq              4447 kernel/workqueue.c 		pwq_adjust_max_active(pwq);
pwq              4504 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              4514 kernel/workqueue.c 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
pwq              4516 kernel/workqueue.c 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
pwq              4518 kernel/workqueue.c 	ret = !list_empty(&pwq->delayed_works);
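
workqueue_congested() defines congestion as "the pwq that would serve this CPU has works parked on delayed_works", i.e. it is already at max_active. Usage sketch:

    /* WORK_CPU_UNBOUND asks about the pwq the local CPU would use. */
    if (workqueue_congested(WORK_CPU_UNBOUND, my_wq))
        return;  /* back off: new works would only park on delayed_works */
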
pwq              4601 kernel/workqueue.c 	struct pool_workqueue *pwq = NULL;
pwq              4619 kernel/workqueue.c 	probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
pwq              4620 kernel/workqueue.c 	probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
pwq              4654 kernel/workqueue.c static void show_pwq(struct pool_workqueue *pwq)
pwq              4656 kernel/workqueue.c 	struct worker_pool *pool = pwq->pool;
pwq              4666 kernel/workqueue.c 		pwq->nr_active, pwq->max_active, pwq->refcnt,
pwq              4667 kernel/workqueue.c 		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
pwq              4670 kernel/workqueue.c 		if (worker->current_pwq == pwq) {
pwq              4680 kernel/workqueue.c 			if (worker->current_pwq != pwq)
pwq              4685 kernel/workqueue.c 				worker == pwq->wq->rescuer ? "(RESCUER)" : "",
pwq              4695 kernel/workqueue.c 		if (get_work_pwq(work) == pwq) {
pwq              4705 kernel/workqueue.c 			if (get_work_pwq(work) != pwq)
pwq              4714 kernel/workqueue.c 	if (!list_empty(&pwq->delayed_works)) {
pwq              4718 kernel/workqueue.c 		list_for_each_entry(work, &pwq->delayed_works, entry) {
pwq              4744 kernel/workqueue.c 		struct pool_workqueue *pwq;
pwq              4747 kernel/workqueue.c 		for_each_pwq(pwq, wq) {
pwq              4748 kernel/workqueue.c 			if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
pwq              4758 kernel/workqueue.c 		for_each_pwq(pwq, wq) {
pwq              4759 kernel/workqueue.c 			spin_lock_irqsave(&pwq->pool->lock, flags);
pwq              4760 kernel/workqueue.c 			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
pwq              4761 kernel/workqueue.c 				show_pwq(pwq);
pwq              4762 kernel/workqueue.c 			spin_unlock_irqrestore(&pwq->pool->lock, flags);
pwq              5144 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              5153 kernel/workqueue.c 		for_each_pwq(pwq, wq)
pwq              5154 kernel/workqueue.c 			pwq_adjust_max_active(pwq);
pwq              5178 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              5192 kernel/workqueue.c 		for_each_pwq(pwq, wq) {
pwq              5193 kernel/workqueue.c 			WARN_ON_ONCE(pwq->nr_active < 0);
pwq              5194 kernel/workqueue.c 			if (pwq->nr_active) {
pwq              5219 kernel/workqueue.c 	struct pool_workqueue *pwq;
pwq              5231 kernel/workqueue.c 		for_each_pwq(pwq, wq)
pwq              5232 kernel/workqueue.c 			pwq_adjust_max_active(pwq);
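
The last three groups implement the system-wide freeze cycle: freeze_workqueues_begin() walks every workqueue and lets pwq_adjust_max_active() zero out the freezable pwqs, freeze_workqueues_busy() reports whether any freezable pwq still has active works, and thaw_workqueues() restores max_active and kicks the parked delayed works. Roughly the sequence the power-management code drives (compare kernel/power/process.c; retries elided):

    freeze_workqueues_begin();      /* freezable pwqs: max_active -> 0 */
    if (freeze_workqueues_busy()) {
        /* some freezable pwq still has active works; caller retries
         * or aborts the freeze */
    }
    /* ... tasks frozen, suspend/hibernation proceeds ... */
    thaw_workqueues();              /* restore max_active, reactivate */
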