worker            215 arch/x86/kvm/i8254.c 		kthread_queue_work(pit->worker, &pit->expired);
worker            275 arch/x86/kvm/i8254.c 	kthread_queue_work(pt->worker, &pt->expired);
worker            670 arch/x86/kvm/i8254.c 	pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
worker            671 arch/x86/kvm/i8254.c 	if (IS_ERR(pit->worker))
worker            714 arch/x86/kvm/i8254.c 	kthread_destroy_worker(pit->worker);
worker            733 arch/x86/kvm/i8254.c 		kthread_destroy_worker(pit->worker);
worker             48 arch/x86/kvm/i8254.h 	struct kthread_worker *worker;
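
The arch/x86/kvm/i8254.c hits above trace the dedicated-kthread_worker lifecycle: create the worker at PIT setup, queue a kthread_work from the timer path, destroy the worker on teardown. A minimal sketch of that pattern, with hypothetical names (pit_like, pit_expired_fn) standing in for the KVM structures:

#include <linux/kthread.h>
#include <linux/err.h>

struct pit_like {
	struct kthread_worker *worker;
	struct kthread_work expired;
};

static void pit_expired_fn(struct kthread_work *work)
{
	struct pit_like *p = container_of(work, struct pit_like, expired);

	/* runs in the dedicated worker thread; sleeping is allowed */
	(void)p;
}

static int pit_like_init(struct pit_like *p, pid_t pid_nr)
{
	kthread_init_work(&p->expired, pit_expired_fn);
	p->worker = kthread_create_worker(0, "my-pit/%d", pid_nr);
	return PTR_ERR_OR_ZERO(p->worker);
}

static void pit_like_fire(struct pit_like *p)
{
	/* safe from hrtimer/irq context: the queueing lock is irq-safe */
	kthread_queue_work(p->worker, &p->expired);
}

static void pit_like_exit(struct pit_like *p)
{
	/* flushes remaining work and stops the thread */
	kthread_destroy_worker(p->worker);
}
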
worker            738 drivers/block/drbd/drbd_int.h 	struct drbd_thread worker;
worker            799 drivers/block/drbd/drbd_int.h 	struct work_struct worker;
worker           2276 drivers/block/drbd/drbd_main.c 	struct work_struct worker;
worker           2284 drivers/block/drbd/drbd_main.c 	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
worker           2348 drivers/block/drbd/drbd_main.c 	queue_work(retry.wq, &retry.worker);
worker           2627 drivers/block/drbd/drbd_main.c 			connection->worker.reset_cpu_mask = 1;
worker           2713 drivers/block/drbd/drbd_main.c 	drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
worker           2714 drivers/block/drbd/drbd_main.c 	connection->worker.connection = connection;
worker           2770 drivers/block/drbd/drbd_main.c 	INIT_WORK(&device->submit.worker, do_submit);
worker           3009 drivers/block/drbd/drbd_main.c 	INIT_WORK(&retry.worker, do_retry);
worker           3620 drivers/block/drbd/drbd_main.c 	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
worker           3662 drivers/block/drbd/drbd_main.c 	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
worker            369 drivers/block/drbd/drbd_nl.c 	if (current == connection->worker.task)
worker            398 drivers/block/drbd/drbd_nl.c 	if (current == connection->worker.task)
worker           1429 drivers/block/drbd/drbd_nl.c 	drbd_thread_start(&connection->worker);
worker           1445 drivers/block/drbd/drbd_nl.c 		drbd_thread_stop(&connection->worker);
worker           4434 drivers/block/drbd/drbd_nl.c 		if (get_t_state(&connection->worker) == RUNNING)
worker           4496 drivers/block/drbd/drbd_nl.c 		drbd_thread_stop(&connection->worker);
worker           1198 drivers/block/drbd/drbd_req.c 	queue_work(device->submit.wq, &device->submit.worker);
worker           1517 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
worker            625 drivers/block/drbd/drbd_state.c 		D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
worker           1526 drivers/block/drbd/drbd_state.c 	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
worker           1780 drivers/block/drbd/drbd_worker.c 	if (current == connection->worker.task) {
worker           2137 drivers/block/drbd/drbd_worker.c 		if (get_t_state(&connection->worker) != RUNNING)
worker            912 drivers/block/loop.c 	kthread_flush_worker(&lo->worker);
worker            924 drivers/block/loop.c 	kthread_init_worker(&lo->worker);
worker            926 drivers/block/loop.c 			&lo->worker, "loop%d", lo->lo_number);
worker           1952 drivers/block/loop.c 	kthread_queue_work(&lo->worker, &cmd->work);
worker             57 drivers/block/loop.h 	struct kthread_worker	worker;
worker            110 drivers/gpu/drm/drm_flip_work.c 	queue_work(wq, &work->worker);
worker            116 drivers/gpu/drm/drm_flip_work.c 	struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
worker            156 drivers/gpu/drm/drm_flip_work.c 	INIT_WORK(&work->worker, flip_worker);
worker             46 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
worker            737 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
worker            742 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 		queue_work(system_unbound_wq, &ct->worker);
worker            784 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	queue_work(system_unbound_wq, &ct->worker);
worker             75 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h 	struct work_struct worker;
worker            391 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c 	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
worker            241 drivers/gpu/drm/msm/msm_drv.c 			kthread_destroy_worker(&priv->event_thread[i].worker);
worker            504 drivers/gpu/drm/msm/msm_drv.c 		kthread_init_worker(&priv->event_thread[i].worker);
worker            508 drivers/gpu/drm/msm/msm_drv.c 				&priv->event_thread[i].worker,
worker            133 drivers/gpu/drm/msm/msm_drv.h 	struct kthread_worker worker;
worker            138 drivers/gpu/drm/nouveau/nouveau_drm.c 	list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
worker            161 drivers/gpu/drm/nouveau/nouveau_drm.c 	list_add_tail(&work->head, &cli->worker);
worker            176 drivers/gpu/drm/nouveau/nouveau_drm.c 	WARN_ON(!list_empty(&cli->worker));
worker            224 drivers/gpu/drm/nouveau/nouveau_drm.c 	INIT_LIST_HEAD(&cli->worker);
worker            110 drivers/gpu/drm/nouveau/nouveau_drv.h 	struct list_head worker;
worker            184 drivers/hid/hid-bigbenff.c 	struct work_struct worker;
worker            191 drivers/hid/hid-bigbenff.c 		struct bigben_device, worker);
worker            248 drivers/hid/hid-bigbenff.c 		schedule_work(&bigben->worker);
worker            280 drivers/hid/hid-bigbenff.c 				schedule_work(&bigben->worker);
worker            312 drivers/hid/hid-bigbenff.c 	cancel_work_sync(&bigben->worker);
worker            353 drivers/hid/hid-bigbenff.c 	INIT_WORK(&bigben->worker, bigben_worker);
worker            394 drivers/hid/hid-bigbenff.c 	schedule_work(&bigben->worker);
worker             44 drivers/hid/hid-wiimote-core.c 						   worker);
worker            101 drivers/hid/hid-wiimote-core.c 		schedule_work(&wdata->queue.worker);
worker           1745 drivers/hid/hid-wiimote-core.c 	INIT_WORK(&wdata->queue.worker, wiimote_queue_worker);
worker           1779 drivers/hid/hid-wiimote-core.c 	cancel_work_sync(&wdata->queue.worker);
worker            109 drivers/hid/hid-wiimote.h 	struct work_struct worker;
worker             52 drivers/hid/uhid.c 	struct work_struct worker;
worker             59 drivers/hid/uhid.c 	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
worker            521 drivers/hid/uhid.c 	schedule_work(&uhid->worker);
worker            565 drivers/hid/uhid.c 	cancel_work_sync(&uhid->worker);
worker            629 drivers/hid/uhid.c 	INIT_WORK(&uhid->worker, uhid_device_add_worker);
worker             99 drivers/infiniband/core/fmr_pool.c 	struct kthread_worker	  *worker;
worker            183 drivers/infiniband/core/fmr_pool.c 		kthread_queue_work(pool->worker, &pool->work);
worker            253 drivers/infiniband/core/fmr_pool.c 	pool->worker =
worker            255 drivers/infiniband/core/fmr_pool.c 	if (IS_ERR(pool->worker)) {
worker            257 drivers/infiniband/core/fmr_pool.c 		ret = PTR_ERR(pool->worker);
worker            325 drivers/infiniband/core/fmr_pool.c 	kthread_destroy_worker(pool->worker);
worker            375 drivers/infiniband/core/fmr_pool.c 	kthread_queue_work(pool->worker, &pool->work);
worker            487 drivers/infiniband/core/fmr_pool.c 				kthread_queue_work(pool->worker, &pool->work);
worker            134 drivers/infiniband/hw/bnxt_re/bnxt_re.h 	struct delayed_work		worker;
worker           1290 drivers/infiniband/hw/bnxt_re/main.c 		cancel_delayed_work_sync(&rdev->worker);
worker           1330 drivers/infiniband/hw/bnxt_re/main.c 						worker.work);
worker           1333 drivers/infiniband/hw/bnxt_re/main.c 	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
worker           1457 drivers/infiniband/hw/bnxt_re/main.c 		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
worker           1459 drivers/infiniband/hw/bnxt_re/main.c 		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
worker            336 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	tasklet_schedule(&nq->worker);
worker            344 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	tasklet_disable(&nq->worker);
worker            351 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		tasklet_kill(&nq->worker);
worker            390 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		tasklet_init(&nq->worker, bnxt_qplib_service_nq,
worker            393 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		tasklet_enable(&nq->worker);
worker            481 drivers/infiniband/hw/bnxt_re/qplib_fp.h 	struct tasklet_struct	worker;
worker            436 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	tasklet_schedule(&rcfw->worker);
worker            625 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	tasklet_disable(&rcfw->worker);
worker            633 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		tasklet_kill(&rcfw->worker);
worker            674 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		tasklet_init(&rcfw->worker,
worker            677 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		tasklet_enable(&rcfw->worker);
worker            233 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h 	struct tasklet_struct	worker;
worker             35 drivers/input/misc/msm-vibrator.c 	struct work_struct worker;
worker            119 drivers/input/misc/msm-vibrator.c 						     worker);
worker            141 drivers/input/misc/msm-vibrator.c 	schedule_work(&vibrator->worker);
worker            150 drivers/input/misc/msm-vibrator.c 	cancel_work_sync(&vibrator->worker);
worker            208 drivers/input/misc/msm-vibrator.c 	INIT_WORK(&vibrator->worker, msm_vibrator_worker);
worker            240 drivers/input/misc/msm-vibrator.c 	cancel_work_sync(&vibrator->worker);
worker             65 drivers/macintosh/ams/ams-core.c 	schedule_work(&ams_info.worker);
worker            185 drivers/macintosh/ams/ams-core.c 	INIT_WORK(&ams_info.worker, ams_worker);
worker            216 drivers/macintosh/ams/ams-core.c 	flush_work(&ams_info.worker);
worker             34 drivers/macintosh/ams/ams.h 	struct work_struct worker;
worker            138 drivers/md/dm-clone-target.c 	struct work_struct worker;
worker            258 drivers/md/dm-clone-target.c 	queue_work(clone->wq, &clone->worker);
worker           1287 drivers/md/dm-clone-target.c 	struct clone *clone = container_of(work, typeof(*clone), worker);
worker           1907 drivers/md/dm-clone-target.c 	INIT_WORK(&clone->worker, do_worker);
worker           1151 drivers/md/dm-era-target.c 	struct work_struct worker;
worker           1205 drivers/md/dm-era-target.c 		queue_work(era->wq, &era->worker);
worker           1309 drivers/md/dm-era-target.c 	struct era *era = container_of(ws, struct era, worker);
worker           1504 drivers/md/dm-era-target.c 	INIT_WORK(&era->worker, do_work);
worker            250 drivers/md/dm-thin.c 	struct work_struct worker;
worker            438 drivers/md/dm-thin.c 	queue_work(pool->wq, &pool->worker);
worker           2411 drivers/md/dm-thin.c 	struct pool *pool = container_of(ws, struct pool, worker);
worker           2457 drivers/md/dm-thin.c 	struct work_struct worker;
worker           2463 drivers/md/dm-thin.c 	return container_of(ws, struct pool_work, worker);
worker           2474 drivers/md/dm-thin.c 	INIT_WORK_ONSTACK(&pw->worker, fn);
worker           2476 drivers/md/dm-thin.c 	queue_work(pool->wq, &pw->worker);
worker           3007 drivers/md/dm-thin.c 	INIT_WORK(&pool->worker, do_worker);
worker           6179 drivers/md/raid5.c 				 struct r5worker *worker,
worker           6231 drivers/md/raid5.c 	struct r5worker *worker = container_of(work, struct r5worker, work);
worker           6232 drivers/md/raid5.c 	struct r5worker_group *group = worker->group;
worker           6247 drivers/md/raid5.c 		released = release_stripe_list(conf, worker->temp_inactive_list);
worker           6249 drivers/md/raid5.c 		batch_size = handle_active_stripes(conf, group_id, worker,
worker           6250 drivers/md/raid5.c 						   worker->temp_inactive_list);
worker           6251 drivers/md/raid5.c 		worker->working = false;
worker           6711 drivers/md/raid5.c 			struct r5worker *worker = group->workers + j;
worker           6712 drivers/md/raid5.c 			worker->group = group;
worker           6713 drivers/md/raid5.c 			INIT_WORK(&worker->work, raid5_do_work);
worker           6716 drivers/md/raid5.c 				INIT_LIST_HEAD(worker->temp_inactive_list + k);
worker            425 drivers/media/usb/hdpvr/hdpvr-core.c 	flush_work(&dev->worker);
worker            247 drivers/media/usb/hdpvr/hdpvr-video.c 						worker);
worker            311 drivers/media/usb/hdpvr/hdpvr-video.c 	INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
worker            312 drivers/media/usb/hdpvr/hdpvr-video.c 	schedule_work(&dev->worker);
worker            344 drivers/media/usb/hdpvr/hdpvr-video.c 	flush_work(&dev->worker);
worker           1131 drivers/media/usb/hdpvr/hdpvr-video.c 	flush_work(&dev->worker);
worker            106 drivers/media/usb/hdpvr/hdpvr.h 	struct work_struct	worker;
worker           2203 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static void brcmf_fws_dequeue_worker(struct work_struct *worker)
worker           2213 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
worker            767 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
worker            772 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
worker            700 drivers/platform/chrome/cros_ec_spi.c static void cros_ec_spi_high_pri_release(void *worker)
worker            702 drivers/platform/chrome/cros_ec_spi.c 	kthread_destroy_worker(worker);
worker             36 drivers/platform/olpc/olpc-ec.c 	struct work_struct worker;
worker             80 drivers/platform/olpc/olpc-ec.c 	struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker);
worker            106 drivers/platform/olpc/olpc-ec.c 	schedule_work(&ec->worker);
worker            124 drivers/platform/olpc/olpc-ec.c 	schedule_work(&ec->worker);
worker            418 drivers/platform/olpc/olpc-ec.c 	INIT_WORK(&ec->worker, olpc_ec_worker);
worker            154 drivers/s390/block/dasd_alias.c 	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
worker            263 drivers/s390/block/dasd_alias.c 		cancel_work_sync(&lcu->suc_data.worker);
worker            889 drivers/s390/block/dasd_alias.c 				worker);
worker            951 drivers/s390/block/dasd_alias.c 	if (!schedule_work(&lcu->suc_data.worker))
worker            100 drivers/s390/block/dasd_eckd.c 	struct work_struct worker;
worker            107 drivers/s390/block/dasd_eckd.c 	struct work_struct worker;
worker            119 drivers/s390/block/dasd_eckd.c 	struct work_struct worker;
worker           1272 drivers/s390/block/dasd_eckd.c 	data = container_of(work, struct path_verification_work_data, worker);
worker           1444 drivers/s390/block/dasd_eckd.c 	INIT_WORK(&data->worker, do_path_verification_work);
worker           1448 drivers/s390/block/dasd_eckd.c 	schedule_work(&data->worker);
worker           1664 drivers/s390/block/dasd_eckd.c 	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
worker           1689 drivers/s390/block/dasd_eckd.c 	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
worker           1700 drivers/s390/block/dasd_eckd.c 	schedule_work(&data->worker);
worker           6529 drivers/s390/block/dasd_eckd.c 	data = container_of(work, struct check_attention_work_data, worker);
worker           6561 drivers/s390/block/dasd_eckd.c 	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
worker           6565 drivers/s390/block/dasd_eckd.c 	schedule_work(&data->worker);
worker            620 drivers/s390/block/dasd_eckd.h 	struct work_struct worker;
worker           1150 drivers/s390/net/qeth_l2_main.c 	struct work_struct worker;
worker           1158 drivers/s390/net/qeth_l2_main.c 		container_of(work, struct qeth_bridge_state_data, worker);
worker           1213 drivers/s390/net/qeth_l2_main.c 	INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
worker           1217 drivers/s390/net/qeth_l2_main.c 	queue_work(card->event_wq, &data->worker);
worker           1221 drivers/s390/net/qeth_l2_main.c 	struct work_struct worker;
worker           1229 drivers/s390/net/qeth_l2_main.c 		container_of(work, struct qeth_bridge_host_data, worker);
worker           1285 drivers/s390/net/qeth_l2_main.c 	INIT_WORK(&data->worker, qeth_bridge_host_event_worker);
worker           1289 drivers/s390/net/qeth_l2_main.c 	queue_work(card->event_wq, &data->worker);
worker             77 drivers/thermal/intel/intel_powerclamp.c 	struct kthread_worker *worker;
worker            405 drivers/thermal/intel/intel_powerclamp.c 		kthread_queue_delayed_work(w_data->worker,
worker            437 drivers/thermal/intel/intel_powerclamp.c 		kthread_queue_work(w_data->worker, &w_data->balancing_work);
worker            480 drivers/thermal/intel/intel_powerclamp.c 	struct kthread_worker *worker;
worker            482 drivers/thermal/intel/intel_powerclamp.c 	worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
worker            483 drivers/thermal/intel/intel_powerclamp.c 	if (IS_ERR(worker))
worker            486 drivers/thermal/intel/intel_powerclamp.c 	w_data->worker = worker;
worker            491 drivers/thermal/intel/intel_powerclamp.c 	sched_setscheduler(worker->task, SCHED_FIFO, &sparam);
worker            495 drivers/thermal/intel/intel_powerclamp.c 	kthread_queue_work(w_data->worker, &w_data->balancing_work);
worker            502 drivers/thermal/intel/intel_powerclamp.c 	if (!w_data->worker)
worker            523 drivers/thermal/intel/intel_powerclamp.c 	kthread_destroy_worker(w_data->worker);
worker            525 drivers/thermal/intel/intel_powerclamp.c 	w_data->worker = NULL;
worker            237 drivers/vhost/vhost.c 	if (dev->worker) {
worker            257 drivers/vhost/vhost.c 	if (!dev->worker)
worker            266 drivers/vhost/vhost.c 		wake_up_process(dev->worker);
worker            469 drivers/vhost/vhost.c 	dev->worker = NULL;
worker            538 drivers/vhost/vhost.c 	struct task_struct *worker;
worker            549 drivers/vhost/vhost.c 	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
worker            550 drivers/vhost/vhost.c 	if (IS_ERR(worker)) {
worker            551 drivers/vhost/vhost.c 		err = PTR_ERR(worker);
worker            555 drivers/vhost/vhost.c 	dev->worker = worker;
worker            556 drivers/vhost/vhost.c 	wake_up_process(worker);	/* avoid contributing to loadavg */
worker            568 drivers/vhost/vhost.c 	kthread_stop(worker);
worker            569 drivers/vhost/vhost.c 	dev->worker = NULL;
worker            682 drivers/vhost/vhost.c 	if (dev->worker) {
worker            683 drivers/vhost/vhost.c 		kthread_stop(dev->worker);
worker            684 drivers/vhost/vhost.c 		dev->worker = NULL;
worker            166 drivers/vhost/vhost.h 	struct task_struct *worker;
worker             76 include/drm/drm_flip_work.h 	struct work_struct worker;
worker             98 include/linux/kthread.h 	struct kthread_worker	*worker;
worker            108 include/linux/kthread.h #define KTHREAD_WORKER_INIT(worker)	{				\
worker            109 include/linux/kthread.h 	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
worker            110 include/linux/kthread.h 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
worker            111 include/linux/kthread.h 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
worker            125 include/linux/kthread.h #define DEFINE_KTHREAD_WORKER(worker)					\
worker            126 include/linux/kthread.h 	struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
worker            140 include/linux/kthread.h # define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
worker            141 include/linux/kthread.h 	({ kthread_init_worker(&worker); worker; })
worker            142 include/linux/kthread.h # define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
worker            143 include/linux/kthread.h 	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
worker            145 include/linux/kthread.h # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
worker            148 include/linux/kthread.h extern void __kthread_init_worker(struct kthread_worker *worker,
worker            151 include/linux/kthread.h #define kthread_init_worker(worker)					\
worker            154 include/linux/kthread.h 		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
worker            181 include/linux/kthread.h bool kthread_queue_work(struct kthread_worker *worker,
worker            184 include/linux/kthread.h bool kthread_queue_delayed_work(struct kthread_worker *worker,
worker            188 include/linux/kthread.h bool kthread_mod_delayed_work(struct kthread_worker *worker,
worker            193 include/linux/kthread.h void kthread_flush_worker(struct kthread_worker *worker);
worker            198 include/linux/kthread.h void kthread_destroy_worker(struct kthread_worker *worker);
worker            330 kernel/async.c 	struct worker *worker = current_wq_worker();
worker            332 kernel/async.c 	return worker && worker->current_func == async_run_entry_fn;
worker            606 kernel/kthread.c void __kthread_init_worker(struct kthread_worker *worker,
worker            610 kernel/kthread.c 	memset(worker, 0, sizeof(struct kthread_worker));
worker            611 kernel/kthread.c 	raw_spin_lock_init(&worker->lock);
worker            612 kernel/kthread.c 	lockdep_set_class_and_name(&worker->lock, key, name);
worker            613 kernel/kthread.c 	INIT_LIST_HEAD(&worker->work_list);
worker            614 kernel/kthread.c 	INIT_LIST_HEAD(&worker->delayed_work_list);
worker            635 kernel/kthread.c 	struct kthread_worker *worker = worker_ptr;
worker            642 kernel/kthread.c 	WARN_ON(worker->task && worker->task != current);
worker            643 kernel/kthread.c 	worker->task = current;
worker            645 kernel/kthread.c 	if (worker->flags & KTW_FREEZABLE)
worker            653 kernel/kthread.c 		raw_spin_lock_irq(&worker->lock);
worker            654 kernel/kthread.c 		worker->task = NULL;
worker            655 kernel/kthread.c 		raw_spin_unlock_irq(&worker->lock);
worker            660 kernel/kthread.c 	raw_spin_lock_irq(&worker->lock);
worker            661 kernel/kthread.c 	if (!list_empty(&worker->work_list)) {
worker            662 kernel/kthread.c 		work = list_first_entry(&worker->work_list,
worker            666 kernel/kthread.c 	worker->current_work = work;
worker            667 kernel/kthread.c 	raw_spin_unlock_irq(&worker->lock);
worker            685 kernel/kthread.c 	struct kthread_worker *worker;
worker            689 kernel/kthread.c 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
worker            690 kernel/kthread.c 	if (!worker)
worker            693 kernel/kthread.c 	kthread_init_worker(worker);
worker            698 kernel/kthread.c 	task = __kthread_create_on_node(kthread_worker_fn, worker,
worker            706 kernel/kthread.c 	worker->flags = flags;
worker            707 kernel/kthread.c 	worker->task = task;
worker            709 kernel/kthread.c 	return worker;
worker            712 kernel/kthread.c 	kfree(worker);
worker            728 kernel/kthread.c 	struct kthread_worker *worker;
worker            732 kernel/kthread.c 	worker = __kthread_create_worker(-1, flags, namefmt, args);
worker            735 kernel/kthread.c 	return worker;
worker            760 kernel/kthread.c 	struct kthread_worker *worker;
worker            764 kernel/kthread.c 	worker = __kthread_create_worker(cpu, flags, namefmt, args);
worker            767 kernel/kthread.c 	return worker;
worker            776 kernel/kthread.c static inline bool queuing_blocked(struct kthread_worker *worker,
worker            779 kernel/kthread.c 	lockdep_assert_held(&worker->lock);
worker            784 kernel/kthread.c static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
worker            787 kernel/kthread.c 	lockdep_assert_held(&worker->lock);
worker            790 kernel/kthread.c 	WARN_ON_ONCE(work->worker && work->worker != worker);
worker            794 kernel/kthread.c static void kthread_insert_work(struct kthread_worker *worker,
worker            798 kernel/kthread.c 	kthread_insert_work_sanity_check(worker, work);
worker            801 kernel/kthread.c 	work->worker = worker;
worker            802 kernel/kthread.c 	if (!worker->current_work && likely(worker->task))
worker            803 kernel/kthread.c 		wake_up_process(worker->task);
worker            818 kernel/kthread.c bool kthread_queue_work(struct kthread_worker *worker,
worker            824 kernel/kthread.c 	raw_spin_lock_irqsave(&worker->lock, flags);
worker            825 kernel/kthread.c 	if (!queuing_blocked(worker, work)) {
worker            826 kernel/kthread.c 		kthread_insert_work(worker, work, &worker->work_list);
worker            829 kernel/kthread.c 	raw_spin_unlock_irqrestore(&worker->lock, flags);
worker            846 kernel/kthread.c 	struct kthread_worker *worker = work->worker;
worker            853 kernel/kthread.c 	if (WARN_ON_ONCE(!worker))
worker            856 kernel/kthread.c 	raw_spin_lock_irqsave(&worker->lock, flags);
worker            858 kernel/kthread.c 	WARN_ON_ONCE(work->worker != worker);
worker            863 kernel/kthread.c 	kthread_insert_work(worker, work, &worker->work_list);
worker            865 kernel/kthread.c 	raw_spin_unlock_irqrestore(&worker->lock, flags);
worker            869 kernel/kthread.c static void __kthread_queue_delayed_work(struct kthread_worker *worker,
worker            885 kernel/kthread.c 		kthread_insert_work(worker, work, &worker->work_list);
worker            890 kernel/kthread.c 	kthread_insert_work_sanity_check(worker, work);
worker            892 kernel/kthread.c 	list_add(&work->node, &worker->delayed_work_list);
worker            893 kernel/kthread.c 	work->worker = worker;
worker            913 kernel/kthread.c bool kthread_queue_delayed_work(struct kthread_worker *worker,
worker            921 kernel/kthread.c 	raw_spin_lock_irqsave(&worker->lock, flags);
worker            923 kernel/kthread.c 	if (!queuing_blocked(worker, work)) {
worker            924 kernel/kthread.c 		__kthread_queue_delayed_work(worker, dwork, delay);
worker            928 kernel/kthread.c 	raw_spin_unlock_irqrestore(&worker->lock, flags);
worker            957 kernel/kthread.c 	struct kthread_worker *worker;
worker            960 kernel/kthread.c 	worker = work->worker;
worker            961 kernel/kthread.c 	if (!worker)
worker            964 kernel/kthread.c 	raw_spin_lock_irq(&worker->lock);
worker            966 kernel/kthread.c 	WARN_ON_ONCE(work->worker != worker);
worker            969 kernel/kthread.c 		kthread_insert_work(worker, &fwork.work, work->node.next);
worker            970 kernel/kthread.c 	else if (worker->current_work == work)
worker            971 kernel/kthread.c 		kthread_insert_work(worker, &fwork.work,
worker            972 kernel/kthread.c 				    worker->work_list.next);
worker            976 kernel/kthread.c 	raw_spin_unlock_irq(&worker->lock);
worker           1000 kernel/kthread.c 		struct kthread_worker *worker = work->worker;
worker           1009 kernel/kthread.c 		raw_spin_unlock_irqrestore(&worker->lock, *flags);
worker           1011 kernel/kthread.c 		raw_spin_lock_irqsave(&worker->lock, *flags);
worker           1050 kernel/kthread.c bool kthread_mod_delayed_work(struct kthread_worker *worker,
worker           1058 kernel/kthread.c 	raw_spin_lock_irqsave(&worker->lock, flags);
worker           1061 kernel/kthread.c 	if (!work->worker)
worker           1065 kernel/kthread.c 	WARN_ON_ONCE(work->worker != worker);
worker           1073 kernel/kthread.c 	__kthread_queue_delayed_work(worker, dwork, delay);
worker           1075 kernel/kthread.c 	raw_spin_unlock_irqrestore(&worker->lock, flags);
worker           1082 kernel/kthread.c 	struct kthread_worker *worker = work->worker;
worker           1086 kernel/kthread.c 	if (!worker)
worker           1089 kernel/kthread.c 	raw_spin_lock_irqsave(&worker->lock, flags);
worker           1091 kernel/kthread.c 	WARN_ON_ONCE(work->worker != worker);
worker           1095 kernel/kthread.c 	if (worker->current_work != work)
worker           1103 kernel/kthread.c 	raw_spin_unlock_irqrestore(&worker->lock, flags);
worker           1105 kernel/kthread.c 	raw_spin_lock_irqsave(&worker->lock, flags);
worker           1109 kernel/kthread.c 	raw_spin_unlock_irqrestore(&worker->lock, flags);
worker           1158 kernel/kthread.c void kthread_flush_worker(struct kthread_worker *worker)
worker           1165 kernel/kthread.c 	kthread_queue_work(worker, &fwork.work);
worker           1178 kernel/kthread.c void kthread_destroy_worker(struct kthread_worker *worker)
worker           1182 kernel/kthread.c 	task = worker->task;
worker           1186 kernel/kthread.c 	kthread_flush_worker(worker);
worker           1188 kernel/kthread.c 	WARN_ON(!list_empty(&worker->work_list));
worker           1189 kernel/kthread.c 	kfree(worker);
worker             39 kernel/sched/cpufreq_schedutil.c 	struct			kthread_worker worker;
worker            581 kernel/sched/cpufreq_schedutil.c 	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
worker            679 kernel/sched/cpufreq_schedutil.c 	kthread_init_worker(&sg_policy->worker);
worker            680 kernel/sched/cpufreq_schedutil.c 	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
worker            711 kernel/sched/cpufreq_schedutil.c 	kthread_flush_worker(&sg_policy->worker);
worker            169 kernel/workqueue.c 	struct worker		*manager;	/* L: purely informational */
worker            251 kernel/workqueue.c 	struct worker		*rescuer;	/* I: rescue worker */
worker            410 kernel/workqueue.c #define for_each_pool_worker(worker, pool)				\
worker            411 kernel/workqueue.c 	list_for_each_entry((worker), &(pool)->workers, node)		\
worker            820 kernel/workqueue.c static struct worker *first_idle_worker(struct worker_pool *pool)
worker            825 kernel/workqueue.c 	return list_first_entry(&pool->idle_list, struct worker, entry);
worker            839 kernel/workqueue.c 	struct worker *worker = first_idle_worker(pool);
worker            841 kernel/workqueue.c 	if (likely(worker))
worker            842 kernel/workqueue.c 		wake_up_process(worker->task);
worker            853 kernel/workqueue.c 	struct worker *worker = kthread_data(task);
worker            855 kernel/workqueue.c 	if (!worker->sleeping)
worker            857 kernel/workqueue.c 	if (!(worker->flags & WORKER_NOT_RUNNING))
worker            858 kernel/workqueue.c 		atomic_inc(&worker->pool->nr_running);
worker            859 kernel/workqueue.c 	worker->sleeping = 0;
worker            871 kernel/workqueue.c 	struct worker *next, *worker = kthread_data(task);
worker            879 kernel/workqueue.c 	if (worker->flags & WORKER_NOT_RUNNING)
worker            882 kernel/workqueue.c 	pool = worker->pool;
worker            884 kernel/workqueue.c 	if (WARN_ON_ONCE(worker->sleeping))
worker            887 kernel/workqueue.c 	worker->sleeping = 1;
worker            936 kernel/workqueue.c 	struct worker *worker = kthread_data(task);
worker            938 kernel/workqueue.c 	return worker->last_func;
worker            951 kernel/workqueue.c static inline void worker_set_flags(struct worker *worker, unsigned int flags)
worker            953 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
worker            955 kernel/workqueue.c 	WARN_ON_ONCE(worker->task != current);
worker            959 kernel/workqueue.c 	    !(worker->flags & WORKER_NOT_RUNNING)) {
worker            963 kernel/workqueue.c 	worker->flags |= flags;
worker            976 kernel/workqueue.c static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
worker            978 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
worker            979 kernel/workqueue.c 	unsigned int oflags = worker->flags;
worker            981 kernel/workqueue.c 	WARN_ON_ONCE(worker->task != current);
worker            983 kernel/workqueue.c 	worker->flags &= ~flags;
worker            991 kernel/workqueue.c 		if (!(worker->flags & WORKER_NOT_RUNNING))
worker           1028 kernel/workqueue.c static struct worker *find_worker_executing_work(struct worker_pool *pool,
worker           1031 kernel/workqueue.c 	struct worker *worker;
worker           1033 kernel/workqueue.c 	hash_for_each_possible(pool->busy_hash, worker, hentry,
worker           1035 kernel/workqueue.c 		if (worker->current_work == work &&
worker           1036 kernel/workqueue.c 		    worker->current_func == work->func)
worker           1037 kernel/workqueue.c 			return worker;
worker           1352 kernel/workqueue.c 	struct worker *worker;
worker           1354 kernel/workqueue.c 	worker = current_wq_worker();
worker           1359 kernel/workqueue.c 	return worker && worker->current_pwq->wq == wq;
worker           1438 kernel/workqueue.c 		struct worker *worker;
worker           1442 kernel/workqueue.c 		worker = find_worker_executing_work(last_pool, work);
worker           1444 kernel/workqueue.c 		if (worker && worker->current_pwq->wq == wq) {
worker           1445 kernel/workqueue.c 			pwq = worker->current_pwq;
worker           1768 kernel/workqueue.c static void worker_enter_idle(struct worker *worker)
worker           1770 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
worker           1772 kernel/workqueue.c 	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
worker           1773 kernel/workqueue.c 	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
worker           1774 kernel/workqueue.c 			 (worker->hentry.next || worker->hentry.pprev)))
worker           1778 kernel/workqueue.c 	worker->flags |= WORKER_IDLE;
worker           1780 kernel/workqueue.c 	worker->last_active = jiffies;
worker           1783 kernel/workqueue.c 	list_add(&worker->entry, &pool->idle_list);
worker           1808 kernel/workqueue.c static void worker_leave_idle(struct worker *worker)
worker           1810 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
worker           1812 kernel/workqueue.c 	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
worker           1814 kernel/workqueue.c 	worker_clr_flags(worker, WORKER_IDLE);
worker           1816 kernel/workqueue.c 	list_del_init(&worker->entry);
worker           1819 kernel/workqueue.c static struct worker *alloc_worker(int node)
worker           1821 kernel/workqueue.c 	struct worker *worker;
worker           1823 kernel/workqueue.c 	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
worker           1824 kernel/workqueue.c 	if (worker) {
worker           1825 kernel/workqueue.c 		INIT_LIST_HEAD(&worker->entry);
worker           1826 kernel/workqueue.c 		INIT_LIST_HEAD(&worker->scheduled);
worker           1827 kernel/workqueue.c 		INIT_LIST_HEAD(&worker->node);
worker           1829 kernel/workqueue.c 		worker->flags = WORKER_PREP;
worker           1831 kernel/workqueue.c 	return worker;
worker           1843 kernel/workqueue.c static void worker_attach_to_pool(struct worker *worker,
worker           1852 kernel/workqueue.c 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
worker           1860 kernel/workqueue.c 		worker->flags |= WORKER_UNBOUND;
worker           1862 kernel/workqueue.c 	list_add_tail(&worker->node, &pool->workers);
worker           1863 kernel/workqueue.c 	worker->pool = pool;
worker           1876 kernel/workqueue.c static void worker_detach_from_pool(struct worker *worker)
worker           1878 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
worker           1883 kernel/workqueue.c 	list_del(&worker->node);
worker           1884 kernel/workqueue.c 	worker->pool = NULL;
worker           1891 kernel/workqueue.c 	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
worker           1909 kernel/workqueue.c static struct worker *create_worker(struct worker_pool *pool)
worker           1911 kernel/workqueue.c 	struct worker *worker = NULL;
worker           1920 kernel/workqueue.c 	worker = alloc_worker(pool->node);
worker           1921 kernel/workqueue.c 	if (!worker)
worker           1924 kernel/workqueue.c 	worker->id = id;
worker           1932 kernel/workqueue.c 	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
worker           1934 kernel/workqueue.c 	if (IS_ERR(worker->task))
worker           1937 kernel/workqueue.c 	set_user_nice(worker->task, pool->attrs->nice);
worker           1938 kernel/workqueue.c 	kthread_bind_mask(worker->task, pool->attrs->cpumask);
worker           1941 kernel/workqueue.c 	worker_attach_to_pool(worker, pool);
worker           1945 kernel/workqueue.c 	worker->pool->nr_workers++;
worker           1946 kernel/workqueue.c 	worker_enter_idle(worker);
worker           1947 kernel/workqueue.c 	wake_up_process(worker->task);
worker           1950 kernel/workqueue.c 	return worker;
worker           1955 kernel/workqueue.c 	kfree(worker);
worker           1969 kernel/workqueue.c static void destroy_worker(struct worker *worker)
worker           1971 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
worker           1976 kernel/workqueue.c 	if (WARN_ON(worker->current_work) ||
worker           1977 kernel/workqueue.c 	    WARN_ON(!list_empty(&worker->scheduled)) ||
worker           1978 kernel/workqueue.c 	    WARN_ON(!(worker->flags & WORKER_IDLE)))
worker           1984 kernel/workqueue.c 	list_del_init(&worker->entry);
worker           1985 kernel/workqueue.c 	worker->flags |= WORKER_DIE;
worker           1986 kernel/workqueue.c 	wake_up_process(worker->task);
worker           1996 kernel/workqueue.c 		struct worker *worker;
worker           2000 kernel/workqueue.c 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
worker           2001 kernel/workqueue.c 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
worker           2008 kernel/workqueue.c 		destroy_worker(worker);
worker           2133 kernel/workqueue.c static bool manage_workers(struct worker *worker)
worker           2135 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
worker           2141 kernel/workqueue.c 	pool->manager = worker;
worker           2165 kernel/workqueue.c static void process_one_work(struct worker *worker, struct work_struct *work)
worker           2170 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
worker           2173 kernel/workqueue.c 	struct worker *collision;
worker           2204 kernel/workqueue.c 	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
worker           2205 kernel/workqueue.c 	worker->current_work = work;
worker           2206 kernel/workqueue.c 	worker->current_func = work->func;
worker           2207 kernel/workqueue.c 	worker->current_pwq = pwq;
worker           2214 kernel/workqueue.c 	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
worker           2225 kernel/workqueue.c 		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
worker           2272 kernel/workqueue.c 	worker->current_func(work);
worker           2285 kernel/workqueue.c 		       worker->current_func);
worker           2304 kernel/workqueue.c 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
worker           2307 kernel/workqueue.c 	worker->last_func = worker->current_func;
worker           2310 kernel/workqueue.c 	hash_del(&worker->hentry);
worker           2311 kernel/workqueue.c 	worker->current_work = NULL;
worker           2312 kernel/workqueue.c 	worker->current_func = NULL;
worker           2313 kernel/workqueue.c 	worker->current_pwq = NULL;
worker           2329 kernel/workqueue.c static void process_scheduled_works(struct worker *worker)
worker           2331 kernel/workqueue.c 	while (!list_empty(&worker->scheduled)) {
worker           2332 kernel/workqueue.c 		struct work_struct *work = list_first_entry(&worker->scheduled,
worker           2334 kernel/workqueue.c 		process_one_work(worker, work);
worker           2362 kernel/workqueue.c 	struct worker *worker = __worker;
worker           2363 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
worker           2371 kernel/workqueue.c 	if (unlikely(worker->flags & WORKER_DIE)) {
worker           2373 kernel/workqueue.c 		WARN_ON_ONCE(!list_empty(&worker->entry));
worker           2376 kernel/workqueue.c 		set_task_comm(worker->task, "kworker/dying");
worker           2377 kernel/workqueue.c 		ida_simple_remove(&pool->worker_ida, worker->id);
worker           2378 kernel/workqueue.c 		worker_detach_from_pool(worker);
worker           2379 kernel/workqueue.c 		kfree(worker);
worker           2383 kernel/workqueue.c 	worker_leave_idle(worker);
worker           2390 kernel/workqueue.c 	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
worker           2398 kernel/workqueue.c 	WARN_ON_ONCE(!list_empty(&worker->scheduled));
worker           2407 kernel/workqueue.c 	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
worker           2418 kernel/workqueue.c 			process_one_work(worker, work);
worker           2419 kernel/workqueue.c 			if (unlikely(!list_empty(&worker->scheduled)))
worker           2420 kernel/workqueue.c 				process_scheduled_works(worker);
worker           2422 kernel/workqueue.c 			move_linked_works(work, &worker->scheduled, NULL);
worker           2423 kernel/workqueue.c 			process_scheduled_works(worker);
worker           2427 kernel/workqueue.c 	worker_set_flags(worker, WORKER_PREP);
worker           2436 kernel/workqueue.c 	worker_enter_idle(worker);
worker           2466 kernel/workqueue.c 	struct worker *rescuer = __rescuer;
worker           2600 kernel/workqueue.c 	struct worker *worker;
worker           2605 kernel/workqueue.c 	worker = current_wq_worker();
worker           2610 kernel/workqueue.c 	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
worker           2613 kernel/workqueue.c 		  worker->current_pwq->wq->name, worker->current_func,
worker           2655 kernel/workqueue.c 			      struct work_struct *target, struct worker *worker)
worker           2677 kernel/workqueue.c 	if (worker)
worker           2678 kernel/workqueue.c 		head = worker->scheduled.next;
worker           2980 kernel/workqueue.c 	struct worker *worker = NULL;
worker           3000 kernel/workqueue.c 		worker = find_worker_executing_work(pool, work);
worker           3001 kernel/workqueue.c 		if (!worker)
worker           3003 kernel/workqueue.c 		pwq = worker->current_pwq;
worker           3008 kernel/workqueue.c 	insert_wq_barrier(pwq, barr, work, worker);
worker           3525 kernel/workqueue.c 	struct worker *worker;
worker           3552 kernel/workqueue.c 	while ((worker = first_idle_worker(pool)))
worker           3553 kernel/workqueue.c 		destroy_worker(worker);
worker           4203 kernel/workqueue.c 	struct worker *rescuer;
worker           4348 kernel/workqueue.c 		struct worker *rescuer = wq->rescuer;
worker           4463 kernel/workqueue.c 	struct worker *worker = current_wq_worker();
worker           4465 kernel/workqueue.c 	return worker ? worker->current_work : NULL;
worker           4479 kernel/workqueue.c 	struct worker *worker = current_wq_worker();
worker           4481 kernel/workqueue.c 	return worker && worker->rescue_wq;
worker           4572 kernel/workqueue.c 	struct worker *worker = current_wq_worker();
worker           4575 kernel/workqueue.c 	if (worker) {
worker           4577 kernel/workqueue.c 		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
worker           4603 kernel/workqueue.c 	struct worker *worker;
worker           4612 kernel/workqueue.c 	worker = kthread_probe_data(task);
worker           4618 kernel/workqueue.c 	probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
worker           4619 kernel/workqueue.c 	probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
worker           4622 kernel/workqueue.c 	probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
worker           4658 kernel/workqueue.c 	struct worker *worker;
worker           4669 kernel/workqueue.c 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
worker           4670 kernel/workqueue.c 		if (worker->current_pwq == pwq) {
worker           4679 kernel/workqueue.c 		hash_for_each(pool->busy_hash, bkt, worker, hentry) {
worker           4680 kernel/workqueue.c 			if (worker->current_pwq != pwq)
worker           4684 kernel/workqueue.c 				task_pid_nr(worker->task),
worker           4685 kernel/workqueue.c 				worker == pwq->wq->rescuer ? "(RESCUER)" : "",
worker           4686 kernel/workqueue.c 				worker->current_func);
worker           4687 kernel/workqueue.c 			list_for_each_entry(work, &worker->scheduled, entry)
worker           4773 kernel/workqueue.c 		struct worker *worker;
worker           4788 kernel/workqueue.c 		list_for_each_entry(worker, &pool->idle_list, entry) {
worker           4790 kernel/workqueue.c 				task_pid_nr(worker->task));
worker           4821 kernel/workqueue.c 		struct worker *worker = kthread_data(task);
worker           4822 kernel/workqueue.c 		struct worker_pool *pool = worker->pool;
worker           4831 kernel/workqueue.c 			if (worker->desc[0] != '\0') {
worker           4832 kernel/workqueue.c 				if (worker->current_work)
worker           4834 kernel/workqueue.c 						  worker->desc);
worker           4837 kernel/workqueue.c 						  worker->desc);
worker           4866 kernel/workqueue.c 	struct worker *worker;
worker           4879 kernel/workqueue.c 		for_each_pool_worker(worker, pool)
worker           4880 kernel/workqueue.c 			worker->flags |= WORKER_UNBOUND;
worker           4924 kernel/workqueue.c 	struct worker *worker;
worker           4935 kernel/workqueue.c 	for_each_pool_worker(worker, pool)
worker           4936 kernel/workqueue.c 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
worker           4943 kernel/workqueue.c 	for_each_pool_worker(worker, pool) {
worker           4944 kernel/workqueue.c 		unsigned int worker_flags = worker->flags;
worker           4955 kernel/workqueue.c 			wake_up_process(worker->task);
worker           4975 kernel/workqueue.c 		WRITE_ONCE(worker->flags, worker_flags);
worker           4994 kernel/workqueue.c 	struct worker *worker;
worker           5005 kernel/workqueue.c 	for_each_pool_worker(worker, pool)
worker           5006 kernel/workqueue.c 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
worker             65 kernel/workqueue_internal.h static inline struct worker *current_wq_worker(void)
worker            204 samples/seccomp/user-trap.c 	pid_t worker = 0, tracer = 0;
worker            211 samples/seccomp/user-trap.c 	worker = fork();
worker            212 samples/seccomp/user-trap.c 	if (worker < 0) {
worker            217 samples/seccomp/user-trap.c 	if (worker == 0) {
worker            343 samples/seccomp/user-trap.c 	if (waitpid(worker, &status, 0) != worker) {
worker            368 samples/seccomp/user-trap.c 	if (worker > 0)
worker            369 samples/seccomp/user-trap.c 		kill(worker, SIGKILL);
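
The seccomp sample's worker is a forked child process rather than a thread; the parent reaps it with waitpid() and SIGKILLs it on the error path. Stripped of the seccomp plumbing, the skeleton is roughly:

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	int status;
	pid_t worker;

	worker = fork();
	if (worker < 0) {
		perror("fork");
		return 1;
	}
	if (worker == 0) {
		/* child: do the sandboxed work, then exit */
		_exit(0);
	}
	if (waitpid(worker, &status, 0) != worker) {
		perror("waitpid");
		kill(worker, SIGKILL);	/* don't leave a stuck worker behind */
		return 1;
	}
	return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
}
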
worker            133 tools/perf/bench/epoll-ctl.c static inline void do_epoll_op(struct worker *w, int op, int fd)
worker            161 tools/perf/bench/epoll-ctl.c static inline void do_random_epoll_op(struct worker *w)
worker            175 tools/perf/bench/epoll-ctl.c 	struct worker *w = (struct worker *) arg;
worker            205 tools/perf/bench/epoll-ctl.c static void init_fdmaps(struct worker *w, int pct)
worker            224 tools/perf/bench/epoll-ctl.c static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
worker            235 tools/perf/bench/epoll-ctl.c 		struct worker *w = &worker[i];
worker            268 tools/perf/bench/epoll-ctl.c 				     (void *)(struct worker *) w);
worker            305 tools/perf/bench/epoll-ctl.c 	struct worker *worker = NULL;
worker            339 tools/perf/bench/epoll-ctl.c 	worker = calloc(nthreads, sizeof(*worker));
worker            340 tools/perf/bench/epoll-ctl.c 	if (!worker)
worker            366 tools/perf/bench/epoll-ctl.c 	do_threads(worker, cpu);
worker            379 tools/perf/bench/epoll-ctl.c 		ret = pthread_join(worker[i].thread, NULL);
worker            393 tools/perf/bench/epoll-ctl.c 			t[j] = worker[i].ops[j];
worker            399 tools/perf/bench/epoll-ctl.c 			       worker[i].tid, &worker[i].fdmap[0],
worker            403 tools/perf/bench/epoll-ctl.c 			       worker[i].tid, &worker[i].fdmap[0],
worker            404 tools/perf/bench/epoll-ctl.c 			       &worker[i].fdmap[nfds-1],
worker            187 tools/perf/bench/epoll-wait.c 	struct worker *w = (struct worker *) arg;
worker            241 tools/perf/bench/epoll-wait.c static void nest_epollfd(struct worker *w)
worker            293 tools/perf/bench/epoll-wait.c static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
worker            312 tools/perf/bench/epoll-wait.c 		struct worker *w = &worker[i];
worker            357 tools/perf/bench/epoll-wait.c 				     (void *)(struct worker *) w);
worker            370 tools/perf/bench/epoll-wait.c 	struct worker *worker = p;
worker            382 tools/perf/bench/epoll-wait.c 			shuffle((void *)worker, nthreads, sizeof(*worker));
worker            386 tools/perf/bench/epoll-wait.c 			struct worker *w = &worker[i];
worker            409 tools/perf/bench/epoll-wait.c 	struct worker *w1 = (struct worker *) p1;
worker            410 tools/perf/bench/epoll-wait.c 	struct worker *w2 = (struct worker *) p2;
worker            419 tools/perf/bench/epoll-wait.c 	struct worker *worker = NULL;
worker            458 tools/perf/bench/epoll-wait.c 	worker = calloc(nthreads, sizeof(*worker));
worker            459 tools/perf/bench/epoll-wait.c 	if (!worker) {
worker            484 tools/perf/bench/epoll-wait.c 	do_threads(worker, cpu);
worker            498 tools/perf/bench/epoll-wait.c 			     (void *)(struct worker *) worker);
worker            519 tools/perf/bench/epoll-wait.c 		qsort(worker, nthreads, sizeof(struct worker), cmpworker);
worker            522 tools/perf/bench/epoll-wait.c 		unsigned long t = worker[i].ops/runtime.tv_sec;
worker            528 tools/perf/bench/epoll-wait.c 			       worker[i].tid, &worker[i].fdmap[0], t);
worker            531 tools/perf/bench/epoll-wait.c 			       worker[i].tid, &worker[i].fdmap[0],
worker            532 tools/perf/bench/epoll-wait.c 			       &worker[i].fdmap[nfds-1], t);
worker             70 tools/perf/bench/futex-hash.c 	struct worker *w = (struct worker *) arg;
worker            127 tools/perf/bench/futex-hash.c 	struct worker *worker = NULL;
worker            147 tools/perf/bench/futex-hash.c 	worker = calloc(nthreads, sizeof(*worker));
worker            148 tools/perf/bench/futex-hash.c 	if (!worker)
worker            166 tools/perf/bench/futex-hash.c 		worker[i].tid = i;
worker            167 tools/perf/bench/futex-hash.c 		worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
worker            168 tools/perf/bench/futex-hash.c 		if (!worker[i].futex)
worker            178 tools/perf/bench/futex-hash.c 		ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
worker            179 tools/perf/bench/futex-hash.c 				     (void *)(struct worker *) &worker[i]);
worker            196 tools/perf/bench/futex-hash.c 		ret = pthread_join(worker[i].thread, NULL);
worker            207 tools/perf/bench/futex-hash.c 		unsigned long t = worker[i].ops/runtime.tv_sec;
worker            212 tools/perf/bench/futex-hash.c 				       worker[i].tid, &worker[i].futex[0], t);
worker            215 tools/perf/bench/futex-hash.c 				       worker[i].tid, &worker[i].futex[0],
worker            216 tools/perf/bench/futex-hash.c 				       &worker[i].futex[nfutexes-1], t);
worker            219 tools/perf/bench/futex-hash.c 		zfree(&worker[i].futex);
worker            224 tools/perf/bench/futex-hash.c 	free(worker);
worker             34 tools/perf/bench/futex-lock-pi.c static struct worker *worker;
worker             82 tools/perf/bench/futex-lock-pi.c 	struct worker *w = (struct worker *) arg;
worker            119 tools/perf/bench/futex-lock-pi.c static void create_threads(struct worker *w, pthread_attr_t thread_attr,
worker            128 tools/perf/bench/futex-lock-pi.c 		worker[i].tid = i;
worker            131 tools/perf/bench/futex-lock-pi.c 			worker[i].futex = calloc(1, sizeof(u_int32_t));
worker            132 tools/perf/bench/futex-lock-pi.c 			if (!worker[i].futex)
worker            135 tools/perf/bench/futex-lock-pi.c 			worker[i].futex = &global_futex;
worker            143 tools/perf/bench/futex-lock-pi.c 		if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
worker            171 tools/perf/bench/futex-lock-pi.c 	worker = calloc(nthreads, sizeof(*worker));
worker            172 tools/perf/bench/futex-lock-pi.c 	if (!worker)
worker            190 tools/perf/bench/futex-lock-pi.c 	create_threads(worker, thread_attr, cpu);
worker            203 tools/perf/bench/futex-lock-pi.c 		ret = pthread_join(worker[i].thread, NULL);
worker            214 tools/perf/bench/futex-lock-pi.c 		unsigned long t = worker[i].ops/runtime.tv_sec;
worker            219 tools/perf/bench/futex-lock-pi.c 			       worker[i].tid, worker[i].futex, t);
worker            222 tools/perf/bench/futex-lock-pi.c 			zfree(&worker[i].futex);
worker            227 tools/perf/bench/futex-lock-pi.c 	free(worker);
worker             40 tools/perf/bench/futex-requeue.c static pthread_t *worker;
worker            138 tools/perf/bench/futex-requeue.c 	worker = calloc(nthreads, sizeof(*worker));
worker            139 tools/perf/bench/futex-requeue.c 	if (!worker)
worker            164 tools/perf/bench/futex-requeue.c 		block_threads(worker, thread_attr, cpu);
worker            203 tools/perf/bench/futex-requeue.c 			ret = pthread_join(worker[i], NULL);
worker            217 tools/perf/bench/futex-requeue.c 	free(worker);
worker             40 tools/perf/bench/futex-wake-parallel.c 	pthread_t worker;
worker            109 tools/perf/bench/futex-wake-parallel.c 		if (pthread_create(&td[i].worker, &thread_attr,
worker            117 tools/perf/bench/futex-wake-parallel.c 		if (pthread_join(td[i].worker, NULL))
worker             41 tools/perf/bench/futex-wake.c pthread_t *worker;
worker            146 tools/perf/bench/futex-wake.c 	worker = calloc(nthreads, sizeof(*worker));
worker            147 tools/perf/bench/futex-wake.c 	if (!worker)
worker            169 tools/perf/bench/futex-wake.c 		block_threads(worker, thread_attr, cpu);
worker            196 tools/perf/bench/futex-wake.c 			ret = pthread_join(worker[i], NULL);
worker            211 tools/perf/bench/futex-wake.c 	free(worker);
worker            104 tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c 		pthread_create(&thread[i], NULL, &worker, NULL);