work 493 arch/alpha/include/asm/core_t2.h unsigned long r0, r1, work;
work 497 arch/alpha/include/asm/core_t2.h work = (addr << 5) + T2_SPARSE_MEM + 0x18;
work 498 arch/alpha/include/asm/core_t2.h r0 = *(vuip)(work);
work 499 arch/alpha/include/asm/core_t2.h r1 = *(vuip)(work + (4 << 5));
work 541 arch/alpha/include/asm/core_t2.h unsigned long work;
work 545 arch/alpha/include/asm/core_t2.h work = (addr << 5) + T2_SPARSE_MEM + 0x18;
work 546 arch/alpha/include/asm/core_t2.h *(vuip)work = b;
work 547 arch/alpha/include/asm/core_t2.h *(vuip)(work + (4 << 5)) = b >> 32;
work 142 arch/arc/include/asm/entry-arcv2.h ; Saving pt_regs->sp correctly requires some extra work due to the way
work 168 arch/arm/mach-s3c24xx/mach-gta02.c static void gta02_charger_worker(struct work_struct *work)
work 787 arch/mips/cavium-octeon/executive/cvmx-helper.c cvmx_wqe_t *work;
work 923 arch/mips/cavium-octeon/executive/cvmx-helper.c work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
work 925 arch/mips/cavium-octeon/executive/cvmx-helper.c } while ((work == NULL) && (retry_cnt > 0));
work 932 arch/mips/cavium-octeon/executive/cvmx-helper.c if (work)
work 933 arch/mips/cavium-octeon/executive/cvmx-helper.c cvmx_helper_free_packet_data(work);
work 63 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 4: # core-16057 work around
work 66 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 5: # No core-16057 work around
work 53 arch/mips/include/asm/mach-loongson64/loongson_hwmon.h struct delayed_work work;
work 126 arch/mips/include/asm/octeon/cvmx-helper-util.h static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
work 133 arch/mips/include/asm/octeon/cvmx-helper-util.h number_buffers = work->word2.s.bufs;
work 136 arch/mips/include/asm/octeon/cvmx-helper-util.h buffer_ptr = work->packet_ptr;
work 147 arch/mips/include/asm/octeon/cvmx-helper-util.h if (cvmx_ptr_to_phys(work) == start_of_buffer) {
work 598 arch/mips/include/asm/octeon/cvmx-wqe.h static inline int cvmx_wqe_get_port(cvmx_wqe_t *work)
work 603 arch/mips/include/asm/octeon/cvmx-wqe.h port = work->word2.s_cn68xx.port;
work 605 arch/mips/include/asm/octeon/cvmx-wqe.h port = work->word1.cn38xx.ipprt;
work 610 arch/mips/include/asm/octeon/cvmx-wqe.h static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
work 613 arch/mips/include/asm/octeon/cvmx-wqe.h work->word2.s_cn68xx.port = port;
work 615 arch/mips/include/asm/octeon/cvmx-wqe.h work->word1.cn38xx.ipprt = port;
work 618 arch/mips/include/asm/octeon/cvmx-wqe.h static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
work 623 arch/mips/include/asm/octeon/cvmx-wqe.h grp = work->word1.cn68xx.grp;
work 625 arch/mips/include/asm/octeon/cvmx-wqe.h grp = work->word1.cn38xx.grp;
work 630 arch/mips/include/asm/octeon/cvmx-wqe.h static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
work 633 arch/mips/include/asm/octeon/cvmx-wqe.h work->word1.cn68xx.grp = grp;
work 635 arch/mips/include/asm/octeon/cvmx-wqe.h work->word1.cn38xx.grp = grp;
work 638 arch/mips/include/asm/octeon/cvmx-wqe.h static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
work 643 arch/mips/include/asm/octeon/cvmx-wqe.h qos = work->word1.cn68xx.qos;
work 645 arch/mips/include/asm/octeon/cvmx-wqe.h qos = work->word1.cn38xx.qos;
work 650 arch/mips/include/asm/octeon/cvmx-wqe.h static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
work 653 arch/mips/include/asm/octeon/cvmx-wqe.h work->word1.cn68xx.qos = qos;
work 655 arch/mips/include/asm/octeon/cvmx-wqe.h work->word1.cn38xx.qos = qos;
work 84 arch/mips/loongson64/lemote-2f/pm.c static void yeeloong_lid_update_task(struct work_struct *work)
work 35 arch/powerpc/kernel/mce.c static void machine_check_process_queued_event(struct irq_work *work);
work 36 arch/powerpc/kernel/mce.c static void machine_check_ue_irq_work(struct irq_work *work);
work 38 arch/powerpc/kernel/mce.c static void machine_process_ue_event(struct work_struct *work);
work 208 arch/powerpc/kernel/mce.c static void machine_check_ue_irq_work(struct irq_work *work)
work 258 arch/powerpc/kernel/mce.c static void machine_process_ue_event(struct work_struct *work)
work 300 arch/powerpc/kernel/mce.c static void machine_check_process_queued_event(struct irq_work *work)
work 52 arch/powerpc/kvm/book3s_64_mmu_hv.c struct work_struct work;
work 1455 arch/powerpc/kvm/book3s_64_mmu_hv.c static void resize_hpt_prepare_work(struct work_struct *work)
work 1457 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_resize_hpt *resize = container_of(work,
work 1459 arch/powerpc/kvm/book3s_64_mmu_hv.c work);
work 1549 arch/powerpc/kvm/book3s_64_mmu_hv.c INIT_WORK(&resize->work, resize_hpt_prepare_work);
work 1552 arch/powerpc/kvm/book3s_64_mmu_hv.c schedule_work(&resize->work);
work 1446 arch/powerpc/mm/numa.c static void topology_work_fn(struct work_struct *work)
work 123 arch/powerpc/oprofile/cell/spu_task_sync.c static void wq_sync_spu_buff(struct work_struct *work)
work 30 arch/powerpc/platforms/85xx/sgy_cts1000.c static void gpio_halt_wfn(struct work_struct *work)
work 26 arch/powerpc/platforms/cell/cpufreq_spudemand.c struct delayed_work work;
work 46 arch/powerpc/platforms/cell/cpufreq_spudemand.c static void spu_gov_work(struct work_struct *work)
work 52 arch/powerpc/platforms/cell/cpufreq_spudemand.c info = container_of(work, struct spu_gov_info_struct, work.work);
work 61 arch/powerpc/platforms/cell/cpufreq_spudemand.c schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
work 67 arch/powerpc/platforms/cell/cpufreq_spudemand.c INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
work 68 arch/powerpc/platforms/cell/cpufreq_spudemand.c schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
work 73 arch/powerpc/platforms/cell/cpufreq_spudemand.c cancel_delayed_work_sync(&info->work);
work 23 arch/powerpc/platforms/powermac/backlight.c static void pmac_backlight_key_worker(struct work_struct *work);
work 24 arch/powerpc/platforms/powermac/backlight.c static void pmac_backlight_set_legacy_worker(struct work_struct *work);
work 101 arch/powerpc/platforms/powermac/backlight.c static void pmac_backlight_key_worker(struct work_struct *work)
work 168 arch/powerpc/platforms/powermac/backlight.c static void pmac_backlight_set_legacy_worker(struct work_struct *work)
work 265 arch/powerpc/platforms/powernv/opal-hmi.c static void hmi_event_handler(struct work_struct *work)
work 76 arch/powerpc/platforms/powernv/opal-memory-errors.c static void mem_error_handler(struct work_struct *work)
work 673 arch/powerpc/platforms/ps3/os-area.c static void os_area_queue_work_handler(struct work_struct *work)
work 30 arch/powerpc/platforms/pseries/dlpar.c struct work_struct work;
work 374 arch/powerpc/platforms/pseries/dlpar.c static void pseries_hp_work_fn(struct work_struct *work)
work 377 arch/powerpc/platforms/pseries/dlpar.c container_of(work, struct pseries_hp_work, work);
work 382 arch/powerpc/platforms/pseries/dlpar.c kfree((void *)work);
work 387 arch/powerpc/platforms/pseries/dlpar.c struct pseries_hp_work *work;
work 394 arch/powerpc/platforms/pseries/dlpar.c work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
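Most of the entries in this index follow one canonical workqueue idiom, worth spelling out once: embed a struct work_struct in a driver structure, bind a handler with INIT_WORK(), queue it, and recover the enclosing structure in the handler with container_of(). A minimal sketch with hypothetical names (my_dev and friends are not from any file listed here):

#include <linux/workqueue.h>

struct my_dev {
	int state;
	struct work_struct work;
};

static void my_dev_work_fn(struct work_struct *work)
{
	/* recover the enclosing structure from the embedded member */
	struct my_dev *dev = container_of(work, struct my_dev, work);

	/* runs in process context; sleeping is allowed here */
	dev->state++;
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->work, my_dev_work_fn);
}

/* safe from atomic context; queues onto the shared system workqueue */
static void my_dev_kick(struct my_dev *dev)
{
	schedule_work(&dev->work);
}

/* teardown must make sure the handler is not still running */
static void my_dev_fini(struct my_dev *dev)
{
	cancel_work_sync(&dev->work);
}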
work 395 arch/powerpc/platforms/pseries/dlpar.c if (work) {
work 396 arch/powerpc/platforms/pseries/dlpar.c INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
work 397 arch/powerpc/platforms/pseries/dlpar.c work->errlog = hp_errlog_copy;
work 398 arch/powerpc/platforms/pseries/dlpar.c queue_work(pseries_hp_wq, (struct work_struct *)work);
work 140 arch/powerpc/platforms/pseries/lpar.c struct delayed_work work;
work 356 arch/powerpc/platforms/pseries/lpar.c static void process_dtl_buffer(struct work_struct *work)
work 363 arch/powerpc/platforms/pseries/lpar.c struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);
work 400 arch/powerpc/platforms/pseries/lpar.c schedule_delayed_work_on(d->cpu, to_delayed_work(work),
work 409 arch/powerpc/platforms/pseries/lpar.c INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
work 419 arch/powerpc/platforms/pseries/lpar.c schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
work 427 arch/powerpc/platforms/pseries/lpar.c cancel_delayed_work_sync(&d->work);
work 26 arch/powerpc/platforms/pseries/ras.c static void mce_process_errlog_event(struct irq_work *work);
work 706 arch/powerpc/platforms/pseries/ras.c static void mce_process_errlog_event(struct irq_work *work)
work 383 arch/powerpc/platforms/pseries/vio.c static void vio_cmo_balance(struct work_struct *work)
work 392 arch/powerpc/platforms/pseries/vio.c cmo = container_of(work, struct vio_cmo, balance_q.work);
work 475 arch/powerpc/platforms/pseries/vio.c cancel_delayed_work(to_delayed_work(work));
work 479 arch/powerpc/sysdev/fsl_rmu.c static void fsl_pw_dpc(struct work_struct *work)
work 481 arch/powerpc/sysdev/fsl_rmu.c struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
work 37 arch/powerpc/sysdev/pmi.c struct work_struct work;
work 83 arch/powerpc/sysdev/pmi.c schedule_work(&data->work);
work 99 arch/powerpc/sysdev/pmi.c static void pmi_notify_handlers(struct work_struct *work)
work 143 arch/powerpc/sysdev/pmi.c INIT_WORK(&data->work, pmi_notify_handlers);
work 98 arch/s390/appldata/appldata_base.c static void appldata_work_fn(struct work_struct *work);
work 125 arch/s390/appldata/appldata_base.c static void appldata_work_fn(struct work_struct *work)
work 896 arch/s390/include/asm/kvm_host.h struct kvm_async_pf *work);
work 899 arch/s390/include/asm/kvm_host.h struct kvm_async_pf *work);
work 902 arch/s390/include/asm/kvm_host.h struct kvm_async_pf *work);
work 502 arch/s390/kernel/time.c static void stp_work_fn(struct work_struct *work);
work 643 arch/s390/kernel/time.c static void stp_work_fn(struct work_struct *work)
work 50 arch/s390/kernel/topology.c static void topology_work_fn(struct work_struct *work);
work 320 arch/s390/kernel/topology.c static void topology_work_fn(struct work_struct *work)
work 3637 arch/s390/kvm/kvm-s390.c struct kvm_async_pf *work)
work 3639 arch/s390/kvm/kvm-s390.c trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
work 3640 arch/s390/kvm/kvm-s390.c __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
work 3644 arch/s390/kvm/kvm-s390.c struct kvm_async_pf *work)
work 3646 arch/s390/kvm/kvm-s390.c trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
work 3647 arch/s390/kvm/kvm-s390.c __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
work 3651 arch/s390/kvm/kvm-s390.c struct kvm_async_pf *work)
work 30 arch/sh/drivers/push-switch.c schedule_work(&psw->work);
work 33 arch/sh/drivers/push-switch.c static void switch_work_handler(struct work_struct *work)
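The delayed-work entries above (cpufreq_spudemand.c, pseries/lpar.c) put a timer in front of the same machinery and re-arm the work from its own handler. A sketch of that self-rearming pattern, with hypothetical names (poller is made up; note spudemand uses INIT_DEFERRABLE_WORK instead of INIT_DELAYED_WORK so an idle CPU is not woken just to run it):

struct poller {
	int cpu;
	struct delayed_work work;
};

static void poller_fn(struct work_struct *work)
{
	/* the embedded member is work.work, hence the two-level container_of */
	struct poller *p = container_of(work, struct poller, work.work);

	/* ... sample per-CPU state ... */

	/* re-arm on the same CPU, as process_dtl_buffer() does above */
	schedule_delayed_work_on(p->cpu, to_delayed_work(work), HZ);
}

static void poller_start(struct poller *p, int cpu)
{
	p->cpu = cpu;
	INIT_DELAYED_WORK(&p->work, poller_fn);
	schedule_delayed_work_on(cpu, &p->work, HZ);
}

static void poller_stop(struct poller *p)
{
	/* cancels a pending timer and waits for a running instance */
	cancel_delayed_work_sync(&p->work);
}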
work 35 arch/sh/drivers/push-switch.c struct push_switch *psw = container_of(work, struct push_switch, work);
work 77 arch/sh/drivers/push-switch.c INIT_WORK(&psw->work, switch_work_handler);
work 104 arch/sh/drivers/push-switch.c flush_work(&psw->work);
work 16 arch/sh/include/asm/push-switch.h struct work_struct work;
work 277 arch/sparc/kernel/leon_smp.c struct leon_ipi_work *work;
work 300 arch/sparc/kernel/leon_smp.c work = &per_cpu(leon_ipi_work, cpu);
work 301 arch/sparc/kernel/leon_smp.c work->single = work->msk = work->resched = 0;
work 314 arch/sparc/kernel/leon_smp.c struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
work 317 arch/sparc/kernel/leon_smp.c work->single = 1;
work 325 arch/sparc/kernel/leon_smp.c struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
work 328 arch/sparc/kernel/leon_smp.c work->msk = 1;
work 336 arch/sparc/kernel/leon_smp.c struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
work 339 arch/sparc/kernel/leon_smp.c work->resched = 1;
work 347 arch/sparc/kernel/leon_smp.c struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);
work 349 arch/sparc/kernel/leon_smp.c if (work->single) {
work 350 arch/sparc/kernel/leon_smp.c work->single = 0;
work 353 arch/sparc/kernel/leon_smp.c if (work->msk) {
work 354 arch/sparc/kernel/leon_smp.c work->msk = 0;
work 357 arch/sparc/kernel/leon_smp.c if (work->resched) {
work 358 arch/sparc/kernel/leon_smp.c work->resched = 0;
work 196 arch/sparc/kernel/sun4d_smp.c struct sun4d_ipi_work *work;
work 201 arch/sparc/kernel/sun4d_smp.c work = &per_cpu(sun4d_ipi_work, cpu);
work 202 arch/sparc/kernel/sun4d_smp.c work->single = work->msk = work->resched = 0;
work 208 arch/sparc/kernel/sun4d_smp.c struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);
work 210 arch/sparc/kernel/sun4d_smp.c if (work->single) {
work 211 arch/sparc/kernel/sun4d_smp.c work->single = 0;
work 214 arch/sparc/kernel/sun4d_smp.c if (work->msk) {
work 215 arch/sparc/kernel/sun4d_smp.c work->msk = 0;
work 218 arch/sparc/kernel/sun4d_smp.c if (work->resched) {
work 219 arch/sparc/kernel/sun4d_smp.c work->resched = 0;
work 239 arch/sparc/kernel/sun4d_smp.c struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
work 242 arch/sparc/kernel/sun4d_smp.c work->single = 1;
work 250 arch/sparc/kernel/sun4d_smp.c struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
work 253 arch/sparc/kernel/sun4d_smp.c work->msk = 1;
work 261 arch/sparc/kernel/sun4d_smp.c struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
work 264 arch/sparc/kernel/sun4d_smp.c work->resched = 1;
work 131 arch/um/drivers/chan_kern.c static void line_timer_cb(struct work_struct *work)
work 133 arch/um/drivers/chan_kern.c struct line *line = container_of(work, struct line, task.work);
work 595 arch/um/drivers/line.c struct work_struct work;
work 598 arch/um/drivers/line.c static void __free_winch(struct work_struct *work)
work 600 arch/um/drivers/line.c struct winch *winch = container_of(work, struct winch, work);
work 617 arch/um/drivers/line.c __free_winch(&winch->work);
work 641 arch/um/drivers/line.c INIT_WORK(&winch->work, __free_winch);
work 642 arch/um/drivers/line.c schedule_work(&winch->work);
work 108 arch/um/drivers/net_kern.c static void uml_dev_close(struct work_struct *work)
work 111 arch/um/drivers/net_kern.c container_of(work, struct uml_net_private, work);
work 137 arch/um/drivers/net_kern.c schedule_work(&lp->work);
work 421 arch/um/drivers/net_kern.c INIT_WORK(&lp->work, uml_dev_close);
work 1176 arch/um/drivers/vector_kern.c static void vector_reset_tx(struct work_struct *work)
work 1179 arch/um/drivers/vector_kern.c container_of(work, struct vector_private, reset_tx);
work 29 arch/um/include/shared/net_kern.h struct work_struct work;
work 75 arch/x86/entry/common.c u32 work;
work 80 arch/x86/entry/common.c work = READ_ONCE(ti->flags);
work 82 arch/x86/entry/common.c if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
work 84 arch/x86/entry/common.c if (ret || (work & _TIF_SYSCALL_EMU))
work 92 arch/x86/entry/common.c if (work & _TIF_SECCOMP) {
work 1584 arch/x86/include/asm/kvm_host.h struct kvm_async_pf *work);
work 1586 arch/x86/include/asm/kvm_host.h struct kvm_async_pf *work);
work 1588 arch/x86/include/asm/kvm_host.h struct kvm_async_pf *work);
work 78 arch/x86/kernel/cpu/mce/dev-mcelog.c static void mce_do_trigger(struct work_struct *work)
work 594 arch/x86/kernel/cpu/resctrl/internal.h void mbm_handle_overflow(struct work_struct *work);
work 599 arch/x86/kernel/cpu/resctrl/internal.h void cqm_handle_limbo(struct work_struct *work);
work 470 arch/x86/kernel/cpu/resctrl/monitor.c void cqm_handle_limbo(struct work_struct *work)
work 507 arch/x86/kernel/cpu/resctrl/monitor.c void mbm_handle_overflow(struct work_struct *work)
work 511 arch/x86/kernel/cpu/resctrl/rdtgroup.c struct callback_head work;
work 520 arch/x86/kernel/cpu/resctrl/rdtgroup.c callback = container_of(head, struct task_move_callback, work);
work 552 arch/x86/kernel/cpu/resctrl/rdtgroup.c callback->work.func = move_myself;
work 560 arch/x86/kernel/cpu/resctrl/rdtgroup.c ret = task_work_add(tsk, &callback->work, true);
work 1274 arch/x86/kernel/tsc.c static void tsc_refine_calibration_work(struct work_struct *work);
work 1290 arch/x86/kernel/tsc.c static void tsc_refine_calibration_work(struct work_struct *work)
work 239 arch/x86/kvm/i8254.c static void pit_do_work(struct kthread_work *work)
work 241 arch/x86/kvm/i8254.c struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
work 402 arch/x86/kvm/ioapic.c static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
work 405 arch/x86/kvm/ioapic.c struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
work 406 arch/x86/kvm/ioapic.c eoi_inject.work);
work 2475 arch/x86/kvm/x86.c static void kvmclock_update_fn(struct work_struct *work)
work 2478 arch/x86/kvm/x86.c struct delayed_work *dwork = to_delayed_work(work);
work 2501 arch/x86/kvm/x86.c static void kvmclock_sync_fn(struct work_struct *work)
work 2503 arch/x86/kvm/x86.c struct delayed_work *dwork = to_delayed_work(work);
work 7198 arch/x86/kvm/x86.c static void pvclock_gtod_update_fn(struct work_struct *work)
work 10063 arch/x86/kvm/x86.c void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
work 10067 arch/x86/kvm/x86.c if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
work 10068 arch/x86/kvm/x86.c work->wakeup_all)
work 10076 arch/x86/kvm/x86.c work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
work 10079 arch/x86/kvm/x86.c vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);
work 10188 arch/x86/kvm/x86.c struct kvm_async_pf *work)
work 10192 arch/x86/kvm/x86.c trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
work 10193 arch/x86/kvm/x86.c kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
work 10201 arch/x86/kvm/x86.c fault.address = work->arch.token;
work 10218 arch/x86/kvm/x86.c struct kvm_async_pf *work)
work 10223 arch/x86/kvm/x86.c if (work->wakeup_all)
work 10224 arch/x86/kvm/x86.c work->arch.token = ~0; /* broadcast wakeup */
work 10226 arch/x86/kvm/x86.c kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
work 10227 arch/x86/kvm/x86.c trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
work 10247 arch/x86/kvm/x86.c fault.address = work->arch.token;
work 207 arch/x86/platform/olpc/olpc-xo1-sci.c static void process_sci_queue_work(struct work_struct *work)
work 125 arch/x86/platform/olpc/olpc-xo15-sci.c static void process_sci_queue_work(struct work_struct *work)
work 319 block/bio-integrity.c static void bio_integrity_verify_fn(struct work_struct *work)
work 322 block/bio-integrity.c container_of(work, struct bio_integrity_payload, bip_work);
work 340 block/bio.c static void bio_alloc_rescue(struct work_struct *work)
work 342 block/bio.c struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
work 1707 block/bio.c static void bio_dirty_fn(struct work_struct *work);
work 1716 block/bio.c static void bio_dirty_fn(struct work_struct *work)
work 120 block/blk-cgroup.c static void blkg_async_bio_workfn(struct work_struct *work)
work 122 block/blk-cgroup.c struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
work 467 block/blk-core.c static void blk_timeout_work(struct work_struct *work)
work 1654 block/blk-core.c int kblockd_schedule_work(struct work_struct *work)
work 1656 block/blk-core.c return queue_work(kblockd_workqueue, work);
work 1660 block/blk-core.c int kblockd_schedule_work_on(int cpu, struct work_struct *work)
work 1662 block/blk-core.c return queue_work_on(cpu, kblockd_workqueue, work);
work 95 block/blk-ioc.c static void ioc_release_fn(struct work_struct *work)
work 97 block/blk-ioc.c struct io_context *ioc = container_of(work, struct io_context,
work 741 block/blk-mq.c static void blk_mq_requeue_work(struct work_struct *work)
work 744 block/blk-mq.c container_of(work, struct request_queue, requeue_work.work);
work 930 block/blk-mq.c static void blk_mq_timeout_work(struct work_struct *work)
work 933 block/blk-mq.c container_of(work, struct request_queue, timeout_work);
work 1626 block/blk-mq.c static void blk_mq_run_work_fn(struct work_struct *work)
work 1630 block/blk-mq.c hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
work 884 block/blk-sysfs.c static void __blk_release_queue(struct work_struct *work)
work 886 block/blk-sysfs.c struct request_queue *q = container_of(work, typeof(*q), release_work);
work 1303 block/blk-throttle.c static void blk_throtl_dispatch_work_fn(struct work_struct *work)
work 1305 block/blk-throttle.c struct throtl_data *td = container_of(work, struct throtl_data,
work 1836 block/genhd.c static void disk_events_workfn(struct work_struct *work)
work 1838 block/genhd.c struct delayed_work *dwork = to_delayed_work(work);
work 248 block/partition-generic.c static void delete_partition_work_fn(struct work_struct *work)
work 250 block/partition-generic.c struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
work 38 crypto/cryptd.c struct work_struct work;
work 93 crypto/cryptd.c static void cryptd_queue_worker(struct work_struct *work);
work 107 crypto/cryptd.c INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
work 141 crypto/cryptd.c queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
work 157 crypto/cryptd.c static void cryptd_queue_worker(struct work_struct *work)
work 162 crypto/cryptd.c cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
work 184 crypto/cryptd.c queue_work(cryptd_wq, &cpu_queue->work);
work 168 crypto/crypto_engine.c static void crypto_pump_work(struct kthread_work *work)
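The resctrl/rdtgroup.c entries above are task_work, not workqueue work: a struct callback_head whose .func runs when the target task next returns to user space. A sketch mirroring those lines (move_cb is hypothetical; the three-argument task_work_add() with a bool notify flag matches the kernel vintage indexed here):

#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct move_cb {
	struct task_struct *tsk;
	struct callback_head work;
};

static void move_myself(struct callback_head *head)
{
	/* runs in the context of the target task as it leaves the kernel */
	struct move_cb *cb = container_of(head, struct move_cb, work);

	/* ... adjust per-task state ... */
	kfree(cb);
}

static int queue_move(struct task_struct *tsk)
{
	struct move_cb *cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	int ret;

	if (!cb)
		return -ENOMEM;
	cb->tsk = tsk;
	cb->work.func = move_myself;	/* assigned directly, as rdtgroup.c does */
	ret = task_work_add(tsk, &cb->work, true);
	if (ret)
		kfree(cb);		/* target task is already exiting */
	return ret;
}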
work 171 crypto/crypto_engine.c container_of(work, struct crypto_engine, pump_requests);
work 1068 crypto/drbg.c static void drbg_async_seed(struct work_struct *work)
work 1072 crypto/drbg.c struct drbg_state *drbg = container_of(work, struct drbg_state,
work 226 drivers/acpi/acpi_video.c static void acpi_video_switch_brightness(struct work_struct *work);
work 1419 drivers/acpi/acpi_video.c acpi_video_switch_brightness(struct work_struct *work)
work 1421 drivers/acpi/acpi_video.c struct acpi_video_device *device = container_of(to_delayed_work(work),
work 168 drivers/acpi/ec.c struct work_struct work;
work 174 drivers/acpi/ec.c static void acpi_ec_event_handler(struct work_struct *work);
work 175 drivers/acpi/ec.c static void acpi_ec_event_processor(struct work_struct *work);
work 465 drivers/acpi/ec.c queue_work(ec_wq, &ec->work);
work 1138 drivers/acpi/ec.c INIT_WORK(&q->work, acpi_ec_event_processor);
work 1155 drivers/acpi/ec.c static void acpi_ec_event_processor(struct work_struct *work)
work 1157 drivers/acpi/ec.c struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
work 1206 drivers/acpi/ec.c if (!queue_work(ec_query_wq, &q->work)) {
work 1237 drivers/acpi/ec.c static void acpi_ec_event_handler(struct work_struct *work)
work 1240 drivers/acpi/ec.c struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
work 1349 drivers/acpi/ec.c INIT_WORK(&ec->work, acpi_ec_event_handler);
work 84 drivers/acpi/internal.h bool acpi_queue_hotplug_work(struct work_struct *work);
work 179 drivers/acpi/internal.h struct work_struct work;
work 3233 drivers/acpi/nfit/core.c static void acpi_nfit_scrub(struct work_struct *work)
work 3239 drivers/acpi/nfit/core.c acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
work 3487 drivers/acpi/nfit/core.c if (work_busy(&acpi_desc->dwork.work))
work 46 drivers/acpi/osl.c struct work_struct work;
work 839 drivers/acpi/osl.c static void acpi_os_execute_deferred(struct work_struct *work)
work 841 drivers/acpi/osl.c struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
work 1099 drivers/acpi/osl.c INIT_WORK(&dpc->work, acpi_os_execute_deferred);
work 1102 drivers/acpi/osl.c INIT_WORK(&dpc->work, acpi_os_execute_deferred);
work 1118 drivers/acpi/osl.c ret = queue_work_on(0, queue, &dpc->work);
work 1146 drivers/acpi/osl.c struct work_struct work;
work 1151 drivers/acpi/osl.c static void acpi_hotplug_work_fn(struct work_struct *work)
work 1153 drivers/acpi/osl.c struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
work 1172 drivers/acpi/osl.c INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
work 1181 drivers/acpi/osl.c if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
work 1188 drivers/acpi/osl.c bool acpi_queue_hotplug_work(struct work_struct *work)
work 1190 drivers/acpi/osl.c return queue_work(kacpi_hotplug_wq, work);
work 556 drivers/acpi/scan.c static DECLARE_WORK(work, acpi_device_del_work_fn);
work 572 drivers/acpi/scan.c acpi_queue_hotplug_work(&work);
work 2295 drivers/acpi/scan.c struct work_struct work;
work 2300 drivers/acpi/scan.c static void acpi_table_events_fn(struct work_struct *work)
work 2304 drivers/acpi/scan.c tew = container_of(work, struct acpi_table_events_work, work);
work 2329 drivers/acpi/scan.c INIT_WORK(&tew->work, acpi_table_events_fn);
work 2333 drivers/acpi/scan.c schedule_work(&tew->work);
work 1051 drivers/acpi/thermal.c static void acpi_thermal_check_fn(struct work_struct *work)
work 1053 drivers/acpi/thermal.c struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
work 364 drivers/acpi/video_detect.c static void acpi_video_backlight_notify_work(struct work_struct *work)
work 242 drivers/android/binder.c struct binder_work work;
work 309 drivers/android/binder.c struct binder_work work;
work 350 drivers/android/binder.c struct binder_work work;
work 574 drivers/android/binder.c struct binder_work work;
work 796 drivers/android/binder.c binder_enqueue_work_ilocked(struct binder_work *work,
work 800 drivers/android/binder.c BUG_ON(work->entry.next && !list_empty(&work->entry));
work 801 drivers/android/binder.c list_add_tail(&work->entry, target_list);
work 817 drivers/android/binder.c struct binder_work *work)
work 820 drivers/android/binder.c binder_enqueue_work_ilocked(work, &thread->todo);
work 835 drivers/android/binder.c struct binder_work *work)
work 838 drivers/android/binder.c binder_enqueue_work_ilocked(work, &thread->todo);
work 852 drivers/android/binder.c struct binder_work *work)
work 855 drivers/android/binder.c binder_enqueue_thread_work_ilocked(thread, work);
work 860 drivers/android/binder.c binder_dequeue_work_ilocked(struct binder_work *work)
work 862 drivers/android/binder.c list_del_init(&work->entry);
work 874 drivers/android/binder.c binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
work 877 drivers/android/binder.c binder_dequeue_work_ilocked(work);
work 1145 drivers/android/binder.c node->work.type = BINDER_WORK_NODE;
work 1150 drivers/android/binder.c INIT_LIST_HEAD(&node->work.entry);
work 1212 drivers/android/binder.c binder_dequeue_work_ilocked(&node->work);
work 1215 drivers/android/binder.c &node->work);
work 1220 drivers/android/binder.c if (!node->has_weak_ref && list_empty(&node->work.entry)) {
work 1229 drivers/android/binder.c binder_enqueue_work_ilocked(&node->work, target_list);
work 1271 drivers/android/binder.c if (list_empty(&node->work.entry)) {
work 1272 drivers/android/binder.c binder_enqueue_work_ilocked(&node->work, &proc->todo);
work 1279 drivers/android/binder.c binder_dequeue_work_ilocked(&node->work);
work 1285 drivers/android/binder.c BUG_ON(!list_empty(&node->work.entry));
work 1537 drivers/android/binder.c binder_dequeue_work(ref->proc, &ref->death->work);
work 1960 drivers/android/binder.c &target_thread->reply_error.work);
work 2784 drivers/android/binder.c binder_enqueue_thread_work_ilocked(thread, &t->work);
work 2786 drivers/android/binder.c binder_enqueue_work_ilocked(&t->work, &proc->todo);
work 2788 drivers/android/binder.c binder_enqueue_work_ilocked(&t->work, &node->async_todo);
work 3421 drivers/android/binder.c t->work.type = BINDER_WORK_TRANSACTION;
work 3432 drivers/android/binder.c binder_enqueue_thread_work_ilocked(target_thread, &t->work);
work 3546 drivers/android/binder.c binder_enqueue_thread_work(thread, &thread->return_error.work);
work 3550 drivers/android/binder.c binder_enqueue_thread_work(thread, &thread->return_error.work);
work 3877 drivers/android/binder.c &thread->return_error.work);
work 3920 drivers/android/binder.c INIT_LIST_HEAD(&death->work.entry);
work 3924 drivers/android/binder.c ref->death->work.type = BINDER_WORK_DEAD_BINDER;
work 3928 drivers/android/binder.c &ref->death->work, &proc->todo);
work 3952 drivers/android/binder.c if (list_empty(&death->work.entry)) {
work 3953 drivers/android/binder.c death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
work 3959 drivers/android/binder.c &death->work);
work 3962 drivers/android/binder.c &death->work,
work 3968 drivers/android/binder.c BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
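The acpi/osl.c entries show the fire-and-forget variant: the work item is allocated in atomic context, queued, and freed by its own handler. A sketch with hypothetical names (hp_event; the wq parameter stands in for a dedicated queue like kacpi_hotplug_wq):

struct hp_event {
	struct work_struct work;
	u32 type;			/* illustrative payload */
};

static void hp_event_fn(struct work_struct *work)
{
	struct hp_event *hpw = container_of(work, struct hp_event, work);

	/* ... handle the event in sleepable context ... */
	kfree(hpw);			/* the handler owns the allocation */
}

/* may be called from notifier or interrupt context */
static bool hp_event_queue(struct workqueue_struct *wq, u32 type)
{
	struct hp_event *hpw = kmalloc(sizeof(*hpw), GFP_ATOMIC);

	if (!hpw)
		return false;
	INIT_WORK(&hpw->work, hp_event_fn);
	hpw->type = type;
	/* queue_work() returns false only if the item was already pending */
	if (!queue_work(wq, &hpw->work)) {
		kfree(hpw);
		return false;
	}
	return true;
}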
work 3969 drivers/android/binder.c death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
work 3991 drivers/android/binder.c work);
work 4008 drivers/android/binder.c binder_dequeue_work_ilocked(&death->work);
work 4009 drivers/android/binder.c if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
work 4010 drivers/android/binder.c death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
work 4015 drivers/android/binder.c thread, &death->work);
work 4018 drivers/android/binder.c &death->work,
work 4262 drivers/android/binder.c t = container_of(w, struct binder_transaction, work);
work 4266 drivers/android/binder.c w, struct binder_error, work);
work 4293 drivers/android/binder.c struct binder_node *node = container_of(w, struct binder_node, work);
work 4386 drivers/android/binder.c death = container_of(w, struct binder_ref_death, work);
work 4591 drivers/android/binder.c t = container_of(w, struct binder_transaction, work);
work 4598 drivers/android/binder.c w, struct binder_error, work);
work 4614 drivers/android/binder.c death = container_of(w, struct binder_ref_death, work);
work 4660 drivers/android/binder.c thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
work 4662 drivers/android/binder.c thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
work 5353 drivers/android/binder.c binder_dequeue_work_ilocked(&node->work);
work 5391 drivers/android/binder.c BUG_ON(!list_empty(&ref->death->work.entry));
work 5392 drivers/android/binder.c ref->death->work.type = BINDER_WORK_DEAD_BINDER;
work 5393 drivers/android/binder.c binder_enqueue_work_ilocked(&ref->death->work,
work 5499 drivers/android/binder.c static void binder_deferred_func(struct work_struct *work)
work 5592 drivers/android/binder.c t = container_of(w, struct binder_transaction, work);
work 5598 drivers/android/binder.c w, struct binder_error, work);
work 5607 drivers/android/binder.c node = container_of(w, struct binder_node, work);
work 4780 drivers/ata/libata-scsi.c void ata_scsi_hotplug(struct work_struct *work)
work 4783 drivers/ata/libata-scsi.c container_of(work, struct ata_port, hotplug_task.work);
work 4890 drivers/ata/libata-scsi.c void ata_scsi_dev_rescan(struct work_struct *work)
work 4893 drivers/ata/libata-scsi.c container_of(work, struct ata_port, scsi_rescan_task);
work 1223 drivers/ata/libata-sff.c void ata_sff_queue_work(struct work_struct *work)
work 1225 drivers/ata/libata-sff.c queue_work(ata_sff_wq, work);
work 1272 drivers/ata/libata-sff.c static void ata_sff_pio_task(struct work_struct *work)
work 1275 drivers/ata/libata-sff.c container_of(work, struct ata_port, sff_pio_task.work);
work 125 drivers/ata/libata.h extern void ata_scsi_hotplug(struct work_struct *work);
work 127 drivers/ata/libata.h extern void ata_scsi_dev_rescan(struct work_struct *work);
work 213 drivers/ata/pata_arasan_cf.c struct work_struct work;
work 518 drivers/ata/pata_arasan_cf.c static void data_xfer(struct work_struct *work)
work 520 drivers/ata/pata_arasan_cf.c struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
work 521 drivers/ata/pata_arasan_cf.c work);
work 573 drivers/ata/pata_arasan_cf.c static void delayed_finish(struct work_struct *work)
work 575 drivers/ata/pata_arasan_cf.c struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
work 576 drivers/ata/pata_arasan_cf.c dwork.work);
work 666 drivers/ata/pata_arasan_cf.c cancel_work_sync(&acdev->work);
work 683 drivers/ata/pata_arasan_cf.c ata_sff_queue_work(&acdev->work);
work 858 drivers/ata/pata_arasan_cf.c INIT_WORK(&acdev->work, data_xfer);
work 134 drivers/atm/idt77252.c static void idt77252_softint(struct work_struct *work);
work 2804 drivers/atm/idt77252.c idt77252_softint(struct work_struct *work)
work 2807 drivers/atm/idt77252.c container_of(work, struct idt77252_dev, tqueue);
work 259 drivers/auxdisplay/arm-charlcd.c static void charlcd_init_work(struct work_struct *work)
work 262 drivers/auxdisplay/arm-charlcd.c container_of(work, struct charlcd, init_work.work);
work 257 drivers/auxdisplay/cfag12864b.c static void cfag12864b_update(struct work_struct *work)
work 119 drivers/auxdisplay/charlcd.c static void charlcd_bl_off(struct work_struct *work)
work 121 drivers/auxdisplay/charlcd.c struct delayed_work *dwork = to_delayed_work(work);
work 68 drivers/auxdisplay/ht16k33.c struct delayed_work work;
work 120 drivers/auxdisplay/ht16k33.c schedule_delayed_work(&fbdev->work,
work 127 drivers/auxdisplay/ht16k33.c static void ht16k33_fb_update(struct work_struct *work)
work 130 drivers/auxdisplay/ht16k33.c container_of(work, struct ht16k33_fbdev, work.work);
work 448 drivers/auxdisplay/ht16k33.c INIT_DELAYED_WORK(&fbdev->work, ht16k33_fb_update);
work 512 drivers/auxdisplay/ht16k33.c cancel_delayed_work_sync(&fbdev->work);
work 54 drivers/base/arch_topology.c static void update_topology_flags_workfn(struct work_struct *work);
work 89 drivers/base/arch_topology.c static void update_topology_flags_workfn(struct work_struct *work)
work 168 drivers/base/arch_topology.c static void parsing_done_workfn(struct work_struct *work);
work 239 drivers/base/arch_topology.c static void parsing_done_workfn(struct work_struct *work)
work 74 drivers/base/dd.c static void deferred_probe_work_func(struct work_struct *work)
work 301 drivers/base/dd.c static void deferred_probe_timeout_work_func(struct work_struct *work)
work 68 drivers/base/devcoredump.c devcd = container_of(wk, struct devcd_entry, del_wk.work);
work 64 drivers/base/firmware_loader/main.c struct delayed_work work;
work 957 drivers/base/firmware_loader/main.c struct work_struct work;
work 966 drivers/base/firmware_loader/main.c static void request_firmware_work_func(struct work_struct *work)
work 971 drivers/base/firmware_loader/main.c fw_work = container_of(work, struct firmware_work, work);
work 1043 drivers/base/firmware_loader/main.c INIT_WORK(&fw_work->work, request_firmware_work_func);
work 1044 drivers/base/firmware_loader/main.c schedule_work(&fw_work->work);
work 1288 drivers/base/firmware_loader/main.c cancel_delayed_work_sync(&fwc->work);
work 1315 drivers/base/firmware_loader/main.c static void device_uncache_fw_images_work(struct work_struct *work)
work 1329 drivers/base/firmware_loader/main.c queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
work 1384 drivers/base/firmware_loader/main.c INIT_DELAYED_WORK(&fw_cache.work,
work 844 drivers/base/node.c static void node_hugetlb_work(struct work_struct *work)
work 846 drivers/base/node.c struct node *node = container_of(work, struct node, node_work);
work 681 drivers/base/power/domain.c static void genpd_power_off_work_fn(struct work_struct *work)
work 685 drivers/base/power/domain.c genpd = container_of(work, struct generic_pm_domain, power_off_work);
work 438 drivers/base/power/runtime.c queue_work(pm_wq, &dev->power.work);
work 619 drivers/base/power/runtime.c queue_work(pm_wq, &dev->power.work);
work 804 drivers/base/power/runtime.c queue_work(pm_wq, &dev->power.work);
work 889 drivers/base/power/runtime.c static void pm_runtime_work(struct work_struct *work)
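Note that not every "work" in this index is a workqueue item: binder_work, whose entries dominate the span above, is a driver-private job descriptor threaded onto plain lists and popped directly by binder threads. A minimal sketch of that list-based pattern (my_work is hypothetical; as binder's _ilocked suffix implies, the caller holds the queue's lock):

#include <linux/list.h>
#include <linux/bug.h>

struct my_work {
	struct list_head entry;
	int type;		/* e.g. BINDER_WORK_TRANSACTION above */
};

/* caller must hold the lock protecting target_list */
static void my_enqueue_work_ilocked(struct my_work *work,
				    struct list_head *target_list)
{
	/* catch double-enqueue: entry initialized but not empty */
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

static void my_dequeue_work_ilocked(struct my_work *work)
{
	list_del_init(&work->entry);	/* safe to call on an unqueued item */
}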
work 891 drivers/base/power/runtime.c struct device *dev = container_of(work, struct device, power.work);
work 1251 drivers/base/power/runtime.c cancel_work_sync(&dev->power.work);
work 1583 drivers/base/power/runtime.c INIT_WORK(&dev->power.work, pm_runtime_work);
work 169 drivers/block/aoe/aoe.h struct work_struct work;/* disk create work struct */
work 446 drivers/block/aoe/aoeblk.c schedule_work(&d->work);
work 890 drivers/block/aoe/aoecmd.c aoecmd_sleepwork(struct work_struct *work)
work 892 drivers/block/aoe/aoecmd.c struct aoedev *d = container_of(work, struct aoedev, work);
work 981 drivers/block/aoe/aoecmd.c schedule_work(&d->work);
work 473 drivers/block/aoe/aoedev.c INIT_WORK(&d->work, aoecmd_sleepwork);
work 3570 drivers/block/drbd/drbd_main.c struct bm_io_work *work = &device->bm_io_work;
work 3573 drivers/block/drbd/drbd_main.c if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
work 3577 drivers/block/drbd/drbd_main.c cnt, work->why);
work 3581 drivers/block/drbd/drbd_main.c drbd_bm_lock(device, work->why, work->flags);
work 3582 drivers/block/drbd/drbd_main.c rv = work->io_fn(device);
work 3590 drivers/block/drbd/drbd_main.c if (work->done)
work 3591 drivers/block/drbd/drbd_main.c work->done(device, rv);
work 3594 drivers/block/drbd/drbd_main.c work->why = NULL;
work 3595 drivers/block/drbd/drbd_main.c work->flags = 0;
work 979 drivers/block/floppy.c static void floppy_work_workfn(struct work_struct *work)
work 996 drivers/block/floppy.c static void fd_timer_workfn(struct work_struct *work)
work 1824 drivers/block/floppy.c fd_timer.work.func,
work 1828 drivers/block/floppy.c fd_timeout.work.func,
work 1952 drivers/block/loop.c kthread_queue_work(&lo->worker, &cmd->work);
work 1978 drivers/block/loop.c static void loop_queue_work(struct kthread_work *work)
work 1981 drivers/block/loop.c container_of(work, struct loop_cmd, work);
work 1991 drivers/block/loop.c kthread_init_work(&cmd->work, loop_queue_work);
work 68 drivers/block/loop.h struct kthread_work work;
work 767 drivers/block/mtip32xx/mtip32xx.c twork = &dd->work[i];
work 776 drivers/block/mtip32xx/mtip32xx.c twork = &dd->work[i];
work 781 drivers/block/mtip32xx/mtip32xx.c &twork->work);
work 784 drivers/block/mtip32xx/mtip32xx.c if (likely(dd->work[0].completed))
work 786 drivers/block/mtip32xx/mtip32xx.c dd->work[0].completed);
work 2894 drivers/block/mtip32xx/mtip32xx.c dd->work[i].port = dd->port;
work 4091 drivers/block/mtip32xx/mtip32xx.c dd->work[0].cpu_binding = dd->isr_binding;
work 4092 drivers/block/mtip32xx/mtip32xx.c dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
work 4093 drivers/block/mtip32xx/mtip32xx.c dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
work 4094 drivers/block/mtip32xx/mtip32xx.c dd->work[3].cpu_binding = dd->work[0].cpu_binding;
work 4095 drivers/block/mtip32xx/mtip32xx.c dd->work[4].cpu_binding = dd->work[1].cpu_binding;
work 4096 drivers/block/mtip32xx/mtip32xx.c dd->work[5].cpu_binding = dd->work[2].cpu_binding;
work 4097 drivers/block/mtip32xx/mtip32xx.c dd->work[6].cpu_binding = dd->work[2].cpu_binding;
work 4098 drivers/block/mtip32xx/mtip32xx.c dd->work[7].cpu_binding = dd->work[1].cpu_binding;
work 4104 drivers/block/mtip32xx/mtip32xx.c if (dd->work[i].cpu_binding == cpu) {
work 4113 drivers/block/mtip32xx/mtip32xx.c INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
work 4114 drivers/block/mtip32xx/mtip32xx.c INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
work 4115 drivers/block/mtip32xx/mtip32xx.c INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
work 4116 drivers/block/mtip32xx/mtip32xx.c INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
work 4117 drivers/block/mtip32xx/mtip32xx.c INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
work 4118 drivers/block/mtip32xx/mtip32xx.c INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
work 4119 drivers/block/mtip32xx/mtip32xx.c INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
work 4120 drivers/block/mtip32xx/mtip32xx.c INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
work 4164 drivers/block/mtip32xx/mtip32xx.c drop_cpu(dd->work[0].cpu_binding);
work 4165 drivers/block/mtip32xx/mtip32xx.c drop_cpu(dd->work[1].cpu_binding);
work 4166 drivers/block/mtip32xx/mtip32xx.c drop_cpu(dd->work[2].cpu_binding);
work 4225 drivers/block/mtip32xx/mtip32xx.c drop_cpu(dd->work[0].cpu_binding);
work 4226 drivers/block/mtip32xx/mtip32xx.c drop_cpu(dd->work[1].cpu_binding);
work 4227 drivers/block/mtip32xx/mtip32xx.c drop_cpu(dd->work[2].cpu_binding);
work 173 drivers/block/mtip32xx/mtip32xx.h struct work_struct work;
work 180 drivers/block/mtip32xx/mtip32xx.h void mtip_workq_sdbf##group(struct work_struct *work) \
work 182 drivers/block/mtip32xx/mtip32xx.h struct mtip_work *w = (struct mtip_work *) work; \
work 462 drivers/block/mtip32xx/mtip32xx.h struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
work 65 drivers/block/nbd.c struct work_struct work;
work 71 drivers/block/nbd.c struct work_struct work;
work 157 drivers/block/nbd.c static void nbd_dead_link_work(struct work_struct *work);
work 269 drivers/block/nbd.c INIT_WORK(&args->work, nbd_dead_link_work);
work 271 drivers/block/nbd.c queue_work(system_wq, &args->work);
work 768 drivers/block/nbd.c static void recv_work(struct work_struct *work)
work 770 drivers/block/nbd.c struct recv_thread_args *args = container_of(work,
work 772 drivers/block/nbd.c work);
work 1095 drivers/block/nbd.c INIT_WORK(&args->work, recv_work);
work 1107 drivers/block/nbd.c queue_work(nbd->recv_workq, &args->work);
work 1286 drivers/block/nbd.c INIT_WORK(&args->work, recv_work);
work 1289 drivers/block/nbd.c queue_work(nbd->recv_workq, &args->work);
work 2342 drivers/block/nbd.c static void nbd_dead_link_work(struct work_struct *work)
work 2344 drivers/block/nbd.c struct link_dead_args *args = container_of(work, struct link_dead_args,
work 2345 drivers/block/nbd.c work);
work 367 drivers/block/paride/pd.c static void ps_tq_int(struct work_struct *work);
work 379 drivers/block/paride/pd.c static void ps_tq_int(struct work_struct *work)
work 38 drivers/block/paride/pseudo.h static void ps_tq_int(struct work_struct *work);
work 73 drivers/block/paride/pseudo.h static void ps_tq_int(struct work_struct *work)
work 350 drivers/block/rbd.c struct work_struct work;
work 2880 drivers/block/rbd.c static void rbd_img_handle_request_work(struct work_struct *work)
work 2883 drivers/block/rbd.c container_of(work, struct rbd_img_request, work);
work 2890 drivers/block/rbd.c INIT_WORK(&img_req->work, rbd_img_handle_request_work);
work 2892 drivers/block/rbd.c queue_work(rbd_wq, &img_req->work);
work 3873 drivers/block/rbd.c static void rbd_notify_acquired_lock(struct work_struct *work)
work 3875 drivers/block/rbd.c struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
work 3881 drivers/block/rbd.c static void rbd_notify_released_lock(struct work_struct *work)
work 3883 drivers/block/rbd.c struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
work 4202 drivers/block/rbd.c static void rbd_acquire_lock(struct work_struct *work)
work 4204 drivers/block/rbd.c struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
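The loop.c entries above use yet another mechanism, kthread_work: jobs are funneled to one dedicated kernel thread instead of the shared workqueue pool, which gives strict serialization and a thread whose priority and affinity the driver controls. A sketch under the same naming assumptions (my_ctx and my_cmd are hypothetical):

#include <linux/kthread.h>

struct my_ctx {
	struct kthread_worker worker;
	struct task_struct *task;
};

struct my_cmd {
	struct kthread_work work;
	/* ... request payload ... */
};

static void my_cmd_fn(struct kthread_work *work)
{
	struct my_cmd *cmd = container_of(work, struct my_cmd, work);

	/* executed serially on the dedicated thread */
	(void)cmd;
}

static int my_ctx_start(struct my_ctx *ctx)
{
	kthread_init_worker(&ctx->worker);
	ctx->task = kthread_run(kthread_worker_fn, &ctx->worker, "my_worker");
	return IS_ERR(ctx->task) ? PTR_ERR(ctx->task) : 0;
}

static void my_ctx_submit(struct my_ctx *ctx, struct my_cmd *cmd)
{
	kthread_init_work(&cmd->work, my_cmd_fn);
	kthread_queue_work(&ctx->worker, &cmd->work);
}

static void my_ctx_stop(struct my_ctx *ctx)
{
	kthread_flush_worker(&ctx->worker);	/* drain queued commands */
	kthread_stop(ctx->task);
}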
work 4303 drivers/block/rbd.c static void rbd_release_lock_work(struct work_struct *work)
work 4305 drivers/block/rbd.c struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
work 4691 drivers/block/rbd.c static void rbd_reregister_watch(struct work_struct *work)
work 4693 drivers/block/rbd.c struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
work 4794 drivers/block/rbd.c static void rbd_queue_workfn(struct work_struct *work)
work 4796 drivers/block/rbd.c struct request *rq = blk_mq_rq_from_pdu(work);
work 4914 drivers/block/rbd.c struct work_struct *work = blk_mq_rq_to_pdu(rq);
work 4916 drivers/block/rbd.c queue_work(rbd_wq, work);
work 5109 drivers/block/rbd.c struct work_struct *work = blk_mq_rq_to_pdu(rq);
work 5111 drivers/block/rbd.c INIT_WORK(work, rbd_queue_workfn);
work 460 drivers/block/rsxx/core.c static void card_event_handler(struct work_struct *work)
work 467 drivers/block/rsxx/core.c card = container_of(work, struct rsxx_cardinfo, event_work);
work 222 drivers/block/rsxx/cregs.c static void creg_cmd_done(struct work_struct *work)
work 228 drivers/block/rsxx/cregs.c card = container_of(work, struct rsxx_cardinfo,
work 578 drivers/block/rsxx/dma.c static void rsxx_schedule_issue(struct work_struct *work)
work 582 drivers/block/rsxx/dma.c ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
work 589 drivers/block/rsxx/dma.c static void rsxx_schedule_done(struct work_struct *work)
work 593 drivers/block/rsxx/dma.c ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
work 702 drivers/block/skd_main.c static void skd_start_queue(struct work_struct *work)
work 704 drivers/block/skd_main.c struct skd_device *skdev = container_of(work, typeof(*skdev),
work 1661 drivers/block/skd_main.c static void skd_completion_worker(struct work_struct *work)
work 1664 drivers/block/skd_main.c container_of(work, struct skd_device, completion_worker);
work 93 drivers/block/sunvdc.c static void vdc_ldc_reset_work(struct work_struct *work);
work 94 drivers/block/sunvdc.c static void vdc_ldc_reset_timer_work(struct work_struct *work);
work 1144 drivers/block/sunvdc.c static void vdc_ldc_reset_timer_work(struct work_struct *work)
work 1149 drivers/block/sunvdc.c port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
work 1162 drivers/block/sunvdc.c static void vdc_ldc_reset_work(struct work_struct *work)
work 1168 drivers/block/sunvdc.c port = container_of(work, struct vdc_port, ldc_reset_work);
work 1015 drivers/block/sx8.c unsigned int work = 0;
work 1044 drivers/block/sx8.c work++;
work 1047 drivers/block/sx8.c VPRINTK("EXIT, work==%u\n", work);
work 1048 drivers/block/sx8.c host->resp_idx += work;
work 1095 drivers/block/sx8.c static void carm_fsm_task (struct work_struct *work)
work 1098 drivers/block/sx8.c container_of(work, struct carm_host, fsm_task);
work 554 drivers/block/virtio_blk.c static void virtblk_config_changed_work(struct work_struct *work)
work 557 drivers/block/virtio_blk.c container_of(work, struct virtio_blk, config_work);
work 346 drivers/block/xen-blkback/blkback.c void xen_blkbk_unmap_purged_grants(struct work_struct *work)
work 352 drivers/block/xen-blkback/blkback.c struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
work 754 drivers/block/xen-blkback/blkback.c struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
work 762 drivers/block/xen-blkback/blkback.c work->data = req;
work 763 drivers/block/xen-blkback/blkback.c work->done = xen_blkbk_unmap_and_respond_callback;
work 764 drivers/block/xen-blkback/blkback.c work->unmap_ops = req->unmap;
work 765 drivers/block/xen-blkback/blkback.c work->kunmap_ops = NULL;
work 766 drivers/block/xen-blkback/blkback.c work->pages = req->unmap_pages;
work 767 drivers/block/xen-blkback/blkback.c work->count = invcount;
work 393 drivers/block/xen-blkback/common.h void xen_blkbk_unmap_purged_grants(struct work_struct *work);
work 47 drivers/block/xen-blkback/xenbus.c static void xen_blkif_deferred_free(struct work_struct *work)
work 51 drivers/block/xen-blkback/xenbus.c blkif = container_of(work, struct xen_blkif, free_work);
work 179 drivers/block/xen-blkfront.c struct work_struct work;
work 473 drivers/block/xen-blkfront.c schedule_work(&rinfo->work);
work 1202 drivers/block/xen-blkfront.c flush_work(&rinfo->work);
work 1235 drivers/block/xen-blkfront.c static void blkif_restart_queue(struct work_struct *work)
work 1237 drivers/block/xen-blkfront.c struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
work 1326 drivers/block/xen-blkfront.c flush_work(&rinfo->work);
work 1934 drivers/block/xen-blkfront.c INIT_WORK(&rinfo->work, blkif_restart_queue);
work 2692 drivers/block/xen-blkfront.c static void blkfront_delay_work(struct work_struct *work)
work 773 drivers/block/zram/zram_drv.c struct work_struct work;
work 781 drivers/block/zram/zram_drv.c static void zram_sync_read(struct work_struct *work)
work 783 drivers/block/zram/zram_drv.c struct zram_work *zw = container_of(work, struct zram_work, work);
work 799 drivers/block/zram/zram_drv.c struct zram_work work;
work 801 drivers/block/zram/zram_drv.c work.bvec = *bvec;
work 802 drivers/block/zram/zram_drv.c work.zram = zram;
work 803 drivers/block/zram/zram_drv.c work.entry = entry;
work 804 drivers/block/zram/zram_drv.c work.bio = bio;
work 806 drivers/block/zram/zram_drv.c INIT_WORK_ONSTACK(&work.work, zram_sync_read);
work 807 drivers/block/zram/zram_drv.c queue_work(system_unbound_wq, &work.work);
work 808 drivers/block/zram/zram_drv.c flush_work(&work.work);
work 809 drivers/block/zram/zram_drv.c destroy_work_on_stack(&work.work);
work 53 drivers/bluetooth/bcm203x.c struct work_struct work;
work 88 drivers/bluetooth/bcm203x.c schedule_work(&data->work);
work 141 drivers/bluetooth/bcm203x.c static void bcm203x_work(struct work_struct *work)
work 144 drivers/bluetooth/bcm203x.c container_of(work, struct bcm203x_data, work);
work 224 drivers/bluetooth/bcm203x.c INIT_WORK(&data->work, bcm203x_work);
work 229 drivers/bluetooth/bcm203x.c schedule_work(&data->work);
work 241 drivers/bluetooth/bcm203x.c cancel_work_sync(&data->work);
work 303 drivers/bluetooth/btmtksdio.c static void btmtksdio_tx_work(struct work_struct *work)
work 305 drivers/bluetooth/btmtksdio.c struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
work 368 drivers/bluetooth/btmtkuart.c static void btmtkuart_tx_work(struct work_struct *work)
work 370 drivers/bluetooth/btmtkuart.c struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
work 47 drivers/bluetooth/btsdio.c struct work_struct work;
work 91 drivers/bluetooth/btsdio.c static void btsdio_work(struct work_struct *work)
work 93 drivers/bluetooth/btsdio.c struct btsdio_data *data = container_of(work, struct btsdio_data, work);
work 261 drivers/bluetooth/btsdio.c schedule_work(&data->work);
work 300 drivers/bluetooth/btsdio.c INIT_WORK(&data->work, btsdio_work);
work 453 drivers/bluetooth/btusb.c struct work_struct work;
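The zram_drv.c entries above show the on-stack synchronous variant: the work item lives in the caller's stack frame, so the caller must flush it before the frame dies and tear it down with destroy_work_on_stack() to keep the debug-objects machinery happy. A sketch with hypothetical names (sync_work):

struct sync_work {
	struct work_struct work;
	void *buf;			/* illustrative payload */
};

static void sync_work_fn(struct work_struct *work)
{
	struct sync_work *sw = container_of(work, struct sync_work, work);

	/* ... perform the operation into sw->buf ... */
}

static void do_sync_op(void *buf)
{
	struct sync_work sw;		/* lives on the caller's stack */

	sw.buf = buf;
	INIT_WORK_ONSTACK(&sw.work, sync_work_fn);
	queue_work(system_unbound_wq, &sw.work);
	flush_work(&sw.work);		/* wait before the frame goes away */
	destroy_work_on_stack(&sw.work);
}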
work 1263 drivers/bluetooth/btusb.c cancel_work_sync(&data->work);
work 1478 drivers/bluetooth/btusb.c schedule_work(&data->work);
work 1525 drivers/bluetooth/btusb.c static void btusb_work(struct work_struct *work)
work 1527 drivers/bluetooth/btusb.c struct btusb_data *data = container_of(work, struct btusb_data, work);
work 1592 drivers/bluetooth/btusb.c static void btusb_waker(struct work_struct *work)
work 1594 drivers/bluetooth/btusb.c struct btusb_data *data = container_of(work, struct btusb_data, waker);
work 3654 drivers/bluetooth/btusb.c INIT_WORK(&data->work, btusb_work);
work 3987 drivers/bluetooth/btusb.c cancel_work_sync(&data->work);
work 4093 drivers/bluetooth/btusb.c schedule_work(&data->work);
work 71 drivers/bluetooth/hci_ath.c static void ath_hci_uart_work(struct work_struct *work)
work 78 drivers/bluetooth/hci_ath.c ath = container_of(work, struct ath_struct, ctxtsw);
work 935 drivers/bluetooth/hci_h5.c struct work_struct work;
work 938 drivers/bluetooth/hci_h5.c static void h5_btrtl_reprobe_worker(struct work_struct *work)
work 941 drivers/bluetooth/hci_h5.c container_of(work, struct h5_btrtl_reprobe, work);
work 963 drivers/bluetooth/hci_h5.c INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker);
work 965 drivers/bluetooth/hci_h5.c queue_work(system_long_wq, &reprobe->work);
work 363 drivers/bluetooth/hci_intel.c static void intel_busy_work(struct work_struct *work)
work 366 drivers/bluetooth/hci_intel.c struct intel_data *intel = container_of(work, struct intel_data,
work 146 drivers/bluetooth/hci_ldisc.c static void hci_uart_write_work(struct work_struct *work)
work 148 drivers/bluetooth/hci_ldisc.c struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
work 184 drivers/bluetooth/hci_ldisc.c void hci_uart_init_work(struct work_struct *work)
work 186 drivers/bluetooth/hci_ldisc.c struct hci_uart *hu = container_of(work, struct hci_uart, init_ready);
work 306 drivers/bluetooth/hci_qca.c static void qca_wq_awake_device(struct work_struct *work)
work 308 drivers/bluetooth/hci_qca.c struct qca_data *qca = container_of(work, struct qca_data,
work 337 drivers/bluetooth/hci_qca.c static void qca_wq_awake_rx(struct work_struct *work)
work 339 drivers/bluetooth/hci_qca.c struct qca_data *qca = container_of(work, struct qca_data,
work 365 drivers/bluetooth/hci_qca.c static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
work 367 drivers/bluetooth/hci_qca.c struct qca_data *qca = container_of(work, struct qca_data,
work 376 drivers/bluetooth/hci_qca.c static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
work 378 drivers/bluetooth/hci_qca.c struct qca_data *qca = container_of(work, struct qca_data,
work 59 drivers/bluetooth/hci_serdev.c static void hci_uart_write_work(struct work_struct *work)
work 61 drivers/bluetooth/hci_serdev.c struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
work 105 drivers/bluetooth/hci_uart.h void hci_uart_init_work(struct work_struct *work);
work 299 drivers/bluetooth/hci_vhci.c static void vhci_open_timeout(struct work_struct *work)
work 301 drivers/bluetooth/hci_vhci.c struct vhci_data *data = container_of(work, struct vhci_data,
work 302 drivers/bluetooth/hci_vhci.c open_timeout.work);
work 156 drivers/bus/mips_cdmm.c struct mips_cdmm_work_dev *work = data;
work 157 drivers/bus/mips_cdmm.c void (*fn)(struct mips_cdmm_device *) = work->fn;
work 159 drivers/bus/mips_cdmm.c fn(work->dev);
work 172 drivers/bus/mips_cdmm.c struct mips_cdmm_work_dev *work = data;
work 173 drivers/bus/mips_cdmm.c int (*fn)(struct mips_cdmm_device *) = work->fn;
work 175 drivers/bus/mips_cdmm.c return fn(work->dev);
work 195 drivers/bus/mips_cdmm.c struct mips_cdmm_work_dev work = { \
work 201 drivers/bus/mips_cdmm.c mips_cdmm_##_ret##_work, &work); \
work 2401 drivers/bus/ti-sysc.c static void ti_sysc_idle(struct work_struct *work)
work 2405 drivers/bus/ti-sysc.c ddata = container_of(work, struct sysc, idle_work.work);
work 442 drivers/char/hw_random/n2-drv.c schedule_delayed_work(&np->work, 0);
work 659 drivers/char/hw_random/n2-drv.c static void n2rng_work(struct work_struct *work)
work 661 drivers/char/hw_random/n2-drv.c struct n2rng *np = container_of(work, struct n2rng, work.work);
work 684 drivers/char/hw_random/n2-drv.c schedule_delayed_work(&np->work, HZ * 2);
work 713 drivers/char/hw_random/n2-drv.c INIT_DELAYED_WORK(&np->work, n2rng_work);
work 777 drivers/char/hw_random/n2-drv.c schedule_delayed_work(&np->work, 0);
work 794 drivers/char/hw_random/n2-drv.c cancel_delayed_work_sync(&np->work);
work 119 drivers/char/hw_random/n2rng.h struct delayed_work work;
work 38 drivers/char/hw_random/omap3-rom-rng.c static void omap3_rom_rng_idle(struct work_struct *work)
work 1167 drivers/char/ipmi/ipmi_msghandler.c static void free_user_work(struct work_struct *work)
work 1169 drivers/char/ipmi/ipmi_msghandler.c struct ipmi_user *user = container_of(work, struct ipmi_user,
work 2918 drivers/char/ipmi/ipmi_msghandler.c static void cleanup_bmc_work(struct work_struct *work)
work 2920 drivers/char/ipmi/ipmi_msghandler.c struct bmc_device *bmc = container_of(work, struct bmc_device,
work 3380 drivers/char/ipmi/ipmi_msghandler.c static void redo_bmc_reg(struct work_struct *work)
work 3382 drivers/char/ipmi/ipmi_msghandler.c struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
work 412 drivers/char/pcmcia/synclink_cs.c static void bh_handler(struct work_struct *work);
work 768 drivers/char/pcmcia/synclink_cs.c static void bh_handler(struct work_struct *work)
work 770 drivers/char/pcmcia/synclink_cs.c MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
work 898 drivers/char/pcmcia/synclink_cs.c int work = 0;
work 944 drivers/char/pcmcia/synclink_cs.c work += tty_insert_flip_char(port, data, flag);
work 956 drivers/char/pcmcia/synclink_cs.c if (work)
work 553 drivers/char/random.c static void push_to_pool(struct work_struct *work);
work 893 drivers/char/random.c static void do_numa_crng_init(struct work_struct *work)
work 1433 drivers/char/random.c static void push_to_pool(struct work_struct *work)
work 1435 drivers/char/random.c struct entropy_store *r = container_of(work, struct entropy_store,
work 761 drivers/char/sonypi.c static void input_keyrelease(struct work_struct *work)
work 56 drivers/char/tpm/tpm-dev-common.c static void tpm_dev_async_work(struct work_struct *work)
work 59 drivers/char/tpm/tpm-dev-common.c container_of(work, struct file_priv, async_work);
work 92 drivers/char/tpm/tpm-dev-common.c static void tpm_timeout_work(struct work_struct *work)
work 94 drivers/char/tpm/tpm-dev-common.c struct file_priv *priv = container_of(work, struct file_priv,
work 47 drivers/char/tpm/tpm_vtpm_proxy.c struct work_struct work; /* task that retrieves TPM timeouts */
work 451 drivers/char/tpm/tpm_vtpm_proxy.c static void vtpm_proxy_work(struct work_struct *work)
work 453 drivers/char/tpm/tpm_vtpm_proxy.c struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev,
work 454 drivers/char/tpm/tpm_vtpm_proxy.c work);
work 473 drivers/char/tpm/tpm_vtpm_proxy.c flush_work(&proxy_dev->work);
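Several drivers in this span (tpm_vtpm_proxy.c here, nbd.c and hisilicon/qm.c earlier) queue onto a private workqueue instead of the shared system ones, either for isolation or for forward-progress guarantees. A sketch of that lifecycle with a hypothetical queue name (my_wq):

static struct workqueue_struct *my_wq;

static int my_init(void)
{
	/*
	 * An ordered queue executes at most one item at a time;
	 * WQ_MEM_RECLAIM guarantees forward progress under memory
	 * pressure, which drivers on the I/O path typically want.
	 */
	my_wq = alloc_ordered_workqueue("my_wq", WQ_MEM_RECLAIM);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_exit(void)
{
	destroy_workqueue(my_wq);	/* drains pending work first */
}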
work 481 drivers/char/tpm/tpm_vtpm_proxy.c queue_work(workqueue, &proxy_dev->work);
work 499 drivers/char/tpm/tpm_vtpm_proxy.c INIT_WORK(&proxy_dev->work, vtpm_proxy_work);
work 1703 drivers/char/virtio_console.c static void control_work_handler(struct work_struct *work)
work 1710 drivers/char/virtio_console.c portdev = container_of(work, struct ports_device, control_work);
work 1817 drivers/char/virtio_console.c static void config_work_handler(struct work_struct *work)
work 1821 drivers/char/virtio_console.c portdev = container_of(work, struct ports_device, config_work);
work 308 drivers/char/xillybus/xillybus_core.c static void xillybus_autoflush(struct work_struct *work);
work 1164 drivers/char/xillybus/xillybus_core.c static void xillybus_autoflush(struct work_struct *work)
work 1167 drivers/char/xillybus/xillybus_core.c work, struct delayed_work, work);
work 57 drivers/clocksource/numachip.c static __init void numachip_timer_each(struct work_struct *work)
work 1123 drivers/cpufreq/cpufreq.c static void handle_update(struct work_struct *work)
work 1126 drivers/cpufreq/cpufreq.c container_of(work, struct cpufreq_policy, update);
work 232 drivers/cpufreq/cpufreq_governor.c static void dbs_work_handler(struct work_struct *work)
work 238 drivers/cpufreq/cpufreq_governor.c policy_dbs = container_of(work, struct policy_dbs_info, work);
work 266 drivers/cpufreq/cpufreq_governor.c schedule_work_on(smp_processor_id(), &policy_dbs->work);
work 364 drivers/cpufreq/cpufreq_governor.c INIT_WORK(&policy_dbs->work, dbs_work_handler);
work 549 drivers/cpufreq/cpufreq_governor.c cancel_work_sync(&policy_dbs->work);
work 91 drivers/cpufreq/cpufreq_governor.h struct work_struct work;
work 335 drivers/cpufreq/intel_pstate.c static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
work 902 drivers/cpufreq/powernv-cpufreq.c void powernv_cpufreq_work_fn(struct work_struct *work)
work 904 drivers/cpufreq/powernv-cpufreq.c struct chip *chip = container_of(work, struct chip, throttle);
work 253 drivers/crypto/atmel-i2c.c static void atmel_i2c_work_handler(struct work_struct *work)
work 256 drivers/crypto/atmel-i2c.c container_of(work, struct atmel_i2c_work_data, work);
work 273 drivers/crypto/atmel-i2c.c INIT_WORK(&work_data->work, atmel_i2c_work_handler);
work 274 drivers/crypto/atmel-i2c.c schedule_work(&work_data->work);
work 166 drivers/crypto/atmel-i2c.h struct work_struct work;
work 27 drivers/crypto/cavium/nitrox/nitrox_common.h void backlog_qflush_work(struct work_struct *work);
work 92 drivers/crypto/cavium/nitrox/nitrox_mbx.c static void pf2vf_resp_handler(struct work_struct *work)
work 94 drivers/crypto/cavium/nitrox/nitrox_mbx.c struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
work 500 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c void backlog_qflush_work(struct work_struct *work)
work 504 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
work 82 drivers/crypto/ccp/ccp-crypto-main.c struct work_struct work;
work 345 drivers/crypto/ccp/ccp-dev.c static void ccp_do_cmd_backlog(struct work_struct *work)
work 347 drivers/crypto/ccp/ccp-dev.c struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
work 412 drivers/crypto/ccp/ccp-dev.c INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
work 413 drivers/crypto/ccp/ccp-dev.c schedule_work(&backlog->work);
work 75 drivers/crypto/ccree/cc_request_mgr.c static void comp_work_handler(struct work_struct *work);
work 563 drivers/crypto/ccree/cc_request_mgr.c static void comp_work_handler(struct work_struct *work)
work 566 drivers/crypto/ccree/cc_request_mgr.c container_of(work, struct cc_drvdata, compwork.work);
work 53 drivers/crypto/chelsio/chcr_core.c static void detach_work_fn(struct work_struct *work)
work 57 drivers/crypto/chelsio/chcr_core.c dev = container_of(work, struct chcr_dev, detach_work.work);
work 418 drivers/crypto/hifn_795x.c struct delayed_work work;
work 1774 drivers/crypto/hifn_795x.c static void hifn_work(struct work_struct *work)
work 1776 drivers/crypto/hifn_795x.c struct delayed_work *dw = to_delayed_work(work);
work 1777 drivers/crypto/hifn_795x.c struct hifn_device *dev = container_of(dw, struct hifn_device, work);
work 1843 drivers/crypto/hifn_795x.c schedule_delayed_work(&dev->work, HZ);
work 2556 drivers/crypto/hifn_795x.c INIT_DELAYED_WORK(&dev->work, hifn_work);
work 2557 drivers/crypto/hifn_795x.c schedule_delayed_work(&dev->work, HZ);
work 2600 drivers/crypto/hifn_795x.c cancel_delayed_work_sync(&dev->work);
work 481 drivers/crypto/hisilicon/qm.c static void qm_qp_work_func(struct work_struct *work)
work 485 drivers/crypto/hisilicon/qm.c qp = container_of(work, struct hisi_qp, work);
work 500 drivers/crypto/hisilicon/qm.c queue_work(qp->wq, &qp->work);
work 1147 drivers/crypto/hisilicon/qm.c INIT_WORK(&qp->work, qm_qp_work_func);
work 192 drivers/crypto/hisilicon/qm.h struct work_struct work;
work 1027 drivers/crypto/inside-secure/safexcel.c static void safexcel_dequeue_work(struct work_struct *work)
work 1030 drivers/crypto/inside-secure/safexcel.c container_of(work, struct safexcel_work_data, work);
work 1086 drivers/crypto/inside-secure/safexcel.c &priv->ring[ring].work_data.work);
work 1495 drivers/crypto/inside-secure/safexcel.c INIT_WORK(&priv->ring[i].work_data.work,
work 610 drivers/crypto/inside-secure/safexcel.h struct work_struct work;
work 824 drivers/crypto/inside-secure/safexcel_cipher.c &priv->ring[ring].work_data.work);
work 969 drivers/crypto/inside-secure/safexcel_cipher.c &priv->ring[ring].work_data.work);
work 1045 drivers/crypto/inside-secure/safexcel_cipher.c &priv->ring[ring].work_data.work);
work 453 drivers/crypto/inside-secure/safexcel_hash.c &priv->ring[ring].work_data.work);
work 541 drivers/crypto/inside-secure/safexcel_hash.c &priv->ring[ring].work_data.work);
work 633 drivers/crypto/inside-secure/safexcel_hash.c &priv->ring[ring].work_data.work);
work 130 drivers/crypto/qat/qat_common/adf_aer.c static void adf_device_reset_worker(struct work_struct *work)
work 133 drivers/crypto/qat/qat_common/adf_aer.c container_of(work, struct adf_reset_dev_data, reset_work);
work 87 drivers/crypto/qat/qat_common/adf_sriov.c static void adf_iov_send_resp(struct work_struct *work)
work 90 drivers/crypto/qat/qat_common/adf_sriov.c container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
work 72 drivers/crypto/qat/qat_common/adf_vf_isr.c struct work_struct work;
work 101 drivers/crypto/qat/qat_common/adf_vf_isr.c static void adf_dev_stop_async(struct work_struct *work)
work 104 drivers/crypto/qat/qat_common/adf_vf_isr.c container_of(work, struct adf_vf_stop_data, work);
work 148 drivers/crypto/qat/qat_common/adf_vf_isr.c INIT_WORK(&stop_data->work, adf_dev_stop_async);
work 149 drivers/crypto/qat/qat_common/adf_vf_isr.c queue_work(adf_vf_stop_wq, &stop_data->work);
work 384 drivers/devfreq/devfreq.c static void devfreq_monitor(struct work_struct *work)
work 387 drivers/devfreq/devfreq.c struct devfreq *devfreq = container_of(work,
work 388
drivers/devfreq/devfreq.c struct devfreq, work.work); work 395 drivers/devfreq/devfreq.c queue_delayed_work(devfreq_wq, &devfreq->work, work 413 drivers/devfreq/devfreq.c INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); work 415 drivers/devfreq/devfreq.c queue_delayed_work(devfreq_wq, &devfreq->work, work 430 drivers/devfreq/devfreq.c cancel_delayed_work_sync(&devfreq->work); work 457 drivers/devfreq/devfreq.c cancel_delayed_work_sync(&devfreq->work); work 477 drivers/devfreq/devfreq.c if (!delayed_work_pending(&devfreq->work) && work 479 drivers/devfreq/devfreq.c queue_delayed_work(devfreq_wq, &devfreq->work, work 516 drivers/devfreq/devfreq.c cancel_delayed_work_sync(&devfreq->work); work 522 drivers/devfreq/devfreq.c queue_delayed_work(devfreq_wq, &devfreq->work, work 530 drivers/devfreq/devfreq.c cancel_delayed_work_sync(&devfreq->work); work 533 drivers/devfreq/devfreq.c queue_delayed_work(devfreq_wq, &devfreq->work, work 47 drivers/dma-buf/dma-fence-array.c struct dma_fence_array *array = container_of(wrk, typeof(*array), work); work 65 drivers/dma-buf/dma-fence-array.c irq_work_queue(&array->work); work 167 drivers/dma-buf/dma-fence-array.c init_irq_work(&array->work, irq_dma_fence_array_work); work 121 drivers/dma-buf/dma-fence-chain.c static void dma_fence_chain_irq_work(struct irq_work *work) work 125 drivers/dma-buf/dma-fence-chain.c chain = container_of(work, typeof(*chain), work); work 139 drivers/dma-buf/dma-fence-chain.c irq_work_queue(&chain->work); work 240 drivers/dma-buf/dma-fence-chain.c init_irq_work(&chain->work, dma_fence_chain_irq_work); work 1057 drivers/dma/imx-sdma.c static void sdma_channel_terminate_work(struct work_struct *work) work 1059 drivers/dma/imx-sdma.c struct sdma_channel *sdmac = container_of(work, struct sdma_channel, work 370 drivers/edac/edac_device.c edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); work 372 drivers/edac/edac_device.c edac_queue_work(&edac_dev->work, edac_dev->delay); work 392 drivers/edac/edac_device.c INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function); work 400 drivers/edac/edac_device.c edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); work 402 drivers/edac/edac_device.c edac_queue_work(&edac_dev->work, edac_dev->delay); work 416 drivers/edac/edac_device.c edac_stop_work(&edac_dev->work); work 437 drivers/edac/edac_device.c edac_mod_work(&edac_dev->work, jiffs); work 184 drivers/edac/edac_device.h struct delayed_work work; work 231 drivers/edac/edac_device.h container_of(w, struct mem_ctl_info, work) work 234 drivers/edac/edac_device.h container_of(w,struct edac_device_ctl_info,work) work 586 drivers/edac/edac_mc.c edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); work 606 drivers/edac/edac_mc.c edac_mod_work(&mci->work, value); work 756 drivers/edac/edac_mc.c INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); work 757 drivers/edac/edac_mc.c edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); work 807 drivers/edac/edac_mc.c edac_stop_work(&mci->work); work 55 drivers/edac/edac_module.h bool edac_queue_work(struct delayed_work *work, unsigned long delay); work 56 drivers/edac/edac_module.h bool edac_stop_work(struct delayed_work *work); work 57 drivers/edac/edac_module.h bool edac_mod_work(struct delayed_work *work, unsigned long delay); work 193 drivers/edac/edac_pci.c edac_queue_work(&pci->work, delay); work 225 drivers/edac/edac_pci.c INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function); work 226 
drivers/edac/edac_pci.c edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec())); work 273 drivers/edac/edac_pci.c edac_stop_work(&pci->work); work 56 drivers/edac/edac_pci.h struct delayed_work work; work 96 drivers/edac/edac_pci.h container_of(w, struct edac_pci_ctl_info,work) work 577 drivers/edac/i5100_edac.c static void i5100_refresh_scrubbing(struct work_struct *work) work 579 drivers/edac/i5100_edac.c struct delayed_work *i5100_scrubbing = to_delayed_work(work); work 6 drivers/edac/wq.c bool edac_queue_work(struct delayed_work *work, unsigned long delay) work 8 drivers/edac/wq.c return queue_delayed_work(wq, work, delay); work 12 drivers/edac/wq.c bool edac_mod_work(struct delayed_work *work, unsigned long delay) work 14 drivers/edac/wq.c return mod_delayed_work(wq, work, delay); work 18 drivers/edac/wq.c bool edac_stop_work(struct delayed_work *work) work 22 drivers/edac/wq.c ret = cancel_delayed_work_sync(work); work 55 drivers/extcon/extcon-adc-jack.c static void adc_jack_handler(struct work_struct *work) work 57 drivers/extcon/extcon-adc-jack.c struct adc_jack_data *data = container_of(to_delayed_work(work), work 157 drivers/extcon/extcon-adc-jack.c adc_jack_handler(&data->handler.work); work 166 drivers/extcon/extcon-adc-jack.c cancel_work_sync(&data->handler.work); work 795 drivers/extcon/extcon-arizona.c static void arizona_micd_timeout_work(struct work_struct *work) work 797 drivers/extcon/extcon-arizona.c struct arizona_extcon_info *info = container_of(work, work 799 drivers/extcon/extcon-arizona.c micd_timeout_work.work); work 814 drivers/extcon/extcon-arizona.c static void arizona_micd_detect(struct work_struct *work) work 816 drivers/extcon/extcon-arizona.c struct arizona_extcon_info *info = container_of(work, work 818 drivers/extcon/extcon-arizona.c micd_detect_work.work); work 1034 drivers/extcon/extcon-arizona.c arizona_micd_detect(&info->micd_detect_work.work); work 1039 drivers/extcon/extcon-arizona.c static void arizona_hpdet_work(struct work_struct *work) work 1041 drivers/extcon/extcon-arizona.c struct arizona_extcon_info *info = container_of(work, work 1043 drivers/extcon/extcon-arizona.c hpdet_work.work); work 177 drivers/extcon/extcon-axp288.c static void axp288_usb_role_work(struct work_struct *work) work 180 drivers/extcon/extcon-axp288.c container_of(work, struct axp288_extcon_info, role_work); work 36 drivers/extcon/extcon-gpio.c struct delayed_work work; work 44 drivers/extcon/extcon-gpio.c static void gpio_extcon_work(struct work_struct *work) work 48 drivers/extcon/extcon-gpio.c container_of(to_delayed_work(work), struct gpio_extcon_data, work 49 drivers/extcon/extcon-gpio.c work); work 59 drivers/extcon/extcon-gpio.c queue_delayed_work(system_power_efficient_wq, &data->work, work 115 drivers/extcon/extcon-gpio.c INIT_DELAYED_WORK(&data->work, gpio_extcon_work); work 129 drivers/extcon/extcon-gpio.c gpio_extcon_work(&data->work.work); work 138 drivers/extcon/extcon-gpio.c cancel_delayed_work_sync(&data->work); work 151 drivers/extcon/extcon-gpio.c &data->work, data->debounce_jiffies); work 28 drivers/extcon/extcon-intel-int3496.c struct delayed_work work; work 55 drivers/extcon/extcon-intel-int3496.c static void int3496_do_usb_id(struct work_struct *work) work 58 drivers/extcon/extcon-intel-int3496.c container_of(work, struct int3496_data, work.work); work 82 drivers/extcon/extcon-intel-int3496.c mod_delayed_work(system_wq, &data->work, DEBOUNCE_TIME); work 104 drivers/extcon/extcon-intel-int3496.c INIT_DELAYED_WORK(&data->work, 
int3496_do_usb_id); work 150 drivers/extcon/extcon-intel-int3496.c queue_delayed_work(system_wq, &data->work, 0); work 151 drivers/extcon/extcon-intel-int3496.c flush_delayed_work(&data->work); work 163 drivers/extcon/extcon-intel-int3496.c cancel_delayed_work_sync(&data->work); work 480 drivers/extcon/extcon-max14577.c static void max14577_muic_irq_work(struct work_struct *work) work 482 drivers/extcon/extcon-max14577.c struct max14577_muic_info *info = container_of(work, work 647 drivers/extcon/extcon-max14577.c static void max14577_muic_detect_cable_wq(struct work_struct *work) work 649 drivers/extcon/extcon-max14577.c struct max14577_muic_info *info = container_of(to_delayed_work(work), work 935 drivers/extcon/extcon-max77693.c static void max77693_muic_irq_work(struct work_struct *work) work 937 drivers/extcon/extcon-max77693.c struct max77693_muic_info *info = container_of(work, work 1059 drivers/extcon/extcon-max77693.c static void max77693_muic_detect_cable_wq(struct work_struct *work) work 1061 drivers/extcon/extcon-max77693.c struct max77693_muic_info *info = container_of(to_delayed_work(work), work 627 drivers/extcon/extcon-max77843.c static void max77843_muic_irq_work(struct work_struct *work) work 629 drivers/extcon/extcon-max77843.c struct max77843_muic_info *info = container_of(work, work 703 drivers/extcon/extcon-max77843.c static void max77843_muic_detect_cable_wq(struct work_struct *work) work 705 drivers/extcon/extcon-max77843.c struct max77843_muic_info *info = container_of(to_delayed_work(work), work 505 drivers/extcon/extcon-max8997.c static void max8997_muic_irq_work(struct work_struct *work) work 507 drivers/extcon/extcon-max8997.c struct max8997_muic_info *info = container_of(work, work 616 drivers/extcon/extcon-max8997.c static void max8997_muic_detect_cable_wq(struct work_struct *work) work 618 drivers/extcon/extcon-max8997.c struct max8997_muic_info *info = container_of(to_delayed_work(work), work 115 drivers/extcon/extcon-palmas.c static void palmas_gpio_id_detect(struct work_struct *work) work 118 drivers/extcon/extcon-palmas.c struct palmas_usb *palmas_usb = container_of(to_delayed_work(work), work 359 drivers/extcon/extcon-palmas.c palmas_gpio_id_detect(&palmas_usb->wq_detectid.work); work 409 drivers/extcon/extcon-palmas.c palmas_gpio_id_detect(&palmas_usb->wq_detectid.work); work 86 drivers/extcon/extcon-ptn5150.c static void ptn5150_irq_work(struct work_struct *work) work 88 drivers/extcon/extcon-ptn5150.c struct ptn5150_info *info = container_of(work, work 34 drivers/extcon/extcon-qcom-spmi-misc.c static void qcom_usb_extcon_detect_cable(struct work_struct *work) work 38 drivers/extcon/extcon-qcom-spmi-misc.c struct qcom_usb_extcon_info *info = container_of(to_delayed_work(work), work 103 drivers/extcon/extcon-qcom-spmi-misc.c qcom_usb_extcon_detect_cable(&info->wq_detcable.work); work 407 drivers/extcon/extcon-rt8973a.c static void rt8973a_muic_irq_work(struct work_struct *work) work 409 drivers/extcon/extcon-rt8973a.c struct rt8973a_muic_info *info = container_of(work, work 489 drivers/extcon/extcon-rt8973a.c static void rt8973a_muic_detect_cable_wq(struct work_struct *work) work 491 drivers/extcon/extcon-rt8973a.c struct rt8973a_muic_info *info = container_of(to_delayed_work(work), work 424 drivers/extcon/extcon-sm5502.c static void sm5502_muic_irq_work(struct work_struct *work) work 426 drivers/extcon/extcon-sm5502.c struct sm5502_muic_info *info = container_of(work, work 503 drivers/extcon/extcon-sm5502.c static void 
sm5502_muic_detect_cable_wq(struct work_struct *work) work 505 drivers/extcon/extcon-sm5502.c struct sm5502_muic_info *info = container_of(to_delayed_work(work), work 61 drivers/extcon/extcon-usb-gpio.c static void usb_extcon_detect_cable(struct work_struct *work) work 64 drivers/extcon/extcon-usb-gpio.c struct usb_extcon_info *info = container_of(to_delayed_work(work), work 192 drivers/extcon/extcon-usb-gpio.c usb_extcon_detect_cable(&info->wq_detcable.work); work 240 drivers/firewire/core-card.c static void br_work(struct work_struct *work) work 242 drivers/firewire/core-card.c struct fw_card *card = container_of(work, struct fw_card, br_work.work); work 287 drivers/firewire/core-card.c static void bm_work(struct work_struct *work) work 289 drivers/firewire/core-card.c struct fw_card *card = container_of(work, struct fw_card, bm_work.work); work 127 drivers/firewire/core-cdev.c struct delayed_work work; work 141 drivers/firewire/core-cdev.c if (!queue_delayed_work(fw_workqueue, &r->work, delay)) work 1234 drivers/firewire/core-cdev.c static void iso_resource_work(struct work_struct *work) work 1238 drivers/firewire/core-cdev.c container_of(work, struct iso_resource, work.work); work 1324 drivers/firewire/core-cdev.c cancel_delayed_work(&r->work); work 1364 drivers/firewire/core-cdev.c INIT_DELAYED_WORK(&r->work, iso_resource_work); work 770 drivers/firewire/core-device.c queue_delayed_work(fw_workqueue, &device->work, delay); work 789 drivers/firewire/core-device.c static void fw_device_shutdown(struct work_struct *work) work 792 drivers/firewire/core-device.c container_of(work, struct fw_device, work.work); work 862 drivers/firewire/core-device.c static void fw_device_update(struct work_struct *work) work 865 drivers/firewire/core-device.c container_of(work, struct fw_device, work.work); work 984 drivers/firewire/core-device.c static void fw_device_init(struct work_struct *work) work 987 drivers/firewire/core-device.c container_of(work, struct fw_device, work.work); work 1129 drivers/firewire/core-device.c static void fw_device_refresh(struct work_struct *work) work 1132 drivers/firewire/core-device.c container_of(work, struct fw_device, work.work); work 1147 drivers/firewire/core-device.c fw_device_update(work); work 1196 drivers/firewire/core-device.c static void fw_device_workfn(struct work_struct *work) work 1198 drivers/firewire/core-device.c struct fw_device *device = container_of(to_delayed_work(work), work 1199 drivers/firewire/core-device.c struct fw_device, work); work 1200 drivers/firewire/core-device.c device->workfn(work); work 1253 drivers/firewire/core-device.c INIT_DELAYED_WORK(&device->work, fw_device_workfn); work 1877 drivers/firewire/ohci.c static void bus_reset_work(struct work_struct *work) work 1880 drivers/firewire/ohci.c container_of(work, struct fw_ohci, bus_reset_work); work 137 drivers/firewire/sbp2.c struct delayed_work work; work 144 drivers/firewire/sbp2.c queue_delayed_work(fw_workqueue, &lu->work, delay); work 793 drivers/firewire/sbp2.c static void sbp2_reconnect(struct work_struct *work); work 795 drivers/firewire/sbp2.c static void sbp2_login(struct work_struct *work) work 798 drivers/firewire/sbp2.c container_of(work, struct sbp2_logical_unit, work.work); work 905 drivers/firewire/sbp2.c static void sbp2_reconnect(struct work_struct *work) work 908 drivers/firewire/sbp2.c container_of(work, struct sbp2_logical_unit, work.work); work 956 drivers/firewire/sbp2.c static void sbp2_lu_workfn(struct work_struct *work) work 958 drivers/firewire/sbp2.c 
struct sbp2_logical_unit *lu = container_of(to_delayed_work(work), work 959 drivers/firewire/sbp2.c struct sbp2_logical_unit, work); work 960 drivers/firewire/sbp2.c lu->workfn(work); work 990 drivers/firewire/sbp2.c INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn); work 1223 drivers/firewire/sbp2.c cancel_delayed_work_sync(&lu->work); work 608 drivers/firmware/efi/efivars.c static void efivar_update_sysfs_entries(struct work_struct *work) work 69 drivers/firmware/efi/runtime-wrappers.c INIT_WORK(&efi_rts_work.work, efi_call_rts); \ work 81 drivers/firmware/efi/runtime-wrappers.c if (queue_work(efi_rts_wq, &efi_rts_work.work)) \ work 178 drivers/firmware/efi/runtime-wrappers.c static void efi_call_rts(struct work_struct *work) work 65 drivers/firmware/imx/imx-scu-irq.c static void imx_scu_irq_work_handler(struct work_struct *work) work 75 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work); work 1876 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) work 1878 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c struct delayed_work *dwork = to_delayed_work(work); work 2140 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) work 2143 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c container_of(work, struct amdgpu_device, delayed_init_work.work); work 2151 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) work 2154 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); work 46 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c struct amdgpu_flip_work *work = work 50 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c schedule_work(&work->flip_work.work); work 53 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work, work 63 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c if (!dma_fence_add_callback(fence, &work->cb, work 74 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c container_of(__work, struct delayed_work, work); work 75 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c struct amdgpu_flip_work *work = work 77 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c struct amdgpu_device *adev = work->adev; work 78 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id]; work 85 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c if (amdgpu_display_flip_handle_fence(work, &work->excl)) work 88 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c for (i = 0; i < work->shared_count; ++i) work 89 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c if (amdgpu_display_flip_handle_fence(work, &work->shared[i])) work 96 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, work 101 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c (int)(work->target_vblank - work 103 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); work 111 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); work 119 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c amdgpu_crtc->crtc_id, amdgpu_crtc, work); work 128 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c struct amdgpu_flip_work *work = work 133 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c r = amdgpu_bo_reserve(work->old_abo, 
true); work 135 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c r = amdgpu_bo_unpin(work->old_abo); work 139 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c amdgpu_bo_unreserve(work->old_abo); work 143 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c amdgpu_bo_unref(&work->old_abo); work 144 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c kfree(work->shared); work 145 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c kfree(work); work 158 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c struct amdgpu_flip_work *work; work 164 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c work = kzalloc(sizeof *work, GFP_KERNEL); work 165 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c if (work == NULL) work 168 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func); work 169 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func); work 171 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c work->event = event; work 172 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c work->adev = adev; work 173 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c work->crtc_id = amdgpu_crtc->crtc_id; work 174 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; work 180 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c work->old_abo = gem_to_amdgpu_bo(obj); work 181 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c amdgpu_bo_ref(work->old_abo); work 208 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, work 209 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c &work->shared_count, work 210 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c &work->shared); work 220 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c work->base = amdgpu_bo_gpu_offset(new_abo); work 221 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + work 222 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c amdgpu_get_vblank_counter_kms(dev, work->crtc_id); work 234 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c amdgpu_crtc->pflip_works = work; work 238 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c amdgpu_crtc->crtc_id, amdgpu_crtc, work); work 242 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c amdgpu_display_flip_work_func(&work->flip_work.work); work 259 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c amdgpu_bo_unref(&work->old_abo); work 260 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c dma_fence_put(work->excl); work 261 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c for (i = 0; i < work->shared_count; ++i) work 262 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c dma_fence_put(work->shared[i]); work 263 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c kfree(work->shared); work 264 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c kfree(work); work 73 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h struct work_struct work; work 227 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h struct work_struct work; work 83 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c static void amdgpu_hotplug_work_func(struct work_struct *work) work 85 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c struct amdgpu_device *adev = container_of(work, struct amdgpu_device, work 166 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c static void amdgpu_irq_handle_ih1(struct work_struct *work) work 168 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c struct amdgpu_device *adev = container_of(work, struct amdgpu_device, work 181 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c static void amdgpu_irq_handle_ih2(struct work_struct *work) work 183 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c struct 
amdgpu_device *adev = container_of(work, struct amdgpu_device, work 73 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c static void amdgpu_mn_destroy(struct work_struct *work) work 75 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work); work 109 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c INIT_WORK(&amn->work, amdgpu_mn_destroy); work 110 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c schedule_work(&amn->work); work 59 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h struct work_struct work; work 2426 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c void amdgpu_dpm_thermal_work_handler(struct work_struct *work) work 2429 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c container_of(work, struct amdgpu_device, work 2430 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c pm.dpm.thermal.work); work 41 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h void amdgpu_dpm_thermal_work_handler(struct work_struct *work); work 1040 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) work 1043 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c container_of(work, struct ras_ih_data, ih_work); work 1202 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c static void amdgpu_ras_do_recovery(struct work_struct *work) work 1205 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c container_of(work, struct amdgpu_ras, recovery_work); work 121 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c static void amdgpu_uvd_idle_work_handler(struct work_struct *work); work 1178 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c static void amdgpu_uvd_idle_work_handler(struct work_struct *work) work 1181 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c container_of(work, struct amdgpu_device, uvd.idle_work.work); work 82 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c static void amdgpu_vce_idle_work_handler(struct work_struct *work); work 325 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c static void amdgpu_vce_idle_work_handler(struct work_struct *work) work 328 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c container_of(work, struct amdgpu_device, vce.idle_work.work); work 64 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c static void amdgpu_vcn_idle_work_handler(struct work_struct *work); work 287 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c static void amdgpu_vcn_idle_work_handler(struct work_struct *work) work 290 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c container_of(work, struct amdgpu_device, vcn.idle_work.work); work 1949 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_sq_irq_work_func(struct work_struct *work); work 2009 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func); work 6882 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_sq_irq_work_func(struct work_struct *work) work 6885 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work); work 6886 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c struct sq_work *sq_work = container_of(work, struct sq_work, work); work 6902 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c if (work_pending(&adev->gfx.sq_work.work)) { work 6906 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c schedule_work(&adev->gfx.sq_work.work); work 3020 drivers/gpu/drm/amd/amdgpu/kv_dpm.c INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); work 3044 drivers/gpu/drm/amd/amdgpu/kv_dpm.c flush_work(&adev->pm.dpm.thermal.work); work 3211 drivers/gpu/drm/amd/amdgpu/kv_dpm.c schedule_work(&adev->pm.dpm.thermal.work); work 312 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c static void xgpu_ai_mailbox_flr_work(struct work_struct *work) work 314 
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); work 511 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c static void xgpu_vi_mailbox_flr_work(struct work_struct *work) work 513 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); work 7577 drivers/gpu/drm/amd/amdgpu/si_dpm.c schedule_work(&adev->pm.dpm.thermal.work); work 7718 drivers/gpu/drm/amd/amdgpu/si_dpm.c INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); work 7742 drivers/gpu/drm/amd/amdgpu/si_dpm.c flush_work(&adev->pm.dpm.thermal.work); work 62 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static void kfd_process_hw_exception(struct work_struct *work); work 1852 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static void kfd_process_hw_exception(struct work_struct *work) work 1854 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct device_queue_manager *dqm = container_of(work, work 143 drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c static void interrupt_wq(struct work_struct *work) work 145 drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c struct kfd_dev *dev = container_of(work, struct kfd_dev, work 68 drivers/gpu/drm/amd/amdkfd/kfd_process.c static void evict_process_worker(struct work_struct *work); work 69 drivers/gpu/drm/amd/amdkfd/kfd_process.c static void restore_process_worker(struct work_struct *work); work 449 drivers/gpu/drm/amd/amdkfd/kfd_process.c static void kfd_process_wq_release(struct work_struct *work) work 451 drivers/gpu/drm/amd/amdkfd/kfd_process.c struct kfd_process *p = container_of(work, struct kfd_process, work 1000 drivers/gpu/drm/amd/amdkfd/kfd_process.c static void evict_process_worker(struct work_struct *work) work 1006 drivers/gpu/drm/amd/amdkfd/kfd_process.c dwork = to_delayed_work(work); work 1037 drivers/gpu/drm/amd/amdkfd/kfd_process.c static void restore_process_worker(struct work_struct *work) work 1043 drivers/gpu/drm/amd/amdkfd/kfd_process.c dwork = to_delayed_work(work); work 75 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h struct work_struct work; work 112 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c static void dm_irq_work_func(struct work_struct *work) work 116 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c container_of(work, struct irq_list_head, work); work 387 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c INIT_WORK(&lh->work, dm_irq_work_func); work 415 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c flush_work(&lh->work); work 441 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c flush_work(&adev->dm.irq_handler_list_low_tab[src].work); work 506 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c struct work_struct *work = NULL; work 511 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c work = &adev->dm.irq_handler_list_low_tab[irq_source].work; work 515 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c if (work) { work 516 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c if (!schedule_work(work)) work 567 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c struct work_struct work; work 599 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c static void atmel_hlcdc_dc_atomic_work(struct work_struct *work) work 602 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c container_of(work, struct atmel_hlcdc_dc_commit, work); work 626 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c INIT_WORK(&commit->work, atmel_hlcdc_dc_atomic_work); work 646 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c queue_work(dc->wq, 
&commit->work); work 417 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c static void adv7511_hpd_work(struct work_struct *work) work 419 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work); work 2133 drivers/gpu/drm/bridge/sil-sii8620.c static void sii8620_extcon_work(struct work_struct *work) work 2136 drivers/gpu/drm/bridge/sil-sii8620.c container_of(work, struct sii8620, extcon_wq); work 175 drivers/gpu/drm/bridge/ti-tfp410.c static void tfp410_hpd_work_func(struct work_struct *work) work 179 drivers/gpu/drm/bridge/ti-tfp410.c dvi = container_of(work, struct tfp410, hpd_work.work); work 1633 drivers/gpu/drm/drm_atomic_helper.c static void commit_work(struct work_struct *work) work 1635 drivers/gpu/drm/drm_atomic_helper.c struct drm_atomic_state *state = container_of(work, work 162 drivers/gpu/drm/drm_connector.c void drm_connector_free_work_fn(struct work_struct *work) work 166 drivers/gpu/drm/drm_connector.c container_of(work, struct drm_device, mode_config.connector_free_work); work 183 drivers/gpu/drm/drm_crtc_internal.h void drm_connector_free_work_fn(struct work_struct *work); work 279 drivers/gpu/drm/drm_dp_cec.c static void drm_dp_cec_unregister_work(struct work_struct *work) work 281 drivers/gpu/drm/drm_dp_cec.c struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux, work 282 drivers/gpu/drm/drm_dp_cec.c cec.unregister_work.work); work 1044 drivers/gpu/drm/drm_dp_helper.c static void drm_dp_aux_crc_work(struct work_struct *work) work 1046 drivers/gpu/drm/drm_dp_helper.c struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux, work 1759 drivers/gpu/drm/drm_dp_mst_topology.c queue_work(system_long_wq, &mstb->mgr->work); work 1875 drivers/gpu/drm/drm_dp_mst_topology.c static void drm_dp_mst_link_probe_work(struct work_struct *work) work 1877 drivers/gpu/drm/drm_dp_mst_topology.c struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); work 2748 drivers/gpu/drm/drm_dp_mst_topology.c queue_work(system_long_wq, &mgr->work); work 2790 drivers/gpu/drm/drm_dp_mst_topology.c flush_work(&mgr->work); work 3729 drivers/gpu/drm/drm_dp_mst_topology.c static void drm_dp_tx_work(struct work_struct *work) work 3731 drivers/gpu/drm/drm_dp_mst_topology.c struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); work 3739 drivers/gpu/drm/drm_dp_mst_topology.c static void drm_dp_destroy_connector_work(struct work_struct *work) work 3741 drivers/gpu/drm/drm_dp_mst_topology.c struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); work 3961 drivers/gpu/drm/drm_dp_mst_topology.c INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); work 4005 drivers/gpu/drm/drm_dp_mst_topology.c flush_work(&mgr->work); work 371 drivers/gpu/drm/drm_fb_helper.c static void drm_fb_helper_resume_worker(struct work_struct *work) work 373 drivers/gpu/drm/drm_fb_helper.c struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, work 399 drivers/gpu/drm/drm_fb_helper.c static void drm_fb_helper_dirty_work(struct work_struct *work) work 401 drivers/gpu/drm/drm_fb_helper.c struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, work 57 drivers/gpu/drm/drm_flip_work.c void drm_flip_work_queue_task(struct drm_flip_work *work, work 62 drivers/gpu/drm/drm_flip_work.c spin_lock_irqsave(&work->lock, flags); work 63 drivers/gpu/drm/drm_flip_work.c list_add_tail(&task->node, &work->queued); work 64 
drivers/gpu/drm/drm_flip_work.c spin_unlock_irqrestore(&work->lock, flags); work 76 drivers/gpu/drm/drm_flip_work.c void drm_flip_work_queue(struct drm_flip_work *work, void *val) work 83 drivers/gpu/drm/drm_flip_work.c drm_flip_work_queue_task(work, task); work 85 drivers/gpu/drm/drm_flip_work.c DRM_ERROR("%s could not allocate task!\n", work->name); work 86 drivers/gpu/drm/drm_flip_work.c work->func(work, val); work 101 drivers/gpu/drm/drm_flip_work.c void drm_flip_work_commit(struct drm_flip_work *work, work 106 drivers/gpu/drm/drm_flip_work.c spin_lock_irqsave(&work->lock, flags); work 107 drivers/gpu/drm/drm_flip_work.c list_splice_tail(&work->queued, &work->commited); work 108 drivers/gpu/drm/drm_flip_work.c INIT_LIST_HEAD(&work->queued); work 109 drivers/gpu/drm/drm_flip_work.c spin_unlock_irqrestore(&work->lock, flags); work 110 drivers/gpu/drm/drm_flip_work.c queue_work(wq, &work->worker); work 116 drivers/gpu/drm/drm_flip_work.c struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker); work 124 drivers/gpu/drm/drm_flip_work.c spin_lock_irqsave(&work->lock, flags); work 125 drivers/gpu/drm/drm_flip_work.c list_splice_tail(&work->commited, &tasks); work 126 drivers/gpu/drm/drm_flip_work.c INIT_LIST_HEAD(&work->commited); work 127 drivers/gpu/drm/drm_flip_work.c spin_unlock_irqrestore(&work->lock, flags); work 133 drivers/gpu/drm/drm_flip_work.c work->func(work, task->data); work 147 drivers/gpu/drm/drm_flip_work.c void drm_flip_work_init(struct drm_flip_work *work, work 150 drivers/gpu/drm/drm_flip_work.c work->name = name; work 151 drivers/gpu/drm/drm_flip_work.c INIT_LIST_HEAD(&work->queued); work 152 drivers/gpu/drm/drm_flip_work.c INIT_LIST_HEAD(&work->commited); work 153 drivers/gpu/drm/drm_flip_work.c spin_lock_init(&work->lock); work 154 drivers/gpu/drm/drm_flip_work.c work->func = func; work 156 drivers/gpu/drm/drm_flip_work.c INIT_WORK(&work->worker, flip_worker); work 166 drivers/gpu/drm/drm_flip_work.c void drm_flip_work_cleanup(struct drm_flip_work *work) work 168 drivers/gpu/drm/drm_flip_work.c WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited)); work 398 drivers/gpu/drm/drm_framebuffer.c struct work_struct work; work 404 drivers/gpu/drm/drm_framebuffer.c struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work); work 467 drivers/gpu/drm/drm_framebuffer.c INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); work 471 drivers/gpu/drm/drm_framebuffer.c schedule_work(&arg.work); work 472 drivers/gpu/drm/drm_framebuffer.c flush_work(&arg.work); work 473 drivers/gpu/drm/drm_framebuffer.c destroy_work_on_stack(&arg.work); work 682 drivers/gpu/drm/drm_framebuffer.c INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); work 684 drivers/gpu/drm/drm_framebuffer.c schedule_work(&arg.work); work 685 drivers/gpu/drm/drm_framebuffer.c flush_work(&arg.work); work 686 drivers/gpu/drm/drm_framebuffer.c destroy_work_on_stack(&arg.work); work 576 drivers/gpu/drm/drm_probe_helper.c static void output_poll_execute(struct work_struct *work) work 578 drivers/gpu/drm/drm_probe_helper.c struct delayed_work *delayed_work = to_delayed_work(work); work 680 drivers/gpu/drm/drm_probe_helper.c struct work_struct *work = current_work(); work 682 drivers/gpu/drm/drm_probe_helper.c return work && work->func == output_poll_execute; work 67 drivers/gpu/drm/drm_self_refresh_helper.c static void drm_self_refresh_helper_entry_work(struct work_struct *work) work 70 drivers/gpu/drm/drm_self_refresh_helper.c to_delayed_work(work), work 341 
drivers/gpu/drm/drm_writeback.c static void cleanup_work(struct work_struct *work) work 343 drivers/gpu/drm/drm_writeback.c struct drm_writeback_job *job = container_of(work, work 1337 drivers/gpu/drm/etnaviv/etnaviv_gpu.c static void sync_point_worker(struct work_struct *work) work 1339 drivers/gpu/drm/etnaviv/etnaviv_gpu.c struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, work 870 drivers/gpu/drm/exynos/exynos_drm_g2d.c static void g2d_runqueue_worker(struct work_struct *work) work 872 drivers/gpu/drm/exynos/exynos_drm_g2d.c struct g2d_data *g2d = container_of(work, struct g2d_data, work 746 drivers/gpu/drm/exynos/exynos_drm_ipp.c static void exynos_drm_ipp_cleanup_work(struct work_struct *work) work 748 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct exynos_drm_ipp_task *task = container_of(work, work 1656 drivers/gpu/drm/exynos/exynos_hdmi.c static void hdmi_hotplug_work_func(struct work_struct *work) work 1660 drivers/gpu/drm/exynos/exynos_hdmi.c hdata = container_of(work, struct hdmi_context, hotplug_work.work); work 421 drivers/gpu/drm/gma500/cdv_device.c static void cdv_hotplug_work_func(struct work_struct *work) work 423 drivers/gpu/drm/gma500/cdv_device.c struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private, work 173 drivers/gpu/drm/gma500/opregion.c static void psb_intel_opregion_asle_work(struct work_struct *work) work 176 drivers/gpu/drm/gma500/opregion.c container_of(work, struct psb_intel_opregion, asle_work); work 773 drivers/gpu/drm/i2c/tda998x_drv.c static void tda998x_detect_work(struct work_struct *work) work 776 drivers/gpu/drm/i2c/tda998x_drv.c container_of(work, struct tda998x_priv, detect_work); work 13872 drivers/gpu/drm/i915/display/intel_display.c static void intel_atomic_helper_free_state_worker(struct work_struct *work) work 13875 drivers/gpu/drm/i915/display/intel_display.c container_of(work, typeof(*dev_priv), atomic_helper.free_work); work 13907 drivers/gpu/drm/i915/display/intel_display.c static void intel_atomic_cleanup_work(struct work_struct *work) work 13910 drivers/gpu/drm/i915/display/intel_display.c container_of(work, struct drm_atomic_state, commit_work); work 14105 drivers/gpu/drm/i915/display/intel_display.c static void intel_atomic_commit_work(struct work_struct *work) work 14108 drivers/gpu/drm/i915/display/intel_display.c container_of(work, struct intel_atomic_state, base.commit_work); work 1985 drivers/gpu/drm/i915/display/intel_display_power.c intel_display_power_put_async_work(struct work_struct *work) work 1988 drivers/gpu/drm/i915/display/intel_display_power.c container_of(work, struct drm_i915_private, work 1989 drivers/gpu/drm/i915/display/intel_display_power.c power_domains.async_put_work.work); work 6790 drivers/gpu/drm/i915/display/intel_dp.c cancel_delayed_work_sync(&dev_priv->drrs.work); work 6793 drivers/gpu/drm/i915/display/intel_dp.c static void intel_edp_drrs_downclock_work(struct work_struct *work) work 6796 drivers/gpu/drm/i915/display/intel_dp.c container_of(work, typeof(*dev_priv), drrs.work.work); work 6844 drivers/gpu/drm/i915/display/intel_dp.c cancel_delayed_work(&dev_priv->drrs.work); work 6887 drivers/gpu/drm/i915/display/intel_dp.c cancel_delayed_work(&dev_priv->drrs.work); work 6911 drivers/gpu/drm/i915/display/intel_dp.c schedule_delayed_work(&dev_priv->drrs.work, work 6973 drivers/gpu/drm/i915/display/intel_dp.c INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); work 7112 drivers/gpu/drm/i915/display/intel_dp.c static void 
intel_dp_modeset_retry_work_fn(struct work_struct *work) work 7117 drivers/gpu/drm/i915/display/intel_dp.c intel_connector = container_of(work, typeof(*intel_connector), work 1165 drivers/gpu/drm/i915/display/intel_fbc.c static void intel_fbc_underrun_work_fn(struct work_struct *work) work 1168 drivers/gpu/drm/i915/display/intel_fbc.c container_of(work, struct drm_i915_private, fbc.underrun_work); work 433 drivers/gpu/drm/i915/display/intel_fbdev.c static void intel_fbdev_suspend_worker(struct work_struct *work) work 435 drivers/gpu/drm/i915/display/intel_fbdev.c intel_fbdev_set_suspend(&container_of(work, work 864 drivers/gpu/drm/i915/display/intel_hdcp.c static void intel_hdcp_prop_work(struct work_struct *work) work 866 drivers/gpu/drm/i915/display/intel_hdcp.c struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, work 1707 drivers/gpu/drm/i915/display/intel_hdcp.c static void intel_hdcp_check_work(struct work_struct *work) work 1709 drivers/gpu/drm/i915/display/intel_hdcp.c struct intel_hdcp *hdcp = container_of(to_delayed_work(work), work 231 drivers/gpu/drm/i915/display/intel_hotplug.c static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) work 234 drivers/gpu/drm/i915/display/intel_hotplug.c container_of(work, typeof(*dev_priv), work 235 drivers/gpu/drm/i915/display/intel_hotplug.c hotplug.reenable_work.work); work 308 drivers/gpu/drm/i915/display/intel_hotplug.c static void i915_digport_work_func(struct work_struct *work) work 311 drivers/gpu/drm/i915/display/intel_hotplug.c container_of(work, struct drm_i915_private, hotplug.dig_port_work); work 358 drivers/gpu/drm/i915/display/intel_hotplug.c static void i915_hotplug_work_func(struct work_struct *work) work 361 drivers/gpu/drm/i915/display/intel_hotplug.c container_of(work, struct drm_i915_private, work 362 drivers/gpu/drm/i915/display/intel_hotplug.c hotplug.hotplug_work.work); work 597 drivers/gpu/drm/i915/display/intel_hotplug.c static void i915_hpd_poll_init_work(struct work_struct *work) work 600 drivers/gpu/drm/i915/display/intel_hotplug.c container_of(work, struct drm_i915_private, work 556 drivers/gpu/drm/i915/display/intel_opregion.c static void asle_work(struct work_struct *work) work 559 drivers/gpu/drm/i915/display/intel_opregion.c container_of(work, struct intel_opregion, asle_work); work 229 drivers/gpu/drm/i915/display/intel_psr.c schedule_work(&dev_priv->psr.work); work 861 drivers/gpu/drm/i915/display/intel_psr.c cancel_work_sync(&dev_priv->psr.work); work 920 drivers/gpu/drm/i915/display/intel_psr.c schedule_work(&dev_priv->psr.work); work 1090 drivers/gpu/drm/i915/display/intel_psr.c static void intel_psr_work(struct work_struct *work) work 1093 drivers/gpu/drm/i915/display/intel_psr.c container_of(work, typeof(*dev_priv), psr.work); work 1198 drivers/gpu/drm/i915/display/intel_psr.c schedule_work(&dev_priv->psr.work); work 1249 drivers/gpu/drm/i915/display/intel_psr.c INIT_WORK(&dev_priv->psr.work, intel_psr_work); work 54 drivers/gpu/drm/i915/gem/i915_gem_clflush.c .work = clflush_work, work 97 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c struct work_struct work; work 132 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c static void clear_pages_signal_irq_worker(struct irq_work *work) work 134 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c struct clear_pages_work *w = container_of(work, typeof(*w), irq_work); work 155 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c static void clear_pages_worker(struct work_struct *work) work 157 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c struct 
clear_pages_work *w = container_of(work, typeof(*w), work); work 250 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c schedule_work(&w->work); work 270 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c struct clear_pages_work *work; work 278 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c work = kmalloc(sizeof(*work), GFP_KERNEL); work 279 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c if (!work) { work 284 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c work->value = value; work 285 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c work->sleeve = sleeve; work 286 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c work->ce = ce; work 288 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c INIT_WORK(&work->work, clear_pages_worker); work 290 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c init_irq_work(&work->irq_work, clear_pages_signal_irq_worker); work 292 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0); work 293 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c i915_sw_fence_init(&work->wait, clear_pages_work_notify); work 296 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c err = i915_sw_fence_await_reservation(&work->wait, work 301 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c dma_fence_set_error(&work->dma, err); work 303 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c dma_resv_add_excl_fence(obj->base.resv, &work->dma); work 308 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c dma_fence_get(&work->dma); work 309 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c i915_sw_fence_commit(&work->wait); work 362 drivers/gpu/drm/i915/gem/i915_gem_context.c static void contexts_free_worker(struct work_struct *work) work 365 drivers/gpu/drm/i915/gem/i915_gem_context.c container_of(work, typeof(*i915), contexts.free_work); work 205 drivers/gpu/drm/i915/gem/i915_gem_object.c static void __i915_gem_free_work(struct work_struct *work) work 208 drivers/gpu/drm/i915/gem/i915_gem_object.c container_of(work, struct drm_i915_private, mm.free_work); work 236 drivers/gpu/drm/i915/gem/i915_gem_object_types.h struct work_struct *work; work 45 drivers/gpu/drm/i915/gem/i915_gem_pm.c static void idle_work_handler(struct work_struct *work) work 48 drivers/gpu/drm/i915/gem/i915_gem_pm.c container_of(work, typeof(*i915), gem.idle_work); work 56 drivers/gpu/drm/i915/gem/i915_gem_pm.c !work_pending(work)); work 68 drivers/gpu/drm/i915/gem/i915_gem_pm.c static void retire_work_handler(struct work_struct *work) work 71 drivers/gpu/drm/i915/gem/i915_gem_pm.c container_of(work, typeof(*i915), gem.retire_work.work); work 165 drivers/gpu/drm/i915/gem/i915_gem_pm.c cancel_delayed_work_sync(&i915->gt.hangcheck.work); work 20 drivers/gpu/drm/i915/gem/i915_gem_pm.h void i915_gem_idle_work_handler(struct work_struct *work); work 26 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct work_struct work; work 389 drivers/gpu/drm/i915/gem/i915_gem_userptr.c __i915_mm_struct_free__worker(struct work_struct *work) work 391 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct i915_mm_struct *mm = container_of(work, typeof(*mm), work); work 406 drivers/gpu/drm/i915/gem/i915_gem_userptr.c INIT_WORK(&mm->work, __i915_mm_struct_free__worker); work 407 drivers/gpu/drm/i915/gem/i915_gem_userptr.c queue_work(mm->i915->mm.userptr_wq, &mm->work); work 423 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct work_struct work; work 474 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct get_pages_work *work = container_of(_work, typeof(*work), work); work 475 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct 
drm_i915_gem_object *obj = work->obj; work 497 drivers/gpu/drm/i915/gem/i915_gem_userptr.c (work->task, mm, work 513 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (obj->userptr.work == &work->work) { work 525 drivers/gpu/drm/i915/gem/i915_gem_userptr.c obj->userptr.work = ERR_CAST(pages); work 535 drivers/gpu/drm/i915/gem/i915_gem_userptr.c put_task_struct(work->task); work 536 drivers/gpu/drm/i915/gem/i915_gem_userptr.c kfree(work); work 542 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct get_pages_work *work; work 563 drivers/gpu/drm/i915/gem/i915_gem_userptr.c work = kmalloc(sizeof(*work), GFP_KERNEL); work 564 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (work == NULL) work 567 drivers/gpu/drm/i915/gem/i915_gem_userptr.c obj->userptr.work = &work->work; work 569 drivers/gpu/drm/i915/gem/i915_gem_userptr.c work->obj = i915_gem_object_get(obj); work 571 drivers/gpu/drm/i915/gem/i915_gem_userptr.c work->task = current; work 572 drivers/gpu/drm/i915/gem/i915_gem_userptr.c get_task_struct(work->task); work 574 drivers/gpu/drm/i915/gem/i915_gem_userptr.c INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker); work 575 drivers/gpu/drm/i915/gem/i915_gem_userptr.c queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work); work 606 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (obj->userptr.work) { work 608 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (IS_ERR(obj->userptr.work)) work 609 drivers/gpu/drm/i915/gem/i915_gem_userptr.c return PTR_ERR(obj->userptr.work); work 666 drivers/gpu/drm/i915/gem/i915_gem_userptr.c obj->userptr.work = NULL; work 209 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c static void signal_irq_work(struct irq_work *work) work 212 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c container_of(work, typeof(*engine), breadcrumbs.irq_work); work 33 drivers/gpu/drm/i915/gt/intel_gt_types.h struct delayed_work work; work 255 drivers/gpu/drm/i915/gt/intel_hangcheck.c static void hangcheck_elapsed(struct work_struct *work) work 258 drivers/gpu/drm/i915/gt/intel_hangcheck.c container_of(work, typeof(*gt), hangcheck.work.work); work 344 drivers/gpu/drm/i915/gt/intel_hangcheck.c queue_delayed_work(system_long_wq, >->hangcheck.work, delay); work 355 drivers/gpu/drm/i915/gt/intel_hangcheck.c INIT_DELAYED_WORK(>->hangcheck.work, hangcheck_elapsed); work 1278 drivers/gpu/drm/i915/gt/intel_reset.c static void intel_wedge_me(struct work_struct *work) work 1280 drivers/gpu/drm/i915/gt/intel_reset.c struct intel_wedge_me *w = container_of(work, typeof(*w), work.work); work 1296 drivers/gpu/drm/i915/gt/intel_reset.c INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me); work 1297 drivers/gpu/drm/i915/gt/intel_reset.c schedule_delayed_work(&w->work, timeout); work 1302 drivers/gpu/drm/i915/gt/intel_reset.c cancel_delayed_work_sync(&w->work); work 1303 drivers/gpu/drm/i915/gt/intel_reset.c destroy_delayed_work_on_stack(&w->work); work 53 drivers/gpu/drm/i915/gt/intel_reset.h struct delayed_work work; work 1757 drivers/gpu/drm/i915/gt/selftest_hangcheck.c drain_delayed_work(>->hangcheck.work); /* flush param */ work 321 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c static void capture_logs_work(struct work_struct *work) work 324 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c container_of(work, struct intel_guc_log, relay.flush_work); work 117 drivers/gpu/drm/i915/gvt/kvmgt.c static void intel_vgpu_release_work(struct work_struct *work); work 849 drivers/gpu/drm/i915/gvt/kvmgt.c static void intel_vgpu_release_work(struct work_struct *work) work 851 drivers/gpu/drm/i915/gvt/kvmgt.c struct 
intel_vgpu *vgpu = container_of(work, struct intel_vgpu, work 1042 drivers/gpu/drm/i915/i915_debugfs.c if (timer_pending(>->hangcheck.work.timer)) work 1044 drivers/gpu/drm/i915/i915_debugfs.c jiffies_to_msecs(gt->hangcheck.work.timer.expires - work 1046 drivers/gpu/drm/i915/i915_debugfs.c else if (delayed_work_pending(>->hangcheck.work)) work 1662 drivers/gpu/drm/i915/i915_drv.c cancel_delayed_work_sync(&i915->gt.hangcheck.work); work 323 drivers/gpu/drm/i915/i915_drv.h struct work_struct work; work 460 drivers/gpu/drm/i915/i915_drv.h struct delayed_work work; work 483 drivers/gpu/drm/i915/i915_drv.h struct work_struct work; work 545 drivers/gpu/drm/i915/i915_drv.h struct work_struct work; work 1732 drivers/gpu/drm/i915/i915_drv.h struct work_struct work; work 517 drivers/gpu/drm/i915/i915_gem_gtt.c static void __i915_vm_release(struct work_struct *work) work 520 drivers/gpu/drm/i915/i915_gem_gtt.c container_of(work, struct i915_address_space, rcu.work); work 23 drivers/gpu/drm/i915/i915_globals.c struct rcu_work work; work 40 drivers/gpu/drm/i915/i915_globals.c static void __i915_globals_park(struct work_struct *work) work 88 drivers/gpu/drm/i915/i915_globals.c INIT_RCU_WORK(&park.work, __i915_globals_park); work 107 drivers/gpu/drm/i915/i915_globals.c queue_rcu_work(system_wq, &park.work); work 120 drivers/gpu/drm/i915/i915_globals.c flush_rcu_work(&park.work); work 411 drivers/gpu/drm/i915/i915_irq.c cancel_work_sync(&rps->work); work 1169 drivers/gpu/drm/i915/i915_irq.c static void gen6_pm_rps_work(struct work_struct *work) work 1172 drivers/gpu/drm/i915/i915_irq.c container_of(work, struct drm_i915_private, gt_pm.rps.work); work 1277 drivers/gpu/drm/i915/i915_irq.c static void ivybridge_parity_work(struct work_struct *work) work 1280 drivers/gpu/drm/i915/i915_irq.c container_of(work, typeof(*dev_priv), l3_parity.error_work); work 1675 drivers/gpu/drm/i915/i915_irq.c schedule_work(&rps->work); work 1688 drivers/gpu/drm/i915/i915_irq.c schedule_work(&rps->work); work 2960 drivers/gpu/drm/i915/i915_irq.c schedule_work(&dev_priv->i945gm_vblank.work); work 3040 drivers/gpu/drm/i915/i915_irq.c schedule_work(&dev_priv->i945gm_vblank.work); work 3079 drivers/gpu/drm/i915/i915_irq.c static void i945gm_vblank_work_func(struct work_struct *work) work 3082 drivers/gpu/drm/i915/i915_irq.c container_of(work, struct drm_i915_private, i945gm_vblank.work); work 3117 drivers/gpu/drm/i915/i915_irq.c INIT_WORK(&dev_priv->i945gm_vblank.work, work 3129 drivers/gpu/drm/i915/i915_irq.c cancel_work_sync(&dev_priv->i945gm_vblank.work); work 4328 drivers/gpu/drm/i915/i915_irq.c INIT_WORK(&rps->work, gen6_pm_rps_work); work 43 drivers/gpu/drm/i915/i915_request.c struct irq_work work; work 125 drivers/gpu/drm/i915/i915_request.c struct execute_cb *cb = container_of(wrk, typeof(*cb), work); work 133 drivers/gpu/drm/i915/i915_request.c struct execute_cb *cb = container_of(wrk, typeof(*cb), work); work 152 drivers/gpu/drm/i915/i915_request.c irq_work_queue(&cb->work); work 356 drivers/gpu/drm/i915/i915_request.c init_irq_work(&cb->work, irq_execute_cb); work 361 drivers/gpu/drm/i915/i915_request.c cb->work.func = irq_execute_cb_hook; work 376 drivers/gpu/drm/i915/i915_sw_fence.c struct irq_work work; work 420 drivers/gpu/drm/i915/i915_sw_fence.c irq_work_queue(&cb->work); work 426 drivers/gpu/drm/i915/i915_sw_fence.c container_of(wrk, typeof(*cb), work); work 469 drivers/gpu/drm/i915/i915_sw_fence.c init_irq_work(&timer->work, irq_i915_sw_fence_work); work 9 drivers/gpu/drm/i915/i915_sw_fence_work.c static void 
fence_work(struct work_struct *work) work 11 drivers/gpu/drm/i915/i915_sw_fence_work.c struct dma_fence_work *f = container_of(work, typeof(*f), work); work 14 drivers/gpu/drm/i915/i915_sw_fence_work.c err = f->ops->work(f); work 33 drivers/gpu/drm/i915/i915_sw_fence_work.c queue_work(system_unbound_wq, &f->work); work 84 drivers/gpu/drm/i915/i915_sw_fence_work.c INIT_WORK(&f->work, fence_work); work 20 drivers/gpu/drm/i915/i915_sw_fence_work.h int (*work)(struct dma_fence_work *f); work 31 drivers/gpu/drm/i915/i915_sw_fence_work.h struct work_struct work; work 329 drivers/gpu/drm/i915/i915_sysfs.c schedule_work(&rps->work); work 623 drivers/gpu/drm/i915/intel_csr.c static void csr_load_work_fn(struct work_struct *work) work 629 drivers/gpu/drm/i915/intel_csr.c dev_priv = container_of(work, typeof(*dev_priv), csr.work); work 666 drivers/gpu/drm/i915/intel_csr.c INIT_WORK(&dev_priv->csr.work, csr_load_work_fn); work 730 drivers/gpu/drm/i915/intel_csr.c schedule_work(&dev_priv->csr.work); work 746 drivers/gpu/drm/i915/intel_csr.c flush_work(&dev_priv->csr.work); work 6899 drivers/gpu/drm/i915/intel_pm.c schedule_work(&rps->work); work 72 drivers/gpu/drm/i915/intel_wakeref.c INTEL_WAKEREF_BUG_ON(work_pending(&wf->work)); work 77 drivers/gpu/drm/i915/intel_wakeref.c schedule_work(&wf->work); work 86 drivers/gpu/drm/i915/intel_wakeref.c struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work); work 107 drivers/gpu/drm/i915/intel_wakeref.c INIT_WORK(&wf->work, __intel_wakeref_put_work); work 46 drivers/gpu/drm/i915/intel_wakeref.h struct work_struct work; work 506 drivers/gpu/drm/i915/selftests/i915_sw_fence.c struct work_struct work; work 512 drivers/gpu/drm/i915/selftests/i915_sw_fence.c static void task_ipc(struct work_struct *work) work 514 drivers/gpu/drm/i915/selftests/i915_sw_fence.c struct task_ipc *ipc = container_of(work, typeof(*ipc), work); work 542 drivers/gpu/drm/i915/selftests/i915_sw_fence.c INIT_WORK_ONSTACK(&ipc.work, task_ipc); work 543 drivers/gpu/drm/i915/selftests/i915_sw_fence.c schedule_work(&ipc.work); work 561 drivers/gpu/drm/i915/selftests/i915_sw_fence.c flush_work(&ipc.work); work 562 drivers/gpu/drm/i915/selftests/i915_sw_fence.c destroy_work_on_stack(&ipc.work); work 106 drivers/gpu/drm/i915/selftests/mock_gem_device.c static void mock_retire_work_handler(struct work_struct *work) work 110 drivers/gpu/drm/i915/selftests/mock_gem_device.c static void mock_idle_work_handler(struct work_struct *work) work 321 drivers/gpu/drm/lima/lima_sched.c static void lima_sched_error_work(struct work_struct *work) work 324 drivers/gpu/drm/lima/lima_sched.c container_of(work, struct lima_sched_pipe, error_work); work 43 drivers/gpu/drm/mediatek/mtk_drm_drv.c schedule_work(&private->commit.work); work 78 drivers/gpu/drm/mediatek/mtk_drm_drv.c static void mtk_atomic_work(struct work_struct *work) work 80 drivers/gpu/drm/mediatek/mtk_drm_drv.c struct mtk_drm_private *private = container_of(work, work 81 drivers/gpu/drm/mediatek/mtk_drm_drv.c struct mtk_drm_private, commit.work); work 98 drivers/gpu/drm/mediatek/mtk_drm_drv.c flush_work(&private->commit.work); work 499 drivers/gpu/drm/mediatek/mtk_drm_drv.c INIT_WORK(&private->commit.work, mtk_atomic_work); work 49 drivers/gpu/drm/mediatek/mtk_drm_drv.h struct work_struct work; work 297 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c static void dpu_crtc_frame_event_work(struct kthread_work *work) work 299 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c struct dpu_crtc_frame_event *fevent = container_of(work, work 300 
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c struct dpu_crtc_frame_event, work); work 391 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work); work 1316 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c kthread_init_work(&dpu_crtc->frame_events[i].work, work 95 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h struct kthread_work work; work 1402 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c static void dpu_encoder_off_work(struct work_struct *work) work 1404 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c struct dpu_encoder_virt *dpu_enc = container_of(work, work 1405 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c struct dpu_encoder_virt, delayed_off_work.work); work 1759 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work) work 1761 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c struct dpu_encoder_virt *dpu_enc = container_of(work, work 114 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c static void unref_cursor_worker(struct drm_flip_work *work, void *val) work 117 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c container_of(work, struct mdp4_crtc, unref_cursor_work); work 161 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c static void unref_cursor_worker(struct drm_flip_work *work, void *val) work 164 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c container_of(work, struct mdp5_crtc, unref_cursor_work); work 1408 drivers/gpu/drm/msm/dsi/dsi_host.c static void dsi_hpd_worker(struct work_struct *work) work 1411 drivers/gpu/drm/msm/dsi/dsi_host.c container_of(work, struct msm_dsi_host, hpd_work); work 1416 drivers/gpu/drm/msm/dsi/dsi_host.c static void dsi_err_worker(struct work_struct *work) work 1419 drivers/gpu/drm/msm/dsi/dsi_host.c container_of(work, struct msm_dsi_host, err_work); work 951 drivers/gpu/drm/msm/edp/edp_ctrl.c static void edp_ctrl_on_worker(struct work_struct *work) work 954 drivers/gpu/drm/msm/edp/edp_ctrl.c work, struct edp_ctrl, on_work); work 991 drivers/gpu/drm/msm/edp/edp_ctrl.c static void edp_ctrl_off_worker(struct work_struct *work) work 994 drivers/gpu/drm/msm/edp/edp_ctrl.c work, struct edp_ctrl, off_work); work 237 drivers/gpu/drm/msm/hdmi/hdmi_connector.c msm_hdmi_hotplug_work(struct work_struct *work) work 240 drivers/gpu/drm/msm/hdmi/hdmi_connector.c container_of(work, struct hdmi_connector, hpd_work); work 396 drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c static void msm_hdmi_hdcp_reauth_work(struct work_struct *work) work 398 drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work, work 1234 drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c static void msm_hdmi_hdcp_auth_work(struct work_struct *work) work 1236 drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work, work 76 drivers/gpu/drm/msm/msm_atomic.c queue_work(priv->wq, &timer->work); work 81 drivers/gpu/drm/msm/msm_atomic.c static void msm_atomic_pending_work(struct work_struct *work) work 83 drivers/gpu/drm/msm/msm_atomic.c struct msm_pending_timer *timer = container_of(work, work 84 drivers/gpu/drm/msm/msm_atomic.c struct msm_pending_timer, work); work 96 drivers/gpu/drm/msm/msm_atomic.c INIT_WORK(&timer->work, msm_atomic_pending_work); work 169 drivers/gpu/drm/msm/msm_drv.c struct work_struct work; work 175 drivers/gpu/drm/msm/msm_drv.c static void vblank_ctrl_worker(struct work_struct *work) work 177 drivers/gpu/drm/msm/msm_drv.c struct msm_vblank_work *vbl_work = container_of(work, work 178 drivers/gpu/drm/msm/msm_drv.c struct msm_vblank_work, work);
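The dpu_crtc.c entries above use kthread_work instead of a workqueue: the driver owns a dedicated kthread_worker (one per CRTC event thread) and queues kthread_work items to it, trading the shared worker pool for a private thread with predictable scheduling. A sketch of that pattern under the assumption of a single worker; all names (demo_event, demo_event_fn, demo_start) are hypothetical, and kthread_create_worker() is one of several ways to set the worker up.

    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/kthread.h>

    struct demo_event {                     /* hypothetical event container */
            struct kthread_work work;
    };

    static void demo_event_fn(struct kthread_work *work)
    {
            struct demo_event *ev = container_of(work, struct demo_event, work);

            (void)ev;                       /* runs on the dedicated kthread, in order */
    }

    static int demo_start(struct demo_event *ev, struct kthread_worker **out)
    {
            struct kthread_worker *worker = kthread_create_worker(0, "demo-evt");

            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_work(&ev->work, demo_event_fn);
            kthread_queue_work(worker, &ev->work);
            *out = worker;                  /* tear down with kthread_destroy_worker() */
            return 0;
    }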
work 199 drivers/gpu/drm/msm/msm_drv.c INIT_WORK(&vbl_work->work, vblank_ctrl_worker); work 205 drivers/gpu/drm/msm/msm_drv.c queue_work(priv->wq, &vbl_work->work); work 324 drivers/gpu/drm/msm/msm_drv.h void msm_gem_free_work(struct work_struct *work); work 931 drivers/gpu/drm/msm/msm_gem.c void msm_gem_free_work(struct work_struct *work) work 934 drivers/gpu/drm/msm/msm_gem.c container_of(work, struct msm_drm_private, free_work); work 123 drivers/gpu/drm/msm/msm_gem.h void msm_gem_free_work(struct work_struct *work); work 419 drivers/gpu/drm/msm/msm_gpu.c static void recover_worker(struct work_struct *work) work 421 drivers/gpu/drm/msm/msm_gpu.c struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); work 706 drivers/gpu/drm/msm/msm_gpu.c static void retire_worker(struct work_struct *work) work 708 drivers/gpu/drm/msm/msm_gpu.c struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); work 139 drivers/gpu/drm/msm/msm_kms.h struct work_struct work; work 2019 drivers/gpu/drm/nouveau/dispnv50/disp.c nv50_disp_atomic_commit_work(struct work_struct *work) work 2022 drivers/gpu/drm/nouveau/dispnv50/disp.c container_of(work, typeof(*state), commit_work); work 24 drivers/gpu/drm/nouveau/include/nvif/notify.h struct work_struct work; work 28 drivers/gpu/drm/nouveau/include/nvif/notify.h bool work, u8 type, void *data, u32 size, u32 reply, work 23 drivers/gpu/drm/nouveau/include/nvkm/core/notify.h struct work_struct work; work 32 drivers/gpu/drm/nouveau/include/nvkm/core/notify.h int (*func)(struct nvkm_notify *), bool work, work 12 drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h struct work_struct work; work 93 drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h struct work_struct work; work 22 drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h struct work_struct work; work 349 drivers/gpu/drm/nouveau/nouveau_display.c nouveau_display_hpd_work(struct work_struct *work) work 351 drivers/gpu/drm/nouveau/nouveau_display.c struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work); work 135 drivers/gpu/drm/nouveau/nouveau_drm.c struct nouveau_cli *cli = container_of(w, typeof(*cli), work); work 136 drivers/gpu/drm/nouveau/nouveau_drm.c struct nouveau_cli_work *work, *wtmp; work 138 drivers/gpu/drm/nouveau/nouveau_drm.c list_for_each_entry_safe(work, wtmp, &cli->worker, head) { work 139 drivers/gpu/drm/nouveau/nouveau_drm.c if (!work->fence || nouveau_cli_work_ready(work->fence)) { work 140 drivers/gpu/drm/nouveau/nouveau_drm.c list_del(&work->head); work 141 drivers/gpu/drm/nouveau/nouveau_drm.c work->func(work); work 150 drivers/gpu/drm/nouveau/nouveau_drm.c struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb); work 151 drivers/gpu/drm/nouveau/nouveau_drm.c schedule_work(&work->cli->work); work 156 drivers/gpu/drm/nouveau/nouveau_drm.c struct nouveau_cli_work *work) work 158 drivers/gpu/drm/nouveau/nouveau_drm.c work->fence = dma_fence_get(fence); work 159 drivers/gpu/drm/nouveau/nouveau_drm.c work->cli = cli; work 161 drivers/gpu/drm/nouveau/nouveau_drm.c list_add_tail(&work->head, &cli->worker); work 162 drivers/gpu/drm/nouveau/nouveau_drm.c if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence)) work 163 drivers/gpu/drm/nouveau/nouveau_drm.c nouveau_cli_work_fence(fence, &work->cb); work 175 drivers/gpu/drm/nouveau/nouveau_drm.c flush_work(&cli->work); work 223 drivers/gpu/drm/nouveau/nouveau_drm.c INIT_WORK(&cli->work, nouveau_cli_work); work 109 drivers/gpu/drm/nouveau/nouveau_drv.h struct work_struct work; work 445
drivers/gpu/drm/nouveau/nouveau_fbcon.c nouveau_fbcon_set_suspend_work(struct work_struct *work) work 447 drivers/gpu/drm/nouveau/nouveau_fbcon.c struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); work 91 drivers/gpu/drm/nouveau/nouveau_gem.c struct nouveau_cli_work work; work 105 drivers/gpu/drm/nouveau/nouveau_gem.c struct nouveau_gem_object_unmap *work = work 106 drivers/gpu/drm/nouveau/nouveau_gem.c container_of(w, typeof(*work), work); work 107 drivers/gpu/drm/nouveau/nouveau_gem.c nouveau_gem_object_delete(work->vma); work 108 drivers/gpu/drm/nouveau/nouveau_gem.c kfree(work); work 115 drivers/gpu/drm/nouveau/nouveau_gem.c struct nouveau_gem_object_unmap *work; work 124 drivers/gpu/drm/nouveau/nouveau_gem.c if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) { work 130 drivers/gpu/drm/nouveau/nouveau_gem.c work->work.func = nouveau_gem_object_delete_work; work 131 drivers/gpu/drm/nouveau/nouveau_gem.c work->vma = vma; work 132 drivers/gpu/drm/nouveau/nouveau_gem.c nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work); work 57 drivers/gpu/drm/nouveau/nvif/notify.c flush_work(&notify->work); work 105 drivers/gpu/drm/nouveau/nvif/notify.c nvif_notify_work(struct work_struct *work) work 107 drivers/gpu/drm/nouveau/nvif/notify.c struct nvif_notify *notify = container_of(work, typeof(*notify), work); work 132 drivers/gpu/drm/nouveau/nvif/notify.c schedule_work(&notify->work); work 166 drivers/gpu/drm/nouveau/nvif/notify.c bool work, u8 event, void *data, u32 size, u32 reply, work 182 drivers/gpu/drm/nouveau/nvif/notify.c if (work) { work 183 drivers/gpu/drm/nouveau/nvif/notify.c INIT_WORK(&notify->work, nvif_notify_work); work 45 drivers/gpu/drm/nouveau/nvkm/core/notify.c flush_work(&notify->work); work 84 drivers/gpu/drm/nouveau/nvkm/core/notify.c nvkm_notify_work(struct work_struct *work) work 86 drivers/gpu/drm/nouveau/nvkm/core/notify.c struct nvkm_notify *notify = container_of(work, typeof(*notify), work); work 109 drivers/gpu/drm/nouveau/nvkm/core/notify.c schedule_work(&notify->work); work 133 drivers/gpu/drm/nouveau/nvkm/core/notify.c int (*func)(struct nvkm_notify *), bool work, work 146 drivers/gpu/drm/nouveau/nvkm/core/notify.c if (ret = 0, work) { work 147 drivers/gpu/drm/nouveau/nvkm/core/notify.c INIT_WORK(&notify->work, nvkm_notify_work); work 34 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c gf119_disp_super(struct work_struct *work) work 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c container_of(work, struct nv50_disp, supervisor); work 40 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c gv100_disp_super(struct work_struct *work) work 43 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c container_of(work, struct nv50_disp, supervisor); work 544 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super(struct work_struct *work) work 547 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c container_of(work, struct nv50_disp, supervisor); work 147 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work); work 193 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c schedule_work(&fifo->recover.work); work 617 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c flush_work(&fifo->recover.work); work 692 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work); work 16 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h struct work_struct work; work 277 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c struct gk104_fifo *fifo = container_of(w, typeof(*fifo),
recover.work); work 326 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c schedule_work(&fifo->recover.work); work 461 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c schedule_work(&fifo->recover.work); work 869 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c flush_work(&fifo->recover.work); work 1052 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work); work 17 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h struct work_struct work; work 47 drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c schedule_work(&sec2->work); work 60 drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c nvkm_sec2_recv(struct work_struct *work) work 62 drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work); work 93 drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c flush_work(&sec2->work); work 114 drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c INIT_WORK(&sec2->work, nvkm_sec2_recv); work 298 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c nvkm_pstate_work(struct work_struct *work) work 300 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c struct nvkm_clk *clk = container_of(work, typeof(*clk), work); work 338 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c schedule_work(&clk->work); work 584 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c flush_work(&clk->work); work 677 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c INIT_WORK(&clk->work, nvkm_pstate_work); work 55 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c nvkm_pmu_recv(struct work_struct *work) work 57 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work); work 87 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c flush_work(&pmu->recv.work); work 168 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c INIT_WORK(&pmu->recv.work, nvkm_pmu_recv); work 158 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c schedule_work(&pmu->recv.work); work 74 drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c nv_poweroff_work(struct work_struct *work) work 77 drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c kfree(work); work 120 drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c struct work_struct *work; work 122 drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 123 drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c if (work) { work 124 drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c INIT_WORK(work, nv_poweroff_work); work 125 drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c schedule_work(work); work 95 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c static void dsicm_te_timeout_work_callback(struct work_struct *work); work 100 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c static void dsicm_ulps_work(struct work_struct *work); work 861 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c static void dsicm_te_timeout_work_callback(struct work_struct *work) work 863 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, work 864 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c te_timeout_work.work); work 1087 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c static void dsicm_ulps_work(struct work_struct *work) work 1089 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, work 1090 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c ulps_work.work); work 3910 drivers/gpu/drm/omapdrm/dss/dsi.c static void dsi_framedone_timeout_work_callback(struct work_struct *work)
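The nvkm/subdev/therm/temp.c entries just above show a fire-and-forget work item: the work_struct itself is kmalloc'd (GFP_ATOMIC, since the thermal alarm fires in atomic context), scheduled, and then kfree'd by its own handler. A sketch of that idiom; demo_oneshot_fn and demo_oneshot_schedule are hypothetical names. Note there is no handle left to cancel or flush, so this only suits paths where the work may safely outlive its submitter.

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    /* The handler owns the allocation and frees it when done. */
    static void demo_oneshot_fn(struct work_struct *work)
    {
            /* ... act in process context, e.g. orderly_poweroff() ... */
            kfree(work);
    }

    /* Callable from atomic context, hence GFP_ATOMIC. */
    static void demo_oneshot_schedule(void)
    {
            struct work_struct *work = kmalloc(sizeof(*work), GFP_ATOMIC);

            if (work) {
                    INIT_WORK(work, demo_oneshot_fn);
                    schedule_work(work);
            }
    }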
work 3912 drivers/gpu/drm/omapdrm/dss/dsi.c struct dsi_data *dsi = container_of(work, struct dsi_data, work 3913 drivers/gpu/drm/omapdrm/dss/dsi.c framedone_timeout_work.work); work 364 drivers/gpu/drm/omapdrm/omap_crtc.c container_of(data, struct omap_crtc, update_work.work); work 32 drivers/gpu/drm/omapdrm/omap_fbdev.c struct work_struct work; work 37 drivers/gpu/drm/omapdrm/omap_fbdev.c static void pan_worker(struct work_struct *work) work 39 drivers/gpu/drm/omapdrm/omap_fbdev.c struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work); work 61 drivers/gpu/drm/omapdrm/omap_fbdev.c pan_worker(&fbdev->work); work 64 drivers/gpu/drm/omapdrm/omap_fbdev.c queue_work(priv->wq, &fbdev->work); work 239 drivers/gpu/drm/omapdrm/omap_fbdev.c INIT_WORK(&fbdev->work, pan_worker); work 74 drivers/gpu/drm/qxl/qxl_irq.c static void qxl_client_monitors_config_work_func(struct work_struct *work) work 76 drivers/gpu/drm/qxl/qxl_irq.c struct qxl_device *qdev = container_of(work, struct qxl_device, work 102 drivers/gpu/drm/qxl/qxl_kms.c static void qxl_gc_work(struct work_struct *work) work 104 drivers/gpu/drm/qxl/qxl_kms.c struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work); work 8113 drivers/gpu/drm/radeon/cik.c schedule_work(&rdev->pm.dpm.thermal.work); work 4920 drivers/gpu/drm/radeon/evergreen.c schedule_work(&rdev->pm.dpm.thermal.work); work 4332 drivers/gpu/drm/radeon/r600.c schedule_work(&rdev->pm.dpm.thermal.work); work 114 drivers/gpu/drm/radeon/r600_hdmi.c void r600_audio_update_hdmi(struct work_struct *work) work 116 drivers/gpu/drm/radeon/r600_hdmi.c struct radeon_device *rdev = container_of(work, struct radeon_device, work 1351 drivers/gpu/drm/radeon/radeon.h struct work_struct work; work 2870 drivers/gpu/drm/radeon/radeon.h void r600_audio_update_hdmi(struct work_struct *work); work 265 drivers/gpu/drm/radeon/radeon_display.c struct radeon_flip_work *work = work 270 drivers/gpu/drm/radeon/radeon_display.c r = radeon_bo_reserve(work->old_rbo, false); work 272 drivers/gpu/drm/radeon/radeon_display.c r = radeon_bo_unpin(work->old_rbo); work 276 drivers/gpu/drm/radeon/radeon_display.c radeon_bo_unreserve(work->old_rbo); work 280 drivers/gpu/drm/radeon/radeon_display.c drm_gem_object_put_unlocked(&work->old_rbo->tbo.base); work 281 drivers/gpu/drm/radeon/radeon_display.c kfree(work); work 368 drivers/gpu/drm/radeon/radeon_display.c struct radeon_flip_work *work; work 376 drivers/gpu/drm/radeon/radeon_display.c work = radeon_crtc->flip_work; work 391 drivers/gpu/drm/radeon/radeon_display.c if (work->event) work 392 drivers/gpu/drm/radeon/radeon_display.c drm_crtc_send_vblank_event(&radeon_crtc->base, work->event); work 397 drivers/gpu/drm/radeon/radeon_display.c radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); work 398 drivers/gpu/drm/radeon/radeon_display.c queue_work(radeon_crtc->flip_queue, &work->unpin_work); work 410 drivers/gpu/drm/radeon/radeon_display.c struct radeon_flip_work *work = work 412 drivers/gpu/drm/radeon/radeon_display.c struct radeon_device *rdev = work->rdev; work 414 drivers/gpu/drm/radeon/radeon_display.c struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; work 422 drivers/gpu/drm/radeon/radeon_display.c if (work->fence) { work 425 drivers/gpu/drm/radeon/radeon_display.c fence = to_radeon_fence(work->fence); work 436 drivers/gpu/drm/radeon/radeon_display.c r = dma_fence_wait(work->fence, false); work 446 drivers/gpu/drm/radeon/radeon_display.c dma_fence_put(work->fence); work 447 drivers/gpu/drm/radeon/radeon_display.c
work->fence = NULL; work 456 drivers/gpu/drm/radeon/radeon_display.c (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0, work 462 drivers/gpu/drm/radeon/radeon_display.c ((int) (work->target_vblank - work 463 drivers/gpu/drm/radeon/radeon_display.c dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0))) work 473 drivers/gpu/drm/radeon/radeon_display.c radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async); work 491 drivers/gpu/drm/radeon/radeon_display.c struct radeon_flip_work *work; work 498 drivers/gpu/drm/radeon/radeon_display.c work = kzalloc(sizeof *work, GFP_KERNEL); work 499 drivers/gpu/drm/radeon/radeon_display.c if (work == NULL) work 502 drivers/gpu/drm/radeon/radeon_display.c INIT_WORK(&work->flip_work, radeon_flip_work_func); work 503 drivers/gpu/drm/radeon/radeon_display.c INIT_WORK(&work->unpin_work, radeon_unpin_work_func); work 505 drivers/gpu/drm/radeon/radeon_display.c work->rdev = rdev; work 506 drivers/gpu/drm/radeon/radeon_display.c work->crtc_id = radeon_crtc->crtc_id; work 507 drivers/gpu/drm/radeon/radeon_display.c work->event = event; work 508 drivers/gpu/drm/radeon/radeon_display.c work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; work 515 drivers/gpu/drm/radeon/radeon_display.c work->old_rbo = gem_to_radeon_bo(obj); work 522 drivers/gpu/drm/radeon/radeon_display.c work->old_rbo, new_rbo); work 538 drivers/gpu/drm/radeon/radeon_display.c work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv)); work 577 drivers/gpu/drm/radeon/radeon_display.c work->base = base; work 578 drivers/gpu/drm/radeon/radeon_display.c work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + work 579 drivers/gpu/drm/radeon/radeon_display.c dev->driver->get_vblank_counter(dev, work->crtc_id); work 591 drivers/gpu/drm/radeon/radeon_display.c radeon_crtc->flip_work = work; work 598 drivers/gpu/drm/radeon/radeon_display.c queue_work(radeon_crtc->flip_queue, &work->flip_work); work 612 drivers/gpu/drm/radeon/radeon_display.c drm_gem_object_put_unlocked(&work->old_rbo->tbo.base); work 613 drivers/gpu/drm/radeon/radeon_display.c dma_fence_put(work->fence); work 614 drivers/gpu/drm/radeon/radeon_display.c kfree(work); work 275 drivers/gpu/drm/radeon/radeon_fence.c static void radeon_fence_check_lockup(struct work_struct *work) work 281 drivers/gpu/drm/radeon/radeon_fence.c fence_drv = container_of(work, struct radeon_fence_driver, work 282 drivers/gpu/drm/radeon/radeon_fence.c lockup_work.work); work 81 drivers/gpu/drm/radeon/radeon_irq_kms.c static void radeon_hotplug_work_func(struct work_struct *work) work 83 drivers/gpu/drm/radeon/radeon_irq_kms.c struct radeon_device *rdev = container_of(work, struct radeon_device, work 84 drivers/gpu/drm/radeon/radeon_irq_kms.c hotplug_work.work); work 102 drivers/gpu/drm/radeon/radeon_irq_kms.c static void radeon_dp_work_func(struct work_struct *work) work 104 drivers/gpu/drm/radeon/radeon_irq_kms.c struct radeon_device *rdev = container_of(work, struct radeon_device, work 49 drivers/gpu/drm/radeon/radeon_pm.c static void radeon_dynpm_idle_work_handler(struct work_struct *work); work 822 drivers/gpu/drm/radeon/radeon_pm.c static void radeon_dpm_thermal_work_handler(struct work_struct *work) work 825 drivers/gpu/drm/radeon/radeon_pm.c container_of(work, struct radeon_device, work 826 drivers/gpu/drm/radeon/radeon_pm.c pm.dpm.thermal.work); work 1391 drivers/gpu/drm/radeon/radeon_pm.c INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
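The radeon_display.c entries above queue their flip and unpin items on radeon_crtc->flip_queue, a driver-private workqueue, rather than on the shared system queue: flips for one CRTC must not race each other, and destroy_workqueue() gives a single teardown point that drains everything. A sketch of that private-queue idiom; demo_wq, struct demo_flip, and the demo_* functions are hypothetical, and alloc_ordered_workqueue() is used here as one reasonable choice of queue.

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct demo_flip {                      /* hypothetical, after radeon_flip_work */
            struct work_struct flip_work;
    };

    static struct workqueue_struct *demo_wq;

    static void demo_flip_fn(struct work_struct *work)
    {
            struct demo_flip *f = container_of(work, struct demo_flip, flip_work);

            /* ... wait for fences, program the flip ... */
            kfree(f);
    }

    static int demo_init(void)
    {
            /* ordered: at most one item runs at a time, in queue order */
            demo_wq = alloc_ordered_workqueue("demo-flip", 0);
            return demo_wq ? 0 : -ENOMEM;
    }

    static int demo_submit(void)
    {
            struct demo_flip *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return -ENOMEM;
            INIT_WORK(&f->flip_work, demo_flip_fn);
            queue_work(demo_wq, &f->flip_work);     /* not schedule_work() */
            return 0;
    }

    static void demo_fini(void)
    {
            destroy_workqueue(demo_wq);             /* drains pending items */
    }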
work 1798 drivers/gpu/drm/radeon/radeon_pm.c static void radeon_dynpm_idle_work_handler(struct work_struct *work) work 1802 drivers/gpu/drm/radeon/radeon_pm.c rdev = container_of(work, struct radeon_device, work 1803 drivers/gpu/drm/radeon/radeon_pm.c pm.dynpm_idle_work.work); work 64 drivers/gpu/drm/radeon/radeon_uvd.c static void radeon_uvd_idle_work_handler(struct work_struct *work); work 870 drivers/gpu/drm/radeon/radeon_uvd.c static void radeon_uvd_idle_work_handler(struct work_struct *work) work 873 drivers/gpu/drm/radeon/radeon_uvd.c container_of(work, struct radeon_device, uvd.idle_work.work); work 47 drivers/gpu/drm/radeon/radeon_vce.c static void radeon_vce_idle_work_handler(struct work_struct *work); work 262 drivers/gpu/drm/radeon/radeon_vce.c static void radeon_vce_idle_work_handler(struct work_struct *work) work 265 drivers/gpu/drm/radeon/radeon_vce.c container_of(work, struct radeon_device, vce.idle_work.work); work 6444 drivers/gpu/drm/radeon/si.c schedule_work(&rdev->pm.dpm.thermal.work); work 914 drivers/gpu/drm/rockchip/cdn-dp-core.c static void cdn_dp_pd_event_work(struct work_struct *work) work 916 drivers/gpu/drm/rockchip/cdn-dp-core.c struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device, work 1397 drivers/gpu/drm/rockchip/rockchip_drm_vop.c static void vop_fb_unref_worker(struct drm_flip_work *work, void *val) work 1399 drivers/gpu/drm/rockchip/rockchip_drm_vop.c struct vop *vop = container_of(work, struct vop, fb_unref_work); work 280 drivers/gpu/drm/scheduler/sched_main.c static void drm_sched_job_timedout(struct work_struct *work) work 286 drivers/gpu/drm/scheduler/sched_main.c sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); work 48 drivers/gpu/drm/tegra/dpaux.c struct work_struct work; work 62 drivers/gpu/drm/tegra/dpaux.c static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work) work 64 drivers/gpu/drm/tegra/dpaux.c return container_of(work, struct tegra_dpaux, work); work 252 drivers/gpu/drm/tegra/dpaux.c static void tegra_dpaux_hotplug(struct work_struct *work) work 254 drivers/gpu/drm/tegra/dpaux.c struct tegra_dpaux *dpaux = work_to_dpaux(work); work 271 drivers/gpu/drm/tegra/dpaux.c schedule_work(&dpaux->work); work 439 drivers/gpu/drm/tegra/dpaux.c INIT_WORK(&dpaux->work, tegra_dpaux_hotplug); work 562 drivers/gpu/drm/tegra/dpaux.c cancel_work_sync(&dpaux->work); work 2353 drivers/gpu/drm/tegra/sor.c static void tegra_sor_hdmi_scdc_work(struct work_struct *work) work 2355 drivers/gpu/drm/tegra/sor.c struct tegra_sor *sor = container_of(work, struct tegra_sor, scdc.work); work 550 drivers/gpu/drm/tilcdc/tilcdc_crtc.c static void tilcdc_crtc_recover_work(struct work_struct *work) work 553 drivers/gpu/drm/tilcdc/tilcdc_crtc.c container_of(work, struct tilcdc_crtc, recover_work); work 95 drivers/gpu/drm/tiny/gm12u320.c struct work_struct work; work 345 drivers/gpu/drm/tiny/gm12u320.c static void gm12u320_fb_update_work(struct work_struct *work) work 348 drivers/gpu/drm/tiny/gm12u320.c container_of(work, struct gm12u320_device, fb_update.work); work 468 drivers/gpu/drm/tiny/gm12u320.c queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work); work 478 drivers/gpu/drm/tiny/gm12u320.c cancel_work_sync(&gm12u320->fb_update.work); work 693 drivers/gpu/drm/tiny/gm12u320.c INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); work 661 drivers/gpu/drm/ttm/ttm_bo.c static void ttm_bo_delayed_workqueue(struct work_struct *work) work 664 drivers/gpu/drm/ttm/ttm_bo.c container_of(work, struct ttm_bo_device, wq.work); work 287
drivers/gpu/drm/ttm/ttm_memory.c static void ttm_shrink_work(struct work_struct *work) work 294 drivers/gpu/drm/ttm/ttm_memory.c container_of(work, struct ttm_mem_global, work); work 424 drivers/gpu/drm/ttm/ttm_memory.c INIT_WORK(&glob->work, ttm_shrink_work); work 502 drivers/gpu/drm/ttm/ttm_memory.c (void)queue_work(glob->swap_queue, &glob->work); work 122 drivers/gpu/drm/udl/udl_main.c static void udl_release_urb_work(struct work_struct *work) work 124 drivers/gpu/drm/udl/udl_main.c struct urb_node *unode = container_of(work, struct urb_node, work 125 drivers/gpu/drm/udl/udl_main.c release_urb_work.work); work 37 drivers/gpu/drm/v3d/v3d_irq.c v3d_overflow_mem_work(struct work_struct *work) work 40 drivers/gpu/drm/v3d/v3d_irq.c container_of(work, struct v3d_dev, overflow_mem_work); work 162 drivers/gpu/drm/vboxvideo/vbox_irq.c static void vbox_hotplug_worker(struct work_struct *work) work 164 drivers/gpu/drm/vboxvideo/vbox_irq.c struct vbox_private *vbox = container_of(work, struct vbox_private, work 589 drivers/gpu/drm/vc4/vc4_bo.c static void vc4_bo_cache_time_work(struct work_struct *work) work 592 drivers/gpu/drm/vc4/vc4_bo.c container_of(work, struct vc4_dev, bo_cache.time_work); work 303 drivers/gpu/drm/vc4/vc4_drv.h struct work_struct work; work 312 drivers/gpu/drm/vc4/vc4_gem.c vc4_reset_work(struct work_struct *work) work 315 drivers/gpu/drm/vc4/vc4_gem.c container_of(work, struct vc4_dev, hangcheck.reset_work); work 1008 drivers/gpu/drm/vc4/vc4_gem.c list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) { work 1010 drivers/gpu/drm/vc4/vc4_gem.c list_del_init(&cb->work.entry); work 1011 drivers/gpu/drm/vc4/vc4_gem.c schedule_work(&cb->work); work 1018 drivers/gpu/drm/vc4/vc4_gem.c static void vc4_seqno_cb_work(struct work_struct *work) work 1020 drivers/gpu/drm/vc4/vc4_gem.c struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work); work 1034 drivers/gpu/drm/vc4/vc4_gem.c INIT_WORK(&cb->work, vc4_seqno_cb_work); work 1039 drivers/gpu/drm/vc4/vc4_gem.c list_add_tail(&cb->work.entry, &vc4->seqno_cb_list); work 1041 drivers/gpu/drm/vc4/vc4_gem.c schedule_work(&cb->work); work 1053 drivers/gpu/drm/vc4/vc4_gem.c vc4_job_done_work(struct work_struct *work) work 1056 drivers/gpu/drm/vc4/vc4_gem.c container_of(work, struct vc4_dev, job_done_work); work 58 drivers/gpu/drm/vc4/vc4_irq.c vc4_overflow_mem_work(struct work_struct *work) work 61 drivers/gpu/drm/vc4/vc4_irq.c container_of(work, struct vc4_dev, overflow_mem_work); work 190 drivers/gpu/drm/vc4/vc4_kms.c static void commit_work(struct work_struct *work) work 192 drivers/gpu/drm/vc4/vc4_kms.c struct drm_atomic_state *state = container_of(work, work 493 drivers/gpu/drm/via/via_dmablit.c via_dmablit_workqueue(struct work_struct *work) work 495 drivers/gpu/drm/via/via_dmablit.c drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); work 324 drivers/gpu/drm/virtio/virtgpu_drv.h void virtio_gpu_dequeue_ctrl_func(struct work_struct *work); work 325 drivers/gpu/drm/virtio/virtgpu_drv.h void virtio_gpu_dequeue_cursor_func(struct work_struct *work); work 326 drivers/gpu/drm/virtio/virtgpu_drv.h void virtio_gpu_dequeue_fence_func(struct work_struct *work); work 33 drivers/gpu/drm/virtio/virtgpu_kms.c static void virtio_gpu_config_changed_work_func(struct work_struct *work) work 36 drivers/gpu/drm/virtio/virtgpu_kms.c container_of(work, struct virtio_gpu_device, work 74 drivers/gpu/drm/virtio/virtgpu_kms.c void (*work_func)(struct work_struct *work)) work 176 drivers/gpu/drm/virtio/virtgpu_vq.c void 
virtio_gpu_dequeue_ctrl_func(struct work_struct *work) work 179 drivers/gpu/drm/virtio/virtgpu_vq.c container_of(work, struct virtio_gpu_device, work 232 drivers/gpu/drm/virtio/virtgpu_vq.c void virtio_gpu_dequeue_cursor_func(struct work_struct *work) work 235 drivers/gpu/drm/virtio/virtgpu_vq.c container_of(work, struct virtio_gpu_device, work 151 drivers/gpu/drm/vkms/vkms_composer.c void vkms_composer_worker(struct work_struct *work) work 153 drivers/gpu/drm/vkms/vkms_composer.c struct vkms_crtc_state *crtc_state = container_of(work, work 143 drivers/gpu/drm/vkms/vkms_drv.h void vkms_composer_worker(struct work_struct *work); work 113 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c struct work_struct work; work 402 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c schedule_work(&man->work); work 512 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c static void vmw_cmdbuf_work_func(struct work_struct *work) work 515 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c container_of(work, struct vmw_cmdbuf_man, work); work 1345 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c INIT_WORK(&man->work, &vmw_cmdbuf_work_func); work 1411 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c (void) cancel_work_sync(&man->work); work 175 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c static void vmw_fb_dirty_flush(struct work_struct *work) work 177 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c struct vmw_fb_par *par = container_of(work, struct vmw_fb_par, work 178 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c local_work.work); work 39 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c struct work_struct work; work 260 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c static void vmw_fence_work_func(struct work_struct *work) work 263 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c container_of(work, struct vmw_fence_manager, work); work 311 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c INIT_WORK(&fman->work, &vmw_fence_work_func); work 328 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c (void) cancel_work_sync(&fman->work); work 503 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c (void) schedule_work(&fman->work); work 171 drivers/gpu/drm/xen/xen_drm_front_kms.c static void pflip_to_worker(struct work_struct *work) work 173 drivers/gpu/drm/xen/xen_drm_front_kms.c struct delayed_work *delayed_work = to_delayed_work(work); work 77 drivers/gpu/host1x/dev.h void (*syncpt_thresh_work)(struct work_struct *work)); work 246 drivers/gpu/host1x/hw/cdma_hw.c static void cdma_timeout_handler(struct work_struct *work) work 253 drivers/gpu/host1x/hw/cdma_hw.c cdma = container_of(to_delayed_work(work), struct host1x_cdma, work 30 drivers/gpu/host1x/hw/intr_hw.c schedule_work(&syncpt->intr.work); work 91 drivers/gpu/host1x/hw/intr_hw.c INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work); work 136 drivers/gpu/host1x/hw/intr_hw.c cancel_work_sync(&host->syncpt[i].intr.work); work 190 drivers/gpu/host1x/intr.c static void syncpt_thresh_work(struct work_struct *work) work 193 drivers/gpu/host1x/intr.c container_of(work, struct host1x_syncpt_intr, work); work 43 drivers/gpu/host1x/intr.h struct work_struct work; work 251 drivers/greybus/interface.c static void gb_interface_mode_switch_work(struct work_struct *work) work 258 drivers/greybus/interface.c intf = container_of(work, struct gb_interface, mode_switch_work); work 272 drivers/greybus/operation.c static void gb_operation_work(struct work_struct *work) work 277 drivers/greybus/operation.c operation = container_of(work, struct gb_operation, work); work 305 drivers/greybus/operation.c queue_work(gb_operation_completion_wq, &operation->work); work 551 drivers/greybus/operation.c INIT_WORK(&operation->work, gb_operation_work); 
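The drivers/greybus/operation.c entries above show the shape that dominates this whole listing: a work_struct embedded in a long-lived object, initialized once with INIT_WORK(), queued from the hot path, recovered with container_of() in the handler, and flushed before the object dies. A condensed sketch of that pattern; struct demo_op and the demo_* names are hypothetical stand-ins, not greybus symbols.

    #include <linux/workqueue.h>

    struct demo_op {                        /* hypothetical long-lived object */
            struct work_struct work;        /* embedded work item */
            int status;
    };

    static void demo_op_fn(struct work_struct *work)
    {
            /* recover the containing object from the member pointer */
            struct demo_op *op = container_of(work, struct demo_op, work);

            op->status = 0;                 /* complete in process context */
    }

    static void demo_op_init(struct demo_op *op)
    {
            INIT_WORK(&op->work, demo_op_fn);
    }

    static void demo_op_submit(struct demo_op *op)
    {
            schedule_work(&op->work);       /* or queue_work(some_wq, &op->work) */
    }

    static void demo_op_teardown(struct demo_op *op)
    {
            flush_work(&op->work);          /* or cancel_work_sync() */
    }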
work 900 drivers/greybus/operation.c &operation->work); work 946 drivers/greybus/operation.c queue_work(connection->wq, &operation->work); work 1015 drivers/greybus/operation.c queue_work(gb_operation_completion_wq, &operation->work); work 1078 drivers/greybus/operation.c queue_work(gb_operation_completion_wq, &operation->work); work 1103 drivers/greybus/operation.c flush_work(&operation->work); work 18 drivers/greybus/svc.c struct work_struct work; work 1078 drivers/greybus/svc.c static void gb_svc_process_deferred_request(struct work_struct *work) work 1085 drivers/greybus/svc.c dr = container_of(work, struct gb_svc_deferred_request, work); work 1126 drivers/greybus/svc.c INIT_WORK(&dr->work, gb_svc_process_deferred_request); work 1128 drivers/greybus/svc.c queue_work(svc->wq, &dr->work); work 16 drivers/greybus/svc_watchdog.c struct delayed_work work; work 44 drivers/greybus/svc_watchdog.c static void greybus_reset(struct work_struct *work) work 63 drivers/greybus/svc_watchdog.c static void do_work(struct work_struct *work) work 69 drivers/greybus/svc_watchdog.c watchdog = container_of(work, struct gb_svc_watchdog, work.work); work 104 drivers/greybus/svc_watchdog.c schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD); work 121 drivers/greybus/svc_watchdog.c INIT_DELAYED_WORK(&watchdog->work, do_work); work 179 drivers/greybus/svc_watchdog.c schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD); work 195 drivers/greybus/svc_watchdog.c cancel_delayed_work_sync(&watchdog->work); work 93 drivers/hid/hid-asus.c struct work_struct work; work 358 drivers/hid/hid-asus.c schedule_work(&led->work); work 369 drivers/hid/hid-asus.c static void asus_kbd_backlight_work(struct work_struct *work) work 371 drivers/hid/hid-asus.c struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work); work 439 drivers/hid/hid-asus.c INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work); work 941 drivers/hid/hid-asus.c cancel_work_sync(&drvdata->kbd_backlight->work); work 188 drivers/hid/hid-bigbenff.c static void bigben_worker(struct work_struct *work) work 190 drivers/hid/hid-bigbenff.c struct bigben_device *bigben = container_of(work, work 261 drivers/hid/hid-bigbenff.c bool work; work 271 drivers/hid/hid-bigbenff.c work = (bigben->led_state & BIT(n)); work 274 drivers/hid/hid-bigbenff.c work = !(bigben->led_state & BIT(n)); work 278 drivers/hid/hid-bigbenff.c if (work) { work 32 drivers/hid/hid-corsair.c struct work_struct work; work 198 drivers/hid/hid-corsair.c schedule_work(&led->work); work 201 drivers/hid/hid-corsair.c static void k90_backlight_work(struct work_struct *work) work 204 drivers/hid/hid-corsair.c struct k90_led *led = container_of(work, struct k90_led, work); work 226 drivers/hid/hid-corsair.c static void k90_record_led_work(struct work_struct *work) work 229 drivers/hid/hid-corsair.c struct k90_led *led = container_of(work, struct k90_led, work); work 450 drivers/hid/hid-corsair.c INIT_WORK(&drvdata->backlight->work, k90_backlight_work); work 495 drivers/hid/hid-corsair.c INIT_WORK(&k90->record_led.work, k90_record_led_work); work 511 drivers/hid/hid-corsair.c cancel_work_sync(&k90->record_led.work); work 528 drivers/hid/hid-corsair.c cancel_work_sync(&drvdata->backlight->work); work 544 drivers/hid/hid-corsair.c cancel_work_sync(&k90->record_led.work); work 1087 drivers/hid/hid-cp2112.c static void cp2112_gpio_poll_callback(struct work_struct *work) work 1089 drivers/hid/hid-cp2112.c struct cp2112_device *dev = container_of(work, struct cp2112_device, 
work 1090 drivers/hid/hid-cp2112.c gpio_poll_worker.work); work 30 drivers/hid/hid-elo.c struct delayed_work work; work 135 drivers/hid/hid-elo.c static void elo_work(struct work_struct *work) work 137 drivers/hid/hid-elo.c struct elo_priv *priv = container_of(work, struct elo_priv, work.work); work 182 drivers/hid/hid-elo.c queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); work 236 drivers/hid/hid-elo.c INIT_DELAYED_WORK(&priv->work, elo_work); work 255 drivers/hid/hid-elo.c queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); work 269 drivers/hid/hid-elo.c cancel_delayed_work_sync(&priv->work); work 48 drivers/hid/hid-gt683r.c struct work_struct work; work 73 drivers/hid/hid-gt683r.c schedule_work(&led->work); work 117 drivers/hid/hid-gt683r.c schedule_work(&led->work); work 178 drivers/hid/hid-gt683r.c static void gt683r_led_work(struct work_struct *work) work 183 drivers/hid/hid-gt683r.c struct gt683r_led *led = container_of(work, struct gt683r_led, work); work 236 drivers/hid/hid-gt683r.c INIT_WORK(&led->work, gt683r_led_work); work 294 drivers/hid/hid-gt683r.c flush_work(&led->work); work 1472 drivers/hid/hid-input.c static void hidinput_led_worker(struct work_struct *work) work 1474 drivers/hid/hid-input.c struct hid_device *hid = container_of(work, struct hid_device, work 144 drivers/hid/hid-logitech-dj.c struct work_struct work; work 515 drivers/hid/hid-logitech-dj.c static void delayedwork_callback(struct work_struct *work); work 603 drivers/hid/hid-logitech-dj.c INIT_WORK(&djrcv_dev->work, delayedwork_callback); work 750 drivers/hid/hid-logitech-dj.c static void delayedwork_callback(struct work_struct *work) work 753 drivers/hid/hid-logitech-dj.c container_of(work, struct dj_receiver_dev, work); work 783 drivers/hid/hid-logitech-dj.c schedule_work(&djrcv_dev->work); work 828 drivers/hid/hid-logitech-dj.c schedule_work(&djrcv_dev->work); work 866 drivers/hid/hid-logitech-dj.c schedule_work(&djrcv_dev->work); work 997 drivers/hid/hid-logitech-dj.c schedule_work(&djrcv_dev->work); work 1800 drivers/hid/hid-logitech-dj.c cancel_work_sync(&djrcv_dev->work); work 179 drivers/hid/hid-logitech-hidpp.c struct work_struct work; work 373 drivers/hid/hid-logitech-hidpp.c static void delayed_work_cb(struct work_struct *work) work 375 drivers/hid/hid-logitech-hidpp.c struct hidpp_device *hidpp = container_of(work, struct hidpp_device, work 376 drivers/hid/hid-logitech-hidpp.c work); work 1694 drivers/hid/hid-logitech-hidpp.c struct work_struct work; work 1752 drivers/hid/hid-logitech-hidpp.c struct hidpp_ff_work_data *wd = container_of(w, struct hidpp_ff_work_data, work); work 1826 drivers/hid/hid-logitech-hidpp.c INIT_WORK(&wd->work, hidpp_ff_work_handler); work 1835 drivers/hid/hid-logitech-hidpp.c queue_work(data->wq, &wd->work); work 3097 drivers/hid/hid-logitech-hidpp.c if (schedule_work(&hidpp->work) == 0) work 3596 drivers/hid/hid-logitech-hidpp.c INIT_WORK(&hidpp->work, delayed_work_cb); work 3685 drivers/hid/hid-logitech-hidpp.c cancel_work_sync(&hidpp->work); work 3700 drivers/hid/hid-logitech-hidpp.c cancel_work_sync(&hidpp->work); work 284 drivers/hid/hid-microsoft.c static void ms_ff_worker(struct work_struct *work) work 286 drivers/hid/hid-microsoft.c struct ms_data *ms = container_of(work, struct ms_data, ff_worker); work 72 drivers/hid/hid-picolcd_core.c struct picolcd_pending *work; work 81 drivers/hid/hid-picolcd_core.c work = kzalloc(sizeof(*work), GFP_KERNEL); work 82 drivers/hid/hid-picolcd_core.c if (!work) work 85 
drivers/hid/hid-picolcd_core.c init_completion(&work->ready); work 86 drivers/hid/hid-picolcd_core.c work->out_report = report; work 87 drivers/hid/hid-picolcd_core.c work->in_report = NULL; work 88 drivers/hid/hid-picolcd_core.c work->raw_size = 0; work 98 drivers/hid/hid-picolcd_core.c kfree(work); work 99 drivers/hid/hid-picolcd_core.c work = NULL; work 101 drivers/hid/hid-picolcd_core.c data->pending = work; work 104 drivers/hid/hid-picolcd_core.c wait_for_completion_interruptible_timeout(&work->ready, HZ*2); work 110 drivers/hid/hid-picolcd_core.c return work; work 313 drivers/hid/hid-rmi.c static void rmi_reset_work(struct work_struct *work) work 315 drivers/hid/hid-rmi.c struct rmi_data *hdata = container_of(work, struct rmi_data, work 1721 drivers/hid/hid-sony.c static void dualshock4_calibration_work(struct work_struct *work) work 1723 drivers/hid/hid-sony.c struct sony_sc *sc = container_of(work, struct sony_sc, hotplug_worker); work 2219 drivers/hid/hid-sony.c static void sony_state_worker(struct work_struct *work) work 2221 drivers/hid/hid-sony.c struct sony_sc *sc = container_of(work, struct sony_sc, state_worker); work 561 drivers/hid/hid-steam.c static void steam_work_connect_cb(struct work_struct *work) work 563 drivers/hid/hid-steam.c struct steam_device *steam = container_of(work, struct steam_device, work 41 drivers/hid/hid-wiimote-core.c static void wiimote_queue_worker(struct work_struct *work) work 43 drivers/hid/hid-wiimote-core.c struct wiimote_queue *queue = container_of(work, struct wiimote_queue, work 1203 drivers/hid/hid-wiimote-core.c static void wiimote_init_worker(struct work_struct *work) work 1205 drivers/hid/hid-wiimote-core.c struct wiimote_data *wdata = container_of(work, struct wiimote_data, work 120 drivers/hid/hid-wiimote-modules.c static void wiimod_rumble_worker(struct work_struct *work) work 122 drivers/hid/hid-wiimote-modules.c struct wiimote_data *wdata = container_of(work, struct wiimote_data, work 212 drivers/hid/intel-ish-hid/ipc/pci-ish.c static void __maybe_unused ish_resume_handler(struct work_struct *work) work 843 drivers/hid/intel-ish-hid/ishtp-fw-loader.c static void load_fw_from_host_handler(struct work_struct *work) work 847 drivers/hid/intel-ish-hid/ishtp-fw-loader.c client_data = container_of(work, struct ishtp_cl_data, work 920 drivers/hid/intel-ish-hid/ishtp-fw-loader.c static void reset_handler(struct work_struct *work) work 927 drivers/hid/intel-ish-hid/ishtp-fw-loader.c client_data = container_of(work, struct ishtp_cl_data, work 740 drivers/hid/intel-ish-hid/ishtp-hid-client.c static void hid_ishtp_cl_reset_handler(struct work_struct *work) work 748 drivers/hid/intel-ish-hid/ishtp-hid-client.c client_data = container_of(work, struct ishtp_cl_data, work); work 819 drivers/hid/intel-ish-hid/ishtp-hid-client.c INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler); work 879 drivers/hid/intel-ish-hid/ishtp-hid-client.c schedule_work(&client_data->work); work 142 drivers/hid/intel-ish-hid/ishtp-hid.h struct work_struct work; work 542 drivers/hid/intel-ish-hid/ishtp/bus.c static void ishtp_bus_event_work(struct work_struct *work) work 546 drivers/hid/intel-ish-hid/ishtp/bus.c device = container_of(work, struct ishtp_cl_device, event_work); work 740 drivers/hid/intel-ish-hid/ishtp/hbm.c void bh_hbm_work_fn(struct work_struct *work) work 746 drivers/hid/intel-ish-hid/ishtp/hbm.c dev = container_of(work, struct ishtp_device, bh_hbm_work); work 300 drivers/hid/intel-ish-hid/ishtp/hbm.h void bh_hbm_work_fn(struct work_struct *work); 
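The drivers/hid/hid-picolcd_core.c entries above pair an allocated request with a struct completion: the submitter parks on wait_for_completion_interruptible_timeout() while the device's response path fills the request and signals it ready. A sketch of that handshake, under the assumption that the response path ends with complete(&req->ready); struct demo_pending and demo_send_and_wait are hypothetical. The real driver also clears its published pointer under a lock before freeing, so a late response cannot touch freed memory.

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/slab.h>

    struct demo_pending {                   /* hypothetical request */
            struct completion ready;
            int raw_size;
    };

    static struct demo_pending *demo_send_and_wait(void)
    {
            struct demo_pending *req = kzalloc(sizeof(*req), GFP_KERNEL);
            long t;

            if (!req)
                    return NULL;
            init_completion(&req->ready);

            /* publish req where the IRQ path can find it, then send the
             * command; that path finishes with complete(&req->ready)   */

            t = wait_for_completion_interruptible_timeout(&req->ready, 2 * HZ);
            if (t <= 0) {                   /* timed out (0) or interrupted (<0) */
                    kfree(req);             /* unpublish under a lock first! */
                    return NULL;
            }
            return req;
    }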
work 57 drivers/hid/uhid.c static void uhid_device_add_worker(struct work_struct *work) work 59 drivers/hid/uhid.c struct uhid_device *uhid = container_of(work, struct uhid_device, worker); work 116 drivers/hid/usbhid/hid-core.c static void hid_reset(struct work_struct *work) work 119 drivers/hid/usbhid/hid-core.c container_of(work, struct usbhid_device, reset_work); work 235 drivers/hid/wacom.h void wacom_battery_work(struct work_struct *work); work 1666 drivers/hid/wacom_sys.c static void wacom_init_work(struct work_struct *work) work 1668 drivers/hid/wacom_sys.c struct wacom *wacom = container_of(work, struct wacom, init_work.work); work 2147 drivers/hid/wacom_sys.c void wacom_battery_work(struct work_struct *work) work 2149 drivers/hid/wacom_sys.c struct wacom *wacom = container_of(work, struct wacom, battery_work); work 2402 drivers/hid/wacom_sys.c static void wacom_wireless_work(struct work_struct *work) work 2404 drivers/hid/wacom_sys.c struct wacom *wacom = container_of(work, struct wacom, wireless_work); work 2609 drivers/hid/wacom_sys.c static void wacom_remote_work(struct work_struct *work) work 2611 drivers/hid/wacom_sys.c struct wacom *wacom = container_of(work, struct wacom, remote_work); work 2655 drivers/hid/wacom_sys.c static void wacom_mode_change_work(struct work_struct *work) work 2657 drivers/hid/wacom_sys.c struct wacom *wacom = container_of(work, struct wacom, mode_change_work); work 140 drivers/hsi/clients/ssi_protocol.c struct work_struct work; work 962 drivers/hsi/clients/ssi_protocol.c static void ssip_xmit_work(struct work_struct *work) work 965 drivers/hsi/clients/ssi_protocol.c container_of(work, struct ssi_protocol, work); work 1023 drivers/hsi/clients/ssi_protocol.c schedule_work(&ssi->work); work 1092 drivers/hsi/clients/ssi_protocol.c INIT_WORK(&ssi->work, ssip_xmit_work); work 96 drivers/hsi/controllers/omap_ssi.h struct work_struct work; work 174 drivers/hsi/controllers/omap_ssi_port.c static void ssi_process_errqueue(struct work_struct *work) work 180 drivers/hsi/controllers/omap_ssi_port.c omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work); work 585 drivers/hsi/controllers/omap_ssi_port.c static void start_tx_work(struct work_struct *work) work 588 drivers/hsi/controllers/omap_ssi_port.c container_of(work, struct omap_ssi_port, work); work 611 drivers/hsi/controllers/omap_ssi_port.c schedule_work(&omap_port->work); work 1180 drivers/hsi/controllers/omap_ssi_port.c INIT_WORK(&omap_port->work, start_tx_work); work 437 drivers/hv/channel_mgmt.c static void vmbus_add_channel_work(struct work_struct *work) work 440 drivers/hv/channel_mgmt.c container_of(work, struct vmbus_channel, add_channel_work); work 222 drivers/hv/hv_util.c static void hv_set_host_time(struct work_struct *work) work 1033 drivers/hv/vmbus_drv.c struct work_struct work; work 1037 drivers/hv/vmbus_drv.c static void vmbus_onmessage_work(struct work_struct *work) work 1045 drivers/hv/vmbus_drv.c ctx = container_of(work, struct onmessage_work_context, work 1046 drivers/hv/vmbus_drv.c work); work 1081 drivers/hv/vmbus_drv.c INIT_WORK(&ctx->work, vmbus_onmessage_work); work 1097 drivers/hv/vmbus_drv.c &ctx->work); work 1104 drivers/hv/vmbus_drv.c &ctx->work); work 1108 drivers/hv/vmbus_drv.c queue_work(vmbus_connection.work_queue, &ctx->work); work 1147 drivers/hv/vmbus_drv.c INIT_WORK(&ctx->work, vmbus_onmessage_work); work 1151 drivers/hv/vmbus_drv.c &ctx->work); work 102 drivers/hwmon/ab8500.c static void ab8500_thermal_power_off(struct work_struct *work) work 104 
drivers/hwmon/ab8500.c struct ab8500_temp *ab8500_data = container_of(work, work 105 drivers/hwmon/ab8500.c struct ab8500_temp, power_off_work.work); work 38 drivers/hwmon/abx500.c schedule_delayed_work(&data->work, DEFAULT_MONITOR_DELAY); work 51 drivers/hwmon/abx500.c cancel_delayed_work_sync(&data->work); work 55 drivers/hwmon/abx500.c static void gpadc_monitor(struct work_struct *work) work 62 drivers/hwmon/abx500.c data = container_of(work, struct abx500_temp, work.work); work 401 drivers/hwmon/abx500.c INIT_DEFERRABLE_WORK(&data->work, gpadc_monitor); work 436 drivers/hwmon/abx500.c cancel_delayed_work_sync(&data->work); work 449 drivers/hwmon/abx500.c cancel_delayed_work_sync(&data->work); work 60 drivers/hwmon/abx500.h struct delayed_work work; work 923 drivers/hwmon/applesmc.c static void applesmc_backlight_set(struct work_struct *work) work 58 drivers/hwmon/raspberrypi-hwmon.c static void get_values_poll(struct work_struct *work) work 62 drivers/hwmon/raspberrypi-hwmon.c data = container_of(work, struct rpi_hwmon_data, work 63 drivers/hwmon/raspberrypi-hwmon.c get_values_poll_work.work); work 436 drivers/hwmon/xgene-hwmon.c static void xgene_hwmon_evt_work(struct work_struct *work) work 442 drivers/hwmon/xgene-hwmon.c ctx = container_of(work, struct xgene_hwmon_dev, workq); work 140 drivers/hwtracing/coresight/coresight-etm-perf.c static void free_event_data(struct work_struct *work) work 146 drivers/hwtracing/coresight/coresight-etm-perf.c event_data = container_of(work, struct etm_event_data, work); work 204 drivers/hwtracing/coresight/coresight-etm-perf.c schedule_work(&event_data->work); work 219 drivers/hwtracing/coresight/coresight-etm-perf.c INIT_WORK(&event_data->work, free_event_data); work 54 drivers/hwtracing/coresight/coresight-etm-perf.h struct work_struct work; work 567 drivers/hwtracing/intel_th/core.c static void __intel_th_request_hub_module(struct work_struct *work) work 569 drivers/hwtracing/intel_th/core.c struct intel_th *th = container_of(work, struct intel_th, work 137 drivers/hwtracing/intel_th/msu.c struct work_struct work; work 1723 drivers/hwtracing/intel_th/msu.c static void msc_work(struct work_struct *work) work 1725 drivers/hwtracing/intel_th/msu.c struct msc *msc = container_of(work, struct msc, work); work 1760 drivers/hwtracing/intel_th/msu.c schedule_work(&msc->work); work 2092 drivers/hwtracing/intel_th/msu.c INIT_WORK(&msc->work, msc_work); work 192 drivers/i2c/busses/i2c-amd-mp2.h container_of(__work, struct amd_i2c_common, work.work) work 110 drivers/i2c/i2c-smbus.c static void smbalert_work(struct work_struct *work) work 114 drivers/i2c/i2c-smbus.c alert = container_of(work, struct i2c_smbus_alert, alert); work 2184 drivers/i3c/master.c queue_work(dev->common.master->wq, &slot->work); work 2188 drivers/i3c/master.c static void i3c_master_handle_ibi(struct work_struct *work) work 2190 drivers/i3c/master.c struct i3c_ibi_slot *slot = container_of(work, struct i3c_ibi_slot, work 2191 drivers/i3c/master.c work); work 2211 drivers/i3c/master.c INIT_WORK(&slot->work, i3c_master_handle_ibi); work 1515 drivers/i3c/master/i3c-master-cdns.c static void cdns_i3c_master_hj(struct work_struct *work) work 1517 drivers/i3c/master/i3c-master-cdns.c struct cdns_i3c_master *master = container_of(work, work 1158 drivers/ide/ide-probe.c static void drive_rq_insert_work(struct work_struct *work) work 1160 drivers/ide/ide-probe.c ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
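The drivers/hwmon/abx500.c entries above use INIT_DEFERRABLE_WORK rather than INIT_DELAYED_WORK: the underlying timer is deferrable, so it will not wake an otherwise idle CPU, which suits low-urgency polling like periodic sensor reads. A minimal self-rescheduling monitor sketch; demo_monitor and friends are hypothetical names.

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct demo_monitor {                   /* hypothetical */
            struct delayed_work work;
    };

    static void demo_monitor_fn(struct work_struct *work)
    {
            struct demo_monitor *m =
                    container_of(work, struct demo_monitor, work.work);

            /* ... sample the ADC / sensors ... */
            schedule_delayed_work(&m->work, 30 * HZ);   /* re-arm */
    }

    static void demo_monitor_start(struct demo_monitor *m)
    {
            /* deferrable: fires when the CPU wakes for other reasons */
            INIT_DEFERRABLE_WORK(&m->work, demo_monitor_fn);
            schedule_delayed_work(&m->work, 30 * HZ);
    }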
work 172 drivers/iio/adc/envelope-detector.c static void envelope_detector_timeout(struct work_struct *work) work 174 drivers/iio/adc/envelope-detector.c struct envelope *env = container_of(work, struct envelope, work 175 drivers/iio/adc/envelope-detector.c comp_timeout.work); work 255 drivers/iio/adc/xilinx-xadc-core.c static void xadc_zynq_unmask_worker(struct work_struct *work) work 257 drivers/iio/adc/xilinx-xadc-core.c struct xadc *xadc = container_of(work, struct xadc, zynq_unmask_work.work); work 125 drivers/iio/buffer/industrialio-buffer-dma.c static void iio_dma_buffer_cleanup_worker(struct work_struct *work) work 79 drivers/iio/chemical/atlas-ph-sensor.c struct irq_work work; work 346 drivers/iio/chemical/atlas-ph-sensor.c static void atlas_work_handler(struct irq_work *work) work 348 drivers/iio/chemical/atlas-ph-sensor.c struct atlas_data *data = container_of(work, struct atlas_data, work); work 378 drivers/iio/chemical/atlas-ph-sensor.c irq_work_queue(&data->work); work 587 drivers/iio/chemical/atlas-ph-sensor.c init_irq_work(&data->work, atlas_work_handler); work 193 drivers/iio/common/hid-sensors/hid-sensor-trigger.c static void hid_sensor_set_power_work(struct work_struct *work) work 195 drivers/iio/common/hid-sensors/hid-sensor-trigger.c struct hid_sensor_common *attrb = container_of(work, work 197 drivers/iio/common/hid-sensors/hid-sensor-trigger.c work); work 233 drivers/iio/common/hid-sensors/hid-sensor-trigger.c cancel_work_sync(&attrb->work); work 276 drivers/iio/common/hid-sensors/hid-sensor-trigger.c INIT_WORK(&attrb->work, hid_sensor_set_power_work); work 304 drivers/iio/common/hid-sensors/hid-sensor-trigger.c schedule_work(&attrb->work); work 155 drivers/iio/common/ssp_sensors/ssp_dev.c static void ssp_wdt_work_func(struct work_struct *work) work 157 drivers/iio/common/ssp_sensors/ssp_dev.c struct ssp_data *data = container_of(work, struct ssp_data, work_wdt); work 398 drivers/iio/common/ssp_sensors/ssp_dev.c static void ssp_refresh_task(struct work_struct *work) work 400 drivers/iio/common/ssp_sensors/ssp_dev.c struct ssp_data *data = container_of((struct delayed_work *)work, work 251 drivers/iio/light/gp2ap020a00f.c struct irq_work work; work 823 drivers/iio/light/gp2ap020a00f.c static void gp2ap020a00f_iio_trigger_work(struct irq_work *work) work 826 drivers/iio/light/gp2ap020a00f.c container_of(work, struct gp2ap020a00f_data, work); work 955 drivers/iio/light/gp2ap020a00f.c irq_work_queue(&priv->work); work 1566 drivers/iio/light/gp2ap020a00f.c init_irq_work(&data->work, gp2ap020a00f_iio_trigger_work); work 188 drivers/iio/light/tsl2563.c static void tsl2563_poweroff_work(struct work_struct *work) work 191 drivers/iio/light/tsl2563.c container_of(work, struct tsl2563_chip, poweroff_work.work); work 59 drivers/iio/proximity/as3935.c struct delayed_work work; work 242 drivers/iio/proximity/as3935.c static void as3935_event_work(struct work_struct *work) work 248 drivers/iio/proximity/as3935.c st = container_of(work, struct as3935_state, work.work); work 282 drivers/iio/proximity/as3935.c schedule_delayed_work(&st->work, msecs_to_jiffies(3)); work 353 drivers/iio/proximity/as3935.c cancel_delayed_work_sync(&st->work); work 441 drivers/iio/proximity/as3935.c INIT_DELAYED_WORK(&st->work, as3935_event_work); work 18 drivers/iio/trigger/iio-trig-sysfs.c struct irq_work work; work 92 drivers/iio/trigger/iio-trig-sysfs.c static void iio_sysfs_trigger_work(struct irq_work *work) work 94 drivers/iio/trigger/iio-trig-sysfs.c struct iio_sysfs_trig *trig = container_of(work, struct iio_sysfs_trig, work 95
drivers/iio/trigger/iio-trig-sysfs.c work); work 106 drivers/iio/trigger/iio-trig-sysfs.c irq_work_queue(&sysfs_trig->work); work 163 drivers/iio/trigger/iio-trig-sysfs.c init_irq_work(&t->work, iio_sysfs_trigger_work); work 65 drivers/infiniband/core/addr.c struct delayed_work work; work 305 drivers/infiniband/core/addr.c mod_delayed_work(addr_wq, &req->work, delay); work 623 drivers/infiniband/core/addr.c req = container_of(_work, struct addr_req, work.work); work 654 drivers/infiniband/core/addr.c cancel_delayed_work(&req->work); work 694 drivers/infiniband/core/addr.c INIT_DELAYED_WORK(&req->work, process_one_req); work 792 drivers/infiniband/core/addr.c cancel_delayed_work_sync(&found->work); work 53 drivers/infiniband/core/cache.c struct work_struct work; work 276 drivers/infiniband/core/cache.c static void free_gid_work(struct work_struct *work) work 279 drivers/infiniband/core/cache.c container_of(work, struct ib_gid_table_entry, del_work); work 1465 drivers/infiniband/core/cache.c struct ib_update_work *work = work 1466 drivers/infiniband/core/cache.c container_of(_work, struct ib_update_work, work); work 1472 drivers/infiniband/core/cache.c ret = ib_cache_update(work->event.device, work->event.element.port_num, work 1473 drivers/infiniband/core/cache.c work->enforce_security); work 1479 drivers/infiniband/core/cache.c if (!ret && work->event.event != IB_EVENT_GID_CHANGE) work 1480 drivers/infiniband/core/cache.c ib_dispatch_event_clients(&work->event); work 1482 drivers/infiniband/core/cache.c kfree(work); work 1487 drivers/infiniband/core/cache.c struct ib_update_work *work = work 1488 drivers/infiniband/core/cache.c container_of(_work, struct ib_update_work, work); work 1490 drivers/infiniband/core/cache.c ib_dispatch_event_clients(&work->event); work 1491 drivers/infiniband/core/cache.c kfree(work); work 1514 drivers/infiniband/core/cache.c struct ib_update_work *work; work 1516 drivers/infiniband/core/cache.c work = kzalloc(sizeof(*work), GFP_ATOMIC); work 1517 drivers/infiniband/core/cache.c if (!work) work 1521 drivers/infiniband/core/cache.c INIT_WORK(&work->work, ib_cache_event_task); work 1523 drivers/infiniband/core/cache.c INIT_WORK(&work->work, ib_generic_event_task); work 1525 drivers/infiniband/core/cache.c work->event = *event; work 1528 drivers/infiniband/core/cache.c work->enforce_security = true; work 1530 drivers/infiniband/core/cache.c queue_work(ib_wq, &work->work); work 238 drivers/infiniband/core/cm.c struct delayed_work work; work 249 drivers/infiniband/core/cm.c struct cm_work work; /* Must be first. 
*/ work 307 drivers/infiniband/core/cm.c static void cm_work_handler(struct work_struct *work); work 728 drivers/infiniband/core/cm.c __be32 remote_id = timewait_info->work.remote_id; work 734 drivers/infiniband/core/cm.c if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) work 736 drivers/infiniband/core/cm.c else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) work 760 drivers/infiniband/core/cm.c if (be32_lt(remote_id, timewait_info->work.remote_id)) work 762 drivers/infiniband/core/cm.c else if (be32_gt(remote_id, timewait_info->work.remote_id)) work 892 drivers/infiniband/core/cm.c struct cm_work *work; work 897 drivers/infiniband/core/cm.c work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); work 898 drivers/infiniband/core/cm.c list_del(&work->list); work 899 drivers/infiniband/core/cm.c return work; work 902 drivers/infiniband/core/cm.c static void cm_free_work(struct cm_work *work) work 904 drivers/infiniband/core/cm.c if (work->mad_recv_wc) work 905 drivers/infiniband/core/cm.c ib_free_recv_mad(work->mad_recv_wc); work 906 drivers/infiniband/core/cm.c kfree(work); work 955 drivers/infiniband/core/cm.c timewait_info->work.local_id = local_id; work 956 drivers/infiniband/core/cm.c INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); work 957 drivers/infiniband/core/cm.c timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; work 987 drivers/infiniband/core/cm.c queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, work 1011 drivers/infiniband/core/cm.c struct cm_work *work; work 1107 drivers/infiniband/core/cm.c while ((work = cm_dequeue_work(cm_id_priv)) != NULL) work 1108 drivers/infiniband/core/cm.c cm_free_work(work); work 1630 drivers/infiniband/core/cm.c static u16 cm_get_bth_pkey(struct cm_work *work) work 1632 drivers/infiniband/core/cm.c struct ib_device *ib_dev = work->port->cm_dev->ib_device; work 1633 drivers/infiniband/core/cm.c u8 port_num = work->port->port_num; work 1634 drivers/infiniband/core/cm.c u16 pkey_index = work->mad_recv_wc->wc->pkey_index; work 1657 drivers/infiniband/core/cm.c static void cm_opa_to_ib_sgid(struct cm_work *work, work 1660 drivers/infiniband/core/cm.c struct ib_device *dev = work->port->cm_dev->ib_device; work 1661 drivers/infiniband/core/cm.c u8 port_num = work->port->port_num; work 1677 drivers/infiniband/core/cm.c static void cm_format_req_event(struct cm_work *work, work 1684 drivers/infiniband/core/cm.c req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; work 1685 drivers/infiniband/core/cm.c param = &work->cm_event.param.req_rcvd; work 1687 drivers/infiniband/core/cm.c param->bth_pkey = cm_get_bth_pkey(work); work 1689 drivers/infiniband/core/cm.c param->primary_path = &work->path[0]; work 1690 drivers/infiniband/core/cm.c cm_opa_to_ib_sgid(work, param->primary_path); work 1692 drivers/infiniband/core/cm.c param->alternate_path = &work->path[1]; work 1693 drivers/infiniband/core/cm.c cm_opa_to_ib_sgid(work, param->alternate_path); work 1713 drivers/infiniband/core/cm.c work->cm_event.private_data = &req_msg->private_data; work 1717 drivers/infiniband/core/cm.c struct cm_work *work) work 1722 drivers/infiniband/core/cm.c ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); work 1723 drivers/infiniband/core/cm.c cm_free_work(work); work 1727 drivers/infiniband/core/cm.c work = cm_dequeue_work(cm_id_priv); work 1729 drivers/infiniband/core/cm.c if (!work) work 1733 drivers/infiniband/core/cm.c &work->cm_event); work 1734 drivers/infiniband/core/cm.c 
cm_free_work(work); work 1797 drivers/infiniband/core/cm.c static void cm_dup_req_handler(struct cm_work *work, work 1803 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. work 1810 drivers/infiniband/core/cm.c ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); work 1840 drivers/infiniband/core/cm.c static struct cm_id_private * cm_match_req(struct cm_work *work, work 1848 drivers/infiniband/core/cm.c req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; work 1854 drivers/infiniband/core/cm.c cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, work 1855 drivers/infiniband/core/cm.c timewait_info->work.remote_id); work 1858 drivers/infiniband/core/cm.c cm_dup_req_handler(work, cur_cm_id_priv); work 1868 drivers/infiniband/core/cm.c cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, work 1869 drivers/infiniband/core/cm.c timewait_info->work.remote_id); work 1872 drivers/infiniband/core/cm.c cm_issue_rej(work->port, work->mad_recv_wc, work 1889 drivers/infiniband/core/cm.c cm_issue_rej(work->port, work->mad_recv_wc, work 1931 drivers/infiniband/core/cm.c static int cm_req_handler(struct cm_work *work) work 1940 drivers/infiniband/core/cm.c req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; work 1942 drivers/infiniband/core/cm.c cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); work 1948 drivers/infiniband/core/cm.c ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work 1949 drivers/infiniband/core/cm.c work->mad_recv_wc->recv_buf.grh, work 1959 drivers/infiniband/core/cm.c cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; work 1963 drivers/infiniband/core/cm.c listen_cm_id_priv = cm_match_req(work, cm_id_priv); work 1976 drivers/infiniband/core/cm.c cm_process_routed_req(req_msg, work->mad_recv_wc->wc); work 1978 drivers/infiniband/core/cm.c memset(&work->path[0], 0, sizeof(work->path[0])); work 1980 drivers/infiniband/core/cm.c memset(&work->path[1], 0, sizeof(work->path[1])); work 1985 drivers/infiniband/core/cm.c rdma_protocol_roce(work->port->cm_dev->ib_device, work 1986 drivers/infiniband/core/cm.c work->port->port_num)) { work 1987 drivers/infiniband/core/cm.c work->path[0].rec_type = work 1990 drivers/infiniband/core/cm.c cm_path_set_rec_type(work->port->cm_dev->ib_device, work 1991 drivers/infiniband/core/cm.c work->port->port_num, work 1992 drivers/infiniband/core/cm.c &work->path[0], work 1996 drivers/infiniband/core/cm.c work->path[1].rec_type = work->path[0].rec_type; work 1997 drivers/infiniband/core/cm.c cm_format_paths_from_req(req_msg, &work->path[0], work 1998 drivers/infiniband/core/cm.c &work->path[1]); work 2000 drivers/infiniband/core/cm.c sa_path_set_dmac(&work->path[0], work 2002 drivers/infiniband/core/cm.c work->path[0].hop_limit = grh->hop_limit; work 2003 drivers/infiniband/core/cm.c ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av, work 2008 drivers/infiniband/core/cm.c err = rdma_query_gid(work->port->cm_dev->ib_device, work 2009 drivers/infiniband/core/cm.c work->port->port_num, 0, work 2010 drivers/infiniband/core/cm.c &work->path[0].sgid); work 2016 drivers/infiniband/core/cm.c &work->path[0].sgid, work 2017 drivers/infiniband/core/cm.c sizeof(work->path[0].sgid), work 2022 drivers/infiniband/core/cm.c ret = cm_init_av_by_path(&work->path[1], NULL, work 2026 drivers/infiniband/core/cm.c &work->path[0].sgid, work 2027 drivers/infiniband/core/cm.c sizeof(work->path[0].sgid), NULL, 0); work 2045 
drivers/infiniband/core/cm.c cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); work 2046 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 2207 drivers/infiniband/core/cm.c static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) work 2212 drivers/infiniband/core/cm.c rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; work 2213 drivers/infiniband/core/cm.c param = &work->cm_event.param.rep_rcvd; work 2225 drivers/infiniband/core/cm.c work->cm_event.private_data = &rep_msg->private_data; work 2228 drivers/infiniband/core/cm.c static void cm_dup_rep_handler(struct cm_work *work) work 2235 drivers/infiniband/core/cm.c rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; work 2241 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. work 2243 drivers/infiniband/core/cm.c ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); work 2271 drivers/infiniband/core/cm.c static int cm_rep_handler(struct cm_work *work) work 2280 drivers/infiniband/core/cm.c rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; work 2283 drivers/infiniband/core/cm.c cm_dup_rep_handler(work); work 2289 drivers/infiniband/core/cm.c cm_format_rep_event(work, cm_id_priv->qp_type); work 2306 drivers/infiniband/core/cm.c cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id; work 2326 drivers/infiniband/core/cm.c cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, work 2327 drivers/infiniband/core/cm.c timewait_info->work.remote_id); work 2331 drivers/infiniband/core/cm.c cm_issue_rej(work->port, work->mad_recv_wc, work 2369 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 2373 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 2383 drivers/infiniband/core/cm.c static int cm_establish_handler(struct cm_work *work) work 2389 drivers/infiniband/core/cm.c cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); work 2402 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 2406 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 2415 drivers/infiniband/core/cm.c static int cm_rtu_handler(struct cm_work *work) work 2421 drivers/infiniband/core/cm.c rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; work 2427 drivers/infiniband/core/cm.c work->cm_event.private_data = &rtu_msg->private_data; work 2433 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. work 2442 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 2446 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 2609 drivers/infiniband/core/cm.c static int cm_dreq_handler(struct cm_work *work) work 2616 drivers/infiniband/core/cm.c dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; work 2620 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. work 2622 drivers/infiniband/core/cm.c cm_issue_drep(work->port, work->mad_recv_wc); work 2629 drivers/infiniband/core/cm.c work->cm_event.private_data = &dreq_msg->private_data; work 2648 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
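The cm.c entries in this stretch keep recovering a typed context from the bare work_struct pointer via container_of(), and the timewait entries (INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); queue_delayed_work(...)) show the extra work.work hop needed when the context embeds a struct delayed_work. A minimal sketch of that embedding pattern, under assumed names (my_req, my_req_handler, my_wq) rather than the real CM types:

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_req {
	int id;                   /* per-request context */
	struct delayed_work work; /* embedded: context and work item share one allocation */
};

static void my_req_handler(struct work_struct *_work)
{
	/* delayed_work itself embeds a work_struct, hence the work.work hop */
	struct my_req *req = container_of(_work, struct my_req, work.work);

	pr_info("request %d fired\n", req->id);
	kfree(req); /* one-shot item: the handler owns and frees it */
}

static int my_req_submit(struct workqueue_struct *my_wq, int id)
{
	struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	req->id = id;
	INIT_DELAYED_WORK(&req->work, my_req_handler);
	queue_delayed_work(my_wq, &req->work, msecs_to_jiffies(100));
	return 0;
}

Embedding the work item in the request is what makes container_of() possible; it also means the item must be cancelled before the containing structure is freed, which is why the addr.c and cm.c teardown entries call cancel_delayed_work_sync()/cancel_delayed_work() first.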
work 2650 drivers/infiniband/core/cm.c msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc); work 2659 drivers/infiniband/core/cm.c if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || work 2664 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. work 2677 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 2681 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 2691 drivers/infiniband/core/cm.c static int cm_drep_handler(struct cm_work *work) work 2697 drivers/infiniband/core/cm.c drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; work 2703 drivers/infiniband/core/cm.c work->cm_event.private_data = &drep_msg->private_data; work 2716 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 2720 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 2792 drivers/infiniband/core/cm.c static void cm_format_rej_event(struct cm_work *work) work 2797 drivers/infiniband/core/cm.c rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; work 2798 drivers/infiniband/core/cm.c param = &work->cm_event.param.rej_rcvd; work 2802 drivers/infiniband/core/cm.c work->cm_event.private_data = &rej_msg->private_data; work 2822 drivers/infiniband/core/cm.c cm_local_id(timewait_info->work.local_id)); work 2838 drivers/infiniband/core/cm.c static int cm_rej_handler(struct cm_work *work) work 2844 drivers/infiniband/core/cm.c rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; work 2849 drivers/infiniband/core/cm.c cm_format_rej_event(work); work 2894 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 2898 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 3003 drivers/infiniband/core/cm.c static int cm_mra_handler(struct cm_work *work) work 3009 drivers/infiniband/core/cm.c mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; work 3014 drivers/infiniband/core/cm.c work->cm_event.private_data = &mra_msg->private_data; work 3015 drivers/infiniband/core/cm.c work->cm_event.param.mra_rcvd.service_timeout = work 3042 drivers/infiniband/core/cm.c atomic_long_inc(&work->port-> work 3051 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. work 3065 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 3069 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 3216 drivers/infiniband/core/cm.c static int cm_lap_handler(struct cm_work *work) work 3227 drivers/infiniband/core/cm.c if (rdma_protocol_roce(work->port->cm_dev->ib_device, work 3228 drivers/infiniband/core/cm.c work->port->port_num)) work 3232 drivers/infiniband/core/cm.c lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; work 3238 drivers/infiniband/core/cm.c param = &work->cm_event.param.lap_rcvd; work 3239 drivers/infiniband/core/cm.c memset(&work->path[0], 0, sizeof(work->path[1])); work 3240 drivers/infiniband/core/cm.c cm_path_set_rec_type(work->port->cm_dev->ib_device, work 3241 drivers/infiniband/core/cm.c work->port->port_num, work 3242 drivers/infiniband/core/cm.c &work->path[0], work 3244 drivers/infiniband/core/cm.c param->alternate_path = &work->path[0]; work 3246 drivers/infiniband/core/cm.c work->cm_event.private_data = &lap_msg->private_data; work 3257 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
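Almost every handler in this stretch ends the same way: take the per-ID lock, then either hand the event straight to cm_process_work() or list_add_tail() it onto cm_id_priv->work_list for later draining. A rough sketch of that deliver-then-drain shape, with invented names (my_id, my_event, my_handle_event) and simplified locking, not the actual CM state machine:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_event {
	struct list_head list;
	int type;
};

struct my_id {
	spinlock_t lock;
	struct list_head work_list; /* events that arrived while one was being delivered */
};

static void my_id_init(struct my_id *id)
{
	spin_lock_init(&id->lock);
	INIT_LIST_HEAD(&id->work_list);
}

static void my_handle_event(struct my_event *ev)
{
	pr_info("event %d\n", ev->type); /* placeholder consumer */
}

/* Deliver one event, then drain whatever queued up behind it. */
static void my_process(struct my_id *id, struct my_event *ev)
{
	unsigned long flags;

	while (ev) {
		my_handle_event(ev);
		kfree(ev);
		spin_lock_irqsave(&id->lock, flags);
		ev = list_first_entry_or_null(&id->work_list,
					      struct my_event, list);
		if (ev)
			list_del(&ev->list);
		spin_unlock_irqrestore(&id->lock, flags);
	}
}

Queueing behind a busy ID keeps events for one connection strictly ordered while still letting the shared workqueue run other IDs concurrently.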
work 3259 drivers/infiniband/core/cm.c msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc); work 3270 drivers/infiniband/core/cm.c if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || work 3275 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. work 3282 drivers/infiniband/core/cm.c ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc, work 3283 drivers/infiniband/core/cm.c work->mad_recv_wc->recv_buf.grh, work 3297 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 3301 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 3377 drivers/infiniband/core/cm.c static int cm_apr_handler(struct cm_work *work) work 3386 drivers/infiniband/core/cm.c if (rdma_protocol_roce(work->port->cm_dev->ib_device, work 3387 drivers/infiniband/core/cm.c work->port->port_num)) work 3390 drivers/infiniband/core/cm.c apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; work 3396 drivers/infiniband/core/cm.c work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; work 3397 drivers/infiniband/core/cm.c work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; work 3398 drivers/infiniband/core/cm.c work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; work 3399 drivers/infiniband/core/cm.c work->cm_event.private_data = &apr_msg->private_data; work 3414 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 3418 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 3427 drivers/infiniband/core/cm.c static int cm_timewait_handler(struct cm_work *work) work 3433 drivers/infiniband/core/cm.c timewait_info = (struct cm_timewait_info *)work; work 3438 drivers/infiniband/core/cm.c cm_id_priv = cm_acquire_id(timewait_info->work.local_id, work 3439 drivers/infiniband/core/cm.c timewait_info->work.remote_id); work 3452 drivers/infiniband/core/cm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 3456 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 3531 drivers/infiniband/core/cm.c static void cm_format_sidr_req_event(struct cm_work *work, work 3539 drivers/infiniband/core/cm.c work->mad_recv_wc->recv_buf.mad; work 3540 drivers/infiniband/core/cm.c param = &work->cm_event.param.sidr_req_rcvd; work 3544 drivers/infiniband/core/cm.c param->bth_pkey = cm_get_bth_pkey(work); work 3545 drivers/infiniband/core/cm.c param->port = work->port->port_num; work 3547 drivers/infiniband/core/cm.c work->cm_event.private_data = &sidr_req_msg->private_data; work 3550 drivers/infiniband/core/cm.c static int cm_sidr_req_handler(struct cm_work *work) work 3558 drivers/infiniband/core/cm.c cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); work 3565 drivers/infiniband/core/cm.c work->mad_recv_wc->recv_buf.mad; work 3566 drivers/infiniband/core/cm.c wc = work->mad_recv_wc->wc; work 3569 drivers/infiniband/core/cm.c ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work 3570 drivers/infiniband/core/cm.c work->mad_recv_wc->recv_buf.grh, work 3583 drivers/infiniband/core/cm.c atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
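The receive entries just below (work 3888: work = kmalloc(sizeof *work, GFP_ATOMIC)) allocate their work item with GFP_ATOMIC because the MAD receive path cannot sleep, and the handler kfree()s the item once the event has been dispatched; the cache.c and roce_gid_mgmt.c entries earlier follow the same one-shot shape. A hedged sketch of it (my_event_work and my_post_event are illustrative names):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event_work {
	struct work_struct work;
	int event; /* copied by value so the item is self-contained */
};

static void my_event_task(struct work_struct *_work)
{
	struct my_event_work *w = container_of(_work, struct my_event_work, work);

	pr_info("deferred event %d\n", w->event);
	kfree(w); /* one-shot: the handler frees its own item */
}

/* Safe from atomic context: the sleeping part is deferred to the workqueue. */
static bool my_post_event(struct workqueue_struct *wq, int event)
{
	struct my_event_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return false; /* atomic allocations may fail; callers must cope */
	INIT_WORK(&w->work, my_event_task);
	w->event = event;
	queue_work(wq, &w->work);
	return true;
}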
work 3604 drivers/infiniband/core/cm.c cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id); work 3605 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 3681 drivers/infiniband/core/cm.c static void cm_format_sidr_rep_event(struct cm_work *work, work 3688 drivers/infiniband/core/cm.c work->mad_recv_wc->recv_buf.mad; work 3689 drivers/infiniband/core/cm.c param = &work->cm_event.param.sidr_rep_rcvd; work 3696 drivers/infiniband/core/cm.c work->cm_event.private_data = &sidr_rep_msg->private_data; work 3699 drivers/infiniband/core/cm.c static int cm_sidr_rep_handler(struct cm_work *work) work 3705 drivers/infiniband/core/cm.c work->mad_recv_wc->recv_buf.mad; work 3719 drivers/infiniband/core/cm.c cm_format_sidr_rep_event(work, cm_id_priv); work 3720 drivers/infiniband/core/cm.c cm_process_work(cm_id_priv, work); work 3824 drivers/infiniband/core/cm.c struct cm_work *work = container_of(_work, struct cm_work, work.work); work 3827 drivers/infiniband/core/cm.c switch (work->cm_event.event) { work 3829 drivers/infiniband/core/cm.c ret = cm_req_handler(work); work 3832 drivers/infiniband/core/cm.c ret = cm_mra_handler(work); work 3835 drivers/infiniband/core/cm.c ret = cm_rej_handler(work); work 3838 drivers/infiniband/core/cm.c ret = cm_rep_handler(work); work 3841 drivers/infiniband/core/cm.c ret = cm_rtu_handler(work); work 3844 drivers/infiniband/core/cm.c ret = cm_establish_handler(work); work 3847 drivers/infiniband/core/cm.c ret = cm_dreq_handler(work); work 3850 drivers/infiniband/core/cm.c ret = cm_drep_handler(work); work 3853 drivers/infiniband/core/cm.c ret = cm_sidr_req_handler(work); work 3856 drivers/infiniband/core/cm.c ret = cm_sidr_rep_handler(work); work 3859 drivers/infiniband/core/cm.c ret = cm_lap_handler(work); work 3862 drivers/infiniband/core/cm.c ret = cm_apr_handler(work); work 3865 drivers/infiniband/core/cm.c ret = cm_timewait_handler(work); work 3868 drivers/infiniband/core/cm.c pr_debug("cm_event.event: 0x%x\n", work->cm_event.event); work 3873 drivers/infiniband/core/cm.c cm_free_work(work); work 3879 drivers/infiniband/core/cm.c struct cm_work *work; work 3888 drivers/infiniband/core/cm.c work = kmalloc(sizeof *work, GFP_ATOMIC); work 3889 drivers/infiniband/core/cm.c if (!work) work 3912 drivers/infiniband/core/cm.c kfree(work); work 3922 drivers/infiniband/core/cm.c INIT_DELAYED_WORK(&work->work, cm_work_handler); work 3923 drivers/infiniband/core/cm.c work->local_id = cm_id->local_id; work 3924 drivers/infiniband/core/cm.c work->remote_id = cm_id->remote_id; work 3925 drivers/infiniband/core/cm.c work->mad_recv_wc = NULL; work 3926 drivers/infiniband/core/cm.c work->cm_event.event = IB_CM_USER_ESTABLISHED; work 3931 drivers/infiniband/core/cm.c queue_delayed_work(cm.wq, &work->work, 0); work 3933 drivers/infiniband/core/cm.c kfree(work); work 3994 drivers/infiniband/core/cm.c struct cm_work *work; work 4048 drivers/infiniband/core/cm.c work = kmalloc(struct_size(work, path, paths), GFP_KERNEL); work 4049 drivers/infiniband/core/cm.c if (!work) { work 4054 drivers/infiniband/core/cm.c INIT_DELAYED_WORK(&work->work, cm_work_handler); work 4055 drivers/infiniband/core/cm.c work->cm_event.event = event; work 4056 drivers/infiniband/core/cm.c work->mad_recv_wc = mad_recv_wc; work 4057 drivers/infiniband/core/cm.c work->port = port; work 4062 drivers/infiniband/core/cm.c queue_delayed_work(cm.wq, &work->work, 0); work 4068 drivers/infiniband/core/cm.c kfree(work); work 4515 drivers/infiniband/core/cm.c 
cancel_delayed_work(&timewait_info->work.work); work 374 drivers/infiniband/core/cma.c struct work_struct work; work 382 drivers/infiniband/core/cma.c struct work_struct work; work 388 drivers/infiniband/core/cma.c struct work_struct work; work 2556 drivers/infiniband/core/cma.c struct cma_work *work = context; work 2559 drivers/infiniband/core/cma.c route = &work->id->id.route; work 2565 drivers/infiniband/core/cma.c work->old_state = RDMA_CM_ROUTE_QUERY; work 2566 drivers/infiniband/core/cma.c work->new_state = RDMA_CM_ADDR_RESOLVED; work 2567 drivers/infiniband/core/cma.c work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; work 2568 drivers/infiniband/core/cma.c work->event.status = status; work 2573 drivers/infiniband/core/cma.c queue_work(cma_wq, &work->work); work 2577 drivers/infiniband/core/cma.c unsigned long timeout_ms, struct cma_work *work) work 2624 drivers/infiniband/core/cma.c work, &id_priv->query); work 2631 drivers/infiniband/core/cma.c struct cma_work *work = container_of(_work, struct cma_work, work); work 2632 drivers/infiniband/core/cma.c struct rdma_id_private *id_priv = work->id; work 2636 drivers/infiniband/core/cma.c if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) work 2639 drivers/infiniband/core/cma.c if (id_priv->id.event_handler(&id_priv->id, &work->event)) { work 2648 drivers/infiniband/core/cma.c kfree(work); work 2653 drivers/infiniband/core/cma.c struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); work 2654 drivers/infiniband/core/cma.c struct rdma_id_private *id_priv = work->id; work 2662 drivers/infiniband/core/cma.c if (id_priv->id.event_handler(&id_priv->id, &work->event)) { work 2672 drivers/infiniband/core/cma.c kfree(work); work 2675 drivers/infiniband/core/cma.c static void cma_init_resolve_route_work(struct cma_work *work, work 2678 drivers/infiniband/core/cma.c work->id = id_priv; work 2679 drivers/infiniband/core/cma.c INIT_WORK(&work->work, cma_work_handler); work 2680 drivers/infiniband/core/cma.c work->old_state = RDMA_CM_ROUTE_QUERY; work 2681 drivers/infiniband/core/cma.c work->new_state = RDMA_CM_ROUTE_RESOLVED; work 2682 drivers/infiniband/core/cma.c work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; work 2685 drivers/infiniband/core/cma.c static void cma_init_resolve_addr_work(struct cma_work *work, work 2688 drivers/infiniband/core/cma.c work->id = id_priv; work 2689 drivers/infiniband/core/cma.c INIT_WORK(&work->work, cma_work_handler); work 2690 drivers/infiniband/core/cma.c work->old_state = RDMA_CM_ADDR_QUERY; work 2691 drivers/infiniband/core/cma.c work->new_state = RDMA_CM_ADDR_RESOLVED; work 2692 drivers/infiniband/core/cma.c work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; work 2699 drivers/infiniband/core/cma.c struct cma_work *work; work 2702 drivers/infiniband/core/cma.c work = kzalloc(sizeof *work, GFP_KERNEL); work 2703 drivers/infiniband/core/cma.c if (!work) work 2706 drivers/infiniband/core/cma.c cma_init_resolve_route_work(work, id_priv); work 2714 drivers/infiniband/core/cma.c ret = cma_query_ib_route(id_priv, timeout_ms, work); work 2723 drivers/infiniband/core/cma.c kfree(work); work 2820 drivers/infiniband/core/cma.c struct cma_work *work; work 2822 drivers/infiniband/core/cma.c work = kzalloc(sizeof *work, GFP_KERNEL); work 2823 drivers/infiniband/core/cma.c if (!work) work 2826 drivers/infiniband/core/cma.c cma_init_resolve_route_work(work, id_priv); work 2827 drivers/infiniband/core/cma.c queue_work(cma_wq, &work->work); work 2853 drivers/infiniband/core/cma.c struct cma_work 
*work; work 2862 drivers/infiniband/core/cma.c work = kzalloc(sizeof *work, GFP_KERNEL); work 2863 drivers/infiniband/core/cma.c if (!work) work 2906 drivers/infiniband/core/cma.c cma_init_resolve_route_work(work, id_priv); work 2907 drivers/infiniband/core/cma.c queue_work(cma_wq, &work->work); work 2916 drivers/infiniband/core/cma.c kfree(work); work 3078 drivers/infiniband/core/cma.c struct cma_work *work; work 3082 drivers/infiniband/core/cma.c work = kzalloc(sizeof *work, GFP_KERNEL); work 3083 drivers/infiniband/core/cma.c if (!work) work 3096 drivers/infiniband/core/cma.c cma_init_resolve_addr_work(work, id_priv); work 3097 drivers/infiniband/core/cma.c queue_work(cma_wq, &work->work); work 3100 drivers/infiniband/core/cma.c kfree(work); work 3106 drivers/infiniband/core/cma.c struct cma_work *work; work 3109 drivers/infiniband/core/cma.c work = kzalloc(sizeof *work, GFP_KERNEL); work 3110 drivers/infiniband/core/cma.c if (!work) work 3123 drivers/infiniband/core/cma.c cma_init_resolve_addr_work(work, id_priv); work 3124 drivers/infiniband/core/cma.c queue_work(cma_wq, &work->work); work 3127 drivers/infiniband/core/cma.c kfree(work); work 4304 drivers/infiniband/core/cma.c static void iboe_mcast_work_handler(struct work_struct *work) work 4306 drivers/infiniband/core/cma.c struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); work 4348 drivers/infiniband/core/cma.c struct iboe_mcast_work *work; work 4361 drivers/infiniband/core/cma.c work = kzalloc(sizeof *work, GFP_KERNEL); work 4362 drivers/infiniband/core/cma.c if (!work) work 4409 drivers/infiniband/core/cma.c work->id = id_priv; work 4410 drivers/infiniband/core/cma.c work->mc = mc; work 4411 drivers/infiniband/core/cma.c INIT_WORK(&work->work, iboe_mcast_work_handler); work 4413 drivers/infiniband/core/cma.c queue_work(cma_wq, &work->work); work 4420 drivers/infiniband/core/cma.c kfree(work); work 4508 drivers/infiniband/core/cma.c struct cma_ndev_work *work; work 4517 drivers/infiniband/core/cma.c work = kzalloc(sizeof *work, GFP_KERNEL); work 4518 drivers/infiniband/core/cma.c if (!work) work 4521 drivers/infiniband/core/cma.c INIT_WORK(&work->work, cma_ndev_work_handler); work 4522 drivers/infiniband/core/cma.c work->id = id_priv; work 4523 drivers/infiniband/core/cma.c work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; work 4525 drivers/infiniband/core/cma.c queue_work(cma_wq, &work->work); work 36 drivers/infiniband/core/cq.c struct dim *dim = container_of(w, struct dim, work); work 65 drivers/infiniband/core/cq.c INIT_WORK(&dim->work, ib_cq_rdma_dim_work); work 149 drivers/infiniband/core/cq.c static void ib_cq_poll_work(struct work_struct *work) work 151 drivers/infiniband/core/cq.c struct ib_cq *cq = container_of(work, struct ib_cq, work); work 158 drivers/infiniband/core/cq.c queue_work(cq->comp_wq, &cq->work); work 165 drivers/infiniband/core/cq.c queue_work(cq->comp_wq, &cq->work); work 232 drivers/infiniband/core/cq.c INIT_WORK(&cq->work, ib_cq_poll_work); work 301 drivers/infiniband/core/cq.c cancel_work_sync(&cq->work); work 310 drivers/infiniband/core/cq.c cancel_work_sync(&cq->dim->work); work 189 drivers/infiniband/core/device.c static void ib_unregister_work(struct work_struct *work); work 193 drivers/infiniband/core/device.c static void ib_policy_change_task(struct work_struct *work); work 815 drivers/infiniband/core/device.c static void ib_policy_change_task(struct work_struct *work) work 1543 drivers/infiniband/core/device.c static void ib_unregister_work(struct work_struct *work) 
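The cq.c entries above show a completion poller that requeues itself (queue_work(cq->comp_wq, &cq->work)) whenever its per-invocation budget is exhausted, and teardown that calls cancel_work_sync(&cq->work). A minimal sketch of that self-rearming loop, assuming a hypothetical my_poll() consumer:

#include <linux/workqueue.h>

#define MY_POLL_BUDGET 16

struct my_poller {
	struct work_struct work;
	struct workqueue_struct *wq;
};

/* Hypothetical consumer: process up to @budget completions, return the count. */
static int my_poll(struct my_poller *p, int budget)
{
	return 0; /* stub for the sketch */
}

static void my_poll_work(struct work_struct *_work)
{
	struct my_poller *p = container_of(_work, struct my_poller, work);

	/*
	 * A full budget means there may be more to do: requeue instead of
	 * looping, so other items on the same workqueue get a turn.
	 */
	if (my_poll(p, MY_POLL_BUDGET) >= MY_POLL_BUDGET)
		queue_work(p->wq, &p->work);
}

static void my_poller_init(struct my_poller *p, struct workqueue_struct *wq)
{
	p->wq = wq;
	INIT_WORK(&p->work, my_poll_work);
}

static void my_poller_stop(struct my_poller *p)
{
	/* Waits for a running instance and stops it from requeueing itself. */
	cancel_work_sync(&p->work);
}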
work 1546 drivers/infiniband/core/device.c container_of(work, struct ib_device, unregistration_work); work 100 drivers/infiniband/core/fmr_pool.c struct kthread_work work; work 171 drivers/infiniband/core/fmr_pool.c static void ib_fmr_cleanup_func(struct kthread_work *work) work 173 drivers/infiniband/core/fmr_pool.c struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work); work 183 drivers/infiniband/core/fmr_pool.c kthread_queue_work(pool->worker, &pool->work); work 260 drivers/infiniband/core/fmr_pool.c kthread_init_work(&pool->work, ib_fmr_cleanup_func); work 375 drivers/infiniband/core/fmr_pool.c kthread_queue_work(pool->worker, &pool->work); work 487 drivers/infiniband/core/fmr_pool.c kthread_queue_work(pool->worker, &pool->work); work 96 drivers/infiniband/core/iwcm.c struct work_struct work; work 143 drivers/infiniband/core/iwcm.c struct iwcm_work *work; work 147 drivers/infiniband/core/iwcm.c work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work, work 149 drivers/infiniband/core/iwcm.c list_del_init(&work->free_list); work 150 drivers/infiniband/core/iwcm.c return work; work 153 drivers/infiniband/core/iwcm.c static void put_work(struct iwcm_work *work) work 155 drivers/infiniband/core/iwcm.c list_add(&work->free_list, &work->cm_id->work_free_list); work 170 drivers/infiniband/core/iwcm.c struct iwcm_work *work; work 174 drivers/infiniband/core/iwcm.c work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL); work 175 drivers/infiniband/core/iwcm.c if (!work) { work 179 drivers/infiniband/core/iwcm.c work->cm_id = cm_id_priv; work 180 drivers/infiniband/core/iwcm.c INIT_LIST_HEAD(&work->list); work 181 drivers/infiniband/core/iwcm.c put_work(work); work 1018 drivers/infiniband/core/iwcm.c struct iwcm_work *work = container_of(_work, struct iwcm_work, work); work 1020 drivers/infiniband/core/iwcm.c struct iwcm_id_private *cm_id_priv = work->cm_id; work 1028 drivers/infiniband/core/iwcm.c work = list_entry(cm_id_priv->work_list.next, work 1030 drivers/infiniband/core/iwcm.c list_del_init(&work->list); work 1032 drivers/infiniband/core/iwcm.c levent = work->event; work 1033 drivers/infiniband/core/iwcm.c put_work(work); work 1069 drivers/infiniband/core/iwcm.c struct iwcm_work *work; work 1077 drivers/infiniband/core/iwcm.c work = get_work(cm_id_priv); work 1078 drivers/infiniband/core/iwcm.c if (!work) { work 1083 drivers/infiniband/core/iwcm.c INIT_WORK(&work->work, cm_work_handler); work 1084 drivers/infiniband/core/iwcm.c work->cm_id = cm_id_priv; work 1085 drivers/infiniband/core/iwcm.c work->event = *iw_event; work 1087 drivers/infiniband/core/iwcm.c if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST || work 1088 drivers/infiniband/core/iwcm.c work->event.event == IW_CM_EVENT_CONNECT_REPLY) && work 1089 drivers/infiniband/core/iwcm.c work->event.private_data_len) { work 1090 drivers/infiniband/core/iwcm.c ret = copy_private_data(&work->event); work 1092 drivers/infiniband/core/iwcm.c put_work(work); work 1099 drivers/infiniband/core/iwcm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 1100 drivers/infiniband/core/iwcm.c queue_work(iwcm_wq, &work->work); work 1102 drivers/infiniband/core/iwcm.c list_add_tail(&work->list, &cm_id_priv->work_list); work 106 drivers/infiniband/core/mad.c static void timeout_sends(struct work_struct *work); work 107 drivers/infiniband/core/mad.c static void local_completions(struct work_struct *work); work 2742 drivers/infiniband/core/mad.c static void local_completions(struct work_struct *work) work 2754 
drivers/infiniband/core/mad.c container_of(work, struct ib_mad_agent_private, local_work); work 2874 drivers/infiniband/core/mad.c static void timeout_sends(struct work_struct *work) work 2881 drivers/infiniband/core/mad.c mad_agent_priv = container_of(work, struct ib_mad_agent_private, work 2882 drivers/infiniband/core/mad.c timed_work.work); work 246 drivers/infiniband/core/mad_rmpp.c static void recv_timeout_handler(struct work_struct *work) work 249 drivers/infiniband/core/mad_rmpp.c container_of(work, struct mad_rmpp_recv, timeout_work.work); work 268 drivers/infiniband/core/mad_rmpp.c static void recv_cleanup_handler(struct work_struct *work) work 271 drivers/infiniband/core/mad_rmpp.c container_of(work, struct mad_rmpp_recv, cleanup_work.work); work 101 drivers/infiniband/core/multicast.c struct work_struct work; work 216 drivers/infiniband/core/multicast.c queue_work(mcast_wq, &group->work); work 424 drivers/infiniband/core/multicast.c static void mcast_work_handler(struct work_struct *work) work 432 drivers/infiniband/core/multicast.c group = container_of(work, typeof(*group), work); work 542 drivers/infiniband/core/multicast.c mcast_work_handler(&group->work); work 554 drivers/infiniband/core/multicast.c mcast_work_handler(&group->work); work 583 drivers/infiniband/core/multicast.c INIT_WORK(&group->work, mcast_work_handler); work 680 drivers/infiniband/core/multicast.c queue_work(mcast_wq, &group->work); work 783 drivers/infiniband/core/multicast.c queue_work(mcast_wq, &group->work); work 53 drivers/infiniband/core/roce_gid_mgmt.c struct work_struct work; work 68 drivers/infiniband/core/roce_gid_mgmt.c struct work_struct work; work 618 drivers/infiniband/core/roce_gid_mgmt.c struct netdev_event_work *work = work 619 drivers/infiniband/core/roce_gid_mgmt.c container_of(_work, struct netdev_event_work, work); work 622 drivers/infiniband/core/roce_gid_mgmt.c for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) { work 623 drivers/infiniband/core/roce_gid_mgmt.c ib_enum_all_roce_netdevs(work->cmds[i].filter, work 624 drivers/infiniband/core/roce_gid_mgmt.c work->cmds[i].filter_ndev, work 625 drivers/infiniband/core/roce_gid_mgmt.c work->cmds[i].cb, work 626 drivers/infiniband/core/roce_gid_mgmt.c work->cmds[i].ndev); work 627 drivers/infiniband/core/roce_gid_mgmt.c dev_put(work->cmds[i].ndev); work 628 drivers/infiniband/core/roce_gid_mgmt.c dev_put(work->cmds[i].filter_ndev); work 631 drivers/infiniband/core/roce_gid_mgmt.c kfree(work); work 653 drivers/infiniband/core/roce_gid_mgmt.c INIT_WORK(&ndev_work->work, netdevice_event_work_handler); work 655 drivers/infiniband/core/roce_gid_mgmt.c queue_work(gid_cache_wq, &ndev_work->work); work 803 drivers/infiniband/core/roce_gid_mgmt.c struct update_gid_event_work *work = work 804 drivers/infiniband/core/roce_gid_mgmt.c container_of(_work, struct update_gid_event_work, work); work 807 drivers/infiniband/core/roce_gid_mgmt.c work->gid_attr.ndev, work 808 drivers/infiniband/core/roce_gid_mgmt.c callback_for_addr_gid_device_scan, work); work 810 drivers/infiniband/core/roce_gid_mgmt.c dev_put(work->gid_attr.ndev); work 811 drivers/infiniband/core/roce_gid_mgmt.c kfree(work); work 817 drivers/infiniband/core/roce_gid_mgmt.c struct update_gid_event_work *work; work 836 drivers/infiniband/core/roce_gid_mgmt.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 837 drivers/infiniband/core/roce_gid_mgmt.c if (!work) work 840 drivers/infiniband/core/roce_gid_mgmt.c INIT_WORK(&work->work, update_gid_event_work_handler); work 842 
drivers/infiniband/core/roce_gid_mgmt.c rdma_ip2gid(sa, &work->gid); work 843 drivers/infiniband/core/roce_gid_mgmt.c work->gid_op = gid_op; work 845 drivers/infiniband/core/roce_gid_mgmt.c memset(&work->gid_attr, 0, sizeof(work->gid_attr)); work 847 drivers/infiniband/core/roce_gid_mgmt.c work->gid_attr.ndev = ndev; work 849 drivers/infiniband/core/roce_gid_mgmt.c queue_work(gid_cache_wq, &work->work); work 975 drivers/infiniband/core/sa_query.c static void ib_nl_request_timeout(struct work_struct *work) work 2108 drivers/infiniband/core/sa_query.c static void update_ib_cpi(struct work_struct *work) work 2111 drivers/infiniband/core/sa_query.c container_of(work, struct ib_sa_port, ib_cpi_work.work); work 2216 drivers/infiniband/core/sa_query.c static void update_sm_ah(struct work_struct *work) work 2219 drivers/infiniband/core/sa_query.c container_of(work, struct ib_sa_port, update_task); work 185 drivers/infiniband/core/ucma.c static void ucma_close_event_id(struct work_struct *work) work 187 drivers/infiniband/core/ucma.c struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work); work 193 drivers/infiniband/core/ucma.c static void ucma_close_id(struct work_struct *work) work 195 drivers/infiniband/core/ucma.c struct ucma_context *ctx = container_of(work, struct ucma_context, close_work); work 93 drivers/infiniband/hw/bnxt_re/bnxt_re.h struct work_struct work; work 1327 drivers/infiniband/hw/bnxt_re/main.c static void bnxt_re_worker(struct work_struct *work) work 1329 drivers/infiniband/hw/bnxt_re/main.c struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, work 1330 drivers/infiniband/hw/bnxt_re/main.c worker.work); work 1541 drivers/infiniband/hw/bnxt_re/main.c static void bnxt_re_task(struct work_struct *work) work 1547 drivers/infiniband/hw/bnxt_re/main.c re_work = container_of(work, struct bnxt_re_work, work); work 1667 drivers/infiniband/hw/bnxt_re/main.c INIT_WORK(&re_work->work, bnxt_re_task); work 1669 drivers/infiniband/hw/bnxt_re/main.c queue_work(bnxt_re_wq, &re_work->work); work 153 drivers/infiniband/hw/bnxt_re/qplib_fp.c static void bnxt_qpn_cqn_sched_task(struct work_struct *work) work 156 drivers/infiniband/hw/bnxt_re/qplib_fp.c container_of(work, struct bnxt_qplib_nq_work, work); work 1796 drivers/infiniband/hw/bnxt_re/qplib_fp.c INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task); work 1797 drivers/infiniband/hw/bnxt_re/qplib_fp.c queue_work(qp->scq->nq->cqn_wq, &nq_work->work); work 1886 drivers/infiniband/hw/bnxt_re/qplib_fp.c INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task); work 1887 drivers/infiniband/hw/bnxt_re/qplib_fp.c queue_work(qp->rcq->nq->cqn_wq, &nq_work->work); work 499 drivers/infiniband/hw/bnxt_re/qplib_fp.h struct work_struct work; work 91 drivers/infiniband/hw/cxgb3/iwch.c static void iwch_db_drop_task(struct work_struct *work) work 93 drivers/infiniband/hw/cxgb3/iwch.c struct iwch_dev *rnicp = container_of(work, struct iwch_dev, work 94 drivers/infiniband/hw/cxgb3/iwch.c db_drop_task.work); work 2166 drivers/infiniband/hw/cxgb3/iwch_cm.c static void process_work(struct work_struct *work) work 4290 drivers/infiniband/hw/cxgb4/cm.c static void process_work(struct work_struct *work) work 962 drivers/infiniband/hw/cxgb4/iw_cxgb4.h void c4iw_register_device(struct work_struct *work); work 535 drivers/infiniband/hw/cxgb4/provider.c void c4iw_register_device(struct work_struct *work) work 538 drivers/infiniband/hw/cxgb4/provider.c struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work); work 6678 
drivers/infiniband/hw/hfi1/chip.c void handle_sma_message(struct work_struct *work) work 6680 drivers/infiniband/hw/hfi1/chip.c struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, work 6891 drivers/infiniband/hw/hfi1/chip.c void handle_freeze(struct work_struct *work) work 6893 drivers/infiniband/hw/hfi1/chip.c struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, work 6988 drivers/infiniband/hw/hfi1/chip.c void handle_link_up(struct work_struct *work) work 6990 drivers/infiniband/hw/hfi1/chip.c struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, work 7116 drivers/infiniband/hw/hfi1/chip.c void handle_link_down(struct work_struct *work) work 7120 drivers/infiniband/hw/hfi1/chip.c struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, work 7205 drivers/infiniband/hw/hfi1/chip.c void handle_link_bounce(struct work_struct *work) work 7207 drivers/infiniband/hw/hfi1/chip.c struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, work 7441 drivers/infiniband/hw/hfi1/chip.c void handle_verify_cap(struct work_struct *work) work 7443 drivers/infiniband/hw/hfi1/chip.c struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, work 7710 drivers/infiniband/hw/hfi1/chip.c void handle_link_downgrade(struct work_struct *work) work 7712 drivers/infiniband/hw/hfi1/chip.c struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, work 9620 drivers/infiniband/hw/hfi1/chip.c void qsfp_event(struct work_struct *work) work 9626 drivers/infiniband/hw/hfi1/chip.c qd = container_of(work, struct qsfp_data, qsfp_work); work 9794 drivers/infiniband/hw/hfi1/chip.c void handle_start_link(struct work_struct *work) work 9796 drivers/infiniband/hw/hfi1/chip.c struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, work 9797 drivers/infiniband/hw/hfi1/chip.c start_link_work.work); work 12374 drivers/infiniband/hw/hfi1/chip.c static void do_update_synth_timer(struct work_struct *work) work 12383 drivers/infiniband/hw/hfi1/chip.c struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata, work 776 drivers/infiniband/hw/hfi1/chip.h void handle_verify_cap(struct work_struct *work); work 777 drivers/infiniband/hw/hfi1/chip.h void handle_freeze(struct work_struct *work); work 778 drivers/infiniband/hw/hfi1/chip.h void handle_link_up(struct work_struct *work); work 779 drivers/infiniband/hw/hfi1/chip.h void handle_link_down(struct work_struct *work); work 780 drivers/infiniband/hw/hfi1/chip.h void handle_link_downgrade(struct work_struct *work); work 781 drivers/infiniband/hw/hfi1/chip.h void handle_link_bounce(struct work_struct *work); work 782 drivers/infiniband/hw/hfi1/chip.h void handle_start_link(struct work_struct *work); work 783 drivers/infiniband/hw/hfi1/chip.h void handle_sma_message(struct work_struct *work); work 785 drivers/infiniband/hw/hfi1/chip.h void qsfp_event(struct work_struct *work); work 1146 drivers/infiniband/hw/hfi1/driver.c void receive_interrupt_work(struct work_struct *work) work 1148 drivers/infiniband/hw/hfi1/driver.c struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, work 1523 drivers/infiniband/hw/hfi1/hfi.h void receive_interrupt_work(struct work_struct *work); work 42 drivers/infiniband/hw/hfi1/iowait.c void (*func)(struct work_struct *work), work 43 drivers/infiniband/hw/hfi1/iowait.c void (*tidfunc)(struct work_struct *work), work 61 drivers/infiniband/hw/hfi1/iowait.h typedef void (*restart_t)(struct work_struct *work); work 172 
drivers/infiniband/hw/hfi1/iowait.h void (*func)(struct work_struct *work), work 173 drivers/infiniband/hw/hfi1/iowait.h void (*tidfunc)(struct work_struct *work), work 76 drivers/infiniband/hw/hfi1/mmu_rb.c static void handle_remove(struct work_struct *work); work 339 drivers/infiniband/hw/hfi1/mmu_rb.c static void handle_remove(struct work_struct *work) work 341 drivers/infiniband/hw/hfi1/mmu_rb.c struct mmu_rb_handler *handler = container_of(work, work 111 drivers/infiniband/hw/hfi1/opfn.c void opfn_send_conn_request(struct work_struct *work) work 116 drivers/infiniband/hw/hfi1/opfn.c od = container_of(work, struct hfi1_opfn_data, opfn_work); work 77 drivers/infiniband/hw/hfi1/opfn.h void opfn_send_conn_request(struct work_struct *work); work 575 drivers/infiniband/hw/hfi1/pio.c static void sc_halted(struct work_struct *work) work 579 drivers/infiniband/hw/hfi1/pio.c sc = container_of(work, struct send_context, halt_work); work 517 drivers/infiniband/hw/hfi1/ruc.c void _hfi1_do_send(struct work_struct *work) work 519 drivers/infiniband/hw/hfi1/ruc.c struct iowait_work *w = container_of(work, struct iowait_work, iowork); work 448 drivers/infiniband/hw/hfi1/sdma.c static void sdma_field_flush(struct work_struct *work) work 452 drivers/infiniband/hw/hfi1/sdma.c container_of(work, struct sdma_engine, flush_worker); work 460 drivers/infiniband/hw/hfi1/sdma.c static void sdma_err_halt_wait(struct work_struct *work) work 462 drivers/infiniband/hw/hfi1/sdma.c struct sdma_engine *sde = container_of(work, struct sdma_engine, work 110 drivers/infiniband/hw/hfi1/tid_rdma.c static void tid_rdma_trigger_resume(struct work_struct *work); work 638 drivers/infiniband/hw/hfi1/tid_rdma.c static void tid_rdma_trigger_resume(struct work_struct *work) work 644 drivers/infiniband/hw/hfi1/tid_rdma.c tr = container_of(work, struct tid_rdma_qp_params, trigger_work); work 5328 drivers/infiniband/hw/hfi1/tid_rdma.c void _hfi1_do_tid_send(struct work_struct *work) work 5330 drivers/infiniband/hw/hfi1/tid_rdma.c struct iowait_work *w = container_of(work, struct iowait_work, iowork); work 313 drivers/infiniband/hw/hfi1/tid_rdma.h void _hfi1_do_tid_send(struct work_struct *work); work 452 drivers/infiniband/hw/hfi1/verbs.h void _hfi1_do_send(struct work_struct *work); work 295 drivers/infiniband/hw/hfi1/vnic_sdma.c struct iowait_work *work; work 299 drivers/infiniband/hw/hfi1/vnic_sdma.c work = iowait_get_ib_work(&vnic_sdma->wait); work 300 drivers/infiniband/hw/hfi1/vnic_sdma.c list_add_tail(&vnic_sdma->stx.list, &work->tx_head); work 916 drivers/infiniband/hw/hns/hns_roce_device.h struct work_struct work; work 940 drivers/infiniband/hw/hns/hns_roce_hw_v1.c static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work) work 945 drivers/infiniband/hw/hns/hns_roce_hw_v1.c lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work, work 946 drivers/infiniband/hw/hns/hns_roce_hw_v1.c work); work 977 drivers/infiniband/hw/hns/hns_roce_hw_v1.c INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn); work 985 drivers/infiniband/hw/hns/hns_roce_hw_v1.c queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); work 1027 drivers/infiniband/hw/hns/hns_roce_hw_v1.c static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) work 1044 drivers/infiniband/hw/hns/hns_roce_hw_v1.c mr_work = container_of(work, struct hns_roce_mr_free_work, work); work 1125 drivers/infiniband/hw/hns/hns_roce_hw_v1.c INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn); work 1133 
drivers/infiniband/hw/hns/hns_roce_hw_v1.c queue_work(free_mr->free_mr_wq, &(mr_work->work)); work 1056 drivers/infiniband/hw/hns/hns_roce_hw_v1.h struct work_struct work; work 1066 drivers/infiniband/hw/hns/hns_roce_hw_v1.h struct work_struct work; work 1074 drivers/infiniband/hw/hns/hns_roce_hw_v1.h struct work_struct work; work 4867 drivers/infiniband/hw/hns/hns_roce_hw_v2.c static void hns_roce_irq_work_handle(struct work_struct *work) work 4870 drivers/infiniband/hw/hns/hns_roce_hw_v2.c container_of(work, struct hns_roce_work, work); work 4940 drivers/infiniband/hw/hns/hns_roce_hw_v2.c INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle); work 4946 drivers/infiniband/hw/hns/hns_roce_hw_v2.c queue_work(hr_dev->irq_workq, &(irq_work->work)); work 214 drivers/infiniband/hw/i40iw/i40iw.h struct work_struct work; work 222 drivers/infiniband/hw/i40iw/i40iw.h struct work_struct work; work 68 drivers/infiniband/hw/i40iw/i40iw_cm.c static void i40iw_disconnect_worker(struct work_struct *work); work 3437 drivers/infiniband/hw/i40iw/i40iw_cm.c struct disconn_work *work; work 3442 drivers/infiniband/hw/i40iw/i40iw_cm.c work = kzalloc(sizeof(*work), GFP_ATOMIC); work 3443 drivers/infiniband/hw/i40iw/i40iw_cm.c if (!work) work 3452 drivers/infiniband/hw/i40iw/i40iw_cm.c kfree(work); work 3458 drivers/infiniband/hw/i40iw/i40iw_cm.c work->iwqp = iwqp; work 3459 drivers/infiniband/hw/i40iw/i40iw_cm.c INIT_WORK(&work->work, i40iw_disconnect_worker); work 3460 drivers/infiniband/hw/i40iw/i40iw_cm.c queue_work(cm_core->disconn_wq, &work->work); work 3619 drivers/infiniband/hw/i40iw/i40iw_cm.c static void i40iw_disconnect_worker(struct work_struct *work) work 3621 drivers/infiniband/hw/i40iw/i40iw_cm.c struct disconn_work *dwork = container_of(work, struct disconn_work, work); work 4168 drivers/infiniband/hw/i40iw/i40iw_cm.c static void i40iw_cm_event_handler(struct work_struct *work) work 4170 drivers/infiniband/hw/i40iw/i40iw_cm.c struct i40iw_cm_event *event = container_of(work, work 1731 drivers/infiniband/hw/i40iw/i40iw_main.c static void i40iw_l2params_worker(struct work_struct *work) work 1734 drivers/infiniband/hw/i40iw/i40iw_main.c container_of(work, struct l2params_work, work); work 1739 drivers/infiniband/hw/i40iw/i40iw_main.c kfree(work); work 1753 drivers/infiniband/hw/i40iw/i40iw_main.c struct l2params_work *work; work 1767 drivers/infiniband/hw/i40iw/i40iw_main.c work = kzalloc(sizeof(*work), GFP_KERNEL); work 1768 drivers/infiniband/hw/i40iw/i40iw_main.c if (!work) work 1773 drivers/infiniband/hw/i40iw/i40iw_main.c work->iwdev = iwdev; work 1774 drivers/infiniband/hw/i40iw/i40iw_main.c l2params = &work->l2params; work 1780 drivers/infiniband/hw/i40iw/i40iw_main.c INIT_WORK(&work->work, i40iw_l2params_worker); work 1781 drivers/infiniband/hw/i40iw/i40iw_main.c queue_work(iwdev->param_wq, &work->work); work 976 drivers/infiniband/hw/i40iw/i40iw_utils.c static void i40iw_cqp_generic_worker(struct work_struct *work) work 979 drivers/infiniband/hw/i40iw/i40iw_utils.c &((struct virtchnl_work *)work)->work_info; work 995 drivers/infiniband/hw/i40iw/i40iw_utils.c struct virtchnl_work *work; work 998 drivers/infiniband/hw/i40iw/i40iw_utils.c work = &iwdev->virtchnl_w[iw_vf_idx]; work 999 drivers/infiniband/hw/i40iw/i40iw_utils.c memcpy(&work->work_info, work_info, sizeof(*work_info)); work 1000 drivers/infiniband/hw/i40iw/i40iw_utils.c INIT_WORK(&work->work, i40iw_cqp_generic_worker); work 1001 drivers/infiniband/hw/i40iw/i40iw_utils.c queue_work(iwdev->virtchnl_wq, &work->work); work 1008 
drivers/infiniband/hw/i40iw/i40iw_utils.c static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work) work 1011 drivers/infiniband/hw/i40iw/i40iw_utils.c ((struct virtchnl_work *)work)->cqp_request; work 1038 drivers/infiniband/hw/i40iw/i40iw_utils.c struct virtchnl_work *work; work 1048 drivers/infiniband/hw/i40iw/i40iw_utils.c work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx]; work 1049 drivers/infiniband/hw/i40iw/i40iw_utils.c work->cqp_request = cqp_request; work 1050 drivers/infiniband/hw/i40iw/i40iw_utils.c INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker); work 1051 drivers/infiniband/hw/i40iw/i40iw_utils.c queue_work(iwdev->virtchnl_wq, &work->work); work 120 drivers/infiniband/hw/i40iw/i40iw_verbs.h struct work_struct work; work 147 drivers/infiniband/hw/i40iw/i40iw_verbs.h struct work_struct work; work 744 drivers/infiniband/hw/mlx4/alias_GUID.c static void alias_guid_work(struct work_struct *work) work 746 drivers/infiniband/hw/mlx4/alias_GUID.c struct delayed_work *delay = to_delayed_work(work); work 167 drivers/infiniband/hw/mlx4/cm.c static void id_map_ent_timeout(struct work_struct *work) work 169 drivers/infiniband/hw/mlx4/cm.c struct delayed_work *delay = to_delayed_work(work); work 1185 drivers/infiniband/hw/mlx4/mad.c void handle_port_mgmt_change_event(struct work_struct *work) work 1187 drivers/infiniband/hw/mlx4/mad.c struct ib_event_work *ew = container_of(work, struct ib_event_work, work); work 1306 drivers/infiniband/hw/mlx4/mad.c queue_work(ctx->wq, &ctx->work); work 1732 drivers/infiniband/hw/mlx4/mad.c static void mlx4_ib_tunnel_comp_worker(struct work_struct *work) work 1738 drivers/infiniband/hw/mlx4/mad.c ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); work 1896 drivers/infiniband/hw/mlx4/mad.c static void mlx4_ib_sqp_comp_worker(struct work_struct *work) work 1904 drivers/infiniband/hw/mlx4/mad.c ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); work 2044 drivers/infiniband/hw/mlx4/mad.c INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker); work 2046 drivers/infiniband/hw/mlx4/mad.c INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); work 2141 drivers/infiniband/hw/mlx4/mad.c void mlx4_ib_tunnels_update_work(struct work_struct *work) work 2145 drivers/infiniband/hw/mlx4/mad.c dmxw = container_of(work, struct mlx4_ib_demux_work, work); work 3081 drivers/infiniband/hw/mlx4/main.c INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); work 3091 drivers/infiniband/hw/mlx4/main.c queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); work 3164 drivers/infiniband/hw/mlx4/main.c static void handle_bonded_port_state_event(struct work_struct *work) work 3167 drivers/infiniband/hw/mlx4/main.c container_of(work, struct ib_event_work, work); work 3214 drivers/infiniband/hw/mlx4/main.c static void ib_sl2vl_update_work(struct work_struct *work) work 3216 drivers/infiniband/hw/mlx4/main.c struct ib_event_work *ew = container_of(work, struct ib_event_work, work); work 3232 drivers/infiniband/hw/mlx4/main.c INIT_WORK(&ew->work, ib_sl2vl_update_work); work 3235 drivers/infiniband/hw/mlx4/main.c queue_work(wq, &ew->work); work 3254 drivers/infiniband/hw/mlx4/main.c INIT_WORK(&ew->work, handle_bonded_port_state_event); work 3256 drivers/infiniband/hw/mlx4/main.c queue_work(wq, &ew->work); work 3298 drivers/infiniband/hw/mlx4/main.c INIT_WORK(&ew->work, handle_port_mgmt_change_event); work 3303 drivers/infiniband/hw/mlx4/main.c queue_work(wq, &ew->work); work 3305 drivers/infiniband/hw/mlx4/main.c handle_port_mgmt_change_event(&ew->work); work 108 
drivers/infiniband/hw/mlx4/mcg.c struct work_struct work; work 539 drivers/infiniband/hw/mlx4/mcg.c static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) work 541 drivers/infiniband/hw/mlx4/mcg.c struct delayed_work *delay = to_delayed_work(work); work 578 drivers/infiniband/hw/mlx4/mcg.c if (!queue_work(group->demux->mcg_wq, &group->work)) work 642 drivers/infiniband/hw/mlx4/mcg.c static void mlx4_ib_mcg_work_handler(struct work_struct *work) work 652 drivers/infiniband/hw/mlx4/mcg.c group = container_of(work, typeof(*group), work); work 837 drivers/infiniband/hw/mlx4/mcg.c INIT_WORK(&group->work, mlx4_ib_mcg_work_handler); work 878 drivers/infiniband/hw/mlx4/mcg.c if (!queue_work(group->demux->mcg_wq, &group->work)) work 914 drivers/infiniband/hw/mlx4/mcg.c if (!queue_work(ctx->mcg_wq, &group->work)) work 1114 drivers/infiniband/hw/mlx4/mcg.c struct work_struct work; work 1119 drivers/infiniband/hw/mlx4/mcg.c static void mcg_clean_task(struct work_struct *work) work 1121 drivers/infiniband/hw/mlx4/mcg.c struct clean_work *cw = container_of(work, struct clean_work, work); work 1130 drivers/infiniband/hw/mlx4/mcg.c struct clean_work *work; work 1143 drivers/infiniband/hw/mlx4/mcg.c work = kmalloc(sizeof *work, GFP_KERNEL); work 1144 drivers/infiniband/hw/mlx4/mcg.c if (!work) { work 1149 drivers/infiniband/hw/mlx4/mcg.c work->ctx = ctx; work 1150 drivers/infiniband/hw/mlx4/mcg.c work->destroy_wq = destroy_wq; work 1151 drivers/infiniband/hw/mlx4/mcg.c INIT_WORK(&work->work, mcg_clean_task); work 1152 drivers/infiniband/hw/mlx4/mcg.c queue_work(clean_wq, &work->work); work 422 drivers/infiniband/hw/mlx4/mlx4_ib.h struct work_struct work; work 460 drivers/infiniband/hw/mlx4/mlx4_ib.h struct work_struct work; work 623 drivers/infiniband/hw/mlx4/mlx4_ib.h struct work_struct work; work 840 drivers/infiniband/hw/mlx4/mlx4_ib.h void mlx4_ib_tunnels_update_work(struct work_struct *work); work 4461 drivers/infiniband/hw/mlx4/qp.c cancel_work_sync(&cq->work); work 902 drivers/infiniband/hw/mlx5/cq.c static void notify_soft_wc_handler(struct work_struct *work) work 904 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq, work 86 drivers/infiniband/hw/mlx5/main.c struct work_struct work; work 4456 drivers/infiniband/hw/mlx5/main.c static void pkey_change_handler(struct work_struct *work) work 4459 drivers/infiniband/hw/mlx5/main.c container_of(work, struct mlx5_ib_port_resources, work 4526 drivers/infiniband/hw/mlx5/main.c static void delay_drop_handler(struct work_struct *work) work 4530 drivers/infiniband/hw/mlx5/main.c container_of(work, struct mlx5_ib_delay_drop, work 4609 drivers/infiniband/hw/mlx5/main.c struct mlx5_ib_event_work *work = work 4610 drivers/infiniband/hw/mlx5/main.c container_of(_work, struct mlx5_ib_event_work, work); work 4615 drivers/infiniband/hw/mlx5/main.c if (work->is_slave) { work 4616 drivers/infiniband/hw/mlx5/main.c ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi); work 4620 drivers/infiniband/hw/mlx5/main.c ibdev = work->dev; work 4623 drivers/infiniband/hw/mlx5/main.c switch (work->event) { work 4627 drivers/infiniband/hw/mlx5/main.c ibev.element.port_num = (u8)(unsigned long)work->param; work 4631 drivers/infiniband/hw/mlx5/main.c if (handle_port_change(ibdev, work->param, &ibev)) work 4635 drivers/infiniband/hw/mlx5/main.c handle_general_event(ibdev, work->param, &ibev); work 4654 drivers/infiniband/hw/mlx5/main.c kfree(work); work 4660 drivers/infiniband/hw/mlx5/main.c struct mlx5_ib_event_work *work; work 4662 
drivers/infiniband/hw/mlx5/main.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 4663 drivers/infiniband/hw/mlx5/main.c if (!work) work 4666 drivers/infiniband/hw/mlx5/main.c INIT_WORK(&work->work, mlx5_ib_handle_event); work 4667 drivers/infiniband/hw/mlx5/main.c work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events); work 4668 drivers/infiniband/hw/mlx5/main.c work->is_slave = false; work 4669 drivers/infiniband/hw/mlx5/main.c work->param = param; work 4670 drivers/infiniband/hw/mlx5/main.c work->event = event; work 4672 drivers/infiniband/hw/mlx5/main.c queue_work(mlx5_ib_event_wq, &work->work); work 4680 drivers/infiniband/hw/mlx5/main.c struct mlx5_ib_event_work *work; work 4682 drivers/infiniband/hw/mlx5/main.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 4683 drivers/infiniband/hw/mlx5/main.c if (!work) work 4686 drivers/infiniband/hw/mlx5/main.c INIT_WORK(&work->work, mlx5_ib_handle_event); work 4687 drivers/infiniband/hw/mlx5/main.c work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events); work 4688 drivers/infiniband/hw/mlx5/main.c work->is_slave = true; work 4689 drivers/infiniband/hw/mlx5/main.c work->param = param; work 4690 drivers/infiniband/hw/mlx5/main.c work->event = event; work 4691 drivers/infiniband/hw/mlx5/main.c queue_work(mlx5_ib_event_wq, &work->work); work 685 drivers/infiniband/hw/mlx5/mlx5_ib.h struct work_struct work; work 940 drivers/infiniband/hw/mlx5/mlx5_ib.h struct work_struct work; work 379 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); work 399 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); work 406 drivers/infiniband/hw/mlx5/mr.c static void delayed_cache_work_func(struct work_struct *work) work 410 drivers/infiniband/hw/mlx5/mr.c ent = container_of(work, struct mlx5_cache_ent, dwork.work); work 414 drivers/infiniband/hw/mlx5/mr.c static void cache_work_func(struct work_struct *work) work 418 drivers/infiniband/hw/mlx5/mr.c ent = container_of(work, struct mlx5_cache_ent, work); work 452 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); work 487 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); work 492 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); work 519 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); work 532 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); work 631 drivers/infiniband/hw/mlx5/mr.c INIT_WORK(&ent->work, cache_work_func); work 652 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); work 77 drivers/infiniband/hw/mlx5/odp.c struct work_struct work; work 223 drivers/infiniband/hw/mlx5/odp.c static void mr_leaf_free_action(struct work_struct *work) work 225 drivers/infiniband/hw/mlx5/odp.c struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work); work 324 drivers/infiniband/hw/mlx5/odp.c schedule_work(&umem_odp->work); work 514 drivers/infiniband/hw/mlx5/odp.c INIT_WORK(&odp->work, mr_leaf_free_action); work 601 drivers/infiniband/hw/mlx5/odp.c schedule_work(&umem_odp->work); work 1350 drivers/infiniband/hw/mlx5/odp.c static void mlx5_ib_eqe_pf_action(struct work_struct *work) work 1352 drivers/infiniband/hw/mlx5/odp.c struct mlx5_pagefault *pfault = container_of(work, work 1354 drivers/infiniband/hw/mlx5/odp.c work); work 1371 drivers/infiniband/hw/mlx5/odp.c schedule_work(&eq->work); work 1439 drivers/infiniband/hw/mlx5/odp.c INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action); work 1440 drivers/infiniband/hw/mlx5/odp.c queue_work(eq->wq, &pfault->work); 
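The odp.c prefetch entries on the next line size their work item with struct_size(work, sg_list, num_sge), copy the caller's SG list into a flexible array member, and queue the result on system_unbound_wq, kvfree()ing it on failure. A sketch of that variable-length work item, with invented types (my_sge, my_batch_work):

#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_sge {
	u64 addr;
	u32 length;
};

struct my_batch_work {
	struct work_struct work;
	u32 num;
	struct my_sge sg[]; /* flexible array member, sized at allocation time */
};

static void my_batch_handler(struct work_struct *_work)
{
	struct my_batch_work *w = container_of(_work, struct my_batch_work, work);

	pr_info("processing %u sges\n", w->num);
	kvfree(w); /* allocated with kvzalloc(), so kvfree() */
}

static int my_batch_submit(const struct my_sge *sg, u32 num)
{
	/* struct_size() = header + num elements, with overflow checking */
	struct my_batch_work *w = kvzalloc(struct_size(w, sg, num), GFP_KERNEL);

	if (!w)
		return -ENOMEM;
	memcpy(w->sg, sg, num * sizeof(*sg));
	w->num = num;
	INIT_WORK(&w->work, my_batch_handler);
	queue_work(system_unbound_wq, &w->work);
	return 0;
}

system_unbound_wq trades CPU locality for the freedom to run long jobs on any CPU, which suits prefetch-style batch work.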
work 1459 drivers/infiniband/hw/mlx5/odp.c schedule_work(&eq->work); work 1475 drivers/infiniband/hw/mlx5/odp.c static void mlx5_ib_eq_pf_action(struct work_struct *work) work 1478 drivers/infiniband/hw/mlx5/odp.c container_of(work, struct mlx5_ib_pf_eq, work); work 1498 drivers/infiniband/hw/mlx5/odp.c INIT_WORK(&eq->work, mlx5_ib_eq_pf_action); work 1549 drivers/infiniband/hw/mlx5/odp.c cancel_work_sync(&eq->work); work 1626 drivers/infiniband/hw/mlx5/odp.c struct work_struct work; work 1721 drivers/infiniband/hw/mlx5/odp.c static void mlx5_ib_prefetch_mr_work(struct work_struct *work) work 1724 drivers/infiniband/hw/mlx5/odp.c container_of(work, struct prefetch_mr_work, work); work 1743 drivers/infiniband/hw/mlx5/odp.c struct prefetch_mr_work *work; work 1754 drivers/infiniband/hw/mlx5/odp.c work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL); work 1755 drivers/infiniband/hw/mlx5/odp.c if (!work) work 1758 drivers/infiniband/hw/mlx5/odp.c memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge)); work 1764 drivers/infiniband/hw/mlx5/odp.c work->pd = pd; work 1765 drivers/infiniband/hw/mlx5/odp.c work->pf_flags = pf_flags; work 1766 drivers/infiniband/hw/mlx5/odp.c work->num_sge = num_sge; work 1768 drivers/infiniband/hw/mlx5/odp.c INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work); work 1774 drivers/infiniband/hw/mlx5/odp.c queue_work(system_unbound_wq, &work->work); work 1776 drivers/infiniband/hw/mlx5/odp.c kvfree(work); work 6423 drivers/infiniband/hw/mlx5/qp.c cancel_work_sync(&cq->work); work 59 drivers/infiniband/hw/mthca/mthca_catas.c static void catas_reset(struct work_struct *work) work 434 drivers/infiniband/hw/mthca/mthca_eq.c int work = 0; work 442 drivers/infiniband/hw/mthca/mthca_eq.c work = 1; work 449 drivers/infiniband/hw/mthca/mthca_eq.c return IRQ_RETVAL(work); work 78 drivers/infiniband/hw/ocrdma/ocrdma.h void ocrdma_eqd_set_task(struct work_struct *work); work 3160 drivers/infiniband/hw/ocrdma/ocrdma_hw.c void ocrdma_eqd_set_task(struct work_struct *work) work 3163 drivers/infiniband/hw/ocrdma/ocrdma_hw.c container_of(work, struct ocrdma_dev, eqd_work.work); work 193 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct work_struct work; work 199 drivers/infiniband/hw/qedr/qedr_iw_cm.c static void qedr_iw_disconnect_worker(struct work_struct *work) work 202 drivers/infiniband/hw/qedr/qedr_iw_cm.c container_of(work, struct qedr_discon_work, work); work 248 drivers/infiniband/hw/qedr/qedr_iw_cm.c struct qedr_discon_work *work; work 252 drivers/infiniband/hw/qedr/qedr_iw_cm.c work = kzalloc(sizeof(*work), GFP_ATOMIC); work 253 drivers/infiniband/hw/qedr/qedr_iw_cm.c if (!work) work 262 drivers/infiniband/hw/qedr/qedr_iw_cm.c work->ep = ep; work 263 drivers/infiniband/hw/qedr/qedr_iw_cm.c work->event = params->event; work 264 drivers/infiniband/hw/qedr/qedr_iw_cm.c work->status = params->status; work 266 drivers/infiniband/hw/qedr/qedr_iw_cm.c INIT_WORK(&work->work, qedr_iw_disconnect_worker); work 267 drivers/infiniband/hw/qedr/qedr_iw_cm.c queue_work(dev->iwarp_wq, &work->work); work 2824 drivers/infiniband/hw/qedr/verbs.c int work = info->completed - info->completed_handled - 1; work 2826 drivers/infiniband/hw/qedr/verbs.c DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work); work 2827 drivers/infiniband/hw/qedr/verbs.c while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) { work 3513 drivers/infiniband/hw/qib/qib_iba7220.c static void autoneg_7220_work(struct work_struct *work) work 3520 drivers/infiniband/hw/qib/qib_iba7220.c ppd = 
&container_of(work, struct qib_chippport_specific, work 3521 drivers/infiniband/hw/qib/qib_iba7220.c autoneg_work.work)->pportdata; work 2998 drivers/infiniband/hw/qib/qib_iba7322.c queue_work(ib_wq, &qd->work); work 5366 drivers/infiniband/hw/qib/qib_iba7322.c static void autoneg_7322_work(struct work_struct *work) work 5372 drivers/infiniband/hw/qib/qib_iba7322.c ppd = container_of(work, struct qib_chippport_specific, work 5373 drivers/infiniband/hw/qib/qib_iba7322.c autoneg_work.work)->ppd; work 5489 drivers/infiniband/hw/qib/qib_iba7322.c static void ipg_7322_work(struct work_struct *work) work 5493 drivers/infiniband/hw/qib/qib_iba7322.c ppd = container_of(work, struct qib_chippport_specific, work 5494 drivers/infiniband/hw/qib/qib_iba7322.c ipg_work.work)->ppd; work 5587 drivers/infiniband/hw/qib/qib_iba7322.c queue_work(ib_wq, &qd->work); work 5935 drivers/infiniband/hw/qib/qib_iba7322.c static void qsfp_7322_event(struct work_struct *work) work 5944 drivers/infiniband/hw/qib/qib_iba7322.c qd = container_of(work, struct qib_qsfp_data, work); work 469 drivers/infiniband/hw/qib/qib_qsfp.c INIT_WORK(&qd->work, fevent); work 178 drivers/infiniband/hw/qib/qib_qsfp.h struct work_struct work; work 246 drivers/infiniband/hw/qib/qib_ruc.c void _qib_do_send(struct work_struct *work) work 248 drivers/infiniband/hw/qib/qib_ruc.c struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv, work 330 drivers/infiniband/hw/qib/qib_verbs.h void _qib_do_send(struct work_struct *work); work 73 drivers/infiniband/hw/usnic/usnic_uiom.h struct work_struct work; work 253 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h struct work_struct work; work 739 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c static void pvrdma_netdevice_event_work(struct work_struct *work) work 744 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c netdev_work = container_of(work, struct pvrdma_netdevice_work, work); work 771 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work); work 774 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c queue_work(event_wq, &netdev_work->work); work 161 drivers/infiniband/sw/rdmavt/cq.c static void send_complete(struct work_struct *work) work 163 drivers/infiniband/sw/rdmavt/cq.c struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask); work 805 drivers/infiniband/sw/rxe/rxe_qp.c static void rxe_qp_do_cleanup(struct work_struct *work) work 807 drivers/infiniband/sw/rxe/rxe_qp.c struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); work 190 drivers/infiniband/sw/siw/siw_cm.c struct siw_cm_work *work; work 193 drivers/infiniband/sw/siw/siw_cm.c work = list_entry(w, struct siw_cm_work, list); work 194 drivers/infiniband/sw/siw/siw_cm.c list_del(&work->list); work 195 drivers/infiniband/sw/siw/siw_cm.c kfree(work); work 203 drivers/infiniband/sw/siw/siw_cm.c if (cancel_delayed_work(&cep->mpa_timer->work)) { work 212 drivers/infiniband/sw/siw/siw_cm.c static void siw_put_work(struct siw_cm_work *work) work 214 drivers/infiniband/sw/siw/siw_cm.c INIT_LIST_HEAD(&work->list); work 215 drivers/infiniband/sw/siw/siw_cm.c spin_lock_bh(&work->cep->lock); work 216 drivers/infiniband/sw/siw/siw_cm.c list_add(&work->list, &work->cep->work_freelist); work 217 drivers/infiniband/sw/siw/siw_cm.c spin_unlock_bh(&work->cep->lock); work 274 drivers/infiniband/sw/siw/siw_cm.c struct siw_cm_work *work = NULL; work 278 drivers/infiniband/sw/siw/siw_cm.c work = list_entry(cep->work_freelist.next, struct siw_cm_work, work 280 
drivers/infiniband/sw/siw/siw_cm.c list_del_init(&work->list); work 283 drivers/infiniband/sw/siw/siw_cm.c return work; work 288 drivers/infiniband/sw/siw/siw_cm.c struct siw_cm_work *work; work 291 drivers/infiniband/sw/siw/siw_cm.c work = kmalloc(sizeof(*work), GFP_KERNEL); work 292 drivers/infiniband/sw/siw/siw_cm.c if (!work) { work 297 drivers/infiniband/sw/siw/siw_cm.c work->cep = cep; work 298 drivers/infiniband/sw/siw/siw_cm.c INIT_LIST_HEAD(&work->list); work 299 drivers/infiniband/sw/siw/siw_cm.c list_add(&work->list, &cep->work_freelist); work 1004 drivers/infiniband/sw/siw/siw_cm.c struct siw_cm_work *work; work 1008 drivers/infiniband/sw/siw/siw_cm.c work = container_of(w, struct siw_cm_work, work.work); work 1009 drivers/infiniband/sw/siw/siw_cm.c cep = work->cep; work 1013 drivers/infiniband/sw/siw/siw_cm.c work->type, cep->state); work 1017 drivers/infiniband/sw/siw/siw_cm.c switch (work->type) { work 1143 drivers/infiniband/sw/siw/siw_cm.c WARN(1, "Undefined CM work type: %d\n", work->type); work 1183 drivers/infiniband/sw/siw/siw_cm.c siw_put_work(work); work 1191 drivers/infiniband/sw/siw/siw_cm.c struct siw_cm_work *work = siw_get_work(cep); work 1194 drivers/infiniband/sw/siw/siw_cm.c if (!work) { work 1198 drivers/infiniband/sw/siw/siw_cm.c work->type = type; work 1199 drivers/infiniband/sw/siw/siw_cm.c work->cep = cep; work 1203 drivers/infiniband/sw/siw/siw_cm.c INIT_DELAYED_WORK(&work->work, siw_cm_work_handler); work 1206 drivers/infiniband/sw/siw/siw_cm.c cep->mpa_timer = work; work 1216 drivers/infiniband/sw/siw/siw_cm.c queue_delayed_work(siw_cm_wq, &work->work, delay); work 86 drivers/infiniband/sw/siw/siw_cm.h struct delayed_work work; work 436 drivers/infiniband/sw/siw/siw_main.c static void siw_netdev_down(struct work_struct *work) work 439 drivers/infiniband/sw/siw/siw_main.c container_of(work, struct siw_device, netdev_down); work 312 drivers/infiniband/ulp/ipoib/ipoib.h struct work_struct work; work 500 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_reap_ah(struct work_struct *work); work 510 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_ib_dev_flush_light(struct work_struct *work); work 511 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_ib_dev_flush_normal(struct work_struct *work); work 512 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_ib_dev_flush_heavy(struct work_struct *work); work 513 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_pkey_event(struct work_struct *work); work 524 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_mcast_join_task(struct work_struct *work); work 525 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_mcast_carrier_on_task(struct work_struct *work); work 528 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_mcast_restart_task(struct work_struct *work); work 1341 drivers/infiniband/ulp/ipoib/ipoib_cm.c static void ipoib_cm_tx_start(struct work_struct *work) work 1343 drivers/infiniband/ulp/ipoib/ipoib_cm.c struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, work 1401 drivers/infiniband/ulp/ipoib/ipoib_cm.c static void ipoib_cm_tx_reap(struct work_struct *work) work 1403 drivers/infiniband/ulp/ipoib/ipoib_cm.c struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, work 1426 drivers/infiniband/ulp/ipoib/ipoib_cm.c static void ipoib_cm_skb_reap(struct work_struct *work) work 1428 drivers/infiniband/ulp/ipoib/ipoib_cm.c struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, work 1475 drivers/infiniband/ulp/ipoib/ipoib_cm.c static void ipoib_cm_rx_reap(struct work_struct 
*work) work 1477 drivers/infiniband/ulp/ipoib/ipoib_cm.c ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv, work 1481 drivers/infiniband/ulp/ipoib/ipoib_cm.c static void ipoib_cm_stale_task(struct work_struct *work) work 1483 drivers/infiniband/ulp/ipoib/ipoib_cm.c struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, work 1484 drivers/infiniband/ulp/ipoib/ipoib_cm.c cm.stale_task.work); work 345 drivers/infiniband/ulp/ipoib/ipoib_ib.c static void ipoib_qp_state_validate_work(struct work_struct *work) work 348 drivers/infiniband/ulp/ipoib/ipoib_ib.c container_of(work, struct ipoib_qp_state_validate, work); work 428 drivers/infiniband/ulp/ipoib/ipoib_ib.c INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work); work 430 drivers/infiniband/ulp/ipoib/ipoib_ib.c queue_work(priv->wq, &qp_work->work); work 693 drivers/infiniband/ulp/ipoib/ipoib_ib.c void ipoib_reap_ah(struct work_struct *work) work 696 drivers/infiniband/ulp/ipoib/ipoib_ib.c container_of(work, struct ipoib_dev_priv, ah_reap_task.work); work 712 drivers/infiniband/ulp/ipoib/ipoib_ib.c ipoib_reap_ah(&priv->ah_reap_task.work); work 1264 drivers/infiniband/ulp/ipoib/ipoib_ib.c void ipoib_ib_dev_flush_light(struct work_struct *work) work 1267 drivers/infiniband/ulp/ipoib/ipoib_ib.c container_of(work, struct ipoib_dev_priv, flush_light); work 1272 drivers/infiniband/ulp/ipoib/ipoib_ib.c void ipoib_ib_dev_flush_normal(struct work_struct *work) work 1275 drivers/infiniband/ulp/ipoib/ipoib_ib.c container_of(work, struct ipoib_dev_priv, flush_normal); work 1280 drivers/infiniband/ulp/ipoib/ipoib_ib.c void ipoib_ib_dev_flush_heavy(struct work_struct *work) work 1283 drivers/infiniband/ulp/ipoib/ipoib_ib.c container_of(work, struct ipoib_dev_priv, flush_heavy); work 1352 drivers/infiniband/ulp/ipoib/ipoib_main.c static void ipoib_reap_neigh(struct work_struct *work) work 1355 drivers/infiniband/ulp/ipoib/ipoib_main.c container_of(work, struct ipoib_dev_priv, neigh_reap_task.work); work 326 drivers/infiniband/ulp/ipoib/ipoib_multicast.c void ipoib_mcast_carrier_on_task(struct work_struct *work) work 328 drivers/infiniband/ulp/ipoib/ipoib_multicast.c struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, work 565 drivers/infiniband/ulp/ipoib/ipoib_multicast.c void ipoib_mcast_join_task(struct work_struct *work) work 568 drivers/infiniband/ulp/ipoib/ipoib_multicast.c container_of(work, struct ipoib_dev_priv, mcast_task.work); work 882 drivers/infiniband/ulp/ipoib/ipoib_multicast.c void ipoib_mcast_restart_task(struct work_struct *work) work 885 drivers/infiniband/ulp/ipoib/ipoib_multicast.c container_of(work, struct ipoib_dev_priv, restart_task); work 207 drivers/infiniband/ulp/ipoib/ipoib_vlan.c struct work_struct work; work 221 drivers/infiniband/ulp/ipoib/ipoib_vlan.c static void ipoib_vlan_delete_task(struct work_struct *work) work 224 drivers/infiniband/ulp/ipoib/ipoib_vlan.c container_of(work, struct ipoib_vlan_delete_work, work); work 265 drivers/infiniband/ulp/ipoib/ipoib_vlan.c struct ipoib_vlan_delete_work *work; work 267 drivers/infiniband/ulp/ipoib/ipoib_vlan.c work = kmalloc(sizeof(*work), GFP_KERNEL); work 268 drivers/infiniband/ulp/ipoib/ipoib_vlan.c if (!work) { work 276 drivers/infiniband/ulp/ipoib/ipoib_vlan.c work->dev = priv->dev; work 277 drivers/infiniband/ulp/ipoib/ipoib_vlan.c INIT_WORK(&work->work, ipoib_vlan_delete_task); work 278 drivers/infiniband/ulp/ipoib/ipoib_vlan.c queue_work(ipoib_workqueue, &work->work); work 574 drivers/infiniband/ulp/iser/iscsi_iser.h 
void iser_release_work(struct work_struct *work); work 506 drivers/infiniband/ulp/iser/iser_verbs.c void iser_release_work(struct work_struct *work) work 510 drivers/infiniband/ulp/iser/iser_verbs.c iser_conn = container_of(work, struct iser_conn, release_work); work 50 drivers/infiniband/ulp/isert/ib_isert.c static void isert_release_work(struct work_struct *work); work 1740 drivers/infiniband/ulp/isert/ib_isert.c isert_do_control_comp(struct work_struct *work) work 1742 drivers/infiniband/ulp/isert/ib_isert.c struct isert_cmd *isert_cmd = container_of(work, work 2548 drivers/infiniband/ulp/isert/ib_isert.c static void isert_release_work(struct work_struct *work) work 2550 drivers/infiniband/ulp/isert/ib_isert.c struct isert_conn *isert_conn = container_of(work, work 1133 drivers/infiniband/ulp/srp/ib_srp.c static void srp_remove_work(struct work_struct *work) work 1136 drivers/infiniband/ulp/srp/ib_srp.c container_of(work, struct srp_target_port, remove_work); work 2315 drivers/infiniband/ulp/srp/ib_srp.c static void srp_tl_err_work(struct work_struct *work) work 2319 drivers/infiniband/ulp/srp/ib_srp.c target = container_of(work, struct srp_target_port, tl_err_work); work 173 drivers/infiniband/ulp/srpt/ib_srpt.c schedule_work(&sport->work); work 2926 drivers/infiniband/ulp/srpt/ib_srpt.c static void srpt_refresh_port_work(struct work_struct *work) work 2928 drivers/infiniband/ulp/srpt/ib_srpt.c struct srpt_port *sport = container_of(work, struct srpt_port, work); work 3182 drivers/infiniband/ulp/srpt/ib_srpt.c INIT_WORK(&sport->work, srpt_refresh_port_work); work 3238 drivers/infiniband/ulp/srpt/ib_srpt.c cancel_work_sync(&sdev->port[i].work); work 395 drivers/infiniband/ulp/srpt/ib_srpt.h struct work_struct work; work 318 drivers/input/gameport/gameport.c static void gameport_handle_events(struct work_struct *work) work 29 drivers/input/input-polldev.c queue_delayed_work(system_freezable_wq, &dev->work, delay); work 32 drivers/input/input-polldev.c static void input_polled_device_work(struct work_struct *work) work 35 drivers/input/input-polldev.c container_of(work, struct input_polled_dev, work.work); work 61 drivers/input/input-polldev.c cancel_delayed_work_sync(&dev->work); work 101 drivers/input/input-polldev.c cancel_delayed_work_sync(&polldev->work); work 307 drivers/input/input-polldev.c INIT_DELAYED_WORK(&dev->work, input_polled_device_work); work 23 drivers/input/input-poller.c struct delayed_work work; work 34 drivers/input/input-poller.c queue_delayed_work(system_freezable_wq, &poller->work, delay); work 37 drivers/input/input-poller.c static void input_dev_poller_work(struct work_struct *work) work 40 drivers/input/input-poller.c container_of(work, struct input_dev_poller, work.work); work 65 drivers/input/input-poller.c cancel_delayed_work_sync(&poller->work); work 85 drivers/input/input-poller.c INIT_DELAYED_WORK(&poller->work, input_dev_poller_work); work 161 drivers/input/input-poller.c cancel_delayed_work_sync(&poller->work); work 589 drivers/input/joystick/xpad.c struct work_struct work; /* init/remove device from callback */ work 748 drivers/input/joystick/xpad.c static void xpad_presence_work(struct work_struct *work) work 750 drivers/input/joystick/xpad.c struct usb_xpad *xpad = container_of(work, struct usb_xpad, work); work 798 drivers/input/joystick/xpad.c schedule_work(&xpad->work); work 1560 drivers/input/joystick/xpad.c flush_work(&xpad->work); work 1754 drivers/input/joystick/xpad.c INIT_WORK(&xpad->work, xpad_presence_work); work 43 
drivers/input/keyboard/adp5588-keys.c struct delayed_work work; work 292 drivers/input/keyboard/adp5588-keys.c static void adp5588_work(struct work_struct *work) work 294 drivers/input/keyboard/adp5588-keys.c struct adp5588_kpad *kpad = container_of(work, work 295 drivers/input/keyboard/adp5588-keys.c struct adp5588_kpad, work.work); work 324 drivers/input/keyboard/adp5588-keys.c schedule_delayed_work(&kpad->work, kpad->delay); work 508 drivers/input/keyboard/adp5588-keys.c INIT_DELAYED_WORK(&kpad->work, adp5588_work); work 590 drivers/input/keyboard/adp5588-keys.c cancel_delayed_work_sync(&kpad->work); work 607 drivers/input/keyboard/adp5588-keys.c cancel_delayed_work_sync(&kpad->work); work 622 drivers/input/keyboard/adp5588-keys.c cancel_delayed_work_sync(&kpad->work); work 414 drivers/input/keyboard/applespi.c struct work_struct work; work 1322 drivers/input/keyboard/applespi.c static void applespi_worker(struct work_struct *work) work 1325 drivers/input/keyboard/applespi.c container_of(work, struct applespi_data, work); work 1341 drivers/input/keyboard/applespi.c schedule_work(&applespi->work); work 1647 drivers/input/keyboard/applespi.c INIT_WORK(&applespi->work, applespi_worker); work 588 drivers/input/keyboard/atkbd.c static void atkbd_event_work(struct work_struct *work) work 590 drivers/input/keyboard/atkbd.c struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work); work 42 drivers/input/keyboard/gpio_keys.c struct delayed_work work; work 147 drivers/input/keyboard/gpio_keys.c cancel_delayed_work_sync(&bdata->work); work 379 drivers/input/keyboard/gpio_keys.c static void gpio_keys_gpio_work_func(struct work_struct *work) work 382 drivers/input/keyboard/gpio_keys.c container_of(work, struct gpio_button_data, work.work); work 412 drivers/input/keyboard/gpio_keys.c &bdata->work, work 472 drivers/input/keyboard/gpio_keys.c cancel_delayed_work_sync(&bdata->work); work 564 drivers/input/keyboard/gpio_keys.c INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func); work 566 drivers/input/keyboard/lkkbd.c static void lkkbd_reinit(struct work_struct *work) work 568 drivers/input/keyboard/lkkbd.c struct lkkbd *lk = container_of(work, struct lkkbd, tq); work 128 drivers/input/keyboard/lm8323.c struct work_struct work; work 153 drivers/input/keyboard/lm8323.c #define work_to_pwm(w) container_of(w, struct lm8323_pwm, work) work 356 drivers/input/keyboard/lm8323.c schedule_work(&pwm->work); work 441 drivers/input/keyboard/lm8323.c static void lm8323_pwm_work(struct work_struct *work) work 443 drivers/input/keyboard/lm8323.c struct lm8323_pwm *pwm = work_to_pwm(work); work 508 drivers/input/keyboard/lm8323.c schedule_work(&pwm->work); work 515 drivers/input/keyboard/lm8323.c schedule_work(&pwm->work); work 517 drivers/input/keyboard/lm8323.c lm8323_pwm_work(&pwm->work); work 570 drivers/input/keyboard/lm8323.c INIT_WORK(&pwm->work, lm8323_pwm_work); work 33 drivers/input/keyboard/matrix_keypad.c struct delayed_work work; work 114 drivers/input/keyboard/matrix_keypad.c static void matrix_keypad_scan(struct work_struct *work) work 117 drivers/input/keyboard/matrix_keypad.c container_of(work, struct matrix_keypad, work.work); work 189 drivers/input/keyboard/matrix_keypad.c schedule_delayed_work(&keypad->work, work 208 drivers/input/keyboard/matrix_keypad.c schedule_delayed_work(&keypad->work, 0); work 221 drivers/input/keyboard/matrix_keypad.c flush_delayed_work(&keypad->work); work 508 drivers/input/keyboard/matrix_keypad.c INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan); 
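The keypad entries above (adp5588-keys, gpio_keys, matrix_keypad) share the delayed-work debounce idiom: an interrupt arms a delayed_work, the worker rescans after the debounce interval and may re-arm itself, and suspend/remove paths call cancel_delayed_work_sync(). A minimal sketch under hypothetical names (my_keypad, my_scan); note the double ".work" in container_of(), because a delayed_work embeds a plain work_struct and the handler receives a pointer to that inner member:

/* Sketch of the delayed-work debounce/poll pattern; names hypothetical. */
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_keypad {
	struct delayed_work work;
	unsigned int debounce_ms;
	/* ... */
};

static void my_scan(struct work_struct *work)
{
	struct my_keypad *kp = container_of(work, struct my_keypad, work.work);

	/* ... read the matrix and report key state ... */

	/* re-arm for the next scan while keys are still down, if needed */
	schedule_delayed_work(&kp->work, msecs_to_jiffies(kp->debounce_ms));
}

static void my_keypad_init(struct my_keypad *kp)
{
	INIT_DELAYED_WORK(&kp->work, my_scan);
}

static void my_keypad_teardown(struct my_keypad *kp)
{
	/* the same call the listing shows on suspend/remove paths */
	cancel_delayed_work_sync(&kp->work);
}

Some drivers in the listing (hgpk.c, ps2-gpio.c, pcap_ts.c) spell the recovery step as to_delayed_work(work) instead, which is equivalent to the container_of() on the inner work member.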
work 212 drivers/input/keyboard/qt2160.c static void qt2160_worker(struct work_struct *work) work 215 drivers/input/keyboard/qt2160.c container_of(work, struct qt2160_data, dwork.work); work 207 drivers/input/keyboard/sunkbd.c static void sunkbd_reinit(struct work_struct *work) work 209 drivers/input/keyboard/sunkbd.c struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); work 132 drivers/input/keyboard/tca6416-keypad.c static void tca6416_keys_work_func(struct work_struct *work) work 135 drivers/input/keyboard/tca6416-keypad.c container_of(work, struct tca6416_keypad_chip, dwork.work); work 25 drivers/input/misc/arizona-haptics.c struct work_struct work; work 31 drivers/input/misc/arizona-haptics.c static void arizona_haptics_work(struct work_struct *work) work 33 drivers/input/misc/arizona-haptics.c struct arizona_haptics *haptics = container_of(work, work 35 drivers/input/misc/arizona-haptics.c work); work 134 drivers/input/misc/arizona-haptics.c schedule_work(&haptics->work); work 144 drivers/input/misc/arizona-haptics.c cancel_work_sync(&haptics->work); work 172 drivers/input/misc/arizona-haptics.c INIT_WORK(&haptics->work, arizona_haptics_work); work 21 drivers/input/misc/da9052_onkey.c struct delayed_work work; work 49 drivers/input/misc/da9052_onkey.c schedule_delayed_work(&onkey->work, work 54 drivers/input/misc/da9052_onkey.c static void da9052_onkey_work(struct work_struct *work) work 56 drivers/input/misc/da9052_onkey.c struct da9052_onkey *onkey = container_of(work, struct da9052_onkey, work 57 drivers/input/misc/da9052_onkey.c work.work); work 93 drivers/input/misc/da9052_onkey.c INIT_DELAYED_WORK(&onkey->work, da9052_onkey_work); work 122 drivers/input/misc/da9052_onkey.c cancel_delayed_work_sync(&onkey->work); work 135 drivers/input/misc/da9052_onkey.c cancel_delayed_work_sync(&onkey->work); work 20 drivers/input/misc/da9055_onkey.c struct delayed_work work; work 47 drivers/input/misc/da9055_onkey.c schedule_delayed_work(&onkey->work, msecs_to_jiffies(10)); work 51 drivers/input/misc/da9055_onkey.c static void da9055_onkey_work(struct work_struct *work) work 53 drivers/input/misc/da9055_onkey.c struct da9055_onkey *onkey = container_of(work, struct da9055_onkey, work 54 drivers/input/misc/da9055_onkey.c work.work); work 103 drivers/input/misc/da9055_onkey.c INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work); work 128 drivers/input/misc/da9055_onkey.c cancel_delayed_work_sync(&onkey->work); work 142 drivers/input/misc/da9055_onkey.c cancel_delayed_work_sync(&onkey->work); work 36 drivers/input/misc/da9063_onkey.c struct delayed_work work; work 82 drivers/input/misc/da9063_onkey.c static void da9063_poll_on(struct work_struct *work) work 84 drivers/input/misc/da9063_onkey.c struct da9063_onkey *onkey = container_of(work, work 86 drivers/input/misc/da9063_onkey.c work.work); work 156 drivers/input/misc/da9063_onkey.c schedule_delayed_work(&onkey->work, msecs_to_jiffies(50)); work 172 drivers/input/misc/da9063_onkey.c schedule_delayed_work(&onkey->work, 0); work 189 drivers/input/misc/da9063_onkey.c cancel_delayed_work_sync(&onkey->work); work 237 drivers/input/misc/da9063_onkey.c INIT_DELAYED_WORK(&onkey->work, da9063_poll_on); work 186 drivers/input/misc/drv260x.c struct work_struct work; work 248 drivers/input/misc/drv260x.c static void drv260x_worker(struct work_struct *work) work 250 drivers/input/misc/drv260x.c struct drv260x_data *haptics = container_of(work, struct drv260x_data, work); work 285 drivers/input/misc/drv260x.c schedule_work(&haptics->work); work 295 
drivers/input/misc/drv260x.c cancel_work_sync(&haptics->work); work 549 drivers/input/misc/drv260x.c INIT_WORK(&haptics->work, drv260x_worker); work 57 drivers/input/misc/drv2665.c struct work_struct work; work 76 drivers/input/misc/drv2665.c static void drv2665_worker(struct work_struct *work) work 79 drivers/input/misc/drv2665.c container_of(work, struct drv2665_data, work); work 108 drivers/input/misc/drv2665.c schedule_work(&haptics->work); work 118 drivers/input/misc/drv2665.c cancel_work_sync(&haptics->work); work 197 drivers/input/misc/drv2665.c INIT_WORK(&haptics->work, drv2665_worker); work 104 drivers/input/misc/drv2667.c struct work_struct work; work 180 drivers/input/misc/drv2667.c static void drv2667_worker(struct work_struct *work) work 182 drivers/input/misc/drv2667.c struct drv2667_data *haptics = container_of(work, struct drv2667_data, work); work 238 drivers/input/misc/drv2667.c schedule_work(&haptics->work); work 248 drivers/input/misc/drv2667.c cancel_work_sync(&haptics->work); work 372 drivers/input/misc/drv2667.c INIT_WORK(&haptics->work, drv2667_worker); work 18 drivers/input/misc/gpio-beeper.c struct work_struct work; work 28 drivers/input/misc/gpio-beeper.c static void gpio_beeper_work(struct work_struct *work) work 30 drivers/input/misc/gpio-beeper.c struct gpio_beeper *beep = container_of(work, struct gpio_beeper, work); work 48 drivers/input/misc/gpio-beeper.c schedule_work(&beep->work); work 57 drivers/input/misc/gpio-beeper.c cancel_work_sync(&beep->work); work 78 drivers/input/misc/gpio-beeper.c INIT_WORK(&beep->work, gpio_beeper_work); work 66 drivers/input/misc/gpio-vibra.c static void gpio_vibrator_play_work(struct work_struct *work) work 69 drivers/input/misc/gpio-vibra.c container_of(work, struct gpio_vibrator, play_work); work 64 drivers/input/misc/max77693-haptic.c struct work_struct work; work 214 drivers/input/misc/max77693-haptic.c static void max77693_haptic_play_work(struct work_struct *work) work 217 drivers/input/misc/max77693-haptic.c container_of(work, struct max77693_haptic, work); work 253 drivers/input/misc/max77693-haptic.c schedule_work(&haptic->work); work 282 drivers/input/misc/max77693-haptic.c cancel_work_sync(&haptic->work); work 324 drivers/input/misc/max77693-haptic.c INIT_WORK(&haptic->work, max77693_haptic_play_work); work 38 drivers/input/misc/max8997_haptic.c struct work_struct work; work 206 drivers/input/misc/max8997_haptic.c static void max8997_haptic_play_effect_work(struct work_struct *work) work 209 drivers/input/misc/max8997_haptic.c container_of(work, struct max8997_haptic, work); work 226 drivers/input/misc/max8997_haptic.c schedule_work(&chip->work); work 235 drivers/input/misc/max8997_haptic.c cancel_work_sync(&chip->work); work 265 drivers/input/misc/max8997_haptic.c INIT_WORK(&chip->work, max8997_haptic_play_effect_work); work 115 drivers/input/misc/msm-vibrator.c static void msm_vibrator_worker(struct work_struct *work) work 117 drivers/input/misc/msm-vibrator.c struct msm_vibrator *vibrator = container_of(work, work 60 drivers/input/misc/palmas-pwrbutton.c static void palmas_power_button_work(struct work_struct *work) work 62 drivers/input/misc/palmas-pwrbutton.c struct palmas_pwron *pwron = container_of(work, work 64 drivers/input/misc/palmas-pwrbutton.c input_work.work); work 60 drivers/input/misc/pm8xxx-vibrator.c struct work_struct work; work 102 drivers/input/misc/pm8xxx-vibrator.c static void pm8xxx_work_handler(struct work_struct *work) work 104 drivers/input/misc/pm8xxx-vibrator.c struct pm8xxx_vib *vib = 
container_of(work, struct pm8xxx_vib, work); work 140 drivers/input/misc/pm8xxx-vibrator.c cancel_work_sync(&vib->work); work 162 drivers/input/misc/pm8xxx-vibrator.c schedule_work(&vib->work); work 187 drivers/input/misc/pm8xxx-vibrator.c INIT_WORK(&vib->work, pm8xxx_work_handler); work 22 drivers/input/misc/pwm-beeper.c struct work_struct work; work 69 drivers/input/misc/pwm-beeper.c static void pwm_beeper_work(struct work_struct *work) work 71 drivers/input/misc/pwm-beeper.c struct pwm_beeper *beeper = container_of(work, struct pwm_beeper, work); work 104 drivers/input/misc/pwm-beeper.c schedule_work(&beeper->work); work 111 drivers/input/misc/pwm-beeper.c cancel_work_sync(&beeper->work); work 162 drivers/input/misc/pwm-beeper.c INIT_WORK(&beeper->work, pwm_beeper_work); work 233 drivers/input/misc/pwm-beeper.c schedule_work(&beeper->work); work 89 drivers/input/misc/pwm-vibra.c static void pwm_vibrator_play_work(struct work_struct *work) work 91 drivers/input/misc/pwm-vibra.c struct pwm_vibrator *vibrator = container_of(work, work 25 drivers/input/misc/regulator-haptic.c struct work_struct work; work 81 drivers/input/misc/regulator-haptic.c static void regulator_haptic_work(struct work_struct *work) work 83 drivers/input/misc/regulator-haptic.c struct regulator_haptic *haptic = container_of(work, work 84 drivers/input/misc/regulator-haptic.c struct regulator_haptic, work); work 103 drivers/input/misc/regulator-haptic.c schedule_work(&haptic->work); work 112 drivers/input/misc/regulator-haptic.c cancel_work_sync(&haptic->work); work 157 drivers/input/misc/regulator-haptic.c INIT_WORK(&haptic->work, regulator_haptic_work); work 47 drivers/input/misc/sc27xx-vibra.c static void sc27xx_vibra_play_work(struct work_struct *work) work 49 drivers/input/misc/sc27xx-vibra.c struct vibra_info *info = container_of(work, struct vibra_info, work 21 drivers/input/misc/sirfsoc-onkey.c struct delayed_work work; work 38 drivers/input/misc/sirfsoc-onkey.c static void sirfsoc_pwrc_report_event(struct work_struct *work) work 41 drivers/input/misc/sirfsoc-onkey.c container_of(work, struct sirfsoc_pwrc_drvdata, work.work); work 44 drivers/input/misc/sirfsoc-onkey.c schedule_delayed_work(&pwrcdrv->work, work 64 drivers/input/misc/sirfsoc-onkey.c schedule_delayed_work(&pwrcdrv->work, work 97 drivers/input/misc/sirfsoc-onkey.c cancel_delayed_work_sync(&pwrcdrv->work); work 140 drivers/input/misc/sirfsoc-onkey.c INIT_DELAYED_WORK(&pwrcdrv->work, sirfsoc_pwrc_report_event); work 85 drivers/input/misc/twl4030-vibra.c static void vibra_play_work(struct work_struct *work) work 87 drivers/input/misc/twl4030-vibra.c struct vibra_info *info = container_of(work, work 166 drivers/input/misc/twl6040-vibra.c static void vibra_play_work(struct work_struct *work) work 168 drivers/input/misc/twl6040-vibra.c struct vibra_info *info = container_of(work, work 32 drivers/input/misc/wm831x-on.c struct delayed_work work; work 40 drivers/input/misc/wm831x-on.c static void wm831x_poll_on(struct work_struct *work) work 42 drivers/input/misc/wm831x-on.c struct wm831x_on *wm831x_on = container_of(work, struct wm831x_on, work 43 drivers/input/misc/wm831x-on.c work.work); work 59 drivers/input/misc/wm831x-on.c schedule_delayed_work(&wm831x_on->work, 100); work 66 drivers/input/misc/wm831x-on.c schedule_delayed_work(&wm831x_on->work, 0); work 86 drivers/input/misc/wm831x-on.c INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on); work 132 drivers/input/misc/wm831x-on.c cancel_delayed_work_sync(&wm831x_on->work); work 1399 
drivers/input/mouse/alps.c static void alps_register_bare_ps2_mouse(struct work_struct *work) work 1402 drivers/input/mouse/alps.c container_of(work, struct alps_data, dev3_register_work.work); work 216 drivers/input/mouse/appletouch.c struct work_struct work; work 318 drivers/input/mouse/appletouch.c static void atp_reinit(struct work_struct *work) work 320 drivers/input/mouse/appletouch.c struct atp *dev = container_of(work, struct atp, work); work 781 drivers/input/mouse/appletouch.c schedule_work(&dev->work); work 812 drivers/input/mouse/appletouch.c cancel_work_sync(&dev->work); work 926 drivers/input/mouse/appletouch.c INIT_WORK(&dev->work, atp_reinit); work 914 drivers/input/mouse/hgpk.c static void hgpk_recalib_work(struct work_struct *work) work 916 drivers/input/mouse/hgpk.c struct delayed_work *w = to_delayed_work(work); work 239 drivers/input/mouse/psmouse-base.c void psmouse_queue_work(struct psmouse *psmouse, struct delayed_work *work, work 242 drivers/input/mouse/psmouse-base.c queue_delayed_work(kpsmoused_wq, work, delay); work 1334 drivers/input/mouse/psmouse-base.c static void psmouse_resync(struct work_struct *work) work 1337 drivers/input/mouse/psmouse-base.c container_of(work, struct psmouse, resync_work.work); work 128 drivers/input/mouse/psmouse-smbus.c struct work_struct work; work 132 drivers/input/mouse/psmouse-smbus.c static void psmouse_smbus_remove_i2c_device(struct work_struct *work) work 135 drivers/input/mouse/psmouse-smbus.c container_of(work, struct psmouse_smbus_removal_work, work); work 159 drivers/input/mouse/psmouse-smbus.c INIT_WORK(&rwork->work, psmouse_smbus_remove_i2c_device); work 162 drivers/input/mouse/psmouse-smbus.c schedule_work(&rwork->work); work 133 drivers/input/mouse/psmouse.h void psmouse_queue_work(struct psmouse *psmouse, struct delayed_work *work, work 431 drivers/input/mouse/synaptics_i2c.c static void synaptics_i2c_work_handler(struct work_struct *work) work 435 drivers/input/mouse/synaptics_i2c.c container_of(work, struct synaptics_i2c, dwork.work); work 108 drivers/input/rmi4/rmi_f54.c struct delayed_work work; work 203 drivers/input/rmi4/rmi_f54.c queue_delayed_work(f54->workqueue, &f54->work, 0); work 514 drivers/input/rmi4/rmi_f54.c static void rmi_f54_work(struct work_struct *work) work 516 drivers/input/rmi4/rmi_f54.c struct f54_data *f54 = container_of(work, struct f54_data, work.work); work 590 drivers/input/rmi4/rmi_f54.c queue_delayed_work(f54->workqueue, &f54->work, work 675 drivers/input/rmi4/rmi_f54.c INIT_DELAYED_WORK(&f54->work, rmi_f54_work); work 721 drivers/input/rmi4/rmi_f54.c cancel_delayed_work_sync(&f54->work); work 929 drivers/input/serio/hp_sdc.c static void request_module_delayed(struct work_struct *work) work 114 drivers/input/serio/ps2-gpio.c static void ps2_gpio_tx_work_fn(struct work_struct *work) work 116 drivers/input/serio/ps2-gpio.c struct delayed_work *dwork = to_delayed_work(work); work 199 drivers/input/serio/serio.c static void serio_handle_event(struct work_struct *work) work 206 drivers/input/tablet/pegasus_notetaker.c static void pegasus_init(struct work_struct *work) work 208 drivers/input/tablet/pegasus_notetaker.c struct pegasus *pegasus = container_of(work, struct pegasus, init); work 1457 drivers/input/touchscreen/cyttsp4_core.c static void cyttsp4_watchdog_work(struct work_struct *work) work 1460 drivers/input/touchscreen/cyttsp4_core.c container_of(work, struct cyttsp4, watchdog_work); work 1704 drivers/input/touchscreen/cyttsp4_core.c static void cyttsp4_startup_work_function(struct 
work_struct *work) work 1706 drivers/input/touchscreen/cyttsp4_core.c struct cyttsp4 *cd = container_of(work, struct cyttsp4, startup_work); work 223 drivers/input/touchscreen/da9034-ts.c static void da9034_tsi_work(struct work_struct *work) work 226 drivers/input/touchscreen/da9034-ts.c container_of(work, struct da9034_touch, tsi_work.work); work 102 drivers/input/touchscreen/da9052_tsi.c static void da9052_ts_pen_work(struct work_struct *work) work 104 drivers/input/touchscreen/da9052_tsi.c struct da9052_tsi *tsi = container_of(work, struct da9052_tsi, work 105 drivers/input/touchscreen/da9052_tsi.c ts_pen_work.work); work 21 drivers/input/touchscreen/hp680_ts_input.c static void do_softint(struct work_struct *work); work 24 drivers/input/touchscreen/hp680_ts_input.c static DECLARE_DELAYED_WORK(work, do_softint); work 26 drivers/input/touchscreen/hp680_ts_input.c static void do_softint(struct work_struct *work) work 72 drivers/input/touchscreen/hp680_ts_input.c schedule_delayed_work(&work, HZ / 20); work 111 drivers/input/touchscreen/hp680_ts_input.c cancel_delayed_work_sync(&work); work 119 drivers/input/touchscreen/hp680_ts_input.c cancel_delayed_work_sync(&work); work 173 drivers/input/touchscreen/ili210x.c static void ili210x_work(struct work_struct *work) work 175 drivers/input/touchscreen/ili210x.c struct ili210x *priv = container_of(work, struct ili210x, work 176 drivers/input/touchscreen/ili210x.c dwork.work); work 36 drivers/input/touchscreen/mc13783_ts.c struct delayed_work work; work 53 drivers/input/touchscreen/mc13783_ts.c schedule_delayed_work(&priv->work, 0); work 105 drivers/input/touchscreen/mc13783_ts.c schedule_delayed_work(&priv->work, HZ / 50); work 119 drivers/input/touchscreen/mc13783_ts.c static void mc13783_ts_work(struct work_struct *work) work 122 drivers/input/touchscreen/mc13783_ts.c container_of(work, struct mc13783_ts_priv, work.work); work 166 drivers/input/touchscreen/mc13783_ts.c cancel_delayed_work_sync(&priv->work); work 180 drivers/input/touchscreen/mc13783_ts.c INIT_DELAYED_WORK(&priv->work, mc13783_ts_work); work 23 drivers/input/touchscreen/pcap_ts.c struct delayed_work work; work 48 drivers/input/touchscreen/pcap_ts.c schedule_delayed_work(&pcap_ts->work, 0); work 60 drivers/input/touchscreen/pcap_ts.c schedule_delayed_work(&pcap_ts->work, 0); work 71 drivers/input/touchscreen/pcap_ts.c schedule_delayed_work(&pcap_ts->work, work 84 drivers/input/touchscreen/pcap_ts.c static void pcap_ts_work(struct work_struct *work) work 86 drivers/input/touchscreen/pcap_ts.c struct delayed_work *dw = to_delayed_work(work); work 87 drivers/input/touchscreen/pcap_ts.c struct pcap_ts *pcap_ts = container_of(dw, struct pcap_ts, work); work 109 drivers/input/touchscreen/pcap_ts.c schedule_delayed_work(&pcap_ts->work, 0); work 119 drivers/input/touchscreen/pcap_ts.c schedule_delayed_work(&pcap_ts->work, 0); work 128 drivers/input/touchscreen/pcap_ts.c cancel_delayed_work_sync(&pcap_ts->work); work 152 drivers/input/touchscreen/pcap_ts.c INIT_DELAYED_WORK(&pcap_ts->work, pcap_ts_work); work 205 drivers/input/touchscreen/pcap_ts.c cancel_delayed_work_sync(&pcap_ts->work); work 73 drivers/input/touchscreen/stmpe-ts.c struct delayed_work work; work 95 drivers/input/touchscreen/stmpe-ts.c static void stmpe_work(struct work_struct *work) work 101 drivers/input/touchscreen/stmpe-ts.c container_of(work, struct stmpe_touch, work.work); work 136 drivers/input/touchscreen/stmpe-ts.c cancel_delayed_work_sync(&ts->work); work 167 drivers/input/touchscreen/stmpe-ts.c 
schedule_delayed_work(&ts->work, msecs_to_jiffies(50)); work 251 drivers/input/touchscreen/stmpe-ts.c cancel_delayed_work_sync(&ts->work); work 312 drivers/input/touchscreen/stmpe-ts.c INIT_DELAYED_WORK(&ts->work, stmpe_work); work 358 drivers/input/touchscreen/tsc200x-core.c static void tsc200x_esd_work(struct work_struct *work) work 360 drivers/input/touchscreen/tsc200x-core.c struct tsc200x *ts = container_of(work, struct tsc200x, esd_work.work); work 69 drivers/input/touchscreen/wm831x-ts.c static void wm831x_pd_data_work(struct work_struct *work) work 72 drivers/input/touchscreen/wm831x-ts.c container_of(work, struct wm831x_ts, pd_data_work); work 290 drivers/input/touchscreen/wm97xx-core.c static void wm97xx_pen_irq_worker(struct work_struct *work) work 292 drivers/input/touchscreen/wm97xx-core.c struct wm97xx *wm = container_of(work, struct wm97xx, pen_event_work); work 472 drivers/input/touchscreen/wm97xx-core.c static void wm97xx_ts_reader(struct work_struct *work) work 475 drivers/input/touchscreen/wm97xx-core.c struct wm97xx *wm = container_of(work, struct wm97xx, ts_reader.work); work 69 drivers/iommu/amd_iommu_v2.c struct work_struct work; work 472 drivers/iommu/amd_iommu_v2.c static void do_fault(struct work_struct *work) work 474 drivers/iommu/amd_iommu_v2.c struct fault *fault = container_of(work, struct fault, work); work 580 drivers/iommu/amd_iommu_v2.c INIT_WORK(&fault->work, do_fault); work 582 drivers/iommu/amd_iommu_v2.c queue_work(iommu_wq, &fault->work); work 51 drivers/isdn/capi/kcapi.c struct work_struct work; work 270 drivers/isdn/capi/kcapi.c static void do_notify_work(struct work_struct *work) work 273 drivers/isdn/capi/kcapi.c container_of(work, struct capictr_event, work); work 291 drivers/isdn/capi/kcapi.c INIT_WORK(&event->work, do_notify_work); work 295 drivers/isdn/capi/kcapi.c queue_work(kcapi_wq, &event->work); work 313 drivers/isdn/capi/kcapi.c static void recv_handler(struct work_struct *work) work 317 drivers/isdn/capi/kcapi.c container_of(work, struct capi20_appl, recv_work); work 1005 drivers/isdn/mISDN/dsp_core.c dsp_send_bh(struct work_struct *work) work 1007 drivers/isdn/mISDN/dsp_core.c struct dsp *dsp = container_of(work, struct dsp, workq); work 803 drivers/isdn/mISDN/l1oip_core.c l1oip_send_bh(struct work_struct *work) work 805 drivers/isdn/mISDN/l1oip_core.c struct l1oip *hc = container_of(work, struct l1oip, workq); work 32 drivers/isdn/mISDN/timerdev.c u_int work; work 57 drivers/isdn/mISDN/timerdev.c dev->work = 0; work 108 drivers/isdn/mISDN/timerdev.c while (list_empty(list) && (dev->work == 0)) { work 112 drivers/isdn/mISDN/timerdev.c wait_event_interruptible(dev->wait, (dev->work || work 118 drivers/isdn/mISDN/timerdev.c if (dev->work) work 119 drivers/isdn/mISDN/timerdev.c dev->work = 0; work 146 drivers/isdn/mISDN/timerdev.c if (dev->work || !list_empty(&dev->expired)) work 150 drivers/isdn/mISDN/timerdev.c dev->work, list_empty(&dev->expired)); work 175 drivers/isdn/mISDN/timerdev.c dev->work = 1; work 53 drivers/leds/leds-cr0014114.c struct delayed_work work; work 144 drivers/leds/leds-cr0014114.c static void cr0014114_recount_work(struct work_struct *work) work 147 drivers/leds/leds-cr0014114.c struct cr0014114 *priv = container_of(work, work 149 drivers/leds/leds-cr0014114.c work.work); work 159 drivers/leds/leds-cr0014114.c schedule_delayed_work(&priv->work, CR_RECOUNT_DELAY); work 239 drivers/leds/leds-cr0014114.c INIT_DELAYED_WORK(&priv->work, cr0014114_recount_work); work 265 drivers/leds/leds-cr0014114.c 
schedule_delayed_work(&priv->work, CR_RECOUNT_DELAY); work 276 drivers/leds/leds-cr0014114.c cancel_delayed_work_sync(&priv->work); work 42 drivers/leds/leds-pca9532.c struct work_struct work; work 236 drivers/leds/leds-pca9532.c schedule_work(&data->work); work 241 drivers/leds/leds-pca9532.c static void pca9532_input_work(struct work_struct *work) work 244 drivers/leds/leds-pca9532.c container_of(work, struct pca9532_data, work); work 336 drivers/leds/leds-pca9532.c cancel_work_sync(&data->work); work 420 drivers/leds/leds-pca9532.c INIT_WORK(&data->work, pca9532_input_work); work 423 drivers/leds/leds-pca9532.c cancel_work_sync(&data->work); work 178 drivers/leds/leds-tca6507.c struct work_struct work; work 356 drivers/leds/leds-tca6507.c static void tca6507_work(struct work_struct *work) work 358 drivers/leds/leds-tca6507.c struct tca6507_chip *tca = container_of(work, struct tca6507_chip, work 359 drivers/leds/leds-tca6507.c work); work 553 drivers/leds/leds-tca6507.c schedule_work(&tca->work); work 621 drivers/leds/leds-tca6507.c schedule_work(&tca->work); work 780 drivers/leds/leds-tca6507.c INIT_WORK(&tca->work, tca6507_work); work 807 drivers/leds/leds-tca6507.c schedule_work(&tca->work); work 829 drivers/leds/leds-tca6507.c cancel_work_sync(&tca->work); work 42 drivers/leds/trigger/ledtrig-netdev.c struct delayed_work work; work 90 drivers/leds/trigger/ledtrig-netdev.c schedule_delayed_work(&trigger_data->work, 0); work 116 drivers/leds/trigger/ledtrig-netdev.c cancel_delayed_work_sync(&trigger_data->work); work 198 drivers/leds/trigger/ledtrig-netdev.c cancel_delayed_work_sync(&trigger_data->work); work 275 drivers/leds/trigger/ledtrig-netdev.c cancel_delayed_work_sync(&trigger_data->work); work 314 drivers/leds/trigger/ledtrig-netdev.c cancel_delayed_work_sync(&trigger_data->work); work 346 drivers/leds/trigger/ledtrig-netdev.c static void netdev_trig_work(struct work_struct *work) work 349 drivers/leds/trigger/ledtrig-netdev.c container_of(work, struct led_netdev_data, work.work); work 388 drivers/leds/trigger/ledtrig-netdev.c schedule_delayed_work(&trigger_data->work, work 406 drivers/leds/trigger/ledtrig-netdev.c INIT_DELAYED_WORK(&trigger_data->work, netdev_trig_work); work 431 drivers/leds/trigger/ledtrig-netdev.c cancel_delayed_work_sync(&trigger_data->work); work 25 drivers/lightnvm/pblk-core.c static void pblk_line_mark_bb(struct work_struct *work) work 27 drivers/lightnvm/pblk-core.c struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws, work 1652 drivers/lightnvm/pblk-core.c static void pblk_line_put_ws(struct work_struct *work) work 1654 drivers/lightnvm/pblk-core.c struct pblk_line_ws *line_put_ws = container_of(work, work 1847 drivers/lightnvm/pblk-core.c void pblk_line_close_ws(struct work_struct *work) work 1849 drivers/lightnvm/pblk-core.c struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws, work 1866 drivers/lightnvm/pblk-core.c void (*work)(struct work_struct *), gfp_t gfp_mask, work 1877 drivers/lightnvm/pblk-core.c INIT_WORK(&line_ws->ws, work); work 86 drivers/lightnvm/pblk-gc.c static void pblk_gc_line_ws(struct work_struct *work) work 88 drivers/lightnvm/pblk-gc.c struct pblk_line_ws *gc_rq_ws = container_of(work, work 176 drivers/lightnvm/pblk-gc.c static void pblk_gc_line_prepare_ws(struct work_struct *work) work 178 drivers/lightnvm/pblk-gc.c struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws, work 208 drivers/lightnvm/pblk-write.c static void pblk_submit_rec(struct work_struct *work) work 211 
drivers/lightnvm/pblk-write.c container_of(work, struct pblk_rec_ctx, ws_rec); work 796 drivers/lightnvm/pblk.h void pblk_line_close_ws(struct work_struct *work); work 801 drivers/lightnvm/pblk.h void (*work)(struct work_struct *), gfp_t gfp_mask, work 87 drivers/macintosh/ams/ams-core.c static void ams_worker(struct work_struct *work) work 212 drivers/macintosh/rack-meter.c static void rackmeter_do_timer(struct work_struct *work) work 215 drivers/macintosh/rack-meter.c container_of(work, struct rackmeter_cpu, sniffer.work); work 67 drivers/mailbox/omap-mailbox.c struct work_struct work; work 255 drivers/mailbox/omap-mailbox.c static void mbox_rx_work(struct work_struct *work) work 258 drivers/mailbox/omap-mailbox.c container_of(work, struct omap_mbox_queue, work); work 310 drivers/mailbox/omap-mailbox.c schedule_work(&mbox->rxq->work); work 327 drivers/mailbox/omap-mailbox.c void (*work)(struct work_struct *)) work 331 drivers/mailbox/omap-mailbox.c if (!work) work 343 drivers/mailbox/omap-mailbox.c INIT_WORK(&mq->work, work); work 391 drivers/mailbox/omap-mailbox.c flush_work(&mbox->rxq->work); work 369 drivers/md/bcache/btree.c schedule_delayed_work(&b->work, 30 * HZ); work 473 drivers/md/bcache/btree.c cancel_delayed_work(&b->work); work 523 drivers/md/bcache/btree.c struct btree *b = container_of(to_delayed_work(w), struct btree, work); work 542 drivers/md/bcache/btree.c schedule_delayed_work(&b->work, 30 * HZ); work 630 drivers/md/bcache/btree.c INIT_DELAYED_WORK(&b->work, btree_node_write_work); work 821 drivers/md/bcache/btree.c cancel_delayed_work_sync(&b->work); work 1121 drivers/md/bcache/btree.c cancel_delayed_work(&b->work); work 144 drivers/md/bcache/btree.h struct delayed_work work; work 184 drivers/md/bcache/closure.c work_data_bits(&cl->work)) ? 
"Q" : "", work 151 drivers/md/bcache/closure.h struct work_struct work; work 247 drivers/md/bcache/closure.h INIT_WORK(&cl->work, cl->work.func); work 248 drivers/md/bcache/closure.h BUG_ON(!queue_work(wq, &cl->work)); work 590 drivers/md/bcache/journal.c static void journal_discard_work(struct work_struct *work) work 593 drivers/md/bcache/journal.c container_of(work, struct journal_device, discard_work); work 916 drivers/md/bcache/journal.c static void journal_write_work(struct work_struct *work) work 918 drivers/md/bcache/journal.c struct cache_set *c = container_of(to_delayed_work(work), work 920 drivers/md/bcache/journal.c journal.work); work 961 drivers/md/bcache/journal.c schedule_delayed_work(&c->journal.work, work 997 drivers/md/bcache/journal.c INIT_DELAYED_WORK(&j->work, journal_write_work); work 112 drivers/md/bcache/journal.h struct delayed_work work; work 1646 drivers/md/bcache/super.c cancel_delayed_work_sync(&c->journal.work); work 1648 drivers/md/bcache/super.c c->journal.work.work.func(&c->journal.work.work); work 169 drivers/md/bcache/writeback.c static void update_writeback_rate(struct work_struct *work) work 171 drivers/md/bcache/writeback.c struct cached_dev *dc = container_of(to_delayed_work(work), work 379 drivers/md/dm-bio-prison-v1.c int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work) work 390 drivers/md/dm-bio-prison-v1.c list_add(work, &ds->entries[ds->current_entry].work_items); work 134 drivers/md/dm-bio-prison-v1.h int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work); work 16 drivers/md/dm-cache-background-tracker.c struct policy_work work; work 90 drivers/md/dm-cache-background-tracker.c cmp = cmp_oblock(w->work.oblock, nw->work.oblock); work 118 drivers/md/dm-cache-background-tracker.c cmp = cmp_oblock(w->work.oblock, oblock); work 178 drivers/md/dm-cache-background-tracker.c struct policy_work *work, work 190 drivers/md/dm-cache-background-tracker.c memcpy(&w->work, work, sizeof(*work)); work 202 drivers/md/dm-cache-background-tracker.c *pwork = &w->work; work 206 drivers/md/dm-cache-background-tracker.c update_stats(b, &w->work, 1); work 215 drivers/md/dm-cache-background-tracker.c int btracker_issue(struct background_tracker *b, struct policy_work **work) work 224 drivers/md/dm-cache-background-tracker.c *work = &w->work; work 233 drivers/md/dm-cache-background-tracker.c struct bt_work *w = container_of(op, struct bt_work, work); work 235 drivers/md/dm-cache-background-tracker.c update_stats(b, &w->work, -1); work 32 drivers/md/dm-cache-background-tracker.h struct policy_work *work, work 38 drivers/md/dm-cache-background-tracker.h int btracker_issue(struct background_tracker *b, struct policy_work **work); work 24 drivers/md/dm-cache-policy-internal.h struct policy_work **work) work 27 drivers/md/dm-cache-policy-internal.h *work = NULL; work 31 drivers/md/dm-cache-policy-internal.h return p->lookup_with_work(p, oblock, cblock, data_dir, fast_copy, work); work 41 drivers/md/dm-cache-policy-internal.h struct policy_work *work, work 44 drivers/md/dm-cache-policy-internal.h return p->complete_background_work(p, work, success); work 1177 drivers/md/dm-cache-policy-smq.c struct policy_work work; work 1185 drivers/md/dm-cache-policy-smq.c work.op = POLICY_WRITEBACK; work 1186 drivers/md/dm-cache-policy-smq.c work.oblock = e->oblock; work 1187 drivers/md/dm-cache-policy-smq.c work.cblock = infer_cblock(mq, e); work 1189 drivers/md/dm-cache-policy-smq.c r = btracker_queue(mq->bg_work, &work, NULL); work 1200 
drivers/md/dm-cache-policy-smq.c struct policy_work work; work 1216 drivers/md/dm-cache-policy-smq.c work.op = POLICY_DEMOTE; work 1217 drivers/md/dm-cache-policy-smq.c work.oblock = e->oblock; work 1218 drivers/md/dm-cache-policy-smq.c work.cblock = infer_cblock(mq, e); work 1219 drivers/md/dm-cache-policy-smq.c r = btracker_queue(mq->bg_work, &work, NULL); work 1231 drivers/md/dm-cache-policy-smq.c struct policy_work work; work 1256 drivers/md/dm-cache-policy-smq.c work.op = POLICY_PROMOTE; work 1257 drivers/md/dm-cache-policy-smq.c work.oblock = oblock; work 1258 drivers/md/dm-cache-policy-smq.c work.cblock = infer_cblock(mq, e); work 1259 drivers/md/dm-cache-policy-smq.c r = btracker_queue(mq->bg_work, &work, workp); work 1367 drivers/md/dm-cache-policy-smq.c struct policy_work **work, bool *background_work) work 1392 drivers/md/dm-cache-policy-smq.c queue_promotion(mq, oblock, work); work 1420 drivers/md/dm-cache-policy-smq.c struct policy_work **work) work 1428 drivers/md/dm-cache-policy-smq.c r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued); work 1459 drivers/md/dm-cache-policy-smq.c struct policy_work *work, work 1463 drivers/md/dm-cache-policy-smq.c from_cblock(work->cblock)); work 1465 drivers/md/dm-cache-policy-smq.c switch (work->op) { work 1470 drivers/md/dm-cache-policy-smq.c e->oblock = work->oblock; work 1501 drivers/md/dm-cache-policy-smq.c btracker_complete(mq->bg_work, work); work 1505 drivers/md/dm-cache-policy-smq.c struct policy_work *work, work 1512 drivers/md/dm-cache-policy-smq.c __complete_background_work(mq, work, success); work 71 drivers/md/dm-cache-policy.h struct policy_work **work); work 85 drivers/md/dm-cache-policy.h struct policy_work *work, work 3104 drivers/md/dm-cache-target.c do_waker(&cache->waker.work); work 1285 drivers/md/dm-clone-target.c static void do_worker(struct work_struct *work) work 1287 drivers/md/dm-clone-target.c struct clone *clone = container_of(work, typeof(*clone), worker); work 1312 drivers/md/dm-clone-target.c static void do_waker(struct work_struct *work) work 1314 drivers/md/dm-clone-target.c struct clone *clone = container_of(to_delayed_work(work), struct clone, waker); work 2035 drivers/md/dm-clone-target.c do_waker(&clone->waker.work); work 66 drivers/md/dm-core.h struct work_struct work; work 69 drivers/md/dm-crypt.c struct work_struct work; work 1484 drivers/md/dm-crypt.c static void kcryptd_io_read_work(struct work_struct *work) work 1486 drivers/md/dm-crypt.c struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); work 1498 drivers/md/dm-crypt.c INIT_WORK(&io->work, kcryptd_io_read_work); work 1499 drivers/md/dm-crypt.c queue_work(cc->io_queue, &io->work); work 1712 drivers/md/dm-crypt.c static void kcryptd_crypt(struct work_struct *work) work 1714 drivers/md/dm-crypt.c struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); work 1726 drivers/md/dm-crypt.c INIT_WORK(&io->work, kcryptd_crypt); work 1727 drivers/md/dm-crypt.c queue_work(cc->crypt_queue, &io->work); work 112 drivers/md/dm-delay.c static void flush_expired_bios(struct work_struct *work) work 116 drivers/md/dm-delay.c dc = container_of(work, struct delay_c, flush_expired_bios); work 281 drivers/md/dm-integrity.c struct work_struct work; work 312 drivers/md/dm-integrity.c struct work_struct work; work 1436 drivers/md/dm-integrity.c INIT_WORK(&dio->work, integrity_bio_wait); work 1437 drivers/md/dm-integrity.c queue_work(ic->offload_wq, &dio->work); work 1505 drivers/md/dm-integrity.c struct 
dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); work 1857 drivers/md/dm-integrity.c INIT_WORK(&dio->work, integrity_bio_wait); work 1858 drivers/md/dm-integrity.c queue_work(ic->offload_wq, &dio->work); work 1947 drivers/md/dm-integrity.c INIT_WORK(&dio->work, integrity_bio_wait); work 1948 drivers/md/dm-integrity.c queue_work(ic->wait_wq, &dio->work); work 1985 drivers/md/dm-integrity.c queue_work(ic->writer_wq, &bbs->work); work 2019 drivers/md/dm-integrity.c integrity_metadata(&dio->work); work 2025 drivers/md/dm-integrity.c INIT_WORK(&dio->work, integrity_metadata); work 2026 drivers/md/dm-integrity.c queue_work(ic->metadata_wq, &dio->work); work 2041 drivers/md/dm-integrity.c struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); work 2465 drivers/md/dm-integrity.c struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work); work 2486 drivers/md/dm-integrity.c INIT_WORK(&dio->work, integrity_bio_wait); work 2487 drivers/md/dm-integrity.c queue_work(ic->offload_wq, &dio->work); work 2509 drivers/md/dm-integrity.c INIT_WORK(&dio->work, integrity_bio_wait); work 2510 drivers/md/dm-integrity.c queue_work(ic->offload_wq, &dio->work); work 2516 drivers/md/dm-integrity.c static void bitmap_flush_work(struct work_struct *work) work 2518 drivers/md/dm-integrity.c struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work); work 4053 drivers/md/dm-integrity.c INIT_WORK(&bbs->work, bitmap_block_work); work 646 drivers/md/dm-kcopyd.c static void do_work(struct work_struct *work) work 648 drivers/md/dm-kcopyd.c struct dm_kcopyd_client *kc = container_of(work, work 156 drivers/md/dm-log-userspace-base.c static void do_flush(struct work_struct *work) work 159 drivers/md/dm-log-userspace-base.c struct log_c *lc = container_of(work, struct log_c, flush_log_work.work); work 107 drivers/md/dm-mpath.c static void trigger_event(struct work_struct *work); work 109 drivers/md/dm-mpath.c static void activate_path_work(struct work_struct *work); work 110 drivers/md/dm-mpath.c static void process_queued_bios(struct work_struct *work); work 649 drivers/md/dm-mpath.c static void process_queued_bios(struct work_struct *work) work 657 drivers/md/dm-mpath.c container_of(work, struct multipath, process_queued_bios); work 726 drivers/md/dm-mpath.c static void trigger_event(struct work_struct *work) work 729 drivers/md/dm-mpath.c container_of(work, struct multipath, trigger_event); work 1280 drivers/md/dm-mpath.c if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) work 1513 drivers/md/dm-mpath.c static void activate_path_work(struct work_struct *work) work 1516 drivers/md/dm-mpath.c container_of(work, struct pgpath, activate_path.work); work 839 drivers/md/dm-raid1.c static void trigger_event(struct work_struct *work) work 842 drivers/md/dm-raid1.c container_of(work, struct mirror_set, trigger_event); work 850 drivers/md/dm-raid1.c static void do_mirror(struct work_struct *work) work 852 drivers/md/dm-raid1.c struct mirror_set *ms = container_of(work, struct mirror_set, work 22 drivers/md/dm-rq.c struct kthread_work work; work 215 drivers/md/dm-snap-persistent.c struct work_struct work; work 219 drivers/md/dm-snap-persistent.c static void do_metadata(struct work_struct *work) work 221 drivers/md/dm-snap-persistent.c struct mdata_req *req = container_of(work, struct mdata_req, work); work 257 drivers/md/dm-snap-persistent.c INIT_WORK_ONSTACK(&req.work, do_metadata); work 258 drivers/md/dm-snap-persistent.c 
queue_work(ps->metadata_wq, &req.work); work 260 drivers/md/dm-snap-persistent.c destroy_work_on_stack(&req.work); work 51 drivers/md/dm-stripe.c static void trigger_event(struct work_struct *work) work 53 drivers/md/dm-stripe.c struct stripe_c *sc = container_of(work, struct stripe_c, work 3653 drivers/md/dm-thin.c do_waker(&pool->waker.work); work 4347 drivers/md/dm-thin.c struct list_head work; work 4352 drivers/md/dm-thin.c INIT_LIST_HEAD(&work); work 4353 drivers/md/dm-thin.c dm_deferred_entry_dec(h->shared_read_entry, &work); work 4356 drivers/md/dm-thin.c list_for_each_entry_safe(m, tmp, &work, list) { work 4364 drivers/md/dm-thin.c INIT_LIST_HEAD(&work); work 4365 drivers/md/dm-thin.c dm_deferred_entry_dec(h->all_io_entry, &work); work 4366 drivers/md/dm-thin.c if (!list_empty(&work)) { work 4368 drivers/md/dm-thin.c list_for_each_entry_safe(m, tmp, &work, list) work 44 drivers/md/dm-verity-target.c struct work_struct work; work 554 drivers/md/dm-verity-target.c struct dm_verity_io *io = container_of(w, struct dm_verity_io, work); work 568 drivers/md/dm-verity-target.c INIT_WORK(&io->work, verity_work); work 569 drivers/md/dm-verity-target.c queue_work(io->v->verify_wq, &io->work); work 577 drivers/md/dm-verity-target.c static void verity_prefetch_io(struct work_struct *work) work 580 drivers/md/dm-verity-target.c container_of(work, struct dm_verity_prefetch_work, work); work 622 drivers/md/dm-verity-target.c INIT_WORK(&pw->work, verity_prefetch_io); work 626 drivers/md/dm-verity-target.c queue_work(v->verify_wq, &pw->work); work 81 drivers/md/dm-verity.h struct work_struct work; work 768 drivers/md/dm-writecache.c static void writecache_flush_work(struct work_struct *work) work 770 drivers/md/dm-writecache.c struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work); work 1599 drivers/md/dm-writecache.c static void writecache_writeback(struct work_struct *work) work 1601 drivers/md/dm-writecache.c struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work); work 18 drivers/md/dm-zoned-reclaim.c struct delayed_work work; work 451 drivers/md/dm-zoned-reclaim.c static void dmz_reclaim_work(struct work_struct *work) work 453 drivers/md/dm-zoned-reclaim.c struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work); work 463 drivers/md/dm-zoned-reclaim.c mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); work 526 drivers/md/dm-zoned-reclaim.c INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work); work 535 drivers/md/dm-zoned-reclaim.c queue_delayed_work(zrc->wq, &zrc->work, 0); work 551 drivers/md/dm-zoned-reclaim.c cancel_delayed_work_sync(&zrc->work); work 562 drivers/md/dm-zoned-reclaim.c cancel_delayed_work_sync(&zrc->work); work 570 drivers/md/dm-zoned-reclaim.c queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); work 587 drivers/md/dm-zoned-reclaim.c mod_delayed_work(zrc->wq, &zrc->work, 0); work 30 drivers/md/dm-zoned-target.c struct work_struct work; work 471 drivers/md/dm-zoned-target.c static void dmz_chunk_work(struct work_struct *work) work 473 drivers/md/dm-zoned-target.c struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work); work 496 drivers/md/dm-zoned-target.c static void dmz_flush_work(struct work_struct *work) work 498 drivers/md/dm-zoned-target.c struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work); work 546 drivers/md/dm-zoned-target.c INIT_WORK(&cw->work, dmz_chunk_work); work 562 drivers/md/dm-zoned-target.c if (queue_work(dmz->chunk_wq, &cw->work)) work 
696 drivers/md/dm.c queue_work(md->wq, &md->work); work 1878 drivers/md/dm.c static void dm_wq_work(struct work_struct *work); work 1979 drivers/md/dm.c INIT_WORK(&md->work, dm_wq_work); work 2459 drivers/md/dm.c static void dm_wq_work(struct work_struct *work) work 2461 drivers/md/dm.c struct mapped_device *md = container_of(work, struct mapped_device, work 2462 drivers/md/dm.c work); work 2490 drivers/md/dm.c queue_work(md->wq, &md->work); work 661 drivers/md/raid5-cache.c static void r5l_submit_io_async(struct work_struct *work) work 663 drivers/md/raid5-cache.c struct r5l_log *log = container_of(work, struct r5l_log, work 682 drivers/md/raid5-cache.c static void r5c_disable_writeback_async(struct work_struct *work) work 684 drivers/md/raid5-cache.c struct r5l_log *log = container_of(work, struct r5l_log, work 198 drivers/md/raid5.c queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); work 206 drivers/md/raid5.c &group->workers[i].work); work 6229 drivers/md/raid5.c static void raid5_do_work(struct work_struct *work) work 6231 drivers/md/raid5.c struct r5worker *worker = container_of(work, struct r5worker, work); work 6713 drivers/md/raid5.c INIT_WORK(&worker->work, raid5_do_work); work 510 drivers/md/raid5.h struct work_struct work; work 425 drivers/media/cec/cec-adap.c if (cancel_delayed_work(&data->work)) work 671 drivers/media/cec/cec-adap.c schedule_delayed_work(&data->work, work 718 drivers/media/cec/cec-adap.c static void cec_wait_timeout(struct work_struct *work) work 720 drivers/media/cec/cec-adap.c struct cec_data *data = container_of(work, struct cec_data, work.work); work 892 drivers/media/cec/cec-adap.c INIT_DELAYED_WORK(&data->work, cec_wait_timeout); work 912 drivers/media/cec/cec-adap.c cancel_delayed_work_sync(&data->work); work 1198 drivers/media/cec/cec-adap.c if (!cancel_delayed_work(&data->work)) { work 1215 drivers/media/dvb-core/dvb_net.c static void wq_set_multicast_list (struct work_struct *work) work 1218 drivers/media/dvb-core/dvb_net.c container_of(work, struct dvb_net_priv, set_multicast_list_wq); work 1256 drivers/media/dvb-core/dvb_net.c static void wq_restart_net_feed (struct work_struct *work) work 1259 drivers/media/dvb-core/dvb_net.c container_of(work, struct dvb_net_priv, restart_net_feed_wq); work 783 drivers/media/dvb-frontends/rtl2832.c static void rtl2832_i2c_gate_work(struct work_struct *work) work 785 drivers/media/dvb-frontends/rtl2832.c struct rtl2832_dev *dev = container_of(work, struct rtl2832_dev, i2c_gate_work.work); work 45 drivers/media/dvb-frontends/ts2020.c static void ts2020_stat_work(struct work_struct *work); work 148 drivers/media/dvb-frontends/ts2020.c ts2020_stat_work(&priv->stat_work.work); work 427 drivers/media/dvb-frontends/ts2020.c static void ts2020_stat_work(struct work_struct *work) work 429 drivers/media/dvb-frontends/ts2020.c struct ts2020_priv *priv = container_of(work, struct ts2020_priv, work 430 drivers/media/dvb-frontends/ts2020.c stat_work.work); work 462 drivers/media/dvb-frontends/ts2020.c ts2020_stat_work(&priv->stat_work.work); work 902 drivers/media/firewire/firedtv-avc.c void avc_remote_ctrl_work(struct work_struct *work) work 905 drivers/media/firewire/firedtv-avc.c container_of(work, struct firedtv, remote_ctrl_work); work 121 drivers/media/firewire/firedtv.h void avc_remote_ctrl_work(struct work_struct *work); work 809 drivers/media/i2c/ad9389b.c static void ad9389b_edid_handler(struct work_struct *work) work 811 drivers/media/i2c/ad9389b.c struct delayed_work *dwork = to_delayed_work(work); work 
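The cec-adap entry above (work 720, container_of(work, struct cec_data, work.work)) and the dm.c entries (work 2461-2462) show two spellings of the same idiom: a handler recovers its owning object from the work_struct pointer it is handed. For delayed work the embedded member is itself named work, which is why so many lines in this index end in "work.work)". A sketch, assuming an invented my_dev structure:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {
        struct delayed_work poll;       /* delayed_work embeds a work_struct named .work */
};

static void my_poll_fn(struct work_struct *work)
{
        /* handlers receive the inner work_struct, hence "poll.work" */
        struct my_dev *dev = container_of(work, struct my_dev, poll.work);

        (void)dev;                      /* poll the hardware here */
}

static void my_dev_start(struct my_dev *dev)
{
        INIT_DELAYED_WORK(&dev->poll, my_poll_fn);
        schedule_delayed_work(&dev->poll, msecs_to_jiffies(100));
}

static void my_dev_stop(struct my_dev *dev)
{
        cancel_delayed_work_sync(&dev->poll);   /* waits for a running handler */
}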
1488 drivers/media/i2c/adv7511-v4l2.c static void adv7511_edid_handler(struct work_struct *work) work 1490 drivers/media/i2c/adv7511-v4l2.c struct delayed_work *dwork = to_delayed_work(work); work 523 drivers/media/i2c/adv7604.c static void adv76xx_delayed_work_enable_hotplug(struct work_struct *work) work 525 drivers/media/i2c/adv7604.c struct delayed_work *dwork = to_delayed_work(work); work 705 drivers/media/i2c/adv7842.c static void adv7842_delayed_work_enable_hotplug(struct work_struct *work) work 707 drivers/media/i2c/adv7842.c struct delayed_work *dwork = to_delayed_work(work); work 600 drivers/media/i2c/cx25840/cx25840-core.c static void cx25840_work_handler(struct work_struct *work) work 602 drivers/media/i2c/cx25840/cx25840-core.c struct cx25840_state *state = container_of(work, struct cx25840_state, fw_work); work 307 drivers/media/i2c/ir-kbd-i2c.c static void ir_work(struct work_struct *work) work 310 drivers/media/i2c/ir-kbd-i2c.c struct IR_i2c *ir = container_of(work, struct IR_i2c, work.work); work 326 drivers/media/i2c/ir-kbd-i2c.c schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling_interval)); work 333 drivers/media/i2c/ir-kbd-i2c.c schedule_delayed_work(&ir->work, 0); work 342 drivers/media/i2c/ir-kbd-i2c.c cancel_delayed_work_sync(&ir->work); work 885 drivers/media/i2c/ir-kbd-i2c.c INIT_DELAYED_WORK(&ir->work, ir_work); work 921 drivers/media/i2c/ir-kbd-i2c.c cancel_delayed_work_sync(&ir->work); work 57 drivers/media/i2c/saa6588.c struct delayed_work work; work 318 drivers/media/i2c/saa6588.c static void saa6588_work(struct work_struct *work) work 320 drivers/media/i2c/saa6588.c struct saa6588 *s = container_of(work, struct saa6588, work.work); work 323 drivers/media/i2c/saa6588.c schedule_delayed_work(&s->work, msecs_to_jiffies(20)); work 482 drivers/media/i2c/saa6588.c INIT_DELAYED_WORK(&s->work, saa6588_work); work 483 drivers/media/i2c/saa6588.c schedule_delayed_work(&s->work, 0); work 494 drivers/media/i2c/saa6588.c cancel_delayed_work_sync(&s->work); work 1635 drivers/media/i2c/saa7115.c u8 work; work 1642 drivers/media/i2c/saa7115.c work = saa711x_read(sd, R_08_SYNC_CNTL); work 1643 drivers/media/i2c/saa7115.c work &= ~SAA7113_R_08_HTC_MASK; work 1644 drivers/media/i2c/saa7115.c work |= ((*data->saa7113_r08_htc) << SAA7113_R_08_HTC_OFFSET); work 1645 drivers/media/i2c/saa7115.c saa711x_write(sd, R_08_SYNC_CNTL, work); work 1649 drivers/media/i2c/saa7115.c work = saa711x_read(sd, R_10_CHROMA_CNTL_2); work 1650 drivers/media/i2c/saa7115.c work &= ~SAA7113_R_10_VRLN_MASK; work 1652 drivers/media/i2c/saa7115.c work |= (1 << SAA7113_R_10_VRLN_OFFSET); work 1653 drivers/media/i2c/saa7115.c saa711x_write(sd, R_10_CHROMA_CNTL_2, work); work 1657 drivers/media/i2c/saa7115.c work = saa711x_read(sd, R_10_CHROMA_CNTL_2); work 1658 drivers/media/i2c/saa7115.c work &= ~SAA7113_R_10_OFTS_MASK; work 1659 drivers/media/i2c/saa7115.c work |= (*data->saa7113_r10_ofts << SAA7113_R_10_OFTS_OFFSET); work 1660 drivers/media/i2c/saa7115.c saa711x_write(sd, R_10_CHROMA_CNTL_2, work); work 1664 drivers/media/i2c/saa7115.c work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL); work 1665 drivers/media/i2c/saa7115.c work &= ~SAA7113_R_12_RTS0_MASK; work 1666 drivers/media/i2c/saa7115.c work |= (*data->saa7113_r12_rts0 << SAA7113_R_12_RTS0_OFFSET); work 1671 drivers/media/i2c/saa7115.c saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work); work 1675 drivers/media/i2c/saa7115.c work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL); work 1676 drivers/media/i2c/saa7115.c work &= ~SAA7113_R_12_RTS1_MASK; work 1677 
drivers/media/i2c/saa7115.c work |= (*data->saa7113_r12_rts1 << SAA7113_R_12_RTS1_OFFSET); work 1678 drivers/media/i2c/saa7115.c saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work); work 1682 drivers/media/i2c/saa7115.c work = saa711x_read(sd, R_13_RT_X_PORT_OUT_CNTL); work 1683 drivers/media/i2c/saa7115.c work &= ~SAA7113_R_13_ADLSB_MASK; work 1685 drivers/media/i2c/saa7115.c work |= (1 << SAA7113_R_13_ADLSB_OFFSET); work 1686 drivers/media/i2c/saa7115.c saa711x_write(sd, R_13_RT_X_PORT_OUT_CNTL, work); work 356 drivers/media/i2c/tc358743.c static void tc358743_delayed_work_enable_hotplug(struct work_struct *work) work 358 drivers/media/i2c/tc358743.c struct delayed_work *dwork = to_delayed_work(work); work 1485 drivers/media/i2c/tc358743.c static void tc358743_work_i2c_poll(struct work_struct *work) work 1487 drivers/media/i2c/tc358743.c struct tc358743_state *state = container_of(work, work 558 drivers/media/i2c/tda1997x.c static void tda1997x_delayed_work_enable_hpd(struct work_struct *work) work 560 drivers/media/i2c/tda1997x.c struct delayed_work *dwork = to_delayed_work(work); work 105 drivers/media/pci/b2c2/flexcop-pci.c static void flexcop_pci_irq_check_work(struct work_struct *work) work 108 drivers/media/pci/b2c2/flexcop-pci.c container_of(work, struct flexcop_pci, irq_check_work.work); work 185 drivers/media/pci/bt8xx/bttv-driver.c static void request_module_async(struct work_struct *work) work 199 drivers/media/pci/cobalt/cobalt-irq.c void cobalt_irq_work_handler(struct work_struct *work) work 202 drivers/media/pci/cobalt/cobalt-irq.c container_of(work, struct cobalt, irq_work_queue); work 12 drivers/media/pci/cobalt/cobalt-irq.h void cobalt_irq_work_handler(struct work_struct *work); work 241 drivers/media/pci/cx18/cx18-driver.c static void request_module_async(struct work_struct *work) work 243 drivers/media/pci/cx18/cx18-driver.c struct cx18 *dev = container_of(work, struct cx18, request_module_wk); work 705 drivers/media/pci/cx18/cx18-driver.c INIT_WORK(&cx->in_work_order[i].work, cx18_in_work_handler); work 1239 drivers/media/pci/cx18/cx18-driver.c cancel_work_sync(&cx->in_work_order[i].work); work 347 drivers/media/pci/cx18/cx18-driver.h struct work_struct work; work 368 drivers/media/pci/cx18/cx18-mailbox.c void cx18_in_work_handler(struct work_struct *work) work 371 drivers/media/pci/cx18/cx18-mailbox.c container_of(work, struct cx18_in_work_order, work); work 571 drivers/media/pci/cx18/cx18-mailbox.c queue_work(cx->in_work_queue, &order->work); work 79 drivers/media/pci/cx18/cx18-mailbox.h void cx18_in_work_handler(struct work_struct *work); work 717 drivers/media/pci/cx18/cx18-streams.c void cx18_out_work_handler(struct work_struct *work) work 720 drivers/media/pci/cx18/cx18-streams.c container_of(work, struct cx18_stream, out_work_order); work 42 drivers/media/pci/cx18/cx18-streams.h void cx18_out_work_handler(struct work_struct *work); work 102 drivers/media/pci/cx23885/altera-ci.c struct work_struct work; work 389 drivers/media/pci/cx23885/altera-ci.c static void netup_read_ci_status(struct work_struct *work) work 392 drivers/media/pci/cx23885/altera-ci.c container_of(work, struct fpga_internal, work); work 435 drivers/media/pci/cx23885/altera-ci.c schedule_work(&inter->work); work 771 drivers/media/pci/cx23885/altera-ci.c INIT_WORK(&inter->work, netup_read_ci_status); work 796 drivers/media/pci/cx23885/altera-ci.c schedule_work(&inter->work); work 74 drivers/media/pci/cx23885/cimax2.c struct work_struct work; work 335 drivers/media/pci/cx23885/cimax2.c static void 
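Two things stand out in the entries above. First, the saa7115.c hits are a false friend: there `work` is a plain u8 scratch variable for register read-modify-write (work 1635 onward), not a work item. Second, saa6588_work (work 318-323) and ir_work (work 307-326) are self-rearming pollers: the handler reschedules itself, and to_delayed_work() converts the generic work_struct pointer back to the delayed_work containing it. A sketch of that poll loop, with my_state and interval_ms as illustrative names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_state {
        struct delayed_work work;
        unsigned int interval_ms;
};

static void my_poll(struct work_struct *work)
{
        /* to_delayed_work() is container_of() on the embedded member */
        struct delayed_work *dwork = to_delayed_work(work);
        struct my_state *s = container_of(dwork, struct my_state, work);

        /* ... read the hardware ... */

        /* re-arm; cancel_delayed_work_sync() at teardown breaks the loop */
        schedule_delayed_work(&s->work, msecs_to_jiffies(s->interval_ms));
}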
netup_read_ci_status(struct work_struct *work) work 338 drivers/media/pci/cx23885/cimax2.c container_of(work, struct netup_ci_state, work); work 384 drivers/media/pci/cx23885/cimax2.c schedule_work(&state->work); work 391 drivers/media/pci/cx23885/cimax2.c schedule_work(&state->work); work 505 drivers/media/pci/cx23885/cimax2.c INIT_WORK(&state->work, netup_read_ci_status); work 506 drivers/media/pci/cx23885/cimax2.c schedule_work(&state->work); work 14 drivers/media/pci/cx23885/cx23885-av.c void cx23885_av_work_handler(struct work_struct *work) work 17 drivers/media/pci/cx23885/cx23885-av.c container_of(work, struct cx23885_dev, cx25840_work); work 12 drivers/media/pci/cx23885/cx23885-av.h void cx23885_av_work_handler(struct work_struct *work); work 24 drivers/media/pci/cx23885/cx23885-ir.c void cx23885_ir_rx_work_handler(struct work_struct *work) work 27 drivers/media/pci/cx23885/cx23885-ir.c container_of(work, struct cx23885_dev, ir_rx_work); work 47 drivers/media/pci/cx23885/cx23885-ir.c void cx23885_ir_tx_work_handler(struct work_struct *work) work 50 drivers/media/pci/cx23885/cx23885-ir.c container_of(work, struct cx23885_dev, ir_tx_work); work 15 drivers/media/pci/cx23885/cx23885-ir.h void cx23885_ir_rx_work_handler(struct work_struct *work); work 16 drivers/media/pci/cx23885/cx23885-ir.h void cx23885_ir_tx_work_handler(struct work_struct *work); work 42 drivers/media/pci/cx88/cx88-mpeg.c static void request_module_async(struct work_struct *work) work 44 drivers/media/pci/cx88/cx88-mpeg.c struct cx8802_dev *dev = container_of(work, struct cx8802_dev, work 2173 drivers/media/pci/ddbridge/ddbridge-core.c static void input_work(struct work_struct *work) work 2175 drivers/media/pci/ddbridge/ddbridge-core.c struct ddb_dma *dma = container_of(work, struct ddb_dma, work); work 2201 drivers/media/pci/ddbridge/ddbridge-core.c queue_work(ddb_wq, &dma->work); work 2204 drivers/media/pci/ddbridge/ddbridge-core.c static void output_work(struct work_struct *work) work 2206 drivers/media/pci/ddbridge/ddbridge-core.c struct ddb_dma *dma = container_of(work, struct ddb_dma, work); work 2228 drivers/media/pci/ddbridge/ddbridge-core.c queue_work(ddb_wq, &dma->work); work 2261 drivers/media/pci/ddbridge/ddbridge-core.c INIT_WORK(&dma->work, output_work); work 2268 drivers/media/pci/ddbridge/ddbridge-core.c INIT_WORK(&dma->work, input_work); work 2463 drivers/media/pci/ddbridge/ddbridge-core.c cancel_work_sync(&port->input[0]->dma->work); work 2465 drivers/media/pci/ddbridge/ddbridge-core.c cancel_work_sync(&port->input[1]->dma->work); work 2467 drivers/media/pci/ddbridge/ddbridge-core.c cancel_work_sync(&port->output->dma->work); work 153 drivers/media/pci/ddbridge/ddbridge.h struct work_struct work; work 314 drivers/media/pci/dm1105/dm1105.c struct work_struct work; work 344 drivers/media/pci/dm1105/dm1105.c struct work_struct work; work 656 drivers/media/pci/dm1105/dm1105.c static void dm1105_emit_key(struct work_struct *work) work 658 drivers/media/pci/dm1105/dm1105.c struct infrared *ir = container_of(work, struct infrared, work); work 672 drivers/media/pci/dm1105/dm1105.c static void dm1105_dmx_buffer(struct work_struct *work) work 674 drivers/media/pci/dm1105/dm1105.c struct dm1105_dev *dev = container_of(work, struct dm1105_dev, work); work 716 drivers/media/pci/dm1105/dm1105.c queue_work(dev->wq, &dev->work); work 720 drivers/media/pci/dm1105/dm1105.c schedule_work(&dev->ir.work); work 754 drivers/media/pci/dm1105/dm1105.c INIT_WORK(&dm1105->ir.work, dm1105_emit_key); work 1127 
drivers/media/pci/dm1105/dm1105.c INIT_WORK(&dev->work, dm1105_dmx_buffer); work 287 drivers/media/pci/ivtv/ivtv-driver.c static void request_module_async(struct work_struct *work) work 289 drivers/media/pci/ivtv/ivtv-driver.c struct ivtv *dev = container_of(work, struct ivtv, request_module_wk); work 90 drivers/media/pci/ivtv/ivtv-irq.c void ivtv_irq_work_handler(struct kthread_work *work) work 92 drivers/media/pci/ivtv/ivtv-irq.c struct ivtv *itv = container_of(work, struct ivtv, irq_work); work 37 drivers/media/pci/ivtv/ivtv-irq.h void ivtv_irq_work_handler(struct kthread_work *work); work 27 drivers/media/pci/mantis/mantis_evm.c static void mantis_hifevm_work(struct work_struct *work) work 29 drivers/media/pci/mantis/mantis_evm.c struct mantis_ca *ca = container_of(work, struct mantis_ca, hif_evm_work); work 79 drivers/media/pci/mantis/mantis_uart.c static void mantis_uart_work(struct work_struct *work) work 81 drivers/media/pci/mantis/mantis_uart.c struct mantis_pci *mantis = container_of(work, struct mantis_pci, uart_work); work 66 drivers/media/pci/netup_unidvb/netup_unidvb.h struct work_struct work; work 241 drivers/media/pci/netup_unidvb/netup_unidvb_core.c queue_work(dma->ndev->wq, &dma->work); work 573 drivers/media/pci/netup_unidvb/netup_unidvb_core.c static void netup_unidvb_dma_worker(struct work_struct *work) work 575 drivers/media/pci/netup_unidvb/netup_unidvb_core.c struct netup_dma *dma = container_of(work, struct netup_dma, work); work 656 drivers/media/pci/netup_unidvb/netup_unidvb_core.c INIT_WORK(&dma->work, netup_unidvb_dma_worker); work 694 drivers/media/pci/netup_unidvb/netup_unidvb_core.c cancel_work_sync(&dma->work); work 150 drivers/media/pci/saa7134/saa7134-core.c static void request_module_async(struct work_struct *work){ work 151 drivers/media/pci/saa7134/saa7134-core.c struct saa7134_dev* dev = container_of(work, struct saa7134_dev, request_module_wk); work 205 drivers/media/pci/saa7134/saa7134-empress.c static void empress_signal_update(struct work_struct *work) work 208 drivers/media/pci/saa7134/saa7134-empress.c container_of(work, struct saa7134_dev, empress_workqueue); work 1313 drivers/media/platform/aspeed-video.c static void aspeed_video_resolution_work(struct work_struct *work) work 1315 drivers/media/platform/aspeed-video.c struct delayed_work *dwork = to_delayed_work(work); work 1689 drivers/media/platform/coda/coda-bit.c static void coda_seq_end_work(struct work_struct *work) work 1691 drivers/media/platform/coda/coda-bit.c struct coda_ctx *ctx = container_of(work, struct coda_ctx, seq_end_work); work 1995 drivers/media/platform/coda/coda-bit.c static void coda_dec_seq_init_work(struct work_struct *work) work 1997 drivers/media/platform/coda/coda-bit.c struct coda_ctx *ctx = container_of(work, work 1395 drivers/media/platform/coda/coda-common.c static void coda_pic_run_work(struct work_struct *work) work 1397 drivers/media/platform/coda/coda-common.c struct coda_ctx *ctx = container_of(work, struct coda_ctx, pic_run_work); work 195 drivers/media/platform/coda/coda.h void (*seq_init_work)(struct work_struct *work); work 196 drivers/media/platform/coda/coda.h void (*seq_end_work)(struct work_struct *work); work 73 drivers/media/platform/mtk-mdp/mtk_mdp_core.c static void mtk_mdp_wdt_worker(struct work_struct *work) work 76 drivers/media/platform/mtk-mdp/mtk_mdp_core.c container_of(work, struct mtk_mdp_dev, wdt_work); work 221 drivers/media/platform/mtk-mdp/mtk_mdp_core.h struct work_struct work; work 505 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c 
static void mtk_mdp_m2m_worker(struct work_struct *work) work 508 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c container_of(work, struct mtk_mdp_ctx, work); work 548 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c queue_work(ctx->mdp_dev->job_wq, &ctx->work); work 1119 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c INIT_WORK(&ctx->work, mtk_mdp_m2m_worker); work 342 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c static void mtk_vdec_worker(struct work_struct *work) work 344 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx, work 30 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c static void mtk_venc_worker(struct work_struct *work); work 1057 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c static void mtk_venc_worker(struct work_struct *work) work 1059 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx, work 383 drivers/media/platform/omap3isp/ispccdc.c static void ccdc_lsc_free_table_work(struct work_struct *work) work 388 drivers/media/platform/omap3isp/ispccdc.c lsc = container_of(work, struct ispccdc_lsc, table_work); work 50 drivers/media/platform/qcom/venus/core.c schedule_delayed_work(&core->work, msecs_to_jiffies(100)); work 57 drivers/media/platform/qcom/venus/core.c static void venus_sys_error_handler(struct work_struct *work) work 60 drivers/media/platform/qcom/venus/core.c container_of(work, struct venus_core, work.work); work 93 drivers/media/platform/qcom/venus/core.c schedule_delayed_work(&core->work, msecs_to_jiffies(10)); work 268 drivers/media/platform/qcom/venus/core.c INIT_DELAYED_WORK(&core->work, venus_sys_error_handler); work 149 drivers/media/platform/qcom/venus/core.h struct delayed_work work; work 956 drivers/media/platform/qcom/venus/helpers.c static void delayed_process_buf_func(struct work_struct *work) work 962 drivers/media/platform/qcom/venus/helpers.c inst = container_of(work, struct venus_inst, delayed_process_work); work 164 drivers/media/platform/s5p-mfc/s5p_mfc.c static void s5p_mfc_watchdog_worker(struct work_struct *work) work 172 drivers/media/platform/s5p-mfc/s5p_mfc.c dev = container_of(work, struct s5p_mfc_dev, watchdog_work); work 922 drivers/media/platform/sti/delta/delta-v4l2.c static void delta_run_work(struct work_struct *work) work 924 drivers/media/platform/sti/delta/delta-v4l2.c struct delta_ctx *ctx = container_of(work, struct delta_ctx, run_work); work 805 drivers/media/platform/sti/hva/hva-v4l2.c static void hva_run_work(struct work_struct *work) work 807 drivers/media/platform/sti/hva/hva-v4l2.c struct hva_ctx *ctx = container_of(work, struct hva_ctx, run_work); work 620 drivers/media/platform/vicodec/codec-fwht.c s16 *work = tmp; work 631 drivers/media/platform/vicodec/codec-fwht.c *deltablock = *work - *reference; work 633 drivers/media/platform/vicodec/codec-fwht.c work++; work 627 drivers/media/platform/vim2m.c curr_ctx = container_of(w, struct vim2m_ctx, work_run.work); work 31 drivers/media/platform/vivid/vivid-cec.c cancel_delayed_work_sync(&cw->work); work 123 drivers/media/platform/vivid/vivid-cec.c static void vivid_cec_xfer_done_worker(struct work_struct *work) work 126 drivers/media/platform/vivid/vivid-cec.c container_of(work, struct vivid_cec_work, work.work); work 155 drivers/media/platform/vivid/vivid-cec.c static void vivid_cec_xfer_try_worker(struct work_struct *work) work 158 drivers/media/platform/vivid/vivid-cec.c container_of(work, struct vivid_cec_work, work.work); work 
168 drivers/media/platform/vivid/vivid-cec.c INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker); work 172 drivers/media/platform/vivid/vivid-cec.c schedule_delayed_work(&cw->work, dev->cec_xfer_time_jiffies); work 211 drivers/media/platform/vivid/vivid-cec.c INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker); work 216 drivers/media/platform/vivid/vivid-cec.c INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_try_worker); work 221 drivers/media/platform/vivid/vivid-cec.c schedule_delayed_work(&cw->work, delta_jiffies < 0 ? 0 : delta_jiffies); work 115 drivers/media/platform/vivid/vivid-core.h struct delayed_work work; work 155 drivers/media/radio/radio-shark.c static void shark_led_work(struct work_struct *work) work 158 drivers/media/radio/radio-shark.c container_of(work, struct shark_device, led_work); work 142 drivers/media/radio/radio-shark2.c static void shark_led_work(struct work_struct *work) work 145 drivers/media/radio/radio-shark2.c container_of(work, struct shark_device, led_work); work 178 drivers/media/radio/si4713/si4713.c complete(&sdev->work); work 221 drivers/media/radio/si4713/si4713.c if (!wait_for_completion_timeout(&sdev->work, work 502 drivers/media/radio/si4713/si4713.c !wait_for_completion_timeout(&sdev->work, usecs_to_jiffies(usecs) + 1)) work 1477 drivers/media/radio/si4713/si4713.c init_completion(&sdev->work); work 238 drivers/media/radio/si4713/si4713.h struct completion work; work 1517 drivers/media/rc/mceusb.c static void mceusb_deferred_kevent(struct work_struct *work) work 1520 drivers/media/rc/mceusb.c container_of(work, struct mceusb_dev, kevent); work 396 drivers/media/tuners/si2157.c static void si2157_stat_work(struct work_struct *work) work 398 drivers/media/tuners/si2157.c struct si2157_dev *dev = container_of(work, struct si2157_dev, stat_work.work); work 1219 drivers/media/tuners/xc5000.c timer_sleep.work); work 96 drivers/media/usb/au0828/au0828-dvb.c static void au0828_restart_dvb_streaming(struct work_struct *work); work 335 drivers/media/usb/au0828/au0828-dvb.c static void au0828_restart_dvb_streaming(struct work_struct *work) work 337 drivers/media/usb/au0828/au0828-dvb.c struct au0828_dev *dev = container_of(work, struct au0828_dev, work 31 drivers/media/usb/au0828/au0828-input.c struct delayed_work work; work 216 drivers/media/usb/au0828/au0828-input.c static void au0828_rc_work(struct work_struct *work) work 218 drivers/media/usb/au0828/au0828-input.c struct au0828_rc *ir = container_of(work, struct au0828_rc, work.work); work 225 drivers/media/usb/au0828/au0828-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); work 232 drivers/media/usb/au0828/au0828-input.c INIT_DELAYED_WORK(&ir->work, au0828_rc_work); work 237 drivers/media/usb/au0828/au0828-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); work 246 drivers/media/usb/au0828/au0828-input.c cancel_delayed_work_sync(&ir->work); work 375 drivers/media/usb/au0828/au0828-input.c cancel_delayed_work_sync(&ir->work); work 395 drivers/media/usb/au0828/au0828-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); work 556 drivers/media/usb/cx231xx/cx231xx-audio.c static void audio_trigger(struct work_struct *work) work 558 drivers/media/usb/cx231xx/cx231xx-audio.c struct cx231xx *dev = container_of(work, struct cx231xx, wq_trigger); work 1521 drivers/media/usb/cx231xx/cx231xx-cards.c static void request_module_async(struct work_struct *work) work 1523 drivers/media/usb/cx231xx/cx231xx-cards.c struct cx231xx *dev = 
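The vivid-cec entries just above allocate a work item per transfer, pick one of two handlers at INIT_DELAYED_WORK time (work 211 vs work 216), and clamp a computed delay before scheduling (work 221). A sketch of that per-object pattern, assuming GFP_KERNEL context; xfer_work and queue_xfer are invented names:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct xfer_work {
        struct delayed_work work;
        /* per-transfer payload ... */
};

static void xfer_done_fn(struct work_struct *work)
{
        struct xfer_work *xw = container_of(work, struct xfer_work, work.work);

        kfree(xw);      /* a dynamically allocated item may free itself */
}

static int queue_xfer(long delta_jiffies)
{
        struct xfer_work *xw = kzalloc(sizeof(*xw), GFP_KERNEL);

        if (!xw)
                return -ENOMEM;
        INIT_DELAYED_WORK(&xw->work, xfer_done_fn);
        /* a negative delta means the deadline already passed: run now */
        schedule_delayed_work(&xw->work, delta_jiffies < 0 ? 0 : delta_jiffies);
        return 0;
}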
container_of(work, work 92 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c static void dvb_usb_read_remote_control(struct work_struct *work) work 94 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c struct dvb_usb_device *d = container_of(work, work 95 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c struct dvb_usb_device, rc_query_work.work); work 598 drivers/media/usb/dvb-usb/cxusb-analog.c static void cxusb_medion_v_complete_work(struct work_struct *work) work 600 drivers/media/usb/dvb-usb/cxusb-analog.c struct cxusb_medion_dev *cxdev = container_of(work, work 109 drivers/media/usb/dvb-usb/dvb-usb-remote.c static void legacy_dvb_usb_read_remote_control(struct work_struct *work) work 112 drivers/media/usb/dvb-usb/dvb-usb-remote.c container_of(work, struct dvb_usb_device, rc_query_work.work); work 246 drivers/media/usb/dvb-usb/dvb-usb-remote.c static void dvb_usb_read_remote_control(struct work_struct *work) work 249 drivers/media/usb/dvb-usb/dvb-usb-remote.c container_of(work, struct dvb_usb_device, rc_query_work.work); work 297 drivers/media/usb/dvb-usb/technisat-usb2.c static void technisat_usb2_green_led_control(struct work_struct *work) work 300 drivers/media/usb/dvb-usb/technisat-usb2.c container_of(work, struct technisat_usb2_state, green_led_work.work); work 415 drivers/media/usb/em28xx/em28xx-audio.c static void audio_trigger(struct work_struct *work) work 418 drivers/media/usb/em28xx/em28xx-audio.c container_of(work, struct em28xx_audio, wq_trigger); work 3277 drivers/media/usb/em28xx/em28xx-cards.c static void request_module_async(struct work_struct *work) work 3279 drivers/media/usb/em28xx/em28xx-cards.c struct em28xx *dev = container_of(work, work 66 drivers/media/usb/em28xx/em28xx-input.c struct delayed_work work; work 366 drivers/media/usb/em28xx/em28xx-input.c static void em28xx_ir_work(struct work_struct *work) work 368 drivers/media/usb/em28xx/em28xx-input.c struct em28xx_IR *ir = container_of(work, struct em28xx_IR, work.work); work 374 drivers/media/usb/em28xx/em28xx-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); work 381 drivers/media/usb/em28xx/em28xx-input.c INIT_DELAYED_WORK(&ir->work, em28xx_ir_work); work 382 drivers/media/usb/em28xx/em28xx-input.c schedule_delayed_work(&ir->work, 0); work 391 drivers/media/usb/em28xx/em28xx-input.c cancel_delayed_work_sync(&ir->work); work 507 drivers/media/usb/em28xx/em28xx-input.c static void em28xx_query_buttons(struct work_struct *work) work 510 drivers/media/usb/em28xx/em28xx-input.c container_of(work, struct em28xx, buttons_query_work.work); work 885 drivers/media/usb/em28xx/em28xx-input.c cancel_delayed_work_sync(&ir->work); work 908 drivers/media/usb/em28xx/em28xx-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); work 74 drivers/media/usb/gspca/finepix.c static void dostream(struct work_struct *work) work 76 drivers/media/usb/gspca/finepix.c struct usb_fpix *dev = container_of(work, struct usb_fpix, work_struct); work 302 drivers/media/usb/gspca/jl2005bcd.c static void jl2005c_dostream(struct work_struct *work) work 304 drivers/media/usb/gspca/jl2005bcd.c struct sd *dev = container_of(work, struct sd, work_struct); work 80 drivers/media/usb/gspca/sn9c20x.c struct work_struct work; work 105 drivers/media/usb/gspca/sn9c20x.c static void qual_upd(struct work_struct *work); work 1651 drivers/media/usb/gspca/sn9c20x.c INIT_WORK(&sd->work, qual_upd); work 2068 drivers/media/usb/gspca/sn9c20x.c flush_work(&sd->work); work 2151 drivers/media/usb/gspca/sn9c20x.c static void qual_upd(struct 
work_struct *work) work 2153 drivers/media/usb/gspca/sn9c20x.c struct sd *sd = container_of(work, struct sd, work); work 2222 drivers/media/usb/gspca/sn9c20x.c schedule_work(&sd->work); work 43 drivers/media/usb/gspca/sonixj.c struct work_struct work; work 91 drivers/media/usb/gspca/sonixj.c static void qual_upd(struct work_struct *work); work 1538 drivers/media/usb/gspca/sonixj.c INIT_WORK(&sd->work, qual_upd); work 2156 drivers/media/usb/gspca/sonixj.c static void qual_upd(struct work_struct *work) work 2158 drivers/media/usb/gspca/sonixj.c struct sd *sd = container_of(work, struct sd, work); work 2565 drivers/media/usb/gspca/sonixj.c flush_work(&sd->work); work 2777 drivers/media/usb/gspca/sonixj.c schedule_work(&sd->work); work 198 drivers/media/usb/gspca/sq905.c static void sq905_dostream(struct work_struct *work) work 200 drivers/media/usb/gspca/sq905.c struct sd *dev = container_of(work, struct sd, work_struct); work 121 drivers/media/usb/gspca/sq905c.c static void sq905c_dostream(struct work_struct *work) work 123 drivers/media/usb/gspca/sq905c.c struct sd *dev = container_of(work, struct sd, work_struct); work 167 drivers/media/usb/gspca/vicam.c static void vicam_dostream(struct work_struct *work) work 169 drivers/media/usb/gspca/vicam.c struct sd *sd = container_of(work, struct sd, work_struct); work 41 drivers/media/usb/gspca/zc3xx.c struct work_struct work; work 5938 drivers/media/usb/gspca/zc3xx.c static void transfer_update(struct work_struct *work) work 5940 drivers/media/usb/gspca/zc3xx.c struct sd *sd = container_of(work, struct sd, work); work 6333 drivers/media/usb/gspca/zc3xx.c INIT_WORK(&sd->work, transfer_update); work 6841 drivers/media/usb/gspca/zc3xx.c schedule_work(&sd->work); work 6853 drivers/media/usb/gspca/zc3xx.c flush_work(&sd->work); work 244 drivers/media/usb/hdpvr/hdpvr-video.c static void hdpvr_transmit_buffers(struct work_struct *work) work 246 drivers/media/usb/hdpvr/hdpvr-video.c struct hdpvr_device *dev = container_of(work, struct hdpvr_device, work 118 drivers/media/usb/pulse8-cec/pulse8-cec.c struct work_struct work; work 135 drivers/media/usb/pulse8-cec/pulse8-cec.c static void pulse8_ping_eeprom_work_handler(struct work_struct *work); work 137 drivers/media/usb/pulse8-cec/pulse8-cec.c static void pulse8_irq_work_handler(struct work_struct *work) work 140 drivers/media/usb/pulse8-cec/pulse8-cec.c container_of(work, struct pulse8, work); work 195 drivers/media/usb/pulse8-cec/pulse8-cec.c schedule_work(&pulse8->work); work 206 drivers/media/usb/pulse8-cec/pulse8-cec.c schedule_work(&pulse8->work); work 670 drivers/media/usb/pulse8-cec/pulse8-cec.c INIT_WORK(&pulse8->work, pulse8_irq_work_handler); work 712 drivers/media/usb/pulse8-cec/pulse8-cec.c static void pulse8_ping_eeprom_work_handler(struct work_struct *work) work 715 drivers/media/usb/pulse8-cec/pulse8-cec.c container_of(work, struct pulse8, ping_eeprom_work.work); work 318 drivers/media/usb/pvrusb2/pvrusb2-hdw.c static void pvr2_hdw_worker_poll(struct work_struct *work); work 3190 drivers/media/usb/pvrusb2/pvrusb2-hdw.c static void pvr2_hdw_worker_poll(struct work_struct *work) work 3193 drivers/media/usb/pvrusb2/pvrusb2-hdw.c struct pvr2_hdw *hdw = container_of(work,struct pvr2_hdw,workpoll); work 45 drivers/media/usb/rainshadow-cec/rainshadow-cec.c struct work_struct work; work 107 drivers/media/usb/rainshadow-cec/rainshadow-cec.c static void rain_irq_work_handler(struct work_struct *work) work 110 drivers/media/usb/rainshadow-cec/rainshadow-cec.c container_of(work, struct rain, work); 
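The gspca entries around this point (sn9c20x, sonixj, zc3xx) schedule a quality-update worker from the URB completion path and flush it before stopping the stream: schedule_work() is callable from atomic context, while the USB control transfers the worker issues may sleep. A sketch, with sd_dev and the function names invented:

#include <linux/workqueue.h>

struct sd_dev {
        struct work_struct work;
        int quality;            /* written by the control, read by the worker */
};

/* process context: may sleep on USB control transfers */
static void qual_update_fn(struct work_struct *work)
{
        struct sd_dev *sd = container_of(work, struct sd_dev, work);

        (void)sd;               /* send sd->quality to the device here */
}

static void sd_set_quality(struct sd_dev *sd, int q)
{
        sd->quality = q;
        schedule_work(&sd->work);       /* safe from interrupt context */
}

static void sd_stop(struct sd_dev *sd)
{
        flush_work(&sd->work);  /* no worker may touch stopped hardware */
}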
work 183 drivers/media/usb/rainshadow-cec/rainshadow-cec.c schedule_work(&rain->work); work 191 drivers/media/usb/rainshadow-cec/rainshadow-cec.c cancel_work_sync(&rain->work); work 328 drivers/media/usb/rainshadow-cec/rainshadow-cec.c INIT_WORK(&rain->work, rain_irq_work_handler); work 70 drivers/media/usb/siano/smsusb.c static void do_submit_urb(struct work_struct *work) work 72 drivers/media/usb/siano/smsusb.c struct smsusb_urb_t *surb = container_of(work, struct smsusb_urb_t, wq); work 322 drivers/media/usb/tm6000/tm6000-alsa.c static void audio_trigger(struct work_struct *work) work 324 drivers/media/usb/tm6000/tm6000-alsa.c struct tm6000_core *core = container_of(work, struct tm6000_core, work 1043 drivers/media/usb/tm6000/tm6000-cards.c static void request_module_async(struct work_struct *work) work 1045 drivers/media/usb/tm6000/tm6000-cards.c struct tm6000_core *dev = container_of(work, struct tm6000_core, work 54 drivers/media/usb/tm6000/tm6000-input.c struct delayed_work work; work 195 drivers/media/usb/tm6000/tm6000-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_SUBMIT_DELAY)); work 213 drivers/media/usb/tm6000/tm6000-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(10)); work 216 drivers/media/usb/tm6000/tm6000-input.c static void tm6000_ir_handle_key(struct work_struct *work) work 218 drivers/media/usb/tm6000/tm6000-input.c struct tm6000_IR *ir = container_of(work, struct tm6000_IR, work.work); work 248 drivers/media/usb/tm6000/tm6000-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); work 251 drivers/media/usb/tm6000/tm6000-input.c static void tm6000_ir_int_work(struct work_struct *work) work 253 drivers/media/usb/tm6000/tm6000-input.c struct tm6000_IR *ir = container_of(work, struct tm6000_IR, work.work); work 269 drivers/media/usb/tm6000/tm6000-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_SUBMIT_DELAY)); work 279 drivers/media/usb/tm6000/tm6000-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_INT_LED_DELAY)); work 292 drivers/media/usb/tm6000/tm6000-input.c schedule_delayed_work(&ir->work, 0); work 303 drivers/media/usb/tm6000/tm6000-input.c cancel_delayed_work_sync(&ir->work); work 359 drivers/media/usb/tm6000/tm6000-input.c schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_SUBMIT_DELAY)); work 436 drivers/media/usb/tm6000/tm6000-input.c INIT_DELAYED_WORK(&ir->work, tm6000_ir_int_work); work 441 drivers/media/usb/tm6000/tm6000-input.c INIT_DELAYED_WORK(&ir->work, tm6000_ir_handle_key); work 292 drivers/media/usb/usbtv/usbtv-audio.c static void snd_usbtv_trigger(struct work_struct *work) work 294 drivers/media/usb/usbtv/usbtv-audio.c struct usbtv *chip = container_of(work, struct usbtv, snd_trigger); work 1274 drivers/media/usb/uvc/uvc_ctrl.c static void uvc_ctrl_status_event_work(struct work_struct *work) work 1276 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_device *dev = container_of(work, struct uvc_device, work 1277 drivers/media/usb/uvc/uvc_ctrl.c async_ctrl.work); work 1336 drivers/media/usb/uvc/uvc_ctrl.c schedule_work(&w->work); work 2278 drivers/media/usb/uvc/uvc_ctrl.c INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work); work 2349 drivers/media/usb/uvc/uvc_ctrl.c if (dev->async_ctrl.work.func) work 2350 drivers/media/usb/uvc/uvc_ctrl.c cancel_work_sync(&dev->async_ctrl.work); work 1106 drivers/media/usb/uvc/uvc_video.c static void uvc_video_copy_data_work(struct work_struct *work) work 1108 drivers/media/usb/uvc/uvc_video.c struct uvc_urb *uvc_urb = container_of(work, struct 
uvc_urb, work); work 1554 drivers/media/usb/uvc/uvc_video.c queue_work(stream->async_wq, &uvc_urb->work); work 2075 drivers/media/usb/uvc/uvc_video.c INIT_WORK(&uvc_urb->work, uvc_video_copy_data_work); work 532 drivers/media/usb/uvc/uvcvideo.h struct work_struct work; work 671 drivers/media/usb/uvc/uvcvideo.h struct work_struct work; work 370 drivers/media/v4l2-core/v4l2-mem2mem.c static void v4l2_m2m_device_run_work(struct work_struct *work) work 373 drivers/media/v4l2-core/v4l2-mem2mem.c container_of(work, struct v4l2_m2m_dev, job_work); work 429 drivers/memstick/core/memstick.c static void memstick_check(struct work_struct *work) work 431 drivers/memstick/core/memstick.c struct memstick_host *host = container_of(work, struct memstick_host, work 1867 drivers/memstick/core/ms_block.c static void msb_io_work(struct work_struct *work) work 1869 drivers/memstick/core/ms_block.c struct msb_data *msb = container_of(work, struct msb_data, io_work); work 389 drivers/memstick/host/rtsx_pci_ms.c static void rtsx_pci_ms_handle_req(struct work_struct *work) work 391 drivers/memstick/host/rtsx_pci_ms.c struct realtek_pci_ms *host = container_of(work, work 506 drivers/memstick/host/rtsx_usb_ms.c static void rtsx_usb_ms_handle_req(struct work_struct *work) work 508 drivers/memstick/host/rtsx_usb_ms.c struct rtsx_usb_ms *host = container_of(work, work 716 drivers/memstick/host/rtsx_usb_ms.c static void rtsx_usb_ms_poll_card(struct work_struct *work) work 718 drivers/memstick/host/rtsx_usb_ms.c struct rtsx_usb_ms *host = container_of(work, struct rtsx_usb_ms, work 719 drivers/memstick/host/rtsx_usb_ms.c poll_card.work); work 357 drivers/message/fusion/mptbase.c mpt_fault_reset_work(struct work_struct *work) work 360 drivers/message/fusion/mptbase.c container_of(work, MPT_ADAPTER, fault_reset_work.work); work 1068 drivers/message/fusion/mptfc.c mptfc_link_status_change(struct work_struct *work) work 1071 drivers/message/fusion/mptfc.c container_of(work, MPT_ADAPTER, fc_rescan_work); work 1080 drivers/message/fusion/mptfc.c mptfc_setup_reset(struct work_struct *work) work 1083 drivers/message/fusion/mptfc.c container_of(work, MPT_ADAPTER, fc_setup_reset_work); work 1114 drivers/message/fusion/mptfc.c mptfc_rescan_devices(struct work_struct *work) work 1117 drivers/message/fusion/mptfc.c container_of(work, MPT_ADAPTER, fc_rescan_work); work 1288 drivers/message/fusion/mptlan.c mpt_lan_post_receive_buckets_work(struct work_struct *work) work 1290 drivers/message/fusion/mptlan.c mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv, work 1291 drivers/message/fusion/mptlan.c post_buckets_task.work)); work 106 drivers/message/fusion/mptsas.c static void mptsas_firmware_event_work(struct work_struct *work); work 292 drivers/message/fusion/mptsas.c INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work); work 297 drivers/message/fusion/mptsas.c &fw_event->work, delay); work 313 drivers/message/fusion/mptsas.c &fw_event->work, msecs_to_jiffies(delay)); work 358 drivers/message/fusion/mptsas.c if (cancel_delayed_work(&fw_event->work)) work 1617 drivers/message/fusion/mptsas.c mptsas_firmware_event_work(struct work_struct *work) work 1620 drivers/message/fusion/mptsas.c container_of(work, struct fw_event_work, work.work); work 109 drivers/message/fusion/mptsas.h struct delayed_work work; work 117 drivers/message/fusion/mptsas.h struct work_struct work; work 1105 drivers/message/fusion/mptspi.c struct work_struct work; work 1110 drivers/message/fusion/mptspi.c static void mpt_work_wrapper(struct 
work_struct *work) work 1113 drivers/message/fusion/mptspi.c container_of(work, struct work_queue_wrapper, work); work 1162 drivers/message/fusion/mptspi.c INIT_WORK(&wqw->work, mpt_work_wrapper); work 1166 drivers/message/fusion/mptspi.c schedule_work(&wqw->work); work 1256 drivers/message/fusion/mptspi.c mptspi_dv_renegotiate_work(struct work_struct *work) work 1259 drivers/message/fusion/mptspi.c container_of(work, struct work_queue_wrapper, work); work 1294 drivers/message/fusion/mptspi.c INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work); work 1297 drivers/message/fusion/mptspi.c schedule_work(&wqw->work); work 394 drivers/mfd/da903x.c static void da903x_irq_work(struct work_struct *work) work 397 drivers/mfd/da903x.c container_of(work, struct da903x_chip, irq_work); work 376 drivers/mfd/db8500-prcmu.c struct completion work; work 397 drivers/mfd/db8500-prcmu.c struct completion work; work 424 drivers/mfd/db8500-prcmu.c struct completion work; work 435 drivers/mfd/db8500-prcmu.c struct completion work; work 910 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb1_transfer.work); work 1018 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb1_transfer.work); work 1080 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb1_transfer.work); work 1110 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb1_transfer.work); work 1141 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb1_transfer.work); work 1203 drivers/mfd/db8500-prcmu.c if (!wait_for_completion_timeout(&mb2_transfer.work, work 2013 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb4_transfer.work); work 2031 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb4_transfer.work); work 2052 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb4_transfer.work); work 2071 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb4_transfer.work); work 2109 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb4_transfer.work); work 2192 drivers/mfd/db8500-prcmu.c if (!wait_for_completion_timeout(&mb5_transfer.work, work 2242 drivers/mfd/db8500-prcmu.c if (!wait_for_completion_timeout(&mb5_transfer.work, work 2382 drivers/mfd/db8500-prcmu.c wait_for_completion(&mb1_transfer.work); work 2461 drivers/mfd/db8500-prcmu.c complete(&mb1_transfer.work); work 2469 drivers/mfd/db8500-prcmu.c complete(&mb2_transfer.work); work 2505 drivers/mfd/db8500-prcmu.c complete(&mb4_transfer.work); work 2515 drivers/mfd/db8500-prcmu.c complete(&mb5_transfer.work); work 2569 drivers/mfd/db8500-prcmu.c static void prcmu_mask_work(struct work_struct *work) work 2752 drivers/mfd/db8500-prcmu.c init_completion(&mb1_transfer.work); work 2755 drivers/mfd/db8500-prcmu.c init_completion(&mb2_transfer.work); work 2761 drivers/mfd/db8500-prcmu.c init_completion(&mb4_transfer.work); work 2763 drivers/mfd/db8500-prcmu.c init_completion(&mb5_transfer.work); work 169 drivers/mfd/ezx-pcap.c static void pcap_msr_work(struct work_struct *work) work 171 drivers/mfd/ezx-pcap.c struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work); work 176 drivers/mfd/ezx-pcap.c static void pcap_isr_work(struct work_struct *work) work 178 drivers/mfd/ezx-pcap.c struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work); work 233 drivers/mfd/htc-i2cpld.c static void htcpld_chip_set_ni(struct work_struct *work) work 238 drivers/mfd/htc-i2cpld.c chip_data = container_of(work, struct htcpld_chip, set_val_work); work 156 drivers/mfd/menelaus.c struct work_struct work; work 763 drivers/mfd/menelaus.c container_of(_menelaus, struct menelaus_chip, work); work 801 drivers/mfd/menelaus.c 
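Note that in the db8500-prcmu entries above (and in si4713 and fastrpc nearby), work is not a work item at all but a struct completion: one thread kicks the hardware and sleeps in wait_for_completion_timeout(), and the IRQ handler wakes it with complete(). A sketch of that mailbox shape; mb_transfer and the helpers are illustrative:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

struct mb_transfer {
        struct completion work;         /* a completion named "work" */
        u32 reply;
};

static int mb_send_and_wait(struct mb_transfer *xfer)
{
        reinit_completion(&xfer->work); /* init_completion() ran once at probe */
        /* ... write the request to the mailbox ... */
        if (!wait_for_completion_timeout(&xfer->work, msecs_to_jiffies(500)))
                return -ETIMEDOUT;
        return 0;
}

/* called from the mailbox IRQ handler */
static void mb_irq(struct mb_transfer *xfer, u32 reply)
{
        xfer->reply = reply;
        complete(&xfer->work);
}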
(void)schedule_work(&menelaus->work); work 1198 drivers/mfd/menelaus.c INIT_WORK(&menelaus->work, menelaus_work); work 1221 drivers/mfd/menelaus.c flush_work(&menelaus->work); work 1230 drivers/mfd/menelaus.c flush_work(&menelaus->work); work 359 drivers/mfd/si476x-i2c.c static void si476x_core_drain_rds_fifo(struct work_struct *work) work 363 drivers/mfd/si476x-i2c.c struct si476x_core *core = container_of(work, struct si476x_core, work 515 drivers/mfd/si476x-i2c.c static void si476x_core_poll_loop(struct work_struct *work) work 517 drivers/mfd/si476x-i2c.c struct si476x_core *core = SI476X_WORK_TO_CORE(work); work 63 drivers/mfd/tps65010.c struct delayed_work work; work 232 drivers/mfd/tps65010.c queue_delayed_work(system_power_efficient_wq, &tps->work, work 390 drivers/mfd/tps65010.c queue_delayed_work(system_power_efficient_wq, &tps->work, work 397 drivers/mfd/tps65010.c static void tps65010_work(struct work_struct *work) work 401 drivers/mfd/tps65010.c tps = container_of(to_delayed_work(work), struct tps65010, work); work 439 drivers/mfd/tps65010.c queue_delayed_work(system_power_efficient_wq, &tps->work, 0); work 518 drivers/mfd/tps65010.c cancel_delayed_work_sync(&tps->work); work 544 drivers/mfd/tps65010.c INIT_DELAYED_WORK(&tps->work, tps65010_work); work 618 drivers/mfd/tps65010.c tps65010_work(&tps->work.work); work 705 drivers/mfd/tps65010.c queue_delayed_work(system_power_efficient_wq, &the_tps->work, work 557 drivers/misc/bh1770glc.c static void bh1770_prox_work(struct work_struct *work) work 560 drivers/misc/bh1770glc.c container_of(work, struct bh1770_chip, prox_work.work); work 914 drivers/misc/cardreader/rtsx_pcr.c static void rtsx_pci_card_detect(struct work_struct *work) work 922 drivers/misc/cardreader/rtsx_pcr.c dwork = to_delayed_work(work); work 1110 drivers/misc/cardreader/rtsx_pcr.c static void rtsx_pci_idle_work(struct work_struct *work) work 1112 drivers/misc/cardreader/rtsx_pcr.c struct delayed_work *dwork = to_delayed_work(work); work 460 drivers/misc/cxl/api.c struct cxl_ioctl_start_work *work) work 465 drivers/misc/cxl/api.c if (!(work->flags & CXL_START_WORK_NUM_IRQS)) work 466 drivers/misc/cxl/api.c work->num_interrupts = ctx->afu->pp_irqs; work 467 drivers/misc/cxl/api.c else if ((work->num_interrupts < ctx->afu->pp_irqs) || work 468 drivers/misc/cxl/api.c (work->num_interrupts > ctx->afu->irqs_max)) { work 472 drivers/misc/cxl/api.c rc = afu_register_irqs(ctx, work->num_interrupts); work 476 drivers/misc/cxl/api.c rc = cxl_start_context(ctx, work->work_element_descriptor, current); work 968 drivers/misc/cxl/cxl.h void cxl_handle_fault(struct work_struct *work); work 150 drivers/misc/cxl/file.c struct cxl_ioctl_start_work work; work 158 drivers/misc/cxl/file.c if (copy_from_user(&work, uwork, sizeof(work))) work 171 drivers/misc/cxl/file.c if (work.reserved1 || work.reserved2 || work.reserved3 || work 172 drivers/misc/cxl/file.c work.reserved4 || work.reserved5 || work 173 drivers/misc/cxl/file.c (work.flags & ~CXL_START_WORK_ALL)) { work 178 drivers/misc/cxl/file.c if (!(work.flags & CXL_START_WORK_NUM_IRQS)) work 179 drivers/misc/cxl/file.c work.num_interrupts = ctx->afu->pp_irqs; work 180 drivers/misc/cxl/file.c else if ((work.num_interrupts < ctx->afu->pp_irqs) || work 181 drivers/misc/cxl/file.c (work.num_interrupts > ctx->afu->irqs_max)) { work 186 drivers/misc/cxl/file.c if ((rc = afu_register_irqs(ctx, work.num_interrupts))) work 189 drivers/misc/cxl/file.c if (work.flags & CXL_START_WORK_AMR) work 190 drivers/misc/cxl/file.c amr = work.amr & 
mfspr(SPRN_UAMOR); work 192 drivers/misc/cxl/file.c if (work.flags & CXL_START_WORK_TID) work 195 drivers/misc/cxl/file.c ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF); work 251 drivers/misc/cxl/file.c trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); work 253 drivers/misc/cxl/file.c if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, work 267 drivers/misc/cxl/file.c if (work.flags & CXL_START_WORK_TID) { work 268 drivers/misc/cxl/file.c work.tid = ctx->tidr; work 269 drivers/misc/cxl/file.c if (copy_to_user(uwork, &work, sizeof(work))) work 876 drivers/misc/cxl/guest.c static void afu_handle_errstate(struct work_struct *work) work 879 drivers/misc/cxl/guest.c container_of(to_delayed_work(work), struct cxl_afu_guest, work_err); work 160 drivers/misc/fastrpc.c struct completion work; work 352 drivers/misc/fastrpc.c static void fastrpc_context_put_wq(struct work_struct *work) work 355 drivers/misc/fastrpc.c container_of(work, struct fastrpc_invoke_ctx, put_work); work 454 drivers/misc/fastrpc.c init_completion(&ctx->work); work 938 drivers/misc/fastrpc.c err = wait_for_completion_interruptible(&ctx->work); work 1459 drivers/misc/fastrpc.c complete(&ctx->work); work 1505 drivers/misc/fastrpc.c complete(&ctx->work); work 14 drivers/misc/habanalabs/command_submission.c static void job_wq_completion(struct work_struct *work); work 253 drivers/misc/habanalabs/command_submission.c static void cs_timedout(struct work_struct *work) work 257 drivers/misc/habanalabs/command_submission.c struct hl_cs *cs = container_of(work, struct hl_cs, work 258 drivers/misc/habanalabs/command_submission.c work_tdr.work); work 379 drivers/misc/habanalabs/command_submission.c static void job_wq_completion(struct work_struct *work) work 381 drivers/misc/habanalabs/command_submission.c struct hl_cs_job *job = container_of(work, struct hl_cs_job, work 360 drivers/misc/habanalabs/device.c static void set_freq_to_low_job(struct work_struct *work) work 362 drivers/misc/habanalabs/device.c struct hl_device *hdev = container_of(work, struct hl_device, work 363 drivers/misc/habanalabs/device.c work_freq.work); work 376 drivers/misc/habanalabs/device.c static void hl_device_heartbeat(struct work_struct *work) work 378 drivers/misc/habanalabs/device.c struct hl_device *hdev = container_of(work, struct hl_device, work 379 drivers/misc/habanalabs/device.c work_heartbeat.work); work 781 drivers/misc/habanalabs/device.c static void device_hard_reset_pending(struct work_struct *work) work 784 drivers/misc/habanalabs/device.c container_of(work, struct hl_device_reset_work, reset_work); work 57 drivers/misc/habanalabs/irq.c static void irq_handle_eqe(struct work_struct *work) work 59 drivers/misc/habanalabs/irq.c struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work, work 266 drivers/misc/mei/bus.c static void mei_cl_bus_rx_work(struct work_struct *work) work 271 drivers/misc/mei/bus.c cldev = container_of(work, struct mei_cl_device, rx_work); work 288 drivers/misc/mei/bus.c static void mei_cl_bus_notif_work(struct work_struct *work) work 292 drivers/misc/mei/bus.c cldev = container_of(work, struct mei_cl_device, notif_work); work 1100 drivers/misc/mei/bus.c void mei_cl_bus_rescan_work(struct work_struct *work) work 1103 drivers/misc/mei/bus.c container_of(work, struct mei_device, bus_rescan_work); work 274 drivers/misc/mei/init.c static void mei_reset_work(struct work_struct *work) work 277 drivers/misc/mei/init.c container_of(work, struct mei_device, 
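A third meaning of work appears in the cxl file.c entries above: struct cxl_ioctl_start_work is a user-visible ioctl payload, copied in with copy_from_user() and rejected if any reserved field or undefined flag bit is set (work 171-173), which keeps those bits available for future extensions. A sketch of that validation shape; the struct, mask and function here are hypothetical, not the cxl ABI:

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

struct start_args {                     /* hypothetical ioctl payload */
        __u64 flags;
        __u64 reserved1;
        __u64 reserved2;
};

#define START_FLAGS_ALL 0x3ULL          /* mask of currently defined flags */

static long start_ioctl(void __user *uptr)
{
        struct start_args args;

        if (copy_from_user(&args, uptr, sizeof(args)))
                return -EFAULT;
        /* reserved fields must be zero so they can gain meaning later */
        if (args.reserved1 || args.reserved2 ||
            (args.flags & ~START_FLAGS_ALL))
                return -EINVAL;
        return 0;
}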
reset_work); work 494 drivers/misc/mei/interrupt.c void mei_timer(struct work_struct *work) work 497 drivers/misc/mei/interrupt.c struct mei_device *dev = container_of(work, work 498 drivers/misc/mei/interrupt.c struct mei_device, timer_work.work); work 317 drivers/misc/mei/mei_dev.h void mei_cl_bus_rescan_work(struct work_struct *work); work 604 drivers/misc/mei/mei_dev.h void mei_timer(struct work_struct *work); work 169 drivers/misc/mic/cosm/cosm_main.c static void cosm_reset_trigger_work(struct work_struct *work) work 171 drivers/misc/mic/cosm/cosm_main.c struct cosm_device *cdev = container_of(work, struct cosm_device, work 59 drivers/misc/mic/cosm/cosm_main.h void cosm_scif_work(struct work_struct *work); work 234 drivers/misc/mic/cosm/cosm_scif_server.c void cosm_scif_work(struct work_struct *work) work 236 drivers/misc/mic/cosm/cosm_scif_server.c struct cosm_device *cdev = container_of(work, struct cosm_device, work 570 drivers/misc/mic/scif/scif_api.c void scif_conn_handler(struct work_struct *work) work 279 drivers/misc/mic/scif/scif_dma.c void scif_mmu_notif_handler(struct work_struct *work) work 315 drivers/misc/mic/scif/scif_dma.c void scif_mmu_notif_handler(struct work_struct *work) work 679 drivers/misc/mic/scif/scif_dma.c iounmap_remote(void *virt, size_t size, struct scif_copy_work *work) work 681 drivers/misc/mic/scif/scif_dma.c scif_iounmap(virt, size, work->remote_dev); work 867 drivers/misc/mic/scif/scif_dma.c scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work, work 871 drivers/misc/mic/scif/scif_dma.c struct scif_dma_comp_cb *comp_cb = work->comp_cb; work 874 drivers/misc/mic/scif/scif_dma.c size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len; work 885 drivers/misc/mic/scif/scif_dma.c offset = work->dst_offset; work 886 drivers/misc/mic/scif/scif_dma.c window = work->dst_window; work 888 drivers/misc/mic/scif/scif_dma.c offset = work->src_offset; work 889 drivers/misc/mic/scif/scif_dma.c window = work->src_window; work 898 drivers/misc/mic/scif/scif_dma.c work->remote_dev, work 905 drivers/misc/mic/scif/scif_dma.c work->ordered && work 909 drivers/misc/mic/scif/scif_dma.c loop_len, work->ordered && work 911 drivers/misc/mic/scif/scif_dma.c iounmap_remote(window_virt_addr, loop_len, work); work 941 drivers/misc/mic/scif/scif_dma.c if (work->ordered && !tail_len && work 1037 drivers/misc/mic/scif/scif_dma.c work->remote_dev, work 1046 drivers/misc/mic/scif/scif_dma.c if (work->ordered) { work 1047 drivers/misc/mic/scif/scif_dma.c struct scif_dev *rdev = work->remote_dev; work 1055 drivers/misc/mic/scif/scif_dma.c tail_len, work->ordered); work 1058 drivers/misc/mic/scif/scif_dma.c tail_len, work->ordered); work 1059 drivers/misc/mic/scif/scif_dma.c iounmap_remote(window_virt_addr, tail_len, work); work 1088 drivers/misc/mic/scif/scif_dma.c static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, work 1097 drivers/misc/mic/scif/scif_dma.c struct scif_window *src_window = work->src_window; work 1098 drivers/misc/mic/scif/scif_dma.c struct scif_window *dst_window = work->dst_window; work 1099 drivers/misc/mic/scif/scif_dma.c s64 src_offset = work->src_offset, dst_offset = work->dst_offset; work 1105 drivers/misc/mic/scif/scif_dma.c remaining_len = work->len; work 1136 drivers/misc/mic/scif/scif_dma.c if (work->ordered && !(remaining_len - loop_len)) { work 1206 drivers/misc/mic/scif/scif_dma.c static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, work 1217 drivers/misc/mic/scif/scif_dma.c struct scif_window *src_window 
= work->src_window; work 1218 drivers/misc/mic/scif/scif_dma.c struct scif_window *dst_window = work->dst_window; work 1219 drivers/misc/mic/scif/scif_dma.c s64 src_offset = work->src_offset, dst_offset = work->dst_offset; work 1225 drivers/misc/mic/scif/scif_dma.c remaining_len = work->len; work 1242 drivers/misc/mic/scif/scif_dma.c work->remote_dev, NULL); work 1251 drivers/misc/mic/scif/scif_dma.c work->remote_dev, NULL); work 1254 drivers/misc/mic/scif/scif_dma.c iounmap_remote(src_virt, loop_len, work); work 1260 drivers/misc/mic/scif/scif_dma.c work->ordered : false); work 1264 drivers/misc/mic/scif/scif_dma.c work->ordered : false); work 1266 drivers/misc/mic/scif/scif_dma.c iounmap_remote(src_virt, loop_len, work); work 1268 drivers/misc/mic/scif/scif_dma.c iounmap_remote(dst_virt, loop_len, work); work 1303 drivers/misc/mic/scif/scif_dma.c if (work->ordered && !tail_len && work 1384 drivers/misc/mic/scif/scif_dma.c if (work->ordered) { work 1385 drivers/misc/mic/scif/scif_dma.c struct scif_dev *rdev = work->remote_dev; work 1397 drivers/misc/mic/scif/scif_dma.c work->remote_dev, NULL); work 1407 drivers/misc/mic/scif/scif_dma.c work->remote_dev, NULL); work 1410 drivers/misc/mic/scif/scif_dma.c iounmap_remote(src_virt, loop_len, work); work 1416 drivers/misc/mic/scif/scif_dma.c work->ordered); work 1419 drivers/misc/mic/scif/scif_dma.c loop_len, work->ordered); work 1421 drivers/misc/mic/scif/scif_dma.c iounmap_remote(src_virt, loop_len, work); work 1424 drivers/misc/mic/scif/scif_dma.c iounmap_remote(dst_virt, loop_len, work); work 1440 drivers/misc/mic/scif/scif_dma.c static int scif_rma_list_cpu_copy(struct scif_copy_work *work) work 1445 drivers/misc/mic/scif/scif_dma.c s64 src_offset = work->src_offset, dst_offset = work->dst_offset; work 1446 drivers/misc/mic/scif/scif_dma.c struct scif_window *src_window = work->src_window; work 1447 drivers/misc/mic/scif/scif_dma.c struct scif_window *dst_window = work->dst_window; work 1453 drivers/misc/mic/scif/scif_dma.c remaining_len = work->len; work 1470 drivers/misc/mic/scif/scif_dma.c work->remote_dev, work 1483 drivers/misc/mic/scif/scif_dma.c work->remote_dev, work 1487 drivers/misc/mic/scif/scif_dma.c iounmap_remote(src_virt, loop_len, work); work 1492 drivers/misc/mic/scif/scif_dma.c if (work->loopback) { work 1504 drivers/misc/mic/scif/scif_dma.c iounmap_remote(src_virt, loop_len, work); work 1507 drivers/misc/mic/scif/scif_dma.c iounmap_remote(dst_virt, loop_len, work); work 1534 drivers/misc/mic/scif/scif_dma.c struct scif_copy_work *work, work 1538 drivers/misc/mic/scif/scif_dma.c s64 src_offset = work->src_offset, dst_offset = work->dst_offset; work 1545 drivers/misc/mic/scif/scif_dma.c return _scif_rma_list_dma_copy_aligned(work, chan); work 1551 drivers/misc/mic/scif/scif_dma.c return scif_rma_list_dma_copy_aligned(work, chan); work 1553 drivers/misc/mic/scif/scif_dma.c if (work->loopback) work 1554 drivers/misc/mic/scif/scif_dma.c return scif_rma_list_cpu_copy(work); work 1555 drivers/misc/mic/scif/scif_dma.c src_local = work->src_window->type == SCIF_WINDOW_SELF; work 1562 drivers/misc/mic/scif/scif_dma.c work->comp_cb = comp_cb; work 1566 drivers/misc/mic/scif/scif_dma.c if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) { work 1569 drivers/misc/mic/scif/scif_dma.c temp = kmalloc(work->len + (L1_CACHE_BYTES << 1), work 1587 drivers/misc/mic/scif/scif_dma.c scif_rma_local_cpu_copy(work->src_offset, work->src_window, work 1588 drivers/misc/mic/scif/scif_dma.c temp, work->len, true); work 1590 
drivers/misc/mic/scif/scif_dma.c comp_cb->dst_window = work->dst_window; work 1591 drivers/misc/mic/scif/scif_dma.c comp_cb->dst_offset = work->dst_offset; work 1592 drivers/misc/mic/scif/scif_dma.c work->src_offset = work->src_offset - src_cache_off; work 1593 drivers/misc/mic/scif/scif_dma.c comp_cb->len = work->len; work 1594 drivers/misc/mic/scif/scif_dma.c work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES); work 1600 drivers/misc/mic/scif/scif_dma.c work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE); work 1603 drivers/misc/mic/scif/scif_dma.c comp_cb->sdev = work->remote_dev; work 1604 drivers/misc/mic/scif/scif_dma.c if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0) work 1607 drivers/misc/mic/scif/scif_dma.c work->fence_type = SCIF_DMA_INTR; work 177 drivers/misc/mic/scif/scif_epd.h void scif_conn_handler(struct work_struct *work); work 32 drivers/misc/mic/scif/scif_main.c static void scif_intr_bh_handler(struct work_struct *work) work 35 drivers/misc/mic/scif/scif_main.c container_of(work, struct scif_dev, intr_bh); work 75 drivers/misc/mic/scif/scif_main.c static void scif_qp_setup_handler(struct work_struct *work) work 77 drivers/misc/mic/scif/scif_main.c struct scif_dev *scifdev = container_of(work, struct scif_dev, work 78 drivers/misc/mic/scif/scif_main.c qp_dwork.work); work 271 drivers/misc/mic/scif/scif_main.h void scif_misc_handler(struct work_struct *work); work 688 drivers/misc/mic/scif/scif_nodeqp.c void scif_misc_handler(struct work_struct *work) work 846 drivers/misc/mic/scif/scif_nodeqp.c void scif_poll_qp_state(struct work_struct *work) work 850 drivers/misc/mic/scif/scif_nodeqp.c struct scif_dev *peerdev = container_of(work, struct scif_dev, work 851 drivers/misc/mic/scif/scif_nodeqp.c p2p_dwork.work); work 199 drivers/misc/mic/scif/scif_nodeqp.h void scif_poll_qp_state(struct work_struct *work); work 103 drivers/misc/mic/scif/scif_peer_bus.c void scif_add_peer_device(struct work_struct *work) work 105 drivers/misc/mic/scif/scif_peer_bus.c struct scif_dev *scifdev = container_of(work, struct scif_dev, work 18 drivers/misc/mic/scif/scif_peer_bus.h void scif_add_peer_device(struct work_struct *work); work 370 drivers/misc/mic/scif/scif_rma.h void scif_mmu_notif_handler(struct work_struct *work); work 678 drivers/misc/mic/vop/vop_main.c static void vop_hotplug_devices(struct work_struct *work) work 680 drivers/misc/mic/vop/vop_main.c struct vop_info *vi = container_of(work, struct vop_info, work 128 drivers/misc/mic/vop/vop_vringh.c static void vop_bh_handler(struct work_struct *work) work 130 drivers/misc/mic/vop/vop_vringh.c struct vop_vdev *vdev = container_of(work, struct vop_vdev, work 450 drivers/misc/ti-st/st_core.c static void work_fn_write_wakeup(struct work_struct *work) work 452 drivers/misc/ti-st/st_core.c struct st_data_s *st_gdata = container_of(work, struct st_data_s, work 135 drivers/misc/tifm_7xx1.c static void tifm_7xx1_switch_media(struct work_struct *work) work 137 drivers/misc/tifm_7xx1.c struct tifm_adapter *fm = container_of(work, struct tifm_adapter, work 308 drivers/misc/tifm_core.c void tifm_queue_work(struct work_struct *work) work 310 drivers/misc/tifm_core.c queue_work(workqueue, work); work 1476 drivers/misc/vmw_balloon.c static void vmballoon_work(struct work_struct *work) work 1478 drivers/misc/vmw_balloon.c struct delayed_work *dwork = to_delayed_work(work); work 37 drivers/misc/vmw_vmci/vmci_datagram.c struct work_struct work; work 136 drivers/misc/vmw_vmci/vmci_datagram.c static void 
dg_delayed_dispatch(struct work_struct *work) work 139 drivers/misc/vmw_vmci/vmci_datagram.c container_of(work, struct delayed_datagram_info, work); work 239 drivers/misc/vmw_vmci/vmci_datagram.c INIT_WORK(&dg_info->work, dg_delayed_dispatch); work 240 drivers/misc/vmw_vmci/vmci_datagram.c schedule_work(&dg_info->work); work 382 drivers/misc/vmw_vmci/vmci_datagram.c INIT_WORK(&dg_info->work, dg_delayed_dispatch); work 383 drivers/misc/vmw_vmci/vmci_datagram.c schedule_work(&dg_info->work); work 36 drivers/misc/vmw_vmci/vmci_doorbell.c struct work_struct work; work 280 drivers/misc/vmw_vmci/vmci_doorbell.c static void dbell_delayed_dispatch(struct work_struct *work) work 282 drivers/misc/vmw_vmci/vmci_doorbell.c struct dbell_entry *entry = container_of(work, work 283 drivers/misc/vmw_vmci/vmci_doorbell.c struct dbell_entry, work); work 313 drivers/misc/vmw_vmci/vmci_doorbell.c if (!schedule_work(&entry->work)) work 365 drivers/misc/vmw_vmci/vmci_doorbell.c if (!schedule_work(&dbell->work)) work 468 drivers/misc/vmw_vmci/vmci_doorbell.c INIT_WORK(&entry->work, dbell_delayed_dispatch); work 2031 drivers/mmc/core/block.c void mmc_blk_mq_complete_work(struct work_struct *work) work 2033 drivers/mmc/core/block.c struct mmc_queue *mq = container_of(work, struct mmc_queue, work 18 drivers/mmc/core/block.h void mmc_blk_mq_complete_work(struct work_struct *work); work 63 drivers/mmc/core/core.c static int mmc_schedule_delayed_work(struct delayed_work *work, work 72 drivers/mmc/core/core.c return queue_delayed_work(system_freezable_wq, work, delay); work 2279 drivers/mmc/core/core.c void mmc_rescan(struct work_struct *work) work 2282 drivers/mmc/core/core.c container_of(work, struct mmc_host, detect.work); work 69 drivers/mmc/core/core.h void mmc_rescan(struct work_struct *work); work 136 drivers/mmc/core/queue.c static void mmc_mq_recovery_handler(struct work_struct *work) work 138 drivers/mmc/core/queue.c struct mmc_queue *mq = container_of(work, struct mmc_queue, work 124 drivers/mmc/core/sdio_irq.c void sdio_irq_work(struct work_struct *work) work 127 drivers/mmc/core/sdio_irq.c container_of(work, struct mmc_host, sdio_irq_work.work); work 24 drivers/mmc/core/sdio_ops.h void sdio_irq_work(struct work_struct *work); work 961 drivers/mmc/host/alcor.c static void alcor_timeout_timer(struct work_struct *work) work 963 drivers/mmc/host/alcor.c struct delayed_work *d = to_delayed_work(work); work 826 drivers/mmc/host/bcm2835.c static void bcm2835_timeout(struct work_struct *work) work 828 drivers/mmc/host/bcm2835.c struct delayed_work *d = to_delayed_work(work); work 1052 drivers/mmc/host/bcm2835.c static void bcm2835_dma_complete_work(struct work_struct *work) work 1055 drivers/mmc/host/bcm2835.c container_of(work, struct bcm2835_host, dma_work); work 1366 drivers/mmc/host/mtk-sd.c static void msdc_request_timeout(struct work_struct *work) work 1368 drivers/mmc/host/mtk-sd.c struct msdc_host *host = container_of(work, struct msdc_host, work 1369 drivers/mmc/host/mtk-sd.c req_timeout.work); work 633 drivers/mmc/host/mxcmmc.c static void mxcmci_datawork(struct work_struct *work) work 635 drivers/mmc/host/mxcmmc.c struct mxcmci_host *host = container_of(work, struct mxcmci_host, work 240 drivers/mmc/host/omap.c static void mmc_omap_slot_release_work(struct work_struct *work) work 242 drivers/mmc/host/omap.c struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, work 427 drivers/mmc/host/omap.c static void mmc_omap_send_stop_work(struct work_struct *work) work 429 drivers/mmc/host/omap.c 
struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, work 594 drivers/mmc/host/omap.c static void mmc_omap_abort_command(struct work_struct *work) work 596 drivers/mmc/host/omap.c struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, work 31 drivers/mmc/host/rtsx_pci_sdmmc.c struct work_struct work; work 786 drivers/mmc/host/rtsx_pci_sdmmc.c static void sd_request(struct work_struct *work) work 788 drivers/mmc/host/rtsx_pci_sdmmc.c struct realtek_pci_sdmmc *host = container_of(work, work 789 drivers/mmc/host/rtsx_pci_sdmmc.c struct realtek_pci_sdmmc, work); work 875 drivers/mmc/host/rtsx_pci_sdmmc.c schedule_work(&host->work); work 1399 drivers/mmc/host/rtsx_pci_sdmmc.c INIT_WORK(&host->work, sd_request); work 1427 drivers/mmc/host/rtsx_pci_sdmmc.c cancel_work_sync(&host->work); work 1447 drivers/mmc/host/rtsx_pci_sdmmc.c flush_work(&host->work); work 1284 drivers/mmc/host/rtsx_usb_sdmmc.c static void rtsx_usb_update_led(struct work_struct *work) work 1287 drivers/mmc/host/rtsx_usb_sdmmc.c container_of(work, struct rtsx_usb_sdmmc, led_work); work 2738 drivers/mmc/host/sdhci.c static void sdhci_complete_work(struct work_struct *work) work 2740 drivers/mmc/host/sdhci.c struct sdhci_host *host = container_of(work, struct sdhci_host, work 1314 drivers/mmc/host/sh_mmcif.c static void sh_mmcif_timeout_work(struct work_struct *work) work 1316 drivers/mmc/host/sh_mmcif.c struct delayed_work *d = to_delayed_work(work); work 187 drivers/mmc/host/tmio_mmc_core.c static void tmio_mmc_reset_work(struct work_struct *work) work 189 drivers/mmc/host/tmio_mmc_core.c struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, work 190 drivers/mmc/host/tmio_mmc_core.c delayed_reset_work.work); work 597 drivers/mmc/host/tmio_mmc_core.c !work_pending(&mmc->detect.work)) work 858 drivers/mmc/host/tmio_mmc_core.c static void tmio_mmc_done_work(struct work_struct *work) work 860 drivers/mmc/host/tmio_mmc_core.c struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, work 1652 drivers/mmc/host/usdhi6rol0.c if (!work_pending(&mmc->detect.work) && work 1667 drivers/mmc/host/usdhi6rol0.c static void usdhi6_timeout_work(struct work_struct *work) work 1669 drivers/mmc/host/usdhi6rol0.c struct delayed_work *d = to_delayed_work(work); work 978 drivers/mmc/host/via-sdmmc.c static void via_sdc_card_detect(struct work_struct *work) work 985 drivers/mmc/host/via-sdmmc.c host = container_of(work, struct via_crdr_mmc_host, carddet_work); work 677 drivers/mmc/host/vub300.c static void vub300_pollwork_thread(struct work_struct *work) work 679 drivers/mmc/host/vub300.c struct vub300_mmc_host *vub300 = container_of(work, work 680 drivers/mmc/host/vub300.c struct vub300_mmc_host, pollwork.work); work 708 drivers/mmc/host/vub300.c static void vub300_deadwork_thread(struct work_struct *work) work 711 drivers/mmc/host/vub300.c container_of(work, struct vub300_mmc_host, deadwork); work 1739 drivers/mmc/host/vub300.c static void vub300_cmndwork_thread(struct work_struct *work) work 1742 drivers/mmc/host/vub300.c container_of(work, struct vub300_mmc_host, cmndwork); work 120 drivers/mtd/mtdoops.c static void mtdoops_workfunc_erase(struct work_struct *work) work 123 drivers/mtd/mtdoops.c container_of(work, struct mtdoops_context, work_erase); work 208 drivers/mtd/mtdoops.c static void mtdoops_workfunc_write(struct work_struct *work) work 211 drivers/mtd/mtdoops.c container_of(work, struct mtdoops_context, work_write); work 679 drivers/mtd/nand/raw/r852.c static void 
r852_card_detect_work(struct work_struct *work) work 682 drivers/mtd/nand/raw/r852.c container_of(work, struct r852_device, card_detect_work.work); work 987 drivers/mtd/sm_ftl.c static void sm_cache_flush_work(struct work_struct *work) work 989 drivers/mtd/sm_ftl.c struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work); work 65 drivers/mtd/ubi/block.c struct work_struct work; work 292 drivers/mtd/ubi/block.c static void ubiblock_do_work(struct work_struct *work) work 295 drivers/mtd/ubi/block.c struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work); work 323 drivers/mtd/ubi/block.c queue_work(dev->wq, &pdu->work); work 338 drivers/mtd/ubi/block.c INIT_WORK(&pdu->work, ubiblock_do_work); work 2283 drivers/net/bonding/bond_3ad.c void bond_3ad_state_machine_handler(struct work_struct *work) work 2285 drivers/net/bonding/bond_3ad.c struct bonding *bond = container_of(work, struct bonding, work 2286 drivers/net/bonding/bond_3ad.c ad_work.work); work 1508 drivers/net/bonding/bond_alb.c void bond_alb_monitor(struct work_struct *work) work 1510 drivers/net/bonding/bond_alb.c struct bonding *bond = container_of(work, struct bonding, work 1511 drivers/net/bonding/bond_alb.c alb_work.work); work 209 drivers/net/bonding/bond_main.c static void bond_slave_arr_handler(struct work_struct *work); work 212 drivers/net/bonding/bond_main.c static void bond_netdev_notify_work(struct work_struct *work); work 534 drivers/net/bonding/bond_main.c static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) work 536 drivers/net/bonding/bond_main.c struct bonding *bond = container_of(work, struct bonding, work 537 drivers/net/bonding/bond_main.c mcast_work.work); work 1345 drivers/net/bonding/bond_main.c notify_work.work); work 2286 drivers/net/bonding/bond_main.c static void bond_mii_monitor(struct work_struct *work) work 2288 drivers/net/bonding/bond_main.c struct bonding *bond = container_of(work, struct bonding, work 2289 drivers/net/bonding/bond_main.c mii_work.work); work 3045 drivers/net/bonding/bond_main.c static void bond_arp_monitor(struct work_struct *work) work 3047 drivers/net/bonding/bond_main.c struct bonding *bond = container_of(work, struct bonding, work 3048 drivers/net/bonding/bond_main.c arp_work.work); work 3979 drivers/net/bonding/bond_main.c static void bond_slave_arr_handler(struct work_struct *work) work 3981 drivers/net/bonding/bond_main.c struct bonding *bond = container_of(work, struct bonding, work 3982 drivers/net/bonding/bond_main.c slave_arr_work.work); work 766 drivers/net/caif/caif_hsi.c static void cfhsi_wake_up(struct work_struct *work) work 773 drivers/net/caif/caif_hsi.c cfhsi = container_of(work, struct cfhsi, wake_up_work); work 893 drivers/net/caif/caif_hsi.c static void cfhsi_wake_down(struct work_struct *work) work 900 drivers/net/caif/caif_hsi.c cfhsi = container_of(work, struct cfhsi, wake_down_work); work 956 drivers/net/caif/caif_hsi.c static void cfhsi_out_of_sync(struct work_struct *work) work 960 drivers/net/caif/caif_hsi.c cfhsi = container_of(work, struct cfhsi, out_of_sync_work); work 299 drivers/net/caif/caif_serial.c static void ser_release(struct work_struct *work) work 629 drivers/net/caif/caif_spi.c INIT_WORK(&cfspi->work, cfspi_xfer); work 665 drivers/net/caif/caif_spi.c queue_work(cfspi->wq, &cfspi->work); work 61 drivers/net/caif/caif_spi_slave.c void cfspi_xfer(struct work_struct *work) work 67 drivers/net/caif/caif_spi_slave.c cfspi = container_of(work, struct cfspi, work); work 579 drivers/net/can/dev.c 
static void can_restart_work(struct work_struct *work) work 581 drivers/net/can/dev.c struct delayed_work *dwork = to_delayed_work(work); work 269 drivers/net/can/sja1000/peak_pci.c static void peak_pciec_led_work(struct work_struct *work) work 272 drivers/net/can/sja1000/peak_pci.c container_of(work, struct peak_pciec_card, led_work.work); work 311 drivers/net/can/slcan.c static void slcan_transmit(struct work_struct *work) work 313 drivers/net/can/slcan.c struct slcan *sl = container_of(work, struct slcan, tx_work); work 67 drivers/net/dsa/microchip/ksz_common.c static void ksz_mib_read_work(struct work_struct *work) work 69 drivers/net/dsa/microchip/ksz_common.c struct ksz_device *dev = container_of(work, struct ksz_device, work 355 drivers/net/dsa/mv88e6xxx/chip.c static void mv88e6xxx_irq_poll(struct kthread_work *work) work 357 drivers/net/dsa/mv88e6xxx/chip.c struct mv88e6xxx_chip *chip = container_of(work, work 359 drivers/net/dsa/mv88e6xxx/chip.c irq_poll_work.work); work 436 drivers/net/dsa/mv88e6xxx/ptp.c static void mv88e6xxx_ptp_overflow_check(struct work_struct *work) work 438 drivers/net/dsa/mv88e6xxx/ptp.c struct delayed_work *dw = to_delayed_work(work); work 2006 drivers/net/dsa/sja1105/sja1105_main.c static void sja1105_rxtstamp_work(struct work_struct *work) work 2008 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_tagger_data *data = to_tagger(work); work 339 drivers/net/dsa/sja1105/sja1105_ptp.c static void sja1105_ptp_overflow_check(struct work_struct *work) work 341 drivers/net/dsa/sja1105/sja1105_ptp.c struct delayed_work *dw = to_delayed_work(work); work 715 drivers/net/ethernet/amazon/ena/ena_netdev.c cancel_work_sync(&adapter->ena_napi[i].dim.work); work 1158 drivers/net/ethernet/amazon/ena/ena_netdev.c struct dim *dim = container_of(w, struct dim, work); work 1752 drivers/net/ethernet/amazon/ena/ena_netdev.c INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); work 1759 drivers/net/ethernet/amazon/ena/ena_netdev.c cancel_work_sync(&adapter->ena_napi[i].dim.work); work 2834 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_fw_reset_device(struct work_struct *work) work 2837 drivers/net/ethernet/amazon/ena/ena_netdev.c container_of(work, struct ena_adapter, reset_task); work 673 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static void xgbe_service(struct work_struct *work) work 675 drivers/net/ethernet/amd/xgbe/xgbe-drv.c struct xgbe_prv_data *pdata = container_of(work, work 1477 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static void xgbe_stopdev(struct work_struct *work) work 1479 drivers/net/ethernet/amd/xgbe/xgbe-drv.c struct xgbe_prv_data *pdata = container_of(work, work 1523 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static void xgbe_restart(struct work_struct *work) work 1525 drivers/net/ethernet/amd/xgbe/xgbe-drv.c struct xgbe_prv_data *pdata = container_of(work, work 1536 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static void xgbe_tx_tstamp(struct work_struct *work) work 1538 drivers/net/ethernet/amd/xgbe/xgbe-drv.c struct xgbe_prv_data *pdata = container_of(work, work 730 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c static void xgbe_an_irq_work(struct work_struct *work) work 732 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c struct xgbe_prv_data *pdata = container_of(work, work 916 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c static void xgbe_an_state_machine(struct work_struct *work) work 918 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c struct xgbe_prv_data *pdata = container_of(work, work 154 drivers/net/ethernet/apm/xgene/xgene_enet_main.h void 
(*link_state)(struct work_struct *work); work 534 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_enet_link_state(struct work_struct *work) work 536 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c struct xgene_enet_pdata *p = container_of(to_delayed_work(work), work 474 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c static void xgene_enet_link_state(struct work_struct *work) work 476 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work), work 29 drivers/net/ethernet/aquantia/atlantic/aq_main.c void aq_ndev_schedule_work(struct work_struct *work) work 31 drivers/net/ethernet/aquantia/atlantic/aq_main.c queue_work(aq_ndev_wq, work); work 15 drivers/net/ethernet/aquantia/atlantic/aq_main.h void aq_ndev_schedule_work(struct work_struct *work); work 189 drivers/net/ethernet/aquantia/atlantic/aq_nic.c static void aq_nic_service_task(struct work_struct *work) work 191 drivers/net/ethernet/aquantia/atlantic/aq_nic.c struct aq_nic_s *self = container_of(work, struct aq_nic_s, work 1421 drivers/net/ethernet/atheros/ag71xx.c static void ag71xx_restart_work_func(struct work_struct *work) work 1423 drivers/net/ethernet/atheros/ag71xx.c struct ag71xx *ag = container_of(work, struct ag71xx, work 1424 drivers/net/ethernet/atheros/ag71xx.c restart_work.work); work 224 drivers/net/ethernet/atheros/alx/main.c int work = 0; work 228 drivers/net/ethernet/atheros/alx/main.c while (work < budget) { work 239 drivers/net/ethernet/atheros/alx/main.c return work; work 279 drivers/net/ethernet/atheros/alx/main.c work++; work 294 drivers/net/ethernet/atheros/alx/main.c return work; work 304 drivers/net/ethernet/atheros/alx/main.c int work = 0; work 309 drivers/net/ethernet/atheros/alx/main.c work = alx_clean_rx_irq(np->rxq, budget); work 311 drivers/net/ethernet/atheros/alx/main.c if (!tx_complete || work == budget) work 314 drivers/net/ethernet/atheros/alx/main.c napi_complete_done(&np->napi, work); work 328 drivers/net/ethernet/atheros/alx/main.c return work; work 1346 drivers/net/ethernet/atheros/alx/main.c static void alx_link_check(struct work_struct *work) work 1350 drivers/net/ethernet/atheros/alx/main.c alx = container_of(work, struct alx_priv, link_check_wk); work 1357 drivers/net/ethernet/atheros/alx/main.c static void alx_reset(struct work_struct *work) work 1359 drivers/net/ethernet/atheros/alx/main.c struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk); work 316 drivers/net/ethernet/atheros/atl1c/atl1c_main.c static void atl1c_common_task(struct work_struct *work) work 321 drivers/net/ethernet/atheros/atl1c/atl1c_main.c adapter = container_of(work, struct atl1c_adapter, common_task); work 143 drivers/net/ethernet/atheros/atl1e/atl1e_main.c static void atl1e_reset_task(struct work_struct *work) work 146 drivers/net/ethernet/atheros/atl1e/atl1e_main.c adapter = container_of(work, struct atl1e_adapter, reset_task); work 205 drivers/net/ethernet/atheros/atl1e/atl1e_main.c static void atl1e_link_chg_task(struct work_struct *work) work 210 drivers/net/ethernet/atheros/atl1e/atl1e_main.c adapter = container_of(work, struct atl1e_adapter, link_chg_task); work 2661 drivers/net/ethernet/atheros/atlx/atl1.c static void atl1_reset_dev_task(struct work_struct *work) work 2664 drivers/net/ethernet/atheros/atlx/atl1.c container_of(work, struct atl1_adapter, reset_dev_task); work 1102 drivers/net/ethernet/atheros/atlx/atl2.c static void atl2_reset_task(struct work_struct *work) work 1105 
drivers/net/ethernet/atheros/atlx/atl2.c adapter = container_of(work, struct atl2_adapter, reset_task); work 1245 drivers/net/ethernet/atheros/atlx/atl2.c static void atl2_link_chg_task(struct work_struct *work) work 1250 drivers/net/ethernet/atheros/atlx/atl2.c adapter = container_of(work, struct atl2_adapter, link_chg_task); work 196 drivers/net/ethernet/atheros/atlx/atlx.c static void atlx_link_chg_task(struct work_struct *work) work 201 drivers/net/ethernet/atheros/atlx/atlx.c adapter = container_of(work, struct atlx_adapter, link_chg_task); work 291 drivers/net/ethernet/aurora/nb8800.c int work = 0; work 321 drivers/net/ethernet/aurora/nb8800.c work++; work 322 drivers/net/ethernet/aurora/nb8800.c } while (work < budget); work 324 drivers/net/ethernet/aurora/nb8800.c if (work) { work 332 drivers/net/ethernet/aurora/nb8800.c if (work < budget) { work 342 drivers/net/ethernet/aurora/nb8800.c napi_complete_done(napi, work); work 345 drivers/net/ethernet/aurora/nb8800.c return work; work 1088 drivers/net/ethernet/broadcom/bcmsysport.c static void bcm_sysport_dim_work(struct work_struct *work) work 1090 drivers/net/ethernet/broadcom/bcmsysport.c struct dim *dim = container_of(work, struct dim, work); work 1435 drivers/net/ethernet/broadcom/bcmsysport.c void (*cb)(struct work_struct *work)) work 1439 drivers/net/ethernet/broadcom/bcmsysport.c INIT_WORK(&dim->dim.work, cb); work 2034 drivers/net/ethernet/broadcom/bcmsysport.c cancel_work_sync(&priv->dim.dim.work); work 6450 drivers/net/ethernet/broadcom/bnx2.c bnx2_reset_task(struct work_struct *work) work 6452 drivers/net/ethernet/broadcom/bnx2.c struct bnx2 *bp = container_of(work, struct bnx2, reset_task); work 186 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h void bnx2x_dcbx_update(struct work_struct *work); work 5662 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_sp_task(struct work_struct *work) work 5664 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); work 10282 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_sp_rtnl_task(struct work_struct *work) work 10284 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); work 10430 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_period_task(struct work_struct *work) work 10432 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); work 15225 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_ptp_task(struct work_struct *work) work 15227 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task); work 3191 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c void bnx2x_iov_task(struct work_struct *work) work 3193 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); work 547 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h void bnx2x_iov_task(struct work_struct *work); work 619 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h static inline void bnx2x_iov_task(struct work_struct *work) {} work 8313 drivers/net/ethernet/broadcom/bnxt/bnxt.c cancel_work_sync(&cpr->dim.work); work 8328 drivers/net/ethernet/broadcom/bnxt/bnxt.c INIT_WORK(&cpr->dim.work, bnxt_dim_work); work 10238 drivers/net/ethernet/broadcom/bnxt/bnxt.c static void bnxt_sp_task(struct work_struct *work) work 10240 
drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt *bp = container_of(work, struct bnxt, sp_task); work 10647 drivers/net/ethernet/broadcom/bnxt/bnxt.c static void bnxt_fw_reset_task(struct work_struct *work) work 10649 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); work 2004 drivers/net/ethernet/broadcom/bnxt/bnxt.h void bnxt_dim_work(struct work_struct *work); work 14 drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c void bnxt_dim_work(struct work_struct *work) work 16 drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c struct dim *dim = container_of(work, struct dim, work); work 1873 drivers/net/ethernet/broadcom/cnic.c u32 num, int *work) work 1886 drivers/net/ethernet/broadcom/cnic.c *work = num; work 1893 drivers/net/ethernet/broadcom/cnic.c *work = num; work 1896 drivers/net/ethernet/broadcom/cnic.c *work = 2 + req2->num_additional_wqes; work 2120 drivers/net/ethernet/broadcom/cnic.c u32 num, int *work) work 2136 drivers/net/ethernet/broadcom/cnic.c *work = num; work 2141 drivers/net/ethernet/broadcom/cnic.c *work = 3; work 2143 drivers/net/ethernet/broadcom/cnic.c *work = 2; work 2145 drivers/net/ethernet/broadcom/cnic.c if (num < *work) { work 2146 drivers/net/ethernet/broadcom/cnic.c *work = num; work 2181 drivers/net/ethernet/broadcom/cnic.c kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; work 2281 drivers/net/ethernet/broadcom/cnic.c u32 num, int *work) work 2294 drivers/net/ethernet/broadcom/cnic.c *work = num; work 2301 drivers/net/ethernet/broadcom/cnic.c *work = 1; work 2305 drivers/net/ethernet/broadcom/cnic.c *work = 2; work 2333 drivers/net/ethernet/broadcom/cnic.c *work = 3; work 2338 drivers/net/ethernet/broadcom/cnic.c u32 num, int *work) work 2357 drivers/net/ethernet/broadcom/cnic.c *work = num; work 2365 drivers/net/ethernet/broadcom/cnic.c *work = 4; work 2657 drivers/net/ethernet/broadcom/cnic.c int i, work, ret; work 2667 drivers/net/ethernet/broadcom/cnic.c work = 1; work 2678 drivers/net/ethernet/broadcom/cnic.c num_wqes - i, &work); work 2688 drivers/net/ethernet/broadcom/cnic.c &work); work 2722 drivers/net/ethernet/broadcom/cnic.c i += work; work 2731 drivers/net/ethernet/broadcom/cnic.c int i, work, ret; work 2744 drivers/net/ethernet/broadcom/cnic.c work = 1; work 2749 drivers/net/ethernet/broadcom/cnic.c num_wqes - i, &work); work 2753 drivers/net/ethernet/broadcom/cnic.c num_wqes - i, &work); work 2787 drivers/net/ethernet/broadcom/cnic.c i += work; work 4267 drivers/net/ethernet/broadcom/cnic.c static void cnic_delete_task(struct work_struct *work) work 4274 drivers/net/ethernet/broadcom/cnic.c cp = container_of(work, struct cnic_local, delete_task.work); work 1921 drivers/net/ethernet/broadcom/genet/bcmgenet.c static void bcmgenet_dim_work(struct work_struct *work) work 1923 drivers/net/ethernet/broadcom/genet/bcmgenet.c struct dim *dim = container_of(work, struct dim, work); work 2084 drivers/net/ethernet/broadcom/genet/bcmgenet.c void (*cb)(struct work_struct *work)) work 2088 drivers/net/ethernet/broadcom/genet/bcmgenet.c INIT_WORK(&dim->dim.work, cb); work 2370 drivers/net/ethernet/broadcom/genet/bcmgenet.c cancel_work_sync(&ring->dim.dim.work); work 2375 drivers/net/ethernet/broadcom/genet/bcmgenet.c cancel_work_sync(&ring->dim.dim.work); work 2604 drivers/net/ethernet/broadcom/genet/bcmgenet.c static void bcmgenet_irq_task(struct work_struct *work) work 2608 drivers/net/ethernet/broadcom/genet/bcmgenet.c work, struct bcmgenet_priv, bcmgenet_irq_work); work 11190 
drivers/net/ethernet/broadcom/tg3.c static void tg3_reset_task(struct work_struct *work) work 11192 drivers/net/ethernet/broadcom/tg3.c struct tg3 *tp = container_of(work, struct tg3, reset_task); work 1095 drivers/net/ethernet/brocade/bna/bnad.c bnad_tx_cleanup(struct delayed_work *work) work 1098 drivers/net/ethernet/brocade/bna/bnad.c container_of(work, struct bnad_tx_info, tx_cleanup_work); work 1174 drivers/net/ethernet/brocade/bna/bnad.c bnad_rx_cleanup(void *work) work 1177 drivers/net/ethernet/brocade/bna/bnad.c container_of(work, struct bnad_rx_info, rx_cleanup_work); work 725 drivers/net/ethernet/cadence/macb_main.c static void macb_tx_error_task(struct work_struct *work) work 727 drivers/net/ethernet/cadence/macb_main.c struct macb_queue *queue = container_of(work, struct macb_queue, work 326 drivers/net/ethernet/cadence/macb_ptp.c static void gem_tx_timestamp_flush(struct work_struct *work) work 329 drivers/net/ethernet/cadence/macb_ptp.c container_of(work, struct macb_queue, tx_ts_task); work 904 drivers/net/ethernet/calxeda/xgmac.c static void xgmac_tx_timeout_work(struct work_struct *work) work 908 drivers/net/ethernet/calxeda/xgmac.c container_of(work, struct xgmac_priv, tx_timeout_work); work 678 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c static void cn23xx_pf_mbox_thread(struct work_struct *work) work 680 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c struct cavium_wk *wk = (struct cavium_wk *)work; work 703 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c schedule_delayed_work(&wk->work, msecs_to_jiffies(10)); work 749 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work, work 759 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work, work 783 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c &oct->mbox[q_no]->mbox_poll_wk.work); work 986 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c struct delayed_work *work; work 999 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c work = &oct->mbox[q_no]->mbox_poll_wk.work; work 1000 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c schedule_delayed_work(work, work 263 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c static void cn23xx_vf_mbox_thread(struct work_struct *work) work 265 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c struct cavium_wk *wk = (struct cavium_wk *)work; work 273 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work); work 306 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work, work 482 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work, work 435 drivers/net/ethernet/cavium/liquidio/lio_core.c queue_delayed_work(wq->wq, &wq->wk.work, work 439 drivers/net/ethernet/cavium/liquidio/lio_core.c static void octnet_poll_check_rxq_oom_status(struct work_struct *work) work 441 drivers/net/ethernet/cavium/liquidio/lio_core.c struct cavium_wk *wk = (struct cavium_wk *)work; work 471 drivers/net/ethernet/cavium/liquidio/lio_core.c INIT_DELAYED_WORK(&wq->wk.work, work 490 drivers/net/ethernet/cavium/liquidio/lio_core.c cancel_delayed_work_sync(&wq->wk.work); work 1446 drivers/net/ethernet/cavium/liquidio/lio_core.c void lio_fetch_stats(struct work_struct *work) work 1448 drivers/net/ethernet/cavium/liquidio/lio_core.c struct cavium_wk *wk = (struct cavium_wk *)work; work 1509 
drivers/net/ethernet/cavium/liquidio/lio_core.c schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies); work 541 drivers/net/ethernet/cavium/liquidio/lio_main.c static void octnet_link_status_change(struct work_struct *work) work 543 drivers/net/ethernet/cavium/liquidio/lio_main.c struct cavium_wk *wk = (struct cavium_wk *)work; work 570 drivers/net/ethernet/cavium/liquidio/lio_main.c INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, work 582 drivers/net/ethernet/cavium/liquidio/lio_main.c cancel_delayed_work_sync(&lio->link_status_wq.wk.work); work 630 drivers/net/ethernet/cavium/liquidio/lio_main.c &lio->link_status_wq.wk.work, 0); work 641 drivers/net/ethernet/cavium/liquidio/lio_main.c static void lio_sync_octeon_time(struct work_struct *work) work 643 drivers/net/ethernet/cavium/liquidio/lio_main.c struct cavium_wk *wk = (struct cavium_wk *)work; work 683 drivers/net/ethernet/cavium/liquidio/lio_main.c &lio->sync_octeon_time_wq.wk.work, work 704 drivers/net/ethernet/cavium/liquidio/lio_main.c INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work, work 708 drivers/net/ethernet/cavium/liquidio/lio_main.c &lio->sync_octeon_time_wq.wk.work, work 726 drivers/net/ethernet/cavium/liquidio/lio_main.c cancel_delayed_work_sync(&time_wq->wk.work); work 1142 drivers/net/ethernet/cavium/liquidio/lio_main.c cancel_delayed_work_sync(&oct->nic_poll_work.work); work 1762 drivers/net/ethernet/cavium/liquidio/lio_main.c static void octnet_poll_check_txq_status(struct work_struct *work) work 1764 drivers/net/ethernet/cavium/liquidio/lio_main.c struct cavium_wk *wk = (struct cavium_wk *)work; work 1772 drivers/net/ethernet/cavium/liquidio/lio_main.c &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); work 1790 drivers/net/ethernet/cavium/liquidio/lio_main.c INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, work 1794 drivers/net/ethernet/cavium/liquidio/lio_main.c &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); work 1803 drivers/net/ethernet/cavium/liquidio/lio_main.c cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); work 1857 drivers/net/ethernet/cavium/liquidio/lio_main.c INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); work 1859 drivers/net/ethernet/cavium/liquidio/lio_main.c schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies work 1904 drivers/net/ethernet/cavium/liquidio/lio_main.c cancel_delayed_work_sync(&lio->stats_wk.work); work 3963 drivers/net/ethernet/cavium/liquidio/lio_main.c static void nic_starter(struct work_struct *work) work 3966 drivers/net/ethernet/cavium/liquidio/lio_main.c struct cavium_wk *wk = (struct cavium_wk *)work; work 3979 drivers/net/ethernet/cavium/liquidio/lio_main.c schedule_delayed_work(&oct->nic_poll_work.work, work 4142 drivers/net/ethernet/cavium/liquidio/lio_main.c INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter); work 4144 drivers/net/ethernet/cavium/liquidio/lio_main.c schedule_delayed_work(&octeon_dev->nic_poll_work.work, work 285 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c static void octnet_link_status_change(struct work_struct *work) work 287 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c struct cavium_wk *wk = (struct cavium_wk *)work; work 314 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, work 326 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c cancel_delayed_work_sync(&lio->link_status_wq.wk.work); work 372 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c &lio->link_status_wq.wk.work, 0); work 574 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 
cancel_delayed_work_sync(&oct->nic_poll_work.work); work 932 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); work 934 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies work 988 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c cancel_delayed_work_sync(&lio->stats_wk.work); work 462 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c lio_vf_rep_fetch_stats(struct work_struct *work) work 464 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c struct cavium_wk *wk = (struct cavium_wk *)work; work 485 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c schedule_delayed_work(&vf_rep->stats_wk.work, work 537 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c INIT_DELAYED_WORK(&vf_rep->stats_wk.work, work 540 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c schedule_delayed_work(&vf_rep->stats_wk.work, work 564 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c (&vf_rep->stats_wk.work); work 594 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c (&vf_rep->stats_wk.work); work 479 drivers/net/ethernet/cavium/liquidio/octeon_console.c static void check_console(struct work_struct *work) work 484 drivers/net/ethernet/cavium/liquidio/octeon_console.c struct cavium_wk *wk = (struct cavium_wk *)work; work 531 drivers/net/ethernet/cavium/liquidio/octeon_console.c schedule_delayed_work(&wk->work, msecs_to_jiffies(delay)); work 659 drivers/net/ethernet/cavium/liquidio/octeon_console.c struct delayed_work *work; work 688 drivers/net/ethernet/cavium/liquidio/octeon_console.c work = &oct->console_poll_work[console_num].work; work 692 drivers/net/ethernet/cavium/liquidio/octeon_console.c INIT_DELAYED_WORK(work, check_console); work 696 drivers/net/ethernet/cavium/liquidio/octeon_console.c schedule_delayed_work(work, msecs_to_jiffies(delay)); work 727 drivers/net/ethernet/cavium/liquidio/octeon_console.c work); work 304 drivers/net/ethernet/cavium/liquidio/octeon_device.h struct delayed_work work; work 227 drivers/net/ethernet/cavium/liquidio/octeon_network.h void lio_fetch_stats(struct work_struct *work); work 38 drivers/net/ethernet/cavium/liquidio/request_manager.c static void check_db_timeout(struct work_struct *work); work 161 drivers/net/ethernet/cavium/liquidio/request_manager.c INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout); work 164 drivers/net/ethernet/cavium/liquidio/request_manager.c queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); work 174 drivers/net/ethernet/cavium/liquidio/request_manager.c cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work); work 442 drivers/net/ethernet/cavium/liquidio/request_manager.c queue_work(cwq->wq, &cwq->wk.work.work); work 531 drivers/net/ethernet/cavium/liquidio/request_manager.c static void check_db_timeout(struct work_struct *work) work 533 drivers/net/ethernet/cavium/liquidio/request_manager.c struct cavium_wk *wk = (struct cavium_wk *)work; work 540 drivers/net/ethernet/cavium/liquidio/request_manager.c queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay)); work 28 drivers/net/ethernet/cavium/liquidio/response_manager.c static void oct_poll_req_completion(struct work_struct *work); work 49 drivers/net/ethernet/cavium/liquidio/response_manager.c INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion); work 58 drivers/net/ethernet/cavium/liquidio/response_manager.c cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work); work 223 drivers/net/ethernet/cavium/liquidio/response_manager.c static void 
oct_poll_req_completion(struct work_struct *work) work 225 drivers/net/ethernet/cavium/liquidio/response_manager.c struct cavium_wk *wk = (struct cavium_wk *)work; work 233 drivers/net/ethernet/cavium/liquidio/response_manager.c queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1)); work 271 drivers/net/ethernet/cavium/thunder/nic.h struct work_struct work; work 1444 drivers/net/ethernet/cavium/thunder/nicvf_main.c link_change_work.work); work 1754 drivers/net/ethernet/cavium/thunder/nicvf_main.c static void nicvf_reset_task(struct work_struct *work) work 1758 drivers/net/ethernet/cavium/thunder/nicvf_main.c nic = container_of(work, struct nicvf, reset_task); work 2039 drivers/net/ethernet/cavium/thunder/nicvf_main.c work); work 2096 drivers/net/ethernet/cavium/thunder/nicvf_main.c queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work); work 2262 drivers/net/ethernet/cavium/thunder/nicvf_main.c INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); work 451 drivers/net/ethernet/cavium/thunder/nicvf_queues.c void nicvf_rbdr_work(struct work_struct *work) work 453 drivers/net/ethernet/cavium/thunder/nicvf_queues.c struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work); work 352 drivers/net/ethernet/cavium/thunder/nicvf_queues.h void nicvf_rbdr_work(struct work_struct *work); work 997 drivers/net/ethernet/cavium/thunder/thunder_bgx.c static void bgx_poll_for_link(struct work_struct *work) work 1002 drivers/net/ethernet/cavium/thunder/thunder_bgx.c lmac = container_of(work, struct lmac, dwork.work); work 895 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static void mac_stats_task(struct work_struct *work) work 899 drivers/net/ethernet/chelsio/cxgb/cxgb2.c container_of(work, struct adapter, stats_update_task.work); work 920 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static void ext_intr_task(struct work_struct *work) work 923 drivers/net/ethernet/chelsio/cxgb/cxgb2.c container_of(work, struct adapter, ext_intr_handler_task); work 97 drivers/net/ethernet/chelsio/cxgb/my3126.c static void my3126_poll(struct work_struct *work) work 99 drivers/net/ethernet/chelsio/cxgb/my3126.c struct cphy *cphy = container_of(work, struct cphy, phy_update.work); work 2719 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static void t3_adap_check_task(struct work_struct *work) work 2721 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct adapter *adapter = container_of(work, struct adapter, work 2722 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c adap_check_task.work); work 2797 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static void db_full_task(struct work_struct *work) work 2799 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct adapter *adapter = container_of(work, struct adapter, work 2805 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static void db_empty_task(struct work_struct *work) work 2807 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct adapter *adapter = container_of(work, struct adapter, work 2813 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static void db_drop_task(struct work_struct *work) work 2815 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct adapter *adapter = container_of(work, struct adapter, work 2836 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static void ext_intr_task(struct work_struct *work) work 2838 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct adapter *adapter = container_of(work, struct adapter, work 2978 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static void fatal_error_task(struct work_struct *work) work 
2980 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct adapter *adapter = container_of(work, struct adapter, work 560 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static void t3_process_tid_release_list(struct work_struct *work) work 562 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c struct t3c_data *td = container_of(work, struct t3c_data, work 2717 drivers/net/ethernet/chelsio/cxgb3/sge.c int work; work 2719 drivers/net/ethernet/chelsio/cxgb3/sge.c work = process_responses(adap, rspq_to_qset(rq), -1); work 2722 drivers/net/ethernet/chelsio/cxgb3/sge.c return work; work 1385 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void process_tid_release_list(struct work_struct *work) work 1390 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap = container_of(work, struct adapter, tid_release_task); work 2104 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void process_db_full(struct work_struct *work) work 2108 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap = container_of(work, struct adapter, db_full_task); work 2177 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void process_db_drop(struct work_struct *work) work 2181 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap = container_of(work, struct adapter, db_drop_task); work 3442 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void notify_fatal_err(struct work_struct *work) work 3446 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap = container_of(work, struct adapter, fatal_err_notify_task); work 739 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c static void ch_flower_stats_handler(struct work_struct *work) work 741 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct adapter *adap = container_of(work, struct adapter, work 2089 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_change_mtu_work(struct work_struct *work) work 2091 drivers/net/ethernet/cisco/enic/enic_main.c struct enic *enic = container_of(work, struct enic, change_mtu_work); work 2334 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_reset(struct work_struct *work) work 2336 drivers/net/ethernet/cisco/enic/enic_main.c struct enic *enic = container_of(work, struct enic, reset); work 2357 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_tx_hang_reset(struct work_struct *work) work 2359 drivers/net/ethernet/cisco/enic/enic_main.c struct enic *enic = container_of(work, struct enic, tx_hang_reset); work 28 drivers/net/ethernet/dec/tulip/21142.c void t21142_media_task(struct work_struct *work) work 31 drivers/net/ethernet/dec/tulip/21142.c container_of(work, struct tulip_private, media_work); work 17 drivers/net/ethernet/dec/tulip/timer.c void tulip_media_task(struct work_struct *work) work 20 drivers/net/ethernet/dec/tulip/timer.c container_of(work, struct tulip_private, media_work); work 472 drivers/net/ethernet/dec/tulip/tulip.h void t21142_media_task(struct work_struct *work); work 510 drivers/net/ethernet/dec/tulip/tulip.h void tulip_media_task(struct work_struct *work); work 598 drivers/net/ethernet/emulex/benet/be.h struct delayed_work work; work 681 drivers/net/ethernet/emulex/benet/be.h struct work_struct work; work 1847 drivers/net/ethernet/emulex/benet/be_main.c static void be_work_set_rx_mode(struct work_struct *work) work 1850 drivers/net/ethernet/emulex/benet/be_main.c container_of(work, struct be_cmd_work, work); work 3298 drivers/net/ethernet/emulex/benet/be_main.c int max_work = 0, work, i, num_evts; work 3313 drivers/net/ethernet/emulex/benet/be_main.c work = 
be_process_rx(rxo, napi, budget); work 3314 drivers/net/ethernet/emulex/benet/be_main.c max_work = max(work, max_work); work 3953 drivers/net/ethernet/emulex/benet/be_main.c cancel_delayed_work_sync(&adapter->work); work 4599 drivers/net/ethernet/emulex/benet/be_main.c queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000)); work 5043 drivers/net/ethernet/emulex/benet/be_main.c struct be_cmd_work *work; work 5045 drivers/net/ethernet/emulex/benet/be_main.c work = kzalloc(sizeof(*work), GFP_ATOMIC); work 5046 drivers/net/ethernet/emulex/benet/be_main.c if (!work) { work 5052 drivers/net/ethernet/emulex/benet/be_main.c INIT_WORK(&work->work, func); work 5053 drivers/net/ethernet/emulex/benet/be_main.c work->adapter = adapter; work 5054 drivers/net/ethernet/emulex/benet/be_main.c return work; work 5071 drivers/net/ethernet/emulex/benet/be_main.c static void be_work_add_vxlan_port(struct work_struct *work) work 5074 drivers/net/ethernet/emulex/benet/be_main.c container_of(work, struct be_cmd_work, work); work 5123 drivers/net/ethernet/emulex/benet/be_main.c static void be_work_del_vxlan_port(struct work_struct *work) work 5126 drivers/net/ethernet/emulex/benet/be_main.c container_of(work, struct be_cmd_work, work); work 5182 drivers/net/ethernet/emulex/benet/be_main.c queue_work(be_wq, &cmd_work->work); work 5284 drivers/net/ethernet/emulex/benet/be_main.c struct be_cmd_work *work; work 5286 drivers/net/ethernet/emulex/benet/be_main.c work = be_alloc_work(adapter, be_work_set_rx_mode); work 5287 drivers/net/ethernet/emulex/benet/be_main.c if (work) work 5288 drivers/net/ethernet/emulex/benet/be_main.c queue_work(be_wq, &work->work); work 5547 drivers/net/ethernet/emulex/benet/be_main.c static void be_err_detection_task(struct work_struct *work) work 5550 drivers/net/ethernet/emulex/benet/be_main.c container_of(work, struct be_error_recovery, work 5551 drivers/net/ethernet/emulex/benet/be_main.c err_detection_work.work); work 5619 drivers/net/ethernet/emulex/benet/be_main.c static void be_worker(struct work_struct *work) work 5622 drivers/net/ethernet/emulex/benet/be_main.c container_of(work, struct be_adapter, work.work); work 5663 drivers/net/ethernet/emulex/benet/be_main.c queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000)); work 5810 drivers/net/ethernet/emulex/benet/be_main.c INIT_DELAYED_WORK(&adapter->work, be_worker); work 6089 drivers/net/ethernet/emulex/benet/be_main.c cancel_delayed_work_sync(&adapter->work); work 1379 drivers/net/ethernet/faraday/ftgmac100.c static void ftgmac100_reset_task(struct work_struct *work) work 1381 drivers/net/ethernet/faraday/ftgmac100.c struct ftgmac100 *priv = container_of(work, struct ftgmac100, work 31 drivers/net/ethernet/freescale/enetc/enetc_msg.c static void enetc_msg_task(struct work_struct *work) work 33 drivers/net/ethernet/freescale/enetc/enetc_msg.c struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task); work 1208 drivers/net/ethernet/freescale/fec_main.c static void fec_enet_timeout_work(struct work_struct *work) work 1211 drivers/net/ethernet/freescale/fec_main.c container_of(work, struct fec_enet_private, tx_timeout_work); work 514 drivers/net/ethernet/freescale/fec_ptp.c static void fec_time_keep(struct work_struct *work) work 516 drivers/net/ethernet/freescale/fec_ptp.c struct delayed_work *dwork = to_delayed_work(work); work 617 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c static void fs_timeout_work(struct work_struct *work) work 619 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 
struct fs_enet_private *fep = container_of(work, struct fs_enet_private, work 2088 drivers/net/ethernet/freescale/gianfar.c static void gfar_reset_task(struct work_struct *work) work 2090 drivers/net/ethernet/freescale/gianfar.c struct gfar_private *priv = container_of(work, struct gfar_private, work 3515 drivers/net/ethernet/freescale/ucc_geth.c static void ucc_geth_timeout_work(struct work_struct *work) work 3520 drivers/net/ethernet/freescale/ucc_geth.c ugeth = container_of(work, struct ucc_geth_private, timeout_work); work 889 drivers/net/ethernet/google/gve/gve_main.c static void gve_service_task(struct work_struct *work) work 891 drivers/net/ethernet/google/gve/gve_main.c struct gve_priv *priv = container_of(work, struct gve_priv, work 789 drivers/net/ethernet/hisilicon/hip04_eth.c static void hip04_tx_timeout_task(struct work_struct *work) work 793 drivers/net/ethernet/hisilicon/hip04_eth.c priv = container_of(work, struct hip04_priv, tx_timeout_task); work 887 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c static void hix5hd2_tx_timeout_task(struct work_struct *work) work 891 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c priv = container_of(work, struct hix5hd2_priv, tx_timeout_task); work 2053 drivers/net/ethernet/hisilicon/hns/hns_enet.c static void hns_nic_service_task(struct work_struct *work) work 2056 drivers/net/ethernet/hisilicon/hns/hns_enet.c = container_of(work, struct hns_nic_priv, service_task); work 3851 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c static void hclge_reset_service_task(struct work_struct *work) work 3854 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c container_of(work, struct hclge_dev, rst_service_task); work 3866 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c static void hclge_mailbox_service_task(struct work_struct *work) work 3869 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c container_of(work, struct hclge_dev, mbx_service_task); work 3898 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c static void hclge_service_task(struct work_struct *work) work 3901 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c container_of(work, struct hclge_dev, service_task.work); work 9106 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c if (hdev->service_task.work.func) work 1749 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c static void hclgevf_reset_service_task(struct work_struct *work) work 1752 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c container_of(work, struct hclgevf_dev, rst_service_task); work 1821 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c static void hclgevf_mailbox_service_task(struct work_struct *work) work 1825 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c hdev = container_of(work, struct hclgevf_dev, mbx_service_task); work 1846 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c static void hclgevf_keep_alive_task(struct work_struct *work) work 1852 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c hdev = container_of(work, struct hclgevf_dev, keep_alive_task); work 1864 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c static void hclgevf_service_task(struct work_struct *work) work 1869 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c hdev = container_of(work, struct hclgevf_dev, service_task); work 29 drivers/net/ethernet/huawei/hinic/hinic_dev.h struct work_struct work; work 84 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c #define work_to_aeq_work(work) \ work 85 
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c container_of(work, struct hinic_eq_work, work) work 359 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c static void eq_irq_work(struct work_struct *work) work 361 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c struct hinic_eq_work *aeq_work = work_to_aeq_work(work); work 397 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c queue_work(aeqs->workq, &aeq_work->work); work 718 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c INIT_WORK(&aeq_work->work, eq_irq_work); work 764 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c cancel_work_sync(&aeq_work->work); work 168 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h struct work_struct work; work 62 drivers/net/ethernet/huawei/hinic/hinic_main.c #define work_to_rx_mode_work(work) \ work 63 drivers/net/ethernet/huawei/hinic/hinic_main.c container_of(work, struct hinic_rx_mode_work, work) work 723 drivers/net/ethernet/huawei/hinic/hinic_main.c static void set_rx_mode(struct work_struct *work) work 725 drivers/net/ethernet/huawei/hinic/hinic_main.c struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work); work 755 drivers/net/ethernet/huawei/hinic/hinic_main.c queue_work(nic_dev->workq, &rx_mode_work->work); work 1014 drivers/net/ethernet/huawei/hinic/hinic_main.c INIT_WORK(&rx_mode_work->work, set_rx_mode); work 1041 drivers/net/ethernet/huawei/hinic/hinic_main.c cancel_work_sync(&rx_mode_work->work); work 1129 drivers/net/ethernet/huawei/hinic/hinic_main.c cancel_work_sync(&rx_mode_work->work); work 343 drivers/net/ethernet/ibm/ehea/ehea_main.c static void ehea_update_stats(struct work_struct *work) work 346 drivers/net/ethernet/ibm/ehea/ehea_main.c container_of(work, struct ehea_port, stats_work.work); work 2677 drivers/net/ethernet/ibm/ehea/ehea_main.c static void ehea_reset_port(struct work_struct *work) work 2681 drivers/net/ethernet/ibm/ehea/ehea_main.c container_of(work, struct ehea_port, reset_task); work 764 drivers/net/ethernet/ibm/emac/core.c static void emac_reset_work(struct work_struct *work) work 766 drivers/net/ethernet/ibm/emac/core.c struct emac_instance *dev = container_of(work, struct emac_instance, reset_work); work 1333 drivers/net/ethernet/ibm/emac/core.c static void emac_link_timer(struct work_struct *work) work 1336 drivers/net/ethernet/ibm/emac/core.c container_of(to_delayed_work(work), work 2048 drivers/net/ethernet/ibm/ibmvnic.c static void __ibmvnic_reset(struct work_struct *work) work 2055 drivers/net/ethernet/ibm/ibmvnic.c adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); work 2124 drivers/net/ethernet/ibm/ibmvnic.c static void __ibmvnic_delayed_reset(struct work_struct *work) work 2128 drivers/net/ethernet/ibm/ibmvnic.c adapter = container_of(work, struct ibmvnic_adapter, work 2129 drivers/net/ethernet/ibm/ibmvnic.c ibmvnic_delayed_reset.work); work 2328 drivers/net/ethernet/intel/e100.c static void e100_tx_timeout_task(struct work_struct *work) work 2330 drivers/net/ethernet/intel/e100.c struct nic *nic = container_of(work, struct nic, tx_timeout_task); work 104 drivers/net/ethernet/intel/e1000/e1000_main.c static void e1000_update_phy_info_task(struct work_struct *work); work 105 drivers/net/ethernet/intel/e1000/e1000_main.c static void e1000_watchdog(struct work_struct *work); work 106 drivers/net/ethernet/intel/e1000/e1000_main.c static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); work 138 drivers/net/ethernet/intel/e1000/e1000_main.c static void e1000_reset_task(struct work_struct *work); work 2334 
drivers/net/ethernet/intel/e1000/e1000_main.c static void e1000_update_phy_info_task(struct work_struct *work)
work 2336 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_adapter *adapter = container_of(work,
work 2338 drivers/net/ethernet/intel/e1000/e1000_main.c phy_info_task.work);
work 2347 drivers/net/ethernet/intel/e1000/e1000_main.c static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
work 2349 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_adapter *adapter = container_of(work,
work 2351 drivers/net/ethernet/intel/e1000/e1000_main.c fifo_stall_task.work);
work 2419 drivers/net/ethernet/intel/e1000/e1000_main.c static void e1000_watchdog(struct work_struct *work)
work 2421 drivers/net/ethernet/intel/e1000/e1000_main.c struct e1000_adapter *adapter = container_of(work,
work 2423 drivers/net/ethernet/intel/e1000/e1000_main.c watchdog_task.work);
work 3500 drivers/net/ethernet/intel/e1000/e1000_main.c static void e1000_reset_task(struct work_struct *work)
work 3503 drivers/net/ethernet/intel/e1000/e1000_main.c container_of(work, struct e1000_adapter, reset_task);
work 1080 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_print_hw_hang(struct work_struct *work)
work 1082 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = container_of(work,
work 1165 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000e_tx_hwtstamp_work(struct work_struct *work)
work 1167 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
work 1735 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000e_downshift_workaround(struct work_struct *work)
work 1737 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = container_of(work,
work 4797 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000e_update_phy_task(struct work_struct *work)
work 4799 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = container_of(work,
work 5167 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_watchdog_task(struct work_struct *work)
work 5169 drivers/net/ethernet/intel/e1000e/netdev.c struct e1000_adapter *adapter = container_of(work,
work 5949 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_reset_task(struct work_struct *work)
work 5952 drivers/net/ethernet/intel/e1000e/netdev.c adapter = container_of(work, struct e1000_adapter, reset_task);
work 237 drivers/net/ethernet/intel/e1000e/ptp.c static void e1000e_systim_overflow_work(struct work_struct *work)
work 239 drivers/net/ethernet/intel/e1000e/ptp.c struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
work 240 drivers/net/ethernet/intel/e1000e/ptp.c systim_overflow_work.work);
work 1457 drivers/net/ethernet/intel/fm10k/fm10k_main.c int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
work 1459 drivers/net/ethernet/intel/fm10k/fm10k_main.c work_done += work;
work 1460 drivers/net/ethernet/intel/fm10k/fm10k_main.c if (work >= per_ring_budget)
work 733 drivers/net/ethernet/intel/fm10k/fm10k_pci.c static void fm10k_service_task(struct work_struct *work)
work 737 drivers/net/ethernet/intel/fm10k/fm10k_pci.c interface = container_of(work, struct fm10k_intfc, service_task);
work 765 drivers/net/ethernet/intel/fm10k/fm10k_pci.c static void fm10k_macvlan_task(struct work_struct *work)
work 774 drivers/net/ethernet/intel/fm10k/fm10k_pci.c dwork = to_delayed_work(work);
work 10453 drivers/net/ethernet/intel/i40e/i40e_main.c static void i40e_service_task(struct work_struct *work)
work 10455 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_pf *pf = container_of(work,
work 1891 drivers/net/ethernet/intel/iavf/iavf_main.c static void iavf_watchdog_task(struct work_struct *work)
work 1893 drivers/net/ethernet/intel/iavf/iavf_main.c struct iavf_adapter *adapter = container_of(work,
work 1895 drivers/net/ethernet/intel/iavf/iavf_main.c watchdog_task.work);
work 2059 drivers/net/ethernet/intel/iavf/iavf_main.c static void iavf_reset_task(struct work_struct *work)
work 2061 drivers/net/ethernet/intel/iavf/iavf_main.c struct iavf_adapter *adapter = container_of(work,
work 2267 drivers/net/ethernet/intel/iavf/iavf_main.c static void iavf_adminq_task(struct work_struct *work)
work 2270 drivers/net/ethernet/intel/iavf/iavf_main.c container_of(work, struct iavf_adapter, adminq_task);
work 2356 drivers/net/ethernet/intel/iavf/iavf_main.c static void iavf_client_task(struct work_struct *work)
work 2359 drivers/net/ethernet/intel/iavf/iavf_main.c container_of(work, struct iavf_adapter, client_task.work);
work 3575 drivers/net/ethernet/intel/iavf/iavf_main.c static void iavf_init_task(struct work_struct *work)
work 3577 drivers/net/ethernet/intel/iavf/iavf_main.c struct iavf_adapter *adapter = container_of(work,
work 3579 drivers/net/ethernet/intel/iavf/iavf_main.c init_task.work);
work 1489 drivers/net/ethernet/intel/ice/ice_main.c static void ice_service_task(struct work_struct *work)
work 1491 drivers/net/ethernet/intel/ice/ice_main.c struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
work 5220 drivers/net/ethernet/intel/igb/igb_main.c static void igb_watchdog_task(struct work_struct *work)
work 5222 drivers/net/ethernet/intel/igb/igb_main.c struct igb_adapter *adapter = container_of(work,
work 6192 drivers/net/ethernet/intel/igb/igb_main.c static void igb_reset_task(struct work_struct *work)
work 6195 drivers/net/ethernet/intel/igb/igb_main.c adapter = container_of(work, struct igb_adapter, reset_task);
work 683 drivers/net/ethernet/intel/igb/igb_ptp.c static void igb_ptp_tx_work(struct work_struct *work)
work 685 drivers/net/ethernet/intel/igb/igb_ptp.c struct igb_adapter *adapter = container_of(work, struct igb_adapter,
work 715 drivers/net/ethernet/intel/igb/igb_ptp.c static void igb_ptp_overflow_check(struct work_struct *work)
work 718 drivers/net/ethernet/intel/igb/igb_ptp.c container_of(work, struct igb_adapter, ptp_overflow_work.work);
work 1906 drivers/net/ethernet/intel/igbvf/netdev.c static void igbvf_watchdog_task(struct work_struct *work)
work 1908 drivers/net/ethernet/intel/igbvf/netdev.c struct igbvf_adapter *adapter = container_of(work,
work 2387 drivers/net/ethernet/intel/igbvf/netdev.c static void igbvf_reset_task(struct work_struct *work)
work 2391 drivers/net/ethernet/intel/igbvf/netdev.c adapter = container_of(work, struct igbvf_adapter, reset_task);
work 2174 drivers/net/ethernet/intel/igc/igc_main.c static void igc_reset_task(struct work_struct *work)
work 2178 drivers/net/ethernet/intel/igc/igc_main.c adapter = container_of(work, struct igc_adapter, reset_task);
work 2900 drivers/net/ethernet/intel/igc/igc_main.c static void igc_watchdog_task(struct work_struct *work)
work 2902 drivers/net/ethernet/intel/igc/igc_main.c struct igc_adapter *adapter = container_of(work,
work 74 drivers/net/ethernet/intel/ixgb/ixgb_main.c static void ixgb_tx_timeout_task(struct work_struct *work);
work 1550 drivers/net/ethernet/intel/ixgb/ixgb_main.c ixgb_tx_timeout_task(struct work_struct *work)
work 1553 drivers/net/ethernet/intel/ixgb/ixgb_main.c container_of(work, struct ixgb_adapter, tx_timeout_task);
work 7906 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_service_task(struct work_struct *work)
work 7908 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct ixgbe_adapter *adapter = container_of(work,
work 844 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
work 846 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter,
work 3312 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_service_task(struct work_struct *work)
work 3314 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_adapter *adapter = container_of(work,
work 887 drivers/net/ethernet/korina.c static void korina_restart_task(struct work_struct *work)
work 889 drivers/net/ethernet/korina.c struct korina_private *lp = container_of(work,
work 1557 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static void mvpp2_gather_hw_statistics(struct work_struct *work)
work 1559 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c struct delayed_work *del_work = to_delayed_work(work);
work 1585 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c mvpp2_gather_hw_statistics(&port->stats_work.work);
work 686 drivers/net/ethernet/marvell/octeontx2/af/cgx.c static void cgx_lmac_linkup_work(struct work_struct *work)
work 688 drivers/net/ethernet/marvell/octeontx2/af/cgx.c struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
work 1486 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static inline void rvu_afpf_mbox_handler(struct work_struct *work)
work 1488 drivers/net/ethernet/marvell/octeontx2/af/rvu.c struct rvu_work *mwork = container_of(work, struct rvu_work, work);
work 1493 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static inline void rvu_afvf_mbox_handler(struct work_struct *work)
work 1495 drivers/net/ethernet/marvell/octeontx2/af/rvu.c struct rvu_work *mwork = container_of(work, struct rvu_work, work);
work 1567 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
work 1569 drivers/net/ethernet/marvell/octeontx2/af/rvu.c struct rvu_work *mwork = container_of(work, struct rvu_work, work);
work 1574 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
work 1576 drivers/net/ethernet/marvell/octeontx2/af/rvu.c struct rvu_work *mwork = container_of(work, struct rvu_work, work);
work 1654 drivers/net/ethernet/marvell/octeontx2/af/rvu.c INIT_WORK(&mwork->work, mbox_handler);
work 1658 drivers/net/ethernet/marvell/octeontx2/af/rvu.c INIT_WORK(&mwork->work, mbox_up_handler);
work 1701 drivers/net/ethernet/marvell/octeontx2/af/rvu.c queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
work 1707 drivers/net/ethernet/marvell/octeontx2/af/rvu.c queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
work 1821 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static void rvu_flr_handler(struct work_struct *work)
work 1823 drivers/net/ethernet/marvell/octeontx2/af/rvu.c struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
work 1867 drivers/net/ethernet/marvell/octeontx2/af/rvu.c queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
work 1887 drivers/net/ethernet/marvell/octeontx2/af/rvu.c queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
work 2219 drivers/net/ethernet/marvell/octeontx2/af/rvu.c INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
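Nearly every reference above follows one idiom: the driver embeds a struct work_struct in its private structure, the handler receives only the work_struct pointer, and container_of() recovers the enclosing structure. A minimal sketch of that shape follows; the foo_adapter/foo_reset_task names are hypothetical, not taken from any driver indexed here.

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    struct foo_adapter {
            struct net_device *netdev;
            struct work_struct reset_task;  /* embedded work item */
    };

    /* The handler gets only the work_struct pointer and recovers the
     * enclosing adapter with container_of(), as the entries above do. */
    static void foo_reset_task(struct work_struct *work)
    {
            struct foo_adapter *adapter =
                    container_of(work, struct foo_adapter, reset_task);

            /* ... reset the device via adapter->netdev ... */
    }

    static void foo_setup(struct foo_adapter *adapter)
    {
            INIT_WORK(&adapter->reset_task, foo_reset_task);
    }

    /* From an ISR or timeout path: defer the heavy lifting. */
    static void foo_tx_timeout(struct foo_adapter *adapter)
    {
            schedule_work(&adapter->reset_task);  /* runs foo_reset_task() later */
    }

Embedding the work item (rather than allocating it per event) is what makes the container_of() recovery possible, and it means teardown only has to call cancel_work_sync() on the embedded member.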
work 39 drivers/net/ethernet/marvell/octeontx2/af/rvu.h struct work_struct work;
work 194 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c static void cgx_evhandler_task(struct work_struct *work)
work 196 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
work 754 drivers/net/ethernet/marvell/pxa168_eth.c static void pxa168_eth_tx_timeout_task(struct work_struct *work)
work 756 drivers/net/ethernet/marvell/pxa168_eth.c struct pxa168_eth_private *pep = container_of(work,
work 3507 drivers/net/ethernet/marvell/sky2.c static void sky2_restart(struct work_struct *work)
work 3509 drivers/net/ethernet/marvell/sky2.c struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
work 2491 drivers/net/ethernet/mediatek/mtk_eth_soc.c static void mtk_pending_work(struct work_struct *work)
work 2493 drivers/net/ethernet/mediatek/mtk_eth_soc.c struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
work 263 drivers/net/ethernet/mellanox/mlx4/catas.c static void catas_reset(struct work_struct *work)
work 266 drivers/net/ethernet/mellanox/mlx4/catas.c container_of(work, struct mlx4_dev_persistent,
work 1846 drivers/net/ethernet/mellanox/mlx4/cmd.c struct mlx4_vf_immed_vlan_work *work;
work 1876 drivers/net/ethernet/mellanox/mlx4/cmd.c work = kzalloc(sizeof(*work), GFP_KERNEL);
work 1877 drivers/net/ethernet/mellanox/mlx4/cmd.c if (!work)
work 1886 drivers/net/ethernet/mellanox/mlx4/cmd.c kfree(work);
work 1895 drivers/net/ethernet/mellanox/mlx4/cmd.c work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
work 1903 drivers/net/ethernet/mellanox/mlx4/cmd.c work->orig_vlan_id = vp_oper->state.default_vlan;
work 1904 drivers/net/ethernet/mellanox/mlx4/cmd.c work->orig_vlan_ix = vp_oper->vlan_idx;
work 1908 drivers/net/ethernet/mellanox/mlx4/cmd.c work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
work 1910 drivers/net/ethernet/mellanox/mlx4/cmd.c if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
work 1920 drivers/net/ethernet/mellanox/mlx4/cmd.c work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
work 1923 drivers/net/ethernet/mellanox/mlx4/cmd.c work->port = port;
work 1924 drivers/net/ethernet/mellanox/mlx4/cmd.c work->slave = slave;
work 1925 drivers/net/ethernet/mellanox/mlx4/cmd.c work->qos = vp_oper->state.default_qos;
work 1926 drivers/net/ethernet/mellanox/mlx4/cmd.c work->qos_vport = vp_oper->state.qos_vport;
work 1927 drivers/net/ethernet/mellanox/mlx4/cmd.c work->vlan_id = vp_oper->state.default_vlan;
work 1928 drivers/net/ethernet/mellanox/mlx4/cmd.c work->vlan_ix = vp_oper->vlan_idx;
work 1929 drivers/net/ethernet/mellanox/mlx4/cmd.c work->vlan_proto = vp_oper->state.vlan_proto;
work 1930 drivers/net/ethernet/mellanox/mlx4/cmd.c work->priv = priv;
work 1931 drivers/net/ethernet/mellanox/mlx4/cmd.c INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
work 1932 drivers/net/ethernet/mellanox/mlx4/cmd.c queue_work(priv->mfunc.master.comm_wq, &work->work);
work 2233 drivers/net/ethernet/mellanox/mlx4/cmd.c void mlx4_master_comm_channel(struct work_struct *work)
work 2236 drivers/net/ethernet/mellanox/mlx4/cmd.c container_of(work,
work 161 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct work_struct work;
work 197 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static void mlx4_en_filter_work(struct work_struct *work)
work 199 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_filter *filter = container_of(work,
work 201 drivers/net/ethernet/mellanox/mlx4/en_netdev.c work);
work 300 drivers/net/ethernet/mellanox/mlx4/en_netdev.c INIT_WORK(&filter->work, mlx4_en_filter_work);
work 409 drivers/net/ethernet/mellanox/mlx4/en_netdev.c queue_work(priv->mdev->workqueue, &filter->work);
work 432 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cancel_work_sync(&filter->work);
work 449 drivers/net/ethernet/mellanox/mlx4/en_netdev.c !work_pending(&filter->work) &&
work 1251 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static void mlx4_en_do_set_rx_mode(struct work_struct *work)
work 1253 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
work 1514 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static void mlx4_en_do_get_stats(struct work_struct *work)
work 1516 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct delayed_work *delay = to_delayed_work(work);
work 1544 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static void mlx4_en_service_task(struct work_struct *work)
work 1546 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct delayed_work *delay = to_delayed_work(work);
work 1563 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static void mlx4_en_linkstate(struct work_struct *work)
work 1565 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
work 2004 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static void mlx4_en_restart(struct work_struct *work)
work 2006 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
work 2640 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
work 2643 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
work 2659 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
work 2662 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
work 2976 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct work_struct work;
work 2982 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static void mlx4_en_bond_work(struct work_struct *work)
work 2984 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_bond *bond = container_of(work,
work 2986 drivers/net/ethernet/mellanox/mlx4/en_netdev.c work);
work 3022 drivers/net/ethernet/mellanox/mlx4/en_netdev.c INIT_WORK(&bond->work, mlx4_en_bond_work);
work 3028 drivers/net/ethernet/mellanox/mlx4/en_netdev.c queue_work(priv->mdev->workqueue, &bond->work);
work 136 drivers/net/ethernet/mellanox/mlx4/eq.c void mlx4_gen_slave_eqe(struct work_struct *work)
work 139 drivers/net/ethernet/mellanox/mlx4/eq.c container_of(work, struct mlx4_mfunc_master_ctx,
work 449 drivers/net/ethernet/mellanox/mlx4/eq.c void mlx4_master_handle_slave_flr(struct work_struct *work)
work 452 drivers/net/ethernet/mellanox/mlx4/eq.c container_of(work, struct mlx4_mfunc_master_ctx,
work 852 drivers/net/ethernet/mellanox/mlx4/eq.c int work = 0;
work 858 drivers/net/ethernet/mellanox/mlx4/eq.c work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
work 860 drivers/net/ethernet/mellanox/mlx4/eq.c return IRQ_RETVAL(work);
work 2699 drivers/net/ethernet/mellanox/mlx4/fw.c void mlx4_opreq_action(struct work_struct *work)
work 2701 drivers/net/ethernet/mellanox/mlx4/fw.c struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
work 254 drivers/net/ethernet/mellanox/mlx4/fw.h void mlx4_opreq_action(struct work_struct *work);
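Entries such as mlx4_en_do_get_stats show the delayed-work variant of the same idiom: the handler still receives the inner work_struct, converts it with to_delayed_work(), then applies container_of() to the delayed_work. This is also why so many container_of paths in this index end in a doubled ".work.work": the first "work" is the driver's member name, the second is the work_struct inside struct delayed_work. A sketch with hypothetical foo_priv/foo_stats_task names:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct foo_priv {
            struct delayed_work stats_task;  /* delayed_work wraps a work_struct */
    };

    static void foo_stats_task(struct work_struct *work)
    {
            /* step 1: from the inner work_struct to the delayed_work around it */
            struct delayed_work *dwork = to_delayed_work(work);
            /* step 2: from the delayed_work to the private structure */
            struct foo_priv *priv =
                    container_of(dwork, struct foo_priv, stats_task);

            /* ... gather statistics ... then re-arm one second out */
            schedule_delayed_work(&priv->stats_task, msecs_to_jiffies(1000));
    }

    static void foo_stats_start(struct foo_priv *priv)
    {
            INIT_DELAYED_WORK(&priv->stats_task, foo_stats_task);
            schedule_delayed_work(&priv->stats_task, msecs_to_jiffies(1000));
    }

Self-rearming at the end of the handler, as sketched here, is the common way these drivers implement periodic polling; teardown pairs it with cancel_delayed_work_sync().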
work 651 drivers/net/ethernet/mellanox/mlx4/mlx4.h struct work_struct work;
work 1059 drivers/net/ethernet/mellanox/mlx4/mlx4.h void mlx4_master_comm_channel(struct work_struct *work);
work 1060 drivers/net/ethernet/mellanox/mlx4/mlx4.h void mlx4_gen_slave_eqe(struct work_struct *work);
work 1061 drivers/net/ethernet/mellanox/mlx4/mlx4.h void mlx4_master_handle_slave_flr(struct work_struct *work);
work 5266 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct mlx4_vf_immed_vlan_work *work)
work 5269 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c ctx->qp_context.qos_vport = work->qos_vport;
work 5274 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct mlx4_vf_immed_vlan_work *work =
work 5275 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c container_of(_work, struct mlx4_vf_immed_vlan_work, work);
work 5278 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct mlx4_dev *dev = &work->priv->dev;
work 5280 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c &work->priv->mfunc.master.res_tracker;
work 5282 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c &tracker->slave_list[work->slave].res_list[RES_QP];
work 5308 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c work->slave);
work 5315 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
work 5322 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c else if (!work->vlan_id)
work 5325 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c else if (work->vlan_proto == htons(ETH_P_8021AD))
work 5341 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (qp->com.owner == work->slave) {
work 5350 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (port != work->port) {
work 5359 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (work->vlan_id == MLX4_VGT) {
work 5371 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
work 5376 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (work->vlan_proto == htons(ETH_P_8021AD))
work 5385 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c ((work->qos & 0x7) << 3);
work 5389 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c update_qos_vpp(upd_context, work);
work 5398 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c work->slave, port, qp->local_qpn, err);
work 5409 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c errors, work->slave, work->port);
work 5414 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
work 5415 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c NO_INDX != work->orig_vlan_ix)
work 5416 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c __mlx4_unregister_vlan(&work->priv->dev, work->port,
work 5417 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c work->orig_vlan_id);
work 5419 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c kfree(work);
work 91 drivers/net/ethernet/mellanox/mlx4/sense.c static void mlx4_sense_port(struct work_struct *work)
work 93 drivers/net/ethernet/mellanox/mlx4/sense.c struct delayed_work *delay = to_delayed_work(work);
work 830 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static void cb_timeout_handler(struct work_struct *work)
work 832 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct delayed_work *dwork = container_of(work, struct delayed_work,
work 833 drivers/net/ethernet/mellanox/mlx5/core/cmd.c work);
work 851 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static void cmd_work_handler(struct work_struct *work)
work 853 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
work 983 drivers/net/ethernet/mellanox/mlx5/core/cmd.c cancel_work_sync(&ent->work)) {
work 1045 drivers/net/ethernet/mellanox/mlx5/core/cmd.c INIT_WORK(&ent->work, cmd_work_handler);
work 1047 drivers/net/ethernet/mellanox/mlx5/core/cmd.c cmd_work_handler(&ent->work);
work 1048 drivers/net/ethernet/mellanox/mlx5/core/cmd.c } else if (!queue_work(cmd->wq, &ent->work)) {
work 1780 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_async_work *work = _work;
work 1781 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_async_ctx *ctx = work->ctx;
work 1783 drivers/net/ethernet/mellanox/mlx5/core/cmd.c work->user_callback(status, work);
work 1790 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_async_work *work)
work 1794 drivers/net/ethernet/mellanox/mlx5/core/cmd.c work->ctx = ctx;
work 1795 drivers/net/ethernet/mellanox/mlx5/core/cmd.c work->user_callback = callback;
work 1799 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_cmd_exec_cb_handler, work, false);
work 259 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c static void mlx5_tracer_read_strings_db(struct work_struct *work)
work 261 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer,
work 661 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
work 664 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c container_of(work, struct mlx5_fw_tracer, handle_traces_work);
work 809 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
work 812 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c container_of(work, struct mlx5_fw_tracer, ownership_change_work);
work 801 drivers/net/ethernet/mellanox/mlx5/core/en.h struct delayed_work work;
work 977 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_set_rx_mode_work(struct work_struct *work);
work 1191 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_rx_dim_work(struct work_struct *work);
work 1192 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_tx_dim_work(struct work_struct *work);
work 57 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
work 66 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c dwork = to_delayed_work(work);
work 67 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c sagent = container_of(dwork, struct mlx5e_hv_vhca_stats_agent, work);
work 85 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
work 106 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c cancel_delayed_work_sync(&priv->stats_agent.work);
work 113 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
work 120 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c cancel_delayed_work_sync(&priv->stats_agent.work);
work 149 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c INIT_DELAYED_WORK(&priv->stats_agent.work, mlx5e_hv_vhca_stats_work);
work 49 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c static void mlx5e_monitor_counters_work(struct work_struct *work)
work 51 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
work 460 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c struct work_struct work;
work 465 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c static void _update_xfrm_state(struct work_struct *work)
work 469 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c container_of(work, struct mlx5e_ipsec_modify_state_work, work);
work 501 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c INIT_WORK(&modify_work->work, _update_xfrm_state);
work 502 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
work 566 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c static void arfs_handle_work(struct work_struct *work)
work 568 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c struct arfs_rule *arfs_rule = container_of(work,
work 44 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c void mlx5e_rx_dim_work(struct work_struct *work)
work 46 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c struct dim *dim = container_of(work, struct dim, work);
work 54 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c void mlx5e_tx_dim_work(struct work_struct *work)
work 56 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c struct dim *dim = container_of(work, struct dim, work);
work 593 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c void mlx5e_set_rx_mode_work(struct work_struct *work)
work 595 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
work 149 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_update_carrier_work(struct work_struct *work)
work 151 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
work 180 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_update_stats_work(struct work_struct *work)
work 182 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
work 604 drivers/net/ethernet/mellanox/mlx5/core/en_main.c INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
work 937 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cancel_work_sync(&rq->dim.work);
work 1178 drivers/net/ethernet/mellanox/mlx5/core/en_main.c INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
work 1395 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cancel_work_sync(&sq->dim.work);
work 4189 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct work_struct work;
work 4194 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_vxlan_add_work(struct work_struct *work)
work 4197 drivers/net/ethernet/mellanox/mlx5/core/en_main.c container_of(work, struct mlx5e_vxlan_work, work);
work 4208 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_vxlan_del_work(struct work_struct *work)
work 4211 drivers/net/ethernet/mellanox/mlx5/core/en_main.c container_of(work, struct mlx5e_vxlan_work, work);
work 4230 drivers/net/ethernet/mellanox/mlx5/core/en_main.c INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
work 4232 drivers/net/ethernet/mellanox/mlx5/core/en_main.c INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
work 4236 drivers/net/ethernet/mellanox/mlx5/core/en_main.c queue_work(priv->wq, &vxlan_work->work);
work 4335 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_tx_timeout_work(struct work_struct *work)
work 4337 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
work 579 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
work 581 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
work 582 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c neigh_update.neigh_stats_work.work);
work 639 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c static void mlx5e_rep_neigh_update(struct work_struct *work)
work 642 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
work 4183 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
work 4186 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c container_of(work, struct mlx5_rep_uplink_priv,
work 91 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
work 945 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_vport_change_handler(struct work_struct *work)
work 948 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c container_of(work, struct mlx5_vport, vport_change_handler);
work 204 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h struct work_struct work;
work 2113 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_functions_changed_event_handler(struct work_struct *work)
work 2119 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c host_work = container_of(work, struct mlx5_host_work, work);
work 2147 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
work 2148 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c queue_work(esw->work_queue, &host_work->work);
work 263 drivers/net/ethernet/mellanox/mlx5/core/events.c static void mlx5_pcie_event(struct work_struct *work)
work 272 drivers/net/ethernet/mellanox/mlx5/core/events.c events = container_of(work, struct mlx5_events, pcie_core_work);
work 228 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c static void mlx5_fc_stats_work(struct work_struct *work)
work 230 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
work 231 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c priv.fc_stats.work.work);
work 242 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c queue_delayed_work(fc_stats->wq, &fc_stats->work,
work 331 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
work 357 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
work 388 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
work 405 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c cancel_delayed_work_sync(&dev->priv.fc_stats.work);
work 525 drivers/net/ethernet/mellanox/mlx5/core/health.c static void mlx5_fw_reporter_err_work(struct work_struct *work)
work 530 drivers/net/ethernet/mellanox/mlx5/core/health.c health = container_of(work, struct mlx5_core_health, report_work);
work 613 drivers/net/ethernet/mellanox/mlx5/core/health.c static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
work 620 drivers/net/ethernet/mellanox/mlx5/core/health.c health = container_of(work, struct mlx5_core_health, fatal_report_work);
work 353 drivers/net/ethernet/mellanox/mlx5/core/lag.c static void mlx5_do_bond_work(struct work_struct *work)
work 355 drivers/net/ethernet/mellanox/mlx5/core/lag.c struct delayed_work *delayed_work = to_delayed_work(work);
work 100 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c struct work_struct work;
work 189 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c static void mlx5_lag_fib_update(struct work_struct *work)
work 192 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c container_of(work, struct mlx5_fib_event_work, work);
work 231 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c INIT_WORK(&fib_work->work, mlx5_lag_fib_update);
work 300 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c queue_work(ldev->wq, &fib_work->work);
work 100 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c static void mlx5_pps_out(struct work_struct *work)
work 102 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
work 129 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c static void mlx5_timestamp_overflow(struct work_struct *work)
work 131 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c struct delayed_work *dwork = to_delayed_work(work);
work 70 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c static void mlx5_hv_vhca_invalidate_work(struct work_struct *work)
work 76 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c hwork = container_of(work, struct mlx5_hv_vhca_work, invalidate_work);
work 99 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c struct mlx5_hv_vhca_work *work;
work 101 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c work = kzalloc(sizeof(*work), GFP_ATOMIC);
work 102 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c if (!work)
work 105 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c INIT_WORK(&work->invalidate_work, mlx5_hv_vhca_invalidate_work);
work 106 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c work->hv_vhca = hv_vhca;
work 107 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c work->block_mask = block_mask;
work 109 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c queue_work(hv_vhca->work_queue, &work->invalidate_work);
work 53 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c struct work_struct work;
work 429 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c static void pages_work_handler(struct work_struct *work)
work 431 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
work 482 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c INIT_WORK(&req->work, pages_work_handler);
work 483 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c queue_work(dev->priv.pg_wq, &req->work);
work 502 drivers/net/ethernet/mellanox/mlxsw/core.c static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
work 504 drivers/net/ethernet/mellanox/mlxsw/core.c struct mlxsw_reg_trans *trans = container_of(work,
work 506 drivers/net/ethernet/mellanox/mlxsw/core.c timeout_dw.work);
work 2081 drivers/net/ethernet/mellanox/mlxsw/core.c bool mlxsw_core_schedule_work(struct work_struct *work)
work 2083 drivers/net/ethernet/mellanox/mlxsw/core.c return queue_work(mlxsw_owq, work);
work 198 drivers/net/ethernet/mellanox/mlxsw/core.h bool mlxsw_core_schedule_work(struct work_struct *work);
work 175 drivers/net/ethernet/mellanox/mlxsw/spectrum.c void (*shaper_work)(struct work_struct *work);
work 1157 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static void update_stats_cache(struct work_struct *work)
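The hv_vhca and pagealloc entries just above show the other allocation strategy in this index: an event path allocates a request with kzalloc (GFP_ATOMIC when running in atomic context), fills in the payload, queues it with INIT_WORK plus queue_work onto a private workqueue, and the handler frees it when done; mlx4's immediate-VLAN work earlier ends the same way with kfree(work). A sketch of that one-shot shape, with hypothetical foo_req/foo_post_event names:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct foo_req {
            struct work_struct work;
            u32 event_id;           /* payload captured at enqueue time */
    };

    static void foo_req_handler(struct work_struct *work)
    {
            struct foo_req *req = container_of(work, struct foo_req, work);

            /* ... process req->event_id in sleepable context ... */
            kfree(req);             /* one-shot item: the handler owns and frees it */
    }

    /* Called from an atomic notifier or IRQ path, hence GFP_ATOMIC. */
    static int foo_post_event(struct workqueue_struct *wq, u32 event_id)
    {
            struct foo_req *req = kzalloc(sizeof(*req), GFP_ATOMIC);

            if (!req)
                    return -ENOMEM;

            req->event_id = event_id;
            INIT_WORK(&req->work, foo_req_handler);
            queue_work(wq, &req->work);
            return 0;
    }

Allocating per event lets an atomic context hand arbitrary payload to process context without a fixed-size mailbox, at the cost of having to tolerate allocation failure and of trusting the handler to free each request exactly once.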
work 1160 drivers/net/ethernet/mellanox/mlxsw/spectrum.c container_of(work, struct mlxsw_sp_port,
work 1161 drivers/net/ethernet/mellanox/mlxsw/spectrum.c periodic_hw_stats.update_dw.work);
work 815 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
work 817 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
work 818 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c rule_activity_update.dw.work);
work 751 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
work 754 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c container_of(work, struct mlxsw_sp_acl_tcam_vregion,
work 755 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c rehash.dw.work);
work 995 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c static void mlxsw_sp_mr_stats_update(struct work_struct *work)
work 997 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
work 998 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c stats_update_dw.work);
work 237 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
work 239 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c struct delayed_work *dwork = to_delayed_work(work);
work 678 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
work 680 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c struct delayed_work *dwork = to_delayed_work(work);
work 1049 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
work 1051 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c struct delayed_work *dwork = to_delayed_work(work);
work 57 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
work 123 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
work 194 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
work 2305 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
work 2310 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c router = container_of(work, struct mlxsw_sp_router,
work 2311 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neighs_update.dw.work);
work 2321 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
work 2326 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c router = container_of(work, struct mlxsw_sp_router,
work 2327 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nexthop_probe_dw.work);
work 2453 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct work_struct work;
work 2458 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
work 2461 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c container_of(work, struct mlxsw_sp_netevent_work, work);
work 2508 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
work 2511 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c container_of(work, struct mlxsw_sp_netevent_work, work);
work 2520 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
work 2523 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c container_of(work, struct mlxsw_sp_netevent_work, work);
work 2545 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c INIT_WORK(&net_work->work, cb);
work 2547 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_core_schedule_work(&net_work->work);
work 2599 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
work 2608 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_core_schedule_work(&net_work->work);
work 5921 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct work_struct work;
work 5982 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
work 5985 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c container_of(work, struct mlxsw_sp_fib_event_work, work);
work 6027 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
work 6030 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c container_of(work, struct mlxsw_sp_fib_event_work, work);
work 6067 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
work 6070 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c container_of(work, struct mlxsw_sp_fib_event_work, work);
work 6298 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
work 6302 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
work 6309 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
work 6314 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_core_schedule_work(&fib_work->work);
work 7075 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct work_struct work;
work 7081 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
work 7084 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
work 7121 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
work 7126 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_core_schedule_work(&inet6addr_work->work);
work 1716 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c struct work_struct work;
work 1720 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c static void mlxsw_sp_span_respin_work(struct work_struct *work)
work 1723 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c container_of(work, struct mlxsw_sp_span_respin_work, work);
work 1739 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
work 1742 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c mlxsw_core_schedule_work(&respin_work->work);
work 2729 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
work 2742 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
work 2763 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c struct work_struct work;
work 2871 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
work 2874 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c container_of(work, struct mlxsw_sp_switchdev_event_work, work);
work 3039 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
work 3042 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c container_of(work, struct mlxsw_sp_switchdev_event_work, work);
work 3159 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c INIT_WORK(&switchdev_work->work,
work 3176 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c INIT_WORK(&switchdev_work->work,
work 3189 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c mlxsw_core_schedule_work(&switchdev_work->work);
work 1074 drivers/net/ethernet/micrel/ks8842.c static void ks8842_tx_timeout_work(struct work_struct *work)
work 1077 drivers/net/ethernet/micrel/ks8842.c container_of(work, struct ks8842_adapter, timeout_work);
work 756 drivers/net/ethernet/micrel/ks8851.c static void ks8851_tx_work(struct work_struct *work)
work 758 drivers/net/ethernet/micrel/ks8851.c struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work);
work 972 drivers/net/ethernet/micrel/ks8851.c static void ks8851_rxctrl_work(struct work_struct *work)
work 974 drivers/net/ethernet/micrel/ks8851.c struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
work 6638 drivers/net/ethernet/micrel/ksz884x.c static void mib_read_work(struct work_struct *work)
work 6641 drivers/net/ethernet/micrel/ksz884x.c container_of(work, struct dev_info, mib_read);
work 1121 drivers/net/ethernet/microchip/enc28j60.c static void enc28j60_irq_work_handler(struct work_struct *work)
work 1124 drivers/net/ethernet/microchip/enc28j60.c container_of(work, struct enc28j60_net, irq_work);
work 1303 drivers/net/ethernet/microchip/enc28j60.c static void enc28j60_tx_work_handler(struct work_struct *work)
work 1306 drivers/net/ethernet/microchip/enc28j60.c container_of(work, struct enc28j60_net, tx_work);
work 1421 drivers/net/ethernet/microchip/enc28j60.c static void enc28j60_setrx_work_handler(struct work_struct *work)
work 1424 drivers/net/ethernet/microchip/enc28j60.c container_of(work, struct enc28j60_net, setrx_work);
work 1446 drivers/net/ethernet/microchip/enc28j60.c static void enc28j60_restart_work_handler(struct work_struct *work)
work 1449 drivers/net/ethernet/microchip/enc28j60.c container_of(work, struct enc28j60_net, restart_work);
work 1136 drivers/net/ethernet/mscc/ocelot.c static void ocelot_check_stats_work(struct work_struct *work)
work 1138 drivers/net/ethernet/mscc/ocelot.c struct delayed_work *del_work = to_delayed_work(work);
work 3408 drivers/net/ethernet/myricom/myri10ge/myri10ge.c static void myri10ge_watchdog(struct work_struct *work)
work 3411 drivers/net/ethernet/myricom/myri10ge/myri10ge.c container_of(work, struct myri10ge_priv, watchdog_work);
work 583 drivers/net/ethernet/natsemi/ns83820.c static inline void queue_refill(struct work_struct *work)
work 585 drivers/net/ethernet/natsemi/ns83820.c struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
work 6664 drivers/net/ethernet/neterion/s2io.c static void s2io_set_link(struct work_struct *work)
work 6666 drivers/net/ethernet/neterion/s2io.c struct s2io_nic *nic = container_of(work, struct s2io_nic,
work 7208 drivers/net/ethernet/neterion/s2io.c static void s2io_restart_nic(struct work_struct *work)
work 7210 drivers/net/ethernet/neterion/s2io.c struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
work 1085 drivers/net/ethernet/neterion/s2io.h static void s2io_set_link(struct work_struct *work);
work 1789 drivers/net/ethernet/neterion/vxge/vxge-main.c static void vxge_reset(struct work_struct *work)
work 1791 drivers/net/ethernet/neterion/vxge/vxge-main.c struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
work 606 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
work 611 drivers/net/ethernet/netronome/nfp/ccm_mbox.c nn = container_of(work, struct nfp_net, mbox_cmsg.runq_work);
work 625 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static void nfp_ccm_mbox_post_wait_work(struct work_struct *work)
work 631 drivers/net/ethernet/netronome/nfp/ccm_mbox.c nn = container_of(work, struct nfp_net, mbox_cmsg.wait_work);
work 301 drivers/net/ethernet/netronome/nfp/flower/cmsg.c void nfp_flower_cmsg_process_rx(struct work_struct *work)
work 307 drivers/net/ethernet/netronome/nfp/flower/cmsg.c priv = container_of(work, struct nfp_flower_priv, cmsg_work);
work 632 drivers/net/ethernet/netronome/nfp/flower/cmsg.h void nfp_flower_cmsg_process_rx(struct work_struct *work);
work 264 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c static void nfp_fl_lag_do_work(struct work_struct *work)
work 273 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c delayed_work = to_delayed_work(work);
work 274 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c lag = container_of(delayed_work, struct nfp_fl_lag, work);
work 295 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c schedule_delayed_work(&lag->work,
work 364 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
work 462 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c schedule_delayed_work(&priv->nfp_lag.work, 0);
work 474 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
work 502 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
work 583 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
work 628 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
work 665 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
work 680 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c cancel_delayed_work_sync(&lag->work);
work 111 drivers/net/ethernet/netronome/nfp/flower/main.h struct delayed_work work;
work 276 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c static void update_stats_cache(struct work_struct *work)
work 281 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c delayed_work = to_delayed_work(work);
work 599 drivers/net/ethernet/netronome/nfp/nfp_net_main.c static void nfp_net_refresh_vnics(struct work_struct *work)
work 601 drivers/net/ethernet/netronome/nfp/nfp_net_main.c struct nfp_pf *pf = container_of(work, struct nfp_pf,
work 682 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c static void pch_gbe_reset_task(struct work_struct *work)
work 685 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c adapter = container_of(work, struct pch_gbe_adapter, reset_task);
work 52 drivers/net/ethernet/pensando/ionic/ionic.h struct completion work;
work 25 drivers/net/ethernet/pensando/ionic/ionic_lif.c static void ionic_lif_deferred_work(struct work_struct *work)
work 27 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
work 57 drivers/net/ethernet/pensando/ionic/ionic_lif.c schedule_work(&def->work);
work 62 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_deferred_work *work)
work 65 drivers/net/ethernet/pensando/ionic/ionic_lif.c list_add_tail(&work->list, &def->list);
work 67 drivers/net/ethernet/pensando/ionic/ionic_lif.c schedule_work(&def->work);
work 106 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_deferred_work *work;
work 113 drivers/net/ethernet/pensando/ionic/ionic_lif.c work = kzalloc(sizeof(*work), GFP_ATOMIC);
work 114 drivers/net/ethernet/pensando/ionic/ionic_lif.c if (!work)
work 117 drivers/net/ethernet/pensando/ionic/ionic_lif.c work->type = IONIC_DW_TYPE_LINK_STATUS;
work 118 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_lif_deferred_enqueue(&lif->deferred, work);
work 184 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 220 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 559 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 606 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 795 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 826 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 859 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_deferred_work *work;
work 886 drivers/net/ethernet/pensando/ionic/ionic_lif.c work = kzalloc(sizeof(*work), GFP_ATOMIC);
work 887 drivers/net/ethernet/pensando/ionic/ionic_lif.c if (!work) {
work 891 drivers/net/ethernet/pensando/ionic/ionic_lif.c work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
work 893 drivers/net/ethernet/pensando/ionic/ionic_lif.c memcpy(work->addr, addr, ETH_ALEN);
work 896 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_lif_deferred_enqueue(&lif->deferred, work);
work 922 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 958 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_deferred_work *work;
work 961 drivers/net/ethernet/pensando/ionic/ionic_lif.c work = kzalloc(sizeof(*work), GFP_ATOMIC);
work 962 drivers/net/ethernet/pensando/ionic/ionic_lif.c if (!work) {
work 966 drivers/net/ethernet/pensando/ionic/ionic_lif.c work->type = IONIC_DW_TYPE_RX_MODE;
work 967 drivers/net/ethernet/pensando/ionic/ionic_lif.c work->rx_mode = rx_mode;
work 969 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_lif_deferred_enqueue(&lif->deferred, work);
work 1069 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 1240 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 1283 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 1308 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 1338 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 1687 drivers/net/ethernet/pensando/ionic/ionic_lif.c INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
work 1890 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 1935 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 2081 drivers/net/ethernet/pensando/ionic/ionic_lif.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 2159 drivers/net/ethernet/pensando/ionic/ionic_lif.c cancel_work_sync(&ionic->master_lif->deferred.work);
work 106 drivers/net/ethernet/pensando/ionic/ionic_lif.h struct work_struct work;
work 234 drivers/net/ethernet/pensando/ionic/ionic_main.c complete_all(&ctx->work);
work 279 drivers/net/ethernet/pensando/ionic/ionic_main.c remaining = wait_for_completion_timeout(&ctx->work,
work 23 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
work 1765 drivers/net/ethernet/qlogic/netxen/netxen_nic.h void netxen_watchdog_task(struct work_struct *work);
work 53 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c static void netxen_tx_timeout_task(struct work_struct *work);
work 54 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c static void netxen_fw_poll_work(struct work_struct *work);
work 2236 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c static void netxen_tx_timeout_task(struct work_struct *work)
work 2239 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c container_of(work, struct netxen_adapter, tx_timeout_task);
work 2505 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_attach_work(struct work_struct *work)
work 2507 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct netxen_adapter *adapter = container_of(work,
work 2508 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct netxen_adapter, fw_work.work);
work 2535 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_fwinit_work(struct work_struct *work)
work 2537 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct netxen_adapter *adapter = container_of(work,
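Note that the `work` member in the ionic entries above (ionic.h and the `.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work)` initializers) is a struct completion, not a work_struct: the admin-command context is built on the submitter's stack, the completion path calls complete_all(), and the submitter blocks in wait_for_completion_timeout(). A minimal sketch of that shape, with hypothetical foo_admin_ctx names:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct foo_admin_ctx {
            struct completion work;  /* named "work", as in the ionic entries */
            int status;
    };

    /* Submitter: build the context on the stack and sleep until the
     * completion side signals it, or the timeout expires. */
    static int foo_adminq_exec(void)
    {
            struct foo_admin_ctx ctx = {
                    .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
            };
            unsigned long remaining;

            /* ... post the command to the device, stashing &ctx so the
             * completion path can find it ... */
            remaining = wait_for_completion_timeout(&ctx.work, HZ);
            return remaining ? ctx.status : -ETIMEDOUT;
    }

    /* IRQ/event side, once the device reports the command done: */
    static void foo_adminq_done(struct foo_admin_ctx *ctx, int status)
    {
            ctx->status = status;
            complete_all(&ctx->work);
    }

The on-stack initializer is what makes the synchronous call safe: the context lives exactly as long as the waiting function, so no allocation or refcounting is needed.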
work 2538 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct netxen_adapter, fw_work.work);
work 2600 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_detach_work(struct work_struct *work)
work 2602 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct netxen_adapter *adapter = container_of(work,
work 2603 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct netxen_adapter, fw_work.work);
work 2718 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_fw_poll_work(struct work_struct *work)
work 2720 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct netxen_adapter *adapter = container_of(work,
work 2721 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct netxen_adapter, fw_work.work);
work 1140 drivers/net/ethernet/qlogic/qed/qed_main.c static void qed_slowpath_task(struct work_struct *work)
work 1142 drivers/net/ethernet/qlogic/qed/qed_main.c struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
work 1143 drivers/net/ethernet/qlogic/qed/qed_main.c slowpath_task.work);
work 5170 drivers/net/ethernet/qlogic/qed/qed_sriov.c static void qed_iov_pf_task(struct work_struct *work)
work 5173 drivers/net/ethernet/qlogic/qed/qed_sriov.c struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
work 5174 drivers/net/ethernet/qlogic/qed/qed_sriov.c iov_task.work);
work 1704 drivers/net/ethernet/qlogic/qed/qed_vf.c void qed_iov_vf_task(struct work_struct *work)
work 1706 drivers/net/ethernet/qlogic/qed/qed_vf.c struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
work 1707 drivers/net/ethernet/qlogic/qed/qed_vf.c iov_task.work);
work 1052 drivers/net/ethernet/qlogic/qed/qed_vf.h void qed_iov_vf_task(struct work_struct *work);
work 1236 drivers/net/ethernet/qlogic/qed/qed_vf.h static inline void qed_iov_vf_task(struct work_struct *work)
work 961 drivers/net/ethernet/qlogic/qede/qede_main.c static void qede_sp_task(struct work_struct *work)
work 963 drivers/net/ethernet/qlogic/qede/qede_main.c struct qede_dev *edev = container_of(work, struct qede_dev,
work 964 drivers/net/ethernet/qlogic/qede/qede_main.c sp_task.work);
work 41 drivers/net/ethernet/qlogic/qede/qede_ptp.c struct work_struct work;
work 159 drivers/net/ethernet/qlogic/qede/qede_ptp.c static void qede_ptp_task(struct work_struct *work)
work 168 drivers/net/ethernet/qlogic/qede/qede_ptp.c ptp = container_of(work, struct qede_ptp, work);
work 187 drivers/net/ethernet/qlogic/qede/qede_ptp.c schedule_work(&ptp->work);
work 410 drivers/net/ethernet/qlogic/qede/qede_ptp.c cancel_work_sync(&ptp->work);
work 444 drivers/net/ethernet/qlogic/qede/qede_ptp.c INIT_WORK(&ptp->work, qede_ptp_task);
work 553 drivers/net/ethernet/qlogic/qede/qede_ptp.c schedule_work(&ptp->work);
work 83 drivers/net/ethernet/qlogic/qede/qede_rdma.c cancel_work_sync(&event_node->work);
work 271 drivers/net/ethernet/qlogic/qede/qede_rdma.c if (!work_pending(&event_node->work)) {
work 291 drivers/net/ethernet/qlogic/qede/qede_rdma.c static void qede_rdma_handle_event(struct work_struct *work)
work 297 drivers/net/ethernet/qlogic/qede/qede_rdma.c event_node = container_of(work, struct qede_rdma_event_work, work);
work 344 drivers/net/ethernet/qlogic/qede/qede_rdma.c INIT_WORK(&event_node->work, qede_rdma_handle_event);
work 345 drivers/net/ethernet/qlogic/qede/qede_rdma.c queue_work(edev->rdma_info.rdma_wq, &event_node->work);
work 1516 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_link_state_machine_work(struct work_struct *work)
work 1519 drivers/net/ethernet/qlogic/qla3xxx.c container_of(work, struct ql3_adapter, link_state_work.work);
work 3621 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_reset_work(struct work_struct *work)
work 3624 drivers/net/ethernet/qlogic/qla3xxx.c container_of(work, struct ql3_adapter, reset_work.work);
work 3723 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_tx_timeout_work(struct work_struct *work)
work 3726 drivers/net/ethernet/qlogic/qla3xxx.c container_of(work, struct ql3_adapter, tx_timeout_work.work);
work 1095 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h struct work_struct work;
work 1617 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h void qlcnic_watchdog_task(struct work_struct *work);
work 936 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c void qlcnic_83xx_idc_aen_work(struct work_struct *work)
work 942 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
work 1036 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
work 1040 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work);
work 3999 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c cancel_work_sync(&mbx->work);
work 4021 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c queue_work(mbx->work_q, &mbx->work);
work 4095 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
work 4097 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
work 4098 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c work);
work 4184 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
work 1125 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
work 1130 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
work 2543 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
work 524 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c static void qlcnic_dcb_aen_work(struct work_struct *work)
work 528 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
work 219 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h void qlcnic_fw_poll_work(struct work_struct *work);
work 60 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c static void qlcnic_attach_work(struct work_struct *work);
work 61 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c static void qlcnic_fwinit_work(struct work_struct *work);
work 453 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c if (!adapter->fw_work.work.func)
work 3407 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c qlcnic_fwinit_work(struct work_struct *work)
work 3409 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct qlcnic_adapter *adapter = container_of(work,
work 3410 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct qlcnic_adapter, fw_work.work);
work 3510 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c qlcnic_detach_work(struct work_struct *work)
work 3512 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct qlcnic_adapter *adapter = container_of(work,
work 3513 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct qlcnic_adapter, fw_work.work);
work 3657 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c qlcnic_attach_work(struct work_struct *work)
work 3659 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct qlcnic_adapter *adapter = container_of(work,
work 3660 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct qlcnic_adapter, fw_work.work);
work 3780 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c void qlcnic_fw_poll_work(struct work_struct *work)
work 3782 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct qlcnic_adapter *adapter = container_of(work,
work 3783 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct qlcnic_adapter, fw_work.work);
work 32 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
work 1047 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
work 1049 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
work 1596 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
work 1604 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
work 1899 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
work 1905 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
work 1685 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c static void qlcnic_sriov_pf_process_flr(struct work_struct *work)
work 1689 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c vf = container_of(work, struct qlcnic_vf_info, flr_work);
work 398 drivers/net/ethernet/qualcomm/emac/emac.c static void emac_work_thread(struct work_struct *work)
work 401 drivers/net/ethernet/qualcomm/emac/emac.c container_of(work, struct emac_adapter, work_thread);
work 127 drivers/net/ethernet/qualcomm/qca_uart.c static void qcauart_transmit(struct work_struct *work)
work 129 drivers/net/ethernet/qualcomm/qca_uart.c struct qcauart *qca = container_of(work, struct qcauart, tx_work);
work 973 drivers/net/ethernet/realtek/8139cp.c unsigned work = 1000;
work 977 drivers/net/ethernet/realtek/8139cp.c while (work--) {
work 662 drivers/net/ethernet/realtek/8139too.c static void rtl8139_thread (struct work_struct *work);
work 663 drivers/net/ethernet/realtek/8139too.c static void rtl8139_tx_timeout_task(struct work_struct *work);
work 1608 drivers/net/ethernet/realtek/8139too.c static void rtl8139_thread (struct work_struct *work)
work 1611 drivers/net/ethernet/realtek/8139too.c container_of(work, struct rtl8139_private, thread.work);
work 1622 drivers/net/ethernet/realtek/8139too.c rtl8139_tx_timeout_task(work);
work 1654 drivers/net/ethernet/realtek/8139too.c static void rtl8139_tx_timeout_task (struct work_struct *work)
work 1657 drivers/net/ethernet/realtek/8139too.c container_of(work, struct rtl8139_private, thread.work);
work 673 drivers/net/ethernet/realtek/r8169_main.c struct work_struct work;
work 3855 drivers/net/ethernet/realtek/r8169_main.c schedule_work(&tp->wk.work);
work 6271 drivers/net/ethernet/realtek/r8169_main.c static void rtl_task(struct work_struct *work)
work 6280 drivers/net/ethernet/realtek/r8169_main.c container_of(work, struct rtl8169_private, wk.work);
work 6414 drivers/net/ethernet/realtek/r8169_main.c cancel_work_sync(&tp->wk.work);
work 7162 drivers/net/ethernet/realtek/r8169_main.c INIT_WORK(&tp->wk.work, rtl_task);
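The r8169 entries funnel every deferred action through a single wk.work item. One common way to multiplex several actions over one work_struct is a bitmap of pending flags next to the work item: requesters set a bit and schedule, and the handler drains whatever bits accumulated. The following is a sketch of that general shape under hypothetical foo_priv names, not the r8169 code itself:

    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    enum { FOO_FLAG_RESET, FOO_FLAG_LINK };

    struct foo_priv {
            struct work_struct task;
            unsigned long flags;    /* one bit per pending sub-task */
    };

    /* Request side: mark what needs doing, then kick the single work item.
     * set_bit() is atomic, so this is safe from IRQ context. */
    static void foo_schedule(struct foo_priv *priv, int flag)
    {
            set_bit(flag, &priv->flags);
            schedule_work(&priv->task);
    }

    static void foo_task(struct work_struct *work)
    {
            struct foo_priv *priv = container_of(work, struct foo_priv, task);

            /* Drain every action requested since the last run. */
            if (test_and_clear_bit(FOO_FLAG_RESET, &priv->flags))
                    ; /* ... reset the hardware ... */
            if (test_and_clear_bit(FOO_FLAG_LINK, &priv->flags))
                    ; /* ... re-check the link ... */
    }

Because schedule_work() on an already-pending item is a no-op, coalescing several requests into one handler run comes for free with this layout.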
work 1024 drivers/net/ethernet/renesas/ravb.h struct work_struct work; work 1439 drivers/net/ethernet/renesas/ravb_main.c schedule_work(&priv->work); work 1442 drivers/net/ethernet/renesas/ravb_main.c static void ravb_tx_timeout_work(struct work_struct *work) work 1444 drivers/net/ethernet/renesas/ravb_main.c struct ravb_private *priv = container_of(work, struct ravb_private, work 1445 drivers/net/ethernet/renesas/ravb_main.c work); work 2049 drivers/net/ethernet/renesas/ravb_main.c INIT_WORK(&priv->work, ravb_tx_timeout_work); work 2142 drivers/net/ethernet/rocker/rocker_main.c struct work_struct work; work 2151 drivers/net/ethernet/rocker/rocker_main.c static void rocker_router_fib_event_work(struct work_struct *work) work 2154 drivers/net/ethernet/rocker/rocker_main.c container_of(work, struct rocker_fib_event_work, work); work 2202 drivers/net/ethernet/rocker/rocker_main.c INIT_WORK(&fib_work->work, rocker_router_fib_event_work); work 2237 drivers/net/ethernet/rocker/rocker_main.c queue_work(rocker->rocker_owq, &fib_work->work); work 2736 drivers/net/ethernet/rocker/rocker_main.c struct work_struct work; work 2755 drivers/net/ethernet/rocker/rocker_main.c static void rocker_switchdev_event_work(struct work_struct *work) work 2758 drivers/net/ethernet/rocker/rocker_main.c container_of(work, struct rocker_switchdev_event_work, work); work 2812 drivers/net/ethernet/rocker/rocker_main.c INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work); work 2838 drivers/net/ethernet/rocker/rocker_main.c &switchdev_work->work); work 1812 drivers/net/ethernet/rocker/rocker_ofdpa.c struct work_struct work; work 1819 drivers/net/ethernet/rocker/rocker_ofdpa.c static void ofdpa_port_fdb_learn_work(struct work_struct *work) work 1822 drivers/net/ethernet/rocker/rocker_ofdpa.c container_of(work, struct ofdpa_fdb_learn_work, work); work 1839 drivers/net/ethernet/rocker/rocker_ofdpa.c kfree(work); work 1872 drivers/net/ethernet/rocker/rocker_ofdpa.c INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work); work 1879 drivers/net/ethernet/rocker/rocker_ofdpa.c schedule_work(&lw->work); work 2121 drivers/net/ethernet/sfc/efx.c monitor_work.work); work 1971 drivers/net/ethernet/sfc/falcon/efx.c monitor_work.work); work 789 drivers/net/ethernet/sfc/falcon/selftest.c selftest_work.work); work 777 drivers/net/ethernet/sfc/net_driver.h struct work_struct work; work 290 drivers/net/ethernet/sfc/ptp.c struct work_struct work; work 1384 drivers/net/ethernet/sfc/ptp.c static void efx_ptp_pps_worker(struct work_struct *work) work 1387 drivers/net/ethernet/sfc/ptp.c container_of(work, struct efx_ptp_data, pps_work); work 1399 drivers/net/ethernet/sfc/ptp.c static void efx_ptp_worker(struct work_struct *work) work 1402 drivers/net/ethernet/sfc/ptp.c container_of(work, struct efx_ptp_data, work); work 1477 drivers/net/ethernet/sfc/ptp.c INIT_WORK(&ptp->work, efx_ptp_worker); work 1565 drivers/net/ethernet/sfc/ptp.c cancel_work_sync(&efx->ptp_data->work); work 1695 drivers/net/ethernet/sfc/ptp.c queue_work(ptp->workwq, &ptp->work); work 1713 drivers/net/ethernet/sfc/ptp.c queue_work(ptp->workwq, &ptp->work); work 1853 drivers/net/ethernet/sfc/ptp.c queue_work(ptp->workwq, &ptp->work); work 1892 drivers/net/ethernet/sfc/ptp.c queue_work(ptp->workwq, &ptp->work); work 831 drivers/net/ethernet/sfc/rx.c work); work 980 drivers/net/ethernet/sfc/rx.c INIT_WORK(&req->work, efx_filter_rfs_work); work 983 drivers/net/ethernet/sfc/rx.c schedule_work(&req->work); work 789 drivers/net/ethernet/sfc/selftest.c selftest_work.work); work 870 
drivers/net/ethernet/sfc/siena_sriov.c static void efx_siena_sriov_vfdi(struct work_struct *work) work 872 drivers/net/ethernet/sfc/siena_sriov.c struct siena_vf *vf = container_of(work, struct siena_vf, req); work 1007 drivers/net/ethernet/sfc/siena_sriov.c static void efx_siena_sriov_reset_vf_work(struct work_struct *work) work 1009 drivers/net/ethernet/sfc/siena_sriov.c struct siena_vf *vf = container_of(work, struct siena_vf, req); work 913 drivers/net/ethernet/sis/sis190.c static void sis190_phy_task(struct work_struct *work) work 916 drivers/net/ethernet/sis/sis190.c container_of(work, struct sis190_private, phy_task); work 874 drivers/net/ethernet/smsc/smc911x.c static void smc911x_phy_configure(struct work_struct *work) work 876 drivers/net/ethernet/smsc/smc911x.c struct smc911x_local *lp = container_of(work, struct smc911x_local, work 1030 drivers/net/ethernet/smsc/smc91x.c static void smc_phy_configure(struct work_struct *work) work 1033 drivers/net/ethernet/smsc/smc91x.c container_of(work, struct smc_local, phy_configure); work 4346 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_service_task(struct work_struct *work) work 4348 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c struct stmmac_priv *priv = container_of(work, struct stmmac_priv, work 3993 drivers/net/ethernet/sun/cassini.c static void cas_reset_task(struct work_struct *work) work 3995 drivers/net/ethernet/sun/cassini.c struct cas *cp = container_of(work, struct cas, reset_task); work 6482 drivers/net/ethernet/sun/niu.c static void niu_reset_task(struct work_struct *work) work 6484 drivers/net/ethernet/sun/niu.c struct niu *np = container_of(work, struct niu, reset_task); work 2254 drivers/net/ethernet/sun/sungem.c static void gem_reset_task(struct work_struct *work) work 2256 drivers/net/ethernet/sun/sungem.c struct gem *gp = container_of(work, struct gem, reset_task); work 627 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c static void xlgmac_restart(struct work_struct *work) work 629 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c struct xlgmac_pdata *pdata = container_of(work, work 715 drivers/net/ethernet/ti/cpmac.c static void cpmac_hw_error(struct work_struct *work) work 718 drivers/net/ethernet/ti/cpmac.c container_of(work, struct cpmac_priv, reset_work); work 165 drivers/net/ethernet/ti/tlan.c static void tlan_tx_timeout_work(struct work_struct *work); work 1026 drivers/net/ethernet/ti/tlan.c static void tlan_tx_timeout_work(struct work_struct *work) work 1029 drivers/net/ethernet/ti/tlan.c container_of(work, struct tlan_priv, tlan_tqueue); work 1381 drivers/net/ethernet/toshiba/ps3_gelic_net.c static void gelic_net_tx_timeout_task(struct work_struct *work) work 1384 drivers/net/ethernet/toshiba/ps3_gelic_net.c container_of(work, struct gelic_card, tx_timeout_task); work 142 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c static void gelic_eurus_sync_cmd_worker(struct work_struct *work) work 151 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c cmd = container_of(work, struct gelic_eurus_cmd, work); work 221 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c INIT_WORK(&cmd->work, gelic_eurus_sync_cmd_worker); work 223 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c queue_work(wl->eurus_cmd_queue, &cmd->work); work 2118 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c static void gelic_wl_event_worker(struct work_struct *work) work 2126 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c wl = container_of(work, struct gelic_wl_info, event_work.work); work 2161 
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c static void gelic_wl_assoc_worker(struct work_struct *work) work 2171 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c wl = container_of(work, struct gelic_wl_info, assoc_work.work); work 295 drivers/net/ethernet/toshiba/ps3_gelic_wireless.h struct work_struct work; work 2150 drivers/net/ethernet/toshiba/spider_net.c spider_net_tx_timeout_task(struct work_struct *work) work 2153 drivers/net/ethernet/toshiba/spider_net.c container_of(work, struct spider_net_card, tx_timeout_task); work 501 drivers/net/ethernet/toshiba/tc35815.c static void tc35815_restart_work(struct work_struct *work); work 1168 drivers/net/ethernet/toshiba/tc35815.c static void tc35815_restart_work(struct work_struct *work) work 1171 drivers/net/ethernet/toshiba/tc35815.c container_of(work, struct tc35815_local, restart_work); work 507 drivers/net/ethernet/via/via-rhine.c static void rhine_reset_task(struct work_struct *work); work 508 drivers/net/ethernet/via/via-rhine.c static void rhine_slow_event_task(struct work_struct *work); work 1729 drivers/net/ethernet/via/via-rhine.c static void rhine_reset_task(struct work_struct *work) work 1731 drivers/net/ethernet/via/via-rhine.c struct rhine_private *rp = container_of(work, struct rhine_private, work 2192 drivers/net/ethernet/via/via-rhine.c static void rhine_slow_event_task(struct work_struct *work) work 2195 drivers/net/ethernet/via/via-rhine.c container_of(work, struct rhine_private, slow_event_task); work 785 drivers/net/ethernet/wiznet/w5100.c static void w5100_restart_work(struct work_struct *work) work 787 drivers/net/ethernet/wiznet/w5100.c struct w5100_priv *priv = container_of(work, struct w5100_priv, work 818 drivers/net/ethernet/wiznet/w5100.c static void w5100_tx_work(struct work_struct *work) work 820 drivers/net/ethernet/wiznet/w5100.c struct w5100_priv *priv = container_of(work, struct w5100_priv, work 884 drivers/net/ethernet/wiznet/w5100.c static void w5100_rx_work(struct work_struct *work) work 886 drivers/net/ethernet/wiznet/w5100.c struct w5100_priv *priv = container_of(work, struct w5100_priv, work 963 drivers/net/ethernet/wiznet/w5100.c static void w5100_setrx_work(struct work_struct *work) work 965 drivers/net/ethernet/wiznet/w5100.c struct w5100_priv *priv = container_of(work, struct w5100_priv, work 1080 drivers/net/ethernet/xilinx/ll_temac_main.c static void ll_temac_restart_work_func(struct work_struct *work) work 1082 drivers/net/ethernet/xilinx/ll_temac_main.c struct temac_local *lp = container_of(work, struct temac_local, work 1083 drivers/net/ethernet/xilinx/ll_temac_main.c restart_work.work); work 292 drivers/net/ethernet/xircom/xirc2ps_cs.c static void xirc2ps_tx_timeout_task(struct work_struct *work); work 1194 drivers/net/ethernet/xircom/xirc2ps_cs.c xirc2ps_tx_timeout_task(struct work_struct *work) work 1197 drivers/net/ethernet/xircom/xirc2ps_cs.c container_of(work, struct local_info, tx_timeout_task); work 957 drivers/net/fjes/fjes_hw.c static void fjes_hw_update_zone_task(struct work_struct *work) work 959 drivers/net/fjes/fjes_hw.c struct fjes_hw *hw = container_of(work, work 1147 drivers/net/fjes/fjes_hw.c static void fjes_hw_epstop_task(struct work_struct *work) work 1149 drivers/net/fjes/fjes_hw.c struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task); work 488 drivers/net/fjes/fjes_main.c static void fjes_tx_stall_task(struct work_struct *work) work 490 drivers/net/fjes/fjes_main.c struct fjes_adapter *adapter = container_of(work, work 544 
drivers/net/fjes/fjes_main.c static void fjes_force_close_task(struct work_struct *work) work 546 drivers/net/fjes/fjes_main.c struct fjes_adapter *adapter = container_of(work, work 555 drivers/net/fjes/fjes_main.c static void fjes_raise_intr_rxdata_task(struct work_struct *work) work 557 drivers/net/fjes/fjes_main.c struct fjes_adapter *adapter = container_of(work, work 1354 drivers/net/fjes/fjes_main.c static void fjes_irq_watch_task(struct work_struct *work) work 1356 drivers/net/fjes/fjes_main.c struct fjes_adapter *adapter = container_of(to_delayed_work(work), work 1374 drivers/net/fjes/fjes_main.c static void fjes_watch_unshare_task(struct work_struct *work) work 1377 drivers/net/fjes/fjes_main.c container_of(work, struct fjes_adapter, unshare_watch_task); work 637 drivers/net/hamradio/baycom_epp.c static void epp_bh(struct work_struct *work) work 647 drivers/net/hamradio/baycom_epp.c bc = container_of(work, struct baycom_state, run_work.work); work 1891 drivers/net/hyperv/netvsc_drv.c container_of(w, struct net_device_context, dwork.work); work 2109 drivers/net/hyperv/netvsc_drv.c = container_of(w, struct net_device_context, vf_takeover.work); work 277 drivers/net/ieee802154/adf7242.c struct delayed_work work; work 579 drivers/net/ieee802154/adf7242.c mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400)); work 584 drivers/net/ieee802154/adf7242.c static void adf7242_rx_cal_work(struct work_struct *work) work 587 drivers/net/ieee802154/adf7242.c container_of(work, struct adf7242_local, work.work); work 714 drivers/net/ieee802154/adf7242.c cancel_delayed_work_sync(&lp->work); work 838 drivers/net/ieee802154/adf7242.c cancel_delayed_work_sync(&lp->work); work 977 drivers/net/ieee802154/adf7242.c mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400)); work 1260 drivers/net/ieee802154/adf7242.c INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); work 1309 drivers/net/ieee802154/adf7242.c cancel_delayed_work_sync(&lp->work); work 53 drivers/net/ieee802154/atusb.c struct delayed_work work; /* memory allocations */ work 232 drivers/net/ieee802154/atusb.c static void atusb_work_urbs(struct work_struct *work) work 235 drivers/net/ieee802154/atusb.c container_of(to_delayed_work(work), struct atusb, work); work 253 drivers/net/ieee802154/atusb.c schedule_delayed_work(&atusb->work, work 339 drivers/net/ieee802154/atusb.c schedule_delayed_work(&atusb->work, 0); work 458 drivers/net/ieee802154/atusb.c schedule_delayed_work(&atusb->work, 0); work 1034 drivers/net/ieee802154/atusb.c INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs); work 1131 drivers/net/ieee802154/atusb.c cancel_delayed_work_sync(&atusb->work); work 385 drivers/net/ieee802154/ca8210.c struct work_struct work; work 701 drivers/net/ieee802154/ca8210.c static void ca8210_mlme_reset_worker(struct work_struct *work) work 704 drivers/net/ieee802154/ca8210.c work, work 706 drivers/net/ieee802154/ca8210.c work work 770 drivers/net/ieee802154/ca8210.c &mlme_reset_wpc->work, work 774 drivers/net/ieee802154/ca8210.c queue_work(priv->mlme_workqueue, &mlme_reset_wpc->work); work 873 drivers/net/ieee802154/cc2520.c static void cc2520_fifop_irqwork(struct work_struct *work) work 876 drivers/net/ieee802154/cc2520.c = container_of(work, struct cc2520_private, fifop_irqwork); work 156 drivers/net/ipvlan/ipvlan.h void ipvlan_process_multicast(struct work_struct *work); work 225 drivers/net/ipvlan/ipvlan_core.c void ipvlan_process_multicast(struct work_struct *work) work 227 drivers/net/ipvlan/ipvlan_core.c struct ipvl_port *port = 
container_of(work, struct ipvl_port, wq); work 445 drivers/net/netdevsim/dev.c static void nsim_dev_trap_report_work(struct work_struct *work) work 451 drivers/net/netdevsim/dev.c nsim_trap_data = container_of(work, struct nsim_trap_data, work 452 drivers/net/netdevsim/dev.c trap_report_dw.work); work 204 drivers/net/phy/dp83640.c static void rx_timestamp_work(struct work_struct *work); work 1408 drivers/net/phy/dp83640.c static void rx_timestamp_work(struct work_struct *work) work 1411 drivers/net/phy/dp83640.c container_of(work, struct dp83640_private, ts_work.work); work 849 drivers/net/phy/phy.c phy_state_machine(&phydev->state_queue.work); work 894 drivers/net/phy/phy.c void phy_state_machine(struct work_struct *work) work 896 drivers/net/phy/phy.c struct delayed_work *dwork = to_delayed_work(work); work 1775 drivers/net/phy/sfp.c static void sfp_timeout(struct work_struct *work) work 1777 drivers/net/phy/sfp.c struct sfp *sfp = container_of(work, struct sfp, timeout.work); work 1826 drivers/net/phy/sfp.c static void sfp_poll(struct work_struct *work) work 1828 drivers/net/phy/sfp.c struct sfp *sfp = container_of(work, struct sfp, poll.work); work 137 drivers/net/plip/plip.c static void plip_kick_bh(struct work_struct *work); work 138 drivers/net/plip/plip.c static void plip_bh(struct work_struct *work); work 139 drivers/net/plip/plip.c static void plip_timer_bh(struct work_struct *work); work 316 drivers/net/plip/plip.c plip_kick_bh(struct work_struct *work) work 319 drivers/net/plip/plip.c container_of(work, struct net_local, deferred.work); work 360 drivers/net/plip/plip.c plip_bh(struct work_struct *work) work 362 drivers/net/plip/plip.c struct net_local *nl = container_of(work, struct net_local, immediate); work 378 drivers/net/plip/plip.c plip_timer_bh(struct work_struct *work) work 381 drivers/net/plip/plip.c container_of(work, struct net_local, timer.work); work 461 drivers/net/ppp/pppoe.c static void pppoe_unbind_sock_work(struct work_struct *work) work 463 drivers/net/ppp/pppoe.c struct pppox_sock *po = container_of(work, struct pppox_sock, work 421 drivers/net/slip/slip.c static void slip_transmit(struct work_struct *work) work 423 drivers/net/slip/slip.c struct slip *sl = container_of(work, struct slip, tx_work); work 627 drivers/net/team/team.c static void team_notify_peers_work(struct work_struct *work) work 632 drivers/net/team/team.c team = container_of(work, struct team, notify_peers.dw.work); work 673 drivers/net/team/team.c static void team_mcast_rejoin_work(struct work_struct *work) work 678 drivers/net/team/team.c team = container_of(work, struct team, mcast_rejoin.dw.work); work 475 drivers/net/team/team_mode_loadbalance.c static void lb_stats_refresh(struct work_struct *work) work 488 drivers/net/team/team_mode_loadbalance.c lb_priv_ex = container_of(work, struct lb_priv_ex, work 489 drivers/net/team/team_mode_loadbalance.c stats.refresh_dw.work); work 584 drivers/net/thunderbolt.c static void tbnet_connected_work(struct work_struct *work) work 586 drivers/net/thunderbolt.c struct tbnet *net = container_of(work, typeof(*net), connected_work); work 634 drivers/net/thunderbolt.c static void tbnet_login_work(struct work_struct *work) work 636 drivers/net/thunderbolt.c struct tbnet *net = container_of(work, typeof(*net), login_work.work); work 662 drivers/net/thunderbolt.c static void tbnet_disconnect_work(struct work_struct *work) work 664 drivers/net/thunderbolt.c struct tbnet *net = container_of(work, typeof(*net), disconnect_work); work 273 
drivers/net/usb/ipheth.c static void ipheth_carrier_check_work(struct work_struct *work) work 275 drivers/net/usb/ipheth.c struct ipheth_device *dev = container_of(work, struct ipheth_device, work 276 drivers/net/usb/ipheth.c carrier_work.work); work 519 drivers/net/usb/kaweth.c static void kaweth_resubmit_tl(struct work_struct *work) work 522 drivers/net/usb/kaweth.c container_of(work, struct kaweth_device, lowmem_work.work); work 1245 drivers/net/usb/lan78xx.c static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work) work 1247 drivers/net/usb/lan78xx.c set_bit(work, &dev->flags); work 1249 drivers/net/usb/lan78xx.c netdev_err(dev->net, "kevent %d may have been dropped\n", work); work 3511 drivers/net/usb/lan78xx.c static void lan78xx_delayedwork(struct work_struct *work) work 3516 drivers/net/usb/lan78xx.c dev = container_of(work, struct lan78xx_net, wq.work); work 1097 drivers/net/usb/pegasus.c static void check_carrier(struct work_struct *work) work 1099 drivers/net/usb/pegasus.c pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work); work 4186 drivers/net/usb/r8152.c static void rtl_work_func_t(struct work_struct *work) work 4188 drivers/net/usb/r8152.c struct r8152 *tp = container_of(work, struct r8152, schedule.work); work 4224 drivers/net/usb/r8152.c static void rtl_hw_phy_work_func_t(struct work_struct *work) work 4226 drivers/net/usb/r8152.c struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); work 4690 drivers/net/usb/r8152.c if (work_busy(&tp->schedule.work) || sw_linking != hw_linking) work 467 drivers/net/usb/sierra_net.c static void sierra_net_kevent(struct work_struct *work) work 470 drivers/net/usb/sierra_net.c container_of(work, struct sierra_net_data, sierra_net_kevent); work 568 drivers/net/usb/sierra_net.c static void sierra_net_defer_kevent(struct usbnet *dev, int work) work 572 drivers/net/usb/sierra_net.c set_bit(work, &priv->kevent_flags); work 634 drivers/net/usb/smsc95xx.c static void check_carrier(struct work_struct *work) work 636 drivers/net/usb/smsc95xx.c struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv, work 637 drivers/net/usb/smsc95xx.c carrier_check.work); work 454 drivers/net/usb/usbnet.c void usbnet_defer_kevent (struct usbnet *dev, int work) work 456 drivers/net/usb/usbnet.c set_bit (work, &dev->flags); work 458 drivers/net/usb/usbnet.c netdev_dbg(dev->net, "kevent %d may have been dropped\n", work); work 460 drivers/net/usb/usbnet.c netdev_dbg(dev->net, "kevent %d scheduled\n", work); work 1132 drivers/net/usb/usbnet.c usbnet_deferred_kevent (struct work_struct *work) work 1135 drivers/net/usb/usbnet.c container_of(work, struct usbnet, kevent); work 1289 drivers/net/virtio_net.c static void refill_work(struct work_struct *work) work 1292 drivers/net/virtio_net.c container_of(work, struct virtnet_info, refill.work); work 2603 drivers/net/virtio_net.c static void virtnet_config_changed_work(struct work_struct *work) work 2606 drivers/net/virtio_net.c container_of(work, struct virtnet_info, config_work); work 206 drivers/net/vmxnet3/vmxnet3_drv.c schedule_work(&adapter->work); work 3207 drivers/net/vmxnet3/vmxnet3_drv.c schedule_work(&adapter->work); work 3216 drivers/net/vmxnet3/vmxnet3_drv.c adapter = container_of(data, struct vmxnet3_adapter, work); work 3472 drivers/net/vmxnet3/vmxnet3_drv.c INIT_WORK(&adapter->work, vmxnet3_reset_work); work 3551 drivers/net/vmxnet3/vmxnet3_drv.c cancel_work_sync(&adapter->work); work 380 drivers/net/vmxnet3/vmxnet3_int.h struct work_struct work; work 
707 drivers/net/wan/z85230.c int work=0; work 719 drivers/net/wan/z85230.c while(++work<5000) work 757 drivers/net/wan/z85230.c if(work==5000) work 616 drivers/net/wireless/ath/ar5523/ar5523.c static void ar5523_rx_refill_work(struct work_struct *work) work 618 drivers/net/wireless/ath/ar5523/ar5523.c struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work); work 883 drivers/net/wireless/ath/ar5523/ar5523.c static void ar5523_tx_work(struct work_struct *work) work 885 drivers/net/wireless/ath/ar5523/ar5523.c struct ar5523 *ar = container_of(work, struct ar5523, tx_work); work 901 drivers/net/wireless/ath/ar5523/ar5523.c static void ar5523_tx_wd_work(struct work_struct *work) work 903 drivers/net/wireless/ath/ar5523/ar5523.c struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work); work 966 drivers/net/wireless/ath/ar5523/ar5523.c static void ar5523_stat_work(struct work_struct *work) work 968 drivers/net/wireless/ath/ar5523/ar5523.c struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work); work 2186 drivers/net/wireless/ath/ath10k/core.c static void ath10k_core_restart(struct work_struct *work) work 2188 drivers/net/wireless/ath/ath10k/core.c struct ath10k *ar = container_of(work, struct ath10k, restart_work); work 2259 drivers/net/wireless/ath/ath10k/core.c static void ath10k_core_set_coverage_class_work(struct work_struct *work) work 2261 drivers/net/wireless/ath/ath10k/core.c struct ath10k *ar = container_of(work, struct ath10k, work 3005 drivers/net/wireless/ath/ath10k/core.c static void ath10k_core_register_work(struct work_struct *work) work 3007 drivers/net/wireless/ath/ath10k/core.c struct ath10k *ar = container_of(work, struct ath10k, register_work); work 891 drivers/net/wireless/ath/ath10k/debug.c static void ath10k_debug_htt_stats_dwork(struct work_struct *work) work 893 drivers/net/wireless/ath/ath10k/debug.c struct ath10k *ar = container_of(work, struct ath10k, work 894 drivers/net/wireless/ath/ath10k/debug.c debug.htt_stats_dwork.work); work 2042 drivers/net/wireless/ath/ath10k/mac.c static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) work 2044 drivers/net/wireless/ath/ath10k/mac.c struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, work 2110 drivers/net/wireless/ath/ath10k/mac.c static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) work 2112 drivers/net/wireless/ath/ath10k/mac.c struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, work 2113 drivers/net/wireless/ath/ath10k/mac.c connection_loss_work.work); work 3774 drivers/net/wireless/ath/ath10k/mac.c void ath10k_offchan_tx_work(struct work_struct *work) work 3776 drivers/net/wireless/ath/ath10k/mac.c struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); work 3890 drivers/net/wireless/ath/ath10k/mac.c void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) work 3892 drivers/net/wireless/ath/ath10k/mac.c struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); work 4282 drivers/net/wireless/ath/ath10k/mac.c void ath10k_scan_timeout_work(struct work_struct *work) work 4284 drivers/net/wireless/ath/ath10k/mac.c struct ath10k *ar = container_of(work, struct ath10k, work 4285 drivers/net/wireless/ath/ath10k/mac.c scan.timeout.work); work 38 drivers/net/wireless/ath/ath10k/mac.h void ath10k_scan_timeout_work(struct work_struct *work); work 40 drivers/net/wireless/ath/ath10k/mac.h void ath10k_offchan_tx_work(struct work_struct *work); work 42 drivers/net/wireless/ath/ath10k/mac.h void 
ath10k_mgmt_over_wmi_tx_work(struct work_struct *work); work 1748 drivers/net/wireless/ath/ath10k/pci.c static void ath10k_pci_fw_dump_work(struct work_struct *work) work 1750 drivers/net/wireless/ath/ath10k/pci.c struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci, work 918 drivers/net/wireless/ath/ath10k/qmi.c static void ath10k_qmi_driver_event_work(struct work_struct *work) work 920 drivers/net/wireless/ath/ath10k/qmi.c struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi, work 1296 drivers/net/wireless/ath/ath10k/sdio.c static void ath10k_sdio_write_async_work(struct work_struct *work) work 1298 drivers/net/wireless/ath/ath10k/sdio.c struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio, work 354 drivers/net/wireless/ath/ath10k/usb.c static void ath10k_usb_io_comp_work(struct work_struct *work) work 356 drivers/net/wireless/ath/ath10k/usb.c struct ath10k_usb_pipe *pipe = container_of(work, work 3902 drivers/net/wireless/ath/ath10k/wmi.c static void ath10k_radar_confirmation_work(struct work_struct *work) work 3904 drivers/net/wireless/ath/ath10k/wmi.c struct ath10k *ar = container_of(work, struct ath10k, work 5408 drivers/net/wireless/ath/ath10k/wmi.c static void ath10k_wmi_event_service_ready_work(struct work_struct *work) work 5410 drivers/net/wireless/ath/ath10k/wmi.c struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work); work 2404 drivers/net/wireless/ath/ath5k/base.c ath5k_calibrate_work(struct work_struct *work) work 2406 drivers/net/wireless/ath/ath5k/base.c struct ath5k_hw *ah = container_of(work, struct ath5k_hw, work 2461 drivers/net/wireless/ath/ath5k/base.c ath5k_tx_complete_poll_work(struct work_struct *work) work 2463 drivers/net/wireless/ath/ath5k/base.c struct ath5k_hw *ah = container_of(work, struct ath5k_hw, work 2464 drivers/net/wireless/ath/ath5k/base.c tx_complete_work.work); work 2970 drivers/net/wireless/ath/ath5k/base.c static void ath5k_reset_work(struct work_struct *work) work 2972 drivers/net/wireless/ath/ath5k/base.c struct ath5k_hw *ah = container_of(work, struct ath5k_hw, work 21 drivers/net/wireless/ath/ath6kl/recovery.c static void ath6kl_recovery_work(struct work_struct *work) work 23 drivers/net/wireless/ath/ath6kl/recovery.c struct ath6kl *ar = container_of(work, struct ath6kl, work 462 drivers/net/wireless/ath/ath6kl/sdio.c static void ath6kl_sdio_write_async_work(struct work_struct *work) work 467 drivers/net/wireless/ath/ath6kl/sdio.c ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); work 580 drivers/net/wireless/ath/ath6kl/usb.c static void ath6kl_usb_io_comp_work(struct work_struct *work) work 582 drivers/net/wireless/ath/ath6kl/usb.c struct ath6kl_usb_pipe *pipe = container_of(work, work 741 drivers/net/wireless/ath/ath9k/ath9k.h void ath_hw_check_work(struct work_struct *work); work 742 drivers/net/wireless/ath/ath9k/ath9k.h void ath_reset_work(struct work_struct *work); work 744 drivers/net/wireless/ath/ath9k/ath9k.h void ath_hw_pll_work(struct work_struct *work); work 745 drivers/net/wireless/ath/ath9k/ath9k.h void ath_paprd_calibrate(struct work_struct *work); work 1324 drivers/net/wireless/ath/ath9k/channel.c static void ath_chanctx_work(struct work_struct *work) work 1326 drivers/net/wireless/ath/ath9k/channel.c struct ath_softc *sc = container_of(work, struct ath_softc, work 566 drivers/net/wireless/ath/ath9k/htc.h void ath9k_htc_ani_work(struct work_struct *work); work 598 drivers/net/wireless/ath/ath9k/htc.h void ath9k_ps_work(struct work_struct *work); work 611 
drivers/net/wireless/ath/ath9k/htc.h void ath9k_led_work(struct work_struct *work); work 625 drivers/net/wireless/ath/ath9k/htc.h static inline void ath9k_led_work(struct work_struct *work) work 64 drivers/net/wireless/ath/ath9k/htc_drv_gpio.c static void ath_btcoex_period_work(struct work_struct *work) work 66 drivers/net/wireless/ath/ath9k/htc_drv_gpio.c struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv, work 67 drivers/net/wireless/ath/ath9k/htc_drv_gpio.c coex_period_work.work); work 98 drivers/net/wireless/ath/ath9k/htc_drv_gpio.c static void ath_btcoex_duty_cycle_work(struct work_struct *work) work 100 drivers/net/wireless/ath/ath9k/htc_drv_gpio.c struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv, work 101 drivers/net/wireless/ath/ath9k/htc_drv_gpio.c duty_cycle_work.work); work 226 drivers/net/wireless/ath/ath9k/htc_drv_gpio.c void ath9k_led_work(struct work_struct *work) work 228 drivers/net/wireless/ath/ath9k/htc_drv_gpio.c struct ath9k_htc_priv *priv = container_of(work, work 76 drivers/net/wireless/ath/ath9k/htc_drv_main.c void ath9k_ps_work(struct work_struct *work) work 79 drivers/net/wireless/ath/ath9k/htc_drv_main.c container_of(work, struct ath9k_htc_priv, work 768 drivers/net/wireless/ath/ath9k/htc_drv_main.c void ath9k_htc_ani_work(struct work_struct *work) work 771 drivers/net/wireless/ath/ath9k/htc_drv_main.c container_of(work, struct ath9k_htc_priv, ani_work.work); work 1288 drivers/net/wireless/ath/ath9k/htc_drv_main.c static void ath9k_htc_sta_rc_update_work(struct work_struct *work) work 1291 drivers/net/wireless/ath/ath9k/htc_drv_main.c container_of(work, struct ath9k_htc_sta, rc_update_work); work 56 drivers/net/wireless/ath/ath9k/link.c void ath_hw_check_work(struct work_struct *work) work 58 drivers/net/wireless/ath/ath9k/link.c struct ath_softc *sc = container_of(work, struct ath_softc, work 59 drivers/net/wireless/ath/ath9k/link.c hw_check_work.work); work 117 drivers/net/wireless/ath/ath9k/link.c void ath_hw_pll_work(struct work_struct *work) work 120 drivers/net/wireless/ath/ath9k/link.c struct ath_softc *sc = container_of(work, struct ath_softc, work 121 drivers/net/wireless/ath/ath9k/link.c hw_pll_work.work); work 212 drivers/net/wireless/ath/ath9k/link.c void ath_paprd_calibrate(struct work_struct *work) work 214 drivers/net/wireless/ath/ath9k/link.c struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work); work 257 drivers/net/wireless/ath/ath9k/main.c goto work; work 267 drivers/net/wireless/ath/ath9k/main.c work: work 631 drivers/net/wireless/ath/ath9k/main.c void ath_reset_work(struct work_struct *work) work 633 drivers/net/wireless/ath/ath9k/main.c struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work); work 236 drivers/net/wireless/ath/ath9k/mci.c static void ath9k_mci_work(struct work_struct *work) work 238 drivers/net/wireless/ath/ath9k/mci.c struct ath_softc *sc = container_of(work, struct ath_softc, mci_work); work 190 drivers/net/wireless/ath/ath9k/wmi.c void ath9k_fatal_work(struct work_struct *work) work 192 drivers/net/wireless/ath/ath9k/wmi.c struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv, work 189 drivers/net/wireless/ath/ath9k/wmi.h void ath9k_fatal_work(struct work_struct *work); work 578 drivers/net/wireless/ath/carl9170/carl9170.h void carl9170_tx_janitor(struct work_struct *work); work 66 drivers/net/wireless/ath/carl9170/led.c static void carl9170_led_update(struct work_struct *work) work 68 drivers/net/wireless/ath/carl9170/led.c 
struct ar9170 *ar = container_of(work, struct ar9170, led_work.work); work 468 drivers/net/wireless/ath/carl9170/main.c static void carl9170_restart_work(struct work_struct *work) work 470 drivers/net/wireless/ath/carl9170/main.c struct ar9170 *ar = container_of(work, struct ar9170, work 544 drivers/net/wireless/ath/carl9170/main.c static void carl9170_ping_work(struct work_struct *work) work 546 drivers/net/wireless/ath/carl9170/main.c struct ar9170 *ar = container_of(work, struct ar9170, ping_work); work 860 drivers/net/wireless/ath/carl9170/main.c static void carl9170_ps_work(struct work_struct *work) work 862 drivers/net/wireless/ath/carl9170/main.c struct ar9170 *ar = container_of(work, struct ar9170, work 892 drivers/net/wireless/ath/carl9170/main.c static void carl9170_stat_work(struct work_struct *work) work 894 drivers/net/wireless/ath/carl9170/main.c struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work); work 1396 drivers/net/wireless/ath/carl9170/main.c static void carl9170_ampdu_work(struct work_struct *work) work 1398 drivers/net/wireless/ath/carl9170/main.c struct ar9170 *ar = container_of(work, struct ar9170, work 639 drivers/net/wireless/ath/carl9170/tx.c void carl9170_tx_janitor(struct work_struct *work) work 641 drivers/net/wireless/ath/carl9170/tx.c struct ar9170 *ar = container_of(work, struct ar9170, work 642 drivers/net/wireless/ath/carl9170/tx.c tx_janitor.work); work 617 drivers/net/wireless/ath/wcn36xx/main.c static void wcn36xx_hw_scan_worker(struct work_struct *work) work 619 drivers/net/wireless/ath/wcn36xx/main.c struct wcn36xx *wcn = container_of(work, struct wcn36xx, scan_work); work 2551 drivers/net/wireless/ath/wcn36xx/smd.c static void wcn36xx_ind_smd_work(struct work_struct *work) work 2554 drivers/net/wireless/ath/wcn36xx/smd.c container_of(work, struct wcn36xx, hal_ind_work); work 2336 drivers/net/wireless/ath/wil6210/cfg80211.c void wil_probe_client_worker(struct work_struct *work) work 2338 drivers/net/wireless/ath/wil6210/cfg80211.c struct wil6210_vif *vif = container_of(work, struct wil6210_vif, work 471 drivers/net/wireless/ath/wil6210/main.c void wil_disconnect_worker(struct work_struct *work) work 473 drivers/net/wireless/ath/wil6210/main.c struct wil6210_vif *vif = container_of(work, work 537 drivers/net/wireless/ath/wil6210/main.c static void wil_fw_error_worker(struct work_struct *work) work 539 drivers/net/wireless/ath/wil6210/main.c struct wil6210_priv *wil = container_of(work, struct wil6210_priv, work 242 drivers/net/wireless/ath/wil6210/p2p.c void wil_p2p_listen_expired(struct work_struct *work) work 244 drivers/net/wireless/ath/wil6210/p2p.c struct wil_p2p_info *p2p = container_of(work, work 270 drivers/net/wireless/ath/wil6210/p2p.c void wil_p2p_search_expired(struct work_struct *work) work 272 drivers/net/wireless/ath/wil6210/p2p.c struct wil_p2p_info *p2p = container_of(work, work 302 drivers/net/wireless/ath/wil6210/p2p.c void wil_p2p_delayed_listen_work(struct work_struct *work) work 304 drivers/net/wireless/ath/wil6210/p2p.c struct wil_p2p_info *p2p = container_of(work, work 815 drivers/net/wireless/ath/wil6210/txrx.c void wil_enable_tx_key_worker(struct work_struct *work) work 817 drivers/net/wireless/ath/wil6210/txrx.c struct wil6210_vif *vif = container_of(work, work 1253 drivers/net/wireless/ath/wil6210/wil6210.h void wmi_event_worker(struct work_struct *work); work 1315 drivers/net/wireless/ath/wil6210/wil6210.h void wil_p2p_listen_expired(struct work_struct *work); work 1316 
drivers/net/wireless/ath/wil6210/wil6210.h void wil_p2p_search_expired(struct work_struct *work); work 1318 drivers/net/wireless/ath/wil6210/wil6210.h void wil_p2p_delayed_listen_work(struct work_struct *work); work 1364 drivers/net/wireless/ath/wil6210/wil6210.h void wil_probe_client_worker(struct work_struct *work); work 1365 drivers/net/wireless/ath/wil6210/wil6210.h void wil_disconnect_worker(struct work_struct *work); work 1366 drivers/net/wireless/ath/wil6210/wil6210.h void wil_enable_tx_key_worker(struct work_struct *work); work 3391 drivers/net/wireless/ath/wil6210/wmi.c void wmi_event_worker(struct work_struct *work) work 3393 drivers/net/wireless/ath/wil6210/wmi.c struct wil6210_priv *wil = container_of(work, struct wil6210_priv, work 1467 drivers/net/wireless/atmel/at76c50x-usb.c static void at76_work_set_promisc(struct work_struct *work) work 1469 drivers/net/wireless/atmel/at76c50x-usb.c struct at76_priv *priv = container_of(work, struct at76_priv, work 1492 drivers/net/wireless/atmel/at76c50x-usb.c static void at76_work_submit_rx(struct work_struct *work) work 1494 drivers/net/wireless/atmel/at76c50x-usb.c struct at76_priv *priv = container_of(work, struct at76_priv, work 1713 drivers/net/wireless/atmel/at76c50x-usb.c static void at76_work_join_bssid(struct work_struct *work) work 1715 drivers/net/wireless/atmel/at76c50x-usb.c struct at76_priv *priv = container_of(work, struct at76_priv, work 1916 drivers/net/wireless/atmel/at76c50x-usb.c static void at76_dwork_hw_scan(struct work_struct *work) work 1918 drivers/net/wireless/atmel/at76c50x-usb.c struct at76_priv *priv = container_of(work, struct at76_priv, work 1919 drivers/net/wireless/atmel/at76c50x-usb.c dwork_hw_scan.work); work 75 drivers/net/wireless/broadcom/b43/leds.c static void b43_leds_work(struct work_struct *work) work 77 drivers/net/wireless/broadcom/b43/leds.c struct b43_leds *leds = container_of(work, struct b43_leds, work); work 104 drivers/net/wireless/broadcom/b43/leds.c ieee80211_queue_work(wl->hw, &wl->leds.work); work 324 drivers/net/wireless/broadcom/b43/leds.c cancel_work_sync(&leds->work); work 333 drivers/net/wireless/broadcom/b43/leds.c INIT_WORK(&dev->wl->leds.work, b43_leds_work); work 41 drivers/net/wireless/broadcom/b43/leds.h struct work_struct work; work 1797 drivers/net/wireless/broadcom/b43/main.c static void b43_beacon_update_trigger_work(struct work_struct *work) work 1799 drivers/net/wireless/broadcom/b43/main.c struct b43_wl *wl = container_of(work, struct b43_wl, work 2546 drivers/net/wireless/broadcom/b43/main.c static void b43_request_firmware(struct work_struct *work) work 2548 drivers/net/wireless/broadcom/b43/main.c struct b43_wl *wl = container_of(work, work 3425 drivers/net/wireless/broadcom/b43/main.c static void b43_periodic_work_handler(struct work_struct *work) work 3427 drivers/net/wireless/broadcom/b43/main.c struct b43_wldev *dev = container_of(work, struct b43_wldev, work 3428 drivers/net/wireless/broadcom/b43/main.c periodic_work.work); work 3454 drivers/net/wireless/broadcom/b43/main.c struct delayed_work *work = &dev->periodic_work; work 3457 drivers/net/wireless/broadcom/b43/main.c INIT_DELAYED_WORK(work, b43_periodic_work_handler); work 3458 drivers/net/wireless/broadcom/b43/main.c ieee80211_queue_delayed_work(dev->wl->hw, work, 0); work 3580 drivers/net/wireless/broadcom/b43/main.c static void b43_tx_work(struct work_struct *work) work 3582 drivers/net/wireless/broadcom/b43/main.c struct b43_wl *wl = container_of(work, struct b43_wl, tx_work); work 5196 
drivers/net/wireless/broadcom/b43/main.c static void b43_chip_reset(struct work_struct *work) work 5199 drivers/net/wireless/broadcom/b43/main.c container_of(work, struct b43_wldev, restart_work); work 466 drivers/net/wireless/broadcom/b43/phy_common.c void b43_phy_txpower_adjust_work(struct work_struct *work) work 468 drivers/net/wireless/broadcom/b43/phy_common.c struct b43_wl *wl = container_of(work, struct b43_wl, work 422 drivers/net/wireless/broadcom/b43/phy_common.h void b43_phy_txpower_adjust_work(struct work_struct *work); work 1210 drivers/net/wireless/broadcom/b43legacy/main.c static void b43legacy_beacon_update_trigger_work(struct work_struct *work) work 1212 drivers/net/wireless/broadcom/b43legacy/main.c struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl, work 1560 drivers/net/wireless/broadcom/b43legacy/main.c static void b43legacy_request_firmware(struct work_struct *work) work 1562 drivers/net/wireless/broadcom/b43legacy/main.c struct b43legacy_wl *wl = container_of(work, work 2313 drivers/net/wireless/broadcom/b43legacy/main.c static void b43legacy_periodic_work_handler(struct work_struct *work) work 2315 drivers/net/wireless/broadcom/b43legacy/main.c struct b43legacy_wldev *dev = container_of(work, struct b43legacy_wldev, work 2316 drivers/net/wireless/broadcom/b43legacy/main.c periodic_work.work); work 2342 drivers/net/wireless/broadcom/b43legacy/main.c struct delayed_work *work = &dev->periodic_work; work 2345 drivers/net/wireless/broadcom/b43legacy/main.c INIT_DELAYED_WORK(work, b43legacy_periodic_work_handler); work 2346 drivers/net/wireless/broadcom/b43legacy/main.c ieee80211_queue_delayed_work(dev->wl->hw, work, 0); work 2445 drivers/net/wireless/broadcom/b43legacy/main.c static void b43legacy_tx_work(struct work_struct *work) work 2447 drivers/net/wireless/broadcom/b43legacy/main.c struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl, work 3551 drivers/net/wireless/broadcom/b43legacy/main.c static void b43legacy_chip_reset(struct work_struct *work) work 3554 drivers/net/wireless/broadcom/b43legacy/main.c container_of(work, struct b43legacy_wldev, restart_work); work 82 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c struct work_struct work; work 278 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c schedule_work(&bt_local->work); work 285 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c static void brcmf_btcoex_handler(struct work_struct *work) work 288 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c btci = container_of(work, struct brcmf_btcoex_info, work); work 377 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c INIT_WORK(&btci->work, brcmf_btcoex_handler); work 399 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c cancel_work_sync(&cfg->btcoex->work); work 419 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c schedule_work(&btci->work); work 436 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c schedule_work(&btci->work); work 3042 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work) work 3045 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c container_of(work, struct brcmf_cfg80211_info, work 131 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c static void _brcmf_set_multicast_list(struct work_struct *work) work 133 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c struct brcmf_if *ifp = container_of(work, struct brcmf_if, work 198 
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c static void _brcmf_update_ndtable(struct work_struct *work) work 200 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c struct brcmf_if *ifp = container_of(work, struct brcmf_if, work 221 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c static void _brcmf_update_ndtable(struct work_struct *work) work 1081 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c static void brcmf_core_bus_reset(struct work_struct *work) work 1083 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c struct brcmf_pub *drvr = container_of(work, struct brcmf_pub, work 1441 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c static void brcmf_driver_register(struct work_struct *work) work 213 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c static void brcmf_fweh_event_worker(struct work_struct *work) work 223 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c fweh = container_of(work, struct brcmf_fweh_info, event_work); work 562 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c struct brcmf_msgbuf_work_item *work = NULL; work 567 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c work = list_first_entry(&msgbuf->work_queue, work 569 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c list_del(&work->queue); work 573 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c return work; work 579 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c struct brcmf_msgbuf_work_item *work) work 591 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c flowid = work->flowid; work 618 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c create->msg.ifidx = work->ifidx; work 623 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c memcpy(create->sa, work->sa, ETH_ALEN); work 624 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c memcpy(create->da, work->da, ETH_ALEN); work 632 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c flowid, work->da, create->tid, work->ifidx); work 646 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c static void brcmf_msgbuf_flowring_worker(struct work_struct *work) work 651 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work); work 1631 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c struct brcmf_msgbuf_work_item *work; work 1638 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c work = list_first_entry(&msgbuf->work_queue, work 1641 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c list_del(&work->queue); work 1642 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c kfree(work); work 1095 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c static void brcmf_p2p_afx_handler(struct work_struct *work) work 1097 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c struct afx_hdl *afx_hdl = container_of(work, struct afx_hdl, afx_work); work 3701 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c static void brcmf_sdio_dataworker(struct work_struct *work) work 3703 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio, work 1301 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c static void brcms_driver_init(struct work_struct *work) work 1456 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c static void _brcms_timer(struct work_struct *work) work 1458 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c struct brcms_timer *t = 
container_of(work, struct brcms_timer, work 1459 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c dly_wrk.work); work 327 drivers/net/wireless/intel/ipw2x00/ipw2100.c static void ipw2100_wx_event_work(struct work_struct *work); work 1946 drivers/net/wireless/intel/ipw2x00/ipw2100.c static void ipw2100_reset_adapter(struct work_struct *work) work 1949 drivers/net/wireless/intel/ipw2x00/ipw2100.c container_of(work, struct ipw2100_priv, reset_work.work); work 2165 drivers/net/wireless/intel/ipw2x00/ipw2100.c static void ipw2100_scan_event(struct work_struct *work) work 2167 drivers/net/wireless/intel/ipw2x00/ipw2100.c struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv, work 2168 drivers/net/wireless/intel/ipw2x00/ipw2100.c scan_event.work); work 5552 drivers/net/wireless/intel/ipw2x00/ipw2100.c static void ipw2100_security_work(struct work_struct *work) work 5555 drivers/net/wireless/intel/ipw2x00/ipw2100.c container_of(work, struct ipw2100_priv, security_work.work); work 5775 drivers/net/wireless/intel/ipw2x00/ipw2100.c ipw2100_reset_adapter(&priv->reset_work.work); work 5938 drivers/net/wireless/intel/ipw2x00/ipw2100.c static void ipw2100_hang_check(struct work_struct *work) work 5941 drivers/net/wireless/intel/ipw2x00/ipw2100.c container_of(work, struct ipw2100_priv, hang_check.work); work 5981 drivers/net/wireless/intel/ipw2x00/ipw2100.c static void ipw2100_rf_kill(struct work_struct *work) work 5984 drivers/net/wireless/intel/ipw2x00/ipw2100.c container_of(work, struct ipw2100_priv, rf_kill.work); work 8262 drivers/net/wireless/intel/ipw2x00/ipw2100.c static void ipw2100_wx_event_work(struct work_struct *work) work 8265 drivers/net/wireless/intel/ipw2x00/ipw2100.c container_of(work, struct ipw2100_priv, wx_event_work.work); work 211 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_up(struct work_struct *work); work 213 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_down(struct work_struct *work); work 900 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_led_link_on(struct work_struct *work) work 903 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, led_link_on.work); work 945 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_led_link_off(struct work_struct *work) work 948 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, led_link_off.work); work 1023 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_led_activity_off(struct work_struct *work) work 1026 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, led_act_off.work); work 2339 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_adapter_restart(struct work_struct *work) work 2342 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, adapter_restart); work 2370 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_scan_check(struct work_struct *work) work 2373 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, scan_check.work); work 3985 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_disassociate(struct work_struct *work) work 3988 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, disassociate); work 3994 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_system_config(struct work_struct *work) work 3997 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, system_config); work 4364 
drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_gather_stats(struct work_struct *work) work 4367 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, gather_stats.work); work 4444 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_scan_event(struct work_struct *work) work 4449 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, scan_event.work); work 5212 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_rx_queue_replenish(struct work_struct *work) work 5215 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, rx_replenish); work 5612 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_merge_adhoc_network(struct work_struct *work) work 5615 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, merge_networks); work 6034 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_adhoc_check(struct work_struct *work) work 6037 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, adhoc_check.work); work 6425 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_request_passive_scan(struct work_struct *work) work 6428 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, request_passive_scan.work); work 6432 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_request_scan(struct work_struct *work) work 6435 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, request_scan.work); work 6439 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_request_direct_scan(struct work_struct *work) work 6442 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, request_direct_scan.work); work 6446 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_abort_scan(struct work_struct *work) work 6449 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, abort_scan); work 7208 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_qos_activate(struct work_struct *work) work 7211 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, qos_activate); work 7514 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_roam(struct work_struct *work) work 7517 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, roam); work 7628 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_associate(struct work_struct *work) work 7631 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, associate); work 9444 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct delayed_work *work = NULL; work 9456 drivers/net/wireless/intel/ipw2x00/ipw2200.c work = &priv->request_direct_scan; work 9458 drivers/net/wireless/intel/ipw2x00/ipw2200.c work = &priv->request_passive_scan; work 9462 drivers/net/wireless/intel/ipw2x00/ipw2200.c work = &priv->request_scan; work 9469 drivers/net/wireless/intel/ipw2x00/ipw2200.c schedule_delayed_work(work, 0); work 10577 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_rf_kill(struct work_struct *work) work 10580 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, rf_kill.work); work 10609 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_link_up(struct work_struct *work) work 10612 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, link_up); work 10640 
drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_link_down(struct work_struct *work) work 10643 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, link_down); work 11242 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_up(struct work_struct *work) work 11245 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, up); work 11313 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_bg_down(struct work_struct *work) work 11316 drivers/net/wireless/intel/ipw2x00/ipw2200.c container_of(work, struct ipw_priv, down); work 2445 drivers/net/wireless/intel/iwlegacy/3945-mac.c container_of(data, struct il_priv, init_alive_start.work); work 2460 drivers/net/wireless/intel/iwlegacy/3945-mac.c container_of(data, struct il_priv, alive_start.work); work 2481 drivers/net/wireless/intel/iwlegacy/3945-mac.c container_of(data, struct il_priv, _3945.rfkill_poll.work); work 1862 drivers/net/wireless/intel/iwlegacy/3945.c il3945_bg_reg_txpower_periodic(struct work_struct *work) work 1864 drivers/net/wireless/intel/iwlegacy/3945.c struct il_priv *il = container_of(work, struct il_priv, work 1865 drivers/net/wireless/intel/iwlegacy/3945.c _3945.thermal_periodic.work); work 5622 drivers/net/wireless/intel/iwlegacy/4965-mac.c container_of(data, struct il_priv, init_alive_start.work); work 5637 drivers/net/wireless/intel/iwlegacy/4965-mac.c container_of(data, struct il_priv, alive_start.work); work 5649 drivers/net/wireless/intel/iwlegacy/4965-mac.c il4965_bg_run_time_calib_work(struct work_struct *work) work 5651 drivers/net/wireless/intel/iwlegacy/4965-mac.c struct il_priv *il = container_of(work, struct il_priv, work 6193 drivers/net/wireless/intel/iwlegacy/4965-mac.c il4965_bg_txpower_work(struct work_struct *work) work 6195 drivers/net/wireless/intel/iwlegacy/4965-mac.c struct il_priv *il = container_of(work, struct il_priv, work 1598 drivers/net/wireless/intel/iwlegacy/common.c container_of(data, struct il_priv, scan_check.work); work 1660 drivers/net/wireless/intel/iwlegacy/common.c il_bg_abort_scan(struct work_struct *work) work 1662 drivers/net/wireless/intel/iwlegacy/common.c struct il_priv *il = container_of(work, struct il_priv, abort_scan); work 1674 drivers/net/wireless/intel/iwlegacy/common.c il_bg_scan_completed(struct work_struct *work) work 1676 drivers/net/wireless/intel/iwlegacy/common.c struct il_priv *il = container_of(work, struct il_priv, scan_completed); work 391 drivers/net/wireless/intel/iwlwifi/dvm/lib.c static void iwlagn_bt_traffic_change_work(struct work_struct *work) work 394 drivers/net/wireless/intel/iwlwifi/dvm/lib.c container_of(work, struct iwl_priv, bt_traffic_change_work); work 270 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_bg_beacon_update(struct work_struct *work) work 273 drivers/net/wireless/intel/iwlwifi/dvm/main.c container_of(work, struct iwl_priv, beacon_update); work 309 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_bg_bt_runtime_config(struct work_struct *work) work 312 drivers/net/wireless/intel/iwlwifi/dvm/main.c container_of(work, struct iwl_priv, bt_runtime_config); work 327 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_bg_bt_full_concurrency(struct work_struct *work) work 330 drivers/net/wireless/intel/iwlwifi/dvm/main.c container_of(work, struct iwl_priv, bt_full_concurrency); work 560 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_bg_tx_flush(struct work_struct *work) work 563 
drivers/net/wireless/intel/iwlwifi/dvm/main.c container_of(work, struct iwl_priv, tx_flush); work 963 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_bg_run_time_calib_work(struct work_struct *work) work 965 drivers/net/wireless/intel/iwlwifi/dvm/main.c struct iwl_priv *priv = container_of(work, struct iwl_priv, work 980 drivers/net/wireless/intel/iwlwifi/dvm/scan.c static void iwl_bg_start_internal_scan(struct work_struct *work) work 983 drivers/net/wireless/intel/iwlwifi/dvm/scan.c container_of(work, struct iwl_priv, start_internal_scan); work 1008 drivers/net/wireless/intel/iwlwifi/dvm/scan.c container_of(data, struct iwl_priv, scan_check.work); work 1020 drivers/net/wireless/intel/iwlwifi/dvm/scan.c static void iwl_bg_abort_scan(struct work_struct *work) work 1022 drivers/net/wireless/intel/iwlwifi/dvm/scan.c struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); work 1033 drivers/net/wireless/intel/iwlwifi/dvm/scan.c static void iwl_bg_scan_completed(struct work_struct *work) work 1036 drivers/net/wireless/intel/iwlwifi/dvm/scan.c container_of(work, struct iwl_priv, scan_completed); work 484 drivers/net/wireless/intel/iwlwifi/dvm/tt.c static void iwl_bg_ct_enter(struct work_struct *work) work 486 drivers/net/wireless/intel/iwlwifi/dvm/tt.c struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter); work 513 drivers/net/wireless/intel/iwlwifi/dvm/tt.c static void iwl_bg_ct_exit(struct work_struct *work) work 515 drivers/net/wireless/intel/iwlwifi/dvm/tt.c struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit); work 564 drivers/net/wireless/intel/iwlwifi/dvm/tt.c static void iwl_bg_tt_work(struct work_struct *work) work 566 drivers/net/wireless/intel/iwlwifi/dvm/tt.c struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work); work 2329 drivers/net/wireless/intel/iwlwifi/fw/dbg.c void iwl_fw_error_dump_wk(struct work_struct *work) work 2334 drivers/net/wireless/intel/iwlwifi/fw/dbg.c wks = container_of(work, typeof(fwrt->dump.wks[0]), wk.work); work 285 drivers/net/wireless/intel/iwlwifi/fw/dbg.h void iwl_fw_error_dump_wk(struct work_struct *work); work 204 drivers/net/wireless/intel/iwlwifi/fw/debugfs.c static void iwl_fw_timestamp_marker_wk(struct work_struct *work) work 208 drivers/net/wireless/intel/iwlwifi/fw/debugfs.c container_of(work, struct iwl_fw_runtime, timestamp.wk.work); work 1389 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); work 631 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h struct delayed_work work; work 1724 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h void iwl_mvm_scan_timeout_wk(struct work_struct *work); work 2039 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); work 2051 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h void iwl_mvm_tcm_work(struct work_struct *work); work 542 drivers/net/wireless/intel/iwlwifi/mvm/ops.c static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) work 545 drivers/net/wireless/intel/iwlwifi/mvm/ops.c container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work); work 718 drivers/net/wireless/intel/iwlwifi/mvm/ops.c INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work); work 899 drivers/net/wireless/intel/iwlwifi/mvm/ops.c cancel_delayed_work_sync(&mvm->tcm.work); work 1225 drivers/net/wireless/intel/iwlwifi/mvm/ops.c struct work_struct work; work 1232 drivers/net/wireless/intel/iwlwifi/mvm/ops.c reprobe = container_of(wk, struct iwl_mvm_reprobe, 
work); work 1286 drivers/net/wireless/intel/iwlwifi/mvm/ops.c INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); work 1287 drivers/net/wireless/intel/iwlwifi/mvm/ops.c schedule_work(&reprobe->work); work 280 drivers/net/wireless/intel/iwlwifi/mvm/rx.c schedule_delayed_work(&mvm->tcm.work, 0); work 1745 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c schedule_delayed_work(&mvm->tcm.work, 0); work 1705 drivers/net/wireless/intel/iwlwifi/mvm/scan.c void iwl_mvm_scan_timeout_wk(struct work_struct *work) work 1707 drivers/net/wireless/intel/iwlwifi/mvm/scan.c struct delayed_work *delayed_work = to_delayed_work(work); work 502 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) work 511 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work); work 277 drivers/net/wireless/intel/iwlwifi/mvm/tt.c static void check_exit_ctkill(struct work_struct *work) work 285 drivers/net/wireless/intel/iwlwifi/mvm/tt.c tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); work 1040 drivers/net/wireless/intel/iwlwifi/mvm/tx.c schedule_delayed_work(&mvm->tcm.work, 0); work 1119 drivers/net/wireless/intel/iwlwifi/mvm/utils.c uapsd_nonagg_detected_wk.work); work 1355 drivers/net/wireless/intel/iwlwifi/mvm/utils.c schedule_delayed_work(&mvm->tcm.work, work_delay); work 1362 drivers/net/wireless/intel/iwlwifi/mvm/utils.c void iwl_mvm_tcm_work(struct work_struct *work) work 1364 drivers/net/wireless/intel/iwlwifi/mvm/utils.c struct delayed_work *delayed_work = to_delayed_work(work); work 1366 drivers/net/wireless/intel/iwlwifi/mvm/utils.c tcm.work); work 1377 drivers/net/wireless/intel/iwlwifi/mvm/utils.c cancel_delayed_work_sync(&mvm->tcm.work); work 1408 drivers/net/wireless/intel/iwlwifi/mvm/utils.c schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD); work 1410 drivers/net/wireless/intel/iwlwifi/mvm/utils.c schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD); work 2024 drivers/net/wireless/intel/iwlwifi/pcie/trans.c struct work_struct work; work 2030 drivers/net/wireless/intel/iwlwifi/pcie/trans.c container_of(wk, struct iwl_trans_pcie_removal, work); work 2127 drivers/net/wireless/intel/iwlwifi/pcie/trans.c INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); work 2129 drivers/net/wireless/intel/iwlwifi/pcie/trans.c schedule_work(&removal->work); work 39 drivers/net/wireless/intersil/hostap/hostap.h void hostap_set_multicast_list_queue(struct work_struct *work); work 59 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_add_proc_queue(struct work_struct *work); work 62 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_wds_oper_queue(struct work_struct *work); work 1063 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_add_proc_queue(struct work_struct *work) work 1065 drivers/net/wireless/intersil/hostap/hostap_ap.c struct ap_data *ap = container_of(work, struct ap_data, work 1951 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_wds_oper_queue(struct work_struct *work) work 1953 drivers/net/wireless/intersil/hostap/hostap_ap.c struct ap_data *ap = container_of(work, struct ap_data, work 1633 drivers/net/wireless/intersil/hostap/hostap_hw.c static void handle_reset_queue(struct work_struct *work) work 1635 drivers/net/wireless/intersil/hostap/hostap_hw.c local_info_t *local = container_of(work, local_info_t, reset_queue); work 2849 drivers/net/wireless/intersil/hostap/hostap_hw.c static void handle_comms_qual_update(struct 
work_struct *work) work 2852 drivers/net/wireless/intersil/hostap/hostap_hw.c container_of(work, local_info_t, comms_qual_update); work 2997 drivers/net/wireless/intersil/hostap/hostap_hw.c static void handle_set_tim_queue(struct work_struct *work) work 2999 drivers/net/wireless/intersil/hostap/hostap_hw.c local_info_t *local = container_of(work, local_info_t, set_tim_queue); work 484 drivers/net/wireless/intersil/hostap/hostap_info.c static void handle_info_queue(struct work_struct *work) work 486 drivers/net/wireless/intersil/hostap/hostap_info.c local_info_t *local = container_of(work, local_info_t, info_queue); work 727 drivers/net/wireless/intersil/hostap/hostap_main.c void hostap_set_multicast_list_queue(struct work_struct *work) work 730 drivers/net/wireless/intersil/hostap/hostap_main.c container_of(work, local_info_t, set_multicast_list_queue); work 1140 drivers/net/wireless/intersil/orinoco/main.c static void orinoco_join_ap(struct work_struct *work) work 1143 drivers/net/wireless/intersil/orinoco/main.c container_of(work, struct orinoco_private, join_work); work 1296 drivers/net/wireless/intersil/orinoco/main.c static void orinoco_send_wevents(struct work_struct *work) work 1299 drivers/net/wireless/intersil/orinoco/main.c container_of(work, struct orinoco_private, wevent_work); work 1351 drivers/net/wireless/intersil/orinoco/main.c static void orinoco_process_scan_results(struct work_struct *work) work 1354 drivers/net/wireless/intersil/orinoco/main.c container_of(work, struct orinoco_private, process_scan); work 1721 drivers/net/wireless/intersil/orinoco/main.c void orinoco_reset(struct work_struct *work) work 1724 drivers/net/wireless/intersil/orinoco/main.c container_of(work, struct orinoco_private, reset_work); work 33 drivers/net/wireless/intersil/orinoco/main.h void orinoco_reset(struct work_struct *work); work 27 drivers/net/wireless/intersil/p54/led.c static void p54_update_leds(struct work_struct *work) work 29 drivers/net/wireless/intersil/p54/led.c struct p54_common *priv = container_of(work, struct p54_common, work 30 drivers/net/wireless/intersil/p54/led.c led_work.work); work 190 drivers/net/wireless/intersil/p54/main.c ieee80211_queue_delayed_work(dev, &priv->work, 0); work 207 drivers/net/wireless/intersil/p54/main.c cancel_delayed_work_sync(&priv->work); work 421 drivers/net/wireless/intersil/p54/main.c static void p54_work(struct work_struct *work) work 423 drivers/net/wireless/intersil/p54/main.c struct p54_common *priv = container_of(work, struct p54_common, work 424 drivers/net/wireless/intersil/p54/main.c work.work); work 787 drivers/net/wireless/intersil/p54/main.c INIT_DELAYED_WORK(&priv->work, p54_work); work 257 drivers/net/wireless/intersil/p54/p54.h struct delayed_work work; work 387 drivers/net/wireless/intersil/p54/p54spi.c ieee80211_queue_work(priv->hw, &priv->work); work 475 drivers/net/wireless/intersil/p54/p54spi.c ieee80211_queue_work(priv->hw, &priv->work); work 478 drivers/net/wireless/intersil/p54/p54spi.c static void p54spi_work(struct work_struct *work) work 480 drivers/net/wireless/intersil/p54/p54spi.c struct p54s_priv *priv = container_of(work, struct p54s_priv, work); work 581 drivers/net/wireless/intersil/p54/p54spi.c cancel_work_sync(&priv->work); work 637 drivers/net/wireless/intersil/p54/p54spi.c INIT_WORK(&priv->work, p54spi_work); work 98 drivers/net/wireless/intersil/p54/p54spi.h struct work_struct work; work 393 drivers/net/wireless/intersil/p54/txrx.c ieee80211_queue_delayed_work(priv->hw, &priv->work, work 162 
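The p54 hits just above (led_work.work, work.work) show the delayed-work variant of the same recovery. A struct delayed_work embeds a work_struct, and the callback is handed the inner member, so the container_of() member path has to reach through ".work". A minimal sketch, assuming hypothetical names (struct my_dev, cal_work):

    #include <linux/jiffies.h>      /* HZ */
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_dev {                         /* hypothetical */
            struct delayed_work cal_work;   /* wraps a work_struct */
    };

    static void my_cal_work(struct work_struct *work)
    {
            /* The handler gets the inner work_struct, hence ".work". */
            struct my_dev *dev = container_of(work, struct my_dev,
                                              cal_work.work);

            /* ...do the periodic job, then re-arm for the next round: */
            schedule_delayed_work(&dev->cal_work, HZ);
    }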
drivers/net/wireless/intersil/prism54/isl_ioctl.c prism54_update_stats(struct work_struct *work) work 164 drivers/net/wireless/intersil/prism54/isl_ioctl.c islpci_private *priv = container_of(work, islpci_private, stats_work); work 2469 drivers/net/wireless/intersil/prism54/isl_ioctl.c prism54_process_trap(struct work_struct *work) work 2472 drivers/net/wireless/intersil/prism54/isl_ioctl.c container_of(work, struct islpci_mgmtframe, ws); work 465 drivers/net/wireless/intersil/prism54/islpci_eth.c islpci_do_reset_and_wake(struct work_struct *work) work 467 drivers/net/wireless/intersil/prism54/islpci_eth.c islpci_private *priv = container_of(work, islpci_private, reset_task); work 2008 drivers/net/wireless/mac80211_hwsim.c static void hw_scan_work(struct work_struct *work) work 2011 drivers/net/wireless/mac80211_hwsim.c container_of(work, struct mac80211_hwsim_data, hw_scan.work); work 2164 drivers/net/wireless/mac80211_hwsim.c static void hw_roc_start(struct work_struct *work) work 2167 drivers/net/wireless/mac80211_hwsim.c container_of(work, struct mac80211_hwsim_data, roc_start.work); work 2181 drivers/net/wireless/mac80211_hwsim.c static void hw_roc_done(struct work_struct *work) work 2184 drivers/net/wireless/mac80211_hwsim.c container_of(work, struct mac80211_hwsim_data, roc_done.work); work 680 drivers/net/wireless/marvell/libertas/cfg.c static void lbs_scan_worker(struct work_struct *work) work 683 drivers/net/wireless/marvell/libertas/cfg.c container_of(work, struct lbs_private, scan_work.work); work 396 drivers/net/wireless/marvell/libertas/if_sdio.c static void if_sdio_host_to_card_worker(struct work_struct *work) work 403 drivers/net/wireless/marvell/libertas/if_sdio.c card = container_of(work, struct if_sdio_card, packet_worker); work 1034 drivers/net/wireless/marvell/libertas/if_sdio.c static void if_sdio_reset_card_worker(struct work_struct *work) work 843 drivers/net/wireless/marvell/libertas/if_spi.c static void if_spi_host_to_card_worker(struct work_struct *work) work 852 drivers/net/wireless/marvell/libertas/if_spi.c card = container_of(work, struct if_spi_card, packet_work); work 1072 drivers/net/wireless/marvell/libertas/if_spi.c static void if_spi_resume_worker(struct work_struct *work) work 1076 drivers/net/wireless/marvell/libertas/if_spi.c card = container_of(work, struct if_spi_card, resume_work); work 414 drivers/net/wireless/marvell/libertas/main.c static void lbs_set_mcast_worker(struct work_struct *work) work 416 drivers/net/wireless/marvell/libertas/main.c struct lbs_private *priv = container_of(work, struct lbs_private, mcast_work); work 83 drivers/net/wireless/marvell/libertas_tf/main.c static void lbtf_cmd_work(struct work_struct *work) work 85 drivers/net/wireless/marvell/libertas_tf/main.c struct lbtf_private *priv = container_of(work, struct lbtf_private, work 197 drivers/net/wireless/marvell/libertas_tf/main.c static void lbtf_tx_work(struct work_struct *work) work 199 drivers/net/wireless/marvell/libertas_tf/main.c struct lbtf_private *priv = container_of(work, struct lbtf_private, work 123 drivers/net/wireless/marvell/mwifiex/11h.c void mwifiex_dfs_cac_work_queue(struct work_struct *work) work 126 drivers/net/wireless/marvell/mwifiex/11h.c struct delayed_work *delayed_work = to_delayed_work(work); work 281 drivers/net/wireless/marvell/mwifiex/11h.c void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) work 284 drivers/net/wireless/marvell/mwifiex/11h.c struct delayed_work *delayed_work = to_delayed_work(work); work 1368 
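The mwifiex 11h hits above (mwifiex_dfs_cac_work_queue, mwifiex_dfs_chan_sw_work_queue) spell the same recovery out in two steps via to_delayed_work() instead of a ".work" member path. Both forms are equivalent; a hedged sketch with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_dev {                         /* hypothetical */
            struct delayed_work cac_work;
    };

    static void my_cac_timeout(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);
            /*
             * dwork already points at the delayed_work, so the member
             * path stops at cac_work (contrast with ".work" above).
             */
            struct my_dev *dev = container_of(dwork, struct my_dev,
                                              cac_work);
            (void)dev;
    }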
drivers/net/wireless/marvell/mwifiex/main.c static void mwifiex_rx_work_queue(struct work_struct *work) work 1371 drivers/net/wireless/marvell/mwifiex/main.c container_of(work, struct mwifiex_adapter, rx_work); work 1384 drivers/net/wireless/marvell/mwifiex/main.c static void mwifiex_main_work_queue(struct work_struct *work) work 1387 drivers/net/wireless/marvell/mwifiex/main.c container_of(work, struct mwifiex_adapter, main_work); work 862 drivers/net/wireless/marvell/mwifiex/main.h void (*iface_work)(struct work_struct *work); work 1664 drivers/net/wireless/marvell/mwifiex/main.h void mwifiex_dfs_cac_work_queue(struct work_struct *work); work 1665 drivers/net/wireless/marvell/mwifiex/main.h void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work); work 52 drivers/net/wireless/marvell/mwifiex/pcie.c static void mwifiex_pcie_work(struct work_struct *work); work 254 drivers/net/wireless/marvell/mwifiex/pcie.c INIT_WORK(&card->work, mwifiex_pcie_work); work 329 drivers/net/wireless/marvell/mwifiex/pcie.c schedule_work(&card->work); work 2811 drivers/net/wireless/marvell/mwifiex/pcie.c static void mwifiex_pcie_work(struct work_struct *work) work 2814 drivers/net/wireless/marvell/mwifiex/pcie.c container_of(work, struct pcie_service_card, work); work 2831 drivers/net/wireless/marvell/mwifiex/pcie.c schedule_work(&card->work); work 2839 drivers/net/wireless/marvell/mwifiex/pcie.c schedule_work(&card->work); work 2998 drivers/net/wireless/marvell/mwifiex/pcie.c cancel_work_sync(&card->work); work 392 drivers/net/wireless/marvell/mwifiex/pcie.h struct work_struct work; work 34 drivers/net/wireless/marvell/mwifiex/sdio.c static void mwifiex_sdio_work(struct work_struct *work); work 124 drivers/net/wireless/marvell/mwifiex/sdio.c INIT_WORK(&card->work, mwifiex_sdio_work); work 480 drivers/net/wireless/marvell/mwifiex/sdio.c schedule_work(&card->work); work 2184 drivers/net/wireless/marvell/mwifiex/sdio.c cancel_work_sync(&card->work); work 2557 drivers/net/wireless/marvell/mwifiex/sdio.c static void mwifiex_sdio_work(struct work_struct *work) work 2560 drivers/net/wireless/marvell/mwifiex/sdio.c container_of(work, struct sdio_mmc_card, work); work 2576 drivers/net/wireless/marvell/mwifiex/sdio.c schedule_work(&card->work); work 2586 drivers/net/wireless/marvell/mwifiex/sdio.c schedule_work(&card->work); work 273 drivers/net/wireless/marvell/mwifiex/sdio.h struct work_struct work; work 3813 drivers/net/wireless/marvell/mwl8k.c static void mwl8k_watchdog_ba_events(struct work_struct *work) work 3819 drivers/net/wireless/marvell/mwl8k.c container_of(work, struct mwl8k_priv, watchdog_ba_handle); work 4899 drivers/net/wireless/marvell/mwl8k.c static void mwl8k_hw_restart_work(struct work_struct *work) work 4902 drivers/net/wireless/marvell/mwl8k.c container_of(work, struct mwl8k_priv, fw_reload); work 5626 drivers/net/wireless/marvell/mwl8k.c static void mwl8k_finalize_join_worker(struct work_struct *work) work 5629 drivers/net/wireless/marvell/mwl8k.c container_of(work, struct mwl8k_priv, finalize_join_worker); work 84 drivers/net/wireless/mediatek/mt76/agg-rx.c mt76_rx_aggr_reorder_work(struct work_struct *work) work 86 drivers/net/wireless/mediatek/mt76/agg-rx.c struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid, work 87 drivers/net/wireless/mediatek/mt76/agg-rx.c reorder_work.work); work 1677 drivers/net/wireless/mediatek/mt76/mt7603/mac.c void mt7603_mac_work(struct work_struct *work) work 1679 drivers/net/wireless/mediatek/mt76/mt7603/mac.c struct mt7603_dev *dev = 
container_of(work, struct mt7603_dev, work 1680 drivers/net/wireless/mediatek/mt76/mt7603/mac.c mt76.mac_work.work); work 19 drivers/net/wireless/mediatek/mt76/mt7603/main.c mt7603_mac_work(&dev->mt76.mac_work.work); work 197 drivers/net/wireless/mediatek/mt76/mt7603/main.c mt7603_mac_work(&dev->mt76.mac_work.work); work 197 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_mac_work(struct work_struct *work); work 1263 drivers/net/wireless/mediatek/mt76/mt7615/mac.c void mt7615_mac_work(struct work_struct *work) work 1267 drivers/net/wireless/mediatek/mt76/mt7615/mac.c dev = (struct mt7615_dev *)container_of(work, struct mt76_dev, work 1268 drivers/net/wireless/mediatek/mt76/mt7615/mac.c mac_work.work); work 270 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_mac_work(struct work_struct *work); work 1100 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c static void mt76x0_phy_calibration_work(struct work_struct *work) work 1102 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, work 1103 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c cal_work.work); work 162 drivers/net/wireless/mediatek/mt76/mt76x02.h void mt76x02_wdt_work(struct work_struct *work); work 1089 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c void mt76x02_mac_work(struct work_struct *work) work 1091 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, work 1092 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c mt76.mac_work.work); work 193 drivers/net/wireless/mediatek/mt76/mt76x02_mac.h void mt76x02_mac_work(struct work_struct *work); work 547 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c void mt76x02_wdt_work(struct work_struct *work) work 549 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, work 550 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c wdt_work.work); work 166 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c static void mt76x02u_pre_tbtt_work(struct work_struct *work) work 169 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c container_of(work, struct mt76x02_dev, pre_tbtt_work); work 52 drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h void mt76x2_phy_calibrate(struct work_struct *work); work 32 drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h void mt76x2u_phy_calibrate(struct work_struct *work); work 281 drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c void mt76x2_phy_calibrate(struct work_struct *work) work 285 drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c dev = container_of(work, struct mt76x02_dev, cal_work.work); work 42 drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c void mt76x2u_phy_calibrate(struct work_struct *work) work 46 drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c dev = container_of(work, struct mt76x02_dev, cal_work.work); work 710 drivers/net/wireless/mediatek/mt76/usb.c static void mt76u_tx_status_data(struct work_struct *work) work 717 drivers/net/wireless/mediatek/mt76/usb.c usb = container_of(work, struct mt76_usb, stat_work.work); work 301 drivers/net/wireless/mediatek/mt7601u/mac.c void mt7601u_mac_work(struct work_struct *work) work 303 drivers/net/wireless/mediatek/mt7601u/mac.c struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, work 304 drivers/net/wireless/mediatek/mt7601u/mac.c mac_work.work); work 287 drivers/net/wireless/mediatek/mt7601u/main.c ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, work 52 
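One detail worth flagging in the mt7603 hits above: main.c (lines 19 and 197) invokes mt7603_mac_work(&dev->mt76.mac_work.work) directly rather than queueing it. A work handler is an ordinary function, so passing it the embedded work_struct by hand runs one iteration synchronously in the caller's context; this is safe as long as the caller may sleep and no queued instance is running concurrently. A hedged, self-contained sketch of the trick (names hypothetical):

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_dev {                         /* hypothetical */
            struct delayed_work mac_work;
    };

    static void my_mac_work(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev,
                                              mac_work.work);
            (void)dev;                      /* periodic maintenance here */
    }

    static void my_dev_open(struct my_dev *dev)
    {
            my_mac_work(&dev->mac_work.work);       /* run once, right now */
            schedule_delayed_work(&dev->mac_work, HZ); /* then periodically */
    }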
drivers/net/wireless/mediatek/mt7601u/mt7601u.h struct delayed_work work; work 358 drivers/net/wireless/mediatek/mt7601u/mt7601u.h void mt7601u_mac_work(struct work_struct *work); work 373 drivers/net/wireless/mediatek/mt7601u/mt7601u.h void mt7601u_tx_stat(struct work_struct *work); work 448 drivers/net/wireless/mediatek/mt7601u/phy.c cancel_delayed_work_sync(&dev->freq_cal.work); work 462 drivers/net/wireless/mediatek/mt7601u/phy.c ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, work 999 drivers/net/wireless/mediatek/mt7601u/phy.c static void mt7601u_phy_calibrate(struct work_struct *work) work 1001 drivers/net/wireless/mediatek/mt7601u/phy.c struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, work 1002 drivers/net/wireless/mediatek/mt7601u/phy.c cal_work.work); work 1073 drivers/net/wireless/mediatek/mt7601u/phy.c static void mt7601u_phy_freq_cal(struct work_struct *work) work 1075 drivers/net/wireless/mediatek/mt7601u/phy.c struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, work 1076 drivers/net/wireless/mediatek/mt7601u/phy.c freq_cal.work.work); work 1087 drivers/net/wireless/mediatek/mt7601u/phy.c ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay); work 1098 drivers/net/wireless/mediatek/mt7601u/phy.c cancel_delayed_work_sync(&dev->freq_cal.work); work 1112 drivers/net/wireless/mediatek/mt7601u/phy.c ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, work 1249 drivers/net/wireless/mediatek/mt7601u/phy.c INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal); work 228 drivers/net/wireless/mediatek/mt7601u/tx.c void mt7601u_tx_stat(struct work_struct *work) work 230 drivers/net/wireless/mediatek/mt7601u/tx.c struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, work 231 drivers/net/wireless/mediatek/mt7601u/tx.c stat_work.work); work 314 drivers/net/wireless/quantenna/qtnfmac/core.c static void qtnf_vif_reset_handler(struct work_struct *work) work 316 drivers/net/wireless/quantenna/qtnfmac/core.c struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work); work 368 drivers/net/wireless/quantenna/qtnfmac/core.c static void qtnf_mac_scan_timeout(struct work_struct *work) work 371 drivers/net/wireless/quantenna/qtnfmac/core.c container_of(work, struct qtnf_wmac, scan_timeout.work); work 377 drivers/net/wireless/quantenna/qtnfmac/core.c static void qtnf_vif_send_data_high_pri(struct work_struct *work) work 380 drivers/net/wireless/quantenna/qtnfmac/core.c container_of(work, struct qtnf_vif, high_pri_tx_work); work 141 drivers/net/wireless/quantenna/qtnfmac/core.h void qtnf_main_work_queue(struct work_struct *work); work 721 drivers/net/wireless/quantenna/qtnfmac/event.c void qtnf_event_work_handler(struct work_struct *work) work 723 drivers/net/wireless/quantenna/qtnfmac/event.c struct qtnf_bus *bus = container_of(work, struct qtnf_bus, event_work); work 12 drivers/net/wireless/quantenna/qtnfmac/event.h void qtnf_event_work_handler(struct work_struct *work); work 979 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c static void qtnf_pearl_fw_work_handler(struct work_struct *work) work 981 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work); work 1030 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c static void qtnf_topaz_fw_work_handler(struct work_struct *work) work 1032 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work); work 46 
drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c static void qtnf_shm_ipc_irq_work(struct work_struct *work) work 48 drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc, work 786 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c static void rt2800mmio_work_txdone(struct work_struct *work) work 789 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c container_of(work, struct rt2x00_dev, txdone_work); work 456 drivers/net/wireless/ralink/rt2x00/rt2800usb.c static void rt2800usb_work_txdone(struct work_struct *work) work 459 drivers/net/wireless/ralink/rt2x00/rt2800usb.c container_of(work, struct rt2x00_dev, txdone_work); work 316 drivers/net/wireless/ralink/rt2x00/rt2x00.h struct delayed_work work; work 136 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c static void rt2x00lib_intf_scheduled(struct work_struct *work) work 139 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c container_of(work, struct rt2x00_dev, intf_work); work 151 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c static void rt2x00lib_autowakeup(struct work_struct *work) work 154 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c container_of(work, struct rt2x00_dev, autowakeup_work.work); work 580 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c static void rt2x00lib_sleep(struct work_struct *work) work 583 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c container_of(work, struct rt2x00_dev, sleep_work); work 244 drivers/net/wireless/ralink/rt2x00/rt2x00link.c &link->work, LINK_TUNE_INTERVAL); work 249 drivers/net/wireless/ralink/rt2x00/rt2x00link.c cancel_delayed_work_sync(&rt2x00dev->link.work); work 341 drivers/net/wireless/ralink/rt2x00/rt2x00link.c static void rt2x00link_tuner(struct work_struct *work) work 344 drivers/net/wireless/ralink/rt2x00/rt2x00link.c container_of(work, struct rt2x00_dev, link.work.work); work 379 drivers/net/wireless/ralink/rt2x00/rt2x00link.c &link->work, LINK_TUNE_INTERVAL); work 398 drivers/net/wireless/ralink/rt2x00/rt2x00link.c static void rt2x00link_watchdog(struct work_struct *work) work 401 drivers/net/wireless/ralink/rt2x00/rt2x00link.c container_of(work, struct rt2x00_dev, link.watchdog_work.work); work 423 drivers/net/wireless/ralink/rt2x00/rt2x00link.c INIT_DELAYED_WORK(&link->work, rt2x00link_tuner); work 245 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c static void rt2x00usb_work_txdone(struct work_struct *work) work 248 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c container_of(work, struct rt2x00_dev, txdone_work); work 341 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c static void rt2x00usb_work_rxdone(struct work_struct *work) work 344 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c container_of(work, struct rt2x00_dev, rxdone_work); work 1280 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c static void rtl8180_beacon_work(struct work_struct *work) work 1283 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c container_of(work, struct rtl8180_vif, beacon_work.work); work 1558 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c schedule_work(&vif_priv->beacon_work.work); work 223 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c ieee80211_queue_delayed_work(hw, &priv->work, 0); work 883 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c static void rtl8187_work(struct work_struct *work) work 892 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, work 893 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c work.work); work 1015 
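The rt2x00 entries above trace the full lifecycle of a periodic delayed work: INIT_DELAYED_WORK(&link->work, rt2x00link_tuner) at setup, queueing with LINK_TUNE_INTERVAL, re-queueing from inside the tuner, and cancel_delayed_work_sync() on teardown. Condensed into one hedged sketch (names and interval are assumptions, not rt2x00's values):

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    #define TUNE_INTERVAL   (HZ)            /* assumed interval */

    struct my_link {                        /* hypothetical */
            struct delayed_work work;
    };

    static void my_link_tuner(struct work_struct *work)
    {
            struct my_link *link = container_of(work, struct my_link,
                                                work.work);
            /* ...tune... then re-arm for the next interval: */
            schedule_delayed_work(&link->work, TUNE_INTERVAL);
    }

    static void my_link_start(struct my_link *link)
    {
            INIT_DELAYED_WORK(&link->work, my_link_tuner);
            schedule_delayed_work(&link->work, TUNE_INTERVAL);
    }

    static void my_link_stop(struct my_link *link)
    {
            /* waits for a running instance; safe even though the
             * handler re-queues itself */
            cancel_delayed_work_sync(&link->work);
    }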
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c INIT_DELAYED_WORK(&priv->work, rtl8187_work); work 1051 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c cancel_delayed_work_sync(&priv->work); work 1063 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c static void rtl8187_beacon_work(struct work_struct *work) work 1066 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c container_of(work, struct rtl8187_vif, beacon_work.work); work 1298 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c schedule_work(&vif_priv->beacon_work.work); work 22 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c static void led_turn_on(struct work_struct *work) work 28 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, work 29 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c led_on.work); work 60 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c static void led_turn_off(struct work_struct *work) work 66 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, work 67 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c led_off.work); work 117 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h struct delayed_work work; work 5111 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c static void rtl8xxxu_rx_urb_work(struct work_struct *work) work 5120 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c priv = container_of(work, struct rtl8xxxu_priv, rx_urb_wq); work 646 drivers/net/wireless/realtek/rtlwifi/ps.c void rtl_lps_change_work_callback(struct work_struct *work) work 649 drivers/net/wireless/realtek/rtlwifi/ps.c container_of(work, struct rtl_works, lps_change_work); work 26 drivers/net/wireless/realtek/rtlwifi/ps.h void rtl_lps_change_work_callback(struct work_struct *work); work 987 drivers/net/wireless/realtek/rtlwifi/usb.c static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work) work 990 drivers/net/wireless/realtek/rtlwifi/usb.c container_of(work, struct rtl_works, fill_h2c_cmd); work 2473 drivers/net/wireless/realtek/rtw88/coex.c void rtw_coex_bt_relink_work(struct work_struct *work) work 2475 drivers/net/wireless/realtek/rtw88/coex.c struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, work 2476 drivers/net/wireless/realtek/rtw88/coex.c coex.bt_relink_work.work); work 2485 drivers/net/wireless/realtek/rtw88/coex.c void rtw_coex_bt_reenable_work(struct work_struct *work) work 2487 drivers/net/wireless/realtek/rtw88/coex.c struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, work 2488 drivers/net/wireless/realtek/rtw88/coex.c coex.bt_reenable_work.work); work 2496 drivers/net/wireless/realtek/rtw88/coex.c void rtw_coex_defreeze_work(struct work_struct *work) work 2498 drivers/net/wireless/realtek/rtw88/coex.c struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, work 2499 drivers/net/wireless/realtek/rtw88/coex.c coex.defreeze_work.work); work 354 drivers/net/wireless/realtek/rtw88/coex.h void rtw_coex_bt_relink_work(struct work_struct *work); work 355 drivers/net/wireless/realtek/rtw88/coex.h void rtw_coex_bt_reenable_work(struct work_struct *work); work 356 drivers/net/wireless/realtek/rtw88/coex.h void rtw_coex_defreeze_work(struct work_struct *work); work 148 drivers/net/wireless/realtek/rtw88/main.c static void rtw_watch_dog_work(struct work_struct *work) work 150 drivers/net/wireless/realtek/rtw88/main.c struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, work 151 drivers/net/wireless/realtek/rtw88/main.c 
watch_dog_work.work); work 194 drivers/net/wireless/realtek/rtw88/main.c static void rtw_c2h_work(struct work_struct *work) work 196 drivers/net/wireless/realtek/rtw88/main.c struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, c2h_work); work 94 drivers/net/wireless/realtek/rtw88/ps.c void rtw_lps_work(struct work_struct *work) work 96 drivers/net/wireless/realtek/rtw88/ps.c struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, work 97 drivers/net/wireless/realtek/rtw88/ps.c lps_work.work); work 13 drivers/net/wireless/realtek/rtw88/ps.h void rtw_lps_work(struct work_struct *work); work 422 drivers/net/wireless/rndis_wlan.c struct work_struct work; work 2118 drivers/net/wireless/rndis_wlan.c static void rndis_get_scan_results(struct work_struct *work) work 2121 drivers/net/wireless/rndis_wlan.c container_of(work, struct rndis_wlan_private, scan_work.work); work 2741 drivers/net/wireless/rndis_wlan.c queue_work(priv->workqueue, &priv->work); work 2865 drivers/net/wireless/rndis_wlan.c static void rndis_wlan_worker(struct work_struct *work) work 2868 drivers/net/wireless/rndis_wlan.c container_of(work, struct rndis_wlan_private, work); work 2890 drivers/net/wireless/rndis_wlan.c queue_work(priv->workqueue, &priv->work); work 3084 drivers/net/wireless/rndis_wlan.c queue_work(priv->workqueue, &priv->work); work 3092 drivers/net/wireless/rndis_wlan.c queue_work(priv->workqueue, &priv->work); work 3189 drivers/net/wireless/rndis_wlan.c static void rndis_device_poller(struct work_struct *work) work 3192 drivers/net/wireless/rndis_wlan.c container_of(work, struct rndis_wlan_private, work 3193 drivers/net/wireless/rndis_wlan.c dev_poller_work.work); work 3430 drivers/net/wireless/rndis_wlan.c INIT_WORK(&priv->work, rndis_wlan_worker); work 3514 drivers/net/wireless/rndis_wlan.c cancel_work_sync(&priv->work); work 3531 drivers/net/wireless/rndis_wlan.c cancel_work_sync(&priv->work); work 3575 drivers/net/wireless/rndis_wlan.c cancel_work_sync(&priv->work); work 48 drivers/net/wireless/st/cw1200/bh.c static void cw1200_bh_work(struct work_struct *work) work 51 drivers/net/wireless/st/cw1200/bh.c container_of(work, struct cw1200_common, bh_work); work 351 drivers/net/wireless/st/cw1200/main.c INIT_WORK(&priv->scan.work, cw1200_scan_work); work 122 drivers/net/wireless/st/cw1200/pm.c static long cw1200_suspend_work(struct delayed_work *work) work 124 drivers/net/wireless/st/cw1200/pm.c int ret = cancel_delayed_work(work); work 128 drivers/net/wireless/st/cw1200/pm.c tmo = work->timer.expires - jiffies; work 138 drivers/net/wireless/st/cw1200/pm.c struct delayed_work *work, work 144 drivers/net/wireless/st/cw1200/pm.c return queue_delayed_work(priv->workqueue, work, tmo); work 126 drivers/net/wireless/st/cw1200/scan.c queue_work(priv->workqueue, &priv->scan.work); work 130 drivers/net/wireless/st/cw1200/scan.c void cw1200_scan_work(struct work_struct *work) work 132 drivers/net/wireless/st/cw1200/scan.c struct cw1200_common *priv = container_of(work, struct cw1200_common, work 133 drivers/net/wireless/st/cw1200/scan.c scan.work); work 149 drivers/net/wireless/st/cw1200/scan.c cw1200_join_timeout(&priv->join_timeout.work); work 267 drivers/net/wireless/st/cw1200/scan.c queue_work(priv->workqueue, &priv->scan.work); work 300 drivers/net/wireless/st/cw1200/scan.c cw1200_scan_work(&priv->scan.work); work 330 drivers/net/wireless/st/cw1200/scan.c void cw1200_clear_recent_scan_work(struct work_struct *work) work 333 drivers/net/wireless/st/cw1200/scan.c container_of(work, struct cw1200_common, 
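Several drivers in this stretch (rndis_wlan, cw1200) queue onto a driver-private workqueue, queue_work(priv->workqueue, ...), instead of the shared system one, so long firmware round-trips cannot stall unrelated system work and teardown can drain everything in one place. A hedged sketch of that setup; alloc_ordered_workqueue() is one reasonable modern choice, though the listed drivers themselves partly predate it:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct my_priv {                        /* hypothetical */
            struct workqueue_struct *workqueue;
            struct work_struct scan_work;
    };

    static void my_scan_work(struct work_struct *work) { /* ... */ }

    static int my_priv_init(struct my_priv *priv)
    {
            /* ordered: at most one item runs at a time, in FIFO order */
            priv->workqueue = alloc_ordered_workqueue("my_wq", 0);
            if (!priv->workqueue)
                    return -ENOMEM;
            INIT_WORK(&priv->scan_work, my_scan_work);
            return 0;
    }

    static void my_trigger_scan(struct my_priv *priv)
    {
            queue_work(priv->workqueue, &priv->scan_work);
    }

    static void my_priv_exit(struct my_priv *priv)
    {
            destroy_workqueue(priv->workqueue); /* drains pending work */
    }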
work 334 drivers/net/wireless/st/cw1200/scan.c clear_recent_scan_work.work); work 338 drivers/net/wireless/st/cw1200/scan.c void cw1200_scan_timeout(struct work_struct *work) work 341 drivers/net/wireless/st/cw1200/scan.c container_of(work, struct cw1200_common, scan.timeout.work); work 356 drivers/net/wireless/st/cw1200/scan.c void cw1200_probe_work(struct work_struct *work) work 359 drivers/net/wireless/st/cw1200/scan.c container_of(work, struct cw1200_common, scan.probe_work.work); work 23 drivers/net/wireless/st/cw1200/scan.h struct work_struct work; work 42 drivers/net/wireless/st/cw1200/scan.h void cw1200_scan_work(struct work_struct *work); work 43 drivers/net/wireless/st/cw1200/scan.h void cw1200_scan_timeout(struct work_struct *work); work 44 drivers/net/wireless/st/cw1200/scan.h void cw1200_clear_recent_scan_work(struct work_struct *work); work 51 drivers/net/wireless/st/cw1200/scan.h void cw1200_probe_work(struct work_struct *work); work 511 drivers/net/wireless/st/cw1200/sta.c void cw1200_update_filtering_work(struct work_struct *work) work 514 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, work 520 drivers/net/wireless/st/cw1200/sta.c void cw1200_set_beacon_wakeup_period_work(struct work_struct *work) work 523 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, work 841 drivers/net/wireless/st/cw1200/sta.c void cw1200_wep_key_work(struct work_struct *work) work 844 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, wep_key_work); work 963 drivers/net/wireless/st/cw1200/sta.c void cw1200_event_handler(struct work_struct *work) work 966 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, event_handler); work 1034 drivers/net/wireless/st/cw1200/sta.c void cw1200_bss_loss_work(struct work_struct *work) work 1037 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, bss_loss_work.work); work 1045 drivers/net/wireless/st/cw1200/sta.c void cw1200_bss_params_work(struct work_struct *work) work 1048 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, bss_params_work); work 1182 drivers/net/wireless/st/cw1200/sta.c void cw1200_join_complete_work(struct work_struct *work) work 1185 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, join_complete_work); work 1366 drivers/net/wireless/st/cw1200/sta.c void cw1200_join_timeout(struct work_struct *work) work 1369 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, join_timeout.work); work 1437 drivers/net/wireless/st/cw1200/sta.c void cw1200_unjoin_work(struct work_struct *work) work 1440 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, unjoin_work); work 1703 drivers/net/wireless/st/cw1200/sta.c void cw1200_set_tim_work(struct work_struct *work) work 1706 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, set_tim_work); work 1718 drivers/net/wireless/st/cw1200/sta.c void cw1200_set_cts_work(struct work_struct *work) work 1721 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, set_cts_work); work 2080 drivers/net/wireless/st/cw1200/sta.c void cw1200_multicast_start_work(struct work_struct *work) work 2083 drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, multicast_start_work); work 2098 drivers/net/wireless/st/cw1200/sta.c void cw1200_multicast_stop_work(struct work_struct *work) work 2101 
drivers/net/wireless/st/cw1200/sta.c container_of(work, struct cw1200_common, multicast_stop_work); work 58 drivers/net/wireless/st/cw1200/sta.h void cw1200_event_handler(struct work_struct *work); work 59 drivers/net/wireless/st/cw1200/sta.h void cw1200_bss_loss_work(struct work_struct *work); work 60 drivers/net/wireless/st/cw1200/sta.h void cw1200_bss_params_work(struct work_struct *work); work 61 drivers/net/wireless/st/cw1200/sta.h void cw1200_keep_alive_work(struct work_struct *work); work 62 drivers/net/wireless/st/cw1200/sta.h void cw1200_tx_failure_work(struct work_struct *work); work 78 drivers/net/wireless/st/cw1200/sta.h void cw1200_join_timeout(struct work_struct *work); work 79 drivers/net/wireless/st/cw1200/sta.h void cw1200_unjoin_work(struct work_struct *work); work 80 drivers/net/wireless/st/cw1200/sta.h void cw1200_join_complete_work(struct work_struct *work); work 81 drivers/net/wireless/st/cw1200/sta.h void cw1200_wep_key_work(struct work_struct *work); work 84 drivers/net/wireless/st/cw1200/sta.h void cw1200_update_filtering_work(struct work_struct *work); work 85 drivers/net/wireless/st/cw1200/sta.h void cw1200_set_beacon_wakeup_period_work(struct work_struct *work); work 90 drivers/net/wireless/st/cw1200/sta.h void cw1200_ba_work(struct work_struct *work); work 113 drivers/net/wireless/st/cw1200/sta.h void cw1200_set_tim_work(struct work_struct *work); work 114 drivers/net/wireless/st/cw1200/sta.h void cw1200_set_cts_work(struct work_struct *work); work 115 drivers/net/wireless/st/cw1200/sta.h void cw1200_multicast_start_work(struct work_struct *work); work 116 drivers/net/wireless/st/cw1200/sta.h void cw1200_multicast_stop_work(struct work_struct *work); work 385 drivers/net/wireless/st/cw1200/txrx.c void tx_policy_upload_work(struct work_struct *work) work 388 drivers/net/wireless/st/cw1200/txrx.c container_of(work, struct cw1200_common, tx_policy_upload_work); work 1268 drivers/net/wireless/st/cw1200/txrx.c void cw1200_link_id_reset(struct work_struct *work) work 1271 drivers/net/wireless/st/cw1200/txrx.c container_of(work, struct cw1200_common, linkid_reset_work); work 1364 drivers/net/wireless/st/cw1200/txrx.c void cw1200_link_id_work(struct work_struct *work) work 1367 drivers/net/wireless/st/cw1200/txrx.c container_of(work, struct cw1200_common, link_id_work); work 1369 drivers/net/wireless/st/cw1200/txrx.c cw1200_link_id_gc_work(&priv->link_id_gc_work.work); work 1373 drivers/net/wireless/st/cw1200/txrx.c void cw1200_link_id_gc_work(struct work_struct *work) work 1376 drivers/net/wireless/st/cw1200/txrx.c container_of(work, struct cw1200_common, link_id_gc_work.work); work 53 drivers/net/wireless/st/cw1200/txrx.h void tx_policy_upload_work(struct work_struct *work); work 82 drivers/net/wireless/st/cw1200/txrx.h void cw1200_tx_timeout(struct work_struct *work); work 93 drivers/net/wireless/st/cw1200/txrx.h void cw1200_link_id_reset(struct work_struct *work); work 99 drivers/net/wireless/st/cw1200/txrx.h void cw1200_link_id_work(struct work_struct *work); work 100 drivers/net/wireless/st/cw1200/txrx.h void cw1200_link_id_gc_work(struct work_struct *work); work 197 drivers/net/wireless/ti/wl1251/main.c static void wl1251_irq_work(struct work_struct *work) work 201 drivers/net/wireless/ti/wl1251/main.c container_of(work, struct wl1251, irq_work); work 16 drivers/net/wireless/ti/wl1251/ps.c void wl1251_elp_work(struct work_struct *work) work 21 drivers/net/wireless/ti/wl1251/ps.c dwork = to_delayed_work(work); work 18 drivers/net/wireless/ti/wl1251/ps.h void 
wl1251_elp_work(struct work_struct *work); work 330 drivers/net/wireless/ti/wl1251/tx.c void wl1251_tx_work(struct work_struct *work) work 332 drivers/net/wireless/ti/wl1251/tx.c struct wl1251 *wl = container_of(work, struct wl1251, tx_work); work 213 drivers/net/wireless/ti/wl1251/tx.h void wl1251_tx_work(struct work_struct *work); work 127 drivers/net/wireless/ti/wlcore/main.c static void wl1271_rx_streaming_enable_work(struct work_struct *work) work 130 drivers/net/wireless/ti/wlcore/main.c struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, work 166 drivers/net/wireless/ti/wlcore/main.c static void wl1271_rx_streaming_disable_work(struct work_struct *work) work 169 drivers/net/wireless/ti/wlcore/main.c struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, work 214 drivers/net/wireless/ti/wlcore/main.c static void wlcore_rc_update_work(struct work_struct *work) work 217 drivers/net/wireless/ti/wlcore/main.c struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, work 249 drivers/net/wireless/ti/wlcore/main.c static void wl12xx_tx_watchdog_work(struct work_struct *work) work 254 drivers/net/wireless/ti/wlcore/main.c dwork = to_delayed_work(work); work 507 drivers/net/wireless/ti/wlcore/main.c static void wl1271_netstack_work(struct work_struct *work) work 510 drivers/net/wireless/ti/wlcore/main.c container_of(work, struct wl1271, netstack_work); work 918 drivers/net/wireless/ti/wlcore/main.c static void wl1271_recovery_work(struct work_struct *work) work 921 drivers/net/wireless/ti/wlcore/main.c container_of(work, struct wl1271, recovery_work); work 2031 drivers/net/wireless/ti/wlcore/main.c static void wlcore_channel_switch_work(struct work_struct *work) work 2039 drivers/net/wireless/ti/wlcore/main.c dwork = to_delayed_work(work); work 2071 drivers/net/wireless/ti/wlcore/main.c static void wlcore_connection_loss_work(struct work_struct *work) work 2078 drivers/net/wireless/ti/wlcore/main.c dwork = to_delayed_work(work); work 2099 drivers/net/wireless/ti/wlcore/main.c static void wlcore_pending_auth_complete_work(struct work_struct *work) work 2107 drivers/net/wireless/ti/wlcore/main.c dwork = to_delayed_work(work); work 5738 drivers/net/wireless/ti/wlcore/main.c static void wlcore_roc_complete_work(struct work_struct *work) work 5744 drivers/net/wireless/ti/wlcore/main.c dwork = to_delayed_work(work); work 20 drivers/net/wireless/ti/wlcore/scan.c void wl1271_scan_complete_work(struct work_struct *work) work 30 drivers/net/wireless/ti/wlcore/scan.c dwork = to_delayed_work(work); work 22 drivers/net/wireless/ti/wlcore/scan.h void wl1271_scan_complete_work(struct work_struct *work); work 852 drivers/net/wireless/ti/wlcore/tx.c void wl1271_tx_work(struct work_struct *work) work 854 drivers/net/wireless/ti/wlcore/tx.c struct wl1271 *wl = container_of(work, struct wl1271, tx_work); work 229 drivers/net/wireless/ti/wlcore/tx.h void wl1271_tx_work(struct work_struct *work); work 156 drivers/net/wireless/virt_wifi.c static void virt_wifi_scan_result(struct work_struct *work) work 167 drivers/net/wireless/virt_wifi.c container_of(work, struct virt_wifi_wiphy_priv, work 168 drivers/net/wireless/virt_wifi.c scan_result.work); work 238 drivers/net/wireless/virt_wifi.c static void virt_wifi_connect_complete(struct work_struct *work) work 241 drivers/net/wireless/virt_wifi.c container_of(work, struct virt_wifi_netdev_priv, connect.work); work 1180 drivers/net/wireless/zydas/zd1211rw/zd_mac.c static void zd_process_intr(struct work_struct *work) work 1184 
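The wl1251 hits just above (irq_work, tx_work) illustrate why this pattern saturates wireless drivers: the hard interrupt handler cannot sleep, but talking to the chip over SDIO/SPI must, so the IRQ handler only queues work and returns, leaving the bus traffic to process context. A hedged sketch with hypothetical names:

    #include <linux/interrupt.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_chip {                        /* hypothetical */
            struct work_struct irq_work;
    };

    static irqreturn_t my_hard_irq(int irq, void *cookie)
    {
            struct my_chip *chip = cookie;

            schedule_work(&chip->irq_work); /* atomic-safe, never sleeps */
            return IRQ_HANDLED;
    }

    static void my_irq_work(struct work_struct *work)
    {
            struct my_chip *chip = container_of(work, struct my_chip,
                                                irq_work);
            /* process context: bus transfers that sleep are fine here */
            (void)chip;
    }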
drivers/net/wireless/zydas/zd1211rw/zd_mac.c struct zd_mac *mac = container_of(work, struct zd_mac, process_intr); work 1423 drivers/net/wireless/zydas/zd1211rw/zd_mac.c static void beacon_watchdog_handler(struct work_struct *work) work 1426 drivers/net/wireless/zydas/zd1211rw/zd_mac.c container_of(work, struct zd_mac, beacon.watchdog_work.work); work 1496 drivers/net/wireless/zydas/zd1211rw/zd_mac.c static void link_led_handler(struct work_struct *work) work 1499 drivers/net/wireless/zydas/zd1211rw/zd_mac.c container_of(work, struct zd_mac, housekeeping.link_led_work.work); work 1084 drivers/net/wireless/zydas/zd1211rw/zd_usb.c static void zd_tx_watchdog_handler(struct work_struct *work) work 1087 drivers/net/wireless/zydas/zd1211rw/zd_usb.c container_of(work, struct zd_usb, tx.watchdog_work.work); work 1130 drivers/net/wireless/zydas/zd1211rw/zd_usb.c static void zd_rx_idle_timer_handler(struct work_struct *work) work 1133 drivers/net/wireless/zydas/zd1211rw/zd_usb.c container_of(work, struct zd_usb, rx.idle_work.work); work 401 drivers/nfc/nfcmrvl/fw_dnld.c static void fw_dnld_rx_work(struct work_struct *work) work 405 drivers/nfc/nfcmrvl/fw_dnld.c struct nfcmrvl_fw_dnld *fw_dnld = container_of(work, work 283 drivers/nfc/nfcmrvl/usb.c static void nfcmrvl_waker(struct work_struct *work) work 286 drivers/nfc/nfcmrvl/usb.c container_of(work, struct nfcmrvl_usb_drv_data, waker); work 170 drivers/nfc/nfcsim.c static void nfcsim_send_wq(struct work_struct *work) work 172 drivers/nfc/nfcsim.c struct nfcsim *dev = container_of(work, struct nfcsim, send_work.work); work 182 drivers/nfc/nfcsim.c static void nfcsim_recv_wq(struct work_struct *work) work 184 drivers/nfc/nfcsim.c struct nfcsim *dev = container_of(work, struct nfcsim, recv_work); work 116 drivers/nfc/nxp-nci/core.c INIT_WORK(&info->fw_info.work, nxp_nci_fw_work); work 158 drivers/nfc/nxp-nci/core.c cancel_work_sync(&info->fw_info.work); work 158 drivers/nfc/nxp-nci/firmware.c schedule_work(&fw_info->work); work 169 drivers/nfc/nxp-nci/firmware.c void nxp_nci_fw_work(struct work_struct *work) work 175 drivers/nfc/nxp-nci/firmware.c fw_info = container_of(work, struct nxp_nci_fw_info, work); work 238 drivers/nfc/nxp-nci/firmware.c schedule_work(&fw_info->work); work 310 drivers/nfc/nxp-nci/firmware.c schedule_work(&fw_info->work); work 46 drivers/nfc/nxp-nci/nxp-nci.h struct work_struct work; work 68 drivers/nfc/nxp-nci/nxp-nci.h void nxp_nci_fw_work(struct work_struct *work); work 524 drivers/nfc/pn533/pn533.c static void pn533_wq_cmd_complete(struct work_struct *work) work 526 drivers/nfc/pn533/pn533.c struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work); work 534 drivers/nfc/pn533/pn533.c static void pn533_wq_cmd(struct work_struct *work) work 536 drivers/nfc/pn533/pn533.c struct pn533 *dev = container_of(work, struct pn533, cmd_work); work 1047 drivers/nfc/pn533/pn533.c static void pn533_wq_tm_mi_recv(struct work_struct *work); work 1100 drivers/nfc/pn533/pn533.c static void pn533_wq_tm_mi_recv(struct work_struct *work) work 1102 drivers/nfc/pn533/pn533.c struct pn533 *dev = container_of(work, struct pn533, mi_tm_rx_work); work 1124 drivers/nfc/pn533/pn533.c static void pn533_wq_tm_mi_send(struct work_struct *work) work 1126 drivers/nfc/pn533/pn533.c struct pn533 *dev = container_of(work, struct pn533, mi_tm_tx_work); work 1162 drivers/nfc/pn533/pn533.c static void pn533_wq_tg_get_data(struct work_struct *work) work 1164 drivers/nfc/pn533/pn533.c struct pn533 *dev = container_of(work, struct pn533, tg_work); work 
1259 drivers/nfc/pn533/pn533.c static void pn533_wq_rf(struct work_struct *work) work 1261 drivers/nfc/pn533/pn533.c struct pn533 *dev = container_of(work, struct pn533, rf_work); work 1505 drivers/nfc/pn533/pn533.c static void pn533_wq_poll(struct work_struct *work) work 1507 drivers/nfc/pn533/pn533.c struct pn533 *dev = container_of(work, struct pn533, poll_work.work); work 2242 drivers/nfc/pn533/pn533.c static void pn533_wq_mi_recv(struct work_struct *work) work 2244 drivers/nfc/pn533/pn533.c struct pn533 *dev = container_of(work, struct pn533, mi_rx_work); work 2292 drivers/nfc/pn533/pn533.c static void pn533_wq_mi_send(struct work_struct *work) work 2294 drivers/nfc/pn533/pn533.c struct pn533 *dev = container_of(work, struct pn533, mi_tx_work); work 737 drivers/nfc/pn544/i2c.c static void pn544_hci_i2c_fw_work(struct work_struct *work) work 739 drivers/nfc/pn544/i2c.c struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy, work 891 drivers/nfc/port100.c static void port100_wq_cmd_complete(struct work_struct *work) work 893 drivers/nfc/port100.c struct port100 *dev = container_of(work, struct port100, work 196 drivers/nfc/st-nci/ndlc.c static void llt_ndlc_sm_work(struct work_struct *work) work 198 drivers/nfc/st-nci/ndlc.c struct llt_ndlc *ndlc = container_of(work, struct llt_ndlc, sm_work); work 110 drivers/nfc/st21nfca/dep.c static void st21nfca_tx_work(struct work_struct *work) work 112 drivers/nfc/st21nfca/dep.c struct st21nfca_hci_info *info = container_of(work, work 1005 drivers/nfc/trf7970a.c static void trf7970a_timeout_work_handler(struct work_struct *work) work 1007 drivers/nfc/trf7970a.c struct trf7970a *trf = container_of(work, struct trf7970a, work 1008 drivers/nfc/trf7970a.c timeout_work.work); work 867 drivers/ntb/hw/amd/ntb_hw_amd.c static void amd_link_hb(struct work_struct *work) work 869 drivers/ntb/hw/amd/ntb_hw_amd.c struct amd_ntb_dev *ndev = hb_ndev(work); work 219 drivers/ntb/hw/amd/ntb_hw_amd.h #define hb_ndev(__work) container_of(__work, struct amd_ntb_dev, hb_timer.work) work 189 drivers/ntb/hw/intel/ntb_hw_intel.h hb_timer.work) work 518 drivers/ntb/hw/mscc/ntb_hw_switchtec.c static void check_link_status_work(struct work_struct *work) work 522 drivers/ntb/hw/mscc/ntb_hw_switchtec.c sndev = container_of(work, struct switchtec_ntb, work 951 drivers/ntb/ntb_transport.c static void ntb_qp_link_cleanup_work(struct work_struct *work) work 953 drivers/ntb/ntb_transport.c struct ntb_transport_qp *qp = container_of(work, work 1002 drivers/ntb/ntb_transport.c static void ntb_transport_link_cleanup_work(struct work_struct *work) work 1005 drivers/ntb/ntb_transport.c container_of(work, struct ntb_transport_ctx, link_cleanup); work 1020 drivers/ntb/ntb_transport.c static void ntb_transport_link_work(struct work_struct *work) work 1023 drivers/ntb/ntb_transport.c container_of(work, struct ntb_transport_ctx, link_work.work); work 1124 drivers/ntb/ntb_transport.c static void ntb_qp_link_work(struct work_struct *work) work 1126 drivers/ntb/ntb_transport.c struct ntb_transport_qp *qp = container_of(work, work 1128 drivers/ntb/ntb_transport.c link_work.work); work 56 drivers/ntb/test/ntb_msi_test.c static void ntb_msit_setup_work(struct work_struct *work) work 58 drivers/ntb/test/ntb_msi_test.c struct ntb_msit_ctx *nm = container_of(work, struct ntb_msit_ctx, work 179 drivers/ntb/test/ntb_perf.c struct work_struct work; work 182 drivers/ntb/test/ntb_perf.c container_of(__work, struct perf_thread, work) work 622 drivers/ntb/test/ntb_perf.c static void 
perf_service_work(struct work_struct *work) work 624 drivers/ntb/test/ntb_perf.c struct perf_peer *peer = to_peer_service(work); work 990 drivers/ntb/test/ntb_perf.c static void perf_thread_work(struct work_struct *work) work 992 drivers/ntb/test/ntb_perf.c struct perf_thread *pthr = to_thread_work(work); work 1044 drivers/ntb/test/ntb_perf.c cancel_work_sync(&perf->threads[tidx].work); work 1070 drivers/ntb/test/ntb_perf.c (void)queue_work(perf_wq, &pthr->work); work 1136 drivers/ntb/test/ntb_perf.c INIT_WORK(&pthr->work, perf_thread_work); work 72 drivers/nvdimm/nd-core.h void nvdimm_security_overwrite_query(struct work_struct *work); work 79 drivers/nvdimm/nd-core.h static inline void nvdimm_security_overwrite_query(struct work_struct *work) work 463 drivers/nvdimm/security.c void nvdimm_security_overwrite_query(struct work_struct *work) work 466 drivers/nvdimm/security.c container_of(work, typeof(*nvdimm), dwork.work); work 178 drivers/nvme/host/core.c static void nvme_delete_ctrl_work(struct work_struct *work) work 181 drivers/nvme/host/core.c container_of(work, struct nvme_ctrl, delete_work); work 996 drivers/nvme/host/core.c static void nvme_keep_alive_work(struct work_struct *work) work 998 drivers/nvme/host/core.c struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), work 3722 drivers/nvme/host/core.c static void nvme_scan_work(struct work_struct *work) work 3725 drivers/nvme/host/core.c container_of(work, struct nvme_ctrl, scan_work); work 3838 drivers/nvme/host/core.c static void nvme_async_event_work(struct work_struct *work) work 3841 drivers/nvme/host/core.c container_of(work, struct nvme_ctrl, async_event_work); work 3875 drivers/nvme/host/core.c static void nvme_fw_act_work(struct work_struct *work) work 3877 drivers/nvme/host/core.c struct nvme_ctrl *ctrl = container_of(work, work 2935 drivers/nvme/host/fc.c nvme_fc_reset_ctrl_work(struct work_struct *work) work 2938 drivers/nvme/host/fc.c container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); work 2959 drivers/nvme/host/fc.c nvme_fc_connect_err_work(struct work_struct *work) work 2962 drivers/nvme/host/fc.c container_of(work, struct nvme_fc_ctrl, err_work); work 2990 drivers/nvme/host/fc.c nvme_fc_connect_ctrl_work(struct work_struct *work) work 2995 drivers/nvme/host/fc.c container_of(to_delayed_work(work), work 339 drivers/nvme/host/multipath.c static void nvme_requeue_work(struct work_struct *work) work 342 drivers/nvme/host/multipath.c container_of(work, struct nvme_ns_head, requeue_work); work 566 drivers/nvme/host/multipath.c static void nvme_ana_work(struct work_struct *work) work 568 drivers/nvme/host/multipath.c struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work); work 2536 drivers/nvme/host/pci.c static void nvme_reset_work(struct work_struct *work) work 2539 drivers/nvme/host/pci.c container_of(work, struct nvme_dev, ctrl.reset_work); work 2666 drivers/nvme/host/pci.c static void nvme_remove_dead_ctrl_work(struct work_struct *work) work 2668 drivers/nvme/host/pci.c struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); work 1045 drivers/nvme/host/rdma.c static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) work 1047 drivers/nvme/host/rdma.c struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), work 1068 drivers/nvme/host/rdma.c static void nvme_rdma_error_recovery_work(struct work_struct *work) work 1070 drivers/nvme/host/rdma.c struct nvme_rdma_ctrl *ctrl = container_of(work, work 1907 drivers/nvme/host/rdma.c static void 
nvme_rdma_reset_ctrl_work(struct work_struct *work) work 1910 drivers/nvme/host/rdma.c container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work); work 1880 drivers/nvme/host/tcp.c static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work) work 1882 drivers/nvme/host/tcp.c struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work), work 1904 drivers/nvme/host/tcp.c static void nvme_tcp_error_recovery_work(struct work_struct *work) work 1906 drivers/nvme/host/tcp.c struct nvme_tcp_ctrl *tcp_ctrl = container_of(work, work 1945 drivers/nvme/host/tcp.c static void nvme_reset_ctrl_work(struct work_struct *work) work 1948 drivers/nvme/host/tcp.c container_of(work, struct nvme_ctrl, reset_work); work 149 drivers/nvme/target/core.c static void nvmet_async_event_work(struct work_struct *work) work 152 drivers/nvme/target/core.c container_of(work, struct nvmet_ctrl, async_event_work); work 350 drivers/nvme/target/core.c static void nvmet_keep_alive_timer(struct work_struct *work) work 352 drivers/nvme/target/core.c struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work), work 1197 drivers/nvme/target/core.c static void nvmet_fatal_error_handler(struct work_struct *work) work 1200 drivers/nvme/target/core.c container_of(work, struct nvmet_ctrl, fatal_err_work); work 46 drivers/nvme/target/fc.c struct work_struct work; work 219 drivers/nvme/target/fc.c static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); work 220 drivers/nvme/target/fc.c static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); work 338 drivers/nvme/target/fc.c INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); work 505 drivers/nvme/target/fc.c nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) work 508 drivers/nvme/target/fc.c container_of(work, struct nvmet_fc_fcp_iod, defer_work); work 781 drivers/nvme/target/fc.c nvmet_fc_delete_assoc(struct work_struct *work) work 784 drivers/nvme/target/fc.c container_of(work, struct nvmet_fc_tgt_assoc, del_work); work 1652 drivers/nvme/target/fc.c nvmet_fc_handle_ls_rqst_work(struct work_struct *work) work 1655 drivers/nvme/target/fc.c container_of(work, struct nvmet_fc_ls_iod, work); work 1704 drivers/nvme/target/fc.c schedule_work(&iod->work); work 229 drivers/nvme/target/fcloop.c struct work_struct work; work 236 drivers/nvme/target/fcloop.c struct work_struct work; work 301 drivers/nvme/target/fcloop.c fcloop_tgt_lsrqst_done_work(struct work_struct *work) work 304 drivers/nvme/target/fcloop.c container_of(work, struct fcloop_lsreq, work); work 322 drivers/nvme/target/fcloop.c INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work); work 327 drivers/nvme/target/fcloop.c schedule_work(&tls_req->work); work 351 drivers/nvme/target/fcloop.c schedule_work(&tls_req->work); work 361 drivers/nvme/target/fcloop.c fcloop_tgt_rscn_work(struct work_struct *work) work 364 drivers/nvme/target/fcloop.c container_of(work, struct fcloop_rscn, work); work 382 drivers/nvme/target/fcloop.c INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work); work 384 drivers/nvme/target/fcloop.c schedule_work(&tgt_rscn->work); work 429 drivers/nvme/target/fcloop.c fcloop_fcp_recv_work(struct work_struct *work) work 432 drivers/nvme/target/fcloop.c container_of(work, struct fcloop_fcpreq, fcp_rcv_work); work 465 drivers/nvme/target/fcloop.c fcloop_fcp_abort_recv_work(struct work_struct *work) work 468 drivers/nvme/target/fcloop.c container_of(work, struct fcloop_fcpreq, abort_rcv_work); work 510 drivers/nvme/target/fcloop.c fcloop_tgt_fcprqst_done_work(struct work_struct 
*work) work 513 drivers/nvme/target/fcloop.c container_of(work, struct fcloop_fcpreq, tio_done_work); work 220 drivers/nvme/target/io-cmd-file.c struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); work 227 drivers/nvme/target/io-cmd-file.c INIT_WORK(&req->f.work, nvmet_file_buffered_io_work); work 228 drivers/nvme/target/io-cmd-file.c queue_work(buffered_io_wq, &req->f.work); work 269 drivers/nvme/target/io-cmd-file.c struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); work 276 drivers/nvme/target/io-cmd-file.c INIT_WORK(&req->f.work, nvmet_file_flush_work); work 277 drivers/nvme/target/io-cmd-file.c schedule_work(&req->f.work); work 317 drivers/nvme/target/io-cmd-file.c struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); work 334 drivers/nvme/target/io-cmd-file.c INIT_WORK(&req->f.work, nvmet_file_dsm_work); work 335 drivers/nvme/target/io-cmd-file.c schedule_work(&req->f.work); work 340 drivers/nvme/target/io-cmd-file.c struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); work 362 drivers/nvme/target/io-cmd-file.c INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work); work 363 drivers/nvme/target/io-cmd-file.c schedule_work(&req->f.work); work 24 drivers/nvme/target/loop.c struct work_struct work; work 124 drivers/nvme/target/loop.c static void nvme_loop_execute_work(struct work_struct *work) work 127 drivers/nvme/target/loop.c container_of(work, struct nvme_loop_iod, work); work 170 drivers/nvme/target/loop.c schedule_work(&iod->work); work 191 drivers/nvme/target/loop.c schedule_work(&iod->work); work 200 drivers/nvme/target/loop.c INIT_WORK(&iod->work, nvme_loop_execute_work); work 444 drivers/nvme/target/loop.c static void nvme_loop_reset_ctrl_work(struct work_struct *work) work 447 drivers/nvme/target/loop.c container_of(work, struct nvme_loop_ctrl, ctrl.reset_work); work 303 drivers/nvme/target/nvmet.h struct work_struct work; work 38 drivers/oprofile/cpu_buffer.c static void wq_sync_buffer(struct work_struct *work); work 88 drivers/oprofile/cpu_buffer.c INIT_DELAYED_WORK(&b->work, wq_sync_buffer); work 110 drivers/oprofile/cpu_buffer.c schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i); work 127 drivers/oprofile/cpu_buffer.c flush_delayed_work(&b->work); work 452 drivers/oprofile/cpu_buffer.c static void wq_sync_buffer(struct work_struct *work) work 455 drivers/oprofile/cpu_buffer.c container_of(work, struct oprofile_cpu_buffer, work.work); work 457 drivers/oprofile/cpu_buffer.c cancel_delayed_work(&b->work); work 464 drivers/oprofile/cpu_buffer.c schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE); work 51 drivers/oprofile/cpu_buffer.h struct delayed_work work; work 94 drivers/oprofile/oprof.c static void switch_worker(struct work_struct *work); work 108 drivers/oprofile/oprof.c static void switch_worker(struct work_struct *work) work 1972 drivers/pci/controller/pci-hyperv.c static void pci_devices_present_work(struct work_struct *work) work 1984 drivers/pci/controller/pci-hyperv.c dr_wrk = container_of(work, struct hv_dr_work, wrk); work 2158 drivers/pci/controller/pci-hyperv.c static void hv_eject_device_work(struct work_struct *work) work 2171 drivers/pci/controller/pci-hyperv.c hpdev = container_of(work, struct hv_pci_dev, wrk); work 274 drivers/pci/endpoint/functions/pci-epf-test.c static void pci_epf_test_cmd_handler(struct work_struct *work) work 279 drivers/pci/endpoint/functions/pci-epf-test.c struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test, work 280 
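The nvmet io-cmd-file hits above re-run INIT_WORK() on the same embedded req->f.work with a different handler before each queue_work()/schedule_work(): one embedded field dispatches to the flush, DSM, or write-zeroes handler per command. Re-initializing a work_struct like this is only sound when its previous execution is known to have finished, which the request lifecycle there guarantees. A hedged sketch of the dispatch (names hypothetical):

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct my_req {                         /* hypothetical */
            struct work_struct work;
    };

    static void my_flush_work(struct work_struct *w) { /* ... */ }
    static void my_dsm_work(struct work_struct *w)   { /* ... */ }

    static void my_submit(struct my_req *req, bool flush)
    {
            /*
             * Safe only because the previous execution of req->work is
             * complete before the request object is reused.
             */
            INIT_WORK(&req->work, flush ? my_flush_work : my_dsm_work);
            schedule_work(&req->work);
    }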
drivers/pci/endpoint/functions/pci-epf-test.c cmd_handler.work); work 550 drivers/pci/endpoint/functions/pci-epf-test.c queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); work 308 drivers/pci/hotplug/cpqphp_core.c u32 work; work 339 drivers/pci/hotplug/cpqphp_core.c PCI_CLASS_REVISION, &work); work 341 drivers/pci/hotplug/cpqphp_core.c if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { work 344 drivers/pci/hotplug/cpqphp_core.c PCI_PRIMARY_BUS, &work); work 346 drivers/pci/hotplug/cpqphp_core.c if (((work >> 8) & 0x000000FF) == (long) bus_num) work 202 drivers/pci/hotplug/cpqphp_pci.c u32 work; work 209 drivers/pci/hotplug/cpqphp_pci.c if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) work 213 drivers/pci/hotplug/cpqphp_pci.c if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) { work 221 drivers/pci/hotplug/cpqphp_pci.c if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) work 225 drivers/pci/hotplug/cpqphp_pci.c if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { work 240 drivers/pci/hotplug/cpqphp_pci.c u32 work; work 253 drivers/pci/hotplug/cpqphp_pci.c pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work); work 254 drivers/pci/hotplug/cpqphp_pci.c if (!nobridge || (work == 0xffffffff)) work 258 drivers/pci/hotplug/cpqphp_pci.c pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work); work 259 drivers/pci/hotplug/cpqphp_pci.c dbg("work >> 8 (%x) = BRIDGE (%x)\n", work >> 8, PCI_TO_PCI_BRIDGE_CLASS); work 261 drivers/pci/hotplug/cpqphp_pci.c if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { work 161 drivers/pci/hotplug/pciehp.h void pciehp_queue_pushbutton_work(struct work_struct *work); work 144 drivers/pci/hotplug/pciehp_ctrl.c void pciehp_queue_pushbutton_work(struct work_struct *work) work 146 drivers/pci/hotplug/pciehp_ctrl.c struct controller *ctrl = container_of(work, struct controller, work 147 drivers/pci/hotplug/pciehp_ctrl.c button_work.work); work 24 drivers/pci/hotplug/pnv_php.c struct work_struct work; work 738 drivers/pci/hotplug/pnv_php.c static void pnv_php_event_handler(struct work_struct *work) work 741 drivers/pci/hotplug/pnv_php.c container_of(work, struct pnv_php_event, work); work 819 drivers/pci/hotplug/pnv_php.c INIT_WORK(&event->work, pnv_php_event_handler); work 822 drivers/pci/hotplug/pnv_php.c queue_work(php_slot->wq, &event->work); work 78 drivers/pci/hotplug/shpchp.h struct delayed_work work; /* work for button event */ work 87 drivers/pci/hotplug/shpchp.h struct work_struct work; work 169 drivers/pci/hotplug/shpchp.h void shpchp_queue_pushbutton_work(struct work_struct *work); work 95 drivers/pci/hotplug/shpchp_core.c INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); work 136 drivers/pci/hotplug/shpchp_core.c cancel_delayed_work(&slot->work); work 24 drivers/pci/hotplug/shpchp_ctrl.c static void interrupt_event_handler(struct work_struct *work); work 38 drivers/pci/hotplug/shpchp_ctrl.c INIT_WORK(&info->work, interrupt_event_handler); work 40 drivers/pci/hotplug/shpchp_ctrl.c queue_work(p_slot->wq, &info->work); work 379 drivers/pci/hotplug/shpchp_ctrl.c struct work_struct work; work 389 drivers/pci/hotplug/shpchp_ctrl.c static void shpchp_pushbutton_thread(struct work_struct *work) work 392 drivers/pci/hotplug/shpchp_ctrl.c container_of(work, struct pushbutton_work_info, work); work 418 drivers/pci/hotplug/shpchp_ctrl.c void shpchp_queue_pushbutton_work(struct work_struct *work) work 420 drivers/pci/hotplug/shpchp_ctrl.c struct slot *p_slot = container_of(work, struct slot, 
work.work); work 430 drivers/pci/hotplug/shpchp_ctrl.c INIT_WORK(&info->work, shpchp_pushbutton_thread); work 444 drivers/pci/hotplug/shpchp_ctrl.c queue_work(p_slot->wq, &info->work); work 481 drivers/pci/hotplug/shpchp_ctrl.c queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); work 492 drivers/pci/hotplug/shpchp_ctrl.c cancel_delayed_work(&p_slot->work); work 519 drivers/pci/hotplug/shpchp_ctrl.c static void interrupt_event_handler(struct work_struct *work) work 521 drivers/pci/hotplug/shpchp_ctrl.c struct event_info *info = container_of(work, struct event_info, work); work 645 drivers/pci/hotplug/shpchp_ctrl.c cancel_delayed_work(&p_slot->work); work 681 drivers/pci/hotplug/shpchp_ctrl.c cancel_delayed_work(&p_slot->work); work 53 drivers/pci/pci.c static void pci_pme_list_scan(struct work_struct *work); work 2068 drivers/pci/pci.c static void pci_pme_list_scan(struct work_struct *work) work 1023 drivers/pci/pcie/aer.c static void aer_recover_work_func(struct work_struct *work) work 44 drivers/pci/pcie/pme.c struct work_struct work; work 213 drivers/pci/pcie/pme.c static void pcie_pme_work_fn(struct work_struct *work) work 216 drivers/pci/pcie/pme.c container_of(work, struct pcie_pme_service_data, work); work 286 drivers/pci/pcie/pme.c schedule_work(&data->work); work 332 drivers/pci/pcie/pme.c INIT_WORK(&data->work, pcie_pme_work_fn); work 442 drivers/pci/pcie/pme.c cancel_work_sync(&data->work); work 233 drivers/pci/switch/switchtec.c static void mrpc_event_work(struct work_struct *work) work 237 drivers/pci/switch/switchtec.c stdev = container_of(work, struct switchtec_dev, mrpc_work); work 247 drivers/pci/switch/switchtec.c static void mrpc_timeout_work(struct work_struct *work) work 252 drivers/pci/switch/switchtec.c stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work); work 1031 drivers/pci/switch/switchtec.c static void link_event_work(struct work_struct *work) work 1035 drivers/pci/switch/switchtec.c stdev = container_of(work, struct switchtec_dev, link_event_work); work 543 drivers/phy/allwinner/phy-sun4i-usb.c static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) work 546 drivers/phy/allwinner/phy-sun4i-usb.c container_of(work, struct sun4i_usb_phy_data, detect.work); work 243 drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c static void extcon_work(struct work_struct *work) work 249 drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c driver = container_of(to_delayed_work(work), work 223 drivers/phy/motorola/phy-cpcap-usb.c static void cpcap_usb_detect(struct work_struct *work) work 230 drivers/phy/motorola/phy-cpcap-usb.c ddata = container_of(work, struct cpcap_phy_ddata, detect_work.work); work 199 drivers/phy/motorola/phy-mapphone-mdm6600.c static void phy_mdm6600_status(struct work_struct *work) work 206 drivers/phy/motorola/phy-mapphone-mdm6600.c ddata = container_of(work, struct phy_mdm6600, status_work.work); work 475 drivers/phy/motorola/phy-mapphone-mdm6600.c static void phy_mdm6600_deferred_power_on(struct work_struct *work) work 480 drivers/phy/motorola/phy-mapphone-mdm6600.c ddata = container_of(work, struct phy_mdm6600, bootup_work.work); work 508 drivers/phy/motorola/phy-mapphone-mdm6600.c static void phy_mdm6600_modem_wake(struct work_struct *work) work 512 drivers/phy/motorola/phy-mapphone-mdm6600.c ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work); work 538 drivers/phy/motorola/phy-mapphone-mdm6600.c phy_mdm6600_modem_wake(&ddata->modem_wake_work.work); work 111 drivers/phy/renesas/phy-rcar-gen3-usb2.c struct work_struct work; 
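The dominant idiom in the hits above is recovering the driver context from the callback's work pointer with container_of(); delayed-work handlers additionally go through to_delayed_work() or name the embedded .work member. A minimal sketch of both forms, assuming a hypothetical demo_ctx (the struct and field names are illustrative, not taken from any driver listed here):

#include <linux/workqueue.h>

/* Hypothetical driver context embedding both kinds of work item. */
struct demo_ctx {
	struct work_struct event_work;	/* immediate work */
	struct delayed_work poll_work;	/* timer-deferred work */
	int state;
};

static void demo_event_fn(struct work_struct *work)
{
	/* Plain work: container_of() directly on the work_struct. */
	struct demo_ctx *ctx = container_of(work, struct demo_ctx,
					    event_work);

	ctx->state++;
}

static void demo_poll_fn(struct work_struct *work)
{
	/*
	 * Delayed work: the callback still receives a work_struct
	 * pointer, so convert with to_delayed_work() first.
	 */
	struct demo_ctx *ctx = container_of(to_delayed_work(work),
					    struct demo_ctx, poll_work);

	ctx->state++;
}

Setup pairs each handler with INIT_WORK()/INIT_DELAYED_WORK() before the first schedule_work()/schedule_delayed_work() call, exactly as in the INIT_WORK and INIT_DELAYED_WORK hits above.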
work 130 drivers/phy/renesas/phy-rcar-gen3-usb2.c static void rcar_gen3_phy_usb2_work(struct work_struct *work) work 132 drivers/phy/renesas/phy-rcar-gen3-usb2.c struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan, work 133 drivers/phy/renesas/phy-rcar-gen3-usb2.c work); work 203 drivers/phy/renesas/phy-rcar-gen3-usb2.c schedule_work(&ch->work); work 213 drivers/phy/renesas/phy-rcar-gen3-usb2.c schedule_work(&ch->work); work 620 drivers/phy/renesas/phy-rcar-gen3-usb2.c INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); work 525 drivers/phy/rockchip/phy-rockchip-inno-usb2.c static void rockchip_usb2phy_otg_sm_work(struct work_struct *work) work 528 drivers/phy/rockchip/phy-rockchip-inno-usb2.c container_of(work, struct rockchip_usb2phy_port, work 529 drivers/phy/rockchip/phy-rockchip-inno-usb2.c otg_sm_work.work); work 684 drivers/phy/rockchip/phy-rockchip-inno-usb2.c static void rockchip_chg_detect_work(struct work_struct *work) work 687 drivers/phy/rockchip/phy-rockchip-inno-usb2.c container_of(work, struct rockchip_usb2phy_port, chg_work.work); work 765 drivers/phy/rockchip/phy-rockchip-inno-usb2.c rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work); work 789 drivers/phy/rockchip/phy-rockchip-inno-usb2.c static void rockchip_usb2phy_sm_work(struct work_struct *work) work 792 drivers/phy/rockchip/phy-rockchip-inno-usb2.c container_of(work, struct rockchip_usb2phy_port, sm_work.work); work 901 drivers/phy/rockchip/phy-rockchip-inno-usb2.c rockchip_usb2phy_sm_work(&rport->sm_work.work); work 921 drivers/phy/rockchip/phy-rockchip-inno-usb2.c rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work); work 608 drivers/phy/ti/phy-twl4030-usb.c static void twl4030_id_workaround_work(struct work_struct *work) work 610 drivers/phy/ti/phy-twl4030-usb.c struct twl4030_usb *twl = container_of(work, struct twl4030_usb, work 611 drivers/phy/ti/phy-twl4030-usb.c id_workaround_work.work); work 134 drivers/platform/chrome/cros_ec_ishtp.c static void ish_evt_handler(struct work_struct *work) work 137 drivers/platform/chrome/cros_ec_ishtp.c container_of(work, struct ishtp_cl_data, work_ec_evt); work 553 drivers/platform/chrome/cros_ec_ishtp.c static void reset_handler(struct work_struct *work) work 560 drivers/platform/chrome/cros_ec_ishtp.c container_of(work, struct ishtp_cl_data, work_ishtp_reset); work 94 drivers/platform/chrome/cros_ec_spi.c struct kthread_work work; work 637 drivers/platform/chrome/cros_ec_spi.c static void cros_ec_xfer_high_pri_work(struct kthread_work *work) work 641 drivers/platform/chrome/cros_ec_spi.c params = container_of(work, struct cros_ec_xfer_work_params, work); work 651 drivers/platform/chrome/cros_ec_spi.c .work = KTHREAD_WORK_INIT(params.work, work 667 drivers/platform/chrome/cros_ec_spi.c kthread_queue_work(ec_spi->high_pri_worker, &params.work); work 668 drivers/platform/chrome/cros_ec_spi.c kthread_flush_work(&params.work); work 168 drivers/platform/chrome/cros_usbpd_logger.c static void cros_usbpd_log_check(struct work_struct *work) work 170 drivers/platform/chrome/cros_usbpd_logger.c struct logger_data *logger = container_of(to_delayed_work(work), work 164 drivers/platform/mellanox/mlxbf-tmfifo.c struct work_struct work; work 281 drivers/platform/mellanox/mlxbf-tmfifo.c schedule_work(&irq_info->fifo->work); work 405 drivers/platform/mellanox/mlxbf-tmfifo.c schedule_work(&fifo->work); work 812 drivers/platform/mellanox/mlxbf-tmfifo.c static void mlxbf_tmfifo_work_handler(struct work_struct *work) work 816 drivers/platform/mellanox/mlxbf-tmfifo.c fifo =
container_of(work, struct mlxbf_tmfifo, work); work 868 drivers/platform/mellanox/mlxbf-tmfifo.c schedule_work(&fifo->work); work 1175 drivers/platform/mellanox/mlxbf-tmfifo.c cancel_work_sync(&fifo->work); work 1194 drivers/platform/mellanox/mlxbf-tmfifo.c INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler); work 415 drivers/platform/mellanox/mlxreg-hotplug.c static void mlxreg_hotplug_work_handler(struct work_struct *work) work 424 drivers/platform/mellanox/mlxreg-hotplug.c priv = container_of(work, struct mlxreg_hotplug_priv_data, work 425 drivers/platform/mellanox/mlxreg-hotplug.c dwork_irq.work); work 559 drivers/platform/mellanox/mlxreg-hotplug.c mlxreg_hotplug_work_handler(&priv->dwork_irq.work); work 140 drivers/platform/mips/cpu_hwmon.c static void do_thermal_timer(struct work_struct *work) work 217 drivers/platform/x86/asus-laptop.c struct work_struct work; work 561 drivers/platform/x86/asus-laptop.c queue_work(asus->led_workqueue, &led->work); work 564 drivers/platform/x86/asus-laptop.c static void asus_led_cdev_update(struct work_struct *work) work 566 drivers/platform/x86/asus-laptop.c struct asus_led *led = container_of(work, struct asus_led, work); work 622 drivers/platform/x86/asus-laptop.c queue_work(asus->led_workqueue, &led->work); work 625 drivers/platform/x86/asus-laptop.c static void asus_kled_cdev_update(struct work_struct *work) work 627 drivers/platform/x86/asus-laptop.c struct asus_led *led = container_of(work, struct asus_led, work); work 678 drivers/platform/x86/asus-laptop.c INIT_WORK(&led->work, asus_led_cdev_update); work 739 drivers/platform/x86/asus-laptop.c INIT_WORK(&led->work, asus_kled_cdev_update); work 91 drivers/platform/x86/asus-wireless.c static void led_state_update(struct work_struct *work) work 96 drivers/platform/x86/asus-wireless.c data = container_of(work, struct asus_wireless_data, led_work); work 478 drivers/platform/x86/asus-wmi.c static void tpd_led_update(struct work_struct *work) work 483 drivers/platform/x86/asus-wmi.c asus = container_of(work, struct asus_wmi, tpd_led_work); work 602 drivers/platform/x86/asus-wmi.c static void wlan_led_update(struct work_struct *work) work 607 drivers/platform/x86/asus-wmi.c asus = container_of(work, struct asus_wmi, wlan_led_work); work 635 drivers/platform/x86/asus-wmi.c static void lightbar_led_update(struct work_struct *work) work 640 drivers/platform/x86/asus-wmi.c asus = container_of(work, struct asus_wmi, lightbar_led_work); work 904 drivers/platform/x86/asus-wmi.c static void asus_hotplug_work(struct work_struct *work) work 908 drivers/platform/x86/asus-wmi.c asus = container_of(work, struct asus_wmi, hotplug_work); work 485 drivers/platform/x86/eeepc-laptop.c static void tpd_led_update(struct work_struct *work) work 489 drivers/platform/x86/eeepc-laptop.c eeepc = container_of(work, struct eeepc_laptop, tpd_led_work); work 50 drivers/platform/x86/gpd-pocket-fan.c struct delayed_work work; work 73 drivers/platform/x86/gpd-pocket-fan.c static void gpd_pocket_fan_worker(struct work_struct *work) work 76 drivers/platform/x86/gpd-pocket-fan.c container_of(work, struct gpd_pocket_fan_data, work.work); work 114 drivers/platform/x86/gpd-pocket-fan.c queue_delayed_work(system_wq, &fan->work, work 121 drivers/platform/x86/gpd-pocket-fan.c mod_delayed_work(system_wq, &fan->work, 0); work 155 drivers/platform/x86/gpd-pocket-fan.c INIT_DELAYED_WORK(&fan->work, gpd_pocket_fan_worker); work 184 drivers/platform/x86/gpd-pocket-fan.c cancel_delayed_work_sync(&fan->work); work 193 
drivers/platform/x86/gpd-pocket-fan.c cancel_delayed_work_sync(&fan->work); work 39 drivers/platform/x86/hp_accel.c struct work_struct work; work 46 drivers/platform/x86/hp_accel.c static inline void delayed_set_status_worker(struct work_struct *work) work 49 drivers/platform/x86/hp_accel.c container_of(work, struct delayed_led_classdev, work); work 60 drivers/platform/x86/hp_accel.c schedule_work(&data->work); work 368 drivers/platform/x86/hp_accel.c INIT_WORK(&hpled_led.work, delayed_set_status_worker); work 373 drivers/platform/x86/hp_accel.c flush_work(&hpled_led.work); work 390 drivers/platform/x86/hp_accel.c flush_work(&hpled_led.work); work 83 drivers/platform/x86/intel_turbo_max_3.c static void itmt_legacy_work_fn(struct work_struct *work) work 1107 drivers/platform/x86/samsung-laptop.c static void kbd_led_update(struct work_struct *work) work 1111 drivers/platform/x86/samsung-laptop.c samsung = container_of(work, struct samsung_laptop, kbd_led_work); work 1725 drivers/platform/x86/toshiba_acpi.c static void toshiba_acpi_kbd_bl_work(struct work_struct *work); work 2404 drivers/platform/x86/toshiba_acpi.c static void toshiba_acpi_kbd_bl_work(struct work_struct *work) work 2682 drivers/platform/x86/toshiba_acpi.c static void toshiba_acpi_hotkey_work(struct work_struct *work) work 569 drivers/power/supply/ab8500_btemp.c static void ab8500_btemp_periodic_work(struct work_struct *work) work 573 drivers/power/supply/ab8500_btemp.c struct ab8500_btemp *di = container_of(work, work 574 drivers/power/supply/ab8500_btemp.c struct ab8500_btemp, btemp_periodic_work.work); work 1918 drivers/power/supply/ab8500_charger.c static void ab8500_charger_check_vbat_work(struct work_struct *work) work 1921 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 1922 drivers/power/supply/ab8500_charger.c struct ab8500_charger, check_vbat_work.work); work 1963 drivers/power/supply/ab8500_charger.c static void ab8500_charger_check_hw_failure_work(struct work_struct *work) work 1968 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 1969 drivers/power/supply/ab8500_charger.c struct ab8500_charger, check_hw_failure_work.work); work 2018 drivers/power/supply/ab8500_charger.c static void ab8500_charger_kick_watchdog_work(struct work_struct *work) work 2022 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2023 drivers/power/supply/ab8500_charger.c struct ab8500_charger, kick_wd_work.work); work 2041 drivers/power/supply/ab8500_charger.c static void ab8500_charger_ac_work(struct work_struct *work) work 2045 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2068 drivers/power/supply/ab8500_charger.c static void ab8500_charger_usb_attached_work(struct work_struct *work) work 2070 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2072 drivers/power/supply/ab8500_charger.c usb_charger_attached_work.work); work 2105 drivers/power/supply/ab8500_charger.c static void ab8500_charger_ac_attached_work(struct work_struct *work) work 2108 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2110 drivers/power/supply/ab8500_charger.c ac_charger_attached_work.work); work 2152 drivers/power/supply/ab8500_charger.c static void ab8500_charger_detect_usb_type_work(struct work_struct *work) work 2156 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2212 
drivers/power/supply/ab8500_charger.c static void ab8500_charger_usb_link_attach_work(struct work_struct *work) work 2215 drivers/power/supply/ab8500_charger.c container_of(work, struct ab8500_charger, attach_work.work); work 2236 drivers/power/supply/ab8500_charger.c static void ab8500_charger_usb_link_status_work(struct work_struct *work) work 2243 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2366 drivers/power/supply/ab8500_charger.c static void ab8500_charger_usb_state_changed_work(struct work_struct *work) work 2371 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2372 drivers/power/supply/ab8500_charger.c struct ab8500_charger, usb_state_changed_work.work); work 2433 drivers/power/supply/ab8500_charger.c static void ab8500_charger_check_usbchargernotok_work(struct work_struct *work) work 2439 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2440 drivers/power/supply/ab8500_charger.c struct ab8500_charger, check_usbchgnotok_work.work); work 2472 drivers/power/supply/ab8500_charger.c struct work_struct *work) work 2477 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2502 drivers/power/supply/ab8500_charger.c struct work_struct *work) work 2507 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2629 drivers/power/supply/ab8500_charger.c static void ab8500_charger_vbus_drop_end_work(struct work_struct *work) work 2631 drivers/power/supply/ab8500_charger.c struct ab8500_charger *di = container_of(work, work 2632 drivers/power/supply/ab8500_charger.c struct ab8500_charger, vbus_drop_end_work.work); work 754 drivers/power/supply/ab8500_fg.c static void ab8500_fg_acc_cur_work(struct work_struct *work) work 760 drivers/power/supply/ab8500_fg.c struct ab8500_fg *di = container_of(work, work 1785 drivers/power/supply/ab8500_fg.c static void ab8500_fg_periodic_work(struct work_struct *work) work 1787 drivers/power/supply/ab8500_fg.c struct ab8500_fg *di = container_of(work, struct ab8500_fg, work 1788 drivers/power/supply/ab8500_fg.c fg_periodic_work.work); work 1820 drivers/power/supply/ab8500_fg.c static void ab8500_fg_check_hw_failure_work(struct work_struct *work) work 1825 drivers/power/supply/ab8500_fg.c struct ab8500_fg *di = container_of(work, struct ab8500_fg, work 1826 drivers/power/supply/ab8500_fg.c fg_check_hw_failure_work.work); work 1861 drivers/power/supply/ab8500_fg.c static void ab8500_fg_low_bat_work(struct work_struct *work) work 1865 drivers/power/supply/ab8500_fg.c struct ab8500_fg *di = container_of(work, struct ab8500_fg, work 1866 drivers/power/supply/ab8500_fg.c fg_low_bat_work.work); work 1965 drivers/power/supply/ab8500_fg.c static void ab8500_fg_instant_work(struct work_struct *work) work 1967 drivers/power/supply/ab8500_fg.c struct ab8500_fg *di = container_of(work, struct ab8500_fg, fg_work); work 2397 drivers/power/supply/ab8500_fg.c static void ab8500_fg_reinit_work(struct work_struct *work) work 2399 drivers/power/supply/ab8500_fg.c struct ab8500_fg *di = container_of(work, struct ab8500_fg, work 2400 drivers/power/supply/ab8500_fg.c fg_reinit_work.work); work 1642 drivers/power/supply/abx500_chargalg.c static void abx500_chargalg_periodic_work(struct work_struct *work) work 1644 drivers/power/supply/abx500_chargalg.c struct abx500_chargalg *di = container_of(work, work 1645 drivers/power/supply/abx500_chargalg.c struct abx500_chargalg, 
chargalg_periodic_work.work); work 1669 drivers/power/supply/abx500_chargalg.c static void abx500_chargalg_wd_work(struct work_struct *work) work 1672 drivers/power/supply/abx500_chargalg.c struct abx500_chargalg *di = container_of(work, work 1673 drivers/power/supply/abx500_chargalg.c struct abx500_chargalg, chargalg_wd_work.work); work 1691 drivers/power/supply/abx500_chargalg.c static void abx500_chargalg_work(struct work_struct *work) work 1693 drivers/power/supply/abx500_chargalg.c struct abx500_chargalg *di = container_of(work, work 79 drivers/power/supply/act8945a_charger.c struct work_struct work; work 433 drivers/power/supply/act8945a_charger.c static void act8945a_work(struct work_struct *work) work 436 drivers/power/supply/act8945a_charger.c container_of(work, struct act8945a_charger, work); work 448 drivers/power/supply/act8945a_charger.c schedule_work(&charger->work); work 630 drivers/power/supply/act8945a_charger.c INIT_WORK(&charger->work, act8945a_work); work 646 drivers/power/supply/act8945a_charger.c cancel_work_sync(&charger->work); work 77 drivers/power/supply/axp20x_usb_power.c static void axp20x_usb_power_poll_vbus(struct work_struct *work) work 80 drivers/power/supply/axp20x_usb_power.c container_of(work, struct axp20x_usb_power, vbus_detect.work); work 124 drivers/power/supply/axp288_charger.c struct work_struct work; work 134 drivers/power/supply/axp288_charger.c struct work_struct work; work 592 drivers/power/supply/axp288_charger.c static void axp288_charger_extcon_evt_worker(struct work_struct *work) work 595 drivers/power/supply/axp288_charger.c container_of(work, struct axp288_chrg_info, cable.work); work 649 drivers/power/supply/axp288_charger.c schedule_work(&info->cable.work); work 653 drivers/power/supply/axp288_charger.c static void axp288_charger_otg_evt_worker(struct work_struct *work) work 656 drivers/power/supply/axp288_charger.c container_of(work, struct axp288_chrg_info, otg.work); work 681 drivers/power/supply/axp288_charger.c schedule_work(&info->otg.work); work 787 drivers/power/supply/axp288_charger.c cancel_work_sync(&info->otg.work); work 788 drivers/power/supply/axp288_charger.c cancel_work_sync(&info->cable.work); work 857 drivers/power/supply/axp288_charger.c INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker); work 865 drivers/power/supply/axp288_charger.c schedule_work(&info->cable.work); work 868 drivers/power/supply/axp288_charger.c INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker); work 877 drivers/power/supply/axp288_charger.c schedule_work(&info->otg.work); work 163 drivers/power/supply/bq2415x_charger.c struct delayed_work work; work 842 drivers/power/supply/bq2415x_charger.c schedule_delayed_work(&bq->work, 0); work 862 drivers/power/supply/bq2415x_charger.c schedule_delayed_work(&bq->work, BQ2415X_TIMER_TIMEOUT * HZ); work 866 drivers/power/supply/bq2415x_charger.c cancel_delayed_work_sync(&bq->work); work 885 drivers/power/supply/bq2415x_charger.c static void bq2415x_timer_work(struct work_struct *work) work 887 drivers/power/supply/bq2415x_charger.c struct bq2415x_device *bq = container_of(work, struct bq2415x_device, work 888 drivers/power/supply/bq2415x_charger.c work.work); work 985 drivers/power/supply/bq2415x_charger.c schedule_delayed_work(&bq->work, BQ2415X_TIMER_TIMEOUT * HZ); work 1031 drivers/power/supply/bq2415x_charger.c cancel_delayed_work_sync(&bq->work); work 1677 drivers/power/supply/bq2415x_charger.c INIT_DELAYED_WORK(&bq->work, bq2415x_timer_work); work 1198 
drivers/power/supply/bq24190_charger.c static void bq24190_input_current_limit_work(struct work_struct *work) work 1201 drivers/power/supply/bq24190_charger.c container_of(work, struct bq24190_dev_info, work 1202 drivers/power/supply/bq24190_charger.c input_current_limit_work.work); work 591 drivers/power/supply/bq24257_charger.c static void bq24257_iilimit_setup_work(struct work_struct *work) work 593 drivers/power/supply/bq24257_charger.c struct bq24257_device *bq = container_of(work, struct bq24257_device, work 594 drivers/power/supply/bq24257_charger.c iilimit_setup_work.work); work 242 drivers/power/supply/bq24735-charger.c static void bq24735_poll(struct work_struct *work) work 244 drivers/power/supply/bq24735-charger.c struct bq24735 *charger = container_of(work, struct bq24735, poll.work); work 890 drivers/power/supply/bq27xxx_battery.c cancel_delayed_work_sync(&di->work); work 891 drivers/power/supply/bq27xxx_battery.c schedule_delayed_work(&di->work, 0); work 1626 drivers/power/supply/bq27xxx_battery.c static void bq27xxx_battery_poll(struct work_struct *work) work 1629 drivers/power/supply/bq27xxx_battery.c container_of(work, struct bq27xxx_device_info, work 1630 drivers/power/supply/bq27xxx_battery.c work.work); work 1635 drivers/power/supply/bq27xxx_battery.c schedule_delayed_work(&di->work, poll_interval * HZ); work 1769 drivers/power/supply/bq27xxx_battery.c cancel_delayed_work_sync(&di->work); work 1770 drivers/power/supply/bq27xxx_battery.c bq27xxx_battery_poll(&di->work.work); work 1855 drivers/power/supply/bq27xxx_battery.c cancel_delayed_work_sync(&di->work); work 1856 drivers/power/supply/bq27xxx_battery.c schedule_delayed_work(&di->work, 0); work 1867 drivers/power/supply/bq27xxx_battery.c INIT_DELAYED_WORK(&di->work, bq27xxx_battery_poll); work 1916 drivers/power/supply/bq27xxx_battery.c cancel_delayed_work_sync(&di->work); work 185 drivers/power/supply/bq27xxx_battery_i2c.c schedule_delayed_work(&di->work, 60 * HZ); work 513 drivers/power/supply/charger-manager.c static void fullbatt_vchk(struct work_struct *work) work 515 drivers/power/supply/charger-manager.c struct delayed_work *dwork = to_delayed_work(work); work 707 drivers/power/supply/charger-manager.c fullbatt_vchk(&cm->fullbatt_vchk_work.work); work 720 drivers/power/supply/charger-manager.c fullbatt_vchk(&cm->fullbatt_vchk_work.work); work 759 drivers/power/supply/charger-manager.c static void _setup_polling(struct work_struct *work) work 816 drivers/power/supply/charger-manager.c static void cm_monitor_poller(struct work_struct *work) work 1081 drivers/power/supply/charger-manager.c fullbatt_vchk(&cm->fullbatt_vchk_work.work); work 1128 drivers/power/supply/charger-manager.c static void charger_extcon_work(struct work_struct *work) work 1131 drivers/power/supply/charger-manager.c container_of(work, struct charger_cable, wq); work 191 drivers/power/supply/collie_battery.c static void collie_bat_work(struct work_struct *work) work 332 drivers/power/supply/cpcap-charger.c static void cpcap_charger_vbus_work(struct work_struct *work) work 338 drivers/power/supply/cpcap-charger.c ddata = container_of(work, struct cpcap_charger_ddata, work 339 drivers/power/supply/cpcap-charger.c vbus_work.work); work 437 drivers/power/supply/cpcap-charger.c static void cpcap_usb_detect(struct work_struct *work) work 443 drivers/power/supply/cpcap-charger.c ddata = container_of(work, struct cpcap_charger_ddata, work 444 drivers/power/supply/cpcap-charger.c detect_work.work); work 95 drivers/power/supply/da9030_battery.c struct 
delayed_work work; work 291 drivers/power/supply/da9030_battery.c static void da9030_charging_monitor(struct work_struct *work) work 295 drivers/power/supply/da9030_battery.c charger = container_of(work, struct da9030_charger, work.work); work 300 drivers/power/supply/da9030_battery.c schedule_delayed_work(&charger->work, charger->interval); work 406 drivers/power/supply/da9030_battery.c cancel_delayed_work_sync(&charger->work); work 407 drivers/power/supply/da9030_battery.c schedule_work(&charger->work.work); work 529 drivers/power/supply/da9030_battery.c INIT_DELAYED_WORK(&charger->work, da9030_charging_monitor); work 530 drivers/power/supply/da9030_battery.c schedule_delayed_work(&charger->work, charger->interval); work 559 drivers/power/supply/da9030_battery.c cancel_delayed_work(&charger->work); work 574 drivers/power/supply/da9030_battery.c cancel_delayed_work_sync(&charger->work); work 79 drivers/power/supply/da9150-fg.c struct delayed_work work; work 354 drivers/power/supply/da9150-fg.c static void da9150_fg_work(struct work_struct *work) work 356 drivers/power/supply/da9150-fg.c struct da9150_fg *fg = container_of(work, struct da9150_fg, work.work); work 362 drivers/power/supply/da9150-fg.c schedule_delayed_work(&fg->work, msecs_to_jiffies(fg->interval)); work 509 drivers/power/supply/da9150-fg.c INIT_DELAYED_WORK(&fg->work, da9150_fg_work); work 510 drivers/power/supply/da9150-fg.c schedule_delayed_work(&fg->work, work 533 drivers/power/supply/da9150-fg.c cancel_delayed_work(&fg->work); work 543 drivers/power/supply/da9150-fg.c cancel_delayed_work(&fg->work); work 557 drivers/power/supply/da9150-fg.c flush_delayed_work(&fg->work); work 478 drivers/power/supply/ds2760_battery.c static void ds2760_battery_work(struct work_struct *work) work 480 drivers/power/supply/ds2760_battery.c struct ds2760_device_info *di = container_of(work, work 481 drivers/power/supply/ds2760_battery.c struct ds2760_device_info, monitor_work.work); work 500 drivers/power/supply/ds2760_battery.c static void ds2760_battery_set_charged_work(struct work_struct *work) work 503 drivers/power/supply/ds2760_battery.c struct ds2760_device_info *di = container_of(work, work 504 drivers/power/supply/ds2760_battery.c struct ds2760_device_info, set_charged_work.work); work 288 drivers/power/supply/ds2782_battery.c static void ds278x_bat_work(struct work_struct *work) work 292 drivers/power/supply/ds2782_battery.c info = container_of(work, struct ds278x_info, bat_work.work); work 201 drivers/power/supply/generic-adc-battery.c static void gab_work(struct work_struct *work) work 208 drivers/power/supply/generic-adc-battery.c delayed_work = to_delayed_work(work); work 49 drivers/power/supply/ipaq_micro_battery.c static void micro_battery_work(struct work_struct *work) work 51 drivers/power/supply/ipaq_micro_battery.c struct micro_battery *mb = container_of(work, work 52 drivers/power/supply/ipaq_micro_battery.c struct micro_battery, update.work); work 52 drivers/power/supply/isp1704_charger.c struct work_struct work; work 226 drivers/power/supply/isp1704_charger.c container_of(data, struct isp1704_charger, work); work 299 drivers/power/supply/isp1704_charger.c schedule_work(&isp->work); work 439 drivers/power/supply/isp1704_charger.c INIT_WORK(&isp->work, isp1704_charger_work); work 467 drivers/power/supply/isp1704_charger.c schedule_work(&isp->work); work 97 drivers/power/supply/lp8727_charger.c struct delayed_work work; work 225 drivers/power/supply/lp8727_charger.c work.work); work 250 
drivers/power/supply/lp8727_charger.c schedule_delayed_work(&pchg->work, pchg->debounce_jiffies); work 261 drivers/power/supply/lp8727_charger.c INIT_DELAYED_WORK(&pchg->work, lp8727_delayed_func); work 283 drivers/power/supply/lp8727_charger.c cancel_delayed_work_sync(&pchg->work); work 415 drivers/power/supply/lp8788-charger.c static void lp8788_charger_event(struct work_struct *work) work 418 drivers/power/supply/lp8788-charger.c container_of(work, struct lp8788_charger, charger_work); work 74 drivers/power/supply/ltc2941-battery-gauge.c struct delayed_work work; /* Work scheduler */ work 429 drivers/power/supply/ltc2941-battery-gauge.c static void ltc294x_work(struct work_struct *work) work 433 drivers/power/supply/ltc2941-battery-gauge.c info = container_of(work, struct ltc294x_info, work.work); work 435 drivers/power/supply/ltc2941-battery-gauge.c schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ); work 452 drivers/power/supply/ltc2941-battery-gauge.c cancel_delayed_work_sync(&info->work); work 549 drivers/power/supply/ltc2941-battery-gauge.c INIT_DELAYED_WORK(&info->work, ltc294x_work); work 563 drivers/power/supply/ltc2941-battery-gauge.c schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ); work 599 drivers/power/supply/ltc2941-battery-gauge.c cancel_delayed_work(&info->work); work 608 drivers/power/supply/ltc2941-battery-gauge.c schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ); work 135 drivers/power/supply/max14656_charger_detector.c static void max14656_irq_worker(struct work_struct *work) work 138 drivers/power/supply/max14656_charger_detector.c container_of(work, struct max14656_chip, irq_work.work); work 33 drivers/power/supply/max17040_battery.c struct delayed_work work; work 163 drivers/power/supply/max17040_battery.c static void max17040_work(struct work_struct *work) work 167 drivers/power/supply/max17040_battery.c chip = container_of(work, struct max17040_chip, work.work); work 174 drivers/power/supply/max17040_battery.c queue_delayed_work(system_power_efficient_wq, &chip->work, work 223 drivers/power/supply/max17040_battery.c INIT_DEFERRABLE_WORK(&chip->work, max17040_work); work 224 drivers/power/supply/max17040_battery.c queue_delayed_work(system_power_efficient_wq, &chip->work, work 235 drivers/power/supply/max17040_battery.c cancel_delayed_work(&chip->work); work 246 drivers/power/supply/max17040_battery.c cancel_delayed_work(&chip->work); work 255 drivers/power/supply/max17040_battery.c queue_delayed_work(system_power_efficient_wq, &chip->work, work 61 drivers/power/supply/max17042_battery.c struct work_struct work; work 857 drivers/power/supply/max17042_battery.c static void max17042_init_worker(struct work_struct *work) work 859 drivers/power/supply/max17042_battery.c struct max17042_chip *chip = container_of(work, work 860 drivers/power/supply/max17042_battery.c struct max17042_chip, work); work 1002 drivers/power/supply/max17042_battery.c cancel_work_sync(&chip->work); work 1110 drivers/power/supply/max17042_battery.c INIT_WORK(&chip->work, max17042_init_worker); work 1114 drivers/power/supply/max17042_battery.c schedule_work(&chip->work); work 140 drivers/power/supply/pda_power.c static void supply_work_func(struct work_struct *work) work 166 drivers/power/supply/pda_power.c static void charger_work_func(struct work_struct *work) work 192 drivers/power/supply/pda_power.c static void polling_work_func(struct work_struct *work) work 839 drivers/power/supply/pm2301_charger.c static void pm2xxx_charger_ac_work(struct work_struct *work) 
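Most of the power-supply hits above follow a self-rescheduling poll loop: INIT_DELAYED_WORK() at probe, a handler that re-arms itself, and remove/suspend paths that cancel with the _sync variant (as in bq27xxx, sbs-battery, ltc2941, da9150). A sketch under those assumptions; poll_ctx and its members are hypothetical:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical polled-sensor context, mirroring the battery drivers. */
struct poll_ctx {
	struct delayed_work work;
	unsigned int interval_ms;
};

static void poll_fn(struct work_struct *work)
{
	/* Nested member designator, as in the gpd-pocket-fan hit above. */
	struct poll_ctx *ctx = container_of(work, struct poll_ctx, work.work);

	/* ... read hardware, report a status change ... */

	/* Re-arm: the handler queues itself for the next poll. */
	schedule_delayed_work(&ctx->work,
			      msecs_to_jiffies(ctx->interval_ms));
}

static void poll_start(struct poll_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->work, poll_fn);
	schedule_delayed_work(&ctx->work, 0);	/* first run immediately */
}

static void poll_stop(struct poll_ctx *ctx)
{
	/*
	 * The _sync variant waits out a running instance, which is why
	 * the drivers above use it in their remove/suspend paths.
	 */
	cancel_delayed_work_sync(&ctx->work);
}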
work 841 drivers/power/supply/pm2301_charger.c struct pm2xxx_charger *pm2 = container_of(work, work 849 drivers/power/supply/pm2301_charger.c static void pm2xxx_charger_check_hw_failure_work(struct work_struct *work) work 853 drivers/power/supply/pm2301_charger.c struct pm2xxx_charger *pm2 = container_of(work, work 854 drivers/power/supply/pm2301_charger.c struct pm2xxx_charger, check_hw_failure_work.work); work 874 drivers/power/supply/pm2301_charger.c struct work_struct *work) work 879 drivers/power/supply/pm2301_charger.c struct pm2xxx_charger *pm2 = container_of(work, struct pm2xxx_charger, work 76 drivers/power/supply/power_supply_core.c static void power_supply_changed_work(struct work_struct *work) work 79 drivers/power/supply/power_supply_core.c struct power_supply *psy = container_of(work, struct power_supply, work 138 drivers/power/supply/power_supply_core.c static void power_supply_deferred_register_work(struct work_struct *work) work 140 drivers/power/supply/power_supply_core.c struct power_supply *psy = container_of(work, struct power_supply, work 141 drivers/power/supply/power_supply_core.c deferred_register_work.work); work 1443 drivers/power/supply/rt9455_charger.c static void rt9455_pwr_rdy_work_callback(struct work_struct *work) work 1445 drivers/power/supply/rt9455_charger.c struct rt9455_info *info = container_of(work, struct rt9455_info, work 1446 drivers/power/supply/rt9455_charger.c pwr_rdy_work.work); work 1481 drivers/power/supply/rt9455_charger.c static void rt9455_max_charging_time_work_callback(struct work_struct *work) work 1483 drivers/power/supply/rt9455_charger.c struct rt9455_info *info = container_of(work, struct rt9455_info, work 1484 drivers/power/supply/rt9455_charger.c max_charging_time_work.work); work 1495 drivers/power/supply/rt9455_charger.c static void rt9455_batt_presence_work_callback(struct work_struct *work) work 1497 drivers/power/supply/rt9455_charger.c struct rt9455_info *info = container_of(work, struct rt9455_info, work 1498 drivers/power/supply/rt9455_charger.c batt_presence_work.work); work 247 drivers/power/supply/s3c_adc_battery.c static void s3c_adc_bat_work(struct work_struct *work) work 164 drivers/power/supply/sbs-battery.c struct delayed_work work; work 441 drivers/power/supply/sbs-battery.c cancel_delayed_work_sync(&chip->work); work 768 drivers/power/supply/sbs-battery.c cancel_delayed_work_sync(&chip->work); work 770 drivers/power/supply/sbs-battery.c schedule_delayed_work(&chip->work, HZ); work 774 drivers/power/supply/sbs-battery.c static void sbs_delayed_work(struct work_struct *work) work 779 drivers/power/supply/sbs-battery.c chip = container_of(work, struct sbs_info, work.work); work 803 drivers/power/supply/sbs-battery.c schedule_delayed_work(&chip->work, HZ); work 922 drivers/power/supply/sbs-battery.c INIT_DELAYED_WORK(&chip->work, sbs_delayed_work); work 936 drivers/power/supply/sbs-battery.c cancel_delayed_work_sync(&chip->work); work 950 drivers/power/supply/sbs-battery.c cancel_delayed_work_sync(&chip->work); work 40 drivers/power/supply/sbs-charger.c struct delayed_work work; work 107 drivers/power/supply/sbs-charger.c static void sbs_delayed_work(struct work_struct *work) work 109 drivers/power/supply/sbs-charger.c struct sbs_info *chip = container_of(work, struct sbs_info, work.work); work 113 drivers/power/supply/sbs-charger.c schedule_delayed_work(&chip->work, work 220 drivers/power/supply/sbs-charger.c INIT_DELAYED_WORK(&chip->work, sbs_delayed_work); work 221 drivers/power/supply/sbs-charger.c 
schedule_delayed_work(&chip->work, work 235 drivers/power/supply/sbs-charger.c cancel_delayed_work_sync(&chip->work); work 60 drivers/power/supply/sc2731_charger.c struct work_struct work; work 326 drivers/power/supply/sc2731_charger.c container_of(data, struct sc2731_charger_info, work); work 364 drivers/power/supply/sc2731_charger.c schedule_work(&info->work); work 452 drivers/power/supply/sc2731_charger.c schedule_work(&info->work); work 468 drivers/power/supply/sc2731_charger.c INIT_WORK(&info->work, sc2731_charger_work); work 195 drivers/power/supply/tosa_battery.c static void tosa_bat_work(struct work_struct *work) work 117 drivers/power/supply/twl4030_charger.c struct work_struct work; work 404 drivers/power/supply/twl4030_charger.c current_worker.work); work 651 drivers/power/supply/twl4030_charger.c struct twl4030_bci *bci = container_of(data, struct twl4030_bci, work); work 678 drivers/power/supply/twl4030_charger.c schedule_work(&bci->work); work 1007 drivers/power/supply/twl4030_charger.c INIT_WORK(&bci->work, twl4030_bci_usb_work); work 126 drivers/power/supply/wm97xx_battery.c static void wm97xx_bat_work(struct work_struct *work) work 114 drivers/power/supply/z2_battery.c static void z2_batt_work(struct work_struct *work) work 117 drivers/power/supply/z2_battery.c charger = container_of(work, struct z2_charger, bat_work); work 739 drivers/ps3/ps3-sys-manager.c .work = ps3_sys_manager_work, work 74 drivers/ps3/ps3-vuart.c struct ps3_vuart_work work; work 661 drivers/ps3/ps3-vuart.c static void ps3_vuart_work(struct work_struct *work) work 664 drivers/ps3/ps3-vuart.c ps3_vuart_work_to_system_bus_dev(work); work 669 drivers/ps3/ps3-vuart.c drv->work(dev); work 677 drivers/ps3/ps3-vuart.c if (priv->rx_list.work.trigger) { work 689 drivers/ps3/ps3-vuart.c schedule_work(&priv->rx_list.work.work); work 694 drivers/ps3/ps3-vuart.c priv->rx_list.work.trigger = bytes; work 706 drivers/ps3/ps3-vuart.c to_port_priv(dev)->rx_list.work.trigger = 0; work 795 drivers/ps3/ps3-vuart.c if (priv->rx_list.work.trigger && priv->rx_list.bytes_held work 796 drivers/ps3/ps3-vuart.c >= priv->rx_list.work.trigger) { work 798 drivers/ps3/ps3-vuart.c __func__, __LINE__, priv->rx_list.work.trigger); work 799 drivers/ps3/ps3-vuart.c priv->rx_list.work.trigger = 0; work 800 drivers/ps3/ps3-vuart.c schedule_work(&priv->rx_list.work.work); work 1035 drivers/ps3/ps3-vuart.c INIT_WORK(&priv->rx_list.work.work, ps3_vuart_work); work 1036 drivers/ps3/ps3-vuart.c priv->rx_list.work.trigger = 0; work 1037 drivers/ps3/ps3-vuart.c priv->rx_list.work.dev = dev; work 33 drivers/ps3/ps3av.c struct work_struct work; work 475 drivers/ps3/ps3av.c schedule_work(&ps3av->work); work 568 drivers/ps3/ps3av.c static void ps3avd(struct work_struct *work) work 943 drivers/ps3/ps3av.c INIT_WORK(&ps3av->work, ps3avd); work 1003 drivers/ps3/ps3av.c flush_work(&ps3av->work); work 23 drivers/ps3/vuart.h struct work_struct work; work 37 drivers/ps3/vuart.h void (*work)(struct ps3_system_bus_device *); work 60 drivers/ps3/vuart.h work); work 180 drivers/ptp/ptp_clock.c static void ptp_aux_kworker(struct kthread_work *work) work 182 drivers/ptp/ptp_clock.c struct ptp_clock *ptp = container_of(work, struct ptp_clock, work 183 drivers/ptp/ptp_clock.c aux_work.work); work 290 drivers/rapidio/devices/tsi721.c static void tsi721_pw_dpc(struct work_struct *work) work 292 drivers/rapidio/devices/tsi721.c struct tsi721_device *priv = container_of(work, struct tsi721_device, work 381 drivers/rapidio/devices/tsi721.c static void 
tsi721_db_dpc(struct work_struct *work) work 383 drivers/rapidio/devices/tsi721.c struct tsi721_device *priv = container_of(work, struct tsi721_device, work 2126 drivers/rapidio/rio.c struct work_struct work; work 2132 drivers/rapidio/rio.c struct rio_disc_work *work; work 2134 drivers/rapidio/rio.c work = container_of(_work, struct rio_disc_work, work); work 2136 drivers/rapidio/rio.c work->mport->id, work->mport->name); work 2137 drivers/rapidio/rio.c if (try_module_get(work->mport->nscan->owner)) { work 2138 drivers/rapidio/rio.c work->mport->nscan->discover(work->mport, 0); work 2139 drivers/rapidio/rio.c module_put(work->mport->nscan->owner); work 2146 drivers/rapidio/rio.c struct rio_disc_work *work; work 2185 drivers/rapidio/rio.c work = kcalloc(n, sizeof *work, GFP_KERNEL); work 2186 drivers/rapidio/rio.c if (!work) { work 2195 drivers/rapidio/rio.c work[n].mport = port; work 2196 drivers/rapidio/rio.c INIT_WORK(&work[n].work, disc_work_handler); work 2197 drivers/rapidio/rio.c queue_work(rio_wq, &work[n].work); work 2206 drivers/rapidio/rio.c kfree(work); work 202 drivers/rapidio/rio_cm.c struct work_struct work; work 580 drivers/rapidio/rio_cm.c static void rio_ibmsg_handler(struct work_struct *work) work 582 drivers/rapidio/rio_cm.c struct cm_dev *cm = container_of(work, struct cm_dev, rx_work); work 172 drivers/ras/cec.c static void cec_work_fn(struct work_struct *work) work 46 drivers/regulator/arizona-micsupp.c static void arizona_micsupp_check_cp(struct work_struct *work) work 49 drivers/regulator/arizona-micsupp.c container_of(work, struct arizona_micsupp, check_cp_work); work 2730 drivers/regulator/core.c static void regulator_disable_work(struct work_struct *work) work 2732 drivers/regulator/core.c struct regulator_dev *rdev = container_of(work, struct regulator_dev, work 2733 drivers/regulator/core.c disable_work.work); work 5243 drivers/regulator/core.c flush_work(&rdev->disable_work.work); work 5720 drivers/regulator/core.c static void regulator_init_complete_work_function(struct work_struct *work) work 1133 drivers/regulator/qcom_spmi-regulator.c static void spmi_regulator_vs_ocp_work(struct work_struct *work) work 1135 drivers/regulator/qcom_spmi-regulator.c struct delayed_work *dwork = to_delayed_work(work); work 141 drivers/remoteproc/keystone_remoteproc.c static void handle_event(struct work_struct *work) work 144 drivers/remoteproc/keystone_remoteproc.c container_of(work, struct keystone_rproc, workqueue); work 1693 drivers/remoteproc/remoteproc_core.c static void rproc_crash_handler_work(struct work_struct *work) work 1695 drivers/remoteproc/remoteproc_core.c struct rproc *rproc = container_of(work, struct rproc, crash_handler); work 207 drivers/rpmsg/qcom_glink_native.c static void qcom_glink_rx_done_work(struct work_struct *work); work 473 drivers/rpmsg/qcom_glink_native.c static void qcom_glink_rx_done_work(struct work_struct *work) work 475 drivers/rpmsg/qcom_glink_native.c struct glink_channel *channel = container_of(work, struct glink_channel, work 1510 drivers/rpmsg/qcom_glink_native.c static void qcom_glink_work(struct work_struct *work) work 1512 drivers/rpmsg/qcom_glink_native.c struct qcom_glink *glink = container_of(work, struct qcom_glink, work 1189 drivers/rpmsg/qcom_smd.c static void qcom_channel_scan_worker(struct work_struct *work) work 1191 drivers/rpmsg/qcom_smd.c struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work); work 1256 drivers/rpmsg/qcom_smd.c static void qcom_channel_state_worker(struct work_struct *work) 
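The rio_disc_work and pnv_php_event hits show the one-shot pattern: a work item is allocated per event, queued to a dedicated workqueue, and (in the pnv_php/qeth_l3 form) freed by its own handler. A minimal sketch; demo_event, demo_wq, and the payload are hypothetical:

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical one-shot event, allocated per occurrence and freed
 * by its own handler. */
struct demo_event {
	struct work_struct work;
	int payload;
};

/* Created once during init, e.g. alloc_workqueue("demo", 0, 0). */
static struct workqueue_struct *demo_wq;

static void demo_event_fn(struct work_struct *work)
{
	struct demo_event *ev = container_of(work, struct demo_event, work);

	/* ... handle ev->payload ... */
	kfree(ev);	/* the handler owns the allocation */
}

static int demo_post_event(int payload)
{
	struct demo_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return -ENOMEM;
	ev->payload = payload;
	INIT_WORK(&ev->work, demo_event_fn);
	queue_work(demo_wq, &ev->work);
	return 0;
}

A private workqueue (rather than schedule_work()'s system queue) lets teardown code flush or drain all pending events for the device in one call.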
work 1259 drivers/rpmsg/qcom_smd.c struct qcom_smd_edge *edge = container_of(work, work 44 drivers/rtc/dev.c static void rtc_uie_task(struct work_struct *work) work 47 drivers/rtc/dev.c container_of(work, struct rtc_device, uie_task); work 887 drivers/rtc/interface.c void rtc_timer_do_work(struct work_struct *work) work 895 drivers/rtc/interface.c container_of(work, struct rtc_device, irqwork); work 245 drivers/rtc/rtc-88pm860x.c static void calibrate_vrtc_work(struct work_struct *work) work 247 drivers/rtc/rtc-88pm860x.c struct pm860x_rtc_info *info = container_of(work, work 248 drivers/rtc/rtc-88pm860x.c struct pm860x_rtc_info, calib_work.work); work 88 drivers/rtc/rtc-ds1305.c struct work_struct work; work 439 drivers/rtc/rtc-ds1305.c static void ds1305_work(struct work_struct *work) work 441 drivers/rtc/rtc-ds1305.c struct ds1305 *ds1305 = container_of(work, struct ds1305, work); work 484 drivers/rtc/rtc-ds1305.c schedule_work(&ds1305->work); work 713 drivers/rtc/rtc-ds1305.c INIT_WORK(&ds1305->work, ds1305_work); work 735 drivers/rtc/rtc-ds1305.c cancel_work_sync(&ds1305->work); work 73 drivers/rtc/rtc-ds1374.c struct work_struct work; work 291 drivers/rtc/rtc-ds1374.c schedule_work(&ds1374->work); work 295 drivers/rtc/rtc-ds1374.c static void ds1374_work(struct work_struct *work) work 297 drivers/rtc/rtc-ds1374.c struct ds1374 *ds1374 = container_of(work, struct ds1374, work); work 626 drivers/rtc/rtc-ds1374.c INIT_WORK(&ds1374->work, ds1374_work); work 682 drivers/rtc/rtc-ds1374.c cancel_work_sync(&ds1374->work); work 117 drivers/rtc/rtc-imxdi.c struct work_struct work; work 715 drivers/rtc/rtc-imxdi.c schedule_work(&imxdi->work); work 726 drivers/rtc/rtc-imxdi.c static void dryice_work(struct work_struct *work) work 728 drivers/rtc/rtc-imxdi.c struct imxdi_dev *imxdi = container_of(work, work 729 drivers/rtc/rtc-imxdi.c struct imxdi_dev, work); work 772 drivers/rtc/rtc-imxdi.c INIT_WORK(&imxdi->work, dryice_work); work 833 drivers/rtc/rtc-imxdi.c flush_work(&imxdi->work); work 583 drivers/s390/block/dasd.c static void do_kick_device(struct work_struct *work) work 585 drivers/s390/block/dasd.c struct dasd_device *device = container_of(work, struct dasd_device, kick_work); work 606 drivers/s390/block/dasd.c static void do_reload_device(struct work_struct *work) work 608 drivers/s390/block/dasd.c struct dasd_device *device = container_of(work, struct dasd_device, work 627 drivers/s390/block/dasd.c static void do_restore_device(struct work_struct *work) work 629 drivers/s390/block/dasd.c struct dasd_device *device = container_of(work, struct dasd_device, work 4035 drivers/s390/block/dasd.c static void do_requeue_requests(struct work_struct *work) work 4037 drivers/s390/block/dasd.c struct dasd_device *device = container_of(work, struct dasd_device, work 528 drivers/s390/block/dasd_alias.c static void lcu_update_work(struct work_struct *work) work 536 drivers/s390/block/dasd_alias.c ruac_data = container_of(work, struct read_uac_work_data, dwork.work); work 881 drivers/s390/block/dasd_alias.c static void summary_unit_check_handling_work(struct work_struct *work) work 888 drivers/s390/block/dasd_alias.c suc_data = container_of(work, struct summary_unit_check_work_data, work 913 drivers/s390/block/dasd_alias.c void dasd_alias_handle_summary_unit_check(struct work_struct *work) work 915 drivers/s390/block/dasd_alias.c struct dasd_device *device = container_of(work, struct dasd_device, work 1260 drivers/s390/block/dasd_eckd.c static void do_path_verification_work(struct work_struct *work) 
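The rtc-ds1305/ds1374/imxdi hits are the classic interrupt bottom half: the hard-IRQ handler only calls schedule_work(), the work handler performs the slow, sleepable bus traffic, and the remove path flushes or cancels the work before freeing the context. A sketch with hypothetical demo_dev wiring:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct work;
	int irq;
};

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_dev *dd = dev_id;

	/* Too slow for hard-IRQ context (e.g. I2C traffic): defer. */
	schedule_work(&dd->work);
	return IRQ_HANDLED;
}

static void demo_work_fn(struct work_struct *work)
{
	struct demo_dev *dd = container_of(work, struct demo_dev, work);

	/* ... sleepable bus access here ... */
}

static void demo_remove(struct demo_dev *dd)
{
	free_irq(dd->irq, dd);		/* no new schedules after this */
	cancel_work_sync(&dd->work);	/* wait out a running instance */
}

These drivers predate threaded interrupts; request_threaded_irq() is the usual modern alternative to the schedule_work()-from-IRQ shape.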
work 1272 drivers/s390/block/dasd_eckd.c data = container_of(work, struct path_verification_work_data, worker); work 1277 drivers/s390/block/dasd_eckd.c schedule_work(work); work 1282 drivers/s390/block/dasd_eckd.c schedule_work(work); work 1658 drivers/s390/block/dasd_eckd.c static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work) work 1664 drivers/s390/block/dasd_eckd.c data = container_of(work, struct ext_pool_exhaust_work_data, worker); work 1935 drivers/s390/block/dasd_eckd.c static void dasd_eckd_do_validate_server(struct work_struct *work) work 1937 drivers/s390/block/dasd_eckd.c struct dasd_device *device = container_of(work, struct dasd_device, work 6522 drivers/s390/block/dasd_eckd.c static void dasd_eckd_check_attention_work(struct work_struct *work) work 6529 drivers/s390/block/dasd_eckd.c data = container_of(work, struct check_attention_work_data, worker); work 21 drivers/s390/char/ctrlchar.c ctrlchar_handle_sysrq(struct work_struct *work) work 23 drivers/s390/char/ctrlchar.c struct sysrq_work *sysrq = container_of(work, struct sysrq_work, work); work 30 drivers/s390/char/ctrlchar.c INIT_WORK(&sw->work, ctrlchar_handle_sysrq); work 31 drivers/s390/char/ctrlchar.c schedule_work(&sw->work); work 28 drivers/s390/char/ctrlchar.h struct work_struct work; work 46 drivers/s390/char/sclp_config.c static void sclp_cpu_capability_notify(struct work_struct *work) work 61 drivers/s390/char/sclp_config.c static void __ref sclp_cpu_change_notify(struct work_struct *work) work 37 drivers/s390/char/sclp_ocf.c static void sclp_ocf_change_notify(struct work_struct *work) work 123 drivers/s390/char/tape_34xx.c struct work_struct work; work 137 drivers/s390/char/tape_34xx.c tape_34xx_work_handler(struct work_struct *work) work 140 drivers/s390/char/tape_34xx.c container_of(work, struct tape_34xx_work, work); work 162 drivers/s390/char/tape_34xx.c INIT_WORK(&p->work, tape_34xx_work_handler); work 167 drivers/s390/char/tape_34xx.c schedule_work(&p->work); work 621 drivers/s390/char/tape_3590.c struct work_struct work; work 625 drivers/s390/char/tape_3590.c tape_3590_work_handler(struct work_struct *work) work 628 drivers/s390/char/tape_3590.c container_of(work, struct work_handler_data, work); work 659 drivers/s390/char/tape_3590.c INIT_WORK(&p->work, tape_3590_work_handler); work 664 drivers/s390/char/tape_3590.c queue_work(tape_3590_wq, &p->work); work 215 drivers/s390/char/tape_core.c struct work_struct work; work 219 drivers/s390/char/tape_core.c tape_med_state_work_handler(struct work_struct *work) work 224 drivers/s390/char/tape_core.c container_of(work, struct tape_med_state_work_data, work); work 255 drivers/s390/char/tape_core.c INIT_WORK(&p->work, tape_med_state_work_handler); work 258 drivers/s390/char/tape_core.c schedule_work(&p->work); work 860 drivers/s390/char/tape_core.c tape_delayed_next_request(struct work_struct *work) work 863 drivers/s390/char/tape_core.c container_of(work, struct tape_device, tape_dnr.work); work 123 drivers/s390/char/tty3270.c static void tty3270_resize_work(struct work_struct *work); work 846 drivers/s390/char/tty3270.c static void tty3270_resize_work(struct work_struct *work) work 848 drivers/s390/char/tty3270.c struct tty3270 *tp = container_of(work, struct tty3270, resize_work); work 226 drivers/s390/cio/ccwgroup.c static void ccwgroup_ungroup_workfn(struct work_struct *work) work 229 drivers/s390/cio/ccwgroup.c container_of(work, struct ccwgroup_device, ungroup_work); work 712 drivers/s390/cio/chp.c static void cfg_func(struct work_struct 
*work) work 150 drivers/s390/cio/css.c static void css_sch_todo(struct work_struct *work); work 615 drivers/s390/cio/css.c static void css_sch_todo(struct work_struct *work) work 621 drivers/s390/cio/css.c sch = container_of(work, struct subchannel, todo_work); work 733 drivers/s390/cio/device.c static void ccw_device_todo(struct work_struct *work); work 2068 drivers/s390/cio/device.c static void ccw_device_todo(struct work_struct *work) work 2075 drivers/s390/cio/device.c priv = container_of(work, struct ccw_device_private, todo_work); work 81 drivers/s390/cio/vfio_ccw_drv.c static void vfio_ccw_sch_io_todo(struct work_struct *work) work 87 drivers/s390/cio/vfio_ccw_drv.c private = container_of(work, struct vfio_ccw_private, io_work); work 458 drivers/s390/crypto/zcrypt_msgtype50.c struct completion work; work 473 drivers/s390/crypto/zcrypt_msgtype50.c ap_msg.private = &work; work 477 drivers/s390/crypto/zcrypt_msgtype50.c init_completion(&work); work 479 drivers/s390/crypto/zcrypt_msgtype50.c rc = wait_for_completion_interruptible(&work); work 504 drivers/s390/crypto/zcrypt_msgtype50.c struct completion work; work 519 drivers/s390/crypto/zcrypt_msgtype50.c ap_msg.private = &work; work 523 drivers/s390/crypto/zcrypt_msgtype50.c init_completion(&work); work 525 drivers/s390/crypto/zcrypt_msgtype50.c rc = wait_for_completion_interruptible(&work); work 35 drivers/s390/crypto/zcrypt_msgtype6.c struct completion work; work 958 drivers/s390/crypto/zcrypt_msgtype6.c complete(&(resp_type->work)); work 1001 drivers/s390/crypto/zcrypt_msgtype6.c complete(&(resp_type->work)); work 1033 drivers/s390/crypto/zcrypt_msgtype6.c init_completion(&resp_type.work); work 1035 drivers/s390/crypto/zcrypt_msgtype6.c rc = wait_for_completion_interruptible(&resp_type.work); work 1077 drivers/s390/crypto/zcrypt_msgtype6.c init_completion(&resp_type.work); work 1079 drivers/s390/crypto/zcrypt_msgtype6.c rc = wait_for_completion_interruptible(&resp_type.work); work 1136 drivers/s390/crypto/zcrypt_msgtype6.c init_completion(&rtype->work); work 1138 drivers/s390/crypto/zcrypt_msgtype6.c rc = wait_for_completion_interruptible(&rtype->work); work 1238 drivers/s390/crypto/zcrypt_msgtype6.c init_completion(&rtype->work); work 1240 drivers/s390/crypto/zcrypt_msgtype6.c rc = wait_for_completion_interruptible(&rtype->work); work 1299 drivers/s390/crypto/zcrypt_msgtype6.c init_completion(&rtype->work); work 1301 drivers/s390/crypto/zcrypt_msgtype6.c rc = wait_for_completion_interruptible(&rtype->work); work 1712 drivers/s390/net/lcs.c lcs_start_kernel_thread(struct work_struct *work) work 1714 drivers/s390/net/lcs.c struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter); work 78 drivers/s390/net/qeth_core_main.c static void qeth_close_dev_handler(struct work_struct *work) work 82 drivers/s390/net/qeth_core_main.c card = container_of(work, struct qeth_card, close_dev_work); work 1320 drivers/s390/net/qeth_core_main.c static void qeth_start_kernel_thread(struct work_struct *work) work 1323 drivers/s390/net/qeth_core_main.c struct qeth_card *card = container_of(work, struct qeth_card, work 3188 drivers/s390/net/qeth_core_main.c static void qeth_buffer_reclaim_work(struct work_struct *work) work 3190 drivers/s390/net/qeth_core_main.c struct qeth_card *card = container_of(work, struct qeth_card, work 3191 drivers/s390/net/qeth_core_main.c buffer_reclaim_work.work); work 507 drivers/s390/net/qeth_l2_main.c static void qeth_l2_rx_mode_work(struct work_struct *work) work 509 drivers/s390/net/qeth_l2_main.c 
struct qeth_card *card = container_of(work, struct qeth_card, work 1155 drivers/s390/net/qeth_l2_main.c static void qeth_bridge_state_change_worker(struct work_struct *work) work 1158 drivers/s390/net/qeth_l2_main.c container_of(work, struct qeth_bridge_state_data, worker); work 1226 drivers/s390/net/qeth_l2_main.c static void qeth_bridge_host_event_worker(struct work_struct *work) work 1229 drivers/s390/net/qeth_l2_main.c container_of(work, struct qeth_bridge_host_data, worker); work 1461 drivers/s390/net/qeth_l3_main.c static void qeth_l3_rx_mode_work(struct work_struct *work) work 1463 drivers/s390/net/qeth_l3_main.c struct qeth_card *card = container_of(work, struct qeth_card, work 2511 drivers/s390/net/qeth_l3_main.c struct work_struct work; work 2516 drivers/s390/net/qeth_l3_main.c #define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work) work 2518 drivers/s390/net/qeth_l3_main.c static void qeth_l3_add_ip_worker(struct work_struct *work) work 2520 drivers/s390/net/qeth_l3_main.c struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); work 2523 drivers/s390/net/qeth_l3_main.c kfree(work); work 2526 drivers/s390/net/qeth_l3_main.c static void qeth_l3_delete_ip_worker(struct work_struct *work) work 2528 drivers/s390/net/qeth_l3_main.c struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); work 2531 drivers/s390/net/qeth_l3_main.c kfree(work); work 2596 drivers/s390/net/qeth_l3_main.c INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker); work 2598 drivers/s390/net/qeth_l3_main.c INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker); work 2606 drivers/s390/net/qeth_l3_main.c queue_work(card->cmd_wq, &ip_work->work); work 97 drivers/s390/net/smsgiucv_app.c static void smsg_event_work_fn(struct work_struct *work) work 287 drivers/s390/scsi/zfcp_aux.c static void _zfcp_status_read_scheduler(struct work_struct *work) work 289 drivers/s390/scsi/zfcp_aux.c zfcp_status_read_refill(container_of(work, struct zfcp_adapter, work 382 drivers/s390/scsi/zfcp_aux.c INIT_WORK(&adapter->events.work, zfcp_fc_post_event); work 99 drivers/s390/scsi/zfcp_fc.c void zfcp_fc_post_event(struct work_struct *work) work 103 drivers/s390/scsi/zfcp_fc.c struct zfcp_fc_events *events = container_of(work, work 104 drivers/s390/scsi/zfcp_fc.c struct zfcp_fc_events, work); work 143 drivers/s390/scsi/zfcp_fc.c queue_work(adapter->work_queue, &adapter->events.work); work 171 drivers/s390/scsi/zfcp_fc.c static void zfcp_fc_wka_port_offline(struct work_struct *work) work 173 drivers/s390/scsi/zfcp_fc.c struct delayed_work *dw = to_delayed_work(work); work 175 drivers/s390/scsi/zfcp_fc.c container_of(dw, struct zfcp_fc_wka_port, work); work 196 drivers/s390/scsi/zfcp_fc.c schedule_delayed_work(&wka_port->work, HZ / 100); work 210 drivers/s390/scsi/zfcp_fc.c INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline); work 215 drivers/s390/scsi/zfcp_fc.c cancel_delayed_work_sync(&wka->work); work 434 drivers/s390/scsi/zfcp_fc.c void zfcp_fc_port_did_lookup(struct work_struct *work) work 437 drivers/s390/scsi/zfcp_fc.c struct zfcp_port *port = container_of(work, struct zfcp_port, work 569 drivers/s390/scsi/zfcp_fc.c void zfcp_fc_link_test_work(struct work_struct *work) work 572 drivers/s390/scsi/zfcp_fc.c container_of(work, struct zfcp_port, test_link_work); work 784 drivers/s390/scsi/zfcp_fc.c void zfcp_fc_scan_ports(struct work_struct *work) work 786 drivers/s390/scsi/zfcp_fc.c struct delayed_work *dw = to_delayed_work(work); work 914 drivers/s390/scsi/zfcp_fc.c void zfcp_fc_sym_name_update(struct 
work_struct *work) work 916 drivers/s390/scsi/zfcp_fc.c struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, work 54 drivers/s390/scsi/zfcp_fc.h struct work_struct work; work 204 drivers/s390/scsi/zfcp_fc.h struct delayed_work work; work 777 drivers/s390/scsi/zfcp_scsi.c void zfcp_scsi_rport_work(struct work_struct *work) work 779 drivers/s390/scsi/zfcp_scsi.c struct zfcp_port *port = container_of(work, struct zfcp_port, work 34 drivers/s390/scsi/zfcp_unit.c static void zfcp_unit_scsi_scan_work(struct work_struct *work) work 36 drivers/s390/scsi/zfcp_unit.c struct zfcp_unit *unit = container_of(work, struct zfcp_unit, work 678 drivers/scsi/NCR5380.c static void NCR5380_main(struct work_struct *work) work 681 drivers/scsi/NCR5380.c container_of(work, struct NCR5380_hostdata, main_task); work 275 drivers/scsi/NCR5380.h static void NCR5380_main(struct work_struct *work); work 2650 drivers/scsi/aacraid/aacraid.h static inline void aac_safw_rescan_worker(struct work_struct *work) work 2652 drivers/scsi/aacraid/aacraid.h struct aac_dev *dev = container_of(to_delayed_work(work), work 2673 drivers/scsi/aacraid/aacraid.h void aac_safw_rescan_worker(struct work_struct *work); work 1295 drivers/scsi/aha152x.c static void run(struct work_struct *work) work 129 drivers/scsi/arcmsr/arcmsr_hba.c static void arcmsr_message_isr_bh_fn(struct work_struct *work); work 783 drivers/scsi/arcmsr/arcmsr_hba.c static void arcmsr_message_isr_bh_fn(struct work_struct *work) work 785 drivers/scsi/arcmsr/arcmsr_hba.c struct AdapterControlBlock *acb = container_of(work, work 1828 drivers/scsi/be2iscsi/be_main.c static void beiscsi_mcc_work(struct work_struct *work) work 1833 drivers/scsi/be2iscsi/be_main.c pbe_eq = container_of(work, struct be_eq_obj, mcc_work); work 5147 drivers/scsi/be2iscsi/be_main.c static void beiscsi_boot_work(struct work_struct *work) work 5150 drivers/scsi/be2iscsi/be_main.c container_of(work, struct beiscsi_hba, boot_work); work 5193 drivers/scsi/be2iscsi/be_main.c static void beiscsi_eqd_update_work(struct work_struct *work) work 5205 drivers/scsi/be2iscsi/be_main.c phba = container_of(work, struct beiscsi_hba, eqd_update.work); work 5425 drivers/scsi/be2iscsi/be_main.c static void beiscsi_sess_work(struct work_struct *work) work 5429 drivers/scsi/be2iscsi/be_main.c phba = container_of(work, struct beiscsi_hba, sess_work); work 5438 drivers/scsi/be2iscsi/be_main.c static void beiscsi_recover_port(struct work_struct *work) work 5442 drivers/scsi/be2iscsi/be_main.c phba = container_of(work, struct beiscsi_hba, recover_port.work); work 26 drivers/scsi/bfa/bfad_im.c static void bfad_im_itnim_work_handler(struct work_struct *work); work 608 drivers/scsi/bfa/bfad_im.c bfad_im_port_delete_handler(struct work_struct *work) work 611 drivers/scsi/bfa/bfad_im.c container_of(work, struct bfad_im_port_s, port_delete_work); work 671 drivers/scsi/bfa/bfad_im.c static void bfad_aen_im_notify_handler(struct work_struct *work) work 674 drivers/scsi/bfa/bfad_im.c container_of(work, struct bfad_im_s, aen_im_notify_work); work 1104 drivers/scsi/bfa/bfad_im.c bfad_im_itnim_work_handler(struct work_struct *work) work 1106 drivers/scsi/bfa/bfad_im.c struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s, work 83 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_destroy_work(struct work_struct *work); work 649 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_work *work, *tmp; work 661 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_for_each_entry_safe(work, tmp, &work_list, list) { work 662 
drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_del_init(&work->list); work 663 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_process_cq_compl(work->tgt, work->wqe); work 664 drivers/scsi/bnx2fc/bnx2fc_fcoe.c kfree(work); work 1694 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_destroy_work(struct work_struct *work) work 1699 drivers/scsi/bnx2fc/bnx2fc_fcoe.c port = container_of(work, struct fcoe_port, destroy_work); work 2645 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_work *work, *tmp; work 2656 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_for_each_entry_safe(work, tmp, &p->work_list, list) { work 2657 drivers/scsi/bnx2fc/bnx2fc_fcoe.c list_del_init(&work->list); work 2658 drivers/scsi/bnx2fc/bnx2fc_fcoe.c bnx2fc_process_cq_compl(work->tgt, work->wqe); work 2659 drivers/scsi/bnx2fc/bnx2fc_fcoe.c kfree(work); work 532 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_unsol_els_work(struct work_struct *work) work 539 drivers/scsi/bnx2fc/bnx2fc_hwi.c unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); work 992 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_work *work; work 993 drivers/scsi/bnx2fc/bnx2fc_hwi.c work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); work 994 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!work) work 997 drivers/scsi/bnx2fc/bnx2fc_hwi.c INIT_LIST_HEAD(&work->list); work 998 drivers/scsi/bnx2fc/bnx2fc_hwi.c work->tgt = tgt; work 999 drivers/scsi/bnx2fc/bnx2fc_hwi.c work->wqe = wqe; work 1000 drivers/scsi/bnx2fc/bnx2fc_hwi.c return work; work 1008 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct bnx2fc_work *work; work 1013 drivers/scsi/bnx2fc/bnx2fc_hwi.c work = bnx2fc_alloc_work(tgt, wqe); work 1014 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (work) { work 1015 drivers/scsi/bnx2fc/bnx2fc_hwi.c list_add_tail(&work->list, &fps->work_list); work 40 drivers/scsi/bnx2fc/bnx2fc_io.c static void bnx2fc_cmd_timeout(struct work_struct *work) work 42 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd, work 43 drivers/scsi/bnx2fc/bnx2fc_io.c timeout_work.work); work 1854 drivers/scsi/bnx2i/bnx2i_hwi.c struct bnx2i_work *work, *tmp; work 1865 drivers/scsi/bnx2i/bnx2i_hwi.c list_for_each_entry_safe(work, tmp, &work_list, list) { work 1866 drivers/scsi/bnx2i/bnx2i_hwi.c list_del_init(&work->list); work 1868 drivers/scsi/bnx2i/bnx2i_hwi.c bnx2i_process_scsi_cmd_resp(work->session, work 1869 drivers/scsi/bnx2i/bnx2i_hwi.c work->bnx2i_conn, work 1870 drivers/scsi/bnx2i/bnx2i_hwi.c &work->cqe); work 1871 drivers/scsi/bnx2i/bnx2i_hwi.c atomic_dec(&work->bnx2i_conn->work_cnt); work 1872 drivers/scsi/bnx2i/bnx2i_hwi.c kfree(work); work 435 drivers/scsi/bnx2i/bnx2i_init.c struct bnx2i_work *work, *tmp; work 444 drivers/scsi/bnx2i/bnx2i_init.c list_for_each_entry_safe(work, tmp, &p->work_list, list) { work 445 drivers/scsi/bnx2i/bnx2i_init.c list_del_init(&work->list); work 446 drivers/scsi/bnx2i/bnx2i_init.c bnx2i_process_scsi_cmd_resp(work->session, work 447 drivers/scsi/bnx2i/bnx2i_init.c work->bnx2i_conn, &work->cqe); work 448 drivers/scsi/bnx2i/bnx2i_init.c kfree(work); work 1481 drivers/scsi/bnx2i/bnx2i_iscsi.c struct bnx2i_work *work, *tmp; work 1494 drivers/scsi/bnx2i/bnx2i_iscsi.c list_for_each_entry_safe(work, tmp, work 1496 drivers/scsi/bnx2i/bnx2i_iscsi.c if (work->session == conn->session && work 1497 drivers/scsi/bnx2i/bnx2i_iscsi.c work->bnx2i_conn == bnx2i_conn) { work 1498 drivers/scsi/bnx2i/bnx2i_iscsi.c list_del_init(&work->list); work 1499 drivers/scsi/bnx2i/bnx2i_iscsi.c kfree(work); work 3975 drivers/scsi/csiostor/csio_hw.c 
csio_evtq_worker(struct work_struct *work) work 3977 drivers/scsi/csiostor/csio_hw.c struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); work 133 drivers/scsi/cxlflash/cxl_hw.c struct cxl_ioctl_start_work work = { 0 }; work 135 drivers/scsi/cxlflash/cxl_hw.c work.num_interrupts = irqs; work 136 drivers/scsi/cxlflash/cxl_hw.c work.flags = CXL_START_WORK_NUM_IRQS; work 138 drivers/scsi/cxlflash/cxl_hw.c return cxl_start_work(ctx_cookie, &work); work 3220 drivers/scsi/cxlflash/main.c static void cxlflash_worker_thread(struct work_struct *work) work 3222 drivers/scsi/cxlflash/main.c struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, work 102 drivers/scsi/device_handler/scsi_dh_alua.c static void alua_rtpg_work(struct work_struct *work); work 792 drivers/scsi/device_handler/scsi_dh_alua.c static void alua_rtpg_work(struct work_struct *work) work 795 drivers/scsi/device_handler/scsi_dh_alua.c container_of(work, struct alua_port_group, rtpg_work.work); work 237 drivers/scsi/device_handler/scsi_dh_rdac.c static void send_mode_select(struct work_struct *work); work 527 drivers/scsi/device_handler/scsi_dh_rdac.c static void send_mode_select(struct work_struct *work) work 530 drivers/scsi/device_handler/scsi_dh_rdac.c container_of(work, struct rdac_controller, ms_work); work 775 drivers/scsi/esas2r/esas2r.h struct delayed_work work; work 1782 drivers/scsi/esas2r/esas2r_main.c esas2r_firmware_event_work(struct work_struct *work) work 1785 drivers/scsi/esas2r/esas2r_main.c container_of(work, struct esas2r_fw_event_work, work.work); work 1852 drivers/scsi/esas2r/esas2r_main.c INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work); work 1854 drivers/scsi/esas2r/esas2r_main.c smp_processor_id(), a->fw_event_q, &fw_event->work, work 1300 drivers/scsi/fcoe/fcoe.c flush_work(&p->work); work 1433 drivers/scsi/fcoe/fcoe.c schedule_work_on(cpu, &fps->work); work 1736 drivers/scsi/fcoe/fcoe.c static void fcoe_receive_work(struct work_struct *work) work 1742 drivers/scsi/fcoe/fcoe.c p = container_of(work, struct fcoe_percpu_s, work); work 2094 drivers/scsi/fcoe/fcoe.c static void fcoe_destroy_work(struct work_struct *work) work 2101 drivers/scsi/fcoe/fcoe.c port = container_of(work, struct fcoe_port, destroy_work); work 2346 drivers/scsi/fcoe/fcoe.c flush_work(&pp->work); work 2493 drivers/scsi/fcoe/fcoe.c INIT_WORK(&p->work, fcoe_receive_work); work 1794 drivers/scsi/fcoe/fcoe_ctlr.c static void fcoe_ctlr_timer_work(struct work_struct *work) work 1806 drivers/scsi/fcoe/fcoe_ctlr.c fip = container_of(work, struct fcoe_ctlr, timer_work); work 715 drivers/scsi/fcoe/fcoe_sysfs.c struct work_struct *work) work 726 drivers/scsi/fcoe/fcoe_sysfs.c return queue_work(fcoe_ctlr_work_q(ctlr), work); work 756 drivers/scsi/fcoe/fcoe_sysfs.c struct delayed_work *work, work 768 drivers/scsi/fcoe/fcoe_sysfs.c return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay); work 902 drivers/scsi/fcoe/fcoe_sysfs.c static void fcoe_fcf_device_final_delete(struct work_struct *work) work 905 drivers/scsi/fcoe/fcoe_sysfs.c container_of(work, struct fcoe_fcf_device, delete_work); work 926 drivers/scsi/fcoe/fcoe_sysfs.c static void fip_timeout_deleted_fcf(struct work_struct *work) work 929 drivers/scsi/fcoe/fcoe_sysfs.c container_of(work, struct fcoe_fcf_device, dev_loss_work.work); work 115 drivers/scsi/fdomain.c struct work_struct work; work 261 drivers/scsi/fdomain.c static void fdomain_work(struct work_struct *work) work 263 drivers/scsi/fdomain.c struct fdomain *fd = container_of(work, 
struct fdomain, work); work 392 drivers/scsi/fdomain.c schedule_work(&fd->work); work 543 drivers/scsi/fdomain.c INIT_WORK(&fd->work, fdomain_work); work 574 drivers/scsi/fdomain.c cancel_work_sync(&fd->work); work 333 drivers/scsi/fnic/fnic.h void fnic_handle_frame(struct work_struct *work); work 334 drivers/scsi/fnic/fnic.h void fnic_handle_link(struct work_struct *work); work 335 drivers/scsi/fnic/fnic.h void fnic_handle_event(struct work_struct *work); work 368 drivers/scsi/fnic/fnic.h void fnic_handle_fip_frame(struct work_struct *work); work 49 drivers/scsi/fnic/fnic_fcs.c void fnic_handle_link(struct work_struct *work) work 51 drivers/scsi/fnic/fnic_fcs.c struct fnic *fnic = container_of(work, struct fnic, link_work); work 191 drivers/scsi/fnic/fnic_fcs.c void fnic_handle_frame(struct work_struct *work) work 193 drivers/scsi/fnic/fnic_fcs.c struct fnic *fnic = container_of(work, struct fnic, frame_work); work 244 drivers/scsi/fnic/fnic_fcs.c void fnic_handle_event(struct work_struct *work) work 246 drivers/scsi/fnic/fnic_fcs.c struct fnic *fnic = container_of(work, struct fnic, event_work); work 609 drivers/scsi/fnic/fnic_fcs.c void fnic_handle_fip_frame(struct work_struct *work) work 611 drivers/scsi/fnic/fnic_fcs.c struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); work 123 drivers/scsi/hisi_sas/hisi_sas.h struct work_struct work; work 130 drivers/scsi/hisi_sas/hisi_sas.h .work = __WORK_INITIALIZER(r.work, \ work 585 drivers/scsi/hisi_sas/hisi_sas.h extern void hisi_sas_rst_work_handler(struct work_struct *work); work 586 drivers/scsi/hisi_sas/hisi_sas.h extern void hisi_sas_sync_rst_work_handler(struct work_struct *work); work 597 drivers/scsi/hisi_sas/hisi_sas.h extern void hisi_sas_debugfs_work_handler(struct work_struct *work); work 852 drivers/scsi/hisi_sas/hisi_sas_main.c static void hisi_sas_phyup_work(struct work_struct *work) work 855 drivers/scsi/hisi_sas/hisi_sas_main.c container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); work 865 drivers/scsi/hisi_sas/hisi_sas_main.c static void hisi_sas_linkreset_work(struct work_struct *work) work 868 drivers/scsi/hisi_sas/hisi_sas_main.c container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); work 1874 drivers/scsi/hisi_sas/hisi_sas_main.c queue_work(hisi_hba->wq, &r.work); work 2465 drivers/scsi/hisi_sas/hisi_sas_main.c void hisi_sas_rst_work_handler(struct work_struct *work) work 2468 drivers/scsi/hisi_sas/hisi_sas_main.c container_of(work, struct hisi_hba, rst_work); work 2474 drivers/scsi/hisi_sas/hisi_sas_main.c void hisi_sas_sync_rst_work_handler(struct work_struct *work) work 2477 drivers/scsi/hisi_sas/hisi_sas_main.c container_of(work, struct hisi_sas_rst, work); work 3681 drivers/scsi/hisi_sas/hisi_sas_main.c void hisi_sas_debugfs_work_handler(struct work_struct *work) work 3684 drivers/scsi/hisi_sas/hisi_sas_main.c container_of(work, struct hisi_hba, debugfs_work); work 604 drivers/scsi/hosts.c int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work) work 615 drivers/scsi/hosts.c return queue_work(shost->work_q, work); work 318 drivers/scsi/hpsa.c static void hpsa_command_resubmit_worker(struct work_struct *work); work 2475 drivers/scsi/hpsa.c INIT_WORK(&c->work, hpsa_command_resubmit_worker); work 2476 drivers/scsi/hpsa.c queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); work 5569 drivers/scsi/hpsa.c static void hpsa_command_resubmit_worker(struct work_struct *work) work 5573 drivers/scsi/hpsa.c struct CommandList *c = container_of(work, struct CommandList, 
work); work 8497 drivers/scsi/hpsa.c static void hpsa_event_monitor_worker(struct work_struct *work) work 8499 drivers/scsi/hpsa.c struct ctlr_info *h = container_of(to_delayed_work(work), work 8522 drivers/scsi/hpsa.c static void hpsa_rescan_ctlr_worker(struct work_struct *work) work 8525 drivers/scsi/hpsa.c struct ctlr_info *h = container_of(to_delayed_work(work), work 8551 drivers/scsi/hpsa.c static void hpsa_monitor_ctlr_worker(struct work_struct *work) work 8554 drivers/scsi/hpsa.c struct ctlr_info *h = container_of(to_delayed_work(work), work 437 drivers/scsi/hpsa_cmd.h struct work_struct work; work 4689 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_rport_add_thread(struct work_struct *work) work 4691 drivers/scsi/ibmvscsi/ibmvfc.c struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host, work 408 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c static void ibmvscsis_disconnect(struct work_struct *work) work 410 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c struct scsi_info *vscsi = container_of(work, struct scsi_info, work 2423 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c queue_work(vscsi->work_q, &cmd->work); work 2437 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c queue_work(vscsi->work_q, &cmd->work); work 2832 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c static void ibmvscsis_scheduler(struct work_struct *work) work 2834 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd, work 2835 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c work); work 2893 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c INIT_WORK(&cmd->work, ibmvscsis_scheduler); work 160 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h struct work_struct work; work 719 drivers/scsi/imm.c static void imm_interrupt(struct work_struct *work) work 721 drivers/scsi/imm.c imm_struct *dev = container_of(work, imm_struct, imm_tq.work); work 3324 drivers/scsi/ipr.c static void ipr_add_remove_thread(struct work_struct *work) work 3330 drivers/scsi/ipr.c container_of(work, struct ipr_ioa_cfg, scsi_add_work_q); work 3394 drivers/scsi/ipr.c static void ipr_worker_thread(struct work_struct *work) work 3399 drivers/scsi/ipr.c container_of(work, struct ipr_ioa_cfg, work_q); work 8788 drivers/scsi/ipr.c static void ipr_reset_reset_work(struct work_struct *work) work 8790 drivers/scsi/ipr.c struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work); work 8821 drivers/scsi/ipr.c INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); work 8822 drivers/scsi/ipr.c queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); work 1617 drivers/scsi/ipr.h struct work_struct work; work 477 drivers/scsi/libfc/fc_disc.c static void fc_disc_timeout(struct work_struct *work) work 479 drivers/scsi/libfc/fc_disc.c struct fc_disc *disc = container_of(work, work 481 drivers/scsi/libfc/fc_disc.c disc_work.work); work 760 drivers/scsi/libfc/fc_exch.c static void fc_exch_timeout(struct work_struct *work) work 762 drivers/scsi/libfc/fc_exch.c struct fc_exch *ep = container_of(work, struct fc_exch, work 763 drivers/scsi/libfc/fc_exch.c timeout_work.work); work 1542 drivers/scsi/libfc/fc_lport.c static void fc_lport_timeout(struct work_struct *work) work 1545 drivers/scsi/libfc/fc_lport.c container_of(work, struct fc_lport, work 1546 drivers/scsi/libfc/fc_lport.c retry_work.work); work 256 drivers/scsi/libfc/fc_rport.c static void fc_rport_work(struct work_struct *work) work 260 drivers/scsi/libfc/fc_rport.c container_of(work, struct fc_rport_priv, event_work); work 568 drivers/scsi/libfc/fc_rport.c static void fc_rport_timeout(struct 
work_struct *work) work 571 drivers/scsi/libfc/fc_rport.c container_of(work, struct fc_rport_priv, retry_work.work); work 1585 drivers/scsi/libiscsi.c static void iscsi_xmitworker(struct work_struct *work) work 1588 drivers/scsi/libiscsi.c container_of(work, struct iscsi_conn, xmitwork); work 231 drivers/scsi/libsas/sas_discover.c static void sas_suspend_devices(struct work_struct *work) work 235 drivers/scsi/libsas/sas_discover.c struct sas_discovery_event *ev = to_sas_discovery_event(work); work 262 drivers/scsi/libsas/sas_discover.c static void sas_resume_devices(struct work_struct *work) work 264 drivers/scsi/libsas/sas_discover.c struct sas_discovery_event *ev = to_sas_discovery_event(work); work 434 drivers/scsi/libsas/sas_discover.c static void sas_discover_domain(struct work_struct *work) work 438 drivers/scsi/libsas/sas_discover.c struct sas_discovery_event *ev = to_sas_discovery_event(work); work 495 drivers/scsi/libsas/sas_discover.c static void sas_revalidate_domain(struct work_struct *work) work 498 drivers/scsi/libsas/sas_discover.c struct sas_discovery_event *ev = to_sas_discovery_event(work); work 538 drivers/scsi/libsas/sas_discover.c queue_work(ha->disco_q, &sw->work); work 564 drivers/scsi/libsas/sas_discover.c sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha); work 589 drivers/scsi/libsas/sas_discover.c INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]); work 26 drivers/scsi/libsas/sas_event.c rc = queue_work(ha->event_q, &sw->work); work 31 drivers/scsi/libsas/sas_event.c static int sas_queue_event(int event, struct sas_work *work, work 38 drivers/scsi/libsas/sas_event.c rc = sas_queue_work(ha, work); work 64 drivers/scsi/libsas/sas_event.c sas_free_event(to_asd_sas_event(&sw->work)); work 118 drivers/scsi/libsas/sas_event.c static void sas_port_event_worker(struct work_struct *work) work 120 drivers/scsi/libsas/sas_event.c struct asd_sas_event *ev = to_asd_sas_event(work); work 122 drivers/scsi/libsas/sas_event.c sas_port_event_fns[ev->event](work); work 126 drivers/scsi/libsas/sas_event.c static void sas_phy_event_worker(struct work_struct *work) work 128 drivers/scsi/libsas/sas_event.c struct asd_sas_event *ev = to_asd_sas_event(work); work 130 drivers/scsi/libsas/sas_event.c sas_phy_event_fns[ev->event](work); work 148 drivers/scsi/libsas/sas_event.c ret = sas_queue_event(event, &ev->work, ha); work 169 drivers/scsi/libsas/sas_event.c ret = sas_queue_event(event, &ev->work, ha); work 450 drivers/scsi/libsas/sas_init.c static void phy_reset_work(struct work_struct *work) work 452 drivers/scsi/libsas/sas_init.c struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work); work 457 drivers/scsi/libsas/sas_init.c static void phy_enable_work(struct work_struct *work) work 459 drivers/scsi/libsas/sas_init.c struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work); work 64 drivers/scsi/libsas/sas_internal.h void sas_porte_bytes_dmaed(struct work_struct *work); work 65 drivers/scsi/libsas/sas_internal.h void sas_porte_broadcast_rcvd(struct work_struct *work); work 66 drivers/scsi/libsas/sas_internal.h void sas_porte_link_reset_err(struct work_struct *work); work 67 drivers/scsi/libsas/sas_internal.h void sas_porte_timer_event(struct work_struct *work); work 68 drivers/scsi/libsas/sas_internal.h void sas_porte_hard_reset(struct work_struct *work); work 88 drivers/scsi/libsas/sas_internal.h void sas_hae_reset(struct work_struct *work); work 17 drivers/scsi/libsas/sas_phy.c static void sas_phye_loss_of_signal(struct 
work_struct *work) work 19 drivers/scsi/libsas/sas_phy.c struct asd_sas_event *ev = to_asd_sas_event(work); work 26 drivers/scsi/libsas/sas_phy.c static void sas_phye_oob_done(struct work_struct *work) work 28 drivers/scsi/libsas/sas_phy.c struct asd_sas_event *ev = to_asd_sas_event(work); work 34 drivers/scsi/libsas/sas_phy.c static void sas_phye_oob_error(struct work_struct *work) work 36 drivers/scsi/libsas/sas_phy.c struct asd_sas_event *ev = to_asd_sas_event(work); work 63 drivers/scsi/libsas/sas_phy.c static void sas_phye_spinup_hold(struct work_struct *work) work 65 drivers/scsi/libsas/sas_phy.c struct asd_sas_event *ev = to_asd_sas_event(work); work 75 drivers/scsi/libsas/sas_phy.c static void sas_phye_resume_timeout(struct work_struct *work) work 77 drivers/scsi/libsas/sas_phy.c struct asd_sas_event *ev = to_asd_sas_event(work); work 92 drivers/scsi/libsas/sas_phy.c static void sas_phye_shutdown(struct work_struct *work) work 94 drivers/scsi/libsas/sas_phy.c struct asd_sas_event *ev = to_asd_sas_event(work); work 264 drivers/scsi/libsas/sas_port.c void sas_porte_bytes_dmaed(struct work_struct *work) work 266 drivers/scsi/libsas/sas_port.c struct asd_sas_event *ev = to_asd_sas_event(work); work 272 drivers/scsi/libsas/sas_port.c void sas_porte_broadcast_rcvd(struct work_struct *work) work 274 drivers/scsi/libsas/sas_port.c struct asd_sas_event *ev = to_asd_sas_event(work); work 290 drivers/scsi/libsas/sas_port.c void sas_porte_link_reset_err(struct work_struct *work) work 292 drivers/scsi/libsas/sas_port.c struct asd_sas_event *ev = to_asd_sas_event(work); work 298 drivers/scsi/libsas/sas_port.c void sas_porte_timer_event(struct work_struct *work) work 300 drivers/scsi/libsas/sas_port.c struct asd_sas_event *ev = to_asd_sas_event(work); work 306 drivers/scsi/libsas/sas_port.c void sas_porte_hard_reset(struct work_struct *work) work 308 drivers/scsi/libsas/sas_port.c struct asd_sas_event *ev = to_asd_sas_event(work); work 1237 drivers/scsi/lpfc/lpfc_init.c lpfc_hb_eq_delay_work(struct work_struct *work) work 1239 drivers/scsi/lpfc/lpfc_init.c struct lpfc_hba *phba = container_of(to_delayed_work(work), work 2146 drivers/scsi/lpfc/lpfc_nvmet.c lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work) work 2150 drivers/scsi/lpfc/lpfc_nvmet.c container_of(work, struct lpfc_nvmet_ctxbuf, defer_work); work 13717 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_sp_process_cq(struct work_struct *work) work 13719 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); work 13731 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_dly_sp_process_cq(struct work_struct *work) work 13733 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = container_of(to_delayed_work(work), work 14163 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_hba_process_cq(struct work_struct *work) work 14165 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); work 14177 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_dly_hba_process_cq(struct work_struct *work) work 14179 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = container_of(to_delayed_work(work), work 228 drivers/scsi/megaraid/megaraid_sas_base.c void megasas_fusion_ocr_wq(struct work_struct *work); work 2272 drivers/scsi/megaraid/megaraid_sas_base.c process_fw_state_change_wq(struct work_struct *work); work 3109 drivers/scsi/megaraid/megaraid_sas_base.c static void megasas_aen_polling(struct work_struct *work); work 3813 drivers/scsi/megaraid/megaraid_sas_base.c process_fw_state_change_wq(struct 
work_struct *work) work 3816 drivers/scsi/megaraid/megaraid_sas_base.c container_of(work, struct megasas_instance, work_init); work 8682 drivers/scsi/megaraid/megaraid_sas_base.c megasas_aen_polling(struct work_struct *work) work 8685 drivers/scsi/megaraid/megaraid_sas_base.c container_of(work, struct megasas_aen_event, hotplug_work.work); work 1878 drivers/scsi/megaraid/megaraid_sas_fusion.c megasas_fault_detect_work(struct work_struct *work) work 1881 drivers/scsi/megaraid/megaraid_sas_fusion.c container_of(work, struct megasas_instance, work 1882 drivers/scsi/megaraid/megaraid_sas_fusion.c fw_fault_work.work); work 5146 drivers/scsi/megaraid/megaraid_sas_fusion.c void megasas_fusion_ocr_wq(struct work_struct *work) work 5149 drivers/scsi/megaraid/megaraid_sas_fusion.c container_of(work, struct megasas_instance, work_init); work 1378 drivers/scsi/megaraid/megaraid_sas_fusion.h void megasas_fusion_ocr_wq(struct work_struct *work); work 601 drivers/scsi/mpt3sas/mpt3sas_base.c _base_fault_reset_work(struct work_struct *work) work 604 drivers/scsi/mpt3sas/mpt3sas_base.c container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work); work 68 drivers/scsi/mpt3sas/mpt3sas_scsih.c static void _firmware_event_work(struct work_struct *work); work 201 drivers/scsi/mpt3sas/mpt3sas_scsih.c struct work_struct work; work 3139 drivers/scsi/mpt3sas/mpt3sas_scsih.c INIT_WORK(&fw_event->work, _firmware_event_work); work 3141 drivers/scsi/mpt3sas/mpt3sas_scsih.c queue_work(ioc->firmware_event_thread, &fw_event->work); work 3272 drivers/scsi/mpt3sas/mpt3sas_scsih.c if (cancel_work_sync(&fw_event->work)) work 9430 drivers/scsi/mpt3sas/mpt3sas_scsih.c _firmware_event_work(struct work_struct *work) work 9432 drivers/scsi/mpt3sas/mpt3sas_scsih.c struct fw_event_work *fw_event = container_of(work, work 9433 drivers/scsi/mpt3sas/mpt3sas_scsih.c struct fw_event_work, work); work 1876 drivers/scsi/mvsas/mv_sas.c static void mvs_work_queue(struct work_struct *work) work 1878 drivers/scsi/mvsas/mv_sas.c struct delayed_work *dw = container_of(work, struct delayed_work, work); work 1731 drivers/scsi/mvumi.c static void mvumi_scan_events(struct work_struct *work) work 1734 drivers/scsi/mvumi.c container_of(work, struct mvumi_events_wq, work_q); work 29 drivers/scsi/myrb.c static void myrb_monitor(struct work_struct *work); work 2413 drivers/scsi/myrb.c static void myrb_monitor(struct work_struct *work) work 2415 drivers/scsi/myrb.c struct myrb_hba *cb = container_of(work, work 2416 drivers/scsi/myrb.c struct myrb_hba, monitor_work.work); work 2107 drivers/scsi/myrs.c static void myrs_monitor(struct work_struct *work) work 2109 drivers/scsi/myrs.c struct myrs_hba *cs = container_of(work, struct myrs_hba, work 2110 drivers/scsi/myrs.c monitor_work.work); work 1501 drivers/scsi/pm8001/pm8001_hwi.c void pm8001_work_fn(struct work_struct *work) work 1503 drivers/scsi/pm8001/pm8001_hwi.c struct pm8001_work *pw = container_of(work, struct pm8001_work, work); work 1710 drivers/scsi/pm8001/pm8001_hwi.c INIT_WORK(&pw->work, pm8001_work_fn); work 1711 drivers/scsi/pm8001/pm8001_hwi.c queue_work(pm8001_wq, &pw->work); work 547 drivers/scsi/pm8001/pm8001_sas.h struct work_struct work; work 690 drivers/scsi/pm8001/pm8001_sas.h void pm8001_work_fn(struct work_struct *work); work 613 drivers/scsi/ppa.c static void ppa_interrupt(struct work_struct *work) work 615 drivers/scsi/ppa.c ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work); work 235 drivers/scsi/qedf/qedf.h struct work_struct work; work 252 drivers/scsi/qedf/qedf.h 
struct work_struct work; work 535 drivers/scsi/qedf/qedf.h extern void qedf_fp_io_handler(struct work_struct *work); work 537 drivers/scsi/qedf/qedf.h extern void qedf_wq_grcdump(struct work_struct *work); work 538 drivers/scsi/qedf/qedf.h void qedf_stag_change_work(struct work_struct *work); work 18 drivers/scsi/qedf/qedf_io.c static void qedf_cmd_timeout(struct work_struct *work) work 22 drivers/scsi/qedf/qedf_io.c container_of(work, struct qedf_ioreq, timeout_work.work); work 169 drivers/scsi/qedf/qedf_io.c static void qedf_handle_rrq(struct work_struct *work) work 172 drivers/scsi/qedf/qedf_io.c container_of(work, struct qedf_ioreq, rrq_work.work); work 2584 drivers/scsi/qedf/qedf_io.c INIT_WORK(&io_work->work, qedf_fp_io_handler); work 2592 drivers/scsi/qedf/qedf_io.c queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work); work 158 drivers/scsi/qedf/qedf_main.c static void qedf_handle_link_update(struct work_struct *work) work 161 drivers/scsi/qedf/qedf_main.c container_of(work, struct qedf_ctx, link_update.work); work 350 drivers/scsi/qedf/qedf_main.c static void qedf_link_recovery(struct work_struct *work) work 353 drivers/scsi/qedf/qedf_main.c container_of(work, struct qedf_ctx, link_recovery.work); work 2166 drivers/scsi/qedf/qedf_main.c INIT_WORK(&io_work->work, qedf_fp_io_handler); work 2174 drivers/scsi/qedf/qedf_main.c queue_work_on(cpu, qedf_io_wq, &io_work->work); work 2479 drivers/scsi/qedf/qedf_main.c static void qedf_ll2_process_skb(struct work_struct *work) work 2482 drivers/scsi/qedf/qedf_main.c container_of(work, struct qedf_skb_work, work); work 2544 drivers/scsi/qedf/qedf_main.c INIT_WORK(&skb_work->work, qedf_ll2_process_skb); work 2547 drivers/scsi/qedf/qedf_main.c queue_work(qedf->ll2_recv_wq, &skb_work->work); work 2558 drivers/scsi/qedf/qedf_main.c void qedf_fp_io_handler(struct work_struct *work) work 2561 drivers/scsi/qedf/qedf_main.c container_of(work, struct qedf_io_work, work); work 3674 drivers/scsi/qedf/qedf_main.c void qedf_wq_grcdump(struct work_struct *work) work 3677 drivers/scsi/qedf/qedf_main.c container_of(work, struct qedf_ctx, grcdump_work.work); work 146 drivers/scsi/qedi/qedi_fw.c static void qedi_tmf_resp_work(struct work_struct *work) work 149 drivers/scsi/qedi/qedi_fw.c container_of(work, struct qedi_cmd, tmf_work); work 738 drivers/scsi/qedi/qedi_fw.c struct qedi_work_map *work, *work_tmp; work 761 drivers/scsi/qedi/qedi_fw.c list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list, work 763 drivers/scsi/qedi/qedi_fw.c if (work->rtid == proto_itt) { work 765 drivers/scsi/qedi/qedi_fw.c qedi_cmd = work->qedi_cmd; work 775 drivers/scsi/qedi/qedi_fw.c rtid = work->rtid; work 777 drivers/scsi/qedi/qedi_fw.c list_del_init(&work->list); work 778 drivers/scsi/qedi/qedi_fw.c kfree(work); work 861 drivers/scsi/qedi/qedi_fw.c void qedi_fp_process_cqes(struct qedi_work *work) work 863 drivers/scsi/qedi/qedi_fw.c struct qedi_ctx *qedi = work->qedi; work 864 drivers/scsi/qedi/qedi_fw.c union iscsi_cqe *cqe = &work->cqe; work 873 drivers/scsi/qedi/qedi_fw.c u16 que_idx = work->que_idx; work 911 drivers/scsi/qedi/qedi_fw.c qedi_cmd = container_of(work, struct qedi_cmd, cqe_work); work 1357 drivers/scsi/qedi/qedi_fw.c static void qedi_tmf_work(struct work_struct *work) work 1360 drivers/scsi/qedi/qedi_fw.c container_of(work, struct qedi_cmd, tmf_work); work 62 drivers/scsi/qedi/qedi_gbl.h void qedi_fp_process_cqes(struct qedi_work *work); work 1158 drivers/scsi/qedi/qedi_iscsi.c static void qedi_offload_work(struct work_struct *work) work 
1161 drivers/scsi/qedi/qedi_iscsi.c container_of(work, struct qedi_endpoint, offload_work); work 662 drivers/scsi/qedi/qedi_main.c struct skb_work_list *work; work 703 drivers/scsi/qedi/qedi_main.c work = kzalloc(sizeof(*work), GFP_ATOMIC); work 704 drivers/scsi/qedi/qedi_main.c if (!work) { work 711 drivers/scsi/qedi/qedi_main.c INIT_LIST_HEAD(&work->list); work 712 drivers/scsi/qedi/qedi_main.c work->skb = skb; work 715 drivers/scsi/qedi/qedi_main.c work->vlan_id = skb_vlan_tag_get(skb); work 717 drivers/scsi/qedi/qedi_main.c if (work->vlan_id) work 718 drivers/scsi/qedi/qedi_main.c __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id); work 721 drivers/scsi/qedi/qedi_main.c list_add_tail(&work->list, &qedi->ll2_skb_list); work 787 drivers/scsi/qedi/qedi_main.c struct skb_work_list *work, *work_tmp; work 790 drivers/scsi/qedi/qedi_main.c list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) { work 791 drivers/scsi/qedi/qedi_main.c list_del(&work->list); work 792 drivers/scsi/qedi/qedi_main.c if (work->skb) work 793 drivers/scsi/qedi/qedi_main.c kfree_skb(work->skb); work 794 drivers/scsi/qedi/qedi_main.c kfree(work); work 802 drivers/scsi/qedi/qedi_main.c struct skb_work_list *work, *work_tmp; work 808 drivers/scsi/qedi/qedi_main.c list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, work 810 drivers/scsi/qedi/qedi_main.c list_del(&work->list); work 811 drivers/scsi/qedi/qedi_main.c qedi_ll2_process_skb(qedi, work->skb, work->vlan_id); work 812 drivers/scsi/qedi/qedi_main.c kfree_skb(work->skb); work 813 drivers/scsi/qedi/qedi_main.c kfree(work); work 1860 drivers/scsi/qedi/qedi_main.c struct qedi_work *work, *tmp; work 1872 drivers/scsi/qedi/qedi_main.c list_for_each_entry_safe(work, tmp, &work_list, list) { work 1873 drivers/scsi/qedi/qedi_main.c list_del_init(&work->list); work 1874 drivers/scsi/qedi/qedi_main.c qedi_fp_process_cqes(work); work 1875 drivers/scsi/qedi/qedi_main.c if (!work->is_solicited) work 1876 drivers/scsi/qedi/qedi_main.c kfree(work); work 1910 drivers/scsi/qedi/qedi_main.c struct qedi_work *work, *tmp; work 1917 drivers/scsi/qedi/qedi_main.c list_for_each_entry_safe(work, tmp, &p->work_list, list) { work 1918 drivers/scsi/qedi/qedi_main.c list_del_init(&work->list); work 1919 drivers/scsi/qedi/qedi_main.c qedi_fp_process_cqes(work); work 1920 drivers/scsi/qedi/qedi_main.c if (!work->is_solicited) work 1921 drivers/scsi/qedi/qedi_main.c kfree(work); work 4211 drivers/scsi/qla2xxx/qla_gs.c void qla_scan_work_fn(struct work_struct *work) work 4213 drivers/scsi/qla2xxx/qla_gs.c struct fab_scan *s = container_of(to_delayed_work(work), work 5465 drivers/scsi/qla2xxx/qla_init.c void qla_register_fcport_fn(struct work_struct *work) work 5467 drivers/scsi/qla2xxx/qla_init.c fc_port_t *fcport = container_of(work, struct fc_port, reg_work); work 797 drivers/scsi/qla2xxx/qla_mid.c static void qla_do_work(struct work_struct *work) work 800 drivers/scsi/qla2xxx/qla_mid.c struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work); work 175 drivers/scsi/qla2xxx/qla_nvme.c static void qla_nvme_ls_complete(struct work_struct *work) work 178 drivers/scsi/qla2xxx/qla_nvme.c container_of(work, struct nvme_private, ls_work); work 209 drivers/scsi/qla2xxx/qla_nvme.c static void qla_nvme_abort_work(struct work_struct *work) work 212 drivers/scsi/qla2xxx/qla_nvme.c container_of(work, struct nvme_private, abort_work); work 2726 drivers/scsi/qla2xxx/qla_os.c static void qla2x00_iocb_work_fn(struct work_struct *work) work 2728 
drivers/scsi/qla2xxx/qla_os.c struct scsi_qla_host *vha = container_of(work, work 5214 drivers/scsi/qla2xxx/qla_os.c LIST_HEAD(work); work 5218 drivers/scsi/qla2xxx/qla_os.c list_splice_init(&vha->work_list, &work); work 5221 drivers/scsi/qla2xxx/qla_os.c list_for_each_entry_safe(e, tmp, &work, list) { work 5318 drivers/scsi/qla2xxx/qla_os.c list_splice(&work, &vha->work_list); work 5455 drivers/scsi/qla2xxx/qla_os.c qla83xx_nic_core_unrecoverable_work(struct work_struct *work) work 5458 drivers/scsi/qla2xxx/qla_os.c container_of(work, struct qla_hw_data, nic_core_unrecoverable); work 5477 drivers/scsi/qla2xxx/qla_os.c qla83xx_idc_state_handler_work(struct work_struct *work) work 5480 drivers/scsi/qla2xxx/qla_os.c container_of(work, struct qla_hw_data, idc_state_handler); work 5523 drivers/scsi/qla2xxx/qla_os.c qla83xx_nic_core_reset_work(struct work_struct *work) work 5526 drivers/scsi/qla2xxx/qla_os.c container_of(work, struct qla_hw_data, nic_core_reset); work 5562 drivers/scsi/qla2xxx/qla_os.c qla83xx_service_idc_aen(struct work_struct *work) work 5565 drivers/scsi/qla2xxx/qla_os.c container_of(work, struct qla_hw_data, idc_aen); work 6050 drivers/scsi/qla2xxx/qla_os.c qla2x00_disable_board_on_pci_error(struct work_struct *work) work 6052 drivers/scsi/qla2xxx/qla_os.c struct qla_hw_data *ha = container_of(work, struct qla_hw_data, work 334 drivers/scsi/qla2xxx/qla_target.c void qlt_unknown_atio_work_fn(struct work_struct *work) work 336 drivers/scsi/qla2xxx/qla_target.c struct scsi_qla_host *vha = container_of(to_delayed_work(work), work 694 drivers/scsi/qla2xxx/qla_target.c void qla24xx_delete_sess_fn(struct work_struct *work) work 696 drivers/scsi/qla2xxx/qla_target.c fc_port_t *fcport = container_of(work, struct fc_port, del_work); work 947 drivers/scsi/qla2xxx/qla_target.c void qlt_free_session_done(struct work_struct *work) work 949 drivers/scsi/qla2xxx/qla_target.c struct fc_port *sess = container_of(work, struct fc_port, work 2014 drivers/scsi/qla2xxx/qla_target.c static void qlt_do_tmr_work(struct work_struct *work) work 2017 drivers/scsi/qla2xxx/qla_target.c container_of(work, struct qla_tgt_mgmt_cmd, work); work 2114 drivers/scsi/qla2xxx/qla_target.c INIT_WORK(&mcmd->work, qlt_do_tmr_work); work 2115 drivers/scsi/qla2xxx/qla_target.c queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work); work 4149 drivers/scsi/qla2xxx/qla_target.c static void qlt_do_work(struct work_struct *work) work 4151 drivers/scsi/qla2xxx/qla_target.c struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); work 4364 drivers/scsi/qla2xxx/qla_target.c INIT_WORK(&cmd->work, qlt_do_work); work 4366 drivers/scsi/qla2xxx/qla_target.c queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); work 4370 drivers/scsi/qla2xxx/qla_target.c &cmd->work); work 4373 drivers/scsi/qla2xxx/qla_target.c &cmd->work); work 4375 drivers/scsi/qla2xxx/qla_target.c queue_work(qla_tgt_wq, &cmd->work); work 4435 drivers/scsi/qla2xxx/qla_target.c INIT_WORK(&mcmd->work, qlt_do_tmr_work); work 4437 drivers/scsi/qla2xxx/qla_target.c &mcmd->work); work 6284 drivers/scsi/qla2xxx/qla_target.c static void qlt_sess_work_fn(struct work_struct *work) work 6286 drivers/scsi/qla2xxx/qla_target.c struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); work 7128 drivers/scsi/qla2xxx/qla_target.c qlt_handle_abts_recv_work(struct work_struct *work) work 7130 drivers/scsi/qla2xxx/qla_target.c struct qla_tgt_sess_op *op = container_of(work, work 7131 drivers/scsi/qla2xxx/qla_target.c struct qla_tgt_sess_op, work); 
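The qla2xxx target entries around this point show the core idiom that this whole index keeps repeating: a work_struct embedded in a larger object, INIT_WORK() binding the handler, queue_work_on() pinning execution to a chosen CPU, and container_of() recovering the enclosing object inside the handler. A minimal sketch of that shape follows — not driver code; demo_cmd, demo_wq and demo_* functions are invented, and demo_wq is assumed to have been created elsewhere with alloc_workqueue():

/* Sketch only: all demo_* names are hypothetical. */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_cmd {
	int cpuid;			/* CPU that owns this command's context */
	struct work_struct work;	/* embedded, not a pointer */
};

static struct workqueue_struct *demo_wq;	/* assumed: alloc_workqueue() at init */

static void demo_do_work(struct work_struct *work)
{
	/* Recover the command from its embedded work_struct. */
	struct demo_cmd *cmd = container_of(work, struct demo_cmd, work);

	/* ... process the command ... */
	kfree(cmd);
}

static void demo_queue_cmd(struct demo_cmd *cmd)
{
	INIT_WORK(&cmd->work, demo_do_work);
	/* Run the handler on the CPU the command arrived on. */
	queue_work_on(cmd->cpuid, demo_wq, &cmd->work);
}

Embedding the work_struct (rather than pointing at one) is what makes the container_of() recovery safe: the offset between member and enclosing object is fixed at compile time.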
work 7171 drivers/scsi/qla2xxx/qla_target.c INIT_WORK(&op->work, qlt_handle_abts_recv_work); work 7172 drivers/scsi/qla2xxx/qla_target.c queue_work(qla_tgt_wq, &op->work); work 821 drivers/scsi/qla2xxx/qla_target.h struct work_struct work; work 862 drivers/scsi/qla2xxx/qla_target.h struct work_struct work; work 957 drivers/scsi/qla2xxx/qla_target.h struct work_struct work; work 234 drivers/scsi/qla2xxx/tcm_qla2xxx.c static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) work 236 drivers/scsi/qla2xxx/tcm_qla2xxx.c struct qla_tgt_mgmt_cmd *mcmd = container_of(work, work 255 drivers/scsi/qla2xxx/tcm_qla2xxx.c static void tcm_qla2xxx_complete_free(struct work_struct *work) work 257 drivers/scsi/qla2xxx/tcm_qla2xxx.c struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); work 284 drivers/scsi/qla2xxx/tcm_qla2xxx.c INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); work 285 drivers/scsi/qla2xxx/tcm_qla2xxx.c queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); work 466 drivers/scsi/qla2xxx/tcm_qla2xxx.c static void tcm_qla2xxx_handle_data_work(struct work_struct *work) work 468 drivers/scsi/qla2xxx/tcm_qla2xxx.c struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); work 522 drivers/scsi/qla2xxx/tcm_qla2xxx.c INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); work 523 drivers/scsi/qla2xxx/tcm_qla2xxx.c queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); work 5237 drivers/scsi/qla4xxx/ql4_os.c LIST_HEAD(work); work 5240 drivers/scsi/qla4xxx/ql4_os.c list_splice_init(&ha->work_list, &work); work 5243 drivers/scsi/qla4xxx/ql4_os.c list_for_each_entry_safe(e, tmp, &work, list) { work 5281 drivers/scsi/qla4xxx/ql4_os.c static void qla4xxx_do_dpc(struct work_struct *work) work 5284 drivers/scsi/qla4xxx/ql4_os.c container_of(work, struct scsi_qla_host, dpc_work); work 3791 drivers/scsi/scsi_debug.c static void sdebug_q_cmd_wq_complete(struct work_struct *work) work 3793 drivers/scsi/scsi_debug.c struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer, work 3794 drivers/scsi/scsi_debug.c ew.work); work 3917 drivers/scsi/scsi_debug.c cancel_work_sync(&sd_dp->ew.work); work 4379 drivers/scsi/scsi_debug.c INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); work 4386 drivers/scsi/scsi_debug.c schedule_work(&sd_dp->ew.work); work 4706 drivers/scsi/scsi_debug.c char work[20]; work 4708 drivers/scsi/scsi_debug.c if (sscanf(buf, "%10s", work) == 1) { work 4709 drivers/scsi/scsi_debug.c if (strncasecmp(work, "0x", 2) == 0) { work 4710 drivers/scsi/scsi_debug.c if (kstrtoint(work + 2, 16, &opts) == 0) work 4713 drivers/scsi/scsi_debug.c if (kstrtoint(work, 10, &opts) == 0) work 130 drivers/scsi/scsi_error.c scmd_eh_abort_handler(struct work_struct *work) work 133 drivers/scsi/scsi_error.c container_of(work, struct scsi_cmnd, abort_work.work); work 526 drivers/scsi/scsi_lib.c void scsi_requeue_run_queue(struct work_struct *work) work 531 drivers/scsi/scsi_lib.c sdev = container_of(work, struct scsi_device, requeue_work); work 2411 drivers/scsi/scsi_lib.c void scsi_evt_thread(struct work_struct *work) work 2417 drivers/scsi/scsi_lib.c sdev = container_of(work, struct scsi_device, event_work); work 73 drivers/scsi/scsi_priv.h extern void scmd_eh_abort_handler(struct work_struct *work); work 94 drivers/scsi/scsi_priv.h extern void scsi_requeue_run_queue(struct work_struct *work); work 101 drivers/scsi/scsi_priv.h extern void scsi_evt_thread(struct work_struct *work); work 434 drivers/scsi/scsi_sysfs.c static void 
scsi_device_dev_release_usercontext(struct work_struct *work) work 442 drivers/scsi/scsi_sysfs.c sdev = container_of(work, struct scsi_device, ew.work); work 28 drivers/scsi/scsi_transport_fc.c static void fc_vport_sched_delete(struct work_struct *work); work 300 drivers/scsi/scsi_transport_fc.c static void fc_timeout_deleted_rport(struct work_struct *work); work 301 drivers/scsi/scsi_transport_fc.c static void fc_timeout_fail_rport_io(struct work_struct *work); work 302 drivers/scsi/scsi_transport_fc.c static void fc_scsi_scan_rport(struct work_struct *work); work 2318 drivers/scsi/scsi_transport_fc.c fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) work 2329 drivers/scsi/scsi_transport_fc.c return queue_work(fc_host_work_q(shost), work); work 2360 drivers/scsi/scsi_transport_fc.c fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, work 2372 drivers/scsi/scsi_transport_fc.c return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); work 2484 drivers/scsi/scsi_transport_fc.c fc_starget_delete(struct work_struct *work) work 2487 drivers/scsi/scsi_transport_fc.c container_of(work, struct fc_rport, stgt_delete_work); work 2499 drivers/scsi/scsi_transport_fc.c fc_rport_final_delete(struct work_struct *work) work 2502 drivers/scsi/scsi_transport_fc.c container_of(work, struct fc_rport, rport_delete_work); work 3083 drivers/scsi/scsi_transport_fc.c fc_timeout_deleted_rport(struct work_struct *work) work 3086 drivers/scsi/scsi_transport_fc.c container_of(work, struct fc_rport, dev_loss_work.work); work 3220 drivers/scsi/scsi_transport_fc.c fc_timeout_fail_rport_io(struct work_struct *work) work 3223 drivers/scsi/scsi_transport_fc.c container_of(work, struct fc_rport, fail_io_work.work); work 3237 drivers/scsi/scsi_transport_fc.c fc_scsi_scan_rport(struct work_struct *work) work 3240 drivers/scsi/scsi_transport_fc.c container_of(work, struct fc_rport, scan_work); work 3530 drivers/scsi/scsi_transport_fc.c fc_vport_sched_delete(struct work_struct *work) work 3533 drivers/scsi/scsi_transport_fc.c container_of(work, struct fc_vport, vport_delete_work); work 1843 drivers/scsi/scsi_transport_iscsi.c static void iscsi_scan_session(struct work_struct *work) work 1846 drivers/scsi/scsi_transport_iscsi.c container_of(work, struct iscsi_cls_session, scan_work); work 1891 drivers/scsi/scsi_transport_iscsi.c static void session_recovery_timedout(struct work_struct *work) work 1894 drivers/scsi/scsi_transport_iscsi.c container_of(work, struct iscsi_cls_session, work 1895 drivers/scsi/scsi_transport_iscsi.c recovery_work.work); work 1923 drivers/scsi/scsi_transport_iscsi.c static void __iscsi_unblock_session(struct work_struct *work) work 1926 drivers/scsi/scsi_transport_iscsi.c container_of(work, struct iscsi_cls_session, work 1972 drivers/scsi/scsi_transport_iscsi.c static void __iscsi_block_session(struct work_struct *work) work 1975 drivers/scsi/scsi_transport_iscsi.c container_of(work, struct iscsi_cls_session, work 1997 drivers/scsi/scsi_transport_iscsi.c static void __iscsi_unbind_session(struct work_struct *work) work 2000 drivers/scsi/scsi_transport_iscsi.c container_of(work, struct iscsi_cls_session, work 1055 drivers/scsi/scsi_transport_spi.c struct work_struct work; work 1060 drivers/scsi/scsi_transport_spi.c spi_dv_device_work_wrapper(struct work_struct *work) work 1063 drivers/scsi/scsi_transport_spi.c container_of(work, struct work_queue_wrapper, work); work 1103 drivers/scsi/scsi_transport_spi.c INIT_WORK(&wqw->work, spi_dv_device_work_wrapper); 
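The scsi_transport_fc entries just above (dev_loss_work, fail_io_work) use the delayed variant. The handler still receives a plain work_struct pointer, so it converts with to_delayed_work() before container_of(); the trailing ".work" in many container_of() calls throughout this index is the same conversion written by hand. A sketch under invented demo_rport names — an illustration of the pattern, not transport code:

/* Sketch only: demo_* names are placeholders. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_rport {
	struct delayed_work dev_loss_work;
};

static void demo_dev_loss_timeout(struct work_struct *work)
{
	/* work points at dev_loss_work.work; step out to the delayed_work,
	 * then to the enclosing remote-port object. */
	struct demo_rport *rport =
		container_of(to_delayed_work(work), struct demo_rport,
			     dev_loss_work);

	/* ... tear the remote port down ... */
	(void)rport;
}

static void demo_arm_dev_loss(struct demo_rport *rport, unsigned long secs)
{
	INIT_DELAYED_WORK(&rport->dev_loss_work, demo_dev_loss_timeout);
	schedule_delayed_work(&rport->dev_loss_work, secs * HZ);
}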
work 1106 drivers/scsi/scsi_transport_spi.c schedule_work(&wqw->work); work 378 drivers/scsi/scsi_transport_srp.c static void srp_reconnect_work(struct work_struct *work) work 380 drivers/scsi/scsi_transport_srp.c struct srp_rport *rport = container_of(to_delayed_work(work), work 424 drivers/scsi/scsi_transport_srp.c static void rport_fast_io_fail_timedout(struct work_struct *work) work 426 drivers/scsi/scsi_transport_srp.c struct srp_rport *rport = container_of(to_delayed_work(work), work 443 drivers/scsi/scsi_transport_srp.c static void rport_dev_loss_timedout(struct work_struct *work) work 445 drivers/scsi/scsi_transport_srp.c struct srp_rport *rport = container_of(to_delayed_work(work), work 1305 drivers/scsi/sg.c sg_rq_end_io_usercontext(struct work_struct *work) work 1307 drivers/scsi/sg.c struct sg_request *srp = container_of(work, struct sg_request, ew.work); work 1412 drivers/scsi/sg.c INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); work 1413 drivers/scsi/sg.c schedule_work(&srp->ew.work); work 2195 drivers/scsi/sg.c sg_remove_sfp_usercontext(struct work_struct *work) work 2197 drivers/scsi/sg.c struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); work 2240 drivers/scsi/sg.c INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); work 2241 drivers/scsi/sg.c schedule_work(&sfp->ew.work); work 56 drivers/scsi/smartpqi/smartpqi_init.c static void pqi_ctrl_offline_worker(struct work_struct *work); work 860 drivers/scsi/smartpqi/smartpqi_init.c static void pqi_update_time_worker(struct work_struct *work) work 865 drivers/scsi/smartpqi/smartpqi_init.c ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, work 1475 drivers/scsi/smartpqi/smartpqi_init.c static void pqi_rescan_worker(struct work_struct *work) work 1479 drivers/scsi/smartpqi/smartpqi_init.c ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, work 3172 drivers/scsi/smartpqi/smartpqi_init.c static void pqi_event_worker(struct work_struct *work) work 3178 drivers/scsi/smartpqi/smartpqi_init.c ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); work 5168 drivers/scsi/smartpqi/smartpqi_init.c static void pqi_raid_bypass_retry_worker(struct work_struct *work) work 5172 drivers/scsi/smartpqi/smartpqi_init.c ctrl_info = container_of(work, struct pqi_ctrl_info, work 7823 drivers/scsi/smartpqi/smartpqi_init.c static void pqi_ctrl_offline_worker(struct work_struct *work) work 7827 drivers/scsi/smartpqi/smartpqi_init.c ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); work 258 drivers/scsi/snic/snic.h struct work_struct work; work 38 drivers/scsi/snic/snic_ctl.c snic_handle_link(struct work_struct *work) work 40 drivers/scsi/snic/snic_ctl.c struct snic *snic = container_of(work, struct snic, link_work); work 164 drivers/scsi/snic/snic_disc.c snic_scsi_scan_tgt(struct work_struct *work) work 166 drivers/scsi/snic/snic_disc.c struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work); work 222 drivers/scsi/snic/snic_disc.c snic_tgt_del(struct work_struct *work) work 224 drivers/scsi/snic/snic_disc.c struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work); work 336 drivers/scsi/snic/snic_disc.c snic_handle_tgt_disc(struct work_struct *work) work 338 drivers/scsi/snic/snic_disc.c struct snic *snic = container_of(work, struct snic, tgt_work); work 520 drivers/scsi/snic/snic_disc.c snic_handle_disc(struct work_struct *work) work 522 drivers/scsi/snic/snic_disc.c struct snic *snic = container_of(work, struct snic, disc_work); work 1451 
drivers/scsi/stex.c static void stex_reset_work(struct work_struct *work) work 1453 drivers/scsi/stex.c struct st_hba *hba = container_of(work, struct st_hba, reset_work); work 489 drivers/scsi/storvsc_drv.c struct work_struct work; work 495 drivers/scsi/storvsc_drv.c static void storvsc_device_scan(struct work_struct *work) work 500 drivers/scsi/storvsc_drv.c wrk = container_of(work, struct storvsc_scan_work, work); work 512 drivers/scsi/storvsc_drv.c static void storvsc_host_scan(struct work_struct *work) work 517 drivers/scsi/storvsc_drv.c container_of(work, struct hv_host_device, host_scan_work); work 541 drivers/scsi/storvsc_drv.c static void storvsc_remove_lun(struct work_struct *work) work 546 drivers/scsi/storvsc_drv.c wrk = container_of(work, struct storvsc_scan_work, work); work 928 drivers/scsi/storvsc_drv.c void (*process_err_fn)(struct work_struct *work); work 996 drivers/scsi/storvsc_drv.c INIT_WORK(&wrk->work, process_err_fn); work 997 drivers/scsi/storvsc_drv.c queue_work(host_dev->handle_error_wq, &wrk->work); work 1187 drivers/scsi/ufs/ufshcd.c static void ufshcd_clk_scaling_suspend_work(struct work_struct *work) work 1189 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = container_of(work, struct ufs_hba, work 1204 drivers/scsi/ufs/ufshcd.c static void ufshcd_clk_scaling_resume_work(struct work_struct *work) work 1206 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = container_of(work, struct ufs_hba, work 1472 drivers/scsi/ufs/ufshcd.c static void ufshcd_ungate_work(struct work_struct *work) work 1476 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = container_of(work, struct ufs_hba, work 1601 drivers/scsi/ufs/ufshcd.c static void ufshcd_gate_work(struct work_struct *work) work 1603 drivers/scsi/ufs/ufshcd.c struct ufs_hba *hba = container_of(work, struct ufs_hba, work 1604 drivers/scsi/ufs/ufshcd.c clk_gating.gate_work.work); work 5174 drivers/scsi/ufs/ufshcd.c static void ufshcd_exception_event_handler(struct work_struct *work) work 5179 drivers/scsi/ufs/ufshcd.c hba = container_of(work, struct ufs_hba, eeh_work); work 5285 drivers/scsi/ufs/ufshcd.c static void ufshcd_err_handler(struct work_struct *work) work 5295 drivers/scsi/ufs/ufshcd.c hba = container_of(work, struct ufs_hba, eh_work); work 60 drivers/scsi/virtio_scsi.c struct work_struct work; work 228 drivers/scsi/virtio_scsi.c static void virtscsi_handle_event(struct work_struct *work); work 237 drivers/scsi/virtio_scsi.c INIT_WORK(&event_node->work, virtscsi_handle_event); work 274 drivers/scsi/virtio_scsi.c cancel_work_sync(&vscsi->event_list[i].work); work 359 drivers/scsi/virtio_scsi.c static void virtscsi_handle_event(struct work_struct *work) work 362 drivers/scsi/virtio_scsi.c container_of(work, struct virtio_scsi_event_node, work); work 394 drivers/scsi/virtio_scsi.c queue_work(system_freezable_wq, &event_node->work); work 78 drivers/scsi/vmw_pvscsi.c struct work_struct work; work 1105 drivers/scsi/vmw_pvscsi.c adapter = container_of(data, struct pvscsi_adapter, work); work 1131 drivers/scsi/vmw_pvscsi.c INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler); work 1172 drivers/scsi/vmw_pvscsi.c queue_work(adapter->workqueue, &adapter->work); work 35 drivers/sh/maple/maple.c static void maple_dma_handler(struct work_struct *work); work 36 drivers/sh/maple/maple.c static void maple_vblank_handler(struct work_struct *work); work 446 drivers/sh/maple/maple.c static void maple_vblank_handler(struct work_struct *work) work 631 drivers/sh/maple/maple.c static void maple_dma_handler(struct work_struct *work) work 
439 drivers/slimbus/qcom-ctrl.c static void qcom_slim_rxwq(struct work_struct *work) work 444 drivers/slimbus/qcom-ctrl.c struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl, work 1149 drivers/slimbus/qcom-ngd-ctrl.c static void qcom_slim_ngd_master_worker(struct work_struct *work) work 1158 drivers/slimbus/qcom-ngd-ctrl.c ctrl = container_of(work, struct qcom_slim_ngd_ctrl, m_work); work 1168 drivers/soc/fsl/qbman/qman.c static void qm_congestion_task(struct work_struct *work); work 1169 drivers/soc/fsl/qbman/qman.c static void qm_mr_process_task(struct work_struct *work); work 1456 drivers/soc/fsl/qbman/qman.c static void qm_congestion_task(struct work_struct *work) work 1458 drivers/soc/fsl/qbman/qman.c struct qman_portal *p = container_of(work, struct qman_portal, work 1488 drivers/soc/fsl/qbman/qman.c static void qm_mr_process_task(struct work_struct *work) work 1490 drivers/soc/fsl/qbman/qman.c struct qman_portal *p = container_of(work, struct qman_portal, work 170 drivers/soc/qcom/apr.c static void apr_rxwq(struct work_struct *work) work 172 drivers/soc/qcom/apr.c struct apr *apr = container_of(work, struct apr, rx_work); work 522 drivers/soc/qcom/qmi_interface.c static void qmi_data_ready_work(struct work_struct *work) work 524 drivers/soc/qcom/qmi_interface.c struct qmi_handle *qmi = container_of(work, struct qmi_handle, work); work 579 drivers/soc/qcom/qmi_interface.c queue_work(qmi->wq, &qmi->work); work 633 drivers/soc/qcom/qmi_interface.c INIT_WORK(&qmi->work, qmi_data_ready_work); work 686 drivers/soc/qcom/qmi_interface.c cancel_work_sync(&qmi->work); work 284 drivers/soc/qcom/wcnss_ctrl.c static void wcnss_async_probe(struct work_struct *work) work 286 drivers/soc/qcom/wcnss_ctrl.c struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work); work 176 drivers/spi/spi-lantiq-ssc.c struct work_struct work; work 641 drivers/spi/spi-lantiq-ssc.c queue_work(spi->wq, &spi->work); work 673 drivers/spi/spi-lantiq-ssc.c queue_work(spi->wq, &spi->work); work 715 drivers/spi/spi-lantiq-ssc.c static void lantiq_ssc_bussy_work(struct work_struct *work) work 721 drivers/spi/spi-lantiq-ssc.c spi = container_of(work, typeof(*spi), work); work 915 drivers/spi/spi-lantiq-ssc.c INIT_WORK(&spi->work, lantiq_ssc_bussy_work); work 41 drivers/spi/spi-mpc52xx-psc.c struct work_struct work; work 200 drivers/spi/spi-mpc52xx-psc.c static void mpc52xx_psc_spi_work(struct work_struct *work) work 203 drivers/spi/spi-mpc52xx-psc.c container_of(work, struct mpc52xx_psc_spi, work); work 297 drivers/spi/spi-mpc52xx-psc.c schedule_work(&mps->work); work 420 drivers/spi/spi-mpc52xx-psc.c INIT_WORK(&mps->work, mpc52xx_psc_spi_work); work 473 drivers/spi/spi-mpc52xx-psc.c flush_work(&mps->work); work 77 drivers/spi/spi-mpc52xx.c struct work_struct work; work 328 drivers/spi/spi-mpc52xx.c schedule_work(&ms->work); work 346 drivers/spi/spi-mpc52xx.c static void mpc52xx_spi_wq(struct work_struct *work) work 348 drivers/spi/spi-mpc52xx.c struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work); work 371 drivers/spi/spi-mpc52xx.c schedule_work(&ms->work); work 471 drivers/spi/spi-mpc52xx.c INIT_WORK(&ms->work, mpc52xx_spi_wq); work 274 drivers/spi/spi-sh.c static void spi_sh_work(struct work_struct *work) work 276 drivers/spi/spi-sh.c struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws); work 160 drivers/spi/spi-topcliff-pch.c struct work_struct work; work 508 drivers/spi/spi-topcliff-pch.c schedule_work(&data->work); work 664 
drivers/spi/spi-topcliff-pch.c schedule_work(&data->work); work 1125 drivers/spi/spi-topcliff-pch.c data = container_of(pwork, struct pch_spi_data, work); work 1262 drivers/spi/spi-topcliff-pch.c flush_work(&data->work); work 1369 drivers/spi/spi-topcliff-pch.c INIT_WORK(&data->work, pch_spi_process_messages); work 75 drivers/spi/spi-txx9.c struct work_struct work; work 280 drivers/spi/spi-txx9.c static void txx9spi_work(struct work_struct *work) work 282 drivers/spi/spi-txx9.c struct txx9spi *c = container_of(work, struct txx9spi, work); work 317 drivers/spi/spi-txx9.c schedule_work(&c->work); work 338 drivers/spi/spi-txx9.c INIT_WORK(&c->work, txx9spi_work); work 408 drivers/spi/spi-txx9.c flush_work(&c->work); work 1413 drivers/spi/spi.c static void spi_pump_messages(struct kthread_work *work) work 1416 drivers/spi/spi.c container_of(work, struct spi_controller, pump_messages); work 383 drivers/staging/comedi/drivers/ni_pcidio.c int work = 0; work 405 drivers/staging/comedi/drivers/ni_pcidio.c work++; work 406 drivers/staging/comedi/drivers/ni_pcidio.c if (work > 20) { work 417 drivers/staging/comedi/drivers/ni_pcidio.c work++; work 418 drivers/staging/comedi/drivers/ni_pcidio.c if (work > 100) { work 1182 drivers/staging/fsl-dpaa2/ethsw/ethsw.c struct work_struct work; work 1188 drivers/staging/fsl-dpaa2/ethsw/ethsw.c static void ethsw_switchdev_event_work(struct work_struct *work) work 1191 drivers/staging/fsl-dpaa2/ethsw/ethsw.c container_of(work, struct ethsw_switchdev_event_work, work); work 1249 drivers/staging/fsl-dpaa2/ethsw/ethsw.c INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work); work 1273 drivers/staging/fsl-dpaa2/ethsw/ethsw.c queue_work(ethsw_owq, &switchdev_work->work); work 485 drivers/staging/fwserial/fwserial.c static void fwtty_do_hangup(struct work_struct *work) work 487 drivers/staging/fwserial/fwserial.c struct fwtty_port *port = to_port(work, hangup); work 498 drivers/staging/fwserial/fwserial.c static void fwtty_emit_breaks(struct work_struct *work) work 500 drivers/staging/fwserial/fwserial.c struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks); work 794 drivers/staging/fwserial/fwserial.c static void fwtty_drain_tx(struct work_struct *work) work 796 drivers/staging/fwserial/fwserial.c struct fwtty_port *port = to_port(to_delayed_work(work), drain); work 1961 drivers/staging/fwserial/fwserial.c static void fwserial_auto_connect(struct work_struct *work) work 1963 drivers/staging/fwserial/fwserial.c struct fwtty_peer *peer = to_peer(to_delayed_work(work), connect); work 1971 drivers/staging/fwserial/fwserial.c static void fwserial_peer_workfn(struct work_struct *work) work 1973 drivers/staging/fwserial/fwserial.c struct fwtty_peer *peer = to_peer(work, work); work 1975 drivers/staging/fwserial/fwserial.c peer->workfn(work); work 2042 drivers/staging/fwserial/fwserial.c INIT_WORK(&peer->work, fwserial_peer_workfn); work 2098 drivers/staging/fwserial/fwserial.c cancel_work_sync(&peer->work); work 2479 drivers/staging/fwserial/fwserial.c static void fwserial_handle_plug_req(struct work_struct *work) work 2481 drivers/staging/fwserial/fwserial.c struct fwtty_peer *peer = to_peer(work, work); work 2551 drivers/staging/fwserial/fwserial.c static void fwserial_handle_unplug_req(struct work_struct *work) work 2553 drivers/staging/fwserial/fwserial.c struct fwtty_peer *peer = to_peer(work, work); work 2637 drivers/staging/fwserial/fwserial.c if (work_pending(&peer->work)) { work 2644 drivers/staging/fwserial/fwserial.c 
work 2668 drivers/staging/fwserial/fwserial.c if (work_pending(&peer->work)) {
work 2673 drivers/staging/fwserial/fwserial.c queue_work(system_unbound_wq, &peer->work);
work 96 drivers/staging/fwserial/fwserial.h struct work_struct work;
work 205 drivers/staging/gdm724x/gdm_mux.c static void do_rx(struct work_struct *work)
work 208 drivers/staging/gdm724x/gdm_mux.c container_of(work, struct mux_dev, work_rx.work);
work 265 drivers/staging/gdm724x/gdm_mux.c schedule_work(&mux_dev->work_rx.work);
work 593 drivers/staging/gdm724x/gdm_mux.c cancel_work_sync(&mux_dev->work_rx.work);
work 48 drivers/staging/gdm724x/gdm_usb.c static void do_tx(struct work_struct *work);
work 49 drivers/staging/gdm724x/gdm_usb.c static void do_rx(struct work_struct *work);
work 377 drivers/staging/gdm724x/gdm_usb.c static void do_rx(struct work_struct *work)
work 380 drivers/staging/gdm724x/gdm_usb.c container_of(work, struct lte_udev, work_rx.work);
work 466 drivers/staging/gdm724x/gdm_usb.c schedule_work(&udev->work_rx.work);
work 558 drivers/staging/gdm724x/gdm_usb.c schedule_work(&udev->work_tx.work);
work 634 drivers/staging/gdm724x/gdm_usb.c static void do_tx(struct work_struct *work)
work 637 drivers/staging/gdm724x/gdm_usb.c container_of(work, struct lte_udev, work_tx.work);
work 749 drivers/staging/gdm724x/gdm_usb.c schedule_work(&udev->work_tx.work);
work 786 drivers/staging/gdm724x/gdm_usb.c schedule_work(&udev->work_tx.work);
work 929 drivers/staging/gdm724x/gdm_usb.c cancel_work_sync(&udev->work_tx.work);
work 930 drivers/staging/gdm724x/gdm_usb.c cancel_work_sync(&udev->work_rx.work);
work 969 drivers/staging/gdm724x/gdm_usb.c schedule_work(&udev->work_tx.work);
work 52 drivers/staging/greybus/bootrom.c static void gb_bootrom_timedout(struct work_struct *work)
work 54 drivers/staging/greybus/bootrom.c struct delayed_work *dwork = to_delayed_work(work);
work 129 drivers/staging/greybus/fw-download.c static void fw_request_timedout(struct work_struct *work)
work 131 drivers/staging/greybus/fw-download.c struct delayed_work *dwork = to_delayed_work(work);
work 32 drivers/staging/greybus/power_supply.c struct delayed_work work;
work 735 drivers/staging/greybus/power_supply.c static void gb_power_supply_work(struct work_struct *work)
work 737 drivers/staging/greybus/power_supply.c struct gb_power_supply *gbpsy = container_of(work,
work 739 drivers/staging/greybus/power_supply.c work.work);
work 750 drivers/staging/greybus/power_supply.c schedule_delayed_work(&gbpsy->work, gbpsy->update_interval);
work 850 drivers/staging/greybus/power_supply.c cancel_delayed_work_sync(&gbpsy->work);
work 924 drivers/staging/greybus/power_supply.c INIT_DELAYED_WORK(&gbpsy->work, gb_power_supply_work);
work 925 drivers/staging/greybus/power_supply.c schedule_delayed_work(&gbpsy->work, 0);
work 494 drivers/staging/greybus/sdio.c static void gb_sdio_mrq_work(struct work_struct *work)
work 500 drivers/staging/greybus/sdio.c host = container_of(work, struct gb_sdio_host, mrqwork);
work 233 drivers/staging/greybus/uart.c static void gb_uart_tx_write_work(struct work_struct *work)
work 241 drivers/staging/greybus/uart.c gb_tty = container_of(work, struct gb_tty, tx_work);
work 276 drivers/staging/greybus/uart.c schedule_work(work);
work 67 drivers/staging/greybus/vibrator.c static void gb_vibrator_worker(struct work_struct *work)
work 69 drivers/staging/greybus/vibrator.c struct delayed_work *delayed_work = to_delayed_work(work);
work 89 drivers/staging/iio/impedance-analyzer/ad5933.c struct delayed_work work;
work 586 drivers/staging/iio/impedance-analyzer/ad5933.c schedule_delayed_work(&st->work,
work 595 drivers/staging/iio/impedance-analyzer/ad5933.c cancel_delayed_work_sync(&st->work);
work 621 drivers/staging/iio/impedance-analyzer/ad5933.c static void ad5933_work(struct work_struct *work)
work 623 drivers/staging/iio/impedance-analyzer/ad5933.c struct ad5933_state *st = container_of(work,
work 624 drivers/staging/iio/impedance-analyzer/ad5933.c struct ad5933_state, work.work);
work 635 drivers/staging/iio/impedance-analyzer/ad5933.c schedule_delayed_work(&st->work, st->poll_time_jiffies);
work 662 drivers/staging/iio/impedance-analyzer/ad5933.c schedule_delayed_work(&st->work, st->poll_time_jiffies);
work 675 drivers/staging/iio/impedance-analyzer/ad5933.c schedule_delayed_work(&st->work, st->poll_time_jiffies);
work 735 drivers/staging/iio/impedance-analyzer/ad5933.c INIT_DELAYED_WORK(&st->work, ad5933_work);
work 604 drivers/staging/isdn/gigaset/bas-gigaset.c static void int_in_work(struct work_struct *work)
work 607 drivers/staging/isdn/gigaset/bas-gigaset.c container_of(work, struct bas_cardstate, int_in_wq);
work 474 drivers/staging/ks7010/ks7010_sdio.c static void ks7010_rw_function(struct work_struct *work)
work 476 drivers/staging/ks7010/ks7010_sdio.c struct ks_wlan_private *priv = container_of(work,
work 478 drivers/staging/ks7010/ks7010_sdio.c rw_dwork.work);
work 67 drivers/staging/ks7010/ks_hostif.c static void ks_wlan_hw_wakeup_task(struct work_struct *work)
work 73 drivers/staging/ks7010/ks_hostif.c priv = container_of(work, struct ks_wlan_private, wakeup_work);
work 139 drivers/staging/media/hantro/hantro_drv.c void hantro_watchdog(struct work_struct *work)
work 144 drivers/staging/media/hantro/hantro_drv.c vpu = container_of(to_delayed_work(work),
work 149 drivers/staging/media/hantro/hantro_hw.h void hantro_watchdog(struct work_struct *work);
work 237 drivers/staging/media/meson/vdec/esparser.c void esparser_queue_all_src(struct work_struct *work)
work 241 drivers/staging/media/meson/vdec/esparser.c container_of(work, struct amvdec_session, esparser_queue_work);
work 28 drivers/staging/media/meson/vdec/esparser.h void esparser_queue_all_src(struct work_struct *work);
work 46 drivers/staging/media/tegra-vde/dmabuf-cache.c static void tegra_vde_delayed_unmap(struct work_struct *work)
work 51 drivers/staging/media/tegra-vde/dmabuf-cache.c entry = container_of(work, struct tegra_vde_cache_entry,
work 52 drivers/staging/media/tegra-vde/dmabuf-cache.c dwork.work);
work 135 drivers/staging/most/i2c/i2c.c pending_rx_work(&dev->rx.dwork.work);
work 233 drivers/staging/most/i2c/i2c.c static void pending_rx_work(struct work_struct *work)
work 235 drivers/staging/most/i2c/i2c.c struct hdm_i2c *dev = container_of(work, struct hdm_i2c, rx.dwork.work);
work 373 drivers/staging/nvec/nvec.c static void nvec_request_master(struct work_struct *work)
work 375 drivers/staging/nvec/nvec.c struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
work 437 drivers/staging/nvec/nvec.c static void nvec_dispatch(struct work_struct *work)
work 439 drivers/staging/nvec/nvec.c struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
work 348 drivers/staging/nvec/nvec_power.c static void nvec_power_poll(struct work_struct *work)
work 351 drivers/staging/nvec/nvec_power.c struct nvec_power *power = container_of(work, struct nvec_power,
work 352 drivers/staging/nvec/nvec_power.c poller.work);
work 369 drivers/staging/nvec/nvec_power.c schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(5000));
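The ad5933, greybus/power_supply, and nvec_power entries show the delayed-work flavor of the same idiom. struct delayed_work embeds a work_struct, so the handler recovers its container either as container_of(work, struct foo, dwork.work) or via the to_delayed_work() helper, and a polling handler re-arms itself because delayed work is one-shot, not periodic. A minimal sketch with hypothetical names:

        #include <linux/workqueue.h>
        #include <linux/jiffies.h>

        struct poller {                         /* hypothetical */
                struct delayed_work dwork;
                unsigned long period;           /* jiffies between samples */
                unsigned long samples;
        };

        static void poll_fn(struct work_struct *work)
        {
                /* same result as container_of(work, struct poller, dwork.work) */
                struct poller *p = container_of(to_delayed_work(work),
                                                struct poller, dwork);

                p->samples++;                   /* ... read the hardware ... */

                /* delayed work is one-shot: the handler must re-arm itself */
                schedule_delayed_work(&p->dwork, p->period);
        }

        static void poll_start(struct poller *p)
        {
                p->period = msecs_to_jiffies(500);
                INIT_DELAYED_WORK(&p->dwork, poll_fn);
                schedule_delayed_work(&p->dwork, 0); /* first sample right away */
        }

        static void poll_stop(struct poller *p)
        {
                /* cancels a pending timer and waits out a running handler,
                 * even one that re-queues itself */
                cancel_delayed_work_sync(&p->dwork);
        }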
work 63 drivers/staging/octeon/ethernet-rx.c static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
work 68 drivers/staging/octeon/ethernet-rx.c port = work->word0.pip.cn68xx.pknd;
work 70 drivers/staging/octeon/ethernet-rx.c port = work->word1.cn38xx.ipprt;
work 72 drivers/staging/octeon/ethernet-rx.c if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
work 79 drivers/staging/octeon/ethernet-rx.c } else if (work->word2.snoip.err_code == 5 ||
work 80 drivers/staging/octeon/ethernet-rx.c work->word2.snoip.err_code == 7) {
work 97 drivers/staging/octeon/ethernet-rx.c cvmx_phys_to_ptr(work->packet_ptr.s.addr);
work 100 drivers/staging/octeon/ethernet-rx.c while (i < work->word1.len - 1) {
work 109 drivers/staging/octeon/ethernet-rx.c work->packet_ptr.s.addr += i + 1;
work 110 drivers/staging/octeon/ethernet-rx.c work->word1.len -= i + 5;
work 113 drivers/staging/octeon/ethernet-rx.c work->packet_ptr.s.addr += i;
work 114 drivers/staging/octeon/ethernet-rx.c work->word1.len -= i + 4;
work 115 drivers/staging/octeon/ethernet-rx.c for (i = 0; i < work->word1.len; i++) {
work 124 drivers/staging/octeon/ethernet-rx.c cvm_oct_free_work(work);
work 130 drivers/staging/octeon/ethernet-rx.c port, work->word2.snoip.err_code);
work 131 drivers/staging/octeon/ethernet-rx.c cvm_oct_free_work(work);
work 138 drivers/staging/octeon/ethernet-rx.c static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
work 140 drivers/staging/octeon/ethernet-rx.c int segments = work->word2.s.bufs;
work 141 drivers/staging/octeon/ethernet-rx.c union cvmx_buf_ptr segment_ptr = work->packet_ptr;
work 142 drivers/staging/octeon/ethernet-rx.c int len = work->word1.len;
work 218 drivers/staging/octeon/ethernet-rx.c cvmx_wqe_t *work;
work 222 drivers/staging/octeon/ethernet-rx.c work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
work 224 drivers/staging/octeon/ethernet-rx.c work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
work 226 drivers/staging/octeon/ethernet-rx.c prefetch(work);
work 228 drivers/staging/octeon/ethernet-rx.c if (!work) {
work 245 drivers/staging/octeon/ethernet-rx.c (cvm_oct_get_buffer_ptr(work->packet_ptr) -
work 256 drivers/staging/octeon/ethernet-rx.c skb_in_hw = work->word2.s.bufs == 1;
work 264 drivers/staging/octeon/ethernet-rx.c port = work->word0.pip.cn68xx.pknd;
work 266 drivers/staging/octeon/ethernet-rx.c port = work->word1.cn38xx.ipprt;
work 271 drivers/staging/octeon/ethernet-rx.c if (unlikely(work->word2.snoip.rcv_error)) {
work 272 drivers/staging/octeon/ethernet-rx.c if (cvm_oct_check_rcv_error(work))
work 282 drivers/staging/octeon/ethernet-rx.c skb->data = skb->head + work->packet_ptr.s.addr -
work 285 drivers/staging/octeon/ethernet-rx.c skb->len = work->word1.len;
work 293 drivers/staging/octeon/ethernet-rx.c skb = dev_alloc_skb(work->word1.len);
work 295 drivers/staging/octeon/ethernet-rx.c cvm_oct_free_work(work);
work 303 drivers/staging/octeon/ethernet-rx.c if (unlikely(work->word2.s.bufs == 0)) {
work 304 drivers/staging/octeon/ethernet-rx.c u8 *ptr = work->packet_data;
work 306 drivers/staging/octeon/ethernet-rx.c if (likely(!work->word2.s.not_IP)) {
work 311 drivers/staging/octeon/ethernet-rx.c if (work->word2.s.is_v6)
work 316 drivers/staging/octeon/ethernet-rx.c skb_put_data(skb, ptr, work->word1.len);
work 319 drivers/staging/octeon/ethernet-rx.c copy_segments_to_skb(work, skb);
work 335 drivers/staging/octeon/ethernet-rx.c if (unlikely(work->word2.s.not_IP ||
work 336 drivers/staging/octeon/ethernet-rx.c work->word2.s.IP_exc ||
work 337 drivers/staging/octeon/ethernet-rx.c work->word2.s.L4_error ||
work 338 drivers/staging/octeon/ethernet-rx.c !work->word2.s.tcp_or_udp))
work 379 drivers/staging/octeon/ethernet-rx.c cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
work 381 drivers/staging/octeon/ethernet-rx.c cvm_oct_free_work(work);
work 517 drivers/staging/octeon/ethernet-tx.c cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
work 519 drivers/staging/octeon/ethernet-tx.c if (unlikely(!work)) {
work 532 drivers/staging/octeon/ethernet-tx.c cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
work 562 drivers/staging/octeon/ethernet-tx.c work->word0.pip.cn38xx.hw_chksum = skb->csum;
work 563 drivers/staging/octeon/ethernet-tx.c work->word1.len = skb->len;
work 564 drivers/staging/octeon/ethernet-tx.c cvmx_wqe_set_port(work, priv->port);
work 565 drivers/staging/octeon/ethernet-tx.c cvmx_wqe_set_qos(work, priv->port & 0x7);
work 566 drivers/staging/octeon/ethernet-tx.c cvmx_wqe_set_grp(work, pow_send_group);
work 567 drivers/staging/octeon/ethernet-tx.c work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
work 568 drivers/staging/octeon/ethernet-tx.c work->word1.tag = pow_send_group; /* FIXME */
work 570 drivers/staging/octeon/ethernet-tx.c work->word2.u64 = 0;
work 571 drivers/staging/octeon/ethernet-tx.c work->word2.s.bufs = 1;
work 572 drivers/staging/octeon/ethernet-tx.c work->packet_ptr.u64 = 0;
work 573 drivers/staging/octeon/ethernet-tx.c work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
work 574 drivers/staging/octeon/ethernet-tx.c work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
work 575 drivers/staging/octeon/ethernet-tx.c work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
work 576 drivers/staging/octeon/ethernet-tx.c work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
work 579 drivers/staging/octeon/ethernet-tx.c work->word2.s.ip_offset = 14;
work 581 drivers/staging/octeon/ethernet-tx.c work->word2.s.vlan_valid = 0; /* FIXME */
work 582 drivers/staging/octeon/ethernet-tx.c work->word2.s.vlan_cfi = 0; /* FIXME */
work 583 drivers/staging/octeon/ethernet-tx.c work->word2.s.vlan_id = 0; /* FIXME */
work 584 drivers/staging/octeon/ethernet-tx.c work->word2.s.dec_ipcomp = 0; /* FIXME */
work 586 drivers/staging/octeon/ethernet-tx.c work->word2.s.tcp_or_udp =
work 591 drivers/staging/octeon/ethernet-tx.c work->word2.s.dec_ipsec = 0;
work 593 drivers/staging/octeon/ethernet-tx.c work->word2.s.is_v6 = 0;
work 595 drivers/staging/octeon/ethernet-tx.c work->word2.s.software = 0;
work 597 drivers/staging/octeon/ethernet-tx.c work->word2.s.L4_error = 0;
work 599 drivers/staging/octeon/ethernet-tx.c work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
work 604 drivers/staging/octeon/ethernet-tx.c work->word2.s.IP_exc = 0;
work 606 drivers/staging/octeon/ethernet-tx.c work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
work 607 drivers/staging/octeon/ethernet-tx.c work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
work 610 drivers/staging/octeon/ethernet-tx.c work->word2.s.not_IP = 0;
work 612 drivers/staging/octeon/ethernet-tx.c work->word2.s.rcv_error = 0;
work 614 drivers/staging/octeon/ethernet-tx.c work->word2.s.err_code = 0;
work 622 drivers/staging/octeon/ethernet-tx.c memcpy(work->packet_data, skb->data + 10,
work 623 drivers/staging/octeon/ethernet-tx.c sizeof(work->packet_data));
work 626 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.vlan_valid = 0; /* FIXME */
work 627 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.vlan_cfi = 0; /* FIXME */
work 628 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.vlan_id = 0; /* FIXME */
work 629 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.software = 0; /* Hardware would set to zero */
work 631 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
work 632 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
work 633 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.is_bcast =
work 635 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.is_mcast =
work 637 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.not_IP = 1; /* IP was done up above */
work 640 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.rcv_error = 0;
work 642 drivers/staging/octeon/ethernet-tx.c work->word2.snoip.err_code = 0;
work 644 drivers/staging/octeon/ethernet-tx.c memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
work 648 drivers/staging/octeon/ethernet-tx.c cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
work 649 drivers/staging/octeon/ethernet-tx.c cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
work 106 drivers/staging/octeon/ethernet.c static void cvm_oct_rx_refill_worker(struct work_struct *work);
work 109 drivers/staging/octeon/ethernet.c static void cvm_oct_rx_refill_worker(struct work_struct *work)
work 124 drivers/staging/octeon/ethernet.c static void cvm_oct_periodic_worker(struct work_struct *work)
work 126 drivers/staging/octeon/ethernet.c struct octeon_ethernet *priv = container_of(work,
work 128 drivers/staging/octeon/ethernet.c port_periodic_work.work);
work 175 drivers/staging/octeon/ethernet.c cvmx_wqe_t *work = work_queue_entry;
work 177 drivers/staging/octeon/ethernet.c int segments = work->word2.s.bufs;
work 178 drivers/staging/octeon/ethernet.c union cvmx_buf_ptr segment_ptr = work->packet_ptr;
work 189 drivers/staging/octeon/ethernet.c cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
work 1201 drivers/staging/octeon/octeon-stubs.h static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
work 1393 drivers/staging/octeon/octeon-stubs.h static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
work 1396 drivers/staging/octeon/octeon-stubs.h static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
work 1399 drivers/staging/octeon/octeon-stubs.h static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
work 1404 drivers/staging/octeon/octeon-stubs.h static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
work 271 drivers/staging/olpc_dcon/olpc_dcon.c static void dcon_source_switch(struct work_struct *work)
work 273 drivers/staging/olpc_dcon/olpc_dcon.c struct dcon_priv *dcon = container_of(work, struct dcon_priv,
work 2233 drivers/staging/qlge/qlge.h void ql_mpi_work(struct work_struct *work);
work 2234 drivers/staging/qlge/qlge.h void ql_mpi_reset_work(struct work_struct *work);
work 2235 drivers/staging/qlge/qlge.h void ql_mpi_core_to_log(struct work_struct *work);
work 2241 drivers/staging/qlge/qlge.h void ql_mpi_idc_work(struct work_struct *work);
work 2242 drivers/staging/qlge/qlge.h void ql_mpi_port_cfg_work(struct work_struct *work);
work 1327 drivers/staging/qlge/qlge_dbg.c void ql_mpi_core_to_log(struct work_struct *work)
work 1330 drivers/staging/qlge/qlge_dbg.c container_of(work, struct ql_adapter, mpi_core_to_log.work);
work 4445 drivers/staging/qlge/qlge_main.c static void ql_asic_reset_work(struct work_struct *work)
work 4448 drivers/staging/qlge/qlge_main.c container_of(work, struct ql_adapter, asic_reset_work.work);
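Note that in the octeon entries above, work is not a work_struct at all: cvmx_wqe_t is a hardware work-queue entry describing a packet, handed out by the POW scheduler rather than by linux/workqueue.h. A rough sketch of the receive shape those ethernet-rx.c lines trace, using only helpers that appear in this index (the Octeon executive headers are assumed; error handling elided):

        /* cvmx_wqe_t is hardware scheduling state, not deferred work */
        static void oct_rx_once(void)           /* hypothetical helper */
        {
                cvmx_wqe_t *work;
                int port;

                work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
                if (!work)
                        return;                 /* no packet pending */

                port = cvmx_wqe_get_port(work); /* hides cn38xx/cn68xx layout */
                (void)port;

                /* ... build an skb from work->packet_ptr / work->word1.len ... */

                cvm_oct_free_work(work);        /* buffers + WQE back to the pools */
        }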
work 1119 drivers/staging/qlge/qlge_mpi.c void ql_mpi_port_cfg_work(struct work_struct *work)
work 1122 drivers/staging/qlge/qlge_mpi.c container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
work 1159 drivers/staging/qlge/qlge_mpi.c void ql_mpi_idc_work(struct work_struct *work)
work 1162 drivers/staging/qlge/qlge_mpi.c container_of(work, struct ql_adapter, mpi_idc_work.work);
work 1234 drivers/staging/qlge/qlge_mpi.c void ql_mpi_work(struct work_struct *work)
work 1237 drivers/staging/qlge/qlge_mpi.c container_of(work, struct ql_adapter, mpi_work.work);
work 1263 drivers/staging/qlge/qlge_mpi.c void ql_mpi_reset_work(struct work_struct *work)
work 1266 drivers/staging/qlge/qlge_mpi.c container_of(work, struct ql_adapter, mpi_reset_work.work);
work 31 drivers/staging/rtl8188eu/core/rtw_led.c void BlinkWorkItemCallback(struct work_struct *work)
work 33 drivers/staging/rtl8188eu/core/rtw_led.c struct LED_871x *pLed = container_of(work, struct LED_871x,
work 85 drivers/staging/rtl8188eu/include/rtw_led.h void BlinkWorkItemCallback(struct work_struct *work);
work 370 drivers/staging/rtl8192e/rtl8192e/rtl_core.c update_beacon_wq.work);
work 2324 drivers/staging/rtl8192u/ieee80211/ieee80211.h void ieee80211_wx_sync_scan_wq(struct work_struct *work);
work 472 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static void ieee80211_softmac_scan_wq(struct work_struct *work)
work 474 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c struct delayed_work *dwork = to_delayed_work(work);
work 1270 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static void ieee80211_associate_complete_wq(struct work_struct *work)
work 1272 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
work 1321 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static void ieee80211_associate_procedure_wq(struct work_struct *work)
work 1323 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
work 2203 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static void ieee80211_start_ibss_wq(struct work_struct *work)
work 2205 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c struct delayed_work *dwork = to_delayed_work(work);
work 2367 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static void ieee80211_associate_retry_wq(struct work_struct *work)
work 2369 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c struct delayed_work *dwork = to_delayed_work(work);
work 300 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c void ieee80211_wx_sync_scan_wq(struct work_struct *work)
work 302 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq);
work 469 drivers/staging/rtl8192u/r8192U_core.c static void rtl8192_restart(struct work_struct *work);
work 1753 drivers/staging/rtl8192u/r8192U_core.c static void rtl8192_update_beacon(struct work_struct *work)
work 1755 drivers/staging/rtl8192u/r8192U_core.c struct r8192_priv *priv = container_of(work, struct r8192_priv,
work 1756 drivers/staging/rtl8192u/r8192U_core.c update_beacon_wq.work);
work 1773 drivers/staging/rtl8192u/r8192U_core.c static void rtl8192_qos_activate(struct work_struct *work)
work 1775 drivers/staging/rtl8192u/r8192U_core.c struct r8192_priv *priv = container_of(work, struct r8192_priv,
work 2219 drivers/staging/rtl8192u/r8192U_core.c static void rtl819x_watchdog_wqcallback(struct work_struct *work);
work 3269 drivers/staging/rtl8192u/r8192U_core.c static void rtl819x_watchdog_wqcallback(struct work_struct *work)
work 3271 drivers/staging/rtl8192u/r8192U_core.c struct delayed_work *dwork = to_delayed_work(work);
work 3478 drivers/staging/rtl8192u/r8192U_core.c static void rtl8192_restart(struct work_struct *work)
work 3480 drivers/staging/rtl8192u/r8192U_core.c struct r8192_priv *priv = container_of(work, struct r8192_priv,
work 756 drivers/staging/rtl8192u/r8192U_dm.c void dm_txpower_trackingcallback(struct work_struct *work)
work 758 drivers/staging/rtl8192u/r8192U_dm.c struct delayed_work *dwork = to_delayed_work(work);
work 2318 drivers/staging/rtl8192u/r8192U_dm.c void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
work 2320 drivers/staging/rtl8192u/r8192U_dm.c struct delayed_work *dwork = to_delayed_work(work);
work 162 drivers/staging/rtl8192u/r8192U_dm.h void dm_txpower_trackingcallback(struct work_struct *work);
work 168 drivers/staging/rtl8192u/r8192U_dm.h void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
work 1640 drivers/staging/rtl8192u/r819xU_phy.c void InitialGainOperateWorkItemCallBack(struct work_struct *work)
work 1642 drivers/staging/rtl8192u/r819xU_phy.c struct delayed_work *dwork = to_delayed_work(work);
work 81 drivers/staging/rtl8192u/r819xU_phy.h void InitialGainOperateWorkItemCallBack(struct work_struct *work);
work 67 drivers/staging/rtl8712/rtl8712_led.c static void BlinkWorkItemCallback(struct work_struct *work);
work 828 drivers/staging/rtl8712/rtl8712_led.c static void BlinkWorkItemCallback(struct work_struct *work)
work 830 drivers/staging/rtl8712/rtl8712_led.c struct LED_871x *pLed = container_of(work, struct LED_871x,
work 126 drivers/staging/rtl8712/rtl871x_pwrctrl.c static void SetPSModeWorkItemCallback(struct work_struct *work)
work 128 drivers/staging/rtl8712/rtl871x_pwrctrl.c struct pwrctrl_priv *pwrpriv = container_of(work,
work 140 drivers/staging/rtl8712/rtl871x_pwrctrl.c static void rpwm_workitem_callback(struct work_struct *work)
work 142 drivers/staging/rtl8712/rtl871x_pwrctrl.c struct pwrctrl_priv *pwrpriv = container_of(work,
work 94 drivers/staging/rtl8712/xmit_linux.c void r8712_SetFilter(struct work_struct *work)
work 96 drivers/staging/rtl8712/xmit_linux.c struct _adapter *adapter = container_of(work, struct _adapter,
work 38 drivers/staging/rtl8712/xmit_osdep.h void r8712_SetFilter(struct work_struct *work);
work 204 drivers/staging/rtl8723bs/core/rtw_cmd.c static void c2h_wk_callback(_workitem *work);
work 1897 drivers/staging/rtl8723bs/core/rtw_cmd.c static void c2h_wk_callback(_workitem *work)
work 1899 drivers/staging/rtl8723bs/core/rtw_cmd.c struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk);
work 751 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c static void cpwm_event_callback(struct work_struct *work)
work 753 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c struct pwrctrl_priv *pwrpriv = container_of(work, struct pwrctrl_priv, cpwm_event);
work 764 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c static void rpwmtimeout_workitem_callback(struct work_struct *work)
work 771 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c pwrpriv = container_of(work, struct pwrctrl_priv, rpwmtimeoutwi);
work 20 drivers/staging/speakup/selection.c struct work_struct work;
work 32 drivers/staging/speakup/selection.c static void __speakup_set_selection(struct work_struct *work)
work 35 drivers/staging/speakup/selection.c container_of(work, struct speakup_selection_work, work);
work 61 drivers/staging/speakup/selection.c .work = __WORK_INITIALIZER(speakup_sel_work.work,
work 90 drivers/staging/speakup/selection.c schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work);
work 99 drivers/staging/speakup/selection.c cancel_work_sync(&speakup_sel_work.work);
work 109 drivers/staging/speakup/selection.c static void __speakup_paste_selection(struct work_struct *work)
work 112 drivers/staging/speakup/selection.c container_of(work, struct speakup_selection_work, work);
work 120 drivers/staging/speakup/selection.c .work = __WORK_INITIALIZER(speakup_paste_work.work,
work 132 drivers/staging/speakup/selection.c schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
work 140 drivers/staging/speakup/selection.c cancel_work_sync(&speakup_paste_work.work);
work 739 drivers/staging/unisys/visornic/visornic_main.c static void visornic_timeout_reset(struct work_struct *work)
work 745 drivers/staging/unisys/visornic/visornic_main.c devdata = container_of(work, struct visornic_devdata, timeout_reset);
work 566 drivers/staging/uwb/drp.c static void uwb_cnflt_update_work(struct work_struct *work)
work 568 drivers/staging/uwb/drp.c struct uwb_cnflt_alien *cnflt = container_of(work,
work 390 drivers/staging/uwb/rsv.c static void uwb_rsv_handle_timeout_work(struct work_struct *work)
work 392 drivers/staging/uwb/rsv.c struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
work 893 drivers/staging/uwb/rsv.c static void uwb_rsv_update_work(struct work_struct *work)
work 895 drivers/staging/uwb/rsv.c struct uwb_rc *rc = container_of(work, struct uwb_rc,
work 896 drivers/staging/uwb/rsv.c rsv_update_work.work);
work 914 drivers/staging/uwb/rsv.c static void uwb_rsv_alien_bp_work(struct work_struct *work)
work 916 drivers/staging/uwb/rsv.c struct uwb_rc *rc = container_of(work, struct uwb_rc,
work 917 drivers/staging/uwb/rsv.c rsv_alien_bp_work.work);
work 155 drivers/staging/uwb/whc-rc.c static void whcrc_event_work(struct work_struct *work)
work 157 drivers/staging/uwb/whc-rc.c struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
work 122 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c struct work_struct work;
work 244 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c static void buffer_work_cb(struct work_struct *work)
work 247 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c container_of(work, struct mmal_msg_context, u.bulk.work);
work 268 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c static void buffer_to_host_work_cb(struct work_struct *work)
work 271 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c container_of(work, struct mmal_msg_context,
work 388 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
work 514 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c schedule_work(&msg_context->u.bulk.work);
work 523 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c schedule_work(&msg_context->u.bulk.work);
work 533 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c schedule_work(&msg_context->u.bulk.work);
work 1125 drivers/staging/vt6655/device_main.c static void vnt_interrupt_work(struct work_struct *work)
work 1128 drivers/staging/vt6655/device_main.c container_of(work, struct vnt_private, interrupt_work);
work 81 drivers/staging/vt6656/wcmd.c void vnt_run_command(struct work_struct *work)
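speakup's selection.c above avoids runtime INIT_WORK() entirely: its work items are initialized at compile time with __WORK_INITIALIZER() and kicked with schedule_work_on(WORK_CPU_UNBOUND, ...). A minimal sketch of that static pattern, hypothetical names:

        #include <linux/workqueue.h>

        struct sel_work {                       /* hypothetical */
                struct work_struct work;
                unsigned long count;            /* payload set before queueing */
        };

        static void sel_fn(struct work_struct *work)
        {
                struct sel_work *sw = container_of(work, struct sel_work, work);

                sw->count++;
        }

        /* initialized at compile time: no INIT_WORK() call anywhere */
        static struct sel_work sel_work = {
                .work = __WORK_INITIALIZER(sel_work.work, sel_fn),
        };

        static void sel_kick(void)
        {
                /* WORK_CPU_UNBOUND behaves like plain schedule_work() */
                schedule_work_on(WORK_CPU_UNBOUND, &sel_work.work);
        }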
work 84 drivers/staging/vt6656/wcmd.c container_of(work, struct vnt_private, run_command_work.work);
work 48 drivers/staging/vt6656/wcmd.h void vnt_run_command(struct work_struct *work);
work 83 drivers/staging/wilc1000/wilc_hif.c struct work_struct work;
work 158 drivers/staging/wilc1000/wilc_hif.c INIT_WORK(&msg->work, msg->fn);
work 163 drivers/staging/wilc1000/wilc_hif.c if (!queue_work(msg->vif->wilc->hif_workqueue, &msg->work))
work 397 drivers/staging/wilc1000/wilc_hif.c static void handle_connect_timeout(struct work_struct *work)
work 399 drivers/staging/wilc1000/wilc_hif.c struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
work 574 drivers/staging/wilc1000/wilc_hif.c static void handle_rcvd_ntwrk_info(struct work_struct *work)
work 576 drivers/staging/wilc1000/wilc_hif.c struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
work 726 drivers/staging/wilc1000/wilc_hif.c static void handle_rcvd_gnrl_async_info(struct work_struct *work)
work 728 drivers/staging/wilc1000/wilc_hif.c struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
work 858 drivers/staging/wilc1000/wilc_hif.c static void handle_get_statistics(struct work_struct *work)
work 860 drivers/staging/wilc1000/wilc_hif.c struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
work 979 drivers/staging/wilc1000/wilc_hif.c static void wilc_handle_listen_state_expired(struct work_struct *work)
work 981 drivers/staging/wilc1000/wilc_hif.c struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
work 1010 drivers/staging/wilc1000/wilc_hif.c static void handle_set_mcast_filter(struct work_struct *work)
work 1012 drivers/staging/wilc1000/wilc_hif.c struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
work 1046 drivers/staging/wilc1000/wilc_hif.c static void handle_scan_timer(struct work_struct *work)
work 1048 drivers/staging/wilc1000/wilc_hif.c struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
work 1054 drivers/staging/wilc1000/wilc_hif.c static void handle_scan_complete(struct work_struct *work)
work 1056 drivers/staging/wilc1000/wilc_hif.c struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
work 82 drivers/staging/wusbcore/devconnect.c static void wusbhc_devconnect_acked_work(struct work_struct *work);
work 226 drivers/staging/wusbcore/devconnect.c static void wusbhc_devconnect_acked_work(struct work_struct *work)
work 228 drivers/staging/wusbcore/devconnect.c struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev,
work 189 drivers/staging/wusbcore/host/whci/asl.c void scan_async_work(struct work_struct *work)
work 191 drivers/staging/wusbcore/host/whci/asl.c struct whc *whc = container_of(work, struct whc, async_work);
work 74 drivers/staging/wusbcore/host/whci/int.c void whc_dn_work(struct work_struct *work)
work 76 drivers/staging/wusbcore/host/whci/int.c struct whc *whc = container_of(work, struct whc, dn_work);
work 218 drivers/staging/wusbcore/host/whci/pzl.c void scan_periodic_work(struct work_struct *work)
work 220 drivers/staging/wusbcore/host/whci/pzl.c struct whc *whc = container_of(work, struct whc, periodic_work);
work 384 drivers/staging/wusbcore/host/whci/qset.c static void urb_dequeue_work(struct work_struct *work)
work 386 drivers/staging/wusbcore/host/whci/qset.c struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
work 155 drivers/staging/wusbcore/host/whci/whcd.h void whc_dn_work(struct work_struct *work);
work 165 drivers/staging/wusbcore/host/whci/whcd.h void scan_async_work(struct work_struct *work);
work 175 drivers/staging/wusbcore/host/whci/whcd.h void scan_periodic_work(struct work_struct *work);
work 19 drivers/staging/wusbcore/security.c static void wusbhc_gtk_rekey_work(struct work_struct *work);
work 525 drivers/staging/wusbcore/security.c static void wusbhc_gtk_rekey_work(struct work_struct *work)
work 527 drivers/staging/wusbcore/security.c struct wusbhc *wusbhc = container_of(work,
work 47 drivers/staging/wusbcore/wa-nep.c struct work_struct work;
work 83 drivers/staging/wusbcore/wa-nep.c work);
work 189 drivers/staging/wusbcore/wa-nep.c INIT_WORK(&nw->work, wa_notif_dispatch);
work 194 drivers/staging/wusbcore/wa-nep.c queue_work(wusbd, &nw->work);
work 527 drivers/target/iscsi/cxgbit/cxgbit_main.c struct work_struct work;
work 580 drivers/target/iscsi/cxgbit/cxgbit_main.c static void cxgbit_dcb_workfn(struct work_struct *work)
work 588 drivers/target/iscsi/cxgbit/cxgbit_main.c dcb_work = container_of(work, struct cxgbit_dcb_work, work);
work 647 drivers/target/iscsi/cxgbit/cxgbit_main.c INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
work 648 drivers/target/iscsi/cxgbit/cxgbit_main.c schedule_work(&dcb_work->work);
work 565 drivers/target/iscsi/iscsi_target_nego.c static void iscsi_target_do_login_rx(struct work_struct *work)
work 567 drivers/target/iscsi/iscsi_target_nego.c struct iscsi_conn *conn = container_of(work,
work 568 drivers/target/iscsi/iscsi_target_nego.c struct iscsi_conn, login_work.work);
work 96 drivers/target/loopback/tcm_loop.c static void tcm_loop_submission_work(struct work_struct *work)
work 99 drivers/target/loopback/tcm_loop.c container_of(work, struct tcm_loop_cmd, work);
work 185 drivers/target/loopback/tcm_loop.c INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
work 186 drivers/target/loopback/tcm_loop.c queue_work(tcm_loop_workqueue, &tl_cmd->work);
work 19 drivers/target/loopback/tcm_loop.h struct work_struct work;
work 639 drivers/target/sbp/sbp_target.c static void session_maintenance_work(struct work_struct *work)
work 641 drivers/target/sbp/sbp_target.c struct sbp_session *sess = container_of(work, struct sbp_session,
work 642 drivers/target/sbp/sbp_target.c maint_work.work);
work 733 drivers/target/sbp/sbp_target.c queue_work(system_unbound_wq, &agent->work);
work 767 drivers/target/sbp/sbp_target.c queue_work(system_unbound_wq, &agent->work);
work 854 drivers/target/sbp/sbp_target.c static void tgt_agent_process_work(struct work_struct *work)
work 857 drivers/target/sbp/sbp_target.c container_of(work, struct sbp_target_request, work);
work 930 drivers/target/sbp/sbp_target.c static void tgt_agent_fetch_work(struct work_struct *work)
work 933 drivers/target/sbp/sbp_target.c container_of(work, struct sbp_target_agent, work);
work 992 drivers/target/sbp/sbp_target.c INIT_WORK(&req->work, tgt_agent_process_work);
work 993 drivers/target/sbp/sbp_target.c queue_work(system_unbound_wq, &req->work);
work 1030 drivers/target/sbp/sbp_target.c INIT_WORK(&agent->work, tgt_agent_fetch_work);
work 1047 drivers/target/sbp/sbp_target.c cancel_work_sync(&agent->work);
work 1453 drivers/target/sbp/sbp_target.c static void sbp_mgt_agent_process(struct work_struct *work)
work 1456 drivers/target/sbp/sbp_target.c container_of(work, struct sbp_management_agent, work);
work 1624 drivers/target/sbp/sbp_target.c queue_work(system_unbound_wq, &agent->work);
work 1653 drivers/target/sbp/sbp_target.c INIT_WORK(&agent->work, sbp_mgt_agent_process);
work 1670 drivers/target/sbp/sbp_target.c cancel_work_sync(&agent->work);
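cxgbit_main.c above (and f_tcm.c further down) shows the one-shot variant: each event gets its own kmalloc'ed structure with an embedded work item, the payload travels inside the allocation, and the handler frees it when done. A sketch with hypothetical dcb_event names:

        #include <linux/workqueue.h>
        #include <linux/slab.h>

        struct dcb_event {                      /* hypothetical */
                struct work_struct work;
                int payload;                    /* data travels with the item */
        };

        static void dcb_event_fn(struct work_struct *work)
        {
                struct dcb_event *ev = container_of(work, struct dcb_event, work);

                /* ... act on ev->payload, may sleep ... */

                kfree(ev);                      /* handler owns the allocation */
        }

        static int dcb_event_post(int payload)  /* callable from atomic context */
        {
                struct dcb_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

                if (!ev)
                        return -ENOMEM;
                ev->payload = payload;
                INIT_WORK(&ev->work, dcb_event_fn);
                schedule_work(&ev->work);
                return 0;
        }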
work 205 drivers/target/sbp/sbp_target.h struct work_struct work;
work 215 drivers/target/sbp/sbp_target.h struct work_struct work;
work 229 drivers/target/sbp/sbp_target.h struct work_struct work;
work 151 drivers/target/target_core_internal.h void target_qf_do_work(struct work_struct *work);
work 57 drivers/target/target_core_transport.c static void target_complete_ok_work(struct work_struct *work);
work 712 drivers/target/target_core_transport.c static void target_complete_failure_work(struct work_struct *work)
work 714 drivers/target/target_core_transport.c struct se_cmd *cmd = container_of(work, struct se_cmd, work);
work 807 drivers/target/target_core_transport.c static void target_abort_work(struct work_struct *work)
work 809 drivers/target/target_core_transport.c struct se_cmd *cmd = container_of(work, struct se_cmd, work);
work 821 drivers/target/target_core_transport.c INIT_WORK(&cmd->work, target_abort_work);
work 822 drivers/target/target_core_transport.c queue_work(target_completion_wq, &cmd->work);
work 862 drivers/target/target_core_transport.c INIT_WORK(&cmd->work, success ? target_complete_ok_work :
work 865 drivers/target/target_core_transport.c queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
work 867 drivers/target/target_core_transport.c queue_work(target_completion_wq, &cmd->work);
work 909 drivers/target/target_core_transport.c void target_qf_do_work(struct work_struct *work)
work 911 drivers/target/target_core_transport.c struct se_device *dev = container_of(work, struct se_device,
work 1364 drivers/target/target_core_transport.c INIT_WORK(&cmd->work, NULL);
work 1709 drivers/target/target_core_transport.c static void target_complete_tmr_failure(struct work_struct *work)
work 1711 drivers/target/target_core_transport.c struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
work 1812 drivers/target/target_core_transport.c INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
work 1813 drivers/target/target_core_transport.c schedule_work(&se_cmd->work);
work 1839 drivers/target/target_core_transport.c INIT_WORK(&cmd->work, target_abort_work);
work 1840 drivers/target/target_core_transport.c queue_work(target_completion_wq, &cmd->work);
work 2264 drivers/target/target_core_transport.c static void target_complete_ok_work(struct work_struct *work)
work 2266 drivers/target/target_core_transport.c struct se_cmd *cmd = container_of(work, struct se_cmd, work);
work 3292 drivers/target/target_core_transport.c static void target_tmr_work(struct work_struct *work)
work 3294 drivers/target/target_core_transport.c struct se_cmd *cmd = container_of(work, struct se_cmd, work);
work 3370 drivers/target/target_core_transport.c INIT_WORK(&cmd->work, target_tmr_work);
work 3371 drivers/target/target_core_transport.c schedule_work(&cmd->work);
work 2724 drivers/target/target_core_user.c static void tcmu_unmap_work_fn(struct work_struct *work)
work 725 drivers/target/target_core_xcopy.c static void target_xcopy_do_work(struct work_struct *work)
work 727 drivers/target/target_core_xcopy.c struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
work 112 drivers/target/tcm_fc/tcm_fc.h struct work_struct work;
work 421 drivers/target/tcm_fc/tfc_cmd.c static void ft_send_work(struct work_struct *work);
work 450 drivers/target/tcm_fc/tfc_cmd.c INIT_WORK(&cmd->work, ft_send_work);
work 451 drivers/target/tcm_fc/tfc_cmd.c queue_work(sess->tport->tpg->workqueue, &cmd->work);
work 491 drivers/target/tcm_fc/tfc_cmd.c static void ft_send_work(struct work_struct *work)
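target_core_transport.c above reuses one embedded cmd->work for several handlers, calling INIT_WORK() with a different function each time just before queueing (legal as long as the item is not pending), and uses queue_work_on() to complete on the CPU that submitted the command. A hedged sketch of that shape, hypothetical names:

        #include <linux/workqueue.h>

        struct cmd {                            /* hypothetical */
                struct work_struct work;
                int cpuid;                      /* CPU that submitted the cmd */
                bool success;
        };

        static struct workqueue_struct *completion_wq; /* created elsewhere */

        static void cmd_ok_fn(struct work_struct *work)   { /* ... */ }
        static void cmd_fail_fn(struct work_struct *work) { /* ... */ }

        static void cmd_complete(struct cmd *cmd)
        {
                /* re-initializing a non-pending item with a new handler is fine */
                INIT_WORK(&cmd->work, cmd->success ? cmd_ok_fn : cmd_fail_fn);

                /* run the completion on the submitting CPU for cache locality */
                queue_work_on(cmd->cpuid, completion_wq, &cmd->work);
        }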
work 493 drivers/target/tcm_fc/tfc_cmd.c struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
work 186 drivers/target/tcm_fc/tfc_io.c static void ft_execute_work(struct work_struct *work)
work 188 drivers/target/tcm_fc/tfc_io.c struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
work 318 drivers/target/tcm_fc/tfc_io.c INIT_WORK(&cmd->work, ft_execute_work);
work 319 drivers/target/tcm_fc/tfc_io.c queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
work 50 drivers/thermal/da9062-thermal.c struct delayed_work work;
work 60 drivers/thermal/da9062-thermal.c static void da9062_thermal_poll_on(struct work_struct *work)
work 62 drivers/thermal/da9062-thermal.c struct da9062_thermal *thermal = container_of(work,
work 64 drivers/thermal/da9062-thermal.c work.work);
work 100 drivers/thermal/da9062-thermal.c queue_delayed_work(system_freezable_wq, &thermal->work, delay);
work 119 drivers/thermal/da9062-thermal.c queue_delayed_work(system_freezable_wq, &thermal->work, 0);
work 239 drivers/thermal/da9062-thermal.c INIT_DELAYED_WORK(&thermal->work, da9062_thermal_poll_on);
work 287 drivers/thermal/da9062-thermal.c cancel_delayed_work_sync(&thermal->work);
work 365 drivers/thermal/intel/intel_powerclamp.c static void clamp_balancing_func(struct kthread_work *work)
work 373 drivers/thermal/intel/intel_powerclamp.c w_data = container_of(work, struct powerclamp_worker_data,
work 410 drivers/thermal/intel/intel_powerclamp.c static void clamp_idle_injection_func(struct kthread_work *work)
work 414 drivers/thermal/intel/intel_powerclamp.c w_data = container_of(work, struct powerclamp_worker_data,
work 415 drivers/thermal/intel/intel_powerclamp.c idle_injection_work.work);
work 52 drivers/thermal/intel/x86_pkg_temp_thermal.c struct delayed_work work;
work 261 drivers/thermal/intel/x86_pkg_temp_thermal.c static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
work 300 drivers/thermal/intel/x86_pkg_temp_thermal.c static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
work 304 drivers/thermal/intel/x86_pkg_temp_thermal.c schedule_delayed_work_on(cpu, work, ms);
work 322 drivers/thermal/intel/x86_pkg_temp_thermal.c pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
work 354 drivers/thermal/intel/x86_pkg_temp_thermal.c INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn);
work 443 drivers/thermal/intel/x86_pkg_temp_thermal.c cancel_delayed_work_sync(&zonedev->work);
work 452 drivers/thermal/intel/x86_pkg_temp_thermal.c pkg_thermal_schedule_work(target, &zonedev->work);
work 94 drivers/thermal/rcar_thermal.c struct delayed_work work;
work 392 drivers/thermal/rcar_thermal.c static void rcar_thermal_work(struct work_struct *work)
work 398 drivers/thermal/rcar_thermal.c priv = container_of(work, struct rcar_thermal_priv, work.work);
work 458 drivers/thermal/rcar_thermal.c queue_delayed_work(system_freezable_wq, &priv->work,
work 477 drivers/thermal/rcar_thermal.c cancel_delayed_work_sync(&priv->work);
work 569 drivers/thermal/rcar_thermal.c INIT_DELAYED_WORK(&priv->work, rcar_thermal_work);
work 773 drivers/thermal/samsung/exynos_tmu.c static void exynos_tmu_work(struct work_struct *work)
work 775 drivers/thermal/samsung/exynos_tmu.c struct exynos_tmu_data *data = container_of(work,
work 82 drivers/thermal/tegra/tegra-bpmp-thermal.c static void tz_device_update_work_fn(struct work_struct *work)
work 86 drivers/thermal/tegra/tegra-bpmp-thermal.c zone = container_of(work, struct tegra_bpmp_thermal_zone,
work 337 drivers/thermal/thermal_core.c static void thermal_emergency_poweroff_func(struct work_struct *work)
work 514 drivers/thermal/thermal_core.c static void thermal_zone_device_check(struct work_struct *work)
work 516 drivers/thermal/thermal_core.c struct thermal_zone_device *tz = container_of(work, struct
work 518 drivers/thermal/thermal_core.c poll_queue.work);
work 38 drivers/thermal/ti-soc-thermal/ti-thermal-common.c static void ti_thermal_work(struct work_struct *work)
work 40 drivers/thermal/ti-soc-thermal/ti-thermal-common.c struct ti_thermal_data *data = container_of(work,
work 475 drivers/thunderbolt/ctl.c schedule_work(&req->work);
work 483 drivers/thunderbolt/ctl.c static void tb_cfg_request_work(struct work_struct *work)
work 485 drivers/thunderbolt/ctl.c struct tb_cfg_request *req = container_of(work, typeof(*req), work);
work 512 drivers/thunderbolt/ctl.c INIT_WORK(&req->work, tb_cfg_request_work);
work 526 drivers/thunderbolt/ctl.c schedule_work(&req->work);
work 549 drivers/thunderbolt/ctl.c schedule_work(&req->work);
work 588 drivers/thunderbolt/ctl.c flush_work(&req->work);
work 93 drivers/thunderbolt/ctl.h struct work_struct work;
work 104 drivers/thunderbolt/icm.c struct work_struct work;
work 1588 drivers/thunderbolt/icm.c static void icm_handle_notification(struct work_struct *work)
work 1590 drivers/thunderbolt/icm.c struct icm_notification *n = container_of(work, typeof(*n), work);
work 1636 drivers/thunderbolt/icm.c INIT_WORK(&n->work, icm_handle_notification);
work 1640 drivers/thunderbolt/icm.c queue_work(tb->wq, &n->work);
work 1958 drivers/thunderbolt/icm.c static void icm_rescan_work(struct work_struct *work)
work 1960 drivers/thunderbolt/icm.c struct icm *icm = container_of(work, struct icm, rescan_work.work);
work 228 drivers/thunderbolt/nhi.c static void ring_work(struct work_struct *work)
work 230 drivers/thunderbolt/nhi.c struct tb_ring *ring = container_of(work, typeof(*ring), work);
work 361 drivers/thunderbolt/nhi.c schedule_work(&ring->work);
work 510 drivers/thunderbolt/nhi.c INIT_WORK(&ring->work, ring_work);
work 693 drivers/thunderbolt/nhi.c schedule_work(&ring->work);
work 694 drivers/thunderbolt/nhi.c flush_work(&ring->work);
work 744 drivers/thunderbolt/nhi.c flush_work(&ring->work);
work 804 drivers/thunderbolt/nhi.c static void nhi_interrupt_work(struct work_struct *work)
work 806 drivers/thunderbolt/nhi.c struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
work 32 drivers/thunderbolt/tb.c struct work_struct work;
work 39 drivers/thunderbolt/tb.c static void tb_handle_hotplug(struct work_struct *work);
work 53 drivers/thunderbolt/tb.c INIT_WORK(&ev->work, tb_handle_hotplug);
work 54 drivers/thunderbolt/tb.c queue_work(tb->wq, &ev->work);
work 501 drivers/thunderbolt/tb.c static void tb_handle_hotplug(struct work_struct *work)
work 503 drivers/thunderbolt/tb.c struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
work 26 drivers/thunderbolt/xdomain.c struct work_struct work;
work 504 drivers/thunderbolt/xdomain.c static void tb_xdp_handle_request(struct work_struct *work)
work 506 drivers/thunderbolt/xdomain.c struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
work 593 drivers/thunderbolt/xdomain.c INIT_WORK(&xw->work, tb_xdp_handle_request);
work 601 drivers/thunderbolt/xdomain.c schedule_work(&xw->work);
work 894 drivers/thunderbolt/xdomain.c static void tb_xdomain_get_uuid(struct work_struct *work)
work 896 drivers/thunderbolt/xdomain.c struct tb_xdomain *xd = container_of(work, typeof(*xd),
work 897 drivers/thunderbolt/xdomain.c get_uuid_work.work);
work 943 drivers/thunderbolt/xdomain.c static void tb_xdomain_get_properties(struct work_struct *work)
work 945 drivers/thunderbolt/xdomain.c struct tb_xdomain *xd = container_of(work, typeof(*xd),
work 946 drivers/thunderbolt/xdomain.c get_properties_work.work);
work 1037 drivers/thunderbolt/xdomain.c static void tb_xdomain_properties_changed(struct work_struct *work)
work 1039 drivers/thunderbolt/xdomain.c struct tb_xdomain *xd = container_of(work, typeof(*xd),
work 1040 drivers/thunderbolt/xdomain.c properties_changed_work.work);
work 563 drivers/tty/hvc/hvc_console.c static void hvc_set_winsz(struct work_struct *work)
work 570 drivers/tty/hvc/hvc_console.c hp = container_of(work, struct hvc_struct, tty_resize);
work 436 drivers/tty/hvc/hvc_iucv.c static void hvc_iucv_sndbuf_work(struct work_struct *work)
work 440 drivers/tty/hvc/hvc_iucv.c priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
work 646 drivers/tty/hvc/hvsi.c static void hvsi_handshaker(struct work_struct *work)
work 649 drivers/tty/hvc/hvsi.c container_of(work, struct hvsi_struct, handshaker);
work 849 drivers/tty/hvc/hvsi.c static void hvsi_write_worker(struct work_struct *work)
work 852 drivers/tty/hvc/hvsi.c container_of(work, struct hvsi_struct, writer.work);
work 158 drivers/tty/ipwireless/tty.c int work = 0;
work 168 drivers/tty/ipwireless/tty.c work = tty_insert_flip_string(&tty->port, data, length);
work 170 drivers/tty/ipwireless/tty.c if (work != length)
work 173 drivers/tty/ipwireless/tty.c length - work);
work 175 drivers/tty/ipwireless/tty.c if (work)
work 945 drivers/tty/serial/8250/8250_core.c static void serial_8250_overrun_backoff_work(struct work_struct *work)
work 948 drivers/tty/serial/8250/8250_core.c container_of(to_delayed_work(work), struct uart_8250_port,
work 571 drivers/tty/serial/8250/8250_omap.c static void omap8250_uart_qos_work(struct work_struct *work)
work 575 drivers/tty/serial/8250/8250_omap.c priv = container_of(work, struct omap8250_priv, qos_work);
work 119 drivers/tty/serial/digicolor-usart.c static void digicolor_rx_poll(struct work_struct *work)
work 122 drivers/tty/serial/digicolor-usart.c container_of(to_delayed_work(work),
work 121 drivers/tty/serial/max3100.c struct work_struct work;
work 178 drivers/tty/serial/max3100.c queue_work(s->workqueue, &s->work);
work 258 drivers/tty/serial/max3100.c struct max3100_port *s = container_of(w, struct max3100_port, work);
work 604 drivers/tty/serial/max3100.c INIT_WORK(&s->work, max3100_work);
work 830 drivers/tty/serial/omap-serial.c static void serial_omap_uart_qos_work(struct work_struct *work)
work 832 drivers/tty/serial/omap-serial.c struct uart_omap_port *up = container_of(work, struct uart_omap_port,
work 1397 drivers/tty/serial/sh-sci.c static void sci_dma_tx_work_fn(struct work_struct *work)
work 1399 drivers/tty/serial/sh-sci.c struct sci_port *s = container_of(work, struct sci_port, work_tx);
work 787 drivers/tty/synclink.c static void mgsl_bh_handler(struct work_struct *work);
work 1054 drivers/tty/synclink.c static void mgsl_bh_handler(struct work_struct *work)
work 1057 drivers/tty/synclink.c container_of(work, struct mgsl_struct, task);
work 1435 drivers/tty/synclink.c int work = 0;
work 1510 drivers/tty/synclink.c work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
work 1520 drivers/tty/synclink.c if(work)
work 479 drivers/tty/synclink_gt.c static void bh_handler(struct work_struct *work);
work 1921 drivers/tty/synclink_gt.c static void bh_handler(struct work_struct *work)
work 1923 drivers/tty/synclink_gt.c struct slgt_info *info = container_of(work, struct slgt_info, task);
work 594 drivers/tty/synclinkmp.c static void bh_handler(struct work_struct *work);
work 1980 drivers/tty/synclinkmp.c static void bh_handler(struct work_struct *work)
work 1982 drivers/tty/synclinkmp.c SLMP_INFO *info = container_of(work, SLMP_INFO, task);
work 746 drivers/tty/sysrq.c static void sysrq_reinject_alt_sysrq(struct work_struct *work)
work 749 drivers/tty/sysrq.c container_of(work, struct sysrq_state, reinject_work);
work 75 drivers/tty/tty_buffer.c queue_work(system_unbound_wq, &buf->work);
work 413 drivers/tty/tty_buffer.c queue_work(system_unbound_wq, &buf->work);
work 500 drivers/tty/tty_buffer.c static void flush_to_ldisc(struct work_struct *work)
work 502 drivers/tty/tty_buffer.c struct tty_port *port = container_of(work, struct tty_port, buf.work);
work 579 drivers/tty/tty_buffer.c INIT_WORK(&buf->work, flush_to_ldisc);
work 608 drivers/tty/tty_buffer.c return queue_work(system_unbound_wq, &port->buf.work);
work 613 drivers/tty/tty_buffer.c return cancel_work_sync(&port->buf.work);
work 618 drivers/tty/tty_buffer.c flush_work(&port->buf.work);
work 661 drivers/tty/tty_io.c static void do_tty_hangup(struct work_struct *work)
work 664 drivers/tty/tty_io.c container_of(work, struct tty_struct, hangup_work);
work 1444 drivers/tty/tty_io.c static void release_one_tty(struct work_struct *work)
work 1447 drivers/tty/tty_io.c container_of(work, struct tty_struct, hangup_work);
work 2935 drivers/tty/tty_io.c static void do_SAK_work(struct work_struct *work)
work 2938 drivers/tty/tty_io.c container_of(work, struct tty_struct, SAK_work);
work 1061 drivers/tty/vt/vt_ioctl.c void vc_SAK(struct work_struct *work)
work 1064 drivers/tty/vt/vt_ioctl.c container_of(work, struct vc, SAK_work);
work 195 drivers/usb/atm/cxacru.c static void cxacru_poll_status(struct work_struct *work);
work 429 drivers/usb/atm/cxacru.c cxacru_poll_status(&instance->poll_work.work);
work 818 drivers/usb/atm/cxacru.c cxacru_poll_status(&instance->poll_work.work);
work 822 drivers/usb/atm/cxacru.c static void cxacru_poll_status(struct work_struct *work)
work 825 drivers/usb/atm/cxacru.c container_of(work, struct cxacru_data, poll_work.work);
work 483 drivers/usb/atm/speedtch.c static void speedtch_check_status(struct work_struct *work)
work 486 drivers/usb/atm/speedtch.c container_of(work, struct speedtch_instance_data,
work 898 drivers/usb/atm/ueagle-atm.c static void uea_load_page_e1(struct work_struct *work)
work 900 drivers/usb/atm/ueagle-atm.c struct uea_softc *sc = container_of(work, struct uea_softc, task);
work 1023 drivers/usb/atm/ueagle-atm.c static void uea_load_page_e4(struct work_struct *work)
work 1025 drivers/usb/atm/ueagle-atm.c struct uea_softc *sc = container_of(work, struct uea_softc, task);
work 536 drivers/usb/cdns3/ep0.c void cdns3_pending_setup_status_handler(struct work_struct *work)
work 538 drivers/usb/cdns3/ep0.c struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
work 693 drivers/usb/cdns3/gadget.c static void cdns3_free_aligned_request_buf(struct work_struct *work)
work 695 drivers/usb/cdns3/gadget.c struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
work 1307 drivers/usb/cdns3/gadget.h void cdns3_pending_setup_status_handler(struct work_struct *work);
work 222 drivers/usb/chipidea/ci.h struct work_struct work;
work 196 drivers/usb/chipidea/otg.c static void ci_otg_work(struct work_struct *work)
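tty_buffer.c above queues to system_unbound_wq, one of the kernel's shared workqueues, rather than allocating its own: unbound items are not tied to the submitting CPU and may run for a long time without blocking per-CPU work. A short sketch with hypothetical flusher names:

        #include <linux/workqueue.h>

        struct flusher {                        /* hypothetical */
                struct work_struct work;
        };

        static void flush_fn(struct work_struct *work)
        {
                /* long-running, CPU-agnostic processing suits unbound workers */
        }

        static void flusher_init(struct flusher *f)
        {
                INIT_WORK(&f->work, flush_fn);
        }

        static void flusher_kick(struct flusher *f)
        {
                /* shared kernel queue: nothing to allocate or destroy */
                queue_work(system_unbound_wq, &f->work);
        }

        static void flusher_sync(struct flusher *f)
        {
                flush_work(&f->work);           /* wait for the handler */
        }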
work 198 drivers/usb/chipidea/otg.c struct ci_hdrc *ci = container_of(work, struct ci_hdrc, work);
work 229 drivers/usb/chipidea/otg.c INIT_WORK(&ci->work, ci_otg_work);
work 20 drivers/usb/chipidea/otg.h if (queue_work(ci->wq, &ci->work) == false)
work 537 drivers/usb/class/cdc-acm.c schedule_work(&acm->work);
work 567 drivers/usb/class/cdc-acm.c schedule_work(&acm->work);
work 570 drivers/usb/class/cdc-acm.c static void acm_softint(struct work_struct *work)
work 573 drivers/usb/class/cdc-acm.c struct acm *acm = container_of(work, struct acm, work);
work 1357 drivers/usb/class/cdc-acm.c INIT_WORK(&acm->work, acm_softint);
work 1567 drivers/usb/class/cdc-acm.c cancel_work_sync(&acm->work);
work 1610 drivers/usb/class/cdc-acm.c cancel_work_sync(&acm->work);
work 115 drivers/usb/class/cdc-acm.h struct work_struct work; /* work queue entry for various purposes*/
work 748 drivers/usb/class/cdc-wdm.c static void wdm_rxwork(struct work_struct *work)
work 750 drivers/usb/class/cdc-wdm.c struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
work 773 drivers/usb/class/cdc-wdm.c static void service_interrupt_work(struct work_struct *work)
work 777 drivers/usb/class/cdc-wdm.c desc = container_of(work, struct wdm_device, service_outs_intr);
work 60 drivers/usb/common/usb-conn-gpio.c static void usb_conn_detect_cable(struct work_struct *work)
work 66 drivers/usb/common/usb-conn-gpio.c info = container_of(to_delayed_work(work),
work 120 drivers/usb/common/usb-otg-fsm.c static void otg_hnp_polling_work(struct work_struct *work)
work 122 drivers/usb/common/usb-otg-fsm.c struct otg_fsm *fsm = container_of(to_delayed_work(work),
work 2233 drivers/usb/core/hcd.c static void hcd_resume_work(struct work_struct *work)
work 2235 drivers/usb/core/hcd.c struct usb_hcd *hcd = container_of(work, struct usb_hcd, wakeup_work);
work 2338 drivers/usb/core/hcd.c static void hcd_died_work(struct work_struct *work)
work 2340 drivers/usb/core/hcd.c struct usb_hcd *hcd = container_of(work, struct usb_hcd, died_work);
work 55 drivers/usb/core/hub.c static void hub_event(struct work_struct *work);
work 469 drivers/usb/core/hub.c static void led_work(struct work_struct *work)
work 472 drivers/usb/core/hub.c container_of(work, struct usb_hub, leds.work);
work 774 drivers/usb/core/hub.c static void hub_tt_work(struct work_struct *work)
work 777 drivers/usb/core/hub.c container_of(work, struct usb_hub, tt.clear_work);
work 1288 drivers/usb/core/hub.c struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
work 1295 drivers/usb/core/hub.c struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
work 5386 drivers/usb/core/hub.c static void hub_event(struct work_struct *work)
work 5396 drivers/usb/core/hub.c hub = container_of(work, struct usb_hub, events);
work 2050 drivers/usb/core/message.c struct work_struct work;
work 2055 drivers/usb/core/message.c static void driver_set_config_work(struct work_struct *work)
work 2058 drivers/usb/core/message.c container_of(work, struct set_config_request, work);
work 2117 drivers/usb/core/message.c INIT_WORK(&req->work, driver_set_config_work);
work 2124 drivers/usb/core/message.c schedule_work(&req->work);
work 3165 drivers/usb/dwc2/hcd.c static void dwc2_conn_id_status_change(struct work_struct *work)
work 3167 drivers/usb/dwc2/hcd.c struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
work 4171 drivers/usb/dwc2/hcd.c static void dwc2_hcd_start_func(struct work_struct *work)
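cdc-acm.c above calls schedule_work() from its URB completion path, which runs in atomic context; queueing is safe there, and everything that can sleep is deferred to the handler. A minimal sketch of that split, hypothetical names:

        #include <linux/workqueue.h>

        struct acm_like {                       /* hypothetical */
                struct work_struct work;
        };

        /* URB completion: hard-IRQ/softirq context, must not sleep */
        static void rx_complete(struct acm_like *dev)
        {
                schedule_work(&dev->work);      /* safe from atomic context */
        }

        /* runs later in process context: may sleep, take mutexes, etc. */
        static void rx_work_fn(struct work_struct *work)
        {
                struct acm_like *dev = container_of(work, struct acm_like, work);

                /* ... push received data up to the TTY layer ... */
                (void)dev;
        }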
work 4173 drivers/usb/dwc2/hcd.c struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
work 4174 drivers/usb/dwc2/hcd.c start_work.work);
work 4183 drivers/usb/dwc2/hcd.c static void dwc2_hcd_reset_func(struct work_struct *work)
work 4185 drivers/usb/dwc2/hcd.c struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
work 4186 drivers/usb/dwc2/hcd.c reset_work.work);
work 4202 drivers/usb/dwc2/hcd.c static void dwc2_hcd_phy_reset_func(struct work_struct *work)
work 4204 drivers/usb/dwc2/hcd.c struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
work 115 drivers/usb/dwc3/core.c static void __dwc3_set_mode(struct work_struct *work)
work 117 drivers/usb/dwc3/core.c struct dwc3 *dwc = work_to_dwc(work);
work 202 drivers/usb/dwc3/dwc3-pci.c static void dwc3_pci_resume_work(struct work_struct *work)
work 204 drivers/usb/dwc3/dwc3-pci.c struct dwc3_pci *dwc = container_of(work, struct dwc3_pci, wakeup_work);
work 222 drivers/usb/gadget/function/f_fs.c struct work_struct work;
work 818 drivers/usb/gadget/function/f_fs.c static void ffs_user_copy_worker(struct work_struct *work)
work 820 drivers/usb/gadget/function/f_fs.c struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
work 821 drivers/usb/gadget/function/f_fs.c work);
work 857 drivers/usb/gadget/function/f_fs.c INIT_WORK(&io_data->work, ffs_user_copy_worker);
work 858 drivers/usb/gadget/function/f_fs.c queue_work(ffs->io_completion_wq, &io_data->work);
work 3273 drivers/usb/gadget/function/f_fs.c static void ffs_reset_work(struct work_struct *work)
work 3275 drivers/usb/gadget/function/f_fs.c struct ffs_data *ffs = container_of(work,
work 1036 drivers/usb/gadget/function/f_tcm.c static void usbg_cmd_work(struct work_struct *work)
work 1038 drivers/usb/gadget/function/f_tcm.c struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
work 1157 drivers/usb/gadget/function/f_tcm.c INIT_WORK(&cmd->work, usbg_cmd_work);
work 1158 drivers/usb/gadget/function/f_tcm.c queue_work(tpg->workqueue, &cmd->work);
work 1166 drivers/usb/gadget/function/f_tcm.c static void bot_cmd_work(struct work_struct *work)
work 1168 drivers/usb/gadget/function/f_tcm.c struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
work 1241 drivers/usb/gadget/function/f_tcm.c INIT_WORK(&cmd->work, bot_cmd_work);
work 1242 drivers/usb/gadget/function/f_tcm.c queue_work(tpg->workqueue, &cmd->work);
work 2070 drivers/usb/gadget/function/f_tcm.c struct work_struct work;
work 2077 drivers/usb/gadget/function/f_tcm.c struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
work 2078 drivers/usb/gadget/function/f_tcm.c work);
work 2079 drivers/usb/gadget/function/f_tcm.c struct f_uas *fu = work->fu;
work 2080 drivers/usb/gadget/function/f_tcm.c int alt = work->alt;
work 2082 drivers/usb/gadget/function/f_tcm.c kfree(work);
work 2101 drivers/usb/gadget/function/f_tcm.c struct guas_setup_wq *work;
work 2103 drivers/usb/gadget/function/f_tcm.c work = kmalloc(sizeof(*work), GFP_ATOMIC);
work 2104 drivers/usb/gadget/function/f_tcm.c if (!work)
work 2106 drivers/usb/gadget/function/f_tcm.c INIT_WORK(&work->work, tcm_delayed_set_alt);
work 2107 drivers/usb/gadget/function/f_tcm.c work->fu = fu;
work 2108 drivers/usb/gadget/function/f_tcm.c work->alt = alt;
work 2109 drivers/usb/gadget/function/f_tcm.c schedule_work(&work->work);
work 70 drivers/usb/gadget/function/tcm.h struct work_struct work;
work 75 drivers/usb/gadget/function/u_ether.c struct work_struct work;
work 166 drivers/usb/gadget/function/u_ether.c if (!schedule_work(&dev->work))
work 166 drivers/usb/gadget/function/u_ether.c if (!schedule_work(&dev->work)) work 425 drivers/usb/gadget/function/u_ether.c static void eth_work(struct work_struct *work) work 427 drivers/usb/gadget/function/u_ether.c struct eth_dev *dev = container_of(work, struct eth_dev, work); work 762 drivers/usb/gadget/function/u_ether.c INIT_WORK(&dev->work, eth_work); work 828 drivers/usb/gadget/function/u_ether.c INIT_WORK(&dev->work, eth_work); work 1027 drivers/usb/gadget/function/u_ether.c flush_work(&dev->work); work 355 drivers/usb/gadget/function/u_serial.c static void gs_rx_push(struct work_struct *work) work 357 drivers/usb/gadget/function/u_serial.c struct delayed_work *w = to_delayed_work(work); work 432 drivers/usb/gadget/legacy/inode.c struct work_struct work; work 458 drivers/usb/gadget/legacy/inode.c static void ep_user_copy_worker(struct work_struct *work) work 460 drivers/usb/gadget/legacy/inode.c struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); work 511 drivers/usb/gadget/legacy/inode.c INIT_WORK(&priv->work, ep_user_copy_worker); work 512 drivers/usb/gadget/legacy/inode.c schedule_work(&priv->work); work 493 drivers/usb/gadget/udc/aspeed-vhub/hub.c static void ast_vhub_wake_work(struct work_struct *work) work 495 drivers/usb/gadget/udc/aspeed-vhub/hub.c struct ast_vhub *vhub = container_of(work, work 1529 drivers/usb/gadget/udc/at91_udc.c static void at91_vbus_timer_work(struct work_struct *work) work 1531 drivers/usb/gadget/udc/at91_udc.c struct at91_udc *udc = container_of(work, struct at91_udc, work 157 drivers/usb/gadget/udc/bdc/bdc_udc.c static void bdc_func_wake_timer(struct work_struct *work) work 159 drivers/usb/gadget/udc/bdc/bdc_udc.c struct bdc *bdc = container_of(work, struct bdc, func_wake_notify.work); work 1003 drivers/usb/gadget/udc/core.c static void usb_gadget_state_work(struct work_struct *work) work 1005 drivers/usb/gadget/udc/core.c struct usb_gadget *gadget = work_to_gadget(work); work 1016 drivers/usb/gadget/udc/core.c schedule_work(&gadget->work); work 1181 drivers/usb/gadget/udc/core.c INIT_WORK(&gadget->work, usb_gadget_state_work); work 1336 drivers/usb/gadget/udc/core.c flush_work(&gadget->work); work 648 drivers/usb/gadget/udc/lpc32xx_udc.c static void pullup_work(struct work_struct *work) work 651 drivers/usb/gadget/udc/lpc32xx_udc.c container_of(work, struct lpc32xx_udc, pullup_job); work 690 drivers/usb/gadget/udc/lpc32xx_udc.c static void power_work(struct work_struct *work) work 693 drivers/usb/gadget/udc/lpc32xx_udc.c container_of(work, struct lpc32xx_udc, power_job); work 2052 drivers/usb/gadget/udc/mv_udc_core.c static void mv_udc_vbus_work(struct work_struct *work) work 2057 drivers/usb/gadget/udc/mv_udc_core.c udc = container_of(work, struct mv_udc, vbus_work); work 439 drivers/usb/gadget/udc/renesas_usb3.c static void renesas_usb3_extcon_work(struct work_struct *work) work 441 drivers/usb/gadget/udc/renesas_usb3.c struct renesas_usb3 *usb3 = container_of(work, struct renesas_usb3, work 667 drivers/usb/gadget/udc/renesas_usb3.c static void renesas_usb3_role_work(struct work_struct *work) work 670 drivers/usb/gadget/udc/renesas_usb3.c container_of(work, struct renesas_usb3, role_work); work 71 drivers/usb/gadget/udc/snps_udc_plat.c static void udc_drd_work(struct work_struct *work) work 75 drivers/usb/gadget/udc/snps_udc_plat.c udc = container_of(to_delayed_work(work), work 129 drivers/usb/host/ohci-pci.c static void ohci_quirk_nec_worker(struct work_struct *work) work 131 drivers/usb/host/ohci-pci.c struct ohci_hcd *ohci =
container_of(work, struct ohci_hcd, nec_work); work 474 drivers/usb/host/u132-hcd.c static void u132_hcd_monitor_work(struct work_struct *work) work 476 drivers/usb/host/u132-hcd.c struct u132 *u132 = container_of(work, struct u132, monitor.work); work 1297 drivers/usb/host/u132-hcd.c static void u132_hcd_ring_work_scheduler(struct work_struct *work) work 1300 drivers/usb/host/u132-hcd.c container_of(work, struct u132_ring, scheduler.work); work 1355 drivers/usb/host/u132-hcd.c static void u132_hcd_endp_work_scheduler(struct work_struct *work) work 1359 drivers/usb/host/u132-hcd.c container_of(work, struct u132_endp, scheduler.work); work 783 drivers/usb/host/xhci-dbgcap.c static void xhci_dbc_handle_events(struct work_struct *work) work 791 drivers/usb/host/xhci-dbgcap.c dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); work 1336 drivers/usb/host/xhci-ring.c void xhci_handle_command_timeout(struct work_struct *work) work 1342 drivers/usb/host/xhci-ring.c xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer); work 2123 drivers/usb/host/xhci.h void xhci_handle_command_timeout(struct work_struct *work); work 70 drivers/usb/misc/appledisplay.c struct delayed_work work; work 110 drivers/usb/misc/appledisplay.c schedule_delayed_work(&pdata->work, 0); work 186 drivers/usb/misc/appledisplay.c static void appledisplay_work(struct work_struct *work) work 189 drivers/usb/misc/appledisplay.c container_of(work, struct appledisplay, work.work); work 198 drivers/usb/misc/appledisplay.c schedule_delayed_work(&pdata->work, HZ / 8); work 231 drivers/usb/misc/appledisplay.c INIT_DELAYED_WORK(&pdata->work, appledisplay_work); work 307 drivers/usb/misc/appledisplay.c cancel_delayed_work_sync(&pdata->work); work 328 drivers/usb/misc/appledisplay.c cancel_delayed_work_sync(&pdata->work); work 457 drivers/usb/misc/ftdi-elan.c static void ftdi_elan_command_work(struct work_struct *work) work 460 drivers/usb/misc/ftdi-elan.c container_of(work, struct usb_ftdi, command_work.work); work 483 drivers/usb/misc/ftdi-elan.c static void ftdi_elan_respond_work(struct work_struct *work) work 486 drivers/usb/misc/ftdi-elan.c container_of(work, struct usb_ftdi, respond_work.work); work 518 drivers/usb/misc/ftdi-elan.c static void ftdi_elan_status_work(struct work_struct *work) work 521 drivers/usb/misc/ftdi-elan.c container_of(work, struct usb_ftdi, status_work.work); work 326 drivers/usb/misc/lvstest.c static void lvs_rh_work(struct work_struct *work) work 328 drivers/usb/misc/lvstest.c struct lvs_rh *lvs = container_of(work, struct lvs_rh, rh_work); work 189 drivers/usb/mtu3/mtu3_dr.c static void ssusb_id_work(struct work_struct *work) work 192 drivers/usb/mtu3/mtu3_dr.c container_of(work, struct otg_switch_mtk, id_work); work 200 drivers/usb/mtu3/mtu3_dr.c static void ssusb_vbus_work(struct work_struct *work) work 203 drivers/usb/mtu3/mtu3_dr.c container_of(work, struct otg_switch_mtk, vbus_work); work 1928 drivers/usb/musb/musb_core.c struct musb *musb = container_of(data, struct musb, irq_work.work); work 2135 drivers/usb/musb/musb_core.c static void musb_deassert_reset(struct work_struct *work) work 2140 drivers/usb/musb/musb_core.c musb = container_of(work, struct musb, deassert_reset_work.work); work 1632 drivers/usb/musb/musb_gadget.c static void musb_gadget_work(struct work_struct *work) work 1637 drivers/usb/musb/musb_gadget.c musb = container_of(work, struct musb, gadget_work.work);
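
The appledisplay entries above trace a complete delayed_work lifecycle: INIT_DELAYED_WORK() at probe, schedule_delayed_work() to (re)arm, cancel_delayed_work_sync() at disconnect; the xhci and snps_udc entries use to_delayed_work() to get from the work_struct argument back to its delayed_work container. A sketch of that lifecycle with hypothetical names (demo_poller, demo_poll_fn); the HZ / 8 interval just mirrors the appledisplay entries:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_poller {
	struct delayed_work dwork;
};

static void demo_poll_fn(struct work_struct *work)
{
	/* to_delayed_work() wraps container_of() for the .work member */
	struct demo_poller *p =
		container_of(to_delayed_work(work), struct demo_poller, dwork);

	/* ... poll the hardware ... then re-arm: */
	schedule_delayed_work(&p->dwork, HZ / 8);
}

static void demo_poller_start(struct demo_poller *p)
{
	INIT_DELAYED_WORK(&p->dwork, demo_poll_fn);
	schedule_delayed_work(&p->dwork, 0);
}

static void demo_poller_stop(struct demo_poller *p)
{
	/*
	 * Safe against the self-re-arming handler: this cancels a
	 * pending timer and waits for a running instance to finish.
	 */
	cancel_delayed_work_sync(&p->dwork);
}
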
work 72 drivers/usb/musb/musb_host.h extern void musb_host_finish_resume(struct work_struct *work); work 107 drivers/usb/musb/musb_host.h static inline void musb_host_finish_resume(struct work_struct *work) {} work 21 drivers/usb/musb/musb_virthub.c void musb_host_finish_resume(struct work_struct *work) work 27 drivers/usb/musb/musb_virthub.c musb = container_of(work, struct musb, finish_resume_work.work); work 84 drivers/usb/musb/sunxi.c struct work_struct work; work 90 drivers/usb/musb/sunxi.c static void sunxi_musb_work(struct work_struct *work) work 92 drivers/usb/musb/sunxi.c struct sunxi_glue *glue = container_of(work, struct sunxi_glue, work); work 150 drivers/usb/musb/sunxi.c schedule_work(&glue->work); work 210 drivers/usb/musb/sunxi.c schedule_work(&glue->work); work 276 drivers/usb/musb/sunxi.c cancel_work_sync(&glue->work); work 304 drivers/usb/musb/sunxi.c schedule_work(&glue->work); work 363 drivers/usb/musb/sunxi.c schedule_work(&glue->work); work 377 drivers/usb/musb/sunxi.c schedule_work(&glue->work); work 714 drivers/usb/musb/sunxi.c INIT_WORK(&glue->work, sunxi_musb_work); work 591 drivers/usb/phy/phy-ab8500-usb.c static void ab8500_usb_phy_disable_work(struct work_struct *work) work 593 drivers/usb/phy/phy-ab8500-usb.c struct ab8500_usb *ab = container_of(work, struct ab8500_usb, work 649 drivers/usb/phy/phy-fsl-usb.c static void fsl_otg_event(struct work_struct *work) work 651 drivers/usb/phy/phy-fsl-usb.c struct fsl_otg *og = container_of(work, struct fsl_otg, otg_event.work); work 37 drivers/usb/phy/phy-gpio-vbus-usb.c struct delayed_work work; work 97 drivers/usb/phy/phy-gpio-vbus-usb.c static void gpio_vbus_work(struct work_struct *work) work 100 drivers/usb/phy/phy-gpio-vbus-usb.c container_of(work, struct gpio_vbus_data, work.work); work 166 drivers/usb/phy/phy-gpio-vbus-usb.c schedule_delayed_work(&gpio_vbus->work, msecs_to_jiffies(100)); work 316 drivers/usb/phy/phy-gpio-vbus-usb.c INIT_DELAYED_WORK(&gpio_vbus->work, gpio_vbus_work); work 343 drivers/usb/phy/phy-gpio-vbus-usb.c cancel_delayed_work_sync(&gpio_vbus->work); work 52 drivers/usb/phy/phy-isp1301-omap.c struct work_struct work; work 311 drivers/usb/phy/phy-isp1301-omap.c static void isp1301_defer_work(struct isp1301 *isp, int work) work 315 drivers/usb/phy/phy-isp1301-omap.c if (isp && !test_and_set_bit(work, &isp->todo)) { work 317 drivers/usb/phy/phy-isp1301-omap.c status = schedule_work(&isp->work); work 320 drivers/usb/phy/phy-isp1301-omap.c "work item %d may be lost\n", work); work 1086 drivers/usb/phy/phy-isp1301-omap.c isp1301_work(struct work_struct *work) work 1088 drivers/usb/phy/phy-isp1301-omap.c struct isp1301 *isp = container_of(work, struct isp1301, work); work 1216 drivers/usb/phy/phy-isp1301-omap.c flush_work(&isp->work); work 1497 drivers/usb/phy/phy-isp1301-omap.c INIT_WORK(&isp->work, isp1301_work); work 83 drivers/usb/phy/phy-mv-usb.c queue_delayed_work(mvotg->qwork, &mvotg->work, delay); work 401 drivers/usb/phy/phy-mv-usb.c static void mv_otg_work(struct work_struct *work) work 407 drivers/usb/phy/phy-mv-usb.c mvotg = container_of(to_delayed_work(work), struct mv_otg, work); work 701 drivers/usb/phy/phy-mv-usb.c INIT_DELAYED_WORK(&mvotg->work, mv_otg_work); work 148 drivers/usb/phy/phy-mv-usb.h struct delayed_work work; work 278 drivers/usb/phy/phy-twl6030-usb.c static void twl6030_status_work(struct work_struct *work) work 280 drivers/usb/phy/phy-twl6030-usb.c struct twl6030_usb *twl = container_of(work, struct twl6030_usb, work 281 drivers/usb/phy/phy-twl6030-usb.c get_status_work.work); work 97 drivers/usb/phy/phy.c static void usb_phy_notify_charger_work(struct
work_struct *work) work 99 drivers/usb/phy/phy.c struct usb_phy *usb_phy = container_of(work, struct usb_phy, chg_work); work 510 drivers/usb/renesas_usbhs/common.c static void usbhsc_notify_hotplug(struct work_struct *work) work 512 drivers/usb/renesas_usbhs/common.c struct usbhs_priv *priv = container_of(work, work 514 drivers/usb/renesas_usbhs/common.c notify_hotplug_work.work); work 849 drivers/usb/renesas_usbhs/fifo.c static void xfer_work(struct work_struct *work) work 851 drivers/usb/renesas_usbhs/fifo.c struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work); work 914 drivers/usb/renesas_usbhs/fifo.c INIT_WORK(&pkt->work, xfer_work); work 915 drivers/usb/renesas_usbhs/fifo.c schedule_work(&pkt->work); work 1101 drivers/usb/renesas_usbhs/fifo.c INIT_WORK(&pkt->work, xfer_work); work 1102 drivers/usb/renesas_usbhs/fifo.c schedule_work(&pkt->work); work 51 drivers/usb/renesas_usbhs/fifo.h struct work_struct work; work 208 drivers/usb/serial/digi_acceleport.c static void digi_wakeup_write_lock(struct work_struct *work); work 367 drivers/usb/serial/digi_acceleport.c static void digi_wakeup_write_lock(struct work_struct *work) work 370 drivers/usb/serial/digi_acceleport.c container_of(work, struct digi_port, dp_wakeup_work); work 708 drivers/usb/serial/f81232.c static void f81232_interrupt_work(struct work_struct *work) work 711 drivers/usb/serial/f81232.c container_of(work, struct f81232_private, interrupt_work); work 716 drivers/usb/serial/f81232.c static void f81232_lsr_worker(struct work_struct *work) work 723 drivers/usb/serial/f81232.c priv = container_of(work, struct f81232_private, lsr_work); work 1313 drivers/usb/serial/f81534.c static void f81534_lsr_worker(struct work_struct *work) work 1320 drivers/usb/serial/f81534.c port_priv = container_of(work, struct f81534_port_private, lsr_work); work 2486 drivers/usb/serial/io_ti.c static void edge_heartbeat_work(struct work_struct *work) work 2491 drivers/usb/serial/io_ti.c serial = container_of(work, struct edgeport_serial, work 2492 drivers/usb/serial/io_ti.c heartbeat_work.work); work 100 drivers/usb/serial/keyspan_pda.c static void keyspan_pda_wakeup_write(struct work_struct *work) work 103 drivers/usb/serial/keyspan_pda.c container_of(work, struct keyspan_pda_private, wakeup_work); work 109 drivers/usb/serial/keyspan_pda.c static void keyspan_pda_request_unthrottle(struct work_struct *work) work 112 drivers/usb/serial/keyspan_pda.c container_of(work, struct keyspan_pda_private, unthrottle_work); work 192 drivers/usb/serial/oti6858.c static void setup_line(struct work_struct *work) work 194 drivers/usb/serial/oti6858.c struct oti6858_private *priv = container_of(work, work 195 drivers/usb/serial/oti6858.c struct oti6858_private, delayed_setup_work.work); work 260 drivers/usb/serial/oti6858.c static void send_data(struct work_struct *work) work 262 drivers/usb/serial/oti6858.c struct oti6858_private *priv = container_of(work, work 263 drivers/usb/serial/oti6858.c struct oti6858_private, delayed_write_work.work); work 539 drivers/usb/serial/usb-serial.c schedule_work(&port->work); work 543 drivers/usb/serial/usb-serial.c static void usb_serial_port_work(struct work_struct *work) work 546 drivers/usb/serial/usb-serial.c container_of(work, struct usb_serial_port, work); work 977 drivers/usb/serial/usb-serial.c INIT_WORK(&port->work, usb_serial_port_work); work 1100 drivers/usb/serial/usb-serial.c cancel_work_sync(&port->work); work 47 drivers/usb/storage/uas.c struct work_struct work; work 78 drivers/usb/storage/uas.c 
static void uas_do_work(struct work_struct *work); work 97 drivers/usb/storage/uas.c static void uas_do_work(struct work_struct *work) work 100 drivers/usb/storage/uas.c container_of(work, struct uas_dev_info, work); work 125 drivers/usb/storage/uas.c queue_work(workqueue, &devinfo->work); work 131 drivers/usb/storage/uas.c static void uas_scan_work(struct work_struct *work) work 134 drivers/usb/storage/uas.c container_of(work, struct uas_dev_info, scan_work); work 150 drivers/usb/storage/uas.c queue_work(workqueue, &devinfo->work); work 1013 drivers/usb/storage/uas.c INIT_WORK(&devinfo->work, uas_do_work); work 1077 drivers/usb/storage/uas.c flush_work(&devinfo->work); work 1195 drivers/usb/storage/uas.c cancel_work_sync(&devinfo->work); work 890 drivers/usb/storage/usb.c static void usb_stor_scan_dwork(struct work_struct *work) work 892 drivers/usb/storage/usb.c struct us_data *us = container_of(work, struct us_data, work 893 drivers/usb/storage/usb.c scan_dwork.work); work 61 drivers/usb/typec/altmodes/displayport.c struct work_struct work; work 180 drivers/usb/typec/altmodes/displayport.c static void dp_altmode_work(struct work_struct *work) work 182 drivers/usb/typec/altmodes/displayport.c struct dp_altmode *dp = container_of(work, struct dp_altmode, work); work 244 drivers/usb/typec/altmodes/displayport.c schedule_work(&dp->work); work 300 drivers/usb/typec/altmodes/displayport.c schedule_work(&dp->work); work 533 drivers/usb/typec/altmodes/displayport.c INIT_WORK(&dp->work, dp_altmode_work); work 544 drivers/usb/typec/altmodes/displayport.c schedule_work(&dp->work); work 555 drivers/usb/typec/altmodes/displayport.c cancel_work_sync(&dp->work); work 1061 drivers/usb/typec/tcpm/fusb302.c static void fusb302_bc_lvl_handler_work(struct work_struct *work) work 1063 drivers/usb/typec/tcpm/fusb302.c struct fusb302_chip *chip = container_of(work, struct fusb302_chip, work 1064 drivers/usb/typec/tcpm/fusb302.c bc_lvl_handler.work); work 1484 drivers/usb/typec/tcpm/fusb302.c static void fusb302_irq_work(struct work_struct *work) work 1486 drivers/usb/typec/tcpm/fusb302.c struct fusb302_chip *chip = container_of(work, struct fusb302_chip, work 335 drivers/usb/typec/tcpm/tcpm.c struct work_struct work; work 1346 drivers/usb/typec/tcpm/tcpm.c static void vdm_state_machine_work(struct work_struct *work) work 1348 drivers/usb/typec/tcpm/tcpm.c struct tcpm_port *port = container_of(work, struct tcpm_port, work 1349 drivers/usb/typec/tcpm/tcpm.c vdm_state_machine.work); work 1943 drivers/usb/typec/tcpm/tcpm.c static void tcpm_pd_rx_handler(struct work_struct *work) work 1945 drivers/usb/typec/tcpm/tcpm.c struct pd_rx_event *event = container_of(work, work 1946 drivers/usb/typec/tcpm/tcpm.c struct pd_rx_event, work); work 2005 drivers/usb/typec/tcpm/tcpm.c INIT_WORK(&event->work, tcpm_pd_rx_handler); work 2008 drivers/usb/typec/tcpm/tcpm.c queue_work(port->wq, &event->work); work 3580 drivers/usb/typec/tcpm/tcpm.c static void tcpm_state_machine_work(struct work_struct *work) work 3582 drivers/usb/typec/tcpm/tcpm.c struct tcpm_port *port = container_of(work, struct tcpm_port, work 3583 drivers/usb/typec/tcpm/tcpm.c state_machine.work); work 3936 drivers/usb/typec/tcpm/tcpm.c static void tcpm_pd_event_handler(struct work_struct *work) work 3938 drivers/usb/typec/tcpm/tcpm.c struct tcpm_port *port = container_of(work, struct tcpm_port, work 22 drivers/usb/typec/ucsi/displayport.c struct work_struct work; work 96 drivers/usb/typec/ucsi/displayport.c schedule_work(&dp->work); work 132 
drivers/usb/typec/ucsi/displayport.c schedule_work(&dp->work); work 232 drivers/usb/typec/ucsi/displayport.c schedule_work(&dp->work); work 249 drivers/usb/typec/ucsi/displayport.c static void ucsi_displayport_work(struct work_struct *work) work 251 drivers/usb/typec/ucsi/displayport.c struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work); work 315 drivers/usb/typec/ucsi/displayport.c INIT_WORK(&dp->work, ucsi_displayport_work); work 524 drivers/usb/typec/ucsi/ucsi.c static void ucsi_connector_change(struct work_struct *work) work 526 drivers/usb/typec/ucsi/ucsi.c struct ucsi_connector *con = container_of(work, struct ucsi_connector, work 527 drivers/usb/typec/ucsi/ucsi.c work); work 629 drivers/usb/typec/ucsi/ucsi.c schedule_work(&con->work); work 810 drivers/usb/typec/ucsi/ucsi.c INIT_WORK(&con->work, ucsi_connector_change); work 901 drivers/usb/typec/ucsi/ucsi.c static void ucsi_init(struct work_struct *work) work 903 drivers/usb/typec/ucsi/ucsi.c struct ucsi *ucsi = container_of(work, struct ucsi, work); work 992 drivers/usb/typec/ucsi/ucsi.c INIT_WORK(&ucsi->work, ucsi_init); work 1003 drivers/usb/typec/ucsi/ucsi.c queue_work(system_long_wq, &ucsi->work); work 1021 drivers/usb/typec/ucsi/ucsi.c cancel_work_sync(&ucsi->work); work 1028 drivers/usb/typec/ucsi/ucsi.c cancel_work_sync(&ucsi->connector[i].work); work 394 drivers/usb/typec/ucsi/ucsi.h struct work_struct work; work 414 drivers/usb/typec/ucsi/ucsi.h struct work_struct work; work 193 drivers/usb/typec/ucsi/ucsi_ccg.c struct work_struct work; work 1039 drivers/usb/typec/ucsi/ucsi_ccg.c static void ccg_update_firmware(struct work_struct *work) work 1041 drivers/usb/typec/ucsi/ucsi_ccg.c struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work); work 1076 drivers/usb/typec/ucsi/ucsi_ccg.c schedule_work(&uc->work); work 1109 drivers/usb/typec/ucsi/ucsi_ccg.c INIT_WORK(&uc->work, ccg_update_firmware); work 1177 drivers/usb/typec/ucsi/ucsi_ccg.c cancel_work_sync(&uc->work); work 62 drivers/usb/usbip/usbip_event.c static void event_handler(struct work_struct *work) work 428 drivers/vfio/vfio.c struct work_struct work; work 432 drivers/vfio/vfio.c static void vfio_group_put_bg(struct work_struct *work) work 436 drivers/vfio/vfio.c do_work = container_of(work, struct vfio_group_put_work, work); work 450 drivers/vfio/vfio.c INIT_WORK(&do_work->work, vfio_group_put_bg); work 452 drivers/vfio/vfio.c schedule_work(&do_work->work); work 85 drivers/vfio/virqfd.c static void virqfd_shutdown(struct work_struct *work) work 87 drivers/vfio/virqfd.c struct virqfd *virqfd = container_of(work, struct virqfd, shutdown); work 97 drivers/vfio/virqfd.c static void virqfd_inject(struct work_struct *work) work 99 drivers/vfio/virqfd.c struct virqfd *virqfd = container_of(work, struct virqfd, inject); work 1241 drivers/vhost/net.c static void handle_tx_kick(struct vhost_work *work) work 1243 drivers/vhost/net.c struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, work 1244 drivers/vhost/net.c poll.work); work 1250 drivers/vhost/net.c static void handle_rx_kick(struct vhost_work *work) work 1252 drivers/vhost/net.c struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, work 1253 drivers/vhost/net.c poll.work); work 1259 drivers/vhost/net.c static void handle_tx_net(struct vhost_work *work) work 1261 drivers/vhost/net.c struct vhost_net *net = container_of(work, struct vhost_net, work 1262 drivers/vhost/net.c poll[VHOST_NET_VQ_TX].work); work 1266 drivers/vhost/net.c static void handle_rx_net(struct 
vhost_work *work) work 1268 drivers/vhost/net.c struct vhost_net *net = container_of(work, struct vhost_net, work 1269 drivers/vhost/net.c poll[VHOST_NET_VQ_RX].work); work 106 drivers/vhost/scsi.c struct work_struct work; work 496 drivers/vhost/scsi.c static void vhost_scsi_evt_work(struct vhost_work *work) work 498 drivers/vhost/scsi.c struct vhost_scsi *vs = container_of(work, struct vhost_scsi, work 518 drivers/vhost/scsi.c static void vhost_scsi_complete_cmd_work(struct vhost_work *work) work 520 drivers/vhost/scsi.c struct vhost_scsi *vs = container_of(work, struct vhost_scsi, work 756 drivers/vhost/scsi.c static void vhost_scsi_submission_work(struct work_struct *work) work 759 drivers/vhost/scsi.c container_of(work, struct vhost_scsi_cmd, work); work 1107 drivers/vhost/scsi.c INIT_WORK(&cmd->work, vhost_scsi_submission_work); work 1108 drivers/vhost/scsi.c queue_work(vhost_scsi_workqueue, &cmd->work); work 1278 drivers/vhost/scsi.c static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) work 1280 drivers/vhost/scsi.c struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, work 1281 drivers/vhost/scsi.c poll.work); work 1318 drivers/vhost/scsi.c static void vhost_scsi_evt_handle_kick(struct vhost_work *work) work 1320 drivers/vhost/scsi.c struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, work 1321 drivers/vhost/scsi.c poll.work); work 1334 drivers/vhost/scsi.c static void vhost_scsi_handle_kick(struct vhost_work *work) work 1336 drivers/vhost/scsi.c struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, work 1337 drivers/vhost/scsi.c poll.work); work 96 drivers/vhost/test.c static void handle_vq_kick(struct vhost_work *work) work 98 drivers/vhost/test.c struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, work 99 drivers/vhost/test.c poll.work); work 146 drivers/vhost/vhost.c struct vhost_work work; work 150 drivers/vhost/vhost.c static void vhost_flush_work(struct vhost_work *work) work 154 drivers/vhost/vhost.c s = container_of(work, struct vhost_flush_struct, work); work 180 drivers/vhost/vhost.c void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) work 182 drivers/vhost/vhost.c clear_bit(VHOST_WORK_QUEUED, &work->flags); work 183 drivers/vhost/vhost.c work->fn = fn; work 197 drivers/vhost/vhost.c vhost_work_init(&poll->work, fn); work 233 drivers/vhost/vhost.c void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) work 239 drivers/vhost/vhost.c vhost_work_init(&flush.work, vhost_flush_work); work 241 drivers/vhost/vhost.c vhost_work_queue(dev, &flush.work); work 251 drivers/vhost/vhost.c vhost_work_flush(poll->dev, &poll->work); work 255 drivers/vhost/vhost.c void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) work 260 drivers/vhost/vhost.c if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) { work 265 drivers/vhost/vhost.c llist_add(&work->node, &dev->work_list); work 280 drivers/vhost/vhost.c vhost_work_queue(poll->dev, &poll->work); work 334 drivers/vhost/vhost.c struct vhost_work *work, *work_next; work 357 drivers/vhost/vhost.c llist_for_each_entry_safe(work, work_next, node, node) { work 358 drivers/vhost/vhost.c clear_bit(VHOST_WORK_QUEUED, &work->flags); work 360 drivers/vhost/vhost.c work->fn(work); work 504 drivers/vhost/vhost.c struct vhost_work work; work 509 drivers/vhost/vhost.c static void vhost_attach_cgroups_work(struct vhost_work *work) work 513 drivers/vhost/vhost.c s = container_of(work, struct vhost_attach_cgroups_struct, work); 
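
The vhost.c entries above are not generic-workqueue users: vhost rolls its own lightweight item (a function pointer, an llist node, and a VHOST_WORK_QUEUED flag that makes double-queueing a no-op, drained by a per-device worker kthread), and flushing queues a sentinel that fires a completion (vhost_flush_struct). A usage sketch against the vhost.h signatures listed above; struct demo_vhost_evt and the demo_* helpers are hypothetical, and vhost.h is a driver-internal header, not a public API:

#include "vhost.h"	/* drivers/vhost/vhost.h; internal, shown for illustration */

struct demo_vhost_evt {
	struct vhost_work work;
};

static void demo_evt_fn(struct vhost_work *work)
{
	struct demo_vhost_evt *evt =
		container_of(work, struct demo_vhost_evt, work);

	/* runs on the device's worker kthread, like handle_tx_kick() */
	(void)evt;
}

static void demo_evt_init(struct demo_vhost_evt *evt)
{
	vhost_work_init(&evt->work, demo_evt_fn);
}

static void demo_evt_kick(struct vhost_dev *dev, struct demo_vhost_evt *evt)
{
	/* no-op while already queued: guarded by VHOST_WORK_QUEUED */
	vhost_work_queue(dev, &evt->work);
}

static void demo_evt_wait(struct vhost_dev *dev, struct demo_vhost_evt *evt)
{
	/* returns once everything queued before the flush has run */
	vhost_work_flush(dev, &evt->work);
}
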
work 522 drivers/vhost/vhost.c vhost_work_init(&attach.work, vhost_attach_cgroups_work); work 523 drivers/vhost/vhost.c vhost_work_queue(dev, &attach.work); work 524 drivers/vhost/vhost.c vhost_work_flush(dev, &attach.work); work 17 drivers/vhost/vhost.h typedef void (*vhost_work_fn_t)(struct vhost_work *work); work 32 drivers/vhost/vhost.h struct vhost_work work; work 37 drivers/vhost/vhost.h void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn); work 38 drivers/vhost/vhost.h void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work); work 47 drivers/vhost/vhost.h void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work); work 228 drivers/vhost/vsock.c static void vhost_transport_send_pkt_work(struct vhost_work *work) work 233 drivers/vhost/vsock.c vsock = container_of(work, struct vhost_vsock, send_pkt_work); work 387 drivers/vhost/vsock.c static void vhost_vsock_handle_tx_kick(struct vhost_work *work) work 389 drivers/vhost/vsock.c struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, work 390 drivers/vhost/vsock.c poll.work); work 461 drivers/vhost/vsock.c static void vhost_vsock_handle_rx_kick(struct vhost_work *work) work 463 drivers/vhost/vsock.c struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, work 464 drivers/vhost/vsock.c poll.work); work 98 drivers/video/backlight/adp8860_bl.c struct work_struct work; work 180 drivers/video/backlight/adp8860_bl.c static void adp8860_led_work(struct work_struct *work) work 182 drivers/video/backlight/adp8860_bl.c struct adp8860_led *led = container_of(work, struct adp8860_led, work); work 195 drivers/video/backlight/adp8860_bl.c schedule_work(&led->work); work 268 drivers/video/backlight/adp8860_bl.c INIT_WORK(&led_dat->work, adp8860_led_work); work 292 drivers/video/backlight/adp8860_bl.c cancel_work_sync(&led[i].work); work 307 drivers/video/backlight/adp8860_bl.c cancel_work_sync(&data->led[i].work); work 121 drivers/video/backlight/adp8870_bl.c struct work_struct work; work 195 drivers/video/backlight/adp8870_bl.c static void adp8870_led_work(struct work_struct *work) work 197 drivers/video/backlight/adp8870_bl.c struct adp8870_led *led = container_of(work, struct adp8870_led, work); work 213 drivers/video/backlight/adp8870_bl.c schedule_work(&led->work); work 294 drivers/video/backlight/adp8870_bl.c INIT_WORK(&led_dat->work, adp8870_led_work); work 318 drivers/video/backlight/adp8870_bl.c cancel_work_sync(&led[i].work); work 333 drivers/video/backlight/adp8870_bl.c cancel_work_sync(&data->led[i].work); work 44 drivers/video/backlight/lm3630a_bl.c struct delayed_work work; work 111 drivers/video/backlight/lm3630a_bl.c static void lm3630a_delayed_func(struct work_struct *work) work 116 drivers/video/backlight/lm3630a_bl.c pchip = container_of(work, struct lm3630a_chip, work.work); work 134 drivers/video/backlight/lm3630a_bl.c queue_delayed_work(pchip->irqthread, &pchip->work, delay); work 152 drivers/video/backlight/lm3630a_bl.c INIT_DELAYED_WORK(&pchip->work, lm3630a_delayed_func); work 858 drivers/video/fbdev/atmel_lcdfb.c static void atmel_lcdfb_task(struct work_struct *work) work 861 drivers/video/fbdev/atmel_lcdfb.c container_of(work, struct atmel_lcdfb_info, task); work 177 drivers/video/fbdev/core/fb_defio.c static void fb_deferred_io_work(struct work_struct *work) work 179 drivers/video/fbdev/core/fb_defio.c struct fb_info *info = container_of(work, struct fb_info, work 180 drivers/video/fbdev/core/fb_defio.c deferred_work.work); work 388 
drivers/video/fbdev/core/fbcon.c static void fb_flashcursor(struct work_struct *work) work 390 drivers/video/fbdev/core/fbcon.c struct fb_info *info = container_of(work, struct fb_info, queue); work 3570 drivers/video/fbdev/core/fbcon.c static void fbcon_register_existing_fbs(struct work_struct *work) work 523 drivers/video/fbdev/hyperv_fb.c struct hvfb_par *par = container_of(w, struct hvfb_par, dwork.work); work 389 drivers/video/fbdev/omap/lcd_mipid.c static void mipid_esd_work(struct work_struct *work) work 391 drivers/video/fbdev/omap/lcd_mipid.c struct mipid_device *md = container_of(work, struct mipid_device, work 392 drivers/video/fbdev/omap/lcd_mipid.c esd_work.work); work 83 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c static void dsicm_te_timeout_work_callback(struct work_struct *work); work 88 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c static void dsicm_ulps_work(struct work_struct *work); work 849 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c static void dsicm_te_timeout_work_callback(struct work_struct *work) work 851 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, work 852 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c te_timeout_work.work); work 1077 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c static void dsicm_ulps_work(struct work_struct *work) work 1079 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, work 1080 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c ulps_work.work); work 323 drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c static void dispc_error_worker(struct work_struct *work) work 4004 drivers/video/fbdev/omap2/omapfb/dss/dsi.c static void dsi_framedone_timeout_work_callback(struct work_struct *work) work 4006 drivers/video/fbdev/omap2/omapfb/dss/dsi.c struct dsi_data *dsi = container_of(work, struct dsi_data, work 4007 drivers/video/fbdev/omap2/omapfb/dss/dsi.c framedone_timeout_work.work); work 1658 drivers/video/fbdev/omap2/omapfb/omapfb-main.c static void omapfb_auto_update_work(struct work_struct *work) work 1667 drivers/video/fbdev/omap2/omapfb/omapfb-main.c d = container_of(work, struct omapfb_display_data, work 1668 drivers/video/fbdev/omap2/omapfb/omapfb-main.c auto_update_work.work); work 1715 drivers/video/fbdev/omap2/omapfb/omapfb-main.c omapfb_auto_update_work(&d->auto_update_work.work); work 1639 drivers/video/fbdev/pxafb.c static void pxafb_task(struct work_struct *work) work 1642 drivers/video/fbdev/pxafb.c container_of(work, struct pxafb_info, task); work 1120 drivers/video/fbdev/smscufx.c static void ufx_release_urb_work(struct work_struct *work) work 1122 drivers/video/fbdev/smscufx.c struct urb_node *unode = container_of(work, struct urb_node, work 1123 drivers/video/fbdev/smscufx.c release_urb_work.work); work 1128 drivers/video/fbdev/smscufx.c static void ufx_free_framebuffer_work(struct work_struct *work) work 1130 drivers/video/fbdev/smscufx.c struct ufx_data *dev = container_of(work, struct ufx_data, work 1131 drivers/video/fbdev/smscufx.c free_framebuffer_work.work); work 350 drivers/virt/vboxguest/vboxguest_core.c static void vbg_balloon_work(struct work_struct *work) work 353 drivers/virt/vboxguest/vboxguest_core.c container_of(work, struct vbg_dev, mem_balloon.work); work 808 drivers/virt/vboxguest/vboxguest_core.c INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work); work 1631 
drivers/virt/vboxguest/vboxguest_core.c schedule_work(&gdev->mem_balloon.work); work 23 drivers/virt/vboxguest/vboxguest_core.h struct work_struct work; work 432 drivers/virtio/virtio_balloon.c static void update_balloon_stats_func(struct work_struct *work) work 436 drivers/virtio/virtio_balloon.c vb = container_of(work, struct virtio_balloon, work 441 drivers/virtio/virtio_balloon.c static void update_balloon_size_func(struct work_struct *work) work 446 drivers/virtio/virtio_balloon.c vb = container_of(work, struct virtio_balloon, work 460 drivers/virtio/virtio_balloon.c queue_work(system_freezable_wq, work); work 672 drivers/virtio/virtio_balloon.c static void report_free_page_func(struct work_struct *work) work 674 drivers/virtio/virtio_balloon.c struct virtio_balloon *vb = container_of(work, struct virtio_balloon, work 1209 drivers/visorbus/visorchipset.c static void setup_crash_devices_work_queue(struct work_struct *work) work 1499 drivers/visorbus/visorchipset.c static void controlvm_periodic_work(struct work_struct *work) work 397 drivers/watchdog/mei_wdt.c static void mei_wdt_unregister_work(struct work_struct *work) work 399 drivers/watchdog/mei_wdt.c struct mei_wdt *wdt = container_of(work, struct mei_wdt, unregister); work 47 drivers/watchdog/retu_wdt.c static void retu_wdt_ping_work(struct work_struct *work) work 49 drivers/watchdog/retu_wdt.c struct retu_wdt_dev *wdev = container_of(to_delayed_work(work), work 68 drivers/watchdog/watchdog_dev.c struct kthread_work work; work 233 drivers/watchdog/watchdog_dev.c static void watchdog_ping_work(struct kthread_work *work) work 237 drivers/watchdog/watchdog_dev.c wd_data = container_of(work, struct watchdog_core_data, work); work 251 drivers/watchdog/watchdog_dev.c kthread_queue_work(watchdog_kworker, &wd_data->work); work 977 drivers/watchdog/watchdog_dev.c kthread_init_work(&wd_data->work, watchdog_ping_work); work 1075 drivers/watchdog/watchdog_dev.c kthread_cancel_work_sync(&wd_data->work); work 150 drivers/xen/balloon.c static void balloon_process(struct work_struct *work); work 516 drivers/xen/balloon.c static void balloon_process(struct work_struct *work) work 1099 drivers/xen/grant-table.c static void gnttab_unmap_work(struct work_struct *work) work 1102 drivers/xen/grant-table.c *unmap_data = container_of(work, work 1104 drivers/xen/grant-table.c gnttab_work.work); work 322 drivers/xen/mcelog.c static void xen_mce_work_fn(struct work_struct *work) work 336 drivers/xen/pcpu.c static void xen_pcpu_work_fn(struct work_struct *work) work 224 drivers/xen/pvcalls-back.c static void pvcalls_back_ioworker(struct work_struct *work) work 226 drivers/xen/pvcalls-back.c struct pvcalls_ioworker *ioworker = container_of(work, work 503 drivers/xen/pvcalls-back.c static void __pvcalls_back_accept(struct work_struct *work) work 506 drivers/xen/pvcalls-back.c work, struct sockpass_mapping, register_work); work 97 drivers/xen/xenbus/xenbus_probe_frontend.c struct xenbus_device *xdev = container_of(w, struct xenbus_device, work); work 111 drivers/xen/xenbus/xenbus_probe_frontend.c schedule_work(&xdev->work); work 123 drivers/xen/xenbus/xenbus_probe_frontend.c INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume); work 65 fs/affs/super.c static void flush_superblock(struct work_struct *work) work 70 fs/affs/super.c sbi = container_of(work, struct affs_sb_info, sb_work.work); work 648 fs/afs/cell.c static void afs_manage_cell(struct work_struct *work) work 650 fs/afs/cell.c struct afs_cell *cell = container_of(work, struct afs_cell, manager); 
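
The watchdog_dev.c entries above use a kthread_worker rather than the shared workqueues, so that pings run on one dedicated kthread (which the watchdog core can then give elevated scheduling priority). A minimal sketch of that API with hypothetical names (demo_wd_data, demo_kworker):

#include <linux/kthread.h>
#include <linux/err.h>

struct demo_wd_data {
	struct kthread_work work;
};

static struct kthread_worker *demo_kworker;

static void demo_ping_fn(struct kthread_work *work)
{
	struct demo_wd_data *d = container_of(work, struct demo_wd_data, work);

	/* ... pet the hardware ... */
	(void)d;
}

static int demo_wd_setup(struct demo_wd_data *d)
{
	demo_kworker = kthread_create_worker(0, "demo_watchdogd");
	if (IS_ERR(demo_kworker))
		return PTR_ERR(demo_kworker);
	kthread_init_work(&d->work, demo_ping_fn);
	kthread_queue_work(demo_kworker, &d->work);
	return 0;
}

static void demo_wd_teardown(struct demo_wd_data *d)
{
	kthread_cancel_work_sync(&d->work);
	kthread_destroy_worker(demo_kworker);
}
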
work 754 fs/afs/cell.c void afs_manage_cells(struct work_struct *work) work 756 fs/afs/cell.c struct afs_net *net = container_of(work, struct afs_net, cells_manager); work 44 fs/afs/cmservice.c .work = SRXAFSCB_CallBack, work 55 fs/afs/cmservice.c .work = SRXAFSCB_InitCallBackState, work 66 fs/afs/cmservice.c .work = SRXAFSCB_InitCallBackState, work 77 fs/afs/cmservice.c .work = SRXAFSCB_Probe, work 88 fs/afs/cmservice.c .work = SRXAFSCB_ProbeUuid, work 99 fs/afs/cmservice.c .work = SRXAFSCB_TellMeAboutYourself, work 110 fs/afs/cmservice.c .work = SRXAFSCB_CallBack, work 260 fs/afs/cmservice.c static void SRXAFSCB_CallBack(struct work_struct *work) work 262 fs/afs/cmservice.c struct afs_call *call = container_of(work, struct afs_call, work); work 385 fs/afs/cmservice.c static void SRXAFSCB_InitCallBackState(struct work_struct *work) work 387 fs/afs/cmservice.c struct afs_call *call = container_of(work, struct afs_call, work); work 482 fs/afs/cmservice.c static void SRXAFSCB_Probe(struct work_struct *work) work 484 fs/afs/cmservice.c struct afs_call *call = container_of(work, struct afs_call, work); work 514 fs/afs/cmservice.c static void SRXAFSCB_ProbeUuid(struct work_struct *work) work 516 fs/afs/cmservice.c struct afs_call *call = container_of(work, struct afs_call, work); work 590 fs/afs/cmservice.c static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) work 592 fs/afs/cmservice.c struct afs_call *call = container_of(work, struct afs_call, work); work 292 fs/afs/flock.c void afs_lock_work(struct work_struct *work) work 295 fs/afs/flock.c container_of(work, struct afs_vnode, lock_work.work); work 108 fs/afs/internal.h struct work_struct work; /* actual work processor */ work 191 fs/afs/internal.h void (*work)(struct work_struct *work); work 24 fs/afs/mntpt.c static void afs_mntpt_expiry_timed_out(struct work_struct *work); work 204 fs/afs/mntpt.c static void afs_mntpt_expiry_timed_out(struct work_struct *work) work 217 fs/afs/rxrpc.c if (call->type->work) { work 218 fs/afs/rxrpc.c INIT_WORK(&call->work, call->type->work); work 221 fs/afs/rxrpc.c if (!queue_work(afs_wq, &call->work)) work 721 fs/afs/rxrpc.c static void afs_process_async_call(struct work_struct *work) work 723 fs/afs/rxrpc.c struct afs_call *call = container_of(work, struct afs_call, async_work); work 746 fs/afs/rxrpc.c void afs_charge_preallocation(struct work_struct *work) work 749 fs/afs/rxrpc.c container_of(work, struct afs_net, charge_preallocation_work); work 438 fs/afs/server.c void afs_manage_servers(struct work_struct *work) work 440 fs/afs/server.c struct afs_net *net = container_of(work, struct afs_net, fs_manager); work 177 fs/aio.c struct work_struct work; work 189 fs/aio.c struct work_struct work; work 583 fs/aio.c static void free_ioctx(struct work_struct *work) work 585 fs/aio.c struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx, work 1590 fs/aio.c static void aio_fsync_work(struct work_struct *work) work 1592 fs/aio.c struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); work 1616 fs/aio.c INIT_WORK(&req->work, aio_fsync_work); work 1617 fs/aio.c schedule_work(&req->work); work 1621 fs/aio.c static void aio_poll_put_work(struct work_struct *work) work 1623 fs/aio.c struct poll_iocb *req = container_of(work, struct poll_iocb, work); work 1629 fs/aio.c static void aio_poll_complete_work(struct work_struct *work) work 1631 fs/aio.c struct poll_iocb *req = container_of(work, struct poll_iocb, work); work 1671 fs/aio.c schedule_work(&aiocb->poll.work); work 1706 
fs/aio.c INIT_WORK(&req->work, aio_poll_put_work); work 1707 fs/aio.c schedule_work(&req->work); work 1713 fs/aio.c schedule_work(&req->work); work 1756 fs/aio.c INIT_WORK(&req->work, aio_poll_complete_work); work 63 fs/btrfs/async-thread.c btrfs_work_owner(const struct btrfs_work *work) work 65 fs/btrfs/async-thread.c return work->wq->fs_info; work 226 fs/btrfs/async-thread.c struct btrfs_work *work; work 236 fs/btrfs/async-thread.c work = list_entry(list->next, struct btrfs_work, work 238 fs/btrfs/async-thread.c if (!test_bit(WORK_DONE_BIT, &work->flags)) work 247 fs/btrfs/async-thread.c if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) work 249 fs/btrfs/async-thread.c trace_btrfs_ordered_sched(work); work 251 fs/btrfs/async-thread.c work->ordered_func(work); work 255 fs/btrfs/async-thread.c list_del(&work->ordered_list); work 258 fs/btrfs/async-thread.c if (work == self) { work 288 fs/btrfs/async-thread.c wtag = work; work 289 fs/btrfs/async-thread.c work->ordered_free(work); work 304 fs/btrfs/async-thread.c struct btrfs_work *work = container_of(normal_work, struct btrfs_work, work 318 fs/btrfs/async-thread.c if (work->ordered_func) work 320 fs/btrfs/async-thread.c wq = work->wq; work 322 fs/btrfs/async-thread.c wtag = work; work 324 fs/btrfs/async-thread.c trace_btrfs_work_sched(work); work 326 fs/btrfs/async-thread.c work->func(work); work 328 fs/btrfs/async-thread.c set_bit(WORK_DONE_BIT, &work->flags); work 329 fs/btrfs/async-thread.c run_ordered_work(wq, work); work 335 fs/btrfs/async-thread.c void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, work 338 fs/btrfs/async-thread.c work->func = func; work 339 fs/btrfs/async-thread.c work->ordered_func = ordered_func; work 340 fs/btrfs/async-thread.c work->ordered_free = ordered_free; work 341 fs/btrfs/async-thread.c INIT_WORK(&work->normal_work, btrfs_work_helper); work 342 fs/btrfs/async-thread.c INIT_LIST_HEAD(&work->ordered_list); work 343 fs/btrfs/async-thread.c work->flags = 0; work 347 fs/btrfs/async-thread.c struct btrfs_work *work) work 351 fs/btrfs/async-thread.c work->wq = wq; work 353 fs/btrfs/async-thread.c if (work->ordered_func) { work 355 fs/btrfs/async-thread.c list_add_tail(&work->ordered_list, &wq->ordered_list); work 358 fs/btrfs/async-thread.c trace_btrfs_work_queued(work); work 359 fs/btrfs/async-thread.c queue_work(wq->normal_wq, &work->normal_work); work 363 fs/btrfs/async-thread.c struct btrfs_work *work) work 367 fs/btrfs/async-thread.c if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high) work 371 fs/btrfs/async-thread.c __btrfs_queue_work(dest_wq, work); work 401 fs/btrfs/async-thread.c void btrfs_set_work_high_priority(struct btrfs_work *work) work 403 fs/btrfs/async-thread.c set_bit(WORK_HIGH_PRIO_BIT, &work->flags); work 37 fs/btrfs/async-thread.h void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, work 40 fs/btrfs/async-thread.h struct btrfs_work *work); work 43 fs/btrfs/async-thread.h void btrfs_set_work_high_priority(struct btrfs_work *work); work 44 fs/btrfs/async-thread.h struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work); work 629 fs/btrfs/block-group.c static noinline void caching_thread(struct btrfs_work *work) work 636 fs/btrfs/block-group.c caching_ctl = container_of(work, struct btrfs_caching_control, work); work 698 fs/btrfs/block-group.c btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); work 810 fs/btrfs/block-group.c btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
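
The async-thread.c entries above implement btrfs's ordering layer on top of the generic workqueue: ->func instances run in parallel, while ->ordered_func and ->ordered_free run strictly in queueing order, gated by WORK_DONE_BIT. A usage sketch against the btrfs_init_work()/btrfs_queue_work() signatures listed above (struct demo_item and the demo_* callbacks are hypothetical; async-thread.h is btrfs-internal):

#include <linux/slab.h>
#include "async-thread.h"	/* fs/btrfs/async-thread.h; btrfs-internal */

struct demo_item {
	struct btrfs_work work;
	/* ... payload ... */
};

static void demo_func(struct btrfs_work *work)
{
	/* parallel phase: instances may run concurrently, out of order */
}

static void demo_ordered_func(struct btrfs_work *work)
{
	/* completion phase: runs strictly in the order items were queued */
}

static void demo_ordered_free(struct btrfs_work *work)
{
	kfree(container_of(work, struct demo_item, work));
}

static void demo_submit(struct btrfs_workqueue *wq, struct demo_item *item)
{
	btrfs_init_work(&item->work, demo_func, demo_ordered_func,
			demo_ordered_free);
	btrfs_queue_work(wq, &item->work);
}
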
work 36 fs/btrfs/block-group.h struct btrfs_work work; work 456 fs/btrfs/ctree.h void btrfs_init_async_reclaim_work(struct work_struct *work); work 1309 fs/btrfs/delayed-inode.c struct btrfs_work work; work 1312 fs/btrfs/delayed-inode.c static void btrfs_async_run_delayed_root(struct btrfs_work *work) work 1323 fs/btrfs/delayed-inode.c async_work = container_of(work, struct btrfs_async_delayed_work, work); work 1383 fs/btrfs/delayed-inode.c btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL, work 1387 fs/btrfs/delayed-inode.c btrfs_queue_work(fs_info->delayed_workers, &async_work->work); work 53 fs/btrfs/disk-io.c static void end_workqueue_fn(struct btrfs_work *work); work 78 fs/btrfs/disk-io.c struct btrfs_work work; work 115 fs/btrfs/disk-io.c struct btrfs_work work; work 733 fs/btrfs/disk-io.c btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL); work 734 fs/btrfs/disk-io.c btrfs_queue_work(wq, &end_io_wq->work); work 758 fs/btrfs/disk-io.c static void run_one_async_start(struct btrfs_work *work) work 763 fs/btrfs/disk-io.c async = container_of(work, struct async_submit_bio, work); work 778 fs/btrfs/disk-io.c static void run_one_async_done(struct btrfs_work *work) work 784 fs/btrfs/disk-io.c async = container_of(work, struct async_submit_bio, work); work 802 fs/btrfs/disk-io.c static void run_one_async_free(struct btrfs_work *work) work 806 fs/btrfs/disk-io.c async = container_of(work, struct async_submit_bio, work); work 826 fs/btrfs/disk-io.c btrfs_init_work(&async->work, run_one_async_start, run_one_async_done, work 834 fs/btrfs/disk-io.c btrfs_set_work_high_priority(&async->work); work 836 fs/btrfs/disk-io.c btrfs_queue_work(fs_info->workers, &async->work); work 1637 fs/btrfs/disk-io.c static void end_workqueue_fn(struct btrfs_work *work) work 1642 fs/btrfs/disk-io.c end_io_wq = container_of(work, struct btrfs_end_io_wq, work); work 371 fs/btrfs/inode.c struct btrfs_work work; work 1152 fs/btrfs/inode.c static noinline void async_cow_start(struct btrfs_work *work) work 1157 fs/btrfs/inode.c async_chunk = container_of(work, struct async_chunk, work); work 1169 fs/btrfs/inode.c static noinline void async_cow_submit(struct btrfs_work *work) work 1171 fs/btrfs/inode.c struct async_chunk *async_chunk = container_of(work, struct async_chunk, work 1172 fs/btrfs/inode.c work); work 1173 fs/btrfs/inode.c struct btrfs_fs_info *fs_info = btrfs_work_owner(work); work 1194 fs/btrfs/inode.c static noinline void async_cow_free(struct btrfs_work *work) work 1198 fs/btrfs/inode.c async_chunk = container_of(work, struct async_chunk, work); work 1288 fs/btrfs/inode.c btrfs_init_work(&async_chunk[i].work, async_cow_start, work 1294 fs/btrfs/inode.c btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); work 2172 fs/btrfs/inode.c struct btrfs_work work; work 2175 fs/btrfs/inode.c static void btrfs_writepage_fixup_worker(struct btrfs_work *work) work 2188 fs/btrfs/inode.c fixup = container_of(work, struct btrfs_writepage_fixup, work); work 2351 fs/btrfs/inode.c btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); work 2354 fs/btrfs/inode.c btrfs_queue_work(fs_info->fixup_workers, &fixup->work); work 3332 fs/btrfs/inode.c static void finish_ordered_fn(struct btrfs_work *work) work 3335 fs/btrfs/inode.c ordered_extent = container_of(work, struct btrfs_ordered_extent, work); work 3359 fs/btrfs/inode.c btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); work 3360 fs/btrfs/inode.c btrfs_queue_work(wq, &ordered_extent->work); work 8336 fs/btrfs/inode.c
btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, work 8338 fs/btrfs/inode.c btrfs_queue_work(wq, &ordered->work); work 10183 fs/btrfs/inode.c struct btrfs_work work; work 10186 fs/btrfs/inode.c static void btrfs_run_delalloc_work(struct btrfs_work *work) work 10191 fs/btrfs/inode.c delalloc_work = container_of(work, struct btrfs_delalloc_work, work 10192 fs/btrfs/inode.c work); work 10205 fs/btrfs/inode.c struct btrfs_delalloc_work *work; work 10207 fs/btrfs/inode.c work = kmalloc(sizeof(*work), GFP_NOFS); work 10208 fs/btrfs/inode.c if (!work) work 10211 fs/btrfs/inode.c init_completion(&work->completion); work 10212 fs/btrfs/inode.c INIT_LIST_HEAD(&work->list); work 10213 fs/btrfs/inode.c work->inode = inode; work 10214 fs/btrfs/inode.c btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); work 10216 fs/btrfs/inode.c return work; work 10227 fs/btrfs/inode.c struct btrfs_delalloc_work *work, *next; work 10254 fs/btrfs/inode.c work = btrfs_alloc_delalloc_work(inode); work 10255 fs/btrfs/inode.c if (!work) { work 10260 fs/btrfs/inode.c list_add_tail(&work->list, &works); work 10262 fs/btrfs/inode.c &work->work); work 10272 fs/btrfs/inode.c list_for_each_entry_safe(work, next, &works, list) { work 10273 fs/btrfs/inode.c list_del_init(&work->list); work 10274 fs/btrfs/inode.c wait_for_completion(&work->completion); work 10275 fs/btrfs/inode.c kfree(work); work 506 fs/btrfs/ordered-data.c static void btrfs_run_ordered_extent_work(struct btrfs_work *work) work 510 fs/btrfs/ordered-data.c ordered = container_of(work, struct btrfs_ordered_extent, flush_work); work 125 fs/btrfs/ordered-data.h struct btrfs_work work; work 3133 fs/btrfs/qgroup.c static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) work 3135 fs/btrfs/qgroup.c struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, work 80 fs/btrfs/raid56.c struct btrfs_work work; work 179 fs/btrfs/raid56.c static void rmw_work(struct btrfs_work *work); work 180 fs/btrfs/raid56.c static void read_rebuild_work(struct btrfs_work *work); work 189 fs/btrfs/raid56.c static void scrub_parity_work(struct btrfs_work *work); work 193 fs/btrfs/raid56.c btrfs_init_work(&rbio->work, work_func, NULL, NULL); work 194 fs/btrfs/raid56.c btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); work 1666 fs/btrfs/raid56.c struct btrfs_work work; work 1733 fs/btrfs/raid56.c static void unplug_work(struct btrfs_work *work) work 1736 fs/btrfs/raid56.c plug = container_of(work, struct btrfs_plug_cb, work); work 1746 fs/btrfs/raid56.c btrfs_init_work(&plug->work, unplug_work, NULL, NULL); work 1748 fs/btrfs/raid56.c &plug->work); work 2235 fs/btrfs/raid56.c static void rmw_work(struct btrfs_work *work) work 2239 fs/btrfs/raid56.c rbio = container_of(work, struct btrfs_raid_bio, work); work 2243 fs/btrfs/raid56.c static void read_rebuild_work(struct btrfs_work *work) work 2247 fs/btrfs/raid56.c rbio = container_of(work, struct btrfs_raid_bio, work); work 2706 fs/btrfs/raid56.c static void scrub_parity_work(struct btrfs_work *work) work 2710 fs/btrfs/raid56.c rbio = container_of(work, struct btrfs_raid_bio, work); work 79 fs/btrfs/reada.c struct btrfs_work work; work 752 fs/btrfs/reada.c static void reada_start_machine_worker(struct btrfs_work *work) work 757 fs/btrfs/reada.c rmw = container_of(work, struct reada_machine_work, work); work 822 fs/btrfs/reada.c btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL); work 825 fs/btrfs/reada.c btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
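
The btrfs_delalloc_work entries above show a fan-out/join idiom: allocate one item per object, queue them all, then wait_for_completion() on each before freeing. The same idea on the generic workqueue, with hypothetical names (demo_job, demo_fan_out_join):

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_job {
	struct work_struct work;
	struct completion done;
};

static void demo_job_fn(struct work_struct *work)
{
	struct demo_job *job = container_of(work, struct demo_job, work);

	/* ... do the per-object flush ... */
	complete(&job->done);	/* signal only; the submitter still owns job */
}

static int demo_fan_out_join(int n)
{
	struct demo_job *jobs;
	int i;

	jobs = kcalloc(n, sizeof(*jobs), GFP_KERNEL);
	if (!jobs)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		init_completion(&jobs[i].done);
		INIT_WORK(&jobs[i].work, demo_job_fn);
		schedule_work(&jobs[i].work);	/* fan out */
	}
	for (i = 0; i < n; i++)
		wait_for_completion(&jobs[i].done);	/* join */
	kfree(jobs);
	return 0;
}
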
work 98 fs/btrfs/scrub.c struct btrfs_work work; work 118 fs/btrfs/scrub.c struct btrfs_work work; work 140 fs/btrfs/scrub.c struct btrfs_work work; work 242 fs/btrfs/scrub.c static void scrub_bio_end_io_worker(struct btrfs_work *work); work 253 fs/btrfs/scrub.c static void scrub_wr_bio_end_io_worker(struct btrfs_work *work); work 601 fs/btrfs/scrub.c btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL, work 1723 fs/btrfs/scrub.c btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); work 1724 fs/btrfs/scrub.c btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); work 1727 fs/btrfs/scrub.c static void scrub_wr_bio_end_io_worker(struct btrfs_work *work) work 1729 fs/btrfs/scrub.c struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); work 2116 fs/btrfs/scrub.c btrfs_queue_work(fs_info->scrub_workers, &sblock->work); work 2119 fs/btrfs/scrub.c static void scrub_missing_raid56_worker(struct btrfs_work *work) work 2121 fs/btrfs/scrub.c struct scrub_block *sblock = container_of(work, struct scrub_block, work); work 2205 fs/btrfs/scrub.c btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL); work 2319 fs/btrfs/scrub.c btrfs_queue_work(fs_info->scrub_workers, &sbio->work); work 2322 fs/btrfs/scrub.c static void scrub_bio_end_io_worker(struct btrfs_work *work) work 2324 fs/btrfs/scrub.c struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); work 2722 fs/btrfs/scrub.c static void scrub_parity_bio_endio_worker(struct btrfs_work *work) work 2724 fs/btrfs/scrub.c struct scrub_parity *sparity = container_of(work, struct scrub_parity, work 2725 fs/btrfs/scrub.c work); work 2743 fs/btrfs/scrub.c btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL, work 2745 fs/btrfs/scrub.c btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); work 757 fs/btrfs/space-info.c static void btrfs_async_reclaim_metadata_space(struct work_struct *work) work 766 fs/btrfs/space-info.c fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); work 831 fs/btrfs/space-info.c void btrfs_init_async_reclaim_work(struct work_struct *work) work 833 fs/btrfs/space-info.c INIT_WORK(work, btrfs_async_reclaim_metadata_space); work 1765 fs/btrfs/transaction.c struct work_struct work; work 1768 fs/btrfs/transaction.c static void do_async_commit(struct work_struct *work) work 1771 fs/btrfs/transaction.c container_of(work, struct btrfs_async_commit, work); work 1797 fs/btrfs/transaction.c INIT_WORK(&ac->work, do_async_commit); work 1818 fs/btrfs/transaction.c schedule_work(&ac->work); work 684 fs/btrfs/volumes.c &device->work); work 702 fs/btrfs/volumes.c static void pending_bios_fn(struct btrfs_work *work) work 706 fs/btrfs/volumes.c device = container_of(work, struct btrfs_device, work); work 6509 fs/btrfs/volumes.c btrfs_queue_work(fs_info->submit_workers, &device->work); work 6728 fs/btrfs/volumes.c btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL); work 134 fs/btrfs/volumes.h struct btrfs_work work; work 37 fs/cachefiles/namei.c object->fscache.flags, work_busy(&object->fscache.work), work 203 fs/cachefiles/namei.c if (work_pending(&xobject->fscache.work)) { work 741 fs/ceph/file.c struct work_struct work; work 745 fs/ceph/file.c static void ceph_aio_retry_work(struct work_struct *work); work 812 fs/ceph/file.c INIT_WORK(&aio_work->work, ceph_aio_retry_work); work 815 fs/ceph/file.c &aio_work->work); work 859 fs/ceph/file.c static void ceph_aio_retry_work(struct work_struct *work) work 862 fs/ceph/file.c
container_of(work, struct ceph_aio_work, work); work 37 fs/ceph/inode.c static void ceph_inode_work(struct work_struct *work); work 1961 fs/ceph/inode.c static void ceph_inode_work(struct work_struct *work) work 1963 fs/ceph/inode.c struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, work 60 fs/ceph/mds_client.c static void ceph_cap_release_work(struct work_struct *work); work 61 fs/ceph/mds_client.c static void ceph_cap_reclaim_work(struct work_struct *work); work 1951 fs/ceph/mds_client.c static void ceph_cap_release_work(struct work_struct *work) work 1954 fs/ceph/mds_client.c container_of(work, struct ceph_mds_session, s_cap_release_work); work 1993 fs/ceph/mds_client.c static void ceph_cap_reclaim_work(struct work_struct *work) work 1996 fs/ceph/mds_client.c container_of(work, struct ceph_mds_client, cap_reclaim_work); work 4059 fs/ceph/mds_client.c static void delayed_work(struct work_struct *work) work 4063 fs/ceph/mds_client.c container_of(work, struct ceph_mds_client, delayed_work.work); work 29 fs/cifs/cifs_dfs_ref.c static void cifs_dfs_expire_automounts(struct work_struct *work); work 34 fs/cifs/cifs_dfs_ref.c static void cifs_dfs_expire_automounts(struct work_struct *work) work 1319 fs/cifs/cifsglob.h struct work_struct work; work 1346 fs/cifs/cifsglob.h struct work_struct work; work 1558 fs/cifs/cifsglob.h struct work_struct work; work 1915 fs/cifs/cifsglob.h void cifs_oplock_break(struct work_struct *work); work 558 fs/cifs/cifsproto.h void cifs_writev_complete(struct work_struct *work); work 580 fs/cifs/cifsproto.h void smb2_cached_lease_break(struct work_struct *work); work 1741 fs/cifs/cifssmb.c queue_work(cifsiod_wq, &rdata->work); work 2163 fs/cifs/cifssmb.c cifs_writev_complete(struct work_struct *work) work 2165 fs/cifs/cifssmb.c struct cifs_writedata *wdata = container_of(work, work 2166 fs/cifs/cifssmb.c struct cifs_writedata, work); work 2215 fs/cifs/cifssmb.c INIT_WORK(&wdata->work, complete); work 2265 fs/cifs/cifssmb.c queue_work(cifsiod_wq, &wdata->work); work 337 fs/cifs/connect.c static void cifs_prune_tlinks(struct work_struct *work); work 676 fs/cifs/connect.c cifs_echo_request(struct work_struct *work) work 679 fs/cifs/connect.c struct TCP_Server_Info *server = container_of(work, work 680 fs/cifs/connect.c struct TCP_Server_Info, echo.work); work 5468 fs/cifs/connect.c cifs_prune_tlinks(struct work_struct *work) work 5470 fs/cifs/connect.c struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info, work 5471 fs/cifs/connect.c prune_tlinks.work); work 77 fs/cifs/dfs_cache.c static void refresh_cache_worker(struct work_struct *work); work 1449 fs/cifs/dfs_cache.c static void refresh_cache_worker(struct work_struct *work) work 1451 fs/cifs/dfs_cache.c struct dfs_cache *dc = container_of(work, struct dfs_cache, work 1452 fs/cifs/dfs_cache.c dc_refresh.work); work 291 fs/cifs/file.c static void cifsFileInfo_put_work(struct work_struct *work); work 409 fs/cifs/file.c static void cifsFileInfo_put_work(struct work_struct *work) work 411 fs/cifs/file.c struct cifsFileInfo *cifs_file = container_of(work, work 2694 fs/cifs/file.c cifs_uncached_writev_complete(struct work_struct *work) work 2696 fs/cifs/file.c struct cifs_writedata *wdata = container_of(work, work 2697 fs/cifs/file.c struct cifs_writedata, work); work 3278 fs/cifs/file.c INIT_WORK(&rdata->work, complete); work 3396 fs/cifs/file.c cifs_uncached_readv_complete(struct work_struct *work) work 3398 fs/cifs/file.c struct cifs_readdata *rdata = container_of(work, work 3399 
fs/cifs/file.c struct cifs_readdata, work); work 4092 fs/cifs/file.c cifs_readv_complete(struct work_struct *work) work 4095 fs/cifs/file.c struct cifs_readdata *rdata = container_of(work, work 4096 fs/cifs/file.c struct cifs_readdata, work); work 4669 fs/cifs/file.c void cifs_oplock_break(struct work_struct *work) work 4671 fs/cifs/file.c struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, work 498 fs/cifs/smb2misc.c cifs_ses_oplock_break(struct work_struct *work) work 500 fs/cifs/smb2misc.c struct smb2_lease_break_work *lw = container_of(work, work 733 fs/cifs/smb2misc.c smb2_cancelled_close_fid(struct work_struct *work) work 735 fs/cifs/smb2misc.c struct close_cancelled_open *cancelled = container_of(work, work 736 fs/cifs/smb2misc.c struct close_cancelled_open, work); work 760 fs/cifs/smb2misc.c INIT_WORK(&cancelled->work, smb2_cancelled_close_fid); work 761 fs/cifs/smb2misc.c WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false); work 619 fs/cifs/smb2ops.c smb2_cached_lease_break(struct work_struct *work) work 621 fs/cifs/smb2ops.c struct cached_fid *cfid = container_of(work, work 4092 fs/cifs/smb2ops.c static void smb2_decrypt_offload(struct work_struct *work) work 4094 fs/cifs/smb2ops.c struct smb2_decrypt_work *dw = container_of(work, work 3346 fs/cifs/smb2pdu.c void smb2_reconnect_server(struct work_struct *work) work 3348 fs/cifs/smb2pdu.c struct TCP_Server_Info *server = container_of(work, work 3349 fs/cifs/smb2pdu.c struct TCP_Server_Info, reconnect.work); work 3704 fs/cifs/smb2pdu.c queue_work(cifsiod_wq, &rdata->work); work 3932 fs/cifs/smb2pdu.c queue_work(cifsiod_wq, &wdata->work); work 60 fs/cifs/smb2proto.h extern void smb2_echo_request(struct work_struct *work); work 116 fs/cifs/smb2proto.h extern void smb2_reconnect_server(struct work_struct *work); work 220 fs/cifs/smb2proto.h void smb2_cancelled_close_fid(struct work_struct *work); work 158 fs/cifs/smbdirect.c static void smbd_disconnect_rdma_work(struct work_struct *work) work 161 fs/cifs/smbdirect.c container_of(work, struct smbd_connection, disconnect_work); work 407 fs/cifs/smbdirect.c static void smbd_post_send_credits(struct work_struct *work) work 414 fs/cifs/smbdirect.c container_of(work, struct smbd_connection, work 462 fs/cifs/smbdirect.c static void smbd_recv_done_work(struct work_struct *work) work 465 fs/cifs/smbdirect.c container_of(work, struct smbd_connection, recv_done_work); work 1350 fs/cifs/smbdirect.c static void send_immediate_work(struct work_struct *work) work 1353 fs/cifs/smbdirect.c work, struct smbd_connection, work 1354 fs/cifs/smbdirect.c send_immediate_work.work); work 1364 fs/cifs/smbdirect.c static void idle_connection_timer(struct work_struct *work) work 1367 fs/cifs/smbdirect.c work, struct smbd_connection, work 1368 fs/cifs/smbdirect.c idle_timer_work.work); work 2266 fs/cifs/smbdirect.c static void smbd_mr_recovery_work(struct work_struct *work) work 2269 fs/cifs/smbdirect.c container_of(work, struct smbd_connection, mr_recovery_work); work 53 fs/crypto/bio.c static void completion_pages(struct work_struct *work) work 55 fs/crypto/bio.c struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work); work 65 fs/crypto/bio.c INIT_WORK(&ctx->work, completion_pages); work 67 fs/crypto/bio.c fscrypt_enqueue_decrypt_work(&ctx->work); work 55 fs/crypto/crypto.c void fscrypt_enqueue_decrypt_work(struct work_struct *work) work 57 fs/crypto/crypto.c queue_work(fscrypt_read_workqueue, work); work 337 fs/direct-io.c static void dio_aio_complete_work(struct 
work_struct *work) work 339 fs/direct-io.c struct dio *dio = container_of(work, struct dio, complete_work); work 213 fs/dlm/ast.c void dlm_callback_work(struct work_struct *work) work 215 fs/dlm/ast.c struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work); work 22 fs/dlm/ast.h void dlm_callback_work(struct work_struct *work); work 169 fs/dlm/lowcomms.c static void process_recv_sockets(struct work_struct *work); work 170 fs/dlm/lowcomms.c static void process_send_sockets(struct work_struct *work); work 1599 fs/dlm/lowcomms.c static void process_recv_sockets(struct work_struct *work) work 1601 fs/dlm/lowcomms.c struct connection *con = container_of(work, struct connection, rwork); work 1611 fs/dlm/lowcomms.c static void process_send_sockets(struct work_struct *work) work 1613 fs/dlm/lowcomms.c struct connection *con = container_of(work, struct connection, swork); work 718 fs/erofs/zdata.c queue_work(z_erofs_workqueue, &io->u.work); work 978 fs/erofs/zdata.c static void z_erofs_vle_unzip_wq(struct work_struct *work) work 981 fs/erofs/zdata.c container_of(work, struct z_erofs_unzip_io_sb, io.u.work); work 1136 fs/erofs/zdata.c INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq); work 93 fs/erofs/zdata.h struct work_struct work; work 3342 fs/ext4/ext4.h extern void ext4_end_io_rsv_work(struct work_struct *work); work 236 fs/ext4/page-io.c void ext4_end_io_rsv_work(struct work_struct *work) work 238 fs/ext4/page-io.c struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info, work 65 fs/ext4/readpage.c struct work_struct work; work 96 fs/ext4/readpage.c static void decrypt_work(struct work_struct *work) work 99 fs/ext4/readpage.c container_of(work, struct bio_post_read_ctx, work); work 106 fs/ext4/readpage.c static void verity_work(struct work_struct *work) work 109 fs/ext4/readpage.c container_of(work, struct bio_post_read_ctx, work); work 138 fs/ext4/readpage.c INIT_WORK(&ctx->work, decrypt_work); work 139 fs/ext4/readpage.c fscrypt_enqueue_decrypt_work(&ctx->work); work 146 fs/ext4/readpage.c INIT_WORK(&ctx->work, verity_work); work 147 fs/ext4/readpage.c fsverity_enqueue_verify_work(&ctx->work); work 82 fs/f2fs/data.c struct work_struct work; work 114 fs/f2fs/data.c static void decrypt_work(struct work_struct *work) work 117 fs/f2fs/data.c container_of(work, struct bio_post_read_ctx, work); work 124 fs/f2fs/data.c static void verity_work(struct work_struct *work) work 127 fs/f2fs/data.c container_of(work, struct bio_post_read_ctx, work); work 144 fs/f2fs/data.c INIT_WORK(&ctx->work, decrypt_work); work 145 fs/f2fs/data.c fscrypt_enqueue_decrypt_work(&ctx->work); work 152 fs/f2fs/data.c INIT_WORK(&ctx->work, verity_work); work 153 fs/f2fs/data.c fsverity_enqueue_verify_work(&ctx->work); work 359 fs/fat/dir.c unsigned char c, work[MSDOS_NAME]; work 370 fs/fat/dir.c memcpy(work, de->name, sizeof(work)); work 374 fs/fat/dir.c if (work[0] == 0x05) work 375 fs/fat/dir.c work[0] = 0xE5; work 379 fs/fat/dir.c c = work[i]; work 382 fs/fat/dir.c chl = fat_shortname2uni(nls_disk, &work[i], 8 - i, work 399 fs/fat/dir.c ptname[i] = work[i]; work 415 fs/fat/dir.c c = work[k]; work 418 fs/fat/dir.c chl = fat_shortname2uni(nls_disk, &work[k], MSDOS_NAME - k, work 439 fs/fat/dir.c ptname[i] = work[k]; work 311 fs/file_table.c static void ____fput(struct callback_head *work) work 313 fs/file_table.c __fput(container_of(work, struct file, f_u.fu_rcuhead)); work 161 fs/fs-writeback.c struct wb_writeback_work *work) work 163 fs/fs-writeback.c struct wb_completion *done = work->done; work 165 
fs/fs-writeback.c if (work->auto_free) work 166 fs/fs-writeback.c kfree(work); work 177 fs/fs-writeback.c struct wb_writeback_work *work) work 179 fs/fs-writeback.c trace_writeback_queue(wb, work); work 181 fs/fs-writeback.c if (work->done) work 182 fs/fs-writeback.c atomic_inc(&work->done->cnt); work 187 fs/fs-writeback.c list_add_tail(&work->list, &wb->work_list); work 190 fs/fs-writeback.c finish_writeback_work(wb, work); work 338 fs/fs-writeback.c struct work_struct work; work 351 fs/fs-writeback.c static void inode_switch_wbs_work_fn(struct work_struct *work) work 354 fs/fs-writeback.c container_of(work, struct inode_switch_wbs_context, work); work 477 fs/fs-writeback.c INIT_WORK(&isw->work, inode_switch_wbs_work_fn); work 478 fs/fs-writeback.c queue_work(isw_wq, &isw->work); work 853 fs/fs-writeback.c struct wb_writeback_work *work; work 871 fs/fs-writeback.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 872 fs/fs-writeback.c if (work) { work 873 fs/fs-writeback.c *work = *base_work; work 874 fs/fs-writeback.c work->nr_pages = nr_pages; work 875 fs/fs-writeback.c work->auto_free = 1; work 876 fs/fs-writeback.c wb_queue_work(wb, work); work 881 fs/fs-writeback.c work = &fallback_work; work 882 fs/fs-writeback.c *work = *base_work; work 883 fs/fs-writeback.c work->nr_pages = nr_pages; work 884 fs/fs-writeback.c work->auto_free = 0; work 885 fs/fs-writeback.c work->done = &fallback_work_done; work 887 fs/fs-writeback.c wb_queue_work(wb, work); work 924 fs/fs-writeback.c struct wb_writeback_work *work; work 968 fs/fs-writeback.c work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN); work 969 fs/fs-writeback.c if (work) { work 970 fs/fs-writeback.c work->nr_pages = nr; work 971 fs/fs-writeback.c work->sync_mode = WB_SYNC_NONE; work 972 fs/fs-writeback.c work->range_cyclic = 1; work 973 fs/fs-writeback.c work->reason = reason; work 974 fs/fs-writeback.c work->done = done; work 975 fs/fs-writeback.c work->auto_free = 1; work 976 fs/fs-writeback.c wb_queue_work(wb, work); work 1229 fs/fs-writeback.c struct wb_writeback_work *work) work 1241 fs/fs-writeback.c older_than_this = work->older_than_this; work 1242 fs/fs-writeback.c else if (!work->for_sync) { work 1292 fs/fs-writeback.c static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) work 1298 fs/fs-writeback.c moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); work 1300 fs/fs-writeback.c EXPIRE_DIRTY_ATIME, work); work 1303 fs/fs-writeback.c trace_writeback_queue_io(wb, work, moved); work 1585 fs/fs-writeback.c struct wb_writeback_work *work) work 1602 fs/fs-writeback.c if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) work 1607 fs/fs-writeback.c pages = min(pages, work->nr_pages); work 1626 fs/fs-writeback.c struct wb_writeback_work *work) work 1629 fs/fs-writeback.c .sync_mode = work->sync_mode, work 1630 fs/fs-writeback.c .tagged_writepages = work->tagged_writepages, work 1631 fs/fs-writeback.c .for_kupdate = work->for_kupdate, work 1632 fs/fs-writeback.c .for_background = work->for_background, work 1633 fs/fs-writeback.c .for_sync = work->for_sync, work 1634 fs/fs-writeback.c .range_cyclic = work->range_cyclic, work 1647 fs/fs-writeback.c if (work->sb) { work 1708 fs/fs-writeback.c write_chunk = writeback_chunk_size(wb, work); work 1719 fs/fs-writeback.c work->nr_pages -= write_chunk - wbc.nr_to_write; work 1759 fs/fs-writeback.c if (work->nr_pages <= 0) work 1767 fs/fs-writeback.c struct wb_writeback_work *work) work 1785 fs/fs-writeback.c wrote += writeback_sb_inodes(sb, wb, 
work); work 1792 fs/fs-writeback.c if (work->nr_pages <= 0) work 1803 fs/fs-writeback.c struct wb_writeback_work work = { work 1814 fs/fs-writeback.c queue_io(wb, &work); work 1815 fs/fs-writeback.c __writeback_inodes_wb(wb, &work); work 1819 fs/fs-writeback.c return nr_pages - work.nr_pages; work 1838 fs/fs-writeback.c struct wb_writeback_work *work) work 1841 fs/fs-writeback.c long nr_pages = work->nr_pages; work 1848 fs/fs-writeback.c work->older_than_this = &oldest_jif; work 1856 fs/fs-writeback.c if (work->nr_pages <= 0) work 1865 fs/fs-writeback.c if ((work->for_background || work->for_kupdate) && work 1873 fs/fs-writeback.c if (work->for_background && !wb_over_bg_thresh(wb)) work 1882 fs/fs-writeback.c if (work->for_kupdate) { work 1885 fs/fs-writeback.c } else if (work->for_background) work 1888 fs/fs-writeback.c trace_writeback_start(wb, work); work 1890 fs/fs-writeback.c queue_io(wb, work); work 1891 fs/fs-writeback.c if (work->sb) work 1892 fs/fs-writeback.c progress = writeback_sb_inodes(work->sb, wb, work); work 1894 fs/fs-writeback.c progress = __writeback_inodes_wb(wb, work); work 1895 fs/fs-writeback.c trace_writeback_written(wb, work); work 1919 fs/fs-writeback.c trace_writeback_wait(wb, work); work 1930 fs/fs-writeback.c return nr_pages - work->nr_pages; work 1938 fs/fs-writeback.c struct wb_writeback_work *work = NULL; work 1942 fs/fs-writeback.c work = list_entry(wb->work_list.next, work 1944 fs/fs-writeback.c list_del_init(&work->list); work 1947 fs/fs-writeback.c return work; work 1954 fs/fs-writeback.c struct wb_writeback_work work = { work 1962 fs/fs-writeback.c return wb_writeback(wb, &work); work 1988 fs/fs-writeback.c struct wb_writeback_work work = { work 1996 fs/fs-writeback.c return wb_writeback(wb, &work); work 2011 fs/fs-writeback.c struct wb_writeback_work work = { work 2018 fs/fs-writeback.c nr_pages = wb_writeback(wb, &work); work 2031 fs/fs-writeback.c struct wb_writeback_work *work; work 2035 fs/fs-writeback.c while ((work = get_next_work_item(wb)) != NULL) { work 2036 fs/fs-writeback.c trace_writeback_exec(wb, work); work 2037 fs/fs-writeback.c wrote += wb_writeback(wb, work); work 2038 fs/fs-writeback.c finish_writeback_work(wb, work); work 2060 fs/fs-writeback.c void wb_workfn(struct work_struct *work) work 2062 fs/fs-writeback.c struct bdi_writeback *wb = container_of(to_delayed_work(work), work 2455 fs/fs-writeback.c struct wb_writeback_work work = { work 2468 fs/fs-writeback.c bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy); work 2533 fs/fs-writeback.c struct wb_writeback_work work = { work 2554 fs/fs-writeback.c bdi_split_work_to_wbs(bdi, &work, false); work 229 fs/fscache/object-list.c FILTER(work_busy(&obj->work), WORK, NOWORK); work 246 fs/fscache/object-list.c work_busy(&obj->work)); work 45 fs/fscache/object.c .work = f \ work 51 fs/fscache/object.c #define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); }) work 66 fs/fscache/object.c .work = NULL, \ work 195 fs/fscache/object.c ASSERT(state->work != NULL); work 206 fs/fscache/object.c if (!state->work) { work 234 fs/fscache/object.c new_state = state->work(object, event); work 251 fs/fscache/object.c if (state->work) { work 276 fs/fscache/object.c static void fscache_object_work_func(struct work_struct *work) work 279 fs/fscache/object.c container_of(work, struct fscache_object, work); work 315 fs/fscache/object.c INIT_WORK(&object->work, fscache_object_work_func); work 821 fs/fscache/object.c if (queue_work(fscache_object_wq, &object->work)) {
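Nearly every fs/ entry indexed above follows one shape: a work_struct (or delayed_work) embedded in a containing object, a handler that climbs back to the container with container_of(), and INIT_WORK() plus a queue_work()/schedule_work() call to arm it. A minimal sketch of that pattern; sync_ctx, sync_fn and the GFP choice are illustrative assumptions, not names from the tree:

    #include <linux/workqueue.h>
    #include <linux/printk.h>
    #include <linux/slab.h>

    struct sync_ctx {
            int dirty;                      /* payload for the deferred step */
            struct work_struct work;        /* embedded, as in the entries above */
    };

    static void sync_fn(struct work_struct *work)
    {
            /* Recover the containing object from the embedded member. */
            struct sync_ctx *ctx = container_of(work, struct sync_ctx, work);

            pr_info("flushing, dirty=%d\n", ctx->dirty);
            kfree(ctx);
    }

    static int sync_defer(int dirty)
    {
            struct sync_ctx *ctx = kzalloc(sizeof(*ctx), GFP_NOFS);

            if (!ctx)
                    return -ENOMEM;
            ctx->dirty = dirty;
            INIT_WORK(&ctx->work, sync_fn);
            schedule_work(&ctx->work);      /* filesystems often use a private wq */
            return 0;
    }

The delayed variants seen in the hfs, hfsplus and gfs2 entries differ only in that the handler receives the inner member of a delayed_work, hence the container_of(work, ..., some_work.work) and to_delayed_work() calls indexed there.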
work 37 fs/fscache/operation.c INIT_WORK(&op->work, fscache_op_work_func); work 78 fs/fscache/operation.c if (!queue_work(fscache_op_wq, &op->work)) work 561 fs/fscache/operation.c void fscache_operation_gc(struct work_struct *work) work 566 fs/fscache/operation.c container_of(work, struct fscache_cache, op_gc); work 615 fs/fscache/operation.c void fscache_op_work_func(struct work_struct *work) work 618 fs/fscache/operation.c container_of(work, struct fscache_operation, work); work 238 fs/fuse/virtio_fs.c static void virtio_fs_hiprio_done_work(struct work_struct *work) work 240 fs/fuse/virtio_fs.c struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, work 260 fs/fuse/virtio_fs.c static void virtio_fs_request_dispatch_work(struct work_struct *work) work 263 fs/fuse/virtio_fs.c struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, work 264 fs/fuse/virtio_fs.c dispatch_work.work); work 316 fs/fuse/virtio_fs.c static void virtio_fs_hiprio_dispatch_work(struct work_struct *work) work 319 fs/fuse/virtio_fs.c struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, work 320 fs/fuse/virtio_fs.c dispatch_work.work); work 446 fs/fuse/virtio_fs.c static void virtio_fs_requests_done_work(struct work_struct *work) work 448 fs/fuse/virtio_fs.c struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, work 675 fs/gfs2/glock.c static void delete_work_func(struct work_struct *work) work 677 fs/gfs2/glock.c struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); work 697 fs/gfs2/glock.c static void glock_work_func(struct work_struct *work) work 700 fs/gfs2/glock.c struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); work 574 fs/gfs2/lock_dlm.c static void gfs2_control_func(struct work_struct *work) work 576 fs/gfs2/lock_dlm.c struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work); work 293 fs/gfs2/recovery.c void gfs2_recover_func(struct work_struct *work) work 295 fs/gfs2/recovery.c struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work); work 28 fs/gfs2/recovery.h extern void gfs2_recover_func(struct work_struct *work); work 718 fs/gfs2/super.c void gfs2_freeze_func(struct work_struct *work) work 722 fs/gfs2/super.c struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work); work 44 fs/gfs2/super.h extern void gfs2_freeze_func(struct work_struct *work); work 54 fs/hfs/super.c static void flush_mdb(struct work_struct *work) work 59 fs/hfs/super.c sbi = container_of(work, struct hfs_sb_info, mdb_work.work); work 247 fs/hfsplus/super.c static void delayed_sync_fs(struct work_struct *work) work 252 fs/hfsplus/super.c sbi = container_of(work, struct hfsplus_sb_info, sync_work.work); work 341 fs/io_uring.c struct work_struct work; work 367 fs/io_uring.c static void io_sq_wq_submit_work(struct work_struct *work); work 506 fs/io_uring.c queue_work(ctx->sqo_wq[rw], &req->work); work 702 fs/io_uring.c INIT_WORK(&nxt->work, io_sq_wq_submit_work); work 1789 fs/io_uring.c static void io_poll_complete_work(struct work_struct *work) work 1791 fs/io_uring.c struct io_kiocb *req = container_of(work, struct io_kiocb, work); work 1894 fs/io_uring.c INIT_WORK(&req->work, io_poll_complete_work); work 2086 fs/io_uring.c INIT_WORK(&req->work, io_sq_wq_submit_work); work 2190 fs/io_uring.c static void io_sq_wq_submit_work(struct work_struct *work) work 2192 fs/io_uring.c struct io_kiocb *req = container_of(work, struct io_kiocb, work); work 2435 fs/io_uring.c INIT_WORK(&req->work, 
io_sq_wq_submit_work); work 46 fs/iomap/direct-io.c struct work_struct work; work 128 fs/iomap/direct-io.c static void iomap_dio_complete_work(struct work_struct *work) work 130 fs/iomap/direct-io.c struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work); work 162 fs/iomap/direct-io.c INIT_WORK(&dio->aio.work, iomap_dio_complete_work); work 163 fs/iomap/direct-io.c queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work); work 165 fs/iomap/direct-io.c iomap_dio_complete_work(&dio->aio.work); work 1152 fs/jffs2/wbuf.c static struct jffs2_sb_info *work_to_sb(struct work_struct *work) work 1156 fs/jffs2/wbuf.c dwork = to_delayed_work(work); work 1160 fs/jffs2/wbuf.c static void delayed_wbuf_sync(struct work_struct *work) work 1162 fs/jffs2/wbuf.c struct jffs2_sb_info *c = work_to_sb(work); work 865 fs/kernfs/file.c static void kernfs_notify_workfn(struct work_struct *work) work 331 fs/mbcache.c static void mb_cache_shrink_worker(struct work_struct *work) work 333 fs/mbcache.c struct mb_cache *cache = container_of(work, struct mb_cache, work 232 fs/nfs/blocklayout/blocklayout.c static void bl_read_cleanup(struct work_struct *work) work 237 fs/nfs/blocklayout/blocklayout.c task = container_of(work, struct rpc_task, u.tk_work); work 364 fs/nfs/blocklayout/blocklayout.c static void bl_write_cleanup(struct work_struct *work) work 366 fs/nfs/blocklayout/blocklayout.c struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work); work 90 fs/nfs/direct.c struct work_struct work; work 103 fs/nfs/direct.c static void nfs_direct_write_schedule_work(struct work_struct *work); work 309 fs/nfs/direct.c INIT_WORK(&dreq->work, nfs_direct_write_schedule_work); work 732 fs/nfs/direct.c static void nfs_direct_write_schedule_work(struct work_struct *work) work 734 fs/nfs/direct.c struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work); work 753 fs/nfs/direct.c queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */ work 25 fs/nfs/namespace.c static void nfs_expire_automounts(struct work_struct *work); work 198 fs/nfs/namespace.c static void nfs_expire_automounts(struct work_struct *work) work 55 fs/nfs/nfs4renewd.c nfs4_renew_state(struct work_struct *work) work 59 fs/nfs/nfs4renewd.c container_of(work, struct nfs_client, cl_renewd.work); work 505 fs/nfsd/filecache.c nfsd_file_delayed_close(struct work_struct *work) work 1238 fs/nfsd/nfs4callback.c nfsd4_run_cb_work(struct work_struct *work) work 1241 fs/nfsd/nfs4callback.c container_of(work, struct nfsd4_callback, cb_work); work 801 fs/nilfs2/segment.c static void nilfs_iput_work_func(struct work_struct *work) work 803 fs/nilfs2/segment.c struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info, work 88 fs/notify/mark.c static void fsnotify_mark_destroy_workfn(struct work_struct *work); work 91 fs/notify/mark.c static void fsnotify_connector_destroy_workfn(struct work_struct *work); work 155 fs/notify/mark.c static void fsnotify_connector_destroy_workfn(struct work_struct *work) work 822 fs/notify/mark.c static void fsnotify_mark_destroy_workfn(struct work_struct *work) work 6076 fs/ocfs2/alloc.c static void ocfs2_truncate_log_worker(struct work_struct *work) work 6080 fs/ocfs2/alloc.c container_of(work, struct ocfs2_super, work 6081 fs/ocfs2/alloc.c osb_truncate_log_wq.work); work 287 fs/ocfs2/cluster/heartbeat.c static void o2hb_write_timeout(struct work_struct *work) work 291 fs/ocfs2/cluster/heartbeat.c container_of(work, struct o2hb_region, work 292 
fs/ocfs2/cluster/heartbeat.c hr_write_timeout_work.work); work 371 fs/ocfs2/cluster/heartbeat.c static void o2hb_nego_timeout(struct work_struct *work) work 377 fs/ocfs2/cluster/heartbeat.c reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work); work 92 fs/ocfs2/cluster/quorum.c static void o2quo_make_decision(struct work_struct *work) work 124 fs/ocfs2/cluster/tcp.c static void o2net_sc_connect_completed(struct work_struct *work); work 125 fs/ocfs2/cluster/tcp.c static void o2net_rx_until_empty(struct work_struct *work); work 126 fs/ocfs2/cluster/tcp.c static void o2net_shutdown_sc(struct work_struct *work); work 128 fs/ocfs2/cluster/tcp.c static void o2net_sc_send_keep_req(struct work_struct *work); work 460 fs/ocfs2/cluster/tcp.c struct work_struct *work) work 463 fs/ocfs2/cluster/tcp.c if (!queue_work(o2net_wq, work)) work 467 fs/ocfs2/cluster/tcp.c struct delayed_work *work, work 471 fs/ocfs2/cluster/tcp.c if (!queue_delayed_work(o2net_wq, work, delay)) work 475 fs/ocfs2/cluster/tcp.c struct delayed_work *work) work 477 fs/ocfs2/cluster/tcp.c if (cancel_delayed_work(work)) work 713 fs/ocfs2/cluster/tcp.c static void o2net_shutdown_sc(struct work_struct *work) work 716 fs/ocfs2/cluster/tcp.c container_of(work, struct o2net_sock_container, work 1424 fs/ocfs2/cluster/tcp.c static void o2net_rx_until_empty(struct work_struct *work) work 1427 fs/ocfs2/cluster/tcp.c container_of(work, struct o2net_sock_container, sc_rx_work); work 1475 fs/ocfs2/cluster/tcp.c static void o2net_sc_connect_completed(struct work_struct *work) work 1478 fs/ocfs2/cluster/tcp.c container_of(work, struct o2net_sock_container, work 1491 fs/ocfs2/cluster/tcp.c static void o2net_sc_send_keep_req(struct work_struct *work) work 1494 fs/ocfs2/cluster/tcp.c container_of(work, struct o2net_sock_container, work 1495 fs/ocfs2/cluster/tcp.c sc_keepalive_work.work); work 1563 fs/ocfs2/cluster/tcp.c static void o2net_start_connect(struct work_struct *work) work 1566 fs/ocfs2/cluster/tcp.c container_of(work, struct o2net_node, nn_connect_work.work); work 1690 fs/ocfs2/cluster/tcp.c static void o2net_connect_expired(struct work_struct *work) work 1693 fs/ocfs2/cluster/tcp.c container_of(work, struct o2net_node, nn_connect_expired.work); work 1709 fs/ocfs2/cluster/tcp.c static void o2net_still_up(struct work_struct *work) work 1712 fs/ocfs2/cluster/tcp.c container_of(work, struct o2net_node, nn_still_up.work); work 1947 fs/ocfs2/cluster/tcp.c static void o2net_accept_many(struct work_struct *work) work 191 fs/ocfs2/dlm/dlmcommon.h void dlm_dispatch_work(struct work_struct *work); work 137 fs/ocfs2/dlm/dlmrecovery.c void dlm_dispatch_work(struct work_struct *work) work 140 fs/ocfs2/dlm/dlmrecovery.c container_of(work, struct dlm_ctxt, dispatched_work); work 163 fs/ocfs2/dlmfs/userdlm.c static void user_dlm_unblock_lock(struct work_struct *work); work 285 fs/ocfs2/dlmfs/userdlm.c static void user_dlm_unblock_lock(struct work_struct *work) work 289 fs/ocfs2/dlmfs/userdlm.c container_of(work, struct user_lock_res, l_work); work 1217 fs/ocfs2/journal.c void ocfs2_complete_recovery(struct work_struct *work) work 1221 fs/ocfs2/journal.c container_of(work, struct ocfs2_journal, j_recovery_work); work 1970 fs/ocfs2/journal.c static void ocfs2_orphan_scan_work(struct work_struct *work) work 1975 fs/ocfs2/journal.c os = container_of(work, struct ocfs2_orphan_scan, work 1976 fs/ocfs2/journal.c os_orphan_scan_work.work); work 148 fs/ocfs2/journal.h void ocfs2_complete_recovery(struct work_struct *work); work 227 
fs/ocfs2/localalloc.c void ocfs2_la_enable_worker(struct work_struct *work) work 230 fs/ocfs2/localalloc.c container_of(work, struct ocfs2_super, work 231 fs/ocfs2/localalloc.c la_enable_wq.work); work 52 fs/ocfs2/localalloc.h void ocfs2_la_enable_worker(struct work_struct *work); work 119 fs/ocfs2/quota.h void ocfs2_drop_dquot_refs(struct work_struct *work); work 69 fs/ocfs2/quota_global.c static void qsync_work_fn(struct work_struct *work); work 636 fs/ocfs2/quota_global.c static void qsync_work_fn(struct work_struct *work) work 638 fs/ocfs2/quota_global.c struct ocfs2_mem_dqinfo *oinfo = container_of(work, work 640 fs/ocfs2/quota_global.c dqi_sync_work.work); work 697 fs/ocfs2/quota_global.c void ocfs2_drop_dquot_refs(struct work_struct *work) work 699 fs/ocfs2/quota_global.c struct ocfs2_super *osb = container_of(work, struct ocfs2_super, work 619 fs/overlayfs/super.c struct dentry *work; work 628 fs/overlayfs/super.c work = lookup_one_len(name, ofs->workbasedir, strlen(name)); work 630 fs/overlayfs/super.c if (!IS_ERR(work)) { work 636 fs/overlayfs/super.c if (work->d_inode) { work 645 fs/overlayfs/super.c ovl_workdir_cleanup(dir, mnt, work, 0); work 646 fs/overlayfs/super.c dput(work); work 650 fs/overlayfs/super.c work = ovl_create_real(dir, work, OVL_CATTR(attr.ia_mode)); work 651 fs/overlayfs/super.c err = PTR_ERR(work); work 652 fs/overlayfs/super.c if (IS_ERR(work)) work 668 fs/overlayfs/super.c err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT); work 672 fs/overlayfs/super.c err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS); work 677 fs/overlayfs/super.c inode_lock(work->d_inode); work 678 fs/overlayfs/super.c err = notify_change(work, &attr, NULL); work 679 fs/overlayfs/super.c inode_unlock(work->d_inode); work 683 fs/overlayfs/super.c err = PTR_ERR(work); work 690 fs/overlayfs/super.c return work; work 693 fs/overlayfs/super.c dput(work); work 697 fs/overlayfs/super.c work = NULL; work 779 fs/pstore/platform.c static void pstore_dowork(struct work_struct *work) work 99 fs/reiserfs/journal.c static void flush_async_commits(struct work_struct *work); work 3528 fs/reiserfs/journal.c static void flush_async_commits(struct work_struct *work) work 3531 fs/reiserfs/journal.c container_of(work, struct reiserfs_journal, j_work.work); work 84 fs/reiserfs/super.c static void flush_old_commits(struct work_struct *work) work 89 fs/reiserfs/super.c sbi = container_of(work, struct reiserfs_sb_info, old_work.work); work 157 fs/super.c static void destroy_super_work(struct work_struct *work) work 159 fs/super.c struct super_block *s = container_of(work, struct super_block, work 1008 fs/super.c static void do_emergency_remount(struct work_struct *work) work 1011 fs/super.c kfree(work); work 1017 fs/super.c struct work_struct *work; work 1019 fs/super.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 1020 fs/super.c if (work) { work 1021 fs/super.c INIT_WORK(work, do_emergency_remount); work 1022 fs/super.c schedule_work(work); work 1037 fs/super.c static void do_thaw_all(struct work_struct *work) work 1040 fs/super.c kfree(work); work 1051 fs/super.c struct work_struct *work; work 1053 fs/super.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 1054 fs/super.c if (work) { work 1055 fs/super.c INIT_WORK(work, do_thaw_all); work 1056 fs/super.c schedule_work(work); work 128 fs/sync.c static void do_sync_work(struct work_struct *work) work 143 fs/sync.c kfree(work); work 148 fs/sync.c struct work_struct *work; work 150 fs/sync.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 151 
fs/sync.c if (work) { work 152 fs/sync.c INIT_WORK(work, do_sync_work); work 153 fs/sync.c schedule_work(work); work 716 fs/ufs/super.c static void delayed_sync_fs(struct work_struct *work) work 720 fs/ufs/super.c sbi = container_of(work, struct ufs_sb_info, sync_work.work); work 253 fs/verity/verify.c void fsverity_enqueue_verify_work(struct work_struct *work) work 255 fs/verity/verify.c queue_work(fsverity_read_workqueue, work); work 341 fs/xfs/libxfs/xfs_ag.c aghdr_init_work_f work, work 351 fs/xfs/libxfs/xfs_ag.c (*work)(mp, bp, id); work 362 fs/xfs/libxfs/xfs_ag.c aghdr_init_work_f work; work 388 fs/xfs/libxfs/xfs_ag.c .work = &xfs_sbblock_init, work 395 fs/xfs/libxfs/xfs_ag.c .work = &xfs_agfblock_init, work 402 fs/xfs/libxfs/xfs_ag.c .work = &xfs_agflblock_init, work 409 fs/xfs/libxfs/xfs_ag.c .work = &xfs_agiblock_init, work 416 fs/xfs/libxfs/xfs_ag.c .work = &xfs_bnoroot_init, work 423 fs/xfs/libxfs/xfs_ag.c .work = &xfs_cntroot_init, work 430 fs/xfs/libxfs/xfs_ag.c .work = &xfs_btroot_init, work 438 fs/xfs/libxfs/xfs_ag.c .work = &xfs_btroot_init, work 446 fs/xfs/libxfs/xfs_ag.c .work = &xfs_rmaproot_init, work 453 fs/xfs/libxfs/xfs_ag.c .work = &xfs_btroot_init, work 473 fs/xfs/libxfs/xfs_ag.c error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops); work 2843 fs/xfs/libxfs/xfs_btree.c struct work_struct work; work 2851 fs/xfs/libxfs/xfs_btree.c struct work_struct *work) work 2853 fs/xfs/libxfs/xfs_btree.c struct xfs_btree_split_args *args = container_of(work, work 2854 fs/xfs/libxfs/xfs_btree.c struct xfs_btree_split_args, work); work 2904 fs/xfs/libxfs/xfs_btree.c INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker); work 2905 fs/xfs/libxfs/xfs_btree.c queue_work(xfs_alloc_wq, &args.work); work 2907 fs/xfs/libxfs/xfs_btree.c destroy_work_on_stack(&args.work); work 372 fs/xfs/xfs_aops.c struct work_struct *work) work 379 fs/xfs/xfs_aops.c ip = container_of(work, struct xfs_inode, i_ioend_work); work 1178 fs/xfs/xfs_buf.c struct work_struct *work) work 1181 fs/xfs/xfs_buf.c container_of(work, xfs_buf_t, b_ioend_work); work 170 fs/xfs/xfs_icache.c struct work_struct *work) work 172 fs/xfs/xfs_icache.c struct xfs_mount *mp = container_of(to_delayed_work(work), work 906 fs/xfs/xfs_icache.c struct work_struct *work) work 908 fs/xfs/xfs_icache.c struct xfs_mount *mp = container_of(to_delayed_work(work), work 938 fs/xfs/xfs_icache.c struct work_struct *work) work 940 fs/xfs/xfs_icache.c struct xfs_mount *mp = container_of(to_delayed_work(work), work 52 fs/xfs/xfs_icache.h void xfs_reclaim_worker(struct work_struct *work); work 519 fs/xfs/xfs_inode.h void xfs_end_io(struct work_struct *work); work 1230 fs/xfs/xfs_log.c struct work_struct *work) work 1233 fs/xfs/xfs_log.c container_of(work, struct xlog_in_core, ic_end_io_work); work 1317 fs/xfs/xfs_log.c struct work_struct *work) work 1319 fs/xfs/xfs_log.c struct xlog *log = container_of(to_delayed_work(work), work 502 fs/xfs/xfs_log_cil.c struct work_struct *work) work 505 fs/xfs/xfs_log_cil.c container_of(work, struct xfs_cil_ctx, discard_endio_work); work 887 fs/xfs/xfs_log_cil.c struct work_struct *work) work 889 fs/xfs/xfs_log_cil.c struct xfs_cil *cil = container_of(work, struct xfs_cil, work 101 fs/xfs/xfs_mru_cache.c struct delayed_work work; /* Workqueue data for reaping. 
*/ work 206 fs/xfs/xfs_mru_cache.c queue_delayed_work(xfs_mru_reap_wq, &mru->work, work 266 fs/xfs/xfs_mru_cache.c struct work_struct *work) work 269 fs/xfs/xfs_mru_cache.c container_of(work, struct xfs_mru_cache, work.work); work 287 fs/xfs/xfs_mru_cache.c queue_delayed_work(xfs_mru_reap_wq, &mru->work, next); work 358 fs/xfs/xfs_mru_cache.c INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); work 390 fs/xfs/xfs_mru_cache.c cancel_delayed_work_sync(&mru->work); work 39 fs/xfs/xfs_pwork.c struct work_struct *work) work 45 fs/xfs/xfs_pwork.c pwork = container_of(work, struct xfs_pwork, work); work 92 fs/xfs/xfs_pwork.c INIT_WORK(&pwork->work, xfs_pwork_work); work 95 fs/xfs/xfs_pwork.c queue_work(pctl->wq, &pwork->work); work 31 fs/xfs/xfs_pwork.h struct work_struct work; work 570 include/drm/drm_dp_mst_helper.h struct work_struct work; work 52 include/drm/drm_flip_work.h typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); work 83 include/drm/drm_flip_work.h void drm_flip_work_queue_task(struct drm_flip_work *work, work 85 include/drm/drm_flip_work.h void drm_flip_work_queue(struct drm_flip_work *work, void *val); work 86 include/drm/drm_flip_work.h void drm_flip_work_commit(struct drm_flip_work *work, work 88 include/drm/drm_flip_work.h void drm_flip_work_init(struct drm_flip_work *work, work 90 include/drm/drm_flip_work.h void drm_flip_work_cleanup(struct drm_flip_work *work); work 70 include/drm/ttm/ttm_memory.h struct work_struct work; work 46 include/linux/backing-dev.h void wb_workfn(struct work_struct *work); work 1499 include/linux/blkdev.h int kblockd_schedule_work(struct work_struct *work); work 1500 include/linux/blkdev.h int kblockd_schedule_work_on(int cpu, struct work_struct *work); work 104 include/linux/bpf.h struct work_struct work; work 421 include/linux/bpf.h struct work_struct work; work 641 include/linux/ccp.h struct work_struct work; work 322 include/linux/ceph/messenger.h struct delayed_work work; /* send|recv work */ work 36 include/linux/completion.h #define COMPLETION_INITIALIZER(work) \ work 37 include/linux/completion.h { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } work 39 include/linux/completion.h #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ work 40 include/linux/completion.h (*({ init_completion_map(&(work), &(map)); &(work); })) work 42 include/linux/completion.h #define COMPLETION_INITIALIZER_ONSTACK(work) \ work 43 include/linux/completion.h (*({ init_completion(&work); &work; })) work 53 include/linux/completion.h #define DECLARE_COMPLETION(work) \ work 54 include/linux/completion.h struct completion work = COMPLETION_INITIALIZER(work) work 69 include/linux/completion.h # define DECLARE_COMPLETION_ONSTACK(work) \ work 70 include/linux/completion.h struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) work 71 include/linux/completion.h # define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \ work 72 include/linux/completion.h struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) work 74 include/linux/completion.h # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) work 75 include/linux/completion.h # define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work) work 157 include/linux/console_struct.h extern void vc_SAK(struct work_struct *work); work 342 include/linux/crush/crush.h struct crush_work_bucket **work; /* Per-bucket working store */ work 157 include/linux/devfreq.h struct delayed_work work; work 98 include/linux/dim.h struct work_struct work; work 45 
include/linux/dma-fence-array.h struct irq_work work; work 33 include/linux/dma-fence-chain.h struct irq_work work; work 656 include/linux/edac.h struct delayed_work work; work 1770 include/linux/efi.h struct work_struct work; work 205 include/linux/firewire.h struct delayed_work work; work 98 include/linux/fscache-cache.h struct work_struct work; /* record for async ops */ work 129 include/linux/fscache-cache.h extern void fscache_op_work_func(struct work_struct *work); work 338 include/linux/fscache-cache.h const struct fscache_state *(*work)(struct fscache_object *object, work 380 include/linux/fscache-cache.h struct work_struct work; /* attention scheduling record */ work 72 include/linux/fscrypt.h struct work_struct work; work 294 include/linux/fscrypt.h static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) work 135 include/linux/fsverity.h extern void fsverity_enqueue_verify_work(struct work_struct *work); work 189 include/linux/fsverity.h static inline void fsverity_enqueue_verify_work(struct work_struct *work) work 102 include/linux/greybus/operation.h struct work_struct work; work 234 include/linux/hid-sensor-hub.h struct work_struct work; work 112 include/linux/i3c/master.h struct work_struct work; work 47 include/linux/input-polldev.h struct delayed_work work; work 258 include/linux/interrupt.h struct work_struct work; work 18 include/linux/irq_sim.h struct irq_work work; work 31 include/linux/irq_work.h void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) work 33 include/linux/irq_work.h work->flags = 0; work 34 include/linux/irq_work.h work->func = func; work 39 include/linux/irq_work.h bool irq_work_queue(struct irq_work *work); work 40 include/linux/irq_work.h bool irq_work_queue_on(struct irq_work *work, int cpu); work 43 include/linux/irq_work.h void irq_work_sync(struct irq_work *work); work 12 include/linux/jump_label_ratelimit.h struct delayed_work work; work 18 include/linux/jump_label_ratelimit.h struct delayed_work work; work 24 include/linux/jump_label_ratelimit.h struct delayed_work work; work 28 include/linux/jump_label_ratelimit.h __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout) work 30 include/linux/jump_label_ratelimit.h __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout) work 33 include/linux/jump_label_ratelimit.h __static_key_deferred_flush((x), &(x)->work) work 37 include/linux/jump_label_ratelimit.h struct delayed_work *work, work 39 include/linux/jump_label_ratelimit.h extern void __static_key_deferred_flush(void *key, struct delayed_work *work); work 43 include/linux/jump_label_ratelimit.h extern void jump_label_update_timeout(struct work_struct *work); work 49 include/linux/jump_label_ratelimit.h .work = __DELAYED_WORK_INITIALIZER((name).work, \ work 58 include/linux/jump_label_ratelimit.h .work = __DELAYED_WORK_INITIALIZER((name).work, \ work 79 include/linux/kthread.h typedef void (*kthread_work_func_t)(struct kthread_work *work); work 104 include/linux/kthread.h struct kthread_work work; work 114 include/linux/kthread.h #define KTHREAD_WORK_INIT(work, fn) { \ work 115 include/linux/kthread.h .node = LIST_HEAD_INIT((work).node), \ work 120 include/linux/kthread.h .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \ work 128 include/linux/kthread.h #define DEFINE_KTHREAD_WORK(work, fn) \ work 129 include/linux/kthread.h struct kthread_work work = KTHREAD_WORK_INIT(work, fn) work 157 include/linux/kthread.h #define kthread_init_work(work, fn) \ work 159 include/linux/kthread.h 
memset((work), 0, sizeof(struct kthread_work)); \ work 160 include/linux/kthread.h INIT_LIST_HEAD(&(work)->node); \ work 161 include/linux/kthread.h (work)->func = (fn); \ work 166 include/linux/kthread.h kthread_init_work(&(dwork)->work, (fn)); \ work 182 include/linux/kthread.h struct kthread_work *work); work 192 include/linux/kthread.h void kthread_flush_work(struct kthread_work *work); work 195 include/linux/kthread.h bool kthread_cancel_work_sync(struct kthread_work *work); work 196 include/linux/kthread.h bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); work 202 include/linux/kvm_host.h struct work_struct work; work 31 include/linux/leds-pca9532.h struct work_struct work; work 1851 include/linux/libata.h extern void ata_sff_queue_work(struct work_struct *work); work 713 include/linux/mfd/wm8350/pmic.h struct work_struct work; work 1039 include/linux/mlx4/device.h void handle_port_mgmt_change_event(struct work_struct *work); work 492 include/linux/mlx5/driver.h struct delayed_work work; work 762 include/linux/mlx5/driver.h struct work_struct work; work 916 include/linux/mlx5/driver.h struct mlx5_async_work *work); work 65 include/linux/padata.h struct work_struct work; work 83 include/linux/padata.h struct work_struct work; work 1146 include/linux/phy.h void phy_state_machine(struct work_struct *work); work 602 include/linux/pm.h struct work_struct work; work 44 include/linux/pm_qos.h struct delayed_work work; /* for pm_qos_update_request_timeout */ work 28 include/linux/pm_runtime.h static inline bool queue_pm_work(struct work_struct *work) work 30 include/linux/pm_runtime.h return queue_work(pm_wq, work); work 119 include/linux/pm_runtime.h static inline bool queue_pm_work(struct work_struct *work) { return false; } work 69 include/linux/power/bq27xxx_battery.h struct delayed_work work; work 53 include/linux/qed/qede_rdma.h struct work_struct work; work 211 include/linux/rtc.h void rtc_timer_do_work(struct work_struct *work); work 104 include/linux/skmsg.h struct work_struct work; work 221 include/linux/soc/qcom/qmi.h struct work_struct work; work 36 include/linux/srcutree.h struct work_struct work; /* Context for CB invoking. 
*/ work 84 include/linux/srcutree.h struct delayed_work work; work 100 include/linux/srcutree.h .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \ work 46 include/linux/stop_machine.h struct work_struct work; work 61 include/linux/stop_machine.h static void stop_one_cpu_nowait_workfn(struct work_struct *work) work 64 include/linux/stop_machine.h container_of(work, struct cpu_stop_work, work); work 75 include/linux/stop_machine.h INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn); work 78 include/linux/stop_machine.h schedule_work(&work_buf->work); work 491 include/linux/thunderbolt.h struct work_struct work; work 148 include/linux/tifm.h void tifm_queue_work(struct work_struct *work); work 87 include/linux/tty.h struct work_struct work; work 20 include/linux/umh.h struct work_struct work; work 396 include/linux/usb/gadget.h struct work_struct work; work 431 include/linux/usb/gadget.h #define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) work 113 include/linux/usb/serial.h struct work_struct work; work 82 include/linux/user_namespace.h struct work_struct work; work 27 include/linux/vmpressure.h struct work_struct work; work 21 include/linux/workqueue.h typedef void (*work_func_t)(struct work_struct *work); work 28 include/linux/workqueue.h #define work_data_bits(work) ((unsigned long *)(&(work)->data)) work 116 include/linux/workqueue.h struct work_struct work; work 125 include/linux/workqueue.h struct work_struct work; work 158 include/linux/workqueue.h static inline struct delayed_work *to_delayed_work(struct work_struct *work) work 160 include/linux/workqueue.h return container_of(work, struct delayed_work, work); work 163 include/linux/workqueue.h static inline struct rcu_work *to_rcu_work(struct work_struct *work) work 165 include/linux/workqueue.h return container_of(work, struct rcu_work, work); work 169 include/linux/workqueue.h struct work_struct work; work 192 include/linux/workqueue.h .work = __WORK_INITIALIZER((n).work, (f)), \ work 207 include/linux/workqueue.h extern void __init_work(struct work_struct *work, int onstack); work 208 include/linux/workqueue.h extern void destroy_work_on_stack(struct work_struct *work); work 209 include/linux/workqueue.h extern void destroy_delayed_work_on_stack(struct delayed_work *work); work 210 include/linux/workqueue.h static inline unsigned int work_static(struct work_struct *work) work 212 include/linux/workqueue.h return *work_data_bits(work) & WORK_STRUCT_STATIC; work 215 include/linux/workqueue.h static inline void __init_work(struct work_struct *work, int onstack) { } work 216 include/linux/workqueue.h static inline void destroy_work_on_stack(struct work_struct *work) { } work 217 include/linux/workqueue.h static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { } work 218 include/linux/workqueue.h static inline unsigned int work_static(struct work_struct *work) { return 0; } work 257 include/linux/workqueue.h INIT_WORK(&(_work)->work, (_func)); \ work 265 include/linux/workqueue.h INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ work 284 include/linux/workqueue.h INIT_WORK(&(_work)->work, (_func)) work 287 include/linux/workqueue.h INIT_WORK_ONSTACK(&(_work)->work, (_func)) work 293 include/linux/workqueue.h #define work_pending(work) \ work 294 include/linux/workqueue.h test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) work 302 include/linux/workqueue.h work_pending(&(w)->work) work 445 include/linux/workqueue.h struct work_struct *work);
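The workqueue.h entries above also index the helpers that let one handler type serve timers and RCU: to_delayed_work() and to_rcu_work() are just container_of() over the embedded member. A small sketch of the periodic re-arm idiom built on the delayed form; struct poller and its fields are hypothetical:

    #include <linux/workqueue.h>

    struct poller {
            unsigned long period;           /* in jiffies */
            struct delayed_work dwork;
    };

    static void poller_fn(struct work_struct *work)
    {
            /* to_delayed_work() is container_of(work, struct delayed_work, work) */
            struct delayed_work *dwork = to_delayed_work(work);
            struct poller *p = container_of(dwork, struct poller, dwork);

            /* ... periodic duties ..., then re-arm */
            schedule_delayed_work(&p->dwork, p->period);
    }

    static void poller_start(struct poller *p, unsigned long period)
    {
            p->period = period;
            INIT_DELAYED_WORK(&p->dwork, poller_fn);
            schedule_delayed_work(&p->dwork, p->period);
    }

    static void poller_stop(struct poller *p)
    {
            cancel_delayed_work_sync(&p->dwork);
    }

cancel_delayed_work_sync() in the stop path matters because the handler re-queues itself; the _sync variant is documented to stop even self-requeueing work, where a plain cancel_delayed_work() could race with a fresh re-arm.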
work 447 include/linux/workqueue.h struct work_struct *work); work 449 include/linux/workqueue.h struct delayed_work *work, unsigned long delay); work 461 include/linux/workqueue.h extern bool flush_work(struct work_struct *work); work 462 include/linux/workqueue.h extern bool cancel_work_sync(struct work_struct *work); work 475 include/linux/workqueue.h extern unsigned int work_busy(struct work_struct *work); work 492 include/linux/workqueue.h struct work_struct *work) work 494 include/linux/workqueue.h return queue_work_on(WORK_CPU_UNBOUND, wq, work); work 534 include/linux/workqueue.h static inline bool schedule_work_on(int cpu, struct work_struct *work) work 536 include/linux/workqueue.h return queue_work_on(cpu, system_wq, work); work 550 include/linux/workqueue.h static inline bool schedule_work(struct work_struct *work) work 552 include/linux/workqueue.h return queue_work(system_wq, work); work 340 include/linux/writeback.h void laptop_mode_sync(struct work_struct *work); work 66 include/media/cec.h struct delayed_work work; work 21 include/media/i2c/ir-kbd-i2c.h struct delayed_work work; work 190 include/misc/cxl.h struct cxl_ioctl_start_work *work); work 816 include/net/bluetooth/l2cap.h struct delayed_work *work, long timeout) work 823 include/net/bluetooth/l2cap.h if (!cancel_delayed_work(work)) work 826 include/net/bluetooth/l2cap.h schedule_delayed_work(work, timeout); work 830 include/net/bluetooth/l2cap.h struct delayed_work *work) work 836 include/net/bluetooth/l2cap.h ret = cancel_delayed_work(work); work 114 include/net/caif/caif_spi.h struct work_struct work; work 153 include/net/caif/caif_spi.h void cfspi_xfer(struct work_struct *work); work 5346 include/net/mac80211.h void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work); work 24 include/net/netns/xfrm.h struct work_struct work; work 77 include/net/strparser.h struct work_struct work; work 149 include/net/tls.h struct delayed_work work; work 55 include/net/xdp_sock.h struct work_struct work; work 51 include/rdma/ib_umem.h struct work_struct work; work 83 include/rdma/ib_umem_odp.h struct work_struct work; work 1554 include/rdma/ib_verbs.h struct work_struct work; work 325 include/scsi/libfcoe.h struct work_struct work; work 207 include/scsi/libsas.h struct work_struct work; work 218 include/scsi/libsas.h INIT_WORK(&sw->work, fn); work 223 include/scsi/libsas.h struct sas_work work; work 227 include/scsi/libsas.h static inline struct sas_discovery_event *to_sas_discovery_event(struct work_struct *work) work 229 include/scsi/libsas.h struct sas_discovery_event *ev = container_of(work, typeof(*ev), work.work); work 255 include/scsi/libsas.h struct sas_work work; work 282 include/scsi/libsas.h struct sas_work work; work 287 include/scsi/libsas.h static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work) work 289 include/scsi/libsas.h struct asd_sas_event *ev = container_of(work, typeof(*ev), work.work); work 298 include/scsi/libsas.h INIT_SAS_WORK(&ev->work, fn); work 292 include/sound/ak4113.h struct delayed_work work; work 175 include/sound/ak4114.h struct delayed_work work; work 687 include/sound/soc.h struct delayed_work work; work 509 include/target/target_core_base.h struct work_struct work; work 1363 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_work *work), work 1365 include/trace/events/btrfs.h TP_ARGS(work), work 1368 include/trace/events/btrfs.h __field( const void *, work ) work 1376 include/trace/events/btrfs.h TP_fast_assign_btrfs(btrfs_work_owner(work), work 1377 include/trace/events/btrfs.h 
__entry->work = work; work 1378 include/trace/events/btrfs.h __entry->wq = work->wq; work 1379 include/trace/events/btrfs.h __entry->func = work->func; work 1380 include/trace/events/btrfs.h __entry->ordered_func = work->ordered_func; work 1381 include/trace/events/btrfs.h __entry->ordered_free = work->ordered_free; work 1382 include/trace/events/btrfs.h __entry->normal_work = &work->normal_work; work 1387 include/trace/events/btrfs.h __entry->work, __entry->normal_work, __entry->wq, work 1415 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_work *work), work 1417 include/trace/events/btrfs.h TP_ARGS(work) work 1422 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_work *work), work 1424 include/trace/events/btrfs.h TP_ARGS(work) work 1436 include/trace/events/btrfs.h TP_PROTO(const struct btrfs_work *work), work 1438 include/trace/events/btrfs.h TP_ARGS(work) work 16 include/trace/events/napi.h TP_PROTO(struct napi_struct *napi, int work, int budget), work 18 include/trace/events/napi.h TP_ARGS(napi, work, budget), work 23 include/trace/events/napi.h __field( int, work) work 30 include/trace/events/napi.h __entry->work = work; work 36 include/trace/events/napi.h __entry->work, __entry->budget) work 13 include/trace/events/workqueue.h TP_PROTO(struct work_struct *work), work 15 include/trace/events/workqueue.h TP_ARGS(work), work 18 include/trace/events/workqueue.h __field( void *, work ) work 22 include/trace/events/workqueue.h __entry->work = work; work 25 include/trace/events/workqueue.h TP_printk("work struct %p", __entry->work) work 43 include/trace/events/workqueue.h struct work_struct *work), work 45 include/trace/events/workqueue.h TP_ARGS(req_cpu, pwq, work), work 48 include/trace/events/workqueue.h __field( void *, work ) work 56 include/trace/events/workqueue.h __entry->work = work; work 57 include/trace/events/workqueue.h __entry->function = work->func; work 64 include/trace/events/workqueue.h __entry->work, __entry->function, __entry->workqueue, work 78 include/trace/events/workqueue.h TP_PROTO(struct work_struct *work), work 80 include/trace/events/workqueue.h TP_ARGS(work) work 91 include/trace/events/workqueue.h TP_PROTO(struct work_struct *work), work 93 include/trace/events/workqueue.h TP_ARGS(work), work 96 include/trace/events/workqueue.h __field( void *, work ) work 101 include/trace/events/workqueue.h __entry->work = work; work 102 include/trace/events/workqueue.h __entry->function = work->func; work 105 include/trace/events/workqueue.h TP_printk("work struct %p: function %ps", __entry->work, __entry->function) work 116 include/trace/events/workqueue.h TP_PROTO(struct work_struct *work), work 118 include/trace/events/workqueue.h TP_ARGS(work) work 349 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), work 350 include/trace/events/writeback.h TP_ARGS(wb, work), work 364 include/trace/events/writeback.h __entry->nr_pages = work->nr_pages; work 365 include/trace/events/writeback.h __entry->sb_dev = work->sb ? 
work->sb->s_dev : 0; work 366 include/trace/events/writeback.h __entry->sync_mode = work->sync_mode; work 367 include/trace/events/writeback.h __entry->for_kupdate = work->for_kupdate; work 368 include/trace/events/writeback.h __entry->range_cyclic = work->range_cyclic; work 369 include/trace/events/writeback.h __entry->for_background = work->for_background; work 370 include/trace/events/writeback.h __entry->reason = work->reason; work 388 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \ work 389 include/trace/events/writeback.h TP_ARGS(wb, work)) work 501 include/trace/events/writeback.h struct wb_writeback_work *work, work 503 include/trace/events/writeback.h TP_ARGS(wb, work, moved), work 513 include/trace/events/writeback.h unsigned long *older_than_this = work->older_than_this; work 519 include/trace/events/writeback.h __entry->reason = work->reason; work 444 include/uapi/linux/pkt_sched.h __u64 work; /* total work done */ work 78 include/xen/xenbus.h struct work_struct work; work 92 kernel/acct.c struct work_struct work; work 175 kernel/acct.c schedule_work(&acct->work); work 183 kernel/acct.c static void close_work(struct work_struct *work) work 185 kernel/acct.c struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work); work 246 kernel/acct.c INIT_WORK(&acct->work, close_work); work 70 kernel/async.c struct work_struct work; work 109 kernel/async.c static void async_run_entry_fn(struct work_struct *work) work 112 kernel/async.c container_of(work, struct async_entry, work); work 191 kernel/async.c INIT_WORK(&entry->work, async_run_entry_fn); work 212 kernel/async.c queue_work_node(node, system_unbound_wq, &entry->work); work 36 kernel/bpf/cgroup.c static void cgroup_bpf_release(struct work_struct *work) work 38 kernel/bpf/cgroup.c struct cgroup *p, *cgrp = container_of(work, struct cgroup, work 1967 kernel/bpf/core.c static void bpf_prog_free_deferred(struct work_struct *work) work 1972 kernel/bpf/core.c aux = container_of(work, struct bpf_prog_aux, work); work 1994 kernel/bpf/core.c INIT_WORK(&aux->work, bpf_prog_free_deferred); work 1995 kernel/bpf/core.c schedule_work(&aux->work); work 148 kernel/bpf/cpumap.c static void cpu_map_kthread_stop(struct work_struct *work) work 152 kernel/bpf/cpumap.c rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq); work 41 kernel/bpf/stackmap.c struct stack_map_irq_work *work; work 43 kernel/bpf/stackmap.c work = container_of(entry, struct stack_map_irq_work, irq_work); work 44 kernel/bpf/stackmap.c up_read_non_owner(work->sem); work 45 kernel/bpf/stackmap.c work->sem = NULL; work 288 kernel/bpf/stackmap.c struct stack_map_irq_work *work = NULL; work 291 kernel/bpf/stackmap.c work = this_cpu_ptr(&up_read_work); work 292 kernel/bpf/stackmap.c if (work->irq_work.flags & IRQ_WORK_BUSY) work 332 kernel/bpf/stackmap.c if (!work) { work 335 kernel/bpf/stackmap.c work->sem = &current->mm->mmap_sem; work 336 kernel/bpf/stackmap.c irq_work_queue(&work->irq_work); work 624 kernel/bpf/stackmap.c struct stack_map_irq_work *work; work 627 kernel/bpf/stackmap.c work = per_cpu_ptr(&up_read_work, cpu); work 628 kernel/bpf/stackmap.c init_irq_work(&work->irq_work, do_up_read); work 302 kernel/bpf/syscall.c static void bpf_map_free_deferred(struct work_struct *work) work 304 kernel/bpf/syscall.c struct bpf_map *map = container_of(work, struct bpf_map, work); work 331 kernel/bpf/syscall.c INIT_WORK(&map->work, bpf_map_free_deferred);
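Right at this point in the index, bpf_map_free_deferred() shows why the embedded-work idiom matters: freeing a map's areas may need to sleep (they can be vmalloc'ed), so the final put queues a work item instead of freeing inline. A sketch of that deferred-free idiom, assuming a hypothetical big_obj type:

    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    struct big_obj {
            void *area;                     /* vmalloc'ed, so freeing may sleep */
            struct work_struct work;
    };

    static void big_obj_free_deferred(struct work_struct *work)
    {
            struct big_obj *obj = container_of(work, struct big_obj, work);

            vfree(obj->area);               /* safe here: process context */
            kfree(obj);
    }

    /* May be called with locks held or from atomic context. */
    static void big_obj_put(struct big_obj *obj)
    {
            INIT_WORK(&obj->work, big_obj_free_deferred);
            schedule_work(&obj->work);
    }

The work_struct is the last member the producer touches; once schedule_work() returns, the object belongs to the handler, which is why the handler may kfree() the container.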
work 332 kernel/bpf/syscall.c schedule_work(&map->work); work 272 kernel/cgroup/cgroup-internal.h void cgroup1_release_agent(struct work_struct *work); work 213 kernel/cgroup/cgroup-v1.c static void cgroup_pidlist_destroy_work_fn(struct work_struct *work) work 215 kernel/cgroup/cgroup-v1.c struct delayed_work *dwork = to_delayed_work(work); work 775 kernel/cgroup/cgroup-v1.c void cgroup1_release_agent(struct work_struct *work) work 778 kernel/cgroup/cgroup-v1.c container_of(work, struct cgroup, release_agent_work); work 4967 kernel/cgroup/cgroup.c static void css_free_rwork_fn(struct work_struct *work) work 4969 kernel/cgroup/cgroup.c struct cgroup_subsys_state *css = container_of(to_rcu_work(work), work 5017 kernel/cgroup/cgroup.c static void css_release_work_fn(struct work_struct *work) work 5020 kernel/cgroup/cgroup.c container_of(work, struct cgroup_subsys_state, destroy_work); work 5440 kernel/cgroup/cgroup.c static void css_killed_work_fn(struct work_struct *work) work 5443 kernel/cgroup/cgroup.c container_of(work, struct cgroup_subsys_state, destroy_work); work 355 kernel/cgroup/cpuset.c static void cpuset_hotplug_workfn(struct work_struct *work); work 1568 kernel/cgroup/cpuset.c struct work_struct work; work 1574 kernel/cgroup/cpuset.c static void cpuset_migrate_mm_workfn(struct work_struct *work) work 1577 kernel/cgroup/cpuset.c container_of(work, struct cpuset_migrate_mm_work, work); work 1595 kernel/cgroup/cpuset.c INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); work 1596 kernel/cgroup/cpuset.c queue_work(cpuset_migrate_mm_wq, &mwork->work); work 3121 kernel/cgroup/cpuset.c static void cpuset_hotplug_workfn(struct work_struct *work) work 376 kernel/events/core.c static void perf_sched_delayed(struct work_struct *work); work 4444 kernel/events/core.c static void perf_sched_delayed(struct work_struct *work) work 17 kernel/events/internal.h struct work_struct work; work 843 kernel/events/ring_buffer.c static void rb_free_work(struct work_struct *work) work 849 kernel/events/ring_buffer.c rb = container_of(work, struct ring_buffer, work); work 863 kernel/events/ring_buffer.c schedule_work(&rb->work); work 879 kernel/events/ring_buffer.c INIT_WORK(&rb->work, rb_free_work); work 1792 kernel/events/uprobes.c static void dup_xol_work(struct callback_head *work) work 699 kernel/fork.c static void mmdrop_async_fn(struct work_struct *work) work 703 kernel/fork.c mm = container_of(work, struct mm_struct, async_put_work); work 1105 kernel/fork.c static void mmput_async_fn(struct work_struct *work) work 1107 kernel/fork.c struct mm_struct *mm = container_of(work, struct mm_struct, work 46 kernel/irq/irq_sim.c static void irq_sim_handle_irq(struct irq_work *work) work 53 kernel/irq/irq_sim.c work_ctx = container_of(work, struct irq_sim_work_ctx, work); work 106 kernel/irq/irq_sim.c init_irq_work(&sim->work_ctx.work, irq_sim_handle_irq); work 121 kernel/irq/irq_sim.c irq_work_sync(&sim->work_ctx.work); work 178 kernel/irq/irq_sim.c irq_work_queue(&sim->work_ctx.work); work 287 kernel/irq/manage.c if (!schedule_work(&desc->affinity_notify->work)) { work 329 kernel/irq/manage.c static void irq_affinity_notify(struct work_struct *work) work 332 kernel/irq/manage.c container_of(work, struct irq_affinity_notify, work); work 382 kernel/irq/manage.c INIT_WORK(&notify->work, irq_affinity_notify); work 391 kernel/irq/manage.c if (cancel_work_sync(&old_notify->work)) { work 30 kernel/irq_work.c static bool irq_work_claim(struct irq_work *work) work 38 kernel/irq_work.c flags = work->flags & ~IRQ_WORK_PENDING; work 41 
kernel/irq_work.c oflags = cmpxchg(&work->flags, flags, nflags); work 61 kernel/irq_work.c static void __irq_work_queue_local(struct irq_work *work) work 64 kernel/irq_work.c if (work->flags & IRQ_WORK_LAZY) { work 65 kernel/irq_work.c if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && work 69 kernel/irq_work.c if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) work 75 kernel/irq_work.c bool irq_work_queue(struct irq_work *work) work 78 kernel/irq_work.c if (!irq_work_claim(work)) work 83 kernel/irq_work.c __irq_work_queue_local(work); work 96 kernel/irq_work.c bool irq_work_queue_on(struct irq_work *work, int cpu) work 99 kernel/irq_work.c return irq_work_queue(work); work 106 kernel/irq_work.c if (!irq_work_claim(work)) work 113 kernel/irq_work.c if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) work 116 kernel/irq_work.c __irq_work_queue_local(work); work 144 kernel/irq_work.c struct irq_work *work, *tmp; work 154 kernel/irq_work.c llist_for_each_entry_safe(work, tmp, llnode, llnode) { work 162 kernel/irq_work.c flags = work->flags & ~IRQ_WORK_PENDING; work 163 kernel/irq_work.c xchg(&work->flags, flags); work 165 kernel/irq_work.c work->func(work); work 170 kernel/irq_work.c (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY); work 198 kernel/irq_work.c void irq_work_sync(struct irq_work *work) work 202 kernel/irq_work.c while (work->flags & IRQ_WORK_BUSY) work 259 kernel/jump_label.c void jump_label_update_timeout(struct work_struct *work) work 262 kernel/jump_label.c container_of(work, struct static_key_deferred, work.work); work 281 kernel/jump_label.c struct delayed_work *work, work 289 kernel/jump_label.c schedule_delayed_work(work, timeout); work 293 kernel/jump_label.c void __static_key_deferred_flush(void *key, struct delayed_work *work) work 296 kernel/jump_label.c flush_delayed_work(work); work 305 kernel/jump_label.c INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); work 463 kernel/kprobes.c static void kprobe_optimizer(struct work_struct *work); work 555 kernel/kprobes.c static void kprobe_optimizer(struct work_struct *work) work 636 kernel/kthread.c struct kthread_work *work; work 659 kernel/kthread.c work = NULL; work 662 kernel/kthread.c work = list_first_entry(&worker->work_list, work 664 kernel/kthread.c list_del_init(&work->node); work 666 kernel/kthread.c worker->current_work = work; work 669 kernel/kthread.c if (work) { work 671 kernel/kthread.c work->func(work); work 777 kernel/kthread.c struct kthread_work *work) work 781 kernel/kthread.c return !list_empty(&work->node) || work->canceling; work 785 kernel/kthread.c struct kthread_work *work) work 788 kernel/kthread.c WARN_ON_ONCE(!list_empty(&work->node)); work 790 kernel/kthread.c WARN_ON_ONCE(work->worker && work->worker != worker); work 795 kernel/kthread.c struct kthread_work *work, work 798 kernel/kthread.c kthread_insert_work_sanity_check(worker, work); work 800 kernel/kthread.c list_add_tail(&work->node, pos); work 801 kernel/kthread.c work->worker = worker; work 819 kernel/kthread.c struct kthread_work *work) work 825 kernel/kthread.c if (!queuing_blocked(worker, work)) { work 826 kernel/kthread.c kthread_insert_work(worker, work, &worker->work_list); work 845 kernel/kthread.c struct kthread_work *work = &dwork->work; work 846 kernel/kthread.c struct kthread_worker *worker = work->worker; work 858 kernel/kthread.c WARN_ON_ONCE(work->worker != worker); work 861 kernel/kthread.c WARN_ON_ONCE(list_empty(&work->node)); work 862 kernel/kthread.c list_del_init(&work->node); 
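The kernel/kthread.c entries around here implement the other execution vehicle this index keeps hitting: kthread_worker, a dedicated thread that runs kthread_work items one at a time in queueing order. A minimal sketch of that API, with illustrative names (step_ctx, "step-worker"):

    #include <linux/kthread.h>
    #include <linux/err.h>

    struct step_ctx {
            int step;
            struct kthread_work work;
    };

    static void step_fn(struct kthread_work *work)
    {
            struct step_ctx *ctx = container_of(work, struct step_ctx, work);

            ctx->step++;                    /* runs on one kthread, in order */
    }

    static int step_demo(struct step_ctx *ctx)
    {
            struct kthread_worker *worker;

            worker = kthread_create_worker(0, "step-worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_work(&ctx->work, step_fn);
            kthread_queue_work(worker, &ctx->work);
            kthread_flush_work(&ctx->work); /* wait until it has run */
            kthread_destroy_worker(worker);
            return 0;
    }

Unlike items on a shared workqueue, items queued to one kthread_worker never run concurrently with one another, which gives cheap FIFO serialization without per-object locking.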
work 863 kernel/kthread.c kthread_insert_work(worker, work, &worker->work_list); work 874 kernel/kthread.c struct kthread_work *work = &dwork->work; work 885 kernel/kthread.c kthread_insert_work(worker, work, &worker->work_list); work 890 kernel/kthread.c kthread_insert_work_sanity_check(worker, work); work 892 kernel/kthread.c list_add(&work->node, &worker->delayed_work_list); work 893 kernel/kthread.c work->worker = worker; work 917 kernel/kthread.c struct kthread_work *work = &dwork->work; work 923 kernel/kthread.c if (!queuing_blocked(worker, work)) { work 934 kernel/kthread.c struct kthread_work work; work 938 kernel/kthread.c static void kthread_flush_work_fn(struct kthread_work *work) work 941 kernel/kthread.c container_of(work, struct kthread_flush_work, work); work 951 kernel/kthread.c void kthread_flush_work(struct kthread_work *work) work 954 kernel/kthread.c KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), work 960 kernel/kthread.c worker = work->worker; work 966 kernel/kthread.c WARN_ON_ONCE(work->worker != worker); work 968 kernel/kthread.c if (!list_empty(&work->node)) work 969 kernel/kthread.c kthread_insert_work(worker, &fwork.work, work->node.next); work 970 kernel/kthread.c else if (worker->current_work == work) work 971 kernel/kthread.c kthread_insert_work(worker, &fwork.work, work 993 kernel/kthread.c static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork, work 999 kernel/kthread.c container_of(work, struct kthread_delayed_work, work); work 1000 kernel/kthread.c struct kthread_worker *worker = work->worker; work 1008 kernel/kthread.c work->canceling++; work 1012 kernel/kthread.c work->canceling--; work 1019 kernel/kthread.c if (!list_empty(&work->node)) { work 1020 kernel/kthread.c list_del_init(&work->node); work 1054 kernel/kthread.c struct kthread_work *work = &dwork->work; work 1061 kernel/kthread.c if (!work->worker) work 1065 kernel/kthread.c WARN_ON_ONCE(work->worker != worker); work 1068 kernel/kthread.c if (work->canceling) work 1071 kernel/kthread.c ret = __kthread_cancel_work(work, true, &flags); work 1080 kernel/kthread.c static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) work 1082 kernel/kthread.c struct kthread_worker *worker = work->worker; work 1091 kernel/kthread.c WARN_ON_ONCE(work->worker != worker); work 1093 kernel/kthread.c ret = __kthread_cancel_work(work, is_dwork, &flags); work 1095 kernel/kthread.c if (worker->current_work != work) work 1102 kernel/kthread.c work->canceling++; work 1104 kernel/kthread.c kthread_flush_work(work); work 1106 kernel/kthread.c work->canceling--; work 1130 kernel/kthread.c bool kthread_cancel_work_sync(struct kthread_work *work) work 1132 kernel/kthread.c return __kthread_cancel_work_sync(work, false); work 1147 kernel/kthread.c return __kthread_cancel_work_sync(&dwork->work, true); work 1161 kernel/kthread.c KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), work 1165 kernel/kthread.c kthread_queue_work(worker, &fwork.work); work 672 kernel/livepatch/core.c static void klp_free_patch_work_fn(struct work_struct *work) work 675 kernel/livepatch/core.c container_of(work, struct klp_patch, free_work); work 32 kernel/livepatch/transition.c static void klp_transition_work_fn(struct work_struct *work) work 48 kernel/livepatch/transition.c static void klp_sync(struct work_struct *work) work 20 kernel/locking/test-ww_mutex.c struct work_struct work; work 31 kernel/locking/test-ww_mutex.c static void test_mutex_work(struct work_struct *work) work 33 
kernel/locking/test-ww_mutex.c struct test_mutex *mtx = container_of(work, typeof(*mtx), work); work 58 kernel/locking/test-ww_mutex.c INIT_WORK_ONSTACK(&mtx.work, test_mutex_work); work 64 kernel/locking/test-ww_mutex.c schedule_work(&mtx.work); work 92 kernel/locking/test-ww_mutex.c flush_work(&mtx.work); work 93 kernel/locking/test-ww_mutex.c destroy_work_on_stack(&mtx.work); work 148 kernel/locking/test-ww_mutex.c struct work_struct work; work 157 kernel/locking/test-ww_mutex.c static void test_abba_work(struct work_struct *work) work 159 kernel/locking/test-ww_mutex.c struct test_abba *abba = container_of(work, typeof(*abba), work); work 192 kernel/locking/test-ww_mutex.c INIT_WORK_ONSTACK(&abba.work, test_abba_work); work 197 kernel/locking/test-ww_mutex.c schedule_work(&abba.work); work 217 kernel/locking/test-ww_mutex.c flush_work(&abba.work); work 218 kernel/locking/test-ww_mutex.c destroy_work_on_stack(&abba.work); work 238 kernel/locking/test-ww_mutex.c struct work_struct work; work 246 kernel/locking/test-ww_mutex.c static void test_cycle_work(struct work_struct *work) work 248 kernel/locking/test-ww_mutex.c struct test_cycle *cycle = container_of(work, typeof(*cycle), work); work 300 kernel/locking/test-ww_mutex.c INIT_WORK(&cycle->work, test_cycle_work); work 305 kernel/locking/test-ww_mutex.c queue_work(wq, &cycles[n].work); work 343 kernel/locking/test-ww_mutex.c struct work_struct work; work 378 kernel/locking/test-ww_mutex.c static void stress_inorder_work(struct work_struct *work) work 380 kernel/locking/test-ww_mutex.c struct stress *stress = container_of(work, typeof(*stress), work); work 437 kernel/locking/test-ww_mutex.c static void stress_reorder_work(struct work_struct *work) work 439 kernel/locking/test-ww_mutex.c struct stress *stress = container_of(work, typeof(*stress), work); work 497 kernel/locking/test-ww_mutex.c static void stress_one_work(struct work_struct *work) work 499 kernel/locking/test-ww_mutex.c struct stress *stress = container_of(work, typeof(*stress), work); work 538 kernel/locking/test-ww_mutex.c void (*fn)(struct work_struct *work); work 563 kernel/locking/test-ww_mutex.c INIT_WORK(&stress->work, fn); work 568 kernel/locking/test-ww_mutex.c queue_work(wq, &stress->work); work 69 kernel/padata.c struct padata_parallel_queue, work); work 154 kernel/padata.c queue_work(pinst->parallel_wq, &queue->work); work 253 kernel/padata.c queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work); work 274 kernel/padata.c static void invoke_padata_reorder(struct work_struct *work) work 279 kernel/padata.c pd = container_of(work, struct parallel_data, reorder_work); work 292 kernel/padata.c squeue = container_of(serial_work, struct padata_serial_queue, work); work 408 kernel/padata.c INIT_WORK(&squeue->work, padata_serial_worker); work 423 kernel/padata.c INIT_WORK(&pqueue->work, padata_parallel_worker); work 62 kernel/pid_namespace.c static void proc_cleanup_work(struct work_struct *work) work 64 kernel/pid_namespace.c struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work); work 27 kernel/power/autosleep.c static void try_to_suspend(struct work_struct *work) work 367 kernel/power/qos.c static void pm_qos_work_fn(struct work_struct *work) work 369 kernel/power/qos.c struct pm_qos_request *req = container_of(to_delayed_work(work), work 371 kernel/power/qos.c work); work 400 kernel/power/qos.c INIT_DELAYED_WORK(&req->work, pm_qos_work_fn); work 428 kernel/power/qos.c cancel_delayed_work_sync(&req->work); work 450 kernel/power/qos.c 
cancel_delayed_work_sync(&req->work); work 459 kernel/power/qos.c schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us)); work 481 kernel/power/qos.c cancel_delayed_work_sync(&req->work); work 88 kernel/power/wakelock.c static void __wakelocks_gc(struct work_struct *work); work 103 kernel/power/wakelock.c static void __wakelocks_gc(struct work_struct *work) work 39 kernel/printk/printk_safe.c struct irq_work work; /* IRQ work that flushes the buffer */ work 54 kernel/printk/printk_safe.c irq_work_queue(&s->work); work 179 kernel/printk/printk_safe.c static void __printk_safe_flush(struct irq_work *work) work 184 kernel/printk/printk_safe.c container_of(work, struct printk_safe_seq_buf, work); work 249 kernel/printk/printk_safe.c __printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work); work 251 kernel/printk/printk_safe.c __printk_safe_flush(&per_cpu(safe_print_seq, cpu).work); work 396 kernel/printk/printk_safe.c init_irq_work(&s->work, __printk_safe_flush); work 400 kernel/printk/printk_safe.c init_irq_work(&s->work, __printk_safe_flush); work 45 kernel/rcu/srcutree.c static void srcu_invoke_callbacks(struct work_struct *work); work 47 kernel/rcu/srcutree.c static void process_srcu(struct work_struct *work); work 147 kernel/rcu/srcutree.c INIT_WORK(&sdp->work, srcu_invoke_callbacks); work 177 kernel/rcu/srcutree.c INIT_DELAYED_WORK(&ssp->work, process_srcu); work 378 kernel/rcu/srcutree.c flush_delayed_work(&ssp->work); work 383 kernel/rcu/srcutree.c flush_work(&sdp->work); work 462 kernel/rcu/srcutree.c queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); work 469 kernel/rcu/srcutree.c queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); work 685 kernel/rcu/srcutree.c queue_delayed_work(rcu_gp_wq, &ssp->work, work 687 kernel/rcu/srcutree.c else if (list_empty(&ssp->work.work.entry)) work 688 kernel/rcu/srcutree.c list_add(&ssp->work.work.entry, &srcu_boot_list); work 1157 kernel/rcu/srcutree.c static void srcu_invoke_callbacks(struct work_struct *work) work 1165 kernel/rcu/srcutree.c sdp = container_of(work, struct srcu_data, work); work 1226 kernel/rcu/srcutree.c queue_delayed_work(rcu_gp_wq, &ssp->work, delay); work 1232 kernel/rcu/srcutree.c static void process_srcu(struct work_struct *work) work 1236 kernel/rcu/srcutree.c ssp = container_of(work, struct srcu_struct, work.work); work 1309 kernel/rcu/srcutree.c work.work.entry); work 1311 kernel/rcu/srcutree.c list_del_init(&ssp->work.work.entry); work 1312 kernel/rcu/srcutree.c queue_work(rcu_gp_wq, &ssp->work.work); work 2444 kernel/rcu/tree.c char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); work 2452 kernel/rcu/tree.c work = *workp; work 2455 kernel/rcu/tree.c if (work) work 480 kernel/reboot.c static void poweroff_work_func(struct work_struct *work) work 502 kernel/reboot.c static void reboot_work_func(struct work_struct *work) work 336 kernel/relay.c static void wakeup_readers(struct irq_work *work) work 340 kernel/relay.c buf = container_of(work, struct rchan_buf, wakeup_work); work 154 kernel/sched/clock.c static void __sched_clock_work(struct work_struct *work) work 3616 kernel/sched/core.c struct delayed_work work; work 3648 kernel/sched/core.c static void sched_tick_remote(struct work_struct *work) work 3650 kernel/sched/core.c struct delayed_work *dwork = to_delayed_work(work); work 3651 kernel/sched/core.c struct tick_work *twork = container_of(dwork, struct tick_work, work); work 3718 kernel/sched/core.c INIT_DELAYED_WORK(&twork->work, sched_tick_remote); work 3719 kernel/sched/core.c 
queue_delayed_work(system_unbound_wq, &twork->work, HZ); work 37 kernel/sched/cpufreq_schedutil.c struct kthread_work work; work 549 kernel/sched/cpufreq_schedutil.c static void sugov_work(struct kthread_work *work) work 551 kernel/sched/cpufreq_schedutil.c struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work); work 581 kernel/sched/cpufreq_schedutil.c kthread_queue_work(&sg_policy->worker, &sg_policy->work); work 678 kernel/sched/cpufreq_schedutil.c kthread_init_work(&sg_policy->work, sugov_work); work 877 kernel/sched/cpufreq_schedutil.c kthread_cancel_work_sync(&sg_policy->work); work 922 kernel/sched/cpufreq_schedutil.c static void rebuild_sd_workfn(struct work_struct *work) work 2484 kernel/sched/fair.c static void task_numa_work(struct callback_head *work) work 2495 kernel/sched/fair.c SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); work 2497 kernel/sched/fair.c work->next = work; work 2675 kernel/sched/fair.c struct callback_head *work = &curr->numa_work; work 2681 kernel/sched/fair.c if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) work 2699 kernel/sched/fair.c task_work_add(curr, work, true); work 180 kernel/sched/psi.c static void psi_avgs_work(struct work_struct *work); work 410 kernel/sched/psi.c static void psi_avgs_work(struct work_struct *work) work 418 kernel/sched/psi.c dwork = to_delayed_work(work); work 579 kernel/sched/psi.c static void psi_poll_work(struct kthread_work *work) work 586 kernel/sched/psi.c dwork = container_of(work, struct kthread_delayed_work, work); work 2013 kernel/sched/rt.c void rto_push_irq_work_func(struct irq_work *work) work 2016 kernel/sched/rt.c container_of(work, struct root_domain, rto_push_work); work 800 kernel/sched/sched.h extern void rto_push_irq_work_func(struct irq_work *work); work 778 kernel/smp.c struct work_struct work; work 786 kernel/smp.c static void smp_call_on_cpu_callback(struct work_struct *work) work 790 kernel/smp.c sscs = container_of(work, struct smp_call_on_cpu_struct, work); work 809 kernel/smp.c INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback); work 814 kernel/smp.c queue_work_on(cpu, system_wq, &sscs.work); work 69 kernel/stop_machine.c struct cpu_stop_work *work, work 72 kernel/stop_machine.c list_add_tail(&work->list, &stopper->works); work 77 kernel/stop_machine.c static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) work 88 kernel/stop_machine.c __cpu_stop_queue_work(stopper, work, &wakeq); work 89 kernel/stop_machine.c else if (work->done) work 90 kernel/stop_machine.c cpu_stop_signal_done(work->done); work 126 kernel/stop_machine.c struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done }; work 129 kernel/stop_machine.c if (!cpu_stop_queue_work(cpu, &work)) work 377 kernel/stop_machine.c struct cpu_stop_work *work; work 390 kernel/stop_machine.c work = &per_cpu(cpu_stopper.stop_work, cpu); work 391 kernel/stop_machine.c work->fn = fn; work 392 kernel/stop_machine.c work->arg = arg; work 393 kernel/stop_machine.c work->done = done; work 394 kernel/stop_machine.c if (cpu_stop_queue_work(cpu, work)) work 500 kernel/stop_machine.c struct cpu_stop_work *work; work 503 kernel/stop_machine.c work = NULL; work 506 kernel/stop_machine.c work = list_first_entry(&stopper->works, work 508 kernel/stop_machine.c list_del_init(&work->list); work 512 kernel/stop_machine.c if (work) { work 513 kernel/stop_machine.c cpu_stop_fn_t fn = work->fn; work 514 kernel/stop_machine.c void *arg = work->arg; work 515 
kernel/stop_machine.c struct cpu_stop_done *done = work->done; work 28 kernel/task_work.c task_work_add(struct task_struct *task, struct callback_head *work, bool notify) work 36 kernel/task_work.c work->next = head; work 37 kernel/task_work.c } while (cmpxchg(&task->task_works, head, work) != head); work 59 kernel/task_work.c struct callback_head *work; work 71 kernel/task_work.c while ((work = READ_ONCE(*pprev))) { work 72 kernel/task_work.c if (work->func != func) work 73 kernel/task_work.c pprev = &work->next; work 74 kernel/task_work.c else if (cmpxchg(pprev, work, work->next) == work) work 79 kernel/task_work.c return work; work 93 kernel/task_work.c struct callback_head *work, *head, *next; work 102 kernel/task_work.c work = READ_ONCE(task->task_works); work 103 kernel/task_work.c head = !work && (task->flags & PF_EXITING) ? work 105 kernel/task_work.c } while (cmpxchg(&task->task_works, work, head) != work); work 108 kernel/task_work.c if (!work) work 112 kernel/task_work.c next = work->next; work 113 kernel/task_work.c work->func(work); work 114 kernel/task_work.c work = next; work 116 kernel/task_work.c } while (work); work 97 kernel/time/clocksource.c static void clocksource_watchdog_work(struct work_struct *work); work 127 kernel/time/clocksource.c static void clocksource_watchdog_work(struct work_struct *work) work 744 kernel/time/hrtimer.c static void clock_was_set_work(struct work_struct *work) work 497 kernel/time/ntp.c static void sync_hw_clock(struct work_struct *work); work 614 kernel/time/ntp.c static void sync_hw_clock(struct work_struct *work) work 230 kernel/time/tick-sched.c static void nohz_full_kick_func(struct irq_work *work) work 219 kernel/time/timer.c static void timer_update_keys(struct work_struct *work); work 238 kernel/time/timer.c static void timer_update_keys(struct work_struct *work) work 631 kernel/trace/bpf_trace.c struct send_signal_irq_work *work; work 633 kernel/trace/bpf_trace.c work = container_of(entry, struct send_signal_irq_work, irq_work); work 634 kernel/trace/bpf_trace.c group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID); work 639 kernel/trace/bpf_trace.c struct send_signal_irq_work *work = NULL; work 660 kernel/trace/bpf_trace.c work = this_cpu_ptr(&send_signal_work); work 661 kernel/trace/bpf_trace.c if (work->irq_work.flags & IRQ_WORK_BUSY) work 668 kernel/trace/bpf_trace.c work->task = current; work 669 kernel/trace/bpf_trace.c work->sig = sig; work 670 kernel/trace/bpf_trace.c irq_work_queue(&work->irq_work); work 1461 kernel/trace/bpf_trace.c struct send_signal_irq_work *work; work 1464 kernel/trace/bpf_trace.c work = per_cpu_ptr(&send_signal_work, cpu); work 1465 kernel/trace/bpf_trace.c init_irq_work(&work->irq_work, do_bpf_send_signal); work 155 kernel/trace/ftrace.c static void ftrace_sync(struct work_struct *work) work 31 kernel/trace/ring_buffer.c static void update_pages_handler(struct work_struct *work); work 404 kernel/trace/ring_buffer.c struct irq_work work; work 555 kernel/trace/ring_buffer.c static void rb_wake_up_waiters(struct irq_work *work) work 557 kernel/trace/ring_buffer.c struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); work 580 kernel/trace/ring_buffer.c struct rb_irq_work *work; work 589 kernel/trace/ring_buffer.c work = &buffer->irq_work; work 596 kernel/trace/ring_buffer.c work = &cpu_buffer->irq_work; work 602 kernel/trace/ring_buffer.c prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); work 604 kernel/trace/ring_buffer.c 
prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); work 627 kernel/trace/ring_buffer.c work->full_waiters_pending = true; work 629 kernel/trace/ring_buffer.c work->waiters_pending = true; work 666 kernel/trace/ring_buffer.c finish_wait(&work->full_waiters, &wait); work 668 kernel/trace/ring_buffer.c finish_wait(&work->waiters, &wait); work 691 kernel/trace/ring_buffer.c struct rb_irq_work *work; work 694 kernel/trace/ring_buffer.c work = &buffer->irq_work; work 700 kernel/trace/ring_buffer.c work = &cpu_buffer->irq_work; work 703 kernel/trace/ring_buffer.c poll_wait(filp, &work->waiters, poll_table); work 704 kernel/trace/ring_buffer.c work->waiters_pending = true; work 1305 kernel/trace/ring_buffer.c init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); work 1400 kernel/trace/ring_buffer.c init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); work 1697 kernel/trace/ring_buffer.c static void update_pages_handler(struct work_struct *work) work 1699 kernel/trace/ring_buffer.c struct ring_buffer_per_cpu *cpu_buffer = container_of(work, work 2621 kernel/trace/ring_buffer.c irq_work_queue(&buffer->irq_work.work); work 2627 kernel/trace/ring_buffer.c irq_work_queue(&cpu_buffer->irq_work.work); work 2650 kernel/trace/ring_buffer.c irq_work_queue(&cpu_buffer->irq_work.work); work 181 kernel/umh.c static void call_usermodehelper_exec_work(struct work_struct *work) work 184 kernel/umh.c container_of(work, struct subprocess_info, work); work 390 kernel/umh.c INIT_WORK(&sub_info->work, call_usermodehelper_exec_work); work 426 kernel/umh.c INIT_WORK(&sub_info->work, call_usermodehelper_exec_work); work 590 kernel/umh.c queue_work(system_unbound_wq, &sub_info->work); work 30 kernel/user_namespace.c static void free_user_ns(struct work_struct *work); work 120 kernel/user_namespace.c INIT_WORK(&ns->work, free_user_ns); work 174 kernel/user_namespace.c static void free_user_ns(struct work_struct *work) work 177 kernel/user_namespace.c container_of(work, struct user_namespace, work); work 205 kernel/user_namespace.c schedule_work(&ns->work); work 444 kernel/workqueue.c struct work_struct *work = addr; work 446 kernel/workqueue.c return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); work 455 kernel/workqueue.c struct work_struct *work = addr; work 459 kernel/workqueue.c cancel_work_sync(work); work 460 kernel/workqueue.c debug_object_init(work, &work_debug_descr); work 473 kernel/workqueue.c struct work_struct *work = addr; work 477 kernel/workqueue.c cancel_work_sync(work); work 478 kernel/workqueue.c debug_object_free(work, &work_debug_descr); work 493 kernel/workqueue.c static inline void debug_work_activate(struct work_struct *work) work 495 kernel/workqueue.c debug_object_activate(work, &work_debug_descr); work 498 kernel/workqueue.c static inline void debug_work_deactivate(struct work_struct *work) work 500 kernel/workqueue.c debug_object_deactivate(work, &work_debug_descr); work 503 kernel/workqueue.c void __init_work(struct work_struct *work, int onstack) work 506 kernel/workqueue.c debug_object_init_on_stack(work, &work_debug_descr); work 508 kernel/workqueue.c debug_object_init(work, &work_debug_descr); work 512 kernel/workqueue.c void destroy_work_on_stack(struct work_struct *work) work 514 kernel/workqueue.c debug_object_free(work, &work_debug_descr); work 518 kernel/workqueue.c void destroy_delayed_work_on_stack(struct delayed_work *work) work 520 kernel/workqueue.c destroy_timer_on_stack(&work->timer); work 521 kernel/workqueue.c debug_object_free(&work->work, 
&work_debug_descr); work 526 kernel/workqueue.c static inline void debug_work_activate(struct work_struct *work) { } work 527 kernel/workqueue.c static inline void debug_work_deactivate(struct work_struct *work) { } work 586 kernel/workqueue.c static int get_work_color(struct work_struct *work) work 588 kernel/workqueue.c return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & work 617 kernel/workqueue.c static inline void set_work_data(struct work_struct *work, unsigned long data, work 620 kernel/workqueue.c WARN_ON_ONCE(!work_pending(work)); work 621 kernel/workqueue.c atomic_long_set(&work->data, data | flags | work_static(work)); work 624 kernel/workqueue.c static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, work 627 kernel/workqueue.c set_work_data(work, (unsigned long)pwq, work 631 kernel/workqueue.c static void set_work_pool_and_keep_pending(struct work_struct *work, work 634 kernel/workqueue.c set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, work 638 kernel/workqueue.c static void set_work_pool_and_clear_pending(struct work_struct *work, work 648 kernel/workqueue.c set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); work 680 kernel/workqueue.c static void clear_work_data(struct work_struct *work) work 683 kernel/workqueue.c set_work_data(work, WORK_STRUCT_NO_POOL, 0); work 686 kernel/workqueue.c static struct pool_workqueue *get_work_pwq(struct work_struct *work) work 688 kernel/workqueue.c unsigned long data = atomic_long_read(&work->data); work 711 kernel/workqueue.c static struct worker_pool *get_work_pool(struct work_struct *work) work 713 kernel/workqueue.c unsigned long data = atomic_long_read(&work->data); work 736 kernel/workqueue.c static int get_work_pool_id(struct work_struct *work) work 738 kernel/workqueue.c unsigned long data = atomic_long_read(&work->data); work 747 kernel/workqueue.c static void mark_work_canceling(struct work_struct *work) work 749 kernel/workqueue.c unsigned long pool_id = get_work_pool_id(work); work 752 kernel/workqueue.c set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); work 755 kernel/workqueue.c static bool work_is_canceling(struct work_struct *work) work 757 kernel/workqueue.c unsigned long data = atomic_long_read(&work->data); work 1029 kernel/workqueue.c struct work_struct *work) work 1034 kernel/workqueue.c (unsigned long)work) work 1035 kernel/workqueue.c if (worker->current_work == work && work 1036 kernel/workqueue.c worker->current_func == work->func) work 1059 kernel/workqueue.c static void move_linked_works(struct work_struct *work, struct list_head *head, work 1068 kernel/workqueue.c list_for_each_entry_safe_from(work, n, NULL, entry) { work 1069 kernel/workqueue.c list_move_tail(&work->entry, head); work 1070 kernel/workqueue.c if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) work 1141 kernel/workqueue.c static void pwq_activate_delayed_work(struct work_struct *work) work 1143 kernel/workqueue.c struct pool_workqueue *pwq = get_work_pwq(work); work 1145 kernel/workqueue.c trace_workqueue_activate_work(work); work 1148 kernel/workqueue.c move_linked_works(work, &pwq->pool->worklist, NULL); work 1149 kernel/workqueue.c __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); work 1155 kernel/workqueue.c struct work_struct *work = list_first_entry(&pwq->delayed_works, work 1158 kernel/workqueue.c pwq_activate_delayed_work(work); work 1235 kernel/workqueue.c static int try_to_grab_pending(struct work_struct *work, bool is_dwork, work 1245 
kernel/workqueue.c struct delayed_work *dwork = to_delayed_work(work); work 1257 kernel/workqueue.c if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) work 1265 kernel/workqueue.c pool = get_work_pool(work); work 1278 kernel/workqueue.c pwq = get_work_pwq(work); work 1280 kernel/workqueue.c debug_work_deactivate(work); work 1289 kernel/workqueue.c if (*work_data_bits(work) & WORK_STRUCT_DELAYED) work 1290 kernel/workqueue.c pwq_activate_delayed_work(work); work 1292 kernel/workqueue.c list_del_init(&work->entry); work 1293 kernel/workqueue.c pwq_dec_nr_in_flight(pwq, get_work_color(work)); work 1296 kernel/workqueue.c set_work_pool_and_keep_pending(work, pool->id); work 1306 kernel/workqueue.c if (work_is_canceling(work)) work 1325 kernel/workqueue.c static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, work 1331 kernel/workqueue.c set_work_pwq(work, pwq, extra_flags); work 1332 kernel/workqueue.c list_add_tail(&work->entry, head); work 1396 kernel/workqueue.c struct work_struct *work) work 1412 kernel/workqueue.c debug_work_activate(work); work 1436 kernel/workqueue.c last_pool = get_work_pool(work); work 1442 kernel/workqueue.c worker = find_worker_executing_work(last_pool, work); work 1475 kernel/workqueue.c trace_workqueue_queue_work(req_cpu, pwq, work); work 1477 kernel/workqueue.c if (WARN_ON(!list_empty(&work->entry))) work 1484 kernel/workqueue.c trace_workqueue_activate_work(work); work 1494 kernel/workqueue.c insert_work(pwq, work, worklist, work_flags); work 1513 kernel/workqueue.c struct work_struct *work) work 1520 kernel/workqueue.c if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { work 1521 kernel/workqueue.c __queue_work(cpu, wq, work); work 1584 kernel/workqueue.c struct work_struct *work) work 1602 kernel/workqueue.c if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { work 1605 kernel/workqueue.c __queue_work(cpu, wq, work); work 1619 kernel/workqueue.c __queue_work(dwork->cpu, dwork->wq, &dwork->work); work 1627 kernel/workqueue.c struct work_struct *work = &dwork->work; work 1632 kernel/workqueue.c WARN_ON_ONCE(!list_empty(&work->entry)); work 1641 kernel/workqueue.c __queue_work(cpu, wq, &dwork->work); work 1669 kernel/workqueue.c struct work_struct *work = &dwork->work; work 1676 kernel/workqueue.c if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { work 1711 kernel/workqueue.c ret = try_to_grab_pending(&dwork->work, true, &flags); work 1730 kernel/workqueue.c __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); work 1746 kernel/workqueue.c struct work_struct *work = &rwork->work; work 1748 kernel/workqueue.c if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { work 2014 kernel/workqueue.c static void send_mayday(struct work_struct *work) work 2016 kernel/workqueue.c struct pool_workqueue *pwq = get_work_pwq(work); work 2040 kernel/workqueue.c struct work_struct *work; work 2052 kernel/workqueue.c list_for_each_entry(work, &pool->worklist, entry) work 2053 kernel/workqueue.c send_mayday(work); work 2165 kernel/workqueue.c static void process_one_work(struct worker *worker, struct work_struct *work) work 2169 kernel/workqueue.c struct pool_workqueue *pwq = get_work_pwq(work); work 2184 kernel/workqueue.c lockdep_copy_map(&lockdep_map, &work->lockdep_map); work 2196 kernel/workqueue.c collision = find_worker_executing_work(pool, work); work 2198 kernel/workqueue.c move_linked_works(work, &collision->scheduled, NULL); work 2203 
kernel/workqueue.c debug_work_deactivate(work); work 2204 kernel/workqueue.c hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); work 2205 kernel/workqueue.c worker->current_work = work; work 2206 kernel/workqueue.c worker->current_func = work->func; work 2208 kernel/workqueue.c work_color = get_work_color(work); work 2216 kernel/workqueue.c list_del_init(&work->entry); work 2243 kernel/workqueue.c set_work_pool_and_clear_pending(work, pool->id); work 2271 kernel/workqueue.c trace_workqueue_execute_start(work); work 2272 kernel/workqueue.c worker->current_func(work); work 2277 kernel/workqueue.c trace_workqueue_execute_end(work); work 2332 kernel/workqueue.c struct work_struct *work = list_first_entry(&worker->scheduled, work 2334 kernel/workqueue.c process_one_work(worker, work); work 2410 kernel/workqueue.c struct work_struct *work = work 2416 kernel/workqueue.c if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { work 2418 kernel/workqueue.c process_one_work(worker, work); work 2422 kernel/workqueue.c move_linked_works(work, &worker->scheduled, NULL); work 2498 kernel/workqueue.c struct work_struct *work, *n; work 2515 kernel/workqueue.c list_for_each_entry_safe(work, n, &pool->worklist, entry) { work 2516 kernel/workqueue.c if (get_work_pwq(work) == pwq) { work 2519 kernel/workqueue.c move_linked_works(work, scheduled, &n); work 2618 kernel/workqueue.c struct work_struct work; work 2623 kernel/workqueue.c static void wq_barrier_func(struct work_struct *work) work 2625 kernel/workqueue.c struct wq_barrier *barr = container_of(work, struct wq_barrier, work); work 2666 kernel/workqueue.c INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); work 2667 kernel/workqueue.c __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); work 2688 kernel/workqueue.c debug_work_activate(&barr->work); work 2689 kernel/workqueue.c insert_work(pwq, &barr->work, head, work 2977 kernel/workqueue.c static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, work 2987 kernel/workqueue.c pool = get_work_pool(work); work 2995 kernel/workqueue.c pwq = get_work_pwq(work); work 3000 kernel/workqueue.c worker = find_worker_executing_work(pool, work); work 3006 kernel/workqueue.c check_flush_dependency(pwq->wq, work); work 3008 kernel/workqueue.c insert_wq_barrier(pwq, barr, work, worker); work 3033 kernel/workqueue.c static bool __flush_work(struct work_struct *work, bool from_cancel) work 3040 kernel/workqueue.c if (WARN_ON(!work->func)) work 3044 kernel/workqueue.c lock_map_acquire(&work->lockdep_map); work 3045 kernel/workqueue.c lock_map_release(&work->lockdep_map); work 3048 kernel/workqueue.c if (start_flush_work(work, &barr, from_cancel)) { work 3050 kernel/workqueue.c destroy_work_on_stack(&barr.work); work 3068 kernel/workqueue.c bool flush_work(struct work_struct *work) work 3070 kernel/workqueue.c return __flush_work(work, false); work 3076 kernel/workqueue.c struct work_struct *work; work 3083 kernel/workqueue.c if (cwait->work != key) work 3088 kernel/workqueue.c static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) work 3095 kernel/workqueue.c ret = try_to_grab_pending(work, is_dwork, &flags); work 3117 kernel/workqueue.c cwait.work = work; work 3121 kernel/workqueue.c if (work_is_canceling(work)) work 3128 kernel/workqueue.c mark_work_canceling(work); work 3136 kernel/workqueue.c __flush_work(work, true); work 3138 kernel/workqueue.c clear_work_data(work); work 3147 kernel/workqueue.c __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); work 
3170 kernel/workqueue.c bool cancel_work_sync(struct work_struct *work) work 3172 kernel/workqueue.c return __cancel_work_timer(work, false); work 3192 kernel/workqueue.c __queue_work(dwork->cpu, dwork->wq, &dwork->work); work 3194 kernel/workqueue.c return flush_work(&dwork->work); work 3208 kernel/workqueue.c if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { work 3210 kernel/workqueue.c flush_work(&rwork->work); work 3213 kernel/workqueue.c return flush_work(&rwork->work); work 3218 kernel/workqueue.c static bool __cancel_work(struct work_struct *work, bool is_dwork) work 3224 kernel/workqueue.c ret = try_to_grab_pending(work, is_dwork, &flags); work 3230 kernel/workqueue.c set_work_pool_and_clear_pending(work, get_work_pool_id(work)); work 3253 kernel/workqueue.c return __cancel_work(&dwork->work, true); work 3268 kernel/workqueue.c return __cancel_work_timer(&dwork->work, true); work 3295 kernel/workqueue.c struct work_struct *work = per_cpu_ptr(works, cpu); work 3297 kernel/workqueue.c INIT_WORK(work, func); work 3298 kernel/workqueue.c schedule_work_on(cpu, work); work 3324 kernel/workqueue.c fn(&ew->work); work 3328 kernel/workqueue.c INIT_WORK(&ew->work, fn); work 3329 kernel/workqueue.c schedule_work(&ew->work); work 3657 kernel/workqueue.c static void pwq_unbound_release_workfn(struct work_struct *work) work 3659 kernel/workqueue.c struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, work 4537 kernel/workqueue.c unsigned int work_busy(struct work_struct *work) work 4543 kernel/workqueue.c if (work_pending(work)) work 4547 kernel/workqueue.c pool = get_work_pool(work); work 4550 kernel/workqueue.c if (find_worker_executing_work(pool, work)) work 4640 kernel/workqueue.c static void pr_cont_work(bool comma, struct work_struct *work) work 4642 kernel/workqueue.c if (work->func == wq_barrier_func) { work 4645 kernel/workqueue.c barr = container_of(work, struct wq_barrier, work); work 4650 kernel/workqueue.c pr_cont("%s %ps", comma ? 
"," : "", work->func); work 4657 kernel/workqueue.c struct work_struct *work; work 4687 kernel/workqueue.c list_for_each_entry(work, &worker->scheduled, entry) work 4688 kernel/workqueue.c pr_cont_work(false, work); work 4694 kernel/workqueue.c list_for_each_entry(work, &pool->worklist, entry) { work 4695 kernel/workqueue.c if (get_work_pwq(work) == pwq) { work 4704 kernel/workqueue.c list_for_each_entry(work, &pool->worklist, entry) { work 4705 kernel/workqueue.c if (get_work_pwq(work) != pwq) work 4708 kernel/workqueue.c pr_cont_work(comma, work); work 4709 kernel/workqueue.c comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); work 4718 kernel/workqueue.c list_for_each_entry(work, &pwq->delayed_works, entry) { work 4719 kernel/workqueue.c pr_cont_work(comma, work); work 4720 kernel/workqueue.c comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); work 5069 kernel/workqueue.c struct work_struct work; work 5075 kernel/workqueue.c static void work_for_cpu_fn(struct work_struct *work) work 5077 kernel/workqueue.c struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); work 5097 kernel/workqueue.c INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); work 5098 kernel/workqueue.c schedule_work_on(cpu, &wfc.work); work 5099 kernel/workqueue.c flush_work(&wfc.work); work 5100 kernel/workqueue.c destroy_work_on_stack(&wfc.work); work 102 lib/debugobjects.c static void free_obj_work(struct work_struct *work); work 291 lib/debugobjects.c static void free_obj_work(struct work_struct *work) work 347 lib/debugobjects.c bool work; work 377 lib/debugobjects.c work = (obj_pool_free > debug_objects_pool_size) && obj_cache && work 381 lib/debugobjects.c if (work) { work 233 lib/dim/net_dim.c schedule_work(&dim->work); work 94 lib/dim/rdma_dim.c schedule_work(&dim->work); work 88 lib/irq_poll.c int work, weight; work 108 lib/irq_poll.c work = 0; work 110 lib/irq_poll.c work = iop->poll(iop, weight); work 112 lib/irq_poll.c budget -= work; work 124 lib/irq_poll.c if (work >= weight) { work 704 lib/kobject.c static void kobject_delayed_cleanup(struct work_struct *work) work 706 lib/kobject.c kobject_cleanup(container_of(to_delayed_work(work), work 15 lib/livepatch/test_klp_callbacks_busy.c static void busymod_work_func(struct work_struct *work); work 16 lib/livepatch/test_klp_callbacks_busy.c static DECLARE_DELAYED_WORK(work, busymod_work_func); work 18 lib/livepatch/test_klp_callbacks_busy.c static void busymod_work_func(struct work_struct *work) work 28 lib/livepatch/test_klp_callbacks_busy.c schedule_delayed_work(&work, work 35 lib/livepatch/test_klp_callbacks_busy.c cancel_delayed_work_sync(&work); work 55 lib/livepatch/test_klp_callbacks_demo.c static void patched_work_func(struct work_struct *work) work 8 lib/once.c struct work_struct work; work 14 lib/once.c struct once_work *work; work 16 lib/once.c work = container_of(w, struct once_work, work); work 17 lib/once.c BUG_ON(!static_key_enabled(work->key)); work 18 lib/once.c static_branch_disable(work->key); work 19 lib/once.c kfree(work); work 30 lib/once.c INIT_WORK(&w->work, once_deferred); work 32 lib/once.c schedule_work(&w->work); work 398 lib/rhashtable.c static void rht_deferred_worker(struct work_struct *work) work 404 lib/rhashtable.c ht = container_of(work, struct rhashtable, run_work); work 699 lib/vsprintf.c static void enable_ptr_key_workfn(struct work_struct *work) work 481 lib/zlib_inflate/inflate.c &(state->lenbits), state->work); work 547 lib/zlib_inflate/inflate.c &(state->lenbits), state->work); work 556 
lib/zlib_inflate/inflate.c &(state->next), &(state->distbits), state->work); work 108 lib/zlib_inflate/inflate.h unsigned short work[288]; /* work area for code table building */ work 24 lib/zlib_inflate/inftrees.c code **table, unsigned *bits, unsigned short *work) work 133 lib/zlib_inflate/inftrees.c if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym; work 170 lib/zlib_inflate/inftrees.c base = extra = work; /* dummy value--not used */ work 205 lib/zlib_inflate/inftrees.c if ((int)(work[sym]) < end) { work 207 lib/zlib_inflate/inftrees.c this.val = work[sym]; work 209 lib/zlib_inflate/inftrees.c else if ((int)(work[sym]) > end) { work 210 lib/zlib_inflate/inftrees.c this.op = (unsigned char)(extra[work[sym]]); work 211 lib/zlib_inflate/inftrees.c this.val = base[work[sym]]; work 242 lib/zlib_inflate/inftrees.c len = lens[work[sym]]; work 58 lib/zlib_inflate/inftrees.h unsigned *bits, unsigned short *work); work 481 mm/backing-dev.c static void cgwb_release_workfn(struct work_struct *work) work 483 mm/backing-dev.c struct bdi_writeback *wb = container_of(work, struct bdi_writeback, work 1334 mm/hugetlb.c static void free_hpage_workfn(struct work_struct *work) work 1862 mm/kmemleak.c static void kmemleak_do_cleanup(struct work_struct *work) work 2165 mm/memcontrol.c struct work_struct work; work 2297 mm/memcontrol.c drain_local_stock(&stock->work); work 2299 mm/memcontrol.c schedule_work_on(cpu, &stock->work); work 2366 mm/memcontrol.c static void high_work_func(struct work_struct *work) work 2370 mm/memcontrol.c memcg = container_of(work, struct mem_cgroup, high_work); work 2870 mm/memcontrol.c struct work_struct work; work 2876 mm/memcontrol.c container_of(w, struct memcg_kmem_cache_create_work, work); work 2903 mm/memcontrol.c INIT_WORK(&cw->work, memcg_kmem_cache_create_func); work 2905 mm/memcontrol.c queue_work(memcg_kmem_cache_wq, &cw->work); work 4621 mm/memcontrol.c static void memcg_event_remove(struct work_struct *work) work 4624 mm/memcontrol.c container_of(work, struct mem_cgroup_event, remove); work 6987 mm/memcontrol.c INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, work 1436 mm/memory-failure.c struct work_struct work; work 1469 mm/memory-failure.c schedule_work_on(smp_processor_id(), &mf_cpu->work); work 1478 mm/memory-failure.c static void memory_failure_work_func(struct work_struct *work) work 1508 mm/memory-failure.c INIT_WORK(&mf_cpu->work, memory_failure_work_func); work 104 mm/page_alloc.c struct work_struct work; work 2864 mm/page_alloc.c static void drain_local_pages_wq(struct work_struct *work) work 2868 mm/page_alloc.c drain = container_of(work, struct pcpu_drain, work); work 2952 mm/page_alloc.c INIT_WORK(&drain->work, drain_local_pages_wq); work 2953 mm/page_alloc.c queue_work_on(cpu, mm_percpu_wq, &drain->work); work 2956 mm/page_alloc.c flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work); work 187 mm/percpu.c static void pcpu_balance_workfn(struct work_struct *work); work 1832 mm/percpu.c static void pcpu_balance_workfn(struct work_struct *work) work 514 mm/slab.c if (reap_work->work.func == NULL) { work 1083 mm/slab.c per_cpu(slab_reap_work, cpu).work.func = NULL; work 3984 mm/slab.c struct delayed_work *work = to_delayed_work(w); work 4032 mm/slab.c schedule_delayed_work_on(smp_processor_id(), work, work 91 mm/slab.h struct work_struct work; work 45 mm/slab_common.c static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work); work 569 mm/slab_common.c static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work) work 710 
mm/slab_common.c static void kmemcg_workfn(struct work_struct *work) work 712 mm/slab_common.c struct kmem_cache *s = container_of(work, struct kmem_cache, work 713 mm/slab_common.c memcg_params.work); work 736 mm/slab_common.c INIT_WORK(&s->memcg_params.work, kmemcg_workfn); work 737 mm/slab_common.c queue_work(memcg_kmem_cache_wq, &s->memcg_params.work); work 756 mm/slab_common.c INIT_WORK(&s->memcg_params.work, kmemcg_workfn); work 757 mm/slab_common.c queue_work(memcg_kmem_cache_wq, &s->memcg_params.work); work 5721 mm/slub.c static void sysfs_slab_remove_workfn(struct work_struct *work) work 5724 mm/slub.c container_of(work, struct kmem_cache, kobj_remove_work); work 726 mm/swap.c struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); work 734 mm/swap.c INIT_WORK(work, lru_add_drain_per_cpu); work 735 mm/swap.c queue_work_on(cpu, mm_percpu_wq, work); work 492 mm/swapfile.c static void swap_discard_work(struct work_struct *work) work 496 mm/swapfile.c si = container_of(work, struct swap_info_struct, discard_work); work 70 mm/vmpressure.c static struct vmpressure *work_to_vmpressure(struct work_struct *work) work 72 mm/vmpressure.c return container_of(work, struct vmpressure, work); work 181 mm/vmpressure.c static void vmpressure_work_fn(struct work_struct *work) work 183 mm/vmpressure.c struct vmpressure *vmpr = work_to_vmpressure(work); work 278 mm/vmpressure.c schedule_work(&vmpr->work); work 454 mm/vmpressure.c INIT_WORK(&vmpr->work, vmpressure_work_fn); work 470 mm/vmpressure.c flush_work(&vmpr->work); work 1753 mm/vmstat.c static void refresh_vm_stats(struct work_struct *work) work 117 mm/z3fold.c struct work_struct work; work 166 mm/z3fold.c struct work_struct work; work 328 mm/z3fold.c INIT_WORK(&zhdr->work, compact_page_work); work 457 mm/z3fold.c queue_work(pool->release_wq, &pool->work); work 492 mm/z3fold.c struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); work 504 mm/z3fold.c cancel_work_sync(&zhdr->work); work 649 mm/z3fold.c work); work 802 mm/z3fold.c INIT_WORK(&pool->work, free_pages_work); work 924 mm/z3fold.c cancel_work_sync(&zhdr->work); work 1071 mm/z3fold.c queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); work 1394 mm/z3fold.c if (work_pending(&zhdr->work)) { work 1404 mm/z3fold.c INIT_WORK(&new_zhdr->work, compact_page_work); work 1430 mm/z3fold.c queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); work 2188 mm/zsmalloc.c static void async_free_zspage(struct work_struct *work) work 2196 mm/zsmalloc.c struct zs_pool *pool = container_of(work, struct zs_pool, work 126 mm/zswap.c struct work_struct work; work 625 mm/zswap.c static void __zswap_pool_release(struct work_struct *work) work 627 mm/zswap.c struct zswap_pool *pool = container_of(work, typeof(*pool), work); work 650 mm/zswap.c INIT_WORK(&pool->work, __zswap_pool_release); work 651 mm/zswap.c schedule_work(&pool->work); work 148 net/9p/trans_fd.c static void p9_poll_workfn(struct work_struct *work); work 275 net/9p/trans_fd.c static void p9_read_work(struct work_struct *work) work 281 net/9p/trans_fd.c m = container_of(work, struct p9_conn, rq); work 436 net/9p/trans_fd.c static void p9_write_work(struct work_struct *work) work 443 net/9p/trans_fd.c m = container_of(work, struct p9_conn, wq); work 1108 net/9p/trans_fd.c static void p9_poll_workfn(struct work_struct *work) work 70 net/9p/trans_xen.c struct work_struct work; work 193 net/9p/trans_xen.c static void p9_xen_response(struct work_struct *work) work 202 net/9p/trans_xen.c ring = container_of(work, 
struct xen_9pfs_dataring, work); work 263 net/9p/trans_xen.c schedule_work(&ring->work); work 334 net/9p/trans_xen.c INIT_WORK(&ring->work, p9_xen_response); work 1229 net/atm/lec.c static void lec_arp_check_expire(struct work_struct *work); work 1669 net/atm/lec.c static void lec_arp_check_expire(struct work_struct *work) work 1673 net/atm/lec.c container_of(work, struct lec_priv, lec_arp_work.work); work 61 net/batman-adv/bat_iv_ogm.c static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work); work 1693 net/batman-adv/bat_iv_ogm.c static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work) work 1700 net/batman-adv/bat_iv_ogm.c delayed_work = to_delayed_work(work); work 167 net/batman-adv/bat_v_elp.c void batadv_v_elp_throughput_metric_update(struct work_struct *work) work 172 net/batman-adv/bat_v_elp.c neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v, work 255 net/batman-adv/bat_v_elp.c static void batadv_v_elp_periodic_work(struct work_struct *work) work 266 net/batman-adv/bat_v_elp.c bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work); work 22 net/batman-adv/bat_v_elp.h void batadv_v_elp_throughput_metric_update(struct work_struct *work); work 368 net/batman-adv/bat_v_ogm.c static void batadv_v_ogm_send(struct work_struct *work) work 373 net/batman-adv/bat_v_ogm.c bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work); work 387 net/batman-adv/bat_v_ogm.c void batadv_v_ogm_aggr_work(struct work_struct *work) work 392 net/batman-adv/bat_v_ogm.c batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work); work 18 net/batman-adv/bat_v_ogm.h void batadv_v_ogm_aggr_work(struct work_struct *work); work 54 net/batman-adv/bridge_loop_avoidance.c static void batadv_bla_periodic_work(struct work_struct *work); work 453 net/batman-adv/bridge_loop_avoidance.c static void batadv_bla_loopdetect_report(struct work_struct *work) work 459 net/batman-adv/bridge_loop_avoidance.c backbone_gw = container_of(work, struct batadv_bla_backbone_gw, work 1419 net/batman-adv/bridge_loop_avoidance.c static void batadv_bla_periodic_work(struct work_struct *work) work 1431 net/batman-adv/bridge_loop_avoidance.c delayed_work = to_delayed_work(work); work 1432 net/batman-adv/bridge_loop_avoidance.c priv_bla = container_of(delayed_work, struct batadv_priv_bla, work); work 1504 net/batman-adv/bridge_loop_avoidance.c queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work, work 1571 net/batman-adv/bridge_loop_avoidance.c INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work); work 1573 net/batman-adv/bridge_loop_avoidance.c queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work, work 1735 net/batman-adv/bridge_loop_avoidance.c cancel_delayed_work_sync(&bat_priv->bla.work); work 97 net/batman-adv/distributed-arp-table.c static void batadv_dat_purge(struct work_struct *work); work 105 net/batman-adv/distributed-arp-table.c INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge); work 106 net/batman-adv/distributed-arp-table.c queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work, work 194 net/batman-adv/distributed-arp-table.c static void batadv_dat_purge(struct work_struct *work) work 200 net/batman-adv/distributed-arp-table.c delayed_work = to_delayed_work(work); work 201 net/batman-adv/distributed-arp-table.c priv_dat = container_of(delayed_work, struct batadv_priv_dat, work); work 838 net/batman-adv/distributed-arp-table.c cancel_delayed_work_sync(&bat_priv->dat.work); work 63 
net/batman-adv/multicast.c static void batadv_mcast_mla_update(struct work_struct *work); work 71 net/batman-adv/multicast.c queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work, work 938 net/batman-adv/multicast.c static void batadv_mcast_mla_update(struct work_struct *work) work 944 net/batman-adv/multicast.c delayed_work = to_delayed_work(work); work 945 net/batman-adv/multicast.c priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work); work 2046 net/batman-adv/multicast.c INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update); work 2401 net/batman-adv/multicast.c cancel_delayed_work_sync(&bat_priv->mcast.work); work 53 net/batman-adv/network-coding.c static void batadv_nc_worker(struct work_struct *work); work 79 net/batman-adv/network-coding.c queue_delayed_work(batadv_event_workqueue, &bat_priv->nc.work, work 164 net/batman-adv/network-coding.c INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); work 707 net/batman-adv/network-coding.c static void batadv_nc_worker(struct work_struct *work) work 714 net/batman-adv/network-coding.c delayed_work = to_delayed_work(work); work 715 net/batman-adv/network-coding.c priv_nc = container_of(delayed_work, struct batadv_priv_nc, work); work 1872 net/batman-adv/network-coding.c cancel_delayed_work_sync(&bat_priv->nc.work); work 87 net/batman-adv/originator.c static void batadv_purge_orig(struct work_struct *work); work 1372 net/batman-adv/originator.c static void batadv_purge_orig(struct work_struct *work) work 1377 net/batman-adv/originator.c delayed_work = to_delayed_work(work); work 43 net/batman-adv/send.c static void batadv_send_outstanding_bcast_packet(struct work_struct *work); work 849 net/batman-adv/send.c static void batadv_send_outstanding_bcast_packet(struct work_struct *work) work 865 net/batman-adv/send.c delayed_work = to_delayed_work(work); work 1027 net/batman-adv/sysfs.c static void batadv_store_mesh_iface_work(struct work_struct *work) work 1032 net/batman-adv/sysfs.c store_work = container_of(work, struct batadv_store_mesh_work, work); work 1071 net/batman-adv/sysfs.c INIT_WORK(&store_work->work, batadv_store_mesh_iface_work); work 1076 net/batman-adv/sysfs.c queue_work(batadv_event_workqueue, &store_work->work); work 444 net/batman-adv/tp_meter.c static void batadv_tp_sender_finish(struct work_struct *work) work 449 net/batman-adv/tp_meter.c delayed_work = to_delayed_work(work); work 69 net/batman-adv/translation-table.c static void batadv_tt_purge(struct work_struct *work); work 3791 net/batman-adv/translation-table.c static void batadv_tt_purge(struct work_struct *work) work 3797 net/batman-adv/translation-table.c delayed_work = to_delayed_work(work); work 3798 net/batman-adv/translation-table.c priv_tt = container_of(delayed_work, struct batadv_priv_tt, work); work 3806 net/batman-adv/translation-table.c queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work, work 3821 net/batman-adv/translation-table.c cancel_delayed_work_sync(&bat_priv->tt.work); work 4417 net/batman-adv/translation-table.c INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge); work 4418 net/batman-adv/translation-table.c queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work, work 1024 net/batman-adv/types.h struct delayed_work work; work 1082 net/batman-adv/types.h struct delayed_work work; work 1181 net/batman-adv/types.h struct delayed_work work; work 1295 net/batman-adv/types.h struct delayed_work work; work 1304 net/batman-adv/types.h struct delayed_work work; work 2470 
net/batman-adv/types.h struct work_struct work; work 617 net/bluetooth/6lowpan.c static void do_notify_peers(struct work_struct *work) work 619 net/bluetooth/6lowpan.c struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev, work 620 net/bluetooth/6lowpan.c notify_peers.work); work 771 net/bluetooth/6lowpan.c static void delete_netdev(struct work_struct *work) work 773 net/bluetooth/6lowpan.c struct lowpan_btle_dev *entry = container_of(work, work 1056 net/bluetooth/6lowpan.c struct work_struct work; work 1060 net/bluetooth/6lowpan.c static void do_enable_set(struct work_struct *work) work 1062 net/bluetooth/6lowpan.c struct set_enable *set_enable = container_of(work, work 1063 net/bluetooth/6lowpan.c struct set_enable, work); work 1092 net/bluetooth/6lowpan.c INIT_WORK(&set_enable->work, do_enable_set); work 1094 net/bluetooth/6lowpan.c schedule_work(&set_enable->work); work 137 net/bluetooth/hci_conn.c static void le_scan_cleanup(struct work_struct *work) work 139 net/bluetooth/hci_conn.c struct hci_conn *conn = container_of(work, struct hci_conn, work 396 net/bluetooth/hci_conn.c static void hci_conn_timeout(struct work_struct *work) work 398 net/bluetooth/hci_conn.c struct hci_conn *conn = container_of(work, struct hci_conn, work 399 net/bluetooth/hci_conn.c disc_work.work); work 427 net/bluetooth/hci_conn.c static void hci_conn_idle(struct work_struct *work) work 429 net/bluetooth/hci_conn.c struct hci_conn *conn = container_of(work, struct hci_conn, work 430 net/bluetooth/hci_conn.c idle_work.work); work 461 net/bluetooth/hci_conn.c static void hci_conn_auto_accept(struct work_struct *work) work 463 net/bluetooth/hci_conn.c struct hci_conn *conn = container_of(work, struct hci_conn, work 464 net/bluetooth/hci_conn.c auto_accept_work.work); work 470 net/bluetooth/hci_conn.c static void le_conn_timeout(struct work_struct *work) work 472 net/bluetooth/hci_conn.c struct hci_conn *conn = container_of(work, struct hci_conn, work 473 net/bluetooth/hci_conn.c le_conn_timeout.work); work 46 net/bluetooth/hci_core.c static void hci_rx_work(struct work_struct *work); work 47 net/bluetooth/hci_core.c static void hci_cmd_work(struct work_struct *work); work 48 net/bluetooth/hci_core.c static void hci_tx_work(struct work_struct *work); work 2175 net/bluetooth/hci_core.c static void hci_power_on(struct work_struct *work) work 2177 net/bluetooth/hci_core.c struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); work 2247 net/bluetooth/hci_core.c static void hci_power_off(struct work_struct *work) work 2249 net/bluetooth/hci_core.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 2250 net/bluetooth/hci_core.c power_off.work); work 2257 net/bluetooth/hci_core.c static void hci_error_reset(struct work_struct *work) work 2259 net/bluetooth/hci_core.c struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); work 2630 net/bluetooth/hci_core.c static void hci_cmd_timeout(struct work_struct *work) work 2632 net/bluetooth/hci_core.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 2633 net/bluetooth/hci_core.c cmd_timer.work); work 2827 net/bluetooth/hci_core.c static void adv_instance_rpa_expired(struct work_struct *work) work 2829 net/bluetooth/hci_core.c struct adv_info *adv_instance = container_of(work, struct adv_info, work 2830 net/bluetooth/hci_core.c rpa_expired_cb.work); work 4261 net/bluetooth/hci_core.c static void hci_tx_work(struct work_struct *work) work 4263 net/bluetooth/hci_core.c struct hci_dev *hdev = 
container_of(work, struct hci_dev, tx_work); work 4449 net/bluetooth/hci_core.c static void hci_rx_work(struct work_struct *work) work 4451 net/bluetooth/hci_core.c struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); work 4511 net/bluetooth/hci_core.c static void hci_cmd_work(struct work_struct *work) work 4513 net/bluetooth/hci_core.c struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); work 1441 net/bluetooth/hci_request.c static void adv_timeout_expire(struct work_struct *work) work 1443 net/bluetooth/hci_request.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 1444 net/bluetooth/hci_request.c adv_instance_expire.work); work 2042 net/bluetooth/hci_request.c static void scan_update_work(struct work_struct *work) work 2044 net/bluetooth/hci_request.c struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update); work 2080 net/bluetooth/hci_request.c static void connectable_update_work(struct work_struct *work) work 2082 net/bluetooth/hci_request.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 2193 net/bluetooth/hci_request.c static void discoverable_update_work(struct work_struct *work) work 2195 net/bluetooth/hci_request.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 2304 net/bluetooth/hci_request.c static void bg_scan_update(struct work_struct *work) work 2306 net/bluetooth/hci_request.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 2358 net/bluetooth/hci_request.c static void le_scan_disable_work(struct work_struct *work) work 2360 net/bluetooth/hci_request.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 2361 net/bluetooth/hci_request.c le_scan_disable.work); work 2448 net/bluetooth/hci_request.c static void le_scan_restart_work(struct work_struct *work) work 2450 net/bluetooth/hci_request.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 2451 net/bluetooth/hci_request.c le_scan_restart.work); work 2683 net/bluetooth/hci_request.c static void discov_update(struct work_struct *work) work 2685 net/bluetooth/hci_request.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 2710 net/bluetooth/hci_request.c static void discov_off(struct work_struct *work) work 2712 net/bluetooth/hci_request.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 2713 net/bluetooth/hci_request.c discov_off.work); work 889 net/bluetooth/hidp/core.c static void hidp_session_dev_work(struct work_struct *work) work 891 net/bluetooth/hidp/core.c struct hidp_session *session = container_of(work, work 412 net/bluetooth/l2cap_core.c static void l2cap_chan_timeout(struct work_struct *work) work 414 net/bluetooth/l2cap_core.c struct l2cap_chan *chan = container_of(work, struct l2cap_chan, work 415 net/bluetooth/l2cap_core.c chan_timer.work); work 665 net/bluetooth/l2cap_core.c static void l2cap_conn_update_id_addr(struct work_struct *work) work 667 net/bluetooth/l2cap_core.c struct l2cap_conn *conn = container_of(work, struct l2cap_conn, work 1609 net/bluetooth/l2cap_core.c static void l2cap_info_timeout(struct work_struct *work) work 1611 net/bluetooth/l2cap_core.c struct l2cap_conn *conn = container_of(work, struct l2cap_conn, work 1612 net/bluetooth/l2cap_core.c info_timer.work); work 1830 net/bluetooth/l2cap_core.c static void l2cap_monitor_timeout(struct work_struct *work) work 1832 net/bluetooth/l2cap_core.c struct l2cap_chan *chan = container_of(work, struct l2cap_chan, work 1833 net/bluetooth/l2cap_core.c monitor_timer.work); work 1851 
net/bluetooth/l2cap_core.c static void l2cap_retrans_timeout(struct work_struct *work) work 1853 net/bluetooth/l2cap_core.c struct l2cap_chan *chan = container_of(work, struct l2cap_chan, work 1854 net/bluetooth/l2cap_core.c retrans_timer.work); work 3086 net/bluetooth/l2cap_core.c static void l2cap_ack_timeout(struct work_struct *work) work 3088 net/bluetooth/l2cap_core.c struct l2cap_chan *chan = container_of(work, struct l2cap_chan, work 3089 net/bluetooth/l2cap_core.c ack_timer.work); work 7070 net/bluetooth/l2cap_core.c static void process_pending_rx(struct work_struct *work) work 7072 net/bluetooth/l2cap_core.c struct l2cap_conn *conn = container_of(work, struct l2cap_conn, work 904 net/bluetooth/mgmt.c static void service_cache_off(struct work_struct *work) work 906 net/bluetooth/mgmt.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 907 net/bluetooth/mgmt.c service_cache.work); work 925 net/bluetooth/mgmt.c static void rpa_expired(struct work_struct *work) work 927 net/bluetooth/mgmt.c struct hci_dev *hdev = container_of(work, struct hci_dev, work 928 net/bluetooth/mgmt.c rpa_expired.work); work 1090 net/bluetooth/mgmt.c queue_work(hdev->req_workqueue, &hdev->power_off.work); work 1200 net/bluetooth/mgmt.c queue_work(hdev->req_workqueue, &hdev->power_off.work); work 7321 net/bluetooth/mgmt.c queue_work(hdev->req_workqueue, &hdev->power_off.work); work 7381 net/bluetooth/mgmt.c queue_work(hdev->req_workqueue, &hdev->power_off.work); work 1363 net/bluetooth/smp.c static void smp_timeout(struct work_struct *work) work 1365 net/bluetooth/smp.c struct smp_chan *smp = container_of(work, struct smp_chan, work 1366 net/bluetooth/smp.c security_timer.work); work 332 net/bridge/br_fdb.c void br_fdb_cleanup(struct work_struct *work) work 334 net/bridge/br_fdb.c struct net_bridge *br = container_of(work, struct net_bridge, work 335 net/bridge/br_fdb.c gc_work.work); work 557 net/bridge/br_private.h void br_fdb_cleanup(struct work_struct *work); work 138 net/caif/chnl_net.c static void close_work(struct work_struct *work) work 75 net/ceph/crush/mapper.c struct crush_work_bucket *work, work 82 net/ceph/crush/mapper.c if (work->perm_x != (__u32)x || work->perm_n == 0) { work 84 net/ceph/crush/mapper.c work->perm_x = x; work 90 net/ceph/crush/mapper.c work->perm[0] = s; work 91 net/ceph/crush/mapper.c work->perm_n = 0xffff; /* magic value, see below */ work 96 net/ceph/crush/mapper.c work->perm[i] = i; work 97 net/ceph/crush/mapper.c work->perm_n = 0; work 98 net/ceph/crush/mapper.c } else if (work->perm_n == 0xffff) { work 101 net/ceph/crush/mapper.c work->perm[i] = i; work 102 net/ceph/crush/mapper.c work->perm[work->perm[0]] = 0; work 103 net/ceph/crush/mapper.c work->perm_n = 1; work 107 net/ceph/crush/mapper.c for (i = 0; i < work->perm_n; i++) work 108 net/ceph/crush/mapper.c dprintk(" perm_choose have %d: %d\n", i, work->perm[i]); work 109 net/ceph/crush/mapper.c while (work->perm_n <= pr) { work 110 net/ceph/crush/mapper.c unsigned int p = work->perm_n; work 116 net/ceph/crush/mapper.c unsigned int t = work->perm[p + i]; work 117 net/ceph/crush/mapper.c work->perm[p + i] = work->perm[p]; work 118 net/ceph/crush/mapper.c work->perm[p] = t; work 122 net/ceph/crush/mapper.c work->perm_n++; work 125 net/ceph/crush/mapper.c dprintk(" perm_choose %d: %d\n", i, work->perm[i]); work 127 net/ceph/crush/mapper.c s = work->perm[pr]; work 136 net/ceph/crush/mapper.c struct crush_work_bucket *work, int x, int r) work 138 net/ceph/crush/mapper.c return bucket_perm_choose(&bucket->h, work, 
x, r); work 377 net/ceph/crush/mapper.c struct crush_work_bucket *work, work 388 net/ceph/crush/mapper.c work, x, r); work 450 net/ceph/crush/mapper.c struct crush_work *work, work 510 net/ceph/crush/mapper.c in, work->work[-1-in->id], work 514 net/ceph/crush/mapper.c in, work->work[-1-in->id], work 563 net/ceph/crush/mapper.c work, work 644 net/ceph/crush/mapper.c struct crush_work *work, work 725 net/ceph/crush/mapper.c in, work->work[-1-in->id], work 777 net/ceph/crush/mapper.c work, work 862 net/ceph/crush/mapper.c w->work = v; work 868 net/ceph/crush/mapper.c w->work[b] = v; work 874 net/ceph/crush/mapper.c w->work[b]->perm_x = 0; work 875 net/ceph/crush/mapper.c w->work[b]->perm_n = 0; work 876 net/ceph/crush/mapper.c w->work[b]->perm = v; work 740 net/ceph/messenger.c INIT_DELAYED_WORK(&con->work, ceph_con_workfn); work 2823 net/ceph/messenger.c if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) { work 2840 net/ceph/messenger.c if (cancel_delayed_work(&con->work)) { work 2917 net/ceph/messenger.c static void ceph_con_workfn(struct work_struct *work) work 2919 net/ceph/messenger.c struct ceph_connection *con = container_of(work, struct ceph_connection, work 2920 net/ceph/messenger.c work.work); work 973 net/ceph/mon_client.c static void delayed_work(struct work_struct *work) work 976 net/ceph/mon_client.c container_of(work, struct ceph_mon_client, delayed_work.work); work 2417 net/ceph/osd_client.c static void complete_request_workfn(struct work_struct *work) work 2420 net/ceph/osd_client.c container_of(work, struct ceph_osd_request, r_complete_work); work 2810 net/ceph/osd_client.c struct work_struct work; work 2839 net/ceph/osd_client.c INIT_WORK(&lwork->work, workfn); work 2868 net/ceph/osd_client.c queue_work(osdc->notify_wq, &lwork->work); work 2873 net/ceph/osd_client.c struct linger_work *lwork = container_of(w, struct linger_work, work); work 2896 net/ceph/osd_client.c struct linger_work *lwork = container_of(w, struct linger_work, work); work 3270 net/ceph/osd_client.c static void handle_timeout(struct work_struct *work) work 3273 net/ceph/osd_client.c container_of(work, struct ceph_osd_client, timeout_work.work); work 3359 net/ceph/osd_client.c static void handle_osds_timeout(struct work_struct *work) work 3362 net/ceph/osd_client.c container_of(work, struct ceph_osd_client, work 3363 net/ceph/osd_client.c osds_timeout_work.work); work 1755 net/core/dev.c static void netstamp_clear(struct work_struct *work) work 5221 net/core/dev.c static void flush_backlog(struct work_struct *work) work 5853 net/core/dev.c int work = 0; work 5872 net/core/dev.c if (++work >= quota) work 5873 net/core/dev.c return work; work 5898 net/core/dev.c return work; work 6101 net/core/dev.c int work = 0; work 6120 net/core/dev.c work = napi_poll(napi, BUSY_POLL_BUDGET); work 6121 net/core/dev.c trace_napi_poll(napi, work, BUSY_POLL_BUDGET); work 6124 net/core/dev.c if (work > 0) work 6126 net/core/dev.c LINUX_MIB_BUSYPOLLRXPACKETS, work); work 6291 net/core/dev.c int work, weight; work 6305 net/core/dev.c work = 0; work 6307 net/core/dev.c work = n->poll(n, weight); work 6308 net/core/dev.c trace_napi_poll(n, work, weight); work 6311 net/core/dev.c WARN_ON_ONCE(work > weight); work 6313 net/core/dev.c if (likely(work < weight)) work 6349 net/core/dev.c return work; work 6291 net/core/devlink.c static void devlink_port_type_warn(struct work_struct *work) work 113 net/core/drop_monitor.c int work, int budget); work 114 net/core/drop_monitor.c void (*work_item_func)(struct work_struct *work); 
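Nearly every entry above instantiates one idiom: a struct work_struct (or struct delayed_work) embedded in a larger object, a handler that recovers the embedding object with container_of(), and scheduling through schedule_work()/schedule_delayed_work() with teardown via cancel_work_sync()/cancel_delayed_work_sync(). A minimal sketch of that idiom follows; it is not taken from any file listed here, and all names (struct foo, foo_work_fn, foo_dwork_fn) are hypothetical.

/* Minimal workqueue-embedding sketch; hypothetical names throughout. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/printk.h>

struct foo {
	int value;
	struct work_struct work;	/* immediate work item */
	struct delayed_work dwork;	/* timer-deferred work item */
};

static struct foo foo_instance = { .value = 42 };

/* The handler only receives the work_struct pointer; container_of()
 * recovers the object that embeds it. */
static void foo_work_fn(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, work);

	pr_info("foo: value %d\n", f->value);
}

/* A delayed handler is passed a pointer to dwork.work, so it converts
 * with to_delayed_work() first; this is equivalent to the
 * container_of(work, ..., dwork.work) spelling seen in the entries. */
static void foo_dwork_fn(struct work_struct *work)
{
	struct foo *f = container_of(to_delayed_work(work), struct foo, dwork);

	pr_info("foo: delayed value %d\n", f->value);
}

static int __init foo_init(void)
{
	INIT_WORK(&foo_instance.work, foo_work_fn);
	INIT_DELAYED_WORK(&foo_instance.dwork, foo_dwork_fn);
	schedule_work(&foo_instance.work);
	schedule_delayed_work(&foo_instance.dwork, HZ / 10);
	return 0;
}

static void __exit foo_exit(void)
{
	/* _sync variants guarantee the handler is not running on return,
	 * which is why they appear before frees in the listings above. */
	cancel_work_sync(&foo_instance.work);
	cancel_delayed_work_sync(&foo_instance.dwork);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

The two container_of() spellings are interchangeable: container_of(to_delayed_work(work), struct foo, dwork) and container_of(work, struct foo, dwork.work) compute the same address, and both forms occur throughout the entries that follow.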
work 115 net/core/drop_monitor.c void (*hw_work_item_func)(struct work_struct *work); work 186 net/core/drop_monitor.c static void send_dm_alert(struct work_struct *work) work 191 net/core/drop_monitor.c data = container_of(work, struct per_cpu_dm_data, dm_alert_work); work 268 net/core/drop_monitor.c int work, int budget) work 404 net/core/drop_monitor.c static void net_dm_hw_summary_work(struct work_struct *work) work 411 net/core/drop_monitor.c hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work); work 529 net/core/drop_monitor.c int work, int budget) work 687 net/core/drop_monitor.c static void net_dm_packet_work(struct work_struct *work) work 694 net/core/drop_monitor.c data = container_of(work, struct per_cpu_dm_data, dm_alert_work); work 880 net/core/drop_monitor.c static void net_dm_hw_packet_work(struct work_struct *work) work 887 net/core/drop_monitor.c hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work); work 885 net/core/neighbour.c static void neigh_periodic_work(struct work_struct *work) work 887 net/core/neighbour.c struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work); work 549 net/core/net_namespace.c static void cleanup_net(struct work_struct *work) work 98 net/core/netpoll.c static void queue_process(struct work_struct *work) work 101 net/core/netpoll.c container_of(work, struct netpoll_info, tx_work.work); work 140 net/core/netpoll.c int work; work 152 net/core/netpoll.c work = napi->poll(napi, 0); work 153 net/core/netpoll.c WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll); work 154 net/core/netpoll.c trace_napi_poll(napi, work, 0); work 441 net/core/skmsg.c static void sk_psock_backlog(struct work_struct *work) work 443 net/core/skmsg.c struct sk_psock *psock = container_of(work, struct sk_psock, work); work 508 net/core/skmsg.c INIT_WORK(&psock->work, sk_psock_backlog); work 572 net/core/skmsg.c cancel_work_sync(&psock->work); work 709 net/core/skmsg.c schedule_work(&psock->work); work 729 net/core/skmsg.c schedule_work(&psock_other->work); work 802 net/core/skmsg.c schedule_work(&psock->work); work 110 net/core/sock_diag.c struct work_struct work; work 120 net/core/sock_diag.c static void sock_diag_broadcast_destroy_work(struct work_struct *work) work 123 net/core/sock_diag.c container_of(work, struct broadcast_sk, work); work 160 net/core/sock_diag.c INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work); work 161 net/core/sock_diag.c queue_work(broadcast_wq, &bsk->work); work 305 net/dsa/dsa.c bool dsa_schedule_work(struct work_struct *work) work 307 net/dsa/dsa.c return queue_work(dsa_owq, work); work 86 net/dsa/dsa_priv.h bool dsa_schedule_work(struct work_struct *work); work 550 net/dsa/slave.c static void dsa_port_xmit_work(struct work_struct *work) work 552 net/dsa/slave.c struct dsa_port *dp = container_of(work, struct dsa_port, xmit_work); work 1561 net/dsa/slave.c struct work_struct work; work 1567 net/dsa/slave.c static void dsa_slave_switchdev_event_work(struct work_struct *work) work 1570 net/dsa/slave.c container_of(work, struct dsa_switchdev_event_work, work); work 1650 net/dsa/slave.c INIT_WORK(&switchdev_work->work, work 1667 net/dsa/slave.c dsa_schedule_work(&switchdev_work->work); work 468 net/ipv4/devinet.c static void check_lifetime(struct work_struct *work); work 697 net/ipv4/devinet.c static void check_lifetime(struct work_struct *work) work 148 net/ipv4/inet_fragment.c static void fqdir_work_fn(struct work_struct *work) work 150 net/ipv4/inet_fragment.c struct fqdir *fqdir = 
container_of(work, struct fqdir, destroy_work); work 605 net/ipv6/route.c struct work_struct work; work 613 net/ipv6/route.c struct __rt6_probe_work *work = work 614 net/ipv6/route.c container_of(w, struct __rt6_probe_work, work); work 616 net/ipv6/route.c addrconf_addr_solict_mult(&work->target, &mcaddr); work 617 net/ipv6/route.c ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0); work 618 net/ipv6/route.c dev_put(work->dev); work 619 net/ipv6/route.c kfree(work); work 624 net/ipv6/route.c struct __rt6_probe_work *work = NULL; work 656 net/ipv6/route.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 657 net/ipv6/route.c if (work) work 663 net/ipv6/route.c work = kmalloc(sizeof(*work), GFP_ATOMIC); work 666 net/ipv6/route.c if (!work || cmpxchg(&fib6_nh->last_probe, work 668 net/ipv6/route.c kfree(work); work 670 net/ipv6/route.c INIT_WORK(&work->work, rt6_probe_deferred); work 671 net/ipv6/route.c work->target = *nh_gw; work 673 net/ipv6/route.c work->dev = dev; work 674 net/ipv6/route.c schedule_work(&work->work); work 142 net/iucv/iucv.c static void iucv_work_fn(struct work_struct *work); work 1773 net/iucv/iucv.c static void iucv_work_fn(struct work_struct *work) work 1808 net/iucv/iucv.c struct iucv_irq_list *work; work 1818 net/iucv/iucv.c work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); work 1819 net/iucv/iucv.c if (!work) { work 1823 net/iucv/iucv.c memcpy(&work->data, p, sizeof(work->data)); work 1827 net/iucv/iucv.c list_add_tail(&work->list, &iucv_work_queue); work 1831 net/iucv/iucv.c list_add_tail(&work->list, &iucv_task_queue); work 1249 net/l2tp/l2tp_core.c static void l2tp_tunnel_del_work(struct work_struct *work) work 1251 net/l2tp/l2tp_core.c struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel, work 143 net/mac80211/agg-rx.c ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); work 169 net/mac80211/agg-rx.c ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); work 522 net/mac80211/agg-rx.c ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); work 541 net/mac80211/agg-rx.c ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); work 689 net/mac80211/agg-tx.c ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); work 796 net/mac80211/agg-tx.c ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); work 850 net/mac80211/agg-tx.c ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); work 905 net/mac80211/agg-tx.c ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); work 3162 net/mac80211/cfg.c void ieee80211_csa_finalize_work(struct work_struct *work) work 3165 net/mac80211/cfg.c container_of(work, struct ieee80211_sub_if_data, work 316 net/mac80211/ht.c cancel_work_sync(&sta->ampdu_mlme.work); work 333 net/mac80211/ht.c void ieee80211_ba_session_work(struct work_struct *work) work 336 net/mac80211/ht.c container_of(work, struct sta_info, ampdu_mlme.work); work 536 net/mac80211/ht.c void ieee80211_request_smps_mgd_work(struct work_struct *work) work 539 net/mac80211/ht.c container_of(work, struct ieee80211_sub_if_data, work 547 net/mac80211/ht.c void ieee80211_request_smps_ap_work(struct work_struct *work) work 550 net/mac80211/ht.c container_of(work, struct ieee80211_sub_if_data, work 746 net/mac80211/ibss.c static void ieee80211_csa_connection_drop_work(struct work_struct *work) work 749 net/mac80211/ibss.c container_of(work, struct ieee80211_sub_if_data, work 759 net/mac80211/ibss.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 1250 net/mac80211/ibss.c ieee80211_queue_work(&local->hw, 
&sdata->work); work 1726 net/mac80211/ibss.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 1856 net/mac80211/ibss.c ieee80211_queue_work(&local->hw, &sdata->work); work 938 net/mac80211/ieee80211_i.h struct work_struct work; work 1671 net/mac80211/ieee80211_i.h void ieee80211_scan_work(struct work_struct *work); work 1701 net/mac80211/ieee80211_i.h void ieee80211_sched_scan_stopped_work(struct work_struct *work); work 1721 net/mac80211/ieee80211_i.h void ieee80211_csa_finalize_work(struct work_struct *work); work 1798 net/mac80211/ieee80211_i.h void ieee80211_request_smps_ap_work(struct work_struct *work); work 1799 net/mac80211/ieee80211_i.h void ieee80211_request_smps_mgd_work(struct work_struct *work); work 1834 net/mac80211/ieee80211_i.h void ieee80211_ba_session_work(struct work_struct *work); work 2004 net/mac80211/ieee80211_i.h void ieee80211_dynamic_ps_enable_work(struct work_struct *work); work 2005 net/mac80211/ieee80211_i.h void ieee80211_dynamic_ps_disable_work(struct work_struct *work); work 2204 net/mac80211/ieee80211_i.h void ieee80211_dfs_cac_timer_work(struct work_struct *work); work 2206 net/mac80211/ieee80211_i.h void ieee80211_dfs_radar_detected_work(struct work_struct *work); work 45 net/mac80211/iface.c static void ieee80211_iface_work(struct work_struct *work); work 447 net/mac80211/iface.c INIT_WORK(&sdata->work, ieee80211_iface_work); work 979 net/mac80211/iface.c cancel_work_sync(&sdata->work); work 1228 net/mac80211/iface.c static void ieee80211_iface_work(struct work_struct *work) work 1231 net/mac80211/iface.c container_of(work, struct ieee80211_sub_if_data, work); work 1376 net/mac80211/iface.c static void ieee80211_recalc_smps_work(struct work_struct *work) work 1379 net/mac80211/iface.c container_of(work, struct ieee80211_sub_if_data, recalc_smps); work 1415 net/mac80211/iface.c INIT_WORK(&sdata->work, ieee80211_iface_work); work 1066 net/mac80211/key.c dec_tailroom_needed_wk.work); work 83 net/mac80211/main.c static void ieee80211_reconfig_filter(struct work_struct *work) work 86 net/mac80211/main.c container_of(work, struct ieee80211_local, reconfig_filter); work 243 net/mac80211/main.c static void ieee80211_restart_work(struct work_struct *work) work 246 net/mac80211/main.c container_of(work, struct ieee80211_local, restart_work); work 47 net/mac80211/mesh.c ieee80211_queue_work(&local->hw, &sdata->work); work 595 net/mac80211/mesh.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 606 net/mac80211/mesh.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 937 net/mac80211/mesh.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 962 net/mac80211/mesh.c ieee80211_queue_work(&local->hw, &sdata->work); work 1011 net/mac80211/mesh_hwmp.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 1018 net/mac80211/mesh_hwmp.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 1077 net/mac80211/mlme.c static void ieee80211_chswitch_work(struct work_struct *work) work 1080 net/mac80211/mlme.c container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); work 1721 net/mac80211/mlme.c void ieee80211_dynamic_ps_disable_work(struct work_struct *work) work 1724 net/mac80211/mlme.c container_of(work, struct ieee80211_local, work 1738 net/mac80211/mlme.c void ieee80211_dynamic_ps_enable_work(struct work_struct *work) work 1741 net/mac80211/mlme.c container_of(work, struct ieee80211_local, work 1814 net/mac80211/mlme.c void ieee80211_dfs_cac_timer_work(struct work_struct *work) work 1816 net/mac80211/mlme.c 
struct delayed_work *delayed_work = to_delayed_work(work); work 1916 net/mac80211/mlme.c static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work) work 1920 net/mac80211/mlme.c sdata = container_of(work, struct ieee80211_sub_if_data, work 1921 net/mac80211/mlme.c u.mgd.tx_tspec_wk.work); work 2469 net/mac80211/mlme.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 2709 net/mac80211/mlme.c static void ieee80211_beacon_connection_loss_work(struct work_struct *work) work 2712 net/mac80211/mlme.c container_of(work, struct ieee80211_sub_if_data, work 2728 net/mac80211/mlme.c static void ieee80211_csa_connection_drop_work(struct work_struct *work) work 2731 net/mac80211/mlme.c container_of(work, struct ieee80211_sub_if_data, work 4154 net/mac80211/mlme.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 4294 net/mac80211/mlme.c ieee80211_queue_work(&local->hw, &sdata->work); work 4471 net/mac80211/mlme.c static void ieee80211_sta_monitor_work(struct work_struct *work) work 4474 net/mac80211/mlme.c container_of(work, struct ieee80211_sub_if_data, work 83 net/mac80211/ocb.c ieee80211_queue_work(&local->hw, &sdata->work); work 159 net/mac80211/ocb.c ieee80211_queue_work(&local->hw, &sdata->work); work 199 net/mac80211/ocb.c ieee80211_queue_work(&local->hw, &sdata->work); work 279 net/mac80211/offchannel.c static void ieee80211_hw_roc_start(struct work_struct *work) work 282 net/mac80211/offchannel.c container_of(work, struct ieee80211_local, hw_roc_start); work 472 net/mac80211/offchannel.c static void ieee80211_roc_work(struct work_struct *work) work 475 net/mac80211/offchannel.c container_of(work, struct ieee80211_local, roc_work.work); work 482 net/mac80211/offchannel.c static void ieee80211_hw_roc_done(struct work_struct *work) work 485 net/mac80211/offchannel.c container_of(work, struct ieee80211_local, hw_roc_done); work 281 net/mac80211/rx.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 1416 net/mac80211/rx.c ieee80211_queue_work(&local->hw, &rx->sdata->work); work 3346 net/mac80211/rx.c ieee80211_queue_work(&local->hw, &sdata->work); work 3501 net/mac80211/rx.c ieee80211_queue_work(&rx->local->hw, &sdata->work); work 457 net/mac80211/scan.c ieee80211_queue_work(&sdata->local->hw, &sdata->work); work 989 net/mac80211/scan.c void ieee80211_scan_work(struct work_struct *work) work 992 net/mac80211/scan.c container_of(work, struct ieee80211_local, scan_work.work); work 1369 net/mac80211/scan.c void ieee80211_sched_scan_stopped_work(struct work_struct *work) work 1372 net/mac80211/scan.c container_of(work, struct ieee80211_local, work 326 net/mac80211/sta_info.c INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); work 278 net/mac80211/sta_info.h struct work_struct work; work 30 net/mac80211/tdls.c u.mgd.tdls_peer_del_work.work); work 870 net/mac80211/util.c void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work) work 877 net/mac80211/util.c queue_work(local->workqueue, work); work 2500 net/mac80211/util.c ieee80211_queue_work(&local->hw, &sdata->work); work 3344 net/mac80211/util.c void ieee80211_dfs_radar_detected_work(struct work_struct *work) work 3347 net/mac80211/util.c container_of(work, struct ieee80211_local, radar_detected_work); work 124 net/mac802154/ieee802154_i.h void ieee802154_xmit_worker(struct work_struct *work); work 25 net/mac802154/tx.c void ieee802154_xmit_worker(struct work_struct *work) work 28 net/mac802154/tx.c container_of(work, struct ieee802154_local, tx_work); work 307 
net/ncsi/internal.h struct work_struct work; /* For channel management */ work 404 net/ncsi/ncsi-manage.c schedule_work(&ndp->work); work 972 net/ncsi/ncsi-manage.c schedule_work(&ndp->work); work 995 net/ncsi/ncsi-manage.c schedule_work(&ndp->work); work 1005 net/ncsi/ncsi-manage.c schedule_work(&ndp->work); work 1312 net/ncsi/ncsi-manage.c schedule_work(&ndp->work); work 1316 net/ncsi/ncsi-manage.c schedule_work(&ndp->work); work 1402 net/ncsi/ncsi-manage.c static void ncsi_dev_work(struct work_struct *work) work 1404 net/ncsi/ncsi-manage.c struct ncsi_dev_priv *ndp = container_of(work, work 1405 net/ncsi/ncsi-manage.c struct ncsi_dev_priv, work); work 1647 net/ncsi/ncsi-manage.c INIT_WORK(&ndp->work, ncsi_dev_work); work 1688 net/ncsi/ncsi-manage.c schedule_work(&ndp->work); work 1810 net/ncsi/ncsi-manage.c schedule_work(&ndp->work); work 586 net/netfilter/ipset/ip_set_hash_gen.h mtype_gc(struct work_struct *work) work 595 net/netfilter/ipset/ip_set_hash_gen.h gc = container_of(work, struct htable_gc, dwork.work); work 219 net/netfilter/ipvs/ip_vs_ctl.c static void defense_work_handler(struct work_struct *work) work 222 net/netfilter/ipvs/ip_vs_ctl.c container_of(work, struct netns_ipvs, defense_work.work); work 4070 net/netfilter/ipvs/ip_vs_ctl.c cancel_work_sync(&ipvs->defense_work.work); work 1633 net/netfilter/ipvs/ip_vs_sync.c static void master_wakeup_work_handler(struct work_struct *work) work 1636 net/netfilter/ipvs/ip_vs_sync.c container_of(work, struct ipvs_master_sync_state, work 1637 net/netfilter/ipvs/ip_vs_sync.c master_wakeup_work.work); work 444 net/netfilter/nf_conncount.c static void tree_gc_worker(struct work_struct *work) work 446 net/netfilter/nf_conncount.c struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work); work 496 net/netfilter/nf_conncount.c schedule_work(work); work 1221 net/netfilter/nf_conntrack_core.c static void gc_worker(struct work_struct *work) work 1230 net/netfilter/nf_conntrack_core.c gc_work = container_of(work, struct conntrack_gc_work, dwork.work); work 84 net/netfilter/nf_conntrack_ecache.c static void ecache_work(struct work_struct *work) work 87 net/netfilter/nf_conntrack_ecache.c container_of(work, struct netns_ct, ecache_dwork.work); work 338 net/netfilter/nf_flow_table_core.c static void nf_flow_offload_work_gc(struct work_struct *work) work 342 net/netfilter/nf_flow_table_core.c flow_table = container_of(work, struct nf_flowtable, gc_work.work); work 191 net/netfilter/nf_nat_masquerade.c struct work_struct work; work 197 net/netfilter/nf_nat_masquerade.c static int inet6_cmp(struct nf_conn *ct, void *work) work 199 net/netfilter/nf_nat_masquerade.c struct masq_dev_work *w = (struct masq_dev_work *)work; work 210 net/netfilter/nf_nat_masquerade.c static void iterate_cleanup_work(struct work_struct *work) work 214 net/netfilter/nf_nat_masquerade.c w = container_of(work, struct masq_dev_work, work); work 254 net/netfilter/nf_nat_masquerade.c INIT_WORK(&w->work, iterate_cleanup_work); work 258 net/netfilter/nf_nat_masquerade.c schedule_work(&w->work); work 296 net/netfilter/nft_set_hash.c static void nft_rhash_gc(struct work_struct *work) work 304 net/netfilter/nft_set_hash.c priv = container_of(work, struct nft_rhash, gc_work.work); work 385 net/netfilter/nft_set_rbtree.c static void nft_rbtree_gc(struct work_struct *work) work 393 net/netfilter/nft_set_rbtree.c priv = container_of(work, struct nft_rbtree, gc_work.work); work 34 net/netfilter/xt_IDLETIMER.c struct work_struct work; work 81 
net/netfilter/xt_IDLETIMER.c static void idletimer_tg_work(struct work_struct *work) work 83 net/netfilter/xt_IDLETIMER.c struct idletimer_tg *timer = container_of(work, struct idletimer_tg, work 84 net/netfilter/xt_IDLETIMER.c work); work 95 net/netfilter/xt_IDLETIMER.c schedule_work(&timer->work); work 148 net/netfilter/xt_IDLETIMER.c INIT_WORK(&info->timer->work, idletimer_tg_work); work 238 net/netfilter/xt_IDLETIMER.c cancel_work_sync(&info->timer->work); work 273 net/netfilter/xt_hashlimit.c static void htable_gc(struct work_struct *work); work 379 net/netfilter/xt_hashlimit.c static void htable_gc(struct work_struct *work) work 383 net/netfilter/xt_hashlimit.c ht = container_of(work, struct xt_hashlimit_htable, gc_work.work); work 406 net/netlink/af_netlink.c static void netlink_sock_destruct_work(struct work_struct *work) work 408 net/netlink/af_netlink.c struct netlink_sock *nlk = container_of(work, struct netlink_sock, work 409 net/netlink/af_netlink.c work); work 724 net/netlink/af_netlink.c INIT_WORK(&nlk->work, netlink_sock_destruct_work); work 725 net/netlink/af_netlink.c schedule_work(&nlk->work); work 49 net/netlink/af_netlink.h struct work_struct work; work 978 net/nfc/core.c static void nfc_check_pres_work(struct work_struct *work) work 980 net/nfc/core.c struct nfc_dev *dev = container_of(work, struct nfc_dev, work 110 net/nfc/digital_core.c static void digital_wq_cmd_complete(struct work_struct *work) work 113 net/nfc/digital_core.c struct nfc_digital_dev *ddev = container_of(work, work 152 net/nfc/digital_core.c static void digital_wq_cmd(struct work_struct *work) work 157 net/nfc/digital_core.c struct nfc_digital_dev *ddev = container_of(work, work 422 net/nfc/digital_core.c static void digital_wq_poll(struct work_struct *work) work 426 net/nfc/digital_core.c struct nfc_digital_dev *ddev = container_of(work, work 428 net/nfc/digital_core.c poll_work.work); work 63 net/nfc/hci/core.c static void nfc_hci_msg_tx_work(struct work_struct *work) work 65 net/nfc/hci/core.c struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev, work 126 net/nfc/hci/core.c static void nfc_hci_msg_rx_work(struct work_struct *work) work 128 net/nfc/hci/core.c struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev, work 599 net/nfc/hci/llc_shdlc.c static void llc_shdlc_sm_work(struct work_struct *work) work 601 net/nfc/hci/llc_shdlc.c struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work); work 225 net/nfc/llcp_core.c static void nfc_llcp_timeout_work(struct work_struct *work) work 227 net/nfc/llcp_core.c struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, work 242 net/nfc/llcp_core.c static void nfc_llcp_sdreq_timeout_work(struct work_struct *work) work 248 net/nfc/llcp_core.c struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, work 714 net/nfc/llcp_core.c static void nfc_llcp_tx_work(struct work_struct *work) work 716 net/nfc/llcp_core.c struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, work 1470 net/nfc/llcp_core.c static void nfc_llcp_rx_work(struct work_struct *work) work 1472 net/nfc/llcp_core.c struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, work 38 net/nfc/nci/core.c static void nci_cmd_work(struct work_struct *work); work 39 net/nfc/nci/core.c static void nci_rx_work(struct work_struct *work); work 40 net/nfc/nci/core.c static void nci_tx_work(struct work_struct *work); work 1422 net/nfc/nci/core.c static void nci_tx_work(struct work_struct *work) work 1424 
net/nfc/nci/core.c struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work); work 1459 net/nfc/nci/core.c static void nci_rx_work(struct work_struct *work) work 1461 net/nfc/nci/core.c struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work); work 1505 net/nfc/nci/core.c static void nci_cmd_work(struct work_struct *work) work 1507 net/nfc/nci/core.c struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work); work 409 net/nfc/nci/hci.c static void nci_hci_msg_rx_work(struct work_struct *work) work 412 net/nfc/nci/hci.c container_of(work, struct nci_hci_dev, msg_rx_work); work 83 net/nfc/nci/uart.c static void nci_uart_write_work(struct work_struct *work) work 85 net/nfc/nci/uart.c struct nci_uart *nu = container_of(work, struct nci_uart, write_work); work 1783 net/nfc/netlink.c static void nfc_urelease_event_work(struct work_struct *work) work 1785 net/nfc/netlink.c struct urelease_work *w = container_of(work, struct urelease_work, w); work 176 net/nfc/rawsock.c static void rawsock_tx_work(struct work_struct *work) work 178 net/nfc/rawsock.c struct sock *sk = to_rawsock_sk(work); work 236 net/openvswitch/datapath.h void ovs_dp_notify_wq(struct work_struct *work); work 34 net/openvswitch/dp_notify.c void ovs_dp_notify_wq(struct work_struct *work) work 36 net/openvswitch/dp_notify.c struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work); work 127 net/qrtr/qrtr.c struct work_struct work; work 153 net/qrtr/qrtr.c cancel_work_sync(&node->work); work 320 net/qrtr/qrtr.c schedule_work(&node->work); work 362 net/qrtr/qrtr.c static void qrtr_node_rx_work(struct work_struct *work) work 364 net/qrtr/qrtr.c struct qrtr_node *node = container_of(work, struct qrtr_node, work); work 429 net/qrtr/qrtr.c INIT_WORK(&node->work, qrtr_node_rx_work); work 98 net/rds/ib.c static void rds_ib_dev_free(struct work_struct *work) work 101 net/rds/ib.c struct rds_ib_device *rds_ibdev = container_of(work, work 470 net/rds/ib_rdma.c static void rds_ib_mr_pool_flush_worker(struct work_struct *work) work 472 net/rds/ib_rdma.c struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work); work 519 net/rds/tcp.c static void rds_tcp_accept_worker(struct work_struct *work) work 521 net/rds/tcp.c struct rds_tcp_net *rtn = container_of(work, work 162 net/rds/threads.c void rds_connect_worker(struct work_struct *work) work 164 net/rds/threads.c struct rds_conn_path *cp = container_of(work, work 166 net/rds/threads.c cp_conn_w.work); work 191 net/rds/threads.c void rds_send_worker(struct work_struct *work) work 193 net/rds/threads.c struct rds_conn_path *cp = container_of(work, work 195 net/rds/threads.c cp_send_w.work); work 217 net/rds/threads.c void rds_recv_worker(struct work_struct *work) work 219 net/rds/threads.c struct rds_conn_path *cp = container_of(work, work 221 net/rds/threads.c cp_recv_w.work); work 241 net/rds/threads.c void rds_shutdown_worker(struct work_struct *work) work 243 net/rds/threads.c struct rds_conn_path *cp = container_of(work, work 174 net/rfkill/core.c static void rfkill_global_led_trigger_worker(struct work_struct *work) work 960 net/rfkill/core.c static void rfkill_poll(struct work_struct *work) work 964 net/rfkill/core.c rfkill = container_of(work, struct rfkill, poll_work.work); work 978 net/rfkill/core.c static void rfkill_uevent_work(struct work_struct *work) work 982 net/rfkill/core.c rfkill = container_of(work, struct rfkill, uevent_work); work 989 net/rfkill/core.c static void rfkill_sync_work(struct work_struct 
*work) work 994 net/rfkill/core.c rfkill = container_of(work, struct rfkill, sync_work); work 94 net/rfkill/input.c static void rfkill_op_handler(struct work_struct *work) work 296 net/rxrpc/call_event.c void rxrpc_process_call(struct work_struct *work) work 299 net/rxrpc/call_event.c container_of(work, struct rxrpc_call, processor); work 566 net/rxrpc/call_object.c static void rxrpc_destroy_call(struct work_struct *work) work 568 net/rxrpc/call_object.c struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); work 1067 net/rxrpc/conn_client.c void rxrpc_discard_expired_client_conns(struct work_struct *work) work 1071 net/rxrpc/conn_client.c container_of(work, struct rxrpc_net, client_conn_reaper); work 487 net/rxrpc/conn_event.c void rxrpc_process_connection(struct work_struct *work) work 490 net/rxrpc/conn_event.c container_of(work, struct rxrpc_connection, processor); work 379 net/rxrpc/conn_object.c void rxrpc_service_connection_reaper(struct work_struct *work) work 383 net/rxrpc/conn_object.c container_of(work, struct rxrpc_net, service_conn_reaper); work 445 net/rxrpc/local_object.c static void rxrpc_local_processor(struct work_struct *work) work 448 net/rxrpc/local_object.c container_of(work, struct rxrpc_local, processor); work 355 net/rxrpc/peer_event.c void rxrpc_peer_keepalive_worker(struct work_struct *work) work 358 net/rxrpc/peer_event.c container_of(work, struct rxrpc_net, peer_keepalive_work); work 96 net/sched/cls_basic.c static void basic_delete_filter_work(struct work_struct *work) work 98 net/sched/cls_basic.c struct basic_filter *f = container_of(to_rcu_work(work), work 277 net/sched/cls_bpf.c static void cls_bpf_delete_prog_work(struct work_struct *work) work 279 net/sched/cls_bpf.c struct cls_bpf_prog *prog = container_of(to_rcu_work(work), work 66 net/sched/cls_cgroup.c static void cls_cgroup_destroy_work(struct work_struct *work) work 68 net/sched/cls_cgroup.c struct cls_cgroup_head *head = container_of(to_rcu_work(work), work 377 net/sched/cls_flow.c static void flow_destroy_filter_work(struct work_struct *work) work 379 net/sched/cls_flow.c struct flow_filter *f = container_of(to_rcu_work(work), work 358 net/sched/cls_flower.c static void fl_mask_free_work(struct work_struct *work) work 360 net/sched/cls_flower.c struct fl_flow_mask *mask = container_of(to_rcu_work(work), work 366 net/sched/cls_flower.c static void fl_uninit_mask_free_work(struct work_struct *work) work 368 net/sched/cls_flower.c struct fl_flow_mask *mask = container_of(to_rcu_work(work), work 407 net/sched/cls_flower.c static void fl_destroy_filter_work(struct work_struct *work) work 409 net/sched/cls_flower.c struct cls_fl_filter *f = container_of(to_rcu_work(work), work 551 net/sched/cls_flower.c static void fl_destroy_sleepable(struct work_struct *work) work 553 net/sched/cls_flower.c struct cls_fl_head *head = container_of(to_rcu_work(work), work 120 net/sched/cls_fw.c static void fw_delete_filter_work(struct work_struct *work) work 122 net/sched/cls_fw.c struct fw_filter *f = container_of(to_rcu_work(work), work 56 net/sched/cls_matchall.c static void mall_destroy_work(struct work_struct *work) work 58 net/sched/cls_matchall.c struct cls_mall_head *head = container_of(to_rcu_work(work), work 260 net/sched/cls_route.c static void route4_delete_filter_work(struct work_struct *work) work 262 net/sched/cls_route.c struct route4_filter *f = container_of(to_rcu_work(work), work 288 net/sched/cls_rsvp.h static void rsvp_delete_filter_work(struct work_struct *work) work 290 
net/sched/cls_rsvp.h struct rsvp_filter *f = container_of(to_rcu_work(work), work 167 net/sched/cls_tcindex.c static void tcindex_destroy_rexts_work(struct work_struct *work) work 171 net/sched/cls_tcindex.c r = container_of(to_rcu_work(work), work 186 net/sched/cls_tcindex.c static void tcindex_destroy_fexts_work(struct work_struct *work) work 188 net/sched/cls_tcindex.c struct tcindex_filter *f = container_of(to_rcu_work(work), work 248 net/sched/cls_tcindex.c static void tcindex_destroy_work(struct work_struct *work) work 250 net/sched/cls_tcindex.c struct tcindex_data *p = container_of(to_rcu_work(work), work 281 net/sched/cls_tcindex.c static void tcindex_partial_destroy_work(struct work_struct *work) work 283 net/sched/cls_tcindex.c struct tcindex_data *p = container_of(to_rcu_work(work), work 417 net/sched/cls_u32.c static void u32_delete_key_work(struct work_struct *work) work 419 net/sched/cls_u32.c struct tc_u_knode *key = container_of(to_rcu_work(work), work 434 net/sched/cls_u32.c static void u32_delete_key_freepf_work(struct work_struct *work) work 436 net/sched/cls_u32.c struct tc_u_knode *key = container_of(to_rcu_work(work), work 1327 net/sched/sch_hfsc.c xstats.work = cl->cl_total; work 160 net/sched/sch_htb.c struct work_struct work; work 942 net/sched/sch_htb.c schedule_work(&q->work); work 985 net/sched/sch_htb.c static void htb_work_func(struct work_struct *work) work 987 net/sched/sch_htb.c struct htb_sched *q = container_of(work, struct htb_sched, work); work 1004 net/sched/sch_htb.c INIT_WORK(&q->work, htb_work_func); work 1225 net/sched/sch_htb.c cancel_work_sync(&q->work); work 44 net/sctp/associola.c static void sctp_assoc_bh_rcv(struct work_struct *work); work 976 net/sctp/associola.c static void sctp_assoc_bh_rcv(struct work_struct *work) work 979 net/sctp/associola.c container_of(work, struct sctp_association, work 36 net/sctp/endpointola.c static void sctp_endpoint_bh_rcv(struct work_struct *work); work 313 net/sctp/endpointola.c static void sctp_endpoint_bh_rcv(struct work_struct *work) work 316 net/sctp/endpointola.c container_of(work, struct sctp_endpoint, work 773 net/smc/af_smc.c static void smc_connect_work(struct work_struct *work) work 775 net/smc/af_smc.c struct smc_sock *smc = container_of(work, struct smc_sock, work 1232 net/smc/af_smc.c static void smc_listen_work(struct work_struct *work) work 1234 net/smc/af_smc.c struct smc_sock *new_smc = container_of(work, struct smc_sock, work 1361 net/smc/af_smc.c static void smc_tcp_listen_work(struct work_struct *work) work 1363 net/smc/af_smc.c struct smc_sock *lsmc = container_of(work, struct smc_sock, work 333 net/smc/smc_close.c static void smc_close_passive_work(struct work_struct *work) work 335 net/smc/smc_close.c struct smc_connection *conn = container_of(work, work 155 net/smc/smc_core.c static void smc_lgr_free_work(struct work_struct *work) work 157 net/smc/smc_core.c struct smc_link_group *lgr = container_of(to_delayed_work(work), work 236 net/smc/smc_ib.c static void smc_ib_port_event_work(struct work_struct *work) work 239 net/smc/smc_ib.c work, struct smc_ib_device, port_event_work); work 182 net/smc/smc_ism.c struct work_struct work; work 240 net/smc/smc_ism.c static void smc_ism_event_work(struct work_struct *work) work 243 net/smc/smc_ism.c container_of(work, struct smc_ism_event_work, work); work 349 net/smc/smc_ism.c INIT_WORK(&wrk->work, smc_ism_event_work); work 352 net/smc/smc_ism.c queue_work(smcd->event_wq, &wrk->work); work 360 net/smc/smc_llc.c struct work_struct work; work 367 
net/smc/smc_llc.c static void smc_llc_send_message_work(struct work_struct *work) work 369 net/smc/smc_llc.c struct smc_llc_send_work *llcwrk = container_of(work, work 370 net/smc/smc_llc.c struct smc_llc_send_work, work); work 393 net/smc/smc_llc.c INIT_WORK(&wrk->work, smc_llc_send_message_work); work 397 net/smc/smc_llc.c queue_work(link->llc_wq, &wrk->work); work 595 net/smc/smc_llc.c static void smc_llc_testlink_work(struct work_struct *work) work 597 net/smc/smc_llc.c struct smc_link *link = container_of(to_delayed_work(work), work 567 net/smc/smc_tx.c void smc_tx_work(struct work_struct *work) work 569 net/smc/smc_tx.c struct smc_connection *conn = container_of(to_delayed_work(work), work 30 net/smc/smc_tx.h void smc_tx_work(struct work_struct *work); work 387 net/strparser/strparser.c queue_work(strp_wq, &strp->work); work 397 net/strparser/strparser.c queue_work(strp_wq, &strp->work); work 415 net/strparser/strparser.c queue_work(strp_wq, &strp->work); work 423 net/strparser/strparser.c do_strp_work(container_of(w, struct strparser, work)); work 429 net/strparser/strparser.c msg_timer_work.work); work 483 net/strparser/strparser.c INIT_WORK(&strp->work, strp_work); work 509 net/strparser/strparser.c queue_work(strp_wq, &strp->work); work 521 net/strparser/strparser.c cancel_work_sync(&strp->work); work 538 net/strparser/strparser.c queue_work(strp_wq, &strp->work); work 360 net/sunrpc/cache.c static void do_cache_clean(struct work_struct *work); work 482 net/sunrpc/cache.c static void do_cache_clean(struct work_struct *work) work 83 net/sunrpc/rpc_pipe.c rpc_timeout_upcall_queue(struct work_struct *work) work 87 net/sunrpc/rpc_pipe.c container_of(work, struct rpc_pipe, queue_timeout.work); work 753 net/sunrpc/sched.c static void __rpc_queue_timer_fn(struct work_struct *work) work 755 net/sunrpc/sched.c struct rpc_wait_queue *queue = container_of(work, work 757 net/sunrpc/sched.c timer_list.dwork.work); work 984 net/sunrpc/sched.c static void rpc_async_schedule(struct work_struct *work) work 988 net/sunrpc/sched.c __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); work 1150 net/sunrpc/sched.c static void rpc_async_release(struct work_struct *work) work 1154 net/sunrpc/sched.c rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); work 656 net/sunrpc/xprt.c static void xprt_autoclose(struct work_struct *work) work 659 net/sunrpc/xprt.c container_of(work, struct rpc_xprt, task_cleanup); work 1934 net/sunrpc/xprt.c static void xprt_destroy_cb(struct work_struct *work) work 1937 net/sunrpc/xprt.c container_of(work, struct rpc_xprt, task_cleanup); work 114 net/sunrpc/xprtrdma/frwr_ops.c frwr_mr_recycle_worker(struct work_struct *work) work 116 net/sunrpc/xprtrdma/frwr_ops.c struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, work 581 net/sunrpc/xprtrdma/svc_rdma_transport.c static void __svc_rdma_free(struct work_struct *work) work 584 net/sunrpc/xprtrdma/svc_rdma_transport.c container_of(work, struct svcxprt_rdma, sc_work); work 236 net/sunrpc/xprtrdma/transport.c xprt_rdma_connect_worker(struct work_struct *work) work 238 net/sunrpc/xprtrdma/transport.c struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt, work 239 net/sunrpc/xprtrdma/transport.c rx_connect_worker.work); work 979 net/sunrpc/xprtrdma/verbs.c rpcrdma_mr_refresh_worker(struct work_struct *work) work 981 net/sunrpc/xprtrdma/verbs.c struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer, work 724 net/sunrpc/xprtsock.c static void 
xs_stream_data_receive_workfn(struct work_struct *work) work 727 net/sunrpc/xprtsock.c container_of(work, struct sock_xprt, recv_worker); work 1445 net/sunrpc/xprtsock.c static void xs_udp_data_receive_workfn(struct work_struct *work) work 1448 net/sunrpc/xprtsock.c container_of(work, struct sock_xprt, recv_worker); work 1892 net/sunrpc/xprtsock.c static void xs_dummy_setup_socket(struct work_struct *work) work 2161 net/sunrpc/xprtsock.c static void xs_udp_setup_socket(struct work_struct *work) work 2164 net/sunrpc/xprtsock.c container_of(work, struct sock_xprt, connect_worker.work); work 2354 net/sunrpc/xprtsock.c static void xs_tcp_setup_socket(struct work_struct *work) work 2357 net/sunrpc/xprtsock.c container_of(work, struct sock_xprt, connect_worker.work); work 2502 net/sunrpc/xprtsock.c static void xs_error_handle(struct work_struct *work) work 2504 net/sunrpc/xprtsock.c struct sock_xprt *transport = container_of(work, work 72 net/switchdev/switchdev.c static void switchdev_deferred_process_work(struct work_struct *work) work 109 net/tipc/net.c struct work_struct work; work 145 net/tipc/net.c static void tipc_net_finalize_work(struct work_struct *work) work 149 net/tipc/net.c fwork = container_of(work, struct tipc_net_work, work); work 160 net/tipc/net.c INIT_WORK(&fwork->work, tipc_net_finalize_work); work 163 net/tipc/net.c schedule_work(&fwork->work); work 114 net/tipc/topsrv.c static void tipc_conn_recv_work(struct work_struct *work); work 115 net/tipc/topsrv.c static void tipc_conn_send_work(struct work_struct *work); work 299 net/tipc/topsrv.c static void tipc_conn_send_work(struct work_struct *work) work 301 net/tipc/topsrv.c struct tipc_conn *con = container_of(work, struct tipc_conn, swork); work 413 net/tipc/topsrv.c static void tipc_conn_recv_work(struct work_struct *work) work 415 net/tipc/topsrv.c struct tipc_conn *con = container_of(work, struct tipc_conn, rwork); work 448 net/tipc/topsrv.c static void tipc_topsrv_accept(struct work_struct *work) work 450 net/tipc/topsrv.c struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork); work 95 net/tipc/udp_media.c struct work_struct work; work 794 net/tipc/udp_media.c static void cleanup_bearer(struct work_struct *work) work 796 net/tipc/udp_media.c struct udp_bearer *ub = container_of(work, struct udp_bearer, work); work 825 net/tipc/udp_media.c INIT_WORK(&ub->work, cleanup_bearer); work 826 net/tipc/udp_media.c schedule_work(&ub->work); work 46 net/tls/tls_device.c static void tls_device_gc_task(struct work_struct *work); work 67 net/tls/tls_device.c static void tls_device_gc_task(struct work_struct *work) work 481 net/tls/tls_sw.c schedule_delayed_work(&ctx->tx_work.work, 1); work 1123 net/tls/tls_sw.c cancel_delayed_work(&ctx->tx_work.work); work 1242 net/tls/tls_sw.c cancel_delayed_work(&ctx->tx_work.work); work 2116 net/tls/tls_sw.c cancel_delayed_work_sync(&ctx->tx_work.work); work 2211 net/tls/tls_sw.c static void tx_work_handler(struct work_struct *work) work 2213 net/tls/tls_sw.c struct delayed_work *delayed_work = to_delayed_work(work); work 2215 net/tls/tls_sw.c struct tx_work, work); work 2243 net/tls/tls_sw.c schedule_delayed_work(&tx_ctx->tx_work.work, 0); work 2315 net/tls/tls_sw.c INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); work 424 net/vmw_vsock/af_vsock.c static void vsock_pending_work(struct work_struct *work) work 431 net/vmw_vsock/af_vsock.c vsk = container_of(work, struct vsock_sock, pending_work.work); work 572 net/vmw_vsock/af_vsock.c static void 
vsock_connect_timeout(struct work_struct *work); work 1098 net/vmw_vsock/af_vsock.c static void vsock_connect_timeout(struct work_struct *work) work 1104 net/vmw_vsock/af_vsock.c vsk = container_of(work, struct vsock_sock, connect_work.work); work 478 net/vmw_vsock/hyperv_transport.c static void hvs_close_timeout(struct work_struct *work) work 481 net/vmw_vsock/hyperv_transport.c container_of(work, struct vsock_sock, close_work.work); work 89 net/vmw_vsock/virtio_transport.c static void virtio_transport_loopback_work(struct work_struct *work) work 92 net/vmw_vsock/virtio_transport.c container_of(work, struct virtio_vsock, loopback_work); work 131 net/vmw_vsock/virtio_transport.c virtio_transport_send_pkt_work(struct work_struct *work) work 134 net/vmw_vsock/virtio_transport.c container_of(work, struct virtio_vsock, send_pkt_work); work 330 net/vmw_vsock/virtio_transport.c static void virtio_transport_tx_work(struct work_struct *work) work 333 net/vmw_vsock/virtio_transport.c container_of(work, struct virtio_vsock, tx_work); work 373 net/vmw_vsock/virtio_transport.c static void virtio_transport_rx_work(struct work_struct *work) work 376 net/vmw_vsock/virtio_transport.c container_of(work, struct virtio_vsock, rx_work); work 485 net/vmw_vsock/virtio_transport.c static void virtio_transport_event_work(struct work_struct *work) work 488 net/vmw_vsock/virtio_transport.c container_of(work, struct virtio_vsock, event_work); work 769 net/vmw_vsock/virtio_transport_common.c static void virtio_transport_close_timeout(struct work_struct *work) work 772 net/vmw_vsock/virtio_transport_common.c container_of(work, struct vsock_sock, close_work.work); work 37 net/vmw_vsock/vmci_transport.c static void vmci_transport_recv_pkt_work(struct work_struct *work); work 38 net/vmw_vsock/vmci_transport.c static void vmci_transport_cleanup(struct work_struct *work); work 62 net/vmw_vsock/vmci_transport.c struct work_struct work; work 792 net/vmw_vsock/vmci_transport.c INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work); work 794 net/vmw_vsock/vmci_transport.c schedule_work(&recv_pkt_info->work); work 897 net/vmw_vsock/vmci_transport.c static void vmci_transport_recv_pkt_work(struct work_struct *work) work 904 net/vmw_vsock/vmci_transport.c container_of(work, struct vmci_transport_recv_pkt_info, work); work 1634 net/vmw_vsock/vmci_transport.c static void vmci_transport_cleanup(struct work_struct *work) work 303 net/wireless/core.c static void cfg80211_rfkill_block_work(struct work_struct *work) work 307 net/wireless/core.c rdev = container_of(work, struct cfg80211_registered_device, work 312 net/wireless/core.c static void cfg80211_event_work(struct work_struct *work) work 316 net/wireless/core.c rdev = container_of(work, struct cfg80211_registered_device, work 336 net/wireless/core.c static void cfg80211_destroy_iface_wk(struct work_struct *work) work 340 net/wireless/core.c rdev = container_of(work, struct cfg80211_registered_device, work 348 net/wireless/core.c static void cfg80211_sched_scan_stop_wk(struct work_struct *work) work 353 net/wireless/core.c rdev = container_of(work, struct cfg80211_registered_device, work 364 net/wireless/core.c static void cfg80211_propagate_radar_detect_wk(struct work_struct *work) work 368 net/wireless/core.c rdev = container_of(work, struct cfg80211_registered_device, work 380 net/wireless/core.c static void cfg80211_propagate_cac_done_wk(struct work_struct *work) work 384 net/wireless/core.c rdev = container_of(work, struct cfg80211_registered_device, work 420 
net/wireless/core.h void cfg80211_autodisconnect_wk(struct work_struct *work); work 423 net/wireless/core.h void cfg80211_conn_work(struct work_struct *work); work 445 net/wireless/core.h void cfg80211_sched_scan_results_wk(struct work_struct *work); work 480 net/wireless/core.h void cfg80211_dfs_channels_update_work(struct work_struct *work); work 555 net/wireless/core.h void cfg80211_pmsr_free_wk(struct work_struct *work); work 763 net/wireless/mlme.c void cfg80211_dfs_channels_update_work(struct work_struct *work) work 765 net/wireless/mlme.c struct delayed_work *delayed_work = to_delayed_work(work); work 558 net/wireless/pmsr.c void cfg80211_pmsr_free_wk(struct work_struct *work) work 560 net/wireless/pmsr.c struct wireless_dev *wdev = container_of(work, struct wireless_dev, work 215 net/wireless/reg.c static void reg_check_chans_work(struct work_struct *work); work 218 net/wireless/reg.c static void reg_todo(struct work_struct *work); work 463 net/wireless/reg.c static void reg_regdb_apply(struct work_struct *work) work 512 net/wireless/reg.c static void crda_timeout_work(struct work_struct *work); work 515 net/wireless/reg.c static void crda_timeout_work(struct work_struct *work) work 2191 net/wireless/reg.c static void reg_check_chans_work(struct work_struct *work) work 2892 net/wireless/reg.c static void reg_todo(struct work_struct *work) work 601 net/wireless/scan.c void cfg80211_sched_scan_results_wk(struct work_struct *work) work 606 net/wireless/scan.c rdev = container_of(work, struct cfg80211_registered_device, work 228 net/wireless/sme.c void cfg80211_conn_work(struct work_struct *work) work 231 net/wireless/sme.c container_of(work, struct cfg80211_registered_device, conn_work); work 662 net/wireless/sme.c static void disconnect_work(struct work_struct *work) work 1299 net/wireless/sme.c void cfg80211_autodisconnect_wk(struct work_struct *work) work 1302 net/wireless/sme.c container_of(work, struct wireless_dev, disconnect_wk); work 411 net/wireless/wext-core.c static void wireless_nlevent_process(struct work_struct *work) work 259 net/xdp/xdp_umem.c static void xdp_umem_release_deferred(struct work_struct *work) work 261 net/xdp/xdp_umem.c struct xdp_umem *umem = container_of(work, struct xdp_umem, work); work 277 net/xdp/xdp_umem.c INIT_WORK(&umem->work, xdp_umem_release_deferred); work 278 net/xdp/xdp_umem.c schedule_work(&umem->work); work 669 net/xfrm/xfrm_policy.c static void xfrm_hash_resize(struct work_struct *work) work 671 net/xfrm/xfrm_policy.c struct net *net = container_of(work, struct net, xfrm.policy_hash_work); work 1207 net/xfrm/xfrm_policy.c static void xfrm_hash_rebuild(struct work_struct *work) work 1209 net/xfrm/xfrm_policy.c struct net *net = container_of(work, struct net, work 1210 net/xfrm/xfrm_policy.c xfrm.policy_hthresh.work); work 1352 net/xfrm/xfrm_policy.c schedule_work(&net->xfrm.policy_hthresh.work); work 4053 net/xfrm/xfrm_policy.c INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild); work 37 net/xfrm/xfrm_state.c static void xfrm_state_gc_task(struct work_struct *work); work 118 net/xfrm/xfrm_state.c static void xfrm_hash_resize(struct work_struct *work) work 120 net/xfrm/xfrm_state.c struct net *net = container_of(work, struct net, xfrm.state_hash_work); work 505 net/xfrm/xfrm_state.c static void xfrm_state_gc_task(struct work_struct *work) work 34 samples/livepatch/livepatch-callbacks-busymod.c static void busymod_work_func(struct work_struct *work); work 35 samples/livepatch/livepatch-callbacks-busymod.c static 
DECLARE_DELAYED_WORK(work, busymod_work_func); work 37 samples/livepatch/livepatch-callbacks-busymod.c static void busymod_work_func(struct work_struct *work) work 47 samples/livepatch/livepatch-callbacks-busymod.c schedule_delayed_work(&work, work 54 samples/livepatch/livepatch-callbacks-busymod.c cancel_delayed_work_sync(&work); work 132 samples/livepatch/livepatch-callbacks-demo.c static void patched_work_func(struct work_struct *work) work 140 samples/livepatch/livepatch-shadow-mod.c static void alloc_work_func(struct work_struct *work); work 143 samples/livepatch/livepatch-shadow-mod.c static void alloc_work_func(struct work_struct *work) work 165 samples/livepatch/livepatch-shadow-mod.c static void cleanup_work_func(struct work_struct *work); work 168 samples/livepatch/livepatch-shadow-mod.c static void cleanup_work_func(struct work_struct *work) work 18 samples/trace_printk/trace-printk.c static void trace_printk_irq_work(struct irq_work *work) work 60 security/apparmor/include/policy_unpack.h struct work_struct work; work 151 security/apparmor/policy_unpack.c static void do_loaddata_free(struct work_struct *work) work 153 security/apparmor/policy_unpack.c struct aa_loaddata *d = container_of(work, struct aa_loaddata, work); work 174 security/apparmor/policy_unpack.c INIT_WORK(&d->work, do_loaddata_free); work 175 security/apparmor/policy_unpack.c schedule_work(&d->work); work 21 security/keys/gc.c static void key_garbage_collector(struct work_struct *work); work 168 security/keys/gc.c static void key_garbage_collector(struct work_struct *work) work 314 security/selinux/ss/avtab.c u32 work = nrules; work 320 security/selinux/ss/avtab.c while (work) { work 321 security/selinux/ss/avtab.c work = work >> 1; work 41 security/yama/yama_lsm.c static void yama_relation_cleanup(struct work_struct *work); work 45 security/yama/yama_lsm.c struct callback_head work; work 51 security/yama/yama_lsm.c static void __report_access(struct callback_head *work) work 54 security/yama/yama_lsm.c container_of(work, struct access_report_info, work); work 96 security/yama/yama_lsm.c init_task_work(&info->work, __report_access); work 102 security/yama/yama_lsm.c if (task_work_add(current, &info->work, true) == 0) work 115 security/yama/yama_lsm.c static void yama_relation_cleanup(struct work_struct *work) work 63 sound/aoa/aoa-gpio.h struct delayed_work work; work 210 sound/aoa/core/gpio-feature.c static void ftr_handle_notify(struct work_struct *work) work 213 sound/aoa/core/gpio-feature.c container_of(work, struct gpio_notification, work.work); work 275 sound/aoa/core/gpio-feature.c INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify); work 276 sound/aoa/core/gpio-feature.c INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify); work 277 sound/aoa/core/gpio-feature.c INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify); work 293 sound/aoa/core/gpio-feature.c cancel_delayed_work_sync(&rt->headphone_notify.work); work 294 sound/aoa/core/gpio-feature.c cancel_delayed_work_sync(&rt->line_in_notify.work); work 295 sound/aoa/core/gpio-feature.c cancel_delayed_work_sync(&rt->line_out_notify.work); work 305 sound/aoa/core/gpio-feature.c schedule_delayed_work(&notif->work, 0); work 72 sound/aoa/core/gpio-pmf.c static void pmf_handle_notify(struct work_struct *work) work 75 sound/aoa/core/gpio-pmf.c container_of(work, struct gpio_notification, work.work); work 87 sound/aoa/core/gpio-pmf.c INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify); work 88 sound/aoa/core/gpio-pmf.c 
INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify); work 89 sound/aoa/core/gpio-pmf.c INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify); work 109 sound/aoa/core/gpio-pmf.c cancel_delayed_work_sync(&rt->headphone_notify.work); work 110 sound/aoa/core/gpio-pmf.c cancel_delayed_work_sync(&rt->line_in_notify.work); work 111 sound/aoa/core/gpio-pmf.c cancel_delayed_work_sync(&rt->line_out_notify.work); work 126 sound/aoa/core/gpio-pmf.c schedule_delayed_work(&notif->work, 0); work 57 sound/core/compress_offload.c static void error_delayed_work(struct work_struct *work); work 732 sound/core/compress_offload.c static void error_delayed_work(struct work_struct *work) work 736 sound/core/compress_offload.c stream = container_of(work, struct snd_compr_stream, error_work.work); work 91 sound/core/rawmidi.c static void snd_rawmidi_input_event_work(struct work_struct *work) work 94 sound/core/rawmidi.c container_of(work, struct snd_rawmidi_runtime, event_work); work 52 sound/core/seq/oss/seq_oss_init.c static void async_call_lookup_ports(struct work_struct *work) work 126 sound/core/seq/seq_virmidi.c static void snd_vmidi_output_work(struct work_struct *work) work 133 sound/core/seq/seq_virmidi.c vmidi = container_of(work, struct snd_virmidi, output_work); work 98 sound/core/seq_device.c static void autoload_drivers(struct work_struct *work) work 166 sound/firewire/bebob/bebob.c do_registration(struct work_struct *work) work 169 sound/firewire/bebob/bebob.c container_of(work, struct snd_bebob, dwork.work); work 135 sound/firewire/dice/dice.c static void do_registration(struct work_struct *work) work 137 sound/firewire/dice/dice.c struct snd_dice *dice = container_of(work, struct snd_dice, dwork.work); work 51 sound/firewire/digi00x/digi00x.c static void do_registration(struct work_struct *work) work 54 sound/firewire/digi00x/digi00x.c container_of(work, struct snd_dg00x, dwork.work); work 114 sound/firewire/fireface/ff-transaction.c static void transmit_midi0_msg(struct work_struct *work) work 116 sound/firewire/fireface/ff-transaction.c struct snd_ff *ff = container_of(work, struct snd_ff, rx_midi_work[0]); work 121 sound/firewire/fireface/ff-transaction.c static void transmit_midi1_msg(struct work_struct *work) work 123 sound/firewire/fireface/ff-transaction.c struct snd_ff *ff = container_of(work, struct snd_ff, rx_midi_work[1]); work 37 sound/firewire/fireface/ff.c static void do_registration(struct work_struct *work) work 39 sound/firewire/fireface/ff.c struct snd_ff *ff = container_of(work, struct snd_ff, dwork.work); work 136 sound/firewire/fireface/ff.c cancel_work_sync(&ff->dwork.work); work 200 sound/firewire/fireworks/fireworks.c do_registration(struct work_struct *work) work 202 sound/firewire/fireworks/fireworks.c struct snd_efw *efw = container_of(work, struct snd_efw, dwork.work); work 62 sound/firewire/motu/motu.c static void do_registration(struct work_struct *work) work 64 sound/firewire/motu/motu.c struct snd_motu *motu = container_of(work, struct snd_motu, dwork.work); work 28 sound/firewire/oxfw/oxfw-scs1x.c struct work_struct work; work 138 sound/firewire/oxfw/oxfw-scs1x.c schedule_work(&scs->work); work 174 sound/firewire/oxfw/oxfw-scs1x.c static void scs_output_work(struct work_struct *work) work 176 sound/firewire/oxfw/oxfw-scs1x.c struct fw_scs1x *scs = container_of(work, struct fw_scs1x, work); work 322 sound/firewire/oxfw/oxfw-scs1x.c schedule_work(&scs->work); work 412 sound/firewire/oxfw/oxfw-scs1x.c INIT_WORK(&scs->work, scs_output_work); work 182 
work 184 sound/firewire/oxfw/oxfw.c struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
work 168 sound/firewire/tascam/tascam-transaction.c schedule_work(&port->work);
work 171 sound/firewire/tascam/tascam-transaction.c static void midi_port_work(struct work_struct *work)
work 174 sound/firewire/tascam/tascam-transaction.c container_of(work, struct snd_fw_async_midi_port, work);
work 188 sound/firewire/tascam/tascam-transaction.c schedule_work(&port->work);
work 202 sound/firewire/tascam/tascam-transaction.c schedule_work(&port->work);
work 322 sound/firewire/tascam/tascam-transaction.c INIT_WORK(&tscm->out_ports[i].work, midi_port_work);
work 98 sound/firewire/tascam/tascam.c static void do_registration(struct work_struct *work)
work 100 sound/firewire/tascam/tascam.c struct snd_tscm *tscm = container_of(work, struct snd_tscm, dwork.work);
work 49 sound/firewire/tascam/tascam.h struct work_struct work;
work 187 sound/firewire/tascam/tascam.h schedule_work(&port->work);
work 195 sound/firewire/tascam/tascam.h cancel_work_sync(&port->work);
work 15 sound/hda/hdac_bus.c static void snd_hdac_bus_process_unsol_events(struct work_struct *work);
work 154 sound/hda/hdac_bus.c static void snd_hdac_bus_process_unsol_events(struct work_struct *work)
work 156 sound/hda/hdac_bus.c struct hdac_bus *bus = container_of(work, struct hdac_bus, unsol_work);
work 25 sound/i2c/other/ak4113.c static void ak4113_stats(struct work_struct *work);
work 45 sound/i2c/other/ak4113.c cancel_delayed_work_sync(&chip->work);
work 75 sound/i2c/other/ak4113.c INIT_DELAYED_WORK(&chip->work, ak4113_stats);
work 129 sound/i2c/other/ak4113.c cancel_delayed_work_sync(&chip->work);
work 135 sound/i2c/other/ak4113.c schedule_delayed_work(&chip->work, HZ / 10);
work 507 sound/i2c/other/ak4113.c schedule_delayed_work(&ak4113->work, HZ / 10);
work 615 sound/i2c/other/ak4113.c static void ak4113_stats(struct work_struct *work)
work 617 sound/i2c/other/ak4113.c struct ak4113 *chip = container_of(work, struct ak4113, work.work);
work 623 sound/i2c/other/ak4113.c schedule_delayed_work(&chip->work, HZ / 10);
work 630 sound/i2c/other/ak4113.c cancel_delayed_work_sync(&chip->work);
work 24 sound/i2c/other/ak4114.c static void ak4114_stats(struct work_struct *work);
work 55 sound/i2c/other/ak4114.c cancel_delayed_work_sync(&chip->work);
work 86 sound/i2c/other/ak4114.c INIT_DELAYED_WORK(&chip->work, ak4114_stats);
work 144 sound/i2c/other/ak4114.c cancel_delayed_work_sync(&chip->work);
work 150 sound/i2c/other/ak4114.c schedule_delayed_work(&chip->work, HZ / 10);
work 492 sound/i2c/other/ak4114.c schedule_delayed_work(&ak4114->work, HZ / 10);
work 601 sound/i2c/other/ak4114.c static void ak4114_stats(struct work_struct *work)
work 603 sound/i2c/other/ak4114.c struct ak4114 *chip = container_of(work, struct ak4114, work.work);
work 608 sound/i2c/other/ak4114.c schedule_delayed_work(&chip->work, HZ / 10);
work 615 sound/i2c/other/ak4114.c cancel_delayed_work_sync(&chip->work);
work 1967 sound/pci/ac97/ac97_codec.c static void do_update_power(struct work_struct *work)
work 1970 sound/pci/ac97/ac97_codec.c container_of(work, struct snd_ac97, power_work.work));
work 730 sound/pci/emu10k1/emu10k1_main.c static void emu1010_firmware_work(struct work_struct *work)
work 736 sound/pci/emu10k1/emu10k1_main.c emu = container_of(work, struct snd_emu10k1,
work 737 sound/pci/emu10k1/emu10k1_main.c emu1010.firmware_work.work);
work 1879 sound/pci/es1968.c static void es1968_update_hw_volume(struct work_struct *work)
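Note: ak4113.c and ak4114.c above show a self-rearming poller: the stats handler reschedules itself every HZ/10 jiffies, and every teardown or reconfiguration path stops it with cancel_delayed_work_sync(), which is documented to work even against a self-requeueing item. A minimal sketch under those assumptions (struct ak_chip is illustrative):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct ak_chip {                        /* illustrative */
            struct delayed_work work;
    };

    static void ak_stats(struct work_struct *work)
    {
            struct ak_chip *chip = container_of(work, struct ak_chip,
                                                work.work);

            /* ... read status registers, update counters ... */

            /* Re-arm: poll again in 100 ms (HZ / 10 jiffies). */
            schedule_delayed_work(&chip->work, HZ / 10);
    }

    static void ak_stop(struct ak_chip *chip)
    {
            /*
             * Waits for a running instance and cancels the pending one;
             * safe against the handler's own re-arm.
             */
            cancel_delayed_work_sync(&chip->work);
    }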
work 1881 sound/pci/es1968.c struct es1968 *chip = container_of(work, struct es1968, hwvol_work);
work 44 sound/pci/hda/hda_beep.c static void snd_hda_generate_beep(struct work_struct *work)
work 47 sound/pci/hda/hda_beep.c container_of(work, struct hda_beep, beep_work);
work 639 sound/pci/hda/hda_codec.c static void hda_jackpoll_work(struct work_struct *work)
work 642 sound/pci/hda/hda_codec.c container_of(work, struct hda_codec, jackpoll_work.work);
work 2915 sound/pci/hda/hda_codec.c hda_jackpoll_work(&codec->jackpoll_work.work);
work 3072 sound/pci/hda/hda_codec.c hda_jackpoll_work(&codec->jackpoll_work.work);
work 3998 sound/pci/hda/hda_codec.c if (current_work() != &codec->jackpoll_work.work)
work 723 sound/pci/hda/hda_intel.c static void azx_irq_pending_work(struct work_struct *work)
work 725 sound/pci/hda/hda_intel.c struct hda_intel *hda = container_of(work, struct hda_intel, irq_pending_work);
work 1672 sound/pci/hda/hda_intel.c static void azx_probe_work(struct work_struct *work)
work 1674 sound/pci/hda/hda_intel.c struct hda_intel *hda = container_of(work, struct hda_intel, probe_work);
work 366 sound/pci/hda/hda_tegra.c static void hda_tegra_probe_work(struct work_struct *work);
work 464 sound/pci/hda/hda_tegra.c static void hda_tegra_probe_work(struct work_struct *work)
work 466 sound/pci/hda/hda_tegra.c struct hda_tegra *hda = container_of(work, struct hda_tegra, probe_work);
work 4454 sound/pci/hda/patch_ca0132.c static void ca0132_unsol_hp_delayed(struct work_struct *work)
work 4457 sound/pci/hda/patch_ca0132.c to_delayed_work(work), struct ca0132_spec, unsol_hp_work);
work 90 sound/pci/hda/patch_hdmi.c struct delayed_work work;
work 1562 sound/pci/hda/patch_hdmi.c schedule_delayed_work(&per_pin->work, msecs_to_jiffies(300));
work 1672 sound/pci/hda/patch_hdmi.c static void hdmi_repoll_eld(struct work_struct *work)
work 1675 sound/pci/hda/patch_hdmi.c container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
work 2268 sound/pci/hda/patch_hdmi.c INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
work 2338 sound/pci/hda/patch_hdmi.c cancel_delayed_work_sync(&per_pin->work);
work 203 sound/pci/ice1712/psc724.c static void psc724_update_hp_jack_state(struct work_struct *work)
work 205 sound/pci/ice1712/psc724.c struct psc724_spec *spec = container_of(work, struct psc724_spec,
work 206 sound/pci/ice1712/psc724.c hp_work.work);
work 1528 sound/pci/maestro3.c static void snd_m3_update_hw_volume(struct work_struct *work)
work 1530 sound/pci/maestro3.c struct snd_m3 *chip = container_of(work, struct snd_m3, hwvol_work);
work 118 sound/pci/oxygen/oxygen_lib.c static void oxygen_spdif_input_bits_changed(struct work_struct *work)
work 120 sound/pci/oxygen/oxygen_lib.c struct oxygen *chip = container_of(work, struct oxygen,
work 179 sound/pci/oxygen/oxygen_lib.c static void oxygen_gpio_changed(struct work_struct *work)
work 181 sound/pci/oxygen/oxygen_lib.c struct oxygen *chip = container_of(work, struct oxygen, gpio_work);
work 957 sound/ppc/tumbler.c static void device_change_handler(struct work_struct *work)
work 255 sound/sh/aica.c static void run_spu_dma(struct work_struct *work)
work 261 sound/sh/aica.c container_of(work, struct snd_card_aica, spu_dma_work);
work 471 sound/soc/codecs/ak4613.c static void ak4613_dummy_write(struct work_struct *work)
work 473 sound/soc/codecs/ak4613.c struct ak4613_priv *priv = container_of(work,
work 897 sound/soc/codecs/cs42l52.c static void cs42l52_beep_work(struct work_struct *work)
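Note: two less common idioms sit in the hda entries above. hda_codec.c calls hda_jackpoll_work(&codec->jackpoll_work.work) directly, running the handler synchronously by handing it the embedded work_struct, and later compares current_work() against that same pointer to detect whether it is executing as the queued item; patch_ca0132.c and patch_hdmi.c use to_delayed_work() instead of spelling out the .work member path. A sketch of all three (names are placeholders):

    #include <linux/workqueue.h>

    struct codec {                          /* placeholder */
            struct delayed_work jackpoll_work;
    };

    static void jackpoll_fn(struct work_struct *work)
    {
            /* to_delayed_work() maps work -> its enclosing delayed_work. */
            struct codec *c = container_of(to_delayed_work(work),
                                           struct codec, jackpoll_work);
            /* ... poll jack state on c ... */
            (void)c;
    }

    static void force_poll(struct codec *c)
    {
            /* Synchronous: invoke the handler directly, bypassing the queue. */
            jackpoll_fn(&c->jackpoll_work.work);
    }

    static bool running_as_own_work(struct codec *c)
    {
            /* current_work() is non-NULL only inside a workqueue worker. */
            return current_work() == &c->jackpoll_work.work;
    }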
work 900 sound/soc/codecs/cs42l52.c container_of(work, struct cs42l52_private, beep_work);
work 961 sound/soc/codecs/cs42l56.c static void cs42l56_beep_work(struct work_struct *work)
work 964 sound/soc/codecs/cs42l56.c container_of(work, struct cs42l56_private, beep_work);
work 2059 sound/soc/codecs/cs43130.c cs43130 = container_of(wk, struct cs43130_private, work);
work 2266 sound/soc/codecs/cs43130.c !work_busy(&cs43130->work)) {
work 2268 sound/soc/codecs/cs43130.c queue_work(cs43130->wq, &cs43130->work);
work 2324 sound/soc/codecs/cs43130.c INIT_WORK(&cs43130->work, cs43130_imp_meas);
work 2597 sound/soc/codecs/cs43130.c cancel_work_sync(&cs43130->work);
work 533 sound/soc/codecs/cs43130.h struct work_struct work;
work 52 sound/soc/codecs/da7219-aad.c static void da7219_aad_btn_det_work(struct work_struct *work)
work 55 sound/soc/codecs/da7219-aad.c container_of(work, struct da7219_aad_priv, btn_det_work);
work 108 sound/soc/codecs/da7219-aad.c static void da7219_aad_hptest_work(struct work_struct *work)
work 111 sound/soc/codecs/da7219-aad.c container_of(work, struct da7219_aad_priv, hptest_work);
work 205 sound/soc/codecs/hdac_hdmi.c static void hdac_hdmi_jack_dapm_work(struct work_struct *work)
work 209 sound/soc/codecs/hdac_hdmi.c port = container_of(work, struct hdac_hdmi_port, dapm_work);
work 2060 sound/soc/codecs/max98090.c static void max98090_pll_det_enable_work(struct work_struct *work)
work 2063 sound/soc/codecs/max98090.c container_of(work, struct max98090_priv,
work 2064 sound/soc/codecs/max98090.c pll_det_enable_work.work);
work 2093 sound/soc/codecs/max98090.c static void max98090_pll_det_disable_work(struct work_struct *work)
work 2096 sound/soc/codecs/max98090.c container_of(work, struct max98090_priv, pll_det_disable_work);
work 2126 sound/soc/codecs/max98090.c static void max98090_jack_work(struct work_struct *work)
work 2128 sound/soc/codecs/max98090.c struct max98090_priv *max98090 = container_of(work,
work 2130 sound/soc/codecs/max98090.c jack_work.work);
work 892 sound/soc/codecs/nau8824.c static void nau8824_jdet_work(struct work_struct *work)
work 895 sound/soc/codecs/nau8824.c work, struct nau8824, jdet_work);
work 801 sound/soc/codecs/nau8825.c static void nau8825_xtalk_work(struct work_struct *work)
work 804 sound/soc/codecs/nau8825.c work, struct nau8825, xtalk_work);
work 31 sound/soc/codecs/pcm1789.c struct work_struct work;
work 131 sound/soc/codecs/pcm1789.c static void pcm1789_work_queue(struct work_struct *work)
work 133 sound/soc/codecs/pcm1789.c struct pcm1789_private *priv = container_of(work,
work 135 sound/soc/codecs/pcm1789.c work);
work 154 sound/soc/codecs/pcm1789.c schedule_work(&priv->work);
work 254 sound/soc/codecs/pcm1789.c INIT_WORK(&pcm1789->work, pcm1789_work_queue);
work 265 sound/soc/codecs/pcm1789.c flush_work(&priv->work);
work 2194 sound/soc/codecs/rt1011.c static void rt1011_calibration_work(struct work_struct *work)
work 2197 sound/soc/codecs/rt1011.c container_of(work, struct rt1011_priv, cali_work);
work 377 sound/soc/codecs/rt274.c static void rt274_jack_detect_work(struct work_struct *work)
work 380 sound/soc/codecs/rt274.c container_of(work, struct rt274_priv, jack_detect_work.work);
work 286 sound/soc/codecs/rt286.c static void rt286_jack_detect_work(struct work_struct *work)
work 289 sound/soc/codecs/rt286.c container_of(work, struct rt286_priv, jack_detect_work.work);
work 303 sound/soc/codecs/rt298.c static void rt298_jack_detect_work(struct work_struct *work)
work 306 sound/soc/codecs/rt298.c container_of(work, struct rt298_priv, jack_detect_work.work);
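Note: cs43130.c above queues its impedance-measurement work on a driver-private workqueue (cs43130->wq) rather than the system one, and guards the queueing with work_busy(). A sketch of that arrangement, assuming an ordered single-threaded queue is the intent (all names here are illustrative):

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    struct cs_priv {                        /* illustrative */
            struct workqueue_struct *wq;
            struct work_struct work;
    };

    static void meas_work(struct work_struct *work)
    {
            /* ... long-running measurement ... */
    }

    static int cs_init(struct cs_priv *p)
    {
            p->wq = alloc_ordered_workqueue("cs_meas", 0);
            if (!p->wq)
                    return -ENOMEM;
            INIT_WORK(&p->work, meas_work);
            return 0;
    }

    static void cs_kick(struct cs_priv *p)
    {
            if (!work_busy(&p->work))       /* skip if pending or running */
                    queue_work(p->wq, &p->work);
    }

    static void cs_exit(struct cs_priv *p)
    {
            cancel_work_sync(&p->work);
            destroy_workqueue(p->wq);
    }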
work 74 sound/soc/codecs/rt5514-spi.c static void rt5514_spi_copy_work(struct work_struct *work)
work 77 sound/soc/codecs/rt5514-spi.c container_of(work, struct rt5514_dsp, copy_work.work);
work 2191 sound/soc/codecs/rt5640.c static void rt5640_button_press_work(struct work_struct *work)
work 2194 sound/soc/codecs/rt5640.c container_of(work, struct rt5640_priv, bp_work.work);
work 2289 sound/soc/codecs/rt5640.c static void rt5640_jack_work(struct work_struct *work)
work 2292 sound/soc/codecs/rt5640.c container_of(work, struct rt5640_priv, jack_work);
work 3251 sound/soc/codecs/rt5645.c static void rt5645_jack_detect_work(struct work_struct *work)
work 3254 sound/soc/codecs/rt5645.c container_of(work, struct rt5645_priv, jack_detect_work.work);
work 3348 sound/soc/codecs/rt5645.c static void rt5645_rcclock_work(struct work_struct *work)
work 3351 sound/soc/codecs/rt5645.c container_of(work, struct rt5645_priv, rcclock_work.work);
work 1673 sound/soc/codecs/rt5651.c static void rt5651_button_press_work(struct work_struct *work)
work 1676 sound/soc/codecs/rt5651.c container_of(work, struct rt5651_priv, bp_work.work);
work 1781 sound/soc/codecs/rt5651.c static void rt5651_jack_detect_work(struct work_struct *work)
work 1784 sound/soc/codecs/rt5651.c container_of(work, struct rt5651_priv, jack_detect_work);
work 1390 sound/soc/codecs/rt5659.c static void rt5659_jack_detect_work(struct work_struct *work)
work 1393 sound/soc/codecs/rt5659.c container_of(work, struct rt5659_priv, jack_detect_work.work);
work 1461 sound/soc/codecs/rt5659.c static void rt5659_jack_detect_intel_hd_header(struct work_struct *work)
work 1464 sound/soc/codecs/rt5659.c container_of(work, struct rt5659_priv, jack_detect_work.work);
work 1899 sound/soc/codecs/rt5663.c static void rt5663_jack_detect_work(struct work_struct *work)
work 1902 sound/soc/codecs/rt5663.c container_of(work, struct rt5663_priv, jack_detect_work.work);
work 2000 sound/soc/codecs/rt5663.c static void rt5663_jd_unplug_work(struct work_struct *work)
work 2003 sound/soc/codecs/rt5663.c container_of(work, struct rt5663_priv, jd_unplug_work.work);
work 1243 sound/soc/codecs/rt5665.c static void rt5665_jd_check_handler(struct work_struct *work)
work 1245 sound/soc/codecs/rt5665.c struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
work 1246 sound/soc/codecs/rt5665.c jd_check_work.work);
work 1290 sound/soc/codecs/rt5665.c static void rt5665_jack_detect_handler(struct work_struct *work)
work 1293 sound/soc/codecs/rt5665.c container_of(work, struct rt5665_priv, jack_detect_work.work);
work 4747 sound/soc/codecs/rt5665.c static void rt5665_calibrate_handler(struct work_struct *work)
work 4749 sound/soc/codecs/rt5665.c struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
work 4750 sound/soc/codecs/rt5665.c calibrate_work.work);
work 953 sound/soc/codecs/rt5668.c static void rt5668_jd_check_handler(struct work_struct *work)
work 955 sound/soc/codecs/rt5668.c struct rt5668_priv *rt5668 = container_of(work, struct rt5668_priv,
work 956 sound/soc/codecs/rt5668.c jd_check_work.work);
work 1019 sound/soc/codecs/rt5668.c static void rt5668_jack_detect_handler(struct work_struct *work)
work 1022 sound/soc/codecs/rt5668.c container_of(work, struct rt5668_priv, jack_detect_work.work);
work 975 sound/soc/codecs/rt5682.c static void rt5682_jd_check_handler(struct work_struct *work)
work 977 sound/soc/codecs/rt5682.c struct rt5682_priv *rt5682 = container_of(work, struct rt5682_priv,
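Note: the rt5640 through rt5682 entries above all share the same jack-detect shape: the interrupt handler defers to a delayed work a few hundred milliseconds out, and the handler does the slow bus reads in process context. Sketched here with mod_delayed_work(), which folds the usual cancel-and-requeue into one call; the 250 ms delay and all names are assumptions, not values from these drivers:

    #include <linux/workqueue.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>

    struct rt_priv {                        /* illustrative */
            struct delayed_work jack_detect_work;
    };

    static void jack_detect_fn(struct work_struct *work)
    {
            struct rt_priv *rt = container_of(work, struct rt_priv,
                                              jack_detect_work.work);
            /* ... slow I2C reads, report jack/button state ... */
            (void)rt;
    }

    static irqreturn_t rt_irq(int irq, void *data)
    {
            struct rt_priv *rt = data;

            /* Debounce: (re)start the handler 250 ms from now. */
            mod_delayed_work(system_wq, &rt->jack_detect_work,
                             msecs_to_jiffies(250));
            return IRQ_HANDLED;
    }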
work 978 sound/soc/codecs/rt5682.c jd_check_work.work);
work 1049 sound/soc/codecs/rt5682.c static void rt5682_jack_detect_handler(struct work_struct *work)
work 1052 sound/soc/codecs/rt5682.c container_of(work, struct rt5682_priv, jack_detect_work.work);
work 392 sound/soc/codecs/sta32x.c static void sta32x_watchdog(struct work_struct *work)
work 394 sound/soc/codecs/sta32x.c struct sta32x_priv *sta32x = container_of(work, struct sta32x_priv,
work 395 sound/soc/codecs/sta32x.c watchdog_work.work);
work 217 sound/soc/codecs/tas5720.c static void tas5720_fault_check_work(struct work_struct *work)
work 219 sound/soc/codecs/tas5720.c struct tas5720_data *tas5720 = container_of(work, struct tas5720_data,
work 220 sound/soc/codecs/tas5720.c fault_check_work.work);
work 402 sound/soc/codecs/tas6424.c static void tas6424_fault_check_work(struct work_struct *work)
work 404 sound/soc/codecs/tas6424.c struct tas6424_data *tas6424 = container_of(work, struct tas6424_data,
work 405 sound/soc/codecs/tas6424.c fault_check_work.work);
work 79 sound/soc/codecs/tlv320dac33.c struct work_struct work;
work 716 sound/soc/codecs/tlv320dac33.c static void dac33_work(struct work_struct *work)
work 722 sound/soc/codecs/tlv320dac33.c dac33 = container_of(work, struct tlv320dac33_priv, work);
work 762 sound/soc/codecs/tlv320dac33.c schedule_work(&dac33->work);
work 1118 sound/soc/codecs/tlv320dac33.c schedule_work(&dac33->work);
work 1126 sound/soc/codecs/tlv320dac33.c schedule_work(&dac33->work);
work 1399 sound/soc/codecs/tlv320dac33.c INIT_WORK(&dac33->work, dac33_work);
work 1418 sound/soc/codecs/tlv320dac33.c flush_work(&dac33->work);
work 47 sound/soc/codecs/twl6040.c struct delayed_work work;
work 301 sound/soc/codecs/twl6040.c static void twl6040_accessory_work(struct work_struct *work)
work 303 sound/soc/codecs/twl6040.c struct twl6040_data *priv = container_of(work,
work 304 sound/soc/codecs/twl6040.c struct twl6040_data, hs_jack.work.work);
work 318 sound/soc/codecs/twl6040.c &priv->hs_jack.work, msecs_to_jiffies(200));
work 1114 sound/soc/codecs/twl6040.c INIT_DELAYED_WORK(&priv->hs_jack.work, twl6040_accessory_work);
work 36 sound/soc/codecs/uda1380.c struct work_struct work;
work 176 sound/soc/codecs/uda1380.c static void uda1380_flush_work(struct work_struct *work)
work 178 sound/soc/codecs/uda1380.c struct uda1380_priv *uda1380 = container_of(work, struct uda1380_priv, work);
work 518 sound/soc/codecs/uda1380.c schedule_work(&uda1380->work);
work 524 sound/soc/codecs/uda1380.c schedule_work(&uda1380->work);
work 708 sound/soc/codecs/uda1380.c INIT_WORK(&uda1380->work, uda1380_flush_work);
work 54 sound/soc/codecs/wm8350.c struct delayed_work work;
work 207 sound/soc/codecs/wm8350.c static void wm8350_pga_work(struct work_struct *work)
work 210 sound/soc/codecs/wm8350.c container_of(work, struct wm8350_data, pga_work.work);
work 1255 sound/soc/codecs/wm8350.c static void wm8350_hpl_work(struct work_struct *work)
work 1258 sound/soc/codecs/wm8350.c container_of(work, struct wm8350_data, hpl.work.work);
work 1263 sound/soc/codecs/wm8350.c static void wm8350_hpr_work(struct work_struct *work)
work 1266 sound/soc/codecs/wm8350.c container_of(work, struct wm8350_data, hpr.work.work);
work 1284 sound/soc/codecs/wm8350.c &priv->hpl.work, msecs_to_jiffies(200));
work 1302 sound/soc/codecs/wm8350.c &priv->hpr.work, msecs_to_jiffies(200));
work 1490 sound/soc/codecs/wm8350.c INIT_DELAYED_WORK(&priv->hpl.work, wm8350_hpl_work);
work 1491 sound/soc/codecs/wm8350.c INIT_DELAYED_WORK(&priv->hpr.work, wm8350_hpr_work);
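Note: wm8350.c above nests its delayed_work one struct deeper (hpl.work.work, hpr.work.work): the outer driver data holds a per-output sub-struct, which holds the delayed_work, which holds the work_struct. container_of() handles any depth as long as the full member path is named. A sketch of that nesting with placeholder types:

    #include <linux/workqueue.h>

    struct hp_side {                        /* placeholder sub-struct */
            struct delayed_work work;
    };

    struct wm_data {                        /* placeholder outer struct */
            struct hp_side hpl, hpr;
    };

    static void hpl_work_fn(struct work_struct *work)
    {
            /* Path: wm_data -> hpl (hp_side) -> work (delayed) -> work. */
            struct wm_data *d = container_of(work, struct wm_data,
                                             hpl.work.work);
            (void)d;
    }

    static void wm_init(struct wm_data *d)
    {
            INIT_DELAYED_WORK(&d->hpl.work, hpl_work_fn);
    }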
work 1570 sound/soc/codecs/wm8350.c cancel_delayed_work_sync(&priv->hpl.work);
work 1571 sound/soc/codecs/wm8350.c cancel_delayed_work_sync(&priv->hpr.work);
work 1319 sound/soc/codecs/wm8753.c static void wm8753_charge_work(struct work_struct *work)
work 1322 sound/soc/codecs/wm8753.c container_of(work, struct wm8753_priv, charge_work.work);
work 2976 sound/soc/codecs/wm8962.c static void wm8962_mic_work(struct work_struct *work)
work 2978 sound/soc/codecs/wm8962.c struct wm8962_priv *wm8962 = container_of(work,
work 2980 sound/soc/codecs/wm8962.c mic_work.work);
work 3153 sound/soc/codecs/wm8962.c static void wm8962_beep_work(struct work_struct *work)
work 3156 sound/soc/codecs/wm8962.c container_of(work, struct wm8962_priv, beep_work);
work 551 sound/soc/codecs/wm8971.c static void wm8971_charge_work(struct work_struct *work)
work 554 sound/soc/codecs/wm8971.c container_of(work, struct wm8971_priv, charge_work.work);
work 3426 sound/soc/codecs/wm8994.c static void wm8994_mic_work(struct work_struct *work)
work 3428 sound/soc/codecs/wm8994.c struct wm8994_priv *priv = container_of(work,
work 3430 sound/soc/codecs/wm8994.c mic_work.work);
work 3552 sound/soc/codecs/wm8994.c static void wm8958_open_circuit_work(struct work_struct *work)
work 3554 sound/soc/codecs/wm8994.c struct wm8994_priv *wm8994 = container_of(work,
work 3556 sound/soc/codecs/wm8994.c open_circuit_work.work);
work 3624 sound/soc/codecs/wm8994.c static void wm1811_mic_work(struct work_struct *work)
work 3626 sound/soc/codecs/wm8994.c struct wm8994_priv *wm8994 = container_of(work, struct wm8994_priv,
work 3627 sound/soc/codecs/wm8994.c mic_work.work);
work 3748 sound/soc/codecs/wm8994.c static void wm1811_jackdet_bootstrap(struct work_struct *work)
work 3750 sound/soc/codecs/wm8994.c struct wm8994_priv *wm8994 = container_of(work,
work 3752 sound/soc/codecs/wm8994.c jackdet_bootstrap.work);
work 3861 sound/soc/codecs/wm8994.c static void wm8958_mic_work(struct work_struct *work)
work 3863 sound/soc/codecs/wm8994.c struct wm8994_priv *wm8994 = container_of(work,
work 3865 sound/soc/codecs/wm8994.c mic_complete_work.work);
work 1240 sound/soc/codecs/wm_adsp.c struct work_struct work;
work 1388 sound/soc/codecs/wm_adsp.c static void wm_adsp_ctl_work(struct work_struct *work)
work 1390 sound/soc/codecs/wm_adsp.c struct wmfw_ctl_work *ctl_work = container_of(work,
work 1392 sound/soc/codecs/wm_adsp.c work);
work 1504 sound/soc/codecs/wm_adsp.c INIT_WORK(&ctl_work->work, wm_adsp_ctl_work);
work 1505 sound/soc/codecs/wm_adsp.c schedule_work(&ctl_work->work);
work 2911 sound/soc/codecs/wm_adsp.c static void wm_adsp_boot_work(struct work_struct *work)
work 2913 sound/soc/codecs/wm_adsp.c struct wm_adsp *dsp = container_of(work,
work 192 sound/soc/intel/atom/sst/sst.c void sst_process_pending_msg(struct work_struct *work)
work 194 sound/soc/intel/atom/sst/sst.c struct intel_sst_drv *ctx = container_of(work,
work 492 sound/soc/intel/atom/sst/sst.h void sst_process_pending_msg(struct work_struct *work);
work 45 sound/soc/intel/baytrail/sst-baytrail-pcm.c struct work_struct work;
work 158 sound/soc/intel/baytrail/sst-baytrail-pcm.c static void sst_byt_pcm_work(struct work_struct *work)
work 161 sound/soc/intel/baytrail/sst-baytrail-pcm.c container_of(work, struct sst_byt_pcm_data, work);
work 184 sound/soc/intel/baytrail/sst-baytrail-pcm.c schedule_work(&pcm_data->work);
work 283 sound/soc/intel/baytrail/sst-baytrail-pcm.c cancel_work_sync(&pcm_data->work);
work 374 sound/soc/intel/baytrail/sst-baytrail-pcm.c INIT_WORK(&priv_data->pcm[i].work, sst_byt_pcm_work);
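Note: wm_adsp.c above allocates a wmfw_ctl_work per control, INIT_WORK()s and schedules it, and the handler frees its own containing object when done. That is legal: the workqueue core no longer touches a work item once its function has started, so the function may free it. A fire-and-forget sketch (names assumed):

    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct ctl_work {                       /* illustrative one-shot item */
            struct work_struct work;
            int arg;
    };

    static void ctl_work_fn(struct work_struct *work)
    {
            struct ctl_work *cw = container_of(work, struct ctl_work, work);

            /* ... consume cw->arg ... */

            kfree(cw);      /* handler owns the item; safe to free here */
    }

    static int ctl_work_fire(int arg)
    {
            struct ctl_work *cw = kzalloc(sizeof(*cw), GFP_KERNEL);

            if (!cw)
                    return -ENOMEM;
            cw->arg = arg;
            INIT_WORK(&cw->work, ctl_work_fn);
            schedule_work(&cw->work);
            return 0;
    }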
work 152 sound/soc/intel/common/sst-ipc.c static void ipc_tx_msgs(struct work_struct *work)
work 155 sound/soc/intel/common/sst-ipc.c container_of(work, struct sst_generic_ipc, kwork);
work 466 sound/soc/intel/haswell/sst-haswell-ipc.c static void hsw_notification_work(struct work_struct *work)
work 468 sound/soc/intel/haswell/sst-haswell-ipc.c struct sst_hsw_stream *stream = container_of(work,
work 287 sound/soc/intel/skylake/bxt-sst.c static void bxt_set_dsp_D0i3(struct work_struct *work)
work 291 sound/soc/intel/skylake/bxt-sst.c struct skl_dev *skl = container_of(work,
work 292 sound/soc/intel/skylake/bxt-sst.c struct skl_dev, d0i3.work.work);
work 342 sound/soc/intel/skylake/bxt-sst.c schedule_delayed_work(&d0i3->work,
work 358 sound/soc/intel/skylake/bxt-sst.c cancel_delayed_work_sync(&skl->d0i3.work);
work 587 sound/soc/intel/skylake/bxt-sst.c INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
work 348 sound/soc/intel/skylake/skl-messages.c dwork = &skl->d0i3.work;
work 350 sound/soc/intel/skylake/skl-messages.c if (dwork->work.func) {
work 59 sound/soc/intel/skylake/skl-sst-ipc.h struct delayed_work work;
work 797 sound/soc/intel/skylake/skl.c static void skl_probe_work(struct work_struct *work)
work 799 sound/soc/intel/skylake/skl.c struct skl_dev *skl = container_of(work, struct skl_dev, probe_work);
work 221 sound/soc/soc-compress.c static void close_delayed_work(struct work_struct *work)
work 224 sound/soc/soc-compress.c container_of(work, struct snd_soc_pcm_runtime, delayed_work.work);
work 576 sound/soc/soc-core.c static void soc_resume_deferred(struct work_struct *work)
work 579 sound/soc/soc-core.c container_of(work, struct snd_soc_card,
work 282 sound/soc/soc-jack.c queue_delayed_work(system_power_efficient_wq, &gpio->work,
work 289 sound/soc/soc-jack.c static void gpio_work(struct work_struct *work)
work 293 sound/soc/soc-jack.c gpio = container_of(work, struct snd_soc_jack_gpio, work.work);
work 311 sound/soc/soc-jack.c queue_delayed_work(system_power_efficient_wq, &gpio->work, 0);
work 327 sound/soc/soc-jack.c cancel_delayed_work_sync(&gpios[i].work);
work 404 sound/soc/soc-jack.c INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
work 435 sound/soc/soc-jack.c schedule_delayed_work(&gpios[i].work,
work 650 sound/soc/soc-pcm.c static void close_delayed_work(struct work_struct *work)
work 653 sound/soc/soc-pcm.c container_of(work, struct snd_soc_pcm_runtime, delayed_work.work);
work 673 sound/soc/soc-pcm.c static void codec2codec_close_delayed_work(struct work_struct *work)
work 461 sound/soc/sof/core.c static void sof_probe_work(struct work_struct *work)
work 464 sound/soc/sof/core.c container_of(work, struct snd_sof_dev, probe_work);
work 58 sound/soc/sof/pcm.c static void sof_pcm_period_elapsed_work(struct work_struct *work)
work 61 sound/soc/sof/pcm.c container_of(work, struct snd_sof_pcm_stream,
work 708 sound/usb/line6/driver.c static void line6_startup_work(struct work_struct *work)
work 711 sound/usb/line6/driver.c container_of(work, struct usb_line6, startup_work.work);
work 227 sound/usb/mixer_scarlett_gen2.c struct delayed_work work;
work 698 sound/usb/mixer_scarlett_gen2.c static void scarlett2_config_save_work(struct work_struct *work)
work 701 sound/usb/mixer_scarlett_gen2.c container_of(work, struct scarlett2_mixer_data, work.work);
work 725 sound/usb/mixer_scarlett_gen2.c cancel_delayed_work_sync(&private->work);
work 745 sound/usb/mixer_scarlett_gen2.c schedule_delayed_work(&private->work, msecs_to_jiffies(2000));
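Note: soc-jack.c above queues its GPIO debounce on system_power_efficient_wq rather than the default system_wq. That queue is created with WQ_POWER_EFFICIENT, so when power-efficient workqueues are enabled (config default or the workqueue.power_efficient boot parameter) it behaves as unbound and the scheduler may run the work on an already-awake CPU. A minimal sketch:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void gpio_work_fn(struct work_struct *work)
    {
            /* ... read the GPIO, report the jack state ... */
    }

    static DECLARE_DELAYED_WORK(gpio_work, gpio_work_fn);

    static void gpio_debounce(unsigned int debounce_ms)
    {
            /* Unbound when power-efficient workqueues are enabled. */
            queue_delayed_work(system_power_efficient_wq, &gpio_work,
                               msecs_to_jiffies(debounce_ms));
    }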
work 1726 sound/usb/mixer_scarlett_gen2.c cancel_delayed_work_sync(&private->work);
work 1735 sound/usb/mixer_scarlett_gen2.c if (cancel_delayed_work_sync(&private->work))
work 1818 sound/usb/mixer_scarlett_gen2.c INIT_DELAYED_WORK(&private->work, scarlett2_config_save_work);
work 1563 sound/x86/intel_hdmi_audio.c static void had_audio_wq(struct work_struct *work)
work 1566 sound/x86/intel_hdmi_audio.c container_of(work, struct snd_intelhad, hdmi_audio_wq);
work 443 tools/include/uapi/linux/pkt_sched.h __u64 work; /* total work done */
work 213 tools/testing/nvdimm/test/nfit.c struct work_struct work;
work 814 tools/testing/nvdimm/test/nfit.c static void uc_error_notify(struct work_struct *work)
work 816 tools/testing/nvdimm/test/nfit.c struct nfit_test *t = container_of(work, typeof(*t), work);
work 842 tools/testing/nvdimm/test/nfit.c queue_work(nfit_wq, &t->work);
work 3240 tools/testing/nvdimm/test/nfit.c INIT_WORK(&nfit_test->work, uc_error_notify);
work 16 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h typedef void (*work_func_t)(struct work_struct *work);
work 40 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h struct work_struct work;
work 49 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h static inline bool schedule_work(struct work_struct *work)
work 55 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h static inline bool schedule_work_on(int cpu, struct work_struct *work)
work 62 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h struct work_struct *work)
work 82 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h #define INIT_DELAYED_WORK(w, f) INIT_WORK(&(w)->work, (f))
work 92 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h .work = __WORK_INITIALIZER((n).work, (f)), \
work 21 virt/kvm/async_pf.c struct kvm_async_pf *work)
work 24 virt/kvm/async_pf.c kvm_arch_async_page_present(vcpu, work);
work 28 virt/kvm/async_pf.c struct kvm_async_pf *work)
work 31 virt/kvm/async_pf.c kvm_arch_async_page_present(vcpu, work);
work 60 virt/kvm/async_pf.c static void async_pf_execute(struct work_struct *work)
work 63 virt/kvm/async_pf.c container_of(work, struct kvm_async_pf, work);
work 110 virt/kvm/async_pf.c struct kvm_async_pf *work =
work 112 virt/kvm/async_pf.c typeof(*work), queue);
work 113 virt/kvm/async_pf.c list_del(&work->queue);
work 119 virt/kvm/async_pf.c if (!work->vcpu)
work 124 virt/kvm/async_pf.c flush_work(&work->work);
work 126 virt/kvm/async_pf.c if (cancel_work_sync(&work->work)) {
work 127 virt/kvm/async_pf.c mmput(work->mm);
work 129 virt/kvm/async_pf.c kmem_cache_free(async_pf_cache, work);
work 136 virt/kvm/async_pf.c struct kvm_async_pf *work =
work 138 virt/kvm/async_pf.c typeof(*work), link);
work 139 virt/kvm/async_pf.c list_del(&work->link);
work 140 virt/kvm/async_pf.c kmem_cache_free(async_pf_cache, work);
work 149 virt/kvm/async_pf.c struct kvm_async_pf *work;
work 154 virt/kvm/async_pf.c work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
work 156 virt/kvm/async_pf.c list_del(&work->link);
work 159 virt/kvm/async_pf.c kvm_arch_async_page_ready(vcpu, work);
work 160 virt/kvm/async_pf.c kvm_async_page_present_async(vcpu, work);
work 162 virt/kvm/async_pf.c list_del(&work->queue);
work 164 virt/kvm/async_pf.c kmem_cache_free(async_pf_cache, work);
work 171 virt/kvm/async_pf.c struct kvm_async_pf *work;
work 182 virt/kvm/async_pf.c work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
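Note: virt/kvm/async_pf.c above is worth reading as a complete lifecycle: kmem_cache_zalloc(..., GFP_NOWAIT | __GFP_NOWARN), INIT_WORK() plus schedule_work() with a rollback path, and a teardown that distinguishes flush_work() (wait for completion) from cancel_work_sync(), whose return value tells the caller whether the handler ever ran, and therefore who must drop the handler's references (the mmput() at line 127). A reduced sketch of that last point (pf_item and all names are illustrative):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct pf_item {                        /* illustrative */
            struct work_struct work;
            void *ref;                      /* resource the handler releases */
    };

    static void pf_execute(struct work_struct *work)
    {
            struct pf_item *it = container_of(work, struct pf_item, work);

            /* ... do the asynchronous job ... */
            kfree(it->ref);                 /* handler drops its reference */
    }

    static void pf_teardown(struct pf_item *it)
    {
            /*
             * Returns true when the item was cancelled while still
             * pending, i.e. pf_execute() never ran: the canceller must
             * then drop the handler's reference itself.
             */
            if (cancel_work_sync(&it->work))
                    kfree(it->ref);
            kfree(it);
    }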
work 183 virt/kvm/async_pf.c if (!work)
work 186 virt/kvm/async_pf.c work->wakeup_all = false;
work 187 virt/kvm/async_pf.c work->vcpu = vcpu;
work 188 virt/kvm/async_pf.c work->cr2_or_gpa = cr2_or_gpa;
work 189 virt/kvm/async_pf.c work->addr = hva;
work 190 virt/kvm/async_pf.c work->arch = *arch;
work 191 virt/kvm/async_pf.c work->mm = current->mm;
work 192 virt/kvm/async_pf.c mmget(work->mm);
work 193 virt/kvm/async_pf.c kvm_get_kvm(work->vcpu->kvm);
work 197 virt/kvm/async_pf.c if (unlikely(kvm_is_error_hva(work->addr)))
work 200 virt/kvm/async_pf.c INIT_WORK(&work->work, async_pf_execute);
work 201 virt/kvm/async_pf.c if (!schedule_work(&work->work))
work 204 virt/kvm/async_pf.c list_add_tail(&work->queue, &vcpu->async_pf.queue);
work 206 virt/kvm/async_pf.c kvm_arch_async_page_not_present(vcpu, work);
work 209 virt/kvm/async_pf.c kvm_put_kvm(work->vcpu->kvm);
work 210 virt/kvm/async_pf.c mmput(work->mm);
work 211 virt/kvm/async_pf.c kmem_cache_free(async_pf_cache, work);
work 217 virt/kvm/async_pf.c struct kvm_async_pf *work;
work 222 virt/kvm/async_pf.c work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
work 223 virt/kvm/async_pf.c if (!work)
work 226 virt/kvm/async_pf.c work->wakeup_all = true;
work 227 virt/kvm/async_pf.c INIT_LIST_HEAD(&work->queue); /* for list_del to work */
work 230 virt/kvm/async_pf.c list_add_tail(&work->link, &vcpu->async_pf.done);
work 42 virt/kvm/eventfd.c irqfd_inject(struct work_struct *work)
work 45 virt/kvm/eventfd.c container_of(work, struct kvm_kernel_irqfd, inject);
work 112 virt/kvm/eventfd.c irqfd_shutdown(struct work_struct *work)
work 115 virt/kvm/eventfd.c container_of(work, struct kvm_kernel_irqfd, shutdown);
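Note: the closing eventfd.c entries queue irqfd_inject from an eventfd wakeup callback, which runs under the waitqueue lock with interrupts off. That is fine: schedule_work()/queue_work() never sleep, so work may be queued from interrupt or other atomic context and the heavy lifting deferred to the worker. A simplified sketch of queueing from a wait-queue callback (names assumed):

    #include <linux/workqueue.h>
    #include <linux/wait.h>

    struct irq_ctx {                        /* illustrative */
            wait_queue_entry_t wait;
            struct work_struct inject;
    };

    static int irq_wakeup(wait_queue_entry_t *wait, unsigned int mode,
                          int sync, void *key)
    {
            struct irq_ctx *ctx = container_of(wait, struct irq_ctx, wait);

            /* Atomic context: queueing is safe, the work runs later. */
            schedule_work(&ctx->inject);
            return 0;
    }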