wrk 45 drivers/dma-buf/dma-fence-array.c static void irq_dma_fence_array_work(struct irq_work *wrk)
wrk 47 drivers/dma-buf/dma-fence-array.c struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
wrk 123 drivers/gpu/drm/i915/i915_request.c static void irq_execute_cb(struct irq_work *wrk)
wrk 125 drivers/gpu/drm/i915/i915_request.c struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
wrk 131 drivers/gpu/drm/i915/i915_request.c static void irq_execute_cb_hook(struct irq_work *wrk)
wrk 133 drivers/gpu/drm/i915/i915_request.c struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
wrk 139 drivers/gpu/drm/i915/i915_request.c irq_execute_cb(wrk);
wrk 563 drivers/gpu/drm/i915/i915_request.c static void irq_semaphore_cb(struct irq_work *wrk)
wrk 566 drivers/gpu/drm/i915/i915_request.c container_of(wrk, typeof(*rq), semaphore_work);
wrk 423 drivers/gpu/drm/i915/i915_sw_fence.c static void irq_i915_sw_fence_work(struct irq_work *wrk)
wrk 426 drivers/gpu/drm/i915/i915_sw_fence.c container_of(wrk, typeof(*cb), work);
wrk 84 drivers/gpu/drm/i915/intel_wakeref.c static void __intel_wakeref_put_work(struct work_struct *wrk)
wrk 86 drivers/gpu/drm/i915/intel_wakeref.c struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
wrk 451 drivers/hv/hv_balloon.c struct work_struct wrk;
wrk 457 drivers/hv/hv_balloon.c struct work_struct wrk;
wrk 1510 drivers/hv/hv_balloon.c schedule_work(&dm_device.balloon_wrk.wrk);
wrk 1545 drivers/hv/hv_balloon.c schedule_work(&dm_device.ha_wrk.wrk);
wrk 1680 drivers/hv/hv_balloon.c INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
wrk 1681 drivers/hv/hv_balloon.c INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
wrk 1725 drivers/hv/hv_balloon.c cancel_work_sync(&dm->balloon_wrk.wrk);
wrk 1726 drivers/hv/hv_balloon.c cancel_work_sync(&dm->ha_wrk.wrk);
wrk 12 drivers/mtd/ubi/fastmap-wl.c static void update_fastmap_work_fn(struct work_struct *wrk)
wrk 14 drivers/mtd/ubi/fastmap-wl.c struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
wrk 279 drivers/mtd/ubi/fastmap-wl.c struct ubi_work *wrk;
wrk 289 drivers/mtd/ubi/fastmap-wl.c wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
wrk 290 drivers/mtd/ubi/fastmap-wl.c if (!wrk) {
wrk 297 drivers/mtd/ubi/fastmap-wl.c wrk->anchor = 1;
wrk 298 drivers/mtd/ubi/fastmap-wl.c wrk->func = &wear_leveling_worker;
wrk 299 drivers/mtd/ubi/fastmap-wl.c __schedule_ubi_work(ubi, wrk);
wrk 347 drivers/mtd/ubi/fastmap-wl.c int ubi_is_erase_work(struct ubi_work *wrk)
wrk 349 drivers/mtd/ubi/fastmap-wl.c return wrk->func == erase_worker;
wrk 802 drivers/mtd/ubi/ubi.h int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
wrk 916 drivers/mtd/ubi/ubi.h int ubi_is_erase_work(struct ubi_work *wrk);
wrk 191 drivers/mtd/ubi/wl.c struct ubi_work *wrk;
wrk 209 drivers/mtd/ubi/wl.c wrk = list_entry(ubi->works.next, struct ubi_work, list);
wrk 210 drivers/mtd/ubi/wl.c list_del(&wrk->list);
wrk 220 drivers/mtd/ubi/wl.c err = wrk->func(ubi, wrk, 0);
wrk 550 drivers/mtd/ubi/wl.c static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
wrk 553 drivers/mtd/ubi/wl.c list_add_tail(&wrk->list, &ubi->works);
wrk 569 drivers/mtd/ubi/wl.c static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
wrk 572 drivers/mtd/ubi/wl.c __schedule_ubi_work(ubi, wrk);
wrk 654 drivers/mtd/ubi/wl.c static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
wrk 660 drivers/mtd/ubi/wl.c int anchor = wrk->anchor;
wrk 667 drivers/mtd/ubi/wl.c kfree(wrk);
wrk 1014 drivers/mtd/ubi/wl.c struct ubi_work *wrk;
wrk 1048 drivers/mtd/ubi/wl.c wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
wrk 1049 drivers/mtd/ubi/wl.c if (!wrk) {
wrk 1054 drivers/mtd/ubi/wl.c wrk->anchor = 0;
wrk 1055 drivers/mtd/ubi/wl.c wrk->func = &wear_leveling_worker;
wrk 1057 drivers/mtd/ubi/wl.c __schedule_ubi_work(ubi, wrk);
wrk 1059 drivers/mtd/ubi/wl.c schedule_ubi_work(ubi, wrk);
wrk 1398 drivers/mtd/ubi/wl.c struct ubi_work *wrk, *tmp;
wrk 1403 drivers/mtd/ubi/wl.c list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
wrk 1404 drivers/mtd/ubi/wl.c if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
wrk 1405 drivers/mtd/ubi/wl.c (lnum == UBI_ALL || wrk->lnum == lnum)) {
wrk 1406 drivers/mtd/ubi/wl.c list_del(&wrk->list);
wrk 1411 drivers/mtd/ubi/wl.c err = wrk->func(ubi, wrk, 0);
wrk 1677 drivers/mtd/ubi/wl.c struct ubi_work *wrk;
wrk 1679 drivers/mtd/ubi/wl.c wrk = list_entry(ubi->works.next, struct ubi_work, list);
wrk 1680 drivers/mtd/ubi/wl.c list_del(&wrk->list);
wrk 1681 drivers/mtd/ubi/wl.c wrk->func(ubi, wrk, 1);
wrk 6 drivers/mtd/ubi/wl.h static void update_fastmap_work_fn(struct work_struct *wrk);
wrk 506 drivers/pci/controller/pci-hyperv.c struct work_struct wrk;
wrk 533 drivers/pci/controller/pci-hyperv.c struct work_struct wrk;
wrk 1984 drivers/pci/controller/pci-hyperv.c dr_wrk = container_of(work, struct hv_dr_work, wrk);
wrk 2122 drivers/pci/controller/pci-hyperv.c INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
wrk 2145 drivers/pci/controller/pci-hyperv.c queue_work(hbus->wq, &dr_wrk->wrk);
wrk 2171 drivers/pci/controller/pci-hyperv.c hpdev = container_of(work, struct hv_pci_dev, wrk);
wrk 2228 drivers/pci/controller/pci-hyperv.c INIT_WORK(&hpdev->wrk, hv_eject_device_work);
wrk 2230 drivers/pci/controller/pci-hyperv.c queue_work(hpdev->hbus->wq, &hpdev->wrk);
wrk 497 drivers/scsi/storvsc_drv.c struct storvsc_scan_work *wrk;
wrk 500 drivers/scsi/storvsc_drv.c wrk = container_of(work, struct storvsc_scan_work, work);
wrk 502 drivers/scsi/storvsc_drv.c sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
wrk 509 drivers/scsi/storvsc_drv.c kfree(wrk);
wrk 543 drivers/scsi/storvsc_drv.c struct storvsc_scan_work *wrk;
wrk 546 drivers/scsi/storvsc_drv.c wrk = container_of(work, struct storvsc_scan_work, work);
wrk 547 drivers/scsi/storvsc_drv.c if (!scsi_host_get(wrk->host))
wrk 550 drivers/scsi/storvsc_drv.c sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
wrk 556 drivers/scsi/storvsc_drv.c scsi_host_put(wrk->host);
wrk 559 drivers/scsi/storvsc_drv.c kfree(wrk);
wrk 927 drivers/scsi/storvsc_drv.c struct storvsc_scan_work *wrk;
wrk 987 drivers/scsi/storvsc_drv.c wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
wrk 988 drivers/scsi/storvsc_drv.c if (!wrk) {
wrk 993 drivers/scsi/storvsc_drv.c wrk->host = host;
wrk 994 drivers/scsi/storvsc_drv.c wrk->lun = vm_srb->lun;
wrk 995 drivers/scsi/storvsc_drv.c wrk->tgt_id = vm_srb->target_id;
wrk 996 drivers/scsi/storvsc_drv.c INIT_WORK(&wrk->work, process_err_fn);
wrk 997 drivers/scsi/storvsc_drv.c queue_work(host_dev->handle_error_wq, &wrk->work);
wrk 629 kernel/power/swap.c unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
wrk 653 kernel/power/swap.c d->wrk);
wrk 170 net/core/link_watch.c LIST_HEAD(wrk);
wrk 192 net/core/link_watch.c list_splice_init(&lweventlist, &wrk);
wrk 194 net/core/link_watch.c while (!list_empty(&wrk) && do_dev > 0) {
wrk 196 net/core/link_watch.c dev = list_first_entry(&wrk, struct net_device, link_watch_list);
wrk 210 net/core/link_watch.c list_splice_init(&wrk, &lweventlist);
wrk 202 net/smc/smc_ism.c static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
wrk 206 net/smc/smc_ism.c ev_info.info = wrk->event.info;
wrk 207 net/smc/smc_ism.c switch (wrk->event.code) {
wrk 209 net/smc/smc_ism.c smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
wrk 214 net/smc/smc_ism.c wrk->smcd->ops->signal_event(wrk->smcd,
wrk 215 net/smc/smc_ism.c wrk->event.tok,
wrk 242 net/smc/smc_ism.c struct smc_ism_event_work *wrk =
wrk 245 net/smc/smc_ism.c switch (wrk->event.type) {
wrk 247 net/smc/smc_ism.c smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
wrk 252 net/smc/smc_ism.c smcd_handle_sw_event(wrk);
wrk 255 net/smc/smc_ism.c kfree(wrk);
wrk 343 net/smc/smc_ism.c struct smc_ism_event_work *wrk;
wrk 346 net/smc/smc_ism.c wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
wrk 347 net/smc/smc_ism.c if (!wrk)
wrk 349 net/smc/smc_ism.c INIT_WORK(&wrk->work, smc_ism_event_work);
wrk 350 net/smc/smc_ism.c wrk->smcd = smcd;
wrk 351 net/smc/smc_ism.c wrk->event = *event;
wrk 352 net/smc/smc_ism.c queue_work(smcd->event_wq, &wrk->work);
wrk 389 net/smc/smc_llc.c struct smc_llc_send_work *wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
wrk 391 net/smc/smc_llc.c if (!wrk)
wrk 393 net/smc/smc_llc.c INIT_WORK(&wrk->work, smc_llc_send_message_work);
wrk 394 net/smc/smc_llc.c wrk->link = link;
wrk 395 net/smc/smc_llc.c wrk->llclen = llclen;
wrk 396 net/smc/smc_llc.c memcpy(&wrk->llcbuf, llcbuf, llclen);
wrk 397 net/smc/smc_llc.c queue_work(link->llc_wq, &wrk->work);
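
The dma-fence-array and i915 entries in the listing above all use the same irq_work idiom: a struct irq_work is embedded in a larger object, and the callback recovers the container with container_of(). A minimal sketch of that idiom follows; my_object, my_object_work_fn and my_object_init are hypothetical names for illustration, not taken from any file listed here.

#include <linux/irq_work.h>
#include <linux/kernel.h>

struct my_object {
	int state;
	struct irq_work work;	/* embedded, cf. dma_fence_array::work */
};

static void my_object_work_fn(struct irq_work *wrk)
{
	/* Map the embedded member back to its container. */
	struct my_object *obj = container_of(wrk, struct my_object, work);

	obj->state++;		/* deferred processing in irq_work context */
}

static void my_object_init(struct my_object *obj)
{
	obj->state = 0;
	init_irq_work(&obj->work, my_object_work_fn);
}

/* From a hard-IRQ path: irq_work_queue(&obj->work); */

Because the callback gets only the struct irq_work pointer, container_of() is the standard way to reach the surrounding object without a separate lookup table.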
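The drivers/mtd/ubi entries show a different shape: UBI keeps its own list of ubi_work items (ubi->works), each carrying a func pointer, enqueued by __schedule_ubi_work()/schedule_ubi_work() and popped one at a time by the background thread, which runs wrk->func(ubi, wrk, ...). A reduced sketch of that hand-rolled work list, under hypothetical names (my_dev and my_work stand in for ubi_device and ubi_work):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_dev;

struct my_work {
	struct list_head list;
	int (*func)(struct my_dev *dev, struct my_work *wrk, int shutdown);
};

struct my_dev {
	spinlock_t lock;
	struct list_head works;
};

static void my_dev_init(struct my_dev *dev)
{
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->works);
}

static void my_schedule_work(struct my_dev *dev, struct my_work *wrk)
{
	spin_lock(&dev->lock);
	list_add_tail(&wrk->list, &dev->works);	/* cf. __schedule_ubi_work() */
	spin_unlock(&dev->lock);
}

static int my_do_one_work(struct my_dev *dev)
{
	struct my_work *wrk;

	spin_lock(&dev->lock);
	if (list_empty(&dev->works)) {
		spin_unlock(&dev->lock);
		return 0;
	}
	wrk = list_first_entry(&dev->works, struct my_work, list);
	list_del(&wrk->list);
	spin_unlock(&dev->lock);

	/* The worker function is expected to free wrk, as
	 * wear_leveling_worker() does with kfree(wrk) above. */
	return wrk->func(dev, wrk, 0);
}

Carrying the function pointer in the item itself is what lets code like ubi_is_erase_work() classify pending work by comparing wrk->func.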
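Finally, the hv_balloon, pci-hyperv, storvsc and smc entries use the generic workqueue API: allocate the work item with GFP_ATOMIC in the notification path, copy the payload into it, INIT_WORK() and queue_work() it to a dedicated workqueue, and kfree() it at the end of the handler. A sketch of that pattern, with placeholder names (my_event, my_event_work, my_event_notify) that do not come from the listed files:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_event {
	u32 code;
	u64 token;
};

struct my_event_work {
	struct work_struct work;
	struct my_event event;	/* copied by value, cf. wrk->event = *event */
};

static void my_event_work_fn(struct work_struct *work)
{
	struct my_event_work *wrk =
		container_of(work, struct my_event_work, work);

	/* ... dispatch on wrk->event.code in process context ... */

	kfree(wrk);	/* handler owns and frees the allocation */
}

/* Called from an atomic event callback. */
static void my_event_notify(struct workqueue_struct *event_wq,
			    const struct my_event *event)
{
	struct my_event_work *wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);

	if (!wrk)
		return;
	INIT_WORK(&wrk->work, my_event_work_fn);
	wrk->event = *event;	/* snapshot: caller's buffer may be reused */
	queue_work(event_wq, &wrk->work);
}

Copying the event into the allocation is what makes the deferral safe: by the time the work runs, the caller's event buffer may already have been reused.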