rwork 134 drivers/input/mouse/psmouse-smbus.c struct psmouse_smbus_removal_work *rwork =
rwork 137 drivers/input/mouse/psmouse-smbus.c dev_dbg(&rwork->client->dev, "destroying SMBus companion device\n");
rwork 138 drivers/input/mouse/psmouse-smbus.c i2c_unregister_device(rwork->client);
rwork 140 drivers/input/mouse/psmouse-smbus.c kfree(rwork);
rwork 155 drivers/input/mouse/psmouse-smbus.c struct psmouse_smbus_removal_work *rwork;
rwork 157 drivers/input/mouse/psmouse-smbus.c rwork = kzalloc(sizeof(*rwork), GFP_KERNEL);
rwork 158 drivers/input/mouse/psmouse-smbus.c if (rwork) {
rwork 159 drivers/input/mouse/psmouse-smbus.c INIT_WORK(&rwork->work, psmouse_smbus_remove_i2c_device);
rwork 160 drivers/input/mouse/psmouse-smbus.c rwork->client = client;
rwork 162 drivers/input/mouse/psmouse-smbus.c schedule_work(&rwork->work);
rwork 123 fs/dlm/lowcomms.c struct work_struct rwork; /* Receive workqueue */
rwork 220 fs/dlm/lowcomms.c INIT_WORK(&con->rwork, process_recv_sockets);
rwork 417 fs/dlm/lowcomms.c queue_work(recv_workqueue, &con->rwork);
rwork 597 fs/dlm/lowcomms.c if (rx && !closing && cancel_work_sync(&con->rwork)) {
rwork 709 fs/dlm/lowcomms.c queue_work(recv_workqueue, &con->rwork);
rwork 805 fs/dlm/lowcomms.c INIT_WORK(&othercon->rwork, process_recv_sockets);
rwork 840 fs/dlm/lowcomms.c queue_work(recv_workqueue, &addcon->rwork);
rwork 925 fs/dlm/lowcomms.c INIT_WORK(&othercon->rwork, process_recv_sockets);
rwork 957 fs/dlm/lowcomms.c queue_work(recv_workqueue, &addcon->rwork);
rwork 1601 fs/dlm/lowcomms.c struct connection *con = container_of(work, struct connection, rwork);
rwork 452 include/linux/workqueue.h extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
rwork 468 include/linux/workqueue.h extern bool flush_rcu_work(struct rcu_work *rwork);
rwork 36 include/net/pkt_cls.h bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
rwork 1726 kernel/workqueue.c struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
rwork 1730 kernel/workqueue.c __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
rwork 1744 kernel/workqueue.c bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
rwork 1746 kernel/workqueue.c struct work_struct *work = &rwork->work;
rwork 1749 kernel/workqueue.c rwork->wq = wq;
rwork 1750 kernel/workqueue.c call_rcu(&rwork->rcu, rcu_work_rcufn);
rwork 3206 kernel/workqueue.c bool flush_rcu_work(struct rcu_work *rwork)
rwork 3208 kernel/workqueue.c if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
rwork 3210 kernel/workqueue.c flush_work(&rwork->work);
rwork 3213 kernel/workqueue.c return flush_work(&rwork->work);
rwork 203 net/sched/cls_api.c bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
rwork 205 net/sched/cls_api.c INIT_RCU_WORK(rwork, func);
rwork 206 net/sched/cls_api.c return queue_rcu_work(tc_filter_wq, rwork);
rwork 36 net/sched/cls_basic.c struct rcu_work rwork;
rwork 100 net/sched/cls_basic.c rwork);
rwork 117 net/sched/cls_basic.c tcf_queue_work(&f->rwork, basic_delete_filter_work);
rwork 135 net/sched/cls_basic.c tcf_queue_work(&f->rwork, basic_delete_filter_work);
rwork 234 net/sched/cls_basic.c tcf_queue_work(&fold->rwork, basic_delete_filter_work);
rwork 50 net/sched/cls_bpf.c struct rcu_work rwork;
rwork 281 net/sched/cls_bpf.c rwork);
rwork 297 net/sched/cls_bpf.c tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
rwork 523 net/sched/cls_bpf.c tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
rwork 22 net/sched/cls_cgroup.c struct rcu_work rwork;
rwork 70 net/sched/cls_cgroup.c rwork);
rwork 123 net/sched/cls_cgroup.c tcf_queue_work(&head->rwork, cls_cgroup_destroy_work);
rwork 140 net/sched/cls_cgroup.c tcf_queue_work(&head->rwork, cls_cgroup_destroy_work);
rwork 56 net/sched/cls_flow.c struct rcu_work rwork;
rwork 381 net/sched/cls_flow.c rwork);
rwork 554 net/sched/cls_flow.c tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
rwork 574 net/sched/cls_flow.c tcf_queue_work(&f->rwork, flow_destroy_filter_work);
rwork 600 net/sched/cls_flow.c tcf_queue_work(&f->rwork, flow_destroy_filter_work);
rwork 81 net/sched/cls_flower.c struct rcu_work rwork;
rwork 98 net/sched/cls_flower.c struct rcu_work rwork;
rwork 114 net/sched/cls_flower.c struct rcu_work rwork;
rwork 361 net/sched/cls_flower.c struct fl_flow_mask, rwork);
rwork 369 net/sched/cls_flower.c struct fl_flow_mask, rwork);
rwork 385 net/sched/cls_flower.c tcf_queue_work(&mask->rwork, fl_mask_free_work);
rwork 410 net/sched/cls_flower.c struct cls_fl_filter, rwork);
rwork 503 net/sched/cls_flower.c tcf_queue_work(&f->rwork, fl_destroy_filter_work);
rwork 555 net/sched/cls_flower.c rwork);
rwork 580 net/sched/cls_flower.c tcf_queue_work(&head->rwork, fl_destroy_sleepable);
rwork 1693 net/sched/cls_flower.c tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
rwork 1713 net/sched/cls_flower.c tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
rwork 40 net/sched/cls_fw.c struct rcu_work rwork;
rwork 124 net/sched/cls_fw.c rwork);
rwork 146 net/sched/cls_fw.c tcf_queue_work(&f->rwork, fw_delete_filter_work);
rwork 175 net/sched/cls_fw.c tcf_queue_work(&f->rwork, fw_delete_filter_work);
rwork 297 net/sched/cls_fw.c tcf_queue_work(&f->rwork, fw_delete_filter_work);
rwork 23 net/sched/cls_matchall.c struct rcu_work rwork;
rwork 60 net/sched/cls_matchall.c rwork);
rwork 142 net/sched/cls_matchall.c tcf_queue_work(&head->rwork, mall_destroy_work);
rwork 56 net/sched/cls_route.c struct rcu_work rwork;
rwork 264 net/sched/cls_route.c rwork);
rwork 272 net/sched/cls_route.c tcf_queue_work(&f->rwork, route4_delete_filter_work);
rwork 344 net/sched/cls_route.c tcf_queue_work(&f->rwork, route4_delete_filter_work);
rwork 550 net/sched/cls_route.c tcf_queue_work(&fold->rwork, route4_delete_filter_work);
rwork 96 net/sched/cls_rsvp.h struct rcu_work rwork;
rwork 292 net/sched/cls_rsvp.h rwork);
rwork 306 net/sched/cls_rsvp.h tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
rwork 36 net/sched/cls_tcindex.c struct rcu_work rwork;
rwork 43 net/sched/cls_tcindex.c struct rcu_work rwork;
rwork 57 net/sched/cls_tcindex.c struct rcu_work rwork;
rwork 173 net/sched/cls_tcindex.c rwork);
rwork 190 net/sched/cls_tcindex.c rwork);
rwork 232 net/sched/cls_tcindex.c tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
rwork 239 net/sched/cls_tcindex.c tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
rwork 252 net/sched/cls_tcindex.c rwork);
rwork 285 net/sched/cls_tcindex.c rwork);
rwork 507 net/sched/cls_tcindex.c tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
rwork 609 net/sched/cls_tcindex.c tcf_queue_work(&r->rwork,
rwork 626 net/sched/cls_tcindex.c tcf_queue_work(&p->rwork, tcindex_destroy_work);
rwork 62 net/sched/cls_u32.c struct rcu_work rwork;
rwork 421 net/sched/cls_u32.c rwork);
rwork 438 net/sched/cls_u32.c rwork);
rwork 462 net/sched/cls_u32.c tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
rwork 584 net/sched/cls_u32.c tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
rwork 922 net/sched/cls_u32.c tcf_queue_work(&n->rwork, u32_delete_key_work);
rwork 101 net/tipc/topsrv.c struct work_struct rwork;
rwork 195 net/tipc/topsrv.c INIT_WORK(&con->rwork, tipc_conn_recv_work);
rwork 415 net/tipc/topsrv.c struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
rwork 442 net/tipc/topsrv.c if (!queue_work(con->server->rcv_wq, &con->rwork))
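
The hits above fall into two recurring patterns. The fs/dlm/lowcomms.c, net/tipc/topsrv.c and drivers/input/mouse/psmouse-smbus.c entries use a plain struct work_struct named rwork: INIT_WORK() binds it to a handler, queue_work() or schedule_work() defers the processing, and the handler recovers its owning object with container_of(). The sketch below is a minimal illustration of that pattern, not kernel code; my_connection, my_recv_wq and the function names are invented for the example.

/*
 * Minimal sketch of the plain work_struct pattern in the fs/dlm,
 * net/tipc and psmouse-smbus hits above.  Everything named my_* is
 * invented for this illustration and is not part of the kernel.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_connection {
	/* socket, buffers, locks, ... */
	struct work_struct rwork;		/* receive work, as in dlm/tipc */
};

static struct workqueue_struct *my_recv_wq;	/* assumed: alloc_workqueue() at init */

static void my_recv_work(struct work_struct *work)
{
	/* recover the owning connection, as lowcomms.c line 1601 does */
	struct my_connection *con = container_of(work, struct my_connection, rwork);

	/* ... drain con's socket here ... */
	(void)con;
}

static void my_connection_init(struct my_connection *con)
{
	INIT_WORK(&con->rwork, my_recv_work);	/* bind the handler once */
}

static void my_data_ready(struct my_connection *con)
{
	queue_work(my_recv_wq, &con->rwork);	/* defer to the workqueue */
}

The include/linux/workqueue.h, kernel/workqueue.c and net/sched/cls_*.c entries show the RCU-deferred variant: the filter embeds a struct rcu_work, and tcf_queue_work() (INIT_RCU_WORK() plus queue_rcu_work(), per the cls_api.c hits) ensures the destroy handler runs only after an RCU grace period, so readers still traversing the filter lists never touch freed memory. Again a hedged sketch under the same assumptions; my_filter, my_destroy_wq and the function names are illustrative only.

/*
 * Minimal sketch of the RCU-deferred pattern used by the net/sched
 * classifiers above: embed a struct rcu_work, then INIT_RCU_WORK() +
 * queue_rcu_work() (what tcf_queue_work() wraps) so the destroy
 * handler only runs after a grace period.  my_* names are invented.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_filter {
	u32 handle;
	struct rcu_work rwork;
};

static struct workqueue_struct *my_destroy_wq;	/* assumed: alloc_workqueue() at init */

static void my_filter_destroy_work(struct work_struct *work)
{
	/* to_rcu_work() maps the work_struct back to the embedded rcu_work */
	struct my_filter *f = container_of(to_rcu_work(work),
					   struct my_filter, rwork);

	kfree(f);	/* safe: an RCU grace period elapsed since queueing */
}

/* Call with the filter already unlinked from every RCU-visible list. */
static void my_filter_delete(struct my_filter *f)
{
	INIT_RCU_WORK(&f->rwork, my_filter_destroy_work);
	queue_rcu_work(my_destroy_wq, &f->rwork);
}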