close_work        105 drivers/infiniband/core/ucma.c 	struct work_struct	close_work;
close_work        125 drivers/infiniband/core/ucma.c 	struct work_struct	close_work;
close_work        187 drivers/infiniband/core/ucma.c 	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);
close_work        195 drivers/infiniband/core/ucma.c 	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);
close_work        215 drivers/infiniband/core/ucma.c 	INIT_WORK(&ctx->close_work, ucma_close_id);
close_work        322 drivers/infiniband/core/ucma.c 		queue_work(ctx->file->close_wq, &ctx->close_work);
close_work        330 drivers/infiniband/core/ucma.c 			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
close_work        331 drivers/infiniband/core/ucma.c 			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
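
The ucma.c hits above show the basic shape shared by most of these results: a struct work_struct embedded in the object being torn down, bound to its handler with INIT_WORK(), queued on a dedicated workqueue (ucma's per-file close_wq), and recovered inside the handler via container_of(). A minimal sketch of that pattern, using hypothetical names (struct my_ctx, my_close_fn), not the ucma code itself:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct workqueue_struct *close_wq;	/* dedicated queue, like ucma's file->close_wq */
	struct work_struct close_work;		/* embedded work item */
};

/* Runs later in process context; recover the owner from the work item. */
static void my_close_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, close_work);

	/* sleepable teardown goes here */
	kfree(ctx);
}

static void my_ctx_init(struct my_ctx *ctx, struct workqueue_struct *wq)
{
	ctx->close_wq = wq;
	INIT_WORK(&ctx->close_work, my_close_fn);
}

static void my_ctx_close(struct my_ctx *ctx)
{
	queue_work(ctx->close_wq, &ctx->close_work);	/* my_close_fn() runs asynchronously */
}

Note that ucma embeds a close_work in two different structures (its context and its event), which is why container_of() appears above with two different container types.
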
close_work         61 include/net/af_vsock.h 	struct delayed_work close_work;
close_work        246 kernel/acct.c  	INIT_WORK(&acct->work, close_work);
close_work        152 net/caif/chnl_net.c static DECLARE_WORK(close_worker, close_work);
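
In kernel/acct.c and net/caif/chnl_net.c, by contrast, close_work is the handler function's name rather than a field name: caif binds a static work item to it at compile time with DECLARE_WORK(), while acct.c binds an embedded one at runtime with INIT_WORK(). A sketch of the two binding styles (the handler body and struct obj are illustrative, not the acct/caif code):

#include <linux/workqueue.h>

static void close_work(struct work_struct *work)
{
	/* teardown runs here, in process context */
}

/* Compile-time binding of a static work item, as in chnl_net.c: */
static DECLARE_WORK(close_worker, close_work);

/* Runtime binding of an embedded work item, as in acct.c: */
struct obj {
	struct work_struct work;
};

static void obj_init(struct obj *o)
{
	INIT_WORK(&o->work, close_work);
}

Either way, queuing looks the same: schedule_work(&close_worker) or schedule_work(&o->work) puts the item on the system workqueue.
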
close_work        185 net/smc/smc.h  	struct work_struct	close_work;	/* peer sent some closing */
close_work        303 net/smc/smc_cdc.c 		if (!schedule_work(&conn->close_work))
close_work        337 net/smc/smc_close.c 						   close_work);
close_work        492 net/smc/smc_close.c 	INIT_WORK(&smc->conn.close_work, smc_close_passive_work);
close_work        482 net/smc/smc_core.c 		if (!schedule_work(&conn->close_work))
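
The smc hits add a detail worth calling out: schedule_work() returns false when the work item is already pending, and both smc_cdc.c and smc_core.c test that return value so the reference taken on behalf of the work item can be dropped when queuing was a no-op (SMC holds that reference on the owning socket). A hedged sketch of the idiom, with a hypothetical struct conn standing in for SMC's connection type:

#include <linux/workqueue.h>
#include <net/sock.h>

struct conn {
	struct sock *sk;
	struct work_struct close_work;	/* peer sent some closing */
};

/* Queue the passive-close handler at most once per pending event. */
static void conn_notify_peer_close(struct conn *conn)
{
	sock_hold(conn->sk);			/* ref for the work item; put in the handler */
	if (!schedule_work(&conn->close_work))
		sock_put(conn->sk);		/* already queued: drop the extra ref */
}
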
close_work        259 net/vmw_vsock/hyperv_transport.c 	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
close_work        481 net/vmw_vsock/hyperv_transport.c 		container_of(work, struct vsock_sock, close_work.work);
close_work        511 net/vmw_vsock/hyperv_transport.c 	INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
close_work        513 net/vmw_vsock/hyperv_transport.c 	schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
close_work        759 net/vmw_vsock/virtio_transport_common.c 	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
close_work        772 net/vmw_vsock/virtio_transport_common.c 		container_of(work, struct vsock_sock, close_work.work);
close_work        816 net/vmw_vsock/virtio_transport_common.c 	INIT_DELAYED_WORK(&vsk->close_work,
close_work        819 net/vmw_vsock/virtio_transport_common.c 	schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
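
The two vsock transports (hyperv_transport.c and virtio_transport_common.c) use the delayed flavor of the pattern, matching the struct delayed_work declared in include/net/af_vsock.h: close is armed as a timeout with INIT_DELAYED_WORK() plus schedule_delayed_work(), the handler recovers the socket with container_of(work, struct vsock_sock, close_work.work) (note the extra .work, since the work_struct is nested inside the delayed_work), and a graceful peer close cancels the timer with cancel_delayed_work(), whose return value says whether the timer's socket reference still needs dropping. A compressed sketch of that lifecycle; MY_CLOSE_TIMEOUT and the my_* helpers are illustrative, not the transport code:

#include <linux/workqueue.h>
#include <net/af_vsock.h>

#define MY_CLOSE_TIMEOUT (8 * HZ)	/* each transport defines its own timeout */

/* Fires if the peer never completes the close handshake in time. */
static void my_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);

	/* force-release the socket, then drop the timer's reference */
	sock_put(sk_vsock(vsk));
}

static void my_start_close_timer(struct vsock_sock *vsk)
{
	sock_hold(sk_vsock(vsk));	/* reference held while the timer is armed */
	INIT_DELAYED_WORK(&vsk->close_work, my_close_timeout);
	schedule_delayed_work(&vsk->close_work, MY_CLOSE_TIMEOUT);
}

static void my_peer_closed(struct vsock_sock *vsk, bool cancel_timeout)
{
	/* cancel_delayed_work() returns true only if the work was still pending */
	if (!cancel_timeout || cancel_delayed_work(&vsk->close_work))
		sock_put(sk_vsock(vsk));	/* timer will not run: drop its ref */
}
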