Lines Matching refs:work
245 struct work_struct work; member
253 struct work_struct work; member
259 struct work_struct work; member
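The three matches above are the embedded work_struct members of the work-wrapper types in the RDMA connection manager (by the function names, this listing is from drivers/infiniband/core/cma.c). A sketch of the wrappers, reconstructed from the field accesses matched below; field order and the exact enum/struct types are assumptions against this kernel vintage:

struct cma_work {
	struct work_struct	work;		/* queued on cma_wq */
	struct rdma_id_private	*id;		/* the rdma_cm_id being driven */
	enum rdma_cm_state	old_state;	/* state expected before the transition */
	enum rdma_cm_state	new_state;	/* state to enter if old_state still holds */
	struct rdma_cm_event	event;		/* event handed to the user's handler */
};

struct cma_ndev_work {				/* netdev-change variant: no state transition */
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

struct iboe_mcast_work {			/* RoCE multicast-join variant */
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;		/* the join being completed */
};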
2089 struct cma_work *work = context; in cma_query_handler() local
2092 route = &work->id->id.route; in cma_query_handler()
2098 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_query_handler()
2099 work->new_state = RDMA_CM_ADDR_RESOLVED; in cma_query_handler()
2100 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; in cma_query_handler()
2101 work->event.status = status; in cma_query_handler()
2104 queue_work(cma_wq, &work->work); in cma_query_handler()
2108 struct cma_work *work) in cma_query_ib_route() argument
2149 work, &id_priv->query); in cma_query_ib_route()
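These matches are the asynchronous IB route-query path: cma_resolve_ib_route() (below, 2203-2231) preloads a cma_work with the success transition and passes it as the context of an SA path-record query via cma_query_ib_route(); cma_query_handler() then runs in the SA callback and, on failure, rewrites the work into a ROUTE_ERROR event whose new_state falls back to RDMA_CM_ADDR_RESOLVED so the caller can retry. A sketch reconstructed around the matched lines; the success branch is an assumption from kernel source of this vintage:

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;
	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;	/* publish the resolved path */
	} else {
		/* Repurpose the preloaded work: report the error and fall
		 * back to ADDR_RESOLVED so route resolution can be retried. */
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}
	queue_work(cma_wq, &work->work);	/* deliver from cma_wq, not the SA callback */
}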
2156 struct cma_work *work = container_of(_work, struct cma_work, work); in cma_work_handler() local
2157 struct rdma_id_private *id_priv = work->id; in cma_work_handler()
2161 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) in cma_work_handler()
2164 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { in cma_work_handler()
2173 kfree(work); in cma_work_handler()
2178 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); in cma_ndev_work_handler() local
2179 struct rdma_id_private *id_priv = work->id; in cma_ndev_work_handler()
2187 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { in cma_ndev_work_handler()
2197 kfree(work); in cma_ndev_work_handler()
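Both handlers run on cma_wq. cma_work_handler() attempts the preloaded state transition with cma_comp_exch() and delivers the event only if it succeeds; a nonzero return from the user's event handler tears the id down. cma_ndev_work_handler() (2178-2197) has the same shape minus the state transition. A sketch of the common handler, reconstructed around the matched lines; the locking and refcounting calls are assumptions from surrounding kernel source:

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);	/* serialize event delivery */
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;	/* state changed under us (e.g. destroy in flight): drop the event */

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		/* nonzero return: the user wants the id destroyed */
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);	/* drop the reference taken when the work was queued */
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}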
2203 struct cma_work *work; in cma_resolve_ib_route() local
2206 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_ib_route()
2207 if (!work) in cma_resolve_ib_route()
2210 work->id = id_priv; in cma_resolve_ib_route()
2211 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_ib_route()
2212 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_resolve_ib_route()
2213 work->new_state = RDMA_CM_ROUTE_RESOLVED; in cma_resolve_ib_route()
2214 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; in cma_resolve_ib_route()
2222 ret = cma_query_ib_route(id_priv, timeout_ms, work); in cma_resolve_ib_route()
2231 kfree(work); in cma_resolve_ib_route()
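cma_resolve_ib_route() is the one resolver here that does not queue its own work: the preloaded cma_work is handed to the SA query and queued later by cma_query_handler() above. The kfree at 2231 is the error path when submitting the query fails. A sketch reconstructed around the matched lines; the path_rec allocation and the error labels are assumptions:

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	/* No queue_work() here: cma_query_handler() queues the work when
	 * the SA path-record query completes. */
	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}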
2263 struct cma_work *work; in cma_resolve_iw_route() local
2265 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_iw_route()
2266 if (!work) in cma_resolve_iw_route()
2269 work->id = id_priv; in cma_resolve_iw_route()
2270 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_iw_route()
2271 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_resolve_iw_route()
2272 work->new_state = RDMA_CM_ROUTE_RESOLVED; in cma_resolve_iw_route()
2273 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; in cma_resolve_iw_route()
2274 queue_work(cma_wq, &work->work); in cma_resolve_iw_route()
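iWARP has no fabric-level route step, so cma_resolve_iw_route() simply queues a synthetic ROUTE_RESOLVED event; there is nothing left to fail after the allocation. A minimal sketch, assembled almost entirely from the matched lines:

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);	/* complete asynchronously on cma_wq */
	return 0;
}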
2302 struct cma_work *work; in cma_resolve_iboe_route() local
2307 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_iboe_route()
2308 if (!work) in cma_resolve_iboe_route()
2311 work->id = id_priv; in cma_resolve_iboe_route()
2312 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_iboe_route()
2355 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_resolve_iboe_route()
2356 work->new_state = RDMA_CM_ROUTE_RESOLVED; in cma_resolve_iboe_route()
2357 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; in cma_resolve_iboe_route()
2358 work->event.status = 0; in cma_resolve_iboe_route()
2360 queue_work(cma_wq, &work->work); in cma_resolve_iboe_route()
2368 kfree(work); in cma_resolve_iboe_route()
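cma_resolve_iboe_route() follows the same shape, but the roughly forty lines elided between INIT_WORK (2312) and the state assignments (2355) build route->path_rec inline from the bound net_device and the source/destination GIDs, since RoCE has no SA to query; the kfree at 2368 is the bail-out when that construction fails. A hedged fragment of the tail:

	/* ... 2313-2354: fill route->path_rec from the netdev and GIDs,
	 * jumping to the kfree(work) error path on failure (assumption) ... */
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;		/* route was built locally, so always success */
	queue_work(cma_wq, &work->work);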
2516 struct cma_work *work; in cma_resolve_loopback() local
2520 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_loopback()
2521 if (!work) in cma_resolve_loopback()
2533 work->id = id_priv; in cma_resolve_loopback()
2534 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_loopback()
2535 work->old_state = RDMA_CM_ADDR_QUERY; in cma_resolve_loopback()
2536 work->new_state = RDMA_CM_ADDR_RESOLVED; in cma_resolve_loopback()
2537 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; in cma_resolve_loopback()
2538 queue_work(cma_wq, &work->work); in cma_resolve_loopback()
2541 kfree(work); in cma_resolve_loopback()
2547 struct cma_work *work; in cma_resolve_ib_addr() local
2550 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_ib_addr()
2551 if (!work) in cma_resolve_ib_addr()
2563 work->id = id_priv; in cma_resolve_ib_addr()
2564 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_ib_addr()
2565 work->old_state = RDMA_CM_ADDR_QUERY; in cma_resolve_ib_addr()
2566 work->new_state = RDMA_CM_ADDR_RESOLVED; in cma_resolve_ib_addr()
2567 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; in cma_resolve_ib_addr()
2568 queue_work(cma_wq, &work->work); in cma_resolve_ib_addr()
2571 kfree(work); in cma_resolve_ib_addr()
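cma_resolve_loopback() and cma_resolve_ib_addr() apply the identical pattern to address resolution (ADDR_QUERY -> ADDR_RESOLVED); the elided spans (2522-2532 and 2552-2562) are where the GIDs are filled in locally, and the matched kfree lines are the shared error exit. A sketch of the loopback variant; cma_bind_loopback() and the rdma_addr_get_sgid()/rdma_addr_set_dgid() helpers are assumptions from kernel source of this vintage:

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);	/* pick a local device/port */
		if (ret)
			goto err;
	}

	/* Loopback: the destination GID is our own source GID. */
	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}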
3632 static void iboe_mcast_work_handler(struct work_struct *work) in iboe_mcast_work_handler() argument
3634 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); in iboe_mcast_work_handler()
3673 struct iboe_mcast_work *work; in cma_iboe_join_multicast() local
3682 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_iboe_join_multicast()
3683 if (!work) in cma_iboe_join_multicast()
3714 work->id = id_priv; in cma_iboe_join_multicast()
3715 work->mc = mc; in cma_iboe_join_multicast()
3716 INIT_WORK(&work->work, iboe_mcast_work_handler); in cma_iboe_join_multicast()
3718 queue_work(cma_wq, &work->work); in cma_iboe_join_multicast()
3725 kfree(work); in cma_iboe_join_multicast()
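RoCE multicast joins also complete through cma_wq, using the dedicated iboe_mcast_work wrapper: cma_iboe_join_multicast() constructs the join result inline (again, no SA on Ethernet), stashes the id and the cma_multicast in the work, and lets the handler report it. A sketch of the handler; cma_ib_mc_handler(), release_mc and the kref field are assumptions from nearby kernel source:

static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;

	/* Deliver the locally-built join result through the same callback
	 * the IB SA join path uses; status 0 means "joined". */
	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, mc->multicast.ib);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}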
3806 struct cma_ndev_work *work; in cma_netdev_change() local
3815 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_netdev_change()
3816 if (!work) in cma_netdev_change()
3819 INIT_WORK(&work->work, cma_ndev_work_handler); in cma_netdev_change()
3820 work->id = id_priv; in cma_netdev_change()
3821 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; in cma_netdev_change()
3823 queue_work(cma_wq, &work->work); in cma_netdev_change()
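Finally, cma_netdev_change() bridges netdevice notifications into the CM: when a device change may invalidate an id's bound source address, it queues a cma_ndev_work carrying RDMA_CM_EVENT_ADDR_CHANGE so the user can rebind. A sketch; the IPoIB/dev_addr guard and the refcount increment are assumptions from kernel source of this vintage:

static int cma_netdev_change(struct net_device *ndev,
			     struct rdma_id_private *id_priv)
{
	struct cma_ndev_work *work;

	/* Only ids whose bound source address is affected need the event. */
	if (id_priv->id.ps == RDMA_PS_IPOIB ||
	    memcmp(ndev->dev_addr,
		   id_priv->id.route.addr.dev_addr.src_dev_addr,
		   ndev->addr_len)) {
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);	/* dropped by cma_deref_id() in the handler */
		queue_work(cma_wq, &work->work);
	}
	return 0;
}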