Lines matching refs: work (cross-reference hits in drivers/infiniband/core/cma.c)
163 struct work_struct work; member
171 struct work_struct work; member
177 struct work_struct work; member
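Note: the three struct members above are the embedded work items of cma.c's three deferred-work containers. A reconstruction consistent with these hits (the fields past the listed member are taken from mainline cma.c of this vintage and are approximate, not verbatim):

    /* Sketch reconstructed from the cross-reference hits; not verbatim. */
    struct cma_work {
            struct work_struct      work;           /* line 163 */
            struct rdma_id_private  *id;
            enum rdma_cm_state      old_state;
            enum rdma_cm_state      new_state;
            struct rdma_cm_event    event;
    };

    struct cma_ndev_work {
            struct work_struct      work;           /* line 171 */
            struct rdma_id_private  *id;
            struct rdma_cm_event    event;
    };

    struct iboe_mcast_work {
            struct work_struct      work;           /* line 177 */
            struct rdma_id_private  *id;
            struct cma_multicast    *mc;
    };

Embedding the work_struct lets each handler recover its container with container_of(), as the cma_work_handler() and iboe_mcast_work_handler() hits below show.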
1695 struct cma_work *work = context; in cma_query_handler() local
1698 route = &work->id->id.route; in cma_query_handler()
1704 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_query_handler()
1705 work->new_state = RDMA_CM_ADDR_RESOLVED; in cma_query_handler()
1706 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; in cma_query_handler()
1707 work->event.status = status; in cma_query_handler()
1710 queue_work(cma_wq, &work->work); in cma_query_handler()
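The hits at 1695-1710 are the tail of the SA path-record query callback. A minimal sketch of the surrounding control flow, assuming the unlisted success branch (copying the returned path record) matches mainline cma.c of this vintage:

    /* Sketch; the success branch is assumed, only the error branch is in the hits. */
    static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                                  void *context)
    {
            struct cma_work *work = context;
            struct rdma_route *route = &work->id->id.route;

            if (!status) {
                    /* Success: keep the ROUTE_RESOLVED event pre-set by
                     * cma_resolve_ib_route() and record the resolved path. */
                    route->num_paths = 1;
                    *route->path_rec = *path_rec;
            } else {
                    /* Failure: rewind the state machine to ADDR_RESOLVED and
                     * report a ROUTE_ERROR event instead. */
                    work->old_state = RDMA_CM_ROUTE_QUERY;
                    work->new_state = RDMA_CM_ADDR_RESOLVED;
                    work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
                    work->event.status = status;
            }
            queue_work(cma_wq, &work->work);
    }

Either way the callback ends in queue_work(), so the user's event handler always runs from cma_wq rather than from SA callback context.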
1714 struct cma_work *work) in cma_query_ib_route() argument
1755 work, &id_priv->query); in cma_query_ib_route()
1762 struct cma_work *work = container_of(_work, struct cma_work, work); in cma_work_handler() local
1763 struct rdma_id_private *id_priv = work->id; in cma_work_handler()
1767 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) in cma_work_handler()
1770 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { in cma_work_handler()
1779 kfree(work); in cma_work_handler()
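The hits at 1762-1779 outline the shared work handler. A sketch, assuming the locking and refcounting lines elided from the listing (handler_mutex, cma_deref_id(), rdma_destroy_id()) follow the usual cma.c shape:

    /* Sketch; lock/refcount lines are assumed, the rest matches the hits. */
    static void cma_work_handler(struct work_struct *_work)
    {
            struct cma_work *work = container_of(_work, struct cma_work, work);
            struct rdma_id_private *id_priv = work->id;
            int destroy = 0;

            mutex_lock(&id_priv->handler_mutex);
            /* Fire the event only if the id is still in the expected state. */
            if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
                    goto out;

            /* A nonzero return from the user's handler requests teardown. */
            if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
                    cma_exch(id_priv, RDMA_CM_DESTROYING);
                    destroy = 1;
            }
    out:
            mutex_unlock(&id_priv->handler_mutex);
            cma_deref_id(id_priv);
            if (destroy)
                    rdma_destroy_id(&id_priv->id);
            kfree(work);
    }

cma_ndev_work_handler() (hits at 1784-1803) has the same shape minus the compare-and-exchange gate, since an ADDR_CHANGE event is not tied to a single state transition.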
1784 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); in cma_ndev_work_handler() local
1785 struct rdma_id_private *id_priv = work->id; in cma_ndev_work_handler()
1793 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { in cma_ndev_work_handler()
1803 kfree(work); in cma_ndev_work_handler()
1809 struct cma_work *work; in cma_resolve_ib_route() local
1812 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_ib_route()
1813 if (!work) in cma_resolve_ib_route()
1816 work->id = id_priv; in cma_resolve_ib_route()
1817 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_ib_route()
1818 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_resolve_ib_route()
1819 work->new_state = RDMA_CM_ROUTE_RESOLVED; in cma_resolve_ib_route()
1820 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; in cma_resolve_ib_route()
1828 ret = cma_query_ib_route(id_priv, timeout_ms, work); in cma_resolve_ib_route()
1837 kfree(work); in cma_resolve_ib_route()
1869 struct cma_work *work; in cma_resolve_iw_route() local
1871 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_iw_route()
1872 if (!work) in cma_resolve_iw_route()
1875 work->id = id_priv; in cma_resolve_iw_route()
1876 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_iw_route()
1877 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_resolve_iw_route()
1878 work->new_state = RDMA_CM_ROUTE_RESOLVED; in cma_resolve_iw_route()
1879 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; in cma_resolve_iw_route()
1880 queue_work(cma_wq, &work->work); in cma_resolve_iw_route()
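The iWARP variant is the purest instance of the alloc/init/queue pattern: routing is left to the IP stack, so the function merely defers a synthetic ROUTE_RESOLVED event. Almost every line appears in the hits at 1869-1880; this sketch adds only the trivial glue:

    /* Sketch; only the signature and return paths are assumed. */
    static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
    {
            struct cma_work *work;

            work = kzalloc(sizeof *work, GFP_KERNEL);
            if (!work)
                    return -ENOMEM;

            work->id = id_priv;
            INIT_WORK(&work->work, cma_work_handler);
            work->old_state = RDMA_CM_ROUTE_QUERY;
            work->new_state = RDMA_CM_ROUTE_RESOLVED;
            work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
            queue_work(cma_wq, &work->work);
            return 0;
    }

cma_resolve_loopback() and cma_resolve_ib_addr() (hits at 2132-2187) reuse the same skeleton with the RDMA_CM_ADDR_QUERY to RDMA_CM_ADDR_RESOLVED transition.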
1908 struct cma_work *work; in cma_resolve_iboe_route() local
1913 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_iboe_route()
1914 if (!work) in cma_resolve_iboe_route()
1917 work->id = id_priv; in cma_resolve_iboe_route()
1918 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_iboe_route()
1960 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_resolve_iboe_route()
1961 work->new_state = RDMA_CM_ROUTE_RESOLVED; in cma_resolve_iboe_route()
1962 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; in cma_resolve_iboe_route()
1963 work->event.status = 0; in cma_resolve_iboe_route()
1965 queue_work(cma_wq, &work->work); in cma_resolve_iboe_route()
1973 kfree(work); in cma_resolve_iboe_route()
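The RoCE/IBoE variant (hits at 1908-1973) follows the same pattern, but the gap between the hits at 1918 and 1960 is the route construction proper, which this listing does not show. A skeleton that marks the gap explicitly rather than guessing at it:

    /* Skeleton only; the elided middle is summarized, not reconstructed. */
    static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
    {
            struct cma_work *work;

            work = kzalloc(sizeof *work, GFP_KERNEL);
            if (!work)
                    return -ENOMEM;

            work->id = id_priv;
            INIT_WORK(&work->work, cma_work_handler);

            /*
             * cma.c lines 1919-1959 (absent from the hits): allocate the
             * path record and fill it from the bound net_device, translating
             * IP addresses to GIDs. Failures branch to cleanup ending in
             * the kfree(work) seen at line 1973.
             */

            work->old_state = RDMA_CM_ROUTE_QUERY;
            work->new_state = RDMA_CM_ROUTE_RESOLVED;
            work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
            work->event.status = 0;
            queue_work(cma_wq, &work->work);
            return 0;
    }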
2132 struct cma_work *work; in cma_resolve_loopback() local
2136 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_loopback()
2137 if (!work) in cma_resolve_loopback()
2149 work->id = id_priv; in cma_resolve_loopback()
2150 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_loopback()
2151 work->old_state = RDMA_CM_ADDR_QUERY; in cma_resolve_loopback()
2152 work->new_state = RDMA_CM_ADDR_RESOLVED; in cma_resolve_loopback()
2153 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; in cma_resolve_loopback()
2154 queue_work(cma_wq, &work->work); in cma_resolve_loopback()
2157 kfree(work); in cma_resolve_loopback()
2163 struct cma_work *work; in cma_resolve_ib_addr() local
2166 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_resolve_ib_addr()
2167 if (!work) in cma_resolve_ib_addr()
2179 work->id = id_priv; in cma_resolve_ib_addr()
2180 INIT_WORK(&work->work, cma_work_handler); in cma_resolve_ib_addr()
2181 work->old_state = RDMA_CM_ADDR_QUERY; in cma_resolve_ib_addr()
2182 work->new_state = RDMA_CM_ADDR_RESOLVED; in cma_resolve_ib_addr()
2183 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; in cma_resolve_ib_addr()
2184 queue_work(cma_wq, &work->work); in cma_resolve_ib_addr()
2187 kfree(work); in cma_resolve_ib_addr()
3259 static void iboe_mcast_work_handler(struct work_struct *work) in iboe_mcast_work_handler() argument
3261 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); in iboe_mcast_work_handler()
3300 struct iboe_mcast_work *work; in cma_iboe_join_multicast() local
3309 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_iboe_join_multicast()
3310 if (!work) in cma_iboe_join_multicast()
3341 work->id = id_priv; in cma_iboe_join_multicast()
3342 work->mc = mc; in cma_iboe_join_multicast()
3343 INIT_WORK(&work->work, iboe_mcast_work_handler); in cma_iboe_join_multicast()
3345 queue_work(cma_wq, &work->work); in cma_iboe_join_multicast()
3352 kfree(work); in cma_iboe_join_multicast()
3448 struct cma_ndev_work *work; in cma_netdev_change() local
3456 work = kzalloc(sizeof *work, GFP_KERNEL); in cma_netdev_change()
3457 if (!work) in cma_netdev_change()
3460 INIT_WORK(&work->work, cma_ndev_work_handler); in cma_netdev_change()
3461 work->id = id_priv; in cma_netdev_change()
3462 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; in cma_netdev_change()
3464 queue_work(cma_wq, &work->work); in cma_netdev_change()
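Finally, the netdevice notifier path (hits at 3448-3464) queues a cma_ndev_work so that an ADDR_CHANGE event reaches the user from workqueue context. A sketch, with the trigger condition hedged since it is absent from the hits (mainline compares the bound interface index and source hardware address):

    /* Sketch; the if-condition and refcount line are assumptions. */
    static int cma_netdev_change(struct net_device *ndev,
                                 struct rdma_id_private *id_priv)
    {
            struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
            struct cma_ndev_work *work;

            if (dev_addr->bound_dev_if == ndev->ifindex &&
                memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
                    work = kzalloc(sizeof *work, GFP_KERNEL);
                    if (!work)
                            return -ENOMEM;

                    INIT_WORK(&work->work, cma_ndev_work_handler);
                    work->id = id_priv;
                    work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
                    atomic_inc(&id_priv->refcount); /* dropped in the handler */
                    queue_work(cma_wq, &work->work);
            }
            return 0;
    }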