Lines matching refs: ctx
Identifier search results over the mlx4 IB driver's paravirtualized MAD demux
code (drivers/infiniband/hw/mlx4/mad.c). Each entry shows the source line
number, the matching line, and the enclosing function; "argument" and "local"
mark occurrences where ctx is a function parameter or a local variable.

99 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)  in mlx4_ib_get_new_demux_tid()  argument
101 return cpu_to_be64(atomic_inc_return(&ctx->tid)) | in mlx4_ib_get_new_demux_tid()
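
Lines 99-101 are the entire body of the demux transaction-ID generator. A
minimal sketch of the full function follows; the high-byte tag constant is an
assumption from memory of the mlx4 driver, not from the matched lines, and may
differ in this tree.

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
        /* Atomically bump the per-context counter and fold in a fixed
         * tag so demux-generated TIDs are distinguishable from guest
         * TIDs when replies come back. (Tag value assumed.) */
        return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
               cpu_to_be64(0xff10000000000000LL);
}
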
1127 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; in mlx4_ib_tunnel_comp_handler() local
1128 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_tunnel_comp_handler()
1130 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) in mlx4_ib_tunnel_comp_handler()
1131 queue_work(ctx->wq, &ctx->work); in mlx4_ib_tunnel_comp_handler()
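
mlx4_ib_tunnel_comp_handler (lines 1127-1131) runs from the CQ completion
callback, i.e. atomic context, so it only defers to the per-port workqueue.
Sketch of the full handler, assuming the state check is serialized by the
driver's SR-IOV teardown spinlock (the lock name is an assumption):

static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
        unsigned long flags;
        struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
        struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

        /* Queue the poll worker only while the context is active and
         * SR-IOV is not being torn down. */
        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
                queue_work(ctx->wq, &ctx->work);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
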
1135 static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, in mlx4_ib_post_pv_qp_buf() argument
1148 sg_list.lkey = ctx->pd->local_dma_lkey; in mlx4_ib_post_pv_qp_buf()
1155 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map, in mlx4_ib_post_pv_qp_buf()
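
mlx4_ib_post_pv_qp_buf (lines 1135-1155) reposts a single receive buffer: the
SGE takes its lkey from the PD's local DMA lkey (1148) and the buffer is
synced for the device before posting (1155). Sketch; the wr_id encoding
macros (MLX4_TUN_WRID_RECV, MLX4_TUN_SET_WRID_QPN), the proxy_qpt field, and
the buffer struct names are reconstructed from memory and may differ.

static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
                                  struct mlx4_ib_demux_pv_qp *tun_qp,
                                  int index)
{
        struct ib_sge sg_list;
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        int size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
                   sizeof (struct mlx4_tunnel_mad) :
                   sizeof (struct mlx4_mad_rcv_buf);

        sg_list.addr = tun_qp->ring[index].map;
        sg_list.length = size;
        sg_list.lkey = ctx->pd->local_dma_lkey;

        recv_wr.next = NULL;
        recv_wr.sg_list = &sg_list;
        recv_wr.num_sge = 1;
        /* Encode ring index and proxy QP type into wr_id so the
         * completion worker can locate the buffer again. */
        recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
                        MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
        ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
                                      size, DMA_FROM_DEVICE);
        return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}
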
1303 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc) in mlx4_ib_multiplex_mad() argument
1305 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_multiplex_mad()
1306 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)]; in mlx4_ib_multiplex_mad()
1319 (wc->src_qp & 0x1) != ctx->port - 1 || in mlx4_ib_multiplex_mad()
1321 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp); in mlx4_ib_multiplex_mad()
1325 if (slave != ctx->slave) { in mlx4_ib_multiplex_mad()
1326 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: " in mlx4_ib_multiplex_mad()
1332 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, in mlx4_ib_multiplex_mad()
1345 mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d " in mlx4_ib_multiplex_mad()
1360 !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port)) in mlx4_ib_multiplex_mad()
1364 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave, in mlx4_ib_multiplex_mad()
1369 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave, in mlx4_ib_multiplex_mad()
1381 mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d " in mlx4_ib_multiplex_mad()
1390 ah.ibah.device = ctx->ib_dev; in mlx4_ib_multiplex_mad()
1400 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr); in mlx4_ib_multiplex_mad()
1405 mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave, in mlx4_ib_multiplex_mad()
1408 mlx4_ib_send_to_wire(dev, slave, ctx->port, in mlx4_ib_multiplex_mad()
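
mlx4_ib_multiplex_mad (lines 1303-1408) is the tunnel-to-wire path: it
validates that the completion really came from the tunnel QP owned by this
context's slave, syncs the buffer for the CPU (1332), routes SA and CM MADs
through their rewriting handlers (1364, 1369), then rebuilds an address
handle and forwards the MAD via mlx4_ib_send_to_wire (1408). Sketch of the
validation step around lines 1319-1326; the slave-decoding arithmetic and the
base_proxy_sqpn bounds check are reconstructed from memory:

        /* Each slave owns a block of 8 proxy/tunnel QPNs: bit 0 encodes
         * the port and bit 2 separates proxy from tunnel QPs. */
        if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
            (wc->src_qp & 0x1) != ctx->port - 1 ||
            wc->src_qp & 0x4) {
                mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n",
                             wc->src_qp);
                return;
        }
        slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
        if (slave != ctx->slave) {
                mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
                             "belongs to another slave\n", wc->src_qp);
                return;
        }
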
1417 static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, in mlx4_ib_alloc_pv_bufs() argument
1427 tun_qp = &ctx->qp[qp_type]; in mlx4_ib_alloc_pv_bufs()
1455 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1459 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) { in mlx4_ib_alloc_pv_bufs()
1471 ib_dma_map_single(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1475 if (ib_dma_mapping_error(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1492 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, in mlx4_ib_alloc_pv_bufs()
1502 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, in mlx4_ib_alloc_pv_bufs()
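
mlx4_ib_alloc_pv_bufs (lines 1417-1502) DMA-maps every ring buffer and checks
each mapping: lines 1455-1459 for the receive ring, 1471-1475 for the send
ring, with unwind paths at 1492-1502. Sketch of the receive-ring loop; the
ring length macro and the rx_buf_size name are assumptions:

        for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
                tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
                if (!tun_qp->ring[i].addr)
                        goto err;
                tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
                                                        tun_qp->ring[i].addr,
                                                        rx_buf_size,
                                                        DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
                        /* Mapping failed: free this buffer, then unwind
                         * all previously mapped ones (cf. line 1502). */
                        kfree(tun_qp->ring[i].addr);
                        goto err;
                }
        }
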
1511 static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx, in mlx4_ib_free_pv_qp_bufs() argument
1521 tun_qp = &ctx->qp[qp_type]; in mlx4_ib_free_pv_qp_bufs()
1532 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, in mlx4_ib_free_pv_qp_bufs()
1538 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, in mlx4_ib_free_pv_qp_bufs()
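
mlx4_ib_free_pv_qp_bufs (lines 1511-1538) is the exact mirror: unmap, then
free, for both rings. Condensed sketch under the same naming assumptions as
above (the real function also releases any cached address handles held by the
tx ring entries):

        for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
                ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
                                    rx_buf_size, DMA_FROM_DEVICE);
                kfree(tun_qp->ring[i].addr);
        }
        for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
                ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
                                    tx_buf_size, DMA_TO_DEVICE);
                kfree(tun_qp->tx_ring[i].buf.addr);
        }
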
1550 struct mlx4_ib_demux_pv_ctx *ctx; in mlx4_ib_tunnel_comp_worker() local
1554 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); in mlx4_ib_tunnel_comp_worker()
1555 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in mlx4_ib_tunnel_comp_worker()
1557 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) { in mlx4_ib_tunnel_comp_worker()
1558 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; in mlx4_ib_tunnel_comp_worker()
1562 mlx4_ib_multiplex_mad(ctx, &wc); in mlx4_ib_tunnel_comp_worker()
1563 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, in mlx4_ib_tunnel_comp_worker()
1589 ctx->slave, wc.status, wc.wr_id); in mlx4_ib_tunnel_comp_worker()
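
mlx4_ib_tunnel_comp_worker (lines 1550-1589) re-arms the CQ and then drains
it, demultiplexing each successful receive and immediately reposting its
buffer. Condensed sketch; send-completion handling is omitted:

static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
        struct mlx4_ib_demux_pv_ctx *ctx;
        struct mlx4_ib_demux_pv_qp *tun_qp;
        struct ib_wc wc;
        int ret;

        ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
        /* Re-arm first: a completion racing with the drain below simply
         * queues this work item again. */
        ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

        while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
                tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
                if (wc.status == IB_WC_SUCCESS && wc.opcode == IB_WC_RECV) {
                        mlx4_ib_multiplex_mad(ctx, &wc);
                        ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
                                                     wc.wr_id &
                                                     (MLX4_NUM_TUNNEL_BUFS - 1));
                        if (ret)
                                pr_err("Failed reposting tunnel buf:%d\n", ret);
                } else {
                        pr_debug("error cqe on slave:%d status:0x%x wr_id:0x%llx\n",
                                 ctx->slave, wc.status, wc.wr_id);
                }
        }
}
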
1612 static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx, in create_pv_sqp() argument
1624 tun_qp = &ctx->qp[qp_type]; in create_pv_sqp()
1627 qp_init_attr.init_attr.send_cq = ctx->cq; in create_pv_sqp()
1628 qp_init_attr.init_attr.recv_cq = ctx->cq; in create_pv_sqp()
1637 qp_init_attr.port = ctx->port; in create_pv_sqp()
1638 qp_init_attr.slave = ctx->slave; in create_pv_sqp()
1647 qp_init_attr.init_attr.port_num = ctx->port; in create_pv_sqp()
1648 qp_init_attr.init_attr.qp_context = ctx; in create_pv_sqp()
1650 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); in create_pv_sqp()
1663 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave, in create_pv_sqp()
1664 ctx->port, IB_DEFAULT_PKEY_FULL, in create_pv_sqp()
1668 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; in create_pv_sqp()
1670 attr.port_num = ctx->port; in create_pv_sqp()
1694 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i); in create_pv_sqp()
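
create_pv_sqp (lines 1612-1694) builds the per-slave proxy QP on the shared
CQ and PD (1627-1650), resolves the slave's P_Key index against the
virt-to-phys table (1663-1668), walks the QP through INIT/RTR/RTS, and
finally pre-posts the whole receive ring (1694). Sketch of the INIT
transition; the qkey and the attribute mask are assumptions:

        memset(&attr, 0, sizeof attr);
        attr.qp_state = IB_QPS_INIT;
        attr.pkey_index =
                to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
        attr.qkey = IB_QP1_QKEY;        /* assumed GSI proxy qkey */
        attr.port_num = ctx->port;
        ret = ib_modify_qp(tun_qp->qp, &attr,
                           IB_QP_STATE | IB_QP_PKEY_INDEX |
                           IB_QP_QKEY | IB_QP_PORT);
        if (ret)
                goto err_qp;

        /* Fill the receive ring before the QP sees traffic (line 1694). */
        for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
                ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
                if (ret)
                        goto err_qp;
        }
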
1714 struct mlx4_ib_demux_pv_ctx *ctx; in mlx4_ib_sqp_comp_worker() local
1720 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); in mlx4_ib_sqp_comp_worker()
1721 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in mlx4_ib_sqp_comp_worker()
1723 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) { in mlx4_ib_sqp_comp_worker()
1724 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; in mlx4_ib_sqp_comp_worker()
1743 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad); in mlx4_ib_sqp_comp_worker()
1744 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id & in mlx4_ib_sqp_comp_worker()
1756 ctx->slave, wc.status, wc.wr_id); in mlx4_ib_sqp_comp_worker()
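
mlx4_ib_sqp_comp_worker (lines 1714-1756) mirrors the tunnel worker but feeds
each received MAD into mlx4_ib_demux_mad (1743) instead of the multiplexer.
Sketch of the receive branch inside the poll loop; the mlx4_mad_rcv_buf
layout (GRH followed by MAD payload) is an assumption:

                struct mlx4_mad_rcv_buf *rbuf =
                        sqp->ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].addr;

                /* Hand GRH + payload to the common demux path, then
                 * recycle the receive buffer (lines 1743-1744). */
                mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, &rbuf->grh,
                                  (struct ib_mad *)&rbuf->payload);
                if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
                                           (MLX4_NUM_TUNNEL_BUFS - 1)))
                        pr_err("Failed reposting SQP buf\n");
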
1773 struct mlx4_ib_demux_pv_ctx *ctx; in alloc_pv_object() local
1776 ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL); in alloc_pv_object()
1777 if (!ctx) { in alloc_pv_object()
1783 ctx->ib_dev = &dev->ib_dev; in alloc_pv_object()
1784 ctx->port = port; in alloc_pv_object()
1785 ctx->slave = slave; in alloc_pv_object()
1786 *ret_ctx = ctx; in alloc_pv_object()
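
alloc_pv_object (lines 1773-1786) is small enough that the matches nearly
reconstruct it whole. Sketch; the -ENOMEM return value is an assumption:

static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
                           struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
        struct mlx4_ib_demux_pv_ctx *ctx;

        *ret_ctx = NULL;
        ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->ib_dev = &dev->ib_dev;
        ctx->port = port;
        ctx->slave = slave;
        *ret_ctx = ctx;
        return 0;
}
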
1799 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) in create_pv_resources() argument
1804 if (ctx->state != DEMUX_PV_STATE_DOWN) in create_pv_resources()
1807 ctx->state = DEMUX_PV_STATE_STARTING; in create_pv_resources()
1809 if (rdma_port_get_link_layer(ibdev, ctx->port) == in create_pv_resources()
1811 ctx->has_smi = 1; in create_pv_resources()
1813 if (ctx->has_smi) { in create_pv_resources()
1814 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun); in create_pv_resources()
1821 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun); in create_pv_resources()
1828 if (ctx->has_smi) in create_pv_resources()
1832 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, in create_pv_resources()
1833 NULL, ctx, &cq_attr); in create_pv_resources()
1834 if (IS_ERR(ctx->cq)) { in create_pv_resources()
1835 ret = PTR_ERR(ctx->cq); in create_pv_resources()
1840 ctx->pd = ib_alloc_pd(ctx->ib_dev); in create_pv_resources()
1841 if (IS_ERR(ctx->pd)) { in create_pv_resources()
1842 ret = PTR_ERR(ctx->pd); in create_pv_resources()
1847 if (ctx->has_smi) { in create_pv_resources()
1848 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun); in create_pv_resources()
1856 ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun); in create_pv_resources()
1864 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker); in create_pv_resources()
1866 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); in create_pv_resources()
1868 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; in create_pv_resources()
1870 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in create_pv_resources()
1875 ctx->state = DEMUX_PV_STATE_ACTIVE; in create_pv_resources()
1879 ctx->wq = NULL; in create_pv_resources()
1880 ib_destroy_qp(ctx->qp[1].qp); in create_pv_resources()
1881 ctx->qp[1].qp = NULL; in create_pv_resources()
1885 if (ctx->has_smi) in create_pv_resources()
1886 ib_destroy_qp(ctx->qp[0].qp); in create_pv_resources()
1887 ctx->qp[0].qp = NULL; in create_pv_resources()
1890 ib_dealloc_pd(ctx->pd); in create_pv_resources()
1891 ctx->pd = NULL; in create_pv_resources()
1894 ib_destroy_cq(ctx->cq); in create_pv_resources()
1895 ctx->cq = NULL; in create_pv_resources()
1898 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun); in create_pv_resources()
1901 if (ctx->has_smi) in create_pv_resources()
1902 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun); in create_pv_resources()
1904 ctx->state = DEMUX_PV_STATE_DOWN; in create_pv_resources()
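
create_pv_resources (lines 1799-1904) sets up in a strict order: DMA buffers
(1814, 1821), CQ (1832), PD (1840), SMI then GSI QPs (1848, 1856), the work
item bound to the tunnel or SQP worker depending on create_tun (1864-1866),
and finally arms the CQ (1870) before marking the context ACTIVE (1875); the
chained labels at 1879-1904 unwind in exact reverse. Sketch of the CQ/PD
stage; the label names and message strings are assumptions:

        cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
        if (ctx->has_smi)
                cq_size *= 2;   /* room for SMI and GSI completions (1828) */

        cq_attr.cqe = cq_size;
        ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
                               NULL, ctx, &cq_attr);
        if (IS_ERR(ctx->cq)) {
                ret = PTR_ERR(ctx->cq);
                pr_err("Couldn't create tunnel CQ (%d)\n", ret);
                goto err_buf;
        }

        ctx->pd = ib_alloc_pd(ctx->ib_dev);
        if (IS_ERR(ctx->pd)) {
                ret = PTR_ERR(ctx->pd);
                pr_err("Couldn't create tunnel PD (%d)\n", ret);
                goto err_cq;
        }
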
1909 struct mlx4_ib_demux_pv_ctx *ctx, int flush) in destroy_pv_resources() argument
1911 if (!ctx) in destroy_pv_resources()
1913 if (ctx->state > DEMUX_PV_STATE_DOWN) { in destroy_pv_resources()
1914 ctx->state = DEMUX_PV_STATE_DOWNING; in destroy_pv_resources()
1916 flush_workqueue(ctx->wq); in destroy_pv_resources()
1917 if (ctx->has_smi) { in destroy_pv_resources()
1918 ib_destroy_qp(ctx->qp[0].qp); in destroy_pv_resources()
1919 ctx->qp[0].qp = NULL; in destroy_pv_resources()
1920 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1); in destroy_pv_resources()
1922 ib_destroy_qp(ctx->qp[1].qp); in destroy_pv_resources()
1923 ctx->qp[1].qp = NULL; in destroy_pv_resources()
1924 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1); in destroy_pv_resources()
1925 ib_dealloc_pd(ctx->pd); in destroy_pv_resources()
1926 ctx->pd = NULL; in destroy_pv_resources()
1927 ib_destroy_cq(ctx->cq); in destroy_pv_resources()
1928 ctx->cq = NULL; in destroy_pv_resources()
1929 ctx->state = DEMUX_PV_STATE_DOWN; in destroy_pv_resources()
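
The matches at lines 1909-1929 give destroy_pv_resources nearly verbatim.
Reassembled, with the flush guard made explicit (line 1916 is only reached
when the flush argument is set):

static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
                                 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
{
        if (!ctx)
                return;
        if (ctx->state > DEMUX_PV_STATE_DOWN) {
                ctx->state = DEMUX_PV_STATE_DOWNING;
                if (flush)
                        flush_workqueue(ctx->wq);
                if (ctx->has_smi) {
                        ib_destroy_qp(ctx->qp[0].qp);
                        ctx->qp[0].qp = NULL;
                        mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
                }
                ib_destroy_qp(ctx->qp[1].qp);
                ctx->qp[1].qp = NULL;
                mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
                ib_dealloc_pd(ctx->pd);
                ctx->pd = NULL;
                ib_destroy_cq(ctx->cq);
                ctx->cq = NULL;
                ctx->state = DEMUX_PV_STATE_DOWN;
        }
}
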
1973 struct mlx4_ib_demux_ctx *ctx, in mlx4_ib_alloc_demux_ctx() argument
1980 ctx->tun = kcalloc(dev->dev->caps.sqp_demux, in mlx4_ib_alloc_demux_ctx()
1982 if (!ctx->tun) in mlx4_ib_alloc_demux_ctx()
1985 ctx->dev = dev; in mlx4_ib_alloc_demux_ctx()
1986 ctx->port = port; in mlx4_ib_alloc_demux_ctx()
1987 ctx->ib_dev = &dev->ib_dev; in mlx4_ib_alloc_demux_ctx()
1999 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]); in mlx4_ib_alloc_demux_ctx()
2006 ret = mlx4_ib_mcg_port_init(ctx); in mlx4_ib_alloc_demux_ctx()
2013 ctx->wq = create_singlethread_workqueue(name); in mlx4_ib_alloc_demux_ctx()
2014 if (!ctx->wq) { in mlx4_ib_alloc_demux_ctx()
2021 ctx->ud_wq = create_singlethread_workqueue(name); in mlx4_ib_alloc_demux_ctx()
2022 if (!ctx->ud_wq) { in mlx4_ib_alloc_demux_ctx()
2031 destroy_workqueue(ctx->wq); in mlx4_ib_alloc_demux_ctx()
2032 ctx->wq = NULL; in mlx4_ib_alloc_demux_ctx()
2035 mlx4_ib_mcg_port_cleanup(ctx, 1); in mlx4_ib_alloc_demux_ctx()
2039 kfree(ctx->tun); in mlx4_ib_alloc_demux_ctx()
2040 ctx->tun = NULL; in mlx4_ib_alloc_demux_ctx()
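
mlx4_ib_alloc_demux_ctx (lines 1973-2040) allocates one tunnel-context slot
per possible slave (sqp_demux), initializes the MCG port, then creates the
two single-threaded workqueues, unwinding through lines 2031-2040 on failure.
Condensed sketch; the workqueue name formats, label names, and exact loop
bound are assumptions:

        ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
                           sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
        if (!ctx->tun)
                return -ENOMEM;

        ctx->dev = dev;
        ctx->port = port;
        ctx->ib_dev = &dev->ib_dev;

        for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
                if (ret)
                        goto err_mcg;
        }

        ret = mlx4_ib_mcg_port_init(ctx);
        if (ret)
                goto err_mcg;

        snprintf(name, sizeof name, "mlx4_ibt%d", port);
        ctx->wq = create_singlethread_workqueue(name);
        if (!ctx->wq) {
                ret = -ENOMEM;
                goto err_wq;
        }

        snprintf(name, sizeof name, "mlx4_ibud%d", port);
        ctx->ud_wq = create_singlethread_workqueue(name);
        if (!ctx->ud_wq) {
                ret = -ENOMEM;
                goto err_udwq;
        }
        return 0;

err_udwq:
        destroy_workqueue(ctx->wq);
        ctx->wq = NULL;
err_wq:
        mlx4_ib_mcg_port_cleanup(ctx, 1);
err_mcg:
        kfree(ctx->tun);
        ctx->tun = NULL;
        return ret;
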
2065 static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx) in mlx4_ib_free_demux_ctx() argument
2068 if (ctx) { in mlx4_ib_free_demux_ctx()
2069 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_free_demux_ctx()
2070 mlx4_ib_mcg_port_cleanup(ctx, 1); in mlx4_ib_free_demux_ctx()
2072 if (!ctx->tun[i]) in mlx4_ib_free_demux_ctx()
2074 if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN) in mlx4_ib_free_demux_ctx()
2075 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; in mlx4_ib_free_demux_ctx()
2077 flush_workqueue(ctx->wq); in mlx4_ib_free_demux_ctx()
2079 destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); in mlx4_ib_free_demux_ctx()
2080 free_pv_object(dev, i, ctx->port); in mlx4_ib_free_demux_ctx()
2082 kfree(ctx->tun); in mlx4_ib_free_demux_ctx()
2083 destroy_workqueue(ctx->ud_wq); in mlx4_ib_free_demux_ctx()
2084 destroy_workqueue(ctx->wq); in mlx4_ib_free_demux_ctx()
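
mlx4_ib_free_demux_ctx (lines 2065-2084) tears down in two passes: first flag
every per-slave context as DOWNING, then flush the workqueue once, and only
then destroy each context's resources. Reassembled sketch; the loop bound
reuses the sqp_demux count from the allocation at line 1980:

static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
        int i;

        if (ctx) {
                struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

                mlx4_ib_mcg_port_cleanup(ctx, 1);
                for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                        if (!ctx->tun[i])
                                continue;
                        if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
                                ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
                }
                /* One flush covers every context flagged above. */
                flush_workqueue(ctx->wq);
                for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                        destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
                        free_pv_object(dev, i, ctx->port);
                }
                kfree(ctx->tun);
                destroy_workqueue(ctx->ud_wq);
                destroy_workqueue(ctx->wq);
        }
}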