Lines matching refs: ctx
107 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx) in mlx4_ib_get_new_demux_tid() argument
109 return cpu_to_be64(atomic_inc_return(&ctx->tid)) | in mlx4_ib_get_new_demux_tid()
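The inline above stamps every demuxed MAD with a fresh transaction ID. A minimal portable sketch of the same pattern, assuming a 0xff high-byte marker (the constant on the continuation line is truncated out of this listing):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <endian.h>              /* htobe64() */

    static atomic_uint_fast32_t demux_tid;

    /* Monotonic counter, byte-swapped to wire (big-endian) order, with a
     * marker OR'ed into the high bits so demux-generated TIDs are
     * distinguishable from guest-generated ones. */
    static uint64_t get_new_demux_tid(void)
    {
        uint32_t tid = (uint32_t)atomic_fetch_add(&demux_tid, 1) + 1;

        return htobe64((uint64_t)tid) | htobe64(0xff00000000000000ULL);
    }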
1109 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; in mlx4_ib_tunnel_comp_handler() local
1110 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_tunnel_comp_handler()
1112 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) in mlx4_ib_tunnel_comp_handler()
1113 queue_work(ctx->wq, &ctx->work); in mlx4_ib_tunnel_comp_handler()
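A trimmed kernel-context sketch of the handler at lines 1109-1113: the CQ callback runs in interrupt context, so it never polls the CQ itself; it only queues deferred work, and the state check stops a context that is tearing down from re-queueing (the real handler also tests dev->sriov.is_going_down):

    static void tunnel_comp_handler(struct ib_cq *cq, void *cq_context)
    {
        struct mlx4_ib_demux_pv_ctx *ctx = cq_context;

        /* Defer all CQ draining to process context. */
        if (ctx->state == DEMUX_PV_STATE_ACTIVE)
            queue_work(ctx->wq, &ctx->work);
    }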
1117 static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, in mlx4_ib_post_pv_qp_buf() argument
1130 sg_list.lkey = ctx->mr->lkey; in mlx4_ib_post_pv_qp_buf()
1137 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map, in mlx4_ib_post_pv_qp_buf()
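A sketch of the repost path around lines 1117-1137, with rx_buf_size as an assumed name: one scatter entry covers the ring slot, the buffer is synced for the device before posting, and wr_id carries the slot index (the driver also folds the QP type into it):

    struct ib_sge sg_list = {
        .addr   = tun_qp->ring[index].map,
        .length = rx_buf_size,
        .lkey   = ctx->mr->lkey,        /* DMA MR from ib_get_dma_mr() */
    };
    struct ib_recv_wr recv_wr = {
        .wr_id   = (u64)index,
        .sg_list = &sg_list,
        .num_sge = 1,
    }, *bad_recv_wr;

    ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
                                  rx_buf_size, DMA_FROM_DEVICE);
    return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);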
1281 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc) in mlx4_ib_multiplex_mad() argument
1283 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_multiplex_mad()
1284 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)]; in mlx4_ib_multiplex_mad()
1296 (wc->src_qp & 0x1) != ctx->port - 1 || in mlx4_ib_multiplex_mad()
1298 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp); in mlx4_ib_multiplex_mad()
1302 if (slave != ctx->slave) { in mlx4_ib_multiplex_mad()
1303 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: " in mlx4_ib_multiplex_mad()
1309 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, in mlx4_ib_multiplex_mad()
1322 mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d " in mlx4_ib_multiplex_mad()
1337 !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port)) in mlx4_ib_multiplex_mad()
1341 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave, in mlx4_ib_multiplex_mad()
1346 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave, in mlx4_ib_multiplex_mad()
1358 mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d " in mlx4_ib_multiplex_mad()
1367 ah.ibah.device = ctx->ib_dev; in mlx4_ib_multiplex_mad()
1370 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr); in mlx4_ib_multiplex_mad()
1379 mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave, in mlx4_ib_multiplex_mad()
1382 mlx4_ib_send_to_wire(dev, slave, ctx->port, in mlx4_ib_multiplex_mad()
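The sanity check at line 1296 relies on tunnel QPs being allocated in per-slave pairs, one QP per port, so the low bit of the source QP number identifies the port. A portable restatement:

    #include <stdint.h>

    /* Ports are 1-based; the low bit of the 24-bit source QP number must
     * match the port this MAD arrived on, else the MAD is dropped. */
    static int tunnel_qp_matches_port(uint32_t src_qp, int port)
    {
        return (src_qp & 0x1) == (uint32_t)(port - 1);
    }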
1391 static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, in mlx4_ib_alloc_pv_bufs() argument
1401 tun_qp = &ctx->qp[qp_type]; in mlx4_ib_alloc_pv_bufs()
1429 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1433 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) { in mlx4_ib_alloc_pv_bufs()
1445 ib_dma_map_single(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1449 if (ib_dma_mapping_error(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1466 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, in mlx4_ib_alloc_pv_bufs()
1476 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, in mlx4_ib_alloc_pv_bufs()
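The mapping loop at lines 1429-1433 and its unwind at line 1476 follow the usual map-then-check idiom. A kernel-context sketch, with nmbr_bufs and rx_buf_size assumed:

    static int map_rx_ring(struct mlx4_ib_demux_pv_ctx *ctx,
                           struct mlx4_ib_demux_pv_qp *tun_qp,
                           int nmbr_bufs, size_t rx_buf_size)
    {
        int i;

        for (i = 0; i < nmbr_bufs; i++) {
            tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
                                                    tun_qp->ring[i].addr,
                                                    rx_buf_size,
                                                    DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map))
                goto err_unmap;
        }
        return 0;

    err_unmap:
        /* Unmap only the slots mapped so far, in reverse order. */
        while (i--)
            ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
                                rx_buf_size, DMA_FROM_DEVICE);
        return -ENOMEM;
    }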
1485 static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx, in mlx4_ib_free_pv_qp_bufs() argument
1495 tun_qp = &ctx->qp[qp_type]; in mlx4_ib_free_pv_qp_bufs()
1506 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, in mlx4_ib_free_pv_qp_bufs()
1512 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, in mlx4_ib_free_pv_qp_bufs()
1524 struct mlx4_ib_demux_pv_ctx *ctx; in mlx4_ib_tunnel_comp_worker() local
1528 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); in mlx4_ib_tunnel_comp_worker()
1529 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in mlx4_ib_tunnel_comp_worker()
1531 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) { in mlx4_ib_tunnel_comp_worker()
1532 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; in mlx4_ib_tunnel_comp_worker()
1536 mlx4_ib_multiplex_mad(ctx, &wc); in mlx4_ib_tunnel_comp_worker()
1537 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, in mlx4_ib_tunnel_comp_worker()
1563 ctx->slave, wc.status, wc.wr_id); in mlx4_ib_tunnel_comp_worker()
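A trimmed sketch of the worker at lines 1524-1537: container_of() recovers the context from the work item, and the CQ is re-armed before draining so a completion that lands after the final poll still fires the handler (which re-queues this worker):

    static void tunnel_comp_worker(struct work_struct *work)
    {
        struct mlx4_ib_demux_pv_ctx *ctx =
            container_of(work, struct mlx4_ib_demux_pv_ctx, work);
        struct ib_wc wc;

        ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
        while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
            if (wc.status == IB_WC_SUCCESS && wc.opcode == IB_WC_RECV)
                mlx4_ib_multiplex_mad(ctx, &wc);  /* then repost the slot */
            /* the real worker also handles send completions and errors */
        }
    }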
1586 static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx, in create_pv_sqp() argument
1598 tun_qp = &ctx->qp[qp_type]; in create_pv_sqp()
1601 qp_init_attr.init_attr.send_cq = ctx->cq; in create_pv_sqp()
1602 qp_init_attr.init_attr.recv_cq = ctx->cq; in create_pv_sqp()
1611 qp_init_attr.port = ctx->port; in create_pv_sqp()
1612 qp_init_attr.slave = ctx->slave; in create_pv_sqp()
1621 qp_init_attr.init_attr.port_num = ctx->port; in create_pv_sqp()
1622 qp_init_attr.init_attr.qp_context = ctx; in create_pv_sqp()
1624 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); in create_pv_sqp()
1637 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave, in create_pv_sqp()
1638 ctx->port, IB_DEFAULT_PKEY_FULL, in create_pv_sqp()
1642 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; in create_pv_sqp()
1644 attr.port_num = ctx->port; in create_pv_sqp()
1668 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i); in create_pv_sqp()
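The attribute setup at lines 1598-1624 steers both send and receive completions to the per-context CQ, with qp_context pointing back at the demux context. A plain ib_qp_init_attr sketch (queue depths assumed; the driver actually wraps this in its own mlx4_ib_qp_init_attr so it can carry the slave/port fields seen at lines 1611-1612):

    struct ib_qp_init_attr init_attr = {
        .send_cq          = ctx->cq,
        .recv_cq          = ctx->cq,
        .cap.max_send_wr  = 32,          /* assumed depths */
        .cap.max_recv_wr  = 32,
        .cap.max_send_sge = 1,
        .cap.max_recv_sge = 1,
        .sq_sig_type      = IB_SIGNAL_ALL_WR,
        .qp_type          = IB_QPT_UD,   /* tunnel QPs are UD proxies */
        .port_num         = ctx->port,
        .qp_context       = ctx,
    };

    tun_qp->qp = ib_create_qp(ctx->pd, &init_attr);
    if (IS_ERR(tun_qp->qp))
        return PTR_ERR(tun_qp->qp);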
1688 struct mlx4_ib_demux_pv_ctx *ctx; in mlx4_ib_sqp_comp_worker() local
1694 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); in mlx4_ib_sqp_comp_worker()
1695 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in mlx4_ib_sqp_comp_worker()
1697 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) { in mlx4_ib_sqp_comp_worker()
1698 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; in mlx4_ib_sqp_comp_worker()
1717 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad); in mlx4_ib_sqp_comp_worker()
1718 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id & in mlx4_ib_sqp_comp_worker()
1730 ctx->slave, wc.status, wc.wr_id); in mlx4_ib_sqp_comp_worker()
1747 struct mlx4_ib_demux_pv_ctx *ctx; in alloc_pv_object() local
1750 ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL); in alloc_pv_object()
1751 if (!ctx) { in alloc_pv_object()
1757 ctx->ib_dev = &dev->ib_dev; in alloc_pv_object()
1758 ctx->port = port; in alloc_pv_object()
1759 ctx->slave = slave; in alloc_pv_object()
1760 *ret_ctx = ctx; in alloc_pv_object()
1773 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) in create_pv_resources() argument
1777 if (ctx->state != DEMUX_PV_STATE_DOWN) in create_pv_resources()
1780 ctx->state = DEMUX_PV_STATE_STARTING; in create_pv_resources()
1782 if (rdma_port_get_link_layer(ibdev, ctx->port) == in create_pv_resources()
1784 ctx->has_smi = 1; in create_pv_resources()
1786 if (ctx->has_smi) { in create_pv_resources()
1787 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun); in create_pv_resources()
1794 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun); in create_pv_resources()
1801 if (ctx->has_smi) in create_pv_resources()
1804 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, in create_pv_resources()
1805 NULL, ctx, cq_size, 0); in create_pv_resources()
1806 if (IS_ERR(ctx->cq)) { in create_pv_resources()
1807 ret = PTR_ERR(ctx->cq); in create_pv_resources()
1812 ctx->pd = ib_alloc_pd(ctx->ib_dev); in create_pv_resources()
1813 if (IS_ERR(ctx->pd)) { in create_pv_resources()
1814 ret = PTR_ERR(ctx->pd); in create_pv_resources()
1819 ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE); in create_pv_resources()
1820 if (IS_ERR(ctx->mr)) { in create_pv_resources()
1821 ret = PTR_ERR(ctx->mr); in create_pv_resources()
1826 if (ctx->has_smi) { in create_pv_resources()
1827 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun); in create_pv_resources()
1835 ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun); in create_pv_resources()
1843 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker); in create_pv_resources()
1845 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); in create_pv_resources()
1847 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; in create_pv_resources()
1849 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in create_pv_resources()
1854 ctx->state = DEMUX_PV_STATE_ACTIVE; in create_pv_resources()
1858 ctx->wq = NULL; in create_pv_resources()
1859 ib_destroy_qp(ctx->qp[1].qp); in create_pv_resources()
1860 ctx->qp[1].qp = NULL; in create_pv_resources()
1864 if (ctx->has_smi) in create_pv_resources()
1865 ib_destroy_qp(ctx->qp[0].qp); in create_pv_resources()
1866 ctx->qp[0].qp = NULL; in create_pv_resources()
1869 ib_dereg_mr(ctx->mr); in create_pv_resources()
1870 ctx->mr = NULL; in create_pv_resources()
1873 ib_dealloc_pd(ctx->pd); in create_pv_resources()
1874 ctx->pd = NULL; in create_pv_resources()
1877 ib_destroy_cq(ctx->cq); in create_pv_resources()
1878 ctx->cq = NULL; in create_pv_resources()
1881 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun); in create_pv_resources()
1884 if (ctx->has_smi) in create_pv_resources()
1885 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun); in create_pv_resources()
1887 ctx->state = DEMUX_PV_STATE_DOWN; in create_pv_resources()
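create_pv_resources() (lines 1773-1887) is a textbook goto unwind ladder: resources are created in order, and each failure path releases exactly what already exists, in reverse order, NULLing pointers as it goes. A self-contained sketch of the first three steps:

    static int setup_cq_pd_mr(struct mlx4_ib_demux_pv_ctx *ctx, int cq_size)
    {
        int ret;

        ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
                               NULL, ctx, cq_size, 0);
        if (IS_ERR(ctx->cq))
            return PTR_ERR(ctx->cq);

        ctx->pd = ib_alloc_pd(ctx->ib_dev);
        if (IS_ERR(ctx->pd)) {
            ret = PTR_ERR(ctx->pd);
            goto err_cq;            /* only the CQ exists */
        }

        ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(ctx->mr)) {
            ret = PTR_ERR(ctx->mr);
            goto err_pd;            /* PD and CQ both exist */
        }
        return 0;

    err_pd:
        ib_dealloc_pd(ctx->pd);
        ctx->pd = NULL;
    err_cq:
        ib_destroy_cq(ctx->cq);
        ctx->cq = NULL;
        return ret;
    }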
1892 struct mlx4_ib_demux_pv_ctx *ctx, int flush) in destroy_pv_resources() argument
1894 if (!ctx) in destroy_pv_resources()
1896 if (ctx->state > DEMUX_PV_STATE_DOWN) { in destroy_pv_resources()
1897 ctx->state = DEMUX_PV_STATE_DOWNING; in destroy_pv_resources()
1899 flush_workqueue(ctx->wq); in destroy_pv_resources()
1900 if (ctx->has_smi) { in destroy_pv_resources()
1901 ib_destroy_qp(ctx->qp[0].qp); in destroy_pv_resources()
1902 ctx->qp[0].qp = NULL; in destroy_pv_resources()
1903 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1); in destroy_pv_resources()
1905 ib_destroy_qp(ctx->qp[1].qp); in destroy_pv_resources()
1906 ctx->qp[1].qp = NULL; in destroy_pv_resources()
1907 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1); in destroy_pv_resources()
1908 ib_dereg_mr(ctx->mr); in destroy_pv_resources()
1909 ctx->mr = NULL; in destroy_pv_resources()
1910 ib_dealloc_pd(ctx->pd); in destroy_pv_resources()
1911 ctx->pd = NULL; in destroy_pv_resources()
1912 ib_destroy_cq(ctx->cq); in destroy_pv_resources()
1913 ctx->cq = NULL; in destroy_pv_resources()
1914 ctx->state = DEMUX_PV_STATE_DOWN; in destroy_pv_resources()
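The teardown at lines 1892-1914 quiesces before freeing: marking the context DOWNING makes the completion handler's state check (line 1112) stop queueing new work, and flushing the workqueue waits out any worker still touching the QPs. A fragment of that ordering:

    ctx->state = DEMUX_PV_STATE_DOWNING;
    if (flush)
        flush_workqueue(ctx->wq);   /* no worker may outlive the QPs below */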
1958 struct mlx4_ib_demux_ctx *ctx, in mlx4_ib_alloc_demux_ctx() argument
1965 ctx->tun = kcalloc(dev->dev->caps.sqp_demux, in mlx4_ib_alloc_demux_ctx()
1967 if (!ctx->tun) in mlx4_ib_alloc_demux_ctx()
1970 ctx->dev = dev; in mlx4_ib_alloc_demux_ctx()
1971 ctx->port = port; in mlx4_ib_alloc_demux_ctx()
1972 ctx->ib_dev = &dev->ib_dev; in mlx4_ib_alloc_demux_ctx()
1984 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]); in mlx4_ib_alloc_demux_ctx()
1991 ret = mlx4_ib_mcg_port_init(ctx); in mlx4_ib_alloc_demux_ctx()
1998 ctx->wq = create_singlethread_workqueue(name); in mlx4_ib_alloc_demux_ctx()
1999 if (!ctx->wq) { in mlx4_ib_alloc_demux_ctx()
2006 ctx->ud_wq = create_singlethread_workqueue(name); in mlx4_ib_alloc_demux_ctx()
2007 if (!ctx->ud_wq) { in mlx4_ib_alloc_demux_ctx()
2016 destroy_workqueue(ctx->wq); in mlx4_ib_alloc_demux_ctx()
2017 ctx->wq = NULL; in mlx4_ib_alloc_demux_ctx()
2020 mlx4_ib_mcg_port_cleanup(ctx, 1); in mlx4_ib_alloc_demux_ctx()
2024 kfree(ctx->tun); in mlx4_ib_alloc_demux_ctx()
2025 ctx->tun = NULL; in mlx4_ib_alloc_demux_ctx()
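The per-port demux context gets a dedicated single-threaded workqueue (line 1998). A sketch with a hypothetical name format (the real snprintf is truncated out of this listing):

    char name[16];

    snprintf(name, sizeof(name), "mlx4_ibt%d", port);  /* assumed format */
    ctx->wq = create_singlethread_workqueue(name);
    if (!ctx->wq) {
        ret = -ENOMEM;
        goto err_mcg;   /* undo mlx4_ib_mcg_port_init() from line 1991 */
    }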
2052 static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx) in mlx4_ib_free_demux_ctx() argument
2055 if (ctx) { in mlx4_ib_free_demux_ctx()
2056 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); in mlx4_ib_free_demux_ctx()
2057 mlx4_ib_mcg_port_cleanup(ctx, 1); in mlx4_ib_free_demux_ctx()
2059 if (!ctx->tun[i]) in mlx4_ib_free_demux_ctx()
2061 if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN) in mlx4_ib_free_demux_ctx()
2062 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; in mlx4_ib_free_demux_ctx()
2064 flush_workqueue(ctx->wq); in mlx4_ib_free_demux_ctx()
2066 destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); in mlx4_ib_free_demux_ctx()
2067 free_pv_object(dev, i, ctx->port); in mlx4_ib_free_demux_ctx()
2069 kfree(ctx->tun); in mlx4_ib_free_demux_ctx()
2070 destroy_workqueue(ctx->ud_wq); in mlx4_ib_free_demux_ctx()
2071 destroy_workqueue(ctx->wq); in mlx4_ib_free_demux_ctx()