/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/

qp.c
     51  struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;   in mlx4_qp_event() (local)
     54  spin_lock(&qp_table->lock);   in mlx4_qp_event()
     60  spin_unlock(&qp_table->lock);   in mlx4_qp_event()
    220  struct mlx4_qp_table *qp_table = &priv->qp_table;   in __mlx4_qp_reserve_range() (local)
    233  *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,   in __mlx4_qp_reserve_range()
    271  struct mlx4_qp_table *qp_table = &priv->qp_table;   in __mlx4_qp_release_range() (local)
    275  mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);   in __mlx4_qp_release_range()
    301  struct mlx4_qp_table *qp_table = &priv->qp_table;   in __mlx4_qp_alloc_icm() (local)
    304  err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);   in __mlx4_qp_alloc_icm()
    308  err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);   in __mlx4_qp_alloc_icm()
    [all …]
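
The qp.c hits at lines 51-60 outline mlx4's async-event dispatch: the QP is resolved and pinned while qp_table->lock is held, and only then is its event callback invoked. Below is a minimal, self-contained sketch of that locking pattern; the lookup helper, the fixed-size slots[] map and the refcount handling are illustrative assumptions, not mlx4's actual code.

    #include <linux/atomic.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct ev_qp {
        atomic_t refcount;
        void (*event)(struct ev_qp *qp, int event_type);
    };

    struct ev_qp_table {
        spinlock_t lock;                  /* cf. qp_table->lock in the hits   */
        struct ev_qp *slots[256];         /* toy qpn -> qp map, sketch only   */
    };

    static struct ev_qp *ev_lookup(struct ev_qp_table *t, u32 qpn)
    {
        return t->slots[qpn & 255];       /* stand-in for mlx4's real lookup  */
    }

    static void ev_qp_event(struct ev_qp_table *t, u32 qpn, int event_type)
    {
        struct ev_qp *qp;

        spin_lock(&t->lock);
        qp = ev_lookup(t, qpn);
        if (qp)
            atomic_inc(&qp->refcount);    /* keep the QP alive past the lock  */
        spin_unlock(&t->lock);

        if (!qp)
            return;

        qp->event(qp, event_type);
        atomic_dec(&qp->refcount);        /* a real driver typically pairs this
                                             with a completion so destroy can
                                             wait for outstanding events       */
    }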

profile.c
    192  for (priv->qp_table.rdmarc_shift = 0;   in mlx4_make_profile()
    193  request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;   in mlx4_make_profile()
    194  ++priv->qp_table.rdmarc_shift)   in mlx4_make_profile()
    196  dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;   in mlx4_make_profile()
    197  priv->qp_table.rdmarc_base = (u32) profile[i].start;   in mlx4_make_profile()
    199  init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift;   in mlx4_make_profile()
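
The profile.c loop at lines 192-194 picks the smallest rdmarc_shift for which num_qp << rdmarc_shift covers the RDMARC entries the profile reserved; 1 << rdmarc_shift then becomes max_qp_dest_rdma (line 196) and the shift itself is programmed as log_rd_per_qp (line 199). A standalone restatement of that arithmetic, with made-up sizes in the example:

    #include <linux/types.h>

    static u32 rdmarc_shift_sketch(u32 num_qp, u32 rdmarc_entries)
    {
        u32 shift;

        /* same loop shape as profile.c:192-194 */
        for (shift = 0; num_qp << shift < rdmarc_entries; ++shift)
            ;
        return shift;
    }

    /*
     * Hypothetical example: num_qp = 65536, rdmarc_entries = 262144
     *   -> shift = 2, so max_qp_dest_rdma = 1 << 2 = 4 outstanding RDMA-read
     *      resources per destination QP, and log_rd_per_qp = 2 is what gets
     *      written into INIT_HCA.
     */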

main.c
   1323  err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,   in mlx4_init_cmpt_table()
   1369  mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);   in mlx4_init_cmpt_table()
   1453  err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,   in mlx4_init_icm()
   1464  err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,   in mlx4_init_icm()
   1475  err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,   in mlx4_init_icm()
   1486  err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,   in mlx4_init_icm()
   1488  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,   in mlx4_init_icm()
   1544  mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);   in mlx4_init_icm()
   1547  mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);   in mlx4_init_icm()
   1550  mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);   in mlx4_init_icm()
    [all …]
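
The main.c hits show the usual ICM bring-up shape: the per-QP ICM tables (qp, auxc, altc, rdmarc) are initialized in sequence around lines 1453-1488, and the cleanup path at lines 1544-1550 tears them down in reverse order. A skeleton of that goto-unwind idiom, using hypothetical helpers in place of mlx4_init_icm_table()/mlx4_cleanup_icm_table():

    struct icm_table_sketch { int mapped; };

    static int sketch_init_table(struct icm_table_sketch *t)
    {
        t->mapped = 1;                    /* pretend this can fail with -ENOMEM */
        return 0;
    }

    static void sketch_cleanup_table(struct icm_table_sketch *t)
    {
        t->mapped = 0;
    }

    struct qp_icm_sketch {
        struct icm_table_sketch qp, auxc, altc, rdmarc;
    };

    static int sketch_init_qp_icm(struct qp_icm_sketch *t)
    {
        int err;

        err = sketch_init_table(&t->qp);
        if (err)
            goto err_out;
        err = sketch_init_table(&t->auxc);
        if (err)
            goto err_qp;
        err = sketch_init_table(&t->altc);
        if (err)
            goto err_auxc;
        err = sketch_init_table(&t->rdmarc);
        if (err)
            goto err_altc;
        return 0;

    err_altc:                             /* unwind in reverse creation order */
        sketch_cleanup_table(&t->altc);
    err_auxc:
        sketch_cleanup_table(&t->auxc);
    err_qp:
        sketch_cleanup_table(&t->qp);
    err_out:
        return err;
    }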

mlx4.h
    703  struct mlx4_icm_table qp_table;   (member)
    864  struct mlx4_qp_table qp_table;   (member)
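
Worth noting from the two mlx4.h hits: the name appears at two levels. struct mlx4_priv embeds a struct mlx4_qp_table (line 864), which in turn embeds ICM tables, one of them itself named qp_table (line 703); that is why expressions such as &priv->qp_table.qp_table and &qp_table->qp_table show up in the qp.c and main.c hits. A reduced sketch of the nesting (member sets and types are illustrative, not the kernel's full definitions):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct icm_sketch { int dummy; };         /* stands in for struct mlx4_icm_table */

    struct mlx4_qp_table_sketch {             /* cf. struct mlx4_qp_table            */
        spinlock_t lock;                      /* qp_table->lock in the qp.c hits     */
        struct icm_sketch qp_table;           /* QP-context ICM (mlx4.h:703)         */
        struct icm_sketch auxc_table;
        struct icm_sketch altc_table;
        struct icm_sketch rdmarc_table;
        int rdmarc_shift;                     /* set by mlx4_make_profile()          */
        u32 rdmarc_base;
    };

    struct mlx4_priv_sketch {
        struct mlx4_qp_table_sketch qp_table; /* mlx4.h:864 -> priv->qp_table....    */
    };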

/linux-4.1.27/drivers/infiniband/hw/mthca/

mthca_qp.c
    197  return qp->qpn >= dev->qp_table.sqp_start &&   in is_sqp()
    198  qp->qpn <= dev->qp_table.sqp_start + 3;   in is_sqp()
    203  return qp->qpn >= dev->qp_table.sqp_start &&   in is_qp0()
    204  qp->qpn <= dev->qp_table.sqp_start + 1;   in is_qp0()
    243  spin_lock(&dev->qp_table.lock);   in mthca_qp_event()
    244  qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));   in mthca_qp_event()
    247  spin_unlock(&dev->qp_table.lock);   in mthca_qp_event()
    264  spin_lock(&dev->qp_table.lock);   in mthca_qp_event()
    267  spin_unlock(&dev->qp_table.lock);   in mthca_qp_event()
    754  cpu_to_be32(dev->qp_table.rdb_base +   in __mthca_modify_qp()
    [all …]
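
The first four mthca_qp.c hits show that mthca identifies special QPs purely by QPN range: the four QPNs starting at qp_table.sqp_start are the special QPs, and the first two of them are the QP0s (consistent with one QP0 and one QP1 per port on a two-port HCA). A self-contained restatement of those predicates, with reduced stand-in types:

    #include <linux/types.h>

    struct mthca_qp_table_sketch { u32 sqp_start; };
    struct mthca_dev_sketch      { struct mthca_qp_table_sketch qp_table; };
    struct mthca_qp_sketch       { u32 qpn; };

    /* cf. mthca_qp.c:197-198 */
    static int is_sqp_sketch(struct mthca_dev_sketch *dev, struct mthca_qp_sketch *qp)
    {
        return qp->qpn >= dev->qp_table.sqp_start &&
               qp->qpn <= dev->qp_table.sqp_start + 3;
    }

    /* cf. mthca_qp.c:203-204 */
    static int is_qp0_sketch(struct mthca_dev_sketch *dev, struct mthca_qp_sketch *qp)
    {
        return qp->qpn >= dev->qp_table.sqp_start &&
               qp->qpn <= dev->qp_table.sqp_start + 1;
    }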

mthca_main.c
    444  mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,   in mthca_init_icm()
    449  if (!mdev->qp_table.qp_table) {   in mthca_init_icm()
    455  mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,   in mthca_init_icm()
    460  if (!mdev->qp_table.eqp_table) {   in mthca_init_icm()
    466  mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,   in mthca_init_icm()
    469  mdev->qp_table.rdb_shift, 0,   in mthca_init_icm()
    471  if (!mdev->qp_table.rdb_table) {   in mthca_init_icm()
    531  mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);   in mthca_init_icm()
    534  mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);   in mthca_init_icm()
    537  mthca_free_icm_table(mdev, mdev->qp_table.qp_table);   in mthca_init_icm()
    [all …]

mthca_profile.c
    210  for (dev->qp_table.rdb_shift = 0;   in mthca_make_profile()
    211  request->num_qp << dev->qp_table.rdb_shift < profile[i].num;   in mthca_make_profile()
    212  ++dev->qp_table.rdb_shift)   in mthca_make_profile()
    214  dev->qp_table.rdb_base = (u32) profile[i].start;   in mthca_make_profile()

mthca_dev.h
    260  struct mthca_icm_table *qp_table;   (member)
    345  struct mthca_qp_table qp_table;   (member)

mthca_cq.c
    525  *cur_qp = mthca_array_get(&dev->qp_table.qp,   in mthca_poll_one()

mthca_provider.c
    101  props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift;   in mthca_query_device()

/linux-4.1.27/drivers/infiniband/hw/ipath/

ipath_qp.c
    861  err = ipath_alloc_qpn(&dev->qp_table, qp,   in ipath_create_qp()
    941  ipath_free_qp(&dev->qp_table, qp);   in ipath_create_qp()
    942  free_qpn(&dev->qp_table, qp->ibqp.qp_num);   in ipath_create_qp()
    985  ipath_free_qp(&dev->qp_table, qp);   in ipath_destroy_qp()
   1000  free_qpn(&dev->qp_table, qp->ibqp.qp_num);   in ipath_destroy_qp()
   1027  idev->qp_table.last = 1; /* QPN 0 and 1 are special. */   in ipath_init_qp_table()
   1028  idev->qp_table.max = size;   in ipath_init_qp_table()
   1029  idev->qp_table.nmaps = 1;   in ipath_init_qp_table()
   1030  idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),   in ipath_init_qp_table()
   1032  if (idev->qp_table.table == NULL) {   in ipath_init_qp_table()
    [all …]
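
The ipath_init_qp_table() hits (lines 1027-1032) show the table being sized up front with kzalloc() and QPNs 0 and 1 being reserved as special. A minimal sketch of that setup; the field meanings and the ipath_qp_table layout used here are inferred from the hits, not taken from the header:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct ipath_qp;                          /* opaque in this sketch */

    struct ipath_qp_table_sketch {
        spinlock_t lock;
        u32 last;                             /* last QPN handed out (assumed)      */
        u32 max;                              /* number of table slots              */
        u32 nmaps;                            /* QPN allocation maps in use (assumed) */
        struct ipath_qp **table;
    };

    static int sketch_init_qp_table(struct ipath_qp_table_sketch *t, int size)
    {
        spin_lock_init(&t->lock);             /* cf. ipath_verbs.c:2024  */
        t->last = 1;                          /* QPN 0 and 1 are special */
        t->max = size;
        t->nmaps = 1;
        t->table = kzalloc(size * sizeof(*t->table), GFP_KERNEL);
        return t->table ? 0 : -ENOMEM;
    }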

ipath_verbs.c
    652  qp = ipath_lookup_qpn(&dev->qp_table, qp_num);   in ipath_ib_rcv()
   2024  spin_lock_init(&idev->qp_table.lock);   in ipath_register_ib_device()
   2203  kfree(idev->qp_table.table);   in ipath_register_ib_device()
   2239  qps_inuse = ipath_free_all_qps(&dev->qp_table);   in ipath_unregister_ib_device()
   2243  kfree(dev->qp_table.table);   in ipath_unregister_ib_device()

ipath_ud.c
     68  qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);   in ipath_ud_loopback()

ipath_verbs.h
    549  struct ipath_qp_table qp_table;   (member)

ipath_ruc.c
    276  qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);   in ipath_ruc_loopback()

/linux-4.1.27/drivers/infiniband/hw/amso1100/

c2_qp.c
    386  spin_lock_irq(&c2dev->qp_table.lock);   in c2_alloc_qpn()
    388  ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);   in c2_alloc_qpn()
    392  spin_unlock_irq(&c2dev->qp_table.lock);   in c2_alloc_qpn()
    399  spin_lock_irq(&c2dev->qp_table.lock);   in c2_free_qpn()
    400  idr_remove(&c2dev->qp_table.idr, qpn);   in c2_free_qpn()
    401  spin_unlock_irq(&c2dev->qp_table.lock);   in c2_free_qpn()
    409  spin_lock_irqsave(&c2dev->qp_table.lock, flags);   in c2_find_qpn()
    410  qp = idr_find(&c2dev->qp_table.idr, qpn);   in c2_find_qpn()
    411  spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);   in c2_find_qpn()
   1017  spin_lock_init(&c2dev->qp_table.lock);   in c2_init_qp_table()
    [all …]
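
The c2_qp.c hits cover amso1100's whole QPN scheme: an IDR guarded by qp_table.lock, with idr_alloc_cyclic() handing out QPNs, idr_remove() releasing them, and idr_find() resolving them under irqsave locking. A condensed, self-contained version of that pattern; the structure layout and function names are inferred from the hits, and the idr_preload() calls are not shown above, they are simply the standard companion to allocating with GFP_NOWAIT under a spinlock:

    #include <linux/gfp.h>
    #include <linux/idr.h>
    #include <linux/spinlock.h>

    struct c2_qp_sketch;                      /* opaque in this sketch */

    struct qpn_table_sketch {
        struct idr idr;
        spinlock_t lock;
    };

    static void sketch_init_qpn_table(struct qpn_table_sketch *t)
    {
        idr_init(&t->idr);
        spin_lock_init(&t->lock);             /* cf. c2_qp.c:1017 */
    }

    static int sketch_alloc_qpn(struct qpn_table_sketch *t, struct c2_qp_sketch *qp)
    {
        int qpn;

        idr_preload(GFP_KERNEL);              /* lets the GFP_NOWAIT allocation
                                                 below succeed under the lock   */
        spin_lock_irq(&t->lock);
        qpn = idr_alloc_cyclic(&t->idr, qp, 0, 0, GFP_NOWAIT);
        spin_unlock_irq(&t->lock);
        idr_preload_end();

        return qpn;                           /* new QPN, or a negative errno */
    }

    static void sketch_free_qpn(struct qpn_table_sketch *t, int qpn)
    {
        spin_lock_irq(&t->lock);
        idr_remove(&t->idr, qpn);
        spin_unlock_irq(&t->lock);
    }

    static struct c2_qp_sketch *sketch_find_qpn(struct qpn_table_sketch *t, int qpn)
    {
        struct c2_qp_sketch *qp;
        unsigned long flags;

        spin_lock_irqsave(&t->lock, flags);
        qp = idr_find(&t->idr, qpn);
        spin_unlock_irqrestore(&t->lock, flags);
        return qp;
    }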

c2.h
    313  struct c2_qp_table qp_table;   (member)

/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/

qp.c
     45  struct mlx5_qp_table *table = &dev->priv.qp_table;   in mlx5_get_rsc()
    185  struct mlx5_qp_table *table = &dev->priv.qp_table;   in mlx5_core_create_qp()
    246  struct mlx5_qp_table *table = &dev->priv.qp_table;   in mlx5_core_destroy_qp()
    339  struct mlx5_qp_table *table = &dev->priv.qp_table;   in mlx5_init_qp_table()

/linux-4.1.27/drivers/infiniband/hw/qib/

qib_qp.c
    237  qp->next = dev->qp_table[n];   in insert_qp()
    238  rcu_assign_pointer(dev->qp_table[n], qp);   in insert_qp()
    268  qpp = &dev->qp_table[n];   in remove_qp()
    317  qp = rcu_dereference_protected(dev->qp_table[n],   in qib_free_all_qps()
    319  RCU_INIT_POINTER(dev->qp_table[n], NULL);   in qib_free_all_qps()
    355  for (qp = rcu_dereference(dev->qp_table[n]); qp;   in qib_lookup_qpn()
   1350  qp = rcu_dereference(dev->qp_table[n]);   in qib_qp_iter_next()
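
The qib_qp.c hits show that qib's table is an array of RCU-protected bucket chains: insert_qp() publishes with rcu_assign_pointer() (lines 237-238), removal walks the chain through qpp, and lookups traverse it under rcu_dereference() (line 355). A simplified sketch of the insert/lookup side of that pattern; the hash, the lock name, the qib_qp layout and the refcounting are assumptions filled in around the lines shown:

    #include <linux/atomic.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct qib_qp_sketch {
        struct qib_qp_sketch __rcu *next;     /* bucket chain */
        atomic_t refcount;
        u32 qpn;
    };

    struct qib_dev_sketch {
        spinlock_t qpt_lock;                  /* serializes writers (name assumed) */
        struct qib_qp_sketch __rcu **qp_table;/* cf. qib_verbs.h:762               */
        u32 qp_table_size;
    };

    static void sketch_insert_qp(struct qib_dev_sketch *dev, struct qib_qp_sketch *qp)
    {
        u32 n = qp->qpn % dev->qp_table_size; /* stand-in for qib's real hash */
        unsigned long flags;

        spin_lock_irqsave(&dev->qpt_lock, flags);
        qp->next = dev->qp_table[n];          /* cf. qib_qp.c:237 */
        rcu_assign_pointer(dev->qp_table[n], qp);  /* cf. qib_qp.c:238 */
        spin_unlock_irqrestore(&dev->qpt_lock, flags);
    }

    static struct qib_qp_sketch *sketch_lookup_qpn(struct qib_dev_sketch *dev, u32 qpn)
    {
        u32 n = qpn % dev->qp_table_size;
        struct qib_qp_sketch *qp;

        rcu_read_lock();
        for (qp = rcu_dereference(dev->qp_table[n]); qp;
             qp = rcu_dereference(qp->next))
            if (qp->qpn == qpn)
                break;
        if (qp)
            atomic_inc(&qp->refcount);        /* pin before leaving the read section */
        rcu_read_unlock();
        return qp;                            /* caller drops the reference */
    }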

qib_verbs.c
   2059  dev->qp_table = kmalloc_array(   in qib_register_ib_device()
   2061  sizeof(*dev->qp_table),   in qib_register_ib_device()
   2063  if (!dev->qp_table) {   in qib_register_ib_device()
   2068  RCU_INIT_POINTER(dev->qp_table[i], NULL);   in qib_register_ib_device()
   2277  kfree(dev->qp_table);   in qib_register_ib_device()
   2330  kfree(dev->qp_table);   in qib_unregister_ib_device()

qib_verbs.h
    762  struct qib_qp __rcu **qp_table;   (member)

/linux-4.1.27/include/linux/mlx5/

qp.h
    575  return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);   in __mlx5_qp_lookup()
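
__mlx5_qp_lookup() above is literally a radix_tree_lookup() on priv.qp_table.tree, and the mlx5/core/qp.c hits show create, destroy and the resource-lookup path all going through that same table. A reduced sketch of a radix-tree QPN table in that style; the write-side locking shown here, and the protection required on the read side, are assumptions, since the hits do not show how mlx5 actually guards the lookup:

    #include <linux/gfp.h>
    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct mlx5_qp_sketch { u32 qpn; };

    struct mlx5_qp_table_sketch {
        spinlock_t lock;
        struct radix_tree_root tree;
    };

    static void sketch_init_qp_table(struct mlx5_qp_table_sketch *t)
    {
        spin_lock_init(&t->lock);
        INIT_RADIX_TREE(&t->tree, GFP_ATOMIC);   /* allow insertion under the lock */
    }

    static int sketch_register_qp(struct mlx5_qp_table_sketch *t, struct mlx5_qp_sketch *qp)
    {
        int err;

        spin_lock_irq(&t->lock);
        err = radix_tree_insert(&t->tree, qp->qpn, qp);
        spin_unlock_irq(&t->lock);
        return err;
    }

    static void sketch_unregister_qp(struct mlx5_qp_table_sketch *t, u32 qpn)
    {
        spin_lock_irq(&t->lock);
        radix_tree_delete(&t->tree, qpn);
        spin_unlock_irq(&t->lock);
    }

    /* callers must hold t->lock (or otherwise keep the entry alive) across use */
    static struct mlx5_qp_sketch *sketch_qp_lookup(struct mlx5_qp_table_sketch *t, u32 qpn)
    {
        return radix_tree_lookup(&t->tree, qpn);
    }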

driver.h
    488  struct mlx5_qp_table qp_table;   (member)

/linux-4.1.27/drivers/infiniband/hw/nes/

nes.c
    301  nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;   in nes_cqp_rem_ref_callback()
    371  return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp;   in nes_get_qp()
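
The nes hits use yet another scheme: a flat array indexed by qp_id - NES_FIRST_QPN, filled in at create time (nes_verbs.c:1418) and cleared in nes_cqp_rem_ref_callback() (nes.c:301). A tiny sketch of that mapping with a bounds check added; SKETCH_FIRST_QPN and max_qp are placeholders, not the driver's real values:

    #include <linux/types.h>

    #define SKETCH_FIRST_QPN 64U              /* placeholder for NES_FIRST_QPN */

    struct nes_qp_sketch { u32 qp_id; };

    struct nes_adapter_sketch {
        struct nes_qp_sketch **qp_table;      /* cf. nes_hw.h:1062 */
        u32 max_qp;
    };

    static struct nes_qp_sketch *sketch_nes_get_qp(struct nes_adapter_sketch *a, u32 qpn)
    {
        if (qpn < SKETCH_FIRST_QPN || qpn >= SKETCH_FIRST_QPN + a->max_qp)
            return NULL;                      /* the hits show no check; added for safety */
        return a->qp_table[qpn - SKETCH_FIRST_QPN];
    }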

nes_hw.h
   1062  struct nes_qp **qp_table;   (member)

nes_hw.c
    445  nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size…   in nes_init_adapter()
   3388  context = (unsigned long)nesadapter->qp_table[le32_to_cpu(   in nes_terminate_connection()
   3562  context = (unsigned long)nesadapter->qp_table[le32_to_cpu(   in nes_process_iwarp_aeqe()

nes_mgt.c
    801  context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN];   in nes_mgt_ce_handler()

nes_verbs.c
   1418  nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;   in nes_create_qp()