/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
D | qp.c |
    51   struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;  in mlx4_qp_event() local
    54   spin_lock(&qp_table->lock);  in mlx4_qp_event()
    60   spin_unlock(&qp_table->lock);  in mlx4_qp_event()
    220  struct mlx4_qp_table *qp_table = &priv->qp_table;  in __mlx4_qp_reserve_range() local
    233  *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,  in __mlx4_qp_reserve_range()
    271  struct mlx4_qp_table *qp_table = &priv->qp_table;  in __mlx4_qp_release_range() local
    275  mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);  in __mlx4_qp_release_range()
    301  struct mlx4_qp_table *qp_table = &priv->qp_table;  in __mlx4_qp_alloc_icm() local
    304  err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);  in __mlx4_qp_alloc_icm()
    308  err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);  in __mlx4_qp_alloc_icm()
    [all …]
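The qp.c hits above show mlx4 taking qp_table->lock around a lookup by QP number before dispatching an asynchronous event, plus a zone allocator for QPN ranges. Below is a minimal userspace sketch of just the locked-lookup-then-dispatch shape; the fake_qp/fake_qp_table types, the fixed-size slot array, the pthread mutex, and the reference count are illustrative assumptions, not the driver's actual data structures.

```c
/* Userspace sketch of the "lock, look up the QP by number, pin it, unlock,
 * then deliver the event" pattern suggested by mlx4_qp_event(). All names
 * and types here are illustrative, not the driver's. */
#include <pthread.h>
#include <stdio.h>

#define QP_TABLE_SIZE 256

struct fake_qp {
	int qpn;
	int refcount;
	void (*event)(struct fake_qp *qp, int event_type);
};

struct fake_qp_table {
	pthread_mutex_t lock;                  /* stands in for qp_table->lock */
	struct fake_qp *slots[QP_TABLE_SIZE];
};

static void qp_event(struct fake_qp_table *table, int qpn, int event_type)
{
	struct fake_qp *qp;

	pthread_mutex_lock(&table->lock);
	qp = table->slots[qpn % QP_TABLE_SIZE];
	if (qp)
		qp->refcount++;                /* keep the QP alive after unlock */
	pthread_mutex_unlock(&table->lock);

	if (!qp) {
		fprintf(stderr, "async event %d for unknown QPN %d\n", event_type, qpn);
		return;
	}
	qp->event(qp, event_type);             /* deliver outside the lock */

	pthread_mutex_lock(&table->lock);
	qp->refcount--;
	pthread_mutex_unlock(&table->lock);
}

static void print_event(struct fake_qp *qp, int event_type)
{
	printf("QPN %d got event %d\n", qp->qpn, event_type);
}

int main(void)
{
	struct fake_qp_table table = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_qp qp = { .qpn = 42, .refcount = 0, .event = print_event };

	table.slots[qp.qpn % QP_TABLE_SIZE] = &qp;
	qp_event(&table, 42, 1);
	qp_event(&table, 7, 1);
	return 0;
}
```

Delivering the event only after the lock is dropped keeps the callback out of the locked section, which is the general shape the snippet hints at.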
|
D | profile.c |
    188  for (priv->qp_table.rdmarc_shift = 0;  in mlx4_make_profile()
    189  request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;  in mlx4_make_profile()
    190  ++priv->qp_table.rdmarc_shift)  in mlx4_make_profile()
    192  dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;  in mlx4_make_profile()
    193  priv->qp_table.rdmarc_base = (u32) profile[i].start;  in mlx4_make_profile()
    195  init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift;  in mlx4_make_profile()
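The loop at lines 188-190 picks the smallest rdmarc_shift for which num_qp << rdmarc_shift covers the RDMARC entries reserved in the profile; 1 << rdmarc_shift then becomes max_qp_dest_rdma. A standalone sketch of that arithmetic, with made-up sizes:

```c
/* Standalone illustration of the rdmarc_shift loop in mlx4_make_profile():
 * find the smallest shift so that num_qp << shift covers the reserved
 * RDMARC entries. The numbers below are made up. */
#include <stdio.h>

int main(void)
{
	unsigned int num_qp = 64 * 1024;                  /* requested QPs (example) */
	unsigned long long rdmarc_entries = 256 * 1024;   /* entries carved out of the profile */
	int rdmarc_shift;

	for (rdmarc_shift = 0;
	     ((unsigned long long)num_qp << rdmarc_shift) < rdmarc_entries;
	     ++rdmarc_shift)
		; /* the loop body is empty: it only advances the shift */

	printf("rdmarc_shift = %d, max_qp_dest_rdma = %d\n",
	       rdmarc_shift, 1 << rdmarc_shift);          /* -> shift 2, limit 4 */
	return 0;
}
```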
|
D | main.c |
    1350  err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,  in mlx4_init_cmpt_table()
    1396  mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);  in mlx4_init_cmpt_table()
    1480  err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,  in mlx4_init_icm()
    1491  err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,  in mlx4_init_icm()
    1502  err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,  in mlx4_init_icm()
    1513  err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,  in mlx4_init_icm()
    1515  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,  in mlx4_init_icm()
    1571  mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);  in mlx4_init_icm()
    1574  mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);  in mlx4_init_icm()
    1577  mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);  in mlx4_init_icm()
    [all …]
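In mlx4_init_icm() the qp_table's ICM sub-tables (qp, auxc, altc, rdmarc) are set up one after another, and the cleanup calls near the bottom of the listing run in the reverse order, which is the usual goto-unwind error path. A hedged userspace sketch of that shape, with init_table()/cleanup_table() standing in for mlx4_init_icm_table()/mlx4_cleanup_icm_table():

```c
/* Sketch of the reverse-order unwind suggested by mlx4_init_icm(): each table
 * is set up in turn, and a failure frees everything initialized so far in the
 * opposite order. init_table()/cleanup_table() are stand-ins. */
#include <stdio.h>

static int init_table(const char *name)
{
	printf("init %s\n", name);
	return 0;               /* make this non-zero to exercise the unwind */
}

static void cleanup_table(const char *name)
{
	printf("cleanup %s\n", name);
}

static int init_qp_icm(void)
{
	int err;

	err = init_table("qp");
	if (err)
		goto err_out;
	err = init_table("auxc");
	if (err)
		goto err_qp;
	err = init_table("altc");
	if (err)
		goto err_auxc;
	err = init_table("rdmarc");
	if (err)
		goto err_altc;
	return 0;

err_altc:
	cleanup_table("altc");
err_auxc:
	cleanup_table("auxc");
err_qp:
	cleanup_table("qp");
err_out:
	return err;
}

int main(void)
{
	return init_qp_icm();
}
```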
|
D | mlx4.h |
    714  struct mlx4_icm_table qp_table;  member
    879  struct mlx4_qp_table qp_table;  member
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
    197  return qp->qpn >= dev->qp_table.sqp_start &&  in is_sqp()
    198  qp->qpn <= dev->qp_table.sqp_start + 3;  in is_sqp()
    203  return qp->qpn >= dev->qp_table.sqp_start &&  in is_qp0()
    204  qp->qpn <= dev->qp_table.sqp_start + 1;  in is_qp0()
    243  spin_lock(&dev->qp_table.lock);  in mthca_qp_event()
    244  qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));  in mthca_qp_event()
    247  spin_unlock(&dev->qp_table.lock);  in mthca_qp_event()
    264  spin_lock(&dev->qp_table.lock);  in mthca_qp_event()
    267  spin_unlock(&dev->qp_table.lock);  in mthca_qp_event()
    754  cpu_to_be32(dev->qp_table.rdb_base +  in __mthca_modify_qp()
    [all …]
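mthca keeps four consecutive QPNs starting at qp_table.sqp_start for the special QPs, so is_sqp() and is_qp0() reduce to range checks against that base, with the first two QPNs in the range treated as QP0. A tiny self-contained version of those predicates; the sqp_range struct and the base value in main() are made-up stand-ins for the driver's qp_table:

```c
/* Range-check predicates mirroring mthca's is_sqp()/is_qp0(): four special
 * QPNs start at sqp_start, and the first two of them are QP0. The struct
 * below is a made-up stand-in for the driver's qp_table. */
#include <stdio.h>
#include <stdbool.h>

struct sqp_range {
	unsigned int sqp_start;
};

static bool is_sqp(const struct sqp_range *t, unsigned int qpn)
{
	return qpn >= t->sqp_start && qpn <= t->sqp_start + 3;
}

static bool is_qp0(const struct sqp_range *t, unsigned int qpn)
{
	return qpn >= t->sqp_start && qpn <= t->sqp_start + 1;
}

int main(void)
{
	struct sqp_range t = { .sqp_start = 8 };

	for (unsigned int qpn = 7; qpn <= 12; qpn++)
		printf("qpn %u: sqp=%d qp0=%d\n", qpn, is_sqp(&t, qpn), is_qp0(&t, qpn));
	return 0;
}
```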
|
D | mthca_main.c |
    444  mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,  in mthca_init_icm()
    449  if (!mdev->qp_table.qp_table) {  in mthca_init_icm()
    455  mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,  in mthca_init_icm()
    460  if (!mdev->qp_table.eqp_table) {  in mthca_init_icm()
    466  mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,  in mthca_init_icm()
    469  mdev->qp_table.rdb_shift, 0,  in mthca_init_icm()
    471  if (!mdev->qp_table.rdb_table) {  in mthca_init_icm()
    531  mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);  in mthca_init_icm()
    534  mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);  in mthca_init_icm()
    537  mthca_free_icm_table(mdev, mdev->qp_table.qp_table);  in mthca_init_icm()
    [all …]
|
D | mthca_profile.c |
    206  for (dev->qp_table.rdb_shift = 0;  in mthca_make_profile()
    207  request->num_qp << dev->qp_table.rdb_shift < profile[i].num;  in mthca_make_profile()
    208  ++dev->qp_table.rdb_shift)  in mthca_make_profile()
    210  dev->qp_table.rdb_base = (u32) profile[i].start;  in mthca_make_profile()
|
D | mthca_dev.h |
    260  struct mthca_icm_table *qp_table;  member
    345  struct mthca_qp_table qp_table;  member
|
D | mthca_cq.c | 525 *cur_qp = mthca_array_get(&dev->qp_table.qp, in mthca_poll_one()
|
D | mthca_provider.c | 105 props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift; in mthca_query_device()
|
/linux-4.4.14/drivers/staging/rdma/amso1100/ |
D | c2_qp.c |
    386   spin_lock_irq(&c2dev->qp_table.lock);  in c2_alloc_qpn()
    388   ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);  in c2_alloc_qpn()
    392   spin_unlock_irq(&c2dev->qp_table.lock);  in c2_alloc_qpn()
    399   spin_lock_irq(&c2dev->qp_table.lock);  in c2_free_qpn()
    400   idr_remove(&c2dev->qp_table.idr, qpn);  in c2_free_qpn()
    401   spin_unlock_irq(&c2dev->qp_table.lock);  in c2_free_qpn()
    409   spin_lock_irqsave(&c2dev->qp_table.lock, flags);  in c2_find_qpn()
    410   qp = idr_find(&c2dev->qp_table.idr, qpn);  in c2_find_qpn()
    411   spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);  in c2_find_qpn()
    1017  spin_lock_init(&c2dev->qp_table.lock);  in c2_init_qp_table()
    [all …]
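The amso1100 driver hands out QPNs with idr_alloc_cyclic() and resolves them with idr_find(), all under qp_table.lock. The sketch below is a deliberately simplified userspace analog: a mutex-protected fixed-size table with cyclic allocation. It only shows the allocate/find/free shape; the kernel IDR and the IRQ-safe spinlock variants are not reproduced, and every name here is invented.

```c
/* Simplified userspace analog of c2_alloc_qpn()/c2_find_qpn()/c2_free_qpn():
 * a mutex-protected, fixed-size table with cyclic ID allocation. The kernel
 * version uses an IDR and IRQ-safe spinlocks instead. */
#include <pthread.h>
#include <stdio.h>

#define MAX_QPN 64

struct qpn_table {
	pthread_mutex_t lock;
	void *entries[MAX_QPN];
	int next;                       /* where the cyclic search resumes */
};

static int alloc_qpn(struct qpn_table *t, void *qp)
{
	int qpn = -1;

	pthread_mutex_lock(&t->lock);
	for (int i = 0; i < MAX_QPN; i++) {
		int cand = (t->next + i) % MAX_QPN;

		if (!t->entries[cand]) {
			t->entries[cand] = qp;
			t->next = (cand + 1) % MAX_QPN;
			qpn = cand;
			break;
		}
	}
	pthread_mutex_unlock(&t->lock);
	return qpn;                     /* -1 means the table is full */
}

static void *find_qpn(struct qpn_table *t, int qpn)
{
	void *qp;

	pthread_mutex_lock(&t->lock);
	qp = (qpn >= 0 && qpn < MAX_QPN) ? t->entries[qpn] : NULL;
	pthread_mutex_unlock(&t->lock);
	return qp;
}

static void free_qpn(struct qpn_table *t, int qpn)
{
	pthread_mutex_lock(&t->lock);
	if (qpn >= 0 && qpn < MAX_QPN)
		t->entries[qpn] = NULL;
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct qpn_table t = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int dummy = 0;
	int qpn = alloc_qpn(&t, &dummy);

	printf("allocated qpn %d, lookup %s\n", qpn,
	       find_qpn(&t, qpn) == &dummy ? "ok" : "failed");
	free_qpn(&t, qpn);
	return 0;
}
```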
|
D | c2.h | 313 struct c2_qp_table qp_table; member
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_qp.c |
    860   err = ipath_alloc_qpn(&dev->qp_table, qp,  in ipath_create_qp()
    940   ipath_free_qp(&dev->qp_table, qp);  in ipath_create_qp()
    941   free_qpn(&dev->qp_table, qp->ibqp.qp_num);  in ipath_create_qp()
    984   ipath_free_qp(&dev->qp_table, qp);  in ipath_destroy_qp()
    999   free_qpn(&dev->qp_table, qp->ibqp.qp_num);  in ipath_destroy_qp()
    1026  idev->qp_table.last = 1; /* QPN 0 and 1 are special. */  in ipath_init_qp_table()
    1027  idev->qp_table.max = size;  in ipath_init_qp_table()
    1028  idev->qp_table.nmaps = 1;  in ipath_init_qp_table()
    1029  idev->qp_table.table = kcalloc(size, sizeof(*idev->qp_table.table),  in ipath_init_qp_table()
    1031  if (idev->qp_table.table == NULL) {  in ipath_init_qp_table()
    [all …]
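ipath_init_qp_table() starts the QPN cursor at 1 so that QPN 0 and 1 (the special QPs) are never handed out dynamically, and zero-allocates its table of QP pointers with kcalloc(). A compact userspace sketch of just that setup, with invented field names and calloc() standing in for kcalloc():

```c
/* Sketch of the setup shown in ipath_init_qp_table(): reserve QPN 0/1 by
 * starting the allocation cursor past them and zero-allocate the pointer
 * table. Field names are invented; calloc() stands in for kcalloc(). */
#include <stdio.h>
#include <stdlib.h>

struct sketch_qp_table {
	unsigned int last;   /* last QPN handed out; starts at 1 so 0/1 stay special */
	unsigned int max;    /* number of usable QPNs */
	void **table;        /* per-QPN (or per-bucket) QP pointers */
};

static int init_qp_table(struct sketch_qp_table *t, unsigned int size)
{
	t->last = 1;         /* QPN 0 and 1 are special */
	t->max = size;
	t->table = calloc(size, sizeof(*t->table));
	return t->table ? 0 : -1;
}

int main(void)
{
	struct sketch_qp_table t;

	if (init_qp_table(&t, 1024))
		return 1;
	printf("qp table ready: max=%u, first dynamic QPN will be > %u\n", t.max, t.last);
	free(t.table);
	return 0;
}
```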
|
D | ipath_verbs.c |
    665   qp = ipath_lookup_qpn(&dev->qp_table, qp_num);  in ipath_ib_rcv()
    2058  spin_lock_init(&idev->qp_table.lock);  in ipath_register_ib_device()
    2238  kfree(idev->qp_table.table);  in ipath_register_ib_device()
    2274  qps_inuse = ipath_free_all_qps(&dev->qp_table);  in ipath_unregister_ib_device()
    2278  kfree(dev->qp_table.table);  in ipath_unregister_ib_device()
|
D | ipath_ud.c | 67 qp = ipath_lookup_qpn(&dev->qp_table, swqe->ud_wr.remote_qpn); in ipath_ud_loopback()
|
D | ipath_ruc.c | 275 qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn); in ipath_ruc_loopback()
|
D | ipath_verbs.h | 555 struct ipath_qp_table qp_table; member
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | qp.h |
    83   struct hfi1_qp __rcu **qp_table;  member
    112  for (qp = rcu_dereference(dev->qp_dev->qp_table[n]); qp;  in hfi1_lookup_qpn()
|
D | qp.c |
    245   qp->next = dev->qp_dev->qp_table[n];  in insert_qp()
    246   rcu_assign_pointer(dev->qp_dev->qp_table[n], qp);  in insert_qp()
    277   qpp = &dev->qp_dev->qp_table[n];  in remove_qp()
    331   qp = rcu_dereference_protected(dev->qp_dev->qp_table[n],  in free_all_qps()
    333   RCU_INIT_POINTER(dev->qp_dev->qp_table[n], NULL);  in free_all_qps()
    1475  dev->qp_dev->qp_table =  in hfi1_qp_init()
    1477  sizeof(*dev->qp_dev->qp_table),  in hfi1_qp_init()
    1479  if (!dev->qp_dev->qp_table)  in hfi1_qp_init()
    1482  RCU_INIT_POINTER(dev->qp_dev->qp_table[i], NULL);  in hfi1_qp_init()
    1491  kfree(dev->qp_dev->qp_table);  in hfi1_qp_init()
    [all …]
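hfi1 (like qib further down) keeps qp_table as an array of per-bucket QP chains: insert_qp() pushes at the head with rcu_assign_pointer(), hfi1_lookup_qpn() walks the chain under rcu_dereference(), and free_all_qps() tears the buckets down. The sketch below reproduces only the bucket/chain shape in plain userspace C, with a mutex in place of RCU so it stays self-contained; in the drivers the lookups run under RCU rather than a lock, and all names here are invented.

```c
/* Userspace analog of the hfi1/qib bucketed QP table: hash the QPN to a
 * bucket, push new QPs at the head of the chain, and walk the chain to look
 * one up. A mutex replaces RCU purely to keep the sketch self-contained. */
#include <pthread.h>
#include <stdio.h>

#define QP_HASH_SIZE 256            /* power of two, so the mask below works */

struct sk_qp {
	unsigned int qpn;
	struct sk_qp *next;
};

struct sk_qp_table {
	pthread_mutex_t lock;
	struct sk_qp *buckets[QP_HASH_SIZE];
};

static unsigned int qpn_hash(unsigned int qpn)
{
	return qpn & (QP_HASH_SIZE - 1);
}

static void insert_qp(struct sk_qp_table *t, struct sk_qp *qp)
{
	unsigned int n = qpn_hash(qp->qpn);

	pthread_mutex_lock(&t->lock);
	qp->next = t->buckets[n];       /* push at the head, like insert_qp() */
	t->buckets[n] = qp;
	pthread_mutex_unlock(&t->lock);
}

static struct sk_qp *lookup_qpn(struct sk_qp_table *t, unsigned int qpn)
{
	struct sk_qp *qp;

	pthread_mutex_lock(&t->lock);
	for (qp = t->buckets[qpn_hash(qpn)]; qp; qp = qp->next)
		if (qp->qpn == qpn)
			break;
	pthread_mutex_unlock(&t->lock);
	return qp;
}

int main(void)
{
	struct sk_qp_table t = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct sk_qp a = { .qpn = 5 }, b = { .qpn = 5 + QP_HASH_SIZE };

	insert_qp(&t, &a);
	insert_qp(&t, &b);              /* same bucket as a: chained */
	printf("lookup 5 -> %p (a=%p)\n", (void *)lookup_qpn(&t, 5), (void *)&a);
	printf("lookup %u -> %p (b=%p)\n", b.qpn, (void *)lookup_qpn(&t, b.qpn), (void *)&b);
	return 0;
}
```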
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
D | qp.c |
    45   struct mlx5_qp_table *table = &dev->priv.qp_table;  in mlx5_get_rsc()
    185  struct mlx5_qp_table *table = &dev->priv.qp_table;  in mlx5_core_create_qp()
    253  struct mlx5_qp_table *table = &dev->priv.qp_table;  in mlx5_core_destroy_qp()
    346  struct mlx5_qp_table *table = &dev->priv.qp_table;  in mlx5_init_qp_table()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_qp.c |
    237   qp->next = dev->qp_table[n];  in insert_qp()
    238   rcu_assign_pointer(dev->qp_table[n], qp);  in insert_qp()
    268   qpp = &dev->qp_table[n];  in remove_qp()
    317   qp = rcu_dereference_protected(dev->qp_table[n],  in qib_free_all_qps()
    319   RCU_INIT_POINTER(dev->qp_table[n], NULL);  in qib_free_all_qps()
    355   for (qp = rcu_dereference(dev->qp_table[n]); qp;  in qib_lookup_qpn()
    1350  qp = rcu_dereference(dev->qp_table[n]);  in qib_qp_iter_next()
|
D | qib_verbs.c |
    2096  dev->qp_table = kmalloc_array(  in qib_register_ib_device()
    2098  sizeof(*dev->qp_table),  in qib_register_ib_device()
    2100  if (!dev->qp_table) {  in qib_register_ib_device()
    2105  RCU_INIT_POINTER(dev->qp_table[i], NULL);  in qib_register_ib_device()
    2314  kfree(dev->qp_table);  in qib_register_ib_device()
    2367  kfree(dev->qp_table);  in qib_unregister_ib_device()
|
D | qib_verbs.h | 770 struct qib_qp __rcu **qp_table; member
|
/linux-4.4.14/include/linux/mlx5/ |
D | qp.h | 600 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); in __mlx5_qp_lookup()
|
D | driver.h | 456 struct mlx5_qp_table qp_table; member
|
/linux-4.4.14/drivers/infiniband/hw/nes/ |
D | nes.c |
    301  nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;  in nes_cqp_rem_ref_callback()
    371  return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp;  in nes_get_qp()
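nes keeps qp_table as a flat array of struct nes_qp pointers indexed by qp_id - NES_FIRST_QPN, so registration, lookup, and removal are single array accesses. A small sketch of that offset-indexed scheme; the base value 64 and the table size below are arbitrary stand-ins, not the driver's constants:

```c
/* Sketch of the nes flat QP table: QPNs start at a fixed base, and the table
 * is indexed by qpn minus that base. The base and size here are illustrative. */
#include <stdio.h>
#include <stddef.h>

#define FIRST_QPN 64
#define MAX_QPS   512

static void *qp_table[MAX_QPS];

static void register_qp(unsigned int qpn, void *qp)
{
	qp_table[qpn - FIRST_QPN] = qp;      /* nes_create_qp() stores the QP this way */
}

static void *get_qp(unsigned int qpn)
{
	if (qpn < FIRST_QPN || qpn >= FIRST_QPN + MAX_QPS)
		return NULL;
	return qp_table[qpn - FIRST_QPN];    /* nes_get_qp()'s lookup is one array read */
}

int main(void)
{
	int dummy;

	register_qp(70, &dummy);
	printf("qpn 70 -> %p, qpn 71 -> %p\n", get_qp(70), get_qp(71));
	return 0;
}
```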
|
D | nes_hw.h | 1062 struct nes_qp **qp_table; member
|
D | nes_hw.c |
    445   …nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size…  in nes_init_adapter()
    3388  context = (unsigned long)nesadapter->qp_table[le32_to_cpu(  in nes_terminate_connection()
    3562  context = (unsigned long)nesadapter->qp_table[le32_to_cpu(  in nes_process_iwarp_aeqe()
|
D | nes_mgt.c | 801 context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN]; in nes_mgt_ce_handler()
|
D | nes_verbs.c | 1398 nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp; in nes_create_qp()
|