/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_eq.c |
    397  if (dev->eq_table.clr_mask)  in mthca_tavor_interrupt()
    398  writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);  in mthca_tavor_interrupt()
    408  if (ecr & dev->eq_table.eq[i].eqn_mask) {  in mthca_tavor_interrupt()
    409  if (mthca_eq_int(dev, &dev->eq_table.eq[i]))  in mthca_tavor_interrupt()
    410  tavor_set_eq_ci(dev, &dev->eq_table.eq[i],  in mthca_tavor_interrupt()
    411  dev->eq_table.eq[i].cons_index);  in mthca_tavor_interrupt()
    412  tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);  in mthca_tavor_interrupt()
    437  if (dev->eq_table.clr_mask)  in mthca_arbel_interrupt()
    438  writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);  in mthca_arbel_interrupt()
    441  if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {  in mthca_arbel_interrupt()
    [all …]
|
D | mthca_main.c |
    683  mdev->eq_table.inta_pin = adapter.inta_pin;  in mthca_init_hca()
    763  dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);  in mthca_setup_hca()
    865  mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;  in mthca_enable_msi_x()
    866  mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;  in mthca_enable_msi_x()
    867  mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector = entries[2].vector;  in mthca_enable_msi_x()
|
D | mthca_cq.c |
    839  cq_context->error_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);  in mthca_init_cq()
    840  cq_context->comp_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);  in mthca_init_cq()
    940  synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);  in mthca_free_cq()
|
D | mthca_dev.h | 342 struct mthca_eq_table eq_table; member
|
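The mthca references above all touch the same structure: mthca_dev.h:342 embeds a struct mthca_eq_table in the device, and the two interrupt handlers in mthca_eq.c walk its eq[] array, acking the interrupt through clr_mask/clr_int and polling each EQ whose eqn_mask bit is set in the ECR value. Below is a minimal user-space sketch of that dispatch pattern, assuming simplified demo_* types that model only the fields visible in the listing; it is not the kernel code itself.

/* Minimal user-space model of the Tavor-style EQ dispatch loop listed from
 * mthca_eq.c above.  Everything prefixed "demo_" is invented for this sketch;
 * only the field names mirror the listing. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_EQ 3

struct demo_eq {
	uint32_t eqn_mask;   /* bit this EQ owns in the ECR register    */
	uint32_t cons_index; /* consumer index advanced as events drain */
	int      eqn;        /* EQ number reported back to the hardware */
};

struct demo_eq_table {
	uint32_t           clr_mask; /* interrupt-clear bit, 0 when unused */
	volatile uint32_t *clr_int;  /* stands in for the mapped clear reg */
	struct demo_eq     eq[DEMO_NUM_EQ];
};

struct demo_dev {
	struct demo_eq_table eq_table;
};

/* Pretend exactly one event is consumed per pending EQ. */
static int demo_eq_int(struct demo_dev *dev, struct demo_eq *eq)
{
	(void)dev;       /* unused in this simplified sketch      */
	eq->cons_index++;
	return 1;        /* "work was done", like mthca_eq_int()  */
}

static void demo_interrupt(struct demo_dev *dev, uint32_t ecr)
{
	int i;

	/* mthca_eq.c:397-398: ack the interrupt line if clr_mask is set. */
	if (dev->eq_table.clr_mask)
		*dev->eq_table.clr_int = dev->eq_table.clr_mask;

	/* mthca_eq.c:408-412: poll every EQ whose bit is set in ECR. */
	for (i = 0; i < DEMO_NUM_EQ; ++i)
		if (ecr & dev->eq_table.eq[i].eqn_mask) {
			if (demo_eq_int(dev, &dev->eq_table.eq[i]))
				printf("EQ %d advanced to ci=%u\n",
				       dev->eq_table.eq[i].eqn,
				       (unsigned)dev->eq_table.eq[i].cons_index);
			/* real driver: tavor_set_eq_ci() + tavor_eq_req_not() */
		}
}

int main(void)
{
	static uint32_t fake_clr_reg;
	struct demo_dev dev = {
		.eq_table = {
			.clr_mask = 1u << 7,
			.clr_int  = &fake_clr_reg,
			.eq = {
				{ .eqn_mask = 1u << 0, .eqn = 0 },
				{ .eqn_mask = 1u << 1, .eqn = 1 },
				{ .eqn_mask = 1u << 2, .eqn = 2 },
			},
		},
	};

	demo_interrupt(&dev, 0x5); /* bits 0 and 2 pending */
	return 0;
}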
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
D | eq.c |
    229  struct mlx4_eq *eq = &priv->eq_table.eq[vec];  in mlx4_set_eq_affinity_hint()
    839  writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);  in mlx4_interrupt()
    842  work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);  in mlx4_interrupt()
    925  if (!priv->eq_table.uar_map[index]) {  in mlx4_get_eq_uar()
    926  priv->eq_table.uar_map[index] =  in mlx4_get_eq_uar()
    930  if (!priv->eq_table.uar_map[index]) {  in mlx4_get_eq_uar()
    937  return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);  in mlx4_get_eq_uar()
    946  if (priv->eq_table.uar_map[i]) {  in mlx4_unmap_uar()
    947  iounmap(priv->eq_table.uar_map[i]);  in mlx4_unmap_uar()
    948  priv->eq_table.uar_map[i] = NULL;  in mlx4_unmap_uar()
    [all …]
|
D | cq.c |
    322  cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;  in mlx4_cq_alloc()
    342  &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;  in mlx4_cq_alloc()
    346  cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;  in mlx4_cq_alloc()
    371  synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);  in mlx4_cq_free()
    372  if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=  in mlx4_cq_free()
    373  priv->eq_table.eq[MLX4_EQ_ASYNC].irq)  in mlx4_cq_free()
    374  synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);  in mlx4_cq_free()
|
D | main.c |
    1379  err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,  in mlx4_init_cmpt_table()
    1441  err = mlx4_init_icm_table(dev, &priv->eq_table.table,  in mlx4_init_icm()
    1589  mlx4_cleanup_icm_table(dev, &priv->eq_table.table);  in mlx4_init_icm()
    1592  mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);  in mlx4_init_icm()
    1619  mlx4_cleanup_icm_table(dev, &priv->eq_table.table);  in mlx4_free_icms()
    1620  mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);  in mlx4_free_icms()
    2209  priv->eq_table.inta_pin = adapter.inta_pin;  in mlx4_init_hca()
    2508  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);  in mlx4_setup_hca()
    2512  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);  in mlx4_setup_hca()
    2657  eq = &priv->eq_table.eq[eqn];  in mlx4_init_affinity_hint()
    [all …]
|
D | mlx4.h | 877 struct mlx4_eq_table eq_table; member
|
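In the mlx4 core, the cq.c lines bind a CQ to a completion EQ by indexing eq_table.eq[] through MLX4_CQ_TO_EQ_VECTOR() and copying out the eqn and irq, and mlx4_cq_free() synchronizes against both that IRQ and the async EQ's IRQ when they differ. A small sketch of that lookup follows, assuming a hypothetical DEMO_CQ_TO_EQ_VECTOR() mapping, since the real macro's definition is not part of this listing.

/* Sketch of the mlx4 "completion vector -> EQ slot" lookup used in
 * mlx4_cq_alloc()/mlx4_cq_free() above.  DEMO_CQ_TO_EQ_VECTOR() is an
 * assumed stand-in; the real MLX4_CQ_TO_EQ_VECTOR() definition does not
 * appear in this listing. */
#include <stdio.h>

#define DEMO_NUM_EQ   4
#define DEMO_EQ_ASYNC 0                               /* models MLX4_EQ_ASYNC */
#define DEMO_CQ_TO_EQ_VECTOR(v) ((v) % DEMO_NUM_EQ)   /* assumption */

struct demo_eq {
	int eqn;
	int irq;
};

struct demo_eq_table {
	struct demo_eq eq[DEMO_NUM_EQ];
};

struct demo_cq {
	int vector; /* completion vector requested by the CQ user */
	int eqn;    /* resolved EQ number, as in cq.c:322         */
	int irq;    /* resolved IRQ, as in cq.c:346               */
};

static void demo_cq_alloc(struct demo_eq_table *t, struct demo_cq *cq, int vector)
{
	struct demo_eq *eq = &t->eq[DEMO_CQ_TO_EQ_VECTOR(vector)];

	cq->vector = vector;
	cq->eqn    = eq->eqn; /* what cq_context->comp_eqn is derived from */
	cq->irq    = eq->irq;
}

int main(void)
{
	struct demo_eq_table table;
	struct demo_cq cq;
	int i;

	for (i = 0; i < DEMO_NUM_EQ; ++i)
		table.eq[i] = (struct demo_eq){ .eqn = 16 + i, .irq = 40 + i };

	demo_cq_alloc(&table, &cq, 6);
	printf("vector %d -> eqn %d, irq %d\n", cq.vector, cq.eqn, cq.irq);

	/* cq.c:372-374: only sync the async IRQ when it differs from the
	 * CQ's own completion IRQ. */
	printf("async irq differs: %s\n",
	       cq.irq != table.eq[DEMO_EQ_ASYNC].irq ? "yes" : "no");
	return 0;
}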
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
D | eq.c |
    440  spin_lock_init(&dev->priv.eq_table.lock);  in mlx5_eq_init()
    455  struct mlx5_eq_table *table = &dev->priv.eq_table;  in mlx5_start_eqs()
    503  struct mlx5_eq_table *table = &dev->priv.eq_table;  in mlx5_stop_eqs()
|
D | main.c |
    257  struct mlx5_eq_table *table = &priv->eq_table;  in mlx5_enable_msix()
    548  for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {  in mlx5_irq_set_affinity_hints()
    567  for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)  in mlx5_irq_clear_affinity_hints()
    574  struct mlx5_eq_table *table = &dev->priv.eq_table;  in mlx5_vector2eqn()
    595  struct mlx5_eq_table *table = &dev->priv.eq_table;  in free_comp_eqs()
    613  struct mlx5_eq_table *table = &dev->priv.eq_table;  in alloc_comp_eqs()
|
D | en.h | 628 return min_t(int, mdev->priv.eq_table.num_comp_vectors, in mlx5e_get_max_num_channels()
|
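For mlx5, en.h:628 caps the number of Ethernet channels at priv.eq_table.num_comp_vectors (the second argument of that min_t() call is truncated in the listing). A sketch of that clamp, with a made-up DEMO_MAX_CHANNELS standing in for the driver's cap constant and the priv indirection dropped for brevity:

/* Model of the mlx5e_get_max_num_channels() clamp shown in en.h:628: the
 * channel count is bounded by the number of completion EQ vectors.
 * DEMO_MAX_CHANNELS is an assumption, not the kernel value. */
#include <stdio.h>

#define DEMO_MAX_CHANNELS 64

struct demo_eq_table {
	int num_comp_vectors;
};

struct demo_mdev {
	struct demo_eq_table eq_table;
};

static int demo_get_max_num_channels(const struct demo_mdev *mdev)
{
	int n = mdev->eq_table.num_comp_vectors;

	/* min_t(int, ...) in the kernel; a plain comparison here. */
	return n < DEMO_MAX_CHANNELS ? n : DEMO_MAX_CHANNELS;
}

int main(void)
{
	struct demo_mdev mdev = { .eq_table = { .num_comp_vectors = 8 } };

	printf("max channels: %d\n", demo_get_max_num_channels(&mdev));
	return 0;
}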
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | main.c |
    2072  ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,  in mlx4_ib_alloc_eqs()
    2073  sizeof(ibdev->eq_table[0]), GFP_KERNEL);  in mlx4_ib_alloc_eqs()
    2074  if (!ibdev->eq_table)  in mlx4_ib_alloc_eqs()
    2082  ibdev->eq_table[eq] = total_eqs;  in mlx4_ib_alloc_eqs()
    2084  &ibdev->eq_table[eq]))  in mlx4_ib_alloc_eqs()
    2087  ibdev->eq_table[eq] = -1;  in mlx4_ib_alloc_eqs()
    2092  ibdev->eq_table[i++] = -1)  in mlx4_ib_alloc_eqs()
    2105  if (!ibdev->eq_table)  in mlx4_ib_free_eqs()
    2112  mlx4_release_eq(dev, ibdev->eq_table[i]);  in mlx4_ib_free_eqs()
    2114  kfree(ibdev->eq_table);  in mlx4_ib_free_eqs()
    [all …]
|
D | cq.c |
    238  if (dev->eq_table)  in mlx4_ib_create_cq()
    239  vector = dev->eq_table[vector % ibdev->num_comp_vectors];  in mlx4_ib_create_cq()
|
D | mlx4_ib.h | 563 int *eq_table; member
|
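The mlx4_ib usage differs from the core drivers: per mlx4_ib.h:563, eq_table here is a plain int array that maps each IB completion vector to an EQ number obtained from the core, with -1 marking unassigned slots (main.c:2087, 2092), and mlx4_ib_create_cq() folds the requested vector into it with a modulo (cq.c:239). A sketch under those assumptions; demo_assign_eq() is hypothetical, since the function called at main.c:2084 is not visible in the listing.

/* Sketch of the mlx4_ib pattern in main.c:2072-2114 and cq.c:238-239: a
 * calloc'ed int array (kcalloc in the kernel) maps IB completion vectors to
 * core-driver EQ numbers, with -1 marking unassigned slots. */
#include <stdio.h>
#include <stdlib.h>

struct demo_ibdev {
	int  num_comp_vectors;
	int *eq_table;          /* mirrors "int *eq_table" in mlx4_ib.h:563 */
};

/* Hypothetical allocator: hands out even-numbered slots, rejects the rest. */
static int demo_assign_eq(int slot, int *eqn)
{
	if (slot % 2)
		return -1;      /* failure path: main.c:2087 stores -1 */
	*eqn = 64 + slot;
	return 0;
}

static int demo_alloc_eqs(struct demo_ibdev *ibdev)
{
	int eq;

	ibdev->eq_table = calloc(ibdev->num_comp_vectors,
				 sizeof(ibdev->eq_table[0]));
	if (!ibdev->eq_table)
		return -1;

	for (eq = 0; eq < ibdev->num_comp_vectors; ++eq)
		if (demo_assign_eq(eq, &ibdev->eq_table[eq]))
			ibdev->eq_table[eq] = -1;
	return 0;
}

/* cq.c:239: fold an arbitrary requested vector into the table. */
static int demo_pick_vector(const struct demo_ibdev *ibdev, int requested)
{
	return ibdev->eq_table[requested % ibdev->num_comp_vectors];
}

int main(void)
{
	struct demo_ibdev ibdev = { .num_comp_vectors = 4 };
	int i;

	if (demo_alloc_eqs(&ibdev))
		return 1;
	for (i = 0; i < ibdev.num_comp_vectors; ++i)
		printf("slot %d -> eq %d\n", i, ibdev.eq_table[i]);
	printf("requested vector 9 -> eq %d\n", demo_pick_vector(&ibdev, 9));
	free(ibdev.eq_table);
	return 0;
}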
/linux-4.4.14/include/linux/mlx5/ |
D | driver.h | 436 struct mlx5_eq_table eq_table; member
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | main.c | 1359 dev->mdev->priv.eq_table.num_comp_vectors; in mlx5_ib_add()
|