root/drivers/infiniband/hw/mlx5/ib_rep.c


DEFINITIONS

This source file includes the following definitions:
  1. mlx5_ib_set_vport_rep
  2. mlx5_ib_vport_rep_load
  3. mlx5_ib_vport_rep_unload
  4. mlx5_ib_vport_get_proto_dev
  5. mlx5_ib_register_vport_reps
  6. mlx5_ib_unregister_vport_reps
  7. mlx5_ib_eswitch_mode
  8. mlx5_ib_get_rep_ibdev
  9. mlx5_ib_get_rep_netdev
  10. mlx5_ib_get_uplink_ibdev
  11. mlx5_ib_vport_rep
  12. create_flow_rule_vport_sq

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"

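/*
 * Attach a non-uplink vport rep to the already-loaded uplink IB device:
 * record the rep in the matching port slot and cache the rep's netdev for
 * that port under the RoCE netdev lock.
 */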
static int
mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *ibdev;
	int vport_index;

	ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);
	vport_index = rep->vport_index;

	ibdev->port[vport_index].rep = rep;
	rep->rep_data[REP_IB].priv = ibdev;
	write_lock(&ibdev->port[vport_index].roce.netdev_lock);
	ibdev->port[vport_index].roce.netdev =
		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
	write_unlock(&ibdev->port[vport_index].roce.netdev_lock);

	return 0;
}

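/*
 * Load callback for the REP_IB rep type: the uplink rep allocates and brings
 * up a full IB device with one port slot per e-switch vport; every other
 * vport rep is simply attached to that existing uplink device.
 */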
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	int num_ports = mlx5_eswitch_get_total_vports(dev);
	const struct mlx5_ib_profile *profile;
	struct mlx5_ib_dev *ibdev;
	int vport_index;

	if (rep->vport == MLX5_VPORT_UPLINK)
		profile = &uplink_rep_profile;
	else
		return mlx5_ib_set_vport_rep(dev, rep);

	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
	if (!ibdev)
		return -ENOMEM;

	ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
			      GFP_KERNEL);
	if (!ibdev->port) {
		ib_dealloc_device(&ibdev->ib_dev);
		return -ENOMEM;
	}

	ibdev->is_rep = true;
	vport_index = rep->vport_index;
	ibdev->port[vport_index].rep = rep;
	ibdev->port[vport_index].roce.netdev =
		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
	ibdev->mdev = dev;
	ibdev->num_ports = num_ports;

	if (!__mlx5_ib_add(ibdev, profile))
		return -EINVAL;

	rep->rep_data[REP_IB].priv = ibdev;

	return 0;
}

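/*
 * Unload callback: detach the rep from its port slot; for the uplink rep,
 * also tear down the IB device that was created at load time.
 */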
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
	struct mlx5_ib_port *port;

	port = &dev->port[rep->vport_index];
	write_lock(&port->roce.netdev_lock);
	port->roce.netdev = NULL;
	write_unlock(&port->roce.netdev_lock);
	rep->rep_data[REP_IB].priv = NULL;
	port->rep = NULL;

	if (rep->vport == MLX5_VPORT_UPLINK)
		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}

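/* The REP_IB "protocol device" of a rep is the mlx5_ib_dev bound to it. */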
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return mlx5_ib_rep_to_dev(rep);
}

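/* Callbacks handed to the e-switch core for the REP_IB rep type. */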
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5_ib_vport_rep_load,
	.unload = mlx5_ib_vport_rep_unload,
	.get_proto_dev = mlx5_ib_vport_get_proto_dev,
};

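/* Register and unregister the REP_IB callbacks with the core e-switch. */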
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
}

void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
}

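/* Thin wrappers around the e-switch API for use elsewhere in mlx5_ib. */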
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_mode(esw);
}

struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
					  u16 vport_num)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_IB);
}

struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  u16 vport_num)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
}

struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
}

struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
					   u16 vport_num)
{
	return mlx5_eswitch_vport_rep(esw, vport_num);
}

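/*
 * Install the e-switch rule that steers traffic sent on @sq to the vport
 * represented by @port.  Returns NULL when the device is not a representor
 * (or @port is 0), and ERR_PTR(-EINVAL) when no rep is bound to the port.
 */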
struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
						   struct mlx5_ib_sq *sq,
						   u16 port)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	struct mlx5_eswitch_rep *rep;

	if (!dev->is_rep || !port)
		return NULL;

	if (!dev->port[port - 1].rep)
		return ERR_PTR(-EINVAL);

	rep = dev->port[port - 1].rep;

	return mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport,
						   sq->base.mqp.qpn);
}
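/*
 * Hypothetical caller sketch (not part of this file): a raw packet QP
 * created on a representor port might install its SQ steering rule roughly
 * like this; the qp/sq variable names below are illustrative only.
 *
 *	flow_rule = create_flow_rule_vport_sq(dev, sq, qp->port);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 *	sq->flow_rule = flow_rule;
 */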
