root/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. mtt_states_str
  2. res_tracker_lookup
  3. res_tracker_insert
  4. resource_str
  5. mlx4_grant_resource
  6. mlx4_release_resource
  7. initialize_res_quotas
  8. mlx4_init_quotas
  9. mlx4_calc_res_counter_guaranteed
  10. mlx4_init_resource_tracker
  11. mlx4_free_resource_tracker
  12. update_pkey_index
  13. update_gid
  14. update_vport_qp_param
  15. mpt_mask
  16. mlx4_resource_type_to_str
  17. find_res
  18. _get_res
  19. mlx4_get_slave_from_resource_id
  20. put_res
  21. handle_existing_counter
  22. handle_unexisting_counter
  23. handle_counter
  24. alloc_qp_tr
  25. alloc_mtt_tr
  26. alloc_mpt_tr
  27. alloc_eq_tr
  28. alloc_cq_tr
  29. alloc_srq_tr
  30. alloc_counter_tr
  31. alloc_xrcdn_tr
  32. alloc_fs_rule_tr
  33. alloc_tr
  34. mlx4_calc_vf_counters
  35. add_res_range
  36. remove_qp_ok
  37. remove_mtt_ok
  38. remove_mpt_ok
  39. remove_eq_ok
  40. remove_counter_ok
  41. remove_xrcdn_ok
  42. remove_fs_rule_ok
  43. remove_cq_ok
  44. remove_srq_ok
  45. remove_ok
  46. rem_res_range
  47. qp_res_start_move_to
  48. mr_res_start_move_to
  49. eq_res_start_move_to
  50. cq_res_start_move_to
  51. srq_res_start_move_to
  52. res_abort_move
  53. res_end_move
  54. valid_reserved
  55. fw_reserved
  56. qp_alloc_res
  57. mtt_alloc_res
  58. mpt_alloc_res
  59. cq_alloc_res
  60. srq_alloc_res
  61. mac_find_smac_ix_in_slave
  62. mac_add_to_slave
  63. mac_del_from_slave
  64. rem_slave_macs
  65. mac_alloc_res
  66. vlan_add_to_slave
  67. vlan_del_from_slave
  68. rem_slave_vlans
  69. vlan_alloc_res
  70. counter_alloc_res
  71. xrcdn_alloc_res
  72. mlx4_ALLOC_RES_wrapper
  73. qp_free_res
  74. mtt_free_res
  75. mpt_free_res
  76. cq_free_res
  77. srq_free_res
  78. mac_free_res
  79. vlan_free_res
  80. counter_free_res
  81. xrcdn_free_res
  82. mlx4_FREE_RES_wrapper
  83. mr_phys_mpt
  84. mr_get_mtt_addr
  85. mr_get_mtt_size
  86. mr_get_pd
  87. mr_is_fmr
  88. mr_is_bind_enabled
  89. mr_is_region
  90. qp_get_mtt_addr
  91. srq_get_mtt_addr
  92. qp_get_mtt_size
  93. check_mtt_range
  94. mlx4_SW2HW_MPT_wrapper
  95. mlx4_HW2SW_MPT_wrapper
  96. mlx4_QUERY_MPT_wrapper
  97. qp_get_rcqn
  98. qp_get_scqn
  99. qp_get_srqn
  100. adjust_proxy_tun_qkey
  101. mlx4_RST2INIT_QP_wrapper
  102. eq_get_mtt_addr
  103. eq_get_mtt_size
  104. cq_get_mtt_addr
  105. cq_get_mtt_size
  106. mlx4_SW2HW_EQ_wrapper
  107. mlx4_CONFIG_DEV_wrapper
  108. get_containing_mtt
  109. verify_qp_parameters
  110. mlx4_WRITE_MTT_wrapper
  111. mlx4_HW2SW_EQ_wrapper
  112. mlx4_GEN_EQE
  113. mlx4_QUERY_EQ_wrapper
  114. mlx4_SW2HW_CQ_wrapper
  115. mlx4_HW2SW_CQ_wrapper
  116. mlx4_QUERY_CQ_wrapper
  117. handle_resize
  118. mlx4_MODIFY_CQ_wrapper
  119. srq_get_mtt_size
  120. mlx4_SW2HW_SRQ_wrapper
  121. mlx4_HW2SW_SRQ_wrapper
  122. mlx4_QUERY_SRQ_wrapper
  123. mlx4_ARM_SRQ_wrapper
  124. mlx4_GEN_QP_wrapper
  125. mlx4_INIT2INIT_QP_wrapper
  126. adjust_qp_sched_queue
  127. roce_verify_mac
  128. mlx4_INIT2RTR_QP_wrapper
  129. mlx4_RTR2RTS_QP_wrapper
  130. mlx4_RTS2RTS_QP_wrapper
  131. mlx4_SQERR2RTS_QP_wrapper
  132. mlx4_SQD2SQD_QP_wrapper
  133. mlx4_SQD2RTS_QP_wrapper
  134. mlx4_2RST_QP_wrapper
  135. find_gid
  136. add_mcg_res
  137. rem_mcg_res
  138. qp_attach
  139. qp_detach
  140. mlx4_adjust_port
  141. mlx4_QP_ATTACH_wrapper
  142. validate_eth_header_mac
  143. add_eth_header
  144. mlx4_UPDATE_QP_wrapper
  145. qp_attach_mbox_size
  146. mlx4_QP_FLOW_STEERING_ATTACH_wrapper
  147. mlx4_undo_mirror_rule
  148. mlx4_QP_FLOW_STEERING_DETACH_wrapper
  149. mlx4_QUERY_IF_STAT_wrapper
  150. detach_qp
  151. _move_all_busy
  152. move_all_busy
  153. rem_slave_qps
  154. rem_slave_srqs
  155. rem_slave_cqs
  156. rem_slave_mrs
  157. rem_slave_mtts
  158. mlx4_do_mirror_rule
  159. mlx4_mirror_fs_rules
  160. mlx4_bond_fs_rules
  161. mlx4_unbond_fs_rules
  162. rem_slave_fs_rule
  163. rem_slave_eqs
  164. rem_slave_counters
  165. rem_slave_xrcdns
  166. mlx4_delete_all_resources_for_slave
  167. update_qos_vpp
  168. mlx4_vf_immed_vlan_work_handler

   1 /*
   2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
   3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
   4  * All rights reserved.
   5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
   6  *
   7  * This software is available to you under a choice of one of two
   8  * licenses.  You may choose to be licensed under the terms of the GNU
   9  * General Public License (GPL) Version 2, available from the file
  10  * COPYING in the main directory of this source tree, or the
  11  * OpenIB.org BSD license below:
  12  *
  13  *     Redistribution and use in source and binary forms, with or
  14  *     without modification, are permitted provided that the following
  15  *     conditions are met:
  16  *
  17  *      - Redistributions of source code must retain the above
  18  *        copyright notice, this list of conditions and the following
  19  *        disclaimer.
  20  *
  21  *      - Redistributions in binary form must reproduce the above
  22  *        copyright notice, this list of conditions and the following
  23  *        disclaimer in the documentation and/or other materials
  24  *        provided with the distribution.
  25  *
  26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33  * SOFTWARE.
  34  */
  35 
  36 #include <linux/sched.h>
  37 #include <linux/pci.h>
  38 #include <linux/errno.h>
  39 #include <linux/kernel.h>
  40 #include <linux/io.h>
  41 #include <linux/slab.h>
  42 #include <linux/mlx4/cmd.h>
  43 #include <linux/mlx4/qp.h>
  44 #include <linux/if_ether.h>
  45 #include <linux/etherdevice.h>
  46 
  47 #include "mlx4.h"
  48 #include "fw.h"
  49 #include "mlx4_stats.h"
  50 
  51 #define MLX4_MAC_VALID          (1ull << 63)
  52 #define MLX4_PF_COUNTERS_PER_PORT       2
  53 #define MLX4_VF_COUNTERS_PER_PORT       1
  54 
  55 struct mac_res {
  56         struct list_head list;
  57         u64 mac;
  58         int ref_count;
  59         u8 smac_index;
  60         u8 port;
  61 };
  62 
  63 struct vlan_res {
  64         struct list_head list;
  65         u16 vlan;
  66         int ref_count;
  67         int vlan_index;
  68         u8 port;
  69 };
  70 
  71 struct res_common {
  72         struct list_head        list;
  73         struct rb_node          node;
  74         u64                     res_id;
  75         int                     owner;
  76         int                     state;
  77         int                     from_state;
  78         int                     to_state;
  79         int                     removing;
  80         const char              *func_name;
  81 };
  82 
  83 enum {
  84         RES_ANY_BUSY = 1
  85 };
  86 
  87 struct res_gid {
  88         struct list_head        list;
  89         u8                      gid[16];
  90         enum mlx4_protocol      prot;
  91         enum mlx4_steer_type    steer;
  92         u64                     reg_id;
  93 };
  94 
  95 enum res_qp_states {
  96         RES_QP_BUSY = RES_ANY_BUSY,
  97 
  98         /* QP number was allocated */
  99         RES_QP_RESERVED,
 100 
 101         /* ICM memory for QP context was mapped */
 102         RES_QP_MAPPED,
 103 
 104         /* QP is in hw ownership */
 105         RES_QP_HW
 106 };
 107 
 108 struct res_qp {
 109         struct res_common       com;
 110         struct res_mtt         *mtt;
 111         struct res_cq          *rcq;
 112         struct res_cq          *scq;
 113         struct res_srq         *srq;
 114         struct list_head        mcg_list;
 115         spinlock_t              mcg_spl;
 116         int                     local_qpn;
 117         atomic_t                ref_count;
 118         u32                     qpc_flags;
 119         /* saved qp params before VST enforcement in order to restore on VGT */
 120         u8                      sched_queue;
 121         __be32                  param3;
 122         u8                      vlan_control;
 123         u8                      fvl_rx;
 124         u8                      pri_path_fl;
 125         u8                      vlan_index;
 126         u8                      feup;
 127 };
 128 
 129 enum res_mtt_states {
 130         RES_MTT_BUSY = RES_ANY_BUSY,
 131         RES_MTT_ALLOCATED,
 132 };
 133 
 134 static inline const char *mtt_states_str(enum res_mtt_states state)
 135 {
 136         switch (state) {
 137         case RES_MTT_BUSY: return "RES_MTT_BUSY";
 138         case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
 139         default: return "Unknown";
 140         }
 141 }
 142 
 143 struct res_mtt {
 144         struct res_common       com;
 145         int                     order;
 146         atomic_t                ref_count;
 147 };
 148 
 149 enum res_mpt_states {
 150         RES_MPT_BUSY = RES_ANY_BUSY,
 151         RES_MPT_RESERVED,
 152         RES_MPT_MAPPED,
 153         RES_MPT_HW,
 154 };
 155 
 156 struct res_mpt {
 157         struct res_common       com;
 158         struct res_mtt         *mtt;
 159         int                     key;
 160 };
 161 
 162 enum res_eq_states {
 163         RES_EQ_BUSY = RES_ANY_BUSY,
 164         RES_EQ_RESERVED,
 165         RES_EQ_HW,
 166 };
 167 
 168 struct res_eq {
 169         struct res_common       com;
 170         struct res_mtt         *mtt;
 171 };
 172 
 173 enum res_cq_states {
 174         RES_CQ_BUSY = RES_ANY_BUSY,
 175         RES_CQ_ALLOCATED,
 176         RES_CQ_HW,
 177 };
 178 
 179 struct res_cq {
 180         struct res_common       com;
 181         struct res_mtt         *mtt;
 182         atomic_t                ref_count;
 183 };
 184 
 185 enum res_srq_states {
 186         RES_SRQ_BUSY = RES_ANY_BUSY,
 187         RES_SRQ_ALLOCATED,
 188         RES_SRQ_HW,
 189 };
 190 
 191 struct res_srq {
 192         struct res_common       com;
 193         struct res_mtt         *mtt;
 194         struct res_cq          *cq;
 195         atomic_t                ref_count;
 196 };
 197 
 198 enum res_counter_states {
 199         RES_COUNTER_BUSY = RES_ANY_BUSY,
 200         RES_COUNTER_ALLOCATED,
 201 };
 202 
 203 struct res_counter {
 204         struct res_common       com;
 205         int                     port;
 206 };
 207 
 208 enum res_xrcdn_states {
 209         RES_XRCD_BUSY = RES_ANY_BUSY,
 210         RES_XRCD_ALLOCATED,
 211 };
 212 
 213 struct res_xrcdn {
 214         struct res_common       com;
 215         int                     port;
 216 };
 217 
 218 enum res_fs_rule_states {
 219         RES_FS_RULE_BUSY = RES_ANY_BUSY,
 220         RES_FS_RULE_ALLOCATED,
 221 };
 222 
 223 struct res_fs_rule {
 224         struct res_common       com;
 225         int                     qpn;
 226         /* VF DMFS mbox with port flipped */
 227         void                    *mirr_mbox;
 228         /* > 0 --> apply mirror when getting into HA mode      */
 229         /* = 0 --> un-apply mirror when getting out of HA mode */
 230         u32                     mirr_mbox_size;
 231         struct list_head        mirr_list;
 232         u64                     mirr_rule_id;
 233 };
 234 
 235 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
 236 {
 237         struct rb_node *node = root->rb_node;
 238 
 239         while (node) {
 240                 struct res_common *res = rb_entry(node, struct res_common,
 241                                                   node);
 242 
 243                 if (res_id < res->res_id)
 244                         node = node->rb_left;
 245                 else if (res_id > res->res_id)
 246                         node = node->rb_right;
 247                 else
 248                         return res;
 249         }
 250         return NULL;
 251 }
 252 
 253 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
 254 {
 255         struct rb_node **new = &(root->rb_node), *parent = NULL;
 256 
 257         /* Figure out where to put new node */
 258         while (*new) {
 259                 struct res_common *this = rb_entry(*new, struct res_common,
 260                                                    node);
 261 
 262                 parent = *new;
 263                 if (res->res_id < this->res_id)
 264                         new = &((*new)->rb_left);
 265                 else if (res->res_id > this->res_id)
 266                         new = &((*new)->rb_right);
 267                 else
 268                         return -EEXIST;
 269         }
 270 
 271         /* Add new node and rebalance tree. */
 272         rb_link_node(&res->node, parent, new);
 273         rb_insert_color(&res->node, root);
 274 
 275         return 0;
 276 }
 277 
 278 enum qp_transition {
 279         QP_TRANS_INIT2RTR,
 280         QP_TRANS_RTR2RTS,
 281         QP_TRANS_RTS2RTS,
 282         QP_TRANS_SQERR2RTS,
 283         QP_TRANS_SQD2SQD,
 284         QP_TRANS_SQD2RTS
 285 };
 286 
 287 /* For Debug uses */
 288 static const char *resource_str(enum mlx4_resource rt)
 289 {
 290         switch (rt) {
 291         case RES_QP: return "RES_QP";
 292         case RES_CQ: return "RES_CQ";
 293         case RES_SRQ: return "RES_SRQ";
 294         case RES_MPT: return "RES_MPT";
 295         case RES_MTT: return "RES_MTT";
 296         case RES_MAC: return  "RES_MAC";
 297         case RES_VLAN: return  "RES_VLAN";
 298         case RES_EQ: return "RES_EQ";
 299         case RES_COUNTER: return "RES_COUNTER";
 300         case RES_FS_RULE: return "RES_FS_RULE";
 301         case RES_XRCD: return "RES_XRCD";
 302         default: return "Unknown resource type !!!";
 303         };
 304 }
 305 
 306 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
 307 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
 308                                       enum mlx4_resource res_type, int count,
 309                                       int port)
 310 {
 311         struct mlx4_priv *priv = mlx4_priv(dev);
 312         struct resource_allocator *res_alloc =
 313                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
 314         int err = -EDQUOT;
 315         int allocated, free, reserved, guaranteed, from_free;
 316         int from_rsvd;
 317 
 318         if (slave > dev->persist->num_vfs)
 319                 return -EINVAL;
 320 
 321         spin_lock(&res_alloc->alloc_lock);
 322         allocated = (port > 0) ?
 323                 res_alloc->allocated[(port - 1) *
 324                 (dev->persist->num_vfs + 1) + slave] :
 325                 res_alloc->allocated[slave];
 326         free = (port > 0) ? res_alloc->res_port_free[port - 1] :
 327                 res_alloc->res_free;
 328         reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
 329                 res_alloc->res_reserved;
 330         guaranteed = res_alloc->guaranteed[slave];
 331 
 332         if (allocated + count > res_alloc->quota[slave]) {
 333                 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
 334                           slave, port, resource_str(res_type), count,
 335                           allocated, res_alloc->quota[slave]);
 336                 goto out;
 337         }
 338 
 339         if (allocated + count <= guaranteed) {
 340                 err = 0;
 341                 from_rsvd = count;
 342         } else {
 343                 /* portion may need to be obtained from free area */
 344                 if (guaranteed - allocated > 0)
 345                         from_free = count - (guaranteed - allocated);
 346                 else
 347                         from_free = count;
 348 
 349                 from_rsvd = count - from_free;
 350 
 351                 if (free - from_free >= reserved)
 352                         err = 0;
 353                 else
 354                         mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
 355                                   slave, port, resource_str(res_type), free,
 356                                   from_free, reserved);
 357         }
 358 
 359         if (!err) {
 360                 /* grant the request */
 361                 if (port > 0) {
 362                         res_alloc->allocated[(port - 1) *
 363                         (dev->persist->num_vfs + 1) + slave] += count;
 364                         res_alloc->res_port_free[port - 1] -= count;
 365                         res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
 366                 } else {
 367                         res_alloc->allocated[slave] += count;
 368                         res_alloc->res_free -= count;
 369                         res_alloc->res_reserved -= from_rsvd;
 370                 }
 371         }
 372 
 373 out:
 374         spin_unlock(&res_alloc->alloc_lock);
 375         return err;
 376 }
 377 
 378 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
 379                                     enum mlx4_resource res_type, int count,
 380                                     int port)
 381 {
 382         struct mlx4_priv *priv = mlx4_priv(dev);
 383         struct resource_allocator *res_alloc =
 384                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
 385         int allocated, guaranteed, from_rsvd;
 386 
 387         if (slave > dev->persist->num_vfs)
 388                 return;
 389 
 390         spin_lock(&res_alloc->alloc_lock);
 391 
 392         allocated = (port > 0) ?
 393                 res_alloc->allocated[(port - 1) *
 394                 (dev->persist->num_vfs + 1) + slave] :
 395                 res_alloc->allocated[slave];
 396         guaranteed = res_alloc->guaranteed[slave];
 397 
 398         if (allocated - count >= guaranteed) {
 399                 from_rsvd = 0;
 400         } else {
 401                 /* portion may need to be returned to reserved area */
 402                 if (allocated - guaranteed > 0)
 403                         from_rsvd = count - (allocated - guaranteed);
 404                 else
 405                         from_rsvd = count;
 406         }
 407 
 408         if (port > 0) {
 409                 res_alloc->allocated[(port - 1) *
 410                 (dev->persist->num_vfs + 1) + slave] -= count;
 411                 res_alloc->res_port_free[port - 1] += count;
 412                 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
 413         } else {
 414                 res_alloc->allocated[slave] -= count;
 415                 res_alloc->res_free += count;
 416                 res_alloc->res_reserved += from_rsvd;
 417         }
 418 
 419         spin_unlock(&res_alloc->alloc_lock);
 420         return;
 421 }
 422 
 423 static inline void initialize_res_quotas(struct mlx4_dev *dev,
 424                                          struct resource_allocator *res_alloc,
 425                                          enum mlx4_resource res_type,
 426                                          int vf, int num_instances)
 427 {
 428         res_alloc->guaranteed[vf] = num_instances /
 429                                     (2 * (dev->persist->num_vfs + 1));
 430         res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
 431         if (vf == mlx4_master_func_num(dev)) {
 432                 res_alloc->res_free = num_instances;
 433                 if (res_type == RES_MTT) {
 434                         /* reserved mtts will be taken out of the PF allocation */
 435                         res_alloc->res_free += dev->caps.reserved_mtts;
 436                         res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
 437                         res_alloc->quota[vf] += dev->caps.reserved_mtts;
 438                 }
 439         }
 440 }
 441 
 442 void mlx4_init_quotas(struct mlx4_dev *dev)
 443 {
 444         struct mlx4_priv *priv = mlx4_priv(dev);
 445         int pf;
 446 
 447         /* quotas for VFs are initialized in mlx4_slave_cap */
 448         if (mlx4_is_slave(dev))
 449                 return;
 450 
 451         if (!mlx4_is_mfunc(dev)) {
 452                 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
 453                         mlx4_num_reserved_sqps(dev);
 454                 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
 455                 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
 456                 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
 457                 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
 458                 return;
 459         }
 460 
 461         pf = mlx4_master_func_num(dev);
 462         dev->quotas.qp =
 463                 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
 464         dev->quotas.cq =
 465                 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
 466         dev->quotas.srq =
 467                 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
 468         dev->quotas.mtt =
 469                 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
 470         dev->quotas.mpt =
 471                 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
 472 }
 473 
 474 static int
 475 mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
 476                                  struct resource_allocator *res_alloc,
 477                                  int vf)
 478 {
 479         struct mlx4_active_ports actv_ports;
 480         int ports, counters_guaranteed;
 481 
 482         /* For master, only allocate according to the number of phys ports */
 483         if (vf == mlx4_master_func_num(dev))
 484                 return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
 485 
 486         /* calculate real number of ports for the VF */
 487         actv_ports = mlx4_get_active_ports(dev, vf);
 488         ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
 489         counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
 490 
 491         /* If we do not have enough counters for this VF, do not
 492          * allocate any for it. '-1' to reduce the sink counter.
 493          */
 494         if ((res_alloc->res_reserved + counters_guaranteed) >
 495             (dev->caps.max_counters - 1))
 496                 return 0;
 497 
 498         return counters_guaranteed;
 499 }
 500 
 501 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
 502 {
 503         struct mlx4_priv *priv = mlx4_priv(dev);
 504         int i, j;
 505         int t;
 506 
 507         priv->mfunc.master.res_tracker.slave_list =
 508                 kcalloc(dev->num_slaves, sizeof(struct slave_list),
 509                         GFP_KERNEL);
 510         if (!priv->mfunc.master.res_tracker.slave_list)
 511                 return -ENOMEM;
 512 
 513         for (i = 0 ; i < dev->num_slaves; i++) {
 514                 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
 515                         INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
 516                                        slave_list[i].res_list[t]);
 517                 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
 518         }
 519 
 520         mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
 521                  dev->num_slaves);
 522         for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
 523                 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
 524 
 525         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
 526                 struct resource_allocator *res_alloc =
 527                         &priv->mfunc.master.res_tracker.res_alloc[i];
 528                 res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
 529                                                  sizeof(int),
 530                                                  GFP_KERNEL);
 531                 res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
 532                                                       sizeof(int),
 533                                                       GFP_KERNEL);
 534                 if (i == RES_MAC || i == RES_VLAN)
 535                         res_alloc->allocated =
 536                                 kcalloc(MLX4_MAX_PORTS *
 537                                                 (dev->persist->num_vfs + 1),
 538                                         sizeof(int), GFP_KERNEL);
 539                 else
 540                         res_alloc->allocated =
 541                                 kcalloc(dev->persist->num_vfs + 1,
 542                                         sizeof(int), GFP_KERNEL);
 543                 /* Reduce the sink counter */
 544                 if (i == RES_COUNTER)
 545                         res_alloc->res_free = dev->caps.max_counters - 1;
 546 
 547                 if (!res_alloc->quota || !res_alloc->guaranteed ||
 548                     !res_alloc->allocated)
 549                         goto no_mem_err;
 550 
 551                 spin_lock_init(&res_alloc->alloc_lock);
 552                 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
 553                         struct mlx4_active_ports actv_ports =
 554                                 mlx4_get_active_ports(dev, t);
 555                         switch (i) {
 556                         case RES_QP:
 557                                 initialize_res_quotas(dev, res_alloc, RES_QP,
 558                                                       t, dev->caps.num_qps -
 559                                                       dev->caps.reserved_qps -
 560                                                       mlx4_num_reserved_sqps(dev));
 561                                 break;
 562                         case RES_CQ:
 563                                 initialize_res_quotas(dev, res_alloc, RES_CQ,
 564                                                       t, dev->caps.num_cqs -
 565                                                       dev->caps.reserved_cqs);
 566                                 break;
 567                         case RES_SRQ:
 568                                 initialize_res_quotas(dev, res_alloc, RES_SRQ,
 569                                                       t, dev->caps.num_srqs -
 570                                                       dev->caps.reserved_srqs);
 571                                 break;
 572                         case RES_MPT:
 573                                 initialize_res_quotas(dev, res_alloc, RES_MPT,
 574                                                       t, dev->caps.num_mpts -
 575                                                       dev->caps.reserved_mrws);
 576                                 break;
 577                         case RES_MTT:
 578                                 initialize_res_quotas(dev, res_alloc, RES_MTT,
 579                                                       t, dev->caps.num_mtts -
 580                                                       dev->caps.reserved_mtts);
 581                                 break;
 582                         case RES_MAC:
 583                                 if (t == mlx4_master_func_num(dev)) {
 584                                         int max_vfs_pport = 0;
 585                                         /* Calculate the max vfs per port for */
 586                                         /* both ports.                        */
 587                                         for (j = 0; j < dev->caps.num_ports;
 588                                              j++) {
 589                                                 struct mlx4_slaves_pport slaves_pport =
 590                                                         mlx4_phys_to_slaves_pport(dev, j + 1);
 591                                                 unsigned current_slaves =
 592                                                         bitmap_weight(slaves_pport.slaves,
 593                                                                       dev->caps.num_ports) - 1;
 594                                                 if (max_vfs_pport < current_slaves)
 595                                                         max_vfs_pport =
 596                                                                 current_slaves;
 597                                         }
 598                                         res_alloc->quota[t] =
 599                                                 MLX4_MAX_MAC_NUM -
 600                                                 2 * max_vfs_pport;
 601                                         res_alloc->guaranteed[t] = 2;
 602                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
 603                                                 res_alloc->res_port_free[j] =
 604                                                         MLX4_MAX_MAC_NUM;
 605                                 } else {
 606                                         res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
 607                                         res_alloc->guaranteed[t] = 2;
 608                                 }
 609                                 break;
 610                         case RES_VLAN:
 611                                 if (t == mlx4_master_func_num(dev)) {
 612                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
 613                                         res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
 614                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
 615                                                 res_alloc->res_port_free[j] =
 616                                                         res_alloc->quota[t];
 617                                 } else {
 618                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
 619                                         res_alloc->guaranteed[t] = 0;
 620                                 }
 621                                 break;
 622                         case RES_COUNTER:
 623                                 res_alloc->quota[t] = dev->caps.max_counters;
 624                                 res_alloc->guaranteed[t] =
 625                                         mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
 626                                 break;
 627                         default:
 628                                 break;
 629                         }
 630                         if (i == RES_MAC || i == RES_VLAN) {
 631                                 for (j = 0; j < dev->caps.num_ports; j++)
 632                                         if (test_bit(j, actv_ports.ports))
 633                                                 res_alloc->res_port_rsvd[j] +=
 634                                                         res_alloc->guaranteed[t];
 635                         } else {
 636                                 res_alloc->res_reserved += res_alloc->guaranteed[t];
 637                         }
 638                 }
 639         }
 640         spin_lock_init(&priv->mfunc.master.res_tracker.lock);
 641         return 0;
 642 
 643 no_mem_err:
 644         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
 645                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
 646                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
 647                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
 648                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
 649                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
 650                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
 651         }
 652         return -ENOMEM;
 653 }
 654 
 655 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
 656                                 enum mlx4_res_tracker_free_type type)
 657 {
 658         struct mlx4_priv *priv = mlx4_priv(dev);
 659         int i;
 660 
 661         if (priv->mfunc.master.res_tracker.slave_list) {
 662                 if (type != RES_TR_FREE_STRUCTS_ONLY) {
 663                         for (i = 0; i < dev->num_slaves; i++) {
 664                                 if (type == RES_TR_FREE_ALL ||
 665                                     dev->caps.function != i)
 666                                         mlx4_delete_all_resources_for_slave(dev, i);
 667                         }
 668                         /* free master's vlans */
 669                         i = dev->caps.function;
 670                         mlx4_reset_roce_gids(dev, i);
 671                         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
 672                         rem_slave_vlans(dev, i);
 673                         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
 674                 }
 675 
 676                 if (type != RES_TR_FREE_SLAVES_ONLY) {
 677                         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
 678                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
 679                                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
 680                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
 681                                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
 682                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
 683                                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
 684                         }
 685                         kfree(priv->mfunc.master.res_tracker.slave_list);
 686                         priv->mfunc.master.res_tracker.slave_list = NULL;
 687                 }
 688         }
 689 }
 690 
 691 static void update_pkey_index(struct mlx4_dev *dev, int slave,
 692                               struct mlx4_cmd_mailbox *inbox)
 693 {
 694         u8 sched = *(u8 *)(inbox->buf + 64);
 695         u8 orig_index = *(u8 *)(inbox->buf + 35);
 696         u8 new_index;
 697         struct mlx4_priv *priv = mlx4_priv(dev);
 698         int port;
 699 
 700         port = (sched >> 6 & 1) + 1;
 701 
 702         new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
 703         *(u8 *)(inbox->buf + 35) = new_index;
 704 }
 705 
 706 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
 707                        u8 slave)
 708 {
 709         struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
 710         enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
 711         u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
 712         int port;
 713 
 714         if (MLX4_QP_ST_UD == ts) {
 715                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
 716                 if (mlx4_is_eth(dev, port))
 717                         qp_ctx->pri_path.mgid_index =
 718                                 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
 719                 else
 720                         qp_ctx->pri_path.mgid_index = slave | 0x80;
 721 
 722         } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
 723                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
 724                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
 725                         if (mlx4_is_eth(dev, port)) {
 726                                 qp_ctx->pri_path.mgid_index +=
 727                                         mlx4_get_base_gid_ix(dev, slave, port);
 728                                 qp_ctx->pri_path.mgid_index &= 0x7f;
 729                         } else {
 730                                 qp_ctx->pri_path.mgid_index = slave & 0x7F;
 731                         }
 732                 }
 733                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
 734                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
 735                         if (mlx4_is_eth(dev, port)) {
 736                                 qp_ctx->alt_path.mgid_index +=
 737                                         mlx4_get_base_gid_ix(dev, slave, port);
 738                                 qp_ctx->alt_path.mgid_index &= 0x7f;
 739                         } else {
 740                                 qp_ctx->alt_path.mgid_index = slave & 0x7F;
 741                         }
 742                 }
 743         }
 744 }
 745 
 746 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
 747                           u8 slave, int port);
 748 
 749 static int update_vport_qp_param(struct mlx4_dev *dev,
 750                                  struct mlx4_cmd_mailbox *inbox,
 751                                  u8 slave, u32 qpn)
 752 {
 753         struct mlx4_qp_context  *qpc = inbox->buf + 8;
 754         struct mlx4_vport_oper_state *vp_oper;
 755         struct mlx4_priv *priv;
 756         u32 qp_type;
 757         int port, err = 0;
 758 
 759         port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
 760         priv = mlx4_priv(dev);
 761         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 762         qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 763 
 764         err = handle_counter(dev, qpc, slave, port);
 765         if (err)
 766                 goto out;
 767 
 768         if (MLX4_VGT != vp_oper->state.default_vlan) {
 769                 /* the reserved QPs (special, proxy, tunnel)
 770                  * do not operate over vlans
 771                  */
 772                 if (mlx4_is_qp_reserved(dev, qpn))
 773                         return 0;
 774 
 775                 /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
 776                 if (qp_type == MLX4_QP_ST_UD ||
 777                     (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
 778                         if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
 779                                 *(__be32 *)inbox->buf =
 780                                         cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
 781                                         MLX4_QP_OPTPAR_VLAN_STRIPPING);
 782                                 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
 783                         } else {
 784                                 struct mlx4_update_qp_params params = {.flags = 0};
 785 
 786                                 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
 787                                 if (err)
 788                                         goto out;
 789                         }
 790                 }
 791 
 792                 /* preserve IF_COUNTER flag */
 793                 qpc->pri_path.vlan_control &=
 794                         MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
 795                 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
 796                     dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
 797                         qpc->pri_path.vlan_control |=
 798                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
 799                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
 800                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
 801                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
 802                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
 803                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
 804                 } else if (0 != vp_oper->state.default_vlan) {
 805                         if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
 806                                 /* vst QinQ should block untagged on TX,
 807                                  * but cvlan is in payload and phv is set so
 808                                  * hw see it as untagged. Block tagged instead.
 809                                  */
 810                                 qpc->pri_path.vlan_control |=
 811                                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
 812                                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
 813                                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
 814                                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 815                         } else { /* vst 802.1Q */
 816                                 qpc->pri_path.vlan_control |=
 817                                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
 818                                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
 819                                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 820                         }
 821                 } else { /* priority tagged */
 822                         qpc->pri_path.vlan_control |=
 823                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
 824                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
 825                 }
 826 
 827                 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
 828                 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
 829                 qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
 830                 if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
 831                         qpc->pri_path.fl |= MLX4_FL_SV;
 832                 else
 833                         qpc->pri_path.fl |= MLX4_FL_CV;
 834                 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
 835                 qpc->pri_path.sched_queue &= 0xC7;
 836                 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
 837                 qpc->qos_vport = vp_oper->state.qos_vport;
 838         }
 839         if (vp_oper->state.spoofchk) {
 840                 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
 841                 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
 842         }
 843 out:
 844         return err;
 845 }
 846 
 847 static int mpt_mask(struct mlx4_dev *dev)
 848 {
 849         return dev->caps.num_mpts - 1;
 850 }
 851 
 852 static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
 853 {
 854         switch (t) {
 855         case RES_QP:
 856                 return "QP";
 857         case RES_CQ:
 858                 return "CQ";
 859         case RES_SRQ:
 860                 return "SRQ";
 861         case RES_XRCD:
 862                 return "XRCD";
 863         case RES_MPT:
 864                 return "MPT";
 865         case RES_MTT:
 866                 return "MTT";
 867         case RES_MAC:
 868                 return "MAC";
 869         case RES_VLAN:
 870                 return "VLAN";
 871         case RES_COUNTER:
 872                 return "COUNTER";
 873         case RES_FS_RULE:
 874                 return "FS_RULE";
 875         case RES_EQ:
 876                 return "EQ";
 877         default:
 878                 return "INVALID RESOURCE";
 879         }
 880 }
 881 
 882 static void *find_res(struct mlx4_dev *dev, u64 res_id,
 883                       enum mlx4_resource type)
 884 {
 885         struct mlx4_priv *priv = mlx4_priv(dev);
 886 
 887         return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
 888                                   res_id);
 889 }
 890 
 891 static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
 892                     enum mlx4_resource type,
 893                     void *res, const char *func_name)
 894 {
 895         struct res_common *r;
 896         int err = 0;
 897 
 898         spin_lock_irq(mlx4_tlock(dev));
 899         r = find_res(dev, res_id, type);
 900         if (!r) {
 901                 err = -ENONET;
 902                 goto exit;
 903         }
 904 
 905         if (r->state == RES_ANY_BUSY) {
 906                 mlx4_warn(dev,
 907                           "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
 908                           func_name, slave, res_id, mlx4_resource_type_to_str(type),
 909                           r->func_name);
 910                 err = -EBUSY;
 911                 goto exit;
 912         }
 913 
 914         if (r->owner != slave) {
 915                 err = -EPERM;
 916                 goto exit;
 917         }
 918 
 919         r->from_state = r->state;
 920         r->state = RES_ANY_BUSY;
 921         r->func_name = func_name;
 922 
 923         if (res)
 924                 *((struct res_common **)res) = r;
 925 
 926 exit:
 927         spin_unlock_irq(mlx4_tlock(dev));
 928         return err;
 929 }
 930 
 931 #define get_res(dev, slave, res_id, type, res) \
 932         _get_res((dev), (slave), (res_id), (type), (res), __func__)
 933 
 934 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
 935                                     enum mlx4_resource type,
 936                                     u64 res_id, int *slave)
 937 {
 938 
 939         struct res_common *r;
 940         int err = -ENOENT;
 941         int id = res_id;
 942 
 943         if (type == RES_QP)
 944                 id &= 0x7fffff;
 945         spin_lock(mlx4_tlock(dev));
 946 
 947         r = find_res(dev, id, type);
 948         if (r) {
 949                 *slave = r->owner;
 950                 err = 0;
 951         }
 952         spin_unlock(mlx4_tlock(dev));
 953 
 954         return err;
 955 }
 956 
 957 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
 958                     enum mlx4_resource type)
 959 {
 960         struct res_common *r;
 961 
 962         spin_lock_irq(mlx4_tlock(dev));
 963         r = find_res(dev, res_id, type);
 964         if (r) {
 965                 r->state = r->from_state;
 966                 r->func_name = "";
 967         }
 968         spin_unlock_irq(mlx4_tlock(dev));
 969 }
 970 
 971 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 972                              u64 in_param, u64 *out_param, int port);
 973 
 974 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
 975                                    int counter_index)
 976 {
 977         struct res_common *r;
 978         struct res_counter *counter;
 979         int ret = 0;
 980 
 981         if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
 982                 return ret;
 983 
 984         spin_lock_irq(mlx4_tlock(dev));
 985         r = find_res(dev, counter_index, RES_COUNTER);
 986         if (!r || r->owner != slave) {
 987                 ret = -EINVAL;
 988         } else {
 989                 counter = container_of(r, struct res_counter, com);
 990                 if (!counter->port)
 991                         counter->port = port;
 992         }
 993 
 994         spin_unlock_irq(mlx4_tlock(dev));
 995         return ret;
 996 }
 997 
 998 static int handle_unexisting_counter(struct mlx4_dev *dev,
 999                                      struct mlx4_qp_context *qpc, u8 slave,
1000                                      int port)
1001 {
1002         struct mlx4_priv *priv = mlx4_priv(dev);
1003         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1004         struct res_common *tmp;
1005         struct res_counter *counter;
1006         u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
1007         int err = 0;
1008 
1009         spin_lock_irq(mlx4_tlock(dev));
1010         list_for_each_entry(tmp,
1011                             &tracker->slave_list[slave].res_list[RES_COUNTER],
1012                             list) {
1013                 counter = container_of(tmp, struct res_counter, com);
1014                 if (port == counter->port) {
1015                         qpc->pri_path.counter_index  = counter->com.res_id;
1016                         spin_unlock_irq(mlx4_tlock(dev));
1017                         return 0;
1018                 }
1019         }
1020         spin_unlock_irq(mlx4_tlock(dev));
1021 
1022         /* No existing counter, need to allocate a new counter */
1023         err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
1024                                 port);
1025         if (err == -ENOENT) {
1026                 err = 0;
1027         } else if (err && err != -ENOSPC) {
1028                 mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
1029                          __func__, slave, err);
1030         } else {
1031                 qpc->pri_path.counter_index = counter_idx;
1032                 mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
1033                          __func__, slave, qpc->pri_path.counter_index);
1034                 err = 0;
1035         }
1036 
1037         return err;
1038 }
1039 
1040 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
1041                           u8 slave, int port)
1042 {
1043         if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
1044                 return handle_existing_counter(dev, slave, port,
1045                                                qpc->pri_path.counter_index);
1046 
1047         return handle_unexisting_counter(dev, qpc, slave, port);
1048 }
1049 
1050 static struct res_common *alloc_qp_tr(int id)
1051 {
1052         struct res_qp *ret;
1053 
1054         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1055         if (!ret)
1056                 return NULL;
1057 
1058         ret->com.res_id = id;
1059         ret->com.state = RES_QP_RESERVED;
1060         ret->local_qpn = id;
1061         INIT_LIST_HEAD(&ret->mcg_list);
1062         spin_lock_init(&ret->mcg_spl);
1063         atomic_set(&ret->ref_count, 0);
1064 
1065         return &ret->com;
1066 }
1067 
1068 static struct res_common *alloc_mtt_tr(int id, int order)
1069 {
1070         struct res_mtt *ret;
1071 
1072         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1073         if (!ret)
1074                 return NULL;
1075 
1076         ret->com.res_id = id;
1077         ret->order = order;
1078         ret->com.state = RES_MTT_ALLOCATED;
1079         atomic_set(&ret->ref_count, 0);
1080 
1081         return &ret->com;
1082 }
1083 
1084 static struct res_common *alloc_mpt_tr(int id, int key)
1085 {
1086         struct res_mpt *ret;
1087 
1088         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1089         if (!ret)
1090                 return NULL;
1091 
1092         ret->com.res_id = id;
1093         ret->com.state = RES_MPT_RESERVED;
1094         ret->key = key;
1095 
1096         return &ret->com;
1097 }
1098 
1099 static struct res_common *alloc_eq_tr(int id)
1100 {
1101         struct res_eq *ret;
1102 
1103         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1104         if (!ret)
1105                 return NULL;
1106 
1107         ret->com.res_id = id;
1108         ret->com.state = RES_EQ_RESERVED;
1109 
1110         return &ret->com;
1111 }
1112 
1113 static struct res_common *alloc_cq_tr(int id)
1114 {
1115         struct res_cq *ret;
1116 
1117         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1118         if (!ret)
1119                 return NULL;
1120 
1121         ret->com.res_id = id;
1122         ret->com.state = RES_CQ_ALLOCATED;
1123         atomic_set(&ret->ref_count, 0);
1124 
1125         return &ret->com;
1126 }
1127 
1128 static struct res_common *alloc_srq_tr(int id)
1129 {
1130         struct res_srq *ret;
1131 
1132         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1133         if (!ret)
1134                 return NULL;
1135 
1136         ret->com.res_id = id;
1137         ret->com.state = RES_SRQ_ALLOCATED;
1138         atomic_set(&ret->ref_count, 0);
1139 
1140         return &ret->com;
1141 }
1142 
1143 static struct res_common *alloc_counter_tr(int id, int port)
1144 {
1145         struct res_counter *ret;
1146 
1147         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1148         if (!ret)
1149                 return NULL;
1150 
1151         ret->com.res_id = id;
1152         ret->com.state = RES_COUNTER_ALLOCATED;
1153         ret->port = port;
1154 
1155         return &ret->com;
1156 }
1157 
1158 static struct res_common *alloc_xrcdn_tr(int id)
1159 {
1160         struct res_xrcdn *ret;
1161 
1162         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1163         if (!ret)
1164                 return NULL;
1165 
1166         ret->com.res_id = id;
1167         ret->com.state = RES_XRCD_ALLOCATED;
1168 
1169         return &ret->com;
1170 }
1171 
1172 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1173 {
1174         struct res_fs_rule *ret;
1175 
1176         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1177         if (!ret)
1178                 return NULL;
1179 
1180         ret->com.res_id = id;
1181         ret->com.state = RES_FS_RULE_ALLOCATED;
1182         ret->qpn = qpn;
1183         return &ret->com;
1184 }
1185 
1186 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1187                                    int extra)
1188 {
1189         struct res_common *ret;
1190 
1191         switch (type) {
1192         case RES_QP:
1193                 ret = alloc_qp_tr(id);
1194                 break;
1195         case RES_MPT:
1196                 ret = alloc_mpt_tr(id, extra);
1197                 break;
1198         case RES_MTT:
1199                 ret = alloc_mtt_tr(id, extra);
1200                 break;
1201         case RES_EQ:
1202                 ret = alloc_eq_tr(id);
1203                 break;
1204         case RES_CQ:
1205                 ret = alloc_cq_tr(id);
1206                 break;
1207         case RES_SRQ:
1208                 ret = alloc_srq_tr(id);
1209                 break;
1210         case RES_MAC:
1211                 pr_err("implementation missing\n");
1212                 return NULL;
1213         case RES_COUNTER:
1214                 ret = alloc_counter_tr(id, extra);
1215                 break;
1216         case RES_XRCD:
1217                 ret = alloc_xrcdn_tr(id);
1218                 break;
1219         case RES_FS_RULE:
1220                 ret = alloc_fs_rule_tr(id, extra);
1221                 break;
1222         default:
1223                 return NULL;
1224         }
1225         if (ret)
1226                 ret->owner = slave;
1227 
1228         return ret;
1229 }
1230 
1231 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1232                           struct mlx4_counter *data)
1233 {
1234         struct mlx4_priv *priv = mlx4_priv(dev);
1235         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1236         struct res_common *tmp;
1237         struct res_counter *counter;
1238         int *counters_arr;
1239         int i = 0, err = 0;
1240 
1241         memset(data, 0, sizeof(*data));
1242 
1243         counters_arr = kmalloc_array(dev->caps.max_counters,
1244                                      sizeof(*counters_arr), GFP_KERNEL);
1245         if (!counters_arr)
1246                 return -ENOMEM;
1247 
1248         spin_lock_irq(mlx4_tlock(dev));
1249         list_for_each_entry(tmp,
1250                             &tracker->slave_list[slave].res_list[RES_COUNTER],
1251                             list) {
1252                 counter = container_of(tmp, struct res_counter, com);
1253                 if (counter->port == port) {
1254                         counters_arr[i] = (int)tmp->res_id;
1255                         i++;
1256                 }
1257         }
1258         spin_unlock_irq(mlx4_tlock(dev));
1259         counters_arr[i] = -1;
1260 
1261         i = 0;
1262 
1263         while (counters_arr[i] != -1) {
1264                 err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1265                                              0);
1266                 if (err) {
1267                         memset(data, 0, sizeof(*data));
1268                         goto table_changed;
1269                 }
1270                 i++;
1271         }
1272 
1273 table_changed:
1274         kfree(counters_arr);
1275         return 0;
1276 }
1277 
1278 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1279                          enum mlx4_resource type, int extra)
1280 {
1281         int i;
1282         int err;
1283         struct mlx4_priv *priv = mlx4_priv(dev);
1284         struct res_common **res_arr;
1285         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1286         struct rb_root *root = &tracker->res_tree[type];
1287 
1288         res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
1289         if (!res_arr)
1290                 return -ENOMEM;
1291 
1292         for (i = 0; i < count; ++i) {
1293                 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1294                 if (!res_arr[i]) {
1295                         for (--i; i >= 0; --i)
1296                                 kfree(res_arr[i]);
1297 
1298                         kfree(res_arr);
1299                         return -ENOMEM;
1300                 }
1301         }
1302 
1303         spin_lock_irq(mlx4_tlock(dev));
1304         for (i = 0; i < count; ++i) {
1305                 if (find_res(dev, base + i, type)) {
1306                         err = -EEXIST;
1307                         goto undo;
1308                 }
1309                 err = res_tracker_insert(root, res_arr[i]);
1310                 if (err)
1311                         goto undo;
1312                 list_add_tail(&res_arr[i]->list,
1313                               &tracker->slave_list[slave].res_list[type]);
1314         }
1315         spin_unlock_irq(mlx4_tlock(dev));
1316         kfree(res_arr);
1317 
1318         return 0;
1319 
1320 undo:
1321         for (--i; i >= 0; --i) {
1322                 rb_erase(&res_arr[i]->node, root);
1323                 list_del_init(&res_arr[i]->list);
1324         }
1325 
1326         spin_unlock_irq(mlx4_tlock(dev));
1327 
1328         for (i = 0; i < count; ++i)
1329                 kfree(res_arr[i]);
1330 
1331         kfree(res_arr);
1332 
1333         return err;
1334 }
1335 
1336 static int remove_qp_ok(struct res_qp *res)
1337 {
1338         if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1339             !list_empty(&res->mcg_list)) {
1340                 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1341                        res->com.state, atomic_read(&res->ref_count));
1342                 return -EBUSY;
1343         } else if (res->com.state != RES_QP_RESERVED) {
1344                 return -EPERM;
1345         }
1346 
1347         return 0;
1348 }
1349 
1350 static int remove_mtt_ok(struct res_mtt *res, int order)
1351 {
1352         if (res->com.state == RES_MTT_BUSY ||
1353             atomic_read(&res->ref_count)) {
1354                 pr_devel("%s-%d: state %s, ref_count %d\n",
1355                          __func__, __LINE__,
1356                          mtt_states_str(res->com.state),
1357                          atomic_read(&res->ref_count));
1358                 return -EBUSY;
1359         } else if (res->com.state != RES_MTT_ALLOCATED)
1360                 return -EPERM;
1361         else if (res->order != order)
1362                 return -EINVAL;
1363 
1364         return 0;
1365 }
1366 
1367 static int remove_mpt_ok(struct res_mpt *res)
1368 {
1369         if (res->com.state == RES_MPT_BUSY)
1370                 return -EBUSY;
1371         else if (res->com.state != RES_MPT_RESERVED)
1372                 return -EPERM;
1373 
1374         return 0;
1375 }
1376 
1377 static int remove_eq_ok(struct res_eq *res)
1378 {
1379         if (res->com.state == RES_MPT_BUSY)
1380                 return -EBUSY;
1381         else if (res->com.state != RES_MPT_RESERVED)
1382                 return -EPERM;
1383 
1384         return 0;
1385 }
1386 
1387 static int remove_counter_ok(struct res_counter *res)
1388 {
1389         if (res->com.state == RES_COUNTER_BUSY)
1390                 return -EBUSY;
1391         else if (res->com.state != RES_COUNTER_ALLOCATED)
1392                 return -EPERM;
1393 
1394         return 0;
1395 }
1396 
1397 static int remove_xrcdn_ok(struct res_xrcdn *res)
1398 {
1399         if (res->com.state == RES_XRCD_BUSY)
1400                 return -EBUSY;
1401         else if (res->com.state != RES_XRCD_ALLOCATED)
1402                 return -EPERM;
1403 
1404         return 0;
1405 }
1406 
1407 static int remove_fs_rule_ok(struct res_fs_rule *res)
1408 {
1409         if (res->com.state == RES_FS_RULE_BUSY)
1410                 return -EBUSY;
1411         else if (res->com.state != RES_FS_RULE_ALLOCATED)
1412                 return -EPERM;
1413 
1414         return 0;
1415 }
1416 
1417 static int remove_cq_ok(struct res_cq *res)
1418 {
1419         if (res->com.state == RES_CQ_BUSY)
1420                 return -EBUSY;
1421         else if (res->com.state != RES_CQ_ALLOCATED)
1422                 return -EPERM;
1423 
1424         return 0;
1425 }
1426 
1427 static int remove_srq_ok(struct res_srq *res)
1428 {
1429         if (res->com.state == RES_SRQ_BUSY)
1430                 return -EBUSY;
1431         else if (res->com.state != RES_SRQ_ALLOCATED)
1432                 return -EPERM;
1433 
1434         return 0;
1435 }
1436 
1437 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1438 {
1439         switch (type) {
1440         case RES_QP:
1441                 return remove_qp_ok((struct res_qp *)res);
1442         case RES_CQ:
1443                 return remove_cq_ok((struct res_cq *)res);
1444         case RES_SRQ:
1445                 return remove_srq_ok((struct res_srq *)res);
1446         case RES_MPT:
1447                 return remove_mpt_ok((struct res_mpt *)res);
1448         case RES_MTT:
1449                 return remove_mtt_ok((struct res_mtt *)res, extra);
1450         case RES_MAC:
1451                 return -EOPNOTSUPP;
1452         case RES_EQ:
1453                 return remove_eq_ok((struct res_eq *)res);
1454         case RES_COUNTER:
1455                 return remove_counter_ok((struct res_counter *)res);
1456         case RES_XRCD:
1457                 return remove_xrcdn_ok((struct res_xrcdn *)res);
1458         case RES_FS_RULE:
1459                 return remove_fs_rule_ok((struct res_fs_rule *)res);
1460         default:
1461                 return -EINVAL;
1462         }
1463 }
1464 
1465 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1466                          enum mlx4_resource type, int extra)
1467 {
1468         u64 i;
1469         int err;
1470         struct mlx4_priv *priv = mlx4_priv(dev);
1471         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1472         struct res_common *r;
1473 
1474         spin_lock_irq(mlx4_tlock(dev));
1475         for (i = base; i < base + count; ++i) {
1476                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1477                 if (!r) {
1478                         err = -ENOENT;
1479                         goto out;
1480                 }
1481                 if (r->owner != slave) {
1482                         err = -EPERM;
1483                         goto out;
1484                 }
1485                 err = remove_ok(r, type, extra);
1486                 if (err)
1487                         goto out;
1488         }
1489 
1490         for (i = base; i < base + count; ++i) {
1491                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1492                 rb_erase(&r->node, &tracker->res_tree[type]);
1493                 list_del(&r->list);
1494                 kfree(r);
1495         }
1496         err = 0;
1497 
1498 out:
1499         spin_unlock_irq(mlx4_tlock(dev));
1500 
1501         return err;
1502 }
1503 
1504 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1505                                 enum res_qp_states state, struct res_qp **qp,
1506                                 int alloc)
1507 {
1508         struct mlx4_priv *priv = mlx4_priv(dev);
1509         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1510         struct res_qp *r;
1511         int err = 0;
1512 
1513         spin_lock_irq(mlx4_tlock(dev));
1514         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1515         if (!r)
1516                 err = -ENOENT;
1517         else if (r->com.owner != slave)
1518                 err = -EPERM;
1519         else {
1520                 switch (state) {
1521                 case RES_QP_BUSY:
1522                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1523                                  __func__, r->com.res_id);
1524                         err = -EBUSY;
1525                         break;
1526 
1527                 case RES_QP_RESERVED:
1528                         if (r->com.state == RES_QP_MAPPED && !alloc)
1529                                 break;
1530 
1531                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1532                         err = -EINVAL;
1533                         break;
1534 
1535                 case RES_QP_MAPPED:
1536                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
1537                             r->com.state == RES_QP_HW)
1538                                 break;
1539                         else {
1540                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1541                                           r->com.res_id);
1542                                 err = -EINVAL;
1543                         }
1544 
1545                         break;
1546 
1547                 case RES_QP_HW:
1548                         if (r->com.state != RES_QP_MAPPED)
1549                                 err = -EINVAL;
1550                         break;
1551                 default:
1552                         err = -EINVAL;
1553                 }
1554 
1555                 if (!err) {
1556                         r->com.from_state = r->com.state;
1557                         r->com.to_state = state;
1558                         r->com.state = RES_QP_BUSY;
1559                         if (qp)
1560                                 *qp = r;
1561                 }
1562         }
1563 
1564         spin_unlock_irq(mlx4_tlock(dev));
1565 
1566         return err;
1567 }
1568 
1569 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1570                                 enum res_mpt_states state, struct res_mpt **mpt)
1571 {
1572         struct mlx4_priv *priv = mlx4_priv(dev);
1573         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1574         struct res_mpt *r;
1575         int err = 0;
1576 
1577         spin_lock_irq(mlx4_tlock(dev));
1578         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1579         if (!r)
1580                 err = -ENOENT;
1581         else if (r->com.owner != slave)
1582                 err = -EPERM;
1583         else {
1584                 switch (state) {
1585                 case RES_MPT_BUSY:
1586                         err = -EINVAL;
1587                         break;
1588 
1589                 case RES_MPT_RESERVED:
1590                         if (r->com.state != RES_MPT_MAPPED)
1591                                 err = -EINVAL;
1592                         break;
1593 
1594                 case RES_MPT_MAPPED:
1595                         if (r->com.state != RES_MPT_RESERVED &&
1596                             r->com.state != RES_MPT_HW)
1597                                 err = -EINVAL;
1598                         break;
1599 
1600                 case RES_MPT_HW:
1601                         if (r->com.state != RES_MPT_MAPPED)
1602                                 err = -EINVAL;
1603                         break;
1604                 default:
1605                         err = -EINVAL;
1606                 }
1607 
1608                 if (!err) {
1609                         r->com.from_state = r->com.state;
1610                         r->com.to_state = state;
1611                         r->com.state = RES_MPT_BUSY;
1612                         if (mpt)
1613                                 *mpt = r;
1614                 }
1615         }
1616 
1617         spin_unlock_irq(mlx4_tlock(dev));
1618 
1619         return err;
1620 }
1621 
1622 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1623                                 enum res_eq_states state, struct res_eq **eq)
1624 {
1625         struct mlx4_priv *priv = mlx4_priv(dev);
1626         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1627         struct res_eq *r;
1628         int err = 0;
1629 
1630         spin_lock_irq(mlx4_tlock(dev));
1631         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1632         if (!r)
1633                 err = -ENOENT;
1634         else if (r->com.owner != slave)
1635                 err = -EPERM;
1636         else {
1637                 switch (state) {
1638                 case RES_EQ_BUSY:
1639                         err = -EINVAL;
1640                         break;
1641 
1642                 case RES_EQ_RESERVED:
1643                         if (r->com.state != RES_EQ_HW)
1644                                 err = -EINVAL;
1645                         break;
1646 
1647                 case RES_EQ_HW:
1648                         if (r->com.state != RES_EQ_RESERVED)
1649                                 err = -EINVAL;
1650                         break;
1651 
1652                 default:
1653                         err = -EINVAL;
1654                 }
1655 
1656                 if (!err) {
1657                         r->com.from_state = r->com.state;
1658                         r->com.to_state = state;
1659                         r->com.state = RES_EQ_BUSY;
1660                 }
1661         }
1662 
1663         spin_unlock_irq(mlx4_tlock(dev));
1664 
1665         if (!err && eq)
1666                 *eq = r;
1667 
1668         return err;
1669 }
1670 
1671 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1672                                 enum res_cq_states state, struct res_cq **cq)
1673 {
1674         struct mlx4_priv *priv = mlx4_priv(dev);
1675         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1676         struct res_cq *r;
1677         int err;
1678 
1679         spin_lock_irq(mlx4_tlock(dev));
1680         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1681         if (!r) {
1682                 err = -ENOENT;
1683         } else if (r->com.owner != slave) {
1684                 err = -EPERM;
1685         } else if (state == RES_CQ_ALLOCATED) {
1686                 if (r->com.state != RES_CQ_HW)
1687                         err = -EINVAL;
1688                 else if (atomic_read(&r->ref_count))
1689                         err = -EBUSY;
1690                 else
1691                         err = 0;
1692         } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1693                 err = -EINVAL;
1694         } else {
1695                 err = 0;
1696         }
1697 
1698         if (!err) {
1699                 r->com.from_state = r->com.state;
1700                 r->com.to_state = state;
1701                 r->com.state = RES_CQ_BUSY;
1702                 if (cq)
1703                         *cq = r;
1704         }
1705 
1706         spin_unlock_irq(mlx4_tlock(dev));
1707 
1708         return err;
1709 }
1710 
1711 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1712                                  enum res_srq_states state, struct res_srq **srq)
1713 {
1714         struct mlx4_priv *priv = mlx4_priv(dev);
1715         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1716         struct res_srq *r;
1717         int err = 0;
1718 
1719         spin_lock_irq(mlx4_tlock(dev));
1720         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1721         if (!r) {
1722                 err = -ENOENT;
1723         } else if (r->com.owner != slave) {
1724                 err = -EPERM;
1725         } else if (state == RES_SRQ_ALLOCATED) {
1726                 if (r->com.state != RES_SRQ_HW)
1727                         err = -EINVAL;
1728                 else if (atomic_read(&r->ref_count))
1729                         err = -EBUSY;
1730         } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1731                 err = -EINVAL;
1732         }
1733 
1734         if (!err) {
1735                 r->com.from_state = r->com.state;
1736                 r->com.to_state = state;
1737                 r->com.state = RES_SRQ_BUSY;
1738                 if (srq)
1739                         *srq = r;
1740         }
1741 
1742         spin_unlock_irq(mlx4_tlock(dev));
1743 
1744         return err;
1745 }
1746 
1747 static void res_abort_move(struct mlx4_dev *dev, int slave,
1748                            enum mlx4_resource type, int id)
1749 {
1750         struct mlx4_priv *priv = mlx4_priv(dev);
1751         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1752         struct res_common *r;
1753 
1754         spin_lock_irq(mlx4_tlock(dev));
1755         r = res_tracker_lookup(&tracker->res_tree[type], id);
1756         if (r && (r->owner == slave))
1757                 r->state = r->from_state;
1758         spin_unlock_irq(mlx4_tlock(dev));
1759 }
1760 
1761 static void res_end_move(struct mlx4_dev *dev, int slave,
1762                          enum mlx4_resource type, int id)
1763 {
1764         struct mlx4_priv *priv = mlx4_priv(dev);
1765         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1766         struct res_common *r;
1767 
1768         spin_lock_irq(mlx4_tlock(dev));
1769         r = res_tracker_lookup(&tracker->res_tree[type], id);
1770         if (r && (r->owner == slave))
1771                 r->state = r->to_state;
1772         spin_unlock_irq(mlx4_tlock(dev));
1773 }
1774 
1775 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1776 {
1777         return mlx4_is_qp_reserved(dev, qpn) &&
1778                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1779 }
1780 
1781 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1782 {
1783         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1784 }
1785 
1786 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1787                         u64 in_param, u64 *out_param)
1788 {
1789         int err;
1790         int count;
1791         int align;
1792         int base;
1793         int qpn;
1794         u8 flags;
1795 
1796         switch (op) {
1797         case RES_OP_RESERVE:
1798                 count = get_param_l(&in_param) & 0xffffff;
1799                 /* Turn off all unsupported QP allocation flags that the
1800                  * slave tries to set.
1801                  */
1802                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1803                 align = get_param_h(&in_param);
1804                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1805                 if (err)
1806                         return err;
1807 
1808                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1809                 if (err) {
1810                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1811                         return err;
1812                 }
1813 
1814                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1815                 if (err) {
1816                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1817                         __mlx4_qp_release_range(dev, base, count);
1818                         return err;
1819                 }
1820                 set_param_l(out_param, base);
1821                 break;
1822         case RES_OP_MAP_ICM:
1823                 qpn = get_param_l(&in_param) & 0x7fffff;
1824                 if (valid_reserved(dev, slave, qpn)) {
1825                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1826                         if (err)
1827                                 return err;
1828                 }
1829 
1830                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1831                                            NULL, 1);
1832                 if (err)
1833                         return err;
1834 
1835                 if (!fw_reserved(dev, qpn)) {
1836                         err = __mlx4_qp_alloc_icm(dev, qpn);
1837                         if (err) {
1838                                 res_abort_move(dev, slave, RES_QP, qpn);
1839                                 return err;
1840                         }
1841                 }
1842 
1843                 res_end_move(dev, slave, RES_QP, qpn);
1844                 break;
1845 
1846         default:
1847                 err = -EINVAL;
1848                 break;
1849         }
1850         return err;
1851 }
1852 
1853 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1854                          u64 in_param, u64 *out_param)
1855 {
1856         int err = -EINVAL;
1857         int base;
1858         int order;
1859 
1860         if (op != RES_OP_RESERVE_AND_MAP)
1861                 return err;
1862 
1863         order = get_param_l(&in_param);
1864 
1865         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1866         if (err)
1867                 return err;
1868 
1869         base = __mlx4_alloc_mtt_range(dev, order);
1870         if (base == -1) {
1871                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1872                 return -ENOMEM;
1873         }
1874 
1875         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1876         if (err) {
1877                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1878                 __mlx4_free_mtt_range(dev, base, order);
1879         } else {
1880                 set_param_l(out_param, base);
1881         }
1882 
1883         return err;
1884 }
1885 
1886 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1887                          u64 in_param, u64 *out_param)
1888 {
1889         int err = -EINVAL;
1890         int index;
1891         int id;
1892         struct res_mpt *mpt;
1893 
1894         switch (op) {
1895         case RES_OP_RESERVE:
1896                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1897                 if (err)
1898                         break;
1899 
1900                 index = __mlx4_mpt_reserve(dev);
1901                 if (index == -1) {
1902                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1903                         break;
1904                 }
1905                 id = index & mpt_mask(dev);
1906 
1907                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1908                 if (err) {
1909                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1910                         __mlx4_mpt_release(dev, index);
1911                         break;
1912                 }
1913                 set_param_l(out_param, index);
1914                 break;
1915         case RES_OP_MAP_ICM:
1916                 index = get_param_l(&in_param);
1917                 id = index & mpt_mask(dev);
1918                 err = mr_res_start_move_to(dev, slave, id,
1919                                            RES_MPT_MAPPED, &mpt);
1920                 if (err)
1921                         return err;
1922 
1923                 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1924                 if (err) {
1925                         res_abort_move(dev, slave, RES_MPT, id);
1926                         return err;
1927                 }
1928 
1929                 res_end_move(dev, slave, RES_MPT, id);
1930                 break;
1931         }
1932         return err;
1933 }
1934 
1935 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1936                         u64 in_param, u64 *out_param)
1937 {
1938         int cqn;
1939         int err;
1940 
1941         switch (op) {
1942         case RES_OP_RESERVE_AND_MAP:
1943                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1944                 if (err)
1945                         break;
1946 
1947                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1948                 if (err) {
1949                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1950                         break;
1951                 }
1952 
1953                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1954                 if (err) {
1955                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1956                         __mlx4_cq_free_icm(dev, cqn);
1957                         break;
1958                 }
1959 
1960                 set_param_l(out_param, cqn);
1961                 break;
1962 
1963         default:
1964                 err = -EINVAL;
1965         }
1966 
1967         return err;
1968 }
1969 
1970 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1971                          u64 in_param, u64 *out_param)
1972 {
1973         int srqn;
1974         int err;
1975 
1976         switch (op) {
1977         case RES_OP_RESERVE_AND_MAP:
1978                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1979                 if (err)
1980                         break;
1981 
1982                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1983                 if (err) {
1984                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1985                         break;
1986                 }
1987 
1988                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1989                 if (err) {
1990                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1991                         __mlx4_srq_free_icm(dev, srqn);
1992                         break;
1993                 }
1994 
1995                 set_param_l(out_param, srqn);
1996                 break;
1997 
1998         default:
1999                 err = -EINVAL;
2000         }
2001 
2002         return err;
2003 }
2004 
2005 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
2006                                      u8 smac_index, u64 *mac)
2007 {
2008         struct mlx4_priv *priv = mlx4_priv(dev);
2009         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2010         struct list_head *mac_list =
2011                 &tracker->slave_list[slave].res_list[RES_MAC];
2012         struct mac_res *res, *tmp;
2013 
2014         list_for_each_entry_safe(res, tmp, mac_list, list) {
2015                 if (res->smac_index == smac_index && res->port == (u8) port) {
2016                         *mac = res->mac;
2017                         return 0;
2018                 }
2019         }
2020         return -ENOENT;
2021 }
2022 
2023 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
2024 {
2025         struct mlx4_priv *priv = mlx4_priv(dev);
2026         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2027         struct list_head *mac_list =
2028                 &tracker->slave_list[slave].res_list[RES_MAC];
2029         struct mac_res *res, *tmp;
2030 
2031         list_for_each_entry_safe(res, tmp, mac_list, list) {
2032                 if (res->mac == mac && res->port == (u8) port) {
2033                         /* mac found. update ref count */
2034                         ++res->ref_count;
2035                         return 0;
2036                 }
2037         }
2038 
2039         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2040                 return -EINVAL;
2041         res = kzalloc(sizeof(*res), GFP_KERNEL);
2042         if (!res) {
2043                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2044                 return -ENOMEM;
2045         }
2046         res->mac = mac;
2047         res->port = (u8) port;
2048         res->smac_index = smac_index;
2049         res->ref_count = 1;
2050         list_add_tail(&res->list,
2051                       &tracker->slave_list[slave].res_list[RES_MAC]);
2052         return 0;
2053 }
2054 
2055 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2056                                int port)
2057 {
2058         struct mlx4_priv *priv = mlx4_priv(dev);
2059         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2060         struct list_head *mac_list =
2061                 &tracker->slave_list[slave].res_list[RES_MAC];
2062         struct mac_res *res, *tmp;
2063 
2064         list_for_each_entry_safe(res, tmp, mac_list, list) {
2065                 if (res->mac == mac && res->port == (u8) port) {
2066                         if (!--res->ref_count) {
2067                                 list_del(&res->list);
2068                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2069                                 kfree(res);
2070                         }
2071                         break;
2072                 }
2073         }
2074 }
2075 
2076 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2077 {
2078         struct mlx4_priv *priv = mlx4_priv(dev);
2079         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2080         struct list_head *mac_list =
2081                 &tracker->slave_list[slave].res_list[RES_MAC];
2082         struct mac_res *res, *tmp;
2083         int i;
2084 
2085         list_for_each_entry_safe(res, tmp, mac_list, list) {
2086                 list_del(&res->list);
2087                 /* dereference the mac the num times the slave referenced it */
2088                 for (i = 0; i < res->ref_count; i++)
2089                         __mlx4_unregister_mac(dev, res->port, res->mac);
2090                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2091                 kfree(res);
2092         }
2093 }
2094 
2095 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2096                          u64 in_param, u64 *out_param, int in_port)
2097 {
2098         int err = -EINVAL;
2099         int port;
2100         u64 mac;
2101         u8 smac_index;
2102 
2103         if (op != RES_OP_RESERVE_AND_MAP)
2104                 return err;
2105 
2106         port = !in_port ? get_param_l(out_param) : in_port;
2107         port = mlx4_slave_convert_port(
2108                         dev, slave, port);
2109 
2110         if (port < 0)
2111                 return -EINVAL;
2112         mac = in_param;
2113 
2114         err = __mlx4_register_mac(dev, port, mac);
2115         if (err >= 0) {
2116                 smac_index = err;
2117                 set_param_l(out_param, err);
2118                 err = 0;
2119         }
2120 
2121         if (!err) {
2122                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2123                 if (err)
2124                         __mlx4_unregister_mac(dev, port, mac);
2125         }
2126         return err;
2127 }
2128 
2129 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2130                              int port, int vlan_index)
2131 {
2132         struct mlx4_priv *priv = mlx4_priv(dev);
2133         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2134         struct list_head *vlan_list =
2135                 &tracker->slave_list[slave].res_list[RES_VLAN];
2136         struct vlan_res *res, *tmp;
2137 
2138         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2139                 if (res->vlan == vlan && res->port == (u8) port) {
2140                         /* vlan found. update ref count */
2141                         ++res->ref_count;
2142                         return 0;
2143                 }
2144         }
2145 
2146         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2147                 return -EINVAL;
2148         res = kzalloc(sizeof(*res), GFP_KERNEL);
2149         if (!res) {
2150                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2151                 return -ENOMEM;
2152         }
2153         res->vlan = vlan;
2154         res->port = (u8) port;
2155         res->vlan_index = vlan_index;
2156         res->ref_count = 1;
2157         list_add_tail(&res->list,
2158                       &tracker->slave_list[slave].res_list[RES_VLAN]);
2159         return 0;
2160 }
2161 
2162 
2163 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2164                                 int port)
2165 {
2166         struct mlx4_priv *priv = mlx4_priv(dev);
2167         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2168         struct list_head *vlan_list =
2169                 &tracker->slave_list[slave].res_list[RES_VLAN];
2170         struct vlan_res *res, *tmp;
2171 
2172         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2173                 if (res->vlan == vlan && res->port == (u8) port) {
2174                         if (!--res->ref_count) {
2175                                 list_del(&res->list);
2176                                 mlx4_release_resource(dev, slave, RES_VLAN,
2177                                                       1, port);
2178                                 kfree(res);
2179                         }
2180                         break;
2181                 }
2182         }
2183 }
2184 
2185 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2186 {
2187         struct mlx4_priv *priv = mlx4_priv(dev);
2188         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2189         struct list_head *vlan_list =
2190                 &tracker->slave_list[slave].res_list[RES_VLAN];
2191         struct vlan_res *res, *tmp;
2192         int i;
2193 
2194         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2195                 list_del(&res->list);
2196                 /* dereference the vlan the num times the slave referenced it */
2197                 for (i = 0; i < res->ref_count; i++)
2198                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
2199                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2200                 kfree(res);
2201         }
2202 }
2203 
2204 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2205                           u64 in_param, u64 *out_param, int in_port)
2206 {
2207         struct mlx4_priv *priv = mlx4_priv(dev);
2208         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2209         int err;
2210         u16 vlan;
2211         int vlan_index;
2212         int port;
2213 
2214         port = !in_port ? get_param_l(out_param) : in_port;
2215 
2216         if (!port || op != RES_OP_RESERVE_AND_MAP)
2217                 return -EINVAL;
2218 
2219         port = mlx4_slave_convert_port(
2220                         dev, slave, port);
2221 
2222         if (port < 0)
2223                 return -EINVAL;
2224         /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2225         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2226                 slave_state[slave].old_vlan_api = true;
2227                 return 0;
2228         }
2229 
2230         vlan = (u16) in_param;
2231 
2232         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2233         if (!err) {
2234                 set_param_l(out_param, (u32) vlan_index);
2235                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2236                 if (err)
2237                         __mlx4_unregister_vlan(dev, port, vlan);
2238         }
2239         return err;
2240 }
2241 
2242 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2243                              u64 in_param, u64 *out_param, int port)
2244 {
2245         u32 index;
2246         int err;
2247 
2248         if (op != RES_OP_RESERVE)
2249                 return -EINVAL;
2250 
2251         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2252         if (err)
2253                 return err;
2254 
2255         err = __mlx4_counter_alloc(dev, &index);
2256         if (err) {
2257                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2258                 return err;
2259         }
2260 
2261         err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2262         if (err) {
2263                 __mlx4_counter_free(dev, index);
2264                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2265         } else {
2266                 set_param_l(out_param, index);
2267         }
2268 
2269         return err;
2270 }
2271 
2272 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2273                            u64 in_param, u64 *out_param)
2274 {
2275         u32 xrcdn;
2276         int err;
2277 
2278         if (op != RES_OP_RESERVE)
2279                 return -EINVAL;
2280 
2281         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2282         if (err)
2283                 return err;
2284 
2285         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2286         if (err)
2287                 __mlx4_xrcd_free(dev, xrcdn);
2288         else
2289                 set_param_l(out_param, xrcdn);
2290 
2291         return err;
2292 }
2293 
2294 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2295                            struct mlx4_vhcr *vhcr,
2296                            struct mlx4_cmd_mailbox *inbox,
2297                            struct mlx4_cmd_mailbox *outbox,
2298                            struct mlx4_cmd_info *cmd)
2299 {
2300         int err;
2301         int alop = vhcr->op_modifier;
2302 
2303         switch (vhcr->in_modifier & 0xFF) {
2304         case RES_QP:
2305                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2306                                    vhcr->in_param, &vhcr->out_param);
2307                 break;
2308 
2309         case RES_MTT:
2310                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2311                                     vhcr->in_param, &vhcr->out_param);
2312                 break;
2313 
2314         case RES_MPT:
2315                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2316                                     vhcr->in_param, &vhcr->out_param);
2317                 break;
2318 
2319         case RES_CQ:
2320                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2321                                    vhcr->in_param, &vhcr->out_param);
2322                 break;
2323 
2324         case RES_SRQ:
2325                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2326                                     vhcr->in_param, &vhcr->out_param);
2327                 break;
2328 
2329         case RES_MAC:
2330                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2331                                     vhcr->in_param, &vhcr->out_param,
2332                                     (vhcr->in_modifier >> 8) & 0xFF);
2333                 break;
2334 
2335         case RES_VLAN:
2336                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2337                                      vhcr->in_param, &vhcr->out_param,
2338                                      (vhcr->in_modifier >> 8) & 0xFF);
2339                 break;
2340 
2341         case RES_COUNTER:
2342                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2343                                         vhcr->in_param, &vhcr->out_param, 0);
2344                 break;
2345 
2346         case RES_XRCD:
2347                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2348                                       vhcr->in_param, &vhcr->out_param);
2349                 break;
2350 
2351         default:
2352                 err = -EINVAL;
2353                 break;
2354         }
2355 
2356         return err;
2357 }
2358 
2359 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2360                        u64 in_param)
2361 {
2362         int err;
2363         int count;
2364         int base;
2365         int qpn;
2366 
2367         switch (op) {
2368         case RES_OP_RESERVE:
2369                 base = get_param_l(&in_param) & 0x7fffff;
2370                 count = get_param_h(&in_param);
2371                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2372                 if (err)
2373                         break;
2374                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2375                 __mlx4_qp_release_range(dev, base, count);
2376                 break;
2377         case RES_OP_MAP_ICM:
2378                 qpn = get_param_l(&in_param) & 0x7fffff;
2379                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2380                                            NULL, 0);
2381                 if (err)
2382                         return err;
2383 
2384                 if (!fw_reserved(dev, qpn))
2385                         __mlx4_qp_free_icm(dev, qpn);
2386 
2387                 res_end_move(dev, slave, RES_QP, qpn);
2388 
2389                 if (valid_reserved(dev, slave, qpn))
2390                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2391                 break;
2392         default:
2393                 err = -EINVAL;
2394                 break;
2395         }
2396         return err;
2397 }
2398 
2399 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2400                         u64 in_param, u64 *out_param)
2401 {
2402         int err = -EINVAL;
2403         int base;
2404         int order;
2405 
2406         if (op != RES_OP_RESERVE_AND_MAP)
2407                 return err;
2408 
2409         base = get_param_l(&in_param);
2410         order = get_param_h(&in_param);
2411         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2412         if (!err) {
2413                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2414                 __mlx4_free_mtt_range(dev, base, order);
2415         }
2416         return err;
2417 }
2418 
2419 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2420                         u64 in_param)
2421 {
2422         int err = -EINVAL;
2423         int index;
2424         int id;
2425         struct res_mpt *mpt;
2426 
2427         switch (op) {
2428         case RES_OP_RESERVE:
2429                 index = get_param_l(&in_param);
2430                 id = index & mpt_mask(dev);
2431                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2432                 if (err)
2433                         break;
2434                 index = mpt->key;
2435                 put_res(dev, slave, id, RES_MPT);
2436 
2437                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2438                 if (err)
2439                         break;
2440                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2441                 __mlx4_mpt_release(dev, index);
2442                 break;
2443         case RES_OP_MAP_ICM:
2444                 index = get_param_l(&in_param);
2445                 id = index & mpt_mask(dev);
2446                 err = mr_res_start_move_to(dev, slave, id,
2447                                            RES_MPT_RESERVED, &mpt);
2448                 if (err)
2449                         return err;
2450 
2451                 __mlx4_mpt_free_icm(dev, mpt->key);
2452                 res_end_move(dev, slave, RES_MPT, id);
2453                 break;
2454         default:
2455                 err = -EINVAL;
2456                 break;
2457         }
2458         return err;
2459 }
2460 
2461 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2462                        u64 in_param, u64 *out_param)
2463 {
2464         int cqn;
2465         int err;
2466 
2467         switch (op) {
2468         case RES_OP_RESERVE_AND_MAP:
2469                 cqn = get_param_l(&in_param);
2470                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2471                 if (err)
2472                         break;
2473 
2474                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2475                 __mlx4_cq_free_icm(dev, cqn);
2476                 break;
2477 
2478         default:
2479                 err = -EINVAL;
2480                 break;
2481         }
2482 
2483         return err;
2484 }
2485 
2486 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2487                         u64 in_param, u64 *out_param)
2488 {
2489         int srqn;
2490         int err;
2491 
2492         switch (op) {
2493         case RES_OP_RESERVE_AND_MAP:
2494                 srqn = get_param_l(&in_param);
2495                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2496                 if (err)
2497                         break;
2498 
2499                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2500                 __mlx4_srq_free_icm(dev, srqn);
2501                 break;
2502 
2503         default:
2504                 err = -EINVAL;
2505                 break;
2506         }
2507 
2508         return err;
2509 }
2510 
2511 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2512                             u64 in_param, u64 *out_param, int in_port)
2513 {
2514         int port;
2515         int err = 0;
2516 
2517         switch (op) {
2518         case RES_OP_RESERVE_AND_MAP:
2519                 port = !in_port ? get_param_l(out_param) : in_port;
2520                 port = mlx4_slave_convert_port(
2521                                 dev, slave, port);
2522 
2523                 if (port < 0)
2524                         return -EINVAL;
2525                 mac_del_from_slave(dev, slave, in_param, port);
2526                 __mlx4_unregister_mac(dev, port, in_param);
2527                 break;
2528         default:
2529                 err = -EINVAL;
2530                 break;
2531         }
2532 
2533         return err;
2534 
2535 }
2536 
2537 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2538                             u64 in_param, u64 *out_param, int port)
2539 {
2540         struct mlx4_priv *priv = mlx4_priv(dev);
2541         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2542         int err = 0;
2543 
2544         port = mlx4_slave_convert_port(
2545                         dev, slave, port);
2546 
2547         if (port < 0)
2548                 return -EINVAL;
2549         switch (op) {
2550         case RES_OP_RESERVE_AND_MAP:
2551                 if (slave_state[slave].old_vlan_api)
2552                         return 0;
2553                 if (!port)
2554                         return -EINVAL;
2555                 vlan_del_from_slave(dev, slave, in_param, port);
2556                 __mlx4_unregister_vlan(dev, port, in_param);
2557                 break;
2558         default:
2559                 err = -EINVAL;
2560                 break;
2561         }
2562 
2563         return err;
2564 }
2565 
2566 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2567                             u64 in_param, u64 *out_param)
2568 {
2569         int index;
2570         int err;
2571 
2572         if (op != RES_OP_RESERVE)
2573                 return -EINVAL;
2574 
2575         index = get_param_l(&in_param);
2576         if (index == MLX4_SINK_COUNTER_INDEX(dev))
2577                 return 0;
2578 
2579         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2580         if (err)
2581                 return err;
2582 
2583         __mlx4_counter_free(dev, index);
2584         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2585 
2586         return err;
2587 }
2588 
2589 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2590                           u64 in_param, u64 *out_param)
2591 {
2592         int xrcdn;
2593         int err;
2594 
2595         if (op != RES_OP_RESERVE)
2596                 return -EINVAL;
2597 
2598         xrcdn = get_param_l(&in_param);
2599         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2600         if (err)
2601                 return err;
2602 
2603         __mlx4_xrcd_free(dev, xrcdn);
2604 
2605         return err;
2606 }
2607 
2608 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2609                           struct mlx4_vhcr *vhcr,
2610                           struct mlx4_cmd_mailbox *inbox,
2611                           struct mlx4_cmd_mailbox *outbox,
2612                           struct mlx4_cmd_info *cmd)
2613 {
2614         int err = -EINVAL;
2615         int alop = vhcr->op_modifier;
2616 
2617         switch (vhcr->in_modifier & 0xFF) {
2618         case RES_QP:
2619                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2620                                   vhcr->in_param);
2621                 break;
2622 
2623         case RES_MTT:
2624                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2625                                    vhcr->in_param, &vhcr->out_param);
2626                 break;
2627 
2628         case RES_MPT:
2629                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2630                                    vhcr->in_param);
2631                 break;
2632 
2633         case RES_CQ:
2634                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2635                                   vhcr->in_param, &vhcr->out_param);
2636                 break;
2637 
2638         case RES_SRQ:
2639                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2640                                    vhcr->in_param, &vhcr->out_param);
2641                 break;
2642 
2643         case RES_MAC:
2644                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2645                                    vhcr->in_param, &vhcr->out_param,
2646                                    (vhcr->in_modifier >> 8) & 0xFF);
2647                 break;
2648 
2649         case RES_VLAN:
2650                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2651                                     vhcr->in_param, &vhcr->out_param,
2652                                     (vhcr->in_modifier >> 8) & 0xFF);
2653                 break;
2654 
2655         case RES_COUNTER:
2656                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2657                                        vhcr->in_param, &vhcr->out_param);
2658                 break;
2659 
2660         case RES_XRCD:
2661                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2662                                      vhcr->in_param, &vhcr->out_param);
2663 
2664         default:
2665                 break;
2666         }
2667         return err;
2668 }
2669 
2670 /* ugly but other choices are uglier */
2671 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2672 {
2673         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2674 }
2675 
2676 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2677 {
2678         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2679 }
2680 
2681 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2682 {
2683         return be32_to_cpu(mpt->mtt_sz);
2684 }
2685 
2686 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2687 {
2688         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2689 }
2690 
2691 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2692 {
2693         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2694 }
2695 
2696 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2697 {
2698         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2699 }
2700 
2701 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2702 {
2703         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2704 }
2705 
2706 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2707 {
2708         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2709 }
2710 
2711 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2712 {
2713         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2714 }
2715 
2716 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2717 {
2718         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2719         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2720         int log_sq_sride = qpc->sq_size_stride & 7;
2721         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2722         int log_rq_stride = qpc->rq_size_stride & 7;
2723         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2724         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2725         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2726         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2727         int sq_size;
2728         int rq_size;
2729         int total_pages;
2730         int total_mem;
2731         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2732         int tot;
2733 
2734         sq_size = 1 << (log_sq_size + log_sq_sride + 4);
2735         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2736         total_mem = sq_size + rq_size;
2737         tot = (total_mem + (page_offset << 6)) >> page_shift;
2738         total_pages = !tot ? 1 : roundup_pow_of_two(tot);
2739 
2740         return total_pages;
2741 }
2742 
2743 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2744                            int size, struct res_mtt *mtt)
2745 {
2746         int res_start = mtt->com.res_id;
2747         int res_size = (1 << mtt->order);
2748 
2749         if (start < res_start || start + size > res_start + res_size)
2750                 return -EPERM;
2751         return 0;
2752 }
2753 
2754 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2755                            struct mlx4_vhcr *vhcr,
2756                            struct mlx4_cmd_mailbox *inbox,
2757                            struct mlx4_cmd_mailbox *outbox,
2758                            struct mlx4_cmd_info *cmd)
2759 {
2760         int err;
2761         int index = vhcr->in_modifier;
2762         struct res_mtt *mtt;
2763         struct res_mpt *mpt = NULL;
2764         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2765         int phys;
2766         int id;
2767         u32 pd;
2768         int pd_slave;
2769 
2770         id = index & mpt_mask(dev);
2771         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2772         if (err)
2773                 return err;
2774 
2775         /* Disable memory windows for VFs. */
2776         if (!mr_is_region(inbox->buf)) {
2777                 err = -EPERM;
2778                 goto ex_abort;
2779         }
2780 
2781         /* Make sure that the PD bits related to the slave id are zeros. */
2782         pd = mr_get_pd(inbox->buf);
2783         pd_slave = (pd >> 17) & 0x7f;
2784         if (pd_slave != 0 && --pd_slave != slave) {
2785                 err = -EPERM;
2786                 goto ex_abort;
2787         }
2788 
2789         if (mr_is_fmr(inbox->buf)) {
2790                 /* FMR and Bind Enable are forbidden in slave devices. */
2791                 if (mr_is_bind_enabled(inbox->buf)) {
2792                         err = -EPERM;
2793                         goto ex_abort;
2794                 }
2795                 /* FMR and Memory Windows are also forbidden. */
2796                 if (!mr_is_region(inbox->buf)) {
2797                         err = -EPERM;
2798                         goto ex_abort;
2799                 }
2800         }
2801 
2802         phys = mr_phys_mpt(inbox->buf);
2803         if (!phys) {
2804                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2805                 if (err)
2806                         goto ex_abort;
2807 
2808                 err = check_mtt_range(dev, slave, mtt_base,
2809                                       mr_get_mtt_size(inbox->buf), mtt);
2810                 if (err)
2811                         goto ex_put;
2812 
2813                 mpt->mtt = mtt;
2814         }
2815 
2816         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2817         if (err)
2818                 goto ex_put;
2819 
2820         if (!phys) {
2821                 atomic_inc(&mtt->ref_count);
2822                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2823         }
2824 
2825         res_end_move(dev, slave, RES_MPT, id);
2826         return 0;
2827 
2828 ex_put:
2829         if (!phys)
2830                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2831 ex_abort:
2832         res_abort_move(dev, slave, RES_MPT, id);
2833 
2834         return err;
2835 }
2836 
2837 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2838                            struct mlx4_vhcr *vhcr,
2839                            struct mlx4_cmd_mailbox *inbox,
2840                            struct mlx4_cmd_mailbox *outbox,
2841                            struct mlx4_cmd_info *cmd)
2842 {
2843         int err;
2844         int index = vhcr->in_modifier;
2845         struct res_mpt *mpt;
2846         int id;
2847 
2848         id = index & mpt_mask(dev);
2849         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2850         if (err)
2851                 return err;
2852 
2853         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2854         if (err)
2855                 goto ex_abort;
2856 
2857         if (mpt->mtt)
2858                 atomic_dec(&mpt->mtt->ref_count);
2859 
2860         res_end_move(dev, slave, RES_MPT, id);
2861         return 0;
2862 
2863 ex_abort:
2864         res_abort_move(dev, slave, RES_MPT, id);
2865 
2866         return err;
2867 }
2868 
2869 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2870                            struct mlx4_vhcr *vhcr,
2871                            struct mlx4_cmd_mailbox *inbox,
2872                            struct mlx4_cmd_mailbox *outbox,
2873                            struct mlx4_cmd_info *cmd)
2874 {
2875         int err;
2876         int index = vhcr->in_modifier;
2877         struct res_mpt *mpt;
2878         int id;
2879 
2880         id = index & mpt_mask(dev);
2881         err = get_res(dev, slave, id, RES_MPT, &mpt);
2882         if (err)
2883                 return err;
2884 
2885         if (mpt->com.from_state == RES_MPT_MAPPED) {
2886                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2887                  * that, the VF must read the MPT. But since the MPT entry memory is not
2888                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2889                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2890                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2891                  * ownership fofollowing the change. The change here allows the VF to
2892                  * perform QUERY_MPT also when the entry is in SW ownership.
2893                  */
2894                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2895                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2896                                         mpt->key, NULL);
2897 
2898                 if (NULL == mpt_entry || NULL == outbox->buf) {
2899                         err = -EINVAL;
2900                         goto out;
2901                 }
2902 
2903                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2904 
2905                 err = 0;
2906         } else if (mpt->com.from_state == RES_MPT_HW) {
2907                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2908         } else {
2909                 err = -EBUSY;
2910                 goto out;
2911         }
2912 
2913 
2914 out:
2915         put_res(dev, slave, id, RES_MPT);
2916         return err;
2917 }
2918 
2919 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2920 {
2921         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2922 }
2923 
2924 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2925 {
2926         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2927 }
2928 
2929 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2930 {
2931         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2932 }
2933 
2934 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2935                                   struct mlx4_qp_context *context)
2936 {
2937         u32 qpn = vhcr->in_modifier & 0xffffff;
2938         u32 qkey = 0;
2939 
2940         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2941                 return;
2942 
2943         /* adjust qkey in qp context */
2944         context->qkey = cpu_to_be32(qkey);
2945 }
2946 
2947 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2948                                  struct mlx4_qp_context *qpc,
2949                                  struct mlx4_cmd_mailbox *inbox);
2950 
2951 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2952                              struct mlx4_vhcr *vhcr,
2953                              struct mlx4_cmd_mailbox *inbox,
2954                              struct mlx4_cmd_mailbox *outbox,
2955                              struct mlx4_cmd_info *cmd)
2956 {
2957         int err;
2958         int qpn = vhcr->in_modifier & 0x7fffff;
2959         struct res_mtt *mtt;
2960         struct res_qp *qp;
2961         struct mlx4_qp_context *qpc = inbox->buf + 8;
2962         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2963         int mtt_size = qp_get_mtt_size(qpc);
2964         struct res_cq *rcq;
2965         struct res_cq *scq;
2966         int rcqn = qp_get_rcqn(qpc);
2967         int scqn = qp_get_scqn(qpc);
2968         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2969         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2970         struct res_srq *srq;
2971         int local_qpn = vhcr->in_modifier & 0xffffff;
2972 
2973         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2974         if (err)
2975                 return err;
2976 
2977         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2978         if (err)
2979                 return err;
2980         qp->local_qpn = local_qpn;
2981         qp->sched_queue = 0;
2982         qp->param3 = 0;
2983         qp->vlan_control = 0;
2984         qp->fvl_rx = 0;
2985         qp->pri_path_fl = 0;
2986         qp->vlan_index = 0;
2987         qp->feup = 0;
2988         qp->qpc_flags = be32_to_cpu(qpc->flags);
2989 
2990         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2991         if (err)
2992                 goto ex_abort;
2993 
2994         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2995         if (err)
2996                 goto ex_put_mtt;
2997 
2998         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2999         if (err)
3000                 goto ex_put_mtt;
3001 
3002         if (scqn != rcqn) {
3003                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
3004                 if (err)
3005                         goto ex_put_rcq;
3006         } else
3007                 scq = rcq;
3008 
3009         if (use_srq) {
3010                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3011                 if (err)
3012                         goto ex_put_scq;
3013         }
3014 
3015         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3016         update_pkey_index(dev, slave, inbox);
3017         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3018         if (err)
3019                 goto ex_put_srq;
3020         atomic_inc(&mtt->ref_count);
3021         qp->mtt = mtt;
3022         atomic_inc(&rcq->ref_count);
3023         qp->rcq = rcq;
3024         atomic_inc(&scq->ref_count);
3025         qp->scq = scq;
3026 
3027         if (scqn != rcqn)
3028                 put_res(dev, slave, scqn, RES_CQ);
3029 
3030         if (use_srq) {
3031                 atomic_inc(&srq->ref_count);
3032                 put_res(dev, slave, srqn, RES_SRQ);
3033                 qp->srq = srq;
3034         }
3035 
3036         /* Save param3 for dynamic changes from VST back to VGT */
3037         qp->param3 = qpc->param3;
3038         put_res(dev, slave, rcqn, RES_CQ);
3039         put_res(dev, slave, mtt_base, RES_MTT);
3040         res_end_move(dev, slave, RES_QP, qpn);
3041 
3042         return 0;
3043 
3044 ex_put_srq:
3045         if (use_srq)
3046                 put_res(dev, slave, srqn, RES_SRQ);
3047 ex_put_scq:
3048         if (scqn != rcqn)
3049                 put_res(dev, slave, scqn, RES_CQ);
3050 ex_put_rcq:
3051         put_res(dev, slave, rcqn, RES_CQ);
3052 ex_put_mtt:
3053         put_res(dev, slave, mtt_base, RES_MTT);
3054 ex_abort:
3055         res_abort_move(dev, slave, RES_QP, qpn);
3056 
3057         return err;
3058 }
3059 
3060 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3061 {
3062         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3063 }
3064 
3065 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3066 {
3067         int log_eq_size = eqc->log_eq_size & 0x1f;
3068         int page_shift = (eqc->log_page_size & 0x3f) + 12;
3069 
3070         if (log_eq_size + 5 < page_shift)
3071                 return 1;
3072 
3073         return 1 << (log_eq_size + 5 - page_shift);
3074 }
3075 
3076 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3077 {
3078         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3079 }
3080 
3081 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3082 {
3083         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3084         int page_shift = (cqc->log_page_size & 0x3f) + 12;
3085 
3086         if (log_cq_size + 5 < page_shift)
3087                 return 1;
3088 
3089         return 1 << (log_cq_size + 5 - page_shift);
3090 }
3091 
3092 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3093                           struct mlx4_vhcr *vhcr,
3094                           struct mlx4_cmd_mailbox *inbox,
3095                           struct mlx4_cmd_mailbox *outbox,
3096                           struct mlx4_cmd_info *cmd)
3097 {
3098         int err;
3099         int eqn = vhcr->in_modifier;
3100         int res_id = (slave << 10) | eqn;
3101         struct mlx4_eq_context *eqc = inbox->buf;
3102         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3103         int mtt_size = eq_get_mtt_size(eqc);
3104         struct res_eq *eq;
3105         struct res_mtt *mtt;
3106 
3107         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3108         if (err)
3109                 return err;
3110         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3111         if (err)
3112                 goto out_add;
3113 
3114         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3115         if (err)
3116                 goto out_move;
3117 
3118         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3119         if (err)
3120                 goto out_put;
3121 
3122         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3123         if (err)
3124                 goto out_put;
3125 
3126         atomic_inc(&mtt->ref_count);
3127         eq->mtt = mtt;
3128         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3129         res_end_move(dev, slave, RES_EQ, res_id);
3130         return 0;
3131 
3132 out_put:
3133         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3134 out_move:
3135         res_abort_move(dev, slave, RES_EQ, res_id);
3136 out_add:
3137         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3138         return err;
3139 }
3140 
3141 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3142                             struct mlx4_vhcr *vhcr,
3143                             struct mlx4_cmd_mailbox *inbox,
3144                             struct mlx4_cmd_mailbox *outbox,
3145                             struct mlx4_cmd_info *cmd)
3146 {
3147         int err;
3148         u8 get = vhcr->op_modifier;
3149 
3150         if (get != 1)
3151                 return -EPERM;
3152 
3153         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3154 
3155         return err;
3156 }
3157 
3158 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3159                               int len, struct res_mtt **res)
3160 {
3161         struct mlx4_priv *priv = mlx4_priv(dev);
3162         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3163         struct res_mtt *mtt;
3164         int err = -EINVAL;
3165 
3166         spin_lock_irq(mlx4_tlock(dev));
3167         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3168                             com.list) {
3169                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3170                         *res = mtt;
3171                         mtt->com.from_state = mtt->com.state;
3172                         mtt->com.state = RES_MTT_BUSY;
3173                         err = 0;
3174                         break;
3175                 }
3176         }
3177         spin_unlock_irq(mlx4_tlock(dev));
3178 
3179         return err;
3180 }
3181 
3182 static int verify_qp_parameters(struct mlx4_dev *dev,
3183                                 struct mlx4_vhcr *vhcr,
3184                                 struct mlx4_cmd_mailbox *inbox,
3185                                 enum qp_transition transition, u8 slave)
3186 {
3187         u32                     qp_type;
3188         u32                     qpn;
3189         struct mlx4_qp_context  *qp_ctx;
3190         enum mlx4_qp_optpar     optpar;
3191         int port;
3192         int num_gids;
3193 
3194         qp_ctx  = inbox->buf + 8;
3195         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3196         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
3197 
3198         if (slave != mlx4_master_func_num(dev)) {
3199                 qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
3200                 /* setting QP rate-limit is disallowed for VFs */
3201                 if (qp_ctx->rate_limit_params)
3202                         return -EPERM;
3203         }
3204 
3205         switch (qp_type) {
3206         case MLX4_QP_ST_RC:
3207         case MLX4_QP_ST_XRC:
3208         case MLX4_QP_ST_UC:
3209                 switch (transition) {
3210                 case QP_TRANS_INIT2RTR:
3211                 case QP_TRANS_RTR2RTS:
3212                 case QP_TRANS_RTS2RTS:
3213                 case QP_TRANS_SQD2SQD:
3214                 case QP_TRANS_SQD2RTS:
3215                         if (slave != mlx4_master_func_num(dev)) {
3216                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3217                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3218                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3219                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3220                                         else
3221                                                 num_gids = 1;
3222                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
3223                                                 return -EINVAL;
3224                                 }
3225                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3226                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3227                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3228                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3229                                         else
3230                                                 num_gids = 1;
3231                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
3232                                                 return -EINVAL;
3233                                 }
3234                         }
3235                         break;
3236                 default:
3237                         break;
3238                 }
3239                 break;
3240 
3241         case MLX4_QP_ST_MLX:
3242                 qpn = vhcr->in_modifier & 0x7fffff;
3243                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3244                 if (transition == QP_TRANS_INIT2RTR &&
3245                     slave != mlx4_master_func_num(dev) &&
3246                     mlx4_is_qp_reserved(dev, qpn) &&
3247                     !mlx4_vf_smi_enabled(dev, slave, port)) {
3248                         /* only enabled VFs may create MLX proxy QPs */
3249                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3250                                  __func__, slave, port);
3251                         return -EPERM;
3252                 }
3253                 break;
3254 
3255         default:
3256                 break;
3257         }
3258 
3259         return 0;
3260 }
3261 
3262 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3263                            struct mlx4_vhcr *vhcr,
3264                            struct mlx4_cmd_mailbox *inbox,
3265                            struct mlx4_cmd_mailbox *outbox,
3266                            struct mlx4_cmd_info *cmd)
3267 {
3268         struct mlx4_mtt mtt;
3269         __be64 *page_list = inbox->buf;
3270         u64 *pg_list = (u64 *)page_list;
3271         int i;
3272         struct res_mtt *rmtt = NULL;
3273         int start = be64_to_cpu(page_list[0]);
3274         int npages = vhcr->in_modifier;
3275         int err;
3276 
3277         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3278         if (err)
3279                 return err;
3280 
3281         /* Call the SW implementation of write_mtt:
3282          * - Prepare a dummy mtt struct
3283          * - Translate inbox contents to simple addresses in host endianness */
3284         mtt.offset = 0;  /* TBD this is broken but I don't handle it since
3285                             we don't really use it */
3286         mtt.order = 0;
3287         mtt.page_shift = 0;
3288         for (i = 0; i < npages; ++i)
3289                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3290 
3291         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3292                                ((u64 *)page_list + 2));
3293 
3294         if (rmtt)
3295                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3296 
3297         return err;
3298 }
3299 
3300 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3301                           struct mlx4_vhcr *vhcr,
3302                           struct mlx4_cmd_mailbox *inbox,
3303                           struct mlx4_cmd_mailbox *outbox,
3304                           struct mlx4_cmd_info *cmd)
3305 {
3306         int eqn = vhcr->in_modifier;
3307         int res_id = eqn | (slave << 10);
3308         struct res_eq *eq;
3309         int err;
3310 
3311         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3312         if (err)
3313                 return err;
3314 
3315         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3316         if (err)
3317                 goto ex_abort;
3318 
3319         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3320         if (err)
3321                 goto ex_put;
3322 
3323         atomic_dec(&eq->mtt->ref_count);
3324         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3325         res_end_move(dev, slave, RES_EQ, res_id);
3326         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3327 
3328         return 0;
3329 
3330 ex_put:
3331         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3332 ex_abort:
3333         res_abort_move(dev, slave, RES_EQ, res_id);
3334 
3335         return err;
3336 }
3337 
3338 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3339 {
3340         struct mlx4_priv *priv = mlx4_priv(dev);
3341         struct mlx4_slave_event_eq_info *event_eq;
3342         struct mlx4_cmd_mailbox *mailbox;
3343         u32 in_modifier = 0;
3344         int err;
3345         int res_id;
3346         struct res_eq *req;
3347 
3348         if (!priv->mfunc.master.slave_state)
3349                 return -EINVAL;
3350 
3351         /* check for slave valid, slave not PF, and slave active */
3352         if (slave < 0 || slave > dev->persist->num_vfs ||
3353             slave == dev->caps.function ||
3354             !priv->mfunc.master.slave_state[slave].active)
3355                 return 0;
3356 
3357         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3358 
3359         /* Create the event only if the slave is registered */
3360         if (event_eq->eqn < 0)
3361                 return 0;
3362 
3363         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3364         res_id = (slave << 10) | event_eq->eqn;
3365         err = get_res(dev, slave, res_id, RES_EQ, &req);
3366         if (err)
3367                 goto unlock;
3368 
3369         if (req->com.from_state != RES_EQ_HW) {
3370                 err = -EINVAL;
3371                 goto put;
3372         }
3373 
3374         mailbox = mlx4_alloc_cmd_mailbox(dev);
3375         if (IS_ERR(mailbox)) {
3376                 err = PTR_ERR(mailbox);
3377                 goto put;
3378         }
3379 
3380         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3381                 ++event_eq->token;
3382                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3383         }
3384 
3385         memcpy(mailbox->buf, (u8 *) eqe, 28);
3386 
3387         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3388 
3389         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3390                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3391                        MLX4_CMD_NATIVE);
3392 
3393         put_res(dev, slave, res_id, RES_EQ);
3394         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3395         mlx4_free_cmd_mailbox(dev, mailbox);
3396         return err;
3397 
3398 put:
3399         put_res(dev, slave, res_id, RES_EQ);
3400 
3401 unlock:
3402         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3403         return err;
3404 }
3405 
3406 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3407                           struct mlx4_vhcr *vhcr,
3408                           struct mlx4_cmd_mailbox *inbox,
3409                           struct mlx4_cmd_mailbox *outbox,
3410                           struct mlx4_cmd_info *cmd)
3411 {
3412         int eqn = vhcr->in_modifier;
3413         int res_id = eqn | (slave << 10);
3414         struct res_eq *eq;
3415         int err;
3416 
3417         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3418         if (err)
3419                 return err;
3420 
3421         if (eq->com.from_state != RES_EQ_HW) {
3422                 err = -EINVAL;
3423                 goto ex_put;
3424         }
3425 
3426         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3427 
3428 ex_put:
3429         put_res(dev, slave, res_id, RES_EQ);
3430         return err;
3431 }
3432 
3433 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3434                           struct mlx4_vhcr *vhcr,
3435                           struct mlx4_cmd_mailbox *inbox,
3436                           struct mlx4_cmd_mailbox *outbox,
3437                           struct mlx4_cmd_info *cmd)
3438 {
3439         int err;
3440         int cqn = vhcr->in_modifier;
3441         struct mlx4_cq_context *cqc = inbox->buf;
3442         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3443         struct res_cq *cq = NULL;
3444         struct res_mtt *mtt;
3445 
3446         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3447         if (err)
3448                 return err;
3449         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3450         if (err)
3451                 goto out_move;
3452         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3453         if (err)
3454                 goto out_put;
3455         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3456         if (err)
3457                 goto out_put;
3458         atomic_inc(&mtt->ref_count);
3459         cq->mtt = mtt;
3460         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3461         res_end_move(dev, slave, RES_CQ, cqn);
3462         return 0;
3463 
3464 out_put:
3465         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3466 out_move:
3467         res_abort_move(dev, slave, RES_CQ, cqn);
3468         return err;
3469 }
3470 
3471 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3472                           struct mlx4_vhcr *vhcr,
3473                           struct mlx4_cmd_mailbox *inbox,
3474                           struct mlx4_cmd_mailbox *outbox,
3475                           struct mlx4_cmd_info *cmd)
3476 {
3477         int err;
3478         int cqn = vhcr->in_modifier;
3479         struct res_cq *cq = NULL;
3480 
3481         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3482         if (err)
3483                 return err;
3484         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3485         if (err)
3486                 goto out_move;
3487         atomic_dec(&cq->mtt->ref_count);
3488         res_end_move(dev, slave, RES_CQ, cqn);
3489         return 0;
3490 
3491 out_move:
3492         res_abort_move(dev, slave, RES_CQ, cqn);
3493         return err;
3494 }
3495 
3496 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3497                           struct mlx4_vhcr *vhcr,
3498                           struct mlx4_cmd_mailbox *inbox,
3499                           struct mlx4_cmd_mailbox *outbox,
3500                           struct mlx4_cmd_info *cmd)
3501 {
3502         int cqn = vhcr->in_modifier;
3503         struct res_cq *cq;
3504         int err;
3505 
3506         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3507         if (err)
3508                 return err;
3509 
3510         if (cq->com.from_state != RES_CQ_HW)
3511                 goto ex_put;
3512 
3513         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3514 ex_put:
3515         put_res(dev, slave, cqn, RES_CQ);
3516 
3517         return err;
3518 }
3519 
3520 static int handle_resize(struct mlx4_dev *dev, int slave,
3521                          struct mlx4_vhcr *vhcr,
3522                          struct mlx4_cmd_mailbox *inbox,
3523                          struct mlx4_cmd_mailbox *outbox,
3524                          struct mlx4_cmd_info *cmd,
3525                          struct res_cq *cq)
3526 {
3527         int err;
3528         struct res_mtt *orig_mtt;
3529         struct res_mtt *mtt;
3530         struct mlx4_cq_context *cqc = inbox->buf;
3531         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3532 
3533         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3534         if (err)
3535                 return err;
3536 
3537         if (orig_mtt != cq->mtt) {
3538                 err = -EINVAL;
3539                 goto ex_put;
3540         }
3541 
3542         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3543         if (err)
3544                 goto ex_put;
3545 
3546         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3547         if (err)
3548                 goto ex_put1;
3549         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3550         if (err)
3551                 goto ex_put1;
3552         atomic_dec(&orig_mtt->ref_count);
3553         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3554         atomic_inc(&mtt->ref_count);
3555         cq->mtt = mtt;
3556         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3557         return 0;
3558 
3559 ex_put1:
3560         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3561 ex_put:
3562         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3563 
3564         return err;
3565 
3566 }
3567 
3568 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3569                            struct mlx4_vhcr *vhcr,
3570                            struct mlx4_cmd_mailbox *inbox,
3571                            struct mlx4_cmd_mailbox *outbox,
3572                            struct mlx4_cmd_info *cmd)
3573 {
3574         int cqn = vhcr->in_modifier;
3575         struct res_cq *cq;
3576         int err;
3577 
3578         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3579         if (err)
3580                 return err;
3581 
3582         if (cq->com.from_state != RES_CQ_HW)
3583                 goto ex_put;
3584 
3585         if (vhcr->op_modifier == 0) {
3586                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3587                 goto ex_put;
3588         }
3589 
3590         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3591 ex_put:
3592         put_res(dev, slave, cqn, RES_CQ);
3593 
3594         return err;
3595 }
3596 
3597 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3598 {
3599         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3600         int log_rq_stride = srqc->logstride & 7;
3601         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3602 
3603         if (log_srq_size + log_rq_stride + 4 < page_shift)
3604                 return 1;
3605 
3606         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3607 }
3608 
3609 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3610                            struct mlx4_vhcr *vhcr,
3611                            struct mlx4_cmd_mailbox *inbox,
3612                            struct mlx4_cmd_mailbox *outbox,
3613                            struct mlx4_cmd_info *cmd)
3614 {
3615         int err;
3616         int srqn = vhcr->in_modifier;
3617         struct res_mtt *mtt;
3618         struct res_srq *srq = NULL;
3619         struct mlx4_srq_context *srqc = inbox->buf;
3620         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3621 
3622         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3623                 return -EINVAL;
3624 
3625         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3626         if (err)
3627                 return err;
3628         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3629         if (err)
3630                 goto ex_abort;
3631         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3632                               mtt);
3633         if (err)
3634                 goto ex_put_mtt;
3635 
3636         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3637         if (err)
3638                 goto ex_put_mtt;
3639 
3640         atomic_inc(&mtt->ref_count);
3641         srq->mtt = mtt;
3642         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3643         res_end_move(dev, slave, RES_SRQ, srqn);
3644         return 0;
3645 
3646 ex_put_mtt:
3647         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3648 ex_abort:
3649         res_abort_move(dev, slave, RES_SRQ, srqn);
3650 
3651         return err;
3652 }
3653 
3654 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3655                            struct mlx4_vhcr *vhcr,
3656                            struct mlx4_cmd_mailbox *inbox,
3657                            struct mlx4_cmd_mailbox *outbox,
3658                            struct mlx4_cmd_info *cmd)
3659 {
3660         int err;
3661         int srqn = vhcr->in_modifier;
3662         struct res_srq *srq = NULL;
3663 
3664         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3665         if (err)
3666                 return err;
3667         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3668         if (err)
3669                 goto ex_abort;
3670         atomic_dec(&srq->mtt->ref_count);
3671         if (srq->cq)
3672                 atomic_dec(&srq->cq->ref_count);
3673         res_end_move(dev, slave, RES_SRQ, srqn);
3674 
3675         return 0;
3676 
3677 ex_abort:
3678         res_abort_move(dev, slave, RES_SRQ, srqn);
3679 
3680         return err;
3681 }
3682 
3683 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3684                            struct mlx4_vhcr *vhcr,
3685                            struct mlx4_cmd_mailbox *inbox,
3686                            struct mlx4_cmd_mailbox *outbox,
3687                            struct mlx4_cmd_info *cmd)
3688 {
3689         int err;
3690         int srqn = vhcr->in_modifier;
3691         struct res_srq *srq;
3692 
3693         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3694         if (err)
3695                 return err;
3696         if (srq->com.from_state != RES_SRQ_HW) {
3697                 err = -EBUSY;
3698                 goto out;
3699         }
3700         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3701 out:
3702         put_res(dev, slave, srqn, RES_SRQ);
3703         return err;
3704 }
3705 
3706 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3707                          struct mlx4_vhcr *vhcr,
3708                          struct mlx4_cmd_mailbox *inbox,
3709                          struct mlx4_cmd_mailbox *outbox,
3710                          struct mlx4_cmd_info *cmd)
3711 {
3712         int err;
3713         int srqn = vhcr->in_modifier;
3714         struct res_srq *srq;
3715 
3716         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3717         if (err)
3718                 return err;
3719 
3720         if (srq->com.from_state != RES_SRQ_HW) {
3721                 err = -EBUSY;
3722                 goto out;
3723         }
3724 
3725         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3726 out:
3727         put_res(dev, slave, srqn, RES_SRQ);
3728         return err;
3729 }
3730 
3731 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3732                         struct mlx4_vhcr *vhcr,
3733                         struct mlx4_cmd_mailbox *inbox,
3734                         struct mlx4_cmd_mailbox *outbox,
3735                         struct mlx4_cmd_info *cmd)
3736 {
3737         int err;
3738         int qpn = vhcr->in_modifier & 0x7fffff;
3739         struct res_qp *qp;
3740 
3741         err = get_res(dev, slave, qpn, RES_QP, &qp);
3742         if (err)
3743                 return err;
3744         if (qp->com.from_state != RES_QP_HW) {
3745                 err = -EBUSY;
3746                 goto out;
3747         }
3748 
3749         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3750 out:
3751         put_res(dev, slave, qpn, RES_QP);
3752         return err;
3753 }
3754 
3755 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3756                               struct mlx4_vhcr *vhcr,
3757                               struct mlx4_cmd_mailbox *inbox,
3758                               struct mlx4_cmd_mailbox *outbox,
3759                               struct mlx4_cmd_info *cmd)
3760 {
3761         struct mlx4_qp_context *context = inbox->buf + 8;
3762         adjust_proxy_tun_qkey(dev, vhcr, context);
3763         update_pkey_index(dev, slave, inbox);
3764         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3765 }
3766 
3767 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3768                                   struct mlx4_qp_context *qpc,
3769                                   struct mlx4_cmd_mailbox *inbox)
3770 {
3771         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3772         u8 pri_sched_queue;
3773         int port = mlx4_slave_convert_port(
3774                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3775 
3776         if (port < 0)
3777                 return -EINVAL;
3778 
3779         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3780                           ((port & 1) << 6);
3781 
3782         if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3783             qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3784                 qpc->pri_path.sched_queue = pri_sched_queue;
3785         }
3786 
3787         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3788                 port = mlx4_slave_convert_port(
3789                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3790                                 + 1) - 1;
3791                 if (port < 0)
3792                         return -EINVAL;
3793                 qpc->alt_path.sched_queue =
3794                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3795                         (port & 1) << 6;
3796         }
3797         return 0;
3798 }
3799 
3800 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3801                                 struct mlx4_qp_context *qpc,
3802                                 struct mlx4_cmd_mailbox *inbox)
3803 {
3804         u64 mac;
3805         int port;
3806         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3807         u8 sched = *(u8 *)(inbox->buf + 64);
3808         u8 smac_ix;
3809 
3810         port = (sched >> 6 & 1) + 1;
3811         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3812                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3813                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3814                         return -ENOENT;
3815         }
3816         return 0;
3817 }
3818 
3819 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3820                              struct mlx4_vhcr *vhcr,
3821                              struct mlx4_cmd_mailbox *inbox,
3822                              struct mlx4_cmd_mailbox *outbox,
3823                              struct mlx4_cmd_info *cmd)
3824 {
3825         int err;
3826         struct mlx4_qp_context *qpc = inbox->buf + 8;
3827         int qpn = vhcr->in_modifier & 0x7fffff;
3828         struct res_qp *qp;
3829         u8 orig_sched_queue;
3830         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3831         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3832         u8 orig_pri_path_fl = qpc->pri_path.fl;
3833         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3834         u8 orig_feup = qpc->pri_path.feup;
3835 
3836         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3837         if (err)
3838                 return err;
3839         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3840         if (err)
3841                 return err;
3842 
3843         if (roce_verify_mac(dev, slave, qpc, inbox))
3844                 return -EINVAL;
3845 
3846         update_pkey_index(dev, slave, inbox);
3847         update_gid(dev, inbox, (u8)slave);
3848         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3849         orig_sched_queue = qpc->pri_path.sched_queue;
3850 
3851         err = get_res(dev, slave, qpn, RES_QP, &qp);
3852         if (err)
3853                 return err;
3854         if (qp->com.from_state != RES_QP_HW) {
3855                 err = -EBUSY;
3856                 goto out;
3857         }
3858 
3859         err = update_vport_qp_param(dev, inbox, slave, qpn);
3860         if (err)
3861                 goto out;
3862 
3863         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3864 out:
3865         /* if no error, save sched queue value passed in by VF. This is
3866          * essentially the QOS value provided by the VF. This will be useful
3867          * if we allow dynamic changes from VST back to VGT
3868          */
3869         if (!err) {
3870                 qp->sched_queue = orig_sched_queue;
3871                 qp->vlan_control = orig_vlan_control;
3872                 qp->fvl_rx      =  orig_fvl_rx;
3873                 qp->pri_path_fl = orig_pri_path_fl;
3874                 qp->vlan_index  = orig_vlan_index;
3875                 qp->feup        = orig_feup;
3876         }
3877         put_res(dev, slave, qpn, RES_QP);
3878         return err;
3879 }
3880 
3881 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3882                             struct mlx4_vhcr *vhcr,
3883                             struct mlx4_cmd_mailbox *inbox,
3884                             struct mlx4_cmd_mailbox *outbox,
3885                             struct mlx4_cmd_info *cmd)
3886 {
3887         int err;
3888         struct mlx4_qp_context *context = inbox->buf + 8;
3889 
3890         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3891         if (err)
3892                 return err;
3893         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3894         if (err)
3895                 return err;
3896 
3897         update_pkey_index(dev, slave, inbox);
3898         update_gid(dev, inbox, (u8)slave);
3899         adjust_proxy_tun_qkey(dev, vhcr, context);
3900         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3901 }
3902 
3903 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3904                             struct mlx4_vhcr *vhcr,
3905                             struct mlx4_cmd_mailbox *inbox,
3906                             struct mlx4_cmd_mailbox *outbox,
3907                             struct mlx4_cmd_info *cmd)
3908 {
3909         int err;
3910         struct mlx4_qp_context *context = inbox->buf + 8;
3911 
3912         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3913         if (err)
3914                 return err;
3915         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3916         if (err)
3917                 return err;
3918 
3919         update_pkey_index(dev, slave, inbox);
3920         update_gid(dev, inbox, (u8)slave);
3921         adjust_proxy_tun_qkey(dev, vhcr, context);
3922         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3923 }
3924 
3925 
3926 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3927                               struct mlx4_vhcr *vhcr,
3928                               struct mlx4_cmd_mailbox *inbox,
3929                               struct mlx4_cmd_mailbox *outbox,
3930                               struct mlx4_cmd_info *cmd)
3931 {
3932         struct mlx4_qp_context *context = inbox->buf + 8;
3933         int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3934         if (err)
3935                 return err;
3936         adjust_proxy_tun_qkey(dev, vhcr, context);
3937         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3938 }
3939 
3940 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3941                             struct mlx4_vhcr *vhcr,
3942                             struct mlx4_cmd_mailbox *inbox,
3943                             struct mlx4_cmd_mailbox *outbox,
3944                             struct mlx4_cmd_info *cmd)
3945 {
3946         int err;
3947         struct mlx4_qp_context *context = inbox->buf + 8;
3948 
3949         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3950         if (err)
3951                 return err;
3952         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3953         if (err)
3954                 return err;
3955 
3956         adjust_proxy_tun_qkey(dev, vhcr, context);
3957         update_gid(dev, inbox, (u8)slave);
3958         update_pkey_index(dev, slave, inbox);
3959         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3960 }
3961 
3962 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3963                             struct mlx4_vhcr *vhcr,
3964                             struct mlx4_cmd_mailbox *inbox,
3965                             struct mlx4_cmd_mailbox *outbox,
3966                             struct mlx4_cmd_info *cmd)
3967 {
3968         int err;
3969         struct mlx4_qp_context *context = inbox->buf + 8;
3970 
3971         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3972         if (err)
3973                 return err;
3974         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3975         if (err)
3976                 return err;
3977 
3978         adjust_proxy_tun_qkey(dev, vhcr, context);
3979         update_gid(dev, inbox, (u8)slave);
3980         update_pkey_index(dev, slave, inbox);
3981         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3982 }
3983 
3984 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3985                          struct mlx4_vhcr *vhcr,
3986                          struct mlx4_cmd_mailbox *inbox,
3987                          struct mlx4_cmd_mailbox *outbox,
3988                          struct mlx4_cmd_info *cmd)
3989 {
3990         int err;
3991         int qpn = vhcr->in_modifier & 0x7fffff;
3992         struct res_qp *qp;
3993 
3994         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3995         if (err)
3996                 return err;
3997         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3998         if (err)
3999                 goto ex_abort;
4000 
4001         atomic_dec(&qp->mtt->ref_count);
4002         atomic_dec(&qp->rcq->ref_count);
4003         atomic_dec(&qp->scq->ref_count);
4004         if (qp->srq)
4005                 atomic_dec(&qp->srq->ref_count);
4006         res_end_move(dev, slave, RES_QP, qpn);
4007         return 0;
4008 
4009 ex_abort:
4010         res_abort_move(dev, slave, RES_QP, qpn);
4011 
4012         return err;
4013 }
4014 
4015 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
4016                                 struct res_qp *rqp, u8 *gid)
4017 {
4018         struct res_gid *res;
4019 
4020         list_for_each_entry(res, &rqp->mcg_list, list) {
4021                 if (!memcmp(res->gid, gid, 16))
4022                         return res;
4023         }
4024         return NULL;
4025 }
4026 
4027 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4028                        u8 *gid, enum mlx4_protocol prot,
4029                        enum mlx4_steer_type steer, u64 reg_id)
4030 {
4031         struct res_gid *res;
4032         int err;
4033 
4034         res = kzalloc(sizeof(*res), GFP_KERNEL);
4035         if (!res)
4036                 return -ENOMEM;
4037 
4038         spin_lock_irq(&rqp->mcg_spl);
4039         if (find_gid(dev, slave, rqp, gid)) {
4040                 kfree(res);
4041                 err = -EEXIST;
4042         } else {
4043                 memcpy(res->gid, gid, 16);
4044                 res->prot = prot;
4045                 res->steer = steer;
4046                 res->reg_id = reg_id;
4047                 list_add_tail(&res->list, &rqp->mcg_list);
4048                 err = 0;
4049         }
4050         spin_unlock_irq(&rqp->mcg_spl);
4051 
4052         return err;
4053 }
4054 
4055 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4056                        u8 *gid, enum mlx4_protocol prot,
4057                        enum mlx4_steer_type steer, u64 *reg_id)
4058 {
4059         struct res_gid *res;
4060         int err;
4061 
4062         spin_lock_irq(&rqp->mcg_spl);
4063         res = find_gid(dev, slave, rqp, gid);
4064         if (!res || res->prot != prot || res->steer != steer)
4065                 err = -EINVAL;
4066         else {
4067                 *reg_id = res->reg_id;
4068                 list_del(&res->list);
4069                 kfree(res);
4070                 err = 0;
4071         }
4072         spin_unlock_irq(&rqp->mcg_spl);
4073 
4074         return err;
4075 }
4076 
4077 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4078                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4079                      enum mlx4_steer_type type, u64 *reg_id)
4080 {
4081         switch (dev->caps.steering_mode) {
4082         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4083                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4084                 if (port < 0)
4085                         return port;
4086                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4087                                                 block_loopback, prot,
4088                                                 reg_id);
4089         }
4090         case MLX4_STEERING_MODE_B0:
4091                 if (prot == MLX4_PROT_ETH) {
4092                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4093                         if (port < 0)
4094                                 return port;
4095                         gid[5] = port;
4096                 }
4097                 return mlx4_qp_attach_common(dev, qp, gid,
4098                                             block_loopback, prot, type);
4099         default:
4100                 return -EINVAL;
4101         }
4102 }
4103 
4104 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4105                      u8 gid[16], enum mlx4_protocol prot,
4106                      enum mlx4_steer_type type, u64 reg_id)
4107 {
4108         switch (dev->caps.steering_mode) {
4109         case MLX4_STEERING_MODE_DEVICE_MANAGED:
4110                 return mlx4_flow_detach(dev, reg_id);
4111         case MLX4_STEERING_MODE_B0:
4112                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4113         default:
4114                 return -EINVAL;
4115         }
4116 }
4117 
4118 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4119                             u8 *gid, enum mlx4_protocol prot)
4120 {
4121         int real_port;
4122 
4123         if (prot != MLX4_PROT_ETH)
4124                 return 0;
4125 
4126         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4127             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4128                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4129                 if (real_port < 0)
4130                         return -EINVAL;
4131                 gid[5] = real_port;
4132         }
4133 
4134         return 0;
4135 }
4136 
4137 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4138                                struct mlx4_vhcr *vhcr,
4139                                struct mlx4_cmd_mailbox *inbox,
4140                                struct mlx4_cmd_mailbox *outbox,
4141                                struct mlx4_cmd_info *cmd)
4142 {
4143         struct mlx4_qp qp; /* dummy for calling attach/detach */
4144         u8 *gid = inbox->buf;
4145         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4146         int err;
4147         int qpn;
4148         struct res_qp *rqp;
4149         u64 reg_id = 0;
4150         int attach = vhcr->op_modifier;
4151         int block_loopback = vhcr->in_modifier >> 31;
4152         u8 steer_type_mask = 2;
4153         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4154 
4155         qpn = vhcr->in_modifier & 0xffffff;
4156         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4157         if (err)
4158                 return err;
4159 
4160         qp.qpn = qpn;
4161         if (attach) {
4162                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4163                                 type, &reg_id);
4164                 if (err) {
4165                         pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4166                         goto ex_put;
4167                 }
4168                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4169                 if (err)
4170                         goto ex_detach;
4171         } else {
4172                 err = mlx4_adjust_port(dev, slave, gid, prot);
4173                 if (err)
4174                         goto ex_put;
4175 
4176                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4177                 if (err)
4178                         goto ex_put;
4179 
4180                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4181                 if (err)
4182                         pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4183                                qpn, reg_id);
4184         }
4185         put_res(dev, slave, qpn, RES_QP);
4186         return err;
4187 
4188 ex_detach:
4189         qp_detach(dev, &qp, gid, prot, type, reg_id);
4190 ex_put:
4191         put_res(dev, slave, qpn, RES_QP);
4192         return err;
4193 }
4194 
4195 /*
4196  * MAC validation for Flow Steering rules.
4197  * VF can attach rules only with a mac address which is assigned to it.
4198  */
4199 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4200                                    struct list_head *rlist)
4201 {
4202         struct mac_res *res, *tmp;
4203         __be64 be_mac;
4204 
4205         /* make sure it isn't multicast or broadcast mac*/
4206         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4207             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4208                 list_for_each_entry_safe(res, tmp, rlist, list) {
4209                         be_mac = cpu_to_be64(res->mac << 16);
4210                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4211                                 return 0;
4212                 }
4213                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4214                        eth_header->eth.dst_mac, slave);
4215                 return -EINVAL;
4216         }
4217         return 0;
4218 }
4219 
4220 /*
4221  * In case of missing eth header, append eth header with a MAC address
4222  * assigned to the VF.
4223  */
4224 static int add_eth_header(struct mlx4_dev *dev, int slave,
4225                           struct mlx4_cmd_mailbox *inbox,
4226                           struct list_head *rlist, int header_id)
4227 {
4228         struct mac_res *res, *tmp;
4229         u8 port;
4230         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4231         struct mlx4_net_trans_rule_hw_eth *eth_header;
4232         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4233         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4234         __be64 be_mac = 0;
4235         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4236 
4237         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4238         port = ctrl->port;
4239         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4240 
4241         /* Clear a space in the inbox for eth header */
4242         switch (header_id) {
4243         case MLX4_NET_TRANS_RULE_ID_IPV4:
4244                 ip_header =
4245                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4246                 memmove(ip_header, eth_header,
4247                         sizeof(*ip_header) + sizeof(*l4_header));
4248                 break;
4249         case MLX4_NET_TRANS_RULE_ID_TCP:
4250         case MLX4_NET_TRANS_RULE_ID_UDP:
4251                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4252                             (eth_header + 1);
4253                 memmove(l4_header, eth_header, sizeof(*l4_header));
4254                 break;
4255         default:
4256                 return -EINVAL;
4257         }
4258         list_for_each_entry_safe(res, tmp, rlist, list) {
4259                 if (port == res->port) {
4260                         be_mac = cpu_to_be64(res->mac << 16);
4261                         break;
4262                 }
4263         }
4264         if (!be_mac) {
4265                 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4266                        port);
4267                 return -EINVAL;
4268         }
4269 
4270         memset(eth_header, 0, sizeof(*eth_header));
4271         eth_header->size = sizeof(*eth_header) >> 2;
4272         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4273         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4274         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4275 
4276         return 0;
4277 
4278 }
4279 
4280 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
4281         1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
4282         1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4283 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4284                            struct mlx4_vhcr *vhcr,
4285                            struct mlx4_cmd_mailbox *inbox,
4286                            struct mlx4_cmd_mailbox *outbox,
4287                            struct mlx4_cmd_info *cmd_info)
4288 {
4289         int err;
4290         u32 qpn = vhcr->in_modifier & 0xffffff;
4291         struct res_qp *rqp;
4292         u64 mac;
4293         unsigned port;
4294         u64 pri_addr_path_mask;
4295         struct mlx4_update_qp_context *cmd;
4296         int smac_index;
4297 
4298         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4299 
4300         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4301         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4302             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4303                 return -EPERM;
4304 
4305         if ((pri_addr_path_mask &
4306              (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4307                 !(dev->caps.flags2 &
4308                   MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4309                 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4310                           slave);
4311                 return -EOPNOTSUPP;
4312         }
4313 
4314         /* Just change the smac for the QP */
4315         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4316         if (err) {
4317                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4318                 return err;
4319         }
4320 
4321         port = (rqp->sched_queue >> 6 & 1) + 1;
4322 
4323         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4324                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4325                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4326                                                 smac_index, &mac);
4327 
4328                 if (err) {
4329                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4330                                  qpn, smac_index);
4331                         goto err_mac;
4332                 }
4333         }
4334 
4335         err = mlx4_cmd(dev, inbox->dma,
4336                        vhcr->in_modifier, 0,
4337                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4338                        MLX4_CMD_NATIVE);
4339         if (err) {
4340                 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4341                 goto err_mac;
4342         }
4343 
4344 err_mac:
4345         put_res(dev, slave, qpn, RES_QP);
4346         return err;
4347 }
4348 
4349 static u32 qp_attach_mbox_size(void *mbox)
4350 {
4351         u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4352         struct _rule_hw  *rule_header;
4353 
4354         rule_header = (struct _rule_hw *)(mbox + size);
4355 
4356         while (rule_header->size) {
4357                 size += rule_header->size * sizeof(u32);
4358                 rule_header += 1;
4359         }
4360         return size;
4361 }
4362 
4363 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4364 
4365 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4366                                          struct mlx4_vhcr *vhcr,
4367                                          struct mlx4_cmd_mailbox *inbox,
4368                                          struct mlx4_cmd_mailbox *outbox,
4369                                          struct mlx4_cmd_info *cmd)
4370 {
4371 
4372         struct mlx4_priv *priv = mlx4_priv(dev);
4373         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4374         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4375         int err;
4376         int qpn;
4377         struct res_qp *rqp;
4378         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4379         struct _rule_hw  *rule_header;
4380         int header_id;
4381         struct res_fs_rule *rrule;
4382         u32 mbox_size;
4383 
4384         if (dev->caps.steering_mode !=
4385             MLX4_STEERING_MODE_DEVICE_MANAGED)
4386                 return -EOPNOTSUPP;
4387 
4388         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4389         err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4390         if (err <= 0)
4391                 return -EINVAL;
4392         ctrl->port = err;
4393         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4394         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4395         if (err) {
4396                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4397                 return err;
4398         }
4399         rule_header = (struct _rule_hw *)(ctrl + 1);
4400         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4401 
4402         if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4403                 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4404 
4405         switch (header_id) {
4406         case MLX4_NET_TRANS_RULE_ID_ETH:
4407                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4408                         err = -EINVAL;
4409                         goto err_put_qp;
4410                 }
4411                 break;
4412         case MLX4_NET_TRANS_RULE_ID_IB:
4413                 break;
4414         case MLX4_NET_TRANS_RULE_ID_IPV4:
4415         case MLX4_NET_TRANS_RULE_ID_TCP:
4416         case MLX4_NET_TRANS_RULE_ID_UDP:
4417                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4418                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4419                         err = -EINVAL;
4420                         goto err_put_qp;
4421                 }
4422                 vhcr->in_modifier +=
4423                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4424                 break;
4425         default:
4426                 pr_err("Corrupted mailbox\n");
4427                 err = -EINVAL;
4428                 goto err_put_qp;
4429         }
4430 
4431         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4432                            vhcr->in_modifier, 0,
4433                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4434                            MLX4_CMD_NATIVE);
4435         if (err)
4436                 goto err_put_qp;
4437 
4438 
4439         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4440         if (err) {
4441                 mlx4_err(dev, "Fail to add flow steering resources\n");
4442                 goto err_detach;
4443         }
4444 
4445         err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4446         if (err)
4447                 goto err_detach;
4448 
4449         mbox_size = qp_attach_mbox_size(inbox->buf);
4450         rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4451         if (!rrule->mirr_mbox) {
4452                 err = -ENOMEM;
4453                 goto err_put_rule;
4454         }
4455         rrule->mirr_mbox_size = mbox_size;
4456         rrule->mirr_rule_id = 0;
4457         memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4458 
4459         /* set different port */
4460         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4461         if (ctrl->port == 1)
4462                 ctrl->port = 2;
4463         else
4464                 ctrl->port = 1;
4465 
4466         if (mlx4_is_bonded(dev))
4467                 mlx4_do_mirror_rule(dev, rrule);
4468 
4469         atomic_inc(&rqp->ref_count);
4470 
4471 err_put_rule:
4472         put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4473 err_detach:
4474         /* detach rule on error */
4475         if (err)
4476                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4477                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4478                          MLX4_CMD_NATIVE);
4479 err_put_qp:
4480         put_res(dev, slave, qpn, RES_QP);
4481         return err;
4482 }
4483 
4484 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4485 {
4486         int err;
4487 
4488         err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4489         if (err) {
4490                 mlx4_err(dev, "Fail to remove flow steering resources\n");
4491                 return err;
4492         }
4493 
4494         mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4495                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4496         return 0;
4497 }
4498 
4499 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4500                                          struct mlx4_vhcr *vhcr,
4501                                          struct mlx4_cmd_mailbox *inbox,
4502                                          struct mlx4_cmd_mailbox *outbox,
4503                                          struct mlx4_cmd_info *cmd)
4504 {
4505         int err;
4506         struct res_qp *rqp;
4507         struct res_fs_rule *rrule;
4508         u64 mirr_reg_id;
4509         int qpn;
4510 
4511         if (dev->caps.steering_mode !=
4512             MLX4_STEERING_MODE_DEVICE_MANAGED)
4513                 return -EOPNOTSUPP;
4514 
4515         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4516         if (err)
4517                 return err;
4518 
4519         if (!rrule->mirr_mbox) {
4520                 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4521                 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4522                 return -EINVAL;
4523         }
4524         mirr_reg_id = rrule->mirr_rule_id;
4525         kfree(rrule->mirr_mbox);
4526         qpn = rrule->qpn;
4527 
4528         /* Release the rule form busy state before removal */
4529         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4530         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4531         if (err)
4532                 return err;
4533 
4534         if (mirr_reg_id && mlx4_is_bonded(dev)) {
4535                 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4536                 if (err) {
4537                         mlx4_err(dev, "Fail to get resource of mirror rule\n");
4538                 } else {
4539                         put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4540                         mlx4_undo_mirror_rule(dev, rrule);
4541                 }
4542         }
4543         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4544         if (err) {
4545                 mlx4_err(dev, "Fail to remove flow steering resources\n");
4546                 goto out;
4547         }
4548 
4549         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4550                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4551                        MLX4_CMD_NATIVE);
4552         if (!err)
4553                 atomic_dec(&rqp->ref_count);
4554 out:
4555         put_res(dev, slave, qpn, RES_QP);
4556         return err;
4557 }
4558 
4559 enum {
4560         BUSY_MAX_RETRIES = 10
4561 };
4562 
4563 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4564                                struct mlx4_vhcr *vhcr,
4565                                struct mlx4_cmd_mailbox *inbox,
4566                                struct mlx4_cmd_mailbox *outbox,
4567                                struct mlx4_cmd_info *cmd)
4568 {
4569         int err;
4570         int index = vhcr->in_modifier & 0xffff;
4571 
4572         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4573         if (err)
4574                 return err;
4575 
4576         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4577         put_res(dev, slave, index, RES_COUNTER);
4578         return err;
4579 }
4580 
4581 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4582 {
4583         struct res_gid *rgid;
4584         struct res_gid *tmp;
4585         struct mlx4_qp qp; /* dummy for calling attach/detach */
4586 
4587         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4588                 switch (dev->caps.steering_mode) {
4589                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4590                         mlx4_flow_detach(dev, rgid->reg_id);
4591                         break;
4592                 case MLX4_STEERING_MODE_B0:
4593                         qp.qpn = rqp->local_qpn;
4594                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4595                                                      rgid->prot, rgid->steer);
4596                         break;
4597                 }
4598                 list_del(&rgid->list);
4599                 kfree(rgid);
4600         }
4601 }
4602 
4603 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4604                           enum mlx4_resource type, int print)
4605 {
4606         struct mlx4_priv *priv = mlx4_priv(dev);
4607         struct mlx4_resource_tracker *tracker =
4608                 &priv->mfunc.master.res_tracker;
4609         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4610         struct res_common *r;
4611         struct res_common *tmp;
4612         int busy;
4613 
4614         busy = 0;
4615         spin_lock_irq(mlx4_tlock(dev));
4616         list_for_each_entry_safe(r, tmp, rlist, list) {
4617                 if (r->owner == slave) {
4618                         if (!r->removing) {
4619                                 if (r->state == RES_ANY_BUSY) {
4620                                         if (print)
4621                                                 mlx4_dbg(dev,
4622                                                          "%s id 0x%llx is busy\n",
4623                                                           resource_str(type),
4624                                                           r->res_id);
4625                                         ++busy;
4626                                 } else {
4627                                         r->from_state = r->state;
4628                                         r->state = RES_ANY_BUSY;
4629                                         r->removing = 1;
4630                                 }
4631                         }
4632                 }
4633         }
4634         spin_unlock_irq(mlx4_tlock(dev));
4635 
4636         return busy;
4637 }
4638 
4639 static int move_all_busy(struct mlx4_dev *dev, int slave,
4640                          enum mlx4_resource type)
4641 {
4642         unsigned long begin;
4643         int busy;
4644 
4645         begin = jiffies;
4646         do {
4647                 busy = _move_all_busy(dev, slave, type, 0);
4648                 if (time_after(jiffies, begin + 5 * HZ))
4649                         break;
4650                 if (busy)
4651                         cond_resched();
4652         } while (busy);
4653 
4654         if (busy)
4655                 busy = _move_all_busy(dev, slave, type, 1);
4656 
4657         return busy;
4658 }
4659 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4660 {
4661         struct mlx4_priv *priv = mlx4_priv(dev);
4662         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4663         struct list_head *qp_list =
4664                 &tracker->slave_list[slave].res_list[RES_QP];
4665         struct res_qp *qp;
4666         struct res_qp *tmp;
4667         int state;
4668         u64 in_param;
4669         int qpn;
4670         int err;
4671 
4672         err = move_all_busy(dev, slave, RES_QP);
4673         if (err)
4674                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4675                           slave);
4676 
4677         spin_lock_irq(mlx4_tlock(dev));
4678         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4679                 spin_unlock_irq(mlx4_tlock(dev));
4680                 if (qp->com.owner == slave) {
4681                         qpn = qp->com.res_id;
4682                         detach_qp(dev, slave, qp);
4683                         state = qp->com.from_state;
4684                         while (state != 0) {
4685                                 switch (state) {
4686                                 case RES_QP_RESERVED:
4687                                         spin_lock_irq(mlx4_tlock(dev));
4688                                         rb_erase(&qp->com.node,
4689                                                  &tracker->res_tree[RES_QP]);
4690                                         list_del(&qp->com.list);
4691                                         spin_unlock_irq(mlx4_tlock(dev));
4692                                         if (!valid_reserved(dev, slave, qpn)) {
4693                                                 __mlx4_qp_release_range(dev, qpn, 1);
4694                                                 mlx4_release_resource(dev, slave,
4695                                                                       RES_QP, 1, 0);
4696                                         }
4697                                         kfree(qp);
4698                                         state = 0;
4699                                         break;
4700                                 case RES_QP_MAPPED:
4701                                         if (!valid_reserved(dev, slave, qpn))
4702                                                 __mlx4_qp_free_icm(dev, qpn);
4703                                         state = RES_QP_RESERVED;
4704                                         break;
4705                                 case RES_QP_HW:
4706                                         in_param = slave;
4707                                         err = mlx4_cmd(dev, in_param,
4708                                                        qp->local_qpn, 2,
4709                                                        MLX4_CMD_2RST_QP,
4710                                                        MLX4_CMD_TIME_CLASS_A,
4711                                                        MLX4_CMD_NATIVE);
4712                                         if (err)
4713                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4714                                                          slave, qp->local_qpn);
4715                                         atomic_dec(&qp->rcq->ref_count);
4716                                         atomic_dec(&qp->scq->ref_count);
4717                                         atomic_dec(&qp->mtt->ref_count);
4718                                         if (qp->srq)
4719                                                 atomic_dec(&qp->srq->ref_count);
4720                                         state = RES_QP_MAPPED;
4721                                         break;
4722                                 default:
4723                                         state = 0;
4724                                 }
4725                         }
4726                 }
4727                 spin_lock_irq(mlx4_tlock(dev));
4728         }
4729         spin_unlock_irq(mlx4_tlock(dev));
4730 }
4731 
4732 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4733 {
4734         struct mlx4_priv *priv = mlx4_priv(dev);
4735         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4736         struct list_head *srq_list =
4737                 &tracker->slave_list[slave].res_list[RES_SRQ];
4738         struct res_srq *srq;
4739         struct res_srq *tmp;
4740         int state;
4741         u64 in_param;
4742         int srqn;
4743         int err;
4744 
4745         err = move_all_busy(dev, slave, RES_SRQ);
4746         if (err)
4747                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4748                           slave);
4749 
4750         spin_lock_irq(mlx4_tlock(dev));
4751         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4752                 spin_unlock_irq(mlx4_tlock(dev));
4753                 if (srq->com.owner == slave) {
4754                         srqn = srq->com.res_id;
4755                         state = srq->com.from_state;
4756                         while (state != 0) {
4757                                 switch (state) {
4758                                 case RES_SRQ_ALLOCATED:
4759                                         __mlx4_srq_free_icm(dev, srqn);
4760                                         spin_lock_irq(mlx4_tlock(dev));
4761                                         rb_erase(&srq->com.node,
4762                                                  &tracker->res_tree[RES_SRQ]);
4763                                         list_del(&srq->com.list);
4764                                         spin_unlock_irq(mlx4_tlock(dev));
4765                                         mlx4_release_resource(dev, slave,
4766                                                               RES_SRQ, 1, 0);
4767                                         kfree(srq);
4768                                         state = 0;
4769                                         break;
4770 
4771                                 case RES_SRQ_HW:
4772                                         in_param = slave;
4773                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4774                                                        MLX4_CMD_HW2SW_SRQ,
4775                                                        MLX4_CMD_TIME_CLASS_A,
4776                                                        MLX4_CMD_NATIVE);
4777                                         if (err)
4778                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4779                                                          slave, srqn);
4780 
4781                                         atomic_dec(&srq->mtt->ref_count);
4782                                         if (srq->cq)
4783                                                 atomic_dec(&srq->cq->ref_count);
4784                                         state = RES_SRQ_ALLOCATED;
4785                                         break;
4786 
4787                                 default:
4788                                         state = 0;
4789                                 }
4790                         }
4791                 }
4792                 spin_lock_irq(mlx4_tlock(dev));
4793         }
4794         spin_unlock_irq(mlx4_tlock(dev));
4795 }
4796 
4797 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4798 {
4799         struct mlx4_priv *priv = mlx4_priv(dev);
4800         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4801         struct list_head *cq_list =
4802                 &tracker->slave_list[slave].res_list[RES_CQ];
4803         struct res_cq *cq;
4804         struct res_cq *tmp;
4805         int state;
4806         u64 in_param;
4807         int cqn;
4808         int err;
4809 
4810         err = move_all_busy(dev, slave, RES_CQ);
4811         if (err)
4812                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4813                           slave);
4814 
4815         spin_lock_irq(mlx4_tlock(dev));
4816         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4817                 spin_unlock_irq(mlx4_tlock(dev));
4818                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4819                         cqn = cq->com.res_id;
4820                         state = cq->com.from_state;
4821                         while (state != 0) {
4822                                 switch (state) {
4823                                 case RES_CQ_ALLOCATED:
4824                                         __mlx4_cq_free_icm(dev, cqn);
4825                                         spin_lock_irq(mlx4_tlock(dev));
4826                                         rb_erase(&cq->com.node,
4827                                                  &tracker->res_tree[RES_CQ]);
4828                                         list_del(&cq->com.list);
4829                                         spin_unlock_irq(mlx4_tlock(dev));
4830                                         mlx4_release_resource(dev, slave,
4831                                                               RES_CQ, 1, 0);
4832                                         kfree(cq);
4833                                         state = 0;
4834                                         break;
4835 
4836                                 case RES_CQ_HW:
4837                                         in_param = slave;
4838                                         err = mlx4_cmd(dev, in_param, cqn, 1,
4839                                                        MLX4_CMD_HW2SW_CQ,
4840                                                        MLX4_CMD_TIME_CLASS_A,
4841                                                        MLX4_CMD_NATIVE);
4842                                         if (err)
4843                                                 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4844                                                          slave, cqn);
4845                                         atomic_dec(&cq->mtt->ref_count);
4846                                         state = RES_CQ_ALLOCATED;
4847                                         break;
4848 
4849                                 default:
4850                                         state = 0;
4851                                 }
4852                         }
4853                 }
4854                 spin_lock_irq(mlx4_tlock(dev));
4855         }
4856         spin_unlock_irq(mlx4_tlock(dev));
4857 }
4858 
4859 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4860 {
4861         struct mlx4_priv *priv = mlx4_priv(dev);
4862         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4863         struct list_head *mpt_list =
4864                 &tracker->slave_list[slave].res_list[RES_MPT];
4865         struct res_mpt *mpt;
4866         struct res_mpt *tmp;
4867         int state;
4868         u64 in_param;
4869         int mptn;
4870         int err;
4871 
4872         err = move_all_busy(dev, slave, RES_MPT);
4873         if (err)
4874                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4875                           slave);
4876 
4877         spin_lock_irq(mlx4_tlock(dev));
4878         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4879                 spin_unlock_irq(mlx4_tlock(dev));
4880                 if (mpt->com.owner == slave) {
4881                         mptn = mpt->com.res_id;
4882                         state = mpt->com.from_state;
4883                         while (state != 0) {
4884                                 switch (state) {
4885                                 case RES_MPT_RESERVED:
4886                                         __mlx4_mpt_release(dev, mpt->key);
4887                                         spin_lock_irq(mlx4_tlock(dev));
4888                                         rb_erase(&mpt->com.node,
4889                                                  &tracker->res_tree[RES_MPT]);
4890                                         list_del(&mpt->com.list);
4891                                         spin_unlock_irq(mlx4_tlock(dev));
4892                                         mlx4_release_resource(dev, slave,
4893                                                               RES_MPT, 1, 0);
4894                                         kfree(mpt);
4895                                         state = 0;
4896                                         break;
4897 
4898                                 case RES_MPT_MAPPED:
4899                                         __mlx4_mpt_free_icm(dev, mpt->key);
4900                                         state = RES_MPT_RESERVED;
4901                                         break;
4902 
4903                                 case RES_MPT_HW:
4904                                         in_param = slave;
4905                                         err = mlx4_cmd(dev, in_param, mptn, 0,
4906                                                      MLX4_CMD_HW2SW_MPT,
4907                                                      MLX4_CMD_TIME_CLASS_A,
4908                                                      MLX4_CMD_NATIVE);
4909                                         if (err)
4910                                                 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4911                                                          slave, mptn);
4912                                         if (mpt->mtt)
4913                                                 atomic_dec(&mpt->mtt->ref_count);
4914                                         state = RES_MPT_MAPPED;
4915                                         break;
4916                                 default:
4917                                         state = 0;
4918                                 }
4919                         }
4920                 }
4921                 spin_lock_irq(mlx4_tlock(dev));
4922         }
4923         spin_unlock_irq(mlx4_tlock(dev));
4924 }
4925 
4926 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4927 {
4928         struct mlx4_priv *priv = mlx4_priv(dev);
4929         struct mlx4_resource_tracker *tracker =
4930                 &priv->mfunc.master.res_tracker;
4931         struct list_head *mtt_list =
4932                 &tracker->slave_list[slave].res_list[RES_MTT];
4933         struct res_mtt *mtt;
4934         struct res_mtt *tmp;
4935         int state;
4936         int base;
4937         int err;
4938 
4939         err = move_all_busy(dev, slave, RES_MTT);
4940         if (err)
4941                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts  - too busy for slave %d\n",
4942                           slave);
4943 
4944         spin_lock_irq(mlx4_tlock(dev));
4945         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4946                 spin_unlock_irq(mlx4_tlock(dev));
4947                 if (mtt->com.owner == slave) {
4948                         base = mtt->com.res_id;
4949                         state = mtt->com.from_state;
4950                         while (state != 0) {
4951                                 switch (state) {
4952                                 case RES_MTT_ALLOCATED:
4953                                         __mlx4_free_mtt_range(dev, base,
4954                                                               mtt->order);
4955                                         spin_lock_irq(mlx4_tlock(dev));
4956                                         rb_erase(&mtt->com.node,
4957                                                  &tracker->res_tree[RES_MTT]);
4958                                         list_del(&mtt->com.list);
4959                                         spin_unlock_irq(mlx4_tlock(dev));
4960                                         mlx4_release_resource(dev, slave, RES_MTT,
4961                                                               1 << mtt->order, 0);
4962                                         kfree(mtt);
4963                                         state = 0;
4964                                         break;
4965 
4966                                 default:
4967                                         state = 0;
4968                                 }
4969                         }
4970                 }
4971                 spin_lock_irq(mlx4_tlock(dev));
4972         }
4973         spin_unlock_irq(mlx4_tlock(dev));
4974 }
4975 
4976 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4977 {
4978         struct mlx4_cmd_mailbox *mailbox;
4979         int err;
4980         struct res_fs_rule *mirr_rule;
4981         u64 reg_id;
4982 
4983         mailbox = mlx4_alloc_cmd_mailbox(dev);
4984         if (IS_ERR(mailbox))
4985                 return PTR_ERR(mailbox);
4986 
4987         if (!fs_rule->mirr_mbox) {
4988                 mlx4_err(dev, "rule mirroring mailbox is null\n");
4989                 return -EINVAL;
4990         }
4991         memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4992         err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4993                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4994                            MLX4_CMD_NATIVE);
4995         mlx4_free_cmd_mailbox(dev, mailbox);
4996 
4997         if (err)
4998                 goto err;
4999 
5000         err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
5001         if (err)
5002                 goto err_detach;
5003 
5004         err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
5005         if (err)
5006                 goto err_rem;
5007 
5008         fs_rule->mirr_rule_id = reg_id;
5009         mirr_rule->mirr_rule_id = 0;
5010         mirr_rule->mirr_mbox_size = 0;
5011         mirr_rule->mirr_mbox = NULL;
5012         put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
5013 
5014         return 0;
5015 err_rem:
5016         rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
5017 err_detach:
5018         mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
5019                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
5020 err:
5021         return err;
5022 }
5023 
5024 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
5025 {
5026         struct mlx4_priv *priv = mlx4_priv(dev);
5027         struct mlx4_resource_tracker *tracker =
5028                 &priv->mfunc.master.res_tracker;
5029         struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
5030         struct rb_node *p;
5031         struct res_fs_rule *fs_rule;
5032         int err = 0;
5033         LIST_HEAD(mirr_list);
5034 
5035         for (p = rb_first(root); p; p = rb_next(p)) {
5036                 fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5037                 if ((bond && fs_rule->mirr_mbox_size) ||
5038                     (!bond && !fs_rule->mirr_mbox_size))
5039                         list_add_tail(&fs_rule->mirr_list, &mirr_list);
5040         }
5041 
5042         list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5043                 if (bond)
5044                         err += mlx4_do_mirror_rule(dev, fs_rule);
5045                 else
5046                         err += mlx4_undo_mirror_rule(dev, fs_rule);
5047         }
5048         return err;
5049 }
5050 
5051 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5052 {
5053         return mlx4_mirror_fs_rules(dev, true);
5054 }
5055 
5056 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5057 {
5058         return mlx4_mirror_fs_rules(dev, false);
5059 }
5060 
5061 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5062 {
5063         struct mlx4_priv *priv = mlx4_priv(dev);
5064         struct mlx4_resource_tracker *tracker =
5065                 &priv->mfunc.master.res_tracker;
5066         struct list_head *fs_rule_list =
5067                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
5068         struct res_fs_rule *fs_rule;
5069         struct res_fs_rule *tmp;
5070         int state;
5071         u64 base;
5072         int err;
5073 
5074         err = move_all_busy(dev, slave, RES_FS_RULE);
5075         if (err)
5076                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
5077                           slave);
5078 
5079         spin_lock_irq(mlx4_tlock(dev));
5080         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5081                 spin_unlock_irq(mlx4_tlock(dev));
5082                 if (fs_rule->com.owner == slave) {
5083                         base = fs_rule->com.res_id;
5084                         state = fs_rule->com.from_state;
5085                         while (state != 0) {
5086                                 switch (state) {
5087                                 case RES_FS_RULE_ALLOCATED:
5088                                         /* detach rule */
5089                                         err = mlx4_cmd(dev, base, 0, 0,
5090                                                        MLX4_QP_FLOW_STEERING_DETACH,
5091                                                        MLX4_CMD_TIME_CLASS_A,
5092                                                        MLX4_CMD_NATIVE);
5093 
5094                                         spin_lock_irq(mlx4_tlock(dev));
5095                                         rb_erase(&fs_rule->com.node,
5096                                                  &tracker->res_tree[RES_FS_RULE]);
5097                                         list_del(&fs_rule->com.list);
5098                                         spin_unlock_irq(mlx4_tlock(dev));
5099                                         kfree(fs_rule->mirr_mbox);
5100                                         kfree(fs_rule);
5101                                         state = 0;
5102                                         break;
5103 
5104                                 default:
5105                                         state = 0;
5106                                 }
5107                         }
5108                 }
5109                 spin_lock_irq(mlx4_tlock(dev));
5110         }
5111         spin_unlock_irq(mlx4_tlock(dev));
5112 }
5113 
5114 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5115 {
5116         struct mlx4_priv *priv = mlx4_priv(dev);
5117         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5118         struct list_head *eq_list =
5119                 &tracker->slave_list[slave].res_list[RES_EQ];
5120         struct res_eq *eq;
5121         struct res_eq *tmp;
5122         int err;
5123         int state;
5124         int eqn;
5125 
5126         err = move_all_busy(dev, slave, RES_EQ);
5127         if (err)
5128                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5129                           slave);
5130 
5131         spin_lock_irq(mlx4_tlock(dev));
5132         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5133                 spin_unlock_irq(mlx4_tlock(dev));
5134                 if (eq->com.owner == slave) {
5135                         eqn = eq->com.res_id;
5136                         state = eq->com.from_state;
5137                         while (state != 0) {
5138                                 switch (state) {
5139                                 case RES_EQ_RESERVED:
5140                                         spin_lock_irq(mlx4_tlock(dev));
5141                                         rb_erase(&eq->com.node,
5142                                                  &tracker->res_tree[RES_EQ]);
5143                                         list_del(&eq->com.list);
5144                                         spin_unlock_irq(mlx4_tlock(dev));
5145                                         kfree(eq);
5146                                         state = 0;
5147                                         break;
5148 
5149                                 case RES_EQ_HW:
5150                                         err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5151                                                        1, MLX4_CMD_HW2SW_EQ,
5152                                                        MLX4_CMD_TIME_CLASS_A,
5153                                                        MLX4_CMD_NATIVE);
5154                                         if (err)
5155                                                 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5156                                                          slave, eqn & 0x3ff);
5157                                         atomic_dec(&eq->mtt->ref_count);
5158                                         state = RES_EQ_RESERVED;
5159                                         break;
5160 
5161                                 default:
5162                                         state = 0;
5163                                 }
5164                         }
5165                 }
5166                 spin_lock_irq(mlx4_tlock(dev));
5167         }
5168         spin_unlock_irq(mlx4_tlock(dev));
5169 }
5170 
5171 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5172 {
5173         struct mlx4_priv *priv = mlx4_priv(dev);
5174         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5175         struct list_head *counter_list =
5176                 &tracker->slave_list[slave].res_list[RES_COUNTER];
5177         struct res_counter *counter;
5178         struct res_counter *tmp;
5179         int err;
5180         int *counters_arr = NULL;
5181         int i, j;
5182 
5183         err = move_all_busy(dev, slave, RES_COUNTER);
5184         if (err)
5185                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5186                           slave);
5187 
5188         counters_arr = kmalloc_array(dev->caps.max_counters,
5189                                      sizeof(*counters_arr), GFP_KERNEL);
5190         if (!counters_arr)
5191                 return;
5192 
5193         do {
5194                 i = 0;
5195                 j = 0;
5196                 spin_lock_irq(mlx4_tlock(dev));
5197                 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5198                         if (counter->com.owner == slave) {
5199                                 counters_arr[i++] = counter->com.res_id;
5200                                 rb_erase(&counter->com.node,
5201                                          &tracker->res_tree[RES_COUNTER]);
5202                                 list_del(&counter->com.list);
5203                                 kfree(counter);
5204                         }
5205                 }
5206                 spin_unlock_irq(mlx4_tlock(dev));
5207 
5208                 while (j < i) {
5209                         __mlx4_counter_free(dev, counters_arr[j++]);
5210                         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5211                 }
5212         } while (i);
5213 
5214         kfree(counters_arr);
5215 }
5216 
5217 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5218 {
5219         struct mlx4_priv *priv = mlx4_priv(dev);
5220         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5221         struct list_head *xrcdn_list =
5222                 &tracker->slave_list[slave].res_list[RES_XRCD];
5223         struct res_xrcdn *xrcd;
5224         struct res_xrcdn *tmp;
5225         int err;
5226         int xrcdn;
5227 
5228         err = move_all_busy(dev, slave, RES_XRCD);
5229         if (err)
5230                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5231                           slave);
5232 
5233         spin_lock_irq(mlx4_tlock(dev));
5234         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5235                 if (xrcd->com.owner == slave) {
5236                         xrcdn = xrcd->com.res_id;
5237                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5238                         list_del(&xrcd->com.list);
5239                         kfree(xrcd);
5240                         __mlx4_xrcd_free(dev, xrcdn);
5241                 }
5242         }
5243         spin_unlock_irq(mlx4_tlock(dev));
5244 }
5245 
5246 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5247 {
5248         struct mlx4_priv *priv = mlx4_priv(dev);
5249         mlx4_reset_roce_gids(dev, slave);
5250         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5251         rem_slave_vlans(dev, slave);
5252         rem_slave_macs(dev, slave);
5253         rem_slave_fs_rule(dev, slave);
5254         rem_slave_qps(dev, slave);
5255         rem_slave_srqs(dev, slave);
5256         rem_slave_cqs(dev, slave);
5257         rem_slave_mrs(dev, slave);
5258         rem_slave_eqs(dev, slave);
5259         rem_slave_mtts(dev, slave);
5260         rem_slave_counters(dev, slave);
5261         rem_slave_xrcdns(dev, slave);
5262         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5263 }
5264 
5265 static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5266                            struct mlx4_vf_immed_vlan_work *work)
5267 {
5268         ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5269         ctx->qp_context.qos_vport = work->qos_vport;
5270 }
5271 
5272 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5273 {
5274         struct mlx4_vf_immed_vlan_work *work =
5275                 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5276         struct mlx4_cmd_mailbox *mailbox;
5277         struct mlx4_update_qp_context *upd_context;
5278         struct mlx4_dev *dev = &work->priv->dev;
5279         struct mlx4_resource_tracker *tracker =
5280                 &work->priv->mfunc.master.res_tracker;
5281         struct list_head *qp_list =
5282                 &tracker->slave_list[work->slave].res_list[RES_QP];
5283         struct res_qp *qp;
5284         struct res_qp *tmp;
5285         u64 qp_path_mask_vlan_ctrl =
5286                        ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5287                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5288                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5289                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5290                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5291                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5292 
5293         u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5294                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5295                        (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5296                        (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5297                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5298                        (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5299                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5300                        (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5301 
5302         int err;
5303         int port, errors = 0;
5304         u8 vlan_control;
5305 
5306         if (mlx4_is_slave(dev)) {
5307                 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5308                           work->slave);
5309                 goto out;
5310         }
5311 
5312         mailbox = mlx4_alloc_cmd_mailbox(dev);
5313         if (IS_ERR(mailbox))
5314                 goto out;
5315         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5316                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5317                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5318                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5319                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5320                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5321                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5322         else if (!work->vlan_id)
5323                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5324                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5325         else if (work->vlan_proto == htons(ETH_P_8021AD))
5326                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5327                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5328                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5329                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5330         else  /* vst 802.1Q */
5331                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5332                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5333                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5334 
5335         upd_context = mailbox->buf;
5336         upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5337 
5338         spin_lock_irq(mlx4_tlock(dev));
5339         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5340                 spin_unlock_irq(mlx4_tlock(dev));
5341                 if (qp->com.owner == work->slave) {
5342                         if (qp->com.from_state != RES_QP_HW ||
5343                             !qp->sched_queue ||  /* no INIT2RTR trans yet */
5344                             mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5345                             qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5346                                 spin_lock_irq(mlx4_tlock(dev));
5347                                 continue;
5348                         }
5349                         port = (qp->sched_queue >> 6 & 1) + 1;
5350                         if (port != work->port) {
5351                                 spin_lock_irq(mlx4_tlock(dev));
5352                                 continue;
5353                         }
5354                         if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5355                                 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5356                         else
5357                                 upd_context->primary_addr_path_mask =
5358                                         cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5359                         if (work->vlan_id == MLX4_VGT) {
5360                                 upd_context->qp_context.param3 = qp->param3;
5361                                 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5362                                 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5363                                 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5364                                 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5365                                 upd_context->qp_context.pri_path.feup = qp->feup;
5366                                 upd_context->qp_context.pri_path.sched_queue =
5367                                         qp->sched_queue;
5368                         } else {
5369                                 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5370                                 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5371                                 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5372                                 upd_context->qp_context.pri_path.fvl_rx =
5373                                         qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5374                                 upd_context->qp_context.pri_path.fl =
5375                                         qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5376                                 if (work->vlan_proto == htons(ETH_P_8021AD))
5377                                         upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5378                                 else
5379                                         upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5380                                 upd_context->qp_context.pri_path.feup =
5381                                         qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5382                                 upd_context->qp_context.pri_path.sched_queue =
5383                                         qp->sched_queue & 0xC7;
5384                                 upd_context->qp_context.pri_path.sched_queue |=
5385                                         ((work->qos & 0x7) << 3);
5386 
5387                                 if (dev->caps.flags2 &
5388                                     MLX4_DEV_CAP_FLAG2_QOS_VPP)
5389                                         update_qos_vpp(upd_context, work);
5390                         }
5391 
5392                         err = mlx4_cmd(dev, mailbox->dma,
5393                                        qp->local_qpn & 0xffffff,
5394                                        0, MLX4_CMD_UPDATE_QP,
5395                                        MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5396                         if (err) {
5397                                 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5398                                           work->slave, port, qp->local_qpn, err);
5399                                 errors++;
5400                         }
5401                 }
5402                 spin_lock_irq(mlx4_tlock(dev));
5403         }
5404         spin_unlock_irq(mlx4_tlock(dev));
5405         mlx4_free_cmd_mailbox(dev, mailbox);
5406 
5407         if (errors)
5408                 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5409                          errors, work->slave, work->port);
5410 
5411         /* unregister previous vlan_id if needed and we had no errors
5412          * while updating the QPs
5413          */
5414         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5415             NO_INDX != work->orig_vlan_ix)
5416                 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5417                                        work->orig_vlan_id);
5418 out:
5419         kfree(work);
5420         return;
5421 }

/* [<][>][^][v][top][bottom][index][help] */