root/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c


DEFINITIONS

This source file includes the following definitions:
  1. dr_rule_append_to_miss_list
  2. dr_rule_create_collision_htbl
  3. dr_rule_create_collision_entry
  4. dr_rule_handle_one_ste_in_update_list
  5. dr_rule_send_update_list
  6. dr_rule_find_ste_in_miss_list
  7. dr_rule_rehash_handle_collision
  8. dr_rule_rehash_copy_ste_ctrl
  9. dr_rule_rehash_copy_ste
  10. dr_rule_rehash_copy_miss_list
  11. dr_rule_rehash_copy_htbl
  12. dr_rule_rehash_htbl
  13. dr_rule_rehash
  14. dr_rule_handle_collision
  15. dr_rule_remove_action_members
  16. dr_rule_add_action_members
  17. mlx5dr_rule_update_rule_member
  18. dr_rule_clean_rule_members
  19. dr_get_bits_per_mask
  20. dr_rule_need_enlarge_hash
  21. dr_rule_add_member
  22. dr_rule_handle_action_stes
  23. dr_rule_handle_empty_entry
  24. dr_rule_handle_ste_branch
  25. dr_rule_cmp_value_to_mask
  26. dr_rule_verify
  27. dr_rule_destroy_rule_nic
  28. dr_rule_destroy_rule_fdb
  29. dr_rule_destroy_rule
  30. dr_rule_is_ipv6
  31. dr_rule_skip
  32. dr_rule_create_rule_nic
  33. dr_rule_create_rule_fdb
  34. dr_rule_create_rule
  35. mlx5dr_rule_create
  36. mlx5dr_rule_destroy

   1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2 /* Copyright (c) 2019 Mellanox Technologies. */
   3 
   4 #include "dr_types.h"
   5 
   6 #define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
   7 
   8 struct mlx5dr_rule_action_member {
   9         struct mlx5dr_action *action;
  10         struct list_head list;
  11 };
  12 
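      /* Append new_last_ste to the tail of a miss list and queue a HW update
       * for the previous tail so that its miss address points at the new entry.
       */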
  13 static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
  14                                        struct list_head *miss_list,
  15                                        struct list_head *send_list)
  16 {
  17         struct mlx5dr_ste_send_info *ste_info_last;
  18         struct mlx5dr_ste *last_ste;
  19 
  20         /* The new entry will be inserted after the last */
  21         last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
  22         WARN_ON(!last_ste);
  23 
  24         ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
  25         if (!ste_info_last)
  26                 return -ENOMEM;
  27 
  28         mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
  29                                  mlx5dr_ste_get_icm_addr(new_last_ste));
  30         list_add_tail(&new_last_ste->miss_list_node, miss_list);
  31 
  32         mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED,
  33                                                   0, last_ste->hw_ste,
  34                                                   ste_info_last, send_list, true);
  35 
  36         return 0;
  37 }
  38 
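      /* Allocate a single-entry hash table used to hold a collision or action
       * STE; the caller's hw_ste gets its miss address pointed at the matcher
       * end anchor.
       */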
  39 static struct mlx5dr_ste *
  40 dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
  41                               struct mlx5dr_matcher_rx_tx *nic_matcher,
  42                               u8 *hw_ste)
  43 {
  44         struct mlx5dr_domain *dmn = matcher->tbl->dmn;
  45         struct mlx5dr_ste_htbl *new_htbl;
  46         struct mlx5dr_ste *ste;
  47 
  48         /* Create new table for miss entry */
  49         new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
  50                                          DR_CHUNK_SIZE_1,
  51                                          MLX5DR_STE_LU_TYPE_DONT_CARE,
  52                                          0);
  53         if (!new_htbl) {
  54                 mlx5dr_dbg(dmn, "Failed allocating collision table\n");
  55                 return NULL;
  56         }
  57 
  58         /* One and only entry, never grows */
  59         ste = new_htbl->ste_arr;
  60         mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
  61         mlx5dr_htbl_get(new_htbl);
  62 
  63         return ste;
  64 }
  65 
  66 static struct mlx5dr_ste *
  67 dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
  68                                struct mlx5dr_matcher_rx_tx *nic_matcher,
  69                                u8 *hw_ste,
  70                                struct mlx5dr_ste *orig_ste)
  71 {
  72         struct mlx5dr_ste *ste;
  73 
  74         ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
  75         if (!ste) {
  76                 mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
  77                 return NULL;
  78         }
  79 
  80         ste->ste_chain_location = orig_ste->ste_chain_location;
  81 
  82         /* In collision entry, all members share the same miss_list_head */
  83         ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
  84 
  85         /* Next table */
  86         if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
  87                                         DR_CHUNK_SIZE_1)) {
  88                 mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
  89                 goto free_tbl;
  90         }
  91 
  92         return ste;
  93 
  94 free_tbl:
  95         mlx5dr_ste_free(ste, matcher, nic_matcher);
  96         return NULL;
  97 }
  98 
  99 static int
 100 dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
 101                                       struct mlx5dr_domain *dmn)
 102 {
 103         int ret;
 104 
 105         list_del(&ste_info->send_list);
 106         ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
 107                                        ste_info->size, ste_info->offset);
 108         if (ret)
 109                 goto out;
  110         /* Copy data to the ste, only the reduced size; the last 16B (mask)
  111          * is already written to the hw.
  112          */
 113         memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
 114 
 115 out:
 116         kfree(ste_info);
 117         return ret;
 118 }
 119 
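      /* Flush all pending ste_info entries on send_ste_list to HW, in list
       * order or in reverse order, freeing each entry once it has been sent.
       */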
 120 static int dr_rule_send_update_list(struct list_head *send_ste_list,
 121                                     struct mlx5dr_domain *dmn,
 122                                     bool is_reverse)
 123 {
 124         struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
 125         int ret;
 126 
 127         if (is_reverse) {
 128                 list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
 129                                                  send_ste_list, send_list) {
 130                         ret = dr_rule_handle_one_ste_in_update_list(ste_info,
 131                                                                     dmn);
 132                         if (ret)
 133                                 return ret;
 134                 }
 135         } else {
 136                 list_for_each_entry_safe(ste_info, tmp_ste_info,
 137                                          send_ste_list, send_list) {
 138                         ret = dr_rule_handle_one_ste_in_update_list(ste_info,
 139                                                                     dmn);
 140                         if (ret)
 141                                 return ret;
 142                 }
 143         }
 144 
 145         return 0;
 146 }
 147 
 148 static struct mlx5dr_ste *
 149 dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
 150 {
 151         struct mlx5dr_ste *ste;
 152 
 153         if (list_empty(miss_list))
 154                 return NULL;
 155 
 156         /* Check if hw_ste is present in the list */
 157         list_for_each_entry(ste, miss_list, miss_list_node) {
 158                 if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
 159                         return ste;
 160         }
 161 
 162         return NULL;
 163 }
 164 
 165 static struct mlx5dr_ste *
 166 dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
 167                                 struct mlx5dr_matcher_rx_tx *nic_matcher,
 168                                 struct list_head *update_list,
 169                                 struct mlx5dr_ste *col_ste,
 170                                 u8 *hw_ste)
 171 {
 172         struct mlx5dr_ste *new_ste;
 173         int ret;
 174 
 175         new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
 176         if (!new_ste)
 177                 return NULL;
 178 
 179         /* In collision entry, all members share the same miss_list_head */
 180         new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
 181 
 182         /* Update the previous from the list */
 183         ret = dr_rule_append_to_miss_list(new_ste,
 184                                           mlx5dr_ste_get_miss_list(col_ste),
 185                                           update_list);
 186         if (ret) {
 187                 mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
 188                 goto err_exit;
 189         }
 190 
 191         return new_ste;
 192 
 193 err_exit:
 194         mlx5dr_ste_free(new_ste, matcher, nic_matcher);
 195         return NULL;
 196 }
 197 
 198 static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
 199                                          struct mlx5dr_matcher_rx_tx *nic_matcher,
 200                                          struct mlx5dr_ste *cur_ste,
 201                                          struct mlx5dr_ste *new_ste)
 202 {
 203         new_ste->next_htbl = cur_ste->next_htbl;
 204         new_ste->ste_chain_location = cur_ste->ste_chain_location;
 205 
 206         if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
 207                 new_ste->next_htbl->pointing_ste = new_ste;
 208 
 209         /* We need to copy the refcount since this ste
 210          * may have been traversed several times
 211          */
 212         new_ste->refcount = cur_ste->refcount;
 213 
  214         /* Link the old STE's rule_mem list to the new ste */
 215         mlx5dr_rule_update_rule_member(cur_ste, new_ste);
 216         INIT_LIST_HEAD(&new_ste->rule_list);
 217         list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
 218 }
 219 
 220 static struct mlx5dr_ste *
 221 dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
 222                         struct mlx5dr_matcher_rx_tx *nic_matcher,
 223                         struct mlx5dr_ste *cur_ste,
 224                         struct mlx5dr_ste_htbl *new_htbl,
 225                         struct list_head *update_list)
 226 {
 227         struct mlx5dr_ste_send_info *ste_info;
 228         bool use_update_list = false;
 229         u8 hw_ste[DR_STE_SIZE] = {};
 230         struct mlx5dr_ste *new_ste;
 231         int new_idx;
 232         u8 sb_idx;
 233 
 234         /* Copy STE mask from the matcher */
 235         sb_idx = cur_ste->ste_chain_location - 1;
 236         mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
 237 
 238         /* Copy STE control and tag */
 239         memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
 240         mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
 241 
 242         new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
 243         new_ste = &new_htbl->ste_arr[new_idx];
 244 
 245         if (mlx5dr_ste_not_used_ste(new_ste)) {
 246                 mlx5dr_htbl_get(new_htbl);
 247                 list_add_tail(&new_ste->miss_list_node,
 248                               mlx5dr_ste_get_miss_list(new_ste));
 249         } else {
 250                 new_ste = dr_rule_rehash_handle_collision(matcher,
 251                                                           nic_matcher,
 252                                                           update_list,
 253                                                           new_ste,
 254                                                           hw_ste);
 255                 if (!new_ste) {
 256                         mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
 257                                    new_idx);
 258                         return NULL;
 259                 }
 260                 new_htbl->ctrl.num_of_collisions++;
 261                 use_update_list = true;
 262         }
 263 
 264         memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
 265 
 266         new_htbl->ctrl.num_of_valid_entries++;
 267 
 268         if (use_update_list) {
 269                 ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
 270                 if (!ste_info)
 271                         goto err_exit;
 272 
 273                 mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
 274                                                           hw_ste, ste_info,
 275                                                           update_list, true);
 276         }
 277 
 278         dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);
 279 
 280         return new_ste;
 281 
 282 err_exit:
 283         mlx5dr_ste_free(new_ste, matcher, nic_matcher);
 284         return NULL;
 285 }
 286 
 287 static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
 288                                          struct mlx5dr_matcher_rx_tx *nic_matcher,
 289                                          struct list_head *cur_miss_list,
 290                                          struct mlx5dr_ste_htbl *new_htbl,
 291                                          struct list_head *update_list)
 292 {
 293         struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;
 294 
 295         if (list_empty(cur_miss_list))
 296                 return 0;
 297 
 298         list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
 299                 new_ste = dr_rule_rehash_copy_ste(matcher,
 300                                                   nic_matcher,
 301                                                   cur_ste,
 302                                                   new_htbl,
 303                                                   update_list);
 304                 if (!new_ste)
 305                         goto err_insert;
 306 
 307                 list_del(&cur_ste->miss_list_node);
 308                 mlx5dr_htbl_put(cur_ste->htbl);
 309         }
 310         return 0;
 311 
 312 err_insert:
 313         mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
 314         WARN_ON(true);
 315         return -EINVAL;
 316 }
 317 
 318 static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
 319                                     struct mlx5dr_matcher_rx_tx *nic_matcher,
 320                                     struct mlx5dr_ste_htbl *cur_htbl,
 321                                     struct mlx5dr_ste_htbl *new_htbl,
 322                                     struct list_head *update_list)
 323 {
 324         struct mlx5dr_ste *cur_ste;
 325         int cur_entries;
 326         int err = 0;
 327         int i;
 328 
 329         cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
 330 
 331         if (cur_entries < 1) {
 332                 mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
 333                 return -EINVAL;
 334         }
 335 
 336         for (i = 0; i < cur_entries; i++) {
 337                 cur_ste = &cur_htbl->ste_arr[i];
 338                 if (mlx5dr_ste_not_used_ste(cur_ste)) /* Empty, nothing to copy */
 339                         continue;
 340 
 341                 err = dr_rule_rehash_copy_miss_list(matcher,
 342                                                     nic_matcher,
 343                                                     mlx5dr_ste_get_miss_list(cur_ste),
 344                                                     new_htbl,
 345                                                     update_list);
 346                 if (err)
 347                         goto clean_copy;
 348         }
 349 
 350 clean_copy:
 351         return err;
 352 }
 353 
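      /* Grow cur_htbl into a new hash table of new_size: copy every used STE
       * and its miss list into the new table, write the new table to HW and
       * reconnect the pointing STE (or the matcher anchor) to it.
       */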
 354 static struct mlx5dr_ste_htbl *
 355 dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
 356                     struct mlx5dr_rule_rx_tx *nic_rule,
 357                     struct mlx5dr_ste_htbl *cur_htbl,
 358                     u8 ste_location,
 359                     struct list_head *update_list,
 360                     enum mlx5dr_icm_chunk_size new_size)
 361 {
 362         struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
 363         struct mlx5dr_matcher *matcher = rule->matcher;
 364         struct mlx5dr_domain *dmn = matcher->tbl->dmn;
 365         struct mlx5dr_matcher_rx_tx *nic_matcher;
 366         struct mlx5dr_ste_send_info *ste_info;
 367         struct mlx5dr_htbl_connect_info info;
 368         struct mlx5dr_domain_rx_tx *nic_dmn;
 369         u8 formatted_ste[DR_STE_SIZE] = {};
 370         LIST_HEAD(rehash_table_send_list);
 371         struct mlx5dr_ste *ste_to_update;
 372         struct mlx5dr_ste_htbl *new_htbl;
 373         int err;
 374 
 375         nic_matcher = nic_rule->nic_matcher;
 376         nic_dmn = nic_matcher->nic_tbl->nic_dmn;
 377 
 378         ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
 379         if (!ste_info)
 380                 return NULL;
 381 
 382         new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
 383                                          new_size,
 384                                          cur_htbl->lu_type,
 385                                          cur_htbl->byte_mask);
 386         if (!new_htbl) {
 387                 mlx5dr_err(dmn, "Failed to allocate new hash table\n");
 388                 goto free_ste_info;
 389         }
 390 
 391         /* Write new table to HW */
 392         info.type = CONNECT_MISS;
 393         info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
 394         mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
 395                                      nic_dmn,
 396                                      new_htbl,
 397                                      formatted_ste,
 398                                      &info);
 399 
 400         new_htbl->pointing_ste = cur_htbl->pointing_ste;
 401         new_htbl->pointing_ste->next_htbl = new_htbl;
 402         err = dr_rule_rehash_copy_htbl(matcher,
 403                                        nic_matcher,
 404                                        cur_htbl,
 405                                        new_htbl,
 406                                        &rehash_table_send_list);
 407         if (err)
 408                 goto free_new_htbl;
 409 
 410         if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
 411                                       nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
 412                 mlx5dr_err(dmn, "Failed writing table to HW\n");
 413                 goto free_new_htbl;
 414         }
 415 
  416         /* Writing to the hw is done in the regular order of rehash_table_send_list,
  417          * so that the original data is written before the miss address of
  418          * collision entries, if any exist.
  419          */
 420         if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
 421                 mlx5dr_err(dmn, "Failed updating table to HW\n");
 422                 goto free_ste_list;
 423         }
 424 
 425         /* Connect previous hash table to current */
 426         if (ste_location == 1) {
  427                 /* The previous table is an anchor, an anchor's size is always one STE */
 428                 struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;
 429 
 430                 /* On matcher s_anchor we keep an extra refcount */
 431                 mlx5dr_htbl_get(new_htbl);
 432                 mlx5dr_htbl_put(cur_htbl);
 433 
 434                 nic_matcher->s_htbl = new_htbl;
 435 
  436                 /* It is safe to call dr_ste_set_hit_addr on the hw_ste here
  437                  * (48B long) since it only modifies the first 32B
  438                  */
 439                 mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
 440                                         new_htbl->chunk->icm_addr,
 441                                         new_htbl->chunk->num_of_entries);
 442 
 443                 ste_to_update = &prev_htbl->ste_arr[0];
 444         } else {
 445                 mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
 446                                                      new_htbl);
 447                 ste_to_update = cur_htbl->pointing_ste;
 448         }
 449 
 450         mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED,
 451                                                   0, ste_to_update->hw_ste, ste_info,
 452                                                   update_list, false);
 453 
 454         return new_htbl;
 455 
 456 free_ste_list:
 457         /* Clean all ste_info's from the new table */
 458         list_for_each_entry_safe(del_ste_info, tmp_ste_info,
 459                                  &rehash_table_send_list, send_list) {
 460                 list_del(&del_ste_info->send_list);
 461                 kfree(del_ste_info);
 462         }
 463 
 464 free_new_htbl:
 465         mlx5dr_ste_htbl_free(new_htbl);
 466 free_ste_info:
 467         kfree(ste_info);
 468         mlx5dr_info(dmn, "Failed creating rehash table\n");
 469         return NULL;
 470 }
 471 
 472 static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
 473                                               struct mlx5dr_rule_rx_tx *nic_rule,
 474                                               struct mlx5dr_ste_htbl *cur_htbl,
 475                                               u8 ste_location,
 476                                               struct list_head *update_list)
 477 {
 478         struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
 479         enum mlx5dr_icm_chunk_size new_size;
 480 
 481         new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
 482         new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
 483 
 484         if (new_size == cur_htbl->chunk_size)
  485                 return NULL; /* Skip rehash, we are already at the max size */
 486 
 487         return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
 488                                    update_list, new_size);
 489 }
 490 
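      /* The hash slot is taken by a different tag: chain a new collision
       * entry at the end of the slot's miss list and queue both the previous
       * tail and the new STE for a HW update.
       */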
 491 static struct mlx5dr_ste *
 492 dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
 493                          struct mlx5dr_matcher_rx_tx *nic_matcher,
 494                          struct mlx5dr_ste *ste,
 495                          u8 *hw_ste,
 496                          struct list_head *miss_list,
 497                          struct list_head *send_list)
 498 {
 499         struct mlx5dr_ste_send_info *ste_info;
 500         struct mlx5dr_ste *new_ste;
 501 
 502         ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
 503         if (!ste_info)
 504                 return NULL;
 505 
 506         new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
 507         if (!new_ste)
 508                 goto free_send_info;
 509 
 510         if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
 511                 mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
 512                 goto err_exit;
 513         }
 514 
 515         mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
 516                                                   ste_info, send_list, false);
 517 
 518         ste->htbl->ctrl.num_of_collisions++;
 519         ste->htbl->ctrl.num_of_valid_entries++;
 520 
 521         return new_ste;
 522 
 523 err_exit:
 524         mlx5dr_ste_free(new_ste, matcher, nic_matcher);
 525 free_send_info:
 526         kfree(ste_info);
 527         return NULL;
 528 }
 529 
 530 static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
 531 {
 532         struct mlx5dr_rule_action_member *action_mem;
 533         struct mlx5dr_rule_action_member *tmp;
 534 
 535         list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
 536                 list_del(&action_mem->list);
 537                 refcount_dec(&action_mem->action->refcount);
 538                 kvfree(action_mem);
 539         }
 540 }
 541 
 542 static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
 543                                       size_t num_actions,
 544                                       struct mlx5dr_action *actions[])
 545 {
 546         struct mlx5dr_rule_action_member *action_mem;
 547         int i;
 548 
 549         for (i = 0; i < num_actions; i++) {
 550                 action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
 551                 if (!action_mem)
 552                         goto free_action_members;
 553 
 554                 action_mem->action = actions[i];
 555                 INIT_LIST_HEAD(&action_mem->list);
 556                 list_add_tail(&action_mem->list, &rule->rule_actions_list);
 557                 refcount_inc(&action_mem->action->refcount);
 558         }
 559 
 560         return 0;
 561 
 562 free_action_members:
 563         dr_rule_remove_action_members(rule);
 564         return -ENOMEM;
 565 }
 566 
  567 /* When the ste pointer is no longer valid, e.g. when moving the ste to be
  568  * the first in the miss_list or back into the origin table,
  569  * all rule members attached to this ste must update their ste member
  570  * to the new pointer.
  571  */
 572 void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
 573                                     struct mlx5dr_ste *new_ste)
 574 {
 575         struct mlx5dr_rule_member *rule_mem;
 576 
 577         if (!list_empty(&ste->rule_list))
 578                 list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
 579                         rule_mem->ste = new_ste;
 580 }
 581 
 582 static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
 583                                        struct mlx5dr_rule_rx_tx *nic_rule)
 584 {
 585         struct mlx5dr_rule_member *rule_mem;
 586         struct mlx5dr_rule_member *tmp_mem;
 587 
 588         if (list_empty(&nic_rule->rule_members_list))
 589                 return;
 590         list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
 591                 list_del(&rule_mem->list);
 592                 list_del(&rule_mem->use_ste_list);
 593                 mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
 594                 kvfree(rule_mem);
 595         }
 596 }
 597 
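      /* Count the bits set in byte_mask (Kernighan's bit-clearing loop) */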
 598 static u16 dr_get_bits_per_mask(u16 byte_mask)
 599 {
 600         u16 bits = 0;
 601 
 602         while (byte_mask) {
 603                 byte_mask = byte_mask & (byte_mask - 1);
 604                 bits++;
 605         }
 606 
 607         return bits;
 608 }
 609 
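      /* Decide whether htbl should be rehashed into a larger table: only if
       * it may still grow, the byte mask provides more hash bits than the
       * current chunk size and both the collisions and the non-colliding
       * entries passed the increase threshold.
       */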
 610 static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
 611                                       struct mlx5dr_domain *dmn,
 612                                       struct mlx5dr_domain_rx_tx *nic_dmn)
 613 {
 614         struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
 615 
 616         if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
 617                 return false;
 618 
 619         if (!ctrl->may_grow)
 620                 return false;
 621 
 622         if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
 623                 return false;
 624 
 625         if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
 626             (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
 627                 return true;
 628 
 629         return false;
 630 }
 631 
 632 static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
 633                               struct mlx5dr_ste *ste)
 634 {
 635         struct mlx5dr_rule_member *rule_mem;
 636 
 637         rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
 638         if (!rule_mem)
 639                 return -ENOMEM;
 640 
 641         INIT_LIST_HEAD(&rule_mem->list);
 642         INIT_LIST_HEAD(&rule_mem->use_ste_list);
 643 
 644         rule_mem->ste = ste;
 645         list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
 646 
 647         list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);
 648 
 649         return 0;
 650 }
 651 
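      /* For actions that need STEs of their own, create a single-entry table
       * per extra STE, chain it after the previous STE in the rule and queue
       * it for sending to HW.
       */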
 652 static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
 653                                       struct mlx5dr_rule_rx_tx *nic_rule,
 654                                       struct list_head *send_ste_list,
 655                                       struct mlx5dr_ste *last_ste,
 656                                       u8 *hw_ste_arr,
 657                                       u32 new_hw_ste_arr_sz)
 658 {
 659         struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
 660         struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
 661         u8 num_of_builders = nic_matcher->num_of_builders;
 662         struct mlx5dr_matcher *matcher = rule->matcher;
 663         u8 *curr_hw_ste, *prev_hw_ste;
 664         struct mlx5dr_ste *action_ste;
 665         int i, k, ret;
 666 
  667         /* Two cases:
  668          * 1. num_of_builders is equal to new_hw_ste_arr_sz, the actions fit in the match STEs
  669          * 2. num_of_builders is less than new_hw_ste_arr_sz, new STEs were added
  670          *    to support the actions.
  671          */
 672         if (num_of_builders == new_hw_ste_arr_sz)
 673                 return 0;
 674 
 675         for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
 676                 curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
 677                 prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
 678                 action_ste = dr_rule_create_collision_htbl(matcher,
 679                                                            nic_matcher,
 680                                                            curr_hw_ste);
 681                 if (!action_ste)
 682                         return -ENOMEM;
 683 
 684                 mlx5dr_ste_get(action_ste);
 685 
  686                 /* When freeing the ste we go over its miss list, so add this ste to the list */
 687                 list_add_tail(&action_ste->miss_list_node,
 688                               mlx5dr_ste_get_miss_list(action_ste));
 689 
 690                 ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
 691                                           GFP_KERNEL);
 692                 if (!ste_info_arr[k])
 693                         goto err_exit;
 694 
 695                 /* Point current ste to the new action */
 696                 mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
 697                 ret = dr_rule_add_member(nic_rule, action_ste);
 698                 if (ret) {
 699                         mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
 700                         goto free_ste_info;
 701                 }
 702                 mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
 703                                                           curr_hw_ste,
 704                                                           ste_info_arr[k],
 705                                                           send_ste_list, false);
 706         }
 707 
 708         return 0;
 709 
 710 free_ste_info:
 711         kfree(ste_info_arr[k]);
 712 err_exit:
 713         mlx5dr_ste_put(action_ste, matcher, nic_matcher);
 714         return -ENOMEM;
 715 }
 716 
 717 static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
 718                                       struct mlx5dr_matcher_rx_tx *nic_matcher,
 719                                       struct mlx5dr_ste_htbl *cur_htbl,
 720                                       struct mlx5dr_ste *ste,
 721                                       u8 ste_location,
 722                                       u8 *hw_ste,
 723                                       struct list_head *miss_list,
 724                                       struct list_head *send_list)
 725 {
 726         struct mlx5dr_ste_send_info *ste_info;
 727 
 728         /* Take ref on table, only on first time this ste is used */
 729         mlx5dr_htbl_get(cur_htbl);
 730 
 731         /* new entry -> new branch */
 732         list_add_tail(&ste->miss_list_node, miss_list);
 733 
 734         mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
 735 
 736         ste->ste_chain_location = ste_location;
 737 
 738         ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
 739         if (!ste_info)
 740                 goto clean_ste_setting;
 741 
 742         if (mlx5dr_ste_create_next_htbl(matcher,
 743                                         nic_matcher,
 744                                         ste,
 745                                         hw_ste,
 746                                         DR_CHUNK_SIZE_1)) {
 747                 mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
 748                 goto clean_ste_info;
 749         }
 750 
 751         cur_htbl->ctrl.num_of_valid_entries++;
 752 
 753         mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
 754                                                   ste_info, send_list, false);
 755 
 756         return 0;
 757 
 758 clean_ste_info:
 759         kfree(ste_info);
 760 clean_ste_setting:
 761         list_del_init(&ste->miss_list_node);
 762         mlx5dr_htbl_put(cur_htbl);
 763 
 764         return -ENOMEM;
 765 }
 766 
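      /* Insert hw_ste into cur_htbl: take an unused slot as a new branch,
       * reuse a matching STE found in the slot's miss list, or, on collision,
       * first try to rehash the table once and otherwise chain a collision
       * entry.
       */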
 767 static struct mlx5dr_ste *
 768 dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
 769                           struct mlx5dr_rule_rx_tx *nic_rule,
 770                           struct list_head *send_ste_list,
 771                           struct mlx5dr_ste_htbl *cur_htbl,
 772                           u8 *hw_ste,
 773                           u8 ste_location,
 774                           struct mlx5dr_ste_htbl **put_htbl)
 775 {
 776         struct mlx5dr_matcher *matcher = rule->matcher;
 777         struct mlx5dr_domain *dmn = matcher->tbl->dmn;
 778         struct mlx5dr_matcher_rx_tx *nic_matcher;
 779         struct mlx5dr_domain_rx_tx *nic_dmn;
 780         struct mlx5dr_ste_htbl *new_htbl;
 781         struct mlx5dr_ste *matched_ste;
 782         struct list_head *miss_list;
 783         bool skip_rehash = false;
 784         struct mlx5dr_ste *ste;
 785         int index;
 786 
 787         nic_matcher = nic_rule->nic_matcher;
 788         nic_dmn = nic_matcher->nic_tbl->nic_dmn;
 789 
 790 again:
 791         index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
 792         miss_list = &cur_htbl->chunk->miss_list[index];
 793         ste = &cur_htbl->ste_arr[index];
 794 
 795         if (mlx5dr_ste_not_used_ste(ste)) {
 796                 if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
 797                                                ste, ste_location,
 798                                                hw_ste, miss_list,
 799                                                send_ste_list))
 800                         return NULL;
 801         } else {
 802                 /* Hash table index in use, check if this ste is in the miss list */
 803                 matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
 804                 if (matched_ste) {
  805                         /* If it is the last STE in the chain and has the same tag,
  806                          * it means that all the previous STEs are the same;
  807                          * if so, this rule is a duplicate.
  808                          */
 809                         if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
 810                                 return matched_ste;
 811 
 812                         mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
 813                 }
 814 
 815                 if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
  816                         /* Hash table index in use, try to resize the hash */
 817                         skip_rehash = true;
 818 
  819                         /* Hold the table till we update.
  820                          * Released in dr_rule_create_rule_nic()
  821                          */
 822                         *put_htbl = cur_htbl;
 823                         mlx5dr_htbl_get(cur_htbl);
 824 
 825                         new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
 826                                                   ste_location, send_ste_list);
 827                         if (!new_htbl) {
 828                                 mlx5dr_htbl_put(cur_htbl);
 829                                 mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
 830                                             cur_htbl->chunk_size);
 831                         } else {
 832                                 cur_htbl = new_htbl;
 833                         }
 834                         goto again;
 835                 } else {
 836                         /* Hash table index in use, add another collision (miss) */
 837                         ste = dr_rule_handle_collision(matcher,
 838                                                        nic_matcher,
 839                                                        ste,
 840                                                        hw_ste,
 841                                                        miss_list,
 842                                                        send_ste_list);
 843                         if (!ste) {
 844                                 mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
 845                                            index);
 846                                 return NULL;
 847                         }
 848                 }
 849         }
 850         return ste;
 851 }
 852 
 853 static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
 854                                       u32 s_idx, u32 e_idx)
 855 {
 856         u32 i;
 857 
 858         for (i = s_idx; i < e_idx; i++) {
 859                 if (value[i] & ~mask[i]) {
 860                         pr_info("Rule parameters contains a value not specified by mask\n");
 861                         return false;
 862                 }
 863         }
 864         return true;
 865 }
 866 
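      /* Validate the rule value size, copy it into param and check that every
       * value byte is covered by the matcher mask for each enabled match
       * criteria block.
       */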
 867 static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
 868                            struct mlx5dr_match_parameters *value,
 869                            struct mlx5dr_match_param *param)
 870 {
 871         u8 match_criteria = matcher->match_criteria;
 872         size_t value_size = value->match_sz;
 873         u8 *mask_p = (u8 *)&matcher->mask;
 874         u8 *param_p = (u8 *)param;
 875         u32 s_idx, e_idx;
 876 
 877         if (!value_size ||
 878             (value_size > sizeof(struct mlx5dr_match_param) ||
 879              (value_size % sizeof(u32)))) {
 880                 mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
 881                 return false;
 882         }
 883 
 884         mlx5dr_ste_copy_param(matcher->match_criteria, param, value);
 885 
 886         if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
 887                 s_idx = offsetof(struct mlx5dr_match_param, outer);
 888                 e_idx = min(s_idx + sizeof(param->outer), value_size);
 889 
 890                 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 891                         mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
 892                         return false;
 893                 }
 894         }
 895 
 896         if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
 897                 s_idx = offsetof(struct mlx5dr_match_param, misc);
 898                 e_idx = min(s_idx + sizeof(param->misc), value_size);
 899 
 900                 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 901                         mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
 902                         return false;
 903                 }
 904         }
 905 
 906         if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
 907                 s_idx = offsetof(struct mlx5dr_match_param, inner);
 908                 e_idx = min(s_idx + sizeof(param->inner), value_size);
 909 
 910                 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 911                         mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
 912                         return false;
 913                 }
 914         }
 915 
 916         if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
 917                 s_idx = offsetof(struct mlx5dr_match_param, misc2);
 918                 e_idx = min(s_idx + sizeof(param->misc2), value_size);
 919 
 920                 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 921                         mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
 922                         return false;
 923                 }
 924         }
 925 
 926         if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
 927                 s_idx = offsetof(struct mlx5dr_match_param, misc3);
 928                 e_idx = min(s_idx + sizeof(param->misc3), value_size);
 929 
 930                 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 931                         mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
 932                         return false;
 933                 }
 934         }
 935         return true;
 936 }
 937 
 938 static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
 939                                     struct mlx5dr_rule_rx_tx *nic_rule)
 940 {
 941         dr_rule_clean_rule_members(rule, nic_rule);
 942         return 0;
 943 }
 944 
 945 static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
 946 {
 947         dr_rule_destroy_rule_nic(rule, &rule->rx);
 948         dr_rule_destroy_rule_nic(rule, &rule->tx);
 949         return 0;
 950 }
 951 
 952 static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
 953 {
 954         struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
 955 
 956         switch (dmn->type) {
 957         case MLX5DR_DOMAIN_TYPE_NIC_RX:
 958                 dr_rule_destroy_rule_nic(rule, &rule->rx);
 959                 break;
 960         case MLX5DR_DOMAIN_TYPE_NIC_TX:
 961                 dr_rule_destroy_rule_nic(rule, &rule->tx);
 962                 break;
 963         case MLX5DR_DOMAIN_TYPE_FDB:
 964                 dr_rule_destroy_rule_fdb(rule);
 965                 break;
 966         default:
 967                 return -EINVAL;
 968         }
 969 
 970         dr_rule_remove_action_members(rule);
 971         kfree(rule);
 972         return 0;
 973 }
 974 
 975 static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param)
 976 {
 977         return (param->outer.ip_version == 6 ||
 978                 param->inner.ip_version == 6 ||
 979                 param->outer.ethertype == ETH_P_IPV6 ||
 980                 param->inner.ethertype == ETH_P_IPV6);
 981 }
 982 
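      /* On FDB domains a rule that matches on the source port is relevant for
       * one direction only: skip the RX copy for non-wire sources and the TX
       * copy for wire sources.
       */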
 983 static bool dr_rule_skip(enum mlx5dr_domain_type domain,
 984                          enum mlx5dr_ste_entry_type ste_type,
 985                          struct mlx5dr_match_param *mask,
 986                          struct mlx5dr_match_param *value)
 987 {
 988         if (domain != MLX5DR_DOMAIN_TYPE_FDB)
 989                 return false;
 990 
 991         if (mask->misc.source_port) {
 992                 if (ste_type == MLX5DR_STE_TYPE_RX)
 993                         if (value->misc.source_port != WIRE_PORT)
 994                                 return true;
 995 
 996                 if (ste_type == MLX5DR_STE_TYPE_TX)
 997                         if (value->misc.source_port == WIRE_PORT)
 998                                 return true;
 999         }
1000 
1001         /* Metadata C can be used to describe the source vport */
1002         if (mask->misc2.metadata_reg_c_0) {
1003                 if (ste_type == MLX5DR_STE_TYPE_RX)
1004                         if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) != WIRE_PORT)
1005                                 return true;
1006 
1007                 if (ste_type == MLX5DR_STE_TYPE_TX)
1008                         if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) == WIRE_PORT)
1009                                 return true;
1010         }
1011         return false;
1012 }
1013 
1014 static int
1015 dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
1016                         struct mlx5dr_rule_rx_tx *nic_rule,
1017                         struct mlx5dr_match_param *param,
1018                         size_t num_actions,
1019                         struct mlx5dr_action *actions[])
1020 {
1021         struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
1022         struct mlx5dr_matcher *matcher = rule->matcher;
1023         struct mlx5dr_domain *dmn = matcher->tbl->dmn;
1024         struct mlx5dr_matcher_rx_tx *nic_matcher;
1025         struct mlx5dr_domain_rx_tx *nic_dmn;
1026         struct mlx5dr_ste_htbl *htbl = NULL;
1027         struct mlx5dr_ste_htbl *cur_htbl;
1028         struct mlx5dr_ste *ste = NULL;
1029         LIST_HEAD(send_ste_list);
1030         u8 *hw_ste_arr = NULL;
1031         u32 new_hw_ste_arr_sz;
1032         int ret, i;
1033 
1034         nic_matcher = nic_rule->nic_matcher;
1035         nic_dmn = nic_matcher->nic_tbl->nic_dmn;
1036 
1037         INIT_LIST_HEAD(&nic_rule->rule_members_list);
1038 
1039         if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
1040                 return 0;
1041 
1042         ret = mlx5dr_matcher_select_builders(matcher,
1043                                              nic_matcher,
1044                                              dr_rule_is_ipv6(param));
1045         if (ret)
1046                 goto out_err;
1047 
1048         hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
1049         if (!hw_ste_arr) {
1050                 ret = -ENOMEM;
1051                 goto out_err;
1052         }
1053 
1054         /* Set the tag values inside the ste array */
1055         ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
1056         if (ret)
1057                 goto free_hw_ste;
1058 
1059         /* Set the actions values/addresses inside the ste array */
1060         ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
1061                                            num_actions, hw_ste_arr,
1062                                            &new_hw_ste_arr_sz);
1063         if (ret)
1064                 goto free_hw_ste;
1065 
1066         cur_htbl = nic_matcher->s_htbl;
1067 
 1068         /* Go over the array of STEs and build a dr_ste accordingly.
 1069          * The loop covers only the builders, whose number is less than or
 1070          * equal to the number of STEs, since actions may live in other STEs.
 1071          */
1072         for (i = 0; i < nic_matcher->num_of_builders; i++) {
1073                 /* Calculate CRC and keep new ste entry */
1074                 u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);
1075 
1076                 ste = dr_rule_handle_ste_branch(rule,
1077                                                 nic_rule,
1078                                                 &send_ste_list,
1079                                                 cur_htbl,
1080                                                 cur_hw_ste_ent,
1081                                                 i + 1,
1082                                                 &htbl);
1083                 if (!ste) {
1084                         mlx5dr_err(dmn, "Failed creating next branch\n");
1085                         ret = -ENOENT;
1086                         goto free_rule;
1087                 }
1088 
1089                 cur_htbl = ste->next_htbl;
1090 
1091                 /* Keep all STEs in the rule struct */
1092                 ret = dr_rule_add_member(nic_rule, ste);
1093                 if (ret) {
1094                         mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
1095                         goto free_ste;
1096                 }
1097 
1098                 mlx5dr_ste_get(ste);
1099         }
1100 
1101         /* Connect actions */
1102         ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
1103                                          ste, hw_ste_arr, new_hw_ste_arr_sz);
1104         if (ret) {
1105                 mlx5dr_dbg(dmn, "Failed apply actions\n");
1106                 goto free_rule;
1107         }
1108         ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
1109         if (ret) {
1110                 mlx5dr_err(dmn, "Failed sending ste!\n");
1111                 goto free_rule;
1112         }
1113 
1114         if (htbl)
1115                 mlx5dr_htbl_put(htbl);
1116 
1117         kfree(hw_ste_arr);
1118 
1119         return 0;
1120 
1121 free_ste:
1122         mlx5dr_ste_put(ste, matcher, nic_matcher);
1123 free_rule:
1124         dr_rule_clean_rule_members(rule, nic_rule);
1125         /* Clean all ste_info's */
1126         list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
1127                 list_del(&ste_info->send_list);
1128                 kfree(ste_info);
1129         }
1130 free_hw_ste:
1131         kfree(hw_ste_arr);
1132 out_err:
1133         return ret;
1134 }
1135 
1136 static int
1137 dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
1138                         struct mlx5dr_match_param *param,
1139                         size_t num_actions,
1140                         struct mlx5dr_action *actions[])
1141 {
1142         struct mlx5dr_match_param copy_param = {};
1143         int ret;
1144 
 1145         /* Copy match_param since it will be consumed during the first
1146          * nic_rule insertion.
1147          */
1148         memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));
1149 
1150         ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
1151                                       num_actions, actions);
1152         if (ret)
1153                 return ret;
1154 
1155         ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
1156                                       num_actions, actions);
1157         if (ret)
1158                 goto destroy_rule_nic_rx;
1159 
1160         return 0;
1161 
1162 destroy_rule_nic_rx:
1163         dr_rule_destroy_rule_nic(rule, &rule->rx);
1164         return ret;
1165 }
1166 
1167 static struct mlx5dr_rule *
1168 dr_rule_create_rule(struct mlx5dr_matcher *matcher,
1169                     struct mlx5dr_match_parameters *value,
1170                     size_t num_actions,
1171                     struct mlx5dr_action *actions[])
1172 {
1173         struct mlx5dr_domain *dmn = matcher->tbl->dmn;
1174         struct mlx5dr_match_param param = {};
1175         struct mlx5dr_rule *rule;
1176         int ret;
1177 
1178         if (!dr_rule_verify(matcher, value, &param))
1179                 return NULL;
1180 
1181         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1182         if (!rule)
1183                 return NULL;
1184 
1185         rule->matcher = matcher;
1186         INIT_LIST_HEAD(&rule->rule_actions_list);
1187 
1188         ret = dr_rule_add_action_members(rule, num_actions, actions);
1189         if (ret)
1190                 goto free_rule;
1191 
1192         switch (dmn->type) {
1193         case MLX5DR_DOMAIN_TYPE_NIC_RX:
1194                 rule->rx.nic_matcher = &matcher->rx;
1195                 ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
1196                                               num_actions, actions);
1197                 break;
1198         case MLX5DR_DOMAIN_TYPE_NIC_TX:
1199                 rule->tx.nic_matcher = &matcher->tx;
1200                 ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
1201                                               num_actions, actions);
1202                 break;
1203         case MLX5DR_DOMAIN_TYPE_FDB:
1204                 rule->rx.nic_matcher = &matcher->rx;
1205                 rule->tx.nic_matcher = &matcher->tx;
1206                 ret = dr_rule_create_rule_fdb(rule, &param,
1207                                               num_actions, actions);
1208                 break;
1209         default:
1210                 ret = -EINVAL;
1211                 break;
1212         }
1213 
1214         if (ret)
1215                 goto remove_action_members;
1216 
1217         return rule;
1218 
1219 remove_action_members:
1220         dr_rule_remove_action_members(rule);
1221 free_rule:
1222         kfree(rule);
1223         mlx5dr_info(dmn, "Failed creating rule\n");
1224         return NULL;
1225 }
1226 
1227 struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
1228                                        struct mlx5dr_match_parameters *value,
1229                                        size_t num_actions,
1230                                        struct mlx5dr_action *actions[])
1231 {
1232         struct mlx5dr_rule *rule;
1233 
1234         mutex_lock(&matcher->tbl->dmn->mutex);
1235         refcount_inc(&matcher->refcount);
1236 
1237         rule = dr_rule_create_rule(matcher, value, num_actions, actions);
1238         if (!rule)
1239                 refcount_dec(&matcher->refcount);
1240 
1241         mutex_unlock(&matcher->tbl->dmn->mutex);
1242 
1243         return rule;
1244 }
1245 
1246 int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
1247 {
1248         struct mlx5dr_matcher *matcher = rule->matcher;
1249         struct mlx5dr_table *tbl = rule->matcher->tbl;
1250         int ret;
1251 
1252         mutex_lock(&tbl->dmn->mutex);
1253 
1254         ret = dr_rule_destroy_rule(rule);
1255 
1256         mutex_unlock(&tbl->dmn->mutex);
1257 
1258         if (!ret)
1259                 refcount_dec(&matcher->refcount);
1260         return ret;
1261 }
