root/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c

DEFINITIONS

This source file includes the following definitions.
  1. bnxt_flow_get_dst_fid
  2. bnxt_tc_parse_redir
  3. bnxt_tc_parse_vlan
  4. bnxt_tc_parse_tunnel_set
  5. bnxt_tc_parse_actions
  6. bnxt_tc_parse_flow
  7. bnxt_hwrm_cfa_flow_free
  8. ipv6_mask_len
  9. is_wildcard
  10. is_exactmatch
  11. is_vlan_tci_allowed
  12. bits_set
  13. bnxt_hwrm_cfa_flow_alloc
  14. hwrm_cfa_decap_filter_alloc
  15. hwrm_cfa_decap_filter_free
  16. hwrm_cfa_encap_record_alloc
  17. hwrm_cfa_encap_record_free
  18. bnxt_tc_put_l2_node
  19. bnxt_tc_get_l2_node
  20. bnxt_tc_get_ref_flow_handle
  21. bnxt_tc_can_offload
  22. bnxt_tc_put_tunnel_node
  23. bnxt_tc_get_tunnel_node
  24. bnxt_tc_get_ref_decap_handle
  25. bnxt_tc_put_decap_l2_node
  26. bnxt_tc_put_decap_handle
  27. bnxt_tc_resolve_tunnel_hdrs
  28. bnxt_tc_get_decap_handle
  29. bnxt_tc_put_encap_handle
  30. bnxt_tc_get_encap_handle
  31. bnxt_tc_put_tunnel_handle
  32. bnxt_tc_get_tunnel_handle
  33. __bnxt_tc_del_flow
  34. bnxt_tc_set_flow_dir
  35. bnxt_tc_set_src_fid
  36. bnxt_tc_add_flow
  37. bnxt_tc_del_flow
  38. bnxt_tc_get_flow_stats
  39. bnxt_fill_cfa_stats_req
  40. bnxt_hwrm_cfa_flow_stats_get
  41. accumulate_val
  42. bnxt_flow_stats_accum
  43. bnxt_tc_flow_stats_batch_update
  44. bnxt_tc_flow_stats_batch_prep
  45. bnxt_tc_flow_stats_work
  46. bnxt_tc_setup_flower
  47. bnxt_init_tc
  48. bnxt_shutdown_tc

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID                        0xffff
#define VLAN_TCI(vid, prio)     ((vid) | ((prio) << VLAN_PRIO_SHIFT))

#define is_vlan_pcp_wildcarded(vlan_tci_mask)   \
        ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
#define is_vlan_pcp_exactmatch(vlan_tci_mask)   \
        ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
#define is_vlan_pcp_zero(vlan_tci)      \
        ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
#define is_vid_exactmatch(vlan_tci_mask)        \
        ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)

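/* For example, with the standard 802.1Q constants (VLAN_PRIO_SHIFT == 13,
 * VLAN_PRIO_MASK == 0xe000, VLAN_VID_MASK == 0x0fff), VLAN_TCI(5, 3)
 * evaluates to 0x6005: PCP 3 in the top three bits, VID 5 in the low
 * twelve. The helpers above test exactly those two bit-fields of a TCI
 * value or mask after byte-order conversion.
 */
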
/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
        struct bnxt *bp;

        /* check if dev belongs to the same switch */
        if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
                netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
                            dev->ifindex);
                return BNXT_FID_INVALID;
        }

        /* Is dev a VF-rep? */
        if (bnxt_dev_is_vf_rep(dev))
                return bnxt_vf_rep_get_fid(dev);

        bp = netdev_priv(dev);
        return bp->pf.fw_fid;
}

static int bnxt_tc_parse_redir(struct bnxt *bp,
                               struct bnxt_tc_actions *actions,
                               const struct flow_action_entry *act)
{
        struct net_device *dev = act->dev;

        if (!dev) {
                netdev_info(bp->dev, "no dev in mirred action");
                return -EINVAL;
        }

        actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
        actions->dst_dev = dev;
        return 0;
}

static int bnxt_tc_parse_vlan(struct bnxt *bp,
                              struct bnxt_tc_actions *actions,
                              const struct flow_action_entry *act)
{
        switch (act->id) {
        case FLOW_ACTION_VLAN_POP:
                actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
                break;
        case FLOW_ACTION_VLAN_PUSH:
                actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
                actions->push_vlan_tci = htons(act->vlan.vid);
                actions->push_vlan_tpid = act->vlan.proto;
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
                                    struct bnxt_tc_actions *actions,
                                    const struct flow_action_entry *act)
{
        const struct ip_tunnel_info *tun_info = act->tunnel;
        const struct ip_tunnel_key *tun_key = &tun_info->key;

        if (ip_tunnel_info_af(tun_info) != AF_INET) {
                netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
                return -EOPNOTSUPP;
        }

        actions->tun_encap_key = *tun_key;
        actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
        return 0;
}

static int bnxt_tc_parse_actions(struct bnxt *bp,
                                 struct bnxt_tc_actions *actions,
                                 struct flow_action *flow_action)
{
        struct flow_action_entry *act;
        int i, rc;

        if (!flow_action_has_entries(flow_action)) {
                netdev_info(bp->dev, "no actions");
                return -EINVAL;
        }

        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_DROP:
                        actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
                        return 0; /* don't bother with other actions */
                case FLOW_ACTION_REDIRECT:
                        rc = bnxt_tc_parse_redir(bp, actions, act);
                        if (rc)
                                return rc;
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE:
                        rc = bnxt_tc_parse_vlan(bp, actions, act);
                        if (rc)
                                return rc;
                        break;
                case FLOW_ACTION_TUNNEL_ENCAP:
                        rc = bnxt_tc_parse_tunnel_set(bp, actions, act);
                        if (rc)
                                return rc;
                        break;
                case FLOW_ACTION_TUNNEL_DECAP:
                        actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
                        break;
                default:
                        break;
                }
        }

        if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                        /* dst_fid is PF's fid */
                        actions->dst_fid = bp->pf.fw_fid;
                } else {
                        /* find the FID from dst_dev */
                        actions->dst_fid =
                                bnxt_flow_get_dst_fid(bp, actions->dst_dev);
                        if (actions->dst_fid == BNXT_FID_INVALID)
                                return -EINVAL;
                }
        }

        return 0;
}
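
/* Illustration (hypothetical interface names): a command such as
 *
 *   tc filter add dev <vf-rep> protocol ip parent ffff: flower \
 *           ip_proto tcp dst_port 80 \
 *           action mirred egress redirect dev <uplink>
 *
 * arrives here as a single FLOW_ACTION_REDIRECT entry; the parser sets
 * BNXT_TC_ACTION_FLAG_FWD and dst_dev, and the final dst_fid is then
 * resolved via bnxt_flow_get_dst_fid() above.
 */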

static int bnxt_tc_parse_flow(struct bnxt *bp,
                              struct flow_cls_offload *tc_flow_cmd,
                              struct bnxt_tc_flow *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
        struct flow_dissector *dissector = rule->match.dissector;

        /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
        if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
            (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
                netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                flow->l2_key.ether_type = match.key->n_proto;
                flow->l2_mask.ether_type = match.mask->n_proto;

                if (match.key->n_proto == htons(ETH_P_IP) ||
                    match.key->n_proto == htons(ETH_P_IPV6)) {
                        flow->l4_key.ip_proto = match.key->ip_proto;
                        flow->l4_mask.ip_proto = match.mask->ip_proto;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_match_eth_addrs match;

                flow_rule_match_eth_addrs(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
                ether_addr_copy(flow->l2_key.dmac, match.key->dst);
                ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);
                ether_addr_copy(flow->l2_key.smac, match.key->src);
                ether_addr_copy(flow->l2_mask.smac, match.mask->src);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;

                flow_rule_match_vlan(rule, &match);
                flow->l2_key.inner_vlan_tci =
                        cpu_to_be16(VLAN_TCI(match.key->vlan_id,
                                             match.key->vlan_priority));
                flow->l2_mask.inner_vlan_tci =
                        cpu_to_be16((VLAN_TCI(match.mask->vlan_id,
                                              match.mask->vlan_priority)));
                flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
                flow->l2_mask.inner_vlan_tpid = htons(0xffff);
                flow->l2_key.num_vlans = 1;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
                flow->l3_key.ipv4.daddr.s_addr = match.key->dst;
                flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst;
                flow->l3_key.ipv4.saddr.s_addr = match.key->src;
                flow->l3_mask.ipv4.saddr.s_addr = match.mask->src;
        } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
                flow->l3_key.ipv6.daddr = match.key->dst;
                flow->l3_mask.ipv6.daddr = match.mask->dst;
                flow->l3_key.ipv6.saddr = match.key->src;
                flow->l3_mask.ipv6.saddr = match.mask->src;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
                flow->l4_key.ports.dport = match.key->dst;
                flow->l4_mask.ports.dport = match.mask->dst;
                flow->l4_key.ports.sport = match.key->src;
                flow->l4_mask.ports.sport = match.mask->src;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
                struct flow_match_icmp match;

                flow_rule_match_icmp(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
                flow->l4_key.icmp.type = match.key->type;
                flow->l4_key.icmp.code = match.key->code;
                flow->l4_mask.icmp.type = match.mask->type;
                flow->l4_mask.icmp.code = match.mask->code;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_enc_ipv4_addrs(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
                flow->tun_key.u.ipv4.dst = match.key->dst;
                flow->tun_mask.u.ipv4.dst = match.mask->dst;
                flow->tun_key.u.ipv4.src = match.key->src;
                flow->tun_mask.u.ipv4.src = match.mask->src;
        } else if (flow_rule_match_key(rule,
                                       FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;

                flow_rule_match_enc_keyid(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
                flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid);
                flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_enc_ports(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
                flow->tun_key.tp_dst = match.key->dst;
                flow->tun_mask.tp_dst = match.mask->dst;
                flow->tun_key.tp_src = match.key->src;
                flow->tun_mask.tp_src = match.mask->src;
        }

        return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
                                   struct bnxt_tc_flow_node *flow_node)
{
        struct hwrm_cfa_flow_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
        if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
                req.ext_flow_handle = flow_node->ext_flow_handle;
        else
                req.flow_handle = flow_node->flow_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        return rc;
}

static int ipv6_mask_len(struct in6_addr *mask)
{
        int mask_len = 0, i;

        for (i = 0; i < 4; i++)
                mask_len += inet_mask_len(mask->s6_addr32[i]);

        return mask_len;
}
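
/* For example, a prefix mask of ffff:ffff:ffff:ffff:: yields 64
 * (32 + 32 + 0 + 0 across the four 32-bit words), and an all-ones
 * mask yields 128.
 */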

static bool is_wildcard(void *mask, int len)
{
        const u8 *p = mask;
        int i;

        for (i = 0; i < len; i++) {
                if (p[i] != 0)
                        return false;
        }
        return true;
}

static bool is_exactmatch(void *mask, int len)
{
        const u8 *p = mask;
        int i;

        for (i = 0; i < len; i++)
                if (p[i] != 0xff)
                        return false;

        return true;
}
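
/* For example, a 4-byte mask of 00:00:00:00 is a wildcard and
 * ff:ff:ff:ff is an exact match; ff:ff:00:00 is neither. These two
 * helpers are what classify a flow as L2-only (all L3/L4 masks
 * wildcarded) in bnxt_hwrm_cfa_flow_alloc() below.
 */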

static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
                                __be16 vlan_tci)
{
        /* The VLAN priority must be either exactly zero or fully wildcarded,
         * and the VLAN id must be an exact match.
         */
        if (is_vid_exactmatch(vlan_tci_mask) &&
            ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
              is_vlan_pcp_zero(vlan_tci)) ||
             is_vlan_pcp_wildcarded(vlan_tci_mask)))
                return true;

        return false;
}
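
/* Examples (values shown after ntohs()):
 *   mask 0xffff, tci 0x0005 (VID 5, PCP 0) -> allowed: PCP exact and zero
 *   mask 0x0fff                            -> allowed: PCP fully wildcarded
 *   mask 0xffff, tci 0x6005 (VID 5, PCP 3) -> rejected: non-zero PCP
 *   mask 0x0ff0                            -> rejected: VID not exact match
 */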

static bool bits_set(void *key, int len)
{
        const u8 *p = key;
        int i;

        for (i = 0; i < len; i++)
                if (p[i] != 0)
                        return true;

        return false;
}

static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    __le16 ref_flow_handle,
                                    __le32 tunnel_handle,
                                    struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_actions *actions = &flow->actions;
        struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
        struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
        struct hwrm_cfa_flow_alloc_input req = { 0 };
        struct hwrm_cfa_flow_alloc_output *resp;
        u16 flow_flags = 0, action_flags = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

        req.src_fid = cpu_to_le16(flow->src_fid);
        req.ref_flow_handle = ref_flow_handle;

        if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                req.tunnel_handle = tunnel_handle;
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
        }

        req.ethertype = flow->l2_key.ether_type;
        req.ip_proto = flow->l4_key.ip_proto;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
                memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
                memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
        }

        if (flow->l2_key.num_vlans > 0) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
                /* FW expects the inner_vlan_tci value to be set
                 * in outer_vlan_tci when num_vlans is 1 (which is
                 * always the case in TC).
                 */
                req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
        }

        /* If all IP and L4 fields are wildcarded then this is an L2 flow */
        if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
            is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
        } else {
                flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

                if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
                        req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
                        req.ip_dst_mask_len =
                                inet_mask_len(l3_mask->ipv4.daddr.s_addr);
                        req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
                        req.ip_src_mask_len =
                                inet_mask_len(l3_mask->ipv4.saddr.s_addr);
                } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
                        memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
                               sizeof(req.ip_dst));
                        req.ip_dst_mask_len =
                                        ipv6_mask_len(&l3_mask->ipv6.daddr);
                        memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
                               sizeof(req.ip_src));
                        req.ip_src_mask_len =
                                        ipv6_mask_len(&l3_mask->ipv6.saddr);
                }
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
                req.l4_src_port = flow->l4_key.ports.sport;
                req.l4_src_port_mask = flow->l4_mask.ports.sport;
                req.l4_dst_port = flow->l4_key.ports.dport;
                req.l4_dst_port_mask = flow->l4_mask.ports.dport;
        } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
                /* l4 ports serve as type/code when ip_proto is ICMP */
                req.l4_src_port = htons(flow->l4_key.icmp.type);
                req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
                req.l4_dst_port = htons(flow->l4_key.icmp.code);
                req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
        }
        req.flags = cpu_to_le16(flow_flags);

        if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
        } else {
                if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                        action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
                        req.dst_fid = cpu_to_le16(actions->dst_fid);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
                        req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        /* Rewrite config with tpid = 0 implies vlan pop */
                        req.l2_rewrite_vlan_tpid = 0;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
        }
        req.action_flags = cpu_to_le16(action_flags);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                resp = bnxt_get_hwrm_resp_addr(bp, &req);
                /* CFA_FLOW_ALLOC response interpretation:
                 *                  fw with          fw with
                 *                  16-bit           64-bit
                 *                  flow handle      flow handle
                 *                  ===========      ===========
                 * flow_handle      flow handle      flow context id
                 * ext_flow_handle  INVALID          flow handle
                 * flow_id          INVALID          flow counter id
                 */
                flow_node->flow_handle = resp->flow_handle;
                if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
                        flow_node->ext_flow_handle = resp->ext_flow_handle;
                        flow_node->flow_id = resp->flow_id;
                }
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
                                       struct bnxt_tc_flow *flow,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 ref_decap_handle,
                                       __le32 *decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
        struct hwrm_cfa_decap_filter_alloc_output *resp;
        struct ip_tunnel_key *tun_key = &flow->tun_key;
        u32 enables = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

        req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
                   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
        req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
        req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
                /* tunnel_id is wrongly defined in the HSI defn. as __le32 */
                req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
                ether_addr_copy(req.dst_macaddr, l2_info->dmac);
        }
        if (l2_info->num_vlans) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
                req.t_ivlan_vid = l2_info->inner_vlan_tci;
        }

        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
        req.ethertype = htons(ETH_P_IP);

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
                req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
                req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
                req.src_ipaddr[0] = tun_key->u.ipv4.src;
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
                req.dst_port = tun_key->tp_dst;
        }

        /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
         * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
         */
        req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
        req.enables = cpu_to_le32(enables);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                resp = bnxt_get_hwrm_resp_addr(bp, &req);
                *decap_filter_handle = resp->decap_filter_id;
        } else {
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        }
        mutex_unlock(&bp->hwrm_cmd_lock);

        return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
                                      __le32 decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
        req.decap_filter_id = decap_filter_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        return rc;
}

static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
                                       struct ip_tunnel_key *encap_key,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 *encap_record_handle)
{
        struct hwrm_cfa_encap_record_alloc_input req = { 0 };
        struct hwrm_cfa_encap_record_alloc_output *resp;
        struct hwrm_cfa_encap_data_vxlan *encap =
                        (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
        struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
                                (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

        req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

        ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
        ether_addr_copy(encap->src_mac_addr, l2_info->smac);
        if (l2_info->num_vlans) {
                encap->num_vlan_tags = l2_info->num_vlans;
                encap->ovlan_tci = l2_info->inner_vlan_tci;
                encap->ovlan_tpid = l2_info->inner_vlan_tpid;
        }

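        /* The ver_hlen byte mirrors struct iphdr: version 4 and IHL 5
         * (five 32-bit words, i.e. a 20-byte outer header with no
         * options).
         */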
        encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
        encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
        encap_ipv4->ttl = encap_key->ttl;

        encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
        encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
        encap_ipv4->protocol = IPPROTO_UDP;

        encap->dst_port = encap_key->tp_dst;
        encap->vni = tunnel_id_to_key32(encap_key->tun_id);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                resp = bnxt_get_hwrm_resp_addr(bp, &req);
                *encap_record_handle = resp->encap_record_id;
        } else {
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        }
        mutex_unlock(&bp->hwrm_cmd_lock);

        return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
                                      __le32 encap_record_handle)
{
        struct hwrm_cfa_encap_record_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
        req.encap_record_id = encap_record_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        return rc;
}

static int bnxt_tc_put_l2_node(struct bnxt *bp,
                               struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* remove flow_node from the L2 shared flow list */
        list_del(&flow_node->l2_list_node);
        if (--l2_node->refcount == 0) {
                rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
                                            tc_info->l2_ht_params);
                if (rc)
                        netdev_err(bp->dev,
                                   "Error: %s: rhashtable_remove_fast: %d",
                                   __func__, rc);
                kfree_rcu(l2_node, rcu);
        }
        return 0;
}

static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
                    struct rhashtable_params ht_params,
                    struct bnxt_tc_l2_key *l2_key)
{
        struct bnxt_tc_l2_node *l2_node;
        int rc;

        l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
        if (!l2_node) {
                l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
                if (!l2_node) {
                        rc = -ENOMEM;
                        return NULL;
                }

                l2_node->key = *l2_key;
                rc = rhashtable_insert_fast(l2_table, &l2_node->node,
                                            ht_params);
                if (rc) {
                        kfree_rcu(l2_node, rcu);
                        netdev_err(bp->dev,
                                   "Error: %s: rhashtable_insert_fast: %d",
                                   __func__, rc);
                        return NULL;
                }
                INIT_LIST_HEAD(&l2_node->common_l2_flows);
        }
        return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                            struct bnxt_tc_flow_node *flow_node,
                            __le16 *ref_flow_handle)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow_node *ref_flow_node;
        struct bnxt_tc_l2_node *l2_node;

        l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
                                      tc_info->l2_ht_params,
                                      &flow->l2_key);
        if (!l2_node)
                return -1;

        /* If any other flow is using this l2_node, use its flow_handle
         * as the ref_flow_handle
         */
        if (l2_node->refcount > 0) {
                ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
                                                 struct bnxt_tc_flow_node,
                                                 l2_list_node);
                *ref_flow_handle = ref_flow_node->flow_handle;
        } else {
                *ref_flow_handle = cpu_to_le16(0xffff);
        }

        /* Insert the l2_node into the flow_node so that subsequent flows
         * with a matching l2 key can use the flow_handle of this flow
         * as their ref_flow_handle
         */
        flow_node->l2_node = l2_node;
        list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
        l2_node->refcount++;
        return 0;
}
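
/* Note: the first flow with a given L2 key thus gets ref_flow_handle
 * 0xffff (no reference), and its own flow_handle becomes the reference
 * for later flows sharing that L2 key.
 */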

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
        /* If L4 ports are specified then ip_proto must be TCP or UDP */
        if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
            (flow->l4_key.ip_proto != IPPROTO_TCP &&
             flow->l4_key.ip_proto != IPPROTO_UDP)) {
                netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
                            flow->l4_key.ip_proto);
                return false;
        }

        /* Currently source/dest MAC cannot be partial wildcard */
        if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
            !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
                return false;
        }
        if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
            !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
                return false;
        }

        /* Currently VLAN fields cannot be partial wildcard */
        if (bits_set(&flow->l2_key.inner_vlan_tci,
                     sizeof(flow->l2_key.inner_vlan_tci)) &&
            !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
                                 flow->l2_key.inner_vlan_tci)) {
                netdev_info(bp->dev, "Unsupported VLAN TCI\n");
                return false;
        }
        if (bits_set(&flow->l2_key.inner_vlan_tpid,
                     sizeof(flow->l2_key.inner_vlan_tpid)) &&
            !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
                           sizeof(flow->l2_mask.inner_vlan_tpid))) {
                netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
                return false;
        }

        /* Currently Ethertype must be set */
        if (!is_exactmatch(&flow->l2_mask.ether_type,
                           sizeof(flow->l2_mask.ether_type))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
                return false;
        }

        return true;
}

/* Returns the final refcount of the node on success
 * or a -ve error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
                                   struct rhashtable *tunnel_table,
                                   struct rhashtable_params *ht_params,
                                   struct bnxt_tc_tunnel_node *tunnel_node)
{
        int rc;

        if (--tunnel_node->refcount == 0) {
                rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
                                            *ht_params);
                if (rc) {
                        netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
                        rc = -1;
                }
                kfree_rcu(tunnel_node, rcu);
                return rc;
        } else {
                return tunnel_node->refcount;
        }
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
                        struct rhashtable_params *ht_params,
                        struct ip_tunnel_key *tun_key)
{
        struct bnxt_tc_tunnel_node *tunnel_node;
        int rc;

        tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
        if (!tunnel_node) {
                tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
                if (!tunnel_node) {
                        rc = -ENOMEM;
                        goto err;
                }

                tunnel_node->key = *tun_key;
                tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
                rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
                                            *ht_params);
                if (rc) {
                        kfree_rcu(tunnel_node, rcu);
                        goto err;
                }
        }
        tunnel_node->refcount++;
        return tunnel_node;
err:
        netdev_info(bp->dev, "error rc=%d", rc);
        return NULL;
}

static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
                                        struct bnxt_tc_flow *flow,
                                        struct bnxt_tc_l2_key *l2_key,
                                        struct bnxt_tc_flow_node *flow_node,
                                        __le32 *ref_decap_handle)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow_node *ref_flow_node;
        struct bnxt_tc_l2_node *decap_l2_node;

        decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
                                            tc_info->decap_l2_ht_params,
                                            l2_key);
        if (!decap_l2_node)
                return -1;

        /* If any other flow is using this decap_l2_node, use its decap_handle
         * as the ref_decap_handle
         */
        if (decap_l2_node->refcount > 0) {
                ref_flow_node =
                        list_first_entry(&decap_l2_node->common_l2_flows,
                                         struct bnxt_tc_flow_node,
                                         decap_l2_list_node);
                *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
        } else {
                *ref_decap_handle = INVALID_TUNNEL_HANDLE;
        }

        /* Insert the l2_node into the flow_node so that subsequent flows
         * with a matching decap l2 key can use the decap_filter_handle of
         * this flow as their ref_decap_handle
         */
        flow_node->decap_l2_node = decap_l2_node;
        list_add(&flow_node->decap_l2_list_node,
                 &decap_l2_node->common_l2_flows);
        decap_l2_node->refcount++;
        return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
                                      struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* remove flow_node from the decap L2 shared flow list */
        list_del(&flow_node->decap_l2_list_node);
        if (--decap_l2_node->refcount == 0) {
                rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
                                            &decap_l2_node->node,
                                            tc_info->decap_l2_ht_params);
                if (rc)
                        netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
                kfree_rcu(decap_l2_node, rcu);
        }
}

static void bnxt_tc_put_decap_handle(struct bnxt *bp,
                                     struct bnxt_tc_flow_node *flow_node)
{
        __le32 decap_handle = flow_node->decap_node->tunnel_handle;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        if (flow_node->decap_l2_node)
                bnxt_tc_put_decap_l2_node(bp, flow_node);

        rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
                                     &tc_info->decap_ht_params,
                                     flow_node->decap_node);
        if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
                hwrm_cfa_decap_filter_free(bp, decap_handle);
}

static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
                                       struct ip_tunnel_key *tun_key,
                                       struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
        struct net_device *real_dst_dev = bp->dev;
        struct flowi4 flow = { {0} };
        struct net_device *dst_dev;
        struct neighbour *nbr;
        struct rtable *rt;
        int rc;

        flow.flowi4_proto = IPPROTO_UDP;
        flow.fl4_dport = tun_key->tp_dst;
        flow.daddr = tun_key->u.ipv4.dst;

        rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
        if (IS_ERR(rt)) {
                netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
                return -EOPNOTSUPP;
        }

        /* The route must either point to the real_dst_dev or a dst_dev that
         * uses the real_dst_dev.
         */
        dst_dev = rt->dst.dev;
        if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
                struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

                if (vlan->real_dev != real_dst_dev) {
                        netdev_info(bp->dev,
                                    "dst_dev(%s) doesn't use PF-if(%s)",
                                    netdev_name(dst_dev),
                                    netdev_name(real_dst_dev));
                        rc = -EOPNOTSUPP;
                        goto put_rt;
                }
                l2_info->inner_vlan_tci = htons(vlan->vlan_id);
                l2_info->inner_vlan_tpid = vlan->vlan_proto;
                l2_info->num_vlans = 1;
#endif
        } else if (dst_dev != real_dst_dev) {
                netdev_info(bp->dev,
                            "dst_dev(%s) for %pI4b is not PF-if(%s)",
                            netdev_name(dst_dev), &flow.daddr,
                            netdev_name(real_dst_dev));
                rc = -EOPNOTSUPP;
                goto put_rt;
        }

        nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
        if (!nbr) {
                netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
                            &flow.daddr);
                rc = -EOPNOTSUPP;
                goto put_rt;
        }

        tun_key->u.ipv4.src = flow.saddr;
        tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
        neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
        ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
        neigh_release(nbr);
        ip_rt_put(rt);

        return 0;
put_rt:
        ip_rt_put(rt);
        return rc;
#else
        return -EOPNOTSUPP;
#endif
}
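
/* On success the caller is left with a fully resolved outer header:
 * tun_key->u.ipv4.src and ttl from the route, dmac from the neighbour
 * entry and smac from the egress netdev, which is all the L2/L3 data
 * the encap/decap records below need.
 */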

static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    struct bnxt_tc_flow_node *flow_node,
                                    __le32 *decap_filter_handle)
{
        struct ip_tunnel_key *decap_key = &flow->tun_key;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_l2_key l2_info = { {0} };
        struct bnxt_tc_tunnel_node *decap_node;
        struct ip_tunnel_key tun_key = { 0 };
        struct bnxt_tc_l2_key *decap_l2_info;
        __le32 ref_decap_handle;
        int rc;

        /* Check if there's another flow using the same tunnel decap.
         * If not, add this tunnel to the table and resolve the other
         * tunnel header fields. Ignore src_port in the tunnel_key,
         * since it is not required for decap filters.
         */
        decap_key->tp_src = 0;
        decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
                                             &tc_info->decap_ht_params,
                                             decap_key);
        if (!decap_node)
                return -ENOMEM;

        flow_node->decap_node = decap_node;

        if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;

        /* Resolve the L2 fields for tunnel decap:
         * resolve the route for the remote VTEP (saddr) of the decap key
         * and find its next-hop MAC addresses.
         */
        tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
        tun_key.tp_dst = flow->tun_key.tp_dst;
        rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
        if (rc)
                goto put_decap;

        decap_l2_info = &decap_node->l2_info;
        /* decap smac is wildcarded */
        ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
        if (l2_info.num_vlans) {
                decap_l2_info->num_vlans = l2_info.num_vlans;
                decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
                decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
        }
        flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

        /* For getting a decap_filter_handle we first need to check if
         * there are any other decap flows that share the same tunnel L2
         * key and if so, pass that flow's decap_filter_handle as the
         * ref_decap_handle for this flow.
         */
        rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
                                          &ref_decap_handle);
        if (rc)
                goto put_decap;

        /* Issue the hwrm cmd to allocate a decap filter handle */
        rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
                                         ref_decap_handle,
                                         &decap_node->tunnel_handle);
        if (rc)
                goto put_decap_l2;

done:
        *decap_filter_handle = decap_node->tunnel_handle;
        return 0;

put_decap_l2:
        bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
        bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
                                &tc_info->decap_ht_params,
                                flow_node->decap_node);
        return rc;
}

static void bnxt_tc_put_encap_handle(struct bnxt *bp,
                                     struct bnxt_tc_tunnel_node *encap_node)
{
        __le32 encap_handle = encap_node->tunnel_handle;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
                                     &tc_info->encap_ht_params, encap_node);
        if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
                hwrm_cfa_encap_record_free(bp, encap_handle);
}

/* Look up the tunnel encap table and check if there's an encap_handle
 * alloc'd already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    struct bnxt_tc_flow_node *flow_node,
                                    __le32 *encap_handle)
{
        struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_tunnel_node *encap_node;
        int rc;

        /* Check if there's another flow using the same tunnel encap.
         * If not, add this tunnel to the table and resolve the other
         * tunnel header fields
         */
        encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
                                             &tc_info->encap_ht_params,
                                             encap_key);
        if (!encap_node)
                return -ENOMEM;

        flow_node->encap_node = encap_node;

        if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;

        rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
        if (rc)
                goto put_encap;

        /* Allocate a new tunnel encap record */
        rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
                                         &encap_node->tunnel_handle);
        if (rc)
                goto put_encap;

done:
        *encap_handle = encap_node->tunnel_handle;
        return 0;

put_encap:
        bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
                                &tc_info->encap_ht_params, encap_node);
        return rc;
}

static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
                                      struct bnxt_tc_flow *flow,
                                      struct bnxt_tc_flow_node *flow_node)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                bnxt_tc_put_decap_handle(bp, flow_node);
        else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
                bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
                                     struct bnxt_tc_flow *flow,
                                     struct bnxt_tc_flow_node *flow_node,
                                     __le32 *tunnel_handle)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                return bnxt_tc_get_decap_handle(bp, flow, flow_node,
                                                tunnel_handle);
        else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
                return bnxt_tc_get_encap_handle(bp, flow, flow_node,
                                                tunnel_handle);
        else
                return 0;
}

static int __bnxt_tc_del_flow(struct bnxt *bp,
                              struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* send HWRM cmd to free the flow-id */
        bnxt_hwrm_cfa_flow_free(bp, flow_node);

        mutex_lock(&tc_info->lock);

        /* release references to any tunnel encap/decap nodes */
        bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

        /* release reference to l2 node */
        bnxt_tc_put_l2_node(bp, flow_node);

        mutex_unlock(&tc_info->lock);

        rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
                                    tc_info->flow_ht_params);
        if (rc)
                netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
                           __func__, rc);

        kfree_rcu(flow_node, rcu);
        return 0;
}

static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                 u16 src_fid)
{
        flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
}

static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                u16 src_fid)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                flow->src_fid = bp->pf.fw_fid;
        else
                flow->src_fid = src_fid;
}

/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
1249 static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
1250                             struct flow_cls_offload *tc_flow_cmd)
1251 {
1252         struct bnxt_tc_flow_node *new_node, *old_node;
1253         struct bnxt_tc_info *tc_info = bp->tc_info;
1254         struct bnxt_tc_flow *flow;
1255         __le32 tunnel_handle = 0;
1256         __le16 ref_flow_handle;
1257         int rc;
1258 
1259         /* allocate memory for the new flow and its node */
1260         new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
1261         if (!new_node) {
1262                 rc = -ENOMEM;
1263                 goto done;
1264         }
1265         new_node->cookie = tc_flow_cmd->cookie;
1266         flow = &new_node->flow;
1267 
1268         rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
1269         if (rc)
1270                 goto free_node;
1271 
1272         bnxt_tc_set_src_fid(bp, flow, src_fid);
1273         bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
1274 
1275         if (!bnxt_tc_can_offload(bp, flow)) {
1276                 rc = -EOPNOTSUPP;
1277                 goto free_node;
1278         }
1279 
1280         /* If a flow exists with the same cookie, delete it */
1281         old_node = rhashtable_lookup_fast(&tc_info->flow_table,
1282                                           &tc_flow_cmd->cookie,
1283                                           tc_info->flow_ht_params);
1284         if (old_node)
1285                 __bnxt_tc_del_flow(bp, old_node);
1286 
1287         /* Check if the L2 part of the flow has been offloaded already.
1288          * If so, bump up its refcnt and get its reference handle.
1289          */
1290         mutex_lock(&tc_info->lock);
1291         rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
1292         if (rc)
1293                 goto unlock;
1294 
1295         /* If the flow involves tunnel encap/decap, get tunnel_handle */
1296         rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
1297         if (rc)
1298                 goto put_l2;
1299 
1300         /* send HWRM cmd to alloc the flow */
1301         rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
1302                                       tunnel_handle, new_node);
1303         if (rc)
1304                 goto put_tunnel;
1305 
1306         flow->lastused = jiffies;
1307         spin_lock_init(&flow->stats_lock);
1308         /* add new flow to flow-table */
1309         rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
1310                                     tc_info->flow_ht_params);
1311         if (rc)
1312                 goto hwrm_flow_free;
1313 
1314         mutex_unlock(&tc_info->lock);
1315         return 0;
1316 
1317 hwrm_flow_free:
1318         bnxt_hwrm_cfa_flow_free(bp, new_node);
1319 put_tunnel:
1320         bnxt_tc_put_tunnel_handle(bp, flow, new_node);
1321 put_l2:
1322         bnxt_tc_put_l2_node(bp, new_node);
1323 unlock:
1324         mutex_unlock(&tc_info->lock);
1325 free_node:
1326         kfree_rcu(new_node, rcu);
1327 done:
1328         netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
1329                    __func__, tc_flow_cmd->cookie, rc);
1330         return rc;
1331 }
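
/*
 * The error labels above unwind in exact reverse order of acquisition
 * (flow-table insert -> HWRM flow -> tunnel handle -> L2 ref -> mutex
 * -> flow node), so a failure at any step releases only what was taken
 * before it.
 */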
1332 
1333 static int bnxt_tc_del_flow(struct bnxt *bp,
1334                             struct flow_cls_offload *tc_flow_cmd)
1335 {
1336         struct bnxt_tc_info *tc_info = bp->tc_info;
1337         struct bnxt_tc_flow_node *flow_node;
1338 
1339         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1340                                            &tc_flow_cmd->cookie,
1341                                            tc_info->flow_ht_params);
1342         if (!flow_node)
1343                 return -EINVAL;
1344 
1345         return __bnxt_tc_del_flow(bp, flow_node);
1346 }
1347 
1348 static int bnxt_tc_get_flow_stats(struct bnxt *bp,
1349                                   struct flow_cls_offload *tc_flow_cmd)
1350 {
1351         struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
1352         struct bnxt_tc_info *tc_info = bp->tc_info;
1353         struct bnxt_tc_flow_node *flow_node;
1354         struct bnxt_tc_flow *flow;
1355         unsigned long lastused;
1356 
1357         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1358                                            &tc_flow_cmd->cookie,
1359                                            tc_info->flow_ht_params);
1360         if (!flow_node)
1361                 return -1;
1362 
1363         flow = &flow_node->flow;
1364         curr_stats = &flow->stats;
1365         prev_stats = &flow->prev_stats;
1366 
1367         spin_lock(&flow->stats_lock);
1368         stats.packets = curr_stats->packets - prev_stats->packets;
1369         stats.bytes = curr_stats->bytes - prev_stats->bytes;
1370         *prev_stats = *curr_stats;
1371         lastused = flow->lastused;
1372         spin_unlock(&flow->stats_lock);
1373 
1374         flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
1375                           lastused);
1376         return 0;
1377 }
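
/*
 * flow_stats_update() accumulates in the TC core, so the handler above
 * reports only the delta since the previous query and snapshots the
 * current counters into prev_stats; lastused is sampled inside the same
 * stats_lock section so it stays consistent with the counters.
 */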
1378 
1379 static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
1380                                     struct bnxt_tc_flow_node *flow_node,
1381                                     __le16 *flow_handle, __le32 *flow_id)
1382 {
1383         u16 handle;
1384 
1385         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
1386                 *flow_id = flow_node->flow_id;
1387 
1388                 /* If flow_id is used to fetch flow stats then:
1389                  * 1. lower 12 bits of flow_handle must be set to all 1s.
1390                  * 2. 15th bit of flow_handle must specify the flow
1391                  *    direction (TX/RX).
1392                  */
1393                 if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
1394                         handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
1395                                  CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
1396                 else
1397                         handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
1398 
1399                 *flow_handle = cpu_to_le16(handle);
1400         } else {
1401                 *flow_handle = flow_node->flow_handle;
1402         }
1403 }
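
/*
 * Worked example (assuming, per the comment above, that
 * CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK is the all-ones low-12-bit
 * pattern 0xfff and ..._DIR_RX sets bit 15): an RX flow sends
 * flow_handle 0x8fff and a TX flow sends 0x0fff, while the real
 * identifier travels in the 32-bit flow_id.
 */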
1404 
1405 static int
1406 bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
1407                              struct bnxt_tc_stats_batch stats_batch[])
1408 {
1409         struct hwrm_cfa_flow_stats_input req = { 0 };
1410         struct hwrm_cfa_flow_stats_output *resp;
1411         __le16 *req_flow_handles = &req.flow_handle_0;
1412         __le32 *req_flow_ids = &req.flow_id_0;
1413         int rc, i;
1414 
1415         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
1416         req.num_flows = cpu_to_le16(num_flows);
1417         for (i = 0; i < num_flows; i++) {
1418                 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1419 
1420                 bnxt_fill_cfa_stats_req(bp, flow_node,
1421                                         &req_flow_handles[i], &req_flow_ids[i]);
1422         }
1423 
1424         mutex_lock(&bp->hwrm_cmd_lock);
1425         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1426         if (!rc) {
1427                 __le64 *resp_packets;
1428                 __le64 *resp_bytes;
1429 
1430                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
1431                 resp_packets = &resp->packet_0;
1432                 resp_bytes = &resp->byte_0;
1433 
1434                 for (i = 0; i < num_flows; i++) {
1435                         stats_batch[i].hw_stats.packets =
1436                                                 le64_to_cpu(resp_packets[i]);
1437                         stats_batch[i].hw_stats.bytes =
1438                                                 le64_to_cpu(resp_bytes[i]);
1439                 }
1440         } else {
1441                 netdev_info(bp->dev, "error rc=%d", rc);
1442         }
1443         mutex_unlock(&bp->hwrm_cmd_lock);
1444 
1445         return rc;
1446 }
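
/*
 * Indexing &req.flow_handle_0 and &req.flow_id_0 as arrays relies on
 * the HWRM request laying out its per-flow fields contiguously;
 * BNXT_FLOW_STATS_BATCH_MAX matches the slots available in a single
 * request, which is why callers batch at that size.
 */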
1447 
1448 /* Add val to accum while handling a possible wraparound
1449  * of val. Even though val is of type u64, its actual width
1450  * is denoted by mask and it will wrap around beyond that width.
1451  */
1452 static void accumulate_val(u64 *accum, u64 val, u64 mask)
1453 {
1454 #define low_bits(x, mask)               ((x) & (mask))
1455 #define high_bits(x, mask)              ((x) & ~(mask))
1456         bool wrapped = val < low_bits(*accum, mask);
1457 
1458         *accum = high_bits(*accum, mask) + val;
1459         if (wrapped)
1460                 *accum += (mask + 1);
1461 }
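
/*
 * Illustrative sketch (not part of the driver): a worked wrap-around
 * case for a 28-bit counter.  ex_accum_demo() is a hypothetical helper
 * added only to demonstrate the math above.
 */
static inline u64 ex_accum_demo(void)
{
        u64 accum = 0x0ffffff0;         /* previously accumulated count */
        u64 width_mask = 0x0fffffff;    /* 28-bit HW counter */

        /* HW now reports 0x10: the counter wrapped past 2^28 */
        accumulate_val(&accum, 0x10, width_mask);

        /* accum is now 0x10000010, i.e. 0x20 new packets were added */
        return accum;
}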
1462 
1463 /* The HW counters' width is much less than 64 bits.
1464  * Handle possible wrap-around while updating the stat counters.
1465  */
1466 static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
1467                                   struct bnxt_tc_flow_stats *acc_stats,
1468                                   struct bnxt_tc_flow_stats *hw_stats)
1469 {
1470         accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
1471         accumulate_val(&acc_stats->packets, hw_stats->packets,
1472                        tc_info->packets_mask);
1473 }
1474 
1475 static int
1476 bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
1477                                 struct bnxt_tc_stats_batch stats_batch[])
1478 {
1479         struct bnxt_tc_info *tc_info = bp->tc_info;
1480         int rc, i;
1481 
1482         rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
1483         if (rc)
1484                 return rc;
1485 
1486         for (i = 0; i < num_flows; i++) {
1487                 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1488                 struct bnxt_tc_flow *flow = &flow_node->flow;
1489 
1490                 spin_lock(&flow->stats_lock);
1491                 bnxt_flow_stats_accum(tc_info, &flow->stats,
1492                                       &stats_batch[i].hw_stats);
1493                 if (flow->stats.packets != flow->prev_stats.packets)
1494                         flow->lastused = jiffies;
1495                 spin_unlock(&flow->stats_lock);
1496         }
1497 
1498         return 0;
1499 }
1500 
1501 static int
1502 bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
1503                               struct bnxt_tc_stats_batch stats_batch[],
1504                               int *num_flows)
1505 {
1506         struct bnxt_tc_info *tc_info = bp->tc_info;
1507         struct rhashtable_iter *iter = &tc_info->iter;
1508         void *flow_node;
1509         int rc, i;
1510 
1511         rhashtable_walk_start(iter);
1512 
1513         rc = 0;
1514         for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
1515                 flow_node = rhashtable_walk_next(iter);
1516                 if (IS_ERR(flow_node)) {
1517                         i = 0;
1518                         if (PTR_ERR(flow_node) == -EAGAIN) {
1519                                 continue;
1520                         } else {
1521                                 rc = PTR_ERR(flow_node);
1522                                 goto done;
1523                         }
1524                 }
1525 
1526                 /* No more flows */
1527                 if (!flow_node)
1528                         goto done;
1529 
1530                 stats_batch[i].flow_node = flow_node;
1531         }
1532 done:
1533         rhashtable_walk_stop(iter);
1534         *num_flows = i;
1535         return rc;
1536 }
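
/*
 * rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when the table is
 * resized mid-walk; the walker then restarts from the beginning, so
 * the code above resets the batch instead of failing.  Any other error
 * aborts the walk.
 */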
1537 
1538 void bnxt_tc_flow_stats_work(struct bnxt *bp)
1539 {
1540         struct bnxt_tc_info *tc_info = bp->tc_info;
1541         int num_flows, rc;
1542 
1543         num_flows = atomic_read(&tc_info->flow_table.nelems);
1544         if (!num_flows)
1545                 return;
1546 
1547         rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
1548 
1549         for (;;) {
1550                 rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
1551                                                    &num_flows);
1552                 if (rc) {
1553                         if (rc == -EAGAIN)
1554                                 continue;
1555                         break;
1556                 }
1557 
1558                 if (!num_flows)
1559                         break;
1560 
1561                 bnxt_tc_flow_stats_batch_update(bp, num_flows,
1562                                                 tc_info->stats_batch);
1563         }
1564 
1565         rhashtable_walk_exit(&tc_info->iter);
1566 }
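
/*
 * bnxt_tc_flow_stats_work() above is invoked periodically from the
 * driver's service task (see bnxt.c) and is a no-op while no flows are
 * offloaded.
 */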
1567 
1568 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
1569                          struct flow_cls_offload *cls_flower)
1570 {
1571         switch (cls_flower->command) {
1572         case FLOW_CLS_REPLACE:
1573                 return bnxt_tc_add_flow(bp, src_fid, cls_flower);
1574         case FLOW_CLS_DESTROY:
1575                 return bnxt_tc_del_flow(bp, cls_flower);
1576         case FLOW_CLS_STATS:
1577                 return bnxt_tc_get_flow_stats(bp, cls_flower);
1578         default:
1579                 return -EOPNOTSUPP;
1580         }
1581 }
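
/*
 * Illustrative sketch (not part of this file): roughly how a flow-block
 * callback would dispatch into bnxt_tc_setup_flower().  The real wiring
 * lives in bnxt.c's ndo_setup_tc path; ex_block_cb and the use of
 * bp->pf.fw_fid as src_fid are assumptions for illustration only.
 */
static inline int ex_block_cb(enum tc_setup_type type, void *type_data,
                              void *cb_priv)
{
        struct bnxt *bp = cb_priv;

        if (type != TC_SETUP_CLSFLOWER)
                return -EOPNOTSUPP;

        /* type_data is a struct flow_cls_offload for flower commands */
        return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
}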
1582 
1583 static const struct rhashtable_params bnxt_tc_flow_ht_params = {
1584         .head_offset = offsetof(struct bnxt_tc_flow_node, node),
1585         .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
1586         .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
1587         .automatic_shrinking = true
1588 };
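
/*
 * Keying the flow table on the TC cookie is what lets the REPLACE,
 * DESTROY and STATS handlers above look a flow up directly from
 * tc_flow_cmd->cookie.
 */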
1589 
1590 static const struct rhashtable_params bnxt_tc_l2_ht_params = {
1591         .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1592         .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1593         .key_len = BNXT_TC_L2_KEY_LEN,
1594         .automatic_shrinking = true
1595 };
1596 
1597 static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
1598         .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1599         .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1600         .key_len = BNXT_TC_L2_KEY_LEN,
1601         .automatic_shrinking = true
1602 };
1603 
1604 static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
1605         .head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
1606         .key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
1607         .key_len = sizeof(struct ip_tunnel_key),
1608         .automatic_shrinking = true
1609 };
1610 
1611 /* convert counter width in bits to a mask */
1612 #define mask(width)             ((u64)~0 >> (64 - (width)))
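/* e.g. mask(28) == 0x0fffffff, mask(36) == 0xfffffffff */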
1613 
1614 int bnxt_init_tc(struct bnxt *bp)
1615 {
1616         struct bnxt_tc_info *tc_info;
1617         int rc;
1618 
1619         if (bp->hwrm_spec_code < 0x10803) {
1620                 netdev_warn(bp->dev,
1621                             "Firmware does not support TC flower offload.\n");
1622                 return -ENOTSUPP;
1623         }
1624 
1625         tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
1626         if (!tc_info)
1627                 return -ENOMEM;
1628         mutex_init(&tc_info->lock);
1629 
1630         /* Counter widths are programmed by FW */
1631         tc_info->bytes_mask = mask(36);
1632         tc_info->packets_mask = mask(28);
1633 
1634         tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
1635         rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
1636         if (rc)
1637                 goto free_tc_info;
1638 
1639         tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
1640         rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
1641         if (rc)
1642                 goto destroy_flow_table;
1643 
1644         tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
1645         rc = rhashtable_init(&tc_info->decap_l2_table,
1646                              &tc_info->decap_l2_ht_params);
1647         if (rc)
1648                 goto destroy_l2_table;
1649 
1650         tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
1651         rc = rhashtable_init(&tc_info->decap_table,
1652                              &tc_info->decap_ht_params);
1653         if (rc)
1654                 goto destroy_decap_l2_table;
1655 
1656         tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
1657         rc = rhashtable_init(&tc_info->encap_table,
1658                              &tc_info->encap_ht_params);
1659         if (rc)
1660                 goto destroy_decap_table;
1661 
1662         tc_info->enabled = true;
1663         bp->dev->hw_features |= NETIF_F_HW_TC;
1664         bp->dev->features |= NETIF_F_HW_TC;
1665         bp->tc_info = tc_info;
1666         return 0;
1667 
1668 destroy_decap_table:
1669         rhashtable_destroy(&tc_info->decap_table);
1670 destroy_decap_l2_table:
1671         rhashtable_destroy(&tc_info->decap_l2_table);
1672 destroy_l2_table:
1673         rhashtable_destroy(&tc_info->l2_table);
1674 destroy_flow_table:
1675         rhashtable_destroy(&tc_info->flow_table);
1676 free_tc_info:
1677         kfree(tc_info);
1678         return rc;
1679 }
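
/*
 * Setting NETIF_F_HW_TC in both hw_features and features enables flower
 * offload by default while still letting the administrator toggle it
 * via "ethtool -K <dev> hw-tc-offload on|off".
 */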
1680 
1681 void bnxt_shutdown_tc(struct bnxt *bp)
1682 {
1683         struct bnxt_tc_info *tc_info = bp->tc_info;
1684 
1685         if (!bnxt_tc_flower_enabled(bp))
1686                 return;
1687 
1688         rhashtable_destroy(&tc_info->flow_table);
1689         rhashtable_destroy(&tc_info->l2_table);
1690         rhashtable_destroy(&tc_info->decap_l2_table);
1691         rhashtable_destroy(&tc_info->decap_table);
1692         rhashtable_destroy(&tc_info->encap_table);
1693         kfree(tc_info);
1694         bp->tc_info = NULL;
1695 }
