root/drivers/net/ethernet/netronome/nfp/flower/cmsg.c

DEFINITIONS

This source file includes the following definitions.
  1. nfp_flower_cmsg_get_hdr
  2. nfp_flower_cmsg_alloc
  3. nfp_flower_cmsg_mac_repr_start
  4. nfp_flower_cmsg_mac_repr_add
  5. nfp_flower_cmsg_portmod
  6. nfp_flower_cmsg_portreify
  7. nfp_flower_process_mtu_ack
  8. nfp_flower_cmsg_portmod_rx
  9. nfp_flower_cmsg_portreify_rx
  10. nfp_flower_cmsg_merge_hint_rx
  11. nfp_flower_cmsg_process_one_rx
  12. nfp_flower_cmsg_process_rx
  13. nfp_flower_queue_ctl_msg
  14. nfp_flower_cmsg_rx

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"

static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
        return (struct nfp_flower_cmsg_hdr *)skb->data;
}

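/* Allocate a control message buffer, account for the flower cmsg header and
 * fill in its pad, version and type fields.  Returns NULL on allocation
 * failure.
 */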
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
                      enum nfp_flower_cmsg_type_port type, gfp_t flag)
{
        struct nfp_flower_cmsg_hdr *ch;
        struct sk_buff *skb;

        size += NFP_FLOWER_CMSG_HLEN;

        skb = nfp_app_ctrl_msg_alloc(app, size, flag);
        if (!skb)
                return NULL;

        ch = nfp_flower_cmsg_get_hdr(skb);
        ch->pad = 0;
        ch->version = NFP_FLOWER_CMSG_VER1;
        ch->type = type;
        skb_put(skb, size);

        return skb;
}

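/* Build a MAC_REPR control message with room for @num_ports port entries;
 * the individual entries are filled in by nfp_flower_cmsg_mac_repr_add().
 */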
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
{
        struct nfp_flower_cmsg_mac_repr *msg;
        struct sk_buff *skb;

        skb = nfp_flower_cmsg_alloc(app, struct_size(msg, ports, num_ports),
                                    NFP_FLOWER_CMSG_TYPE_MAC_REPR, GFP_KERNEL);
        if (!skb)
                return NULL;

        msg = nfp_flower_cmsg_get_data(skb);
        memset(msg->reserved, 0, sizeof(msg->reserved));
        msg->num_ports = num_ports;

        return skb;
}

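/* Fill in one port entry (index, NBI, NBI port and physical port) of a
 * previously started MAC_REPR message.
 */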
void
nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
                             unsigned int nbi, unsigned int nbi_port,
                             unsigned int phys_port)
{
        struct nfp_flower_cmsg_mac_repr *msg;

        msg = nfp_flower_cmsg_get_data(skb);
        msg->ports[idx].idx = idx;
        msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
        msg->ports[idx].nbi_port = nbi_port;
        msg->ports[idx].phys_port = phys_port;
}

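/* Send a PORT_MOD message for the representor, advertising its carrier
 * state and MTU.  When @mtu_only is set the message is flagged as an
 * MTU-change-only request.
 */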
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok,
                            unsigned int mtu, bool mtu_only)
{
        struct nfp_flower_cmsg_portmod *msg;
        struct sk_buff *skb;

        skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
                                    NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
        msg->reserved = 0;
        msg->info = carrier_ok;

        if (mtu_only)
                msg->info |= NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY;

        msg->mtu = cpu_to_be16(mtu);

        nfp_ctrl_tx(repr->app->ctrl, skb);

        return 0;
}

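/* Notify the firmware whether the representor for this port exists. */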
int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
{
        struct nfp_flower_cmsg_portreify *msg;
        struct sk_buff *skb;

        skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
                                    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
                                    GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
        msg->reserved = 0;
        msg->info = cpu_to_be16(exists);

        nfp_ctrl_tx(repr->app->ctrl, skb);

        return 0;
}

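/* Check whether a PORT_MOD message is the ack for an MTU-only change we
 * requested.  If so, record the ack and wake up the waiter; return true
 * so the caller can consume the skb without queuing it.
 */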
static bool
nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_flower_cmsg_portmod *msg;

        msg = nfp_flower_cmsg_get_data(skb);

        if (!(msg->info & NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY))
                return false;

        spin_lock_bh(&app_priv->mtu_conf.lock);
        if (!app_priv->mtu_conf.requested_val ||
            app_priv->mtu_conf.portnum != be32_to_cpu(msg->portnum) ||
            be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) {
                /* Not an ack for requested MTU change. */
                spin_unlock_bh(&app_priv->mtu_conf.lock);
                return false;
        }

        app_priv->mtu_conf.ack = true;
        app_priv->mtu_conf.requested_val = 0;
        wake_up(&app_priv->mtu_conf.wait_q);
        spin_unlock_bh(&app_priv->mtu_conf.lock);

        return true;
}

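/* Handle a PORT_MOD notification from the firmware: update the carrier
 * state of the matching representor and, if a non-zero MTU is reported,
 * apply it under RTNL.
 */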
static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_flower_cmsg_portmod *msg;
        struct net_device *netdev;
        bool link;

        msg = nfp_flower_cmsg_get_data(skb);
        link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;

        rtnl_lock();
        rcu_read_lock();
        netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
        rcu_read_unlock();
        if (!netdev) {
                nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
                                     be32_to_cpu(msg->portnum));
                rtnl_unlock();
                return;
        }

        if (link) {
                u16 mtu = be16_to_cpu(msg->mtu);

                netif_carrier_on(netdev);

                /* An MTU of 0 from the firmware should be ignored */
                if (mtu)
                        dev_set_mtu(netdev, mtu);
        } else {
                netif_carrier_off(netdev);
        }
        rtnl_unlock();
}

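/* Count a PORT_REIFY reply for a known port and wake up the thread
 * waiting for all representors to be reified.
 */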
static void
nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_cmsg_portreify *msg;
        bool exists;

        msg = nfp_flower_cmsg_get_data(skb);

        rcu_read_lock();
        exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
        rcu_read_unlock();
        if (!exists) {
                nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
                                     be32_to_cpu(msg->portnum));
                return;
        }

        atomic_inc(&priv->reify_replies);
        wake_up(&priv->reify_wait_queue);
}

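/* Handle a merge hint from the firmware: validate that it references
 * exactly two known host flow contexts and try to merge the two
 * offloaded flows.
 */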
static void
nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
{
        unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
        struct nfp_flower_cmsg_merge_hint *msg;
        struct nfp_fl_payload *sub_flows[2];
        int err, i, flow_cnt;

        msg = nfp_flower_cmsg_get_data(skb);
        /* msg->count starts at 0 and always assumes at least 1 entry. */
        flow_cnt = msg->count + 1;

        if (msg_len < struct_size(msg, flow, flow_cnt)) {
                nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %zd\n",
                                     msg_len, struct_size(msg, flow, flow_cnt));
                return;
        }

        if (flow_cnt != 2) {
                nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n",
                                     flow_cnt);
                return;
        }

        rtnl_lock();
        for (i = 0; i < flow_cnt; i++) {
                u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);

                sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
                if (!sub_flows[i]) {
                        nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
                        goto err_rtnl_unlock;
                }
        }

        err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]);
        /* Only warn on memory fail. Hint veto will not break functionality. */
        if (err == -ENOMEM)
                nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");

err_rtnl_unlock:
        rtnl_unlock();
}

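/* Dispatch one queued control message based on its type.  The skb is
 * freed here unless the LAG handler stored it for later processing.
 */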
static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_flower_cmsg_hdr *cmsg_hdr;
        enum nfp_flower_cmsg_type_port type;
        bool skb_stored = false;

        cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

        type = cmsg_hdr->type;
        switch (type) {
        case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
                nfp_flower_cmsg_portmod_rx(app, skb);
                break;
        case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
                if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) {
                        nfp_flower_cmsg_merge_hint_rx(app, skb);
                        break;
                }
                goto err_default;
        case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
                nfp_tunnel_request_route(app, skb);
                break;
        case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
                nfp_tunnel_keep_alive(app, skb);
                break;
        case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
                nfp_flower_stats_rlim_reply(app, skb);
                break;
        case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
                if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
                        skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
                        break;
                }
                /* fall through */
        default:
err_default:
                nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
                                     type);
                goto out;
        }

        if (!skb_stored)
                dev_consume_skb_any(skb);
        return;
out:
        dev_kfree_skb_any(skb);
}

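/* Work handler: splice the high and low priority cmsg queues into one
 * list and process every queued control message.
 */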
void nfp_flower_cmsg_process_rx(struct work_struct *work)
{
        struct sk_buff_head cmsg_joined;
        struct nfp_flower_priv *priv;
        struct sk_buff *skb;

        priv = container_of(work, struct nfp_flower_priv, cmsg_work);
        skb_queue_head_init(&cmsg_joined);

        spin_lock_bh(&priv->cmsg_skbs_high.lock);
        skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
        spin_unlock_bh(&priv->cmsg_skbs_high.lock);

        spin_lock_bh(&priv->cmsg_skbs_low.lock);
        skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
        spin_unlock_bh(&priv->cmsg_skbs_low.lock);

        while ((skb = __skb_dequeue(&cmsg_joined)))
                nfp_flower_cmsg_process_one_rx(priv->app, skb);
}

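/* Queue a control message for the work handler, using the high priority
 * queue for PORT_MOD messages.  Messages are dropped if the target queue
 * is already full.
 */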
static void
nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
{
        struct nfp_flower_priv *priv = app->priv;
        struct sk_buff_head *skb_head;

        if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
                skb_head = &priv->cmsg_skbs_high;
        else
                skb_head = &priv->cmsg_skbs_low;

        if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
                nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
                dev_kfree_skb_any(skb);
                return;
        }

        skb_queue_tail(skb_head, skb);
        schedule_work(&priv->cmsg_work);
}

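/* Entry point for control messages received from the firmware.  Flow
 * stats, MTU acks, tunnel neighbour acks and PORT_REIFY replies are
 * handled inline; everything else is queued for the cmsg workqueue.
 */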
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_flower_cmsg_hdr *cmsg_hdr;

        cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

        if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
                nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
                                     cmsg_hdr->version);
                dev_kfree_skb_any(skb);
                return;
        }

        if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
                /* We need to deal with stats updates from HW asap */
                nfp_flower_rx_flow_stats(app, skb);
                dev_consume_skb_any(skb);
        } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD &&
                   nfp_flower_process_mtu_ack(app, skb)) {
                /* Handle MTU acks outside wq to prevent RTNL conflict. */
                dev_consume_skb_any(skb);
        } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
                /* Acks from the NFP that the route is added - ignore. */
                dev_consume_skb_any(skb);
        } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
                /* Handle REIFY acks outside wq to prevent RTNL conflict. */
                nfp_flower_cmsg_portreify_rx(app, skb);
                dev_consume_skb_any(skb);
        } else {
                nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
        }
}
