root/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c

DEFINITIONS

This source file includes the following definitions:
  1. nfp_fl_get_next_pkt_number
  2. nfp_fl_increment_version
  3. nfp_fl_lag_group_create
  4. nfp_fl_lag_find_group_for_master_with_lag
  5. nfp_flower_lag_populate_pre_action
  6. nfp_flower_lag_get_output_id
  7. nfp_fl_lag_config_group
  8. nfp_fl_lag_do_work
  9. nfp_fl_lag_put_unprocessed
  10. nfp_fl_send_unprocessed
  11. nfp_flower_lag_unprocessed_msg
  12. nfp_fl_lag_schedule_group_remove
  13. nfp_fl_lag_schedule_group_delete
  14. nfp_fl_lag_changeupper_event
  15. nfp_fl_lag_changels_event
  16. nfp_flower_lag_netdev_event
  17. nfp_flower_lag_reset
  18. nfp_flower_lag_init
  19. nfp_flower_lag_cleanup

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include "main.h"

/* LAG group config flags. */
#define NFP_FL_LAG_LAST                 BIT(1)
#define NFP_FL_LAG_FIRST                BIT(2)
#define NFP_FL_LAG_DATA                 BIT(3)
#define NFP_FL_LAG_XON                  BIT(4)
#define NFP_FL_LAG_SYNC                 BIT(5)
#define NFP_FL_LAG_SWITCH               BIT(6)
#define NFP_FL_LAG_RESET                BIT(7)

/* LAG port state flags. */
#define NFP_PORT_LAG_LINK_UP            BIT(0)
#define NFP_PORT_LAG_TX_ENABLED         BIT(1)
#define NFP_PORT_LAG_CHANGED            BIT(2)

enum nfp_fl_lag_batch {
        NFP_FL_LAG_BATCH_FIRST,
        NFP_FL_LAG_BATCH_MEMBER,
        NFP_FL_LAG_BATCH_FINISHED
};

/**
 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
 * @ctrl_flags: Configuration flags
 * @reserved:   Reserved for future use
 * @ttl:        Time to live of packet - host always sets to 0xff
 * @pkt_number: Config message packet number - increment for each message
 * @batch_ver:  Batch version of messages - increment for each batch of messages
 * @group_id:   Group ID applicable
 * @group_inst: Group instance number - increment when group is reused
 * @members:    Array of 32-bit words listing all active group members
 */
struct nfp_flower_cmsg_lag_config {
        u8 ctrl_flags;
        u8 reserved[2];
        u8 ttl;
        __be32 pkt_number;
        __be32 batch_ver;
        __be32 group_id;
        __be32 group_inst;
        __be32 members[];
};

/**
 * struct nfp_fl_lag_group - list entry for each LAG group
 * @group_id:           Assigned group ID for host/kernel sync
 * @group_inst:         Group instance in case of ID reuse
 * @list:               List entry
 * @master_ndev:        Group master Netdev
 * @dirty:              Marked if the group needs to be synced to HW
 * @offloaded:          Marked if the group is currently offloaded to NIC
 * @to_remove:          Marked if the group should be removed from NIC
 * @to_destroy:         Marked if the group should be removed from driver
 * @slave_cnt:          Number of slaves in group
 */
struct nfp_fl_lag_group {
        unsigned int group_id;
        u8 group_inst;
        struct list_head list;
        struct net_device *master_ndev;
        bool dirty;
        bool offloaded;
        bool to_remove;
        bool to_destroy;
        unsigned int slave_cnt;
};

#define NFP_FL_LAG_PKT_NUMBER_MASK      GENMASK(30, 0)
#define NFP_FL_LAG_VERSION_MASK         GENMASK(22, 0)
#define NFP_FL_LAG_HOST_TTL             0xff

/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID              0
#define NFP_FL_LAG_GROUP_MIN            1 /* ID 0 reserved */
#define NFP_FL_LAG_GROUP_MAX            32 /* IDs 1 to 31 are valid */

/* wait for more config */
#define NFP_FL_LAG_DELAY                (msecs_to_jiffies(2))

#define NFP_FL_LAG_RETRANS_LIMIT        100 /* max retrans cmsgs to store */

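/* Return the next config message packet number, wrapping within the
 * 31-bit space reserved for it.
 */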
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
        lag->pkt_num++;
        lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;

        return lag->pkt_num;
}

static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
        /* LSB is not considered by firmware so add 2 for each increment. */
        lag->batch_ver += 2;
        lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;

        /* Zero is reserved by firmware. */
        if (!lag->batch_ver)
                lag->batch_ver += 2;
}

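/* Allocate a free group ID and add a new group, marked dirty, to the
 * LAG group list. Called with lag->lock held.
 */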
static struct nfp_fl_lag_group *
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
        struct nfp_fl_lag_group *group;
        struct nfp_flower_priv *priv;
        int id;

        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

        id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
                            NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
        if (id < 0) {
                nfp_flower_cmsg_warn(priv->app,
                                     "No more bonding groups available\n");
                return ERR_PTR(id);
        }

        group = kmalloc(sizeof(*group), GFP_KERNEL);
        if (!group) {
                ida_simple_remove(&lag->ida_handle, id);
                return ERR_PTR(-ENOMEM);
        }

        group->group_id = id;
        group->master_ndev = master;
        group->dirty = true;
        group->offloaded = false;
        group->to_remove = false;
        group->to_destroy = false;
        group->slave_cnt = 0;
        group->group_inst = ++lag->global_inst;
        list_add_tail(&group->list, &lag->group_list);

        return group;
}

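/* Find the group whose master netdev matches the given bond master.
 * Returns NULL if the master is not tracked. Called with lag->lock held.
 */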
static struct nfp_fl_lag_group *
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
                                          struct net_device *master)
{
        struct nfp_fl_lag_group *entry;

        if (!master)
                return NULL;

        list_for_each_entry(entry, &lag->group_list, list)
                if (entry->master_ndev == master)
                        return entry;

        return NULL;
}

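/* Populate a pre-LAG action with the group ID, group instance and
 * current batch version for a flow that outputs to a bond.
 */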
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
                                       struct net_device *master,
                                       struct nfp_fl_pre_lag *pre_act,
                                       struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_lag_group *group = NULL;
        __be32 temp_vers;

        mutex_lock(&priv->nfp_lag.lock);
        group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
                                                          master);
        if (!group) {
                mutex_unlock(&priv->nfp_lag.lock);
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
                return -ENOENT;
        }

        pre_act->group_id = cpu_to_be16(group->group_id);
        temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
                                NFP_FL_PRE_LAG_VER_OFF);
        memcpy(pre_act->lag_version, &temp_vers, 3);
        pre_act->instance = group->group_inst;
        mutex_unlock(&priv->nfp_lag.lock);

        return 0;
}

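/* Return the group ID in use for a bond master, or -ENOENT if the
 * master is not currently tracked.
 */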
int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_lag_group *group = NULL;
        int group_id = -ENOENT;

        mutex_lock(&priv->nfp_lag.lock);
        group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
                                                          master);
        if (group)
                group_id = group->group_id;
        mutex_unlock(&priv->nfp_lag.lock);

        return group_id;
}

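/* Build and send one LAG config cmsg. A member count of zero deletes
 * the group on hardware; a finished batch is signalled with the
 * reserved SYNC group ID. Updates *batch to track batch state.
 */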
static int
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
                        struct net_device **active_members,
                        unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
{
        struct nfp_flower_cmsg_lag_config *cmsg_payload;
        struct nfp_flower_priv *priv;
        unsigned long int flags;
        unsigned int size, i;
        struct sk_buff *skb;

        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
        size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
        skb = nfp_flower_cmsg_alloc(priv->app, size,
                                    NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
                                    GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        cmsg_payload = nfp_flower_cmsg_get_data(skb);
        flags = 0;

        /* Increment batch version for each new batch of config messages. */
        if (*batch == NFP_FL_LAG_BATCH_FIRST) {
                flags |= NFP_FL_LAG_FIRST;
                nfp_fl_increment_version(lag);
                *batch = NFP_FL_LAG_BATCH_MEMBER;
        }

        /* If it is a reset msg then it is also the end of the batch. */
        if (lag->rst_cfg) {
                flags |= NFP_FL_LAG_RESET;
                *batch = NFP_FL_LAG_BATCH_FINISHED;
        }

        /* To signal the end of a batch, both the switch and last flags are set
         * and the reserved SYNC group ID is used.
         */
        if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
                flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
                lag->rst_cfg = false;
                cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
                cmsg_payload->group_inst = 0;
        } else {
                cmsg_payload->group_id = cpu_to_be32(group->group_id);
                cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
        }

        cmsg_payload->reserved[0] = 0;
        cmsg_payload->reserved[1] = 0;
        cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
        cmsg_payload->ctrl_flags = flags;
        cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
        cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));

        for (i = 0; i < member_cnt; i++)
                cmsg_payload->members[i] =
                        cpu_to_be32(nfp_repr_get_port_id(active_members[i]));

        nfp_ctrl_tx(priv->app->ctrl, skb);
        return 0;
}

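/* Delayed work that syncs dirty or to-be-removed groups to firmware as
 * a single batch of config messages, rescheduling itself if a message
 * could not be sent.
 */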
static void nfp_fl_lag_do_work(struct work_struct *work)
{
        enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
        struct nfp_fl_lag_group *entry, *storage;
        struct delayed_work *delayed_work;
        struct nfp_flower_priv *priv;
        struct nfp_fl_lag *lag;
        int err;

        delayed_work = to_delayed_work(work);
        lag = container_of(delayed_work, struct nfp_fl_lag, work);
        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

        mutex_lock(&lag->lock);
        list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
                struct net_device *iter_netdev, **acti_netdevs;
                struct nfp_flower_repr_priv *repr_priv;
                int active_count = 0, slaves = 0;
                struct nfp_repr *repr;
                unsigned long *flags;

                if (entry->to_remove) {
                        /* Active count of 0 deletes group on hw. */
                        err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
                                                      &batch);
                        if (!err) {
                                entry->to_remove = false;
                                entry->offloaded = false;
                        } else {
                                nfp_flower_cmsg_warn(priv->app,
                                                     "group delete failed\n");
                                schedule_delayed_work(&lag->work,
                                                      NFP_FL_LAG_DELAY);
                                continue;
                        }

                        if (entry->to_destroy) {
                                ida_simple_remove(&lag->ida_handle,
                                                  entry->group_id);
                                list_del(&entry->list);
                                kfree(entry);
                        }
                        continue;
                }

                acti_netdevs = kmalloc_array(entry->slave_cnt,
                                             sizeof(*acti_netdevs), GFP_KERNEL);
                if (!acti_netdevs) {
                        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
                        continue;
                }

                /* Include sanity check in the loop. It may be that a bond has
                 * changed between processing the last notification and the
                 * work queue triggering. If the number of slaves has changed
                 * or it now contains netdevs that cannot be offloaded, ignore
                 * the group until pending notifications are processed.
                 */
                rcu_read_lock();
                for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
                        if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
                                slaves = 0;
                                break;
                        }

                        repr = netdev_priv(iter_netdev);

                        if (repr->app != priv->app) {
                                slaves = 0;
                                break;
                        }

                        slaves++;
                        if (slaves > entry->slave_cnt)
                                break;

                        /* Check the ports for state changes. */
                        repr_priv = repr->app_priv;
                        flags = &repr_priv->lag_port_flags;

                        if (*flags & NFP_PORT_LAG_CHANGED) {
                                *flags &= ~NFP_PORT_LAG_CHANGED;
                                entry->dirty = true;
                        }

                        if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
                            (*flags & NFP_PORT_LAG_LINK_UP))
                                acti_netdevs[active_count++] = iter_netdev;
                }
                rcu_read_unlock();

                if (slaves != entry->slave_cnt || !entry->dirty) {
                        kfree(acti_netdevs);
                        continue;
                }

                err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
                                              active_count, &batch);
                if (!err) {
                        entry->offloaded = true;
                        entry->dirty = false;
                } else {
                        nfp_flower_cmsg_warn(priv->app,
                                             "group offload failed\n");
                        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
                }

                kfree(acti_netdevs);
        }

        /* End the config batch if at least one packet has been batched. */
        if (batch == NFP_FL_LAG_BATCH_MEMBER) {
                batch = NFP_FL_LAG_BATCH_FINISHED;
                err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
                if (err)
                        nfp_flower_cmsg_warn(priv->app,
                                             "group batch end cmsg failed\n");
        }

        mutex_unlock(&lag->lock);
}

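/* Store a cmsg for later retransmission, rejecting invalid group IDs
 * and enforcing the retransmission storage limit.
 */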
static int
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
        struct nfp_flower_cmsg_lag_config *cmsg_payload;

        cmsg_payload = nfp_flower_cmsg_get_data(skb);
        if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
                return -EINVAL;

        /* Drop cmsg retrans if storage limit is exceeded to prevent
         * overloading. If the fw notices that expected messages have not been
         * received in a given time block, it will request a full resync.
         */
        if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
                return -ENOSPC;

        __skb_queue_tail(&lag->retrans_skbs, skb);

        return 0;
}

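/* Flush all stored cmsgs back out to the firmware. */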
static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
        struct nfp_flower_priv *priv;
        struct sk_buff *skb;

        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

        while ((skb = __skb_dequeue(&lag->retrans_skbs)))
                nfp_ctrl_tx(priv->app->ctrl, skb);
}

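/* Process a LAG cmsg handed back by the firmware. Returns true if the
 * skb was queued for retransmission and so remains owned by this code.
 */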
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_flower_cmsg_lag_config *cmsg_payload;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_lag_group *group_entry;
        unsigned long int flags;
        bool store_skb = false;
        int err;

        cmsg_payload = nfp_flower_cmsg_get_data(skb);
        flags = cmsg_payload->ctrl_flags;

        /* Note the intentional fall through below. If DATA and XON are both
         * set, the message will be stored and sent again with the rest of the
         * unprocessed messages list.
         */

        /* Store */
        if (flags & NFP_FL_LAG_DATA)
                if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
                        store_skb = true;

        /* Send stored */
        if (flags & NFP_FL_LAG_XON)
                nfp_fl_send_unprocessed(&priv->nfp_lag);

        /* Resend all */
        if (flags & NFP_FL_LAG_SYNC) {
                /* To resend all config:
                 * 1) Clear all unprocessed messages
                 * 2) Mark all groups dirty
                 * 3) Reset NFP group config
                 * 4) Schedule a LAG config update
                 */

                __skb_queue_purge(&priv->nfp_lag.retrans_skbs);

                mutex_lock(&priv->nfp_lag.lock);
                list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
                                    list)
                        group_entry->dirty = true;

                err = nfp_flower_lag_reset(&priv->nfp_lag);
                if (err)
                        nfp_flower_cmsg_warn(priv->app,
                                             "mem err in group reset msg\n");
                mutex_unlock(&priv->nfp_lag.lock);

                schedule_delayed_work(&priv->nfp_lag.work, 0);
        }

        return store_skb;
}

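/* Mark a group for removal from hardware and kick the delayed work. */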
static void
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
                                 struct nfp_fl_lag_group *group)
{
        group->to_remove = true;

        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

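/* On unregistration of a bond master, mark its group for removal from
 * both hardware and the driver, then kick the delayed work.
 */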
static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
                                 struct net_device *master)
{
        struct nfp_fl_lag_group *group;
        struct nfp_flower_priv *priv;

        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

        if (!netif_is_bond_master(master))
                return;

        mutex_lock(&lag->lock);
        group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
        if (!group) {
                mutex_unlock(&lag->lock);
                nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
                         netdev_name(master));
                return;
        }

        group->to_remove = true;
        group->to_destroy = true;
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

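/* Handle a NETDEV_CHANGEUPPER event on a bond: create or refresh its
 * group if all slaves are reprs of this app and the TX mode can be
 * offloaded, otherwise schedule removal of any existing offload.
 */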
static int
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
                             struct netdev_notifier_changeupper_info *info)
{
        struct net_device *upper = info->upper_dev, *iter_netdev;
        struct netdev_lag_upper_info *lag_upper_info;
        struct nfp_fl_lag_group *group;
        struct nfp_flower_priv *priv;
        unsigned int slave_count = 0;
        bool can_offload = true;
        struct nfp_repr *repr;

        if (!netif_is_lag_master(upper))
                return 0;

        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

        rcu_read_lock();
        for_each_netdev_in_bond_rcu(upper, iter_netdev) {
                if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
                        can_offload = false;
                        break;
                }
                repr = netdev_priv(iter_netdev);

                /* Ensure all ports are created by the same app/on same card. */
                if (repr->app != priv->app) {
                        can_offload = false;
                        break;
                }

                slave_count++;
        }
        rcu_read_unlock();

        lag_upper_info = info->upper_info;

        /* Firmware supports active/backup and L3/L4 hash bonds. */
        if (lag_upper_info &&
            lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
            (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
             (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
              lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
              lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
                can_offload = false;
                nfp_flower_cmsg_warn(priv->app,
                                     "Unable to offload tx_type %u hash %u\n",
                                     lag_upper_info->tx_type,
                                     lag_upper_info->hash_type);
        }

        mutex_lock(&lag->lock);
        group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);

        if (slave_count == 0 || !can_offload) {
                /* Cannot offload the group - remove if previously offloaded. */
                if (group && group->offloaded)
                        nfp_fl_lag_schedule_group_remove(lag, group);

                mutex_unlock(&lag->lock);
                return 0;
        }

        if (!group) {
                group = nfp_fl_lag_group_create(lag, upper);
                if (IS_ERR(group)) {
                        mutex_unlock(&lag->lock);
                        return PTR_ERR(group);
                }
        }

        group->dirty = true;
        group->slave_cnt = slave_count;

        /* Group may have been on queue for removal but is now offloadable. */
        group->to_remove = false;
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
        return 0;
}

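/* Handle a NETDEV_CHANGELOWERSTATE event on a repr acting as a LAG
 * port: record its link and TX state in the port flags and kick the
 * delayed work.
 */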
static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
                          struct netdev_notifier_changelowerstate_info *info)
{
        struct netdev_lag_lower_state_info *lag_lower_info;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_flower_priv *priv;
        struct nfp_repr *repr;
        unsigned long *flags;

        if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
                return;

        lag_lower_info = info->lower_state_info;
        if (!lag_lower_info)
                return;

        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
        repr = netdev_priv(netdev);

        /* Verify that the repr is associated with this app. */
        if (repr->app != priv->app)
                return;

        repr_priv = repr->app_priv;
        flags = &repr_priv->lag_port_flags;

        mutex_lock(&lag->lock);
        if (lag_lower_info->link_up)
                *flags |= NFP_PORT_LAG_LINK_UP;
        else
                *flags &= ~NFP_PORT_LAG_LINK_UP;

        if (lag_lower_info->tx_enabled)
                *flags |= NFP_PORT_LAG_TX_ENABLED;
        else
                *flags &= ~NFP_PORT_LAG_TX_ENABLED;

        *flags |= NFP_PORT_LAG_CHANGED;
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

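/* Notifier entry point - dispatch netdev events relevant to LAG offload. */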
int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
                                struct net_device *netdev,
                                unsigned long event, void *ptr)
{
        struct nfp_fl_lag *lag = &priv->nfp_lag;
        int err;

        switch (event) {
        case NETDEV_CHANGEUPPER:
                err = nfp_fl_lag_changeupper_event(lag, ptr);
                if (err)
                        return NOTIFY_BAD;
                return NOTIFY_OK;
        case NETDEV_CHANGELOWERSTATE:
                nfp_fl_lag_changels_event(lag, netdev, ptr);
                return NOTIFY_OK;
        case NETDEV_UNREGISTER:
                nfp_fl_lag_schedule_group_delete(lag, netdev);
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

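/* Send a batch-closing reset cmsg, instructing the firmware to clear
 * its LAG config.
 */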
int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
        enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;

        lag->rst_cfg = true;
        return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}

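/* Initialise LAG state: delayed work, group list, lock, ID allocator,
 * retransmission queue and the first valid batch version.
 */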
void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
        INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
        INIT_LIST_HEAD(&lag->group_list);
        mutex_init(&lag->lock);
        ida_init(&lag->ida_handle);

        __skb_queue_head_init(&lag->retrans_skbs);

        /* 0 is a reserved batch version so increment to first valid value. */
        nfp_fl_increment_version(lag);
}

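/* Tear down LAG state: cancel outstanding work, purge stored cmsgs and
 * free all remaining groups.
 */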
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
        struct nfp_fl_lag_group *entry, *storage;

        cancel_delayed_work_sync(&lag->work);

        __skb_queue_purge(&lag->retrans_skbs);

        /* Remove all groups. */
        mutex_lock(&lag->lock);
        list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
                list_del(&entry->list);
                kfree(entry);
        }
        mutex_unlock(&lag->lock);
        mutex_destroy(&lag->lock);
        ida_destroy(&lag->ida_handle);
}
