drivers/net/ethernet/netronome/nfp/flower/qos_conf.c


DEFINITIONS

This source file includes the following definitions.
  1. nfp_flower_install_rate_limiter
  2. nfp_flower_remove_rate_limiter
  3. nfp_flower_stats_rlim_reply
  4. nfp_flower_stats_rlim_request
  5. nfp_flower_stats_rlim_request_all
  6. update_stats_cache
  7. nfp_flower_stats_rate_limiter
  8. nfp_flower_qos_init
  9. nfp_flower_qos_cleanup
  10. nfp_flower_setup_qos_offload

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/math64.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_port.h"

#define NFP_FL_QOS_UPDATE               msecs_to_jiffies(1000)

struct nfp_police_cfg_head {
        __be32 flags_opts;
        __be32 port;
};

/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Flag options                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Port Ingress                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Token Bucket Peak                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Token Bucket Committed                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         Peak Burst Size                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Committed Burst Size                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Peak Information Rate                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                    Committed Information Rate                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
struct nfp_police_config {
        struct nfp_police_cfg_head head;
        __be32 bkt_tkn_p;
        __be32 bkt_tkn_c;
        __be32 pbs;
        __be32 cbs;
        __be32 pir;
        __be32 cir;
};

struct nfp_police_stats_reply {
        struct nfp_police_cfg_head head;
        __be64 pass_bytes;
        __be64 pass_pkts;
        __be64 drop_bytes;
        __be64 drop_pkts;
};

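/* Offload a TC matchall police action as a hardware rate limiter on a
 * VF representor. The request is only accepted for a non-shared block
 * on a VF port, with a single, highest priority police action. The
 * police rate and burst are packed into a trTCM config (peak equal to
 * committed) and sent as a QOS_MOD control message; the periodic stats
 * poll is started when the first rate limiter is installed.
 */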
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
                                struct tc_cls_matchall_offload *flow,
                                struct netlink_ext_ack *extack)
{
        struct flow_action_entry *action = &flow->rule->action.entries[0];
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_police_config *config;
        struct nfp_repr *repr;
        struct sk_buff *skb;
        u32 netdev_port_id;
        u64 burst, rate;

        if (!nfp_netdev_is_nfp_repr(netdev)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
                return -EOPNOTSUPP;
        }
        repr = netdev_priv(netdev);
        repr_priv = repr->app_priv;

        if (repr_priv->block_shared) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
                return -EOPNOTSUPP;
        }

        if (repr->port->type != NFP_PORT_VF_PORT) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
                return -EOPNOTSUPP;
        }

        if (!flow_offload_has_one_action(&flow->rule->action)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
                return -EOPNOTSUPP;
        }

        if (flow->common.prio != 1) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
                return -EOPNOTSUPP;
        }

        if (action->id != FLOW_ACTION_POLICE) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
                return -EOPNOTSUPP;
        }

        rate = action->police.rate_bytes_ps;
        burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst),
                        PSCHED_TICKS_PER_SEC);
        netdev_port_id = nfp_repr_get_port_id(netdev);

        skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
                                    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        config = nfp_flower_cmsg_get_data(skb);
        memset(config, 0, sizeof(struct nfp_police_config));
        config->head.port = cpu_to_be32(netdev_port_id);
        config->bkt_tkn_p = cpu_to_be32(burst);
        config->bkt_tkn_c = cpu_to_be32(burst);
        config->pbs = cpu_to_be32(burst);
        config->cbs = cpu_to_be32(burst);
        config->pir = cpu_to_be32(rate);
        config->cir = cpu_to_be32(rate);
        nfp_ctrl_tx(repr->app->ctrl, skb);

        repr_priv->qos_table.netdev_port_id = netdev_port_id;
        fl_priv->qos_rate_limiters++;
        if (fl_priv->qos_rate_limiters == 1)
                schedule_delayed_work(&fl_priv->qos_stats_work,
                                      NFP_FL_QOS_UPDATE);

        return 0;
}

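/* Remove the rate limiter for a VF representor. Clears the cached qos
 * state, stops the stats poll once no rate limiters remain, and sends
 * a QOS_DEL control message for the representor's port.
 */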
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
                               struct tc_cls_matchall_offload *flow,
                               struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_police_config *config;
        struct nfp_repr *repr;
        struct sk_buff *skb;
        u32 netdev_port_id;

        if (!nfp_netdev_is_nfp_repr(netdev)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
                return -EOPNOTSUPP;
        }
        repr = netdev_priv(netdev);

        netdev_port_id = nfp_repr_get_port_id(netdev);
        repr_priv = repr->app_priv;

        if (!repr_priv->qos_table.netdev_port_id) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
                return -EOPNOTSUPP;
        }

        skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
                                    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* Clear all qos associated data for this interface */
        memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
        fl_priv->qos_rate_limiters--;
        if (!fl_priv->qos_rate_limiters)
                cancel_delayed_work_sync(&fl_priv->qos_stats_work);

        config = nfp_flower_cmsg_get_data(skb);
        memset(config, 0, sizeof(struct nfp_police_config));
        config->head.port = cpu_to_be32(netdev_port_id);
        nfp_ctrl_tx(repr->app->ctrl, skb);

        return 0;
}

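/* Handle a policer stats reply cmsg. Look up the representor by port
 * id and refresh its cached counters (passed plus dropped packets and
 * bytes) under the qos stats lock.
 */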
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_police_stats_reply *msg;
        struct nfp_stat_pair *curr_stats;
        struct nfp_stat_pair *prev_stats;
        struct net_device *netdev;
        struct nfp_repr *repr;
        u32 netdev_port_id;

        msg = nfp_flower_cmsg_get_data(skb);
        netdev_port_id = be32_to_cpu(msg->head.port);
        rcu_read_lock();
        netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
        if (!netdev)
                goto exit_unlock_rcu;

        repr = netdev_priv(netdev);
        repr_priv = repr->app_priv;
        curr_stats = &repr_priv->qos_table.curr_stats;
        prev_stats = &repr_priv->qos_table.prev_stats;

        spin_lock_bh(&fl_priv->qos_stats_lock);
        curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
                           be64_to_cpu(msg->drop_pkts);
        curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
                            be64_to_cpu(msg->drop_bytes);

        if (!repr_priv->qos_table.last_update) {
                prev_stats->pkts = curr_stats->pkts;
                prev_stats->bytes = curr_stats->bytes;
        }

        repr_priv->qos_table.last_update = jiffies;
        spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
        rcu_read_unlock();
}

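/* Send a QOS_STATS request cmsg for a single port. */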
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
                              u32 netdev_port_id)
{
        struct nfp_police_cfg_head *head;
        struct sk_buff *skb;

        skb = nfp_flower_cmsg_alloc(fl_priv->app,
                                    sizeof(struct nfp_police_cfg_head),
                                    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
                                    GFP_ATOMIC);
        if (!skb)
                return;

        head = nfp_flower_cmsg_get_data(skb);
        memset(head, 0, sizeof(struct nfp_police_cfg_head));
        head->port = cpu_to_be32(netdev_port_id);

        nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}

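/* Request updated policer stats for every VF representor that has a
 * rate limiter configured.
 */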
static void
nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
{
        struct nfp_reprs *repr_set;
        int i;

        rcu_read_lock();
        repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
        if (!repr_set)
                goto exit_unlock_rcu;

        for (i = 0; i < repr_set->num_reprs; i++) {
                struct net_device *netdev;

                netdev = rcu_dereference(repr_set->reprs[i]);
                if (netdev) {
                        struct nfp_repr *priv = netdev_priv(netdev);
                        struct nfp_flower_repr_priv *repr_priv;
                        u32 netdev_port_id;

                        repr_priv = priv->app_priv;
                        netdev_port_id = repr_priv->qos_table.netdev_port_id;
                        if (!netdev_port_id)
                                continue;

                        nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
                }
        }

exit_unlock_rcu:
        rcu_read_unlock();
}

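/* Delayed work handler: poll policer stats for all configured rate
 * limiters and reschedule at the NFP_FL_QOS_UPDATE interval.
 */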
static void update_stats_cache(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct nfp_flower_priv *fl_priv;

        delayed_work = to_delayed_work(work);
        fl_priv = container_of(delayed_work, struct nfp_flower_priv,
                               qos_stats_work);

        nfp_flower_stats_rlim_request_all(fl_priv);
        schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}

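/* Report rate limiter stats to TC. Return the delta between the cached
 * current counters and the previously reported snapshot, then advance
 * the snapshot.
 */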
static int
nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
                              struct tc_cls_matchall_offload *flow,
                              struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_stat_pair *curr_stats;
        struct nfp_stat_pair *prev_stats;
        u64 diff_bytes, diff_pkts;
        struct nfp_repr *repr;

        if (!nfp_netdev_is_nfp_repr(netdev)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
                return -EOPNOTSUPP;
        }
        repr = netdev_priv(netdev);

        repr_priv = repr->app_priv;
        if (!repr_priv->qos_table.netdev_port_id) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
                return -EOPNOTSUPP;
        }

        spin_lock_bh(&fl_priv->qos_stats_lock);
        curr_stats = &repr_priv->qos_table.curr_stats;
        prev_stats = &repr_priv->qos_table.prev_stats;
        diff_pkts = curr_stats->pkts - prev_stats->pkts;
        diff_bytes = curr_stats->bytes - prev_stats->bytes;
        prev_stats->pkts = curr_stats->pkts;
        prev_stats->bytes = curr_stats->bytes;
        spin_unlock_bh(&fl_priv->qos_stats_lock);

        flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
                          repr_priv->qos_table.last_update);
        return 0;
}

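/* Initialise the qos stats lock and the periodic stats update work;
 * the work is cancelled again in nfp_flower_qos_cleanup().
 */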
void nfp_flower_qos_init(struct nfp_app *app)
{
        struct nfp_flower_priv *fl_priv = app->priv;

        spin_lock_init(&fl_priv->qos_stats_lock);
        INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}

void nfp_flower_qos_cleanup(struct nfp_app *app)
{
        struct nfp_flower_priv *fl_priv = app->priv;

        cancel_delayed_work_sync(&fl_priv->qos_stats_work);
}

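/* Entry point for TC matchall (qos) offload requests. Check that the
 * loaded firmware advertises VF rate limiting and dispatch on the
 * matchall command.
 */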
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
                                 struct tc_cls_matchall_offload *flow)
{
        struct netlink_ext_ack *extack = flow->common.extack;
        struct nfp_flower_priv *fl_priv = app->priv;

        if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
                return -EOPNOTSUPP;
        }

        switch (flow->command) {
        case TC_CLSMATCHALL_REPLACE:
                return nfp_flower_install_rate_limiter(app, netdev, flow,
                                                       extack);
        case TC_CLSMATCHALL_DESTROY:
                return nfp_flower_remove_rate_limiter(app, netdev, flow,
                                                      extack);
        case TC_CLSMATCHALL_STATS:
                return nfp_flower_stats_rate_limiter(app, netdev, flow,
                                                     extack);
        default:
                return -EOPNOTSUPP;
        }
}
