root/drivers/net/can/rx-offload.c

DEFINITIONS

This source file includes the following definitions.
  1. can_rx_offload_get_cb
  2. can_rx_offload_le
  3. can_rx_offload_inc
  4. can_rx_offload_napi_poll
  5. __skb_queue_add_sort
  6. can_rx_offload_compare
  7. can_rx_offload_offload_one
  8. can_rx_offload_irq_offload_timestamp
  9. can_rx_offload_irq_offload_fifo
  10. can_rx_offload_queue_sorted
  11. can_rx_offload_get_echo_skb
  12. can_rx_offload_queue_tail
  13. can_rx_offload_init_queue
  14. can_rx_offload_add_timestamp
  15. can_rx_offload_add_fifo
  16. can_rx_offload_enable
  17. can_rx_offload_del
  18. can_rx_offload_reset

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

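/* Per-skb scratch data, kept in skb->cb: the timestamp provided by the
 * driver's mailbox_read() callback, used to sort frames into reception
 * order.
 */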
struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

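/* Mailboxes may be scanned in ascending or descending order, depending
 * on the controller; offload->inc selects the direction. These two
 * helpers hide the direction from the iteration loop in
 * can_rx_offload_irq_offload_timestamp().
 */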
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

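/* NAPI poll handler: push queued skbs into the network stack, at most
 * @quota per poll, and reschedule if frames were queued concurrently.
 */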
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}

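/* Insert @new into @head sorted by @compare, walking backwards from the
 * tail: frames mostly arrive nearly in order, so the insertion point is
 * usually found within a few steps.
 */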
static inline void __skb_queue_add_sort(struct sk_buff_head *head,
					struct sk_buff *new,
					int (*compare)(struct sk_buff *a,
						       struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}


static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return the result as int, to keep the
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of the mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb = NULL, *skb_error = NULL;
	struct can_rx_offload_cb *cb;
	struct can_frame *cf;
	int ret;

	if (likely(skb_queue_len(&offload->skb_queue) <
		   offload->skb_queue_len_max)) {
		skb = alloc_can_skb(offload->dev, &cf);
		if (unlikely(!skb))
			skb_error = ERR_PTR(-ENOMEM);	/* skb alloc failed */
	} else {
		skb_error = ERR_PTR(-ENOBUFS);		/* skb_queue is full */
	}

	/* If queue is full or skb not available, drop by reading into
	 * overflow buffer.
	 */
	if (unlikely(skb_error)) {
		struct can_frame cf_overflow;
		u32 timestamp;

		ret = offload->mailbox_read(offload, &cf_overflow,
					    &timestamp, n);

		/* Mailbox was empty. */
		if (unlikely(!ret))
			return NULL;

		/* Mailbox has been read and we're dropping it or
		 * there was a problem reading the mailbox.
		 *
		 * Increment error counters in any case.
		 */
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		/* There was a problem reading the mailbox, propagate
		 * error value.
		 */
		if (unlikely(ret < 0))
			return ERR_PTR(ret);

		return skb_error;
	}

	cb = can_rx_offload_get_cb(skb);
	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);

	/* Mailbox was empty. */
	if (unlikely(!ret)) {
		kfree_skb(skb);
		return NULL;
	}

	/* There was a problem reading the mailbox, propagate error value. */
	if (unlikely(ret < 0)) {
		kfree_skb(skb);

		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return ERR_PTR(ret);
	}

	/* Mailbox was read. */
	return skb;
}

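/* A sketch of the timestamp-offload IRQ path, as a driver might use it
 * (the priv layout and the pending-mailbox register read below are
 * hypothetical, not part of this API):
 *
 *	u64 pending = my_read_pending_mailboxes(priv);
 *
 *	if (pending)
 *		can_rx_offload_irq_offload_timestamp(&priv->offload,
 *						     pending);
 */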
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		queue_len = skb_queue_len(&offload->skb_queue);
		if (queue_len > offload->skb_queue_len_max / 8)
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

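/* In FIFO mode the controller exposes a single read slot (mailbox 0),
 * which is drained here until it reports empty. A sketch of the
 * IRQ-handler side (the status flag below is hypothetical):
 *
 *	if (irq_status & MY_FIFO_FRAME_AVAILABLE)
 *		can_rx_offload_irq_offload_fifo(&priv->offload);
 */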
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

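/* Queue a driver-generated skb (e.g. an error or state-change frame)
 * with a timestamp, so it is delivered in order relative to the
 * offloaded RX frames.
 */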
int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);

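/* TX-complete handling: fetch the echo skb for mailbox @idx and queue
 * it sorted by @timestamp, so locally sent frames appear in order with
 * received ones. Returns the frame length for byte accounting.
 */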
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	u8 len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);

int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	can_rx_offload_reset(offload);
	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

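/* Probe-time setup for timestamp-based offload, as a driver might do
 * it (a minimal sketch; the mailbox range and the my_mailbox_read()
 * callback are hypothetical):
 *
 *	priv->offload.mailbox_read = my_mailbox_read;
 *	priv->offload.mb_first = 8;
 *	priv->offload.mb_last = 63;
 *	err = can_rx_offload_add_timestamp(dev, &priv->offload);
 */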
int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first >= BITS_PER_LONG_LONG ||
	    offload->mb_last >= BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload,
			    unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	can_rx_offload_reset(offload);
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);

void can_rx_offload_reset(struct can_rx_offload *offload)
{
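	/* Currently a no-op. */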
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);
