This source file includes the following definitions:
- cbs_child_enqueue
- cbs_enqueue_offload
- cbs_enqueue_soft
- cbs_enqueue
- timediff_to_credits
- delay_from_credits
- credits_from_len
- cbs_child_dequeue
- cbs_dequeue_soft
- cbs_dequeue_offload
- cbs_dequeue
- cbs_disable_offload
- cbs_enable_offload
- cbs_set_port_rate
- cbs_dev_notifier
- cbs_change
- cbs_init
- cbs_destroy
- cbs_dump
- cbs_dump_class
- cbs_graft
- cbs_leaf
- cbs_find
- cbs_walk
- cbs_module_init
- cbs_module_exit
// SPDX-License-Identifier: GPL-2.0
/* net/sched/sch_cbs.c  Credit Based Shaper
 *
 * Implements the shaping part of the Credit Based Shaper (CBS)
 * algorithm, defined by IEEE 802.1Q-2014 Section 8.6.8.2.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

static LIST_HEAD(cbs_list);
static DEFINE_SPINLOCK(cbs_list_lock);

/* Userspace passes the slopes in kbit/s; internally they are kept in
 * bytes/s, so 1 kbit/s == 125 bytes/s.
 */
#define BYTES_PER_KBIT (1000LL / 8)

struct cbs_sched_data {
        bool offload;
        int queue;
        atomic64_t port_rate; /* in bytes/s */
        s64 last; /* timestamp in ns */
        s64 credits; /* in bytes */
        s32 locredit; /* in bytes */
        s32 hicredit; /* in bytes */
        s64 sendslope; /* in bytes/s */
        s64 idleslope; /* in bytes/s */
        struct qdisc_watchdog watchdog;
        int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free);
        struct sk_buff *(*dequeue)(struct Qdisc *sch);
        struct Qdisc *qdisc;
        struct list_head cbs_list;
};

static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct Qdisc *child,
                             struct sk_buff **to_free)
{
        unsigned int len = qdisc_pkt_len(skb);
        int err;

        err = child->ops->enqueue(skb, child, to_free);
        if (err != NET_XMIT_SUCCESS)
                return err;

        sch->qstats.backlog += len;
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,
                               struct sk_buff **to_free)
{
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct Qdisc *qdisc = q->qdisc;

        return cbs_child_enqueue(skb, sch, qdisc, to_free);
}

static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
                            struct sk_buff **to_free)
{
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct Qdisc *qdisc = q->qdisc;

        if (sch->q.qlen == 0 && q->credits > 0) {
                /* We need to start accumulating credits from the moment
                 * that the first packet of this flow is enqueued, so any
                 * credit left over from an earlier busy period is dropped.
                 */
                q->credits = 0;
                q->last = ktime_get_ns();
        }

        return cbs_child_enqueue(skb, sch, qdisc, to_free);
}

static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
{
        struct cbs_sched_data *q = qdisc_priv(sch);

        return q->enqueue(skb, sch, to_free);
}

/* timediff is in ns, slope is in bytes/s */
static s64 timediff_to_credits(s64 timediff, s64 slope)
{
        return div64_s64(timediff * slope, NSEC_PER_SEC);
}

static s64 delay_from_credits(s64 credits, s64 slope)
{
        if (unlikely(slope == 0))
                return S64_MAX;

        return div64_s64(-credits * NSEC_PER_SEC, slope);
}

static s64 credits_from_len(unsigned int len, s64 slope, s64 port_rate)
{
        if (unlikely(port_rate == 0))
                return S64_MAX;

        return div64_s64(len * slope, port_rate);
}
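/* Worked example for the three helpers above, with illustrative numbers
 * (not mandated by anything in this file): on a 100 Mbit/s link,
 * port_rate = 100000 * BYTES_PER_KBIT = 12500000 bytes/s. Reserving
 * 20 Mbit/s gives idleslope = 2500000 bytes/s and sendslope =
 * idleslope - port_rate = -10000000 bytes/s. Then:
 *
 *      timediff_to_credits(1000000, 2500000)       == 2500
 *              (1 ms of throttled time earns 2500 bytes of credit)
 *      credits_from_len(1500, -10000000, 12500000) == -1200
 *              (a 1500 byte frame costs 1200 bytes of credit)
 *      delay_from_credits(-1200, 2500000)          == 480000
 *              (a 1200 byte deficit throttles the queue for 480 us)
 */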
static struct sk_buff *cbs_child_dequeue(struct Qdisc *sch, struct Qdisc *child)
{
        struct sk_buff *skb;

        skb = child->ops->dequeue(child);
        if (!skb)
                return NULL;

        qdisc_qstats_backlog_dec(sch, skb);
        qdisc_bstats_update(sch, skb);
        sch->q.qlen--;

        return skb;
}

static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
{
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct Qdisc *qdisc = q->qdisc;
        s64 now = ktime_get_ns();
        struct sk_buff *skb;
        s64 credits;
        int len;

        /* The previous packet is still being sent */
        if (now < q->last) {
                qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
                return NULL;
        }
        if (q->credits < 0) {
                credits = timediff_to_credits(now - q->last, q->idleslope);

                credits = q->credits + credits;
                q->credits = min_t(s64, credits, q->hicredit);

                if (q->credits < 0) {
                        s64 delay;

                        delay = delay_from_credits(q->credits, q->idleslope);
                        qdisc_watchdog_schedule_ns(&q->watchdog, now + delay);

                        q->last = now;

                        return NULL;
                }
        }
        skb = cbs_child_dequeue(sch, qdisc);
        if (!skb)
                return NULL;

        len = qdisc_pkt_len(skb);

        /* As sendslope is a negative number, this will decrease the
         * amount of q->credits.
         */
        credits = credits_from_len(len, q->sendslope,
                                   atomic64_read(&q->port_rate));
        credits += q->credits;

        q->credits = max_t(s64, credits, q->locredit);
        /* Estimate of the transmission of the last byte of the packet in ns */
        if (unlikely(atomic64_read(&q->port_rate) == 0))
                q->last = now;
        else
                q->last = now + div64_s64(len * NSEC_PER_SEC,
                                          atomic64_read(&q->port_rate));

        return skb;
}
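/* Continuing the illustrative numbers from the helper example above:
 * sending one 1500 byte frame from zero credit pushes q->last
 * 1500 * NSEC_PER_SEC / port_rate == 120 us into the future and leaves
 * q->credits == -1200. Once the frame is out, the deficit takes
 * delay_from_credits(-1200, idleslope) == 480 us to recover, so dequeues
 * in that window return NULL and arm the watchdog instead of sending.
 */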
static struct sk_buff *cbs_dequeue_offload(struct Qdisc *sch)
{
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct Qdisc *qdisc = q->qdisc;

        return cbs_child_dequeue(sch, qdisc);
}

static struct sk_buff *cbs_dequeue(struct Qdisc *sch)
{
        struct cbs_sched_data *q = qdisc_priv(sch);

        return q->dequeue(sch);
}

static const struct nla_policy cbs_policy[TCA_CBS_MAX + 1] = {
        [TCA_CBS_PARMS] = { .len = sizeof(struct tc_cbs_qopt) },
};

static void cbs_disable_offload(struct net_device *dev,
                                struct cbs_sched_data *q)
{
        struct tc_cbs_qopt_offload cbs = { };
        const struct net_device_ops *ops;
        int err;

        if (!q->offload)
                return;

        q->enqueue = cbs_enqueue_soft;
        q->dequeue = cbs_dequeue_soft;

        ops = dev->netdev_ops;
        if (!ops->ndo_setup_tc)
                return;

        cbs.queue = q->queue;
        cbs.enable = 0;

        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
        if (err < 0)
                pr_warn("Couldn't disable CBS offload for queue %d\n",
                        cbs.queue);
}

static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
                              const struct tc_cbs_qopt *opt,
                              struct netlink_ext_ack *extack)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        struct tc_cbs_qopt_offload cbs = { };
        int err;

        if (!ops->ndo_setup_tc) {
                NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload");
                return -EOPNOTSUPP;
        }

        cbs.queue = q->queue;

        cbs.enable = 1;
        cbs.hicredit = opt->hicredit;
        cbs.locredit = opt->locredit;
        cbs.idleslope = opt->idleslope;
        cbs.sendslope = opt->sendslope;

        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Specified device failed to setup cbs hardware offload");
                return err;
        }

        q->enqueue = cbs_enqueue_offload;
        q->dequeue = cbs_dequeue_offload;

        return 0;
}
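/* For reference, a minimal sketch of the driver side of this contract.
 * Only TC_SETUP_QDISC_CBS and the tc_cbs_qopt_offload layout come from
 * this file; the driver name and the foo_hw_cbs_*() helpers are
 * hypothetical:
 *
 *      static int foo_setup_tc(struct net_device *dev,
 *                              enum tc_setup_type type, void *type_data)
 *      {
 *              struct tc_cbs_qopt_offload *cbs = type_data;
 *
 *              if (type != TC_SETUP_QDISC_CBS)
 *                      return -EOPNOTSUPP;
 *
 *              if (!cbs->enable)
 *                      return foo_hw_cbs_disable(dev, cbs->queue);
 *
 *              return foo_hw_cbs_enable(dev, cbs->queue, cbs->idleslope,
 *                                       cbs->sendslope, cbs->hicredit,
 *                                       cbs->locredit);
 *      }
 */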
static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
{
        struct ethtool_link_ksettings ecmd;
        int speed = SPEED_10;
        s64 port_rate; /* s64 so multi-gigabit speeds don't overflow an int */
        int err;

        err = __ethtool_get_link_ksettings(dev, &ecmd);
        if (err < 0)
                goto skip;

        if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
                speed = ecmd.base.speed;

skip:
        port_rate = speed * 1000 * BYTES_PER_KBIT;

        atomic64_set(&q->port_rate, port_rate);
        netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
                   dev->name, (long long)atomic64_read(&q->port_rate),
                   ecmd.base.speed);
}
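/* In cbs_set_port_rate() above, ecmd.base.speed follows the ethtool
 * convention of Mbit/s. Worked example: a link at SPEED_1000 yields
 * port_rate = 1000 * 1000 * BYTES_PER_KBIT == 125000000 bytes/s, while
 * the SPEED_10 fallback (no usable link info) gives 1250000 bytes/s.
 */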
static int cbs_dev_notifier(struct notifier_block *nb, unsigned long event,
                            void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct cbs_sched_data *q;
        struct net_device *qdev;
        bool found = false;

        ASSERT_RTNL();

        if (event != NETDEV_UP && event != NETDEV_CHANGE)
                return NOTIFY_DONE;

        spin_lock(&cbs_list_lock);
        list_for_each_entry(q, &cbs_list, cbs_list) {
                qdev = qdisc_dev(q->qdisc);
                if (qdev == dev) {
                        found = true;
                        break;
                }
        }
        spin_unlock(&cbs_list_lock);

        if (found)
                cbs_set_port_rate(dev, q);

        return NOTIFY_DONE;
}

static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
                      struct netlink_ext_ack *extack)
{
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct nlattr *tb[TCA_CBS_MAX + 1];
        struct tc_cbs_qopt *qopt;
        int err;

        err = nla_parse_nested_deprecated(tb, TCA_CBS_MAX, opt, cbs_policy,
                                          extack);
        if (err < 0)
                return err;

        if (!tb[TCA_CBS_PARMS]) {
                NL_SET_ERR_MSG(extack, "Missing CBS parameters which are mandatory");
                return -EINVAL;
        }

        qopt = nla_data(tb[TCA_CBS_PARMS]);

        if (!qopt->offload) {
                cbs_set_port_rate(dev, q);
                cbs_disable_offload(dev, q);
        } else {
                err = cbs_enable_offload(dev, q, qopt, extack);
                if (err < 0)
                        return err;
        }

        /* Everything went OK, save the parameters used. */
        q->hicredit = qopt->hicredit;
        q->locredit = qopt->locredit;
        q->idleslope = qopt->idleslope * BYTES_PER_KBIT;
        q->sendslope = qopt->sendslope * BYTES_PER_KBIT;
        q->offload = qopt->offload;

        return 0;
}
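/* Userspace view of the parameters parsed above, as a hedged example
 * with iproute2's tc(8); the device, parent handle and values are
 * illustrative (idleslope/sendslope in kbit/s, hicredit/locredit in
 * bytes, matching the 100 Mbit/s example used earlier):
 *
 *      tc qdisc replace dev eth0 parent 100:1 cbs \
 *              idleslope 20000 sendslope -80000 \
 *              hicredit 300 locredit -1200 offload 1
 */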
static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
{
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);

        if (!opt) {
                NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
                return -EINVAL;
        }

        q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                     sch->handle, extack);
        if (!q->qdisc)
                return -ENOMEM;

        spin_lock(&cbs_list_lock);
        list_add(&q->cbs_list, &cbs_list);
        spin_unlock(&cbs_list_lock);

        qdisc_hash_add(q->qdisc, false);

        q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);

        q->enqueue = cbs_enqueue_soft;
        q->dequeue = cbs_dequeue_soft;

        qdisc_watchdog_init(&q->watchdog, sch);

        return cbs_change(sch, opt, extack);
}

static void cbs_destroy(struct Qdisc *sch)
{
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);

        /* Nothing to do if we couldn't create the underlying qdisc */
        if (!q->qdisc)
                return;

        qdisc_watchdog_cancel(&q->watchdog);
        cbs_disable_offload(dev, q);

        spin_lock(&cbs_list_lock);
        list_del(&q->cbs_list);
        spin_unlock(&cbs_list_lock);

        qdisc_put(q->qdisc);
}

static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct tc_cbs_qopt opt = { };
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;

        opt.hicredit = q->hicredit;
        opt.locredit = q->locredit;
        opt.sendslope = div64_s64(q->sendslope, BYTES_PER_KBIT);
        opt.idleslope = div64_s64(q->idleslope, BYTES_PER_KBIT);
        opt.offload = q->offload;

        if (nla_put(skb, TCA_CBS_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int cbs_dump_class(struct Qdisc *sch, unsigned long cl,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct cbs_sched_data *q = qdisc_priv(sch);

        if (cl != 1 || !q->qdisc)
                return -ENOENT;

        tcm->tcm_handle |= TC_H_MIN(1);
        tcm->tcm_info = q->qdisc->handle;

        return 0;
}

static int cbs_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct cbs_sched_data *q = qdisc_priv(sch);

        if (!new) {
                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                        sch->handle, NULL);
                if (!new)
                        new = &noop_qdisc;
        }

        *old = qdisc_replace(sch, new, &q->qdisc);
        return 0;
}

static struct Qdisc *cbs_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct cbs_sched_data *q = qdisc_priv(sch);

        return q->qdisc;
}

static unsigned long cbs_find(struct Qdisc *sch, u32 classid)
{
        return 1;
}

static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        if (!walker->stop) {
                if (walker->count >= walker->skip) {
                        if (walker->fn(sch, 1, walker) < 0) {
                                walker->stop = 1;
                                return;
                        }
                }
                walker->count++;
        }
}

static const struct Qdisc_class_ops cbs_class_ops = {
        .graft = cbs_graft,
        .leaf = cbs_leaf,
        .find = cbs_find,
        .walk = cbs_walk,
        .dump = cbs_dump_class,
};

static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
        .id = "cbs",
        .cl_ops = &cbs_class_ops,
        .priv_size = sizeof(struct cbs_sched_data),
        .enqueue = cbs_enqueue,
        .dequeue = cbs_dequeue,
        .peek = qdisc_peek_dequeued,
        .init = cbs_init,
        .reset = qdisc_reset_queue,
        .destroy = cbs_destroy,
        .change = cbs_change,
        .dump = cbs_dump,
        .owner = THIS_MODULE,
};

static struct notifier_block cbs_device_notifier = {
        .notifier_call = cbs_dev_notifier,
};

static int __init cbs_module_init(void)
{
        int err;

        err = register_netdevice_notifier(&cbs_device_notifier);
        if (err)
                return err;

        err = register_qdisc(&cbs_qdisc_ops);
        if (err)
                unregister_netdevice_notifier(&cbs_device_notifier);

        return err;
}

static void __exit cbs_module_exit(void)
{
        unregister_qdisc(&cbs_qdisc_ops);
        unregister_netdevice_notifier(&cbs_device_notifier);
}
module_init(cbs_module_init)
module_exit(cbs_module_exit)
MODULE_LICENSE("GPL");